grsecurity-2.9.1-3.8.6-201304091939.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..b47493f 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -185,6 +204,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -194,6 +215,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -203,7 +225,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -213,8 +238,12 @@ series
176 setup
177 setup.bin
178 setup.elf
179+signing_key*
180+size_overflow_hash.h
181 sImage
182+slabinfo
183 sm_tbl*
184+sortextable
185 split-include
186 syscalltab.h
187 tables.c
188@@ -224,6 +253,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192+user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196@@ -235,13 +265,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200+vdsox32.lds
201+vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208+vmlinux.bin.bz2
209 vmlinux.lds
210+vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214@@ -249,9 +283,12 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218+utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222+x509*
223 zImage*
224 zconf.hash.c
225+zconf.lex.c
226 zoffset.h
227diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
228index 986614d..e8bfedc 100644
229--- a/Documentation/kernel-parameters.txt
230+++ b/Documentation/kernel-parameters.txt
231@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
232 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
233 Default: 1024
234
235+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses the GID that is
236+ exempt from grsecurity's /proc restrictions
237+
238+
239 hashdist= [KNL,NUMA] Large hashes allocated during boot
240 are distributed across NUMA nodes. Defaults on
241 for 64-bit NUMA, off otherwise.
242@@ -2121,6 +2125,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
243 the specified number of seconds. This is to be used if
244 your oopses keep scrolling off the screen.
245
246+ pax_nouderef [X86] disables UDEREF. Mainly needed in virtualization
247+ environments that do not cope well with the
248+ expand-down segment used by UDEREF on X86-32 or with
249+ the frequent page table updates on X86-64.
250+
251+ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
252+
253+ pax_extra_latent_entropy
254+ Enable a very simple form of latent entropy extraction
255+ from the first 4GB of memory as the bootmem allocator
256+ passes the memory pages to the buddy allocator.
257+
258 pcbit= [HW,ISDN]
259
260 pcd. [PARIDE]
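
The grsec_proc_gid=, pax_softmode= and pax_extra_latent_entropy parameters documented
above are ordinary kernel command-line options. As a reminder of how such boot-time
flags are typically wired up, here is a minimal sketch using the kernel's __setup()
helper; the handler and flag names are illustrative assumptions, not the code this
patch actually installs:

    #include <linux/init.h>
    #include <linux/kernel.h>

    static int pax_softmode_flag;   /* hypothetical flag, for illustration only */

    /* Parse "pax_softmode=0" / "pax_softmode=1" from the kernel command line. */
    static int __init setup_pax_softmode_example(char *str)
    {
            if (kstrtoint(str, 10, &pax_softmode_flag))
                    pr_warn("pax_softmode: invalid value '%s'\n", str);
            return 1;       /* non-zero: the parameter was consumed */
    }
    __setup("pax_softmode=", setup_pax_softmode_example);

Booting with e.g. pax_softmode=1 on the kernel command line would then set the flag
before init starts.
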
261diff --git a/Makefile b/Makefile
262index 10075d6..dcb3e14 100644
263--- a/Makefile
264+++ b/Makefile
265@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
266
267 HOSTCC = gcc
268 HOSTCXX = g++
269-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
270-HOSTCXXFLAGS = -O2
271+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
272+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
273+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
274
275 # Decide whether to build built-in, modular, or both.
276 # Normally, just do built-in.
277@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
278 # Rules shared between *config targets and build targets
279
280 # Basic helpers built in scripts/
281-PHONY += scripts_basic
282-scripts_basic:
283+PHONY += scripts_basic gcc-plugins
284+scripts_basic: gcc-plugins
285 $(Q)$(MAKE) $(build)=scripts/basic
286 $(Q)rm -f .tmp_quiet_recordmcount
287
288@@ -575,6 +576,65 @@ else
289 KBUILD_CFLAGS += -O2
290 endif
291
292+ifndef DISABLE_PAX_PLUGINS
293+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
294+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
295+else
296+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
297+endif
298+ifneq ($(PLUGINCC),)
299+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
300+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
301+endif
302+ifdef CONFIG_PAX_MEMORY_STACKLEAK
303+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
304+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
305+endif
306+ifdef CONFIG_KALLOCSTAT_PLUGIN
307+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
308+endif
309+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
310+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
311+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
312+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
313+endif
314+ifdef CONFIG_CHECKER_PLUGIN
315+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
316+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
317+endif
318+endif
319+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
320+ifdef CONFIG_PAX_SIZE_OVERFLOW
321+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
322+endif
323+ifdef CONFIG_PAX_LATENT_ENTROPY
324+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
325+endif
326+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
327+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
328+endif
329+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
330+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
331+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
332+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
333+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
334+ifeq ($(KBUILD_EXTMOD),)
335+gcc-plugins:
336+ $(Q)$(MAKE) $(build)=tools/gcc
337+else
338+gcc-plugins: ;
339+endif
340+else
341+gcc-plugins:
342+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
343+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
344+else
345+ $(Q)echo "warning: your gcc version does not support plugins; you should upgrade to at least gcc 4.5"
346+endif
347+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
348+endif
349+endif
350+
351 include $(srctree)/arch/$(SRCARCH)/Makefile
352
353 ifdef CONFIG_READABLE_ASM
354@@ -731,7 +791,7 @@ export mod_sign_cmd
355
356
357 ifeq ($(KBUILD_EXTMOD),)
358-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
359+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
360
361 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
362 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
363@@ -778,6 +838,8 @@ endif
364
365 # The actual objects are generated when descending,
366 # make sure no implicit rule kicks in
367+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
368+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
369 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
370
371 # Handle descending into subdirectories listed in $(vmlinux-dirs)
372@@ -787,7 +849,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
373 # Error messages still appears in the original language
374
375 PHONY += $(vmlinux-dirs)
376-$(vmlinux-dirs): prepare scripts
377+$(vmlinux-dirs): gcc-plugins prepare scripts
378 $(Q)$(MAKE) $(build)=$@
379
380 # Store (new) KERNELRELASE string in include/config/kernel.release
381@@ -831,6 +893,7 @@ prepare0: archprepare FORCE
382 $(Q)$(MAKE) $(build)=.
383
384 # All the preparing..
385+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
386 prepare: prepare0
387
388 # Generate some files
389@@ -938,6 +1001,8 @@ all: modules
390 # using awk while concatenating to the final file.
391
392 PHONY += modules
393+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
394+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
395 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
396 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
397 @$(kecho) ' Building modules, stage 2.';
398@@ -953,7 +1018,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
399
400 # Target to prepare building external modules
401 PHONY += modules_prepare
402-modules_prepare: prepare scripts
403+modules_prepare: gcc-plugins prepare scripts
404
405 # Target to install modules
406 PHONY += modules_install
407@@ -1019,7 +1084,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
408 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
409 signing_key.priv signing_key.x509 x509.genkey \
410 extra_certificates signing_key.x509.keyid \
411- signing_key.x509.signer
412+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
413
414 # clean - Delete most, but leave enough to build external modules
415 #
416@@ -1059,6 +1124,7 @@ distclean: mrproper
417 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
418 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
419 -o -name '.*.rej' \
420+ -o -name '.*.rej' -o -name '*.so' \
421 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
422 -type f -print | xargs rm -f
423
424@@ -1219,6 +1285,8 @@ PHONY += $(module-dirs) modules
425 $(module-dirs): crmodverdir $(objtree)/Module.symvers
426 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
427
428+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
429+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
430 modules: $(module-dirs)
431 @$(kecho) ' Building modules, stage 2.';
432 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
433@@ -1355,17 +1423,21 @@ else
434 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
435 endif
436
437-%.s: %.c prepare scripts FORCE
438+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
439+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
440+%.s: %.c gcc-plugins prepare scripts FORCE
441 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
442 %.i: %.c prepare scripts FORCE
443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
444-%.o: %.c prepare scripts FORCE
445+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
446+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
447+%.o: %.c gcc-plugins prepare scripts FORCE
448 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
449 %.lst: %.c prepare scripts FORCE
450 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
451-%.s: %.S prepare scripts FORCE
452+%.s: %.S gcc-plugins prepare scripts FORCE
453 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
454-%.o: %.S prepare scripts FORCE
455+%.o: %.S gcc-plugins prepare scripts FORCE
456 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
457 %.symtypes: %.c prepare scripts FORCE
458 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
459@@ -1375,11 +1447,15 @@ endif
460 $(cmd_crmodverdir)
461 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
462 $(build)=$(build-dir)
463-%/: prepare scripts FORCE
464+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
465+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
466+%/: gcc-plugins prepare scripts FORCE
467 $(cmd_crmodverdir)
468 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
469 $(build)=$(build-dir)
470-%.ko: prepare scripts FORCE
471+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
472+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
473+%.ko: gcc-plugins prepare scripts FORCE
474 $(cmd_crmodverdir)
475 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
476 $(build)=$(build-dir) $(@:.ko=.o)
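
Each -fplugin=.../*.so option added to GCC_PLUGINS_CFLAGS above loads a GCC plugin: a
shared object that exports plugin_is_GPL_compatible and a plugin_init() entry point.
A minimal, self-contained skeleton of such a plugin (not one of the PaX plugins
themselves) looks like this:

    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;   /* required, or GCC refuses to load the plugin */

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
            (void)plugin_info;      /* unused in this skeleton */

            /* Refuse to run against a GCC we were not built for. */
            if (!plugin_default_version_check(version, &gcc_version))
                    return 1;

            /* A real plugin would register passes or callbacks here. */
            return 0;
    }

Such a plugin is built with roughly cc -shared -fPIC -I`gcc -print-file-name=plugin`/include,
which is what the descent into tools/gcc driven by the gcc-plugins target automates.
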
477diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
478index c2cbe4f..f7264b4 100644
479--- a/arch/alpha/include/asm/atomic.h
480+++ b/arch/alpha/include/asm/atomic.h
481@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
482 #define atomic_dec(v) atomic_sub(1,(v))
483 #define atomic64_dec(v) atomic64_sub(1,(v))
484
485+#define atomic64_read_unchecked(v) atomic64_read(v)
486+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
487+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
488+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
489+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
490+#define atomic64_inc_unchecked(v) atomic64_inc(v)
491+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
492+#define atomic64_dec_unchecked(v) atomic64_dec(v)
493+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
494+
495 #define smp_mb__before_atomic_dec() smp_mb()
496 #define smp_mb__after_atomic_dec() smp_mb()
497 #define smp_mb__before_atomic_inc() smp_mb()
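
The *_unchecked aliases above exist because, under PAX_REFCOUNT, the regular atomic
ops trap on signed overflow; counters that are allowed to wrap opt out via the
unchecked variants, which alpha (having no PAX_REFCOUNT instrumentation) simply maps
back to the plain ops. A usage sketch, assuming the patch's atomic64_unchecked_t
typedef and an invented counter name:

    #include <asm/atomic.h>

    /* Illustrative only: a statistics counter that may legitimately wrap. */
    static atomic64_unchecked_t rx_bytes_example = ATOMIC64_INIT(0);

    static inline void account_rx_example(long bytes)
    {
            /* No overflow trap here: wrapping is harmless bookkeeping. */
            atomic64_add_unchecked(bytes, &rx_bytes_example);
    }
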
498diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
499index ad368a9..fbe0f25 100644
500--- a/arch/alpha/include/asm/cache.h
501+++ b/arch/alpha/include/asm/cache.h
502@@ -4,19 +4,19 @@
503 #ifndef __ARCH_ALPHA_CACHE_H
504 #define __ARCH_ALPHA_CACHE_H
505
506+#include <linux/const.h>
507
508 /* Bytes per L1 (data) cache line. */
509 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
510-# define L1_CACHE_BYTES 64
511 # define L1_CACHE_SHIFT 6
512 #else
513 /* Both EV4 and EV5 are write-through, read-allocate,
514 direct-mapped, physical.
515 */
516-# define L1_CACHE_BYTES 32
517 # define L1_CACHE_SHIFT 5
518 #endif
519
520+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
521 #define SMP_CACHE_BYTES L1_CACHE_BYTES
522
523 #endif
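
The _AC(1,UL) form comes from <linux/const.h>: it pastes the UL suffix in C but drops
it in assembly, where "1UL" would be a syntax error, so a single L1_CACHE_BYTES
definition now serves both languages. The mechanism, lightly simplified from
include/uapi/linux/const.h:

    /* Simplified from include/uapi/linux/const.h */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X               /* asm sees a bare constant */
    #else
    #define __AC(X, Y)      (X##Y)
    #define _AC(X, Y)       __AC(X, Y)      /* C sees 1UL, 1ULL, ... */
    #endif

    #define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)
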
524diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
525index 968d999..d36b2df 100644
526--- a/arch/alpha/include/asm/elf.h
527+++ b/arch/alpha/include/asm/elf.h
528@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
529
530 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
531
532+#ifdef CONFIG_PAX_ASLR
533+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
534+
535+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
536+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
537+#endif
538+
539 /* $0 is set by ld.so to a pointer to a function which might be
540 registered using atexit. This provides a mean for the dynamic
541 linker to call DT_FINI functions for shared libraries that have
542diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
543index bc2a0da..8ad11ee 100644
544--- a/arch/alpha/include/asm/pgalloc.h
545+++ b/arch/alpha/include/asm/pgalloc.h
546@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
547 pgd_set(pgd, pmd);
548 }
549
550+static inline void
551+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
552+{
553+ pgd_populate(mm, pgd, pmd);
554+}
555+
556 extern pgd_t *pgd_alloc(struct mm_struct *mm);
557
558 static inline void
559diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
560index 81a4342..348b927 100644
561--- a/arch/alpha/include/asm/pgtable.h
562+++ b/arch/alpha/include/asm/pgtable.h
563@@ -102,6 +102,17 @@ struct vm_area_struct;
564 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
565 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
566 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
567+
568+#ifdef CONFIG_PAX_PAGEEXEC
569+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
570+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
571+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
572+#else
573+# define PAGE_SHARED_NOEXEC PAGE_SHARED
574+# define PAGE_COPY_NOEXEC PAGE_COPY
575+# define PAGE_READONLY_NOEXEC PAGE_READONLY
576+#endif
577+
578 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
579
580 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
581diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
582index 2fd00b7..cfd5069 100644
583--- a/arch/alpha/kernel/module.c
584+++ b/arch/alpha/kernel/module.c
585@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
586
587 /* The small sections were sorted to the end of the segment.
588 The following should definitely cover them. */
589- gp = (u64)me->module_core + me->core_size - 0x8000;
590+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
591 got = sechdrs[me->arch.gotsecindex].sh_addr;
592
593 for (i = 0; i < n; i++) {
594diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
595index 14db93e..47bed62 100644
596--- a/arch/alpha/kernel/osf_sys.c
597+++ b/arch/alpha/kernel/osf_sys.c
598@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
599 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
600
601 static unsigned long
602-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
603- unsigned long limit)
604+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
605+ unsigned long limit, unsigned long flags)
606 {
607 struct vm_area_struct *vma = find_vma(current->mm, addr);
608-
609+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
610 while (1) {
611 /* At this point: (!vma || addr < vma->vm_end). */
612 if (limit - len < addr)
613 return -ENOMEM;
614- if (!vma || addr + len <= vma->vm_start)
615+ if (check_heap_stack_gap(vma, addr, len, offset))
616 return addr;
617 addr = vma->vm_end;
618 vma = vma->vm_next;
619@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
620 merely specific addresses, but regions of memory -- perhaps
621 this feature should be incorporated into all ports? */
622
623+#ifdef CONFIG_PAX_RANDMMAP
624+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
625+#endif
626+
627 if (addr) {
628- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
629+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
630 if (addr != (unsigned long) -ENOMEM)
631 return addr;
632 }
633
634 /* Next, try allocating at TASK_UNMAPPED_BASE. */
635- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
636- len, limit);
637+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
638+
639 if (addr != (unsigned long) -ENOMEM)
640 return addr;
641
642 /* Finally, try allocating in low memory. */
643- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
644+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
645
646 return addr;
647 }
648diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
649index 0c4132d..88f0d53 100644
650--- a/arch/alpha/mm/fault.c
651+++ b/arch/alpha/mm/fault.c
652@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
653 __reload_thread(pcb);
654 }
655
656+#ifdef CONFIG_PAX_PAGEEXEC
657+/*
658+ * PaX: decide what to do with offenders (regs->pc = fault address)
659+ *
660+ * returns 1 when task should be killed
661+ * 2 when patched PLT trampoline was detected
662+ * 3 when unpatched PLT trampoline was detected
663+ */
664+static int pax_handle_fetch_fault(struct pt_regs *regs)
665+{
666+
667+#ifdef CONFIG_PAX_EMUPLT
668+ int err;
669+
670+ do { /* PaX: patched PLT emulation #1 */
671+ unsigned int ldah, ldq, jmp;
672+
673+ err = get_user(ldah, (unsigned int *)regs->pc);
674+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
675+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
676+
677+ if (err)
678+ break;
679+
680+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
681+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
682+ jmp == 0x6BFB0000U)
683+ {
684+ unsigned long r27, addr;
685+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
686+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
687+
688+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
689+ err = get_user(r27, (unsigned long *)addr);
690+ if (err)
691+ break;
692+
693+ regs->r27 = r27;
694+ regs->pc = r27;
695+ return 2;
696+ }
697+ } while (0);
698+
699+ do { /* PaX: patched PLT emulation #2 */
700+ unsigned int ldah, lda, br;
701+
702+ err = get_user(ldah, (unsigned int *)regs->pc);
703+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
704+ err |= get_user(br, (unsigned int *)(regs->pc+8));
705+
706+ if (err)
707+ break;
708+
709+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
710+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
711+ (br & 0xFFE00000U) == 0xC3E00000U)
712+ {
713+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
714+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
715+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
716+
717+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
718+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
719+ return 2;
720+ }
721+ } while (0);
722+
723+ do { /* PaX: unpatched PLT emulation */
724+ unsigned int br;
725+
726+ err = get_user(br, (unsigned int *)regs->pc);
727+
728+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
729+ unsigned int br2, ldq, nop, jmp;
730+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
731+
732+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
733+ err = get_user(br2, (unsigned int *)addr);
734+ err |= get_user(ldq, (unsigned int *)(addr+4));
735+ err |= get_user(nop, (unsigned int *)(addr+8));
736+ err |= get_user(jmp, (unsigned int *)(addr+12));
737+ err |= get_user(resolver, (unsigned long *)(addr+16));
738+
739+ if (err)
740+ break;
741+
742+ if (br2 == 0xC3600000U &&
743+ ldq == 0xA77B000CU &&
744+ nop == 0x47FF041FU &&
745+ jmp == 0x6B7B0000U)
746+ {
747+ regs->r28 = regs->pc+4;
748+ regs->r27 = addr+16;
749+ regs->pc = resolver;
750+ return 3;
751+ }
752+ }
753+ } while (0);
754+#endif
755+
756+ return 1;
757+}
758+
759+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
760+{
761+ unsigned long i;
762+
763+ printk(KERN_ERR "PAX: bytes at PC: ");
764+ for (i = 0; i < 5; i++) {
765+ unsigned int c;
766+ if (get_user(c, (unsigned int *)pc+i))
767+ printk(KERN_CONT "???????? ");
768+ else
769+ printk(KERN_CONT "%08x ", c);
770+ }
771+ printk("\n");
772+}
773+#endif
774
775 /*
776 * This routine handles page faults. It determines the address,
777@@ -133,8 +251,29 @@ retry:
778 good_area:
779 si_code = SEGV_ACCERR;
780 if (cause < 0) {
781- if (!(vma->vm_flags & VM_EXEC))
782+ if (!(vma->vm_flags & VM_EXEC)) {
783+
784+#ifdef CONFIG_PAX_PAGEEXEC
785+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
786+ goto bad_area;
787+
788+ up_read(&mm->mmap_sem);
789+ switch (pax_handle_fetch_fault(regs)) {
790+
791+#ifdef CONFIG_PAX_EMUPLT
792+ case 2:
793+ case 3:
794+ return;
795+#endif
796+
797+ }
798+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
799+ do_group_exit(SIGKILL);
800+#else
801 goto bad_area;
802+#endif
803+
804+ }
805 } else if (!cause) {
806 /* Allow reads even for write-only mappings */
807 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
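
A note on the arithmetic in the PLT emulation above: expressions such as
((addrl ^ 0x8000UL) + 0x8000UL), with addrl = ldq | 0xFFFFFFFFFFFF0000UL, sign-extend
the 16-bit displacement field of the instruction. The upper bits are pre-filled with
ones, the xor clears the sign bit, and the add carries it back through, yielding the
correct positive or negative 64-bit offset. A small self-check of that identity
(assuming 64-bit long, as on alpha; names invented for illustration):

    #include <assert.h>

    /* Sign-extend the low 16 bits of insn, the way the PLT emulation does. */
    static unsigned long sext16_example(unsigned int insn)
    {
            unsigned long v = insn | 0xFFFFFFFFFFFF0000UL;
            return (v ^ 0x8000UL) + 0x8000UL;
    }

    int main(void)
    {
            assert(sext16_example(0x7FFF) == 0x7FFFUL);             /* +32767 */
            assert(sext16_example(0x8000) == 0xFFFFFFFFFFFF8000UL); /* -32768 */
            return 0;
    }
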
808diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
809index 67874b8..9aa2d62 100644
810--- a/arch/arm/Kconfig
811+++ b/arch/arm/Kconfig
812@@ -1427,6 +1427,16 @@ config ARM_ERRATA_775420
813 to deadlock. This workaround puts DSB before executing ISB if
814 an abort may occur on cache maintenance.
815
816+config ARM_ERRATA_798181
817+ bool "ARM errata: TLBI/DSB failure on Cortex-A15"
818+ depends on CPU_V7 && SMP
819+ help
820+ On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
821+ adequately shooting down all use of the old entries. This
822+ option enables the Linux kernel workaround for this erratum
823+ which sends an IPI to the CPUs that are running the same ASID
824+ as the one being invalidated.
825+
826 endmenu
827
828 source "arch/arm/common/Kconfig"
829@@ -1813,7 +1823,7 @@ config ALIGNMENT_TRAP
830
831 config UACCESS_WITH_MEMCPY
832 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
833- depends on MMU
834+ depends on MMU && !PAX_MEMORY_UDEREF
835 default y if CPU_FEROCEON
836 help
837 Implement faster copy_to_user and clear_user methods for CPU
838diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
839index 87dfa902..3a523fc 100644
840--- a/arch/arm/common/gic.c
841+++ b/arch/arm/common/gic.c
842@@ -81,7 +81,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
843 * Supported arch specific GIC irq extension.
844 * Default make them NULL.
845 */
846-struct irq_chip gic_arch_extn = {
847+irq_chip_no_const gic_arch_extn __read_only = {
848 .irq_eoi = NULL,
849 .irq_mask = NULL,
850 .irq_unmask = NULL,
851@@ -329,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
852 chained_irq_exit(chip, desc);
853 }
854
855-static struct irq_chip gic_chip = {
856+static irq_chip_no_const gic_chip __read_only = {
857 .name = "GIC",
858 .irq_mask = gic_mask_irq,
859 .irq_unmask = gic_unmask_irq,
860diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
861index c79f61f..9ac0642 100644
862--- a/arch/arm/include/asm/atomic.h
863+++ b/arch/arm/include/asm/atomic.h
864@@ -17,17 +17,35 @@
865 #include <asm/barrier.h>
866 #include <asm/cmpxchg.h>
867
868+#ifdef CONFIG_GENERIC_ATOMIC64
869+#include <asm-generic/atomic64.h>
870+#endif
871+
872 #define ATOMIC_INIT(i) { (i) }
873
874 #ifdef __KERNEL__
875
876+#define _ASM_EXTABLE(from, to) \
877+" .pushsection __ex_table,\"a\"\n"\
878+" .align 3\n" \
879+" .long " #from ", " #to"\n" \
880+" .popsection"
881+
882 /*
883 * On ARM, ordinary assignment (str instruction) doesn't clear the local
884 * strex/ldrex monitor on some implementations. The reason we can use it for
885 * atomic_set() is the clrex or dummy strex done on every exception return.
886 */
887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
889+{
890+ return v->counter;
891+}
892 #define atomic_set(v,i) (((v)->counter) = (i))
893+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
894+{
895+ v->counter = i;
896+}
897
898 #if __LINUX_ARM_ARCH__ >= 6
899
900@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
901 int result;
902
903 __asm__ __volatile__("@ atomic_add\n"
904+"1: ldrex %1, [%3]\n"
905+" adds %0, %1, %4\n"
906+
907+#ifdef CONFIG_PAX_REFCOUNT
908+" bvc 3f\n"
909+"2: bkpt 0xf103\n"
910+"3:\n"
911+#endif
912+
913+" strex %1, %0, [%3]\n"
914+" teq %1, #0\n"
915+" bne 1b"
916+
917+#ifdef CONFIG_PAX_REFCOUNT
918+"\n4:\n"
919+ _ASM_EXTABLE(2b, 4b)
920+#endif
921+
922+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
923+ : "r" (&v->counter), "Ir" (i)
924+ : "cc");
925+}
926+
927+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
928+{
929+ unsigned long tmp;
930+ int result;
931+
932+ __asm__ __volatile__("@ atomic_add_unchecked\n"
933 "1: ldrex %0, [%3]\n"
934 " add %0, %0, %4\n"
935 " strex %1, %0, [%3]\n"
936@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
937 smp_mb();
938
939 __asm__ __volatile__("@ atomic_add_return\n"
940+"1: ldrex %1, [%3]\n"
941+" adds %0, %1, %4\n"
942+
943+#ifdef CONFIG_PAX_REFCOUNT
944+" bvc 3f\n"
945+" mov %0, %1\n"
946+"2: bkpt 0xf103\n"
947+"3:\n"
948+#endif
949+
950+" strex %1, %0, [%3]\n"
951+" teq %1, #0\n"
952+" bne 1b"
953+
954+#ifdef CONFIG_PAX_REFCOUNT
955+"\n4:\n"
956+ _ASM_EXTABLE(2b, 4b)
957+#endif
958+
959+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
960+ : "r" (&v->counter), "Ir" (i)
961+ : "cc");
962+
963+ smp_mb();
964+
965+ return result;
966+}
967+
968+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
969+{
970+ unsigned long tmp;
971+ int result;
972+
973+ smp_mb();
974+
975+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
976 "1: ldrex %0, [%3]\n"
977 " add %0, %0, %4\n"
978 " strex %1, %0, [%3]\n"
979@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
980 int result;
981
982 __asm__ __volatile__("@ atomic_sub\n"
983+"1: ldrex %1, [%3]\n"
984+" subs %0, %1, %4\n"
985+
986+#ifdef CONFIG_PAX_REFCOUNT
987+" bvc 3f\n"
988+"2: bkpt 0xf103\n"
989+"3:\n"
990+#endif
991+
992+" strex %1, %0, [%3]\n"
993+" teq %1, #0\n"
994+" bne 1b"
995+
996+#ifdef CONFIG_PAX_REFCOUNT
997+"\n4:\n"
998+ _ASM_EXTABLE(2b, 4b)
999+#endif
1000+
1001+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1002+ : "r" (&v->counter), "Ir" (i)
1003+ : "cc");
1004+}
1005+
1006+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1007+{
1008+ unsigned long tmp;
1009+ int result;
1010+
1011+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1012 "1: ldrex %0, [%3]\n"
1013 " sub %0, %0, %4\n"
1014 " strex %1, %0, [%3]\n"
1015@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1016 smp_mb();
1017
1018 __asm__ __volatile__("@ atomic_sub_return\n"
1019-"1: ldrex %0, [%3]\n"
1020-" sub %0, %0, %4\n"
1021+"1: ldrex %1, [%3]\n"
1022+" subs %0, %1, %4\n"
1023+
1024+#ifdef CONFIG_PAX_REFCOUNT
1025+" bvc 3f\n"
1026+" mov %0, %1\n"
1027+"2: bkpt 0xf103\n"
1028+"3:\n"
1029+#endif
1030+
1031 " strex %1, %0, [%3]\n"
1032 " teq %1, #0\n"
1033 " bne 1b"
1034+
1035+#ifdef CONFIG_PAX_REFCOUNT
1036+"\n4:\n"
1037+ _ASM_EXTABLE(2b, 4b)
1038+#endif
1039+
1040 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1041 : "r" (&v->counter), "Ir" (i)
1042 : "cc");
1043@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1044 return oldval;
1045 }
1046
1047+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1048+{
1049+ unsigned long oldval, res;
1050+
1051+ smp_mb();
1052+
1053+ do {
1054+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1055+ "ldrex %1, [%3]\n"
1056+ "mov %0, #0\n"
1057+ "teq %1, %4\n"
1058+ "strexeq %0, %5, [%3]\n"
1059+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1060+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1061+ : "cc");
1062+ } while (res);
1063+
1064+ smp_mb();
1065+
1066+ return oldval;
1067+}
1068+
1069 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1070 {
1071 unsigned long tmp, tmp2;
1072@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1073
1074 return val;
1075 }
1076+
1077+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1078+{
1079+ return atomic_add_return(i, v);
1080+}
1081+
1082 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1083+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1084+{
1085+ (void) atomic_add_return(i, v);
1086+}
1087
1088 static inline int atomic_sub_return(int i, atomic_t *v)
1089 {
1090@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1091 return val;
1092 }
1093 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1094+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1095+{
1096+ (void) atomic_sub_return(i, v);
1097+}
1098
1099 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1100 {
1101@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1102 return ret;
1103 }
1104
1105+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1106+{
1107+ return atomic_cmpxchg(v, old, new);
1108+}
1109+
1110 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1111 {
1112 unsigned long flags;
1113@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1114 #endif /* __LINUX_ARM_ARCH__ */
1115
1116 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1117+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1118+{
1119+ return xchg(&v->counter, new);
1120+}
1121
1122 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1123 {
1124@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1125 }
1126
1127 #define atomic_inc(v) atomic_add(1, v)
1128+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1129+{
1130+ atomic_add_unchecked(1, v);
1131+}
1132 #define atomic_dec(v) atomic_sub(1, v)
1133+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1134+{
1135+ atomic_sub_unchecked(1, v);
1136+}
1137
1138 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1139+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1140+{
1141+ return atomic_add_return_unchecked(1, v) == 0;
1142+}
1143 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1144 #define atomic_inc_return(v) (atomic_add_return(1, v))
1145+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1146+{
1147+ return atomic_add_return_unchecked(1, v);
1148+}
1149 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1150 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1151
1152@@ -241,6 +428,14 @@ typedef struct {
1153 u64 __aligned(8) counter;
1154 } atomic64_t;
1155
1156+#ifdef CONFIG_PAX_REFCOUNT
1157+typedef struct {
1158+ u64 __aligned(8) counter;
1159+} atomic64_unchecked_t;
1160+#else
1161+typedef atomic64_t atomic64_unchecked_t;
1162+#endif
1163+
1164 #define ATOMIC64_INIT(i) { (i) }
1165
1166 static inline u64 atomic64_read(const atomic64_t *v)
1167@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1168 return result;
1169 }
1170
1171+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1172+{
1173+ u64 result;
1174+
1175+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1176+" ldrexd %0, %H0, [%1]"
1177+ : "=&r" (result)
1178+ : "r" (&v->counter), "Qo" (v->counter)
1179+ );
1180+
1181+ return result;
1182+}
1183+
1184 static inline void atomic64_set(atomic64_t *v, u64 i)
1185 {
1186 u64 tmp;
1187@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1188 : "cc");
1189 }
1190
1191+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1192+{
1193+ u64 tmp;
1194+
1195+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1196+"1: ldrexd %0, %H0, [%2]\n"
1197+" strexd %0, %3, %H3, [%2]\n"
1198+" teq %0, #0\n"
1199+" bne 1b"
1200+ : "=&r" (tmp), "=Qo" (v->counter)
1201+ : "r" (&v->counter), "r" (i)
1202+ : "cc");
1203+}
1204+
1205 static inline void atomic64_add(u64 i, atomic64_t *v)
1206 {
1207 u64 result;
1208@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1209 __asm__ __volatile__("@ atomic64_add\n"
1210 "1: ldrexd %0, %H0, [%3]\n"
1211 " adds %0, %0, %4\n"
1212+" adcs %H0, %H0, %H4\n"
1213+
1214+#ifdef CONFIG_PAX_REFCOUNT
1215+" bvc 3f\n"
1216+"2: bkpt 0xf103\n"
1217+"3:\n"
1218+#endif
1219+
1220+" strexd %1, %0, %H0, [%3]\n"
1221+" teq %1, #0\n"
1222+" bne 1b"
1223+
1224+#ifdef CONFIG_PAX_REFCOUNT
1225+"\n4:\n"
1226+ _ASM_EXTABLE(2b, 4b)
1227+#endif
1228+
1229+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1230+ : "r" (&v->counter), "r" (i)
1231+ : "cc");
1232+}
1233+
1234+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1235+{
1236+ u64 result;
1237+ unsigned long tmp;
1238+
1239+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1240+"1: ldrexd %0, %H0, [%3]\n"
1241+" adds %0, %0, %4\n"
1242 " adc %H0, %H0, %H4\n"
1243 " strexd %1, %0, %H0, [%3]\n"
1244 " teq %1, #0\n"
1245@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1246
1247 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1248 {
1249- u64 result;
1250- unsigned long tmp;
1251+ u64 result, tmp;
1252
1253 smp_mb();
1254
1255 __asm__ __volatile__("@ atomic64_add_return\n"
1256+"1: ldrexd %1, %H1, [%3]\n"
1257+" adds %0, %1, %4\n"
1258+" adcs %H0, %H1, %H4\n"
1259+
1260+#ifdef CONFIG_PAX_REFCOUNT
1261+" bvc 3f\n"
1262+" mov %0, %1\n"
1263+" mov %H0, %H1\n"
1264+"2: bkpt 0xf103\n"
1265+"3:\n"
1266+#endif
1267+
1268+" strexd %1, %0, %H0, [%3]\n"
1269+" teq %1, #0\n"
1270+" bne 1b"
1271+
1272+#ifdef CONFIG_PAX_REFCOUNT
1273+"\n4:\n"
1274+ _ASM_EXTABLE(2b, 4b)
1275+#endif
1276+
1277+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1278+ : "r" (&v->counter), "r" (i)
1279+ : "cc");
1280+
1281+ smp_mb();
1282+
1283+ return result;
1284+}
1285+
1286+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1287+{
1288+ u64 result;
1289+ unsigned long tmp;
1290+
1291+ smp_mb();
1292+
1293+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1294 "1: ldrexd %0, %H0, [%3]\n"
1295 " adds %0, %0, %4\n"
1296 " adc %H0, %H0, %H4\n"
1297@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1298 __asm__ __volatile__("@ atomic64_sub\n"
1299 "1: ldrexd %0, %H0, [%3]\n"
1300 " subs %0, %0, %4\n"
1301+" sbcs %H0, %H0, %H4\n"
1302+
1303+#ifdef CONFIG_PAX_REFCOUNT
1304+" bvc 3f\n"
1305+"2: bkpt 0xf103\n"
1306+"3:\n"
1307+#endif
1308+
1309+" strexd %1, %0, %H0, [%3]\n"
1310+" teq %1, #0\n"
1311+" bne 1b"
1312+
1313+#ifdef CONFIG_PAX_REFCOUNT
1314+"\n4:\n"
1315+ _ASM_EXTABLE(2b, 4b)
1316+#endif
1317+
1318+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1319+ : "r" (&v->counter), "r" (i)
1320+ : "cc");
1321+}
1322+
1323+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1324+{
1325+ u64 result;
1326+ unsigned long tmp;
1327+
1328+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1329+"1: ldrexd %0, %H0, [%3]\n"
1330+" subs %0, %0, %4\n"
1331 " sbc %H0, %H0, %H4\n"
1332 " strexd %1, %0, %H0, [%3]\n"
1333 " teq %1, #0\n"
1334@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1335
1336 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1337 {
1338- u64 result;
1339- unsigned long tmp;
1340+ u64 result, tmp;
1341
1342 smp_mb();
1343
1344 __asm__ __volatile__("@ atomic64_sub_return\n"
1345-"1: ldrexd %0, %H0, [%3]\n"
1346-" subs %0, %0, %4\n"
1347-" sbc %H0, %H0, %H4\n"
1348+"1: ldrexd %1, %H1, [%3]\n"
1349+" subs %0, %1, %4\n"
1350+" sbcs %H0, %H1, %H4\n"
1351+
1352+#ifdef CONFIG_PAX_REFCOUNT
1353+" bvc 3f\n"
1354+" mov %0, %1\n"
1355+" mov %H0, %H1\n"
1356+"2: bkpt 0xf103\n"
1357+"3:\n"
1358+#endif
1359+
1360 " strexd %1, %0, %H0, [%3]\n"
1361 " teq %1, #0\n"
1362 " bne 1b"
1363+
1364+#ifdef CONFIG_PAX_REFCOUNT
1365+"\n4:\n"
1366+ _ASM_EXTABLE(2b, 4b)
1367+#endif
1368+
1369 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1370 : "r" (&v->counter), "r" (i)
1371 : "cc");
1372@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1373 return oldval;
1374 }
1375
1376+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1377+{
1378+ u64 oldval;
1379+ unsigned long res;
1380+
1381+ smp_mb();
1382+
1383+ do {
1384+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1385+ "ldrexd %1, %H1, [%3]\n"
1386+ "mov %0, #0\n"
1387+ "teq %1, %4\n"
1388+ "teqeq %H1, %H4\n"
1389+ "strexdeq %0, %5, %H5, [%3]"
1390+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1391+ : "r" (&ptr->counter), "r" (old), "r" (new)
1392+ : "cc");
1393+ } while (res);
1394+
1395+ smp_mb();
1396+
1397+ return oldval;
1398+}
1399+
1400 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1401 {
1402 u64 result;
1403@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1404
1405 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1406 {
1407- u64 result;
1408- unsigned long tmp;
1409+ u64 result, tmp;
1410
1411 smp_mb();
1412
1413 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1414-"1: ldrexd %0, %H0, [%3]\n"
1415-" subs %0, %0, #1\n"
1416-" sbc %H0, %H0, #0\n"
1417+"1: ldrexd %1, %H1, [%3]\n"
1418+" subs %0, %1, #1\n"
1419+" sbcs %H0, %H1, #0\n"
1420+
1421+#ifdef CONFIG_PAX_REFCOUNT
1422+" bvc 3f\n"
1423+" mov %0, %1\n"
1424+" mov %H0, %H1\n"
1425+"2: bkpt 0xf103\n"
1426+"3:\n"
1427+#endif
1428+
1429 " teq %H0, #0\n"
1430-" bmi 2f\n"
1431+" bmi 4f\n"
1432 " strexd %1, %0, %H0, [%3]\n"
1433 " teq %1, #0\n"
1434 " bne 1b\n"
1435-"2:"
1436+"4:\n"
1437+
1438+#ifdef CONFIG_PAX_REFCOUNT
1439+ _ASM_EXTABLE(2b, 4b)
1440+#endif
1441+
1442 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1443 : "r" (&v->counter)
1444 : "cc");
1445@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1446 " teq %0, %5\n"
1447 " teqeq %H0, %H5\n"
1448 " moveq %1, #0\n"
1449-" beq 2f\n"
1450+" beq 4f\n"
1451 " adds %0, %0, %6\n"
1452-" adc %H0, %H0, %H6\n"
1453+" adcs %H0, %H0, %H6\n"
1454+
1455+#ifdef CONFIG_PAX_REFCOUNT
1456+" bvc 3f\n"
1457+"2: bkpt 0xf103\n"
1458+"3:\n"
1459+#endif
1460+
1461 " strexd %2, %0, %H0, [%4]\n"
1462 " teq %2, #0\n"
1463 " bne 1b\n"
1464-"2:"
1465+"4:\n"
1466+
1467+#ifdef CONFIG_PAX_REFCOUNT
1468+ _ASM_EXTABLE(2b, 4b)
1469+#endif
1470+
1471 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1472 : "r" (&v->counter), "r" (u), "r" (a)
1473 : "cc");
1474@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1475
1476 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1477 #define atomic64_inc(v) atomic64_add(1LL, (v))
1478+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1479 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1480+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1481 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1482 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1483 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1484+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1485 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1486 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1487 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
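
Throughout this file the PAX_REFCOUNT path relies on the ARM overflow flag: add/sub
become adds/adcs and subs/sbcs, bvc skips the trap when no overflow occurred,
bkpt 0xf103 raises a breakpoint otherwise, and the _ASM_EXTABLE entry lets the fault
handler resume at the exit label. As a portable illustration of the same idea (the
semantics only, not the patch's mechanism), assuming GCC 5+ for the overflow builtin:

    #include <stdio.h>
    #include <stdlib.h>

    /* Trap on signed overflow instead of silently wrapping. */
    static int checked_add_example(int counter, int delta)
    {
            int result;

            if (__builtin_add_overflow(counter, delta, &result)) {
                    /* The kernel raises a breakpoint; userland can only abort. */
                    fprintf(stderr, "refcount overflow detected\n");
                    abort();
            }
            return result;
    }
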
1488diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1489index 75fe66b..ba3dee4 100644
1490--- a/arch/arm/include/asm/cache.h
1491+++ b/arch/arm/include/asm/cache.h
1492@@ -4,8 +4,10 @@
1493 #ifndef __ASMARM_CACHE_H
1494 #define __ASMARM_CACHE_H
1495
1496+#include <linux/const.h>
1497+
1498 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1499-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1500+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1501
1502 /*
1503 * Memory returned by kmalloc() may be used for DMA, so we must make
1504@@ -24,5 +26,6 @@
1505 #endif
1506
1507 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1508+#define __read_only __attribute__ ((__section__(".data..read_only")))
1509
1510 #endif
1511diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1512index e1489c5..d418304 100644
1513--- a/arch/arm/include/asm/cacheflush.h
1514+++ b/arch/arm/include/asm/cacheflush.h
1515@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1516 void (*dma_unmap_area)(const void *, size_t, int);
1517
1518 void (*dma_flush_range)(const void *, const void *);
1519-};
1520+} __no_const;
1521
1522 /*
1523 * Select the calling method
1524diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1525index 6dcc164..b14d917 100644
1526--- a/arch/arm/include/asm/checksum.h
1527+++ b/arch/arm/include/asm/checksum.h
1528@@ -37,7 +37,19 @@ __wsum
1529 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1530
1531 __wsum
1532-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1533+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1534+
1535+static inline __wsum
1536+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1537+{
1538+ __wsum ret;
1539+ pax_open_userland();
1540+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1541+ pax_close_userland();
1542+ return ret;
1543+}
1544+
1545+
1546
1547 /*
1548 * Fold a partial checksum without adding pseudo headers
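
The wrapper above shows the recurring UDEREF pattern: userland only becomes
accessible between pax_open_userland() and pax_close_userland(), which this patch
introduces. A schematic sketch of the same shape, with an invented wrapper name:

    #include <linux/uaccess.h>

    /* Schematic: userland is reachable only inside the open/close pair. */
    static inline unsigned long
    copy_from_user_uderef_example(void *dst, const void __user *src,
                                  unsigned long n)
    {
            unsigned long ret;

            pax_open_userland();
            ret = __copy_from_user(dst, src, n);
            pax_close_userland();

            return ret;
    }
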
1549diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1550index 7eb18c1..e38b6d2 100644
1551--- a/arch/arm/include/asm/cmpxchg.h
1552+++ b/arch/arm/include/asm/cmpxchg.h
1553@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1554
1555 #define xchg(ptr,x) \
1556 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1557+#define xchg_unchecked(ptr,x) \
1558+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1559
1560 #include <asm-generic/cmpxchg-local.h>
1561
1562diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1563index 720799f..2f67631 100644
1564--- a/arch/arm/include/asm/delay.h
1565+++ b/arch/arm/include/asm/delay.h
1566@@ -25,9 +25,9 @@ extern struct arm_delay_ops {
1567 void (*const_udelay)(unsigned long);
1568 void (*udelay)(unsigned long);
1569 bool const_clock;
1570-} arm_delay_ops;
1571+} *arm_delay_ops;
1572
1573-#define __delay(n) arm_delay_ops.delay(n)
1574+#define __delay(n) arm_delay_ops->delay(n)
1575
1576 /*
1577 * This function intentionally does not exist; if you see references to
1578@@ -48,8 +48,8 @@ extern void __bad_udelay(void);
1579 * first constant multiplications gets optimized away if the delay is
1580 * a constant)
1581 */
1582-#define __udelay(n) arm_delay_ops.udelay(n)
1583-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1584+#define __udelay(n) arm_delay_ops->udelay(n)
1585+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1586
1587 #define udelay(n) \
1588 (__builtin_constant_p(n) ? \
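
Turning arm_delay_ops from a writable global struct into a pointer is the usual
constification move: every concrete implementation can then live in a const (here
__read_only) object, leaving only a single pointer slot writable during init. A
sketch of the pattern with assumed names:

    struct delay_ops_example {
            void (*delay)(unsigned long loops);
    };

    static void loop_delay_example(unsigned long loops)
    {
            while (loops--)
                    __asm__ __volatile__("" ::: "memory");  /* spin */
    }

    /* The whole ops table can be read-only... */
    static const struct delay_ops_example loop_ops_example = {
            .delay = loop_delay_example,
    };

    /* ...and only this single pointer remains writable. */
    const struct delay_ops_example *delay_ops_example_ptr = &loop_ops_example;
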
1589diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1590index 6ddbe44..b5e38b1 100644
1591--- a/arch/arm/include/asm/domain.h
1592+++ b/arch/arm/include/asm/domain.h
1593@@ -48,18 +48,37 @@
1594 * Domain types
1595 */
1596 #define DOMAIN_NOACCESS 0
1597-#define DOMAIN_CLIENT 1
1598 #ifdef CONFIG_CPU_USE_DOMAINS
1599+#define DOMAIN_USERCLIENT 1
1600+#define DOMAIN_KERNELCLIENT 1
1601 #define DOMAIN_MANAGER 3
1602+#define DOMAIN_VECTORS DOMAIN_USER
1603 #else
1604+
1605+#ifdef CONFIG_PAX_KERNEXEC
1606 #define DOMAIN_MANAGER 1
1607+#define DOMAIN_KERNEXEC 3
1608+#else
1609+#define DOMAIN_MANAGER 1
1610+#endif
1611+
1612+#ifdef CONFIG_PAX_MEMORY_UDEREF
1613+#define DOMAIN_USERCLIENT 0
1614+#define DOMAIN_UDEREF 1
1615+#define DOMAIN_VECTORS DOMAIN_KERNEL
1616+#else
1617+#define DOMAIN_USERCLIENT 1
1618+#define DOMAIN_VECTORS DOMAIN_USER
1619+#endif
1620+#define DOMAIN_KERNELCLIENT 1
1621+
1622 #endif
1623
1624 #define domain_val(dom,type) ((type) << (2*(dom)))
1625
1626 #ifndef __ASSEMBLY__
1627
1628-#ifdef CONFIG_CPU_USE_DOMAINS
1629+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1630 static inline void set_domain(unsigned val)
1631 {
1632 asm volatile(
1633@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1634 isb();
1635 }
1636
1637-#define modify_domain(dom,type) \
1638- do { \
1639- struct thread_info *thread = current_thread_info(); \
1640- unsigned int domain = thread->cpu_domain; \
1641- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1642- thread->cpu_domain = domain | domain_val(dom, type); \
1643- set_domain(thread->cpu_domain); \
1644- } while (0)
1645-
1646+extern void modify_domain(unsigned int dom, unsigned int type);
1647 #else
1648 static inline void set_domain(unsigned val) { }
1649 static inline void modify_domain(unsigned dom, unsigned type) { }
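
For reference, domain_val(dom,type) just shifts a 2-bit DACR access type into the
slot for domain dom. A worked example with the values defined above (client = 1,
manager = 3):

    #define domain_val(dom, type)   ((type) << (2*(dom)))

    /*
     * Domain 0 as client (01) and domain 1 as manager (11):
     *   domain_val(0, 1) = 0x1
     *   domain_val(1, 3) = 0xC
     * so the combined DACR value is 0xD.
     */
    static const unsigned long dacr_example =
            domain_val(0, 1) | domain_val(1, 3);
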
1650diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1651index 38050b1..9d90e8b 100644
1652--- a/arch/arm/include/asm/elf.h
1653+++ b/arch/arm/include/asm/elf.h
1654@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1655 the loader. We need to make sure that it is out of the way of the program
1656 that it will "exec", and that there is sufficient room for the brk. */
1657
1658-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1659+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1660+
1661+#ifdef CONFIG_PAX_ASLR
1662+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1663+
1664+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1665+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1666+#endif
1667
1668 /* When the program starts, a1 contains a pointer to a function to be
1669 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1670@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1671 extern void elf_set_personality(const struct elf32_hdr *);
1672 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1673
1674-struct mm_struct;
1675-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1676-#define arch_randomize_brk arch_randomize_brk
1677-
1678 #endif
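
To put the PAX_DELTA_MMAP_LEN value of 16 in perspective: the randomisation is
applied in page-sized steps, so with 4 KiB pages 16 random bits move the mmap base
within a 2^(16+12) byte = 256 MiB window. The arithmetic, with invented constant
names:

    #define PAGE_SHIFT_EXAMPLE      12      /* 4 KiB pages */
    #define DELTA_MMAP_BITS_EXAMPLE 16      /* per the hunk above */

    /* 1UL << (16 + 12) = 0x10000000 bytes = 256 MiB of possible bases. */
    static const unsigned long mmap_rnd_span_example =
            1UL << (DELTA_MMAP_BITS_EXAMPLE + PAGE_SHIFT_EXAMPLE);
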
1679diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1680index de53547..52b9a28 100644
1681--- a/arch/arm/include/asm/fncpy.h
1682+++ b/arch/arm/include/asm/fncpy.h
1683@@ -81,7 +81,9 @@
1684 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1685 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1686 \
1687+ pax_open_kernel(); \
1688 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1689+ pax_close_kernel(); \
1690 flush_icache_range((unsigned long)(dest_buf), \
1691 (unsigned long)(dest_buf) + (size)); \
1692 \
1693diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1694index e42cf59..7b94b8f 100644
1695--- a/arch/arm/include/asm/futex.h
1696+++ b/arch/arm/include/asm/futex.h
1697@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1698 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1699 return -EFAULT;
1700
1701+ pax_open_userland();
1702+
1703 smp_mb();
1704 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1705 "1: ldrex %1, [%4]\n"
1706@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1707 : "cc", "memory");
1708 smp_mb();
1709
1710+ pax_close_userland();
1711+
1712 *uval = val;
1713 return ret;
1714 }
1715@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1716 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1717 return -EFAULT;
1718
1719+ pax_open_userland();
1720+
1721 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1722 "1: " TUSER(ldr) " %1, [%4]\n"
1723 " teq %1, %2\n"
1724@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1725 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1726 : "cc", "memory");
1727
1728+ pax_close_userland();
1729+
1730 *uval = val;
1731 return ret;
1732 }
1733@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1734 return -EFAULT;
1735
1736 pagefault_disable(); /* implies preempt_disable() */
1737+ pax_open_userland();
1738
1739 switch (op) {
1740 case FUTEX_OP_SET:
1741@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1742 ret = -ENOSYS;
1743 }
1744
1745+ pax_close_userland();
1746 pagefault_enable(); /* subsumes preempt_enable() */
1747
1748 if (!ret) {
1749diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
1750index 4b1ce6c..bea3f73 100644
1751--- a/arch/arm/include/asm/hardware/gic.h
1752+++ b/arch/arm/include/asm/hardware/gic.h
1753@@ -34,9 +34,10 @@
1754
1755 #ifndef __ASSEMBLY__
1756 #include <linux/irqdomain.h>
1757+#include <linux/irq.h>
1758 struct device_node;
1759
1760-extern struct irq_chip gic_arch_extn;
1761+extern irq_chip_no_const gic_arch_extn;
1762
1763 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
1764 u32 offset, struct device_node *);
1765diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
1766index 8c5e828..91b99ab 100644
1767--- a/arch/arm/include/asm/highmem.h
1768+++ b/arch/arm/include/asm/highmem.h
1769@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
1770 #endif
1771 #endif
1772
1773+/*
1774+ * Needed to be able to broadcast the TLB invalidation for kmap.
1775+ */
1776+#ifdef CONFIG_ARM_ERRATA_798181
1777+#undef ARCH_NEEDS_KMAP_HIGH_GET
1778+#endif
1779+
1780 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
1781 extern void *kmap_high_get(struct page *page);
1782 #else
1783diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1784index 83eb2f7..ed77159 100644
1785--- a/arch/arm/include/asm/kmap_types.h
1786+++ b/arch/arm/include/asm/kmap_types.h
1787@@ -4,6 +4,6 @@
1788 /*
1789 * This is the "bare minimum". AIO seems to require this.
1790 */
1791-#define KM_TYPE_NR 16
1792+#define KM_TYPE_NR 17
1793
1794 #endif
1795diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1796index 9e614a1..3302cca 100644
1797--- a/arch/arm/include/asm/mach/dma.h
1798+++ b/arch/arm/include/asm/mach/dma.h
1799@@ -22,7 +22,7 @@ struct dma_ops {
1800 int (*residue)(unsigned int, dma_t *); /* optional */
1801 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1802 const char *type;
1803-};
1804+} __do_const;
1805
1806 struct dma_struct {
1807 void *addr; /* single DMA address */
1808diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1809index 2fe141f..192dc01 100644
1810--- a/arch/arm/include/asm/mach/map.h
1811+++ b/arch/arm/include/asm/mach/map.h
1812@@ -27,13 +27,16 @@ struct map_desc {
1813 #define MT_MINICLEAN 6
1814 #define MT_LOW_VECTORS 7
1815 #define MT_HIGH_VECTORS 8
1816-#define MT_MEMORY 9
1817+#define MT_MEMORY_RWX 9
1818 #define MT_ROM 10
1819-#define MT_MEMORY_NONCACHED 11
1820+#define MT_MEMORY_NONCACHED_RX 11
1821 #define MT_MEMORY_DTCM 12
1822 #define MT_MEMORY_ITCM 13
1823 #define MT_MEMORY_SO 14
1824 #define MT_MEMORY_DMA_READY 15
1825+#define MT_MEMORY_RW 16
1826+#define MT_MEMORY_RX 17
1827+#define MT_MEMORY_NONCACHED_RW 18
1828
1829 #ifdef CONFIG_MMU
1830 extern void iotable_init(struct map_desc *, int);
1831diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
1832index 863a661..a7b85e0 100644
1833--- a/arch/arm/include/asm/mmu_context.h
1834+++ b/arch/arm/include/asm/mmu_context.h
1835@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
1836 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
1837 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
1838
1839+DECLARE_PER_CPU(atomic64_t, active_asids);
1840+
1841 #else /* !CONFIG_CPU_HAS_ASID */
1842
1843 #ifdef CONFIG_MMU
1844diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1845index 53426c6..c7baff3 100644
1846--- a/arch/arm/include/asm/outercache.h
1847+++ b/arch/arm/include/asm/outercache.h
1848@@ -35,7 +35,7 @@ struct outer_cache_fns {
1849 #endif
1850 void (*set_debug)(unsigned long);
1851 void (*resume)(void);
1852-};
1853+} __no_const;
1854
1855 #ifdef CONFIG_OUTER_CACHE
1856
1857diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1858index 812a494..71fc0b6 100644
1859--- a/arch/arm/include/asm/page.h
1860+++ b/arch/arm/include/asm/page.h
1861@@ -114,7 +114,7 @@ struct cpu_user_fns {
1862 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1863 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1864 unsigned long vaddr, struct vm_area_struct *vma);
1865-};
1866+} __no_const;
1867
1868 #ifdef MULTI_USER
1869 extern struct cpu_user_fns cpu_user;
1870diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1871index 943504f..c37a730 100644
1872--- a/arch/arm/include/asm/pgalloc.h
1873+++ b/arch/arm/include/asm/pgalloc.h
1874@@ -17,6 +17,7 @@
1875 #include <asm/processor.h>
1876 #include <asm/cacheflush.h>
1877 #include <asm/tlbflush.h>
1878+#include <asm/system_info.h>
1879
1880 #define check_pgt_cache() do { } while (0)
1881
1882@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1883 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1884 }
1885
1886+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1887+{
1888+ pud_populate(mm, pud, pmd);
1889+}
1890+
1891 #else /* !CONFIG_ARM_LPAE */
1892
1893 /*
1894@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1895 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1896 #define pmd_free(mm, pmd) do { } while (0)
1897 #define pud_populate(mm,pmd,pte) BUG()
1898+#define pud_populate_kernel(mm,pmd,pte) BUG()
1899
1900 #endif /* CONFIG_ARM_LPAE */
1901
1902@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1903 __free_page(pte);
1904 }
1905
1906+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1907+{
1908+#ifdef CONFIG_ARM_LPAE
1909+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1910+#else
1911+ if (addr & SECTION_SIZE)
1912+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1913+ else
1914+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1915+#endif
1916+ flush_pmd_entry(pmdp);
1917+}
1918+
1919 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1920 pmdval_t prot)
1921 {
1922@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1923 static inline void
1924 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1925 {
1926- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1927+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1928 }
1929 #define pmd_pgtable(pmd) pmd_page(pmd)
1930
1931diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1932index 5cfba15..f415e1a 100644
1933--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1934+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1935@@ -20,12 +20,15 @@
1936 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1937 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1938 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1939+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1940 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1941 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1942 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1943+
1944 /*
1945 * - section
1946 */
1947+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1948 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1949 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1950 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1951@@ -37,6 +40,7 @@
1952 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1953 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1954 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1955+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1956
1957 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1958 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1959@@ -66,6 +70,7 @@
1960 * - extended small page/tiny page
1961 */
1962 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1963+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1964 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1965 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1966 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1967diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1968index f97ee02..07f1be5 100644
1969--- a/arch/arm/include/asm/pgtable-2level.h
1970+++ b/arch/arm/include/asm/pgtable-2level.h
1971@@ -125,6 +125,7 @@
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1973 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1974 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1975+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7 */
1976
1977 /*
1978 * These are the memory types, defined to be compatible with
1979diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1980index d795282..a43ea90 100644
1981--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1982+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1983@@ -32,15 +32,18 @@
1984 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1985 #define PMD_BIT4 (_AT(pmdval_t, 0))
1986 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1987+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1988
1989 /*
1990 * - section
1991 */
1992 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1993 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1994+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1995 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1996 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1997 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1998+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1999 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
2000 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
2001 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
2002@@ -66,6 +69,7 @@
2003 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2004 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2005 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2006+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2007 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2008
2009 /*
2010diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2011index a3f3792..7b932a6 100644
2012--- a/arch/arm/include/asm/pgtable-3level.h
2013+++ b/arch/arm/include/asm/pgtable-3level.h
2014@@ -74,6 +74,7 @@
2015 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
2016 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2017 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2018+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2019 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2020 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2021 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2022@@ -82,6 +83,7 @@
2023 /*
2024 * To be used in assembly code with the upper page attributes.
2025 */
2026+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2027 #define L_PTE_XN_HIGH (1 << (54 - 32))
2028 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2029
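
L_PTE_PXN_HIGH above is the upper-word view of the 64-bit LPAE descriptor bit used by assembly code: bit 53 of the descriptor is bit 53 - 32 = 21 of the high word. A one-line check:

/* Check that the _HIGH constants are the upper-word views of the 64-bit
 * LPAE bits: bit 53 of the descriptor is bit 21 of its high word. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pte_pxn = 1ULL << 53;
        uint32_t high    = (uint32_t)(pte_pxn >> 32);

        assert(high == 1U << (53 - 32));        /* L_PTE_PXN_HIGH = 1 << 21 */
        printf("PXN high-word bit: %#x\n", high);
        return 0;
}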
2030diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2031index c094749..a6ff605 100644
2032--- a/arch/arm/include/asm/pgtable.h
2033+++ b/arch/arm/include/asm/pgtable.h
2034@@ -30,6 +30,9 @@
2035 #include <asm/pgtable-2level.h>
2036 #endif
2037
2038+#define ktla_ktva(addr) (addr)
2039+#define ktva_ktla(addr) (addr)
2040+
2041 /*
2042 * Just any arbitrary offset to the start of the vmalloc VM area: the
2043 * current 8MB value just means that there will be a 8MB "hole" after the
2044@@ -45,6 +48,9 @@
2045 #define LIBRARY_TEXT_START 0x0c000000
2046
2047 #ifndef __ASSEMBLY__
2048+extern pteval_t __supported_pte_mask;
2049+extern pmdval_t __supported_pmd_mask;
2050+
2051 extern void __pte_error(const char *file, int line, pte_t);
2052 extern void __pmd_error(const char *file, int line, pmd_t);
2053 extern void __pgd_error(const char *file, int line, pgd_t);
2054@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2055 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2056 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2057
2058+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2059+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2060+
2061+#ifdef CONFIG_PAX_KERNEXEC
2062+#include <asm/domain.h>
2063+#include <linux/thread_info.h>
2064+#include <linux/preempt.h>
2065+#endif
2066+
2067+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2068+static inline int test_domain(int domain, int domaintype)
2069+{
2070+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2071+}
2072+#endif
2073+
2074+#ifdef CONFIG_PAX_KERNEXEC
2075+static inline unsigned long pax_open_kernel(void) {
2076+#ifdef CONFIG_ARM_LPAE
2077+ /* TODO */
2078+#else
2079+ preempt_disable();
2080+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2081+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2082+#endif
2083+ return 0;
2084+}
2085+
2086+static inline unsigned long pax_close_kernel(void) {
2087+#ifdef CONFIG_ARM_LPAE
2088+ /* TODO */
2089+#else
2090+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2091+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2092+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2093+ preempt_enable_no_resched();
2094+#endif
2095+ return 0;
2096+}
2097+#else
2098+static inline unsigned long pax_open_kernel(void) { return 0; }
2099+static inline unsigned long pax_close_kernel(void) { return 0; }
2100+#endif
2101+
2102 /*
2103 * This is the lowest virtual address we can permit any user space
2104 * mapping to be mapped at. This is particularly important for
2105@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2106 /*
2107 * The pgprot_* and protection_map entries will be fixed up in runtime
2108 * to include the cachable and bufferable bits based on memory policy,
2109- * as well as any architecture dependent bits like global/ASID and SMP
2110- * shared mapping bits.
2111+ * as well as any architecture dependent bits like global/ASID, PXN,
2112+ * and SMP shared mapping bits.
2113 */
2114 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2115
2116@@ -241,7 +291,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2117 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2118 {
2119 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2120- L_PTE_NONE | L_PTE_VALID;
2121+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2122 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2123 return pte;
2124 }
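
pax_open_kernel()/pax_close_kernel() above work by rewriting the 2-bit access field for DOMAIN_KERNEL in the DACR (mirrored in cpu_domain) and flipping it back, with preemption disabled across the window. A userspace sketch of that bookkeeping follows; the domain index and access values are illustrative (the patch re-purposes the DOMAIN_MANAGER/DOMAIN_KERNEXEC names), and modify_domain() here is a stand-in for the asm/domain.h helper.

/* Userspace sketch of the DACR bookkeeping behind pax_open_kernel() /
 * pax_close_kernel().  Domain numbers and access values are illustrative;
 * the real ones live in asm/domain.h. */
#include <assert.h>
#include <stdio.h>

#define DOMAIN_KERNEL        2          /* assumed domain index */
#define DOMAIN_KERNELCLIENT  1          /* access checked against PTE perms */
#define DOMAIN_MANAGER       3          /* access ignores PTE permissions */

#define domain_val(dom, type) ((unsigned long)(type) << (2 * (dom)))
#define domain_mask(dom)      domain_val(dom, 3)

static unsigned long modify_domain(unsigned long dacr, int dom, int type)
{
        return (dacr & ~domain_mask(dom)) | domain_val(dom, type);
}

int main(void)
{
        unsigned long dacr = domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT);

        /* pax_open_kernel(): kernel text temporarily writable (manager) */
        dacr = modify_domain(dacr, DOMAIN_KERNEL, DOMAIN_MANAGER);
        assert((dacr & domain_mask(DOMAIN_KERNEL)) ==
               domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER));

        /* pax_close_kernel(): back to client, PTE permissions enforced */
        dacr = modify_domain(dacr, DOMAIN_KERNEL, DOMAIN_KERNELCLIENT);
        printf("DACR after close: %#lx\n", dacr);
        return 0;
}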
2125diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2126index f3628fb..a0672dd 100644
2127--- a/arch/arm/include/asm/proc-fns.h
2128+++ b/arch/arm/include/asm/proc-fns.h
2129@@ -75,7 +75,7 @@ extern struct processor {
2130 unsigned int suspend_size;
2131 void (*do_suspend)(void *);
2132 void (*do_resume)(void *);
2133-} processor;
2134+} __do_const processor;
2135
2136 #ifndef MULTI_CPU
2137 extern void cpu_proc_init(void);
2138diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2139index 06e7d50..8a8e251 100644
2140--- a/arch/arm/include/asm/processor.h
2141+++ b/arch/arm/include/asm/processor.h
2142@@ -65,9 +65,8 @@ struct thread_struct {
2143 regs->ARM_cpsr |= PSR_ENDSTATE; \
2144 regs->ARM_pc = pc & ~1; /* pc */ \
2145 regs->ARM_sp = sp; /* sp */ \
2146- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2147- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2148- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2149+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2150+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2151 nommu_start_thread(regs); \
2152 })
2153
2154diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2155index d3a22be..3a69ad5 100644
2156--- a/arch/arm/include/asm/smp.h
2157+++ b/arch/arm/include/asm/smp.h
2158@@ -107,7 +107,7 @@ struct smp_operations {
2159 int (*cpu_disable)(unsigned int cpu);
2160 #endif
2161 #endif
2162-};
2163+} __no_const;
2164
2165 /*
2166 * set platform specific SMP operations
2167diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2168index cddda1f..ff357f7 100644
2169--- a/arch/arm/include/asm/thread_info.h
2170+++ b/arch/arm/include/asm/thread_info.h
2171@@ -77,9 +77,9 @@ struct thread_info {
2172 .flags = 0, \
2173 .preempt_count = INIT_PREEMPT_COUNT, \
2174 .addr_limit = KERNEL_DS, \
2175- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2176- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2177- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2178+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2179+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2180+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2181 .restart_block = { \
2182 .fn = do_no_restart_syscall, \
2183 }, \
2184@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2185 #define TIF_SYSCALL_AUDIT 9
2186 #define TIF_SYSCALL_TRACEPOINT 10
2187 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2188+
2189+/* within 8 bits of TIF_SYSCALL_TRACE
2190+ * to meet flexible second operand requirements
2191+ */
2192+#define TIF_GRSEC_SETXID 12
2193+
2194 #define TIF_USING_IWMMXT 17
2195 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2196 #define TIF_RESTORE_SIGMASK 20
2197@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2198 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2199 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2200 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2201+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2202
2203 /* Checks for any syscall work in entry-common.S */
2204 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2205- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2206+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2207
2208 /*
2209 * Change these and you break ASM code in entry-common.S
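
The "within 8 bits of TIF_SYSCALL_TRACE" comment above is about ARM's flexible second operand: a data-processing immediate is an 8-bit constant rotated right by an even amount, so the combined _TIF_SYSCALL_WORK mask only assembles if all its flags fit one such window. A brute-force check, assuming TIF_SYSCALL_TRACE is bit 8 as in this kernel:

/* Test whether a mask has an ARM flexible-second-operand encoding: an
 * 8-bit constant rotated right by an even amount. */
#include <assert.h>
#include <stdio.h>

static unsigned int rotl(unsigned int v, int n)
{
        n &= 31;
        return n ? (v << n) | (v >> (32 - n)) : v;
}

static int arm_immediate_ok(unsigned int v)
{
        for (int rot = 0; rot < 32; rot += 2)
                if (rotl(v, rot) <= 0xffu)
                        return 1;
        return 0;
}

int main(void)
{
        unsigned int work_mask = 0x1f00;   /* TIF bits 8..12, incl. bit 12 */
        unsigned int bad_mask  = 0x20f00;  /* same flags but one at bit 17 */

        assert(arm_immediate_ok(work_mask));
        assert(!arm_immediate_ok(bad_mask));
        printf("mask %#x encodes as an ARM immediate; %#x does not\n",
               work_mask, bad_mask);
        return 0;
}

Had TIF_GRSEC_SETXID been placed further out, the single tst in entry-common.S against _TIF_SYSCALL_WORK would no longer encode, which is exactly what the comment guards against.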
2210diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
2211index 6e924d3..a9f3ddf 100644
2212--- a/arch/arm/include/asm/tlbflush.h
2213+++ b/arch/arm/include/asm/tlbflush.h
2214@@ -430,6 +430,21 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
2215 }
2216 }
2217
2218+#ifdef CONFIG_ARM_ERRATA_798181
2219+static inline void dummy_flush_tlb_a15_erratum(void)
2220+{
2221+ /*
2222+ * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
2223+ */
2224+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
2225+ dsb();
2226+}
2227+#else
2228+static inline void dummy_flush_tlb_a15_erratum(void)
2229+{
2230+}
2231+#endif
2232+
2233 /*
2234 * flush_pmd_entry
2235 *
2236diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2237index 7e1f760..752fcb7 100644
2238--- a/arch/arm/include/asm/uaccess.h
2239+++ b/arch/arm/include/asm/uaccess.h
2240@@ -18,6 +18,7 @@
2241 #include <asm/domain.h>
2242 #include <asm/unified.h>
2243 #include <asm/compiler.h>
2244+#include <asm/pgtable.h>
2245
2246 #define VERIFY_READ 0
2247 #define VERIFY_WRITE 1
2248@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2249 #define USER_DS TASK_SIZE
2250 #define get_fs() (current_thread_info()->addr_limit)
2251
2252+static inline void pax_open_userland(void)
2253+{
2254+
2255+#ifdef CONFIG_PAX_MEMORY_UDEREF
2256+ if (get_fs() == USER_DS) {
2257+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2258+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2259+ }
2260+#endif
2261+
2262+}
2263+
2264+static inline void pax_close_userland(void)
2265+{
2266+
2267+#ifdef CONFIG_PAX_MEMORY_UDEREF
2268+ if (get_fs() == USER_DS) {
2269+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2270+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2271+ }
2272+#endif
2273+
2274+}
2275+
2276 static inline void set_fs(mm_segment_t fs)
2277 {
2278 current_thread_info()->addr_limit = fs;
2279- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2280+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2281 }
2282
2283 #define segment_eq(a,b) ((a) == (b))
2284@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2285
2286 #define get_user(x,p) \
2287 ({ \
2288+ int __e; \
2289 might_fault(); \
2290- __get_user_check(x,p); \
2291+ pax_open_userland(); \
2292+ __e = __get_user_check(x,p); \
2293+ pax_close_userland(); \
2294+ __e; \
2295 })
2296
2297 extern int __put_user_1(void *, unsigned int);
2298@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2299
2300 #define put_user(x,p) \
2301 ({ \
2302+ int __e; \
2303 might_fault(); \
2304- __put_user_check(x,p); \
2305+ pax_open_userland(); \
2306+ __e = __put_user_check(x,p); \
2307+ pax_close_userland(); \
2308+ __e; \
2309 })
2310
2311 #else /* CONFIG_MMU */
2312@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2313 #define __get_user(x,ptr) \
2314 ({ \
2315 long __gu_err = 0; \
2316+ pax_open_userland(); \
2317 __get_user_err((x),(ptr),__gu_err); \
2318+ pax_close_userland(); \
2319 __gu_err; \
2320 })
2321
2322 #define __get_user_error(x,ptr,err) \
2323 ({ \
2324+ pax_open_userland(); \
2325 __get_user_err((x),(ptr),err); \
2326+ pax_close_userland(); \
2327 (void) 0; \
2328 })
2329
2330@@ -312,13 +349,17 @@ do { \
2331 #define __put_user(x,ptr) \
2332 ({ \
2333 long __pu_err = 0; \
2334+ pax_open_userland(); \
2335 __put_user_err((x),(ptr),__pu_err); \
2336+ pax_close_userland(); \
2337 __pu_err; \
2338 })
2339
2340 #define __put_user_error(x,ptr,err) \
2341 ({ \
2342+ pax_open_userland(); \
2343 __put_user_err((x),(ptr),err); \
2344+ pax_close_userland(); \
2345 (void) 0; \
2346 })
2347
2348@@ -418,11 +459,44 @@ do { \
2349
2350
2351 #ifdef CONFIG_MMU
2352-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2353-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2354+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2355+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2356+
2357+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2358+{
2359+ unsigned long ret;
2360+
2361+ check_object_size(to, n, false);
2362+ pax_open_userland();
2363+ ret = ___copy_from_user(to, from, n);
2364+ pax_close_userland();
2365+ return ret;
2366+}
2367+
2368+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2369+{
2370+ unsigned long ret;
2371+
2372+ check_object_size(from, n, true);
2373+ pax_open_userland();
2374+ ret = ___copy_to_user(to, from, n);
2375+ pax_close_userland();
2376+ return ret;
2377+}
2378+
2379 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2380-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2381+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2382 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2383+
2384+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2385+{
2386+ unsigned long ret;
2387+ pax_open_userland();
2388+ ret = ___clear_user(addr, n);
2389+ pax_close_userland();
2390+ return ret;
2391+}
2392+
2393 #else
2394 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2395 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2396@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2397
2398 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2399 {
2400+ if ((long)n < 0)
2401+ return n;
2402+
2403 if (access_ok(VERIFY_READ, from, n))
2404 n = __copy_from_user(to, from, n);
2405 else /* security hole - plug it */
2406@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2407
2408 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2409 {
2410+ if ((long)n < 0)
2411+ return n;
2412+
2413 if (access_ok(VERIFY_WRITE, to, n))
2414 n = __copy_to_user(to, from, n);
2415 return n;
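
The added (long)n < 0 guard in copy_from_user()/copy_to_user() rejects sizes with the top bit set before access_ok() even runs; such sizes are almost always the product of an unsigned length underflow in a caller. A sketch (the checked_copy() helper is an illustrative stand-in):

/* Sketch of the size sanity check added above: a length produced by
 * unsigned underflow (e.g. len - hdr when len < hdr) wraps to a huge
 * value with the top bit set. */
#include <stdio.h>

static unsigned long checked_copy(unsigned long n)
{
        if ((long)n < 0)        /* size > LONG_MAX: certainly bogus */
                return n;       /* report all n bytes as uncopied */
        /* ... access_ok() + the real copy would run here ... */
        return 0;
}

int main(void)
{
        unsigned long len = 4, hdr = 8;
        unsigned long n = len - hdr;            /* underflows, wraps huge */

        printf("n = %#lx, leftover = %#lx\n", n, checked_copy(n));
        return 0;
}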
2416diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2417index 96ee092..37f1844 100644
2418--- a/arch/arm/include/uapi/asm/ptrace.h
2419+++ b/arch/arm/include/uapi/asm/ptrace.h
2420@@ -73,7 +73,7 @@
2421 * ARMv7 groups of PSR bits
2422 */
2423 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2424-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2425+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2426 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2427 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2428
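
The PSR_ISET_MASK correction above: in the CPSR, the Thumb bit T is bit 5 (0x20) and J is bit 24, while bit 4 (the old 0x10) belongs to the mode field M[4:0]. The ISA-state mask is therefore (1 << 24) | (1 << 5) = 0x01000020:

/* The corrected PSR_ISET_MASK: J is CPSR bit 24, T is CPSR bit 5, so the
 * ISA-state mask is 0x01000020; the old 0x01000010 tested mode bit M[4]. */
#include <assert.h>

int main(void)
{
        assert(((1u << 24) | (1u << 5)) == 0x01000020u);
        return 0;
}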
2429diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2430index 60d3b73..d27ee09 100644
2431--- a/arch/arm/kernel/armksyms.c
2432+++ b/arch/arm/kernel/armksyms.c
2433@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2434 #ifdef CONFIG_MMU
2435 EXPORT_SYMBOL(copy_page);
2436
2437-EXPORT_SYMBOL(__copy_from_user);
2438-EXPORT_SYMBOL(__copy_to_user);
2439-EXPORT_SYMBOL(__clear_user);
2440+EXPORT_SYMBOL(___copy_from_user);
2441+EXPORT_SYMBOL(___copy_to_user);
2442+EXPORT_SYMBOL(___clear_user);
2443
2444 EXPORT_SYMBOL(__get_user_1);
2445 EXPORT_SYMBOL(__get_user_2);
2446diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2447index 0f82098..3dbd3ee 100644
2448--- a/arch/arm/kernel/entry-armv.S
2449+++ b/arch/arm/kernel/entry-armv.S
2450@@ -47,6 +47,87 @@
2451 9997:
2452 .endm
2453
2454+ .macro pax_enter_kernel
2455+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2456+ @ make aligned space for saved DACR
2457+ sub sp, sp, #8
2458+ @ save regs
2459+ stmdb sp!, {r1, r2}
2460+ @ read DACR from cpu_domain into r1
2461+ mov r2, sp
2462+ @ assume 8K pages, since we have to split the immediate in two
2463+ bic r2, r2, #(0x1fc0)
2464+ bic r2, r2, #(0x3f)
2465+ ldr r1, [r2, #TI_CPU_DOMAIN]
2466+ @ store old DACR on stack
2467+ str r1, [sp, #8]
2468+#ifdef CONFIG_PAX_KERNEXEC
2469+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2470+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2471+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2472+#endif
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2475+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2476+#endif
2477+ @ write r1 to current_thread_info()->cpu_domain
2478+ str r1, [r2, #TI_CPU_DOMAIN]
2479+ @ write r1 to DACR
2480+ mcr p15, 0, r1, c3, c0, 0
2481+ @ instruction sync
2482+ instr_sync
2483+ @ restore regs
2484+ ldmia sp!, {r1, r2}
2485+#endif
2486+ .endm
2487+
2488+ .macro pax_open_userland
2489+#ifdef CONFIG_PAX_MEMORY_UDEREF
2490+ @ save regs
2491+ stmdb sp!, {r0, r1}
2492+ @ read DACR from cpu_domain into r1
2493+ mov r0, sp
2494+ @ assume 8K pages, since we have to split the immediate in two
2495+ bic r0, r0, #(0x1fc0)
2496+ bic r0, r0, #(0x3f)
2497+ ldr r1, [r0, #TI_CPU_DOMAIN]
2498+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2499+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2500+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2501+ @ write r1 to current_thread_info()->cpu_domain
2502+ str r1, [r0, #TI_CPU_DOMAIN]
2503+ @ write r1 to DACR
2504+ mcr p15, 0, r1, c3, c0, 0
2505+ @ instruction sync
2506+ instr_sync
2507+ @ restore regs
2508+ ldmia sp!, {r0, r1}
2509+#endif
2510+ .endm
2511+
2512+ .macro pax_close_userland
2513+#ifdef CONFIG_PAX_MEMORY_UDEREF
2514+ @ save regs
2515+ stmdb sp!, {r0, r1}
2516+ @ read DACR from cpu_domain into r1
2517+ mov r0, sp
2518+ @ assume 8K pages, since we have to split the immediate in two
2519+ bic r0, r0, #(0x1fc0)
2520+ bic r0, r0, #(0x3f)
2521+ ldr r1, [r0, #TI_CPU_DOMAIN]
2522+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2523+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2524+ @ write r1 to current_thread_info()->cpu_domain
2525+ str r1, [r0, #TI_CPU_DOMAIN]
2526+ @ write r1 to DACR
2527+ mcr p15, 0, r1, c3, c0, 0
2528+ @ instruction sync
2529+ instr_sync
2530+ @ restore regs
2531+ ldmia sp!, {r0, r1}
2532+#endif
2533+ .endm
2534+
2535 .macro pabt_helper
2536 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2537 #ifdef MULTI_PABORT
2538@@ -89,11 +170,15 @@
2539 * Invalid mode handlers
2540 */
2541 .macro inv_entry, reason
2542+
2543+ pax_enter_kernel
2544+
2545 sub sp, sp, #S_FRAME_SIZE
2546 ARM( stmib sp, {r1 - lr} )
2547 THUMB( stmia sp, {r0 - r12} )
2548 THUMB( str sp, [sp, #S_SP] )
2549 THUMB( str lr, [sp, #S_LR] )
2550+
2551 mov r1, #\reason
2552 .endm
2553
2554@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2555 .macro svc_entry, stack_hole=0
2556 UNWIND(.fnstart )
2557 UNWIND(.save {r0 - pc} )
2558+
2559+ pax_enter_kernel
2560+
2561 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2562+
2563 #ifdef CONFIG_THUMB2_KERNEL
2564 SPFIX( str r0, [sp] ) @ temporarily saved
2565 SPFIX( mov r0, sp )
2566@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2567 ldmia r0, {r3 - r5}
2568 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2569 mov r6, #-1 @ "" "" "" ""
2570+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2571+ @ offset sp by 8 as done in pax_enter_kernel
2572+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2573+#else
2574 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2575+#endif
2576 SPFIX( addeq r2, r2, #4 )
2577 str r3, [sp, #-4]! @ save the "real" r0 copied
2578 @ from the exception stack
2579@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2580 .macro usr_entry
2581 UNWIND(.fnstart )
2582 UNWIND(.cantunwind ) @ don't unwind the user space
2583+
2584+ pax_enter_kernel_user
2585+
2586 sub sp, sp, #S_FRAME_SIZE
2587 ARM( stmib sp, {r1 - r12} )
2588 THUMB( stmia sp, {r0 - r12} )
2589@@ -456,7 +553,9 @@ __und_usr:
2590 tst r3, #PSR_T_BIT @ Thumb mode?
2591 bne __und_usr_thumb
2592 sub r4, r2, #4 @ ARM instr at LR - 4
2593+ pax_open_userland
2594 1: ldrt r0, [r4]
2595+ pax_close_userland
2596 #ifdef CONFIG_CPU_ENDIAN_BE8
2597 rev r0, r0 @ little endian instruction
2598 #endif
2599@@ -491,10 +590,14 @@ __und_usr_thumb:
2600 */
2601 .arch armv6t2
2602 #endif
2603+ pax_open_userland
2604 2: ldrht r5, [r4]
2605+ pax_close_userland
2606 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2607 blo __und_usr_fault_16 @ 16bit undefined instruction
2608+ pax_open_userland
2609 3: ldrht r0, [r2]
2610+ pax_close_userland
2611 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2612 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2613 orr r0, r0, r5, lsl #16
2614@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2615 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2616 THUMB( str sp, [ip], #4 )
2617 THUMB( str lr, [ip], #4 )
2618-#ifdef CONFIG_CPU_USE_DOMAINS
2619+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2620 ldr r6, [r2, #TI_CPU_DOMAIN]
2621 #endif
2622 set_tls r3, r4, r5
2623@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2624 ldr r8, =__stack_chk_guard
2625 ldr r7, [r7, #TSK_STACK_CANARY]
2626 #endif
2627-#ifdef CONFIG_CPU_USE_DOMAINS
2628+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2629 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2630 #endif
2631 mov r5, r0
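
The repeated "assume 8K pages" sequence in the macros above is the open-coded current_thread_info(): sp rounded down to the 8K kernel stack. 0x1fff is not encodable as a single ARM immediate, hence the two bic instructions with 0x1fc0 and 0x3f, which together clear the low 13 bits. A sketch with an arbitrary example stack pointer:

/* What the paired "bic rN, rN, #0x1fc0 / bic rN, rN, #0x3f" computes:
 * sp aligned down to the 8K kernel stack, i.e. the thread_info base. */
#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

int main(void)
{
        unsigned long sp = 0xc1357e48UL;        /* example stack pointer */
        unsigned long ti = sp;

        ti &= ~0x1fc0UL;                        /* first bic */
        ti &= ~0x3fUL;                          /* second bic */
        assert(ti == (sp & ~(THREAD_SIZE - 1)));
        printf("thread_info at %#lx\n", ti);
        return 0;
}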
2632diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2633index a6c301e..908821b 100644
2634--- a/arch/arm/kernel/entry-common.S
2635+++ b/arch/arm/kernel/entry-common.S
2636@@ -10,18 +10,46 @@
2637
2638 #include <asm/unistd.h>
2639 #include <asm/ftrace.h>
2640+#include <asm/domain.h>
2641 #include <asm/unwind.h>
2642
2643+#include "entry-header.S"
2644+
2645 #ifdef CONFIG_NEED_RET_TO_USER
2646 #include <mach/entry-macro.S>
2647 #else
2648 .macro arch_ret_to_user, tmp1, tmp2
2649+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2650+ @ save regs
2651+ stmdb sp!, {r1, r2}
2652+ @ read DACR from cpu_domain into r1
2653+ mov r2, sp
2654+ @ assume 8K pages, since we have to split the immediate in two
2655+ bic r2, r2, #(0x1fc0)
2656+ bic r2, r2, #(0x3f)
2657+ ldr r1, [r2, #TI_CPU_DOMAIN]
2658+#ifdef CONFIG_PAX_KERNEXEC
2659+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2660+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2661+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2662+#endif
2663+#ifdef CONFIG_PAX_MEMORY_UDEREF
2664+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2665+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2666+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2667+#endif
2668+ @ write r1 to current_thread_info()->cpu_domain
2669+ str r1, [r2, #TI_CPU_DOMAIN]
2670+ @ write r1 to DACR
2671+ mcr p15, 0, r1, c3, c0, 0
2672+ @ instruction sync
2673+ instr_sync
2674+ @ restore regs
2675+ ldmia sp!, {r1, r2}
2676+#endif
2677 .endm
2678 #endif
2679
2680-#include "entry-header.S"
2681-
2682-
2683 .align 5
2684 /*
2685 * This is the fast syscall return path. We do as little as
2686@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2687
2688 .align 5
2689 ENTRY(vector_swi)
2690+
2691 sub sp, sp, #S_FRAME_SIZE
2692 stmia sp, {r0 - r12} @ Calling r0 - r12
2693 ARM( add r8, sp, #S_PC )
2694@@ -388,6 +417,12 @@ ENTRY(vector_swi)
2695 ldr scno, [lr, #-4] @ get SWI instruction
2696 #endif
2697
2698+ /*
2699+ * do this here to avoid a performance hit of wrapping the code above
2700+ * that directly dereferences userland to parse the SWI instruction
2701+ */
2702+ pax_enter_kernel_user
2703+
2704 #ifdef CONFIG_ALIGNMENT_TRAP
2705 ldr ip, __cr_alignment
2706 ldr ip, [ip]
2707diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2708index 9a8531e..812e287 100644
2709--- a/arch/arm/kernel/entry-header.S
2710+++ b/arch/arm/kernel/entry-header.S
2711@@ -73,9 +73,66 @@
2712 msr cpsr_c, \rtemp @ switch back to the SVC mode
2713 .endm
2714
2715+ .macro pax_enter_kernel_user
2716+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2717+ @ save regs
2718+ stmdb sp!, {r0, r1}
2719+ @ read DACR from cpu_domain into r1
2720+ mov r0, sp
2721+ @ assume 8K pages, since we have to split the immediate in two
2722+ bic r0, r0, #(0x1fc0)
2723+ bic r0, r0, #(0x3f)
2724+ ldr r1, [r0, #TI_CPU_DOMAIN]
2725+#ifdef CONFIG_PAX_MEMORY_UDEREF
2726+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2727+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2728+#endif
2729+#ifdef CONFIG_PAX_KERNEXEC
2730+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2731+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2732+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2733+#endif
2734+ @ write r1 to current_thread_info()->cpu_domain
2735+ str r1, [r0, #TI_CPU_DOMAIN]
2736+ @ write r1 to DACR
2737+ mcr p15, 0, r1, c3, c0, 0
2738+ @ instruction sync
2739+ instr_sync
2740+ @ restore regs
2741+ ldmia sp!, {r0, r1}
2742+#endif
2743+ .endm
2744+
2745+ .macro pax_exit_kernel
2746+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2747+ @ save regs
2748+ stmdb sp!, {r0, r1}
2749+ @ read old DACR from stack into r1
2750+ ldr r1, [sp, #(8 + S_SP)]
2751+ sub r1, r1, #8
2752+ ldr r1, [r1]
2753+
2754+ @ write r1 to current_thread_info()->cpu_domain
2755+ mov r0, sp
2756+ @ assume 8K pages, since we have to split the immediate in two
2757+ bic r0, r0, #(0x1fc0)
2758+ bic r0, r0, #(0x3f)
2759+ str r1, [r0, #TI_CPU_DOMAIN]
2760+ @ write r1 to DACR
2761+ mcr p15, 0, r1, c3, c0, 0
2762+ @ instruction sync
2763+ instr_sync
2764+ @ restore regs
2765+ ldmia sp!, {r0, r1}
2766+#endif
2767+ .endm
2768+
2769 #ifndef CONFIG_THUMB2_KERNEL
2770 .macro svc_exit, rpsr
2771 msr spsr_cxsf, \rpsr
2772+
2773+ pax_exit_kernel
2774+
2775 #if defined(CONFIG_CPU_V6)
2776 ldr r0, [sp]
2777 strex r1, r2, [sp] @ clear the exclusive monitor
2778@@ -121,6 +178,9 @@
2779 .endm
2780 #else /* CONFIG_THUMB2_KERNEL */
2781 .macro svc_exit, rpsr
2782+
2783+ pax_exit_kernel
2784+
2785 ldr lr, [sp, #S_SP] @ top of the stack
2786 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2787 clrex @ clear the exclusive monitor
2788diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2789index 2adda11..7fbe958 100644
2790--- a/arch/arm/kernel/fiq.c
2791+++ b/arch/arm/kernel/fiq.c
2792@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2793 #if defined(CONFIG_CPU_USE_DOMAINS)
2794 memcpy((void *)0xffff001c, start, length);
2795 #else
2796+ pax_open_kernel();
2797 memcpy(vectors_page + 0x1c, start, length);
2798+ pax_close_kernel();
2799 #endif
2800 flush_icache_range(0xffff001c, 0xffff001c + length);
2801 if (!vectors_high())
2802diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2803index e0eb9a1..caee108 100644
2804--- a/arch/arm/kernel/head.S
2805+++ b/arch/arm/kernel/head.S
2806@@ -52,7 +52,9 @@
2807 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2808
2809 .macro pgtbl, rd, phys
2810- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2811+ mov \rd, #TEXT_OFFSET
2812+ sub \rd, #PG_DIR_SIZE
2813+ add \rd, \rd, \phys
2814 .endm
2815
2816 /*
2817@@ -267,7 +269,7 @@ __create_page_tables:
2818 addne r6, r6, #1 << SECTION_SHIFT
2819 strne r6, [r3]
2820
2821-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2822+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2823 sub r4, r4, #4 @ Fixup page table pointer
2824 @ for 64-bit descriptors
2825 #endif
2826@@ -434,7 +436,7 @@ __enable_mmu:
2827 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2828 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2829 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2830- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2831+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2832 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2833 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2834 #endif
2835diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2836index 5ff2e77..556d030 100644
2837--- a/arch/arm/kernel/hw_breakpoint.c
2838+++ b/arch/arm/kernel/hw_breakpoint.c
2839@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2840 return NOTIFY_OK;
2841 }
2842
2843-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2844+static struct notifier_block dbg_reset_nb = {
2845 .notifier_call = dbg_reset_notify,
2846 };
2847
2848diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2849index 1e9be5d..03edbc2 100644
2850--- a/arch/arm/kernel/module.c
2851+++ b/arch/arm/kernel/module.c
2852@@ -37,12 +37,37 @@
2853 #endif
2854
2855 #ifdef CONFIG_MMU
2856-void *module_alloc(unsigned long size)
2857+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2858 {
2859+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2860+ return NULL;
2861 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2862- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2863+ GFP_KERNEL, prot, -1,
2864 __builtin_return_address(0));
2865 }
2866+
2867+void *module_alloc(unsigned long size)
2868+{
2869+
2870+#ifdef CONFIG_PAX_KERNEXEC
2871+ return __module_alloc(size, PAGE_KERNEL);
2872+#else
2873+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2874+#endif
2875+
2876+}
2877+
2878+#ifdef CONFIG_PAX_KERNEXEC
2879+void module_free_exec(struct module *mod, void *module_region)
2880+{
2881+ module_free(mod, module_region);
2882+}
2883+
2884+void *module_alloc_exec(unsigned long size)
2885+{
2886+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2887+}
2888+#endif
2889 #endif
2890
2891 int
2892diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2893index 07314af..c46655c 100644
2894--- a/arch/arm/kernel/patch.c
2895+++ b/arch/arm/kernel/patch.c
2896@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2897 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2898 int size;
2899
2900+ pax_open_kernel();
2901 if (thumb2 && __opcode_is_thumb16(insn)) {
2902 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2903 size = sizeof(u16);
2904@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2905 *(u32 *)addr = insn;
2906 size = sizeof(u32);
2907 }
2908+ pax_close_kernel();
2909
2910 flush_icache_range((uintptr_t)(addr),
2911 (uintptr_t)(addr) + size);
2912diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2913index 5f66206..dce492f 100644
2914--- a/arch/arm/kernel/perf_event_cpu.c
2915+++ b/arch/arm/kernel/perf_event_cpu.c
2916@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2917 return NOTIFY_OK;
2918 }
2919
2920-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2921+static struct notifier_block cpu_pmu_hotplug_notifier = {
2922 .notifier_call = cpu_pmu_notify,
2923 };
2924
2925diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2926index c6dec5f..e0fddd1 100644
2927--- a/arch/arm/kernel/process.c
2928+++ b/arch/arm/kernel/process.c
2929@@ -28,7 +28,6 @@
2930 #include <linux/tick.h>
2931 #include <linux/utsname.h>
2932 #include <linux/uaccess.h>
2933-#include <linux/random.h>
2934 #include <linux/hw_breakpoint.h>
2935 #include <linux/cpuidle.h>
2936 #include <linux/leds.h>
2937@@ -256,9 +255,10 @@ void machine_power_off(void)
2938 machine_shutdown();
2939 if (pm_power_off)
2940 pm_power_off();
2941+ BUG();
2942 }
2943
2944-void machine_restart(char *cmd)
2945+__noreturn void machine_restart(char *cmd)
2946 {
2947 machine_shutdown();
2948
2949@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2950 init_utsname()->release,
2951 (int)strcspn(init_utsname()->version, " "),
2952 init_utsname()->version);
2953- print_symbol("PC is at %s\n", instruction_pointer(regs));
2954- print_symbol("LR is at %s\n", regs->ARM_lr);
2955+ printk("PC is at %pA\n", instruction_pointer(regs));
2956+ printk("LR is at %pA\n", regs->ARM_lr);
2957 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2958 "sp : %08lx ip : %08lx fp : %08lx\n",
2959 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2960@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2961 return 0;
2962 }
2963
2964-unsigned long arch_randomize_brk(struct mm_struct *mm)
2965-{
2966- unsigned long range_end = mm->brk + 0x02000000;
2967- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2968-}
2969-
2970 #ifdef CONFIG_MMU
2971 /*
2972 * The vectors page is always readable from user space for the
2973@@ -470,9 +464,8 @@ static int __init gate_vma_init(void)
2974 {
2975 gate_vma.vm_start = 0xffff0000;
2976 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
2977- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2978- gate_vma.vm_flags = VM_READ | VM_EXEC |
2979- VM_MAYREAD | VM_MAYEXEC;
2980+ gate_vma.vm_flags = VM_NONE;
2981+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2982 return 0;
2983 }
2984 arch_initcall(gate_vma_init);
2985diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2986index 03deeff..741ce88 100644
2987--- a/arch/arm/kernel/ptrace.c
2988+++ b/arch/arm/kernel/ptrace.c
2989@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2990 return current_thread_info()->syscall;
2991 }
2992
2993+#ifdef CONFIG_GRKERNSEC_SETXID
2994+extern void gr_delayed_cred_worker(void);
2995+#endif
2996+
2997 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2998 {
2999 current_thread_info()->syscall = scno;
3000
3001+#ifdef CONFIG_GRKERNSEC_SETXID
3002+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3003+ gr_delayed_cred_worker();
3004+#endif
3005+
3006 /* Do the secure computing check first; failures should be fast. */
3007 if (secure_computing(scno) == -1)
3008 return -1;
3009diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3010index 3f6cbb2..39305c7 100644
3011--- a/arch/arm/kernel/setup.c
3012+++ b/arch/arm/kernel/setup.c
3013@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3014 unsigned int elf_hwcap __read_mostly;
3015 EXPORT_SYMBOL(elf_hwcap);
3016
3017+pteval_t __supported_pte_mask __read_only;
3018+pmdval_t __supported_pmd_mask __read_only;
3019
3020 #ifdef MULTI_CPU
3021-struct processor processor __read_mostly;
3022+struct processor processor;
3023 #endif
3024 #ifdef MULTI_TLB
3025-struct cpu_tlb_fns cpu_tlb __read_mostly;
3026+struct cpu_tlb_fns cpu_tlb __read_only;
3027 #endif
3028 #ifdef MULTI_USER
3029-struct cpu_user_fns cpu_user __read_mostly;
3030+struct cpu_user_fns cpu_user __read_only;
3031 #endif
3032 #ifdef MULTI_CACHE
3033-struct cpu_cache_fns cpu_cache __read_mostly;
3034+struct cpu_cache_fns cpu_cache __read_only;
3035 #endif
3036 #ifdef CONFIG_OUTER_CACHE
3037-struct outer_cache_fns outer_cache __read_mostly;
3038+struct outer_cache_fns outer_cache __read_only;
3039 EXPORT_SYMBOL(outer_cache);
3040 #endif
3041
3042@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3043 asm("mrc p15, 0, %0, c0, c1, 4"
3044 : "=r" (mmfr0));
3045 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3046- (mmfr0 & 0x000000f0) >= 0x00000030)
3047+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3048 cpu_arch = CPU_ARCH_ARMv7;
3049- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3050+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3051+ __supported_pte_mask |= L_PTE_PXN;
3052+ __supported_pmd_mask |= PMD_PXNTABLE;
3053+ }
3054+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3055 (mmfr0 & 0x000000f0) == 0x00000020)
3056 cpu_arch = CPU_ARCH_ARMv6;
3057 else
3058@@ -462,7 +468,7 @@ static void __init setup_processor(void)
3059 __cpu_architecture = __get_cpu_architecture();
3060
3061 #ifdef MULTI_CPU
3062- processor = *list->proc;
3063+ memcpy((void *)&processor, list->proc, sizeof processor);
3064 #endif
3065 #ifdef MULTI_TLB
3066 cpu_tlb = *list->tlb;
3067@@ -524,7 +530,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
3068 size -= start & ~PAGE_MASK;
3069 bank->start = PAGE_ALIGN(start);
3070
3071-#ifndef CONFIG_LPAE
3072+#ifndef CONFIG_ARM_LPAE
3073 if (bank->start + size < bank->start) {
3074 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
3075 "32-bit physical address space\n", (long long)start);
3076diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3077index 56f72d2..6924200 100644
3078--- a/arch/arm/kernel/signal.c
3079+++ b/arch/arm/kernel/signal.c
3080@@ -433,22 +433,14 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
3081 __put_user(sigreturn_codes[idx+1], rc+1))
3082 return 1;
3083
3084- if (cpsr & MODE32_BIT) {
3085- /*
3086- * 32-bit code can use the new high-page
3087- * signal return code support.
3088- */
3089- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
3090- } else {
3091- /*
3092- * Ensure that the instruction cache sees
3093- * the return code written onto the stack.
3094- */
3095- flush_icache_range((unsigned long)rc,
3096- (unsigned long)(rc + 2));
3097+ /*
3098+ * Ensure that the instruction cache sees
3099+ * the return code written onto the stack.
3100+ */
3101+ flush_icache_range((unsigned long)rc,
3102+ (unsigned long)(rc + 2));
3103
3104- retcode = ((unsigned long)rc) + thumb;
3105- }
3106+ retcode = ((unsigned long)rc) + thumb;
3107 }
3108
3109 regs->ARM_r0 = usig;
3110diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3111index 58af91c..343ce99 100644
3112--- a/arch/arm/kernel/smp.c
3113+++ b/arch/arm/kernel/smp.c
3114@@ -70,7 +70,7 @@ enum ipi_msg_type {
3115
3116 static DECLARE_COMPLETION(cpu_running);
3117
3118-static struct smp_operations smp_ops;
3119+static struct smp_operations smp_ops __read_only;
3120
3121 void __init smp_set_ops(struct smp_operations *ops)
3122 {
3123diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
3124index 02c5d2c..e5695ad 100644
3125--- a/arch/arm/kernel/smp_tlb.c
3126+++ b/arch/arm/kernel/smp_tlb.c
3127@@ -12,6 +12,7 @@
3128
3129 #include <asm/smp_plat.h>
3130 #include <asm/tlbflush.h>
3131+#include <asm/mmu_context.h>
3132
3133 /**********************************************************************/
3134
3135@@ -64,12 +65,72 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
3136 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
3137 }
3138
3139+#ifdef CONFIG_ARM_ERRATA_798181
3140+static int erratum_a15_798181(void)
3141+{
3142+ unsigned int midr = read_cpuid_id();
3143+
3144+ /* Cortex-A15 r0p0..r3p2 affected */
3145+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
3146+ return 0;
3147+ return 1;
3148+}
3149+#else
3150+static int erratum_a15_798181(void)
3151+{
3152+ return 0;
3153+}
3154+#endif
3155+
3156+static void ipi_flush_tlb_a15_erratum(void *arg)
3157+{
3158+ dmb();
3159+}
3160+
3161+static void broadcast_tlb_a15_erratum(void)
3162+{
3163+ if (!erratum_a15_798181())
3164+ return;
3165+
3166+ dummy_flush_tlb_a15_erratum();
3167+ smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
3168+ NULL, 1);
3169+}
3170+
3171+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
3172+{
3173+ int cpu;
3174+ cpumask_t mask = { CPU_BITS_NONE };
3175+
3176+ if (!erratum_a15_798181())
3177+ return;
3178+
3179+ dummy_flush_tlb_a15_erratum();
3180+ for_each_online_cpu(cpu) {
3181+ if (cpu == smp_processor_id())
3182+ continue;
3183+ /*
3184+ * We only need to send an IPI if the other CPUs are running
3185+ * the same ASID as the one being invalidated. There is no
3186+ * need for locking around the active_asids check since the
3187+ * switch_mm() function has at least one dmb() (as required by
3188+ * this workaround) in case a context switch happens on
3189+ * another CPU after the condition below.
3190+ */
3191+ if (atomic64_read(&mm->context.id) ==
3192+ atomic64_read(&per_cpu(active_asids, cpu)))
3193+ cpumask_set_cpu(cpu, &mask);
3194+ }
3195+ smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
3196+}
3197+
3198 void flush_tlb_all(void)
3199 {
3200 if (tlb_ops_need_broadcast())
3201 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
3202 else
3203 local_flush_tlb_all();
3204+ broadcast_tlb_a15_erratum();
3205 }
3206
3207 void flush_tlb_mm(struct mm_struct *mm)
3208@@ -78,6 +139,7 @@ void flush_tlb_mm(struct mm_struct *mm)
3209 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
3210 else
3211 local_flush_tlb_mm(mm);
3212+ broadcast_tlb_mm_a15_erratum(mm);
3213 }
3214
3215 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3216@@ -90,6 +152,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3217 &ta, 1);
3218 } else
3219 local_flush_tlb_page(vma, uaddr);
3220+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3221 }
3222
3223 void flush_tlb_kernel_page(unsigned long kaddr)
3224@@ -100,6 +163,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
3225 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
3226 } else
3227 local_flush_tlb_kernel_page(kaddr);
3228+ broadcast_tlb_a15_erratum();
3229 }
3230
3231 void flush_tlb_range(struct vm_area_struct *vma,
3232@@ -114,6 +178,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
3233 &ta, 1);
3234 } else
3235 local_flush_tlb_range(vma, start, end);
3236+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3237 }
3238
3239 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3240@@ -125,5 +190,6 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3241 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
3242 } else
3243 local_flush_tlb_kernel_range(start, end);
3244+ broadcast_tlb_a15_erratum();
3245 }
3246
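
erratum_a15_798181() above matches the MIDR fields: implementer [31:24], variant [23:20], part number [15:4], revision [3:0]. Cortex-A15 rNpM therefore reads 0x41Nfc0fM, and the affected range r0p0..r3p2 tops out at 0x413fc0f2. A standalone sketch with made-up sample MIDRs:

/* Sketch of the erratum_a15_798181() MIDR test.  The mask 0xff0ffff0
 * hides variant and revision so any Cortex-A15 matches, then the upper
 * bound caps the revision at r3p2. */
#include <stdio.h>

static int erratum_a15_798181(unsigned int midr)
{
        /* Cortex-A15 r0p0..r3p2 affected */
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
                return 0;
        return 1;
}

int main(void)
{
        unsigned int r2p1 = 0x412fc0f1;         /* A15 r2p1: affected */
        unsigned int r4p0 = 0x414fc0f0;         /* A15 r4p0: fixed */
        unsigned int a9   = 0x413fc090;         /* Cortex-A9: other part */

        printf("r2p1: %d, r4p0: %d, A9: %d\n",
               erratum_a15_798181(r2p1),
               erratum_a15_798181(r4p0),
               erratum_a15_798181(a9));
        return 0;
}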
3247diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3248index b0179b8..829510e 100644
3249--- a/arch/arm/kernel/traps.c
3250+++ b/arch/arm/kernel/traps.c
3251@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3252 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3253 {
3254 #ifdef CONFIG_KALLSYMS
3255- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3256+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3257 #else
3258 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3259 #endif
3260@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3261 static int die_owner = -1;
3262 static unsigned int die_nest_count;
3263
3264+extern void gr_handle_kernel_exploit(void);
3265+
3266 static unsigned long oops_begin(void)
3267 {
3268 int cpu;
3269@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3270 panic("Fatal exception in interrupt");
3271 if (panic_on_oops)
3272 panic("Fatal exception");
3273+
3274+ gr_handle_kernel_exploit();
3275+
3276 if (signr)
3277 do_exit(signr);
3278 }
3279@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3280 * The user helper at 0xffff0fe0 must be used instead.
3281 * (see entry-armv.S for details)
3282 */
3283+ pax_open_kernel();
3284 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3285+ pax_close_kernel();
3286 }
3287 return 0;
3288
3289@@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base)
3290 */
3291 kuser_get_tls_init(vectors);
3292
3293- /*
3294- * Copy signal return handlers into the vector page, and
3295- * set sigreturn to be a pointer to these.
3296- */
3297- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3298- sigreturn_codes, sizeof(sigreturn_codes));
3299-
3300 flush_icache_range(vectors, vectors + PAGE_SIZE);
3301- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3302+
3303+#ifndef CONFIG_PAX_MEMORY_UDEREF
3304+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3305+#endif
3306+
3307 }
3308diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3309index 11c1785..c67d54c 100644
3310--- a/arch/arm/kernel/vmlinux.lds.S
3311+++ b/arch/arm/kernel/vmlinux.lds.S
3312@@ -8,7 +8,11 @@
3313 #include <asm/thread_info.h>
3314 #include <asm/memory.h>
3315 #include <asm/page.h>
3316-
3317+
3318+#ifdef CONFIG_PAX_KERNEXEC
3319+#include <asm/pgtable.h>
3320+#endif
3321+
3322 #define PROC_INFO \
3323 . = ALIGN(4); \
3324 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3325@@ -90,6 +94,11 @@ SECTIONS
3326 _text = .;
3327 HEAD_TEXT
3328 }
3329+
3330+#ifdef CONFIG_PAX_KERNEXEC
3331+ . = ALIGN(1<<SECTION_SHIFT);
3332+#endif
3333+
3334 .text : { /* Real text segment */
3335 _stext = .; /* Text and read-only data */
3336 __exception_text_start = .;
3337@@ -144,6 +153,10 @@ SECTIONS
3338
3339 _etext = .; /* End of text and rodata section */
3340
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+ . = ALIGN(1<<SECTION_SHIFT);
3343+#endif
3344+
3345 #ifndef CONFIG_XIP_KERNEL
3346 . = ALIGN(PAGE_SIZE);
3347 __init_begin = .;
3348@@ -203,6 +216,11 @@ SECTIONS
3349 . = PAGE_OFFSET + TEXT_OFFSET;
3350 #else
3351 __init_end = .;
3352+
3353+#ifdef CONFIG_PAX_KERNEXEC
3354+ . = ALIGN(1<<SECTION_SHIFT);
3355+#endif
3356+
3357 . = ALIGN(THREAD_SIZE);
3358 __data_loc = .;
3359 #endif
3360diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3361index 14a0d98..7771a7d 100644
3362--- a/arch/arm/lib/clear_user.S
3363+++ b/arch/arm/lib/clear_user.S
3364@@ -12,14 +12,14 @@
3365
3366 .text
3367
3368-/* Prototype: int __clear_user(void *addr, size_t sz)
3369+/* Prototype: int ___clear_user(void *addr, size_t sz)
3370 * Purpose : clear some user memory
3371 * Params : addr - user memory address to clear
3372 * : sz - number of bytes to clear
3373 * Returns : number of bytes NOT cleared
3374 */
3375 ENTRY(__clear_user_std)
3376-WEAK(__clear_user)
3377+WEAK(___clear_user)
3378 stmfd sp!, {r1, lr}
3379 mov r2, #0
3380 cmp r1, #4
3381@@ -44,7 +44,7 @@ WEAK(__clear_user)
3382 USER( strnebt r2, [r0])
3383 mov r0, #0
3384 ldmfd sp!, {r1, pc}
3385-ENDPROC(__clear_user)
3386+ENDPROC(___clear_user)
3387 ENDPROC(__clear_user_std)
3388
3389 .pushsection .fixup,"ax"
3390diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3391index 66a477a..bee61d3 100644
3392--- a/arch/arm/lib/copy_from_user.S
3393+++ b/arch/arm/lib/copy_from_user.S
3394@@ -16,7 +16,7 @@
3395 /*
3396 * Prototype:
3397 *
3398- * size_t __copy_from_user(void *to, const void *from, size_t n)
3399+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3400 *
3401 * Purpose:
3402 *
3403@@ -84,11 +84,11 @@
3404
3405 .text
3406
3407-ENTRY(__copy_from_user)
3408+ENTRY(___copy_from_user)
3409
3410 #include "copy_template.S"
3411
3412-ENDPROC(__copy_from_user)
3413+ENDPROC(___copy_from_user)
3414
3415 .pushsection .fixup,"ax"
3416 .align 0
3417diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3418index 6ee2f67..d1cce76 100644
3419--- a/arch/arm/lib/copy_page.S
3420+++ b/arch/arm/lib/copy_page.S
3421@@ -10,6 +10,7 @@
3422 * ASM optimised string functions
3423 */
3424 #include <linux/linkage.h>
3425+#include <linux/const.h>
3426 #include <asm/assembler.h>
3427 #include <asm/asm-offsets.h>
3428 #include <asm/cache.h>
3429diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3430index d066df6..df28194 100644
3431--- a/arch/arm/lib/copy_to_user.S
3432+++ b/arch/arm/lib/copy_to_user.S
3433@@ -16,7 +16,7 @@
3434 /*
3435 * Prototype:
3436 *
3437- * size_t __copy_to_user(void *to, const void *from, size_t n)
3438+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3439 *
3440 * Purpose:
3441 *
3442@@ -88,11 +88,11 @@
3443 .text
3444
3445 ENTRY(__copy_to_user_std)
3446-WEAK(__copy_to_user)
3447+WEAK(___copy_to_user)
3448
3449 #include "copy_template.S"
3450
3451-ENDPROC(__copy_to_user)
3452+ENDPROC(___copy_to_user)
3453 ENDPROC(__copy_to_user_std)
3454
3455 .pushsection .fixup,"ax"
3456diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3457index 7d08b43..f7ca7ea 100644
3458--- a/arch/arm/lib/csumpartialcopyuser.S
3459+++ b/arch/arm/lib/csumpartialcopyuser.S
3460@@ -57,8 +57,8 @@
3461 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3462 */
3463
3464-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3465-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3466+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3467+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3468
3469 #include "csumpartialcopygeneric.S"
3470
3471diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3472index 6b93f6a..1aa92d0 100644
3473--- a/arch/arm/lib/delay.c
3474+++ b/arch/arm/lib/delay.c
3475@@ -28,12 +28,15 @@
3476 /*
3477 * Default to the loop-based delay implementation.
3478 */
3479-struct arm_delay_ops arm_delay_ops = {
3480+static struct arm_delay_ops arm_loop_delay_ops = {
3481 .delay = __loop_delay,
3482 .const_udelay = __loop_const_udelay,
3483 .udelay = __loop_udelay,
3484+ .const_clock = false,
3485 };
3486
3487+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
3488+
3489 static const struct delay_timer *delay_timer;
3490 static bool delay_calibrated;
3491
3492@@ -67,6 +70,13 @@ static void __timer_udelay(unsigned long usecs)
3493 __timer_const_udelay(usecs * UDELAY_MULT);
3494 }
3495
3496+static struct arm_delay_ops arm_timer_delay_ops = {
3497+ .delay = __timer_delay,
3498+ .const_udelay = __timer_const_udelay,
3499+ .udelay = __timer_udelay,
3500+ .const_clock = true,
3501+};
3502+
3503 void __init register_current_timer_delay(const struct delay_timer *timer)
3504 {
3505 if (!delay_calibrated) {
3506@@ -74,10 +84,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
3507 delay_timer = timer;
3508 lpj_fine = timer->freq / HZ;
3509 loops_per_jiffy = lpj_fine;
3510- arm_delay_ops.delay = __timer_delay;
3511- arm_delay_ops.const_udelay = __timer_const_udelay;
3512- arm_delay_ops.udelay = __timer_udelay;
3513- arm_delay_ops.const_clock = true;
3514+ arm_delay_ops = &arm_timer_delay_ops;
3515 delay_calibrated = true;
3516 } else {
3517 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
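
The delay.c change above swaps the pattern from "patch fields of one writable ops struct at runtime" to "fully initialize two ops tables and flip a single __read_only pointer"; mach-kirkwood's clk_gate_fn_ops below gets the same treatment. A minimal sketch of the pattern (the struct names and the timer stub are illustrative):

/* The pointer-swap pattern used for arm_delay_ops: keep each ops table
 * fully initialized and const, and switch one pointer instead of
 * rewriting struct members at runtime. */
#include <stdio.h>

struct delay_ops {
        void (*delay)(unsigned long loops);
        int const_clock;
};

static void loop_delay(unsigned long loops)  { while (loops--) ; }
static void timer_delay(unsigned long usecs) { (void)usecs; /* would poll a clocksource */ }

static const struct delay_ops loop_ops  = { .delay = loop_delay,  .const_clock = 0 };
static const struct delay_ops timer_ops = { .delay = timer_delay, .const_clock = 1 };

/* In the kernel this pointer lives in a __read_only section and is only
 * flipped once during early boot. */
static const struct delay_ops *delay_ops = &loop_ops;

int main(void)
{
        delay_ops->delay(1000);
        delay_ops = &timer_ops;        /* register_current_timer_delay() */
        printf("const_clock now %d\n", delay_ops->const_clock);
        return 0;
}

Keeping the tables const means an attacker can no longer rewrite individual function pointers in place; only the single, early-frozen selector pointer ever changes.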
3518diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3519index 025f742..8432b08 100644
3520--- a/arch/arm/lib/uaccess_with_memcpy.c
3521+++ b/arch/arm/lib/uaccess_with_memcpy.c
3522@@ -104,7 +104,7 @@ out:
3523 }
3524
3525 unsigned long
3526-__copy_to_user(void __user *to, const void *from, unsigned long n)
3527+___copy_to_user(void __user *to, const void *from, unsigned long n)
3528 {
3529 /*
3530 * This test is stubbed out of the main function above to keep
3531diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3532index bac21a5..b67ef8e 100644
3533--- a/arch/arm/mach-kirkwood/common.c
3534+++ b/arch/arm/mach-kirkwood/common.c
3535@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3536 clk_gate_ops.disable(hw);
3537 }
3538
3539-static struct clk_ops clk_gate_fn_ops;
3540+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3541+{
3542+ return clk_gate_ops.is_enabled(hw);
3543+}
3544+
3545+static struct clk_ops clk_gate_fn_ops = {
3546+ .enable = clk_gate_fn_enable,
3547+ .disable = clk_gate_fn_disable,
3548+ .is_enabled = clk_gate_fn_is_enabled,
3549+};
3550
3551 static struct clk __init *clk_register_gate_fn(struct device *dev,
3552 const char *name,
3553@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3554 gate_fn->fn_en = fn_en;
3555 gate_fn->fn_dis = fn_dis;
3556
3557- /* ops is the gate ops, but with our enable/disable functions */
3558- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3559- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3560- clk_gate_fn_ops = clk_gate_ops;
3561- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3562- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3563- }
3564-
3565 clk = clk_register(dev, &gate_fn->gate.hw);
3566
3567 if (IS_ERR(clk))
3568diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3569index 0abb30f..54064da 100644
3570--- a/arch/arm/mach-omap2/board-n8x0.c
3571+++ b/arch/arm/mach-omap2/board-n8x0.c
3572@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3573 }
3574 #endif
3575
3576-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3577+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3578 .late_init = n8x0_menelaus_late_init,
3579 };
3580
3581diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3582index 8033cb7..2f7cb62 100644
3583--- a/arch/arm/mach-omap2/gpmc.c
3584+++ b/arch/arm/mach-omap2/gpmc.c
3585@@ -139,7 +139,6 @@ struct omap3_gpmc_regs {
3586 };
3587
3588 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3589-static struct irq_chip gpmc_irq_chip;
3590 static unsigned gpmc_irq_start;
3591
3592 static struct resource gpmc_mem_root;
3593@@ -700,6 +699,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3594
3595 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3596
3597+static struct irq_chip gpmc_irq_chip = {
3598+ .name = "gpmc",
3599+ .irq_startup = gpmc_irq_noop_ret,
3600+ .irq_enable = gpmc_irq_enable,
3601+ .irq_disable = gpmc_irq_disable,
3602+ .irq_shutdown = gpmc_irq_noop,
3603+ .irq_ack = gpmc_irq_noop,
3604+ .irq_mask = gpmc_irq_noop,
3605+ .irq_unmask = gpmc_irq_noop,
3606+
3607+};
3608+
3609 static int gpmc_setup_irq(void)
3610 {
3611 int i;
3612@@ -714,15 +725,6 @@ static int gpmc_setup_irq(void)
3613 return gpmc_irq_start;
3614 }
3615
3616- gpmc_irq_chip.name = "gpmc";
3617- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3618- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3619- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3620- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3621- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3622- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3623- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3624-
3625 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3626 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3627
3628diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3629index 5d3b4f4..ddba3c0 100644
3630--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3631+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3632@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3633 return NOTIFY_OK;
3634 }
3635
3636-static struct notifier_block __refdata irq_hotplug_notifier = {
3637+static struct notifier_block irq_hotplug_notifier = {
3638 .notifier_call = irq_cpu_hotplug_notify,
3639 };
3640
3641diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3642index e065daa..7b1ad9b 100644
3643--- a/arch/arm/mach-omap2/omap_device.c
3644+++ b/arch/arm/mach-omap2/omap_device.c
3645@@ -686,7 +686,7 @@ void omap_device_delete(struct omap_device *od)
3646 * passes along the return value of omap_device_build_ss().
3647 */
3648 struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id,
3649- struct omap_hwmod *oh, void *pdata,
3650+ struct omap_hwmod *oh, const void *pdata,
3651 int pdata_len,
3652 struct omap_device_pm_latency *pm_lats,
3653 int pm_lats_cnt, int is_early_device)
3654@@ -720,7 +720,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev
3655 */
3656 struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id,
3657 struct omap_hwmod **ohs, int oh_cnt,
3658- void *pdata, int pdata_len,
3659+ const void *pdata, int pdata_len,
3660 struct omap_device_pm_latency *pm_lats,
3661 int pm_lats_cnt, int is_early_device)
3662 {
3663diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3664index 0933c59..42b8e2d 100644
3665--- a/arch/arm/mach-omap2/omap_device.h
3666+++ b/arch/arm/mach-omap2/omap_device.h
3667@@ -91,14 +91,14 @@ int omap_device_shutdown(struct platform_device *pdev);
3668 /* Core code interface */
3669
3670 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3671- struct omap_hwmod *oh, void *pdata,
3672+ struct omap_hwmod *oh, const void *pdata,
3673 int pdata_len,
3674 struct omap_device_pm_latency *pm_lats,
3675 int pm_lats_cnt, int is_early_device);
3676
3677 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3678 struct omap_hwmod **oh, int oh_cnt,
3679- void *pdata, int pdata_len,
3680+ const void *pdata, int pdata_len,
3681 struct omap_device_pm_latency *pm_lats,
3682 int pm_lats_cnt, int is_early_device);
3683
3684diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3685index 4653efb..8c60bf7 100644
3686--- a/arch/arm/mach-omap2/omap_hwmod.c
3687+++ b/arch/arm/mach-omap2/omap_hwmod.c
3688@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3689 int (*init_clkdm)(struct omap_hwmod *oh);
3690 void (*update_context_lost)(struct omap_hwmod *oh);
3691 int (*get_context_lost)(struct omap_hwmod *oh);
3692-};
3693+} __no_const;
3694
3695 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3696-static struct omap_hwmod_soc_ops soc_ops;
3697+static struct omap_hwmod_soc_ops soc_ops __read_only;
3698
3699 /* omap_hwmod_list contains all registered struct omap_hwmods */
3700 static LIST_HEAD(omap_hwmod_list);
3701diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3702index 7c2b4ed..b2ea51f 100644
3703--- a/arch/arm/mach-omap2/wd_timer.c
3704+++ b/arch/arm/mach-omap2/wd_timer.c
3705@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3706 struct omap_hwmod *oh;
3707 char *oh_name = "wd_timer2";
3708 char *dev_name = "omap_wdt";
3709- struct omap_wd_timer_platform_data pdata;
3710+ static struct omap_wd_timer_platform_data pdata = {
3711+ .read_reset_sources = prm_read_reset_sources
3712+ };
3713
3714 if (!cpu_class_is_omap2() || of_have_populated_dt())
3715 return 0;
3716@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3717 return -EINVAL;
3718 }
3719
3720- pdata.read_reset_sources = prm_read_reset_sources;
3721-
3722 pdev = omap_device_build(dev_name, id, oh, &pdata,
3723 sizeof(struct omap_wd_timer_platform_data),
3724 NULL, 0, 0);
3725diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3726index 6be4c4d..32ac32a 100644
3727--- a/arch/arm/mach-ux500/include/mach/setup.h
3728+++ b/arch/arm/mach-ux500/include/mach/setup.h
3729@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3730 .type = MT_DEVICE, \
3731 }
3732
3733-#define __MEM_DEV_DESC(x, sz) { \
3734- .virtual = IO_ADDRESS(x), \
3735- .pfn = __phys_to_pfn(x), \
3736- .length = sz, \
3737- .type = MT_MEMORY, \
3738-}
3739-
3740 extern struct smp_operations ux500_smp_ops;
3741 extern void ux500_cpu_die(unsigned int cpu);
3742
3743diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3744index 3fd629d..8b1aca9 100644
3745--- a/arch/arm/mm/Kconfig
3746+++ b/arch/arm/mm/Kconfig
3747@@ -425,7 +425,7 @@ config CPU_32v5
3748
3749 config CPU_32v6
3750 bool
3751- select CPU_USE_DOMAINS if CPU_V6 && MMU
3752+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3753 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3754
3755 config CPU_32v6K
3756@@ -577,6 +577,7 @@ config CPU_CP15_MPU
3757
3758 config CPU_USE_DOMAINS
3759 bool
3760+ depends on !ARM_LPAE && !PAX_KERNEXEC
3761 help
3762 This option enables or disables the use of domain switching
3763 via the set_fs() function.
3764diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3765index db26e2e..ee44569 100644
3766--- a/arch/arm/mm/alignment.c
3767+++ b/arch/arm/mm/alignment.c
3768@@ -211,10 +211,12 @@ union offset_union {
3769 #define __get16_unaligned_check(ins,val,addr) \
3770 do { \
3771 unsigned int err = 0, v, a = addr; \
3772+ pax_open_userland(); \
3773 __get8_unaligned_check(ins,v,a,err); \
3774 val = v << ((BE) ? 8 : 0); \
3775 __get8_unaligned_check(ins,v,a,err); \
3776 val |= v << ((BE) ? 0 : 8); \
3777+ pax_close_userland(); \
3778 if (err) \
3779 goto fault; \
3780 } while (0)
3781@@ -228,6 +230,7 @@ union offset_union {
3782 #define __get32_unaligned_check(ins,val,addr) \
3783 do { \
3784 unsigned int err = 0, v, a = addr; \
3785+ pax_open_userland(); \
3786 __get8_unaligned_check(ins,v,a,err); \
3787 val = v << ((BE) ? 24 : 0); \
3788 __get8_unaligned_check(ins,v,a,err); \
3789@@ -236,6 +239,7 @@ union offset_union {
3790 val |= v << ((BE) ? 8 : 16); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val |= v << ((BE) ? 0 : 24); \
3793+ pax_close_userland(); \
3794 if (err) \
3795 goto fault; \
3796 } while (0)
3797@@ -249,6 +253,7 @@ union offset_union {
3798 #define __put16_unaligned_check(ins,val,addr) \
3799 do { \
3800 unsigned int err = 0, v = val, a = addr; \
3801+ pax_open_userland(); \
3802 __asm__( FIRST_BYTE_16 \
3803 ARM( "1: "ins" %1, [%2], #1\n" ) \
3804 THUMB( "1: "ins" %1, [%2]\n" ) \
3805@@ -268,6 +273,7 @@ union offset_union {
3806 " .popsection\n" \
3807 : "=r" (err), "=&r" (v), "=&r" (a) \
3808 : "0" (err), "1" (v), "2" (a)); \
3809+ pax_close_userland(); \
3810 if (err) \
3811 goto fault; \
3812 } while (0)
3813@@ -281,6 +287,7 @@ union offset_union {
3814 #define __put32_unaligned_check(ins,val,addr) \
3815 do { \
3816 unsigned int err = 0, v = val, a = addr; \
3817+ pax_open_userland(); \
3818 __asm__( FIRST_BYTE_32 \
3819 ARM( "1: "ins" %1, [%2], #1\n" ) \
3820 THUMB( "1: "ins" %1, [%2]\n" ) \
3821@@ -310,6 +317,7 @@ union offset_union {
3822 " .popsection\n" \
3823 : "=r" (err), "=&r" (v), "=&r" (a) \
3824 : "0" (err), "1" (v), "2" (a)); \
3825+ pax_close_userland(); \
3826 if (err) \
3827 goto fault; \
3828 } while (0)
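
The alignment.c hunk brackets each multi-byte unaligned user access with `pax_open_userland()`/`pax_close_userland()`, so under UDEREF userland is mapped accessible only for the duration of the fixup copy. A compilable sketch of the bracketing pattern, with the open/close helpers stubbed out (on ARM they switch the userland DACR domain):

```c
#include <stdio.h>

static void pax_open_userland(void)  { puts("userland: accessible"); }
static void pax_close_userland(void) { puts("userland: blocked"); }

/* little-endian 16-bit load, one byte at a time, like the patch macros */
#define get16_unaligned(val, p)			\
	do {					\
		const unsigned char *__a = (p);	\
		pax_open_userland();		\
		(val) = __a[0] | (__a[1] << 8);	\
		pax_close_userland();		\
	} while (0)

int main(void)
{
	unsigned char buf[] = { 0x34, 0x12 };
	unsigned int v;

	get16_unaligned(v, buf);
	printf("v = 0x%04x\n", v);
	return 0;
}
```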
3829diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3830index d07df17..59d5493 100644
3831--- a/arch/arm/mm/context.c
3832+++ b/arch/arm/mm/context.c
3833@@ -45,7 +45,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3834 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3835 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3836
3837-static DEFINE_PER_CPU(atomic64_t, active_asids);
3838+DEFINE_PER_CPU(atomic64_t, active_asids);
3839 static DEFINE_PER_CPU(u64, reserved_asids);
3840 static cpumask_t tlb_flush_pending;
3841
3842@@ -209,8 +209,10 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3843 atomic64_set(&mm->context.id, asid);
3844 }
3845
3846- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
3847+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
3848 local_flush_tlb_all();
3849+ dummy_flush_tlb_a15_erratum();
3850+ }
3851
3852 atomic64_set(&per_cpu(active_asids, cpu), asid);
3853 cpumask_set_cpu(cpu, mm_cpumask(mm));
3854diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3855index 5dbf13f..1a60561 100644
3856--- a/arch/arm/mm/fault.c
3857+++ b/arch/arm/mm/fault.c
3858@@ -25,6 +25,7 @@
3859 #include <asm/system_misc.h>
3860 #include <asm/system_info.h>
3861 #include <asm/tlbflush.h>
3862+#include <asm/sections.h>
3863
3864 #include "fault.h"
3865
3866@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3867 if (fixup_exception(regs))
3868 return;
3869
3870+#ifdef CONFIG_PAX_KERNEXEC
3871+ if ((fsr & FSR_WRITE) &&
3872+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3873+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3874+ {
3875+ if (current->signal->curr_ip)
3876+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3877+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3878+ else
3879+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3880+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3881+ }
3882+#endif
3883+
3884 /*
3885 * No handler, we'll have to terminate things with extreme prejudice.
3886 */
3887@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3888 }
3889 #endif
3890
3891+#ifdef CONFIG_PAX_PAGEEXEC
3892+ if (fsr & FSR_LNX_PF) {
3893+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3894+ do_group_exit(SIGKILL);
3895+ }
3896+#endif
3897+
3898 tsk->thread.address = addr;
3899 tsk->thread.error_code = fsr;
3900 tsk->thread.trap_no = 14;
3901@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3902 }
3903 #endif /* CONFIG_MMU */
3904
3905+#ifdef CONFIG_PAX_PAGEEXEC
3906+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3907+{
3908+ long i;
3909+
3910+ printk(KERN_ERR "PAX: bytes at PC: ");
3911+ for (i = 0; i < 20; i++) {
3912+ unsigned char c;
3913+ if (get_user(c, (__force unsigned char __user *)pc+i))
3914+ printk(KERN_CONT "?? ");
3915+ else
3916+ printk(KERN_CONT "%02x ", c);
3917+ }
3918+ printk("\n");
3919+
3920+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3921+ for (i = -1; i < 20; i++) {
3922+ unsigned long c;
3923+ if (get_user(c, (__force unsigned long __user *)sp+i))
3924+ printk(KERN_CONT "???????? ");
3925+ else
3926+ printk(KERN_CONT "%08lx ", c);
3927+ }
3928+ printk("\n");
3929+}
3930+#endif
3931+
3932 /*
3933 * First Level Translation Fault Handler
3934 *
3935@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3936 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3937 struct siginfo info;
3938
3939+#ifdef CONFIG_PAX_MEMORY_UDEREF
3940+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3941+ if (current->signal->curr_ip)
3942+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3943+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3944+ else
3945+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3946+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3947+ goto die;
3948+ }
3949+#endif
3950+
3951 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3952 return;
3953
3954+die:
3955 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3956 inf->name, fsr, addr);
3957
3958@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3959 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3960 struct siginfo info;
3961
3962+ if (user_mode(regs)) {
3963+ if (addr == 0xffff0fe0UL) {
3964+ /*
3965+ * PaX: __kuser_get_tls emulation
3966+ */
3967+ regs->ARM_r0 = current_thread_info()->tp_value;
3968+ regs->ARM_pc = regs->ARM_lr;
3969+ return;
3970+ }
3971+ }
3972+
3973+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3974+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3975+ if (current->signal->curr_ip)
3976+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3977+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3978+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3979+ else
3980+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3981+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3982+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3983+ goto die;
3984+ }
3985+#endif
3986+
3987+#ifdef CONFIG_PAX_REFCOUNT
3988+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3989+ unsigned int bkpt;
3990+
3991+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3992+ current->thread.error_code = ifsr;
3993+ current->thread.trap_no = 0;
3994+ pax_report_refcount_overflow(regs);
3995+ fixup_exception(regs);
3996+ return;
3997+ }
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, ifsr, addr);
4007
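
The `pax_report_insns()` added above dumps the bytes around the faulting PC (and the words around SP) when PAGEEXEC kills a task, printing a placeholder wherever the address cannot be read. A self-contained model with `get_user()` replaced by a bounds check over a local buffer; the byte values are made up:

```c
#include <stdio.h>

static const unsigned char text[16] = { 0xe5, 0x9f, 0x30, 0x04 };

static int get_user_byte(unsigned char *c, long idx)
{
	if (idx < 0 || idx >= (long)sizeof(text))
		return -1;		/* -EFAULT in the kernel */
	*c = text[idx];
	return 0;
}

int main(void)
{
	long i;

	printf("PAX: bytes at PC: ");
	for (i = 0; i < 20; i++) {
		unsigned char c;
		if (get_user_byte(&c, i - 4))	/* start 4 bytes before "PC" */
			printf("?? ");
		else
			printf("%02x ", c);
	}
	printf("\n");
	return 0;
}
```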
4008diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4009index cf08bdf..772656c 100644
4010--- a/arch/arm/mm/fault.h
4011+++ b/arch/arm/mm/fault.h
4012@@ -3,6 +3,7 @@
4013
4014 /*
4015 * Fault status register encodings. We steal bit 31 for our own purposes.
4016+ * Set when the FSR value is from an instruction fault.
4017 */
4018 #define FSR_LNX_PF (1 << 31)
4019 #define FSR_WRITE (1 << 11)
4020@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4021 }
4022 #endif
4023
4024+/* valid for LPAE and !LPAE */
4025+static inline int is_xn_fault(unsigned int fsr)
4026+{
4027+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4028+}
4029+
4030+static inline int is_domain_fault(unsigned int fsr)
4031+{
4032+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4033+}
4034+
4035 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4036 unsigned long search_exception_table(unsigned long addr);
4037
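
The two predicates added to fault.h test the collapsed fault-status value returned by `fsr_fs()`: `(fs & 0x3c) == 0xc` matches the permission/XN encodings and `(fs & 0xD) == 0x9` the domain-fault encodings. A compilable check using the non-LPAE `fsr_fs()` (bits [3:0] plus bit 10 of the FSR):

```c
#include <stdio.h>

static inline int fsr_fs(unsigned int fsr)
{
	return (fsr & 0xf) | ((fsr & (1 << 10)) >> 6);
}

static inline int is_xn_fault(unsigned int fsr)
{
	return (fsr_fs(fsr) & 0x3c) == 0x0c;
}

static inline int is_domain_fault(unsigned int fsr)
{
	return (fsr_fs(fsr) & 0x0d) == 0x09;
}

int main(void)
{
	/* 0x0d: section permission fault; 0x09: section domain fault */
	printf("fsr 0x0d: xn=%d domain=%d\n", is_xn_fault(0x0d), is_domain_fault(0x0d));
	printf("fsr 0x09: xn=%d domain=%d\n", is_xn_fault(0x09), is_domain_fault(0x09));
	return 0;
}
```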
4038diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4039index ad722f1..763fdd3 100644
4040--- a/arch/arm/mm/init.c
4041+++ b/arch/arm/mm/init.c
4042@@ -30,6 +30,8 @@
4043 #include <asm/setup.h>
4044 #include <asm/tlb.h>
4045 #include <asm/fixmap.h>
4046+#include <asm/system_info.h>
4047+#include <asm/cp15.h>
4048
4049 #include <asm/mach/arch.h>
4050 #include <asm/mach/map.h>
4051@@ -736,7 +738,46 @@ void free_initmem(void)
4052 {
4053 #ifdef CONFIG_HAVE_TCM
4054 extern char __tcm_start, __tcm_end;
4055+#endif
4056
4057+#ifdef CONFIG_PAX_KERNEXEC
4058+ unsigned long addr;
4059+ pgd_t *pgd;
4060+ pud_t *pud;
4061+ pmd_t *pmd;
4062+ int cpu_arch = cpu_architecture();
4063+ unsigned int cr = get_cr();
4064+
4065+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4066+ /* make pages tables, etc before .text NX */
4067+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4068+ pgd = pgd_offset_k(addr);
4069+ pud = pud_offset(pgd, addr);
4070+ pmd = pmd_offset(pud, addr);
4071+ __section_update(pmd, addr, PMD_SECT_XN);
4072+ }
4073+ /* make init NX */
4074+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4075+ pgd = pgd_offset_k(addr);
4076+ pud = pud_offset(pgd, addr);
4077+ pmd = pmd_offset(pud, addr);
4078+ __section_update(pmd, addr, PMD_SECT_XN);
4079+ }
4080+ /* make kernel code/rodata RX */
4081+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4082+ pgd = pgd_offset_k(addr);
4083+ pud = pud_offset(pgd, addr);
4084+ pmd = pmd_offset(pud, addr);
4085+#ifdef CONFIG_ARM_LPAE
4086+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4087+#else
4088+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4089+#endif
4090+ }
4091+ }
4092+#endif
4093+
4094+#ifdef CONFIG_HAVE_TCM
4095 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4096 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
4097 __phys_to_pfn(__pa(&__tcm_end)),
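
The KERNEXEC pass above walks the kernel's 1 MiB section mappings three times at `free_initmem()`: everything below `_stext` (page tables and the like) becomes NX, the freed init range becomes NX, and `.text`/`.rodata` become read-only. A toy model of the walk with a flag array standing in for the PMD section descriptors; the layout addresses are assumptions:

```c
#include <stdio.h>

#define SECTION_SIZE	0x100000UL	/* 1 MiB ARM section */
#define NX		1
#define RO		2

static unsigned char sect[16];		/* one flag byte per section */

static void section_update(unsigned long addr, unsigned long base, int flag)
{
	sect[(addr - base) / SECTION_SIZE] |= flag;
}

int main(void)
{
	const unsigned long page_offset = 0xc0000000UL;
	const unsigned long stext       = 0xc0300000UL;	/* invented layout */
	const unsigned long init_begin  = 0xc0800000UL;
	const unsigned long sdata       = 0xc0a00000UL;
	unsigned long addr, i;

	for (addr = page_offset; addr < stext; addr += SECTION_SIZE)
		section_update(addr, page_offset, NX);	/* page tables etc. */
	for (addr = init_begin; addr < sdata; addr += SECTION_SIZE)
		section_update(addr, page_offset, NX);	/* freed init code */
	for (addr = stext; addr < init_begin; addr += SECTION_SIZE)
		section_update(addr, page_offset, RO);	/* kernel text RX */

	for (i = 0; i < sizeof(sect); i++)
		printf("section %2lu: %s%s\n", i,
		       sect[i] & NX ? "NX " : "", sect[i] & RO ? "RO" : "");
	return 0;
}
```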
4098diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4099index 88fd86c..7a224ce 100644
4100--- a/arch/arm/mm/ioremap.c
4101+++ b/arch/arm/mm/ioremap.c
4102@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4103 unsigned int mtype;
4104
4105 if (cached)
4106- mtype = MT_MEMORY;
4107+ mtype = MT_MEMORY_RX;
4108 else
4109- mtype = MT_MEMORY_NONCACHED;
4110+ mtype = MT_MEMORY_NONCACHED_RX;
4111
4112 return __arm_ioremap_caller(phys_addr, size, mtype,
4113 __builtin_return_address(0));
4114diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4115index 10062ce..aa96dd7 100644
4116--- a/arch/arm/mm/mmap.c
4117+++ b/arch/arm/mm/mmap.c
4118@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4119 struct vm_area_struct *vma;
4120 int do_align = 0;
4121 int aliasing = cache_is_vipt_aliasing();
4122+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4123 struct vm_unmapped_area_info info;
4124
4125 /*
4126@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4127 if (len > TASK_SIZE)
4128 return -ENOMEM;
4129
4130+#ifdef CONFIG_PAX_RANDMMAP
4131+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4132+#endif
4133+
4134 if (addr) {
4135 if (do_align)
4136 addr = COLOUR_ALIGN(addr, pgoff);
4137@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4138 addr = PAGE_ALIGN(addr);
4139
4140 vma = find_vma(mm, addr);
4141- if (TASK_SIZE - len >= addr &&
4142- (!vma || addr + len <= vma->vm_start))
4143+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4144 return addr;
4145 }
4146
4147@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4148 unsigned long addr = addr0;
4149 int do_align = 0;
4150 int aliasing = cache_is_vipt_aliasing();
4151+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4152 struct vm_unmapped_area_info info;
4153
4154 /*
4155@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4156 return addr;
4157 }
4158
4159+#ifdef CONFIG_PAX_RANDMMAP
4160+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4161+#endif
4162+
4163 /* requesting a specific address */
4164 if (addr) {
4165 if (do_align)
4166@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4167 else
4168 addr = PAGE_ALIGN(addr);
4169 vma = find_vma(mm, addr);
4170- if (TASK_SIZE - len >= addr &&
4171- (!vma || addr + len <= vma->vm_start))
4172+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4173 return addr;
4174 }
4175
4176@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4177 VM_BUG_ON(addr != -ENOMEM);
4178 info.flags = 0;
4179 info.low_limit = mm->mmap_base;
4180+
4181+#ifdef CONFIG_PAX_RANDMMAP
4182+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4183+ info.low_limit += mm->delta_mmap;
4184+#endif
4185+
4186 info.high_limit = TASK_SIZE;
4187 addr = vm_unmapped_area(&info);
4188 }
4189@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4190 {
4191 unsigned long random_factor = 0UL;
4192
4193+#ifdef CONFIG_PAX_RANDMMAP
4194+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4195+#endif
4196+
4197 /* 8 bits of randomness in 20 address space bits */
4198 if ((current->flags & PF_RANDOMIZE) &&
4199 !(current->personality & ADDR_NO_RANDOMIZE))
4200@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4201
4202 if (mmap_is_legacy()) {
4203 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4204+
4205+#ifdef CONFIG_PAX_RANDMMAP
4206+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4207+ mm->mmap_base += mm->delta_mmap;
4208+#endif
4209+
4210 mm->get_unmapped_area = arch_get_unmapped_area;
4211 mm->unmap_area = arch_unmap_area;
4212 } else {
4213 mm->mmap_base = mmap_base(random_factor);
4214+
4215+#ifdef CONFIG_PAX_RANDMMAP
4216+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4217+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4218+#endif
4219+
4220 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4221 mm->unmap_area = arch_unmap_area_topdown;
4222 }
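
The mmap.c changes keep the stock 8-bit `random_factor` but shift the mmap base by additional per-mm deltas when `MF_PAX_RANDMMAP` is set: upwards for the bottom-up legacy layout, downwards (past the enlarged stack gap) for the top-down layout. A compilable sketch of the arithmetic; the constants and deltas are invented:

```c
#include <stdio.h>

#define TASK_SIZE		0xbf000000UL
#define TASK_UNMAPPED_BASE	0x40000000UL
#define MF_PAX_RANDMMAP		1u

struct mm { unsigned long pax_flags, delta_mmap, delta_stack, mmap_base; };

static void pick_mmap_layout(struct mm *mm, int legacy, unsigned long rnd)
{
	if (legacy) {
		mm->mmap_base = TASK_UNMAPPED_BASE + rnd;
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			mm->mmap_base += mm->delta_mmap;
	} else {
		mm->mmap_base = TASK_SIZE - rnd;	/* simplified mmap_base() */
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
	}
}

int main(void)
{
	struct mm mm = { MF_PAX_RANDMMAP, 0x01230000UL, 0x00450000UL, 0 };

	pick_mmap_layout(&mm, 1, 0x7f000);
	printf("legacy  base: %#lx\n", mm.mmap_base);
	pick_mmap_layout(&mm, 0, 0x7f000);
	printf("topdown base: %#lx\n", mm.mmap_base);
	return 0;
}
```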
4223diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4224index ce328c7..35b88dc 100644
4225--- a/arch/arm/mm/mmu.c
4226+++ b/arch/arm/mm/mmu.c
4227@@ -35,6 +35,23 @@
4228
4229 #include "mm.h"
4230
4231+
4232+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4233+void modify_domain(unsigned int dom, unsigned int type)
4234+{
4235+ struct thread_info *thread = current_thread_info();
4236+ unsigned int domain = thread->cpu_domain;
4237+ /*
4238+ * DOMAIN_MANAGER might be defined to some other value,
4239+ * use the arch-defined constant
4240+ */
4241+ domain &= ~domain_val(dom, 3);
4242+ thread->cpu_domain = domain | domain_val(dom, type);
4243+ set_domain(thread->cpu_domain);
4244+}
4245+EXPORT_SYMBOL(modify_domain);
4246+#endif
4247+
4248 /*
4249 * empty_zero_page is a special page that is used for
4250 * zero-initialized data and COW.
4251@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
4252 }
4253 #endif
4254
4255-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4256+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4257 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4258
4259-static struct mem_type mem_types[] = {
4260+#ifdef CONFIG_PAX_KERNEXEC
4261+#define L_PTE_KERNEXEC L_PTE_RDONLY
4262+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4263+#else
4264+#define L_PTE_KERNEXEC L_PTE_DIRTY
4265+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4266+#endif
4267+
4268+static struct mem_type mem_types[] __read_only = {
4269 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4270 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4271 L_PTE_SHARED,
4272@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
4273 [MT_UNCACHED] = {
4274 .prot_pte = PROT_PTE_DEVICE,
4275 .prot_l1 = PMD_TYPE_TABLE,
4276- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4277+ .prot_sect = PROT_SECT_DEVICE,
4278 .domain = DOMAIN_IO,
4279 },
4280 [MT_CACHECLEAN] = {
4281- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4282+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4283 .domain = DOMAIN_KERNEL,
4284 },
4285 #ifndef CONFIG_ARM_LPAE
4286 [MT_MINICLEAN] = {
4287- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4288+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4289 .domain = DOMAIN_KERNEL,
4290 },
4291 #endif
4292@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
4293 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4294 L_PTE_RDONLY,
4295 .prot_l1 = PMD_TYPE_TABLE,
4296- .domain = DOMAIN_USER,
4297+ .domain = DOMAIN_VECTORS,
4298 },
4299 [MT_HIGH_VECTORS] = {
4300 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4301- L_PTE_USER | L_PTE_RDONLY,
4302+ L_PTE_RDONLY,
4303 .prot_l1 = PMD_TYPE_TABLE,
4304- .domain = DOMAIN_USER,
4305+ .domain = DOMAIN_VECTORS,
4306 },
4307- [MT_MEMORY] = {
4308+ [MT_MEMORY_RWX] = {
4309 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4310 .prot_l1 = PMD_TYPE_TABLE,
4311 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4312 .domain = DOMAIN_KERNEL,
4313 },
4314+ [MT_MEMORY_RW] = {
4315+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4316+ .prot_l1 = PMD_TYPE_TABLE,
4317+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4318+ .domain = DOMAIN_KERNEL,
4319+ },
4320+ [MT_MEMORY_RX] = {
4321+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4322+ .prot_l1 = PMD_TYPE_TABLE,
4323+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4324+ .domain = DOMAIN_KERNEL,
4325+ },
4326 [MT_ROM] = {
4327- .prot_sect = PMD_TYPE_SECT,
4328+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4329 .domain = DOMAIN_KERNEL,
4330 },
4331- [MT_MEMORY_NONCACHED] = {
4332+ [MT_MEMORY_NONCACHED_RW] = {
4333 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4334 L_PTE_MT_BUFFERABLE,
4335 .prot_l1 = PMD_TYPE_TABLE,
4336 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4337 .domain = DOMAIN_KERNEL,
4338 },
4339+ [MT_MEMORY_NONCACHED_RX] = {
4340+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4341+ L_PTE_MT_BUFFERABLE,
4342+ .prot_l1 = PMD_TYPE_TABLE,
4343+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4344+ .domain = DOMAIN_KERNEL,
4345+ },
4346 [MT_MEMORY_DTCM] = {
4347- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4348- L_PTE_XN,
4349+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4350 .prot_l1 = PMD_TYPE_TABLE,
4351- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4352+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4353 .domain = DOMAIN_KERNEL,
4354 },
4355 [MT_MEMORY_ITCM] = {
4356@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
4357 },
4358 [MT_MEMORY_SO] = {
4359 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4360- L_PTE_MT_UNCACHED | L_PTE_XN,
4361+ L_PTE_MT_UNCACHED,
4362 .prot_l1 = PMD_TYPE_TABLE,
4363 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4364- PMD_SECT_UNCACHED | PMD_SECT_XN,
4365+ PMD_SECT_UNCACHED,
4366 .domain = DOMAIN_KERNEL,
4367 },
4368 [MT_MEMORY_DMA_READY] = {
4369@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
4370 * to prevent speculative instruction fetches.
4371 */
4372 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4373+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4374 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4375+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4376 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4377+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4378 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4379+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4380+
4381+ /* Mark other regions on ARMv6+ as execute-never */
4382+
4383+#ifdef CONFIG_PAX_KERNEXEC
4384+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4385+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4386+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4387+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4388+#ifndef CONFIG_ARM_LPAE
4389+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4390+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4391+#endif
4392+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4393+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4394+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4395+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4396+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4397+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4398+#endif
4399+
4400+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4401+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4402 }
4403 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4404 /*
4405@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
4406 * from SVC mode and no access from userspace.
4407 */
4408 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4409+#ifdef CONFIG_PAX_KERNEXEC
4410+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4411+#endif
4412 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4413 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4414 #endif
4415@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
4416 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4417 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4418 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4419- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4420- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4421+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4422+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4423+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4424+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4425+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4426+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4427 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4428- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4429- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4430+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4431+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4432+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4433+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4434 }
4435 }
4436
4437@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
4438 if (cpu_arch >= CPU_ARCH_ARMv6) {
4439 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4440 /* Non-cacheable Normal is XCB = 001 */
4441- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4442+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4443+ PMD_SECT_BUFFERED;
4444+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4445 PMD_SECT_BUFFERED;
4446 } else {
4447 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4448- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4449+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4450+ PMD_SECT_TEX(1);
4451+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4452 PMD_SECT_TEX(1);
4453 }
4454 } else {
4455- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4456+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4457+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4458 }
4459
4460 #ifdef CONFIG_ARM_LPAE
4461@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
4462 vecs_pgprot |= PTE_EXT_AF;
4463 #endif
4464
4465+ user_pgprot |= __supported_pte_mask;
4466+
4467 for (i = 0; i < 16; i++) {
4468 pteval_t v = pgprot_val(protection_map[i]);
4469 protection_map[i] = __pgprot(v | user_pgprot);
4470@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
4471
4472 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4473 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4474- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4475- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4476+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4477+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4478+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4479+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4480+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4481+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4482 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4483- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4484+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4485+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4486 mem_types[MT_ROM].prot_sect |= cp->pmd;
4487
4488 switch (cp->pmd) {
4489@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
4490 * called function. This means you can't use any function or debugging
4491 * method which may touch any device, otherwise the kernel _will_ crash.
4492 */
4493+
4494+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4495+
4496 static void __init devicemaps_init(struct machine_desc *mdesc)
4497 {
4498 struct map_desc map;
4499 unsigned long addr;
4500- void *vectors;
4501
4502- /*
4503- * Allocate the vector page early.
4504- */
4505- vectors = early_alloc(PAGE_SIZE);
4506-
4507- early_trap_init(vectors);
4508+ early_trap_init(&vectors);
4509
4510 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4511 pmd_clear(pmd_off_k(addr));
4512@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4513 * location (0xffff0000). If we aren't using high-vectors, also
4514 * create a mapping at the low-vectors virtual address.
4515 */
4516- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4517+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4518 map.virtual = 0xffff0000;
4519 map.length = PAGE_SIZE;
4520 map.type = MT_HIGH_VECTORS;
4521@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
4522 map.pfn = __phys_to_pfn(start);
4523 map.virtual = __phys_to_virt(start);
4524 map.length = end - start;
4525- map.type = MT_MEMORY;
4526
4527+#ifdef CONFIG_PAX_KERNEXEC
4528+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4529+ struct map_desc kernel;
4530+ struct map_desc initmap;
4531+
4532+ /* when freeing initmem we will make this RW */
4533+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4534+ initmap.virtual = (unsigned long)__init_begin;
4535+ initmap.length = _sdata - __init_begin;
4536+ initmap.type = MT_MEMORY_RWX;
4537+ create_mapping(&initmap);
4538+
4539+ /* when freeing initmem we will make this RX */
4540+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4541+ kernel.virtual = (unsigned long)_stext;
4542+ kernel.length = __init_begin - _stext;
4543+ kernel.type = MT_MEMORY_RWX;
4544+ create_mapping(&kernel);
4545+
4546+ if (map.virtual < (unsigned long)_stext) {
4547+ map.length = (unsigned long)_stext - map.virtual;
4548+ map.type = MT_MEMORY_RWX;
4549+ create_mapping(&map);
4550+ }
4551+
4552+ map.pfn = __phys_to_pfn(__pa(_sdata));
4553+ map.virtual = (unsigned long)_sdata;
4554+ map.length = end - __pa(_sdata);
4555+ }
4556+#endif
4557+
4558+ map.type = MT_MEMORY_RW;
4559 create_mapping(&map);
4560 }
4561 }
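
The `map_lowmem()` change above stops mapping all of lowmem with one RWX type: under KERNEXEC the range is split so only the kernel image keeps executable sections (tightened to RX/RW at `free_initmem()`) and everything else is mapped `MT_MEMORY_RW`. A toy model of the carve-up; the addresses are invented and `create_mapping()` just prints its argument:

```c
#include <stdio.h>

struct map_desc { unsigned long virt, len; const char *type; };

static void create_mapping(const struct map_desc *m)
{
	printf("%#010lx-%#010lx  %s\n", m->virt, m->virt + m->len, m->type);
}

int main(void)
{
	const unsigned long start = 0xc0000000UL, end = 0xc8000000UL;
	const unsigned long stext = 0xc0008000UL, init_end = 0xc0900000UL;
	struct map_desc map;

	if (start < stext) {	/* below the image: data only, no exec */
		map = (struct map_desc){ start, stext - start, "MT_MEMORY_RW" };
		create_mapping(&map);
	}
	map = (struct map_desc){ stext, init_end - stext, "MT_MEMORY_RWX" };
	create_mapping(&map);	/* kernel image, fixed up after init */
	map = (struct map_desc){ init_end, end - init_end, "MT_MEMORY_RW" };
	create_mapping(&map);
	return 0;
}
```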
4562diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4563index 6d98c13..3cfb174 100644
4564--- a/arch/arm/mm/proc-v7-2level.S
4565+++ b/arch/arm/mm/proc-v7-2level.S
4566@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4567 tst r1, #L_PTE_XN
4568 orrne r3, r3, #PTE_EXT_XN
4569
4570+ tst r1, #L_PTE_PXN
4571+ orrne r3, r3, #PTE_EXT_PXN
4572+
4573 tst r1, #L_PTE_YOUNG
4574 tstne r1, #L_PTE_VALID
4575 #ifndef CONFIG_CPU_USE_DOMAINS
4576diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4577index a5bc92d..0bb4730 100644
4578--- a/arch/arm/plat-omap/sram.c
4579+++ b/arch/arm/plat-omap/sram.c
4580@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4581 * Looks like we need to preserve some bootloader code at the
4582 * beginning of SRAM for jumping to flash for reboot to work...
4583 */
4584+ pax_open_kernel();
4585 memset_io(omap_sram_base + omap_sram_skip, 0,
4586 omap_sram_size - omap_sram_skip);
4587+ pax_close_kernel();
4588 }
4589diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4590index f5144cd..71f6d1f 100644
4591--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4592+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4593@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4594 int (*started)(unsigned ch);
4595 int (*flush)(unsigned ch);
4596 int (*stop)(unsigned ch);
4597-};
4598+} __no_const;
4599
4600 extern void *samsung_dmadev_get_ops(void);
4601 extern void *s3c_dma_get_ops(void);
4602diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4603index 0c3ba9f..95722b3 100644
4604--- a/arch/arm64/kernel/debug-monitors.c
4605+++ b/arch/arm64/kernel/debug-monitors.c
4606@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4607 return NOTIFY_OK;
4608 }
4609
4610-static struct notifier_block __cpuinitdata os_lock_nb = {
4611+static struct notifier_block os_lock_nb = {
4612 .notifier_call = os_lock_notify,
4613 };
4614
4615diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4616index 5ab825c..96aaec8 100644
4617--- a/arch/arm64/kernel/hw_breakpoint.c
4618+++ b/arch/arm64/kernel/hw_breakpoint.c
4619@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4620 return NOTIFY_OK;
4621 }
4622
4623-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4624+static struct notifier_block hw_breakpoint_reset_nb = {
4625 .notifier_call = hw_breakpoint_reset_notify,
4626 };
4627
4628diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4629index c3a58a1..78fbf54 100644
4630--- a/arch/avr32/include/asm/cache.h
4631+++ b/arch/avr32/include/asm/cache.h
4632@@ -1,8 +1,10 @@
4633 #ifndef __ASM_AVR32_CACHE_H
4634 #define __ASM_AVR32_CACHE_H
4635
4636+#include <linux/const.h>
4637+
4638 #define L1_CACHE_SHIFT 5
4639-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4640+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4641
4642 /*
4643 * Memory returned by kmalloc() may be used for DMA, so we must make
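
The `_AC(1,UL)` form used throughout these cache.h hunks makes `L1_CACHE_BYTES` an `unsigned long` in C (avoiding plain-int arithmetic in size expressions) while still expanding to a bare literal when the header is pulled into assembly. A compilable reimplementation of the `<linux/const.h>` idea:

```c
#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: plain literal */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: typed literal, e.g. 1UL */
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	printf("L1 line: %lu bytes\n", L1_CACHE_BYTES);
	return 0;
}
```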
4644diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4645index e2c3287..6c4f98c 100644
4646--- a/arch/avr32/include/asm/elf.h
4647+++ b/arch/avr32/include/asm/elf.h
4648@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4649 the loader. We need to make sure that it is out of the way of the program
4650 that it will "exec", and that there is sufficient room for the brk. */
4651
4652-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4653+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4654
4655+#ifdef CONFIG_PAX_ASLR
4656+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4657+
4658+#define PAX_DELTA_MMAP_LEN 15
4659+#define PAX_DELTA_STACK_LEN 15
4660+#endif
4661
4662 /* This yields a mask that user programs can use to figure out what
4663 instruction set this CPU supports. This could be done in user space,
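
`PAX_DELTA_MMAP_LEN`/`PAX_DELTA_STACK_LEN` above are bit counts applied at page granularity, so on avr32 the 15-bit deltas randomize each base across 2^15 pages. A one-liner to make the range concrete (4 KiB pages assumed):

```c
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* 4 KiB pages, assumed */
	const unsigned int delta_len  = 15;	/* PAX_DELTA_MMAP_LEN */
	unsigned long range = 1UL << (delta_len + page_shift);

	printf("randomisation range: %lu MiB\n", range >> 20);	/* 128 MiB */
	return 0;
}
```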
4664diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4665index 479330b..53717a8 100644
4666--- a/arch/avr32/include/asm/kmap_types.h
4667+++ b/arch/avr32/include/asm/kmap_types.h
4668@@ -2,9 +2,9 @@
4669 #define __ASM_AVR32_KMAP_TYPES_H
4670
4671 #ifdef CONFIG_DEBUG_HIGHMEM
4672-# define KM_TYPE_NR 29
4673+# define KM_TYPE_NR 30
4674 #else
4675-# define KM_TYPE_NR 14
4676+# define KM_TYPE_NR 15
4677 #endif
4678
4679 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4680diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4681index b2f2d2d..d1c85cb 100644
4682--- a/arch/avr32/mm/fault.c
4683+++ b/arch/avr32/mm/fault.c
4684@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4685
4686 int exception_trace = 1;
4687
4688+#ifdef CONFIG_PAX_PAGEEXEC
4689+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4690+{
4691+ unsigned long i;
4692+
4693+ printk(KERN_ERR "PAX: bytes at PC: ");
4694+ for (i = 0; i < 20; i++) {
4695+ unsigned char c;
4696+ if (get_user(c, (unsigned char *)pc+i))
4697+ printk(KERN_CONT "???????? ");
4698+ else
4699+ printk(KERN_CONT "%02x ", c);
4700+ }
4701+ printk("\n");
4702+}
4703+#endif
4704+
4705 /*
4706 * This routine handles page faults. It determines the address and the
4707 * problem, and then passes it off to one of the appropriate routines.
4708@@ -174,6 +191,16 @@ bad_area:
4709 up_read(&mm->mmap_sem);
4710
4711 if (user_mode(regs)) {
4712+
4713+#ifdef CONFIG_PAX_PAGEEXEC
4714+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4715+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4716+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4717+ do_group_exit(SIGKILL);
4718+ }
4719+ }
4720+#endif
4721+
4722 if (exception_trace && printk_ratelimit())
4723 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4724 "sp %08lx ecr %lu\n",
4725diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4726index 568885a..f8008df 100644
4727--- a/arch/blackfin/include/asm/cache.h
4728+++ b/arch/blackfin/include/asm/cache.h
4729@@ -7,6 +7,7 @@
4730 #ifndef __ARCH_BLACKFIN_CACHE_H
4731 #define __ARCH_BLACKFIN_CACHE_H
4732
4733+#include <linux/const.h>
4734 #include <linux/linkage.h> /* for asmlinkage */
4735
4736 /*
4737@@ -14,7 +15,7 @@
4738 * Blackfin loads 32 bytes for cache
4739 */
4740 #define L1_CACHE_SHIFT 5
4741-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4742+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4743 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4744
4745 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4746diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4747index aea2718..3639a60 100644
4748--- a/arch/cris/include/arch-v10/arch/cache.h
4749+++ b/arch/cris/include/arch-v10/arch/cache.h
4750@@ -1,8 +1,9 @@
4751 #ifndef _ASM_ARCH_CACHE_H
4752 #define _ASM_ARCH_CACHE_H
4753
4754+#include <linux/const.h>
4755 /* Etrax 100LX have 32-byte cache-lines. */
4756-#define L1_CACHE_BYTES 32
4757 #define L1_CACHE_SHIFT 5
4758+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4759
4760 #endif /* _ASM_ARCH_CACHE_H */
4761diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4762index 7caf25d..ee65ac5 100644
4763--- a/arch/cris/include/arch-v32/arch/cache.h
4764+++ b/arch/cris/include/arch-v32/arch/cache.h
4765@@ -1,11 +1,12 @@
4766 #ifndef _ASM_CRIS_ARCH_CACHE_H
4767 #define _ASM_CRIS_ARCH_CACHE_H
4768
4769+#include <linux/const.h>
4770 #include <arch/hwregs/dma.h>
4771
4772 /* A cache-line is 32 bytes. */
4773-#define L1_CACHE_BYTES 32
4774 #define L1_CACHE_SHIFT 5
4775+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4776
4777 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4778
4779diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4780index b86329d..6709906 100644
4781--- a/arch/frv/include/asm/atomic.h
4782+++ b/arch/frv/include/asm/atomic.h
4783@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4784 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4785 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4786
4787+#define atomic64_read_unchecked(v) atomic64_read(v)
4788+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4789+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4790+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4791+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4792+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4793+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4794+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4795+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4796+
4797 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4798 {
4799 int c, old;
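
The `*_unchecked` macro block above exists because under PAX_REFCOUNT the regular atomics on instrumented architectures trap on signed overflow, and counters that may legitimately wrap (statistics and the like) are moved to an `atomic*_unchecked_t` API; frv and ia64 have no instrumentation, so the patch simply aliases the two. A user-space model of the split using GCC's `__sync` builtins (the after-the-fact overflow check is a simplification, not PaX's actual mechanism):

```c
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	int old = __sync_fetch_and_add(&v->counter, 1);
	if (old == INT_MAX) {			/* refcount overflowed */
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* wrapping is acceptable */
}

int main(void)
{
	atomic_unchecked_t stats = { INT_MAX };
	atomic_t ref = { INT_MAX };

	atomic_inc_unchecked(&stats);		/* wraps, by design */
	printf("stats wrapped to %d\n", stats.counter);

	atomic_inc(&ref);			/* aborts with a report */
	return 0;
}
```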
4800diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4801index 2797163..c2a401d 100644
4802--- a/arch/frv/include/asm/cache.h
4803+++ b/arch/frv/include/asm/cache.h
4804@@ -12,10 +12,11 @@
4805 #ifndef __ASM_CACHE_H
4806 #define __ASM_CACHE_H
4807
4808+#include <linux/const.h>
4809
4810 /* bytes per L1 cache line */
4811 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4812-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4813+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4814
4815 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4816 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4817diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4818index 43901f2..0d8b865 100644
4819--- a/arch/frv/include/asm/kmap_types.h
4820+++ b/arch/frv/include/asm/kmap_types.h
4821@@ -2,6 +2,6 @@
4822 #ifndef _ASM_KMAP_TYPES_H
4823 #define _ASM_KMAP_TYPES_H
4824
4825-#define KM_TYPE_NR 17
4826+#define KM_TYPE_NR 18
4827
4828 #endif
4829diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4830index 385fd30..3aaf4fe 100644
4831--- a/arch/frv/mm/elf-fdpic.c
4832+++ b/arch/frv/mm/elf-fdpic.c
4833@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4834 {
4835 struct vm_area_struct *vma;
4836 unsigned long limit;
4837+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4838
4839 if (len > TASK_SIZE)
4840 return -ENOMEM;
4841@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4842 if (addr) {
4843 addr = PAGE_ALIGN(addr);
4844 vma = find_vma(current->mm, addr);
4845- if (TASK_SIZE - len >= addr &&
4846- (!vma || addr + len <= vma->vm_start))
4847+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4848 goto success;
4849 }
4850
4851@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4852 for (; vma; vma = vma->vm_next) {
4853 if (addr > limit)
4854 break;
4855- if (addr + len <= vma->vm_start)
4856+ if (check_heap_stack_gap(vma, addr, len, offset))
4857 goto success;
4858 addr = vma->vm_end;
4859 }
4860@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4861 for (; vma; vma = vma->vm_next) {
4862 if (addr > limit)
4863 break;
4864- if (addr + len <= vma->vm_start)
4865+ if (check_heap_stack_gap(vma, addr, len, offset))
4866 goto success;
4867 addr = vma->vm_end;
4868 }
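
The `check_heap_stack_gap()` substituted for the open-coded `addr + len <= vma->vm_start` tests above additionally enforces a keep-out zone below a stack VMA, widened by the per-thread random `offset`, so mappings cannot be placed flush against the stack. A compilable model; the gap policy and sizes are invented:

```c
#include <stdio.h>

#define VM_GROWSDOWN	1u
#define STACK_GAP	(4UL << 12)	/* e.g. 4 guard pages */

struct vma { unsigned long vm_start, vm_flags; };

static int check_heap_stack_gap(const struct vma *vma, unsigned long addr,
				unsigned long len, unsigned long offset)
{
	if (!vma)				/* nothing above: fits */
		return 1;
	if (vma->vm_flags & VM_GROWSDOWN)	/* stack: keep the gap */
		return addr + len + STACK_GAP + offset <= vma->vm_start;
	return addr + len <= vma->vm_start;
}

int main(void)
{
	struct vma stack = { 0x7f000000UL, VM_GROWSDOWN };

	/* flush against the stack: rejected */
	printf("%d\n", check_heap_stack_gap(&stack, 0x7efff000UL, 0x1000, 0));
	/* well below the gap: accepted */
	printf("%d\n", check_heap_stack_gap(&stack, 0x7e000000UL, 0x1000, 0));
	return 0;
}
```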
4869diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4870index f4ca594..adc72fd6 100644
4871--- a/arch/hexagon/include/asm/cache.h
4872+++ b/arch/hexagon/include/asm/cache.h
4873@@ -21,9 +21,11 @@
4874 #ifndef __ASM_CACHE_H
4875 #define __ASM_CACHE_H
4876
4877+#include <linux/const.h>
4878+
4879 /* Bytes per L1 cache line */
4880-#define L1_CACHE_SHIFT (5)
4881-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4882+#define L1_CACHE_SHIFT 5
4883+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4884
4885 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4886 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4887diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4888index 6e6fe18..a6ae668 100644
4889--- a/arch/ia64/include/asm/atomic.h
4890+++ b/arch/ia64/include/asm/atomic.h
4891@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4892 #define atomic64_inc(v) atomic64_add(1, (v))
4893 #define atomic64_dec(v) atomic64_sub(1, (v))
4894
4895+#define atomic64_read_unchecked(v) atomic64_read(v)
4896+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4897+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4898+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4899+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4900+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4901+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4902+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4903+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4904+
4905 /* Atomic operations are already serializing */
4906 #define smp_mb__before_atomic_dec() barrier()
4907 #define smp_mb__after_atomic_dec() barrier()
4908diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4909index 988254a..e1ee885 100644
4910--- a/arch/ia64/include/asm/cache.h
4911+++ b/arch/ia64/include/asm/cache.h
4912@@ -1,6 +1,7 @@
4913 #ifndef _ASM_IA64_CACHE_H
4914 #define _ASM_IA64_CACHE_H
4915
4916+#include <linux/const.h>
4917
4918 /*
4919 * Copyright (C) 1998-2000 Hewlett-Packard Co
4920@@ -9,7 +10,7 @@
4921
4922 /* Bytes per L1 (data) cache line. */
4923 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4924-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4925+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4926
4927 #ifdef CONFIG_SMP
4928 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4929diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4930index b5298eb..67c6e62 100644
4931--- a/arch/ia64/include/asm/elf.h
4932+++ b/arch/ia64/include/asm/elf.h
4933@@ -42,6 +42,13 @@
4934 */
4935 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4936
4937+#ifdef CONFIG_PAX_ASLR
4938+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4939+
4940+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4941+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4942+#endif
4943+
4944 #define PT_IA_64_UNWIND 0x70000001
4945
4946 /* IA-64 relocations: */
4947diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4948index 96a8d92..617a1cf 100644
4949--- a/arch/ia64/include/asm/pgalloc.h
4950+++ b/arch/ia64/include/asm/pgalloc.h
4951@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4952 pgd_val(*pgd_entry) = __pa(pud);
4953 }
4954
4955+static inline void
4956+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4957+{
4958+ pgd_populate(mm, pgd_entry, pud);
4959+}
4960+
4961 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4962 {
4963 return quicklist_alloc(0, GFP_KERNEL, NULL);
4964@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4965 pud_val(*pud_entry) = __pa(pmd);
4966 }
4967
4968+static inline void
4969+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4970+{
4971+ pud_populate(mm, pud_entry, pmd);
4972+}
4973+
4974 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4975 {
4976 return quicklist_alloc(0, GFP_KERNEL, NULL);
4977diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4978index 815810c..d60bd4c 100644
4979--- a/arch/ia64/include/asm/pgtable.h
4980+++ b/arch/ia64/include/asm/pgtable.h
4981@@ -12,7 +12,7 @@
4982 * David Mosberger-Tang <davidm@hpl.hp.com>
4983 */
4984
4985-
4986+#include <linux/const.h>
4987 #include <asm/mman.h>
4988 #include <asm/page.h>
4989 #include <asm/processor.h>
4990@@ -142,6 +142,17 @@
4991 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4992 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4993 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4994+
4995+#ifdef CONFIG_PAX_PAGEEXEC
4996+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4997+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4998+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4999+#else
5000+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5001+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5002+# define PAGE_COPY_NOEXEC PAGE_COPY
5003+#endif
5004+
5005 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5006 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5007 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5008diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5009index 54ff557..70c88b7 100644
5010--- a/arch/ia64/include/asm/spinlock.h
5011+++ b/arch/ia64/include/asm/spinlock.h
5012@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5013 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5014
5015 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5016- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5017+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5018 }
5019
5020 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5021diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5022index 449c8c0..18965fb 100644
5023--- a/arch/ia64/include/asm/uaccess.h
5024+++ b/arch/ia64/include/asm/uaccess.h
5025@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5026 static inline unsigned long
5027 __copy_to_user (void __user *to, const void *from, unsigned long count)
5028 {
5029+ if (count > INT_MAX)
5030+ return count;
5031+
5032+ if (!__builtin_constant_p(count))
5033+ check_object_size(from, count, true);
5034+
5035 return __copy_user(to, (__force void __user *) from, count);
5036 }
5037
5038 static inline unsigned long
5039 __copy_from_user (void *to, const void __user *from, unsigned long count)
5040 {
5041+ if (count > INT_MAX)
5042+ return count;
5043+
5044+ if (!__builtin_constant_p(count))
5045+ check_object_size(to, count, false);
5046+
5047 return __copy_user((__force void __user *) to, from, count);
5048 }
5049
5050@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5051 ({ \
5052 void __user *__cu_to = (to); \
5053 const void *__cu_from = (from); \
5054- long __cu_len = (n); \
5055+ unsigned long __cu_len = (n); \
5056 \
5057- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5058+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5059+ if (!__builtin_constant_p(n)) \
5060+ check_object_size(__cu_from, __cu_len, true); \
5061 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5062+ } \
5063 __cu_len; \
5064 })
5065
5066@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5067 ({ \
5068 void *__cu_to = (to); \
5069 const void __user *__cu_from = (from); \
5070- long __cu_len = (n); \
5071+ unsigned long __cu_len = (n); \
5072 \
5073 __chk_user_ptr(__cu_from); \
5074- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5075+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5076+ if (!__builtin_constant_p(n)) \
5077+ check_object_size(__cu_to, __cu_len, false); \
5078 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5079+ } \
5080 __cu_len; \
5081 })
5082
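
Two guards are layered onto ia64's copy helpers above: lengths above INT_MAX (typically a negative value that went through an unsigned conversion) are rejected outright, and non-constant lengths are routed through check_object_size(), grsecurity's PAX_USERCOPY bounds check. A userspace sketch of the control flow, with a stub standing in for both check_object_size() and __copy_user():

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    /* stand-in: the real helper validates ptr/len against slab and stack bounds */
    static void check_object_size(const void *ptr, unsigned long n, int to_user)
    {
            (void)ptr; (void)to_user;
            fprintf(stderr, "runtime bounds check for %lu bytes\n", n);
    }

    static unsigned long copy_to_user_sketch(void *to, const void *from,
                                             unsigned long count)
    {
            if (count > INT_MAX)
                    return count;                   /* all bytes "uncopied" */

            if (!__builtin_constant_p(count))       /* constants are checked at build time */
                    check_object_size(from, count, 1);

            memcpy(to, from, count);                /* stands in for __copy_user() */
            return 0;
    }

    int main(void)
    {
            char src[8] = "payload", dst[8];

            printf("bogus: %lu left\n", copy_to_user_sketch(dst, src, (unsigned long)-1));
            printf("good:  %lu left\n", copy_to_user_sketch(dst, src, sizeof(src)));
            return 0;
    }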
5083diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
5084index 2d67317..07d8bfa 100644
5085--- a/arch/ia64/kernel/err_inject.c
5086+++ b/arch/ia64/kernel/err_inject.c
5087@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
5088 return NOTIFY_OK;
5089 }
5090
5091-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
5092+static struct notifier_block err_inject_cpu_notifier =
5093 {
5094 .notifier_call = err_inject_cpu_callback,
5095 };
5096diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
5097index 65bf9cd..794f06b 100644
5098--- a/arch/ia64/kernel/mca.c
5099+++ b/arch/ia64/kernel/mca.c
5100@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
5101 return NOTIFY_OK;
5102 }
5103
5104-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
5105+static struct notifier_block mca_cpu_notifier = {
5106 .notifier_call = mca_cpu_callback
5107 };
5108
5109diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5110index 24603be..948052d 100644
5111--- a/arch/ia64/kernel/module.c
5112+++ b/arch/ia64/kernel/module.c
5113@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5114 void
5115 module_free (struct module *mod, void *module_region)
5116 {
5117- if (mod && mod->arch.init_unw_table &&
5118- module_region == mod->module_init) {
5119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5120 unw_remove_unwind_table(mod->arch.init_unw_table);
5121 mod->arch.init_unw_table = NULL;
5122 }
5123@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5124 }
5125
5126 static inline int
5127+in_init_rx (const struct module *mod, uint64_t addr)
5128+{
5129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5130+}
5131+
5132+static inline int
5133+in_init_rw (const struct module *mod, uint64_t addr)
5134+{
5135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5136+}
5137+
5138+static inline int
5139 in_init (const struct module *mod, uint64_t addr)
5140 {
5141- return addr - (uint64_t) mod->module_init < mod->init_size;
5142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5143+}
5144+
5145+static inline int
5146+in_core_rx (const struct module *mod, uint64_t addr)
5147+{
5148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5149+}
5150+
5151+static inline int
5152+in_core_rw (const struct module *mod, uint64_t addr)
5153+{
5154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5155 }
5156
5157 static inline int
5158 in_core (const struct module *mod, uint64_t addr)
5159 {
5160- return addr - (uint64_t) mod->module_core < mod->core_size;
5161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5162 }
5163
5164 static inline int
5165@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5166 break;
5167
5168 case RV_BDREL:
5169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5170+ if (in_init_rx(mod, val))
5171+ val -= (uint64_t) mod->module_init_rx;
5172+ else if (in_init_rw(mod, val))
5173+ val -= (uint64_t) mod->module_init_rw;
5174+ else if (in_core_rx(mod, val))
5175+ val -= (uint64_t) mod->module_core_rx;
5176+ else if (in_core_rw(mod, val))
5177+ val -= (uint64_t) mod->module_core_rw;
5178 break;
5179
5180 case RV_LTV:
5181@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5182 * addresses have been selected...
5183 */
5184 uint64_t gp;
5185- if (mod->core_size > MAX_LTOFF)
5186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5187 /*
5188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5189 * at the end of the module.
5190 */
5191- gp = mod->core_size - MAX_LTOFF / 2;
5192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5193 else
5194- gp = mod->core_size / 2;
5195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5198 mod->arch.gp = gp;
5199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5200 }
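
The in_init_rx()/in_core_rw() helpers above all rely on the same idiom: with unsigned arithmetic, addr - base wraps around when addr < base, so the single comparison "addr - base < size" tests base <= addr < base + size. The RX/RW split itself comes from PaX's module layout, which keeps a module's executable and writable halves in separate regions. A small demonstration of just the range check:

    #include <stdint.h>
    #include <stdio.h>

    static int in_range(uint64_t addr, uint64_t base, uint64_t size)
    {
            return addr - base < size;      /* wraps for addr < base */
    }

    int main(void)
    {
            uint64_t base = 0x1000, size = 0x100;

            printf("%d %d %d\n",
                   in_range(0x0fff, base, size),   /* 0: below */
                   in_range(0x1080, base, size),   /* 1: inside */
                   in_range(0x1100, base, size));  /* 0: one past the end */
            return 0;
    }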
5201diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5202index 77597e5..6f28f3f 100644
5203--- a/arch/ia64/kernel/palinfo.c
5204+++ b/arch/ia64/kernel/palinfo.c
5205@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5206 return NOTIFY_OK;
5207 }
5208
5209-static struct notifier_block __refdata palinfo_cpu_notifier =
5210+static struct notifier_block palinfo_cpu_notifier =
5211 {
5212 .notifier_call = palinfo_cpu_callback,
5213 .priority = 0,
5214diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5215index 79802e5..1a89ec5 100644
5216--- a/arch/ia64/kernel/salinfo.c
5217+++ b/arch/ia64/kernel/salinfo.c
5218@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5219 return NOTIFY_OK;
5220 }
5221
5222-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5223+static struct notifier_block salinfo_cpu_notifier =
5224 {
5225 .notifier_call = salinfo_cpu_callback,
5226 .priority = 0,
5227diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5228index d9439ef..d0cac6b 100644
5229--- a/arch/ia64/kernel/sys_ia64.c
5230+++ b/arch/ia64/kernel/sys_ia64.c
5231@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5232 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
5233 struct mm_struct *mm = current->mm;
5234 struct vm_area_struct *vma;
5235+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5236
5237 if (len > RGN_MAP_LIMIT)
5238 return -ENOMEM;
5239@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5240 if (REGION_NUMBER(addr) == RGN_HPAGE)
5241 addr = 0;
5242 #endif
5243+
5244+#ifdef CONFIG_PAX_RANDMMAP
5245+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5246+ addr = mm->free_area_cache;
5247+ else
5248+#endif
5249+
5250 if (!addr)
5251 addr = mm->free_area_cache;
5252
5253@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5254 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
5255 /* At this point: (!vma || addr < vma->vm_end). */
5256 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
5257- if (start_addr != TASK_UNMAPPED_BASE) {
5258+ if (start_addr != mm->mmap_base) {
5259 /* Start a new search --- just in case we missed some holes. */
5260- addr = TASK_UNMAPPED_BASE;
5261+ addr = mm->mmap_base;
5262 goto full_search;
5263 }
5264 return -ENOMEM;
5265 }
5266- if (!vma || addr + len <= vma->vm_start) {
5267+ if (check_heap_stack_gap(vma, addr, len, offset)) {
5268 /* Remember the address where we stopped this search: */
5269 mm->free_area_cache = addr + len;
5270 return addr;
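
check_heap_stack_gap() above replaces the open-coded "!vma || addr + len <= vma->vm_start" test with one that also demands a gap below the next mapping (sized per-process by gr_rand_threadstack_offset() for stack-adjacent mappings). A simplified model of the comparison; the real helper additionally special-cases VM_GROWSDOWN, and the struct and gap policy here are stand-ins:

    #include <stdio.h>

    struct vma_sketch {
            unsigned long vm_start;
            unsigned long vm_end;
    };

    static int check_gap(const struct vma_sketch *next, unsigned long addr,
                         unsigned long len, unsigned long gap)
    {
            if (!next)                      /* nothing above: always fits */
                    return 1;
            return addr + len + gap <= next->vm_start;
    }

    int main(void)
    {
            struct vma_sketch stack = { 0x7f0000, 0x800000 };

            printf("%d\n", check_gap(&stack, 0x6e0000, 0x1000, 0x10000)); /* 1: fits */
            printf("%d\n", check_gap(&stack, 0x7ef000, 0x1000, 0x10000)); /* 0: too close */
            return 0;
    }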
5271diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5272index dc00b2c..cce53c2 100644
5273--- a/arch/ia64/kernel/topology.c
5274+++ b/arch/ia64/kernel/topology.c
5275@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5276 return NOTIFY_OK;
5277 }
5278
5279-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5280+static struct notifier_block cache_cpu_notifier =
5281 {
5282 .notifier_call = cache_cpu_callback
5283 };
5284diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5285index 0ccb28f..8992469 100644
5286--- a/arch/ia64/kernel/vmlinux.lds.S
5287+++ b/arch/ia64/kernel/vmlinux.lds.S
5288@@ -198,7 +198,7 @@ SECTIONS {
5289 /* Per-cpu data: */
5290 . = ALIGN(PERCPU_PAGE_SIZE);
5291 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5292- __phys_per_cpu_start = __per_cpu_load;
5293+ __phys_per_cpu_start = per_cpu_load;
5294 /*
5295 * ensure percpu data fits
5296 * into percpu page size
5297diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5298index 6cf0341..d352594 100644
5299--- a/arch/ia64/mm/fault.c
5300+++ b/arch/ia64/mm/fault.c
5301@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5302 return pte_present(pte);
5303 }
5304
5305+#ifdef CONFIG_PAX_PAGEEXEC
5306+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5307+{
5308+ unsigned long i;
5309+
5310+ printk(KERN_ERR "PAX: bytes at PC: ");
5311+ for (i = 0; i < 8; i++) {
5312+ unsigned int c;
5313+ if (get_user(c, (unsigned int *)pc+i))
5314+ printk(KERN_CONT "???????? ");
5315+ else
5316+ printk(KERN_CONT "%08x ", c);
5317+ }
5318+ printk("\n");
5319+}
5320+#endif
5321+
5322 # define VM_READ_BIT 0
5323 # define VM_WRITE_BIT 1
5324 # define VM_EXEC_BIT 2
5325@@ -149,8 +166,21 @@ retry:
5326 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5327 goto bad_area;
5328
5329- if ((vma->vm_flags & mask) != mask)
5330+ if ((vma->vm_flags & mask) != mask) {
5331+
5332+#ifdef CONFIG_PAX_PAGEEXEC
5333+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5334+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5335+ goto bad_area;
5336+
5337+ up_read(&mm->mmap_sem);
5338+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5339+ do_group_exit(SIGKILL);
5340+ }
5341+#endif
5342+
5343 goto bad_area;
5344+ }
5345
5346 /*
5347 * If for any reason at all we couldn't handle the fault, make
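
pax_report_insns() above logs the instruction words around a PAGEEXEC kill so the offending code can be identified post-mortem; get_user() is used because the PC may point at an unmapped page. A userspace re-creation of the loop, with a bounds-checked fetch standing in for get_user():

    #include <stdio.h>

    static int fetch_word(unsigned int *dst, unsigned long idx,
                          const unsigned int *text, unsigned long nwords)
    {
            if (idx >= nwords)
                    return -1;              /* simulated get_user() fault */
            *dst = text[idx];
            return 0;
    }

    int main(void)
    {
            unsigned int text[4] = { 0xdeadbeef, 0x0badc0de, 0x12345678, 0 };
            unsigned long i;

            printf("PAX: bytes at PC: ");
            for (i = 0; i < 8; i++) {
                    unsigned int c;

                    if (fetch_word(&c, i, text, 4))
                            printf("???????? ");
                    else
                            printf("%08x ", c);
            }
            printf("\n");
            return 0;
    }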
5348diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5349index 5ca674b..127c3cb 100644
5350--- a/arch/ia64/mm/hugetlbpage.c
5351+++ b/arch/ia64/mm/hugetlbpage.c
5352@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5353 unsigned long pgoff, unsigned long flags)
5354 {
5355 struct vm_area_struct *vmm;
5356+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5357
5358 if (len > RGN_MAP_LIMIT)
5359 return -ENOMEM;
5360@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5361 /* At this point: (!vmm || addr < vmm->vm_end). */
5362 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
5363 return -ENOMEM;
5364- if (!vmm || (addr + len) <= vmm->vm_start)
5365+ if (check_heap_stack_gap(vmm, addr, len, offset))
5366 return addr;
5367 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
5368 }
5369diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5370index b755ea9..b9a969e 100644
5371--- a/arch/ia64/mm/init.c
5372+++ b/arch/ia64/mm/init.c
5373@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5374 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5375 vma->vm_end = vma->vm_start + PAGE_SIZE;
5376 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5377+
5378+#ifdef CONFIG_PAX_PAGEEXEC
5379+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5380+ vma->vm_flags &= ~VM_EXEC;
5381+
5382+#ifdef CONFIG_PAX_MPROTECT
5383+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5384+ vma->vm_flags &= ~VM_MAYEXEC;
5385+#endif
5386+
5387+ }
5388+#endif
5389+
5390 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5391 down_write(&current->mm->mmap_sem);
5392 if (insert_vm_struct(current->mm, vma)) {
5393diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5394index 40b3ee9..8c2c112 100644
5395--- a/arch/m32r/include/asm/cache.h
5396+++ b/arch/m32r/include/asm/cache.h
5397@@ -1,8 +1,10 @@
5398 #ifndef _ASM_M32R_CACHE_H
5399 #define _ASM_M32R_CACHE_H
5400
5401+#include <linux/const.h>
5402+
5403 /* L1 cache line size */
5404 #define L1_CACHE_SHIFT 4
5405-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5406+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5407
5408 #endif /* _ASM_M32R_CACHE_H */
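
Every cache.h hunk in this patch performs the same substitution, and the reason is the _AC() macro from linux/const.h: in C it token-pastes a UL suffix onto the literal, making L1_CACHE_BYTES an unsigned long so it behaves predictably in size arithmetic, while in assembly (where suffixes are illegal) it expands to the bare number. The mechanism, reproduced here for illustration:

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X               /* bare literal for .S files */
    #else
    #define __AC(X, Y)      (X##Y)          /* paste the UL suffix in C */
    #define _AC(X, Y)       __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT  4
    #define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
            /* unsigned long wide; a plain int literal would be signed int */
            printf("%zu bytes wide, value %lu\n",
                   sizeof(L1_CACHE_BYTES), L1_CACHE_BYTES);
            return 0;
    }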
5409diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5410index 82abd15..d95ae5d 100644
5411--- a/arch/m32r/lib/usercopy.c
5412+++ b/arch/m32r/lib/usercopy.c
5413@@ -14,6 +14,9 @@
5414 unsigned long
5415 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5416 {
5417+ if ((long)n < 0)
5418+ return n;
5419+
5420 prefetch(from);
5421 if (access_ok(VERIFY_WRITE, to, n))
5422 __copy_user(to,from,n);
5423@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5424 unsigned long
5425 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5426 {
5427+ if ((long)n < 0)
5428+ return n;
5429+
5430 prefetchw(to);
5431 if (access_ok(VERIFY_READ, from, n))
5432 __copy_user_zeroing(to,from,n);
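
m32r gets a lighter-weight guard than ia64's INT_MAX clamp: casting the length back to signed catches any value with the top bit set before access_ok() or the copy runs. The same idiom in isolation (on the usual two's-complement targets):

    #include <stdio.h>

    static unsigned long copy_guard_sketch(unsigned long n)
    {
            if ((long)n < 0)
                    return n;       /* refuse: report every byte uncopied */
            /* ... prefetch and __copy_user() would run here ... */
            return 0;
    }

    int main(void)
    {
            printf("%lu\n", copy_guard_sketch((unsigned long)-4));  /* refused */
            printf("%lu\n", copy_guard_sketch(64));                 /* copied */
            return 0;
    }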
5433diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5434index 0395c51..5f26031 100644
5435--- a/arch/m68k/include/asm/cache.h
5436+++ b/arch/m68k/include/asm/cache.h
5437@@ -4,9 +4,11 @@
5438 #ifndef __ARCH_M68K_CACHE_H
5439 #define __ARCH_M68K_CACHE_H
5440
5441+#include <linux/const.h>
5442+
5443 /* bytes per L1 cache line */
5444 #define L1_CACHE_SHIFT 4
5445-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5446+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5447
5448 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5449
5450diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5451index 4efe96a..60e8699 100644
5452--- a/arch/microblaze/include/asm/cache.h
5453+++ b/arch/microblaze/include/asm/cache.h
5454@@ -13,11 +13,12 @@
5455 #ifndef _ASM_MICROBLAZE_CACHE_H
5456 #define _ASM_MICROBLAZE_CACHE_H
5457
5458+#include <linux/const.h>
5459 #include <asm/registers.h>
5460
5461 #define L1_CACHE_SHIFT 5
5462 /* word-granular cache in microblaze */
5463-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5464+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5465
5466 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5467
5468diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5469index 01cc6ba..bcb7a5d 100644
5470--- a/arch/mips/include/asm/atomic.h
5471+++ b/arch/mips/include/asm/atomic.h
5472@@ -21,6 +21,10 @@
5473 #include <asm/cmpxchg.h>
5474 #include <asm/war.h>
5475
5476+#ifdef CONFIG_GENERIC_ATOMIC64
5477+#include <asm-generic/atomic64.h>
5478+#endif
5479+
5480 #define ATOMIC_INIT(i) { (i) }
5481
5482 /*
5483@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5484 */
5485 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5486
5487+#define atomic64_read_unchecked(v) atomic64_read(v)
5488+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5489+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5490+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5491+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5492+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5493+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5494+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5495+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5496+
5497 #endif /* CONFIG_64BIT */
5498
5499 /*
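
The *_unchecked aliases above are the mips side of PaX's REFCOUNT scheme: on instrumented architectures the plain atomic ops trap on signed overflow, and call sites that may legitimately wrap (statistics, sequence cookies) are converted to the _unchecked variants; where no instrumentation exists, as here, the two are defined to be the same operation. A sketch of the distinction; the overflow test below is illustrative, not the kernel's implementation:

    #include <stdio.h>

    typedef struct { long long counter; } atomic64_sketch_t;

    static void atomic64_add_checked(long long i, atomic64_sketch_t *v)
    {
            unsigned long long old = (unsigned long long)v->counter;
            unsigned long long sum = old + (unsigned long long)i;

            v->counter = (long long)sum;    /* two's-complement wrap assumed */
            /* operands agreed in sign but the result flipped: overflow */
            if ((~(old ^ (unsigned long long)i) & (old ^ sum)) >> 63)
                    fprintf(stderr, "refcount overflow detected!\n");
    }

    /* this arch has no overflow instrumentation: unchecked == checked */
    #define atomic64_add_unchecked(i, v)    atomic64_add_checked((i), (v))

    int main(void)
    {
            atomic64_sketch_t v = { 0x7fffffffffffffffLL };

            atomic64_add_unchecked(1, &v);
            printf("counter now %lld\n", v.counter);
            return 0;
    }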
5500diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5501index b4db69f..8f3b093 100644
5502--- a/arch/mips/include/asm/cache.h
5503+++ b/arch/mips/include/asm/cache.h
5504@@ -9,10 +9,11 @@
5505 #ifndef _ASM_CACHE_H
5506 #define _ASM_CACHE_H
5507
5508+#include <linux/const.h>
5509 #include <kmalloc.h>
5510
5511 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5512-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5513+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5514
5515 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5516 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5517diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5518index 455c0ac..ad65fbe 100644
5519--- a/arch/mips/include/asm/elf.h
5520+++ b/arch/mips/include/asm/elf.h
5521@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5522 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5523 #endif
5524
5525+#ifdef CONFIG_PAX_ASLR
5526+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5527+
5528+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5529+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5530+#endif
5531+
5532 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5533 struct linux_binprm;
5534 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5535 int uses_interp);
5536
5537-struct mm_struct;
5538-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5539-#define arch_randomize_brk arch_randomize_brk
5540-
5541 #endif /* _ASM_ELF_H */
5542diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5543index c1f6afa..38cc6e9 100644
5544--- a/arch/mips/include/asm/exec.h
5545+++ b/arch/mips/include/asm/exec.h
5546@@ -12,6 +12,6 @@
5547 #ifndef _ASM_EXEC_H
5548 #define _ASM_EXEC_H
5549
5550-extern unsigned long arch_align_stack(unsigned long sp);
5551+#define arch_align_stack(x) ((x) & ~0xfUL)
5552
5553 #endif /* _ASM_EXEC_H */
5554diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5555index dbaec94..6a14935 100644
5556--- a/arch/mips/include/asm/page.h
5557+++ b/arch/mips/include/asm/page.h
5558@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5559 #ifdef CONFIG_CPU_MIPS32
5560 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5561 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5562- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5563+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5564 #else
5565 typedef struct { unsigned long long pte; } pte_t;
5566 #define pte_val(x) ((x).pte)
5567diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5568index 881d18b..cea38bc 100644
5569--- a/arch/mips/include/asm/pgalloc.h
5570+++ b/arch/mips/include/asm/pgalloc.h
5571@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5572 {
5573 set_pud(pud, __pud((unsigned long)pmd));
5574 }
5575+
5576+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5577+{
5578+ pud_populate(mm, pud, pmd);
5579+}
5580 #endif
5581
5582 /*
5583diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5584index b2050b9..d71bb1b 100644
5585--- a/arch/mips/include/asm/thread_info.h
5586+++ b/arch/mips/include/asm/thread_info.h
5587@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5588 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5589 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5590 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5591+/* li takes a 32bit immediate */
5592+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5593 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5594
5595 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5596@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5597 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5598 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5599 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5600+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5601+
5602+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5603
5604 /* work to do in syscall_trace_leave() */
5605-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5606+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5607
5608 /* work to do on interrupt/exception return */
5609 #define _TIF_WORK_MASK \
5610 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5611 /* work to do on any return to u-space */
5612-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5613+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5614
5615 #endif /* __KERNEL__ */
5616
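
The thread_info.h change is the glue for grsecurity's delayed-credential machinery: TIF_GRSEC_SETXID is folded into a single _TIF_SYSCALL_WORK mask so the assembly entry stubs patched below can keep testing one immediate. How the mask composition behaves, using the hunk's bit numbers (the audit bit is an invented placeholder):

    #include <stdio.h>

    #define TIF_SYSCALL_TRACE       31
    #define TIF_SYSCALL_AUDIT       7       /* illustrative bit number */
    #define TIF_GRSEC_SETXID        29

    #define _TIF_SYSCALL_TRACE      (1U << TIF_SYSCALL_TRACE)
    #define _TIF_SYSCALL_AUDIT      (1U << TIF_SYSCALL_AUDIT)
    #define _TIF_GRSEC_SETXID       (1U << TIF_GRSEC_SETXID)

    #define _TIF_SYSCALL_WORK \
            (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)

    int main(void)
    {
            unsigned int ti_flags = _TIF_GRSEC_SETXID;  /* creds update pending */

            if (ti_flags & _TIF_SYSCALL_WORK)
                    printf("take the syscall_trace_entry slow path\n");
            return 0;
    }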
5617diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5618index 9fdd8bc..4bd7f1a 100644
5619--- a/arch/mips/kernel/binfmt_elfn32.c
5620+++ b/arch/mips/kernel/binfmt_elfn32.c
5621@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5622 #undef ELF_ET_DYN_BASE
5623 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5624
5625+#ifdef CONFIG_PAX_ASLR
5626+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5627+
5628+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5629+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5630+#endif
5631+
5632 #include <asm/processor.h>
5633 #include <linux/module.h>
5634 #include <linux/elfcore.h>
5635diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5636index ff44823..97f8906 100644
5637--- a/arch/mips/kernel/binfmt_elfo32.c
5638+++ b/arch/mips/kernel/binfmt_elfo32.c
5639@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5640 #undef ELF_ET_DYN_BASE
5641 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5642
5643+#ifdef CONFIG_PAX_ASLR
5644+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5645+
5646+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5647+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5648+#endif
5649+
5650 #include <asm/processor.h>
5651
5652 /*
5653diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5654index a11c6f9..be5e164 100644
5655--- a/arch/mips/kernel/process.c
5656+++ b/arch/mips/kernel/process.c
5657@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5658 out:
5659 return pc;
5660 }
5661-
5662-/*
5663- * Don't forget that the stack pointer must be aligned on a 8 bytes
5664- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5665- */
5666-unsigned long arch_align_stack(unsigned long sp)
5667-{
5668- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5669- sp -= get_random_int() & ~PAGE_MASK;
5670-
5671- return sp & ALMASK;
5672-}
5673diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5674index 4812c6d..2069554 100644
5675--- a/arch/mips/kernel/ptrace.c
5676+++ b/arch/mips/kernel/ptrace.c
5677@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5678 return arch;
5679 }
5680
5681+#ifdef CONFIG_GRKERNSEC_SETXID
5682+extern void gr_delayed_cred_worker(void);
5683+#endif
5684+
5685 /*
5686 * Notification of system call entry/exit
5687 * - triggered by current->work.syscall_trace
5688@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5689 /* do the secure computing check first */
5690 secure_computing_strict(regs->regs[2]);
5691
5692+#ifdef CONFIG_GRKERNSEC_SETXID
5693+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5694+ gr_delayed_cred_worker();
5695+#endif
5696+
5697 if (!(current->ptrace & PT_PTRACED))
5698 goto out;
5699
5700diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5701index d20a4bc..7096ae5 100644
5702--- a/arch/mips/kernel/scall32-o32.S
5703+++ b/arch/mips/kernel/scall32-o32.S
5704@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5705
5706 stack_done:
5707 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5708- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5709+ li t1, _TIF_SYSCALL_WORK
5710 and t0, t1
5711 bnez t0, syscall_trace_entry # -> yes
5712
5713diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5714index b64f642..0fe6eab 100644
5715--- a/arch/mips/kernel/scall64-64.S
5716+++ b/arch/mips/kernel/scall64-64.S
5717@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5718
5719 sd a3, PT_R26(sp) # save a3 for syscall restarting
5720
5721- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5722+ li t1, _TIF_SYSCALL_WORK
5723 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5724 and t0, t1, t0
5725 bnez t0, syscall_trace_entry
5726diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5727index c29ac19..c592d05 100644
5728--- a/arch/mips/kernel/scall64-n32.S
5729+++ b/arch/mips/kernel/scall64-n32.S
5730@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5731
5732 sd a3, PT_R26(sp) # save a3 for syscall restarting
5733
5734- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5735+ li t1, _TIF_SYSCALL_WORK
5736 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5737 and t0, t1, t0
5738 bnez t0, n32_syscall_trace_entry
5739diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5740index cf3e75e..72e93fe 100644
5741--- a/arch/mips/kernel/scall64-o32.S
5742+++ b/arch/mips/kernel/scall64-o32.S
5743@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5744 PTR 4b, bad_stack
5745 .previous
5746
5747- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5748+ li t1, _TIF_SYSCALL_WORK
5749 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5750 and t0, t1, t0
5751 bnez t0, trace_a_syscall
5752diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5753index ddcec1e..c7f983e 100644
5754--- a/arch/mips/mm/fault.c
5755+++ b/arch/mips/mm/fault.c
5756@@ -27,6 +27,23 @@
5757 #include <asm/highmem.h> /* For VMALLOC_END */
5758 #include <linux/kdebug.h>
5759
5760+#ifdef CONFIG_PAX_PAGEEXEC
5761+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5762+{
5763+ unsigned long i;
5764+
5765+ printk(KERN_ERR "PAX: bytes at PC: ");
5766+ for (i = 0; i < 5; i++) {
5767+ unsigned int c;
5768+ if (get_user(c, (unsigned int *)pc+i))
5769+ printk(KERN_CONT "???????? ");
5770+ else
5771+ printk(KERN_CONT "%08x ", c);
5772+ }
5773+ printk("\n");
5774+}
5775+#endif
5776+
5777 /*
5778 * This routine handles page faults. It determines the address,
5779 * and the problem, and then passes it off to one of the appropriate
5780diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5781index 7e5fe27..479a219 100644
5782--- a/arch/mips/mm/mmap.c
5783+++ b/arch/mips/mm/mmap.c
5784@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5785 struct vm_area_struct *vma;
5786 unsigned long addr = addr0;
5787 int do_color_align;
5788+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5789 struct vm_unmapped_area_info info;
5790
5791 if (unlikely(len > TASK_SIZE))
5792@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5793 do_color_align = 1;
5794
5795 /* requesting a specific address */
5796+
5797+#ifdef CONFIG_PAX_RANDMMAP
5798+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5799+#endif
5800+
5801 if (addr) {
5802 if (do_color_align)
5803 addr = COLOUR_ALIGN(addr, pgoff);
5804@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5805 addr = PAGE_ALIGN(addr);
5806
5807 vma = find_vma(mm, addr);
5808- if (TASK_SIZE - len >= addr &&
5809- (!vma || addr + len <= vma->vm_start))
5810+	if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5811 return addr;
5812 }
5813
5814@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5815 {
5816 unsigned long random_factor = 0UL;
5817
5818+#ifdef CONFIG_PAX_RANDMMAP
5819+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5820+#endif
5821+
5822 if (current->flags & PF_RANDOMIZE) {
5823 random_factor = get_random_int();
5824 random_factor = random_factor << PAGE_SHIFT;
5825@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5826
5827 if (mmap_is_legacy()) {
5828 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5829+
5830+#ifdef CONFIG_PAX_RANDMMAP
5831+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5832+ mm->mmap_base += mm->delta_mmap;
5833+#endif
5834+
5835 mm->get_unmapped_area = arch_get_unmapped_area;
5836 mm->unmap_area = arch_unmap_area;
5837 } else {
5838 mm->mmap_base = mmap_base(random_factor);
5839+
5840+#ifdef CONFIG_PAX_RANDMMAP
5841+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5842+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5843+#endif
5844+
5845 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5846 mm->unmap_area = arch_unmap_area_topdown;
5847 }
5848 }
5849
5850-static inline unsigned long brk_rnd(void)
5851-{
5852- unsigned long rnd = get_random_int();
5853-
5854- rnd = rnd << PAGE_SHIFT;
5855- /* 8MB for 32bit, 256MB for 64bit */
5856- if (TASK_IS_32BIT_ADDR)
5857- rnd = rnd & 0x7ffffful;
5858- else
5859- rnd = rnd & 0xffffffful;
5860-
5861- return rnd;
5862-}
5863-
5864-unsigned long arch_randomize_brk(struct mm_struct *mm)
5865-{
5866- unsigned long base = mm->brk;
5867- unsigned long ret;
5868-
5869- ret = PAGE_ALIGN(base + brk_rnd());
5870-
5871- if (ret < mm->brk)
5872- return mm->brk;
5873-
5874- return ret;
5875-}
5876-
5877 int __virt_addr_valid(const volatile void *kaddr)
5878 {
5879 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
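
Besides removing mips's own arch_randomize_brk() (RANDMMAP kernels apply their brk delta centrally in the ELF loader instead), the mmap.c hunk biases mmap_base by per-process deltas: up by delta_mmap in the legacy bottom-up layout, down by delta_mmap + delta_stack in the top-down layout. The arithmetic only, with invented base addresses and delta values:

    #include <stdio.h>

    #define TASK_UNMAPPED_BASE      0x10000000UL
    #define STACK_TOP               0x7fff0000UL    /* stands in for mmap_base() */
    #define PAGE_SHIFT              12

    int main(void)
    {
            unsigned long delta_mmap  = 0x1a3UL << PAGE_SHIFT;  /* per-exec random */
            unsigned long delta_stack = 0x07fUL << PAGE_SHIFT;  /* per-exec random */

            unsigned long legacy_base  = TASK_UNMAPPED_BASE + delta_mmap;
            unsigned long topdown_base = STACK_TOP - (delta_mmap + delta_stack);

            printf("legacy:  %#lx\ntopdown: %#lx\n", legacy_base, topdown_base);
            return 0;
    }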
5880diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5881index 967d144..db12197 100644
5882--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5883+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5884@@ -11,12 +11,14 @@
5885 #ifndef _ASM_PROC_CACHE_H
5886 #define _ASM_PROC_CACHE_H
5887
5888+#include <linux/const.h>
5889+
5890 /* L1 cache */
5891
5892 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5893 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5894-#define L1_CACHE_BYTES 16 /* bytes per entry */
5895 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5896+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5897 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5898
5899 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5900diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5901index bcb5df2..84fabd2 100644
5902--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5903+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5904@@ -16,13 +16,15 @@
5905 #ifndef _ASM_PROC_CACHE_H
5906 #define _ASM_PROC_CACHE_H
5907
5908+#include <linux/const.h>
5909+
5910 /*
5911 * L1 cache
5912 */
5913 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5914 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5915-#define L1_CACHE_BYTES 32 /* bytes per entry */
5916 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5917+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5918 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5919
5920 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5921diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5922index 4ce7a01..449202a 100644
5923--- a/arch/openrisc/include/asm/cache.h
5924+++ b/arch/openrisc/include/asm/cache.h
5925@@ -19,11 +19,13 @@
5926 #ifndef __ASM_OPENRISC_CACHE_H
5927 #define __ASM_OPENRISC_CACHE_H
5928
5929+#include <linux/const.h>
5930+
5931 /* FIXME: How can we replace these with values from the CPU...
5932 * they shouldn't be hard-coded!
5933 */
5934
5935-#define L1_CACHE_BYTES 16
5936 #define L1_CACHE_SHIFT 4
5937+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5938
5939 #endif /* __ASM_OPENRISC_CACHE_H */
5940diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5941index af9cf30..2aae9b2 100644
5942--- a/arch/parisc/include/asm/atomic.h
5943+++ b/arch/parisc/include/asm/atomic.h
5944@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5945
5946 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5947
5948+#define atomic64_read_unchecked(v) atomic64_read(v)
5949+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5950+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5951+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5952+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5953+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5954+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5955+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5956+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5957+
5958 #endif /* !CONFIG_64BIT */
5959
5960
5961diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5962index 47f11c7..3420df2 100644
5963--- a/arch/parisc/include/asm/cache.h
5964+++ b/arch/parisc/include/asm/cache.h
5965@@ -5,6 +5,7 @@
5966 #ifndef __ARCH_PARISC_CACHE_H
5967 #define __ARCH_PARISC_CACHE_H
5968
5969+#include <linux/const.h>
5970
5971 /*
5972 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5973@@ -15,13 +16,13 @@
5974 * just ruin performance.
5975 */
5976 #ifdef CONFIG_PA20
5977-#define L1_CACHE_BYTES 64
5978 #define L1_CACHE_SHIFT 6
5979 #else
5980-#define L1_CACHE_BYTES 32
5981 #define L1_CACHE_SHIFT 5
5982 #endif
5983
5984+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5985+
5986 #ifndef __ASSEMBLY__
5987
5988 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5989diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5990index 19f6cb1..6c78cf2 100644
5991--- a/arch/parisc/include/asm/elf.h
5992+++ b/arch/parisc/include/asm/elf.h
5993@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5994
5995 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5996
5997+#ifdef CONFIG_PAX_ASLR
5998+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5999+
6000+#define PAX_DELTA_MMAP_LEN 16
6001+#define PAX_DELTA_STACK_LEN 16
6002+#endif
6003+
6004 /* This yields a mask that user programs can use to figure out what
6005 instruction set this CPU supports. This could be done in user space,
6006 but it's not easy, and we've already done it here. */
6007diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6008index fc987a1..6e068ef 100644
6009--- a/arch/parisc/include/asm/pgalloc.h
6010+++ b/arch/parisc/include/asm/pgalloc.h
6011@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6012 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6013 }
6014
6015+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6016+{
6017+ pgd_populate(mm, pgd, pmd);
6018+}
6019+
6020 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6021 {
6022 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6023@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6024 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6025 #define pmd_free(mm, x) do { } while (0)
6026 #define pgd_populate(mm, pmd, pte) BUG()
6027+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6028
6029 #endif
6030
6031diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6032index 7df49fa..38b62bf 100644
6033--- a/arch/parisc/include/asm/pgtable.h
6034+++ b/arch/parisc/include/asm/pgtable.h
6035@@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6036 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6037 #define PAGE_COPY PAGE_EXECREAD
6038 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6039+
6040+#ifdef CONFIG_PAX_PAGEEXEC
6041+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6042+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6043+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6044+#else
6045+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6046+# define PAGE_COPY_NOEXEC PAGE_COPY
6047+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6048+#endif
6049+
6050 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6051 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6052 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6053diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6054index 4ba2c93..f5e3974 100644
6055--- a/arch/parisc/include/asm/uaccess.h
6056+++ b/arch/parisc/include/asm/uaccess.h
6057@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6058 const void __user *from,
6059 unsigned long n)
6060 {
6061- int sz = __compiletime_object_size(to);
6062+ size_t sz = __compiletime_object_size(to);
6063 int ret = -EFAULT;
6064
6065- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6066+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6067 ret = __copy_from_user(to, from, n);
6068 else
6069 copy_from_user_overflow();
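
__compiletime_object_size() reports an unknown size as (size_t)-1, so the hunk's switch from int to size_t keeps the sentinel comparison exact and stops real sizes above INT_MAX from being truncated before the "sz >= n" test. The truncation hazard in a few lines:

    #include <limits.h>
    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            size_t unknown = (size_t)-1;            /* "size not known" marker */
            size_t big = (size_t)INT_MAX + 2;       /* a size an int cannot hold */

            printf("sentinel survives in size_t: %d\n", unknown == (size_t)-1);
            /* through an int the value is mangled (negative on common ABIs) */
            printf("big kept in an int:         %d\n", (int)big);
            return 0;
    }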
6070diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6071index 2a625fb..9908930 100644
6072--- a/arch/parisc/kernel/module.c
6073+++ b/arch/parisc/kernel/module.c
6074@@ -98,16 +98,38 @@
6075
6076 /* three functions to determine where in the module core
6077 * or init pieces the location is */
6078+static inline int in_init_rx(struct module *me, void *loc)
6079+{
6080+ return (loc >= me->module_init_rx &&
6081+ loc < (me->module_init_rx + me->init_size_rx));
6082+}
6083+
6084+static inline int in_init_rw(struct module *me, void *loc)
6085+{
6086+ return (loc >= me->module_init_rw &&
6087+ loc < (me->module_init_rw + me->init_size_rw));
6088+}
6089+
6090 static inline int in_init(struct module *me, void *loc)
6091 {
6092- return (loc >= me->module_init &&
6093- loc <= (me->module_init + me->init_size));
6094+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6095+}
6096+
6097+static inline int in_core_rx(struct module *me, void *loc)
6098+{
6099+ return (loc >= me->module_core_rx &&
6100+ loc < (me->module_core_rx + me->core_size_rx));
6101+}
6102+
6103+static inline int in_core_rw(struct module *me, void *loc)
6104+{
6105+ return (loc >= me->module_core_rw &&
6106+ loc < (me->module_core_rw + me->core_size_rw));
6107 }
6108
6109 static inline int in_core(struct module *me, void *loc)
6110 {
6111- return (loc >= me->module_core &&
6112- loc <= (me->module_core + me->core_size));
6113+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6114 }
6115
6116 static inline int in_local(struct module *me, void *loc)
6117@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6118 }
6119
6120 /* align things a bit */
6121- me->core_size = ALIGN(me->core_size, 16);
6122- me->arch.got_offset = me->core_size;
6123- me->core_size += gots * sizeof(struct got_entry);
6124+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6125+ me->arch.got_offset = me->core_size_rw;
6126+ me->core_size_rw += gots * sizeof(struct got_entry);
6127
6128- me->core_size = ALIGN(me->core_size, 16);
6129- me->arch.fdesc_offset = me->core_size;
6130- me->core_size += fdescs * sizeof(Elf_Fdesc);
6131+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6132+ me->arch.fdesc_offset = me->core_size_rw;
6133+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
6134
6135 me->arch.got_max = gots;
6136 me->arch.fdesc_max = fdescs;
6137@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6138
6139 BUG_ON(value == 0);
6140
6141- got = me->module_core + me->arch.got_offset;
6142+ got = me->module_core_rw + me->arch.got_offset;
6143 for (i = 0; got[i].addr; i++)
6144 if (got[i].addr == value)
6145 goto out;
6146@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6147 #ifdef CONFIG_64BIT
6148 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6149 {
6150- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
6151+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
6152
6153 if (!value) {
6154 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
6155@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6156
6157 /* Create new one */
6158 fdesc->addr = value;
6159- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6160+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6161 return (Elf_Addr)fdesc;
6162 }
6163 #endif /* CONFIG_64BIT */
6164@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
6165
6166 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
6167 end = table + sechdrs[me->arch.unwind_section].sh_size;
6168- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6169+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6170
6171 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
6172 me->arch.unwind_section, table, end, gp);
6173diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
6174index f76c108..92bad82 100644
6175--- a/arch/parisc/kernel/sys_parisc.c
6176+++ b/arch/parisc/kernel/sys_parisc.c
6177@@ -33,9 +33,11 @@
6178 #include <linux/utsname.h>
6179 #include <linux/personality.h>
6180
6181-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6182+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
6183+ unsigned long flags)
6184 {
6185 struct vm_area_struct *vma;
6186+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6187
6188 addr = PAGE_ALIGN(addr);
6189
6190@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6191 /* At this point: (!vma || addr < vma->vm_end). */
6192 if (TASK_SIZE - len < addr)
6193 return -ENOMEM;
6194- if (!vma || addr + len <= vma->vm_start)
6195+ if (check_heap_stack_gap(vma, addr, len, offset))
6196 return addr;
6197 addr = vma->vm_end;
6198 }
6199@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
6200 return offset & 0x3FF000;
6201 }
6202
6203-static unsigned long get_shared_area(struct address_space *mapping,
6204- unsigned long addr, unsigned long len, unsigned long pgoff)
6205+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
6206+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6207 {
6208 struct vm_area_struct *vma;
6209 int offset = mapping ? get_offset(mapping) : 0;
6210+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6211
6212 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
6213
6214@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
6215 /* At this point: (!vma || addr < vma->vm_end). */
6216 if (TASK_SIZE - len < addr)
6217 return -ENOMEM;
6218- if (!vma || addr + len <= vma->vm_start)
6219+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
6220 return addr;
6221 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
6222 if (addr < vma->vm_end) /* handle wraparound */
6223@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6224 if (flags & MAP_FIXED)
6225 return addr;
6226 if (!addr)
6227- addr = TASK_UNMAPPED_BASE;
6228+ addr = current->mm->mmap_base;
6229
6230 if (filp) {
6231- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6232+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6233 } else if(flags & MAP_SHARED) {
6234- addr = get_shared_area(NULL, addr, len, pgoff);
6235+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6236 } else {
6237- addr = get_unshared_area(addr, len);
6238+ addr = get_unshared_area(filp, addr, len, flags);
6239 }
6240 return addr;
6241 }
6242diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6243index 45ba99f..8e22c33 100644
6244--- a/arch/parisc/kernel/traps.c
6245+++ b/arch/parisc/kernel/traps.c
6246@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6247
6248 down_read(&current->mm->mmap_sem);
6249 vma = find_vma(current->mm,regs->iaoq[0]);
6250- if (vma && (regs->iaoq[0] >= vma->vm_start)
6251- && (vma->vm_flags & VM_EXEC)) {
6252-
6253+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6254 fault_address = regs->iaoq[0];
6255 fault_space = regs->iasq[0];
6256
6257diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6258index 18162ce..94de376 100644
6259--- a/arch/parisc/mm/fault.c
6260+++ b/arch/parisc/mm/fault.c
6261@@ -15,6 +15,7 @@
6262 #include <linux/sched.h>
6263 #include <linux/interrupt.h>
6264 #include <linux/module.h>
6265+#include <linux/unistd.h>
6266
6267 #include <asm/uaccess.h>
6268 #include <asm/traps.h>
6269@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6270 static unsigned long
6271 parisc_acctyp(unsigned long code, unsigned int inst)
6272 {
6273- if (code == 6 || code == 16)
6274+ if (code == 6 || code == 7 || code == 16)
6275 return VM_EXEC;
6276
6277 switch (inst & 0xf0000000) {
6278@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6279 }
6280 #endif
6281
6282+#ifdef CONFIG_PAX_PAGEEXEC
6283+/*
6284+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6285+ *
6286+ * returns 1 when task should be killed
6287+ * 2 when rt_sigreturn trampoline was detected
6288+ * 3 when unpatched PLT trampoline was detected
6289+ */
6290+static int pax_handle_fetch_fault(struct pt_regs *regs)
6291+{
6292+	int err __maybe_unused;
6293+
6294+#ifdef CONFIG_PAX_EMUPLT
6295+
6296+ do { /* PaX: unpatched PLT emulation */
6297+ unsigned int bl, depwi;
6298+
6299+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6300+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6301+
6302+ if (err)
6303+ break;
6304+
6305+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6306+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6307+
6308+ err = get_user(ldw, (unsigned int *)addr);
6309+ err |= get_user(bv, (unsigned int *)(addr+4));
6310+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6311+
6312+ if (err)
6313+ break;
6314+
6315+ if (ldw == 0x0E801096U &&
6316+ bv == 0xEAC0C000U &&
6317+ ldw2 == 0x0E881095U)
6318+ {
6319+ unsigned int resolver, map;
6320+
6321+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6322+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6323+ if (err)
6324+ break;
6325+
6326+ regs->gr[20] = instruction_pointer(regs)+8;
6327+ regs->gr[21] = map;
6328+ regs->gr[22] = resolver;
6329+ regs->iaoq[0] = resolver | 3UL;
6330+ regs->iaoq[1] = regs->iaoq[0] + 4;
6331+ return 3;
6332+ }
6333+ }
6334+ } while (0);
6335+#endif
6336+
6337+#ifdef CONFIG_PAX_EMUTRAMP
6338+
6339+#ifndef CONFIG_PAX_EMUSIGRT
6340+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6341+ return 1;
6342+#endif
6343+
6344+ do { /* PaX: rt_sigreturn emulation */
6345+ unsigned int ldi1, ldi2, bel, nop;
6346+
6347+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6348+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6349+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6350+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6351+
6352+ if (err)
6353+ break;
6354+
6355+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6356+ ldi2 == 0x3414015AU &&
6357+ bel == 0xE4008200U &&
6358+ nop == 0x08000240U)
6359+ {
6360+ regs->gr[25] = (ldi1 & 2) >> 1;
6361+ regs->gr[20] = __NR_rt_sigreturn;
6362+ regs->gr[31] = regs->iaoq[1] + 16;
6363+ regs->sr[0] = regs->iasq[1];
6364+ regs->iaoq[0] = 0x100UL;
6365+ regs->iaoq[1] = regs->iaoq[0] + 4;
6366+ regs->iasq[0] = regs->sr[2];
6367+ regs->iasq[1] = regs->sr[2];
6368+ return 2;
6369+ }
6370+ } while (0);
6371+#endif
6372+
6373+ return 1;
6374+}
6375+
6376+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6377+{
6378+ unsigned long i;
6379+
6380+ printk(KERN_ERR "PAX: bytes at PC: ");
6381+ for (i = 0; i < 5; i++) {
6382+ unsigned int c;
6383+ if (get_user(c, (unsigned int *)pc+i))
6384+ printk(KERN_CONT "???????? ");
6385+ else
6386+ printk(KERN_CONT "%08x ", c);
6387+ }
6388+ printk("\n");
6389+}
6390+#endif
6391+
6392 int fixup_exception(struct pt_regs *regs)
6393 {
6394 const struct exception_table_entry *fix;
6395@@ -192,8 +303,33 @@ good_area:
6396
6397 acc_type = parisc_acctyp(code,regs->iir);
6398
6399- if ((vma->vm_flags & acc_type) != acc_type)
6400+ if ((vma->vm_flags & acc_type) != acc_type) {
6401+
6402+#ifdef CONFIG_PAX_PAGEEXEC
6403+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6404+ (address & ~3UL) == instruction_pointer(regs))
6405+ {
6406+ up_read(&mm->mmap_sem);
6407+ switch (pax_handle_fetch_fault(regs)) {
6408+
6409+#ifdef CONFIG_PAX_EMUPLT
6410+ case 3:
6411+ return;
6412+#endif
6413+
6414+#ifdef CONFIG_PAX_EMUTRAMP
6415+ case 2:
6416+ return;
6417+#endif
6418+
6419+ }
6420+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6421+ do_group_exit(SIGKILL);
6422+ }
6423+#endif
6424+
6425 goto bad_area;
6426+ }
6427
6428 /*
6429 * If for any reason at all we couldn't handle the fault, make
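
The parisc fault handler above shows PaX's emulation strategy for PAGEEXEC on binaries that still execute stubs from data pages: fetch the words at the faulting PC, compare them against the exact encoding of an unpatched PLT slot or sigreturn trampoline, and perform the stub's effect inside the kernel instead of executing it. A toy version of that match-then-emulate shape, reusing the hunk's bl/depwi constants but with an invented register effect:

    #include <stdio.h>

    struct regs_sketch { unsigned long pc, target; };

    static int emulate_stub(struct regs_sketch *regs, const unsigned int *insn)
    {
            /* the two words the hunk matches for an unpatched PLT entry */
            if (insn[0] == 0xEA9F1FDDU && insn[1] == 0xD6801C1EU) {
                    regs->target = regs->pc + 8;    /* made-up stub effect */
                    regs->pc = regs->target;
                    return 1;                       /* emulated, resume task */
            }
            return 0;                               /* genuine exec violation */
    }

    int main(void)
    {
            unsigned int stub[2] = { 0xEA9F1FDDU, 0xD6801C1EU };
            struct regs_sketch regs = { .pc = 0x10000 };

            printf("emulated=%d new pc=%#lx\n",
                   emulate_stub(&regs, stub), regs.pc);
            return 0;
    }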
6430diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6431index e3b1d41..8e81edf 100644
6432--- a/arch/powerpc/include/asm/atomic.h
6433+++ b/arch/powerpc/include/asm/atomic.h
6434@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6435 return t1;
6436 }
6437
6438+#define atomic64_read_unchecked(v) atomic64_read(v)
6439+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6440+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6441+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6442+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6443+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6444+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6445+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6446+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6447+
6448 #endif /* __powerpc64__ */
6449
6450 #endif /* __KERNEL__ */
6451diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6452index 9e495c9..b6878e5 100644
6453--- a/arch/powerpc/include/asm/cache.h
6454+++ b/arch/powerpc/include/asm/cache.h
6455@@ -3,6 +3,7 @@
6456
6457 #ifdef __KERNEL__
6458
6459+#include <linux/const.h>
6460
6461 /* bytes per L1 cache line */
6462 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6463@@ -22,7 +23,7 @@
6464 #define L1_CACHE_SHIFT 7
6465 #endif
6466
6467-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6468+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6469
6470 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6471
6472diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6473index 6abf0a1..459d0f1 100644
6474--- a/arch/powerpc/include/asm/elf.h
6475+++ b/arch/powerpc/include/asm/elf.h
6476@@ -28,8 +28,19 @@
6477 the loader. We need to make sure that it is out of the way of the program
6478 that it will "exec", and that there is sufficient room for the brk. */
6479
6480-extern unsigned long randomize_et_dyn(unsigned long base);
6481-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6482+#define ELF_ET_DYN_BASE (0x20000000)
6483+
6484+#ifdef CONFIG_PAX_ASLR
6485+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6486+
6487+#ifdef __powerpc64__
6488+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6489+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6490+#else
6491+#define PAX_DELTA_MMAP_LEN 15
6492+#define PAX_DELTA_STACK_LEN 15
6493+#endif
6494+#endif
6495
6496 /*
6497 * Our registers are always unsigned longs, whether we're a 32 bit
6498@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6499 (0x7ff >> (PAGE_SHIFT - 12)) : \
6500 (0x3ffff >> (PAGE_SHIFT - 12)))
6501
6502-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6503-#define arch_randomize_brk arch_randomize_brk
6504-
6505-
6506 #ifdef CONFIG_SPU_BASE
6507 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6508 #define NT_SPU 1
6509diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6510index 8196e9c..d83a9f3 100644
6511--- a/arch/powerpc/include/asm/exec.h
6512+++ b/arch/powerpc/include/asm/exec.h
6513@@ -4,6 +4,6 @@
6514 #ifndef _ASM_POWERPC_EXEC_H
6515 #define _ASM_POWERPC_EXEC_H
6516
6517-extern unsigned long arch_align_stack(unsigned long sp);
6518+#define arch_align_stack(x) ((x) & ~0xfUL)
6519
6520 #endif /* _ASM_POWERPC_EXEC_H */
6521diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6522index 5acabbd..7ea14fa 100644
6523--- a/arch/powerpc/include/asm/kmap_types.h
6524+++ b/arch/powerpc/include/asm/kmap_types.h
6525@@ -10,7 +10,7 @@
6526 * 2 of the License, or (at your option) any later version.
6527 */
6528
6529-#define KM_TYPE_NR 16
6530+#define KM_TYPE_NR 17
6531
6532 #endif /* __KERNEL__ */
6533 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6534diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6535index 8565c25..2865190 100644
6536--- a/arch/powerpc/include/asm/mman.h
6537+++ b/arch/powerpc/include/asm/mman.h
6538@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6539 }
6540 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6541
6542-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6543+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6544 {
6545 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6546 }
6547diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6548index f072e97..b436dee 100644
6549--- a/arch/powerpc/include/asm/page.h
6550+++ b/arch/powerpc/include/asm/page.h
6551@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6552 * and needs to be executable. This means the whole heap ends
6553 * up being executable.
6554 */
6555-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6556- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6557+#define VM_DATA_DEFAULT_FLAGS32 \
6558+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6559+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6560
6561 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6562 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6563@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6564 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6565 #endif
6566
6567+#define ktla_ktva(addr) (addr)
6568+#define ktva_ktla(addr) (addr)
6569+
6570 /*
6571 * Use the top bit of the higher-level page table entries to indicate whether
6572 * the entries we point to contain hugepages. This works because we know that
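
The VM_DATA_DEFAULT_FLAGS32 change above stops powerpc from handing every 32-bit data mapping VM_EXEC unconditionally: the bit is now granted only when the task's personality carries READ_IMPLIES_EXEC, which is set for old binaries lacking a PT_GNU_STACK header. The selection logic in isolation; READ_IMPLIES_EXEC's value matches linux/personality.h, the VM_* values are simplified:

    #include <stdio.h>

    #define READ_IMPLIES_EXEC       0x0400000
    #define VM_READ                 0x1
    #define VM_WRITE                0x2
    #define VM_EXEC                 0x4

    static unsigned long data_default_flags(unsigned int personality)
    {
            return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
                   VM_READ | VM_WRITE;
    }

    int main(void)
    {
            printf("legacy binary: %#lx\n", data_default_flags(READ_IMPLIES_EXEC));
            printf("modern binary: %#lx\n", data_default_flags(0));
            return 0;
    }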
6573diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6574index cd915d6..c10cee8 100644
6575--- a/arch/powerpc/include/asm/page_64.h
6576+++ b/arch/powerpc/include/asm/page_64.h
6577@@ -154,15 +154,18 @@ do { \
6578 * stack by default, so in the absence of a PT_GNU_STACK program header
6579 * we turn execute permission off.
6580 */
6581-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6582- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6583+#define VM_STACK_DEFAULT_FLAGS32 \
6584+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6585+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6586
6587 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6588 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6589
6590+#ifndef CONFIG_PAX_PAGEEXEC
6591 #define VM_STACK_DEFAULT_FLAGS \
6592 (is_32bit_task() ? \
6593 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6594+#endif
6595
6596 #include <asm-generic/getorder.h>
6597
6598diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6599index 292725c..f87ae14 100644
6600--- a/arch/powerpc/include/asm/pgalloc-64.h
6601+++ b/arch/powerpc/include/asm/pgalloc-64.h
6602@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6603 #ifndef CONFIG_PPC_64K_PAGES
6604
6605 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6606+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6607
6608 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6609 {
6610@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6611 pud_set(pud, (unsigned long)pmd);
6612 }
6613
6614+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6615+{
6616+ pud_populate(mm, pud, pmd);
6617+}
6618+
6619 #define pmd_populate(mm, pmd, pte_page) \
6620 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6621 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6622@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6623 #else /* CONFIG_PPC_64K_PAGES */
6624
6625 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6626+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6627
6628 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6629 pte_t *pte)
6630diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6631index a9cbd3b..3b67efa 100644
6632--- a/arch/powerpc/include/asm/pgtable.h
6633+++ b/arch/powerpc/include/asm/pgtable.h
6634@@ -2,6 +2,7 @@
6635 #define _ASM_POWERPC_PGTABLE_H
6636 #ifdef __KERNEL__
6637
6638+#include <linux/const.h>
6639 #ifndef __ASSEMBLY__
6640 #include <asm/processor.h> /* For TASK_SIZE */
6641 #include <asm/mmu.h>
6642diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6643index 4aad413..85d86bf 100644
6644--- a/arch/powerpc/include/asm/pte-hash32.h
6645+++ b/arch/powerpc/include/asm/pte-hash32.h
6646@@ -21,6 +21,7 @@
6647 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6648 #define _PAGE_USER 0x004 /* usermode access allowed */
6649 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6650+#define _PAGE_EXEC _PAGE_GUARDED
6651 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6652 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6653 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6654diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6655index 3d5c9dc..62f8414 100644
6656--- a/arch/powerpc/include/asm/reg.h
6657+++ b/arch/powerpc/include/asm/reg.h
6658@@ -215,6 +215,7 @@
6659 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6660 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6661 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6662+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6663 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6664 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6665 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6666diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6667index 406b7b9..af63426 100644
6668--- a/arch/powerpc/include/asm/thread_info.h
6669+++ b/arch/powerpc/include/asm/thread_info.h
6670@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6671 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6672 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6673 #define TIF_SINGLESTEP 8 /* singlestepping active */
6674-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6675 #define TIF_SECCOMP 10 /* secure computing */
6676 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6677 #define TIF_NOERROR 12 /* Force successful syscall return */
6678@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6679 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6680 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6681 for stack store? */
6682+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6683+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
6684+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6685
6686 /* as above, but as bit values */
6687 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6688@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6689 #define _TIF_UPROBE (1<<TIF_UPROBE)
6690 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6691 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6692+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6693 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6694- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6695+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6696+ _TIF_GRSEC_SETXID)
6697
6698 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6699 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
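
TIF_MEMDIE moves to bit 17 above so that bit 9 can hold TIF_GRSEC_SETXID, and the new comment explains why the placement matters: the syscall-work mask is tested with PowerPC's andi. instruction, whose immediate is only 16 bits wide, so every flag ORed into the mask must live in bits 0-15. A compile-time check of that constraint (bit values copied from the hunk, everything else illustrative):

    /* All flags ORed into the syscall-entry mask must sit in bits 0-15,
     * or the andi. immediate cannot encode the mask. */
    #define TIF_SYSCALL_TRACE       0
    #define TIF_SYSCALL_AUDIT       7
    #define TIF_GRSEC_SETXID        9
    #define TIF_SECCOMP             10
    #define TIF_SYSCALL_TRACEPOINT  15

    #define _TIF(b) (1UL << (b))
    #define _TIF_SYSCALL_T_OR_A (_TIF(TIF_SYSCALL_TRACE) | _TIF(TIF_SYSCALL_AUDIT) | \
                                 _TIF(TIF_SECCOMP) | _TIF(TIF_SYSCALL_TRACEPOINT) | \
                                 _TIF(TIF_GRSEC_SETXID))

    _Static_assert((_TIF_SYSCALL_T_OR_A & ~0xffffUL) == 0,
                   "syscall work mask must fit a 16-bit andi. immediate");
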
6700diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6701index 4db4959..aba5c41 100644
6702--- a/arch/powerpc/include/asm/uaccess.h
6703+++ b/arch/powerpc/include/asm/uaccess.h
6704@@ -318,52 +318,6 @@ do { \
6705 extern unsigned long __copy_tofrom_user(void __user *to,
6706 const void __user *from, unsigned long size);
6707
6708-#ifndef __powerpc64__
6709-
6710-static inline unsigned long copy_from_user(void *to,
6711- const void __user *from, unsigned long n)
6712-{
6713- unsigned long over;
6714-
6715- if (access_ok(VERIFY_READ, from, n))
6716- return __copy_tofrom_user((__force void __user *)to, from, n);
6717- if ((unsigned long)from < TASK_SIZE) {
6718- over = (unsigned long)from + n - TASK_SIZE;
6719- return __copy_tofrom_user((__force void __user *)to, from,
6720- n - over) + over;
6721- }
6722- return n;
6723-}
6724-
6725-static inline unsigned long copy_to_user(void __user *to,
6726- const void *from, unsigned long n)
6727-{
6728- unsigned long over;
6729-
6730- if (access_ok(VERIFY_WRITE, to, n))
6731- return __copy_tofrom_user(to, (__force void __user *)from, n);
6732- if ((unsigned long)to < TASK_SIZE) {
6733- over = (unsigned long)to + n - TASK_SIZE;
6734- return __copy_tofrom_user(to, (__force void __user *)from,
6735- n - over) + over;
6736- }
6737- return n;
6738-}
6739-
6740-#else /* __powerpc64__ */
6741-
6742-#define __copy_in_user(to, from, size) \
6743- __copy_tofrom_user((to), (from), (size))
6744-
6745-extern unsigned long copy_from_user(void *to, const void __user *from,
6746- unsigned long n);
6747-extern unsigned long copy_to_user(void __user *to, const void *from,
6748- unsigned long n);
6749-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6750- unsigned long n);
6751-
6752-#endif /* __powerpc64__ */
6753-
6754 static inline unsigned long __copy_from_user_inatomic(void *to,
6755 const void __user *from, unsigned long n)
6756 {
6757@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6758 if (ret == 0)
6759 return 0;
6760 }
6761+
6762+ if (!__builtin_constant_p(n))
6763+ check_object_size(to, n, false);
6764+
6765 return __copy_tofrom_user((__force void __user *)to, from, n);
6766 }
6767
6768@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6769 if (ret == 0)
6770 return 0;
6771 }
6772+
6773+ if (!__builtin_constant_p(n))
6774+ check_object_size(from, n, true);
6775+
6776 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6777 }
6778
6779@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6780 return __copy_to_user_inatomic(to, from, size);
6781 }
6782
6783+#ifndef __powerpc64__
6784+
6785+static inline unsigned long __must_check copy_from_user(void *to,
6786+ const void __user *from, unsigned long n)
6787+{
6788+ unsigned long over;
6789+
6790+ if ((long)n < 0)
6791+ return n;
6792+
6793+ if (access_ok(VERIFY_READ, from, n)) {
6794+ if (!__builtin_constant_p(n))
6795+ check_object_size(to, n, false);
6796+ return __copy_tofrom_user((__force void __user *)to, from, n);
6797+ }
6798+ if ((unsigned long)from < TASK_SIZE) {
6799+ over = (unsigned long)from + n - TASK_SIZE;
6800+ if (!__builtin_constant_p(n - over))
6801+ check_object_size(to, n - over, false);
6802+ return __copy_tofrom_user((__force void __user *)to, from,
6803+ n - over) + over;
6804+ }
6805+ return n;
6806+}
6807+
6808+static inline unsigned long __must_check copy_to_user(void __user *to,
6809+ const void *from, unsigned long n)
6810+{
6811+ unsigned long over;
6812+
6813+ if ((long)n < 0)
6814+ return n;
6815+
6816+ if (access_ok(VERIFY_WRITE, to, n)) {
6817+ if (!__builtin_constant_p(n))
6818+ check_object_size(from, n, true);
6819+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6820+ }
6821+ if ((unsigned long)to < TASK_SIZE) {
6822+ over = (unsigned long)to + n - TASK_SIZE;
6823+ if (!__builtin_constant_p(n))
6824+ check_object_size(from, n - over, true);
6825+ return __copy_tofrom_user(to, (__force void __user *)from,
6826+ n - over) + over;
6827+ }
6828+ return n;
6829+}
6830+
6831+#else /* __powerpc64__ */
6832+
6833+#define __copy_in_user(to, from, size) \
6834+ __copy_tofrom_user((to), (from), (size))
6835+
6836+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6837+{
6838+ if ((long)n < 0 || n > INT_MAX)
6839+ return n;
6840+
6841+ if (!__builtin_constant_p(n))
6842+ check_object_size(to, n, false);
6843+
6844+ if (likely(access_ok(VERIFY_READ, from, n)))
6845+ n = __copy_from_user(to, from, n);
6846+ else
6847+ memset(to, 0, n);
6848+ return n;
6849+}
6850+
6851+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6852+{
6853+ if ((long)n < 0 || n > INT_MAX)
6854+ return n;
6855+
6856+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6857+ if (!__builtin_constant_p(n))
6858+ check_object_size(from, n, true);
6859+ n = __copy_to_user(to, from, n);
6860+ }
6861+ return n;
6862+}
6863+
6864+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6865+ unsigned long n);
6866+
6867+#endif /* __powerpc64__ */
6868+
6869 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6870
6871 static inline unsigned long clear_user(void __user *addr, unsigned long size)
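
The rewritten copy helpers above add two hardening steps before the raw copy: an early rejection of "negative" sizes (a size with the top bit set almost certainly comes from an integer overflow), and a check_object_size() bounds check on the kernel buffer whenever the size is not a compile-time constant. The definitions are also moved below __copy_from_user/__copy_to_user so the hardened versions can call them. A self-contained sketch of the control flow, simplified (no partial-range handling) and with *_sk stubs standing in for the real kernel helpers:

    #include <stdio.h>
    #include <string.h>

    /* Stubs standing in for kernel helpers; illustrative only. */
    static int access_ok_sk(const void *p, unsigned long n) { (void)n; return p != NULL; }
    static void check_object_size_sk(const void *p, unsigned long n) { (void)p; (void)n; }
    static unsigned long raw_copy_sk(void *to, const void *from, unsigned long n)
    { memcpy(to, from, n); return 0; }   /* returns bytes NOT copied */

    static unsigned long copy_from_user_sk(void *to, const void *from, unsigned long n)
    {
        if ((long)n < 0)               /* size with top bit set: refuse early */
            return n;
        if (!access_ok_sk(from, n))    /* bad user range: nothing copied */
            return n;
        if (!__builtin_constant_p(n))  /* runtime-sized copy: bound-check 'to' */
            check_object_size_sk(to, n);
        return raw_copy_sk(to, from, n);
    }

    int main(void)
    {
        char src[8] = "user", dst[8];
        printf("left: %lu, dst=%s\n", copy_from_user_sk(dst, src, sizeof(src)), dst);
        return 0;
    }
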
6872diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6873index 4684e33..acc4d19e 100644
6874--- a/arch/powerpc/kernel/exceptions-64e.S
6875+++ b/arch/powerpc/kernel/exceptions-64e.S
6876@@ -715,6 +715,7 @@ storage_fault_common:
6877 std r14,_DAR(r1)
6878 std r15,_DSISR(r1)
6879 addi r3,r1,STACK_FRAME_OVERHEAD
6880+ bl .save_nvgprs
6881 mr r4,r14
6882 mr r5,r15
6883 ld r14,PACA_EXGEN+EX_R14(r13)
6884@@ -723,8 +724,7 @@ storage_fault_common:
6885 cmpdi r3,0
6886 bne- 1f
6887 b .ret_from_except_lite
6888-1: bl .save_nvgprs
6889- mr r5,r3
6890+1: mr r5,r3
6891 addi r3,r1,STACK_FRAME_OVERHEAD
6892 ld r4,_DAR(r1)
6893 bl .bad_page_fault
6894diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6895index 3684cbd..bc89eab 100644
6896--- a/arch/powerpc/kernel/exceptions-64s.S
6897+++ b/arch/powerpc/kernel/exceptions-64s.S
6898@@ -1206,10 +1206,10 @@ handle_page_fault:
6899 11: ld r4,_DAR(r1)
6900 ld r5,_DSISR(r1)
6901 addi r3,r1,STACK_FRAME_OVERHEAD
6902+ bl .save_nvgprs
6903 bl .do_page_fault
6904 cmpdi r3,0
6905 beq+ 12f
6906- bl .save_nvgprs
6907 mr r5,r3
6908 addi r3,r1,STACK_FRAME_OVERHEAD
6909 lwz r4,_DAR(r1)
6910diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6911index 2e3200c..72095ce 100644
6912--- a/arch/powerpc/kernel/module_32.c
6913+++ b/arch/powerpc/kernel/module_32.c
6914@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6915 me->arch.core_plt_section = i;
6916 }
6917 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6918- printk("Module doesn't contain .plt or .init.plt sections.\n");
6919+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6920 return -ENOEXEC;
6921 }
6922
6923@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6924
6925 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6926 /* Init, or core PLT? */
6927- if (location >= mod->module_core
6928- && location < mod->module_core + mod->core_size)
6929+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6930+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6931 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6932- else
6933+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6934+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6935 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6936+ else {
6937+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6938+ return ~0UL;
6939+ }
6940
6941 /* Find this entry, or if that fails, the next avail. entry */
6942 while (entry->jump[0]) {
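
With the PaX rx/rw split, a module no longer has a single module_core range, so do_plt_call() above must classify the relocation site against four regions (core rx/rw, init rx/rw) and reject anything that falls outside all of them. A sketch of that classification; the struct fields mirror the patched names, the rest is illustrative:

    #include <stdbool.h>
    #include <stddef.h>

    struct mod_regions {
        const char *module_core_rx, *module_core_rw;
        const char *module_init_rx, *module_init_rw;
        size_t core_size_rx, core_size_rw, init_size_rx, init_size_rw;
    };

    static bool in_region(const void *loc, const char *base, size_t len)
    {
        const char *p = loc;
        return base != NULL && p >= base && p < base + len;
    }

    /* Mirrors the patched test: 0 = core PLT, 1 = init PLT, -1 = invalid. */
    static int classify_plt_site(const struct mod_regions *m, const void *loc)
    {
        if (in_region(loc, m->module_core_rx, m->core_size_rx) ||
            in_region(loc, m->module_core_rw, m->core_size_rw))
            return 0;
        if (in_region(loc, m->module_init_rx, m->init_size_rx) ||
            in_region(loc, m->module_init_rw, m->init_size_rw))
            return 1;
        return -1;   /* R_PPC_REL24 from an unexpected address */
    }
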
6943diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6944index 8143067..21ae55b 100644
6945--- a/arch/powerpc/kernel/process.c
6946+++ b/arch/powerpc/kernel/process.c
6947@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
6948 * Lookup NIP late so we have the best chance of getting the
6949 * above info out without failing
6950 */
6951- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6952- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6953+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6954+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6955 #endif
6956 show_stack(current, (unsigned long *) regs->gpr[1]);
6957 if (!user_mode(regs))
6958@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6959 newsp = stack[0];
6960 ip = stack[STACK_FRAME_LR_SAVE];
6961 if (!firstframe || ip != lr) {
6962- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6963+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6964 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6965 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6966- printk(" (%pS)",
6967+ printk(" (%pA)",
6968 (void *)current->ret_stack[curr_frame].ret);
6969 curr_frame--;
6970 }
6971@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6972 struct pt_regs *regs = (struct pt_regs *)
6973 (sp + STACK_FRAME_OVERHEAD);
6974 lr = regs->link;
6975- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6976+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6977 regs->trap, (void *)regs->nip, (void *)lr);
6978 firstframe = 1;
6979 }
6980@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
6981 mtspr(SPRN_CTRLT, ctrl);
6982 }
6983 #endif /* CONFIG_PPC64 */
6984-
6985-unsigned long arch_align_stack(unsigned long sp)
6986-{
6987- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6988- sp -= get_random_int() & ~PAGE_MASK;
6989- return sp & ~0xf;
6990-}
6991-
6992-static inline unsigned long brk_rnd(void)
6993-{
6994- unsigned long rnd = 0;
6995-
6996- /* 8MB for 32bit, 1GB for 64bit */
6997- if (is_32bit_task())
6998- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6999- else
7000- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7001-
7002- return rnd << PAGE_SHIFT;
7003-}
7004-
7005-unsigned long arch_randomize_brk(struct mm_struct *mm)
7006-{
7007- unsigned long base = mm->brk;
7008- unsigned long ret;
7009-
7010-#ifdef CONFIG_PPC_STD_MMU_64
7011- /*
7012- * If we are using 1TB segments and we are allowed to randomise
7013- * the heap, we can put it above 1TB so it is backed by a 1TB
7014- * segment. Otherwise the heap will be in the bottom 1TB
7015- * which always uses 256MB segments and this may result in a
7016- * performance penalty.
7017- */
7018- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7019- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7020-#endif
7021-
7022- ret = PAGE_ALIGN(base + brk_rnd());
7023-
7024- if (ret < mm->brk)
7025- return mm->brk;
7026-
7027- return ret;
7028-}
7029-
7030-unsigned long randomize_et_dyn(unsigned long base)
7031-{
7032- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7033-
7034- if (ret < base)
7035- return base;
7036-
7037- return ret;
7038-}
7039diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7040index c497000..8fde506 100644
7041--- a/arch/powerpc/kernel/ptrace.c
7042+++ b/arch/powerpc/kernel/ptrace.c
7043@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
7044 return ret;
7045 }
7046
7047+#ifdef CONFIG_GRKERNSEC_SETXID
7048+extern void gr_delayed_cred_worker(void);
7049+#endif
7050+
7051 /*
7052 * We must return the syscall number to actually look up in the table.
7053 * This can be -1L to skip running any syscall at all.
7054@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7055
7056 secure_computing_strict(regs->gpr[0]);
7057
7058+#ifdef CONFIG_GRKERNSEC_SETXID
7059+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7060+ gr_delayed_cred_worker();
7061+#endif
7062+
7063 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7064 tracehook_report_syscall_entry(regs))
7065 /*
7066@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7067 {
7068 int step;
7069
7070+#ifdef CONFIG_GRKERNSEC_SETXID
7071+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7072+ gr_delayed_cred_worker();
7073+#endif
7074+
7075 audit_syscall_exit(regs);
7076
7077 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
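
The ptrace hooks above implement a defer-to-syscall-boundary pattern: whoever changes credentials sets TIF_GRSEC_SETXID on the target thread, and the thread applies the change itself at its next syscall entry or exit via an atomic test-and-clear, so the work runs exactly once and in the thread's own context. A portable userspace sketch of the pattern (the kernel uses per-thread flag words, not a global):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TIF_GRSEC_SETXID 9
    static _Atomic unsigned long thread_flags;   /* per-thread in the kernel */

    static bool test_and_clear_flag(int bit)
    {
        unsigned long mask = 1UL << bit;
        return (atomic_fetch_and(&thread_flags, ~mask) & mask) != 0;
    }

    static void syscall_entry_hook(void)
    {
        if (test_and_clear_flag(TIF_GRSEC_SETXID))
            puts("apply deferred credentials");   /* gr_delayed_cred_worker() */
    }

    int main(void)
    {
        atomic_fetch_or(&thread_flags, 1UL << TIF_GRSEC_SETXID);
        syscall_entry_hook();   /* runs the worker once */
        syscall_entry_hook();   /* flag already clear: no-op */
        return 0;
    }
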
7078diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7079index 804e323..79181c1 100644
7080--- a/arch/powerpc/kernel/signal_32.c
7081+++ b/arch/powerpc/kernel/signal_32.c
7082@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7083 /* Save user registers on the stack */
7084 frame = &rt_sf->uc.uc_mcontext;
7085 addr = frame;
7086- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7087+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7088 if (save_user_regs(regs, frame, 0, 1))
7089 goto badframe;
7090 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7091diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7092index 1ca045d..139c3f7 100644
7093--- a/arch/powerpc/kernel/signal_64.c
7094+++ b/arch/powerpc/kernel/signal_64.c
7095@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7096 current->thread.fpscr.val = 0;
7097
7098 /* Set up to return from userspace. */
7099- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7100+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7101 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7102 } else {
7103 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
7104diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
7105index 3ce1f86..c30e629 100644
7106--- a/arch/powerpc/kernel/sysfs.c
7107+++ b/arch/powerpc/kernel/sysfs.c
7108@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
7109 return NOTIFY_OK;
7110 }
7111
7112-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
7113+static struct notifier_block sysfs_cpu_nb = {
7114 .notifier_call = sysfs_cpu_notify,
7115 };
7116
7117diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
7118index 3251840..3f7c77a 100644
7119--- a/arch/powerpc/kernel/traps.c
7120+++ b/arch/powerpc/kernel/traps.c
7121@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
7122 return flags;
7123 }
7124
7125+extern void gr_handle_kernel_exploit(void);
7126+
7127 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7128 int signr)
7129 {
7130@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7131 panic("Fatal exception in interrupt");
7132 if (panic_on_oops)
7133 panic("Fatal exception");
7134+
7135+ gr_handle_kernel_exploit();
7136+
7137 do_exit(signr);
7138 }
7139
7140diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
7141index 1b2076f..835e4be 100644
7142--- a/arch/powerpc/kernel/vdso.c
7143+++ b/arch/powerpc/kernel/vdso.c
7144@@ -34,6 +34,7 @@
7145 #include <asm/firmware.h>
7146 #include <asm/vdso.h>
7147 #include <asm/vdso_datapage.h>
7148+#include <asm/mman.h>
7149
7150 #include "setup.h"
7151
7152@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7153 vdso_base = VDSO32_MBASE;
7154 #endif
7155
7156- current->mm->context.vdso_base = 0;
7157+ current->mm->context.vdso_base = ~0UL;
7158
7159 /* vDSO has a problem and was disabled, just don't "enable" it for the
7160 * process
7161@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7162 vdso_base = get_unmapped_area(NULL, vdso_base,
7163 (vdso_pages << PAGE_SHIFT) +
7164 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
7165- 0, 0);
7166+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
7167 if (IS_ERR_VALUE(vdso_base)) {
7168 rc = vdso_base;
7169 goto fail_mmapsem;
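
The vdso.c hunk changes the "no vDSO" encoding from 0 to ~0UL, because with mmap randomization address 0 could in principle become a legitimate mapping base; the signal_32.c/signal_64.c hunks above update the trampoline checks to the new sentinel. A two-function sketch of the convention:

    #include <stdbool.h>

    #define VDSO_ABSENT (~0UL)   /* 0 is now a legal vdso_base value */

    static bool vdso_present(unsigned long vdso_base)
    {
        return vdso_base != VDSO_ABSENT;
    }
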
7170diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
7171index 5eea6f3..5d10396 100644
7172--- a/arch/powerpc/lib/usercopy_64.c
7173+++ b/arch/powerpc/lib/usercopy_64.c
7174@@ -9,22 +9,6 @@
7175 #include <linux/module.h>
7176 #include <asm/uaccess.h>
7177
7178-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7179-{
7180- if (likely(access_ok(VERIFY_READ, from, n)))
7181- n = __copy_from_user(to, from, n);
7182- else
7183- memset(to, 0, n);
7184- return n;
7185-}
7186-
7187-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7188-{
7189- if (likely(access_ok(VERIFY_WRITE, to, n)))
7190- n = __copy_to_user(to, from, n);
7191- return n;
7192-}
7193-
7194 unsigned long copy_in_user(void __user *to, const void __user *from,
7195 unsigned long n)
7196 {
7197@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
7198 return n;
7199 }
7200
7201-EXPORT_SYMBOL(copy_from_user);
7202-EXPORT_SYMBOL(copy_to_user);
7203 EXPORT_SYMBOL(copy_in_user);
7204
7205diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
7206index 3a8489a..6a63b3b 100644
7207--- a/arch/powerpc/mm/fault.c
7208+++ b/arch/powerpc/mm/fault.c
7209@@ -32,6 +32,10 @@
7210 #include <linux/perf_event.h>
7211 #include <linux/magic.h>
7212 #include <linux/ratelimit.h>
7213+#include <linux/slab.h>
7214+#include <linux/pagemap.h>
7215+#include <linux/compiler.h>
7216+#include <linux/unistd.h>
7217
7218 #include <asm/firmware.h>
7219 #include <asm/page.h>
7220@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7221 }
7222 #endif
7223
7224+#ifdef CONFIG_PAX_PAGEEXEC
7225+/*
7226+ * PaX: decide what to do with offenders (regs->nip = fault address)
7227+ *
7228+ * returns 1 when task should be killed
7229+ */
7230+static int pax_handle_fetch_fault(struct pt_regs *regs)
7231+{
7232+ return 1;
7233+}
7234+
7235+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7236+{
7237+ unsigned long i;
7238+
7239+ printk(KERN_ERR "PAX: bytes at PC: ");
7240+ for (i = 0; i < 5; i++) {
7241+ unsigned int c;
7242+ if (get_user(c, (unsigned int __user *)pc+i))
7243+ printk(KERN_CONT "???????? ");
7244+ else
7245+ printk(KERN_CONT "%08x ", c);
7246+ }
7247+ printk("\n");
7248+}
7249+#endif
7250+
7251 /*
7252 * Check whether the instruction at regs->nip is a store using
7253 * an update addressing form which will update r1.
7254@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7255 * indicate errors in DSISR but can validly be set in SRR1.
7256 */
7257 if (trap == 0x400)
7258- error_code &= 0x48200000;
7259+ error_code &= 0x58200000;
7260 else
7261 is_write = error_code & DSISR_ISSTORE;
7262 #else
7263@@ -364,7 +395,7 @@ good_area:
7264 * "undefined". Of those that can be set, this is the only
7265 * one which seems bad.
7266 */
7267- if (error_code & 0x10000000)
7268+ if (error_code & DSISR_GUARDED)
7269 /* Guarded storage error. */
7270 goto bad_area;
7271 #endif /* CONFIG_8xx */
7272@@ -379,7 +410,7 @@ good_area:
7273 * processors use the same I/D cache coherency mechanism
7274 * as embedded.
7275 */
7276- if (error_code & DSISR_PROTFAULT)
7277+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7278 goto bad_area;
7279 #endif /* CONFIG_PPC_STD_MMU */
7280
7281@@ -462,6 +493,23 @@ bad_area:
7282 bad_area_nosemaphore:
7283 /* User mode accesses cause a SIGSEGV */
7284 if (user_mode(regs)) {
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7288+#ifdef CONFIG_PPC_STD_MMU
7289+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7290+#else
7291+ if (is_exec && regs->nip == address) {
7292+#endif
7293+ switch (pax_handle_fetch_fault(regs)) {
7294+ }
7295+
7296+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7297+ do_group_exit(SIGKILL);
7298+ }
7299+ }
7300+#endif
7301+
7302 _exception(SIGSEGV, regs, code, address);
7303 return 0;
7304 }
7305diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7306index 67a42ed..cd463e0 100644
7307--- a/arch/powerpc/mm/mmap_64.c
7308+++ b/arch/powerpc/mm/mmap_64.c
7309@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7310 {
7311 unsigned long rnd = 0;
7312
7313+#ifdef CONFIG_PAX_RANDMMAP
7314+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7315+#endif
7316+
7317 if (current->flags & PF_RANDOMIZE) {
7318 /* 8MB for 32bit, 1GB for 64bit */
7319 if (is_32bit_task())
7320@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7321 */
7322 if (mmap_is_legacy()) {
7323 mm->mmap_base = TASK_UNMAPPED_BASE;
7324+
7325+#ifdef CONFIG_PAX_RANDMMAP
7326+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7327+ mm->mmap_base += mm->delta_mmap;
7328+#endif
7329+
7330 mm->get_unmapped_area = arch_get_unmapped_area;
7331 mm->unmap_area = arch_unmap_area;
7332 } else {
7333 mm->mmap_base = mmap_base();
7334+
7335+#ifdef CONFIG_PAX_RANDMMAP
7336+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7337+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7338+#endif
7339+
7340 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7341 mm->unmap_area = arch_unmap_area_topdown;
7342 }
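
Under MF_PAX_RANDMMAP, arch_pick_mmap_layout() above shifts the bottom-up base upward by a per-exec random delta_mmap and the top-down base downward by delta_mmap + delta_stack, so both search directions start from randomized positions. A sketch of the arithmetic with illustrative stand-in values:

    #include <stdio.h>

    #define TASK_UNMAPPED_BASE 0x40000000UL

    int main(void)
    {
        unsigned long delta_mmap  = 0x01200000UL;  /* random, fixed at exec */
        unsigned long delta_stack = 0x00340000UL;  /* random, fixed at exec */
        unsigned long top         = 0x7ffff000UL;  /* stand-in for mmap_base() */

        unsigned long legacy  = TASK_UNMAPPED_BASE + delta_mmap;  /* bottom-up */
        unsigned long topdown = top - (delta_mmap + delta_stack); /* top-down */

        printf("legacy base:  %#lx\ntopdown base: %#lx\n", legacy, topdown);
        return 0;
    }
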
7343diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7344index e779642..e5bb889 100644
7345--- a/arch/powerpc/mm/mmu_context_nohash.c
7346+++ b/arch/powerpc/mm/mmu_context_nohash.c
7347@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7348 return NOTIFY_OK;
7349 }
7350
7351-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7352+static struct notifier_block mmu_context_cpu_nb = {
7353 .notifier_call = mmu_context_cpu_notify,
7354 };
7355
7356diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7357index bba87ca..c346a33 100644
7358--- a/arch/powerpc/mm/numa.c
7359+++ b/arch/powerpc/mm/numa.c
7360@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7361 return ret;
7362 }
7363
7364-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7365+static struct notifier_block ppc64_numa_nb = {
7366 .notifier_call = cpu_numa_callback,
7367 .priority = 1 /* Must run before sched domains notifier. */
7368 };
7369diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7370index cf9dada..241529f 100644
7371--- a/arch/powerpc/mm/slice.c
7372+++ b/arch/powerpc/mm/slice.c
7373@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7374 if ((mm->task_size - len) < addr)
7375 return 0;
7376 vma = find_vma(mm, addr);
7377- return (!vma || (addr + len) <= vma->vm_start);
7378+ return check_heap_stack_gap(vma, addr, len, 0);
7379 }
7380
7381 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7382@@ -272,7 +272,7 @@ full_search:
7383 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
7384 continue;
7385 }
7386- if (!vma || addr + len <= vma->vm_start) {
7387+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7388 /*
7389 * Remember the place where we stopped the search:
7390 */
7391@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7392 }
7393 }
7394
7395- addr = mm->mmap_base;
7396- while (addr > len) {
7397+ if (mm->mmap_base < len)
7398+ addr = -ENOMEM;
7399+ else
7400+ addr = mm->mmap_base - len;
7401+
7402+ while (!IS_ERR_VALUE(addr)) {
7403 /* Go down by chunk size */
7404- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
7405+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
7406
7407 /* Check for hit with different page size */
7408 mask = slice_range_to_mask(addr, len);
7409@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7410 * return with success:
7411 */
7412 vma = find_vma(mm, addr);
7413- if (!vma || (addr + len) <= vma->vm_start) {
7414+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7415 /* remember the address as a hint for next time */
7416 if (use_cache)
7417 mm->free_area_cache = addr;
7418@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7419 mm->cached_hole_size = vma->vm_start - addr;
7420
7421 /* try just below the current vma->vm_start */
7422- addr = vma->vm_start;
7423+ addr = skip_heap_stack_gap(vma, len, 0);
7424 }
7425
7426 /*
7427@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7428 if (fixed && addr > (mm->task_size - len))
7429 return -EINVAL;
7430
7431+#ifdef CONFIG_PAX_RANDMMAP
7432+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7433+ addr = 0;
7434+#endif
7435+
7436 /* If hint, make sure it matches our alignment restrictions */
7437 if (!fixed && addr) {
7438 addr = _ALIGN_UP(addr, 1ul << pshift);
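
Throughout slice.c the bare free-range test `!vma || addr + len <= vma->vm_start` becomes check_heap_stack_gap(vma, addr, len, 0). The grsec helper additionally keeps a guard gap between a candidate mapping and a VMA above it; a sketch of the core idea (the real helper also handles growing stacks and a nonzero offset argument):

    #include <stdbool.h>

    struct vma_sk { unsigned long vm_start; };  /* stand-in for vm_area_struct */

    static bool check_heap_stack_gap_sk(const struct vma_sk *vma,
                                        unsigned long addr, unsigned long len,
                                        unsigned long gap)
    {
        if (vma == NULL)
            return true;                           /* nothing above: fits */
        return addr + len + gap <= vma->vm_start;  /* keep 'gap' bytes free */
    }
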
7439diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7440index 0cfece4..2f1a0e5 100644
7441--- a/arch/powerpc/platforms/cell/spufs/file.c
7442+++ b/arch/powerpc/platforms/cell/spufs/file.c
7443@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7444 return VM_FAULT_NOPAGE;
7445 }
7446
7447-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7448+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7449 unsigned long address,
7450- void *buf, int len, int write)
7451+ void *buf, size_t len, int write)
7452 {
7453 struct spu_context *ctx = vma->vm_file->private_data;
7454 unsigned long offset = address - vma->vm_start;
7455diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7456index bdb738a..49c9f95 100644
7457--- a/arch/powerpc/platforms/powermac/smp.c
7458+++ b/arch/powerpc/platforms/powermac/smp.c
7459@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7460 return NOTIFY_OK;
7461 }
7462
7463-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7464+static struct notifier_block smp_core99_cpu_nb = {
7465 .notifier_call = smp_core99_cpu_notify,
7466 };
7467 #endif /* CONFIG_HOTPLUG_CPU */
7468diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7469index c797832..ce575c8 100644
7470--- a/arch/s390/include/asm/atomic.h
7471+++ b/arch/s390/include/asm/atomic.h
7472@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7473 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7474 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7475
7476+#define atomic64_read_unchecked(v) atomic64_read(v)
7477+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7478+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7479+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7480+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7481+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7482+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7483+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7484+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7485+
7486 #define smp_mb__before_atomic_dec() smp_mb()
7487 #define smp_mb__after_atomic_dec() smp_mb()
7488 #define smp_mb__before_atomic_inc() smp_mb()
7489diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7490index 4d7ccac..d03d0ad 100644
7491--- a/arch/s390/include/asm/cache.h
7492+++ b/arch/s390/include/asm/cache.h
7493@@ -9,8 +9,10 @@
7494 #ifndef __ARCH_S390_CACHE_H
7495 #define __ARCH_S390_CACHE_H
7496
7497-#define L1_CACHE_BYTES 256
7498+#include <linux/const.h>
7499+
7500 #define L1_CACHE_SHIFT 8
7501+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7502 #define NET_SKB_PAD 32
7503
7504 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
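
Replacing the literal 256 with `(_AC(1,UL) << L1_CACHE_SHIFT)` lets one definition serve both C and assembly, where a UL suffix would not parse. The relevant machinery from <linux/const.h> is essentially:

    /* <linux/const.h>, abridged: paste a suffix in C, drop it in asm. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 8
    #define L1_CACHE_BYTES (_AC(1, UL) << L1_CACHE_SHIFT)  /* 256UL in C, 256 in asm */
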
7505diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7506index 178ff96..8c93bd1 100644
7507--- a/arch/s390/include/asm/elf.h
7508+++ b/arch/s390/include/asm/elf.h
7509@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7510 the loader. We need to make sure that it is out of the way of the program
7511 that it will "exec", and that there is sufficient room for the brk. */
7512
7513-extern unsigned long randomize_et_dyn(unsigned long base);
7514-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7515+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7516+
7517+#ifdef CONFIG_PAX_ASLR
7518+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7519+
7520+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7521+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7522+#endif
7523
7524 /* This yields a mask that user programs can use to figure out what
7525 instruction set this CPU supports. */
7526@@ -210,9 +216,6 @@ struct linux_binprm;
7527 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7528 int arch_setup_additional_pages(struct linux_binprm *, int);
7529
7530-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7531-#define arch_randomize_brk arch_randomize_brk
7532-
7533 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7534
7535 #endif
7536diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7537index c4a93d6..4d2a9b4 100644
7538--- a/arch/s390/include/asm/exec.h
7539+++ b/arch/s390/include/asm/exec.h
7540@@ -7,6 +7,6 @@
7541 #ifndef __ASM_EXEC_H
7542 #define __ASM_EXEC_H
7543
7544-extern unsigned long arch_align_stack(unsigned long sp);
7545+#define arch_align_stack(x) ((x) & ~0xfUL)
7546
7547 #endif /* __ASM_EXEC_H */
7548diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7549index 34268df..ea97318 100644
7550--- a/arch/s390/include/asm/uaccess.h
7551+++ b/arch/s390/include/asm/uaccess.h
7552@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7553 copy_to_user(void __user *to, const void *from, unsigned long n)
7554 {
7555 might_fault();
7556+
7557+ if ((long)n < 0)
7558+ return n;
7559+
7560 if (access_ok(VERIFY_WRITE, to, n))
7561 n = __copy_to_user(to, from, n);
7562 return n;
7563@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7564 static inline unsigned long __must_check
7565 __copy_from_user(void *to, const void __user *from, unsigned long n)
7566 {
7567+ if ((long)n < 0)
7568+ return n;
7569+
7570 if (__builtin_constant_p(n) && (n <= 256))
7571 return uaccess.copy_from_user_small(n, from, to);
7572 else
7573@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7574 static inline unsigned long __must_check
7575 copy_from_user(void *to, const void __user *from, unsigned long n)
7576 {
7577- unsigned int sz = __compiletime_object_size(to);
7578+ size_t sz = __compiletime_object_size(to);
7579
7580 might_fault();
7581- if (unlikely(sz != -1 && sz < n)) {
7582+
7583+ if ((long)n < 0)
7584+ return n;
7585+
7586+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7587 copy_from_user_overflow();
7588 return n;
7589 }
7590diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7591index 4610dea..cf0af21 100644
7592--- a/arch/s390/kernel/module.c
7593+++ b/arch/s390/kernel/module.c
7594@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7595
7596 /* Increase core size by size of got & plt and set start
7597 offsets for got and plt. */
7598- me->core_size = ALIGN(me->core_size, 4);
7599- me->arch.got_offset = me->core_size;
7600- me->core_size += me->arch.got_size;
7601- me->arch.plt_offset = me->core_size;
7602- me->core_size += me->arch.plt_size;
7603+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7604+ me->arch.got_offset = me->core_size_rw;
7605+ me->core_size_rw += me->arch.got_size;
7606+ me->arch.plt_offset = me->core_size_rx;
7607+ me->core_size_rx += me->arch.plt_size;
7608 return 0;
7609 }
7610
7611@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7612 if (info->got_initialized == 0) {
7613 Elf_Addr *gotent;
7614
7615- gotent = me->module_core + me->arch.got_offset +
7616+ gotent = me->module_core_rw + me->arch.got_offset +
7617 info->got_offset;
7618 *gotent = val;
7619 info->got_initialized = 1;
7620@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7621 else if (r_type == R_390_GOTENT ||
7622 r_type == R_390_GOTPLTENT)
7623 *(unsigned int *) loc =
7624- (val + (Elf_Addr) me->module_core - loc) >> 1;
7625+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7626 else if (r_type == R_390_GOT64 ||
7627 r_type == R_390_GOTPLT64)
7628 *(unsigned long *) loc = val;
7629@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7630 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7631 if (info->plt_initialized == 0) {
7632 unsigned int *ip;
7633- ip = me->module_core + me->arch.plt_offset +
7634+ ip = me->module_core_rx + me->arch.plt_offset +
7635 info->plt_offset;
7636 #ifndef CONFIG_64BIT
7637 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7638@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7639 val - loc + 0xffffUL < 0x1ffffeUL) ||
7640 (r_type == R_390_PLT32DBL &&
7641 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7642- val = (Elf_Addr) me->module_core +
7643+ val = (Elf_Addr) me->module_core_rx +
7644 me->arch.plt_offset +
7645 info->plt_offset;
7646 val += rela->r_addend - loc;
7647@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7648 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7649 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7650 val = val + rela->r_addend -
7651- ((Elf_Addr) me->module_core + me->arch.got_offset);
7652+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7653 if (r_type == R_390_GOTOFF16)
7654 *(unsigned short *) loc = val;
7655 else if (r_type == R_390_GOTOFF32)
7656@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7657 break;
7658 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7659 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7660- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7661+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7662 rela->r_addend - loc;
7663 if (r_type == R_390_GOTPC)
7664 *(unsigned int *) loc = val;
7665diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7666index 536d645..4a5bd9e 100644
7667--- a/arch/s390/kernel/process.c
7668+++ b/arch/s390/kernel/process.c
7669@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7670 }
7671 return 0;
7672 }
7673-
7674-unsigned long arch_align_stack(unsigned long sp)
7675-{
7676- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7677- sp -= get_random_int() & ~PAGE_MASK;
7678- return sp & ~0xf;
7679-}
7680-
7681-static inline unsigned long brk_rnd(void)
7682-{
7683- /* 8MB for 32bit, 1GB for 64bit */
7684- if (is_32bit_task())
7685- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7686- else
7687- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7688-}
7689-
7690-unsigned long arch_randomize_brk(struct mm_struct *mm)
7691-{
7692- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7693-
7694- if (ret < mm->brk)
7695- return mm->brk;
7696- return ret;
7697-}
7698-
7699-unsigned long randomize_et_dyn(unsigned long base)
7700-{
7701- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7702-
7703- if (!(current->flags & PF_RANDOMIZE))
7704- return base;
7705- if (ret < base)
7706- return base;
7707- return ret;
7708-}
7709diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7710index c59a5ef..3fae59c 100644
7711--- a/arch/s390/mm/mmap.c
7712+++ b/arch/s390/mm/mmap.c
7713@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7714 */
7715 if (mmap_is_legacy()) {
7716 mm->mmap_base = TASK_UNMAPPED_BASE;
7717+
7718+#ifdef CONFIG_PAX_RANDMMAP
7719+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7720+ mm->mmap_base += mm->delta_mmap;
7721+#endif
7722+
7723 mm->get_unmapped_area = arch_get_unmapped_area;
7724 mm->unmap_area = arch_unmap_area;
7725 } else {
7726 mm->mmap_base = mmap_base();
7727+
7728+#ifdef CONFIG_PAX_RANDMMAP
7729+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7730+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7731+#endif
7732+
7733 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7734 mm->unmap_area = arch_unmap_area_topdown;
7735 }
7736@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7737 */
7738 if (mmap_is_legacy()) {
7739 mm->mmap_base = TASK_UNMAPPED_BASE;
7740+
7741+#ifdef CONFIG_PAX_RANDMMAP
7742+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7743+ mm->mmap_base += mm->delta_mmap;
7744+#endif
7745+
7746 mm->get_unmapped_area = s390_get_unmapped_area;
7747 mm->unmap_area = arch_unmap_area;
7748 } else {
7749 mm->mmap_base = mmap_base();
7750+
7751+#ifdef CONFIG_PAX_RANDMMAP
7752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7753+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7754+#endif
7755+
7756 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7757 mm->unmap_area = arch_unmap_area_topdown;
7758 }
7759diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7760index ae3d59f..f65f075 100644
7761--- a/arch/score/include/asm/cache.h
7762+++ b/arch/score/include/asm/cache.h
7763@@ -1,7 +1,9 @@
7764 #ifndef _ASM_SCORE_CACHE_H
7765 #define _ASM_SCORE_CACHE_H
7766
7767+#include <linux/const.h>
7768+
7769 #define L1_CACHE_SHIFT 4
7770-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7771+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7772
7773 #endif /* _ASM_SCORE_CACHE_H */
7774diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7775index f9f3cd5..58ff438 100644
7776--- a/arch/score/include/asm/exec.h
7777+++ b/arch/score/include/asm/exec.h
7778@@ -1,6 +1,6 @@
7779 #ifndef _ASM_SCORE_EXEC_H
7780 #define _ASM_SCORE_EXEC_H
7781
7782-extern unsigned long arch_align_stack(unsigned long sp);
7783+#define arch_align_stack(x) (x)
7784
7785 #endif /* _ASM_SCORE_EXEC_H */
7786diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7787index 7956846..5f37677 100644
7788--- a/arch/score/kernel/process.c
7789+++ b/arch/score/kernel/process.c
7790@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7791
7792 return task_pt_regs(task)->cp0_epc;
7793 }
7794-
7795-unsigned long arch_align_stack(unsigned long sp)
7796-{
7797- return sp;
7798-}
7799diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7800index ef9e555..331bd29 100644
7801--- a/arch/sh/include/asm/cache.h
7802+++ b/arch/sh/include/asm/cache.h
7803@@ -9,10 +9,11 @@
7804 #define __ASM_SH_CACHE_H
7805 #ifdef __KERNEL__
7806
7807+#include <linux/const.h>
7808 #include <linux/init.h>
7809 #include <cpu/cache.h>
7810
7811-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7812+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7813
7814 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7815
7816diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7817index 03f2b55..b027032 100644
7818--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7819+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7820@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7821 return NOTIFY_OK;
7822 }
7823
7824-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7825+static struct notifier_block shx3_cpu_notifier = {
7826 .notifier_call = shx3_cpu_callback,
7827 };
7828
7829diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7830index 6777177..cb5e44f 100644
7831--- a/arch/sh/mm/mmap.c
7832+++ b/arch/sh/mm/mmap.c
7833@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7834 struct mm_struct *mm = current->mm;
7835 struct vm_area_struct *vma;
7836 int do_colour_align;
7837+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7838 struct vm_unmapped_area_info info;
7839
7840 if (flags & MAP_FIXED) {
7841@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7842 if (filp || (flags & MAP_SHARED))
7843 do_colour_align = 1;
7844
7845+#ifdef CONFIG_PAX_RANDMMAP
7846+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7847+#endif
7848+
7849 if (addr) {
7850 if (do_colour_align)
7851 addr = COLOUR_ALIGN(addr, pgoff);
7852@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7853 addr = PAGE_ALIGN(addr);
7854
7855 vma = find_vma(mm, addr);
7856- if (TASK_SIZE - len >= addr &&
7857- (!vma || addr + len <= vma->vm_start))
7858+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7859 return addr;
7860 }
7861
7862 info.flags = 0;
7863 info.length = len;
7864- info.low_limit = TASK_UNMAPPED_BASE;
7865+ info.low_limit = mm->mmap_base;
7866 info.high_limit = TASK_SIZE;
7867 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7868 info.align_offset = pgoff << PAGE_SHIFT;
7869@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7870 struct mm_struct *mm = current->mm;
7871 unsigned long addr = addr0;
7872 int do_colour_align;
7873+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7874 struct vm_unmapped_area_info info;
7875
7876 if (flags & MAP_FIXED) {
7877@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7878 if (filp || (flags & MAP_SHARED))
7879 do_colour_align = 1;
7880
7881+#ifdef CONFIG_PAX_RANDMMAP
7882+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7883+#endif
7884+
7885 /* requesting a specific address */
7886 if (addr) {
7887 if (do_colour_align)
7888@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7889 addr = PAGE_ALIGN(addr);
7890
7891 vma = find_vma(mm, addr);
7892- if (TASK_SIZE - len >= addr &&
7893- (!vma || addr + len <= vma->vm_start))
7894+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7895 return addr;
7896 }
7897
7898@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7899 VM_BUG_ON(addr != -ENOMEM);
7900 info.flags = 0;
7901 info.low_limit = TASK_UNMAPPED_BASE;
7902+
7903+#ifdef CONFIG_PAX_RANDMMAP
7904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7905+ info.low_limit += mm->delta_mmap;
7906+#endif
7907+
7908 info.high_limit = TASK_SIZE;
7909 addr = vm_unmapped_area(&info);
7910 }
7911diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7912index be56a24..443328f 100644
7913--- a/arch/sparc/include/asm/atomic_64.h
7914+++ b/arch/sparc/include/asm/atomic_64.h
7915@@ -14,18 +14,40 @@
7916 #define ATOMIC64_INIT(i) { (i) }
7917
7918 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7919+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7920+{
7921+ return v->counter;
7922+}
7923 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7924+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7925+{
7926+ return v->counter;
7927+}
7928
7929 #define atomic_set(v, i) (((v)->counter) = i)
7930+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7931+{
7932+ v->counter = i;
7933+}
7934 #define atomic64_set(v, i) (((v)->counter) = i)
7935+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7936+{
7937+ v->counter = i;
7938+}
7939
7940 extern void atomic_add(int, atomic_t *);
7941+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7942 extern void atomic64_add(long, atomic64_t *);
7943+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7944 extern void atomic_sub(int, atomic_t *);
7945+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7946 extern void atomic64_sub(long, atomic64_t *);
7947+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7948
7949 extern int atomic_add_ret(int, atomic_t *);
7950+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7951 extern long atomic64_add_ret(long, atomic64_t *);
7952+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7953 extern int atomic_sub_ret(int, atomic_t *);
7954 extern long atomic64_sub_ret(long, atomic64_t *);
7955
7956@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7957 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7958
7959 #define atomic_inc_return(v) atomic_add_ret(1, v)
7960+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7961+{
7962+ return atomic_add_ret_unchecked(1, v);
7963+}
7964 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7965+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7966+{
7967+ return atomic64_add_ret_unchecked(1, v);
7968+}
7969
7970 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7971 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7972
7973 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7974+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7975+{
7976+ return atomic_add_ret_unchecked(i, v);
7977+}
7978 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7979+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7980+{
7981+ return atomic64_add_ret_unchecked(i, v);
7982+}
7983
7984 /*
7985 * atomic_inc_and_test - increment and test
7986@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7987 * other cases.
7988 */
7989 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7990+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7991+{
7992+ return atomic_inc_return_unchecked(v) == 0;
7993+}
7994 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7995
7996 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7997@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7998 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7999
8000 #define atomic_inc(v) atomic_add(1, v)
8001+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8002+{
8003+ atomic_add_unchecked(1, v);
8004+}
8005 #define atomic64_inc(v) atomic64_add(1, v)
8006+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8007+{
8008+ atomic64_add_unchecked(1, v);
8009+}
8010
8011 #define atomic_dec(v) atomic_sub(1, v)
8012+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8013+{
8014+ atomic_sub_unchecked(1, v);
8015+}
8016 #define atomic64_dec(v) atomic64_sub(1, v)
8017+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8018+{
8019+ atomic64_sub_unchecked(1, v);
8020+}
8021
8022 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8023 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8024
8025 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8026+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8027+{
8028+ return cmpxchg(&v->counter, old, new);
8029+}
8030 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8031+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8032+{
8033+ return xchg(&v->counter, new);
8034+}
8035
8036 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8037 {
8038- int c, old;
8039+ int c, old, new;
8040 c = atomic_read(v);
8041 for (;;) {
8042- if (unlikely(c == (u)))
8043+ if (unlikely(c == u))
8044 break;
8045- old = atomic_cmpxchg((v), c, c + (a));
8046+
8047+ asm volatile("addcc %2, %0, %0\n"
8048+
8049+#ifdef CONFIG_PAX_REFCOUNT
8050+ "tvs %%icc, 6\n"
8051+#endif
8052+
8053+ : "=r" (new)
8054+ : "0" (c), "ir" (a)
8055+ : "cc");
8056+
8057+ old = atomic_cmpxchg(v, c, new);
8058 if (likely(old == c))
8059 break;
8060 c = old;
8061@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8062 #define atomic64_cmpxchg(v, o, n) \
8063 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8064 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8065+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8066+{
8067+ return xchg(&v->counter, new);
8068+}
8069
8070 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8071 {
8072- long c, old;
8073+ long c, old, new;
8074 c = atomic64_read(v);
8075 for (;;) {
8076- if (unlikely(c == (u)))
8077+ if (unlikely(c == u))
8078 break;
8079- old = atomic64_cmpxchg((v), c, c + (a));
8080+
8081+ asm volatile("addcc %2, %0, %0\n"
8082+
8083+#ifdef CONFIG_PAX_REFCOUNT
8084+ "tvs %%xcc, 6\n"
8085+#endif
8086+
8087+ : "=r" (new)
8088+ : "0" (c), "ir" (a)
8089+ : "cc");
8090+
8091+ old = atomic64_cmpxchg(v, c, new);
8092 if (likely(old == c))
8093 break;
8094 c = old;
8095 }
8096- return c != (u);
8097+ return c != u;
8098 }
8099
8100 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
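
The sparc64 hunks pair every atomic op with an *_unchecked twin: the checked versions trap on signed overflow (addcc sets the condition codes, `tvs %icc, 6` traps if the overflow bit is set) so a refcount can never wrap into a use-after-free, while the unchecked versions keep wraparound semantics for counters that may legally wrap. A portable, non-atomic sketch of the same split using the GCC/Clang overflow builtins instead of sparc asm:

    #include <stdio.h>
    #include <stdlib.h>

    /* Checked: abort (in the kernel: trap and oops) instead of wrapping. */
    static int add_return_checked(int *v, int a)
    {
        int sum;
        if (__builtin_add_overflow(*v, a, &sum))
            abort();
        return *v = sum;
    }

    /* Unchecked: wraparound on purpose, as for statistics counters. */
    static int add_return_unchecked(int *v, int a)
    {
        *v = (int)((unsigned int)*v + (unsigned int)a);
        return *v;
    }

    int main(void)
    {
        int refs = 0x7fffffff - 1, stat = 0x7fffffff - 1;
        printf("stat wraps to %d\n", add_return_unchecked(&stat, 2));
        add_return_checked(&refs, 2);   /* aborts: overflow caught */
        return 0;
    }
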
8101diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8102index 5bb6991..5c2132e 100644
8103--- a/arch/sparc/include/asm/cache.h
8104+++ b/arch/sparc/include/asm/cache.h
8105@@ -7,10 +7,12 @@
8106 #ifndef _SPARC_CACHE_H
8107 #define _SPARC_CACHE_H
8108
8109+#include <linux/const.h>
8110+
8111 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8112
8113 #define L1_CACHE_SHIFT 5
8114-#define L1_CACHE_BYTES 32
8115+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8116
8117 #ifdef CONFIG_SPARC32
8118 #define SMP_CACHE_BYTES_SHIFT 5
8119diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8120index ac74a2c..a9e58af 100644
8121--- a/arch/sparc/include/asm/elf_32.h
8122+++ b/arch/sparc/include/asm/elf_32.h
8123@@ -114,6 +114,13 @@ typedef struct {
8124
8125 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8126
8127+#ifdef CONFIG_PAX_ASLR
8128+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8129+
8130+#define PAX_DELTA_MMAP_LEN 16
8131+#define PAX_DELTA_STACK_LEN 16
8132+#endif
8133+
8134 /* This yields a mask that user programs can use to figure out what
8135 instruction set this cpu supports. This can NOT be done in userspace
8136 on Sparc. */
8137diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
8138index 370ca1e..d4f4a98 100644
8139--- a/arch/sparc/include/asm/elf_64.h
8140+++ b/arch/sparc/include/asm/elf_64.h
8141@@ -189,6 +189,13 @@ typedef struct {
8142 #define ELF_ET_DYN_BASE 0x0000010000000000UL
8143 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
8144
8145+#ifdef CONFIG_PAX_ASLR
8146+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
8147+
8148+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
8149+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
8150+#endif
8151+
8152 extern unsigned long sparc64_elf_hwcap;
8153 #define ELF_HWCAP sparc64_elf_hwcap
8154
8155diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
8156index 9b1c36d..209298b 100644
8157--- a/arch/sparc/include/asm/pgalloc_32.h
8158+++ b/arch/sparc/include/asm/pgalloc_32.h
8159@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
8160 }
8161
8162 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
8163+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
8164
8165 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
8166 unsigned long address)
8167diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
8168index bcfe063..b333142 100644
8169--- a/arch/sparc/include/asm/pgalloc_64.h
8170+++ b/arch/sparc/include/asm/pgalloc_64.h
8171@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8172 }
8173
8174 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
8175+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
8176
8177 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
8178 {
8179diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
8180index 6fc1348..390c50a 100644
8181--- a/arch/sparc/include/asm/pgtable_32.h
8182+++ b/arch/sparc/include/asm/pgtable_32.h
8183@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
8184 #define PAGE_SHARED SRMMU_PAGE_SHARED
8185 #define PAGE_COPY SRMMU_PAGE_COPY
8186 #define PAGE_READONLY SRMMU_PAGE_RDONLY
8187+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
8188+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
8189+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
8190 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
8191
8192 /* Top-level page directory - dummy used by init-mm.
8193@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
8194
8195 /* xwr */
8196 #define __P000 PAGE_NONE
8197-#define __P001 PAGE_READONLY
8198-#define __P010 PAGE_COPY
8199-#define __P011 PAGE_COPY
8200+#define __P001 PAGE_READONLY_NOEXEC
8201+#define __P010 PAGE_COPY_NOEXEC
8202+#define __P011 PAGE_COPY_NOEXEC
8203 #define __P100 PAGE_READONLY
8204 #define __P101 PAGE_READONLY
8205 #define __P110 PAGE_COPY
8206 #define __P111 PAGE_COPY
8207
8208 #define __S000 PAGE_NONE
8209-#define __S001 PAGE_READONLY
8210-#define __S010 PAGE_SHARED
8211-#define __S011 PAGE_SHARED
8212+#define __S001 PAGE_READONLY_NOEXEC
8213+#define __S010 PAGE_SHARED_NOEXEC
8214+#define __S011 PAGE_SHARED_NOEXEC
8215 #define __S100 PAGE_READONLY
8216 #define __S101 PAGE_READONLY
8217 #define __S110 PAGE_SHARED
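
The __P/__S tables are indexed by the xwr permission triple, so the effect of this hunk is that SRMMU_EXEC now has to be requested explicitly: any mapping created without PROT_EXEC resolves to a *_NOEXEC entry. A sketch of the consumer side, essentially the lookup mm/mmap.c's vm_get_page_prot() performs:

pgprot_t prot = protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
/* e.g. a private PROT_READ|PROT_WRITE mapping indexes __P011,
 * which with this hunk applied is PAGE_COPY_NOEXEC */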
8218diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
8219index 79da178..c2eede8 100644
8220--- a/arch/sparc/include/asm/pgtsrmmu.h
8221+++ b/arch/sparc/include/asm/pgtsrmmu.h
8222@@ -115,6 +115,11 @@
8223 SRMMU_EXEC | SRMMU_REF)
8224 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
8225 SRMMU_EXEC | SRMMU_REF)
8226+
8227+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
8228+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8229+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8230+
8231 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
8232 SRMMU_DIRTY | SRMMU_REF)
8233
8234diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
8235index 9689176..63c18ea 100644
8236--- a/arch/sparc/include/asm/spinlock_64.h
8237+++ b/arch/sparc/include/asm/spinlock_64.h
8238@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8239
8240 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
8241
8242-static void inline arch_read_lock(arch_rwlock_t *lock)
8243+static inline void arch_read_lock(arch_rwlock_t *lock)
8244 {
8245 unsigned long tmp1, tmp2;
8246
8247 __asm__ __volatile__ (
8248 "1: ldsw [%2], %0\n"
8249 " brlz,pn %0, 2f\n"
8250-"4: add %0, 1, %1\n"
8251+"4: addcc %0, 1, %1\n"
8252+
8253+#ifdef CONFIG_PAX_REFCOUNT
8254+" tvs %%icc, 6\n"
8255+#endif
8256+
8257 " cas [%2], %0, %1\n"
8258 " cmp %0, %1\n"
8259 " bne,pn %%icc, 1b\n"
8260@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8261 " .previous"
8262 : "=&r" (tmp1), "=&r" (tmp2)
8263 : "r" (lock)
8264- : "memory");
8265+ : "memory", "cc");
8266 }
8267
8268-static int inline arch_read_trylock(arch_rwlock_t *lock)
8269+static inline int arch_read_trylock(arch_rwlock_t *lock)
8270 {
8271 int tmp1, tmp2;
8272
8273@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8274 "1: ldsw [%2], %0\n"
8275 " brlz,a,pn %0, 2f\n"
8276 " mov 0, %0\n"
8277-" add %0, 1, %1\n"
8278+" addcc %0, 1, %1\n"
8279+
8280+#ifdef CONFIG_PAX_REFCOUNT
8281+" tvs %%icc, 6\n"
8282+#endif
8283+
8284 " cas [%2], %0, %1\n"
8285 " cmp %0, %1\n"
8286 " bne,pn %%icc, 1b\n"
8287@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8288 return tmp1;
8289 }
8290
8291-static void inline arch_read_unlock(arch_rwlock_t *lock)
8292+static inline void arch_read_unlock(arch_rwlock_t *lock)
8293 {
8294 unsigned long tmp1, tmp2;
8295
8296 __asm__ __volatile__(
8297 "1: lduw [%2], %0\n"
8298-" sub %0, 1, %1\n"
8299+" subcc %0, 1, %1\n"
8300+
8301+#ifdef CONFIG_PAX_REFCOUNT
8302+" tvs %%icc, 6\n"
8303+#endif
8304+
8305 " cas [%2], %0, %1\n"
8306 " cmp %0, %1\n"
8307 " bne,pn %%xcc, 1b\n"
8308@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8309 : "memory");
8310 }
8311
8312-static void inline arch_write_lock(arch_rwlock_t *lock)
8313+static inline void arch_write_lock(arch_rwlock_t *lock)
8314 {
8315 unsigned long mask, tmp1, tmp2;
8316
8317@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8318 : "memory");
8319 }
8320
8321-static void inline arch_write_unlock(arch_rwlock_t *lock)
8322+static inline void arch_write_unlock(arch_rwlock_t *lock)
8323 {
8324 __asm__ __volatile__(
8325 " stw %%g0, [%0]"
8326@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8327 : "memory");
8328 }
8329
8330-static int inline arch_write_trylock(arch_rwlock_t *lock)
8331+static inline int arch_write_trylock(arch_rwlock_t *lock)
8332 {
8333 unsigned long mask, tmp1, tmp2, result;
8334
8335diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8336index 25849ae..924c54b 100644
8337--- a/arch/sparc/include/asm/thread_info_32.h
8338+++ b/arch/sparc/include/asm/thread_info_32.h
8339@@ -49,6 +49,8 @@ struct thread_info {
8340 unsigned long w_saved;
8341
8342 struct restart_block restart_block;
8343+
8344+ unsigned long lowest_stack;
8345 };
8346
8347 /*
8348diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8349index 269bd92..e46a9b8 100644
8350--- a/arch/sparc/include/asm/thread_info_64.h
8351+++ b/arch/sparc/include/asm/thread_info_64.h
8352@@ -63,6 +63,8 @@ struct thread_info {
8353 struct pt_regs *kern_una_regs;
8354 unsigned int kern_una_insn;
8355
8356+ unsigned long lowest_stack;
8357+
8358 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8359 };
8360
8361@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8362 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8363 /* flag bit 6 is available */
8364 #define TIF_32BIT 7 /* 32-bit binary */
8365-/* flag bit 8 is available */
8366+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8367 #define TIF_SECCOMP 9 /* secure computing */
8368 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8369 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8370+
8371 /* NOTE: Thread flags >= 12 should be ones we have no interest
8372 * in using in assembly, else we can't use the mask as
8373 * an immediate value in instructions such as andcc.
8374@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8375 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8376 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8377 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8378+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8379
8380 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8381 _TIF_DO_NOTIFY_RESUME_MASK | \
8382 _TIF_NEED_RESCHED)
8383 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8384
8385+#define _TIF_WORK_SYSCALL \
8386+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8387+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8388+
8389+
8390 /*
8391 * Thread-synchronous status.
8392 *
8393diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8394index 0167d26..767bb0c 100644
8395--- a/arch/sparc/include/asm/uaccess.h
8396+++ b/arch/sparc/include/asm/uaccess.h
8397@@ -1,5 +1,6 @@
8398 #ifndef ___ASM_SPARC_UACCESS_H
8399 #define ___ASM_SPARC_UACCESS_H
8400+
8401 #if defined(__sparc__) && defined(__arch64__)
8402 #include <asm/uaccess_64.h>
8403 #else
8404diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8405index 53a28dd..50c38c3 100644
8406--- a/arch/sparc/include/asm/uaccess_32.h
8407+++ b/arch/sparc/include/asm/uaccess_32.h
8408@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8409
8410 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8411 {
8412- if (n && __access_ok((unsigned long) to, n))
8413+ if ((long)n < 0)
8414+ return n;
8415+
8416+ if (n && __access_ok((unsigned long) to, n)) {
8417+ if (!__builtin_constant_p(n))
8418+ check_object_size(from, n, true);
8419 return __copy_user(to, (__force void __user *) from, n);
8420- else
8421+ } else
8422 return n;
8423 }
8424
8425 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8426 {
8427+ if ((long)n < 0)
8428+ return n;
8429+
8430+ if (!__builtin_constant_p(n))
8431+ check_object_size(from, n, true);
8432+
8433 return __copy_user(to, (__force void __user *) from, n);
8434 }
8435
8436 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8437 {
8438- if (n && __access_ok((unsigned long) from, n))
8439+ if ((long)n < 0)
8440+ return n;
8441+
8442+ if (n && __access_ok((unsigned long) from, n)) {
8443+ if (!__builtin_constant_p(n))
8444+ check_object_size(to, n, false);
8445 return __copy_user((__force void __user *) to, from, n);
8446- else
8447+ } else
8448 return n;
8449 }
8450
8451 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8452 {
8453+ if ((long)n < 0)
8454+ return n;
8455+
8456 return __copy_user((__force void __user *) to, from, n);
8457 }
8458
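
Two independent guards are added here. The (long)n < 0 test rejects lengths that underflowed into huge size_t values before __access_ok() or __copy_user() ever see them, and check_object_size() is the PAX_USERCOPY slab-bounds check (wired up by other hunks of this patch), skipped when gcc can prove the size at compile time. A sketch, not from the patch, of what the sign test catches:

/* A typical length-underflow bug. */
size_t len = 60, hdr = 64;
unsigned long n = len - hdr;	/* wraps to 0xfffffffffffffffc */

if ((long)n < 0)		/* true, so the bogus copy is refused */
	return n;

The 64-bit variants below additionally cap the size at INT_MAX, bounding even non-negative lengths.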
8459diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8460index e562d3c..191f176 100644
8461--- a/arch/sparc/include/asm/uaccess_64.h
8462+++ b/arch/sparc/include/asm/uaccess_64.h
8463@@ -10,6 +10,7 @@
8464 #include <linux/compiler.h>
8465 #include <linux/string.h>
8466 #include <linux/thread_info.h>
8467+#include <linux/kernel.h>
8468 #include <asm/asi.h>
8469 #include <asm/spitfire.h>
8470 #include <asm-generic/uaccess-unaligned.h>
8471@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8472 static inline unsigned long __must_check
8473 copy_from_user(void *to, const void __user *from, unsigned long size)
8474 {
8475- unsigned long ret = ___copy_from_user(to, from, size);
8476+ unsigned long ret;
8477
8478+ if ((long)size < 0 || size > INT_MAX)
8479+ return size;
8480+
8481+ if (!__builtin_constant_p(size))
8482+ check_object_size(to, size, false);
8483+
8484+ ret = ___copy_from_user(to, from, size);
8485 if (unlikely(ret))
8486 ret = copy_from_user_fixup(to, from, size);
8487
8488@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8489 static inline unsigned long __must_check
8490 copy_to_user(void __user *to, const void *from, unsigned long size)
8491 {
8492- unsigned long ret = ___copy_to_user(to, from, size);
8493+ unsigned long ret;
8494
8495+ if ((long)size < 0 || size > INT_MAX)
8496+ return size;
8497+
8498+ if (!__builtin_constant_p(size))
8499+ check_object_size(from, size, true);
8500+
8501+ ret = ___copy_to_user(to, from, size);
8502 if (unlikely(ret))
8503 ret = copy_to_user_fixup(to, from, size);
8504 return ret;
8505diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8506index 6cf591b..b49e65a 100644
8507--- a/arch/sparc/kernel/Makefile
8508+++ b/arch/sparc/kernel/Makefile
8509@@ -3,7 +3,7 @@
8510 #
8511
8512 asflags-y := -ansi
8513-ccflags-y := -Werror
8514+#ccflags-y := -Werror
8515
8516 extra-y := head_$(BITS).o
8517
8518diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8519index be8e862..5b50b12 100644
8520--- a/arch/sparc/kernel/process_32.c
8521+++ b/arch/sparc/kernel/process_32.c
8522@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8523
8524 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8525 r->psr, r->pc, r->npc, r->y, print_tainted());
8526- printk("PC: <%pS>\n", (void *) r->pc);
8527+ printk("PC: <%pA>\n", (void *) r->pc);
8528 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8529 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8530 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8531 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8532 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8533 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8534- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8535+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8536
8537 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8538 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8539@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8540 rw = (struct reg_window32 *) fp;
8541 pc = rw->ins[7];
8542 printk("[%08lx : ", pc);
8543- printk("%pS ] ", (void *) pc);
8544+ printk("%pA ] ", (void *) pc);
8545 fp = rw->ins[6];
8546 } while (++count < 16);
8547 printk("\n");
8548diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8549index cdb80b2..5ca141d 100644
8550--- a/arch/sparc/kernel/process_64.c
8551+++ b/arch/sparc/kernel/process_64.c
8552@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8553 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8554 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8555 if (regs->tstate & TSTATE_PRIV)
8556- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8557+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8558 }
8559
8560 void show_regs(struct pt_regs *regs)
8561 {
8562 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8563 regs->tpc, regs->tnpc, regs->y, print_tainted());
8564- printk("TPC: <%pS>\n", (void *) regs->tpc);
8565+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8566 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8567 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8568 regs->u_regs[3]);
8569@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8570 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8571 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8572 regs->u_regs[15]);
8573- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8574+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8575 show_regwindow(regs);
8576 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8577 }
8578@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8579 ((tp && tp->task) ? tp->task->pid : -1));
8580
8581 if (gp->tstate & TSTATE_PRIV) {
8582- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8583+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8584 (void *) gp->tpc,
8585 (void *) gp->o7,
8586 (void *) gp->i7,
8587diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8588index 1303021..c2a6321 100644
8589--- a/arch/sparc/kernel/prom_common.c
8590+++ b/arch/sparc/kernel/prom_common.c
8591@@ -143,7 +143,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8592
8593 unsigned int prom_early_allocated __initdata;
8594
8595-static struct of_pdt_ops prom_sparc_ops __initdata = {
8596+static struct of_pdt_ops prom_sparc_ops __initconst = {
8597 .nextprop = prom_common_nextprop,
8598 .getproplen = prom_getproplen,
8599 .getproperty = prom_getproperty,
8600diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8601index 7ff45e4..a58f271 100644
8602--- a/arch/sparc/kernel/ptrace_64.c
8603+++ b/arch/sparc/kernel/ptrace_64.c
8604@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8605 return ret;
8606 }
8607
8608+#ifdef CONFIG_GRKERNSEC_SETXID
8609+extern void gr_delayed_cred_worker(void);
8610+#endif
8611+
8612 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8613 {
8614 int ret = 0;
8615@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8616 /* do the secure computing check first */
8617 secure_computing_strict(regs->u_regs[UREG_G1]);
8618
8619+#ifdef CONFIG_GRKERNSEC_SETXID
8620+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8621+ gr_delayed_cred_worker();
8622+#endif
8623+
8624 if (test_thread_flag(TIF_SYSCALL_TRACE))
8625 ret = tracehook_report_syscall_entry(regs);
8626
8627@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8628
8629 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8630 {
8631+#ifdef CONFIG_GRKERNSEC_SETXID
8632+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8633+ gr_delayed_cred_worker();
8634+#endif
8635+
8636 audit_syscall_exit(regs);
8637
8638 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8639diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8640index 2da0bdc..79128d2 100644
8641--- a/arch/sparc/kernel/sys_sparc_32.c
8642+++ b/arch/sparc/kernel/sys_sparc_32.c
8643@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8644 if (len > TASK_SIZE - PAGE_SIZE)
8645 return -ENOMEM;
8646 if (!addr)
8647- addr = TASK_UNMAPPED_BASE;
8648+ addr = current->mm->mmap_base;
8649
8650 info.flags = 0;
8651 info.length = len;
8652diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8653index 708bc29..f0129cb 100644
8654--- a/arch/sparc/kernel/sys_sparc_64.c
8655+++ b/arch/sparc/kernel/sys_sparc_64.c
8656@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8657 struct vm_area_struct * vma;
8658 unsigned long task_size = TASK_SIZE;
8659 int do_color_align;
8660+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8661 struct vm_unmapped_area_info info;
8662
8663 if (flags & MAP_FIXED) {
8664 /* We do not accept a shared mapping if it would violate
8665 * cache aliasing constraints.
8666 */
8667- if ((flags & MAP_SHARED) &&
8668+ if ((filp || (flags & MAP_SHARED)) &&
8669 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8670 return -EINVAL;
8671 return addr;
8672@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8673 if (filp || (flags & MAP_SHARED))
8674 do_color_align = 1;
8675
8676+#ifdef CONFIG_PAX_RANDMMAP
8677+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8678+#endif
8679+
8680 if (addr) {
8681 if (do_color_align)
8682 addr = COLOR_ALIGN(addr, pgoff);
8683@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8684 addr = PAGE_ALIGN(addr);
8685
8686 vma = find_vma(mm, addr);
8687- if (task_size - len >= addr &&
8688- (!vma || addr + len <= vma->vm_start))
8689+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8690 return addr;
8691 }
8692
8693 info.flags = 0;
8694 info.length = len;
8695- info.low_limit = TASK_UNMAPPED_BASE;
8696+ info.low_limit = mm->mmap_base;
8697 info.high_limit = min(task_size, VA_EXCLUDE_START);
8698 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8699 info.align_offset = pgoff << PAGE_SHIFT;
8700@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8701 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8702 VM_BUG_ON(addr != -ENOMEM);
8703 info.low_limit = VA_EXCLUDE_END;
8704+
8705+#ifdef CONFIG_PAX_RANDMMAP
8706+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8707+ info.low_limit += mm->delta_mmap;
8708+#endif
8709+
8710 info.high_limit = task_size;
8711 addr = vm_unmapped_area(&info);
8712 }
8713@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8714 unsigned long task_size = STACK_TOP32;
8715 unsigned long addr = addr0;
8716 int do_color_align;
8717+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8718 struct vm_unmapped_area_info info;
8719
8720 /* This should only ever run for 32-bit processes. */
8721@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8722 /* We do not accept a shared mapping if it would violate
8723 * cache aliasing constraints.
8724 */
8725- if ((flags & MAP_SHARED) &&
8726+ if ((filp || (flags & MAP_SHARED)) &&
8727 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8728 return -EINVAL;
8729 return addr;
8730@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8731 if (filp || (flags & MAP_SHARED))
8732 do_color_align = 1;
8733
8734+#ifdef CONFIG_PAX_RANDMMAP
8735+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8736+#endif
8737+
8738 /* requesting a specific address */
8739 if (addr) {
8740 if (do_color_align)
8741@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8742 addr = PAGE_ALIGN(addr);
8743
8744 vma = find_vma(mm, addr);
8745- if (task_size - len >= addr &&
8746- (!vma || addr + len <= vma->vm_start))
8747+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8748 return addr;
8749 }
8750
8751@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8752 VM_BUG_ON(addr != -ENOMEM);
8753 info.flags = 0;
8754 info.low_limit = TASK_UNMAPPED_BASE;
8755+
8756+#ifdef CONFIG_PAX_RANDMMAP
8757+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8758+ info.low_limit += mm->delta_mmap;
8759+#endif
8760+
8761 info.high_limit = STACK_TOP32;
8762 addr = vm_unmapped_area(&info);
8763 }
8764@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
8765 {
8766 unsigned long rnd = 0UL;
8767
8768+#ifdef CONFIG_PAX_RANDMMAP
8769+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))

8770+#endif
8771+
8772 if (current->flags & PF_RANDOMIZE) {
8773 unsigned long val = get_random_int();
8774 if (test_thread_flag(TIF_32BIT))
8775@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8776 gap == RLIM_INFINITY ||
8777 sysctl_legacy_va_layout) {
8778 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8779+
8780+#ifdef CONFIG_PAX_RANDMMAP
8781+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8782+ mm->mmap_base += mm->delta_mmap;
8783+#endif
8784+
8785 mm->get_unmapped_area = arch_get_unmapped_area;
8786 mm->unmap_area = arch_unmap_area;
8787 } else {
8788@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8789 gap = (task_size / 6 * 5);
8790
8791 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8792+
8793+#ifdef CONFIG_PAX_RANDMMAP
8794+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8795+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8796+#endif
8797+
8798 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8799 mm->unmap_area = arch_unmap_area_topdown;
8800 }
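
The mm->delta_mmap/mm->delta_stack values consumed above are generated at exec time by the fs/binfmt_elf.c hunks of this patch, roughly as sketched below; pax_get_random_long() and MF_PAX_RANDMMAP are introduced by other parts of the same patch:

if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
	current->mm->delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
	current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
}

Shifting mmap_base and the vm_unmapped_area low_limit by delta_mmap keeps the layout randomized even when the stock PF_RANDOMIZE path is not taken.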
8801diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8802index e0fed77..604a7e5 100644
8803--- a/arch/sparc/kernel/syscalls.S
8804+++ b/arch/sparc/kernel/syscalls.S
8805@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
8806 #endif
8807 .align 32
8808 1: ldx [%g6 + TI_FLAGS], %l5
8809- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8810+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8811 be,pt %icc, rtrap
8812 nop
8813 call syscall_trace_leave
8814@@ -190,7 +190,7 @@ linux_sparc_syscall32:
8815
8816 srl %i5, 0, %o5 ! IEU1
8817 srl %i2, 0, %o2 ! IEU0 Group
8818- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8819+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8820 bne,pn %icc, linux_syscall_trace32 ! CTI
8821 mov %i0, %l5 ! IEU1
8822 call %l7 ! CTI Group brk forced
8823@@ -213,7 +213,7 @@ linux_sparc_syscall:
8824
8825 mov %i3, %o3 ! IEU1
8826 mov %i4, %o4 ! IEU0 Group
8827- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8828+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8829 bne,pn %icc, linux_syscall_trace ! CTI Group
8830 mov %i0, %l5 ! IEU0
8831 2: call %l7 ! CTI Group brk forced
8832@@ -229,7 +229,7 @@ ret_sys_call:
8833
8834 cmp %o0, -ERESTART_RESTARTBLOCK
8835 bgeu,pn %xcc, 1f
8836- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8837+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8838 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8839
8840 2:
8841diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8842index 654e8aa..45f431b 100644
8843--- a/arch/sparc/kernel/sysfs.c
8844+++ b/arch/sparc/kernel/sysfs.c
8845@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8846 return NOTIFY_OK;
8847 }
8848
8849-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8850+static struct notifier_block sysfs_cpu_nb = {
8851 .notifier_call = sysfs_cpu_notify,
8852 };
8853
8854diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8855index a5785ea..405c5f7 100644
8856--- a/arch/sparc/kernel/traps_32.c
8857+++ b/arch/sparc/kernel/traps_32.c
8858@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8859 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8860 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8861
8862+extern void gr_handle_kernel_exploit(void);
8863+
8864 void die_if_kernel(char *str, struct pt_regs *regs)
8865 {
8866 static int die_counter;
8867@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8868 count++ < 30 &&
8869 (((unsigned long) rw) >= PAGE_OFFSET) &&
8870 !(((unsigned long) rw) & 0x7)) {
8871- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8872+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8873 (void *) rw->ins[7]);
8874 rw = (struct reg_window32 *)rw->ins[6];
8875 }
8876 }
8877 printk("Instruction DUMP:");
8878 instruction_dump ((unsigned long *) regs->pc);
8879- if(regs->psr & PSR_PS)
8880+ if(regs->psr & PSR_PS) {
8881+ gr_handle_kernel_exploit();
8882 do_exit(SIGKILL);
8883+ }
8884 do_exit(SIGSEGV);
8885 }
8886
8887diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8888index e7ecf15..6520e65 100644
8889--- a/arch/sparc/kernel/traps_64.c
8890+++ b/arch/sparc/kernel/traps_64.c
8891@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8892 i + 1,
8893 p->trapstack[i].tstate, p->trapstack[i].tpc,
8894 p->trapstack[i].tnpc, p->trapstack[i].tt);
8895- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8896+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8897 }
8898 }
8899
8900@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8901
8902 lvl -= 0x100;
8903 if (regs->tstate & TSTATE_PRIV) {
8904+
8905+#ifdef CONFIG_PAX_REFCOUNT
8906+ if (lvl == 6)
8907+ pax_report_refcount_overflow(regs);
8908+#endif
8909+
8910 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8911 die_if_kernel(buffer, regs);
8912 }
8913@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8914 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8915 {
8916 char buffer[32];
8917-
8918+
8919 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8920 0, lvl, SIGTRAP) == NOTIFY_STOP)
8921 return;
8922
8923+#ifdef CONFIG_PAX_REFCOUNT
8924+ if (lvl == 6)
8925+ pax_report_refcount_overflow(regs);
8926+#endif
8927+
8928 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8929
8930 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8931@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8932 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8933 printk("%s" "ERROR(%d): ",
8934 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8935- printk("TPC<%pS>\n", (void *) regs->tpc);
8936+ printk("TPC<%pA>\n", (void *) regs->tpc);
8937 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8938 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8939 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8940@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8941 smp_processor_id(),
8942 (type & 0x1) ? 'I' : 'D',
8943 regs->tpc);
8944- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8945+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8946 panic("Irrecoverable Cheetah+ parity error.");
8947 }
8948
8949@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8950 smp_processor_id(),
8951 (type & 0x1) ? 'I' : 'D',
8952 regs->tpc);
8953- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8954+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8955 }
8956
8957 struct sun4v_error_entry {
8958@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8959
8960 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8961 regs->tpc, tl);
8962- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8963+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8964 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8965- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8966+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8967 (void *) regs->u_regs[UREG_I7]);
8968 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8969 "pte[%lx] error[%lx]\n",
8970@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8971
8972 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8973 regs->tpc, tl);
8974- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8975+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8976 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8977- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8978+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8979 (void *) regs->u_regs[UREG_I7]);
8980 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8981 "pte[%lx] error[%lx]\n",
8982@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8983 fp = (unsigned long)sf->fp + STACK_BIAS;
8984 }
8985
8986- printk(" [%016lx] %pS\n", pc, (void *) pc);
8987+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8988 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8989 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8990 int index = tsk->curr_ret_stack;
8991 if (tsk->ret_stack && index >= graph) {
8992 pc = tsk->ret_stack[index - graph].ret;
8993- printk(" [%016lx] %pS\n", pc, (void *) pc);
8994+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8995 graph++;
8996 }
8997 }
8998@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8999 return (struct reg_window *) (fp + STACK_BIAS);
9000 }
9001
9002+extern void gr_handle_kernel_exploit(void);
9003+
9004 void die_if_kernel(char *str, struct pt_regs *regs)
9005 {
9006 static int die_counter;
9007@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9008 while (rw &&
9009 count++ < 30 &&
9010 kstack_valid(tp, (unsigned long) rw)) {
9011- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9012+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9013 (void *) rw->ins[7]);
9014
9015 rw = kernel_stack_up(rw);
9016@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9017 }
9018 user_instruction_dump ((unsigned int __user *) regs->tpc);
9019 }
9020- if (regs->tstate & TSTATE_PRIV)
9021+ if (regs->tstate & TSTATE_PRIV) {
9022+ gr_handle_kernel_exploit();
9023 do_exit(SIGKILL);
9024+ }
9025 do_exit(SIGSEGV);
9026 }
9027 EXPORT_SYMBOL(die_if_kernel);
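
This is the receiving end of the addcc/tvs sequences: on sparc64 a software trap with number N is delivered with trap type 0x100 + N, so after bad_trap() subtracts 0x100 a REFCOUNT overflow is exactly lvl == 6 and gets reported instead of falling through to die_if_kernel(). As arithmetic:

/* tvs %icc, 6   ->  trap type 0x100 + 6 = 0x106
 * bad_trap():       lvl = 0x106 - 0x100 = 6
 *                   -> pax_report_refcount_overflow(regs)
 */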
9028diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9029index 8201c25e..072a2a7 100644
9030--- a/arch/sparc/kernel/unaligned_64.c
9031+++ b/arch/sparc/kernel/unaligned_64.c
9032@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9033 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9034
9035 if (__ratelimit(&ratelimit)) {
9036- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9037+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9038 regs->tpc, (void *) regs->tpc);
9039 }
9040 }
9041diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
9042index eb1624b..f9f4ddb 100644
9043--- a/arch/sparc/kernel/us3_cpufreq.c
9044+++ b/arch/sparc/kernel/us3_cpufreq.c
9045@@ -18,14 +18,12 @@
9046 #include <asm/head.h>
9047 #include <asm/timer.h>
9048
9049-static struct cpufreq_driver *cpufreq_us3_driver;
9050-
9051 struct us3_freq_percpu_info {
9052 struct cpufreq_frequency_table table[4];
9053 };
9054
9055 /* Indexed by cpu number. */
9056-static struct us3_freq_percpu_info *us3_freq_table;
9057+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
9058
9059 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
9060 * in the Safari config register.
9061@@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
9062
9063 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
9064 {
9065- if (cpufreq_us3_driver)
9066- us3_set_cpu_divider_index(policy->cpu, 0);
9067+ us3_set_cpu_divider_index(policy->cpu, 0);
9068
9069 return 0;
9070 }
9071
9072+static int __init us3_freq_init(void);
9073+static void __exit us3_freq_exit(void);
9074+
9075+static struct cpufreq_driver cpufreq_us3_driver = {
9076+ .init = us3_freq_cpu_init,
9077+ .verify = us3_freq_verify,
9078+ .target = us3_freq_target,
9079+ .get = us3_freq_get,
9080+ .exit = us3_freq_cpu_exit,
9081+ .owner = THIS_MODULE,
9082+ .name = "UltraSPARC-III",
9083+
9084+};
9085+
9086 static int __init us3_freq_init(void)
9087 {
9088 unsigned long manuf, impl, ver;
9089@@ -213,57 +224,15 @@ static int __init us3_freq_init(void)
9090 (impl == CHEETAH_IMPL ||
9091 impl == CHEETAH_PLUS_IMPL ||
9092 impl == JAGUAR_IMPL ||
9093- impl == PANTHER_IMPL)) {
9094- struct cpufreq_driver *driver;
9095-
9096- ret = -ENOMEM;
9097- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
9098- if (!driver)
9099- goto err_out;
9100-
9101- us3_freq_table = kzalloc(
9102- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
9103- GFP_KERNEL);
9104- if (!us3_freq_table)
9105- goto err_out;
9106-
9107- driver->init = us3_freq_cpu_init;
9108- driver->verify = us3_freq_verify;
9109- driver->target = us3_freq_target;
9110- driver->get = us3_freq_get;
9111- driver->exit = us3_freq_cpu_exit;
9112- driver->owner = THIS_MODULE,
9113- strcpy(driver->name, "UltraSPARC-III");
9114-
9115- cpufreq_us3_driver = driver;
9116- ret = cpufreq_register_driver(driver);
9117- if (ret)
9118- goto err_out;
9119-
9120- return 0;
9121-
9122-err_out:
9123- if (driver) {
9124- kfree(driver);
9125- cpufreq_us3_driver = NULL;
9126- }
9127- kfree(us3_freq_table);
9128- us3_freq_table = NULL;
9129- return ret;
9130- }
9131+ impl == PANTHER_IMPL))
9132+ return cpufreq_register_driver(&cpufreq_us3_driver);
9133
9134 return -ENODEV;
9135 }
9136
9137 static void __exit us3_freq_exit(void)
9138 {
9139- if (cpufreq_us3_driver) {
9140- cpufreq_unregister_driver(cpufreq_us3_driver);
9141- kfree(cpufreq_us3_driver);
9142- cpufreq_us3_driver = NULL;
9143- kfree(us3_freq_table);
9144- us3_freq_table = NULL;
9145- }
9146+ cpufreq_unregister_driver(&cpufreq_us3_driver);
9147 }
9148
9149 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
9150diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9151index 8410065f2..4fd4ca22 100644
9152--- a/arch/sparc/lib/Makefile
9153+++ b/arch/sparc/lib/Makefile
9154@@ -2,7 +2,7 @@
9155 #
9156
9157 asflags-y := -ansi -DST_DIV0=0x02
9158-ccflags-y := -Werror
9159+#ccflags-y := -Werror
9160
9161 lib-$(CONFIG_SPARC32) += ashrdi3.o
9162 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9163diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9164index 85c233d..68500e0 100644
9165--- a/arch/sparc/lib/atomic_64.S
9166+++ b/arch/sparc/lib/atomic_64.S
9167@@ -17,7 +17,12 @@
9168 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9169 BACKOFF_SETUP(%o2)
9170 1: lduw [%o1], %g1
9171- add %g1, %o0, %g7
9172+ addcc %g1, %o0, %g7
9173+
9174+#ifdef CONFIG_PAX_REFCOUNT
9175+ tvs %icc, 6
9176+#endif
9177+
9178 cas [%o1], %g1, %g7
9179 cmp %g1, %g7
9180 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9181@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9182 2: BACKOFF_SPIN(%o2, %o3, 1b)
9183 ENDPROC(atomic_add)
9184
9185+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9186+ BACKOFF_SETUP(%o2)
9187+1: lduw [%o1], %g1
9188+ add %g1, %o0, %g7
9189+ cas [%o1], %g1, %g7
9190+ cmp %g1, %g7
9191+ bne,pn %icc, 2f
9192+ nop
9193+ retl
9194+ nop
9195+2: BACKOFF_SPIN(%o2, %o3, 1b)
9196+ENDPROC(atomic_add_unchecked)
9197+
9198 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9199 BACKOFF_SETUP(%o2)
9200 1: lduw [%o1], %g1
9201- sub %g1, %o0, %g7
9202+ subcc %g1, %o0, %g7
9203+
9204+#ifdef CONFIG_PAX_REFCOUNT
9205+ tvs %icc, 6
9206+#endif
9207+
9208 cas [%o1], %g1, %g7
9209 cmp %g1, %g7
9210 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9211@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9212 2: BACKOFF_SPIN(%o2, %o3, 1b)
9213 ENDPROC(atomic_sub)
9214
9215+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9216+ BACKOFF_SETUP(%o2)
9217+1: lduw [%o1], %g1
9218+ sub %g1, %o0, %g7
9219+ cas [%o1], %g1, %g7
9220+ cmp %g1, %g7
9221+ bne,pn %icc, 2f
9222+ nop
9223+ retl
9224+ nop
9225+2: BACKOFF_SPIN(%o2, %o3, 1b)
9226+ENDPROC(atomic_sub_unchecked)
9227+
9228 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9229 BACKOFF_SETUP(%o2)
9230 1: lduw [%o1], %g1
9231- add %g1, %o0, %g7
9232+ addcc %g1, %o0, %g7
9233+
9234+#ifdef CONFIG_PAX_REFCOUNT
9235+ tvs %icc, 6
9236+#endif
9237+
9238 cas [%o1], %g1, %g7
9239 cmp %g1, %g7
9240 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9241@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9242 2: BACKOFF_SPIN(%o2, %o3, 1b)
9243 ENDPROC(atomic_add_ret)
9244
9245+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9246+ BACKOFF_SETUP(%o2)
9247+1: lduw [%o1], %g1
9248+ addcc %g1, %o0, %g7
9249+ cas [%o1], %g1, %g7
9250+ cmp %g1, %g7
9251+ bne,pn %icc, 2f
9252+ add %g7, %o0, %g7
9253+ sra %g7, 0, %o0
9254+ retl
9255+ nop
9256+2: BACKOFF_SPIN(%o2, %o3, 1b)
9257+ENDPROC(atomic_add_ret_unchecked)
9258+
9259 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9260 BACKOFF_SETUP(%o2)
9261 1: lduw [%o1], %g1
9262- sub %g1, %o0, %g7
9263+ subcc %g1, %o0, %g7
9264+
9265+#ifdef CONFIG_PAX_REFCOUNT
9266+ tvs %icc, 6
9267+#endif
9268+
9269 cas [%o1], %g1, %g7
9270 cmp %g1, %g7
9271 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9272@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
9273 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9274 BACKOFF_SETUP(%o2)
9275 1: ldx [%o1], %g1
9276- add %g1, %o0, %g7
9277+ addcc %g1, %o0, %g7
9278+
9279+#ifdef CONFIG_PAX_REFCOUNT
9280+ tvs %xcc, 6
9281+#endif
9282+
9283 casx [%o1], %g1, %g7
9284 cmp %g1, %g7
9285 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9286@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9287 2: BACKOFF_SPIN(%o2, %o3, 1b)
9288 ENDPROC(atomic64_add)
9289
9290+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9291+ BACKOFF_SETUP(%o2)
9292+1: ldx [%o1], %g1
9293+ addcc %g1, %o0, %g7
9294+ casx [%o1], %g1, %g7
9295+ cmp %g1, %g7
9296+ bne,pn %xcc, 2f
9297+ nop
9298+ retl
9299+ nop
9300+2: BACKOFF_SPIN(%o2, %o3, 1b)
9301+ENDPROC(atomic64_add_unchecked)
9302+
9303 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9304 BACKOFF_SETUP(%o2)
9305 1: ldx [%o1], %g1
9306- sub %g1, %o0, %g7
9307+ subcc %g1, %o0, %g7
9308+
9309+#ifdef CONFIG_PAX_REFCOUNT
9310+ tvs %xcc, 6
9311+#endif
9312+
9313 casx [%o1], %g1, %g7
9314 cmp %g1, %g7
9315 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9316@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9317 2: BACKOFF_SPIN(%o2, %o3, 1b)
9318 ENDPROC(atomic64_sub)
9319
9320+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9321+ BACKOFF_SETUP(%o2)
9322+1: ldx [%o1], %g1
9323+ subcc %g1, %o0, %g7
9324+ casx [%o1], %g1, %g7
9325+ cmp %g1, %g7
9326+ bne,pn %xcc, 2f
9327+ nop
9328+ retl
9329+ nop
9330+2: BACKOFF_SPIN(%o2, %o3, 1b)
9331+ENDPROC(atomic64_sub_unchecked)
9332+
9333 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9334 BACKOFF_SETUP(%o2)
9335 1: ldx [%o1], %g1
9336- add %g1, %o0, %g7
9337+ addcc %g1, %o0, %g7
9338+
9339+#ifdef CONFIG_PAX_REFCOUNT
9340+ tvs %xcc, 6
9341+#endif
9342+
9343 casx [%o1], %g1, %g7
9344 cmp %g1, %g7
9345 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9346@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9347 2: BACKOFF_SPIN(%o2, %o3, 1b)
9348 ENDPROC(atomic64_add_ret)
9349
9350+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9351+ BACKOFF_SETUP(%o2)
9352+1: ldx [%o1], %g1
9353+ addcc %g1, %o0, %g7
9354+ casx [%o1], %g1, %g7
9355+ cmp %g1, %g7
9356+ bne,pn %xcc, 2f
9357+ add %g7, %o0, %g7
9358+ mov %g7, %o0
9359+ retl
9360+ nop
9361+2: BACKOFF_SPIN(%o2, %o3, 1b)
9362+ENDPROC(atomic64_add_ret_unchecked)
9363+
9364 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9365 BACKOFF_SETUP(%o2)
9366 1: ldx [%o1], %g1
9367- sub %g1, %o0, %g7
9368+ subcc %g1, %o0, %g7
9369+
9370+#ifdef CONFIG_PAX_REFCOUNT
9371+ tvs %xcc, 6
9372+#endif
9373+
9374 casx [%o1], %g1, %g7
9375 cmp %g1, %g7
9376 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
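
Each trapping entry point above gains an *_unchecked twin with a plain add/sub and no tvs. These back the atomic_unchecked_t types PaX introduces for counters that are allowed to wrap (statistics, sequence cookies, and the like); a sketch of the header side, whose typedefs live in other hunks of this patch:

typedef struct {
	int counter;
} atomic_unchecked_t;

typedef struct {
	long counter;
} atomic64_unchecked_t;

extern void atomic_add_unchecked(int, atomic_unchecked_t *);
extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);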
9377diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9378index 0c4e35e..745d3e4 100644
9379--- a/arch/sparc/lib/ksyms.c
9380+++ b/arch/sparc/lib/ksyms.c
9381@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9382
9383 /* Atomic counter implementation. */
9384 EXPORT_SYMBOL(atomic_add);
9385+EXPORT_SYMBOL(atomic_add_unchecked);
9386 EXPORT_SYMBOL(atomic_add_ret);
9387+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9388 EXPORT_SYMBOL(atomic_sub);
9389+EXPORT_SYMBOL(atomic_sub_unchecked);
9390 EXPORT_SYMBOL(atomic_sub_ret);
9391 EXPORT_SYMBOL(atomic64_add);
9392+EXPORT_SYMBOL(atomic64_add_unchecked);
9393 EXPORT_SYMBOL(atomic64_add_ret);
9394+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9395 EXPORT_SYMBOL(atomic64_sub);
9396+EXPORT_SYMBOL(atomic64_sub_unchecked);
9397 EXPORT_SYMBOL(atomic64_sub_ret);
9398 EXPORT_SYMBOL(atomic64_dec_if_positive);
9399
9400diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9401index 30c3ecc..736f015 100644
9402--- a/arch/sparc/mm/Makefile
9403+++ b/arch/sparc/mm/Makefile
9404@@ -2,7 +2,7 @@
9405 #
9406
9407 asflags-y := -ansi
9408-ccflags-y := -Werror
9409+#ccflags-y := -Werror
9410
9411 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9412 obj-y += fault_$(BITS).o
9413diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9414index e98bfda..ea8d221 100644
9415--- a/arch/sparc/mm/fault_32.c
9416+++ b/arch/sparc/mm/fault_32.c
9417@@ -21,6 +21,9 @@
9418 #include <linux/perf_event.h>
9419 #include <linux/interrupt.h>
9420 #include <linux/kdebug.h>
9421+#include <linux/slab.h>
9422+#include <linux/pagemap.h>
9423+#include <linux/compiler.h>
9424
9425 #include <asm/page.h>
9426 #include <asm/pgtable.h>
9427@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9428 return safe_compute_effective_address(regs, insn);
9429 }
9430
9431+#ifdef CONFIG_PAX_PAGEEXEC
9432+#ifdef CONFIG_PAX_DLRESOLVE
9433+static void pax_emuplt_close(struct vm_area_struct *vma)
9434+{
9435+ vma->vm_mm->call_dl_resolve = 0UL;
9436+}
9437+
9438+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9439+{
9440+ unsigned int *kaddr;
9441+
9442+ vmf->page = alloc_page(GFP_HIGHUSER);
9443+ if (!vmf->page)
9444+ return VM_FAULT_OOM;
9445+
9446+ kaddr = kmap(vmf->page);
9447+ memset(kaddr, 0, PAGE_SIZE);
9448+ kaddr[0] = 0x9DE3BFA8U; /* save */
9449+ flush_dcache_page(vmf->page);
9450+ kunmap(vmf->page);
9451+ return VM_FAULT_MAJOR;
9452+}
9453+
9454+static const struct vm_operations_struct pax_vm_ops = {
9455+ .close = pax_emuplt_close,
9456+ .fault = pax_emuplt_fault
9457+};
9458+
9459+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9460+{
9461+ int ret;
9462+
9463+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9464+ vma->vm_mm = current->mm;
9465+ vma->vm_start = addr;
9466+ vma->vm_end = addr + PAGE_SIZE;
9467+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9468+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9469+ vma->vm_ops = &pax_vm_ops;
9470+
9471+ ret = insert_vm_struct(current->mm, vma);
9472+ if (ret)
9473+ return ret;
9474+
9475+ ++current->mm->total_vm;
9476+ return 0;
9477+}
9478+#endif
9479+
9480+/*
9481+ * PaX: decide what to do with offenders (regs->pc = fault address)
9482+ *
9483+ * returns 1 when task should be killed
9484+ * 2 when patched PLT trampoline was detected
9485+ * 3 when unpatched PLT trampoline was detected
9486+ */
9487+static int pax_handle_fetch_fault(struct pt_regs *regs)
9488+{
9489+
9490+#ifdef CONFIG_PAX_EMUPLT
9491+ int err;
9492+
9493+ do { /* PaX: patched PLT emulation #1 */
9494+ unsigned int sethi1, sethi2, jmpl;
9495+
9496+ err = get_user(sethi1, (unsigned int *)regs->pc);
9497+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9498+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9499+
9500+ if (err)
9501+ break;
9502+
9503+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9504+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9505+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9506+ {
9507+ unsigned int addr;
9508+
9509+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9510+ addr = regs->u_regs[UREG_G1];
9511+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9512+ regs->pc = addr;
9513+ regs->npc = addr+4;
9514+ return 2;
9515+ }
9516+ } while (0);
9517+
9518+ do { /* PaX: patched PLT emulation #2 */
9519+ unsigned int ba;
9520+
9521+ err = get_user(ba, (unsigned int *)regs->pc);
9522+
9523+ if (err)
9524+ break;
9525+
9526+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9527+ unsigned int addr;
9528+
9529+ if ((ba & 0xFFC00000U) == 0x30800000U)
9530+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9531+ else
9532+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9533+ regs->pc = addr;
9534+ regs->npc = addr+4;
9535+ return 2;
9536+ }
9537+ } while (0);
9538+
9539+ do { /* PaX: patched PLT emulation #3 */
9540+ unsigned int sethi, bajmpl, nop;
9541+
9542+ err = get_user(sethi, (unsigned int *)regs->pc);
9543+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9544+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9545+
9546+ if (err)
9547+ break;
9548+
9549+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9550+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9551+ nop == 0x01000000U)
9552+ {
9553+ unsigned int addr;
9554+
9555+ addr = (sethi & 0x003FFFFFU) << 10;
9556+ regs->u_regs[UREG_G1] = addr;
9557+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9558+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9559+ else
9560+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9561+ regs->pc = addr;
9562+ regs->npc = addr+4;
9563+ return 2;
9564+ }
9565+ } while (0);
9566+
9567+ do { /* PaX: unpatched PLT emulation step 1 */
9568+ unsigned int sethi, ba, nop;
9569+
9570+ err = get_user(sethi, (unsigned int *)regs->pc);
9571+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9572+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9573+
9574+ if (err)
9575+ break;
9576+
9577+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9578+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9579+ nop == 0x01000000U)
9580+ {
9581+ unsigned int addr, save, call;
9582+
9583+ if ((ba & 0xFFC00000U) == 0x30800000U)
9584+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9585+ else
9586+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9587+
9588+ err = get_user(save, (unsigned int *)addr);
9589+ err |= get_user(call, (unsigned int *)(addr+4));
9590+ err |= get_user(nop, (unsigned int *)(addr+8));
9591+ if (err)
9592+ break;
9593+
9594+#ifdef CONFIG_PAX_DLRESOLVE
9595+ if (save == 0x9DE3BFA8U &&
9596+ (call & 0xC0000000U) == 0x40000000U &&
9597+ nop == 0x01000000U)
9598+ {
9599+ struct vm_area_struct *vma;
9600+ unsigned long call_dl_resolve;
9601+
9602+ down_read(&current->mm->mmap_sem);
9603+ call_dl_resolve = current->mm->call_dl_resolve;
9604+ up_read(&current->mm->mmap_sem);
9605+ if (likely(call_dl_resolve))
9606+ goto emulate;
9607+
9608+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9609+
9610+ down_write(&current->mm->mmap_sem);
9611+ if (current->mm->call_dl_resolve) {
9612+ call_dl_resolve = current->mm->call_dl_resolve;
9613+ up_write(&current->mm->mmap_sem);
9614+ if (vma)
9615+ kmem_cache_free(vm_area_cachep, vma);
9616+ goto emulate;
9617+ }
9618+
9619+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9620+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9621+ up_write(&current->mm->mmap_sem);
9622+ if (vma)
9623+ kmem_cache_free(vm_area_cachep, vma);
9624+ return 1;
9625+ }
9626+
9627+ if (pax_insert_vma(vma, call_dl_resolve)) {
9628+ up_write(&current->mm->mmap_sem);
9629+ kmem_cache_free(vm_area_cachep, vma);
9630+ return 1;
9631+ }
9632+
9633+ current->mm->call_dl_resolve = call_dl_resolve;
9634+ up_write(&current->mm->mmap_sem);
9635+
9636+emulate:
9637+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9638+ regs->pc = call_dl_resolve;
9639+ regs->npc = addr+4;
9640+ return 3;
9641+ }
9642+#endif
9643+
9644+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9645+ if ((save & 0xFFC00000U) == 0x05000000U &&
9646+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9647+ nop == 0x01000000U)
9648+ {
9649+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9650+ regs->u_regs[UREG_G2] = addr + 4;
9651+ addr = (save & 0x003FFFFFU) << 10;
9652+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9653+ regs->pc = addr;
9654+ regs->npc = addr+4;
9655+ return 3;
9656+ }
9657+ }
9658+ } while (0);
9659+
9660+ do { /* PaX: unpatched PLT emulation step 2 */
9661+ unsigned int save, call, nop;
9662+
9663+ err = get_user(save, (unsigned int *)(regs->pc-4));
9664+ err |= get_user(call, (unsigned int *)regs->pc);
9665+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9666+ if (err)
9667+ break;
9668+
9669+ if (save == 0x9DE3BFA8U &&
9670+ (call & 0xC0000000U) == 0x40000000U &&
9671+ nop == 0x01000000U)
9672+ {
9673+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9674+
9675+ regs->u_regs[UREG_RETPC] = regs->pc;
9676+ regs->pc = dl_resolve;
9677+ regs->npc = dl_resolve+4;
9678+ return 3;
9679+ }
9680+ } while (0);
9681+#endif
9682+
9683+ return 1;
9684+}
9685+
9686+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9687+{
9688+ unsigned long i;
9689+
9690+ printk(KERN_ERR "PAX: bytes at PC: ");
9691+ for (i = 0; i < 8; i++) {
9692+ unsigned int c;
9693+ if (get_user(c, (unsigned int *)pc+i))
9694+ printk(KERN_CONT "???????? ");
9695+ else
9696+ printk(KERN_CONT "%08x ", c);
9697+ }
9698+ printk("\n");
9699+}
9700+#endif
9701+
9702 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9703 int text_fault)
9704 {
9705@@ -230,6 +504,24 @@ good_area:
9706 if (!(vma->vm_flags & VM_WRITE))
9707 goto bad_area;
9708 } else {
9709+
9710+#ifdef CONFIG_PAX_PAGEEXEC
9711+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9712+ up_read(&mm->mmap_sem);
9713+ switch (pax_handle_fetch_fault(regs)) {
9714+
9715+#ifdef CONFIG_PAX_EMUPLT
9716+ case 2:
9717+ case 3:
9718+ return;
9719+#endif
9720+
9721+ }
9722+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9723+ do_group_exit(SIGKILL);
9724+ }
9725+#endif
9726+
9727 /* Allow reads even for write-only mappings */
9728 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9729 goto bad_area;
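
The bit-twiddling in the decoders above, e.g. (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U), is a branch-free sign extension of the 13-bit simm13 field; the 22-bit branch displacements get the same treatment with 0xFFC00000U/0x00200000U. A sketch of the conventional equivalent, not from the patch:

static inline int sign_extend_simm13(unsigned int insn)
{
	/* keep bits 12:0 and replicate bit 12 upward */
	return (int)(insn << 19) >> 19;
}

The fault_64.c decoders that follow do the same with 64-bit masks and additionally truncate the computed target to 32 bits for TIF_32BIT tasks.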
9730diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9731index 5062ff3..e0b75f3 100644
9732--- a/arch/sparc/mm/fault_64.c
9733+++ b/arch/sparc/mm/fault_64.c
9734@@ -21,6 +21,9 @@
9735 #include <linux/kprobes.h>
9736 #include <linux/kdebug.h>
9737 #include <linux/percpu.h>
9738+#include <linux/slab.h>
9739+#include <linux/pagemap.h>
9740+#include <linux/compiler.h>
9741
9742 #include <asm/page.h>
9743 #include <asm/pgtable.h>
9744@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9745 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9746 regs->tpc);
9747 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9748- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9749+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9750 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9751 dump_stack();
9752 unhandled_fault(regs->tpc, current, regs);
9753@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9754 show_regs(regs);
9755 }
9756
9757+#ifdef CONFIG_PAX_PAGEEXEC
9758+#ifdef CONFIG_PAX_DLRESOLVE
9759+static void pax_emuplt_close(struct vm_area_struct *vma)
9760+{
9761+ vma->vm_mm->call_dl_resolve = 0UL;
9762+}
9763+
9764+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9765+{
9766+ unsigned int *kaddr;
9767+
9768+ vmf->page = alloc_page(GFP_HIGHUSER);
9769+ if (!vmf->page)
9770+ return VM_FAULT_OOM;
9771+
9772+ kaddr = kmap(vmf->page);
9773+ memset(kaddr, 0, PAGE_SIZE);
9774+ kaddr[0] = 0x9DE3BFA8U; /* save */
9775+ flush_dcache_page(vmf->page);
9776+ kunmap(vmf->page);
9777+ return VM_FAULT_MAJOR;
9778+}
9779+
9780+static const struct vm_operations_struct pax_vm_ops = {
9781+ .close = pax_emuplt_close,
9782+ .fault = pax_emuplt_fault
9783+};
9784+
9785+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9786+{
9787+ int ret;
9788+
9789+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9790+ vma->vm_mm = current->mm;
9791+ vma->vm_start = addr;
9792+ vma->vm_end = addr + PAGE_SIZE;
9793+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9794+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9795+ vma->vm_ops = &pax_vm_ops;
9796+
9797+ ret = insert_vm_struct(current->mm, vma);
9798+ if (ret)
9799+ return ret;
9800+
9801+ ++current->mm->total_vm;
9802+ return 0;
9803+}
9804+#endif
9805+
9806+/*
9807+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9808+ *
9809+ * returns 1 when task should be killed
9810+ * 2 when patched PLT trampoline was detected
9811+ * 3 when unpatched PLT trampoline was detected
9812+ */
9813+static int pax_handle_fetch_fault(struct pt_regs *regs)
9814+{
9815+
9816+#ifdef CONFIG_PAX_EMUPLT
9817+ int err;
9818+
9819+ do { /* PaX: patched PLT emulation #1 */
9820+ unsigned int sethi1, sethi2, jmpl;
9821+
9822+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9823+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9824+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9825+
9826+ if (err)
9827+ break;
9828+
9829+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9830+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9831+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9832+ {
9833+ unsigned long addr;
9834+
9835+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9836+ addr = regs->u_regs[UREG_G1];
9837+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9838+
9839+ if (test_thread_flag(TIF_32BIT))
9840+ addr &= 0xFFFFFFFFUL;
9841+
9842+ regs->tpc = addr;
9843+ regs->tnpc = addr+4;
9844+ return 2;
9845+ }
9846+ } while (0);
9847+
9848+ do { /* PaX: patched PLT emulation #2 */
9849+ unsigned int ba;
9850+
9851+ err = get_user(ba, (unsigned int *)regs->tpc);
9852+
9853+ if (err)
9854+ break;
9855+
9856+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9857+ unsigned long addr;
9858+
9859+ if ((ba & 0xFFC00000U) == 0x30800000U)
9860+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9861+ else
9862+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9863+
9864+ if (test_thread_flag(TIF_32BIT))
9865+ addr &= 0xFFFFFFFFUL;
9866+
9867+ regs->tpc = addr;
9868+ regs->tnpc = addr+4;
9869+ return 2;
9870+ }
9871+ } while (0);
9872+
9873+ do { /* PaX: patched PLT emulation #3 */
9874+ unsigned int sethi, bajmpl, nop;
9875+
9876+ err = get_user(sethi, (unsigned int *)regs->tpc);
9877+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9878+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9879+
9880+ if (err)
9881+ break;
9882+
9883+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9884+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9885+ nop == 0x01000000U)
9886+ {
9887+ unsigned long addr;
9888+
9889+ addr = (sethi & 0x003FFFFFU) << 10;
9890+ regs->u_regs[UREG_G1] = addr;
9891+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9892+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9893+ else
9894+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9895+
9896+ if (test_thread_flag(TIF_32BIT))
9897+ addr &= 0xFFFFFFFFUL;
9898+
9899+ regs->tpc = addr;
9900+ regs->tnpc = addr+4;
9901+ return 2;
9902+ }
9903+ } while (0);
9904+
9905+ do { /* PaX: patched PLT emulation #4 */
9906+ unsigned int sethi, mov1, call, mov2;
9907+
9908+ err = get_user(sethi, (unsigned int *)regs->tpc);
9909+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9910+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9911+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9912+
9913+ if (err)
9914+ break;
9915+
9916+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9917+ mov1 == 0x8210000FU &&
9918+ (call & 0xC0000000U) == 0x40000000U &&
9919+ mov2 == 0x9E100001U)
9920+ {
9921+ unsigned long addr;
9922+
9923+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9924+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9925+
9926+ if (test_thread_flag(TIF_32BIT))
9927+ addr &= 0xFFFFFFFFUL;
9928+
9929+ regs->tpc = addr;
9930+ regs->tnpc = addr+4;
9931+ return 2;
9932+ }
9933+ } while (0);
9934+
9935+ do { /* PaX: patched PLT emulation #5 */
9936+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9937+
9938+ err = get_user(sethi, (unsigned int *)regs->tpc);
9939+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9940+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9941+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9942+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9943+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9944+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9945+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9946+
9947+ if (err)
9948+ break;
9949+
9950+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9951+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9952+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9953+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9954+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9955+ sllx == 0x83287020U &&
9956+ jmpl == 0x81C04005U &&
9957+ nop == 0x01000000U)
9958+ {
9959+ unsigned long addr;
9960+
9961+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9962+ regs->u_regs[UREG_G1] <<= 32;
9963+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9964+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9965+ regs->tpc = addr;
9966+ regs->tnpc = addr+4;
9967+ return 2;
9968+ }
9969+ } while (0);
9970+
9971+ do { /* PaX: patched PLT emulation #6 */
9972+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9973+
9974+ err = get_user(sethi, (unsigned int *)regs->tpc);
9975+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9976+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9977+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9978+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9979+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9980+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9981+
9982+ if (err)
9983+ break;
9984+
9985+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9986+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9987+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9988+ sllx == 0x83287020U &&
9989+ (or & 0xFFFFE000U) == 0x8A116000U &&
9990+ jmpl == 0x81C04005U &&
9991+ nop == 0x01000000U)
9992+ {
9993+ unsigned long addr;
9994+
9995+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9996+ regs->u_regs[UREG_G1] <<= 32;
9997+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9998+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9999+ regs->tpc = addr;
10000+ regs->tnpc = addr+4;
10001+ return 2;
10002+ }
10003+ } while (0);
10004+
10005+ do { /* PaX: unpatched PLT emulation step 1 */
10006+ unsigned int sethi, ba, nop;
10007+
10008+ err = get_user(sethi, (unsigned int *)regs->tpc);
10009+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10010+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10011+
10012+ if (err)
10013+ break;
10014+
10015+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10016+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10017+ nop == 0x01000000U)
10018+ {
10019+ unsigned long addr;
10020+ unsigned int save, call;
10021+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10022+
10023+ if ((ba & 0xFFC00000U) == 0x30800000U)
10024+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10025+ else
10026+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10027+
10028+ if (test_thread_flag(TIF_32BIT))
10029+ addr &= 0xFFFFFFFFUL;
10030+
10031+ err = get_user(save, (unsigned int *)addr);
10032+ err |= get_user(call, (unsigned int *)(addr+4));
10033+ err |= get_user(nop, (unsigned int *)(addr+8));
10034+ if (err)
10035+ break;
10036+
10037+#ifdef CONFIG_PAX_DLRESOLVE
10038+ if (save == 0x9DE3BFA8U &&
10039+ (call & 0xC0000000U) == 0x40000000U &&
10040+ nop == 0x01000000U)
10041+ {
10042+ struct vm_area_struct *vma;
10043+ unsigned long call_dl_resolve;
10044+
10045+ down_read(&current->mm->mmap_sem);
10046+ call_dl_resolve = current->mm->call_dl_resolve;
10047+ up_read(&current->mm->mmap_sem);
10048+ if (likely(call_dl_resolve))
10049+ goto emulate;
10050+
10051+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10052+
10053+ down_write(&current->mm->mmap_sem);
10054+ if (current->mm->call_dl_resolve) {
10055+ call_dl_resolve = current->mm->call_dl_resolve;
10056+ up_write(&current->mm->mmap_sem);
10057+ if (vma)
10058+ kmem_cache_free(vm_area_cachep, vma);
10059+ goto emulate;
10060+ }
10061+
10062+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10063+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10064+ up_write(&current->mm->mmap_sem);
10065+ if (vma)
10066+ kmem_cache_free(vm_area_cachep, vma);
10067+ return 1;
10068+ }
10069+
10070+ if (pax_insert_vma(vma, call_dl_resolve)) {
10071+ up_write(&current->mm->mmap_sem);
10072+ kmem_cache_free(vm_area_cachep, vma);
10073+ return 1;
10074+ }
10075+
10076+ current->mm->call_dl_resolve = call_dl_resolve;
10077+ up_write(&current->mm->mmap_sem);
10078+
10079+emulate:
10080+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10081+ regs->tpc = call_dl_resolve;
10082+ regs->tnpc = addr+4;
10083+ return 3;
10084+ }
10085+#endif
10086+
10087+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10088+ if ((save & 0xFFC00000U) == 0x05000000U &&
10089+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10090+ nop == 0x01000000U)
10091+ {
10092+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10093+ regs->u_regs[UREG_G2] = addr + 4;
10094+ addr = (save & 0x003FFFFFU) << 10;
10095+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10096+
10097+ if (test_thread_flag(TIF_32BIT))
10098+ addr &= 0xFFFFFFFFUL;
10099+
10100+ regs->tpc = addr;
10101+ regs->tnpc = addr+4;
10102+ return 3;
10103+ }
10104+
10105+ /* PaX: 64-bit PLT stub */
10106+ err = get_user(sethi1, (unsigned int *)addr);
10107+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10108+ err |= get_user(or1, (unsigned int *)(addr+8));
10109+ err |= get_user(or2, (unsigned int *)(addr+12));
10110+ err |= get_user(sllx, (unsigned int *)(addr+16));
10111+ err |= get_user(add, (unsigned int *)(addr+20));
10112+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10113+ err |= get_user(nop, (unsigned int *)(addr+28));
10114+ if (err)
10115+ break;
10116+
10117+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10118+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10119+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10120+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10121+ sllx == 0x89293020U &&
10122+ add == 0x8A010005U &&
10123+ jmpl == 0x89C14000U &&
10124+ nop == 0x01000000U)
10125+ {
10126+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10127+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10128+ regs->u_regs[UREG_G4] <<= 32;
10129+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10130+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10131+ regs->u_regs[UREG_G4] = addr + 24;
10132+ addr = regs->u_regs[UREG_G5];
10133+ regs->tpc = addr;
10134+ regs->tnpc = addr+4;
10135+ return 3;
10136+ }
10137+ }
10138+ } while (0);
10139+
10140+#ifdef CONFIG_PAX_DLRESOLVE
10141+ do { /* PaX: unpatched PLT emulation step 2 */
10142+ unsigned int save, call, nop;
10143+
10144+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10145+ err |= get_user(call, (unsigned int *)regs->tpc);
10146+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10147+ if (err)
10148+ break;
10149+
10150+ if (save == 0x9DE3BFA8U &&
10151+ (call & 0xC0000000U) == 0x40000000U &&
10152+ nop == 0x01000000U)
10153+ {
10154+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10155+
10156+ if (test_thread_flag(TIF_32BIT))
10157+ dl_resolve &= 0xFFFFFFFFUL;
10158+
10159+ regs->u_regs[UREG_RETPC] = regs->tpc;
10160+ regs->tpc = dl_resolve;
10161+ regs->tnpc = dl_resolve+4;
10162+ return 3;
10163+ }
10164+ } while (0);
10165+#endif
10166+
10167+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10168+ unsigned int sethi, ba, nop;
10169+
10170+ err = get_user(sethi, (unsigned int *)regs->tpc);
10171+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10172+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10173+
10174+ if (err)
10175+ break;
10176+
10177+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10178+ (ba & 0xFFF00000U) == 0x30600000U &&
10179+ nop == 0x01000000U)
10180+ {
10181+ unsigned long addr;
10182+
10183+ addr = (sethi & 0x003FFFFFU) << 10;
10184+ regs->u_regs[UREG_G1] = addr;
10185+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10186+
10187+ if (test_thread_flag(TIF_32BIT))
10188+ addr &= 0xFFFFFFFFUL;
10189+
10190+ regs->tpc = addr;
10191+ regs->tnpc = addr+4;
10192+ return 2;
10193+ }
10194+ } while (0);
10195+
10196+#endif
10197+
10198+ return 1;
10199+}
10200+
10201+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10202+{
10203+ unsigned long i;
10204+
10205+ printk(KERN_ERR "PAX: bytes at PC: ");
10206+ for (i = 0; i < 8; i++) {
10207+ unsigned int c;
10208+ if (get_user(c, (unsigned int *)pc+i))
10209+ printk(KERN_CONT "???????? ");
10210+ else
10211+ printk(KERN_CONT "%08x ", c);
10212+ }
10213+ printk("\n");
10214+}
10215+#endif
10216+
10217 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
10218 {
10219 struct mm_struct *mm = current->mm;
10220@@ -341,6 +804,29 @@ retry:
10221 if (!vma)
10222 goto bad_area;
10223
10224+#ifdef CONFIG_PAX_PAGEEXEC
10225+ /* PaX: detect ITLB misses on non-exec pages */
10226+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
10227+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
10228+ {
10229+ if (address != regs->tpc)
10230+ goto good_area;
10231+
10232+ up_read(&mm->mmap_sem);
10233+ switch (pax_handle_fetch_fault(regs)) {
10234+
10235+#ifdef CONFIG_PAX_EMUPLT
10236+ case 2:
10237+ case 3:
10238+ return;
10239+#endif
10240+
10241+ }
10242+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
10243+ do_group_exit(SIGKILL);
10244+ }
10245+#endif
10246+
10247 /* Pure DTLB misses do not tell us whether the fault causing
10248 * load/store/atomic was a write or not, it only says that there
10249 * was no match. So in such a case we (carefully) read the
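A note on the decoding idiom used throughout pax_handle_fetch_fault above: every branch and call displacement is recovered with the same (((insn | MASK) ^ SIGN) + SIGN) expression. OR-ing with the inverted field mask forces all high bits on; the XOR/ADD pair around the field's sign bit then either keeps those bits (negative displacement) or carries them away (positive one), giving a branch-free sign extension. The unpatched-PLT path pairs this with pax_emuplt_fault further up, whose on-demand page holds a single save instruction for the emulated call into the runtime linker to land on. A standalone sketch of the sign extension (illustration only, not part of the patch):

#include <stdio.h>

/* Decode a 22-bit displacement field the way the PLT emulation does:
 * force the bits above the field on, then XOR/ADD the field's sign
 * bit so they cancel out when the displacement is positive. */
static long extract_disp22(unsigned int insn)
{
	unsigned long mask = 0xFFFFFFFFFFC00000UL;	/* bits 22..63 */
	unsigned long sign = 0x00200000UL;		/* bit 21, the field's sign bit */

	/* <<2 scales instruction words to bytes, as in the patch */
	return (long)((((insn | mask) ^ sign) + sign) << 2);
}

int main(void)
{
	printf("%ld\n", extract_disp22(0x003FFFFF));	/* field == -1, prints -4 */
	printf("%ld\n", extract_disp22(0x00000001));	/* field == +1, prints  4 */
	return 0;
}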
10250diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
10251index d2b5944..bd813f2 100644
10252--- a/arch/sparc/mm/hugetlbpage.c
10253+++ b/arch/sparc/mm/hugetlbpage.c
10254@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10255
10256 info.flags = 0;
10257 info.length = len;
10258- info.low_limit = TASK_UNMAPPED_BASE;
10259+ info.low_limit = mm->mmap_base;
10260 info.high_limit = min(task_size, VA_EXCLUDE_START);
10261 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10262 info.align_offset = 0;
10263@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10264 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10265 VM_BUG_ON(addr != -ENOMEM);
10266 info.low_limit = VA_EXCLUDE_END;
10267+
10268+#ifdef CONFIG_PAX_RANDMMAP
10269+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10270+ info.low_limit += mm->delta_mmap;
10271+#endif
10272+
10273 info.high_limit = task_size;
10274 addr = vm_unmapped_area(&info);
10275 }
10276@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10277 VM_BUG_ON(addr != -ENOMEM);
10278 info.flags = 0;
10279 info.low_limit = TASK_UNMAPPED_BASE;
10280+
10281+#ifdef CONFIG_PAX_RANDMMAP
10282+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10283+ info.low_limit += mm->delta_mmap;
10284+#endif
10285+
10286 info.high_limit = STACK_TOP32;
10287 addr = vm_unmapped_area(&info);
10288 }
10289@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10290 struct mm_struct *mm = current->mm;
10291 struct vm_area_struct *vma;
10292 unsigned long task_size = TASK_SIZE;
10293+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10294
10295 if (test_thread_flag(TIF_32BIT))
10296 task_size = STACK_TOP32;
10297@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10298 return addr;
10299 }
10300
10301+#ifdef CONFIG_PAX_RANDMMAP
10302+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10303+#endif
10304+
10305 if (addr) {
10306 addr = ALIGN(addr, HPAGE_SIZE);
10307 vma = find_vma(mm, addr);
10308- if (task_size - len >= addr &&
10309- (!vma || addr + len <= vma->vm_start))
10310+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10311 return addr;
10312 }
10313 if (mm->get_unmapped_area == arch_get_unmapped_area)
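check_heap_stack_gap() and gr_rand_threadstack_offset() are grsecurity helpers introduced elsewhere in this patch. The hunk above swaps the open-coded "does [addr, addr+len) fit below the next VMA" test for a version that also keeps a guard gap (plus an optional per-thread random offset) between new mappings and a stack growing down over them. A rough standalone sketch of the check, with simplified stand-in types; the real implementation lives in mm/mmap.c in the full patch:

struct vma_sketch {
	unsigned long vm_start;
	unsigned long vm_flags;
};

#define VM_GROWSDOWN_SKETCH 0x0100UL

/* grsec's heap_stack_gap sysctl; the default used here is assumed */
static unsigned long heap_stack_gap_sketch = 64UL * 1024;

static int check_heap_stack_gap_sketch(const struct vma_sketch *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long rand_offset)
{
	if (!vma)
		return 1;		/* nothing above: fits */
	if (addr + len > vma->vm_start)
		return 0;		/* overlaps the next mapping */
	if (vma->vm_flags & VM_GROWSDOWN_SKETCH)
		/* keep a (randomized) gap below a downward-growing stack */
		return addr + len + heap_stack_gap_sketch + rand_offset <= vma->vm_start;
	return 1;
}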
10314diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10315index f4500c6..889656c 100644
10316--- a/arch/tile/include/asm/atomic_64.h
10317+++ b/arch/tile/include/asm/atomic_64.h
10318@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10319
10320 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10321
10322+#define atomic64_read_unchecked(v) atomic64_read(v)
10323+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10324+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10325+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10326+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10327+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10328+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10329+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10330+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10331+
10332 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10333 #define smp_mb__before_atomic_dec() smp_mb()
10334 #define smp_mb__after_atomic_dec() smp_mb()
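For context on the block above: under PaX's REFCOUNT feature the plain atomic64 operations are instrumented to trap on overflow, since reference-count wraps are a classic exploitation primitive, and the *_unchecked spellings opt out for counters that may legitimately wrap (statistics, cookies). tile carries no such instrumentation, so the patch aliases every _unchecked name to the plain operation, and generic code converted to the new names builds unchanged. A toy model of the split (a sketch, not the kernel API):

#include <stdint.h>
#include <assert.h>

typedef struct { int64_t v; } atomic64_sketch;

/* checked flavour: PAX_REFCOUNT would trap on wrap; an assert stands
 * in for the trap here */
static void inc_checked(atomic64_sketch *a)
{
	assert(a->v != INT64_MAX);
	a->v++;
}

/* unchecked flavour: wrapping is explicitly allowed, e.g. for a
 * bytes-transmitted statistic */
static void inc_unchecked(atomic64_sketch *a)
{
	a->v++;
}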
10335diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10336index a9a5299..0fce79e 100644
10337--- a/arch/tile/include/asm/cache.h
10338+++ b/arch/tile/include/asm/cache.h
10339@@ -15,11 +15,12 @@
10340 #ifndef _ASM_TILE_CACHE_H
10341 #define _ASM_TILE_CACHE_H
10342
10343+#include <linux/const.h>
10344 #include <arch/chip.h>
10345
10346 /* bytes per L1 data cache line */
10347 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10348-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10349+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10350
10351 /* bytes per L2 cache line */
10352 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
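The _AC() macro picked up by the new #include <linux/const.h> attaches an integer-constant suffix only when compiling C, so the same header still assembles cleanly (in assembly, 1UL is a syntax error). The payoff is that L1_CACHE_BYTES becomes an unsigned long, so masks such as ~(L1_CACHE_BYTES - 1) keep their high bits instead of being computed at int width. The upstream definition, for reference:

/* from include/linux/const.h (upstream) */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

/* in C:   _AC(1,UL) << L1_CACHE_SHIFT  ==  1UL << L1_CACHE_SHIFT
 * in asm: the suffix is dropped, leaving plain  1 << L1_CACHE_SHIFT */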
10353diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10354index 9ab078a..d6635c2 100644
10355--- a/arch/tile/include/asm/uaccess.h
10356+++ b/arch/tile/include/asm/uaccess.h
10357@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10358 const void __user *from,
10359 unsigned long n)
10360 {
10361- int sz = __compiletime_object_size(to);
10362+ size_t sz = __compiletime_object_size(to);
10363
10364- if (likely(sz == -1 || sz >= n))
10365+ if (likely(sz == (size_t)-1 || sz >= n))
10366 n = _copy_from_user(to, from, n);
10367 else
10368 copy_from_user_overflow();
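__compiletime_object_size() wraps __builtin_object_size(), which yields (size_t)-1 when the object's size is unknown. Storing that in an int both truncates sizes that do not fit and leaves the sz >= n comparison resting on an implicit sign extension; the patch keeps the value in size_t and compares the sentinel explicitly. A small demonstration of the two spellings:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t n = 32;
	int sz_old = -1;		/* "size unknown" sentinel in a signed int */
	size_t sz_new = (size_t)-1;	/* same sentinel, kept in the builtin's own type */

	/* the old form only passes because sz_old sign-extends to
	 * SIZE_MAX inside the comparison; the new form states that
	 * explicitly and can represent objects larger than INT_MAX */
	printf("old: %d  new: %d\n",
	       sz_old == -1 || (size_t)sz_old >= n,
	       sz_new == (size_t)-1 || sz_new >= n);
	return 0;
}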
10369diff --git a/arch/um/Makefile b/arch/um/Makefile
10370index 133f7de..1d6f2f1 100644
10371--- a/arch/um/Makefile
10372+++ b/arch/um/Makefile
10373@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10374 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10375 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10376
10377+ifdef CONSTIFY_PLUGIN
10378+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10379+endif
10380+
10381 #This will adjust *FLAGS accordingly to the platform.
10382 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10383
10384diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10385index 19e1bdd..3665b77 100644
10386--- a/arch/um/include/asm/cache.h
10387+++ b/arch/um/include/asm/cache.h
10388@@ -1,6 +1,7 @@
10389 #ifndef __UM_CACHE_H
10390 #define __UM_CACHE_H
10391
10392+#include <linux/const.h>
10393
10394 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10395 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10396@@ -12,6 +13,6 @@
10397 # define L1_CACHE_SHIFT 5
10398 #endif
10399
10400-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10401+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10402
10403 #endif
10404diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10405index 2e0a6b1..a64d0f5 100644
10406--- a/arch/um/include/asm/kmap_types.h
10407+++ b/arch/um/include/asm/kmap_types.h
10408@@ -8,6 +8,6 @@
10409
10410 /* No more #include "asm/arch/kmap_types.h" ! */
10411
10412-#define KM_TYPE_NR 14
10413+#define KM_TYPE_NR 15
10414
10415 #endif
10416diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10417index 5ff53d9..5850cdf 100644
10418--- a/arch/um/include/asm/page.h
10419+++ b/arch/um/include/asm/page.h
10420@@ -14,6 +14,9 @@
10421 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10422 #define PAGE_MASK (~(PAGE_SIZE-1))
10423
10424+#define ktla_ktva(addr) (addr)
10425+#define ktva_ktla(addr) (addr)
10426+
10427 #ifndef __ASSEMBLY__
10428
10429 struct page;
10430diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10431index 0032f92..cd151e0 100644
10432--- a/arch/um/include/asm/pgtable-3level.h
10433+++ b/arch/um/include/asm/pgtable-3level.h
10434@@ -58,6 +58,7 @@
10435 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10436 #define pud_populate(mm, pud, pmd) \
10437 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10438+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10439
10440 #ifdef CONFIG_64BIT
10441 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10442diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10443index b462b13..e7a19aa 100644
10444--- a/arch/um/kernel/process.c
10445+++ b/arch/um/kernel/process.c
10446@@ -386,22 +386,6 @@ int singlestepping(void * t)
10447 return 2;
10448 }
10449
10450-/*
10451- * Only x86 and x86_64 have an arch_align_stack().
10452- * All other arches have "#define arch_align_stack(x) (x)"
10453- * in their asm/system.h
10454- * As this is included in UML from asm-um/system-generic.h,
10455- * we can use it to behave as the subarch does.
10456- */
10457-#ifndef arch_align_stack
10458-unsigned long arch_align_stack(unsigned long sp)
10459-{
10460- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10461- sp -= get_random_int() % 8192;
10462- return sp & ~0xf;
10463-}
10464-#endif
10465-
10466 unsigned long get_wchan(struct task_struct *p)
10467 {
10468 unsigned long stack_page, sp, ip;
10469diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10470index ad8f795..2c7eec6 100644
10471--- a/arch/unicore32/include/asm/cache.h
10472+++ b/arch/unicore32/include/asm/cache.h
10473@@ -12,8 +12,10 @@
10474 #ifndef __UNICORE_CACHE_H__
10475 #define __UNICORE_CACHE_H__
10476
10477-#define L1_CACHE_SHIFT (5)
10478-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10479+#include <linux/const.h>
10480+
10481+#define L1_CACHE_SHIFT 5
10482+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10483
10484 /*
10485 * Memory returned by kmalloc() may be used for DMA, so we must make
10486diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10487index 0694d09..b58b3aa 100644
10488--- a/arch/x86/Kconfig
10489+++ b/arch/x86/Kconfig
10490@@ -238,7 +238,7 @@ config X86_HT
10491
10492 config X86_32_LAZY_GS
10493 def_bool y
10494- depends on X86_32 && !CC_STACKPROTECTOR
10495+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10496
10497 config ARCH_HWEIGHT_CFLAGS
10498 string
10499@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
10500
10501 config X86_MSR
10502 tristate "/dev/cpu/*/msr - Model-specific register support"
10503+ depends on !GRKERNSEC_KMEM
10504 ---help---
10505 This device gives privileged processes access to the x86
10506 Model-Specific Registers (MSRs). It is a character device with
10507@@ -1054,7 +1055,7 @@ choice
10508
10509 config NOHIGHMEM
10510 bool "off"
10511- depends on !X86_NUMAQ
10512+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10513 ---help---
10514 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10515 However, the address space of 32-bit x86 processors is only 4
10516@@ -1091,7 +1092,7 @@ config NOHIGHMEM
10517
10518 config HIGHMEM4G
10519 bool "4GB"
10520- depends on !X86_NUMAQ
10521+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10522 ---help---
10523 Select this if you have a 32-bit processor and between 1 and 4
10524 gigabytes of physical RAM.
10525@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
10526 hex
10527 default 0xB0000000 if VMSPLIT_3G_OPT
10528 default 0x80000000 if VMSPLIT_2G
10529- default 0x78000000 if VMSPLIT_2G_OPT
10530+ default 0x70000000 if VMSPLIT_2G_OPT
10531 default 0x40000000 if VMSPLIT_1G
10532 default 0xC0000000
10533 depends on X86_32
10534@@ -1542,6 +1543,7 @@ config SECCOMP
10535
10536 config CC_STACKPROTECTOR
10537 bool "Enable -fstack-protector buffer overflow detection"
10538+ depends on X86_64 || !PAX_MEMORY_UDEREF
10539 ---help---
10540 This option turns on the -fstack-protector GCC feature. This
10541 feature puts, at the beginning of functions, a canary value on
10542@@ -1599,6 +1601,7 @@ config KEXEC_JUMP
10543 config PHYSICAL_START
10544 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10545 default "0x1000000"
10546+ range 0x400000 0x40000000
10547 ---help---
10548 This gives the physical address where the kernel is loaded.
10549
10550@@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
10551 config PHYSICAL_ALIGN
10552 hex "Alignment value to which kernel should be aligned" if X86_32
10553 default "0x1000000"
10554+ range 0x400000 0x1000000 if PAX_KERNEXEC
10555 range 0x2000 0x1000000
10556 ---help---
10557 This value puts the alignment restrictions on physical address
10558@@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
10559 If unsure, say N.
10560
10561 config COMPAT_VDSO
10562- def_bool y
10563+ def_bool n
10564 prompt "Compat VDSO support"
10565 depends on X86_32 || IA32_EMULATION
10566+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10567 ---help---
10568 Map the 32-bit VDSO to the predictable old-style address too.
10569
10570diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10571index c026cca..14657ae 100644
10572--- a/arch/x86/Kconfig.cpu
10573+++ b/arch/x86/Kconfig.cpu
10574@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10575
10576 config X86_F00F_BUG
10577 def_bool y
10578- depends on M586MMX || M586TSC || M586 || M486
10579+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10580
10581 config X86_INVD_BUG
10582 def_bool y
10583@@ -327,7 +327,7 @@ config X86_INVD_BUG
10584
10585 config X86_ALIGNMENT_16
10586 def_bool y
10587- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10588+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10589
10590 config X86_INTEL_USERCOPY
10591 def_bool y
10592@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10593 # generates cmov.
10594 config X86_CMOV
10595 def_bool y
10596- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10597+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10598
10599 config X86_MINIMUM_CPU_FAMILY
10600 int
10601diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10602index b322f12..652d0d9 100644
10603--- a/arch/x86/Kconfig.debug
10604+++ b/arch/x86/Kconfig.debug
10605@@ -84,7 +84,7 @@ config X86_PTDUMP
10606 config DEBUG_RODATA
10607 bool "Write protect kernel read-only data structures"
10608 default y
10609- depends on DEBUG_KERNEL
10610+ depends on DEBUG_KERNEL && BROKEN
10611 ---help---
10612 Mark the kernel read-only data as write-protected in the pagetables,
10613 in order to catch accidental (and incorrect) writes to such const
10614@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10615
10616 config DEBUG_SET_MODULE_RONX
10617 bool "Set loadable kernel module data as NX and text as RO"
10618- depends on MODULES
10619+ depends on MODULES && BROKEN
10620 ---help---
10621 This option helps catch unintended modifications to loadable
10622 kernel module's text and read-only data. It also prevents execution
10623@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10624
10625 config DEBUG_STRICT_USER_COPY_CHECKS
10626 bool "Strict copy size checks"
10627- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10628+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10629 ---help---
10630 Enabling this option turns a certain set of sanity checks for user
10631 copy operations into compile time failures.
10632diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10633index e71fc42..7829607 100644
10634--- a/arch/x86/Makefile
10635+++ b/arch/x86/Makefile
10636@@ -50,6 +50,7 @@ else
10637 UTS_MACHINE := x86_64
10638 CHECKFLAGS += -D__x86_64__ -m64
10639
10640+ biarch := $(call cc-option,-m64)
10641 KBUILD_AFLAGS += -m64
10642 KBUILD_CFLAGS += -m64
10643
10644@@ -230,3 +231,12 @@ define archhelp
10645 echo ' FDARGS="..." arguments for the booted kernel'
10646 echo ' FDINITRD=file initrd for the booted kernel'
10647 endef
10648+
10649+define OLD_LD
10650+
10651+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10652+*** Please upgrade your binutils to 2.18 or newer
10653+endef
10654+
10655+archprepare:
10656+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10657diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10658index 379814b..add62ce 100644
10659--- a/arch/x86/boot/Makefile
10660+++ b/arch/x86/boot/Makefile
10661@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10662 $(call cc-option, -fno-stack-protector) \
10663 $(call cc-option, -mpreferred-stack-boundary=2)
10664 KBUILD_CFLAGS += $(call cc-option, -m32)
10665+ifdef CONSTIFY_PLUGIN
10666+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10667+endif
10668 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10669 GCOV_PROFILE := n
10670
10671diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10672index 878e4b9..20537ab 100644
10673--- a/arch/x86/boot/bitops.h
10674+++ b/arch/x86/boot/bitops.h
10675@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10676 u8 v;
10677 const u32 *p = (const u32 *)addr;
10678
10679- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10680+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10681 return v;
10682 }
10683
10684@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10685
10686 static inline void set_bit(int nr, void *addr)
10687 {
10688- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10689+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10690 }
10691
10692 #endif /* BOOT_BITOPS_H */
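The asm-to-asm volatile conversions in this and the following boot files (boot.h, cpucheck.c) address a real compiler freedom: an asm statement without volatile may be subjected to common-subexpression elimination, hoisted, or deleted outright if its outputs appear unused, which is wrong for instructions with side effects or ones that read mutable machine state (CPUID, MSRs, control registers). A minimal illustration:

/* both read the time-stamp counter, but only the volatile version is
 * guaranteed to execute at every call site; GCC may merge the plain
 * one with an earlier, textually identical asm */
static inline unsigned long long rdtsc_maybe_merged(void)
{
	unsigned int lo, hi;
	asm("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}

static inline unsigned long long rdtsc_every_time(void)
{
	unsigned int lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}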
10693diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10694index 18997e5..83d9c67 100644
10695--- a/arch/x86/boot/boot.h
10696+++ b/arch/x86/boot/boot.h
10697@@ -85,7 +85,7 @@ static inline void io_delay(void)
10698 static inline u16 ds(void)
10699 {
10700 u16 seg;
10701- asm("movw %%ds,%0" : "=rm" (seg));
10702+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10703 return seg;
10704 }
10705
10706@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10707 static inline int memcmp(const void *s1, const void *s2, size_t len)
10708 {
10709 u8 diff;
10710- asm("repe; cmpsb; setnz %0"
10711+ asm volatile("repe; cmpsb; setnz %0"
10712 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10713 return diff;
10714 }
10715diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10716index 8a84501..b2d165f 100644
10717--- a/arch/x86/boot/compressed/Makefile
10718+++ b/arch/x86/boot/compressed/Makefile
10719@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10720 KBUILD_CFLAGS += $(cflags-y)
10721 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10722 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10723+ifdef CONSTIFY_PLUGIN
10724+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10725+endif
10726
10727 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10728 GCOV_PROFILE := n
10729diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10730index c205035..5853587 100644
10731--- a/arch/x86/boot/compressed/eboot.c
10732+++ b/arch/x86/boot/compressed/eboot.c
10733@@ -150,7 +150,6 @@ again:
10734 *addr = max_addr;
10735 }
10736
10737-free_pool:
10738 efi_call_phys1(sys_table->boottime->free_pool, map);
10739
10740 fail:
10741@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10742 if (i == map_size / desc_size)
10743 status = EFI_NOT_FOUND;
10744
10745-free_pool:
10746 efi_call_phys1(sys_table->boottime->free_pool, map);
10747 fail:
10748 return status;
10749diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10750index 1e3184f..0d11e2e 100644
10751--- a/arch/x86/boot/compressed/head_32.S
10752+++ b/arch/x86/boot/compressed/head_32.S
10753@@ -118,7 +118,7 @@ preferred_addr:
10754 notl %eax
10755 andl %eax, %ebx
10756 #else
10757- movl $LOAD_PHYSICAL_ADDR, %ebx
10758+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10759 #endif
10760
10761 /* Target address to relocate to for decompression */
10762@@ -204,7 +204,7 @@ relocated:
10763 * and where it was actually loaded.
10764 */
10765 movl %ebp, %ebx
10766- subl $LOAD_PHYSICAL_ADDR, %ebx
10767+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10768 jz 2f /* Nothing to be done if loaded at compiled addr. */
10769 /*
10770 * Process relocations.
10771@@ -212,8 +212,7 @@ relocated:
10772
10773 1: subl $4, %edi
10774 movl (%edi), %ecx
10775- testl %ecx, %ecx
10776- jz 2f
10777+ jecxz 2f
10778 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10779 jmp 1b
10780 2:
10781diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10782index f5d1aaa..cce11dc 100644
10783--- a/arch/x86/boot/compressed/head_64.S
10784+++ b/arch/x86/boot/compressed/head_64.S
10785@@ -91,7 +91,7 @@ ENTRY(startup_32)
10786 notl %eax
10787 andl %eax, %ebx
10788 #else
10789- movl $LOAD_PHYSICAL_ADDR, %ebx
10790+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10791 #endif
10792
10793 /* Target address to relocate to for decompression */
10794@@ -273,7 +273,7 @@ preferred_addr:
10795 notq %rax
10796 andq %rax, %rbp
10797 #else
10798- movq $LOAD_PHYSICAL_ADDR, %rbp
10799+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10800 #endif
10801
10802 /* Target address to relocate to for decompression */
10803diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10804index 88f7ff6..ed695dd 100644
10805--- a/arch/x86/boot/compressed/misc.c
10806+++ b/arch/x86/boot/compressed/misc.c
10807@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10808 case PT_LOAD:
10809 #ifdef CONFIG_RELOCATABLE
10810 dest = output;
10811- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10812+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10813 #else
10814 dest = (void *)(phdr->p_paddr);
10815 #endif
10816@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10817 error("Destination address too large");
10818 #endif
10819 #ifndef CONFIG_RELOCATABLE
10820- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10821+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10822 error("Wrong destination address");
10823 #endif
10824
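On the ____LOAD_PHYSICAL_ADDR renames in these boot files: the full patch layers the macros in asm/boot.h so that the four-underscore name always denotes the raw, align-rounded physical load address, while under i386 KERNEXEC the usual LOAD_PHYSICAL_ADDR additionally carries the kernel-text bias for virtual-address users. Physical-address computations in the decompressor and the boot header must therefore use the raw variant. A sketch of the layering; the KERNEXEC arm below is paraphrased, not quoted:

/* upstream rounding, now under the four-underscore name (assumed) */
#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
			+ (CONFIG_PHYSICAL_ALIGN - 1)) \
			& ~(CONFIG_PHYSICAL_ALIGN - 1))

#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
/* virtual users see a text-biased address (exact bias paraphrased) */
#define LOAD_PHYSICAL_ADDR (____LOAD_PHYSICAL_ADDR /* + kernel text offset */)
#else
#define LOAD_PHYSICAL_ADDR ____LOAD_PHYSICAL_ADDR
#endif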
10825diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10826index 4d3ff03..e4972ff 100644
10827--- a/arch/x86/boot/cpucheck.c
10828+++ b/arch/x86/boot/cpucheck.c
10829@@ -74,7 +74,7 @@ static int has_fpu(void)
10830 u16 fcw = -1, fsw = -1;
10831 u32 cr0;
10832
10833- asm("movl %%cr0,%0" : "=r" (cr0));
10834+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10835 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10836 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10837 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10838@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10839 {
10840 u32 f0, f1;
10841
10842- asm("pushfl ; "
10843+ asm volatile("pushfl ; "
10844 "pushfl ; "
10845 "popl %0 ; "
10846 "movl %0,%1 ; "
10847@@ -115,7 +115,7 @@ static void get_flags(void)
10848 set_bit(X86_FEATURE_FPU, cpu.flags);
10849
10850 if (has_eflag(X86_EFLAGS_ID)) {
10851- asm("cpuid"
10852+ asm volatile("cpuid"
10853 : "=a" (max_intel_level),
10854 "=b" (cpu_vendor[0]),
10855 "=d" (cpu_vendor[1]),
10856@@ -124,7 +124,7 @@ static void get_flags(void)
10857
10858 if (max_intel_level >= 0x00000001 &&
10859 max_intel_level <= 0x0000ffff) {
10860- asm("cpuid"
10861+ asm volatile("cpuid"
10862 : "=a" (tfms),
10863 "=c" (cpu.flags[4]),
10864 "=d" (cpu.flags[0])
10865@@ -136,7 +136,7 @@ static void get_flags(void)
10866 cpu.model += ((tfms >> 16) & 0xf) << 4;
10867 }
10868
10869- asm("cpuid"
10870+ asm volatile("cpuid"
10871 : "=a" (max_amd_level)
10872 : "a" (0x80000000)
10873 : "ebx", "ecx", "edx");
10874@@ -144,7 +144,7 @@ static void get_flags(void)
10875 if (max_amd_level >= 0x80000001 &&
10876 max_amd_level <= 0x8000ffff) {
10877 u32 eax = 0x80000001;
10878- asm("cpuid"
10879+ asm volatile("cpuid"
10880 : "+a" (eax),
10881 "=c" (cpu.flags[6]),
10882 "=d" (cpu.flags[1])
10883@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10884 u32 ecx = MSR_K7_HWCR;
10885 u32 eax, edx;
10886
10887- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10888+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10889 eax &= ~(1 << 15);
10890- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10891+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10892
10893 get_flags(); /* Make sure it really did something */
10894 err = check_flags();
10895@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10896 u32 ecx = MSR_VIA_FCR;
10897 u32 eax, edx;
10898
10899- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10900+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10901 eax |= (1<<1)|(1<<7);
10902- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10903+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10904
10905 set_bit(X86_FEATURE_CX8, cpu.flags);
10906 err = check_flags();
10907@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10908 u32 eax, edx;
10909 u32 level = 1;
10910
10911- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10912- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10913- asm("cpuid"
10914+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10915+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10916+ asm volatile("cpuid"
10917 : "+a" (level), "=d" (cpu.flags[0])
10918 : : "ecx", "ebx");
10919- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10920+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10921
10922 err = check_flags();
10923 }
10924diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10925index 944ce59..87ee37a 100644
10926--- a/arch/x86/boot/header.S
10927+++ b/arch/x86/boot/header.S
10928@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10929 # single linked list of
10930 # struct setup_data
10931
10932-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10933+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10934
10935 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10936+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10937+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10938+#else
10939 #define VO_INIT_SIZE (VO__end - VO__text)
10940+#endif
10941 #if ZO_INIT_SIZE > VO_INIT_SIZE
10942 #define INIT_SIZE ZO_INIT_SIZE
10943 #else
10944diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10945index db75d07..8e6d0af 100644
10946--- a/arch/x86/boot/memory.c
10947+++ b/arch/x86/boot/memory.c
10948@@ -19,7 +19,7 @@
10949
10950 static int detect_memory_e820(void)
10951 {
10952- int count = 0;
10953+ unsigned int count = 0;
10954 struct biosregs ireg, oreg;
10955 struct e820entry *desc = boot_params.e820_map;
10956 static struct e820entry buf; /* static so it is zeroed */
10957diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10958index 11e8c6e..fdbb1ed 100644
10959--- a/arch/x86/boot/video-vesa.c
10960+++ b/arch/x86/boot/video-vesa.c
10961@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10962
10963 boot_params.screen_info.vesapm_seg = oreg.es;
10964 boot_params.screen_info.vesapm_off = oreg.di;
10965+ boot_params.screen_info.vesapm_size = oreg.cx;
10966 }
10967
10968 /*
10969diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10970index 43eda28..5ab5fdb 100644
10971--- a/arch/x86/boot/video.c
10972+++ b/arch/x86/boot/video.c
10973@@ -96,7 +96,7 @@ static void store_mode_params(void)
10974 static unsigned int get_entry(void)
10975 {
10976 char entry_buf[4];
10977- int i, len = 0;
10978+ unsigned int i, len = 0;
10979 int key;
10980 unsigned int v;
10981
10982diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10983index 5b577d5..3c1fed4 100644
10984--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10985+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10986@@ -8,6 +8,8 @@
10987 * including this sentence is retained in full.
10988 */
10989
10990+#include <asm/alternative-asm.h>
10991+
10992 .extern crypto_ft_tab
10993 .extern crypto_it_tab
10994 .extern crypto_fl_tab
10995@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
10996 je B192; \
10997 leaq 32(r9),r9;
10998
10999+#define ret pax_force_retaddr 0, 1; ret
11000+
11001 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11002 movq r1,r2; \
11003 movq r3,r4; \
11004diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11005index 3470624..201259d 100644
11006--- a/arch/x86/crypto/aesni-intel_asm.S
11007+++ b/arch/x86/crypto/aesni-intel_asm.S
11008@@ -31,6 +31,7 @@
11009
11010 #include <linux/linkage.h>
11011 #include <asm/inst.h>
11012+#include <asm/alternative-asm.h>
11013
11014 #ifdef __x86_64__
11015 .data
11016@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
11017 pop %r14
11018 pop %r13
11019 pop %r12
11020+ pax_force_retaddr 0, 1
11021 ret
11022+ENDPROC(aesni_gcm_dec)
11023
11024
11025 /*****************************************************************************
11026@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
11027 pop %r14
11028 pop %r13
11029 pop %r12
11030+ pax_force_retaddr 0, 1
11031 ret
11032+ENDPROC(aesni_gcm_enc)
11033
11034 #endif
11035
11036@@ -1714,6 +1719,7 @@ _key_expansion_256a:
11037 pxor %xmm1, %xmm0
11038 movaps %xmm0, (TKEYP)
11039 add $0x10, TKEYP
11040+ pax_force_retaddr_bts
11041 ret
11042
11043 .align 4
11044@@ -1738,6 +1744,7 @@ _key_expansion_192a:
11045 shufps $0b01001110, %xmm2, %xmm1
11046 movaps %xmm1, 0x10(TKEYP)
11047 add $0x20, TKEYP
11048+ pax_force_retaddr_bts
11049 ret
11050
11051 .align 4
11052@@ -1757,6 +1764,7 @@ _key_expansion_192b:
11053
11054 movaps %xmm0, (TKEYP)
11055 add $0x10, TKEYP
11056+ pax_force_retaddr_bts
11057 ret
11058
11059 .align 4
11060@@ -1769,6 +1777,7 @@ _key_expansion_256b:
11061 pxor %xmm1, %xmm2
11062 movaps %xmm2, (TKEYP)
11063 add $0x10, TKEYP
11064+ pax_force_retaddr_bts
11065 ret
11066
11067 /*
11068@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
11069 #ifndef __x86_64__
11070 popl KEYP
11071 #endif
11072+ pax_force_retaddr 0, 1
11073 ret
11074+ENDPROC(aesni_set_key)
11075
11076 /*
11077 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
11078@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
11079 popl KLEN
11080 popl KEYP
11081 #endif
11082+ pax_force_retaddr 0, 1
11083 ret
11084+ENDPROC(aesni_enc)
11085
11086 /*
11087 * _aesni_enc1: internal ABI
11088@@ -1959,6 +1972,7 @@ _aesni_enc1:
11089 AESENC KEY STATE
11090 movaps 0x70(TKEYP), KEY
11091 AESENCLAST KEY STATE
11092+ pax_force_retaddr_bts
11093 ret
11094
11095 /*
11096@@ -2067,6 +2081,7 @@ _aesni_enc4:
11097 AESENCLAST KEY STATE2
11098 AESENCLAST KEY STATE3
11099 AESENCLAST KEY STATE4
11100+ pax_force_retaddr_bts
11101 ret
11102
11103 /*
11104@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
11105 popl KLEN
11106 popl KEYP
11107 #endif
11108+ pax_force_retaddr 0, 1
11109 ret
11110+ENDPROC(aesni_dec)
11111
11112 /*
11113 * _aesni_dec1: internal ABI
11114@@ -2146,6 +2163,7 @@ _aesni_dec1:
11115 AESDEC KEY STATE
11116 movaps 0x70(TKEYP), KEY
11117 AESDECLAST KEY STATE
11118+ pax_force_retaddr_bts
11119 ret
11120
11121 /*
11122@@ -2254,6 +2272,7 @@ _aesni_dec4:
11123 AESDECLAST KEY STATE2
11124 AESDECLAST KEY STATE3
11125 AESDECLAST KEY STATE4
11126+ pax_force_retaddr_bts
11127 ret
11128
11129 /*
11130@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
11131 popl KEYP
11132 popl LEN
11133 #endif
11134+ pax_force_retaddr 0, 1
11135 ret
11136+ENDPROC(aesni_ecb_enc)
11137
11138 /*
11139 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11140@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
11141 popl KEYP
11142 popl LEN
11143 #endif
11144+ pax_force_retaddr 0, 1
11145 ret
11146+ENDPROC(aesni_ecb_dec)
11147
11148 /*
11149 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11150@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
11151 popl LEN
11152 popl IVP
11153 #endif
11154+ pax_force_retaddr 0, 1
11155 ret
11156+ENDPROC(aesni_cbc_enc)
11157
11158 /*
11159 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11160@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
11161 popl LEN
11162 popl IVP
11163 #endif
11164+ pax_force_retaddr 0, 1
11165 ret
11166+ENDPROC(aesni_cbc_dec)
11167
11168 #ifdef __x86_64__
11169 .align 16
11170@@ -2526,6 +2553,7 @@ _aesni_inc_init:
11171 mov $1, TCTR_LOW
11172 MOVQ_R64_XMM TCTR_LOW INC
11173 MOVQ_R64_XMM CTR TCTR_LOW
11174+ pax_force_retaddr_bts
11175 ret
11176
11177 /*
11178@@ -2554,6 +2582,7 @@ _aesni_inc:
11179 .Linc_low:
11180 movaps CTR, IV
11181 PSHUFB_XMM BSWAP_MASK IV
11182+ pax_force_retaddr_bts
11183 ret
11184
11185 /*
11186@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
11187 .Lctr_enc_ret:
11188 movups IV, (IVP)
11189 .Lctr_enc_just_ret:
11190+ pax_force_retaddr 0, 1
11191 ret
11192+ENDPROC(aesni_ctr_enc)
11193 #endif
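The pax_force_retaddr insertions running through these crypto files exist because the KERNEXEC GCC plugin can only instrument compiler-generated code; hand-written assembly is invisible to it, so every ret needs the macro added by hand (it comes from the asm/alternative-asm.h additions elsewhere in this patch). The idea, roughly: tag the saved return address with the kernel marker bit, so a forged userland address on the stack turns non-canonical and faults instead of executing. A paraphrase of the amd64 "bts" variant, valid only at a point where %rsp holds the return address, i.e. immediately before ret:

/* paraphrase, not the macro's literal body; the in-tree macro also
 * takes numeric arguments and has an OR-based alternative */
#define pax_force_retaddr_sketch() \
	asm volatile("btsq $63, (%%rsp)" ::: "cc", "memory")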
11194diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11195index 391d245..67f35c2 100644
11196--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
11197+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11198@@ -20,6 +20,8 @@
11199 *
11200 */
11201
11202+#include <asm/alternative-asm.h>
11203+
11204 .file "blowfish-x86_64-asm.S"
11205 .text
11206
11207@@ -151,9 +153,11 @@ __blowfish_enc_blk:
11208 jnz __enc_xor;
11209
11210 write_block();
11211+ pax_force_retaddr 0, 1
11212 ret;
11213 __enc_xor:
11214 xor_block();
11215+ pax_force_retaddr 0, 1
11216 ret;
11217
11218 .align 8
11219@@ -188,6 +192,7 @@ blowfish_dec_blk:
11220
11221 movq %r11, %rbp;
11222
11223+ pax_force_retaddr 0, 1
11224 ret;
11225
11226 /**********************************************************************
11227@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
11228
11229 popq %rbx;
11230 popq %rbp;
11231+ pax_force_retaddr 0, 1
11232 ret;
11233
11234 __enc_xor4:
11235@@ -349,6 +355,7 @@ __enc_xor4:
11236
11237 popq %rbx;
11238 popq %rbp;
11239+ pax_force_retaddr 0, 1
11240 ret;
11241
11242 .align 8
11243@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
11244 popq %rbx;
11245 popq %rbp;
11246
11247+ pax_force_retaddr 0, 1
11248 ret;
11249
11250diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11251index 0b33743..7a56206 100644
11252--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11253+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11254@@ -20,6 +20,8 @@
11255 *
11256 */
11257
11258+#include <asm/alternative-asm.h>
11259+
11260 .file "camellia-x86_64-asm_64.S"
11261 .text
11262
11263@@ -229,12 +231,14 @@ __enc_done:
11264 enc_outunpack(mov, RT1);
11265
11266 movq RRBP, %rbp;
11267+ pax_force_retaddr 0, 1
11268 ret;
11269
11270 __enc_xor:
11271 enc_outunpack(xor, RT1);
11272
11273 movq RRBP, %rbp;
11274+ pax_force_retaddr 0, 1
11275 ret;
11276
11277 .global camellia_dec_blk;
11278@@ -275,6 +279,7 @@ __dec_rounds16:
11279 dec_outunpack();
11280
11281 movq RRBP, %rbp;
11282+ pax_force_retaddr 0, 1
11283 ret;
11284
11285 /**********************************************************************
11286@@ -468,6 +473,7 @@ __enc2_done:
11287
11288 movq RRBP, %rbp;
11289 popq %rbx;
11290+ pax_force_retaddr 0, 1
11291 ret;
11292
11293 __enc2_xor:
11294@@ -475,6 +481,7 @@ __enc2_xor:
11295
11296 movq RRBP, %rbp;
11297 popq %rbx;
11298+ pax_force_retaddr 0, 1
11299 ret;
11300
11301 .global camellia_dec_blk_2way;
11302@@ -517,4 +524,5 @@ __dec2_rounds16:
11303
11304 movq RRBP, %rbp;
11305 movq RXOR, %rbx;
11306+ pax_force_retaddr 0, 1
11307 ret;
11308diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11309index 15b00ac..2071784 100644
11310--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11311+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11312@@ -23,6 +23,8 @@
11313 *
11314 */
11315
11316+#include <asm/alternative-asm.h>
11317+
11318 .file "cast5-avx-x86_64-asm_64.S"
11319
11320 .extern cast_s1
11321@@ -281,6 +283,7 @@ __skip_enc:
11322 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11323 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11324
11325+ pax_force_retaddr 0, 1
11326 ret;
11327
11328 .align 16
11329@@ -353,6 +356,7 @@ __dec_tail:
11330 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11331 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11332
11333+ pax_force_retaddr 0, 1
11334 ret;
11335
11336 __skip_dec:
11337@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
11338 vmovdqu RR4, (6*4*4)(%r11);
11339 vmovdqu RL4, (7*4*4)(%r11);
11340
11341+ pax_force_retaddr
11342 ret;
11343
11344 .align 16
11345@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
11346 vmovdqu RR4, (6*4*4)(%r11);
11347 vmovdqu RL4, (7*4*4)(%r11);
11348
11349+ pax_force_retaddr
11350 ret;
11351
11352 .align 16
11353@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
11354
11355 popq %r12;
11356
11357+ pax_force_retaddr
11358 ret;
11359
11360 .align 16
11361@@ -555,4 +562,5 @@ cast5_ctr_16way:
11362
11363 popq %r12;
11364
11365+ pax_force_retaddr
11366 ret;
11367diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11368index 2569d0d..637c289 100644
11369--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11370+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11371@@ -23,6 +23,8 @@
11372 *
11373 */
11374
11375+#include <asm/alternative-asm.h>
11376+
11377 #include "glue_helper-asm-avx.S"
11378
11379 .file "cast6-avx-x86_64-asm_64.S"
11380@@ -294,6 +296,7 @@ __cast6_enc_blk8:
11381 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11382 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11383
11384+ pax_force_retaddr 0, 1
11385 ret;
11386
11387 .align 8
11388@@ -340,6 +343,7 @@ __cast6_dec_blk8:
11389 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11390 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11391
11392+ pax_force_retaddr 0, 1
11393 ret;
11394
11395 .align 8
11396@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
11397
11398 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11399
11400+ pax_force_retaddr
11401 ret;
11402
11403 .align 8
11404@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
11405
11406 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11407
11408+ pax_force_retaddr
11409 ret;
11410
11411 .align 8
11412@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
11413
11414 popq %r12;
11415
11416+ pax_force_retaddr
11417 ret;
11418
11419 .align 8
11420@@ -436,4 +443,5 @@ cast6_ctr_8way:
11421
11422 popq %r12;
11423
11424+ pax_force_retaddr
11425 ret;
11426diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11427index 6214a9b..1f4fc9a 100644
11428--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11429+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11430@@ -1,3 +1,5 @@
11431+#include <asm/alternative-asm.h>
11432+
11433 # enter ECRYPT_encrypt_bytes
11434 .text
11435 .p2align 5
11436@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
11437 add %r11,%rsp
11438 mov %rdi,%rax
11439 mov %rsi,%rdx
11440+ pax_force_retaddr 0, 1
11441 ret
11442 # bytesatleast65:
11443 ._bytesatleast65:
11444@@ -891,6 +894,7 @@ ECRYPT_keysetup:
11445 add %r11,%rsp
11446 mov %rdi,%rax
11447 mov %rsi,%rdx
11448+ pax_force_retaddr
11449 ret
11450 # enter ECRYPT_ivsetup
11451 .text
11452@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
11453 add %r11,%rsp
11454 mov %rdi,%rax
11455 mov %rsi,%rdx
11456+ pax_force_retaddr
11457 ret
11458diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11459index 02b0e9f..cf4cf5c 100644
11460--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11461+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11462@@ -24,6 +24,8 @@
11463 *
11464 */
11465
11466+#include <asm/alternative-asm.h>
11467+
11468 #include "glue_helper-asm-avx.S"
11469
11470 .file "serpent-avx-x86_64-asm_64.S"
11471@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
11472 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11473 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11474
11475+ pax_force_retaddr
11476 ret;
11477
11478 .align 8
11479@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
11480 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11481 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11482
11483+ pax_force_retaddr
11484 ret;
11485
11486 .align 8
11487@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
11488
11489 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11490
11491+ pax_force_retaddr
11492 ret;
11493
11494 .align 8
11495@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
11496
11497 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11498
11499+ pax_force_retaddr
11500 ret;
11501
11502 .align 8
11503@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
11504
11505 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11506
11507+ pax_force_retaddr
11508 ret;
11509
11510 .align 8
11511@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
11512
11513 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11514
11515+ pax_force_retaddr
11516 ret;
11517diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11518index 3ee1ff0..cbc568b 100644
11519--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11520+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11521@@ -24,6 +24,8 @@
11522 *
11523 */
11524
11525+#include <asm/alternative-asm.h>
11526+
11527 .file "serpent-sse2-x86_64-asm_64.S"
11528 .text
11529
11530@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
11531 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11532 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11533
11534+ pax_force_retaddr
11535 ret;
11536
11537 __enc_xor8:
11538 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11539 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11540
11541+ pax_force_retaddr
11542 ret;
11543
11544 .align 8
11545@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
11546 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11547 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11548
11549+ pax_force_retaddr
11550 ret;
11551diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11552index 49d6987..df66bd4 100644
11553--- a/arch/x86/crypto/sha1_ssse3_asm.S
11554+++ b/arch/x86/crypto/sha1_ssse3_asm.S
11555@@ -28,6 +28,8 @@
11556 * (at your option) any later version.
11557 */
11558
11559+#include <asm/alternative-asm.h>
11560+
11561 #define CTX %rdi // arg1
11562 #define BUF %rsi // arg2
11563 #define CNT %rdx // arg3
11564@@ -104,6 +106,7 @@
11565 pop %r12
11566 pop %rbp
11567 pop %rbx
11568+ pax_force_retaddr 0, 1
11569 ret
11570
11571 .size \name, .-\name
11572diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11573index ebac16b..8092eb9 100644
11574--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11575+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11576@@ -23,6 +23,8 @@
11577 *
11578 */
11579
11580+#include <asm/alternative-asm.h>
11581+
11582 #include "glue_helper-asm-avx.S"
11583
11584 .file "twofish-avx-x86_64-asm_64.S"
11585@@ -283,6 +285,7 @@ __twofish_enc_blk8:
11586 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11587 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11588
11589+ pax_force_retaddr 0, 1
11590 ret;
11591
11592 .align 8
11593@@ -324,6 +327,7 @@ __twofish_dec_blk8:
11594 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11595 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11596
11597+ pax_force_retaddr 0, 1
11598 ret;
11599
11600 .align 8
11601@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
11602
11603 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11604
11605+ pax_force_retaddr 0, 1
11606 ret;
11607
11608 .align 8
11609@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
11610
11611 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11612
11613+ pax_force_retaddr 0, 1
11614 ret;
11615
11616 .align 8
11617@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
11618
11619 popq %r12;
11620
11621+ pax_force_retaddr 0, 1
11622 ret;
11623
11624 .align 8
11625@@ -420,4 +427,5 @@ twofish_ctr_8way:
11626
11627 popq %r12;
11628
11629+ pax_force_retaddr 0, 1
11630 ret;
11631diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11632index 5b012a2..36d5364 100644
11633--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11634+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11635@@ -20,6 +20,8 @@
11636 *
11637 */
11638
11639+#include <asm/alternative-asm.h>
11640+
11641 .file "twofish-x86_64-asm-3way.S"
11642 .text
11643
11644@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
11645 popq %r13;
11646 popq %r14;
11647 popq %r15;
11648+ pax_force_retaddr 0, 1
11649 ret;
11650
11651 __enc_xor3:
11652@@ -271,6 +274,7 @@ __enc_xor3:
11653 popq %r13;
11654 popq %r14;
11655 popq %r15;
11656+ pax_force_retaddr 0, 1
11657 ret;
11658
11659 .global twofish_dec_blk_3way
11660@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
11661 popq %r13;
11662 popq %r14;
11663 popq %r15;
11664+ pax_force_retaddr 0, 1
11665 ret;
11666
11667diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11668index 7bcf3fc..f53832f 100644
11669--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11670+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11671@@ -21,6 +21,7 @@
11672 .text
11673
11674 #include <asm/asm-offsets.h>
11675+#include <asm/alternative-asm.h>
11676
11677 #define a_offset 0
11678 #define b_offset 4
11679@@ -268,6 +269,7 @@ twofish_enc_blk:
11680
11681 popq R1
11682 movq $1,%rax
11683+ pax_force_retaddr 0, 1
11684 ret
11685
11686 twofish_dec_blk:
11687@@ -319,4 +321,5 @@ twofish_dec_blk:
11688
11689 popq R1
11690 movq $1,%rax
11691+ pax_force_retaddr 0, 1
11692 ret
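
The crypto hunks above all make one change: pull in asm/alternative-asm.h and put pax_force_retaddr in front of every ret, re-tagging the return address just before it is consumed (the macros themselves appear in the alternative-asm.h hunk further down). A minimal C sketch of the BTS flavour, under the assumption that legitimate kernel text lives in the sign-extended upper half of the address space, so bit 63 is already set on a genuine return address:

#include <stdint.h>
#include <stdio.h>

/* what "btsq $63, (%rsp)" does to the saved return address */
static uint64_t pax_force_retaddr_bts(uint64_t retaddr)
{
	return retaddr | (1ULL << 63);
}

int main(void)
{
	uint64_t kernel_ra = 0xffffffff81234567ULL;	/* bit 63 set: unchanged */
	uint64_t forged_ra = 0x0000000000601000ULL;	/* ROP-planted user address */

	printf("%#llx -> %#llx (still canonical)\n",
	       (unsigned long long)kernel_ra,
	       (unsigned long long)pax_force_retaddr_bts(kernel_ra));
	printf("%#llx -> %#llx (non-canonical, ret faults)\n",
	       (unsigned long long)forged_ra,
	       (unsigned long long)pax_force_retaddr_bts(forged_ra));
	return 0;
}

On x86-64, 0x8000000000601000 is a non-canonical address, so the tagged ret raises #GP instead of transferring control to attacker-chosen code.
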
11693diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11694index a703af1..f5b9c36 100644
11695--- a/arch/x86/ia32/ia32_aout.c
11696+++ b/arch/x86/ia32/ia32_aout.c
11697@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11698 unsigned long dump_start, dump_size;
11699 struct user32 dump;
11700
11701+ memset(&dump, 0, sizeof(dump));
11702+
11703 fs = get_fs();
11704 set_fs(KERNEL_DS);
11705 has_dumped = 1;
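
The one-line memset above zeroes the on-stack struct user32 before the dumper fills it in, so structure padding and any fields the dump path never writes cannot leak stale kernel stack contents into the core file. A user-space sketch of the same defensive idiom (struct and names hypothetical):

#include <string.h>
#include <unistd.h>

struct record {		/* hypothetical: 'c' is followed by padding on most ABIs */
	int a;
	char c;
	long b;
};

static ssize_t write_record(int fd, int a, char c, long b)
{
	struct record r;

	memset(&r, 0, sizeof(r));	/* padding would otherwise hold stale stack bytes */
	r.a = a;
	r.c = c;
	r.b = b;
	return write(fd, &r, sizeof(r));
}
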
11706diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11707index a1daf4a..f8c4537 100644
11708--- a/arch/x86/ia32/ia32_signal.c
11709+++ b/arch/x86/ia32/ia32_signal.c
11710@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
11711 sp -= frame_size;
11712 /* Align the stack pointer according to the i386 ABI,
11713 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11714- sp = ((sp + 4) & -16ul) - 4;
11715+ sp = ((sp - 12) & -16ul) - 4;
11716 return (void __user *) sp;
11717 }
11718
11719@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
11720 * These are actually not used anymore, but left because some
11721 * gdb versions depend on them as a marker.
11722 */
11723- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11724+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11725 } put_user_catch(err);
11726
11727 if (err)
11728@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11729 0xb8,
11730 __NR_ia32_rt_sigreturn,
11731 0x80cd,
11732- 0,
11733+ 0
11734 };
11735
11736 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
11737@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11738
11739 if (ka->sa.sa_flags & SA_RESTORER)
11740 restorer = ka->sa.sa_restorer;
11741+ else if (current->mm->context.vdso)
11742+ /* Return stub is in 32bit vsyscall page */
11743+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11744 else
11745- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11746- rt_sigreturn);
11747+ restorer = &frame->retcode;
11748 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11749
11750 /*
11751 * Not actually used anymore, but left because some gdb
11752 * versions need it.
11753 */
11754- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11755+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11756 } put_user_catch(err);
11757
11758 err |= copy_siginfo_to_user32(&frame->info, info);
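
Both alignment formulas in get_sigframe() keep the invariant stated in the comment, ((sp + 4) & 15) == 0 at handler entry. Algebraically, (sp - 12) & -16ul equals ((sp + 4) & -16ul) - 16, so the patched form simply places the frame one full 16-byte slot lower and, in particular, can never return sp itself. A short self-check of that identity:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned long sp = 4096; sp < 4112; sp++) {	/* covers every sp mod 16 */
		unsigned long old = ((sp + 4) & -16ul) - 4;
		unsigned long new = ((sp - 12) & -16ul) - 4;

		assert(((old + 4) & 15) == 0);	/* i386 ABI: (sp + 4) % 16 == 0 */
		assert(((new + 4) & 15) == 0);
		assert(new == old - 16);	/* patched form sits one slot lower */
	}
	puts("identity holds");
	return 0;
}
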
11759diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11760index 142c4ce..19b683f 100644
11761--- a/arch/x86/ia32/ia32entry.S
11762+++ b/arch/x86/ia32/ia32entry.S
11763@@ -15,8 +15,10 @@
11764 #include <asm/irqflags.h>
11765 #include <asm/asm.h>
11766 #include <asm/smap.h>
11767+#include <asm/pgtable.h>
11768 #include <linux/linkage.h>
11769 #include <linux/err.h>
11770+#include <asm/alternative-asm.h>
11771
11772 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11773 #include <linux/elf-em.h>
11774@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11775 ENDPROC(native_irq_enable_sysexit)
11776 #endif
11777
11778+ .macro pax_enter_kernel_user
11779+ pax_set_fptr_mask
11780+#ifdef CONFIG_PAX_MEMORY_UDEREF
11781+ call pax_enter_kernel_user
11782+#endif
11783+ .endm
11784+
11785+ .macro pax_exit_kernel_user
11786+#ifdef CONFIG_PAX_MEMORY_UDEREF
11787+ call pax_exit_kernel_user
11788+#endif
11789+#ifdef CONFIG_PAX_RANDKSTACK
11790+ pushq %rax
11791+ pushq %r11
11792+ call pax_randomize_kstack
11793+ popq %r11
11794+ popq %rax
11795+#endif
11796+ .endm
11797+
11798+.macro pax_erase_kstack
11799+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11800+ call pax_erase_kstack
11801+#endif
11802+.endm
11803+
11804 /*
11805 * 32bit SYSENTER instruction entry.
11806 *
11807@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11808 CFI_REGISTER rsp,rbp
11809 SWAPGS_UNSAFE_STACK
11810 movq PER_CPU_VAR(kernel_stack), %rsp
11811- addq $(KERNEL_STACK_OFFSET),%rsp
11812- /*
11813- * No need to follow this irqs on/off section: the syscall
11814- * disabled irqs, here we enable it straight after entry:
11815- */
11816- ENABLE_INTERRUPTS(CLBR_NONE)
11817 movl %ebp,%ebp /* zero extension */
11818 pushq_cfi $__USER32_DS
11819 /*CFI_REL_OFFSET ss,0*/
11820@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11821 CFI_REL_OFFSET rsp,0
11822 pushfq_cfi
11823 /*CFI_REL_OFFSET rflags,0*/
11824- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11825- CFI_REGISTER rip,r10
11826+ orl $X86_EFLAGS_IF,(%rsp)
11827+ GET_THREAD_INFO(%r11)
11828+ movl TI_sysenter_return(%r11), %r11d
11829+ CFI_REGISTER rip,r11
11830 pushq_cfi $__USER32_CS
11831 /*CFI_REL_OFFSET cs,0*/
11832 movl %eax, %eax
11833- pushq_cfi %r10
11834+ pushq_cfi %r11
11835 CFI_REL_OFFSET rip,0
11836 pushq_cfi %rax
11837 cld
11838 SAVE_ARGS 0,1,0
11839+ pax_enter_kernel_user
11840+
11841+#ifdef CONFIG_PAX_RANDKSTACK
11842+ pax_erase_kstack
11843+#endif
11844+
11845+ /*
11846+ * No need to follow this irqs on/off section: the syscall
11847+ * disabled irqs, here we enable it straight after entry:
11848+ */
11849+ ENABLE_INTERRUPTS(CLBR_NONE)
11850 /* no need to do an access_ok check here because rbp has been
11851 32bit zero extended */
11852+
11853+#ifdef CONFIG_PAX_MEMORY_UDEREF
11854+ mov $PAX_USER_SHADOW_BASE,%r11
11855+ add %r11,%rbp
11856+#endif
11857+
11858 ASM_STAC
11859 1: movl (%rbp),%ebp
11860 _ASM_EXTABLE(1b,ia32_badarg)
11861 ASM_CLAC
11862- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11863- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11864+ GET_THREAD_INFO(%r11)
11865+ orl $TS_COMPAT,TI_status(%r11)
11866+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11867 CFI_REMEMBER_STATE
11868 jnz sysenter_tracesys
11869 cmpq $(IA32_NR_syscalls-1),%rax
11870@@ -162,12 +204,15 @@ sysenter_do_call:
11871 sysenter_dispatch:
11872 call *ia32_sys_call_table(,%rax,8)
11873 movq %rax,RAX-ARGOFFSET(%rsp)
11874+ GET_THREAD_INFO(%r11)
11875 DISABLE_INTERRUPTS(CLBR_NONE)
11876 TRACE_IRQS_OFF
11877- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11878+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11879 jnz sysexit_audit
11880 sysexit_from_sys_call:
11881- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11882+ pax_exit_kernel_user
11883+ pax_erase_kstack
11884+ andl $~TS_COMPAT,TI_status(%r11)
11885 /* clear IF, that popfq doesn't enable interrupts early */
11886 andl $~0x200,EFLAGS-R11(%rsp)
11887 movl RIP-R11(%rsp),%edx /* User %eip */
11888@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11889 movl %eax,%esi /* 2nd arg: syscall number */
11890 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11891 call __audit_syscall_entry
11892+
11893+ pax_erase_kstack
11894+
11895 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11896 cmpq $(IA32_NR_syscalls-1),%rax
11897 ja ia32_badsys
11898@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11899 .endm
11900
11901 .macro auditsys_exit exit
11902- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11903+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11904 jnz ia32_ret_from_sys_call
11905 TRACE_IRQS_ON
11906 ENABLE_INTERRUPTS(CLBR_NONE)
11907@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11908 1: setbe %al /* 1 if error, 0 if not */
11909 movzbl %al,%edi /* zero-extend that into %edi */
11910 call __audit_syscall_exit
11911+ GET_THREAD_INFO(%r11)
11912 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11913 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11914 DISABLE_INTERRUPTS(CLBR_NONE)
11915 TRACE_IRQS_OFF
11916- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11917+ testl %edi,TI_flags(%r11)
11918 jz \exit
11919 CLEAR_RREGS -ARGOFFSET
11920 jmp int_with_check
11921@@ -237,7 +286,7 @@ sysexit_audit:
11922
11923 sysenter_tracesys:
11924 #ifdef CONFIG_AUDITSYSCALL
11925- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11926+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11927 jz sysenter_auditsys
11928 #endif
11929 SAVE_REST
11930@@ -249,6 +298,9 @@ sysenter_tracesys:
11931 RESTORE_REST
11932 cmpq $(IA32_NR_syscalls-1),%rax
11933 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11934+
11935+ pax_erase_kstack
11936+
11937 jmp sysenter_do_call
11938 CFI_ENDPROC
11939 ENDPROC(ia32_sysenter_target)
11940@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11941 ENTRY(ia32_cstar_target)
11942 CFI_STARTPROC32 simple
11943 CFI_SIGNAL_FRAME
11944- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11945+ CFI_DEF_CFA rsp,0
11946 CFI_REGISTER rip,rcx
11947 /*CFI_REGISTER rflags,r11*/
11948 SWAPGS_UNSAFE_STACK
11949 movl %esp,%r8d
11950 CFI_REGISTER rsp,r8
11951 movq PER_CPU_VAR(kernel_stack),%rsp
11952+ SAVE_ARGS 8*6,0,0
11953+ pax_enter_kernel_user
11954+
11955+#ifdef CONFIG_PAX_RANDKSTACK
11956+ pax_erase_kstack
11957+#endif
11958+
11959 /*
11960 * No need to follow this irqs on/off section: the syscall
11961 * disabled irqs and here we enable it straight after entry:
11962 */
11963 ENABLE_INTERRUPTS(CLBR_NONE)
11964- SAVE_ARGS 8,0,0
11965 movl %eax,%eax /* zero extension */
11966 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11967 movq %rcx,RIP-ARGOFFSET(%rsp)
11968@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11969 /* no need to do an access_ok check here because r8 has been
11970 32bit zero extended */
11971 /* hardware stack frame is complete now */
11972+
11973+#ifdef CONFIG_PAX_MEMORY_UDEREF
11974+ mov $PAX_USER_SHADOW_BASE,%r11
11975+ add %r11,%r8
11976+#endif
11977+
11978 ASM_STAC
11979 1: movl (%r8),%r9d
11980 _ASM_EXTABLE(1b,ia32_badarg)
11981 ASM_CLAC
11982- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11983- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11984+ GET_THREAD_INFO(%r11)
11985+ orl $TS_COMPAT,TI_status(%r11)
11986+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11987 CFI_REMEMBER_STATE
11988 jnz cstar_tracesys
11989 cmpq $IA32_NR_syscalls-1,%rax
11990@@ -319,12 +384,15 @@ cstar_do_call:
11991 cstar_dispatch:
11992 call *ia32_sys_call_table(,%rax,8)
11993 movq %rax,RAX-ARGOFFSET(%rsp)
11994+ GET_THREAD_INFO(%r11)
11995 DISABLE_INTERRUPTS(CLBR_NONE)
11996 TRACE_IRQS_OFF
11997- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11998+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11999 jnz sysretl_audit
12000 sysretl_from_sys_call:
12001- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12002+ pax_exit_kernel_user
12003+ pax_erase_kstack
12004+ andl $~TS_COMPAT,TI_status(%r11)
12005 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
12006 movl RIP-ARGOFFSET(%rsp),%ecx
12007 CFI_REGISTER rip,rcx
12008@@ -352,7 +420,7 @@ sysretl_audit:
12009
12010 cstar_tracesys:
12011 #ifdef CONFIG_AUDITSYSCALL
12012- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12013+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12014 jz cstar_auditsys
12015 #endif
12016 xchgl %r9d,%ebp
12017@@ -366,6 +434,9 @@ cstar_tracesys:
12018 xchgl %ebp,%r9d
12019 cmpq $(IA32_NR_syscalls-1),%rax
12020 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
12021+
12022+ pax_erase_kstack
12023+
12024 jmp cstar_do_call
12025 END(ia32_cstar_target)
12026
12027@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
12028 CFI_REL_OFFSET rip,RIP-RIP
12029 PARAVIRT_ADJUST_EXCEPTION_FRAME
12030 SWAPGS
12031- /*
12032- * No need to follow this irqs on/off section: the syscall
12033- * disabled irqs and here we enable it straight after entry:
12034- */
12035- ENABLE_INTERRUPTS(CLBR_NONE)
12036 movl %eax,%eax
12037 pushq_cfi %rax
12038 cld
12039 /* note the registers are not zero extended to the sf.
12040 this could be a problem. */
12041 SAVE_ARGS 0,1,0
12042- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12043- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12044+ pax_enter_kernel_user
12045+
12046+#ifdef CONFIG_PAX_RANDKSTACK
12047+ pax_erase_kstack
12048+#endif
12049+
12050+ /*
12051+ * No need to follow this irqs on/off section: the syscall
12052+ * disabled irqs and here we enable it straight after entry:
12053+ */
12054+ ENABLE_INTERRUPTS(CLBR_NONE)
12055+ GET_THREAD_INFO(%r11)
12056+ orl $TS_COMPAT,TI_status(%r11)
12057+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12058 jnz ia32_tracesys
12059 cmpq $(IA32_NR_syscalls-1),%rax
12060 ja ia32_badsys
12061@@ -442,6 +520,9 @@ ia32_tracesys:
12062 RESTORE_REST
12063 cmpq $(IA32_NR_syscalls-1),%rax
12064 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
12065+
12066+ pax_erase_kstack
12067+
12068 jmp ia32_do_call
12069 END(ia32_syscall)
12070
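
Three recurring hooks run through the entry paths above: pax_enter_kernel_user and pax_exit_kernel_user (UDEREF), pax_erase_kstack (STACKLEAK), and the PAX_USER_SHADOW_BASE adjustment, which shifts a user-supplied pointer into the shadow half of the address space before the kernel dereferences it, so a plain unshifted user address touched from kernel mode has no mapping and faults. A conceptual C sketch of that last step, with an illustrative base value rather than the real constant:

#include <stdint.h>

#define PAX_USER_SHADOW_BASE 0xffff880000000000ULL	/* illustrative only */

/* mirrors "mov $PAX_USER_SHADOW_BASE,%r11; add %r11,%rbp; movl (%rbp),%ebp" */
static inline uint32_t fetch_sixth_arg(uintptr_t user_ebp)
{
	const uint32_t *shadow =
		(const uint32_t *)(user_ebp + PAX_USER_SHADOW_BASE);
	return *shadow;	/* only the shadow alias of user memory is mapped */
}
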
12071diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
12072index d0b689b..6811ddc 100644
12073--- a/arch/x86/ia32/sys_ia32.c
12074+++ b/arch/x86/ia32/sys_ia32.c
12075@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
12076 */
12077 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
12078 {
12079- typeof(ubuf->st_uid) uid = 0;
12080- typeof(ubuf->st_gid) gid = 0;
12081+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
12082+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
12083 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
12084 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
12085 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
12086@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12087 mm_segment_t old_fs = get_fs();
12088
12089 set_fs(KERNEL_DS);
12090- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
12091+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
12092 set_fs(old_fs);
12093 if (put_compat_timespec(&t, interval))
12094 return -EFAULT;
12095@@ -313,13 +313,13 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12096 asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
12097 compat_size_t sigsetsize)
12098 {
12099- sigset_t s;
12100+ sigset_t s = { };
12101 compat_sigset_t s32;
12102 int ret;
12103 mm_segment_t old_fs = get_fs();
12104
12105 set_fs(KERNEL_DS);
12106- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
12107+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
12108 set_fs(old_fs);
12109 if (!ret) {
12110 switch (_NSIG_WORDS) {
12111@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
12112 if (copy_siginfo_from_user32(&info, uinfo))
12113 return -EFAULT;
12114 set_fs(KERNEL_DS);
12115- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
12116+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
12117 set_fs(old_fs);
12118 return ret;
12119 }
12120@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
12121 return -EFAULT;
12122
12123 set_fs(KERNEL_DS);
12124- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
12125+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
12126 count);
12127 set_fs(old_fs);
12128
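
The sys_ia32.c hunks are all one idiom: stage the result in a kernel-local temporary, lift the address limit with set_fs(KERNEL_DS) so the native syscall accepts that kernel pointer, restore the limit, then convert to the compat layout; the __force_user cast only tells sparse the address-space override is intentional. A standalone model of the shape (every name below is a hypothetical stand-in for the kernel primitive of the same role):

typedef int mm_segment_t;
#define USER_DS   0
#define KERNEL_DS 1

static mm_segment_t current_fs = USER_DS;
static mm_segment_t get_fs(void) { return current_fs; }
static void set_fs(mm_segment_t fs) { current_fs = fs; }

/* "native syscall": rejects out-of-range pointers unless the limit is lifted */
static long native_fn(long *val)
{
	if (current_fs != KERNEL_DS)
		return -14;	/* -EFAULT under the USER_DS limit check */
	*val = 123;
	return 0;
}

long compat_fn(long *result)
{
	long v = 0;			/* zeroed, like the sigset_t fix above */
	mm_segment_t old_fs = get_fs();
	long ret;

	set_fs(KERNEL_DS);		/* let native_fn take the kernel pointer &v */
	ret = native_fn(&v);
	set_fs(old_fs);

	if (!ret)
		*result = v;		/* the real wrappers copy_to_user() here */
	return ret;
}
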
12129diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
12130index 372231c..a5aa1a1 100644
12131--- a/arch/x86/include/asm/alternative-asm.h
12132+++ b/arch/x86/include/asm/alternative-asm.h
12133@@ -18,6 +18,45 @@
12134 .endm
12135 #endif
12136
12137+#ifdef KERNEXEC_PLUGIN
12138+ .macro pax_force_retaddr_bts rip=0
12139+ btsq $63,\rip(%rsp)
12140+ .endm
12141+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12142+ .macro pax_force_retaddr rip=0, reload=0
12143+ btsq $63,\rip(%rsp)
12144+ .endm
12145+ .macro pax_force_fptr ptr
12146+ btsq $63,\ptr
12147+ .endm
12148+ .macro pax_set_fptr_mask
12149+ .endm
12150+#endif
12151+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
12152+ .macro pax_force_retaddr rip=0, reload=0
12153+ .if \reload
12154+ pax_set_fptr_mask
12155+ .endif
12156+ orq %r10,\rip(%rsp)
12157+ .endm
12158+ .macro pax_force_fptr ptr
12159+ orq %r10,\ptr
12160+ .endm
12161+ .macro pax_set_fptr_mask
12162+ movabs $0x8000000000000000,%r10
12163+ .endm
12164+#endif
12165+#else
12166+ .macro pax_force_retaddr rip=0, reload=0
12167+ .endm
12168+ .macro pax_force_fptr ptr
12169+ .endm
12170+ .macro pax_force_retaddr_bts rip=0
12171+ .endm
12172+ .macro pax_set_fptr_mask
12173+ .endm
12174+#endif
12175+
12176 .macro altinstruction_entry orig alt feature orig_len alt_len
12177 .long \orig - .
12178 .long \alt - .
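
The two plugin methods above differ only in how the tag reaches its target: BTS executes one btsq $63 per protected site, while the OR method has pax_set_fptr_mask park 0x8000000000000000 in %r10 once per function so each site is a single orq. Either way a genuine, sign-extended kernel address is left unchanged and a planted user address becomes non-canonical. A file-scope GNU C sketch of the OR flavour, outside the kernel's macro machinery (symbol name made up):

__asm__(
	".text\n"
	"demo_tagged_return:\n\t"
	"movabs	$0x8000000000000000, %r10\n\t"	/* pax_set_fptr_mask */
	"orq	%r10, (%rsp)\n\t"		/* pax_force_retaddr */
	"ret\n"					/* faults if the slot was forged */
);
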
12179diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
12180index 58ed6d9..f1cbe58 100644
12181--- a/arch/x86/include/asm/alternative.h
12182+++ b/arch/x86/include/asm/alternative.h
12183@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12184 ".pushsection .discard,\"aw\",@progbits\n" \
12185 DISCARD_ENTRY(1) \
12186 ".popsection\n" \
12187- ".pushsection .altinstr_replacement, \"ax\"\n" \
12188+ ".pushsection .altinstr_replacement, \"a\"\n" \
12189 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
12190 ".popsection"
12191
12192@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12193 DISCARD_ENTRY(1) \
12194 DISCARD_ENTRY(2) \
12195 ".popsection\n" \
12196- ".pushsection .altinstr_replacement, \"ax\"\n" \
12197+ ".pushsection .altinstr_replacement, \"a\"\n" \
12198 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
12199 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
12200 ".popsection"
12201diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
12202index 3388034..050f0b9 100644
12203--- a/arch/x86/include/asm/apic.h
12204+++ b/arch/x86/include/asm/apic.h
12205@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
12206
12207 #ifdef CONFIG_X86_LOCAL_APIC
12208
12209-extern unsigned int apic_verbosity;
12210+extern int apic_verbosity;
12211 extern int local_apic_timer_c2_ok;
12212
12213 extern int disable_apic;
12214diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
12215index 20370c6..a2eb9b0 100644
12216--- a/arch/x86/include/asm/apm.h
12217+++ b/arch/x86/include/asm/apm.h
12218@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
12219 __asm__ __volatile__(APM_DO_ZERO_SEGS
12220 "pushl %%edi\n\t"
12221 "pushl %%ebp\n\t"
12222- "lcall *%%cs:apm_bios_entry\n\t"
12223+ "lcall *%%ss:apm_bios_entry\n\t"
12224 "setc %%al\n\t"
12225 "popl %%ebp\n\t"
12226 "popl %%edi\n\t"
12227@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
12228 __asm__ __volatile__(APM_DO_ZERO_SEGS
12229 "pushl %%edi\n\t"
12230 "pushl %%ebp\n\t"
12231- "lcall *%%cs:apm_bios_entry\n\t"
12232+ "lcall *%%ss:apm_bios_entry\n\t"
12233 "setc %%bl\n\t"
12234 "popl %%ebp\n\t"
12235 "popl %%edi\n\t"
12236diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
12237index 722aa3b..3a0bb27 100644
12238--- a/arch/x86/include/asm/atomic.h
12239+++ b/arch/x86/include/asm/atomic.h
12240@@ -22,7 +22,18 @@
12241 */
12242 static inline int atomic_read(const atomic_t *v)
12243 {
12244- return (*(volatile int *)&(v)->counter);
12245+ return (*(volatile const int *)&(v)->counter);
12246+}
12247+
12248+/**
12249+ * atomic_read_unchecked - read atomic variable
12250+ * @v: pointer of type atomic_unchecked_t
12251+ *
12252+ * Atomically reads the value of @v.
12253+ */
12254+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
12255+{
12256+ return (*(volatile const int *)&(v)->counter);
12257 }
12258
12259 /**
12260@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
12261 }
12262
12263 /**
12264+ * atomic_set_unchecked - set atomic variable
12265+ * @v: pointer of type atomic_unchecked_t
12266+ * @i: required value
12267+ *
12268+ * Atomically sets the value of @v to @i.
12269+ */
12270+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
12271+{
12272+ v->counter = i;
12273+}
12274+
12275+/**
12276 * atomic_add - add integer to atomic variable
12277 * @i: integer value to add
12278 * @v: pointer of type atomic_t
12279@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
12280 */
12281 static inline void atomic_add(int i, atomic_t *v)
12282 {
12283- asm volatile(LOCK_PREFIX "addl %1,%0"
12284+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12285+
12286+#ifdef CONFIG_PAX_REFCOUNT
12287+ "jno 0f\n"
12288+ LOCK_PREFIX "subl %1,%0\n"
12289+ "int $4\n0:\n"
12290+ _ASM_EXTABLE(0b, 0b)
12291+#endif
12292+
12293+ : "+m" (v->counter)
12294+ : "ir" (i));
12295+}
12296+
12297+/**
12298+ * atomic_add_unchecked - add integer to atomic variable
12299+ * @i: integer value to add
12300+ * @v: pointer of type atomic_unchecked_t
12301+ *
12302+ * Atomically adds @i to @v.
12303+ */
12304+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12305+{
12306+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12307 : "+m" (v->counter)
12308 : "ir" (i));
12309 }
12310@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12311 */
12312 static inline void atomic_sub(int i, atomic_t *v)
12313 {
12314- asm volatile(LOCK_PREFIX "subl %1,%0"
12315+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12316+
12317+#ifdef CONFIG_PAX_REFCOUNT
12318+ "jno 0f\n"
12319+ LOCK_PREFIX "addl %1,%0\n"
12320+ "int $4\n0:\n"
12321+ _ASM_EXTABLE(0b, 0b)
12322+#endif
12323+
12324+ : "+m" (v->counter)
12325+ : "ir" (i));
12326+}
12327+
12328+/**
12329+ * atomic_sub_unchecked - subtract integer from atomic variable
12330+ * @i: integer value to subtract
12331+ * @v: pointer of type atomic_unchecked_t
12332+ *
12333+ * Atomically subtracts @i from @v.
12334+ */
12335+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12336+{
12337+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12338 : "+m" (v->counter)
12339 : "ir" (i));
12340 }
12341@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12342 {
12343 unsigned char c;
12344
12345- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12346+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
12347+
12348+#ifdef CONFIG_PAX_REFCOUNT
12349+ "jno 0f\n"
12350+ LOCK_PREFIX "addl %2,%0\n"
12351+ "int $4\n0:\n"
12352+ _ASM_EXTABLE(0b, 0b)
12353+#endif
12354+
12355+ "sete %1\n"
12356 : "+m" (v->counter), "=qm" (c)
12357 : "ir" (i) : "memory");
12358 return c;
12359@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12360 */
12361 static inline void atomic_inc(atomic_t *v)
12362 {
12363- asm volatile(LOCK_PREFIX "incl %0"
12364+ asm volatile(LOCK_PREFIX "incl %0\n"
12365+
12366+#ifdef CONFIG_PAX_REFCOUNT
12367+ "jno 0f\n"
12368+ LOCK_PREFIX "decl %0\n"
12369+ "int $4\n0:\n"
12370+ _ASM_EXTABLE(0b, 0b)
12371+#endif
12372+
12373+ : "+m" (v->counter));
12374+}
12375+
12376+/**
12377+ * atomic_inc_unchecked - increment atomic variable
12378+ * @v: pointer of type atomic_unchecked_t
12379+ *
12380+ * Atomically increments @v by 1.
12381+ */
12382+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12383+{
12384+ asm volatile(LOCK_PREFIX "incl %0\n"
12385 : "+m" (v->counter));
12386 }
12387
12388@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12389 */
12390 static inline void atomic_dec(atomic_t *v)
12391 {
12392- asm volatile(LOCK_PREFIX "decl %0"
12393+ asm volatile(LOCK_PREFIX "decl %0\n"
12394+
12395+#ifdef CONFIG_PAX_REFCOUNT
12396+ "jno 0f\n"
12397+ LOCK_PREFIX "incl %0\n"
12398+ "int $4\n0:\n"
12399+ _ASM_EXTABLE(0b, 0b)
12400+#endif
12401+
12402+ : "+m" (v->counter));
12403+}
12404+
12405+/**
12406+ * atomic_dec_unchecked - decrement atomic variable
12407+ * @v: pointer of type atomic_unchecked_t
12408+ *
12409+ * Atomically decrements @v by 1.
12410+ */
12411+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12412+{
12413+ asm volatile(LOCK_PREFIX "decl %0\n"
12414 : "+m" (v->counter));
12415 }
12416
12417@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12418 {
12419 unsigned char c;
12420
12421- asm volatile(LOCK_PREFIX "decl %0; sete %1"
12422+ asm volatile(LOCK_PREFIX "decl %0\n"
12423+
12424+#ifdef CONFIG_PAX_REFCOUNT
12425+ "jno 0f\n"
12426+ LOCK_PREFIX "incl %0\n"
12427+ "int $4\n0:\n"
12428+ _ASM_EXTABLE(0b, 0b)
12429+#endif
12430+
12431+ "sete %1\n"
12432 : "+m" (v->counter), "=qm" (c)
12433 : : "memory");
12434 return c != 0;
12435@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12436 {
12437 unsigned char c;
12438
12439- asm volatile(LOCK_PREFIX "incl %0; sete %1"
12440+ asm volatile(LOCK_PREFIX "incl %0\n"
12441+
12442+#ifdef CONFIG_PAX_REFCOUNT
12443+ "jno 0f\n"
12444+ LOCK_PREFIX "decl %0\n"
12445+ "int $4\n0:\n"
12446+ _ASM_EXTABLE(0b, 0b)
12447+#endif
12448+
12449+ "sete %1\n"
12450+ : "+m" (v->counter), "=qm" (c)
12451+ : : "memory");
12452+ return c != 0;
12453+}
12454+
12455+/**
12456+ * atomic_inc_and_test_unchecked - increment and test
12457+ * @v: pointer of type atomic_unchecked_t
12458+ *
12459+ * Atomically increments @v by 1
12460+ * and returns true if the result is zero, or false for all
12461+ * other cases.
12462+ */
12463+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12464+{
12465+ unsigned char c;
12466+
12467+ asm volatile(LOCK_PREFIX "incl %0\n"
12468+ "sete %1\n"
12469 : "+m" (v->counter), "=qm" (c)
12470 : : "memory");
12471 return c != 0;
12472@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12473 {
12474 unsigned char c;
12475
12476- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12477+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
12478+
12479+#ifdef CONFIG_PAX_REFCOUNT
12480+ "jno 0f\n"
12481+ LOCK_PREFIX "subl %2,%0\n"
12482+ "int $4\n0:\n"
12483+ _ASM_EXTABLE(0b, 0b)
12484+#endif
12485+
12486+ "sets %1\n"
12487 : "+m" (v->counter), "=qm" (c)
12488 : "ir" (i) : "memory");
12489 return c;
12490@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12491 */
12492 static inline int atomic_add_return(int i, atomic_t *v)
12493 {
12494+ return i + xadd_check_overflow(&v->counter, i);
12495+}
12496+
12497+/**
12498+ * atomic_add_return_unchecked - add integer and return
12499+ * @i: integer value to add
12500+ * @v: pointer of type atomic_unchecked_t
12501+ *
12502+ * Atomically adds @i to @v and returns @i + @v
12503+ */
12504+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12505+{
12506 return i + xadd(&v->counter, i);
12507 }
12508
12509@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12510 }
12511
12512 #define atomic_inc_return(v) (atomic_add_return(1, v))
12513+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12514+{
12515+ return atomic_add_return_unchecked(1, v);
12516+}
12517 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12518
12519 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12520@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12521 return cmpxchg(&v->counter, old, new);
12522 }
12523
12524+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12525+{
12526+ return cmpxchg(&v->counter, old, new);
12527+}
12528+
12529 static inline int atomic_xchg(atomic_t *v, int new)
12530 {
12531 return xchg(&v->counter, new);
12532 }
12533
12534+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12535+{
12536+ return xchg(&v->counter, new);
12537+}
12538+
12539 /**
12540 * __atomic_add_unless - add unless the number is already a given value
12541 * @v: pointer of type atomic_t
12542@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12543 */
12544 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12545 {
12546- int c, old;
12547+ int c, old, new;
12548 c = atomic_read(v);
12549 for (;;) {
12550- if (unlikely(c == (u)))
12551+ if (unlikely(c == u))
12552 break;
12553- old = atomic_cmpxchg((v), c, c + (a));
12554+
12555+ asm volatile("addl %2,%0\n"
12556+
12557+#ifdef CONFIG_PAX_REFCOUNT
12558+ "jno 0f\n"
12559+ "subl %2,%0\n"
12560+ "int $4\n0:\n"
12561+ _ASM_EXTABLE(0b, 0b)
12562+#endif
12563+
12564+ : "=r" (new)
12565+ : "0" (c), "ir" (a));
12566+
12567+ old = atomic_cmpxchg(v, c, new);
12568 if (likely(old == c))
12569 break;
12570 c = old;
12571@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12572 }
12573
12574 /**
12575+ * atomic_inc_not_zero_hint - increment if not null
12576+ * @v: pointer of type atomic_t
12577+ * @hint: probable value of the atomic before the increment
12578+ *
12579+ * This version of atomic_inc_not_zero() gives a hint of the probable
12580+ * value of the atomic. This helps the processor avoid reading memory
12581+ * before the atomic read/modify/write cycle, lowering the
12582+ * number of bus transactions on some arches.
12583+ *
12584+ * Returns: 0 if increment was not done, 1 otherwise.
12585+ */
12586+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12587+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12588+{
12589+ int val, c = hint, new;
12590+
12591+ /* sanity test, should be removed by compiler if hint is a constant */
12592+ if (!hint)
12593+ return __atomic_add_unless(v, 1, 0);
12594+
12595+ do {
12596+ asm volatile("incl %0\n"
12597+
12598+#ifdef CONFIG_PAX_REFCOUNT
12599+ "jno 0f\n"
12600+ "decl %0\n"
12601+ "int $4\n0:\n"
12602+ _ASM_EXTABLE(0b, 0b)
12603+#endif
12604+
12605+ : "=r" (new)
12606+ : "0" (c));
12607+
12608+ val = atomic_cmpxchg(v, c, new);
12609+ if (val == c)
12610+ return 1;
12611+ c = val;
12612+ } while (c);
12613+
12614+ return 0;
12615+}
12616+
12617+/**
12618 * atomic_inc_short - increment of a short integer
12619 * @v: pointer to type int
12620 *
12621@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12622 #endif
12623
12624 /* These are x86-specific, used by some header files */
12625-#define atomic_clear_mask(mask, addr) \
12626- asm volatile(LOCK_PREFIX "andl %0,%1" \
12627- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12628+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12629+{
12630+ asm volatile(LOCK_PREFIX "andl %1,%0"
12631+ : "+m" (v->counter)
12632+ : "r" (~(mask))
12633+ : "memory");
12634+}
12635
12636-#define atomic_set_mask(mask, addr) \
12637- asm volatile(LOCK_PREFIX "orl %0,%1" \
12638- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12639- : "memory")
12640+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12641+{
12642+ asm volatile(LOCK_PREFIX "andl %1,%0"
12643+ : "+m" (v->counter)
12644+ : "r" (~(mask))
12645+ : "memory");
12646+}
12647+
12648+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12649+{
12650+ asm volatile(LOCK_PREFIX "orl %1,%0"
12651+ : "+m" (v->counter)
12652+ : "r" (mask)
12653+ : "memory");
12654+}
12655+
12656+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12657+{
12658+ asm volatile(LOCK_PREFIX "orl %1,%0"
12659+ : "+m" (v->counter)
12660+ : "r" (mask)
12661+ : "memory");
12662+}
12663
12664 /* Atomic operations are already serializing on x86 */
12665 #define smp_mb__before_atomic_dec() barrier()
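
Every PAX_REFCOUNT site in this header has the same shape: perform the locked operation, jno past the handler when the signed result did not overflow, otherwise undo the operation and execute int $4 (#OF), which the PaX overflow handler treats as a refcount attack; the _ASM_EXTABLE entry resumes execution at the label after the trap. A freestanding sketch of one overflow-checked increment, with the trap reported to the caller instead of through an exception table:

#include <stdio.h>

/* returns nonzero if the increment overflowed; the counter is left
 * saturated at its old value, as in the kernel's undo sequence */
static int atomic_inc_checked(int *counter)
{
	unsigned char overflowed;

	__asm__ __volatile__(
		"lock incl %0\n\t"
		"seto %1\n\t"		/* latch the overflow flag */
		"jno 1f\n\t"
		"lock decl %0\n"	/* undo, mirroring the patch above */
		"1:"
		: "+m" (*counter), "=q" (overflowed)
		: : "memory", "cc");
	return overflowed;
}

int main(void)
{
	int c = 0x7fffffff;	/* INT_MAX: the next increment overflows */

	if (atomic_inc_checked(&c))
		puts("refcount overflow detected (kernel raises int $4 here)");
	printf("counter = %d\n", c);	/* still INT_MAX thanks to the undo */
	return 0;
}
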
12666diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12667index b154de7..aadebd8 100644
12668--- a/arch/x86/include/asm/atomic64_32.h
12669+++ b/arch/x86/include/asm/atomic64_32.h
12670@@ -12,6 +12,14 @@ typedef struct {
12671 u64 __aligned(8) counter;
12672 } atomic64_t;
12673
12674+#ifdef CONFIG_PAX_REFCOUNT
12675+typedef struct {
12676+ u64 __aligned(8) counter;
12677+} atomic64_unchecked_t;
12678+#else
12679+typedef atomic64_t atomic64_unchecked_t;
12680+#endif
12681+
12682 #define ATOMIC64_INIT(val) { (val) }
12683
12684 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12685@@ -37,21 +45,31 @@ typedef struct {
12686 ATOMIC64_DECL_ONE(sym##_386)
12687
12688 ATOMIC64_DECL_ONE(add_386);
12689+ATOMIC64_DECL_ONE(add_unchecked_386);
12690 ATOMIC64_DECL_ONE(sub_386);
12691+ATOMIC64_DECL_ONE(sub_unchecked_386);
12692 ATOMIC64_DECL_ONE(inc_386);
12693+ATOMIC64_DECL_ONE(inc_unchecked_386);
12694 ATOMIC64_DECL_ONE(dec_386);
12695+ATOMIC64_DECL_ONE(dec_unchecked_386);
12696 #endif
12697
12698 #define alternative_atomic64(f, out, in...) \
12699 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12700
12701 ATOMIC64_DECL(read);
12702+ATOMIC64_DECL(read_unchecked);
12703 ATOMIC64_DECL(set);
12704+ATOMIC64_DECL(set_unchecked);
12705 ATOMIC64_DECL(xchg);
12706 ATOMIC64_DECL(add_return);
12707+ATOMIC64_DECL(add_return_unchecked);
12708 ATOMIC64_DECL(sub_return);
12709+ATOMIC64_DECL(sub_return_unchecked);
12710 ATOMIC64_DECL(inc_return);
12711+ATOMIC64_DECL(inc_return_unchecked);
12712 ATOMIC64_DECL(dec_return);
12713+ATOMIC64_DECL(dec_return_unchecked);
12714 ATOMIC64_DECL(dec_if_positive);
12715 ATOMIC64_DECL(inc_not_zero);
12716 ATOMIC64_DECL(add_unless);
12717@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12718 }
12719
12720 /**
12721+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12722+ * @v: pointer to type atomic64_unchecked_t
12723+ * @o: expected value
12724+ * @n: new value
12725+ *
12726+ * Atomically sets @v to @n if it was equal to @o and returns
12727+ * the old value.
12728+ */
12729+
12730+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12731+{
12732+ return cmpxchg64(&v->counter, o, n);
12733+}
12734+
12735+/**
12736 * atomic64_xchg - xchg atomic64 variable
12737 * @v: pointer to type atomic64_t
12738 * @n: value to assign
12739@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12740 }
12741
12742 /**
12743+ * atomic64_set_unchecked - set atomic64 variable
12744+ * @v: pointer to type atomic64_unchecked_t
12745+ * @n: value to assign
12746+ *
12747+ * Atomically sets the value of @v to @n.
12748+ */
12749+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12750+{
12751+ unsigned high = (unsigned)(i >> 32);
12752+ unsigned low = (unsigned)i;
12753+ alternative_atomic64(set, /* no output */,
12754+ "S" (v), "b" (low), "c" (high)
12755+ : "eax", "edx", "memory");
12756+}
12757+
12758+/**
12759 * atomic64_read - read atomic64 variable
12760 * @v: pointer to type atomic64_t
12761 *
12762@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12763 }
12764
12765 /**
12766+ * atomic64_read_unchecked - read atomic64 variable
12767+ * @v: pointer to type atomic64_unchecked_t
12768+ *
12769+ * Atomically reads the value of @v and returns it.
12770+ */
12771+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12772+{
12773+ long long r;
12774+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12775+ return r;
12776+ }
12777+
12778+/**
12779 * atomic64_add_return - add and return
12780 * @i: integer value to add
12781 * @v: pointer to type atomic64_t
12782@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12783 return i;
12784 }
12785
12786+/**
12787+ * atomic64_add_return_unchecked - add and return
12788+ * @i: integer value to add
12789+ * @v: pointer to type atomic64_unchecked_t
12790+ *
12791+ * Atomically adds @i to @v and returns @i + *@v
12792+ */
12793+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12794+{
12795+ alternative_atomic64(add_return_unchecked,
12796+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12797+ ASM_NO_INPUT_CLOBBER("memory"));
12798+ return i;
12799+}
12800+
12801 /*
12802 * Other variants with different arithmetic operators:
12803 */
12804@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12805 return a;
12806 }
12807
12808+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12809+{
12810+ long long a;
12811+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12812+ "S" (v) : "memory", "ecx");
12813+ return a;
12814+}
12815+
12816 static inline long long atomic64_dec_return(atomic64_t *v)
12817 {
12818 long long a;
12819@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12820 }
12821
12822 /**
12823+ * atomic64_add_unchecked - add integer to atomic64 variable
12824+ * @i: integer value to add
12825+ * @v: pointer to type atomic64_unchecked_t
12826+ *
12827+ * Atomically adds @i to @v.
12828+ */
12829+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12830+{
12831+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12832+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12833+ ASM_NO_INPUT_CLOBBER("memory"));
12834+ return i;
12835+}
12836+
12837+/**
12838 * atomic64_sub - subtract the atomic64 variable
12839 * @i: integer value to subtract
12840 * @v: pointer to type atomic64_t
12841diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12842index 0e1cbfc..5623683 100644
12843--- a/arch/x86/include/asm/atomic64_64.h
12844+++ b/arch/x86/include/asm/atomic64_64.h
12845@@ -18,7 +18,19 @@
12846 */
12847 static inline long atomic64_read(const atomic64_t *v)
12848 {
12849- return (*(volatile long *)&(v)->counter);
12850+ return (*(volatile const long *)&(v)->counter);
12851+}
12852+
12853+/**
12854+ * atomic64_read_unchecked - read atomic64 variable
12855+ * @v: pointer of type atomic64_unchecked_t
12856+ *
12857+ * Atomically reads the value of @v.
12858+ * Doesn't imply a read memory barrier.
12859+ */
12860+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12861+{
12862+ return (*(volatile const long *)&(v)->counter);
12863 }
12864
12865 /**
12866@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12867 }
12868
12869 /**
12870+ * atomic64_set_unchecked - set atomic64 variable
12871+ * @v: pointer to type atomic64_unchecked_t
12872+ * @i: required value
12873+ *
12874+ * Atomically sets the value of @v to @i.
12875+ */
12876+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12877+{
12878+ v->counter = i;
12879+}
12880+
12881+/**
12882 * atomic64_add - add integer to atomic64 variable
12883 * @i: integer value to add
12884 * @v: pointer to type atomic64_t
12885@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12886 */
12887 static inline void atomic64_add(long i, atomic64_t *v)
12888 {
12889+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12890+
12891+#ifdef CONFIG_PAX_REFCOUNT
12892+ "jno 0f\n"
12893+ LOCK_PREFIX "subq %1,%0\n"
12894+ "int $4\n0:\n"
12895+ _ASM_EXTABLE(0b, 0b)
12896+#endif
12897+
12898+ : "=m" (v->counter)
12899+ : "er" (i), "m" (v->counter));
12900+}
12901+
12902+/**
12903+ * atomic64_add_unchecked - add integer to atomic64 variable
12904+ * @i: integer value to add
12905+ * @v: pointer to type atomic64_unchecked_t
12906+ *
12907+ * Atomically adds @i to @v.
12908+ */
12909+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12910+{
12911 asm volatile(LOCK_PREFIX "addq %1,%0"
12912 : "=m" (v->counter)
12913 : "er" (i), "m" (v->counter));
12914@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12915 */
12916 static inline void atomic64_sub(long i, atomic64_t *v)
12917 {
12918- asm volatile(LOCK_PREFIX "subq %1,%0"
12919+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12920+
12921+#ifdef CONFIG_PAX_REFCOUNT
12922+ "jno 0f\n"
12923+ LOCK_PREFIX "addq %1,%0\n"
12924+ "int $4\n0:\n"
12925+ _ASM_EXTABLE(0b, 0b)
12926+#endif
12927+
12928+ : "=m" (v->counter)
12929+ : "er" (i), "m" (v->counter));
12930+}
12931+
12932+/**
12933+ * atomic64_sub_unchecked - subtract the atomic64 variable
12934+ * @i: integer value to subtract
12935+ * @v: pointer to type atomic64_unchecked_t
12936+ *
12937+ * Atomically subtracts @i from @v.
12938+ */
12939+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12940+{
12941+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12942 : "=m" (v->counter)
12943 : "er" (i), "m" (v->counter));
12944 }
12945@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12946 {
12947 unsigned char c;
12948
12949- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12950+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12951+
12952+#ifdef CONFIG_PAX_REFCOUNT
12953+ "jno 0f\n"
12954+ LOCK_PREFIX "addq %2,%0\n"
12955+ "int $4\n0:\n"
12956+ _ASM_EXTABLE(0b, 0b)
12957+#endif
12958+
12959+ "sete %1\n"
12960 : "=m" (v->counter), "=qm" (c)
12961 : "er" (i), "m" (v->counter) : "memory");
12962 return c;
12963@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12964 */
12965 static inline void atomic64_inc(atomic64_t *v)
12966 {
12967+ asm volatile(LOCK_PREFIX "incq %0\n"
12968+
12969+#ifdef CONFIG_PAX_REFCOUNT
12970+ "jno 0f\n"
12971+ LOCK_PREFIX "decq %0\n"
12972+ "int $4\n0:\n"
12973+ _ASM_EXTABLE(0b, 0b)
12974+#endif
12975+
12976+ : "=m" (v->counter)
12977+ : "m" (v->counter));
12978+}
12979+
12980+/**
12981+ * atomic64_inc_unchecked - increment atomic64 variable
12982+ * @v: pointer to type atomic64_unchecked_t
12983+ *
12984+ * Atomically increments @v by 1.
12985+ */
12986+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12987+{
12988 asm volatile(LOCK_PREFIX "incq %0"
12989 : "=m" (v->counter)
12990 : "m" (v->counter));
12991@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12992 */
12993 static inline void atomic64_dec(atomic64_t *v)
12994 {
12995- asm volatile(LOCK_PREFIX "decq %0"
12996+ asm volatile(LOCK_PREFIX "decq %0\n"
12997+
12998+#ifdef CONFIG_PAX_REFCOUNT
12999+ "jno 0f\n"
13000+ LOCK_PREFIX "incq %0\n"
13001+ "int $4\n0:\n"
13002+ _ASM_EXTABLE(0b, 0b)
13003+#endif
13004+
13005+ : "=m" (v->counter)
13006+ : "m" (v->counter));
13007+}
13008+
13009+/**
13010+ * atomic64_dec_unchecked - decrement atomic64 variable
13011+ * @v: pointer to type atomic64_unchecked_t
13012+ *
13013+ * Atomically decrements @v by 1.
13014+ */
13015+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
13016+{
13017+ asm volatile(LOCK_PREFIX "decq %0\n"
13018 : "=m" (v->counter)
13019 : "m" (v->counter));
13020 }
13021@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
13022 {
13023 unsigned char c;
13024
13025- asm volatile(LOCK_PREFIX "decq %0; sete %1"
13026+ asm volatile(LOCK_PREFIX "decq %0\n"
13027+
13028+#ifdef CONFIG_PAX_REFCOUNT
13029+ "jno 0f\n"
13030+ LOCK_PREFIX "incq %0\n"
13031+ "int $4\n0:\n"
13032+ _ASM_EXTABLE(0b, 0b)
13033+#endif
13034+
13035+ "sete %1\n"
13036 : "=m" (v->counter), "=qm" (c)
13037 : "m" (v->counter) : "memory");
13038 return c != 0;
13039@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
13040 {
13041 unsigned char c;
13042
13043- asm volatile(LOCK_PREFIX "incq %0; sete %1"
13044+ asm volatile(LOCK_PREFIX "incq %0\n"
13045+
13046+#ifdef CONFIG_PAX_REFCOUNT
13047+ "jno 0f\n"
13048+ LOCK_PREFIX "decq %0\n"
13049+ "int $4\n0:\n"
13050+ _ASM_EXTABLE(0b, 0b)
13051+#endif
13052+
13053+ "sete %1\n"
13054 : "=m" (v->counter), "=qm" (c)
13055 : "m" (v->counter) : "memory");
13056 return c != 0;
13057@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13058 {
13059 unsigned char c;
13060
13061- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
13062+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
13063+
13064+#ifdef CONFIG_PAX_REFCOUNT
13065+ "jno 0f\n"
13066+ LOCK_PREFIX "subq %2,%0\n"
13067+ "int $4\n0:\n"
13068+ _ASM_EXTABLE(0b, 0b)
13069+#endif
13070+
13071+ "sets %1\n"
13072 : "=m" (v->counter), "=qm" (c)
13073 : "er" (i), "m" (v->counter) : "memory");
13074 return c;
13075@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13076 */
13077 static inline long atomic64_add_return(long i, atomic64_t *v)
13078 {
13079+ return i + xadd_check_overflow(&v->counter, i);
13080+}
13081+
13082+/**
13083+ * atomic64_add_return_unchecked - add and return
13084+ * @i: integer value to add
13085+ * @v: pointer to type atomic64_unchecked_t
13086+ *
13087+ * Atomically adds @i to @v and returns @i + @v
13088+ */
13089+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
13090+{
13091 return i + xadd(&v->counter, i);
13092 }
13093
13094@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
13095 }
13096
13097 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
13098+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13099+{
13100+ return atomic64_add_return_unchecked(1, v);
13101+}
13102 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
13103
13104 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13105@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13106 return cmpxchg(&v->counter, old, new);
13107 }
13108
13109+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
13110+{
13111+ return cmpxchg(&v->counter, old, new);
13112+}
13113+
13114 static inline long atomic64_xchg(atomic64_t *v, long new)
13115 {
13116 return xchg(&v->counter, new);
13117@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
13118 */
13119 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
13120 {
13121- long c, old;
13122+ long c, old, new;
13123 c = atomic64_read(v);
13124 for (;;) {
13125- if (unlikely(c == (u)))
13126+ if (unlikely(c == u))
13127 break;
13128- old = atomic64_cmpxchg((v), c, c + (a));
13129+
13130+ asm volatile("add %2,%0\n"
13131+
13132+#ifdef CONFIG_PAX_REFCOUNT
13133+ "jno 0f\n"
13134+ "sub %2,%0\n"
13135+ "int $4\n0:\n"
13136+ _ASM_EXTABLE(0b, 0b)
13137+#endif
13138+
13139+ : "=r" (new)
13140+ : "0" (c), "ir" (a));
13141+
13142+ old = atomic64_cmpxchg(v, c, new);
13143 if (likely(old == c))
13144 break;
13145 c = old;
13146 }
13147- return c != (u);
13148+ return c != u;
13149 }
13150
13151 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
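
atomic64_add_unless() keeps its cmpxchg retry loop, but the candidate value is now produced by an add that traps on signed overflow before cmpxchg can publish it, so a wrapping refcount is caught on this path too. A plain C11 model of the loop's control flow, with the overflow fence reduced to a comment:

#include <stdatomic.h>
#include <stdbool.h>

/* model of atomic64_add_unless(): add 'a' unless the value equals 'u' */
static bool add_unless(_Atomic long *v, long a, long u)
{
	long c = atomic_load(v);

	for (;;) {
		if (c == u)
			return false;
		/* the patched kernel computes c + a in asm and raises #OF
		 * on signed overflow right here */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
		/* on failure 'c' now holds the fresh value; retry */
	}
}
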
13152diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
13153index 6dfd019..28e188d 100644
13154--- a/arch/x86/include/asm/bitops.h
13155+++ b/arch/x86/include/asm/bitops.h
13156@@ -40,7 +40,7 @@
13157 * a mask operation on a byte.
13158 */
13159 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
13160-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
13161+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
13162 #define CONST_MASK(nr) (1 << ((nr) & 7))
13163
13164 /**
13165@@ -486,7 +486,7 @@ static inline int fls(int x)
13166 * at position 64.
13167 */
13168 #ifdef CONFIG_X86_64
13169-static __always_inline int fls64(__u64 x)
13170+static __always_inline long fls64(__u64 x)
13171 {
13172 int bitpos = -1;
13173 /*
13174diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
13175index 4fa687a..60f2d39 100644
13176--- a/arch/x86/include/asm/boot.h
13177+++ b/arch/x86/include/asm/boot.h
13178@@ -6,10 +6,15 @@
13179 #include <uapi/asm/boot.h>
13180
13181 /* Physical address where kernel should be loaded. */
13182-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13183+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13184 + (CONFIG_PHYSICAL_ALIGN - 1)) \
13185 & ~(CONFIG_PHYSICAL_ALIGN - 1))
13186
13187+#ifndef __ASSEMBLY__
13188+extern unsigned char __LOAD_PHYSICAL_ADDR[];
13189+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
13190+#endif
13191+
13192 /* Minimum kernel alignment, as a power of two */
13193 #ifdef CONFIG_X86_64
13194 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
13195diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
13196index 48f99f1..d78ebf9 100644
13197--- a/arch/x86/include/asm/cache.h
13198+++ b/arch/x86/include/asm/cache.h
13199@@ -5,12 +5,13 @@
13200
13201 /* L1 cache line size */
13202 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13203-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13204+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13205
13206 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
13207+#define __read_only __attribute__((__section__(".data..read_only")))
13208
13209 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
13210-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
13211+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
13212
13213 #ifdef CONFIG_X86_VSMP
13214 #ifdef CONFIG_SMP
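
__read_only is the building block for PaX constification: objects placed in .data..read_only are written during early boot (between pax_open_kernel and pax_close_kernel, as in the desc.h hunks below) and afterwards live in pages the kernel maps read-only. A user-space approximation of the placement half (the write protection itself would need mprotect() or the kernel's pageattr machinery):

#include <stdio.h>

#define __read_only __attribute__((__section__(".data..read_only")))

static int settings __read_only = 42;	/* lands in .data..read_only */

int main(void)
{
	/* in the kernel this section is write-protected after init, so a
	 * stray "settings = 0" would fault instead of patching state */
	printf("settings = %d\n", settings);
	return 0;
}
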
13215diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
13216index 9863ee3..4a1f8e1 100644
13217--- a/arch/x86/include/asm/cacheflush.h
13218+++ b/arch/x86/include/asm/cacheflush.h
13219@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
13220 unsigned long pg_flags = pg->flags & _PGMT_MASK;
13221
13222 if (pg_flags == _PGMT_DEFAULT)
13223- return -1;
13224+ return ~0UL;
13225 else if (pg_flags == _PGMT_WC)
13226 return _PAGE_CACHE_WC;
13227 else if (pg_flags == _PGMT_UC_MINUS)
13228diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
13229index 46fc474..b02b0f9 100644
13230--- a/arch/x86/include/asm/checksum_32.h
13231+++ b/arch/x86/include/asm/checksum_32.h
13232@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
13233 int len, __wsum sum,
13234 int *src_err_ptr, int *dst_err_ptr);
13235
13236+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
13237+ int len, __wsum sum,
13238+ int *src_err_ptr, int *dst_err_ptr);
13239+
13240+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
13241+ int len, __wsum sum,
13242+ int *src_err_ptr, int *dst_err_ptr);
13243+
13244 /*
13245 * Note: when you get a NULL pointer exception here this means someone
13246 * passed in an incorrect kernel address to one of these functions.
13247@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
13248 int *err_ptr)
13249 {
13250 might_sleep();
13251- return csum_partial_copy_generic((__force void *)src, dst,
13252+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
13253 len, sum, err_ptr, NULL);
13254 }
13255
13256@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
13257 {
13258 might_sleep();
13259 if (access_ok(VERIFY_WRITE, dst, len))
13260- return csum_partial_copy_generic(src, (__force void *)dst,
13261+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
13262 len, sum, NULL, err_ptr);
13263
13264 if (len)
13265diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
13266index 8d871ea..c1a0dc9 100644
13267--- a/arch/x86/include/asm/cmpxchg.h
13268+++ b/arch/x86/include/asm/cmpxchg.h
13269@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
13270 __compiletime_error("Bad argument size for cmpxchg");
13271 extern void __xadd_wrong_size(void)
13272 __compiletime_error("Bad argument size for xadd");
13273+extern void __xadd_check_overflow_wrong_size(void)
13274+ __compiletime_error("Bad argument size for xadd_check_overflow");
13275 extern void __add_wrong_size(void)
13276 __compiletime_error("Bad argument size for add");
13277+extern void __add_check_overflow_wrong_size(void)
13278+ __compiletime_error("Bad argument size for add_check_overflow");
13279
13280 /*
13281 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
13282@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
13283 __ret; \
13284 })
13285
13286+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
13287+ ({ \
13288+ __typeof__ (*(ptr)) __ret = (arg); \
13289+ switch (sizeof(*(ptr))) { \
13290+ case __X86_CASE_L: \
13291+ asm volatile (lock #op "l %0, %1\n" \
13292+ "jno 0f\n" \
13293+ "mov %0,%1\n" \
13294+ "int $4\n0:\n" \
13295+ _ASM_EXTABLE(0b, 0b) \
13296+ : "+r" (__ret), "+m" (*(ptr)) \
13297+ : : "memory", "cc"); \
13298+ break; \
13299+ case __X86_CASE_Q: \
13300+ asm volatile (lock #op "q %q0, %1\n" \
13301+ "jno 0f\n" \
13302+ "mov %0,%1\n" \
13303+ "int $4\n0:\n" \
13304+ _ASM_EXTABLE(0b, 0b) \
13305+ : "+r" (__ret), "+m" (*(ptr)) \
13306+ : : "memory", "cc"); \
13307+ break; \
13308+ default: \
13309+ __ ## op ## _check_overflow_wrong_size(); \
13310+ } \
13311+ __ret; \
13312+ })
13313+
13314 /*
13315 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13316 * Since this is generally used to protect other memory information, we
13317@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13318 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13319 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13320
13321+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13322+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13323+
13324 #define __add(ptr, inc, lock) \
13325 ({ \
13326 __typeof__ (*(ptr)) __ret = (inc); \
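
xadd_check_overflow mirrors plain xadd plus the overflow fence, and it is what lets atomic_add_return() become "i + xadd_check_overflow(&v->counter, i)" in the headers above. A simplified model of its contract, long-sized only and with a plain trap standing in for the kernel's int $4 / exception-table pair:

/* atomically add and return the old value; trap if the add overflowed */
static long xadd_check_overflow_model(long *ptr, long inc)
{
	long old = inc;		/* xadd swaps: %0 ends up holding the old value */

	__asm__ __volatile__(
		"lock xaddq %0, %1\n\t"
		"jno 1f\n\t"
		"movq %0, %1\n\t"	/* overflow: write the old value back */
		"ud2\n"			/* stand-in for "int $4" + _ASM_EXTABLE */
		"1:"
		: "+r" (old), "+m" (*ptr)
		: : "memory", "cc");
	return old;
}
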
13327diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13328index 59c6c40..5e0b22c 100644
13329--- a/arch/x86/include/asm/compat.h
13330+++ b/arch/x86/include/asm/compat.h
13331@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13332 typedef u32 compat_uint_t;
13333 typedef u32 compat_ulong_t;
13334 typedef u64 __attribute__((aligned(4))) compat_u64;
13335-typedef u32 compat_uptr_t;
13336+typedef u32 __user compat_uptr_t;
13337
13338 struct compat_timespec {
13339 compat_time_t tv_sec;
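
Giving compat_uptr_t the __user qualifier lets sparse track the pointee's address space through the u32 handle itself: compat_ptr() then yields a __user pointer, and dereferencing it without going through the user-access helpers earns a warning at analysis time. The annotation is invisible to the compiler proper; it only expands to an attribute under sparse's __CHECKER__. A minimal sketch of the mechanism outside the kernel:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

typedef unsigned int u32;
typedef u32 __user compat_uptr_t;	/* as in the patched compat.h */

static inline void __user *compat_ptr(compat_uptr_t uptr)
{
	return (void __user *)(unsigned long)uptr;
}
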
13340diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13341index 2d9075e..b75a844 100644
13342--- a/arch/x86/include/asm/cpufeature.h
13343+++ b/arch/x86/include/asm/cpufeature.h
13344@@ -206,7 +206,7 @@
13345 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13346 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13347 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13348-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13349+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13350 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13351 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13352 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13353@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13354 ".section .discard,\"aw\",@progbits\n"
13355 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13356 ".previous\n"
13357- ".section .altinstr_replacement,\"ax\"\n"
13358+ ".section .altinstr_replacement,\"a\"\n"
13359 "3: movb $1,%0\n"
13360 "4:\n"
13361 ".previous\n"
13362diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13363index 8bf1c06..b6ae785 100644
13364--- a/arch/x86/include/asm/desc.h
13365+++ b/arch/x86/include/asm/desc.h
13366@@ -4,6 +4,7 @@
13367 #include <asm/desc_defs.h>
13368 #include <asm/ldt.h>
13369 #include <asm/mmu.h>
13370+#include <asm/pgtable.h>
13371
13372 #include <linux/smp.h>
13373 #include <linux/percpu.h>
13374@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13375
13376 desc->type = (info->read_exec_only ^ 1) << 1;
13377 desc->type |= info->contents << 2;
13378+ desc->type |= info->seg_not_present ^ 1;
13379
13380 desc->s = 1;
13381 desc->dpl = 0x3;
13382@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13383 }
13384
13385 extern struct desc_ptr idt_descr;
13386-extern gate_desc idt_table[];
13387 extern struct desc_ptr nmi_idt_descr;
13388-extern gate_desc nmi_idt_table[];
13389-
13390-struct gdt_page {
13391- struct desc_struct gdt[GDT_ENTRIES];
13392-} __attribute__((aligned(PAGE_SIZE)));
13393-
13394-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13395+extern gate_desc idt_table[256];
13396+extern gate_desc nmi_idt_table[256];
13397
13398+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13399 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13400 {
13401- return per_cpu(gdt_page, cpu).gdt;
13402+ return cpu_gdt_table[cpu];
13403 }
13404
13405 #ifdef CONFIG_X86_64
13406@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13407 unsigned long base, unsigned dpl, unsigned flags,
13408 unsigned short seg)
13409 {
13410- gate->a = (seg << 16) | (base & 0xffff);
13411- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13412+ gate->gate.offset_low = base;
13413+ gate->gate.seg = seg;
13414+ gate->gate.reserved = 0;
13415+ gate->gate.type = type;
13416+ gate->gate.s = 0;
13417+ gate->gate.dpl = dpl;
13418+ gate->gate.p = 1;
13419+ gate->gate.offset_high = base >> 16;
13420 }
13421
13422 #endif
13423@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13424
13425 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13426 {
13427+ pax_open_kernel();
13428 memcpy(&idt[entry], gate, sizeof(*gate));
13429+ pax_close_kernel();
13430 }
13431
13432 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13433 {
13434+ pax_open_kernel();
13435 memcpy(&ldt[entry], desc, 8);
13436+ pax_close_kernel();
13437 }
13438
13439 static inline void
13440@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13441 default: size = sizeof(*gdt); break;
13442 }
13443
13444+ pax_open_kernel();
13445 memcpy(&gdt[entry], desc, size);
13446+ pax_close_kernel();
13447 }
13448
13449 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13450@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13451
13452 static inline void native_load_tr_desc(void)
13453 {
13454+ pax_open_kernel();
13455 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13456+ pax_close_kernel();
13457 }
13458
13459 static inline void native_load_gdt(const struct desc_ptr *dtr)
13460@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13461 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13462 unsigned int i;
13463
13464+ pax_open_kernel();
13465 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13466 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13467+ pax_close_kernel();
13468 }
13469
13470 #define _LDT_empty(info) \
13471@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13472 preempt_enable();
13473 }
13474
13475-static inline unsigned long get_desc_base(const struct desc_struct *desc)
13476+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13477 {
13478 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13479 }
13480@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13481 }
13482
13483 #ifdef CONFIG_X86_64
13484-static inline void set_nmi_gate(int gate, void *addr)
13485+static inline void set_nmi_gate(int gate, const void *addr)
13486 {
13487 gate_desc s;
13488
13489@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13490 }
13491 #endif
13492
13493-static inline void _set_gate(int gate, unsigned type, void *addr,
13494+static inline void _set_gate(int gate, unsigned type, const void *addr,
13495 unsigned dpl, unsigned ist, unsigned seg)
13496 {
13497 gate_desc s;
13498@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13499 * Pentium F0 0F bugfix can have resulted in the mapped
13500 * IDT being write-protected.
13501 */
13502-static inline void set_intr_gate(unsigned int n, void *addr)
13503+static inline void set_intr_gate(unsigned int n, const void *addr)
13504 {
13505 BUG_ON((unsigned)n > 0xFF);
13506 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13507@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13508 /*
13509 * This routine sets up an interrupt gate at directory privilege level 3.
13510 */
13511-static inline void set_system_intr_gate(unsigned int n, void *addr)
13512+static inline void set_system_intr_gate(unsigned int n, const void *addr)
13513 {
13514 BUG_ON((unsigned)n > 0xFF);
13515 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13516 }
13517
13518-static inline void set_system_trap_gate(unsigned int n, void *addr)
13519+static inline void set_system_trap_gate(unsigned int n, const void *addr)
13520 {
13521 BUG_ON((unsigned)n > 0xFF);
13522 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13523 }
13524
13525-static inline void set_trap_gate(unsigned int n, void *addr)
13526+static inline void set_trap_gate(unsigned int n, const void *addr)
13527 {
13528 BUG_ON((unsigned)n > 0xFF);
13529 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13530@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13531 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13532 {
13533 BUG_ON((unsigned)n > 0xFF);
13534- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13535+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13536 }
13537
13538-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13539+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13540 {
13541 BUG_ON((unsigned)n > 0xFF);
13542 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13543 }
13544
13545-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13546+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13547 {
13548 BUG_ON((unsigned)n > 0xFF);
13549 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13550 }
13551
13552+#ifdef CONFIG_X86_32
13553+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13554+{
13555+ struct desc_struct d;
13556+
13557+ if (likely(limit))
13558+ limit = (limit - 1UL) >> PAGE_SHIFT;
13559+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
13560+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13561+}
13562+#endif
13563+
13564 #endif /* _ASM_X86_DESC_H */
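The desc.h changes above rewrite pack_gate() through the named bitfield view that the desc_defs.h hunk below adds, and bracket every descriptor write with pax_open_kernel()/pax_close_kernel() so the IDT/GDT/LDT can live in read-only memory. A minimal userspace sketch, not part of the patch and assuming GCC bitfield ordering on little-endian x86, checking that the bitfield encoding matches the old a/b word packing it replaces:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the "gate" view added to struct desc_struct in desc_defs.h below */
struct gate32 {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
	uint32_t base = 0xc0101234, seg = 0x10, type = 0xe, dpl = 3;

	/* old encoding: hand-shifted a/b words, as removed from pack_gate() */
	uint32_t a = (seg << 16) | (base & 0xffff);
	uint32_t b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

	/* new encoding: named fields, present bit and DPL spelled out */
	struct gate32 g = {
		.offset_low = base, .seg = seg, .reserved = 0,
		.type = type, .s = 0, .dpl = dpl, .p = 1,
		.offset_high = base >> 16,
	};
	uint32_t words[2];
	__builtin_memcpy(words, &g, sizeof(words));

	assert(words[0] == a && words[1] == b);	/* identical bits either way */
	printf("gate encodings match: %08x %08x\n", words[0], words[1]);
	return 0;
}

The named fields cost nothing at run time and make the present/DPL bits greppable, which matters once the descriptor tables are write-protected.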
13565diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13566index 278441f..b95a174 100644
13567--- a/arch/x86/include/asm/desc_defs.h
13568+++ b/arch/x86/include/asm/desc_defs.h
13569@@ -31,6 +31,12 @@ struct desc_struct {
13570 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13571 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13572 };
13573+ struct {
13574+ u16 offset_low;
13575+ u16 seg;
13576+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13577+ unsigned offset_high: 16;
13578+ } gate;
13579 };
13580 } __attribute__((packed));
13581
13582diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13583index ced283a..ffe04cc 100644
13584--- a/arch/x86/include/asm/div64.h
13585+++ b/arch/x86/include/asm/div64.h
13586@@ -39,7 +39,7 @@
13587 __mod; \
13588 })
13589
13590-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13591+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13592 {
13593 union {
13594 u64 v64;
13595diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13596index 9c999c1..3860cb8 100644
13597--- a/arch/x86/include/asm/elf.h
13598+++ b/arch/x86/include/asm/elf.h
13599@@ -243,7 +243,25 @@ extern int force_personality32;
13600 the loader. We need to make sure that it is out of the way of the program
13601 that it will "exec", and that there is sufficient room for the brk. */
13602
13603+#ifdef CONFIG_PAX_SEGMEXEC
13604+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13605+#else
13606 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13607+#endif
13608+
13609+#ifdef CONFIG_PAX_ASLR
13610+#ifdef CONFIG_X86_32
13611+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13612+
13613+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13614+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13615+#else
13616+#define PAX_ELF_ET_DYN_BASE 0x400000UL
13617+
13618+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13619+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13620+#endif
13621+#endif
13622
13623 /* This yields a mask that user programs can use to figure out what
13624 instruction set this CPU supports. This could be done in user space,
13625@@ -296,16 +314,12 @@ do { \
13626
13627 #define ARCH_DLINFO \
13628 do { \
13629- if (vdso_enabled) \
13630- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13631- (unsigned long)current->mm->context.vdso); \
13632+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13633 } while (0)
13634
13635 #define ARCH_DLINFO_X32 \
13636 do { \
13637- if (vdso_enabled) \
13638- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13639- (unsigned long)current->mm->context.vdso); \
13640+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13641 } while (0)
13642
13643 #define AT_SYSINFO 32
13644@@ -320,7 +334,7 @@ else \
13645
13646 #endif /* !CONFIG_X86_32 */
13647
13648-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13649+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13650
13651 #define VDSO_ENTRY \
13652 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13653@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13654 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13655 #define compat_arch_setup_additional_pages syscall32_setup_pages
13656
13657-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13658-#define arch_randomize_brk arch_randomize_brk
13659-
13660 /*
13661 * True on X86_32 or when emulating IA32 on X86_64
13662 */
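The elf.h hunk above replaces the stock ELF_ET_DYN_BASE with PaX ASLR bases and per-architecture randomization widths. A toy sketch of how those widths feed the load address; the base is taken from the 64-bit branch of the hunk, while the 32-bit delta width (TASK_SIZE_MAX_SHIFT 47 - PAGE_SHIFT - 3) and the rand() stand-in for the kernel's entropy source are assumptions of the demo:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		12
#define PAX_ELF_ET_DYN_BASE	0x400000UL	/* 64-bit value from the hunk */
#define PAX_DELTA_MMAP_LEN	32		/* assumed: 47 - 12 - 3 */

int main(void)
{
	/* pax_get_random_long() in the real code; unseeded rand() here */
	unsigned long delta = ((unsigned long)rand() &
			       ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

	printf("ET_DYN load base: %#lx\n", PAX_ELF_ET_DYN_BASE + delta);
	return 0;
}

The width macros, not the base, are what decide how many bits of entropy an attacker has to guess through.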
13663diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13664index 75ce3f4..882e801 100644
13665--- a/arch/x86/include/asm/emergency-restart.h
13666+++ b/arch/x86/include/asm/emergency-restart.h
13667@@ -13,6 +13,6 @@ enum reboot_type {
13668
13669 extern enum reboot_type reboot_type;
13670
13671-extern void machine_emergency_restart(void);
13672+extern void machine_emergency_restart(void) __noreturn;
13673
13674 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13675diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13676index 41ab26e..a88c9e6 100644
13677--- a/arch/x86/include/asm/fpu-internal.h
13678+++ b/arch/x86/include/asm/fpu-internal.h
13679@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13680 ({ \
13681 int err; \
13682 asm volatile(ASM_STAC "\n" \
13683- "1:" #insn "\n\t" \
13684+ "1:" \
13685+ __copyuser_seg \
13686+ #insn "\n\t" \
13687 "2: " ASM_CLAC "\n" \
13688 ".section .fixup,\"ax\"\n" \
13689 "3: movl $-1,%[err]\n" \
13690@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13691 "emms\n\t" /* clear stack tags */
13692 "fildl %P[addr]", /* set F?P to defined value */
13693 X86_FEATURE_FXSAVE_LEAK,
13694- [addr] "m" (tsk->thread.fpu.has_fpu));
13695+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13696
13697 return fpu_restore_checking(&tsk->thread.fpu);
13698 }
13699diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13700index be27ba1..8f13ff9 100644
13701--- a/arch/x86/include/asm/futex.h
13702+++ b/arch/x86/include/asm/futex.h
13703@@ -12,6 +12,7 @@
13704 #include <asm/smap.h>
13705
13706 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13707+ typecheck(u32 __user *, uaddr); \
13708 asm volatile("\t" ASM_STAC "\n" \
13709 "1:\t" insn "\n" \
13710 "2:\t" ASM_CLAC "\n" \
13711@@ -20,15 +21,16 @@
13712 "\tjmp\t2b\n" \
13713 "\t.previous\n" \
13714 _ASM_EXTABLE(1b, 3b) \
13715- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13716+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13717 : "i" (-EFAULT), "0" (oparg), "1" (0))
13718
13719 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13720+ typecheck(u32 __user *, uaddr); \
13721 asm volatile("\t" ASM_STAC "\n" \
13722 "1:\tmovl %2, %0\n" \
13723 "\tmovl\t%0, %3\n" \
13724 "\t" insn "\n" \
13725- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13726+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13727 "\tjnz\t1b\n" \
13728 "3:\t" ASM_CLAC "\n" \
13729 "\t.section .fixup,\"ax\"\n" \
13730@@ -38,7 +40,7 @@
13731 _ASM_EXTABLE(1b, 4b) \
13732 _ASM_EXTABLE(2b, 4b) \
13733 : "=&a" (oldval), "=&r" (ret), \
13734- "+m" (*uaddr), "=&r" (tem) \
13735+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13736 : "r" (oparg), "i" (-EFAULT), "1" (0))
13737
13738 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13739@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13740
13741 switch (op) {
13742 case FUTEX_OP_SET:
13743- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13744+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13745 break;
13746 case FUTEX_OP_ADD:
13747- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13748+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13749 uaddr, oparg);
13750 break;
13751 case FUTEX_OP_OR:
13752@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13753 return -EFAULT;
13754
13755 asm volatile("\t" ASM_STAC "\n"
13756- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13757+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13758 "2:\t" ASM_CLAC "\n"
13759 "\t.section .fixup, \"ax\"\n"
13760 "3:\tmov %3, %0\n"
13761 "\tjmp 2b\n"
13762 "\t.previous\n"
13763 _ASM_EXTABLE(1b, 3b)
13764- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13765+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13766 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13767 : "memory"
13768 );
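Beyond routing the futex atomics through __copyuser_seg and ____m() for UDEREF, the hunk above adds a typecheck() on uaddr so a caller passing anything but u32 __user * fails at compile time. A self-contained sketch of that idiom, mirroring include/linux/typecheck.h with illustrative pointer types:

#include <stdint.h>

/* comparing addresses of the two dummies makes the compiler warn (or
 * error under -Werror) on mismatched types, and generates no code */
#define typecheck(type, x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

static int op_on_user_word(uint32_t *uaddr)
{
	typecheck(uint32_t *, uaddr);	/* a uint64_t * argument warns here */
	return (int)*uaddr;
}

int main(void)
{
	uint32_t v = 42;
	return op_on_user_word(&v) == 42 ? 0 : 1;
}

Since the futex helpers are macros, not functions, this is the only way they can enforce their argument type at all.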
13769diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13770index eb92a6e..b98b2f4 100644
13771--- a/arch/x86/include/asm/hw_irq.h
13772+++ b/arch/x86/include/asm/hw_irq.h
13773@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
13774 extern void enable_IO_APIC(void);
13775
13776 /* Statistics */
13777-extern atomic_t irq_err_count;
13778-extern atomic_t irq_mis_count;
13779+extern atomic_unchecked_t irq_err_count;
13780+extern atomic_unchecked_t irq_mis_count;
13781
13782 /* EISA */
13783 extern void eisa_set_level_irq(unsigned int irq);
13784diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13785index a203659..9889f1c 100644
13786--- a/arch/x86/include/asm/i8259.h
13787+++ b/arch/x86/include/asm/i8259.h
13788@@ -62,7 +62,7 @@ struct legacy_pic {
13789 void (*init)(int auto_eoi);
13790 int (*irq_pending)(unsigned int irq);
13791 void (*make_irq)(unsigned int irq);
13792-};
13793+} __do_const;
13794
13795 extern struct legacy_pic *legacy_pic;
13796 extern struct legacy_pic null_legacy_pic;
13797diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13798index d8e8eef..1765f78 100644
13799--- a/arch/x86/include/asm/io.h
13800+++ b/arch/x86/include/asm/io.h
13801@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13802 "m" (*(volatile type __force *)addr) barrier); }
13803
13804 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13805-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13806-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13807+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13808+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13809
13810 build_mmio_read(__readb, "b", unsigned char, "=q", )
13811-build_mmio_read(__readw, "w", unsigned short, "=r", )
13812-build_mmio_read(__readl, "l", unsigned int, "=r", )
13813+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13814+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13815
13816 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13817 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13818@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13819 return ioremap_nocache(offset, size);
13820 }
13821
13822-extern void iounmap(volatile void __iomem *addr);
13823+extern void iounmap(const volatile void __iomem *addr);
13824
13825 extern void set_iounmap_nonlazy(void);
13826
13827@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13828
13829 #include <linux/vmalloc.h>
13830
13831+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13832+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13833+{
13834+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13835+}
13836+
13837+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13838+{
13839+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13840+}
13841+
13842 /*
13843 * Convert a virtual cached pointer to an uncached pointer
13844 */
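The io.h hunk above bounds /dev/mem-style accesses to the CPU's physical address width. The same arithmetic, lifted into a userspace sketch with phys_bits parameterized where the patch reads boot_cpu_data.x86_phys_bits (36 is an assumed PAE-era value; a 64-bit host is assumed for the second example):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long addr, size_t count, int phys_bits)
{
	/* round the end of the access up to a page, compare frame numbers */
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
	printf("%d\n", valid_phys_addr_range(0xfee00000UL, 4096, 36)); /* 1: in range */
	printf("%d\n", valid_phys_addr_range(1UL << 40, 4096, 36));    /* 0: beyond a 36-bit CPU */
	return 0;
}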
13845diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13846index bba3cf8..06bc8da 100644
13847--- a/arch/x86/include/asm/irqflags.h
13848+++ b/arch/x86/include/asm/irqflags.h
13849@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13850 sti; \
13851 sysexit
13852
13853+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13854+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13855+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13856+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13857+
13858 #else
13859 #define INTERRUPT_RETURN iret
13860 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13861diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13862index d3ddd17..c9fb0cc 100644
13863--- a/arch/x86/include/asm/kprobes.h
13864+++ b/arch/x86/include/asm/kprobes.h
13865@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13866 #define RELATIVEJUMP_SIZE 5
13867 #define RELATIVECALL_OPCODE 0xe8
13868 #define RELATIVE_ADDR_SIZE 4
13869-#define MAX_STACK_SIZE 64
13870-#define MIN_STACK_SIZE(ADDR) \
13871- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13872- THREAD_SIZE - (unsigned long)(ADDR))) \
13873- ? (MAX_STACK_SIZE) \
13874- : (((unsigned long)current_thread_info()) + \
13875- THREAD_SIZE - (unsigned long)(ADDR)))
13876+#define MAX_STACK_SIZE 64UL
13877+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13878
13879 #define flush_insn_slot(p) do { } while (0)
13880
13881diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
13882index dc87b65..85039f9 100644
13883--- a/arch/x86/include/asm/kvm_host.h
13884+++ b/arch/x86/include/asm/kvm_host.h
13885@@ -419,8 +419,8 @@ struct kvm_vcpu_arch {
13886 gpa_t time;
13887 struct pvclock_vcpu_time_info hv_clock;
13888 unsigned int hw_tsc_khz;
13889- unsigned int time_offset;
13890- struct page *time_page;
13891+ struct gfn_to_hva_cache pv_time;
13892+ bool pv_time_enabled;
13893 /* set guest stopped flag in pvclock flags field */
13894 bool pvclock_set_guest_stopped_request;
13895
13896diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13897index 2d89e39..baee879 100644
13898--- a/arch/x86/include/asm/local.h
13899+++ b/arch/x86/include/asm/local.h
13900@@ -10,33 +10,97 @@ typedef struct {
13901 atomic_long_t a;
13902 } local_t;
13903
13904+typedef struct {
13905+ atomic_long_unchecked_t a;
13906+} local_unchecked_t;
13907+
13908 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13909
13910 #define local_read(l) atomic_long_read(&(l)->a)
13911+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13912 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13913+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13914
13915 static inline void local_inc(local_t *l)
13916 {
13917- asm volatile(_ASM_INC "%0"
13918+ asm volatile(_ASM_INC "%0\n"
13919+
13920+#ifdef CONFIG_PAX_REFCOUNT
13921+ "jno 0f\n"
13922+ _ASM_DEC "%0\n"
13923+ "int $4\n0:\n"
13924+ _ASM_EXTABLE(0b, 0b)
13925+#endif
13926+
13927+ : "+m" (l->a.counter));
13928+}
13929+
13930+static inline void local_inc_unchecked(local_unchecked_t *l)
13931+{
13932+ asm volatile(_ASM_INC "%0\n"
13933 : "+m" (l->a.counter));
13934 }
13935
13936 static inline void local_dec(local_t *l)
13937 {
13938- asm volatile(_ASM_DEC "%0"
13939+ asm volatile(_ASM_DEC "%0\n"
13940+
13941+#ifdef CONFIG_PAX_REFCOUNT
13942+ "jno 0f\n"
13943+ _ASM_INC "%0\n"
13944+ "int $4\n0:\n"
13945+ _ASM_EXTABLE(0b, 0b)
13946+#endif
13947+
13948+ : "+m" (l->a.counter));
13949+}
13950+
13951+static inline void local_dec_unchecked(local_unchecked_t *l)
13952+{
13953+ asm volatile(_ASM_DEC "%0\n"
13954 : "+m" (l->a.counter));
13955 }
13956
13957 static inline void local_add(long i, local_t *l)
13958 {
13959- asm volatile(_ASM_ADD "%1,%0"
13960+ asm volatile(_ASM_ADD "%1,%0\n"
13961+
13962+#ifdef CONFIG_PAX_REFCOUNT
13963+ "jno 0f\n"
13964+ _ASM_SUB "%1,%0\n"
13965+ "int $4\n0:\n"
13966+ _ASM_EXTABLE(0b, 0b)
13967+#endif
13968+
13969+ : "+m" (l->a.counter)
13970+ : "ir" (i));
13971+}
13972+
13973+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13974+{
13975+ asm volatile(_ASM_ADD "%1,%0\n"
13976 : "+m" (l->a.counter)
13977 : "ir" (i));
13978 }
13979
13980 static inline void local_sub(long i, local_t *l)
13981 {
13982- asm volatile(_ASM_SUB "%1,%0"
13983+ asm volatile(_ASM_SUB "%1,%0\n"
13984+
13985+#ifdef CONFIG_PAX_REFCOUNT
13986+ "jno 0f\n"
13987+ _ASM_ADD "%1,%0\n"
13988+ "int $4\n0:\n"
13989+ _ASM_EXTABLE(0b, 0b)
13990+#endif
13991+
13992+ : "+m" (l->a.counter)
13993+ : "ir" (i));
13994+}
13995+
13996+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13997+{
13998+ asm volatile(_ASM_SUB "%1,%0\n"
13999 : "+m" (l->a.counter)
14000 : "ir" (i));
14001 }
14002@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
14003 {
14004 unsigned char c;
14005
14006- asm volatile(_ASM_SUB "%2,%0; sete %1"
14007+ asm volatile(_ASM_SUB "%2,%0\n"
14008+
14009+#ifdef CONFIG_PAX_REFCOUNT
14010+ "jno 0f\n"
14011+ _ASM_ADD "%2,%0\n"
14012+ "int $4\n0:\n"
14013+ _ASM_EXTABLE(0b, 0b)
14014+#endif
14015+
14016+ "sete %1\n"
14017 : "+m" (l->a.counter), "=qm" (c)
14018 : "ir" (i) : "memory");
14019 return c;
14020@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
14021 {
14022 unsigned char c;
14023
14024- asm volatile(_ASM_DEC "%0; sete %1"
14025+ asm volatile(_ASM_DEC "%0\n"
14026+
14027+#ifdef CONFIG_PAX_REFCOUNT
14028+ "jno 0f\n"
14029+ _ASM_INC "%0\n"
14030+ "int $4\n0:\n"
14031+ _ASM_EXTABLE(0b, 0b)
14032+#endif
14033+
14034+ "sete %1\n"
14035 : "+m" (l->a.counter), "=qm" (c)
14036 : : "memory");
14037 return c != 0;
14038@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
14039 {
14040 unsigned char c;
14041
14042- asm volatile(_ASM_INC "%0; sete %1"
14043+ asm volatile(_ASM_INC "%0\n"
14044+
14045+#ifdef CONFIG_PAX_REFCOUNT
14046+ "jno 0f\n"
14047+ _ASM_DEC "%0\n"
14048+ "int $4\n0:\n"
14049+ _ASM_EXTABLE(0b, 0b)
14050+#endif
14051+
14052+ "sete %1\n"
14053 : "+m" (l->a.counter), "=qm" (c)
14054 : : "memory");
14055 return c != 0;
14056@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
14057 {
14058 unsigned char c;
14059
14060- asm volatile(_ASM_ADD "%2,%0; sets %1"
14061+ asm volatile(_ASM_ADD "%2,%0\n"
14062+
14063+#ifdef CONFIG_PAX_REFCOUNT
14064+ "jno 0f\n"
14065+ _ASM_SUB "%2,%0\n"
14066+ "int $4\n0:\n"
14067+ _ASM_EXTABLE(0b, 0b)
14068+#endif
14069+
14070+ "sets %1\n"
14071 : "+m" (l->a.counter), "=qm" (c)
14072 : "ir" (i) : "memory");
14073 return c;
14074@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
14075 static inline long local_add_return(long i, local_t *l)
14076 {
14077 long __i = i;
14078+ asm volatile(_ASM_XADD "%0, %1\n"
14079+
14080+#ifdef CONFIG_PAX_REFCOUNT
14081+ "jno 0f\n"
14082+ _ASM_MOV "%0,%1\n"
14083+ "int $4\n0:\n"
14084+ _ASM_EXTABLE(0b, 0b)
14085+#endif
14086+
14087+ : "+r" (i), "+m" (l->a.counter)
14088+ : : "memory");
14089+ return i + __i;
14090+}
14091+
14092+/**
14093+ * local_add_return_unchecked - add and return
14094+ * @i: integer value to add
14095+ * @l: pointer to type local_unchecked_t
14096+ *
14097+ * Atomically adds @i to @l and returns @i + @l
14098+ */
14099+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
14100+{
14101+ long __i = i;
14102 asm volatile(_ASM_XADD "%0, %1;"
14103 : "+r" (i), "+m" (l->a.counter)
14104 : : "memory");
14105@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
14106
14107 #define local_cmpxchg(l, o, n) \
14108 (cmpxchg_local(&((l)->a.counter), (o), (n)))
14109+#define local_cmpxchg_unchecked(l, o, n) \
14110+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
14111 /* Always has a lock prefix */
14112 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
14113
14114diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
14115new file mode 100644
14116index 0000000..2bfd3ba
14117--- /dev/null
14118+++ b/arch/x86/include/asm/mman.h
14119@@ -0,0 +1,15 @@
14120+#ifndef _X86_MMAN_H
14121+#define _X86_MMAN_H
14122+
14123+#include <uapi/asm/mman.h>
14124+
14125+#ifdef __KERNEL__
14126+#ifndef __ASSEMBLY__
14127+#ifdef CONFIG_X86_32
14128+#define arch_mmap_check i386_mmap_check
14129+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
14130+#endif
14131+#endif
14132+#endif
14133+
14134+#endif /* X86_MMAN_H */
14135diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
14136index 5f55e69..e20bfb1 100644
14137--- a/arch/x86/include/asm/mmu.h
14138+++ b/arch/x86/include/asm/mmu.h
14139@@ -9,7 +9,7 @@
14140 * we put the segment information here.
14141 */
14142 typedef struct {
14143- void *ldt;
14144+ struct desc_struct *ldt;
14145 int size;
14146
14147 #ifdef CONFIG_X86_64
14148@@ -18,7 +18,19 @@ typedef struct {
14149 #endif
14150
14151 struct mutex lock;
14152- void *vdso;
14153+ unsigned long vdso;
14154+
14155+#ifdef CONFIG_X86_32
14156+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14157+ unsigned long user_cs_base;
14158+ unsigned long user_cs_limit;
14159+
14160+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14161+ cpumask_t cpu_user_cs_mask;
14162+#endif
14163+
14164+#endif
14165+#endif
14166 } mm_context_t;
14167
14168 #ifdef CONFIG_SMP
14169diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
14170index cdbf367..adb37ac 100644
14171--- a/arch/x86/include/asm/mmu_context.h
14172+++ b/arch/x86/include/asm/mmu_context.h
14173@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
14174
14175 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14176 {
14177+
14178+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14179+ unsigned int i;
14180+ pgd_t *pgd;
14181+
14182+ pax_open_kernel();
14183+ pgd = get_cpu_pgd(smp_processor_id());
14184+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
14185+ set_pgd_batched(pgd+i, native_make_pgd(0));
14186+ pax_close_kernel();
14187+#endif
14188+
14189 #ifdef CONFIG_SMP
14190 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
14191 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
14192@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14193 struct task_struct *tsk)
14194 {
14195 unsigned cpu = smp_processor_id();
14196+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14197+ int tlbstate = TLBSTATE_OK;
14198+#endif
14199
14200 if (likely(prev != next)) {
14201 #ifdef CONFIG_SMP
14202+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14203+ tlbstate = this_cpu_read(cpu_tlbstate.state);
14204+#endif
14205 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14206 this_cpu_write(cpu_tlbstate.active_mm, next);
14207 #endif
14208 cpumask_set_cpu(cpu, mm_cpumask(next));
14209
14210 /* Re-load page tables */
14211+#ifdef CONFIG_PAX_PER_CPU_PGD
14212+ pax_open_kernel();
14213+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14214+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14215+ pax_close_kernel();
14216+ load_cr3(get_cpu_pgd(cpu));
14217+#else
14218 load_cr3(next->pgd);
14219+#endif
14220
14221 /* stop flush ipis for the previous mm */
14222 cpumask_clear_cpu(cpu, mm_cpumask(prev));
14223@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14224 */
14225 if (unlikely(prev->context.ldt != next->context.ldt))
14226 load_LDT_nolock(&next->context);
14227- }
14228+
14229+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14230+ if (!(__supported_pte_mask & _PAGE_NX)) {
14231+ smp_mb__before_clear_bit();
14232+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
14233+ smp_mb__after_clear_bit();
14234+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14235+ }
14236+#endif
14237+
14238+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14239+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
14240+ prev->context.user_cs_limit != next->context.user_cs_limit))
14241+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14242 #ifdef CONFIG_SMP
14243+ else if (unlikely(tlbstate != TLBSTATE_OK))
14244+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14245+#endif
14246+#endif
14247+
14248+ }
14249 else {
14250+
14251+#ifdef CONFIG_PAX_PER_CPU_PGD
14252+ pax_open_kernel();
14253+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14254+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14255+ pax_close_kernel();
14256+ load_cr3(get_cpu_pgd(cpu));
14257+#endif
14258+
14259+#ifdef CONFIG_SMP
14260 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14261 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
14262
14263@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14264 * tlb flush IPI delivery. We must reload CR3
14265 * to make sure to use no freed page tables.
14266 */
14267+
14268+#ifndef CONFIG_PAX_PER_CPU_PGD
14269 load_cr3(next->pgd);
14270+#endif
14271+
14272 load_LDT_nolock(&next->context);
14273+
14274+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14275+ if (!(__supported_pte_mask & _PAGE_NX))
14276+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14277+#endif
14278+
14279+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14280+#ifdef CONFIG_PAX_PAGEEXEC
14281+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
14282+#endif
14283+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14284+#endif
14285+
14286 }
14287+#endif
14288 }
14289-#endif
14290 }
14291
14292 #define activate_mm(prev, next) \
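Under PAX_PER_CPU_PGD the switch_mm() changes above stop loading the task's pgd into CR3 directly; instead the user half of it is copied into a per-CPU directory whose kernel half never changes. A minimal sketch of the copy step; this is not the patch's __clone_user_pgds(), whose implementation lives elsewhere in the patch, and the 256-entry user half is an assumption of the sketch:

#include <string.h>

#define PTRS_PER_PGD	512
#define USER_PGD_PTRS	256	/* assumed: low half maps userland */

typedef struct { unsigned long pgd; } pgd_t;

/* the directory CR3 actually points at; one per CPU in the real code */
static pgd_t cpu_pgd[PTRS_PER_PGD];

static void clone_user_pgds(pgd_t *dst, const pgd_t *src)
{
	memcpy(dst, src, USER_PGD_PTRS * sizeof(pgd_t));
}

int main(void)
{
	pgd_t task_pgd[PTRS_PER_PGD] = { { 0x1003 } };	/* fake first entry */

	clone_user_pgds(cpu_pgd, task_pgd);
	return cpu_pgd[0].pgd == 0x1003 ? 0 : 1;
}

__shadow_user_pgds() does a second, offset copy for UDEREF so the kernel can reach userland only through the shadow view.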
14293diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
14294index e3b7819..b257c64 100644
14295--- a/arch/x86/include/asm/module.h
14296+++ b/arch/x86/include/asm/module.h
14297@@ -5,6 +5,7 @@
14298
14299 #ifdef CONFIG_X86_64
14300 /* X86_64 does not define MODULE_PROC_FAMILY */
14301+#define MODULE_PROC_FAMILY ""
14302 #elif defined CONFIG_M486
14303 #define MODULE_PROC_FAMILY "486 "
14304 #elif defined CONFIG_M586
14305@@ -57,8 +58,20 @@
14306 #error unknown processor family
14307 #endif
14308
14309-#ifdef CONFIG_X86_32
14310-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14311+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14312+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14313+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14314+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14315+#else
14316+#define MODULE_PAX_KERNEXEC ""
14317 #endif
14318
14319+#ifdef CONFIG_PAX_MEMORY_UDEREF
14320+#define MODULE_PAX_UDEREF "UDEREF "
14321+#else
14322+#define MODULE_PAX_UDEREF ""
14323+#endif
14324+
14325+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14326+
14327 #endif /* _ASM_X86_MODULE_H */
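The module.h hunk above folds the KERNEXEC and UDEREF configuration into MODULE_ARCH_VERMAGIC, so a module built without UDEREF simply fails the vermagic string compare on a kernel built with it. A compile-and-run sketch of the string pasting, with the config macro defined by hand for the demo:

#include <stdio.h>

#define MODULE_PROC_FAMILY ""		/* the X86_64 case from the hunk */
#define CONFIG_PAX_MEMORY_UDEREF 1	/* assumed: pretend UDEREF is on */

#ifdef CONFIG_PAX_MEMORY_UDEREF
#define MODULE_PAX_UDEREF "UDEREF "
#else
#define MODULE_PAX_UDEREF ""
#endif

/* adjacent string literals are pasted by the compiler */
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_UDEREF

int main(void)
{
	printf("vermagic arch suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}

Loading a mismatched module would otherwise silently bypass the segment-based userland separation, so refusing at load time is the safe default.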
14328diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14329index c0fa356..07a498a 100644
14330--- a/arch/x86/include/asm/nmi.h
14331+++ b/arch/x86/include/asm/nmi.h
14332@@ -42,11 +42,11 @@ struct nmiaction {
14333 nmi_handler_t handler;
14334 unsigned long flags;
14335 const char *name;
14336-};
14337+} __do_const;
14338
14339 #define register_nmi_handler(t, fn, fg, n, init...) \
14340 ({ \
14341- static struct nmiaction init fn##_na = { \
14342+ static const struct nmiaction init fn##_na = { \
14343 .handler = (fn), \
14344 .name = (n), \
14345 .flags = (fg), \
14346@@ -54,7 +54,7 @@ struct nmiaction {
14347 __register_nmi_handler((t), &fn##_na); \
14348 })
14349
14350-int __register_nmi_handler(unsigned int, struct nmiaction *);
14351+int __register_nmi_handler(unsigned int, const struct nmiaction *);
14352
14353 void unregister_nmi_handler(unsigned int, const char *);
14354
14355diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
14356index 320f7bb..e89f8f8 100644
14357--- a/arch/x86/include/asm/page_64_types.h
14358+++ b/arch/x86/include/asm/page_64_types.h
14359@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
14360
14361 /* duplicated to the one in bootmem.h */
14362 extern unsigned long max_pfn;
14363-extern unsigned long phys_base;
14364+extern const unsigned long phys_base;
14365
14366 extern unsigned long __phys_addr(unsigned long);
14367 #define __phys_reloc_hide(x) (x)
14368diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14369index 5edd174..c395822 100644
14370--- a/arch/x86/include/asm/paravirt.h
14371+++ b/arch/x86/include/asm/paravirt.h
14372@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
14373 return (pmd_t) { ret };
14374 }
14375
14376-static inline pmdval_t pmd_val(pmd_t pmd)
14377+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14378 {
14379 pmdval_t ret;
14380
14381@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14382 val);
14383 }
14384
14385+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14386+{
14387+ pgdval_t val = native_pgd_val(pgd);
14388+
14389+ if (sizeof(pgdval_t) > sizeof(long))
14390+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14391+ val, (u64)val >> 32);
14392+ else
14393+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14394+ val);
14395+}
14396+
14397 static inline void pgd_clear(pgd_t *pgdp)
14398 {
14399 set_pgd(pgdp, __pgd(0));
14400@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14401 pv_mmu_ops.set_fixmap(idx, phys, flags);
14402 }
14403
14404+#ifdef CONFIG_PAX_KERNEXEC
14405+static inline unsigned long pax_open_kernel(void)
14406+{
14407+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14408+}
14409+
14410+static inline unsigned long pax_close_kernel(void)
14411+{
14412+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14413+}
14414+#else
14415+static inline unsigned long pax_open_kernel(void) { return 0; }
14416+static inline unsigned long pax_close_kernel(void) { return 0; }
14417+#endif
14418+
14419 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14420
14421 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14422@@ -927,7 +954,7 @@ extern void default_banner(void);
14423
14424 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14425 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14426-#define PARA_INDIRECT(addr) *%cs:addr
14427+#define PARA_INDIRECT(addr) *%ss:addr
14428 #endif
14429
14430 #define INTERRUPT_RETURN \
14431@@ -1002,6 +1029,21 @@ extern void default_banner(void);
14432 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14433 CLBR_NONE, \
14434 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14435+
14436+#define GET_CR0_INTO_RDI \
14437+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14438+ mov %rax,%rdi
14439+
14440+#define SET_RDI_INTO_CR0 \
14441+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14442+
14443+#define GET_CR3_INTO_RDI \
14444+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14445+ mov %rax,%rdi
14446+
14447+#define SET_RDI_INTO_CR3 \
14448+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14449+
14450 #endif /* CONFIG_X86_32 */
14451
14452 #endif /* __ASSEMBLY__ */
14453diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14454index 142236e..5446ffbc 100644
14455--- a/arch/x86/include/asm/paravirt_types.h
14456+++ b/arch/x86/include/asm/paravirt_types.h
14457@@ -84,7 +84,7 @@ struct pv_init_ops {
14458 */
14459 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14460 unsigned long addr, unsigned len);
14461-};
14462+} __no_const;
14463
14464
14465 struct pv_lazy_ops {
14466@@ -97,7 +97,7 @@ struct pv_time_ops {
14467 unsigned long long (*sched_clock)(void);
14468 unsigned long long (*steal_clock)(int cpu);
14469 unsigned long (*get_tsc_khz)(void);
14470-};
14471+} __no_const;
14472
14473 struct pv_cpu_ops {
14474 /* hooks for various privileged instructions */
14475@@ -191,7 +191,7 @@ struct pv_cpu_ops {
14476
14477 void (*start_context_switch)(struct task_struct *prev);
14478 void (*end_context_switch)(struct task_struct *next);
14479-};
14480+} __no_const;
14481
14482 struct pv_irq_ops {
14483 /*
14484@@ -222,7 +222,7 @@ struct pv_apic_ops {
14485 unsigned long start_eip,
14486 unsigned long start_esp);
14487 #endif
14488-};
14489+} __no_const;
14490
14491 struct pv_mmu_ops {
14492 unsigned long (*read_cr2)(void);
14493@@ -312,6 +312,7 @@ struct pv_mmu_ops {
14494 struct paravirt_callee_save make_pud;
14495
14496 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14497+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14498 #endif /* PAGETABLE_LEVELS == 4 */
14499 #endif /* PAGETABLE_LEVELS >= 3 */
14500
14501@@ -323,6 +324,12 @@ struct pv_mmu_ops {
14502 an mfn. We can tell which is which from the index. */
14503 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14504 phys_addr_t phys, pgprot_t flags);
14505+
14506+#ifdef CONFIG_PAX_KERNEXEC
14507+ unsigned long (*pax_open_kernel)(void);
14508+ unsigned long (*pax_close_kernel)(void);
14509+#endif
14510+
14511 };
14512
14513 struct arch_spinlock;
14514@@ -333,7 +340,7 @@ struct pv_lock_ops {
14515 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14516 int (*spin_trylock)(struct arch_spinlock *lock);
14517 void (*spin_unlock)(struct arch_spinlock *lock);
14518-};
14519+} __no_const;
14520
14521 /* This contains all the paravirt structures: we get a convenient
14522 * number for each function using the offset which we use to indicate
14523diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14524index b4389a4..7024269 100644
14525--- a/arch/x86/include/asm/pgalloc.h
14526+++ b/arch/x86/include/asm/pgalloc.h
14527@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14528 pmd_t *pmd, pte_t *pte)
14529 {
14530 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14531+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14532+}
14533+
14534+static inline void pmd_populate_user(struct mm_struct *mm,
14535+ pmd_t *pmd, pte_t *pte)
14536+{
14537+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14538 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14539 }
14540
14541@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14542
14543 #ifdef CONFIG_X86_PAE
14544 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14545+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14546+{
14547+ pud_populate(mm, pudp, pmd);
14548+}
14549 #else /* !CONFIG_X86_PAE */
14550 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14551 {
14552 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14553 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14554 }
14555+
14556+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14557+{
14558+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14559+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14560+}
14561 #endif /* CONFIG_X86_PAE */
14562
14563 #if PAGETABLE_LEVELS > 3
14564@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14565 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14566 }
14567
14568+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14569+{
14570+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14571+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14572+}
14573+
14574 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14575 {
14576 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
14577diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14578index f2b489c..4f7e2e5 100644
14579--- a/arch/x86/include/asm/pgtable-2level.h
14580+++ b/arch/x86/include/asm/pgtable-2level.h
14581@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14582
14583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14584 {
14585+ pax_open_kernel();
14586 *pmdp = pmd;
14587+ pax_close_kernel();
14588 }
14589
14590 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14591diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14592index 4cc9f2b..5fd9226 100644
14593--- a/arch/x86/include/asm/pgtable-3level.h
14594+++ b/arch/x86/include/asm/pgtable-3level.h
14595@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14596
14597 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14598 {
14599+ pax_open_kernel();
14600 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14601+ pax_close_kernel();
14602 }
14603
14604 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14605 {
14606+ pax_open_kernel();
14607 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14608+ pax_close_kernel();
14609 }
14610
14611 /*
14612diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14613index 1c1a955..50f828c 100644
14614--- a/arch/x86/include/asm/pgtable.h
14615+++ b/arch/x86/include/asm/pgtable.h
14616@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14617
14618 #ifndef __PAGETABLE_PUD_FOLDED
14619 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14620+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14621 #define pgd_clear(pgd) native_pgd_clear(pgd)
14622 #endif
14623
14624@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14625
14626 #define arch_end_context_switch(prev) do {} while(0)
14627
14628+#define pax_open_kernel() native_pax_open_kernel()
14629+#define pax_close_kernel() native_pax_close_kernel()
14630 #endif /* CONFIG_PARAVIRT */
14631
14632+#define __HAVE_ARCH_PAX_OPEN_KERNEL
14633+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14634+
14635+#ifdef CONFIG_PAX_KERNEXEC
14636+static inline unsigned long native_pax_open_kernel(void)
14637+{
14638+ unsigned long cr0;
14639+
14640+ preempt_disable();
14641+ barrier();
14642+ cr0 = read_cr0() ^ X86_CR0_WP;
14643+ BUG_ON(cr0 & X86_CR0_WP);
14644+ write_cr0(cr0);
14645+ return cr0 ^ X86_CR0_WP;
14646+}
14647+
14648+static inline unsigned long native_pax_close_kernel(void)
14649+{
14650+ unsigned long cr0;
14651+
14652+ cr0 = read_cr0() ^ X86_CR0_WP;
14653+ BUG_ON(!(cr0 & X86_CR0_WP));
14654+ write_cr0(cr0);
14655+ barrier();
14656+ preempt_enable_no_resched();
14657+ return cr0 ^ X86_CR0_WP;
14658+}
14659+#else
14660+static inline unsigned long native_pax_open_kernel(void) { return 0; }
14661+static inline unsigned long native_pax_close_kernel(void) { return 0; }
14662+#endif
14663+
14664 /*
14665 * The following only work if pte_present() is true.
14666 * Undefined behaviour if not..
14667 */
14668+static inline int pte_user(pte_t pte)
14669+{
14670+ return pte_val(pte) & _PAGE_USER;
14671+}
14672+
14673 static inline int pte_dirty(pte_t pte)
14674 {
14675 return pte_flags(pte) & _PAGE_DIRTY;
14676@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14677 return pte_clear_flags(pte, _PAGE_RW);
14678 }
14679
14680+static inline pte_t pte_mkread(pte_t pte)
14681+{
14682+ return __pte(pte_val(pte) | _PAGE_USER);
14683+}
14684+
14685 static inline pte_t pte_mkexec(pte_t pte)
14686 {
14687- return pte_clear_flags(pte, _PAGE_NX);
14688+#ifdef CONFIG_X86_PAE
14689+ if (__supported_pte_mask & _PAGE_NX)
14690+ return pte_clear_flags(pte, _PAGE_NX);
14691+ else
14692+#endif
14693+ return pte_set_flags(pte, _PAGE_USER);
14694+}
14695+
14696+static inline pte_t pte_exprotect(pte_t pte)
14697+{
14698+#ifdef CONFIG_X86_PAE
14699+ if (__supported_pte_mask & _PAGE_NX)
14700+ return pte_set_flags(pte, _PAGE_NX);
14701+ else
14702+#endif
14703+ return pte_clear_flags(pte, _PAGE_USER);
14704 }
14705
14706 static inline pte_t pte_mkdirty(pte_t pte)
14707@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14708 #endif
14709
14710 #ifndef __ASSEMBLY__
14711+
14712+#ifdef CONFIG_PAX_PER_CPU_PGD
14713+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14714+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14715+{
14716+ return cpu_pgd[cpu];
14717+}
14718+#endif
14719+
14720 #include <linux/mm_types.h>
14721
14722 static inline int pte_none(pte_t pte)
14723@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14724
14725 static inline int pgd_bad(pgd_t pgd)
14726 {
14727- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14728+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14729 }
14730
14731 static inline int pgd_none(pgd_t pgd)
14732@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
14733 * pgd_offset() returns a (pgd_t *)
14734 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14735 */
14736-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14737+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14738+
14739+#ifdef CONFIG_PAX_PER_CPU_PGD
14740+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14741+#endif
14742+
14743 /*
14744 * a shortcut which implies the use of the kernel's pgd, instead
14745 * of a process's
14746@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
14747 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14748 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14749
14750+#ifdef CONFIG_X86_32
14751+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14752+#else
14753+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14754+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14755+
14756+#ifdef CONFIG_PAX_MEMORY_UDEREF
14757+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
14758+#else
14759+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
14760+#endif
14761+
14762+#endif
14763+
14764 #ifndef __ASSEMBLY__
14765
14766 extern int direct_gbpages;
14767@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14768 * dst and src can be on the same page, but the range must not overlap,
14769 * and must not cross a page boundary.
14770 */
14771-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14772+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14773 {
14774- memcpy(dst, src, count * sizeof(pgd_t));
14775+ pax_open_kernel();
14776+ while (count--)
14777+ *dst++ = *src++;
14778+ pax_close_kernel();
14779 }
14780
14781+#ifdef CONFIG_PAX_PER_CPU_PGD
14782+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14783+#endif
14784+
14785+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14786+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14787+#else
14788+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14789+#endif
14790
14791 #include <asm-generic/pgtable.h>
14792 #endif /* __ASSEMBLY__ */
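native_pax_open_kernel()/native_pax_close_kernel() above implement KERNEXEC's write window by toggling CR0.WP with preemption disabled, and the page-table setters throughout these headers are bracketed by the pair. A userspace analogue, for illustration only, in which mprotect() plays the role of CR0.WP on a read-only mapping:

#include <stdio.h>
#include <sys/mman.h>

static long *counter;

static void open_window(void)  { mprotect(counter, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(counter, 4096, PROT_READ); }

int main(void)
{
	counter = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (counter == MAP_FAILED)
		return 1;

	open_window();
	*counter = 42;		/* would fault without the bracket */
	close_window();

	printf("%ld\n", *counter);
	return 0;
}

The BUG_ON() calls in the real open/close pair catch nested or unbalanced windows, which is why the bracketing must be strictly paired in every hunk above.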
14793diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14794index 8faa215..a8a17ea 100644
14795--- a/arch/x86/include/asm/pgtable_32.h
14796+++ b/arch/x86/include/asm/pgtable_32.h
14797@@ -25,9 +25,6 @@
14798 struct mm_struct;
14799 struct vm_area_struct;
14800
14801-extern pgd_t swapper_pg_dir[1024];
14802-extern pgd_t initial_page_table[1024];
14803-
14804 static inline void pgtable_cache_init(void) { }
14805 static inline void check_pgt_cache(void) { }
14806 void paging_init(void);
14807@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14808 # include <asm/pgtable-2level.h>
14809 #endif
14810
14811+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14812+extern pgd_t initial_page_table[PTRS_PER_PGD];
14813+#ifdef CONFIG_X86_PAE
14814+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14815+#endif
14816+
14817 #if defined(CONFIG_HIGHPTE)
14818 #define pte_offset_map(dir, address) \
14819 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14820@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14821 /* Clear a kernel PTE and flush it from the TLB */
14822 #define kpte_clear_flush(ptep, vaddr) \
14823 do { \
14824+ pax_open_kernel(); \
14825 pte_clear(&init_mm, (vaddr), (ptep)); \
14826+ pax_close_kernel(); \
14827 __flush_tlb_one((vaddr)); \
14828 } while (0)
14829
14830@@ -75,6 +80,9 @@ do { \
14831
14832 #endif /* !__ASSEMBLY__ */
14833
14834+#define HAVE_ARCH_UNMAPPED_AREA
14835+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14836+
14837 /*
14838 * kern_addr_valid() is (1) for FLATMEM and (0) for
14839 * SPARSEMEM and DISCONTIGMEM
14840diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14841index ed5903b..c7fe163 100644
14842--- a/arch/x86/include/asm/pgtable_32_types.h
14843+++ b/arch/x86/include/asm/pgtable_32_types.h
14844@@ -8,7 +8,7 @@
14845 */
14846 #ifdef CONFIG_X86_PAE
14847 # include <asm/pgtable-3level_types.h>
14848-# define PMD_SIZE (1UL << PMD_SHIFT)
14849+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14850 # define PMD_MASK (~(PMD_SIZE - 1))
14851 #else
14852 # include <asm/pgtable-2level_types.h>
14853@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14854 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14855 #endif
14856
14857+#ifdef CONFIG_PAX_KERNEXEC
14858+#ifndef __ASSEMBLY__
14859+extern unsigned char MODULES_EXEC_VADDR[];
14860+extern unsigned char MODULES_EXEC_END[];
14861+#endif
14862+#include <asm/boot.h>
14863+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14864+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14865+#else
14866+#define ktla_ktva(addr) (addr)
14867+#define ktva_ktla(addr) (addr)
14868+#endif
14869+
14870 #define MODULES_VADDR VMALLOC_START
14871 #define MODULES_END VMALLOC_END
14872 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
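On 32-bit KERNEXEC the kernel image is mapped twice, and the ktla_ktva()/ktva_ktla() macros above convert between the executable view and the writable linear view by a constant offset. A round-trip sketch with assumed i386 defaults (PAGE_OFFSET 0xc0000000, LOAD_PHYSICAL_ADDR at 16 MiB):

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET		0xc0000000UL
#define LOAD_PHYSICAL_ADDR	0x1000000UL	/* assumed CONFIG_PHYSICAL_START */

#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long ktla = 0x00100000UL;	/* hypothetical text address */
	unsigned long ktva = ktla_ktva(ktla);

	assert(ktva_ktla(ktva) == ktla);	/* the pair is a round trip */
	printf("ktla=%#lx -> ktva=%#lx\n", ktla, ktva);
	return 0;
}

On 64-bit the pgtable_64_types.h hunk below defines both macros as the identity, since module space is handled differently there.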
14873diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14874index 47356f9..deb94a2 100644
14875--- a/arch/x86/include/asm/pgtable_64.h
14876+++ b/arch/x86/include/asm/pgtable_64.h
14877@@ -16,10 +16,14 @@
14878
14879 extern pud_t level3_kernel_pgt[512];
14880 extern pud_t level3_ident_pgt[512];
14881+extern pud_t level3_vmalloc_start_pgt[512];
14882+extern pud_t level3_vmalloc_end_pgt[512];
14883+extern pud_t level3_vmemmap_pgt[512];
14884+extern pud_t level2_vmemmap_pgt[512];
14885 extern pmd_t level2_kernel_pgt[512];
14886 extern pmd_t level2_fixmap_pgt[512];
14887-extern pmd_t level2_ident_pgt[512];
14888-extern pgd_t init_level4_pgt[];
14889+extern pmd_t level2_ident_pgt[512*2];
14890+extern pgd_t init_level4_pgt[512];
14891
14892 #define swapper_pg_dir init_level4_pgt
14893
14894@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14895
14896 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14897 {
14898+ pax_open_kernel();
14899 *pmdp = pmd;
14900+ pax_close_kernel();
14901 }
14902
14903 static inline void native_pmd_clear(pmd_t *pmd)
14904@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14905
14906 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14907 {
14908+ pax_open_kernel();
14909 *pudp = pud;
14910+ pax_close_kernel();
14911 }
14912
14913 static inline void native_pud_clear(pud_t *pud)
14914@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14915
14916 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14917 {
14918+ pax_open_kernel();
14919+ *pgdp = pgd;
14920+ pax_close_kernel();
14921+}
14922+
14923+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14924+{
14925 *pgdp = pgd;
14926 }
14927
14928diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14929index 766ea16..5b96cb3 100644
14930--- a/arch/x86/include/asm/pgtable_64_types.h
14931+++ b/arch/x86/include/asm/pgtable_64_types.h
14932@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
14933 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14934 #define MODULES_END _AC(0xffffffffff000000, UL)
14935 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14936+#define MODULES_EXEC_VADDR MODULES_VADDR
14937+#define MODULES_EXEC_END MODULES_END
14938+
14939+#define ktla_ktva(addr) (addr)
14940+#define ktva_ktla(addr) (addr)
14941
14942 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
14943diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14944index 3c32db8..1ddccf5 100644
14945--- a/arch/x86/include/asm/pgtable_types.h
14946+++ b/arch/x86/include/asm/pgtable_types.h
14947@@ -16,13 +16,12 @@
14948 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14949 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14950 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14951-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14952+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14953 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14954 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14955 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14956-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14957-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14958-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14959+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14960+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14961 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14962
14963 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14964@@ -40,7 +39,6 @@
14965 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14966 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14967 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14968-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14969 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14970 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14971 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14972@@ -57,8 +55,10 @@
14973
14974 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14975 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14976-#else
14977+#elif defined(CONFIG_KMEMCHECK)
14978 #define _PAGE_NX (_AT(pteval_t, 0))
14979+#else
14980+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14981 #endif
14982
14983 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14984@@ -116,6 +116,9 @@
14985 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14986 _PAGE_ACCESSED)
14987
14988+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14989+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14990+
14991 #define __PAGE_KERNEL_EXEC \
14992 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14993 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14994@@ -126,7 +129,7 @@
14995 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14996 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14997 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14998-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14999+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
15000 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
15001 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
15002 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
15003@@ -188,8 +191,8 @@
15004  * bits are combined, this will allow user to access the high address mapped
15005 * VDSO in the presence of CONFIG_COMPAT_VDSO
15006 */
15007-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
15008-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
15009+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15010+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15011 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
15012 #endif
15013
15014@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
15015 {
15016 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
15017 }
15018+#endif
15019
15020+#if PAGETABLE_LEVELS == 3
15021+#include <asm-generic/pgtable-nopud.h>
15022+#endif
15023+
15024+#if PAGETABLE_LEVELS == 2
15025+#include <asm-generic/pgtable-nopmd.h>
15026+#endif
15027+
15028+#ifndef __ASSEMBLY__
15029 #if PAGETABLE_LEVELS > 3
15030 typedef struct { pudval_t pud; } pud_t;
15031
15032@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
15033 return pud.pud;
15034 }
15035 #else
15036-#include <asm-generic/pgtable-nopud.h>
15037-
15038 static inline pudval_t native_pud_val(pud_t pud)
15039 {
15040 return native_pgd_val(pud.pgd);
15041@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
15042 return pmd.pmd;
15043 }
15044 #else
15045-#include <asm-generic/pgtable-nopmd.h>
15046-
15047 static inline pmdval_t native_pmd_val(pmd_t pmd)
15048 {
15049 return native_pgd_val(pmd.pud.pgd);
15050@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
15051
15052 extern pteval_t __supported_pte_mask;
15053 extern void set_nx(void);
15054-extern int nx_enabled;
15055
15056 #define pgprot_writecombine pgprot_writecombine
15057 extern pgprot_t pgprot_writecombine(pgprot_t prot);
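The pgtable_types.h hunks above free page-table bit 9 by folding the SPECIAL/CPA_TEST/SPLITTING markers onto one bit, and fall back to the kmemcheck "hidden" bit 11 as a pure software NX marker when the hardware NX bit (bit 63) is unavailable. A minimal userspace sketch of that fallback selection; the pteval_t harness and HAVE_HW_NX switch are illustrative, not kernel code:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define _PAGE_BIT_HIDDEN 11   /* software-only bit, ignored by the MMU */
#define _PAGE_BIT_NX_HW  63   /* real NX bit on 64-bit/PAE page tables */

/* pick the marker the way the patched header does */
#ifdef HAVE_HW_NX
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_NX_HW)
#else
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_HIDDEN)
#endif

int main(void)
{
    pteval_t pte = 0x063;           /* PRESENT+RW+DIRTY+ACCESSED */
    pte |= _PAGE_NX;                /* mark the page non-executable */
    printf("pte=%#llx exec=%s\n", (unsigned long long)pte,
           (pte & _PAGE_NX) ? "no" : "yes");
    return 0;
}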
15058diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
15059index 888184b..a07ac89 100644
15060--- a/arch/x86/include/asm/processor.h
15061+++ b/arch/x86/include/asm/processor.h
15062@@ -287,7 +287,7 @@ struct tss_struct {
15063
15064 } ____cacheline_aligned;
15065
15066-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
15067+extern struct tss_struct init_tss[NR_CPUS];
15068
15069 /*
15070 * Save the original ist values for checking stack pointers during debugging
15071@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
15072 */
15073 #define TASK_SIZE PAGE_OFFSET
15074 #define TASK_SIZE_MAX TASK_SIZE
15075+
15076+#ifdef CONFIG_PAX_SEGMEXEC
15077+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
15078+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
15079+#else
15080 #define STACK_TOP TASK_SIZE
15081-#define STACK_TOP_MAX STACK_TOP
15082+#endif
15083+
15084+#define STACK_TOP_MAX TASK_SIZE
15085
15086 #define INIT_THREAD { \
15087- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15088+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15089 .vm86_info = NULL, \
15090 .sysenter_cs = __KERNEL_CS, \
15091 .io_bitmap_ptr = NULL, \
15092@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
15093 */
15094 #define INIT_TSS { \
15095 .x86_tss = { \
15096- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15097+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15098 .ss0 = __KERNEL_DS, \
15099 .ss1 = __KERNEL_CS, \
15100 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
15101@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
15102 extern unsigned long thread_saved_pc(struct task_struct *tsk);
15103
15104 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
15105-#define KSTK_TOP(info) \
15106-({ \
15107- unsigned long *__ptr = (unsigned long *)(info); \
15108- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
15109-})
15110+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
15111
15112 /*
15113 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
15114@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15115 #define task_pt_regs(task) \
15116 ({ \
15117 struct pt_regs *__regs__; \
15118- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
15119+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
15120 __regs__ - 1; \
15121 })
15122
15123@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15124 /*
15125 * User space process size. 47bits minus one guard page.
15126 */
15127-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
15128+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
15129
15130 /* This decides where the kernel will search for a free chunk of vm
15131 * space during mmap's.
15132 */
15133 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
15134- 0xc0000000 : 0xFFFFe000)
15135+ 0xc0000000 : 0xFFFFf000)
15136
15137 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
15138 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
15139@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15140 #define STACK_TOP_MAX TASK_SIZE_MAX
15141
15142 #define INIT_THREAD { \
15143- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15144+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15145 }
15146
15147 #define INIT_TSS { \
15148- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15149+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15150 }
15151
15152 /*
15153@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
15154 */
15155 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
15156
15157+#ifdef CONFIG_PAX_SEGMEXEC
15158+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
15159+#endif
15160+
15161 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
15162
15163 /* Get/set a process' ability to use the timestamp counter instruction */
15164@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
15165 #define cpu_has_amd_erratum(x) (false)
15166 #endif /* CONFIG_CPU_SUP_AMD */
15167
15168-extern unsigned long arch_align_stack(unsigned long sp);
15169+#define arch_align_stack(x) ((x) & ~0xfUL)
15170 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
15171
15172 void default_idle(void);
15173 bool set_pm_idle_to_default(void);
15174
15175-void stop_this_cpu(void *dummy);
15176+void stop_this_cpu(void *dummy) __noreturn;
15177
15178 #endif /* _ASM_X86_PROCESSOR_H */
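One notable change in the processor.h hunks: arch_align_stack() stops being an out-of-line function (which randomizes the stack top) and becomes a constant 16-byte round-down. A tiny sketch of that mask arithmetic, with a made-up sample stack pointer:

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)   /* round down to 16 bytes */

int main(void)
{
    unsigned long sp = 0xbfffed37UL;
    printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));  /* 0xbfffed30 */
    return 0;
}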
15179diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
15180index 942a086..6c26446 100644
15181--- a/arch/x86/include/asm/ptrace.h
15182+++ b/arch/x86/include/asm/ptrace.h
15183@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
15184 }
15185
15186 /*
15187- * user_mode_vm(regs) determines whether a register set came from user mode.
15188+ * user_mode(regs) determines whether a register set came from user mode.
15189 * This is true if V8086 mode was enabled OR if the register set was from
15190 * protected mode with RPL-3 CS value. This tricky test checks that with
15191 * one comparison. Many places in the kernel can bypass this full check
15192- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
15193+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
15194+ * be used.
15195 */
15196-static inline int user_mode(struct pt_regs *regs)
15197+static inline int user_mode_novm(struct pt_regs *regs)
15198 {
15199 #ifdef CONFIG_X86_32
15200 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
15201 #else
15202- return !!(regs->cs & 3);
15203+ return !!(regs->cs & SEGMENT_RPL_MASK);
15204 #endif
15205 }
15206
15207-static inline int user_mode_vm(struct pt_regs *regs)
15208+static inline int user_mode(struct pt_regs *regs)
15209 {
15210 #ifdef CONFIG_X86_32
15211 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
15212 USER_RPL;
15213 #else
15214- return user_mode(regs);
15215+ return user_mode_novm(regs);
15216 #endif
15217 }
15218
15219@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
15220 #ifdef CONFIG_X86_64
15221 static inline bool user_64bit_mode(struct pt_regs *regs)
15222 {
15223+ unsigned long cs = regs->cs & 0xffff;
15224 #ifndef CONFIG_PARAVIRT
15225 /*
15226 * On non-paravirt systems, this is the only long mode CPL 3
15227 * selector. We do not allow long mode selectors in the LDT.
15228 */
15229- return regs->cs == __USER_CS;
15230+ return cs == __USER_CS;
15231 #else
15232 /* Headers are too twisted for this to go in paravirt.h. */
15233- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
15234+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
15235 #endif
15236 }
15237
15238@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
15239 * Traps from the kernel do not save sp and ss.
15240 * Use the helper function to retrieve sp.
15241 */
15242- if (offset == offsetof(struct pt_regs, sp) &&
15243- regs->cs == __KERNEL_CS)
15244- return kernel_stack_pointer(regs);
15245+ if (offset == offsetof(struct pt_regs, sp)) {
15246+ unsigned long cs = regs->cs & 0xffff;
15247+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
15248+ return kernel_stack_pointer(regs);
15249+ }
15250 #endif
15251 return *(unsigned long *)((unsigned long)regs + offset);
15252 }
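The ptrace.h hunk swaps the names so that user_mode() is the full check and user_mode_novm() the fast RPL-only variant. The full check relies on one comparison: OR the selector's RPL bits with the EFLAGS VM bit, and anything >= USER_RPL is user mode. A self-contained sketch of that trick (constants mirror the x86 headers; the test values are examples):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      (1UL << 17)   /* EFLAGS.VM */

static int user_mode(unsigned long cs, unsigned long eflags)
{
    return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    printf("ring0: %d\n", user_mode(0x10, 0));          /* 0 */
    printf("ring3: %d\n", user_mode(0x73, 0));          /* 1 */
    printf("vm86 : %d\n", user_mode(0x00, 1UL << 17));  /* 1 */
    return 0;
}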
15253diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
15254index fe1ec5b..dc5c3fe 100644
15255--- a/arch/x86/include/asm/realmode.h
15256+++ b/arch/x86/include/asm/realmode.h
15257@@ -22,16 +22,14 @@ struct real_mode_header {
15258 #endif
15259 /* APM/BIOS reboot */
15260 u32 machine_real_restart_asm;
15261-#ifdef CONFIG_X86_64
15262 u32 machine_real_restart_seg;
15263-#endif
15264 };
15265
15266 /* This must match data at trampoline_32/64.S */
15267 struct trampoline_header {
15268 #ifdef CONFIG_X86_32
15269 u32 start;
15270- u16 gdt_pad;
15271+ u16 boot_cs;
15272 u16 gdt_limit;
15273 u32 gdt_base;
15274 #else
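The comment in the hunk above stresses that trampoline_header must match data laid out in trampoline_32/64.S. A hedged sketch of how such C/asm layout contracts can be pinned down with compile-time offset checks; the asserted offsets below follow this toy 32-bit layout and are not taken from the real trampoline:

#include <stddef.h>

struct trampoline_header {
    unsigned int   start;
    unsigned short boot_cs;
    unsigned short gdt_limit;
    unsigned int   gdt_base;
};

/* fail the build if the C layout drifts away from the asm side */
_Static_assert(offsetof(struct trampoline_header, boot_cs)   == 4, "asm offset");
_Static_assert(offsetof(struct trampoline_header, gdt_limit) == 6, "asm offset");
_Static_assert(offsetof(struct trampoline_header, gdt_base)  == 8, "asm offset");

int main(void) { return 0; }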
15275diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15276index a82c4f1..ac45053 100644
15277--- a/arch/x86/include/asm/reboot.h
15278+++ b/arch/x86/include/asm/reboot.h
15279@@ -6,13 +6,13 @@
15280 struct pt_regs;
15281
15282 struct machine_ops {
15283- void (*restart)(char *cmd);
15284- void (*halt)(void);
15285- void (*power_off)(void);
15286+ void (* __noreturn restart)(char *cmd);
15287+ void (* __noreturn halt)(void);
15288+ void (* __noreturn power_off)(void);
15289 void (*shutdown)(void);
15290 void (*crash_shutdown)(struct pt_regs *);
15291- void (*emergency_restart)(void);
15292-};
15293+ void (* __noreturn emergency_restart)(void);
15294+} __no_const;
15295
15296 extern struct machine_ops machine_ops;
15297
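The reboot.h hunk annotates the machine_ops function pointers __noreturn, so the compiler may treat indirect calls through them as terminal. A small sketch of the idiom in plain userspace C, with __noreturn spelled out and a hypothetical my_power_off standing in for the real handlers:

#include <stdlib.h>

#define __noreturn __attribute__((noreturn))

/* the attribute on the pointee type propagates through the call */
typedef void __noreturn poweroff_fn(void);

struct machine_ops {
    poweroff_fn *power_off;
};

static void __noreturn my_power_off(void) { exit(0); }

static struct machine_ops ops = { .power_off = my_power_off };

int main(void)
{
    ops.power_off();   /* compiler knows control does not come back */
}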
15298diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15299index 2dbe4a7..ce1db00 100644
15300--- a/arch/x86/include/asm/rwsem.h
15301+++ b/arch/x86/include/asm/rwsem.h
15302@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15303 {
15304 asm volatile("# beginning down_read\n\t"
15305 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15306+
15307+#ifdef CONFIG_PAX_REFCOUNT
15308+ "jno 0f\n"
15309+ LOCK_PREFIX _ASM_DEC "(%1)\n"
15310+ "int $4\n0:\n"
15311+ _ASM_EXTABLE(0b, 0b)
15312+#endif
15313+
15314 /* adds 0x00000001 */
15315 " jns 1f\n"
15316 " call call_rwsem_down_read_failed\n"
15317@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15318 "1:\n\t"
15319 " mov %1,%2\n\t"
15320 " add %3,%2\n\t"
15321+
15322+#ifdef CONFIG_PAX_REFCOUNT
15323+ "jno 0f\n"
15324+ "sub %3,%2\n"
15325+ "int $4\n0:\n"
15326+ _ASM_EXTABLE(0b, 0b)
15327+#endif
15328+
15329 " jle 2f\n\t"
15330 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15331 " jnz 1b\n\t"
15332@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15333 long tmp;
15334 asm volatile("# beginning down_write\n\t"
15335 LOCK_PREFIX " xadd %1,(%2)\n\t"
15336+
15337+#ifdef CONFIG_PAX_REFCOUNT
15338+ "jno 0f\n"
15339+ "mov %1,(%2)\n"
15340+ "int $4\n0:\n"
15341+ _ASM_EXTABLE(0b, 0b)
15342+#endif
15343+
15344 /* adds 0xffff0001, returns the old value */
15345 " test %1,%1\n\t"
15346 /* was the count 0 before? */
15347@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15348 long tmp;
15349 asm volatile("# beginning __up_read\n\t"
15350 LOCK_PREFIX " xadd %1,(%2)\n\t"
15351+
15352+#ifdef CONFIG_PAX_REFCOUNT
15353+ "jno 0f\n"
15354+ "mov %1,(%2)\n"
15355+ "int $4\n0:\n"
15356+ _ASM_EXTABLE(0b, 0b)
15357+#endif
15358+
15359 /* subtracts 1, returns the old value */
15360 " jns 1f\n\t"
15361 " call call_rwsem_wake\n" /* expects old value in %edx */
15362@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15363 long tmp;
15364 asm volatile("# beginning __up_write\n\t"
15365 LOCK_PREFIX " xadd %1,(%2)\n\t"
15366+
15367+#ifdef CONFIG_PAX_REFCOUNT
15368+ "jno 0f\n"
15369+ "mov %1,(%2)\n"
15370+ "int $4\n0:\n"
15371+ _ASM_EXTABLE(0b, 0b)
15372+#endif
15373+
15374 /* subtracts 0xffff0001, returns the old value */
15375 " jns 1f\n\t"
15376 " call call_rwsem_wake\n" /* expects old value in %edx */
15377@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15378 {
15379 asm volatile("# beginning __downgrade_write\n\t"
15380 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15381+
15382+#ifdef CONFIG_PAX_REFCOUNT
15383+ "jno 0f\n"
15384+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15385+ "int $4\n0:\n"
15386+ _ASM_EXTABLE(0b, 0b)
15387+#endif
15388+
15389 /*
15390 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15391 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15392@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15393 */
15394 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15395 {
15396- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15397+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15398+
15399+#ifdef CONFIG_PAX_REFCOUNT
15400+ "jno 0f\n"
15401+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
15402+ "int $4\n0:\n"
15403+ _ASM_EXTABLE(0b, 0b)
15404+#endif
15405+
15406 : "+m" (sem->count)
15407 : "er" (delta));
15408 }
15409@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15410 */
15411 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15412 {
15413- return delta + xadd(&sem->count, delta);
15414+ return delta + xadd_check_overflow(&sem->count, delta);
15415 }
15416
15417 #endif /* __KERNEL__ */
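The jno/undo/"int $4" sequences added throughout this file (and, below, in spinlock.h) are the PaX REFCOUNT pattern: perform the atomic operation, and if the CPU's overflow flag is set, roll the operation back and raise the overflow exception. A portable sketch of the same detect-and-undo idea using the GCC/Clang overflow builtins instead of x86 flags; checked_add and the demo harness are hypothetical, not kernel code:

#include <stdio.h>
#include <limits.h>

static long checked_add(long *count, long delta)
{
    long newval;
    if (__builtin_add_overflow(*count, delta, &newval)) {
        /* leave *count untouched -- the asm version undoes the add */
        fprintf(stderr, "refcount overflow, trapping\n");
        __builtin_trap();
    }
    *count = newval;
    return newval;
}

int main(void)
{
    long sem_count = LONG_MAX - 1;
    printf("%ld\n", checked_add(&sem_count, 1));  /* ok */
    checked_add(&sem_count, 1);                   /* traps */
    return 0;
}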
15418diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15419index c48a950..c6d7468 100644
15420--- a/arch/x86/include/asm/segment.h
15421+++ b/arch/x86/include/asm/segment.h
15422@@ -64,10 +64,15 @@
15423 * 26 - ESPFIX small SS
15424 * 27 - per-cpu [ offset to per-cpu data area ]
15425 * 28 - stack_canary-20 [ for stack protector ]
15426- * 29 - unused
15427- * 30 - unused
15428+ * 29 - PCI BIOS CS
15429+ * 30 - PCI BIOS DS
15430 * 31 - TSS for double fault handler
15431 */
15432+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15433+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15434+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15435+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15436+
15437 #define GDT_ENTRY_TLS_MIN 6
15438 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15439
15440@@ -79,6 +84,8 @@
15441
15442 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15443
15444+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15445+
15446 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15447
15448 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15449@@ -104,6 +111,12 @@
15450 #define __KERNEL_STACK_CANARY 0
15451 #endif
15452
15453+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15454+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15455+
15456+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15457+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15458+
15459 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15460
15461 /*
15462@@ -141,7 +154,7 @@
15463 */
15464
15465 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15466-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15467+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15468
15469
15470 #else
15471@@ -165,6 +178,8 @@
15472 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15473 #define __USER32_DS __USER_DS
15474
15475+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15476+
15477 #define GDT_ENTRY_TSS 8 /* needs two entries */
15478 #define GDT_ENTRY_LDT 10 /* needs two entries */
15479 #define GDT_ENTRY_TLS_MIN 12
15480@@ -185,6 +200,7 @@
15481 #endif
15482
15483 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15484+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15485 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15486 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15487 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15488@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15489 {
15490 unsigned long __limit;
15491 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15492- return __limit + 1;
15493+ return __limit;
15494 }
15495
15496 #endif /* !__ASSEMBLY__ */
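The new GDT entries above (KERNEXEC CS, PCI BIOS CS/DS) all follow the usual x86 selector arithmetic: a selector is (descriptor index << 3) | table indicator | RPL, which is why the headers multiply the entry number by 8 and add 3 for ring-3 selectors. A quick sketch of the computation, using two entry numbers from this hunk as examples:

#include <stdio.h>

#define SELECTOR(index, rpl) (((index) << 3) | (rpl))

int main(void)
{
    /* 32-bit __KERNEXEC_KERNEL_CS: entry 4, ring 0 -> 0x20 */
    printf("KERNEXEC CS = %#x\n", SELECTOR(4, 0));
    /* 64-bit __USER_CS: entry 6, ring 3 -> 0x33 */
    printf("__USER_CS   = %#x\n", SELECTOR(6, 3));
    return 0;
}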
15497diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15498index b073aae..39f9bdd 100644
15499--- a/arch/x86/include/asm/smp.h
15500+++ b/arch/x86/include/asm/smp.h
15501@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15502 /* cpus sharing the last level cache: */
15503 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15504 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15505-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15506+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15507
15508 static inline struct cpumask *cpu_sibling_mask(int cpu)
15509 {
15510@@ -79,7 +79,7 @@ struct smp_ops {
15511
15512 void (*send_call_func_ipi)(const struct cpumask *mask);
15513 void (*send_call_func_single_ipi)(int cpu);
15514-};
15515+} __no_const;
15516
15517 /* Globals due to paravirt */
15518 extern void set_cpu_sibling_map(int cpu);
15519@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15520 extern int safe_smp_processor_id(void);
15521
15522 #elif defined(CONFIG_X86_64_SMP)
15523-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15524-
15525-#define stack_smp_processor_id() \
15526-({ \
15527- struct thread_info *ti; \
15528- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15529- ti->cpu; \
15530-})
15531+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15532+#define stack_smp_processor_id() raw_smp_processor_id()
15533 #define safe_smp_processor_id() smp_processor_id()
15534
15535 #endif
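The deleted stack_smp_processor_id() found thread_info by masking %rsp down to the THREAD_SIZE-aligned base of the current kernel stack; the patch replaces it with a per-cpu read. A sketch of that masking trick on an arbitrary address, with a toy 16 KB stack size and a made-up rsp value:

#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)   /* must be a power of two */

int main(void)
{
    unsigned long rsp  = 0xffff8800131a7d48UL;
    unsigned long base = rsp & ~(THREAD_SIZE - 1);
    printf("rsp=%#lx -> stack base=%#lx\n", rsp, base);
    return 0;
}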
15536diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15537index 33692ea..350a534 100644
15538--- a/arch/x86/include/asm/spinlock.h
15539+++ b/arch/x86/include/asm/spinlock.h
15540@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15541 static inline void arch_read_lock(arch_rwlock_t *rw)
15542 {
15543 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15544+
15545+#ifdef CONFIG_PAX_REFCOUNT
15546+ "jno 0f\n"
15547+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15548+ "int $4\n0:\n"
15549+ _ASM_EXTABLE(0b, 0b)
15550+#endif
15551+
15552 "jns 1f\n"
15553 "call __read_lock_failed\n\t"
15554 "1:\n"
15555@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15556 static inline void arch_write_lock(arch_rwlock_t *rw)
15557 {
15558 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15559+
15560+#ifdef CONFIG_PAX_REFCOUNT
15561+ "jno 0f\n"
15562+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15563+ "int $4\n0:\n"
15564+ _ASM_EXTABLE(0b, 0b)
15565+#endif
15566+
15567 "jz 1f\n"
15568 "call __write_lock_failed\n\t"
15569 "1:\n"
15570@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15571
15572 static inline void arch_read_unlock(arch_rwlock_t *rw)
15573 {
15574- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15575+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15576+
15577+#ifdef CONFIG_PAX_REFCOUNT
15578+ "jno 0f\n"
15579+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15580+ "int $4\n0:\n"
15581+ _ASM_EXTABLE(0b, 0b)
15582+#endif
15583+
15584 :"+m" (rw->lock) : : "memory");
15585 }
15586
15587 static inline void arch_write_unlock(arch_rwlock_t *rw)
15588 {
15589- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15590+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15591+
15592+#ifdef CONFIG_PAX_REFCOUNT
15593+ "jno 0f\n"
15594+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15595+ "int $4\n0:\n"
15596+ _ASM_EXTABLE(0b, 0b)
15597+#endif
15598+
15599 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15600 }
15601
15602diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15603index 6a99859..03cb807 100644
15604--- a/arch/x86/include/asm/stackprotector.h
15605+++ b/arch/x86/include/asm/stackprotector.h
15606@@ -47,7 +47,7 @@
15607 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15608 */
15609 #define GDT_STACK_CANARY_INIT \
15610- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15611+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15612
15613 /*
15614 * Initialize the stackprotector canary value.
15615@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15616
15617 static inline void load_stack_canary_segment(void)
15618 {
15619-#ifdef CONFIG_X86_32
15620+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15621 asm volatile ("mov %0, %%gs" : : "r" (0));
15622 #endif
15623 }
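The canary descriptor's limit drops from 0x18 to 0x17 above because an x86 segment limit names the last valid byte offset, not the segment size: a 24-byte region spans offsets 0..23. A one-line sanity check of that off-by-one:

#include <stdio.h>

int main(void)
{
    unsigned size  = 24;            /* canary area, 0x18 bytes */
    unsigned limit = size - 1;      /* inclusive limit -> 0x17 */
    printf("size=%#x limit=%#x\n", size, limit);
    return 0;
}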
15624diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15625index 70bbe39..4ae2bd4 100644
15626--- a/arch/x86/include/asm/stacktrace.h
15627+++ b/arch/x86/include/asm/stacktrace.h
15628@@ -11,28 +11,20 @@
15629
15630 extern int kstack_depth_to_print;
15631
15632-struct thread_info;
15633+struct task_struct;
15634 struct stacktrace_ops;
15635
15636-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15637- unsigned long *stack,
15638- unsigned long bp,
15639- const struct stacktrace_ops *ops,
15640- void *data,
15641- unsigned long *end,
15642- int *graph);
15643+typedef unsigned long walk_stack_t(struct task_struct *task,
15644+ void *stack_start,
15645+ unsigned long *stack,
15646+ unsigned long bp,
15647+ const struct stacktrace_ops *ops,
15648+ void *data,
15649+ unsigned long *end,
15650+ int *graph);
15651
15652-extern unsigned long
15653-print_context_stack(struct thread_info *tinfo,
15654- unsigned long *stack, unsigned long bp,
15655- const struct stacktrace_ops *ops, void *data,
15656- unsigned long *end, int *graph);
15657-
15658-extern unsigned long
15659-print_context_stack_bp(struct thread_info *tinfo,
15660- unsigned long *stack, unsigned long bp,
15661- const struct stacktrace_ops *ops, void *data,
15662- unsigned long *end, int *graph);
15663+extern walk_stack_t print_context_stack;
15664+extern walk_stack_t print_context_stack_bp;
15665
15666 /* Generic stack tracer with callbacks */
15667
15668@@ -40,7 +32,7 @@ struct stacktrace_ops {
15669 void (*address)(void *data, unsigned long address, int reliable);
15670 /* On negative return stop dumping */
15671 int (*stack)(void *data, char *name);
15672- walk_stack_t walk_stack;
15673+ walk_stack_t *walk_stack;
15674 };
15675
15676 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
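The stacktrace.h hunk turns walk_stack_t from a pointer typedef into a function-type typedef, so one typedef both declares the two walkers and types the struct member as a pointer to it. A self-contained sketch of that C idiom with a simplified signature and a toy walk_simple walker:

#include <stdio.h>

typedef unsigned long walk_stack_t(void *stack, int depth);

/* declaration, just like "extern walk_stack_t print_context_stack;" */
static walk_stack_t walk_simple;

struct stacktrace_ops {
    walk_stack_t *walk_stack;      /* pointer to the function type */
};

static unsigned long walk_simple(void *stack, int depth)
{
    printf("walking %p, depth %d\n", stack, depth);
    return 0;
}

int main(void)
{
    struct stacktrace_ops ops = { .walk_stack = walk_simple };
    long frame;
    ops.walk_stack(&frame, 3);
    return 0;
}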
15677diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15678index 4ec45b3..a4f0a8a 100644
15679--- a/arch/x86/include/asm/switch_to.h
15680+++ b/arch/x86/include/asm/switch_to.h
15681@@ -108,7 +108,7 @@ do { \
15682 "call __switch_to\n\t" \
15683 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15684 __switch_canary \
15685- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15686+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15687 "movq %%rax,%%rdi\n\t" \
15688 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15689 "jnz ret_from_fork\n\t" \
15690@@ -119,7 +119,7 @@ do { \
15691 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15692 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15693 [_tif_fork] "i" (_TIF_FORK), \
15694- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15695+ [thread_info] "m" (current_tinfo), \
15696 [current_task] "m" (current_task) \
15697 __switch_canary_iparam \
15698 : "memory", "cc" __EXTRA_CLOBBER)
15699diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15700index 2d946e6..e453ec4 100644
15701--- a/arch/x86/include/asm/thread_info.h
15702+++ b/arch/x86/include/asm/thread_info.h
15703@@ -10,6 +10,7 @@
15704 #include <linux/compiler.h>
15705 #include <asm/page.h>
15706 #include <asm/types.h>
15707+#include <asm/percpu.h>
15708
15709 /*
15710 * low level task data that entry.S needs immediate access to
15711@@ -24,7 +25,6 @@ struct exec_domain;
15712 #include <linux/atomic.h>
15713
15714 struct thread_info {
15715- struct task_struct *task; /* main task structure */
15716 struct exec_domain *exec_domain; /* execution domain */
15717 __u32 flags; /* low level flags */
15718 __u32 status; /* thread synchronous flags */
15719@@ -34,19 +34,13 @@ struct thread_info {
15720 mm_segment_t addr_limit;
15721 struct restart_block restart_block;
15722 void __user *sysenter_return;
15723-#ifdef CONFIG_X86_32
15724- unsigned long previous_esp; /* ESP of the previous stack in
15725- case of nested (IRQ) stacks
15726- */
15727- __u8 supervisor_stack[0];
15728-#endif
15729+ unsigned long lowest_stack;
15730 unsigned int sig_on_uaccess_error:1;
15731 unsigned int uaccess_err:1; /* uaccess failed */
15732 };
15733
15734-#define INIT_THREAD_INFO(tsk) \
15735+#define INIT_THREAD_INFO \
15736 { \
15737- .task = &tsk, \
15738 .exec_domain = &default_exec_domain, \
15739 .flags = 0, \
15740 .cpu = 0, \
15741@@ -57,7 +51,7 @@ struct thread_info {
15742 }, \
15743 }
15744
15745-#define init_thread_info (init_thread_union.thread_info)
15746+#define init_thread_info (init_thread_union.stack)
15747 #define init_stack (init_thread_union.stack)
15748
15749 #else /* !__ASSEMBLY__ */
15750@@ -98,6 +92,7 @@ struct thread_info {
15751 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15752 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15753 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15754+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15755
15756 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15757 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15758@@ -122,17 +117,18 @@ struct thread_info {
15759 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15760 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15761 #define _TIF_X32 (1 << TIF_X32)
15762+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15763
15764 /* work to do in syscall_trace_enter() */
15765 #define _TIF_WORK_SYSCALL_ENTRY \
15766 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15767 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15768- _TIF_NOHZ)
15769+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15770
15771 /* work to do in syscall_trace_leave() */
15772 #define _TIF_WORK_SYSCALL_EXIT \
15773 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15774- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15775+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15776
15777 /* work to do on interrupt/exception return */
15778 #define _TIF_WORK_MASK \
15779@@ -143,7 +139,7 @@ struct thread_info {
15780 /* work to do on any return to user space */
15781 #define _TIF_ALLWORK_MASK \
15782 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15783- _TIF_NOHZ)
15784+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15785
15786 /* Only used for 64 bit */
15787 #define _TIF_DO_NOTIFY_MASK \
15788@@ -159,45 +155,40 @@ struct thread_info {
15789
15790 #define PREEMPT_ACTIVE 0x10000000
15791
15792-#ifdef CONFIG_X86_32
15793-
15794-#define STACK_WARN (THREAD_SIZE/8)
15795-/*
15796- * macros/functions for gaining access to the thread information structure
15797- *
15798- * preempt_count needs to be 1 initially, until the scheduler is functional.
15799- */
15800-#ifndef __ASSEMBLY__
15801-
15802-
15803-/* how to get the current stack pointer from C */
15804-register unsigned long current_stack_pointer asm("esp") __used;
15805-
15806-/* how to get the thread information struct from C */
15807-static inline struct thread_info *current_thread_info(void)
15808-{
15809- return (struct thread_info *)
15810- (current_stack_pointer & ~(THREAD_SIZE - 1));
15811-}
15812-
15813-#else /* !__ASSEMBLY__ */
15814-
15815+#ifdef __ASSEMBLY__
15816 /* how to get the thread information struct from ASM */
15817 #define GET_THREAD_INFO(reg) \
15818- movl $-THREAD_SIZE, reg; \
15819- andl %esp, reg
15820+ mov PER_CPU_VAR(current_tinfo), reg
15821
15822 /* use this one if reg already contains %esp */
15823-#define GET_THREAD_INFO_WITH_ESP(reg) \
15824- andl $-THREAD_SIZE, reg
15825+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15826+#else
15827+/* how to get the thread information struct from C */
15828+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15829+
15830+static __always_inline struct thread_info *current_thread_info(void)
15831+{
15832+ return this_cpu_read_stable(current_tinfo);
15833+}
15834+#endif
15835+
15836+#ifdef CONFIG_X86_32
15837+
15838+#define STACK_WARN (THREAD_SIZE/8)
15839+/*
15840+ * macros/functions for gaining access to the thread information structure
15841+ *
15842+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15843+ */
15844+#ifndef __ASSEMBLY__
15845+
15846+/* how to get the current stack pointer from C */
15847+register unsigned long current_stack_pointer asm("esp") __used;
15848
15849 #endif
15850
15851 #else /* X86_32 */
15852
15853-#include <asm/percpu.h>
15854-#define KERNEL_STACK_OFFSET (5*8)
15855-
15856 /*
15857 * macros/functions for gaining access to the thread information structure
15858 * preempt_count needs to be 1 initially, until the scheduler is functional.
15859@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
15860 #ifndef __ASSEMBLY__
15861 DECLARE_PER_CPU(unsigned long, kernel_stack);
15862
15863-static inline struct thread_info *current_thread_info(void)
15864-{
15865- struct thread_info *ti;
15866- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15867- KERNEL_STACK_OFFSET - THREAD_SIZE);
15868- return ti;
15869-}
15870-
15871-#else /* !__ASSEMBLY__ */
15872-
15873-/* how to get the thread information struct from ASM */
15874-#define GET_THREAD_INFO(reg) \
15875- movq PER_CPU_VAR(kernel_stack),reg ; \
15876- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15877-
15878-/*
15879- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15880- * a certain register (to be used in assembler memory operands).
15881- */
15882-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15883-
15884+/* how to get the current stack pointer from C */
15885+register unsigned long current_stack_pointer asm("rsp") __used;
15886 #endif
15887
15888 #endif /* !X86_32 */
15889@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
15890 extern void arch_task_cache_init(void);
15891 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15892 extern void arch_release_task_struct(struct task_struct *tsk);
15893+
15894+#define __HAVE_THREAD_FUNCTIONS
15895+#define task_thread_info(task) (&(task)->tinfo)
15896+#define task_stack_page(task) ((task)->stack)
15897+#define setup_thread_stack(p, org) do {} while (0)
15898+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15899+
15900 #endif
15901 #endif /* _ASM_X86_THREAD_INFO_H */
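The thread_info.h rework above stops deriving thread_info from the stack pointer (the old mask-%esp trick) and reads it from a per-cpu variable on both 32- and 64-bit. A userspace sketch of the same shape, using a thread-local pointer in place of PER_CPU_VAR(current_tinfo); the struct fields and the manual assignment in main are illustrative:

#include <stdio.h>

struct thread_info { unsigned flags; unsigned cpu; };

static _Thread_local struct thread_info *current_tinfo;

static struct thread_info *current_thread_info(void)
{
    return current_tinfo;           /* one load, no mask arithmetic */
}

int main(void)
{
    struct thread_info ti = { .flags = 0, .cpu = 0 };
    current_tinfo = &ti;            /* what the context switch would do */
    printf("cpu=%u\n", current_thread_info()->cpu);
    return 0;
}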
15902diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15903index 1709801..0a60f2f 100644
15904--- a/arch/x86/include/asm/uaccess.h
15905+++ b/arch/x86/include/asm/uaccess.h
15906@@ -7,6 +7,7 @@
15907 #include <linux/compiler.h>
15908 #include <linux/thread_info.h>
15909 #include <linux/string.h>
15910+#include <linux/sched.h>
15911 #include <asm/asm.h>
15912 #include <asm/page.h>
15913 #include <asm/smap.h>
15914@@ -29,7 +30,12 @@
15915
15916 #define get_ds() (KERNEL_DS)
15917 #define get_fs() (current_thread_info()->addr_limit)
15918+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15919+void __set_fs(mm_segment_t x);
15920+void set_fs(mm_segment_t x);
15921+#else
15922 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15923+#endif
15924
15925 #define segment_eq(a, b) ((a).seg == (b).seg)
15926
15927@@ -77,8 +83,33 @@
15928 * checks that the pointer is in the user space range - after calling
15929 * this function, memory access functions may still return -EFAULT.
15930 */
15931-#define access_ok(type, addr, size) \
15932- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15933+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15934+#define access_ok(type, addr, size) \
15935+({ \
15936+ long __size = size; \
15937+ unsigned long __addr = (unsigned long)addr; \
15938+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15939+ unsigned long __end_ao = __addr + __size - 1; \
15940+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15941+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15942+ while(__addr_ao <= __end_ao) { \
15943+ char __c_ao; \
15944+ __addr_ao += PAGE_SIZE; \
15945+ if (__size > PAGE_SIZE) \
15946+ cond_resched(); \
15947+ if (__get_user(__c_ao, (char __user *)__addr)) \
15948+ break; \
15949+ if (type != VERIFY_WRITE) { \
15950+ __addr = __addr_ao; \
15951+ continue; \
15952+ } \
15953+ if (__put_user(__c_ao, (char __user *)__addr)) \
15954+ break; \
15955+ __addr = __addr_ao; \
15956+ } \
15957+ } \
15958+ __ret_ao; \
15959+})
15960
15961 /*
15962 * The exception table consists of pairs of addresses relative to the
15963@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
15964 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15965 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15966
15967-
15968+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15969+#define __copyuser_seg "gs;"
15970+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15971+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15972+#else
15973+#define __copyuser_seg
15974+#define __COPYUSER_SET_ES
15975+#define __COPYUSER_RESTORE_ES
15976+#endif
15977
15978 #ifdef CONFIG_X86_32
15979 #define __put_user_asm_u64(x, addr, err, errret) \
15980 asm volatile(ASM_STAC "\n" \
15981- "1: movl %%eax,0(%2)\n" \
15982- "2: movl %%edx,4(%2)\n" \
15983+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15984+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15985 "3: " ASM_CLAC "\n" \
15986 ".section .fixup,\"ax\"\n" \
15987 "4: movl %3,%0\n" \
15988@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
15989
15990 #define __put_user_asm_ex_u64(x, addr) \
15991 asm volatile(ASM_STAC "\n" \
15992- "1: movl %%eax,0(%1)\n" \
15993- "2: movl %%edx,4(%1)\n" \
15994+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15995+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15996 "3: " ASM_CLAC "\n" \
15997 _ASM_EXTABLE_EX(1b, 2b) \
15998 _ASM_EXTABLE_EX(2b, 3b) \
15999@@ -259,7 +298,7 @@ extern void __put_user_8(void);
16000 __typeof__(*(ptr)) __pu_val; \
16001 __chk_user_ptr(ptr); \
16002 might_fault(); \
16003- __pu_val = x; \
16004+ __pu_val = (x); \
16005 switch (sizeof(*(ptr))) { \
16006 case 1: \
16007 __put_user_x(1, __pu_val, ptr, __ret_pu); \
16008@@ -358,7 +397,7 @@ do { \
16009
16010 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
16011 asm volatile(ASM_STAC "\n" \
16012- "1: mov"itype" %2,%"rtype"1\n" \
16013+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
16014 "2: " ASM_CLAC "\n" \
16015 ".section .fixup,\"ax\"\n" \
16016 "3: mov %3,%0\n" \
16017@@ -366,7 +405,7 @@ do { \
16018 " jmp 2b\n" \
16019 ".previous\n" \
16020 _ASM_EXTABLE(1b, 3b) \
16021- : "=r" (err), ltype(x) \
16022+ : "=r" (err), ltype (x) \
16023 : "m" (__m(addr)), "i" (errret), "0" (err))
16024
16025 #define __get_user_size_ex(x, ptr, size) \
16026@@ -391,7 +430,7 @@ do { \
16027 } while (0)
16028
16029 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
16030- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
16031+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
16032 "2:\n" \
16033 _ASM_EXTABLE_EX(1b, 2b) \
16034 : ltype(x) : "m" (__m(addr)))
16035@@ -408,13 +447,24 @@ do { \
16036 int __gu_err; \
16037 unsigned long __gu_val; \
16038 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
16039- (x) = (__force __typeof__(*(ptr)))__gu_val; \
16040+ (x) = (__typeof__(*(ptr)))__gu_val; \
16041 __gu_err; \
16042 })
16043
16044 /* FIXME: this hack is definitely wrong -AK */
16045 struct __large_struct { unsigned long buf[100]; };
16046-#define __m(x) (*(struct __large_struct __user *)(x))
16047+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16048+#define ____m(x) \
16049+({ \
16050+ unsigned long ____x = (unsigned long)(x); \
16051+ if (____x < PAX_USER_SHADOW_BASE) \
16052+ ____x += PAX_USER_SHADOW_BASE; \
16053+ (void __user *)____x; \
16054+})
16055+#else
16056+#define ____m(x) (x)
16057+#endif
16058+#define __m(x) (*(struct __large_struct __user *)____m(x))
16059
16060 /*
16061 * Tell gcc we read from memory instead of writing: this is because
16062@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
16063 */
16064 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
16065 asm volatile(ASM_STAC "\n" \
16066- "1: mov"itype" %"rtype"1,%2\n" \
16067+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
16068 "2: " ASM_CLAC "\n" \
16069 ".section .fixup,\"ax\"\n" \
16070 "3: mov %3,%0\n" \
16071@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
16072 ".previous\n" \
16073 _ASM_EXTABLE(1b, 3b) \
16074 : "=r"(err) \
16075- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
16076+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
16077
16078 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
16079- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
16080+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
16081 "2:\n" \
16082 _ASM_EXTABLE_EX(1b, 2b) \
16083 : : ltype(x), "m" (__m(addr)))
16084@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
16085 * On error, the variable @x is set to zero.
16086 */
16087
16088+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16089+#define __get_user(x, ptr) get_user((x), (ptr))
16090+#else
16091 #define __get_user(x, ptr) \
16092 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
16093+#endif
16094
16095 /**
16096 * __put_user: - Write a simple value into user space, with less checking.
16097@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
16098 * Returns zero on success, or -EFAULT on error.
16099 */
16100
16101+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16102+#define __put_user(x, ptr) put_user((x), (ptr))
16103+#else
16104 #define __put_user(x, ptr) \
16105 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
16106+#endif
16107
16108 #define __get_user_unaligned __get_user
16109 #define __put_user_unaligned __put_user
16110@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
16111 #define get_user_ex(x, ptr) do { \
16112 unsigned long __gue_val; \
16113 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
16114- (x) = (__force __typeof__(*(ptr)))__gue_val; \
16115+ (x) = (__typeof__(*(ptr)))__gue_val; \
16116 } while (0)
16117
16118 #define put_user_try uaccess_try
16119@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
16120 extern __must_check long strlen_user(const char __user *str);
16121 extern __must_check long strnlen_user(const char __user *str, long n);
16122
16123-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
16124-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
16125+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16126+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16127
16128 /*
16129 * movsl can be slow when source and dest are not both 8-byte aligned
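The reworked access_ok() above does more than a range check: when the range crosses a page boundary, it touches one byte per page with __get_user (and writes it back with __put_user for VERIFY_WRITE) so every page is faulted in up front. A sketch of just the page-walk arithmetic, with the probe simulated by a printf; PAGE_SIZE and the sample range are illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void probe_range(unsigned long addr, unsigned long size)
{
    unsigned long addr_ao = addr & PAGE_MASK;
    unsigned long end_ao  = addr + size - 1;

    if (((end_ao ^ addr_ao) & PAGE_MASK) == 0)
        return;                       /* fits in one page: nothing to do */
    while (addr_ao <= end_ao) {
        addr_ao += PAGE_SIZE;
        printf("probe byte at %#lx\n", addr);
        addr = addr_ao;               /* next probe lands on a new page */
    }
}

int main(void)
{
    probe_range(0x1ff8, 0x20);        /* crosses one page boundary */
    return 0;
}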
16130diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
16131index 7f760a9..04b1c65 100644
16132--- a/arch/x86/include/asm/uaccess_32.h
16133+++ b/arch/x86/include/asm/uaccess_32.h
16134@@ -11,15 +11,15 @@
16135 #include <asm/page.h>
16136
16137 unsigned long __must_check __copy_to_user_ll
16138- (void __user *to, const void *from, unsigned long n);
16139+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
16140 unsigned long __must_check __copy_from_user_ll
16141- (void *to, const void __user *from, unsigned long n);
16142+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16143 unsigned long __must_check __copy_from_user_ll_nozero
16144- (void *to, const void __user *from, unsigned long n);
16145+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16146 unsigned long __must_check __copy_from_user_ll_nocache
16147- (void *to, const void __user *from, unsigned long n);
16148+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16149 unsigned long __must_check __copy_from_user_ll_nocache_nozero
16150- (void *to, const void __user *from, unsigned long n);
16151+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16152
16153 /**
16154 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
16155@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
16156 static __always_inline unsigned long __must_check
16157 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
16158 {
16159+ if ((long)n < 0)
16160+ return n;
16161+
16162+ check_object_size(from, n, true);
16163+
16164 if (__builtin_constant_p(n)) {
16165 unsigned long ret;
16166
16167@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
16168 __copy_to_user(void __user *to, const void *from, unsigned long n)
16169 {
16170 might_fault();
16171+
16172 return __copy_to_user_inatomic(to, from, n);
16173 }
16174
16175 static __always_inline unsigned long
16176 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
16177 {
16178+ if ((long)n < 0)
16179+ return n;
16180+
16181 /* Avoid zeroing the tail if the copy fails..
16182 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
16183 * but as the zeroing behaviour is only significant when n is not
16184@@ -137,6 +146,12 @@ static __always_inline unsigned long
16185 __copy_from_user(void *to, const void __user *from, unsigned long n)
16186 {
16187 might_fault();
16188+
16189+ if ((long)n < 0)
16190+ return n;
16191+
16192+ check_object_size(to, n, false);
16193+
16194 if (__builtin_constant_p(n)) {
16195 unsigned long ret;
16196
16197@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
16198 const void __user *from, unsigned long n)
16199 {
16200 might_fault();
16201+
16202+ if ((long)n < 0)
16203+ return n;
16204+
16205 if (__builtin_constant_p(n)) {
16206 unsigned long ret;
16207
16208@@ -181,15 +200,19 @@ static __always_inline unsigned long
16209 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
16210 unsigned long n)
16211 {
16212- return __copy_from_user_ll_nocache_nozero(to, from, n);
16213+ if ((long)n < 0)
16214+ return n;
16215+
16216+ return __copy_from_user_ll_nocache_nozero(to, from, n);
16217 }
16218
16219-unsigned long __must_check copy_to_user(void __user *to,
16220- const void *from, unsigned long n);
16221-unsigned long __must_check _copy_from_user(void *to,
16222- const void __user *from,
16223- unsigned long n);
16224-
16225+extern void copy_to_user_overflow(void)
16226+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16227+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16228+#else
16229+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16230+#endif
16231+;
16232
16233 extern void copy_from_user_overflow(void)
16234 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16235@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
16236 #endif
16237 ;
16238
16239-static inline unsigned long __must_check copy_from_user(void *to,
16240- const void __user *from,
16241- unsigned long n)
16242+/**
16243+ * copy_to_user: - Copy a block of data into user space.
16244+ * @to: Destination address, in user space.
16245+ * @from: Source address, in kernel space.
16246+ * @n: Number of bytes to copy.
16247+ *
16248+ * Context: User context only. This function may sleep.
16249+ *
16250+ * Copy data from kernel space to user space.
16251+ *
16252+ * Returns number of bytes that could not be copied.
16253+ * On success, this will be zero.
16254+ */
16255+static inline unsigned long __must_check
16256+copy_to_user(void __user *to, const void *from, unsigned long n)
16257 {
16258- int sz = __compiletime_object_size(to);
16259+ size_t sz = __compiletime_object_size(from);
16260
16261- if (likely(sz == -1 || sz >= n))
16262- n = _copy_from_user(to, from, n);
16263- else
16264+ if (unlikely(sz != (size_t)-1 && sz < n))
16265+ copy_to_user_overflow();
16266+ else if (access_ok(VERIFY_WRITE, to, n))
16267+ n = __copy_to_user(to, from, n);
16268+ return n;
16269+}
16270+
16271+/**
16272+ * copy_from_user: - Copy a block of data from user space.
16273+ * @to: Destination address, in kernel space.
16274+ * @from: Source address, in user space.
16275+ * @n: Number of bytes to copy.
16276+ *
16277+ * Context: User context only. This function may sleep.
16278+ *
16279+ * Copy data from user space to kernel space.
16280+ *
16281+ * Returns number of bytes that could not be copied.
16282+ * On success, this will be zero.
16283+ *
16284+ * If some data could not be copied, this function will pad the copied
16285+ * data to the requested size using zero bytes.
16286+ */
16287+static inline unsigned long __must_check
16288+copy_from_user(void *to, const void __user *from, unsigned long n)
16289+{
16290+ size_t sz = __compiletime_object_size(to);
16291+
16292+ check_object_size(to, n, false);
16293+
16294+ if (unlikely(sz != (size_t)-1 && sz < n))
16295 copy_from_user_overflow();
16296-
16297+ else if (access_ok(VERIFY_READ, from, n))
16298+ n = __copy_from_user(to, from, n);
16299+ else if ((long)n > 0)
16300+ memset(to, 0, n);
16301 return n;
16302 }
16303
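The open-coded copy_from_user() above enforces two things: a compile-time object-size check (via the copy_*_user_overflow stubs) and, on a bad source range, zero-filling the kernel buffer while reporting the full length as uncopied, so callers never consume stale stack or heap bytes. A sketch of that zero-on-failure contract; the access_ok decision is mocked with a flag and mock_copy_from_user is hypothetical:

#include <stdio.h>
#include <string.h>

static unsigned long mock_copy_from_user(void *to, const void *from,
                                         unsigned long n, int access_ok)
{
    if (access_ok) {
        memcpy(to, from, n);        /* stands in for __copy_from_user */
        return 0;                   /* 0 bytes left uncopied */
    }
    memset(to, 0, n);               /* don't leak uninitialized kernel data */
    return n;
}

int main(void)
{
    char buf[8] = "XXXXXXX";
    unsigned long left = mock_copy_from_user(buf, "hi", 3, 0);
    printf("left=%lu buf[0]=%d\n", left, buf[0]);  /* left=3 buf[0]=0 */
    return 0;
}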
16304diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16305index 142810c..1f2a0a7 100644
16306--- a/arch/x86/include/asm/uaccess_64.h
16307+++ b/arch/x86/include/asm/uaccess_64.h
16308@@ -10,6 +10,9 @@
16309 #include <asm/alternative.h>
16310 #include <asm/cpufeature.h>
16311 #include <asm/page.h>
16312+#include <asm/pgtable.h>
16313+
16314+#define set_fs(x) (current_thread_info()->addr_limit = (x))
16315
16316 /*
16317 * Copy To/From Userspace
16318@@ -17,13 +20,13 @@
16319
16320 /* Handles exceptions in both to and from, but doesn't do access_ok */
16321 __must_check unsigned long
16322-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16323+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16324 __must_check unsigned long
16325-copy_user_generic_string(void *to, const void *from, unsigned len);
16326+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16327 __must_check unsigned long
16328-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16329+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16330
16331-static __always_inline __must_check unsigned long
16332+static __always_inline __must_check __size_overflow(3) unsigned long
16333 copy_user_generic(void *to, const void *from, unsigned len)
16334 {
16335 unsigned ret;
16336@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16337 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16338 "=d" (len)),
16339 "1" (to), "2" (from), "3" (len)
16340- : "memory", "rcx", "r8", "r9", "r10", "r11");
16341+ : "memory", "rcx", "r8", "r9", "r11");
16342 return ret;
16343 }
16344
16345+static __always_inline __must_check unsigned long
16346+__copy_to_user(void __user *to, const void *from, unsigned long len);
16347+static __always_inline __must_check unsigned long
16348+__copy_from_user(void *to, const void __user *from, unsigned long len);
16349 __must_check unsigned long
16350-_copy_to_user(void __user *to, const void *from, unsigned len);
16351-__must_check unsigned long
16352-_copy_from_user(void *to, const void __user *from, unsigned len);
16353-__must_check unsigned long
16354-copy_in_user(void __user *to, const void __user *from, unsigned len);
16355+copy_in_user(void __user *to, const void __user *from, unsigned long len);
16356+
16357+extern void copy_to_user_overflow(void)
16358+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16359+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16360+#else
16361+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16362+#endif
16363+;
16364+
16365+extern void copy_from_user_overflow(void)
16366+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16367+ __compiletime_error("copy_from_user() buffer size is not provably correct")
16368+#else
16369+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
16370+#endif
16371+;
16372
16373 static inline unsigned long __must_check copy_from_user(void *to,
16374 const void __user *from,
16375 unsigned long n)
16376 {
16377- int sz = __compiletime_object_size(to);
16378-
16379 might_fault();
16380- if (likely(sz == -1 || sz >= n))
16381- n = _copy_from_user(to, from, n);
16382-#ifdef CONFIG_DEBUG_VM
16383- else
16384- WARN(1, "Buffer overflow detected!\n");
16385-#endif
16386+
16387+ check_object_size(to, n, false);
16388+
16389+ if (access_ok(VERIFY_READ, from, n))
16390+ n = __copy_from_user(to, from, n);
16391+ else if (n < INT_MAX)
16392+ memset(to, 0, n);
16393 return n;
16394 }
16395
16396 static __always_inline __must_check
16397-int copy_to_user(void __user *dst, const void *src, unsigned size)
16398+int copy_to_user(void __user *dst, const void *src, unsigned long size)
16399 {
16400 might_fault();
16401
16402- return _copy_to_user(dst, src, size);
16403+ if (access_ok(VERIFY_WRITE, dst, size))
16404+ size = __copy_to_user(dst, src, size);
16405+ return size;
16406 }
16407
16408 static __always_inline __must_check
16409-int __copy_from_user(void *dst, const void __user *src, unsigned size)
16410+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16411 {
16412- int ret = 0;
16413+ size_t sz = __compiletime_object_size(dst);
16414+ unsigned ret = 0;
16415
16416 might_fault();
16417+
16418+ if (size > INT_MAX)
16419+ return size;
16420+
16421+ check_object_size(dst, size, false);
16422+
16423+#ifdef CONFIG_PAX_MEMORY_UDEREF
16424+ if (!__access_ok(VERIFY_READ, src, size))
16425+ return size;
16426+#endif
16427+
16428+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16429+ copy_from_user_overflow();
16430+ return size;
16431+ }
16432+
16433 if (!__builtin_constant_p(size))
16434- return copy_user_generic(dst, (__force void *)src, size);
16435+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16436 switch (size) {
16437- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16438+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16439 ret, "b", "b", "=q", 1);
16440 return ret;
16441- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16442+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16443 ret, "w", "w", "=r", 2);
16444 return ret;
16445- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16446+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16447 ret, "l", "k", "=r", 4);
16448 return ret;
16449- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16450+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16451 ret, "q", "", "=r", 8);
16452 return ret;
16453 case 10:
16454- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16455+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16456 ret, "q", "", "=r", 10);
16457 if (unlikely(ret))
16458 return ret;
16459 __get_user_asm(*(u16 *)(8 + (char *)dst),
16460- (u16 __user *)(8 + (char __user *)src),
16461+ (const u16 __user *)(8 + (const char __user *)src),
16462 ret, "w", "w", "=r", 2);
16463 return ret;
16464 case 16:
16465- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16466+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16467 ret, "q", "", "=r", 16);
16468 if (unlikely(ret))
16469 return ret;
16470 __get_user_asm(*(u64 *)(8 + (char *)dst),
16471- (u64 __user *)(8 + (char __user *)src),
16472+ (const u64 __user *)(8 + (const char __user *)src),
16473 ret, "q", "", "=r", 8);
16474 return ret;
16475 default:
16476- return copy_user_generic(dst, (__force void *)src, size);
16477+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16478 }
16479 }
16480
16481 static __always_inline __must_check
16482-int __copy_to_user(void __user *dst, const void *src, unsigned size)
16483+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16484 {
16485- int ret = 0;
16486+ size_t sz = __compiletime_object_size(src);
16487+ unsigned ret = 0;
16488
16489 might_fault();
16490+
16491+ if (size > INT_MAX)
16492+ return size;
16493+
16494+ check_object_size(src, size, true);
16495+
16496+#ifdef CONFIG_PAX_MEMORY_UDEREF
16497+ if (!__access_ok(VERIFY_WRITE, dst, size))
16498+ return size;
16499+#endif
16500+
16501+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16502+ copy_to_user_overflow();
16503+ return size;
16504+ }
16505+
16506 if (!__builtin_constant_p(size))
16507- return copy_user_generic((__force void *)dst, src, size);
16508+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16509 switch (size) {
16510- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16511+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16512 ret, "b", "b", "iq", 1);
16513 return ret;
16514- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16515+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16516 ret, "w", "w", "ir", 2);
16517 return ret;
16518- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16519+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16520 ret, "l", "k", "ir", 4);
16521 return ret;
16522- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16523+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16524 ret, "q", "", "er", 8);
16525 return ret;
16526 case 10:
16527- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16528+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16529 ret, "q", "", "er", 10);
16530 if (unlikely(ret))
16531 return ret;
16532 asm("":::"memory");
16533- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16534+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16535 ret, "w", "w", "ir", 2);
16536 return ret;
16537 case 16:
16538- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16539+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16540 ret, "q", "", "er", 16);
16541 if (unlikely(ret))
16542 return ret;
16543 asm("":::"memory");
16544- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16545+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16546 ret, "q", "", "er", 8);
16547 return ret;
16548 default:
16549- return copy_user_generic((__force void *)dst, src, size);
16550+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16551 }
16552 }
16553
16554 static __always_inline __must_check
16555-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16556+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16557 {
16558- int ret = 0;
16559+ unsigned ret = 0;
16560
16561 might_fault();
16562+
16563+ if (size > INT_MAX)
16564+ return size;
16565+
16566+#ifdef CONFIG_PAX_MEMORY_UDEREF
16567+ if (!__access_ok(VERIFY_READ, src, size))
16568+ return size;
16569+ if (!__access_ok(VERIFY_WRITE, dst, size))
16570+ return size;
16571+#endif
16572+
16573 if (!__builtin_constant_p(size))
16574- return copy_user_generic((__force void *)dst,
16575- (__force void *)src, size);
16576+ return copy_user_generic((__force_kernel void *)____m(dst),
16577+ (__force_kernel const void *)____m(src), size);
16578 switch (size) {
16579 case 1: {
16580 u8 tmp;
16581- __get_user_asm(tmp, (u8 __user *)src,
16582+ __get_user_asm(tmp, (const u8 __user *)src,
16583 ret, "b", "b", "=q", 1);
16584 if (likely(!ret))
16585 __put_user_asm(tmp, (u8 __user *)dst,
16586@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16587 }
16588 case 2: {
16589 u16 tmp;
16590- __get_user_asm(tmp, (u16 __user *)src,
16591+ __get_user_asm(tmp, (const u16 __user *)src,
16592 ret, "w", "w", "=r", 2);
16593 if (likely(!ret))
16594 __put_user_asm(tmp, (u16 __user *)dst,
16595@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16596
16597 case 4: {
16598 u32 tmp;
16599- __get_user_asm(tmp, (u32 __user *)src,
16600+ __get_user_asm(tmp, (const u32 __user *)src,
16601 ret, "l", "k", "=r", 4);
16602 if (likely(!ret))
16603 __put_user_asm(tmp, (u32 __user *)dst,
16604@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16605 }
16606 case 8: {
16607 u64 tmp;
16608- __get_user_asm(tmp, (u64 __user *)src,
16609+ __get_user_asm(tmp, (const u64 __user *)src,
16610 ret, "q", "", "=r", 8);
16611 if (likely(!ret))
16612 __put_user_asm(tmp, (u64 __user *)dst,
16613@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16614 return ret;
16615 }
16616 default:
16617- return copy_user_generic((__force void *)dst,
16618- (__force void *)src, size);
16619+ return copy_user_generic((__force_kernel void *)____m(dst),
16620+ (__force_kernel const void *)____m(src), size);
16621 }
16622 }
16623
16624 static __must_check __always_inline int
16625-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16626+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16627 {
16628- return copy_user_generic(dst, (__force const void *)src, size);
16629+ if (size > INT_MAX)
16630+ return size;
16631+
16632+#ifdef CONFIG_PAX_MEMORY_UDEREF
16633+ if (!__access_ok(VERIFY_READ, src, size))
16634+ return size;
16635+#endif
16636+
16637+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16638 }
16639
16640-static __must_check __always_inline int
16641-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16642+static __must_check __always_inline unsigned long
16643+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16644 {
16645- return copy_user_generic((__force void *)dst, src, size);
16646+ if (size > INT_MAX)
16647+ return size;
16648+
16649+#ifdef CONFIG_PAX_MEMORY_UDEREF
16650+ if (!__access_ok(VERIFY_WRITE, dst, size))
16651+ return size;
16652+#endif
16653+
16654+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16655 }
16656
16657-extern long __copy_user_nocache(void *dst, const void __user *src,
16658- unsigned size, int zerorest);
16659+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16660+ unsigned long size, int zerorest) __size_overflow(3);
16661
16662-static inline int
16663-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16664+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16665 {
16666 might_sleep();
16667+
16668+ if (size > INT_MAX)
16669+ return size;
16670+
16671+#ifdef CONFIG_PAX_MEMORY_UDEREF
16672+ if (!__access_ok(VERIFY_READ, src, size))
16673+ return size;
16674+#endif
16675+
16676 return __copy_user_nocache(dst, src, size, 1);
16677 }
16678
16679-static inline int
16680-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16681- unsigned size)
16682+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16683+ unsigned long size)
16684 {
16685+ if (size > INT_MAX)
16686+ return size;
16687+
16688+#ifdef CONFIG_PAX_MEMORY_UDEREF
16689+ if (!__access_ok(VERIFY_READ, src, size))
16690+ return size;
16691+#endif
16692+
16693 return __copy_user_nocache(dst, src, size, 0);
16694 }
16695
16696-unsigned long
16697-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16698+extern unsigned long
16699+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16700
16701 #endif /* _ASM_X86_UACCESS_64_H */
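
Taken together, the uaccess_64.h hunks above give every copy helper the same prologue: reject sizes above INT_MAX (a negative length cast to unsigned long), run the PAX_USERCOPY object-size check, verify the user pointer range when CONFIG_PAX_MEMORY_UDEREF is set, and only then fall through to the real copy. A minimal sketch of that prologue as a standalone wrapper; guarded_copy_to_user() is hypothetical, every other name is taken from the patch itself:

static inline unsigned long
guarded_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	size_t sz = __compiletime_object_size(src);

	if (size > INT_MAX)			/* negative length in disguise */
		return size;			/* convention: "size bytes left uncopied" */

	check_object_size(src, size, true);	/* PAX_USERCOPY slab/stack bounds */

#ifdef CONFIG_PAX_MEMORY_UDEREF
	if (!__access_ok(VERIFY_WRITE, dst, size))
		return size;			/* dst does not lie in userland */
#endif

	if (unlikely(sz != (size_t)-1 && sz < size)) {
		copy_to_user_overflow();	/* compile-time object smaller than copy */
		return size;
	}

	return __copy_to_user(dst, src, size);
}
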
16702diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16703index 5b238981..77fdd78 100644
16704--- a/arch/x86/include/asm/word-at-a-time.h
16705+++ b/arch/x86/include/asm/word-at-a-time.h
16706@@ -11,7 +11,7 @@
16707 * and shift, for example.
16708 */
16709 struct word_at_a_time {
16710- const unsigned long one_bits, high_bits;
16711+ unsigned long one_bits, high_bits;
16712 };
16713
16714 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16715diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16716index 5769349..a3d3e2a 100644
16717--- a/arch/x86/include/asm/x86_init.h
16718+++ b/arch/x86/include/asm/x86_init.h
16719@@ -141,7 +141,7 @@ struct x86_init_ops {
16720 struct x86_init_timers timers;
16721 struct x86_init_iommu iommu;
16722 struct x86_init_pci pci;
16723-};
16724+} __no_const;
16725
16726 /**
16727 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16728@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
16729 void (*setup_percpu_clockev)(void);
16730 void (*early_percpu_clock_init)(void);
16731 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16732-};
16733+} __no_const;
16734
16735 /**
16736 * struct x86_platform_ops - platform specific runtime functions
16737@@ -178,7 +178,7 @@ struct x86_platform_ops {
16738 void (*save_sched_clock_state)(void);
16739 void (*restore_sched_clock_state)(void);
16740 void (*apic_post_init)(void);
16741-};
16742+} __no_const;
16743
16744 struct pci_dev;
16745
16746@@ -187,14 +187,14 @@ struct x86_msi_ops {
16747 void (*teardown_msi_irq)(unsigned int irq);
16748 void (*teardown_msi_irqs)(struct pci_dev *dev);
16749 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16750-};
16751+} __no_const;
16752
16753 struct x86_io_apic_ops {
16754 void (*init) (void);
16755 unsigned int (*read) (unsigned int apic, unsigned int reg);
16756 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
16757 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
16758-};
16759+} __no_const;
16760
16761 extern struct x86_init_ops x86_init;
16762 extern struct x86_cpuinit_ops x86_cpuinit;
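
The __no_const annotations above read as markers for a constify compiler plugin: structures consisting only of function pointers are normally forced into read-only memory, and __no_const appears to exempt a type whose members the kernel legitimately rewrites at runtime (the x86_init hooks are patched during boot). A two-line sketch of the contrast, with hypothetical struct names; __do_const, the opposite marker, shows up later in this patch (mtrr.h, perf_event.c):

struct patched_at_boot_ops {
	void (*setup)(void);
} __no_const;		/* instances stay writable; members reassigned at runtime */

struct fixed_forever_ops {
	void (*handler)(void);
} __do_const;		/* plugin treats every instance as const / read-only */
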
16763diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16764index 0415cda..b43d877 100644
16765--- a/arch/x86/include/asm/xsave.h
16766+++ b/arch/x86/include/asm/xsave.h
16767@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16768 return -EFAULT;
16769
16770 __asm__ __volatile__(ASM_STAC "\n"
16771- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16772+ "1:"
16773+ __copyuser_seg
16774+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16775 "2: " ASM_CLAC "\n"
16776 ".section .fixup,\"ax\"\n"
16777 "3: movl $-1,%[err]\n"
16778@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16779 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16780 {
16781 int err;
16782- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16783+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16784 u32 lmask = mask;
16785 u32 hmask = mask >> 32;
16786
16787 __asm__ __volatile__(ASM_STAC "\n"
16788- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16789+ "1:"
16790+ __copyuser_seg
16791+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16792 "2: " ASM_CLAC "\n"
16793 ".section .fixup,\"ax\"\n"
16794 "3: movl $-1,%[err]\n"
16795diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16796index bbae024..e1528f9 100644
16797--- a/arch/x86/include/uapi/asm/e820.h
16798+++ b/arch/x86/include/uapi/asm/e820.h
16799@@ -63,7 +63,7 @@ struct e820map {
16800 #define ISA_START_ADDRESS 0xa0000
16801 #define ISA_END_ADDRESS 0x100000
16802
16803-#define BIOS_BEGIN 0x000a0000
16804+#define BIOS_BEGIN 0x000c0000
16805 #define BIOS_END 0x00100000
16806
16807 #define BIOS_ROM_BASE 0xffe00000
16808diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16809index 34e923a..0c6bb6e 100644
16810--- a/arch/x86/kernel/Makefile
16811+++ b/arch/x86/kernel/Makefile
16812@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16813 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16814 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16815 obj-y += probe_roms.o
16816-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16817+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16818 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16819 obj-y += syscall_$(BITS).o
16820 obj-$(CONFIG_X86_64) += vsyscall_64.o
16821diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16822index bacf4b0..4ede72e 100644
16823--- a/arch/x86/kernel/acpi/boot.c
16824+++ b/arch/x86/kernel/acpi/boot.c
16825@@ -1358,7 +1358,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16826 * If your system is blacklisted here, but you find that acpi=force
16827 * works for you, please contact linux-acpi@vger.kernel.org
16828 */
16829-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16830+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16831 /*
16832 * Boxes that need ACPI disabled
16833 */
16834@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16835 };
16836
16837 /* second table for DMI checks that should run after early-quirks */
16838-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16839+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16840 /*
16841 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16842 * which includes some code which overrides all temperature
16843diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16844index d5e0d71..6533e08 100644
16845--- a/arch/x86/kernel/acpi/sleep.c
16846+++ b/arch/x86/kernel/acpi/sleep.c
16847@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16848 #else /* CONFIG_64BIT */
16849 #ifdef CONFIG_SMP
16850 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16851+
16852+ pax_open_kernel();
16853 early_gdt_descr.address =
16854 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16855+ pax_close_kernel();
16856+
16857 initial_gs = per_cpu_offset(smp_processor_id());
16858 #endif
16859 initial_code = (unsigned long)wakeup_long64;
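
This is the first of many hunks following one pattern: with KERNEXEC the GDT (and similar structures) sit in read-only memory, so each legitimate runtime write is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection (plausibly by toggling CR0.WP). A minimal sketch of the idiom; set_gdt_entry() is hypothetical:

static void set_gdt_entry(struct desc_struct *gdt, unsigned int idx,
			  struct desc_struct entry)
{
	pax_open_kernel();	/* temporarily allow writes to protected data */
	gdt[idx] = entry;
	pax_close_kernel();	/* re-arm the write protection */
}
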
16860diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16861index 13ab720..95d5442 100644
16862--- a/arch/x86/kernel/acpi/wakeup_32.S
16863+++ b/arch/x86/kernel/acpi/wakeup_32.S
16864@@ -30,13 +30,11 @@ wakeup_pmode_return:
16865 # and restore the stack ... but you need gdt for this to work
16866 movl saved_context_esp, %esp
16867
16868- movl %cs:saved_magic, %eax
16869- cmpl $0x12345678, %eax
16870+ cmpl $0x12345678, saved_magic
16871 jne bogus_magic
16872
16873 # jump to place where we left off
16874- movl saved_eip, %eax
16875- jmp *%eax
16876+ jmp *(saved_eip)
16877
16878 bogus_magic:
16879 jmp bogus_magic
16880diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16881index ef5ccca..bd83949 100644
16882--- a/arch/x86/kernel/alternative.c
16883+++ b/arch/x86/kernel/alternative.c
16884@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16885 */
16886 for (a = start; a < end; a++) {
16887 instr = (u8 *)&a->instr_offset + a->instr_offset;
16888+
16889+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16890+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16891+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16892+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16893+#endif
16894+
16895 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16896 BUG_ON(a->replacementlen > a->instrlen);
16897 BUG_ON(a->instrlen > sizeof(insnbuf));
16898@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16899 for (poff = start; poff < end; poff++) {
16900 u8 *ptr = (u8 *)poff + *poff;
16901
16902+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16903+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16904+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16905+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16906+#endif
16907+
16908 if (!*poff || ptr < text || ptr >= text_end)
16909 continue;
16910 /* turn DS segment override prefix into lock prefix */
16911- if (*ptr == 0x3e)
16912+ if (*ktla_ktva(ptr) == 0x3e)
16913 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16914 }
16915 mutex_unlock(&text_mutex);
16916@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16917 for (poff = start; poff < end; poff++) {
16918 u8 *ptr = (u8 *)poff + *poff;
16919
16920+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16921+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16922+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16923+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16924+#endif
16925+
16926 if (!*poff || ptr < text || ptr >= text_end)
16927 continue;
16928 /* turn lock prefix into DS segment override prefix */
16929- if (*ptr == 0xf0)
16930+ if (*ktla_ktva(ptr) == 0xf0)
16931 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16932 }
16933 mutex_unlock(&text_mutex);
16934@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16935
16936 BUG_ON(p->len > MAX_PATCH_LEN);
16937 /* prep the buffer with the original instructions */
16938- memcpy(insnbuf, p->instr, p->len);
16939+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16940 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16941 (unsigned long)p->instr, p->len);
16942
16943@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16944 if (!uniproc_patched || num_possible_cpus() == 1)
16945 free_init_pages("SMP alternatives",
16946 (unsigned long)__smp_locks,
16947- (unsigned long)__smp_locks_end);
16948+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16949 #endif
16950
16951 apply_paravirt(__parainstructions, __parainstructions_end);
16952@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16953 * instructions. And on the local CPU you need to be protected against NMI or MCE
16954 * handlers seeing an inconsistent instruction while you patch.
16955 */
16956-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16957+void *__kprobes text_poke_early(void *addr, const void *opcode,
16958 size_t len)
16959 {
16960 unsigned long flags;
16961 local_irq_save(flags);
16962- memcpy(addr, opcode, len);
16963+
16964+ pax_open_kernel();
16965+ memcpy(ktla_ktva(addr), opcode, len);
16966 sync_core();
16967+ pax_close_kernel();
16968+
16969 local_irq_restore(flags);
16970 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16971 that causes hangs on some VIA CPUs. */
16972@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16973 */
16974 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16975 {
16976- unsigned long flags;
16977- char *vaddr;
16978+ unsigned char *vaddr = ktla_ktva(addr);
16979 struct page *pages[2];
16980- int i;
16981+ size_t i;
16982
16983 if (!core_kernel_text((unsigned long)addr)) {
16984- pages[0] = vmalloc_to_page(addr);
16985- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16986+ pages[0] = vmalloc_to_page(vaddr);
16987+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16988 } else {
16989- pages[0] = virt_to_page(addr);
16990+ pages[0] = virt_to_page(vaddr);
16991 WARN_ON(!PageReserved(pages[0]));
16992- pages[1] = virt_to_page(addr + PAGE_SIZE);
16993+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16994 }
16995 BUG_ON(!pages[0]);
16996- local_irq_save(flags);
16997- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16998- if (pages[1])
16999- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
17000- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
17001- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
17002- clear_fixmap(FIX_TEXT_POKE0);
17003- if (pages[1])
17004- clear_fixmap(FIX_TEXT_POKE1);
17005- local_flush_tlb();
17006- sync_core();
17007- /* Could also do a CLFLUSH here to speed up CPU recovery; but
17008- that causes hangs on some VIA CPUs. */
17009+ text_poke_early(addr, opcode, len);
17010 for (i = 0; i < len; i++)
17011- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
17012- local_irq_restore(flags);
17013+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
17014 return addr;
17015 }
17016
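
The alternative.c changes pivot on ktla_ktva(): under KERNEXEC on i386 the kernel text appears to be mapped twice, an executable mapping plus a writable alias at a fixed offset, and ktla_ktva() converts a text ("kernel text logical") address into its writable ("kernel text virtual") alias. That is why text_poke_early() now memcpy()s through ktla_ktva(addr) inside a pax_open_kernel() window. A sketch of the assumed translation; __KERNEL_TEXT_OFFSET is an assumption consistent with the ____LOAD_PHYSICAL_ADDR arithmetic in the hunks above:

#define ktla_ktva(addr)	((void *)((unsigned long)(addr) + __KERNEL_TEXT_OFFSET))
#define ktva_ktla(addr)	((void *)((unsigned long)(addr) - __KERNEL_TEXT_OFFSET))
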
17017diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
17018index cbf5121..812b537 100644
17019--- a/arch/x86/kernel/apic/apic.c
17020+++ b/arch/x86/kernel/apic/apic.c
17021@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
17022 /*
17023 * Debug level, exported for io_apic.c
17024 */
17025-unsigned int apic_verbosity;
17026+int apic_verbosity;
17027
17028 int pic_mode;
17029
17030@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
17031 apic_write(APIC_ESR, 0);
17032 v1 = apic_read(APIC_ESR);
17033 ack_APIC_irq();
17034- atomic_inc(&irq_err_count);
17035+ atomic_inc_unchecked(&irq_err_count);
17036
17037 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
17038 smp_processor_id(), v0 , v1);
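
irq_err_count switching to atomic_unchecked_t belongs to the REFCOUNT side of the patch: ordinary atomic_t increments are instrumented to trap on overflow (to catch reference-count wraps), while the *_unchecked variants keep plain wrapping semantics for counters that are pure statistics. A sketch of the distinction; the function name is hypothetical:

static atomic_t           object_refs;	/* atomic_inc() traps on overflow */
static atomic_unchecked_t error_stats;	/* atomic_inc_unchecked() may wrap */

static void count_error(void)
{
	/* a wrapped statistic is harmless, so skip the overflow check */
	atomic_inc_unchecked(&error_stats);
}
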
17039diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
17040index 00c77cf..2dc6a2d 100644
17041--- a/arch/x86/kernel/apic/apic_flat_64.c
17042+++ b/arch/x86/kernel/apic/apic_flat_64.c
17043@@ -157,7 +157,7 @@ static int flat_probe(void)
17044 return 1;
17045 }
17046
17047-static struct apic apic_flat = {
17048+static struct apic apic_flat __read_only = {
17049 .name = "flat",
17050 .probe = flat_probe,
17051 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
17052@@ -271,7 +271,7 @@ static int physflat_probe(void)
17053 return 0;
17054 }
17055
17056-static struct apic apic_physflat = {
17057+static struct apic apic_physflat __read_only = {
17058
17059 .name = "physical flat",
17060 .probe = physflat_probe,
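
The struct apic instances above, and the many that follow, gain __read_only: these driver tables are selected once at boot and never change, so placing them in memory that is write-protected after init denies an attacker with a kernel write primitive the classic trick of redirecting one of the hooks. A sketch of what the attribute plausibly expands to; the section name is an assumption:

#define __read_only __attribute__((section(".data..read_only")))

static struct apic apic_example __read_only = {	/* hypothetical instance */
	.name  = "example",
	.probe = NULL,		/* pointers are fixed before protection engages */
};
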
17061diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
17062index e145f28..2752888 100644
17063--- a/arch/x86/kernel/apic/apic_noop.c
17064+++ b/arch/x86/kernel/apic/apic_noop.c
17065@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
17066 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
17067 }
17068
17069-struct apic apic_noop = {
17070+struct apic apic_noop __read_only = {
17071 .name = "noop",
17072 .probe = noop_probe,
17073 .acpi_madt_oem_check = NULL,
17074diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
17075index d50e364..543bee3 100644
17076--- a/arch/x86/kernel/apic/bigsmp_32.c
17077+++ b/arch/x86/kernel/apic/bigsmp_32.c
17078@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
17079 return dmi_bigsmp;
17080 }
17081
17082-static struct apic apic_bigsmp = {
17083+static struct apic apic_bigsmp __read_only = {
17084
17085 .name = "bigsmp",
17086 .probe = probe_bigsmp,
17087diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
17088index 0874799..a7a7892 100644
17089--- a/arch/x86/kernel/apic/es7000_32.c
17090+++ b/arch/x86/kernel/apic/es7000_32.c
17091@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
17092 return ret && es7000_apic_is_cluster();
17093 }
17094
17095-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
17096-static struct apic __refdata apic_es7000_cluster = {
17097+static struct apic apic_es7000_cluster __read_only = {
17098
17099 .name = "es7000",
17100 .probe = probe_es7000,
17101@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
17102 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
17103 };
17104
17105-static struct apic __refdata apic_es7000 = {
17106+static struct apic apic_es7000 __read_only = {
17107
17108 .name = "es7000",
17109 .probe = probe_es7000,
17110diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
17111index b739d39..aebc14c 100644
17112--- a/arch/x86/kernel/apic/io_apic.c
17113+++ b/arch/x86/kernel/apic/io_apic.c
17114@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
17115 }
17116 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
17117
17118-void lock_vector_lock(void)
17119+void lock_vector_lock(void) __acquires(vector_lock)
17120 {
17121 /* Used so that the online set of cpus does not change
17122 * during assign_irq_vector.
17123@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
17124 raw_spin_lock(&vector_lock);
17125 }
17126
17127-void unlock_vector_lock(void)
17128+void unlock_vector_lock(void) __releases(vector_lock)
17129 {
17130 raw_spin_unlock(&vector_lock);
17131 }
17132@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
17133 ack_APIC_irq();
17134 }
17135
17136-atomic_t irq_mis_count;
17137+atomic_unchecked_t irq_mis_count;
17138
17139 #ifdef CONFIG_GENERIC_PENDING_IRQ
17140 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
17141@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
17142 * at the cpu.
17143 */
17144 if (!(v & (1 << (i & 0x1f)))) {
17145- atomic_inc(&irq_mis_count);
17146+ atomic_inc_unchecked(&irq_mis_count);
17147
17148 eoi_ioapic_irq(irq, cfg);
17149 }
17150@@ -2567,11 +2567,13 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
17151
17152 static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
17153 {
17154- chip->irq_print_chip = ir_print_prefix;
17155- chip->irq_ack = ir_ack_apic_edge;
17156- chip->irq_eoi = ir_ack_apic_level;
17157+ pax_open_kernel();
17158+ *(void **)&chip->irq_print_chip = ir_print_prefix;
17159+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
17160+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
17161
17162- chip->irq_set_affinity = set_remapped_irq_affinity;
17163+ *(void **)&chip->irq_set_affinity = set_remapped_irq_affinity;
17164+ pax_close_kernel();
17165 }
17166 #endif /* CONFIG_IRQ_REMAP */
17167
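
Once irq_chip instances are effectively read-only, the IRQ-remap path can no longer patch the callbacks in place; the hunk above shows the replacement idiom: open a write window, then store through *(void **)&member to bypass the const-qualified member type. A compact sketch; set_chip_eoi() is hypothetical:

static void set_chip_eoi(struct irq_chip *chip,
			 void (*eoi)(struct irq_data *data))
{
	pax_open_kernel();
	*(void **)&chip->irq_eoi = eoi;	/* write past the constified type */
	pax_close_kernel();
}
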
17168diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
17169index d661ee9..791fd33 100644
17170--- a/arch/x86/kernel/apic/numaq_32.c
17171+++ b/arch/x86/kernel/apic/numaq_32.c
17172@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
17173 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
17174 }
17175
17176-/* Use __refdata to keep false positive warning calm. */
17177-static struct apic __refdata apic_numaq = {
17178+static struct apic apic_numaq __read_only = {
17179
17180 .name = "NUMAQ",
17181 .probe = probe_numaq,
17182diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
17183index eb35ef9..f184a21 100644
17184--- a/arch/x86/kernel/apic/probe_32.c
17185+++ b/arch/x86/kernel/apic/probe_32.c
17186@@ -72,7 +72,7 @@ static int probe_default(void)
17187 return 1;
17188 }
17189
17190-static struct apic apic_default = {
17191+static struct apic apic_default __read_only = {
17192
17193 .name = "default",
17194 .probe = probe_default,
17195diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
17196index 77c95c0..434f8a4 100644
17197--- a/arch/x86/kernel/apic/summit_32.c
17198+++ b/arch/x86/kernel/apic/summit_32.c
17199@@ -486,7 +486,7 @@ void setup_summit(void)
17200 }
17201 #endif
17202
17203-static struct apic apic_summit = {
17204+static struct apic apic_summit __read_only = {
17205
17206 .name = "summit",
17207 .probe = probe_summit,
17208diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
17209index c88baa4..757aee1 100644
17210--- a/arch/x86/kernel/apic/x2apic_cluster.c
17211+++ b/arch/x86/kernel/apic/x2apic_cluster.c
17212@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
17213 return notifier_from_errno(err);
17214 }
17215
17216-static struct notifier_block __refdata x2apic_cpu_notifier = {
17217+static struct notifier_block x2apic_cpu_notifier = {
17218 .notifier_call = update_clusterinfo,
17219 };
17220
17221@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
17222 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
17223 }
17224
17225-static struct apic apic_x2apic_cluster = {
17226+static struct apic apic_x2apic_cluster __read_only = {
17227
17228 .name = "cluster x2apic",
17229 .probe = x2apic_cluster_probe,
17230diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
17231index 562a76d..a003c0f 100644
17232--- a/arch/x86/kernel/apic/x2apic_phys.c
17233+++ b/arch/x86/kernel/apic/x2apic_phys.c
17234@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
17235 return apic == &apic_x2apic_phys;
17236 }
17237
17238-static struct apic apic_x2apic_phys = {
17239+static struct apic apic_x2apic_phys __read_only = {
17240
17241 .name = "physical x2apic",
17242 .probe = x2apic_phys_probe,
17243diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
17244index 8cfade9..b9d04fc 100644
17245--- a/arch/x86/kernel/apic/x2apic_uv_x.c
17246+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
17247@@ -333,7 +333,7 @@ static int uv_probe(void)
17248 return apic == &apic_x2apic_uv_x;
17249 }
17250
17251-static struct apic __refdata apic_x2apic_uv_x = {
17252+static struct apic apic_x2apic_uv_x __read_only = {
17253
17254 .name = "UV large system",
17255 .probe = uv_probe,
17256diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
17257index d65464e..1035d31 100644
17258--- a/arch/x86/kernel/apm_32.c
17259+++ b/arch/x86/kernel/apm_32.c
17260@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
17261 * This is for buggy BIOS's that refer to (real mode) segment 0x40
17262 * even though they are called in protected mode.
17263 */
17264-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
17265+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
17266 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
17267
17268 static const char driver_version[] = "1.16ac"; /* no spaces */
17269@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
17270 BUG_ON(cpu != 0);
17271 gdt = get_cpu_gdt_table(cpu);
17272 save_desc_40 = gdt[0x40 / 8];
17273+
17274+ pax_open_kernel();
17275 gdt[0x40 / 8] = bad_bios_desc;
17276+ pax_close_kernel();
17277
17278 apm_irq_save(flags);
17279 APM_DO_SAVE_SEGS;
17280@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
17281 &call->esi);
17282 APM_DO_RESTORE_SEGS;
17283 apm_irq_restore(flags);
17284+
17285+ pax_open_kernel();
17286 gdt[0x40 / 8] = save_desc_40;
17287+ pax_close_kernel();
17288+
17289 put_cpu();
17290
17291 return call->eax & 0xff;
17292@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
17293 BUG_ON(cpu != 0);
17294 gdt = get_cpu_gdt_table(cpu);
17295 save_desc_40 = gdt[0x40 / 8];
17296+
17297+ pax_open_kernel();
17298 gdt[0x40 / 8] = bad_bios_desc;
17299+ pax_close_kernel();
17300
17301 apm_irq_save(flags);
17302 APM_DO_SAVE_SEGS;
17303@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
17304 &call->eax);
17305 APM_DO_RESTORE_SEGS;
17306 apm_irq_restore(flags);
17307+
17308+ pax_open_kernel();
17309 gdt[0x40 / 8] = save_desc_40;
17310+ pax_close_kernel();
17311+
17312 put_cpu();
17313 return error;
17314 }
17315@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
17316 * code to that CPU.
17317 */
17318 gdt = get_cpu_gdt_table(0);
17319+
17320+ pax_open_kernel();
17321 set_desc_base(&gdt[APM_CS >> 3],
17322 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17323 set_desc_base(&gdt[APM_CS_16 >> 3],
17324 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17325 set_desc_base(&gdt[APM_DS >> 3],
17326 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17327+ pax_close_kernel();
17328
17329 proc_create("apm", 0, NULL, &apm_file_ops);
17330
17331diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17332index 2861082..6d4718e 100644
17333--- a/arch/x86/kernel/asm-offsets.c
17334+++ b/arch/x86/kernel/asm-offsets.c
17335@@ -33,6 +33,8 @@ void common(void) {
17336 OFFSET(TI_status, thread_info, status);
17337 OFFSET(TI_addr_limit, thread_info, addr_limit);
17338 OFFSET(TI_preempt_count, thread_info, preempt_count);
17339+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17340+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17341
17342 BLANK();
17343 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17344@@ -53,8 +55,26 @@ void common(void) {
17345 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17346 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17347 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17348+
17349+#ifdef CONFIG_PAX_KERNEXEC
17350+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17351 #endif
17352
17353+#ifdef CONFIG_PAX_MEMORY_UDEREF
17354+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17355+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17356+#ifdef CONFIG_X86_64
17357+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17358+#endif
17359+#endif
17360+
17361+#endif
17362+
17363+ BLANK();
17364+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17365+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17366+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17367+
17368 #ifdef CONFIG_XEN
17369 BLANK();
17370 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
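
For context on the asm-offsets hunks: these files are compiled only to harvest constants. Each OFFSET()/DEFINE() plants a recognizable marker in the generated assembly, and the build rewrites the markers into #defines in asm-offsets.h, which is how new symbols such as TI_lowest_stack and PAGE_SIZE_asm become visible to .S files. Simplified from the kernel's kbuild helpers:

/* Each invocation leaves a "->NAME value" line in the .s output; a
 * build script turns those lines into "#define NAME value". */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))
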
17371diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17372index 1b4754f..fbb4227 100644
17373--- a/arch/x86/kernel/asm-offsets_64.c
17374+++ b/arch/x86/kernel/asm-offsets_64.c
17375@@ -76,6 +76,7 @@ int main(void)
17376 BLANK();
17377 #undef ENTRY
17378
17379+ DEFINE(TSS_size, sizeof(struct tss_struct));
17380 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17381 BLANK();
17382
17383diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17384index a0e067d..9c7db16 100644
17385--- a/arch/x86/kernel/cpu/Makefile
17386+++ b/arch/x86/kernel/cpu/Makefile
17387@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17388 CFLAGS_REMOVE_perf_event.o = -pg
17389 endif
17390
17391-# Make sure load_percpu_segment has no stackprotector
17392-nostackp := $(call cc-option, -fno-stack-protector)
17393-CFLAGS_common.o := $(nostackp)
17394-
17395 obj-y := intel_cacheinfo.o scattered.o topology.o
17396 obj-y += proc.o capflags.o powerflags.o common.o
17397 obj-y += vmware.o hypervisor.o mshyperv.o
17398diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17399index 15239ff..e23e04e 100644
17400--- a/arch/x86/kernel/cpu/amd.c
17401+++ b/arch/x86/kernel/cpu/amd.c
17402@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17403 unsigned int size)
17404 {
17405 /* AMD errata T13 (order #21922) */
17406- if ((c->x86 == 6)) {
17407+ if (c->x86 == 6) {
17408 /* Duron Rev A0 */
17409 if (c->x86_model == 3 && c->x86_mask == 0)
17410 size = 64;
17411diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17412index 9c3ab43..51e6366 100644
17413--- a/arch/x86/kernel/cpu/common.c
17414+++ b/arch/x86/kernel/cpu/common.c
17415@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17416
17417 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17418
17419-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17420-#ifdef CONFIG_X86_64
17421- /*
17422- * We need valid kernel segments for data and code in long mode too
17423- * IRET will check the segment types kkeil 2000/10/28
17424- * Also sysret mandates a special GDT layout
17425- *
17426- * TLS descriptors are currently at a different place compared to i386.
17427- * Hopefully nobody expects them at a fixed place (Wine?)
17428- */
17429- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17430- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17431- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17432- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17433- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17434- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17435-#else
17436- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17437- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17438- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17439- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17440- /*
17441- * Segments used for calling PnP BIOS have byte granularity.
17442- * They code segments and data segments have fixed 64k limits,
17443- * the transfer segment sizes are set at run time.
17444- */
17445- /* 32-bit code */
17446- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17447- /* 16-bit code */
17448- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17449- /* 16-bit data */
17450- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17451- /* 16-bit data */
17452- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17453- /* 16-bit data */
17454- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17455- /*
17456- * The APM segments have byte granularity and their bases
17457- * are set at run time. All have 64k limits.
17458- */
17459- /* 32-bit code */
17460- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17461- /* 16-bit code */
17462- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17463- /* data */
17464- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17465-
17466- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17467- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17468- GDT_STACK_CANARY_INIT
17469-#endif
17470-} };
17471-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17472-
17473 static int __init x86_xsave_setup(char *s)
17474 {
17475 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17476@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
17477 {
17478 struct desc_ptr gdt_descr;
17479
17480- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17481+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17482 gdt_descr.size = GDT_SIZE - 1;
17483 load_gdt(&gdt_descr);
17484 /* Reload the per-cpu base */
17485@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17486 /* Filter out anything that depends on CPUID levels we don't have */
17487 filter_cpuid_features(c, true);
17488
17489+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17490+ setup_clear_cpu_cap(X86_FEATURE_SEP);
17491+#endif
17492+
17493 /* If the model name is still unset, do table lookup. */
17494 if (!c->x86_model_id[0]) {
17495 const char *p;
17496@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
17497 }
17498 __setup("clearcpuid=", setup_disablecpuid);
17499
17500+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17501+EXPORT_PER_CPU_SYMBOL(current_tinfo);
17502+
17503 #ifdef CONFIG_X86_64
17504 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17505-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17506- (unsigned long) nmi_idt_table };
17507+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17508
17509 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17510 irq_stack_union) __aligned(PAGE_SIZE);
17511@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17512 EXPORT_PER_CPU_SYMBOL(current_task);
17513
17514 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17515- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17516+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17517 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17518
17519 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17520@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
17521 int i;
17522
17523 cpu = stack_smp_processor_id();
17524- t = &per_cpu(init_tss, cpu);
17525+ t = init_tss + cpu;
17526 oist = &per_cpu(orig_ist, cpu);
17527
17528 #ifdef CONFIG_NUMA
17529@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
17530 switch_to_new_gdt(cpu);
17531 loadsegment(fs, 0);
17532
17533- load_idt((const struct desc_ptr *)&idt_descr);
17534+ load_idt(&idt_descr);
17535
17536 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17537 syscall_init();
17538@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
17539 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17540 barrier();
17541
17542- x86_configure_nx();
17543 enable_x2apic();
17544
17545 /*
17546@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
17547 {
17548 int cpu = smp_processor_id();
17549 struct task_struct *curr = current;
17550- struct tss_struct *t = &per_cpu(init_tss, cpu);
17551+ struct tss_struct *t = init_tss + cpu;
17552 struct thread_struct *thread = &curr->thread;
17553
17554 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
17555diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
17556index fcaabd0..7b55a26 100644
17557--- a/arch/x86/kernel/cpu/intel.c
17558+++ b/arch/x86/kernel/cpu/intel.c
17559@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
17560 * Update the IDT descriptor and reload the IDT so that
17561 * it uses the read-only mapped virtual address.
17562 */
17563- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
17564+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
17565 load_idt(&idt_descr);
17566 }
17567 #endif
17568diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17569index 84c1309..39b7224 100644
17570--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17571+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17572@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17573 };
17574
17575 #ifdef CONFIG_AMD_NB
17576+static struct attribute *default_attrs_amd_nb[] = {
17577+ &type.attr,
17578+ &level.attr,
17579+ &coherency_line_size.attr,
17580+ &physical_line_partition.attr,
17581+ &ways_of_associativity.attr,
17582+ &number_of_sets.attr,
17583+ &size.attr,
17584+ &shared_cpu_map.attr,
17585+ &shared_cpu_list.attr,
17586+ NULL,
17587+ NULL,
17588+ NULL,
17589+ NULL
17590+};
17591+
17592 static struct attribute ** __cpuinit amd_l3_attrs(void)
17593 {
17594 static struct attribute **attrs;
17595@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17596
17597 n = ARRAY_SIZE(default_attrs);
17598
17599- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17600- n += 2;
17601-
17602- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17603- n += 1;
17604-
17605- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17606- if (attrs == NULL)
17607- return attrs = default_attrs;
17608-
17609- for (n = 0; default_attrs[n]; n++)
17610- attrs[n] = default_attrs[n];
17611+ attrs = default_attrs_amd_nb;
17612
17613 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17614 attrs[n++] = &cache_disable_0.attr;
17615@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17616 .default_attrs = default_attrs,
17617 };
17618
17619+#ifdef CONFIG_AMD_NB
17620+static struct kobj_type ktype_cache_amd_nb = {
17621+ .sysfs_ops = &sysfs_ops,
17622+ .default_attrs = default_attrs_amd_nb,
17623+};
17624+#endif
17625+
17626 static struct kobj_type ktype_percpu_entry = {
17627 .sysfs_ops = &sysfs_ops,
17628 };
17629@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17630 return retval;
17631 }
17632
17633+#ifdef CONFIG_AMD_NB
17634+ amd_l3_attrs();
17635+#endif
17636+
17637 for (i = 0; i < num_cache_leaves; i++) {
17638+ struct kobj_type *ktype;
17639+
17640 this_object = INDEX_KOBJECT_PTR(cpu, i);
17641 this_object->cpu = cpu;
17642 this_object->index = i;
17643
17644 this_leaf = CPUID4_INFO_IDX(cpu, i);
17645
17646- ktype_cache.default_attrs = default_attrs;
17647+ ktype = &ktype_cache;
17648 #ifdef CONFIG_AMD_NB
17649 if (this_leaf->base.nb)
17650- ktype_cache.default_attrs = amd_l3_attrs();
17651+ ktype = &ktype_cache_amd_nb;
17652 #endif
17653 retval = kobject_init_and_add(&(this_object->kobj),
17654- &ktype_cache,
17655+ ktype,
17656 per_cpu(ici_cache_kobject, cpu),
17657 "index%1lu", i);
17658 if (unlikely(retval)) {
17659@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17660 return NOTIFY_OK;
17661 }
17662
17663-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17664+static struct notifier_block cacheinfo_cpu_notifier = {
17665 .notifier_call = cacheinfo_cpu_callback,
17666 };
17667
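
The cacheinfo rework trades a runtime kzalloc() for the fixed default_attrs_amd_nb[] declared above: the array carries the common attributes plus spare NULL slots, so the optional AMD L3 entries can be filled in at init time while the array stays NULL-terminated whichever options are present. The shape, reduced to a sketch with a hypothetical name:

static struct attribute *attrs_sketch[] = {
	&type.attr,		/* common attributes come first */
	&level.attr,
	NULL, NULL, NULL,	/* spare slots: cache_disable_0/1, subcaches */
	NULL			/* terminator; unused spares also terminate */
};
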
17668diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17669index 80dbda8..be16652 100644
17670--- a/arch/x86/kernel/cpu/mcheck/mce.c
17671+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17672@@ -45,6 +45,7 @@
17673 #include <asm/processor.h>
17674 #include <asm/mce.h>
17675 #include <asm/msr.h>
17676+#include <asm/local.h>
17677
17678 #include "mce-internal.h"
17679
17680@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17681 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17682 m->cs, m->ip);
17683
17684- if (m->cs == __KERNEL_CS)
17685+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17686 print_symbol("{%s}", m->ip);
17687 pr_cont("\n");
17688 }
17689@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17690
17691 #define PANIC_TIMEOUT 5 /* 5 seconds */
17692
17693-static atomic_t mce_paniced;
17694+static atomic_unchecked_t mce_paniced;
17695
17696 static int fake_panic;
17697-static atomic_t mce_fake_paniced;
17698+static atomic_unchecked_t mce_fake_paniced;
17699
17700 /* Panic in progress. Enable interrupts and wait for final IPI */
17701 static void wait_for_panic(void)
17702@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17703 /*
17704 * Make sure only one CPU runs in machine check panic
17705 */
17706- if (atomic_inc_return(&mce_paniced) > 1)
17707+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17708 wait_for_panic();
17709 barrier();
17710
17711@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17712 console_verbose();
17713 } else {
17714 /* Don't log too much for fake panic */
17715- if (atomic_inc_return(&mce_fake_paniced) > 1)
17716+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17717 return;
17718 }
17719 /* First print corrected ones that are still unlogged */
17720@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
17721 * might have been modified by someone else.
17722 */
17723 rmb();
17724- if (atomic_read(&mce_paniced))
17725+ if (atomic_read_unchecked(&mce_paniced))
17726 wait_for_panic();
17727 if (!mca_cfg.monarch_timeout)
17728 goto out;
17729@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17730 }
17731
17732 /* Call the installed machine check handler for this CPU setup. */
17733-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17734+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17735 unexpected_machine_check;
17736
17737 /*
17738@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17739 return;
17740 }
17741
17742+ pax_open_kernel();
17743 machine_check_vector = do_machine_check;
17744+ pax_close_kernel();
17745
17746 __mcheck_cpu_init_generic();
17747 __mcheck_cpu_init_vendor(c);
17748@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17749 */
17750
17751 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17752-static int mce_chrdev_open_count; /* #times opened */
17753+static local_t mce_chrdev_open_count; /* #times opened */
17754 static int mce_chrdev_open_exclu; /* already open exclusive? */
17755
17756 static int mce_chrdev_open(struct inode *inode, struct file *file)
17757@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17758 spin_lock(&mce_chrdev_state_lock);
17759
17760 if (mce_chrdev_open_exclu ||
17761- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17762+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17763 spin_unlock(&mce_chrdev_state_lock);
17764
17765 return -EBUSY;
17766@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17767
17768 if (file->f_flags & O_EXCL)
17769 mce_chrdev_open_exclu = 1;
17770- mce_chrdev_open_count++;
17771+ local_inc(&mce_chrdev_open_count);
17772
17773 spin_unlock(&mce_chrdev_state_lock);
17774
17775@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17776 {
17777 spin_lock(&mce_chrdev_state_lock);
17778
17779- mce_chrdev_open_count--;
17780+ local_dec(&mce_chrdev_open_count);
17781 mce_chrdev_open_exclu = 0;
17782
17783 spin_unlock(&mce_chrdev_state_lock);
17784@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17785 return NOTIFY_OK;
17786 }
17787
17788-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17789+static struct notifier_block mce_cpu_notifier = {
17790 .notifier_call = mce_cpu_callback,
17791 };
17792
17793@@ -2382,7 +2385,7 @@ static __init void mce_init_banks(void)
17794
17795 for (i = 0; i < mca_cfg.banks; i++) {
17796 struct mce_bank *b = &mce_banks[i];
17797- struct device_attribute *a = &b->attr;
17798+ device_attribute_no_const *a = &b->attr;
17799
17800 sysfs_attr_init(&a->attr);
17801 a->attr.name = b->attrname;
17802@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
17803 static void mce_reset(void)
17804 {
17805 cpu_missing = 0;
17806- atomic_set(&mce_fake_paniced, 0);
17807+ atomic_set_unchecked(&mce_fake_paniced, 0);
17808 atomic_set(&mce_executing, 0);
17809 atomic_set(&mce_callin, 0);
17810 atomic_set(&global_nwo, 0);
17811diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17812index 2d5454c..51987eb 100644
17813--- a/arch/x86/kernel/cpu/mcheck/p5.c
17814+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17815@@ -11,6 +11,7 @@
17816 #include <asm/processor.h>
17817 #include <asm/mce.h>
17818 #include <asm/msr.h>
17819+#include <asm/pgtable.h>
17820
17821 /* By default disabled */
17822 int mce_p5_enabled __read_mostly;
17823@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17824 if (!cpu_has(c, X86_FEATURE_MCE))
17825 return;
17826
17827+ pax_open_kernel();
17828 machine_check_vector = pentium_machine_check;
17829+ pax_close_kernel();
17830 /* Make sure the vector pointer is visible before we enable MCEs: */
17831 wmb();
17832
17833diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17834index 47a1870..8c019a7 100644
17835--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17836+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17837@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17838 return notifier_from_errno(err);
17839 }
17840
17841-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17842+static struct notifier_block thermal_throttle_cpu_notifier =
17843 {
17844 .notifier_call = thermal_throttle_cpu_callback,
17845 };
17846diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17847index 2d7998f..17c9de1 100644
17848--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17849+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17850@@ -10,6 +10,7 @@
17851 #include <asm/processor.h>
17852 #include <asm/mce.h>
17853 #include <asm/msr.h>
17854+#include <asm/pgtable.h>
17855
17856 /* Machine check handler for WinChip C6: */
17857 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17858@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17859 {
17860 u32 lo, hi;
17861
17862+ pax_open_kernel();
17863 machine_check_vector = winchip_machine_check;
17864+ pax_close_kernel();
17865 /* Make sure the vector pointer is visible before we enable MCEs: */
17866 wmb();
17867
17868diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17869index 726bf96..81f0526 100644
17870--- a/arch/x86/kernel/cpu/mtrr/main.c
17871+++ b/arch/x86/kernel/cpu/mtrr/main.c
17872@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17873 u64 size_or_mask, size_and_mask;
17874 static bool mtrr_aps_delayed_init;
17875
17876-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17877+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17878
17879 const struct mtrr_ops *mtrr_if;
17880
17881diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17882index df5e41f..816c719 100644
17883--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17884+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17885@@ -25,7 +25,7 @@ struct mtrr_ops {
17886 int (*validate_add_page)(unsigned long base, unsigned long size,
17887 unsigned int type);
17888 int (*have_wrcomb)(void);
17889-};
17890+} __do_const;
17891
17892 extern int generic_get_free_region(unsigned long base, unsigned long size,
17893 int replace_reg);
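
__do_const is the mirror image of the __no_const marker noted earlier: it seemingly asks the constify plugin to force every instance of an ops type into read-only memory even when the declarations themselves are not spelled const, which is why mtrr_ops here (and, below, perf_pmu_events_attr and uncore_event_desc) gain it. Sketch, with a hypothetical type name:

struct locked_down_ops {
	int (*have_wrcomb)(void);
} __do_const;		/* all instances land in read-only memory */
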
17894diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17895index 6774c17..72c1b22 100644
17896--- a/arch/x86/kernel/cpu/perf_event.c
17897+++ b/arch/x86/kernel/cpu/perf_event.c
17898@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17899 pr_info("no hardware sampling interrupt available.\n");
17900 }
17901
17902-static struct attribute_group x86_pmu_format_group = {
17903+static attribute_group_no_const x86_pmu_format_group = {
17904 .name = "format",
17905 .attrs = NULL,
17906 };
17907@@ -1313,7 +1313,7 @@ static struct attribute_group x86_pmu_format_group = {
17908 struct perf_pmu_events_attr {
17909 struct device_attribute attr;
17910 u64 id;
17911-};
17912+} __do_const;
17913
17914 /*
17915 * Remove all undefined events (x86_pmu.event_map(id) == 0)
17916@@ -1381,7 +1381,7 @@ static struct attribute *events_attr[] = {
17917 NULL,
17918 };
17919
17920-static struct attribute_group x86_pmu_events_group = {
17921+static attribute_group_no_const x86_pmu_events_group = {
17922 .name = "events",
17923 .attrs = events_attr,
17924 };
17925@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
17926 if (idx > GDT_ENTRIES)
17927 return 0;
17928
17929- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17930+ desc = get_cpu_gdt_table(smp_processor_id());
17931 }
17932
17933 return get_desc_base(desc + idx);
17934@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17935 break;
17936
17937 perf_callchain_store(entry, frame.return_address);
17938- fp = frame.next_frame;
17939+ fp = (const void __force_user *)frame.next_frame;
17940 }
17941 }
17942
17943diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17944index 4914e94..60b06e3 100644
17945--- a/arch/x86/kernel/cpu/perf_event_intel.c
17946+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17947@@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
17948 * v2 and above have a perf capabilities MSR
17949 */
17950 if (version > 1) {
17951- u64 capabilities;
17952+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17953
17954- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17955- x86_pmu.intel_cap.capabilities = capabilities;
17956+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17957+ x86_pmu.intel_cap.capabilities = capabilities;
17958 }
17959
17960 intel_ds_init();
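
The perf hunk replaces rdmsrl() with rdmsrl_safe(): the safe variant catches the #GP that a non-existent MSR raises and returns nonzero instead of oopsing, and the rewrite preseeds the destination so a failed read falls back to the previous value. The same idiom as a hypothetical helper:

static u64 read_msr_or(u32 msr, u64 fallback)
{
	u64 val;

	if (rdmsrl_safe(msr, &val))	/* nonzero: #GP caught, MSR absent */
		return fallback;
	return val;
}
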
17961diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17962index b43200d..d235b3e 100644
17963--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17964+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17965@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17966 static int __init uncore_type_init(struct intel_uncore_type *type)
17967 {
17968 struct intel_uncore_pmu *pmus;
17969- struct attribute_group *events_group;
17970+ attribute_group_no_const *attr_group;
17971 struct attribute **attrs;
17972 int i, j;
17973
17974@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
17975 while (type->event_descs[i].attr.attr.name)
17976 i++;
17977
17978- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17979- sizeof(*events_group), GFP_KERNEL);
17980- if (!events_group)
17981+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17982+ sizeof(*attr_group), GFP_KERNEL);
17983+ if (!attr_group)
17984 goto fail;
17985
17986- attrs = (struct attribute **)(events_group + 1);
17987- events_group->name = "events";
17988- events_group->attrs = attrs;
17989+ attrs = (struct attribute **)(attr_group + 1);
17990+ attr_group->name = "events";
17991+ attr_group->attrs = attrs;
17992
17993 for (j = 0; j < i; j++)
17994 attrs[j] = &type->event_descs[j].attr.attr;
17995
17996- type->events_group = events_group;
17997+ type->events_group = attr_group;
17998 }
17999
18000 type->pmu_group = &uncore_pmu_attr_group;
18001@@ -2826,7 +2826,7 @@ static int
18002 return NOTIFY_OK;
18003 }
18004
18005-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
18006+static struct notifier_block uncore_cpu_nb = {
18007 .notifier_call = uncore_cpu_notifier,
18008 /*
18009 * to migrate uncore events, our notifier should be executed
18010diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18011index e68a455..975a932 100644
18012--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18013+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18014@@ -428,7 +428,7 @@ struct intel_uncore_box {
18015 struct uncore_event_desc {
18016 struct kobj_attribute attr;
18017 const char *config;
18018-};
18019+} __do_const;
18020
18021 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
18022 { \
18023diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
18024index 60c7891..9e911d3 100644
18025--- a/arch/x86/kernel/cpuid.c
18026+++ b/arch/x86/kernel/cpuid.c
18027@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
18028 return notifier_from_errno(err);
18029 }
18030
18031-static struct notifier_block __refdata cpuid_class_cpu_notifier =
18032+static struct notifier_block cpuid_class_cpu_notifier =
18033 {
18034 .notifier_call = cpuid_class_cpu_callback,
18035 };
18036diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
18037index 74467fe..18793d5 100644
18038--- a/arch/x86/kernel/crash.c
18039+++ b/arch/x86/kernel/crash.c
18040@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
18041 {
18042 #ifdef CONFIG_X86_32
18043 struct pt_regs fixed_regs;
18044-#endif
18045
18046-#ifdef CONFIG_X86_32
18047- if (!user_mode_vm(regs)) {
18048+ if (!user_mode(regs)) {
18049 crash_fixup_ss_esp(&fixed_regs, regs);
18050 regs = &fixed_regs;
18051 }
18052diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
18053index 37250fe..bf2ec74 100644
18054--- a/arch/x86/kernel/doublefault_32.c
18055+++ b/arch/x86/kernel/doublefault_32.c
18056@@ -11,7 +11,7 @@
18057
18058 #define DOUBLEFAULT_STACKSIZE (1024)
18059 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
18060-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
18061+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
18062
18063 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
18064
18065@@ -21,7 +21,7 @@ static void doublefault_fn(void)
18066 unsigned long gdt, tss;
18067
18068 store_gdt(&gdt_desc);
18069- gdt = gdt_desc.address;
18070+ gdt = (unsigned long)gdt_desc.address;
18071
18072 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
18073
18074@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
18075 /* 0x2 bit is always set */
18076 .flags = X86_EFLAGS_SF | 0x2,
18077 .sp = STACK_START,
18078- .es = __USER_DS,
18079+ .es = __KERNEL_DS,
18080 .cs = __KERNEL_CS,
18081 .ss = __KERNEL_DS,
18082- .ds = __USER_DS,
18083+ .ds = __KERNEL_DS,
18084 .fs = __KERNEL_PERCPU,
18085
18086 .__cr3 = __pa_nodebug(swapper_pg_dir),
18087diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
18088index ae42418b..787c16b 100644
18089--- a/arch/x86/kernel/dumpstack.c
18090+++ b/arch/x86/kernel/dumpstack.c
18091@@ -2,6 +2,9 @@
18092 * Copyright (C) 1991, 1992 Linus Torvalds
18093 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
18094 */
18095+#ifdef CONFIG_GRKERNSEC_HIDESYM
18096+#define __INCLUDED_BY_HIDESYM 1
18097+#endif
18098 #include <linux/kallsyms.h>
18099 #include <linux/kprobes.h>
18100 #include <linux/uaccess.h>
18101@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
18102 static void
18103 print_ftrace_graph_addr(unsigned long addr, void *data,
18104 const struct stacktrace_ops *ops,
18105- struct thread_info *tinfo, int *graph)
18106+ struct task_struct *task, int *graph)
18107 {
18108- struct task_struct *task;
18109 unsigned long ret_addr;
18110 int index;
18111
18112 if (addr != (unsigned long)return_to_handler)
18113 return;
18114
18115- task = tinfo->task;
18116 index = task->curr_ret_stack;
18117
18118 if (!task->ret_stack || index < *graph)
18119@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18120 static inline void
18121 print_ftrace_graph_addr(unsigned long addr, void *data,
18122 const struct stacktrace_ops *ops,
18123- struct thread_info *tinfo, int *graph)
18124+ struct task_struct *task, int *graph)
18125 { }
18126 #endif
18127
18128@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18129 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
18130 */
18131
18132-static inline int valid_stack_ptr(struct thread_info *tinfo,
18133- void *p, unsigned int size, void *end)
18134+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
18135 {
18136- void *t = tinfo;
18137 if (end) {
18138 if (p < end && p >= (end-THREAD_SIZE))
18139 return 1;
18140@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
18141 }
18142
18143 unsigned long
18144-print_context_stack(struct thread_info *tinfo,
18145+print_context_stack(struct task_struct *task, void *stack_start,
18146 unsigned long *stack, unsigned long bp,
18147 const struct stacktrace_ops *ops, void *data,
18148 unsigned long *end, int *graph)
18149 {
18150 struct stack_frame *frame = (struct stack_frame *)bp;
18151
18152- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
18153+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
18154 unsigned long addr;
18155
18156 addr = *stack;
18157@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
18158 } else {
18159 ops->address(data, addr, 0);
18160 }
18161- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18162+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18163 }
18164 stack++;
18165 }
18166@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
18167 EXPORT_SYMBOL_GPL(print_context_stack);
18168
18169 unsigned long
18170-print_context_stack_bp(struct thread_info *tinfo,
18171+print_context_stack_bp(struct task_struct *task, void *stack_start,
18172 unsigned long *stack, unsigned long bp,
18173 const struct stacktrace_ops *ops, void *data,
18174 unsigned long *end, int *graph)
18175@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18176 struct stack_frame *frame = (struct stack_frame *)bp;
18177 unsigned long *ret_addr = &frame->return_address;
18178
18179- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
18180+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
18181 unsigned long addr = *ret_addr;
18182
18183 if (!__kernel_text_address(addr))
18184@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18185 ops->address(data, addr, 1);
18186 frame = frame->next_frame;
18187 ret_addr = &frame->return_address;
18188- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18189+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18190 }
18191
18192 return (unsigned long)frame;
18193@@ -189,7 +188,7 @@ void dump_stack(void)
18194
18195 bp = stack_frame(current, NULL);
18196 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
18197- current->pid, current->comm, print_tainted(),
18198+ task_pid_nr(current), current->comm, print_tainted(),
18199 init_utsname()->release,
18200 (int)strcspn(init_utsname()->version, " "),
18201 init_utsname()->version);
18202@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
18203 }
18204 EXPORT_SYMBOL_GPL(oops_begin);
18205
18206+extern void gr_handle_kernel_exploit(void);
18207+
18208 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18209 {
18210 if (regs && kexec_should_crash(current))
18211@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18212 panic("Fatal exception in interrupt");
18213 if (panic_on_oops)
18214 panic("Fatal exception");
18215- do_exit(signr);
18216+
18217+ gr_handle_kernel_exploit();
18218+
18219+ do_group_exit(signr);
18220 }
18221
18222 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18223@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18224 print_modules();
18225 show_regs(regs);
18226 #ifdef CONFIG_X86_32
18227- if (user_mode_vm(regs)) {
18228+ if (user_mode(regs)) {
18229 sp = regs->sp;
18230 ss = regs->ss & 0xffff;
18231 } else {
18232@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
18233 unsigned long flags = oops_begin();
18234 int sig = SIGSEGV;
18235
18236- if (!user_mode_vm(regs))
18237+ if (!user_mode(regs))
18238 report_bug(regs->ip, regs);
18239
18240 if (__die(str, regs, err))
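
The dumpstack rework has two independent parts. First, the frame walkers no longer derive the owning task from a thread_info assumed to sit at the stack base; they receive the task and a bare stack_start separately, so the validity test reduces to plain range arithmetic. Second, the oops path now calls grsecurity's gr_handle_kernel_exploit() hook and terminates the whole thread group via do_group_exit() rather than only the oopsing thread, so the remaining threads of an exploited process cannot keep running. A compilable sketch of the reworked pointer check (the THREAD_SIZE value is illustrative):

#define THREAD_SIZE 8192UL

/* sketch of the reworked valid_stack_ptr(): with no thread_info at the
 * stack base, only the stack's own bounds matter */
static int valid_stack_ptr_sketch(char *stack_start, char *p,
                                  unsigned int size, char *end)
{
        if (end)        /* IRQ/exception stacks pass an explicit end */
                return p < end && p >= end - THREAD_SIZE;
        return p > stack_start && p < stack_start + THREAD_SIZE - size;
}

int main(void)
{
        static char stack[THREAD_SIZE];
        /* a pointer 128 bytes up the stack, reading 8 bytes: valid */
        return !valid_stack_ptr_sketch(stack, stack + 128, 8, 0);
}
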
18241diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
18242index 1038a41..db2c12b 100644
18243--- a/arch/x86/kernel/dumpstack_32.c
18244+++ b/arch/x86/kernel/dumpstack_32.c
18245@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18246 bp = stack_frame(task, regs);
18247
18248 for (;;) {
18249- struct thread_info *context;
18250+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18251
18252- context = (struct thread_info *)
18253- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
18254- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
18255+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18256
18257- stack = (unsigned long *)context->previous_esp;
18258- if (!stack)
18259+ if (stack_start == task_stack_page(task))
18260 break;
18261+ stack = *(unsigned long **)stack_start;
18262 if (ops->stack(data, "IRQ") < 0)
18263 break;
18264 touch_nmi_watchdog();
18265@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
18266 {
18267 int i;
18268
18269- __show_regs(regs, !user_mode_vm(regs));
18270+ __show_regs(regs, !user_mode(regs));
18271
18272 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
18273 TASK_COMM_LEN, current->comm, task_pid_nr(current),
18274@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
18275 * When in-kernel, we also print out the stack and code at the
18276 * time of the fault..
18277 */
18278- if (!user_mode_vm(regs)) {
18279+ if (!user_mode(regs)) {
18280 unsigned int code_prologue = code_bytes * 43 / 64;
18281 unsigned int code_len = code_bytes;
18282 unsigned char c;
18283 u8 *ip;
18284+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
18285
18286 pr_emerg("Stack:\n");
18287 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
18288
18289 pr_emerg("Code:");
18290
18291- ip = (u8 *)regs->ip - code_prologue;
18292+ ip = (u8 *)regs->ip - code_prologue + cs_base;
18293 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
18294 /* try starting at IP */
18295- ip = (u8 *)regs->ip;
18296+ ip = (u8 *)regs->ip + cs_base;
18297 code_len = code_len - code_prologue + 1;
18298 }
18299 for (i = 0; i < code_len; i++, ip++) {
18300@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
18301 pr_cont(" Bad EIP value.");
18302 break;
18303 }
18304- if (ip == (u8 *)regs->ip)
18305+ if (ip == (u8 *)regs->ip + cs_base)
18306 pr_cont(" <%02x>", c);
18307 else
18308 pr_cont(" %02x", c);
18309@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
18310 {
18311 unsigned short ud2;
18312
18313+ ip = ktla_ktva(ip);
18314 if (ip < PAGE_OFFSET)
18315 return 0;
18316 if (probe_kernel_address((unsigned short *)ip, ud2))
18317@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
18318
18319 return ud2 == 0x0b0f;
18320 }
18321+
18322+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18323+void pax_check_alloca(unsigned long size)
18324+{
18325+ unsigned long sp = (unsigned long)&sp, stack_left;
18326+
18327+ /* all kernel stacks are of the same size */
18328+ stack_left = sp & (THREAD_SIZE - 1);
18329+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18330+}
18331+EXPORT_SYMBOL(pax_check_alloca);
18332+#endif
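
Because every 32-bit kernel stack is THREAD_SIZE bytes and THREAD_SIZE-aligned, the room left under the stack pointer is simply its low bits, which is all pax_check_alloca() needs; the 256-byte margin leaves space for the BUG() machinery itself. The same arithmetic, lifted into a runnable userspace example (THREAD_SIZE and the sample sp are illustrative):

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL      /* 32-bit x86 default: two pages */

/* sketch of the 32-bit check: on a THREAD_SIZE-aligned stack,
 * sp % THREAD_SIZE is exactly the number of bytes still free */
static void pax_check_alloca_sketch(unsigned long sp, unsigned long size)
{
        unsigned long stack_left = sp & (THREAD_SIZE - 1);

        /* demand a 256-byte safety margin, as the patch does */
        assert(stack_left >= 256 && size < stack_left - 256);
}

int main(void)
{
        /* an sp sitting 1 KiB above the bottom of its stack */
        unsigned long sp = 0x1234000UL + 1024;

        pax_check_alloca_sketch(sp, 512);       /* 512 < 1024 - 256 */
        printf("512-byte alloca accepted\n");
        return 0;
}
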
18333diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18334index b653675..51cc8c0 100644
18335--- a/arch/x86/kernel/dumpstack_64.c
18336+++ b/arch/x86/kernel/dumpstack_64.c
18337@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18338 unsigned long *irq_stack_end =
18339 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18340 unsigned used = 0;
18341- struct thread_info *tinfo;
18342 int graph = 0;
18343 unsigned long dummy;
18344+ void *stack_start;
18345
18346 if (!task)
18347 task = current;
18348@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18349 * current stack address. If the stacks consist of nested
18350 * exceptions
18351 */
18352- tinfo = task_thread_info(task);
18353 for (;;) {
18354 char *id;
18355 unsigned long *estack_end;
18356+
18357 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18358 &used, &id);
18359
18360@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18361 if (ops->stack(data, id) < 0)
18362 break;
18363
18364- bp = ops->walk_stack(tinfo, stack, bp, ops,
18365+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18366 data, estack_end, &graph);
18367 ops->stack(data, "<EOE>");
18368 /*
18369@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18370 * second-to-last pointer (index -2 to end) in the
18371 * exception stack:
18372 */
18373+ if ((u16)estack_end[-1] != __KERNEL_DS)
18374+ goto out;
18375 stack = (unsigned long *) estack_end[-2];
18376 continue;
18377 }
18378@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18379 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18380 if (ops->stack(data, "IRQ") < 0)
18381 break;
18382- bp = ops->walk_stack(tinfo, stack, bp,
18383+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18384 ops, data, irq_stack_end, &graph);
18385 /*
18386 * We link to the next stack (which would be
18387@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18388 /*
18389 * This handles the process stack:
18390 */
18391- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18392+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18393+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18394+out:
18395 put_cpu();
18396 }
18397 EXPORT_SYMBOL(dump_trace);
18398@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
18399 {
18400 int i;
18401 unsigned long sp;
18402- const int cpu = smp_processor_id();
18403+ const int cpu = raw_smp_processor_id();
18404 struct task_struct *cur = current;
18405
18406 sp = regs->sp;
18407@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
18408
18409 return ud2 == 0x0b0f;
18410 }
18411+
18412+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18413+void pax_check_alloca(unsigned long size)
18414+{
18415+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18416+ unsigned cpu, used;
18417+ char *id;
18418+
18419+ /* check the process stack first */
18420+ stack_start = (unsigned long)task_stack_page(current);
18421+ stack_end = stack_start + THREAD_SIZE;
18422+ if (likely(stack_start <= sp && sp < stack_end)) {
18423+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
18424+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18425+ return;
18426+ }
18427+
18428+ cpu = get_cpu();
18429+
18430+ /* check the irq stacks */
18431+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18432+ stack_start = stack_end - IRQ_STACK_SIZE;
18433+ if (stack_start <= sp && sp < stack_end) {
18434+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18435+ put_cpu();
18436+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18437+ return;
18438+ }
18439+
18440+ /* check the exception stacks */
18441+ used = 0;
18442+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18443+ stack_start = stack_end - EXCEPTION_STKSZ;
18444+ if (stack_end && stack_start <= sp && sp < stack_end) {
18445+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18446+ put_cpu();
18447+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18448+ return;
18449+ }
18450+
18451+ put_cpu();
18452+
18453+ /* unknown stack */
18454+ BUG();
18455+}
18456+EXPORT_SYMBOL(pax_check_alloca);
18457+#endif
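
The 64-bit variant of pax_check_alloca() cannot assume one stack: sp may sit on the process stack, the per-CPU IRQ stack, or one of the IST exception stacks, each with its own size, so it classifies sp first and then applies the same 256-byte-margin rule to whichever stack matched. A sketch of the classification step (sizes and base addresses hypothetical):

#include <stdio.h>

#define THREAD_SIZE    (16UL * 1024)    /* illustrative sizes only */
#define IRQ_STACK_SIZE (16UL * 1024)

/* classify sp against a [start, start+size) stack: return the bytes
 * left below sp on that stack, or -1 when sp is not on it; stacks
 * are size-aligned, so the low bits of sp are the free space */
static long left_on(unsigned long sp, unsigned long start, unsigned long size)
{
        if (sp < start || sp >= start + size)
                return -1;
        return (long)(sp & (size - 1));
}

int main(void)
{
        unsigned long proc_start = 0x10000UL;   /* hypothetical bases */
        unsigned long irq_start  = 0x40000UL;
        unsigned long sp         = irq_start + 0x800;
        long left;

        if ((left = left_on(sp, proc_start, THREAD_SIZE)) < 0)
                left = left_on(sp, irq_start, IRQ_STACK_SIZE);
        /* the real code probes the IST exception stacks the same way */

        printf("bytes left below sp: %ld\n", left);     /* 2048 */
        return 0;
}
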
18458diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18459index 9b9f18b..9fcaa04 100644
18460--- a/arch/x86/kernel/early_printk.c
18461+++ b/arch/x86/kernel/early_printk.c
18462@@ -7,6 +7,7 @@
18463 #include <linux/pci_regs.h>
18464 #include <linux/pci_ids.h>
18465 #include <linux/errno.h>
18466+#include <linux/sched.h>
18467 #include <asm/io.h>
18468 #include <asm/processor.h>
18469 #include <asm/fcntl.h>
18470diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18471index 6ed91d9..6cc365b 100644
18472--- a/arch/x86/kernel/entry_32.S
18473+++ b/arch/x86/kernel/entry_32.S
18474@@ -177,13 +177,153 @@
18475 /*CFI_REL_OFFSET gs, PT_GS*/
18476 .endm
18477 .macro SET_KERNEL_GS reg
18478+
18479+#ifdef CONFIG_CC_STACKPROTECTOR
18480 movl $(__KERNEL_STACK_CANARY), \reg
18481+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18482+ movl $(__USER_DS), \reg
18483+#else
18484+ xorl \reg, \reg
18485+#endif
18486+
18487 movl \reg, %gs
18488 .endm
18489
18490 #endif /* CONFIG_X86_32_LAZY_GS */
18491
18492-.macro SAVE_ALL
18493+.macro pax_enter_kernel
18494+#ifdef CONFIG_PAX_KERNEXEC
18495+ call pax_enter_kernel
18496+#endif
18497+.endm
18498+
18499+.macro pax_exit_kernel
18500+#ifdef CONFIG_PAX_KERNEXEC
18501+ call pax_exit_kernel
18502+#endif
18503+.endm
18504+
18505+#ifdef CONFIG_PAX_KERNEXEC
18506+ENTRY(pax_enter_kernel)
18507+#ifdef CONFIG_PARAVIRT
18508+ pushl %eax
18509+ pushl %ecx
18510+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18511+ mov %eax, %esi
18512+#else
18513+ mov %cr0, %esi
18514+#endif
18515+ bts $16, %esi
18516+ jnc 1f
18517+ mov %cs, %esi
18518+ cmp $__KERNEL_CS, %esi
18519+ jz 3f
18520+ ljmp $__KERNEL_CS, $3f
18521+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18522+2:
18523+#ifdef CONFIG_PARAVIRT
18524+ mov %esi, %eax
18525+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18526+#else
18527+ mov %esi, %cr0
18528+#endif
18529+3:
18530+#ifdef CONFIG_PARAVIRT
18531+ popl %ecx
18532+ popl %eax
18533+#endif
18534+ ret
18535+ENDPROC(pax_enter_kernel)
18536+
18537+ENTRY(pax_exit_kernel)
18538+#ifdef CONFIG_PARAVIRT
18539+ pushl %eax
18540+ pushl %ecx
18541+#endif
18542+ mov %cs, %esi
18543+ cmp $__KERNEXEC_KERNEL_CS, %esi
18544+ jnz 2f
18545+#ifdef CONFIG_PARAVIRT
18546+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18547+ mov %eax, %esi
18548+#else
18549+ mov %cr0, %esi
18550+#endif
18551+ btr $16, %esi
18552+ ljmp $__KERNEL_CS, $1f
18553+1:
18554+#ifdef CONFIG_PARAVIRT
18555+ mov %esi, %eax
18556+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18557+#else
18558+ mov %esi, %cr0
18559+#endif
18560+2:
18561+#ifdef CONFIG_PARAVIRT
18562+ popl %ecx
18563+ popl %eax
18564+#endif
18565+ ret
18566+ENDPROC(pax_exit_kernel)
18567+#endif
18568+
18569+.macro pax_erase_kstack
18570+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18571+ call pax_erase_kstack
18572+#endif
18573+.endm
18574+
18575+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18576+/*
18577+ * ebp: thread_info
18578+ */
18579+ENTRY(pax_erase_kstack)
18580+ pushl %edi
18581+ pushl %ecx
18582+ pushl %eax
18583+
18584+ mov TI_lowest_stack(%ebp), %edi
18585+ mov $-0xBEEF, %eax
18586+ std
18587+
18588+1: mov %edi, %ecx
18589+ and $THREAD_SIZE_asm - 1, %ecx
18590+ shr $2, %ecx
18591+ repne scasl
18592+ jecxz 2f
18593+
18594+ cmp $2*16, %ecx
18595+ jc 2f
18596+
18597+ mov $2*16, %ecx
18598+ repe scasl
18599+ jecxz 2f
18600+ jne 1b
18601+
18602+2: cld
18603+ mov %esp, %ecx
18604+ sub %edi, %ecx
18605+
18606+ cmp $THREAD_SIZE_asm, %ecx
18607+ jb 3f
18608+ ud2
18609+3:
18610+
18611+ shr $2, %ecx
18612+ rep stosl
18613+
18614+ mov TI_task_thread_sp0(%ebp), %edi
18615+ sub $128, %edi
18616+ mov %edi, TI_lowest_stack(%ebp)
18617+
18618+ popl %eax
18619+ popl %ecx
18620+ popl %edi
18621+ ret
18622+ENDPROC(pax_erase_kstack)
18623+#endif
18624+
18625+.macro __SAVE_ALL _DS
18626 cld
18627 PUSH_GS
18628 pushl_cfi %fs
18629@@ -206,7 +346,7 @@
18630 CFI_REL_OFFSET ecx, 0
18631 pushl_cfi %ebx
18632 CFI_REL_OFFSET ebx, 0
18633- movl $(__USER_DS), %edx
18634+ movl $\_DS, %edx
18635 movl %edx, %ds
18636 movl %edx, %es
18637 movl $(__KERNEL_PERCPU), %edx
18638@@ -214,6 +354,15 @@
18639 SET_KERNEL_GS %edx
18640 .endm
18641
18642+.macro SAVE_ALL
18643+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18644+ __SAVE_ALL __KERNEL_DS
18645+ pax_enter_kernel
18646+#else
18647+ __SAVE_ALL __USER_DS
18648+#endif
18649+.endm
18650+
18651 .macro RESTORE_INT_REGS
18652 popl_cfi %ebx
18653 CFI_RESTORE ebx
18654@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18655 popfl_cfi
18656 jmp syscall_exit
18657 CFI_ENDPROC
18658-END(ret_from_fork)
18659+ENDPROC(ret_from_fork)
18660
18661 ENTRY(ret_from_kernel_thread)
18662 CFI_STARTPROC
18663@@ -344,7 +493,15 @@ ret_from_intr:
18664 andl $SEGMENT_RPL_MASK, %eax
18665 #endif
18666 cmpl $USER_RPL, %eax
18667+
18668+#ifdef CONFIG_PAX_KERNEXEC
18669+ jae resume_userspace
18670+
18671+ pax_exit_kernel
18672+ jmp resume_kernel
18673+#else
18674 jb resume_kernel # not returning to v8086 or userspace
18675+#endif
18676
18677 ENTRY(resume_userspace)
18678 LOCKDEP_SYS_EXIT
18679@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18680 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18681 # int/exception return?
18682 jne work_pending
18683- jmp restore_all
18684-END(ret_from_exception)
18685+ jmp restore_all_pax
18686+ENDPROC(ret_from_exception)
18687
18688 #ifdef CONFIG_PREEMPT
18689 ENTRY(resume_kernel)
18690@@ -372,7 +529,7 @@ need_resched:
18691 jz restore_all
18692 call preempt_schedule_irq
18693 jmp need_resched
18694-END(resume_kernel)
18695+ENDPROC(resume_kernel)
18696 #endif
18697 CFI_ENDPROC
18698 /*
18699@@ -406,30 +563,45 @@ sysenter_past_esp:
18700 /*CFI_REL_OFFSET cs, 0*/
18701 /*
18702 * Push current_thread_info()->sysenter_return to the stack.
18703- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18704- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18705 */
18706- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18707+ pushl_cfi $0
18708 CFI_REL_OFFSET eip, 0
18709
18710 pushl_cfi %eax
18711 SAVE_ALL
18712+ GET_THREAD_INFO(%ebp)
18713+ movl TI_sysenter_return(%ebp),%ebp
18714+ movl %ebp,PT_EIP(%esp)
18715 ENABLE_INTERRUPTS(CLBR_NONE)
18716
18717 /*
18718 * Load the potential sixth argument from user stack.
18719 * Careful about security.
18720 */
18721+ movl PT_OLDESP(%esp),%ebp
18722+
18723+#ifdef CONFIG_PAX_MEMORY_UDEREF
18724+ mov PT_OLDSS(%esp),%ds
18725+1: movl %ds:(%ebp),%ebp
18726+ push %ss
18727+ pop %ds
18728+#else
18729 cmpl $__PAGE_OFFSET-3,%ebp
18730 jae syscall_fault
18731 ASM_STAC
18732 1: movl (%ebp),%ebp
18733 ASM_CLAC
18734+#endif
18735+
18736 movl %ebp,PT_EBP(%esp)
18737 _ASM_EXTABLE(1b,syscall_fault)
18738
18739 GET_THREAD_INFO(%ebp)
18740
18741+#ifdef CONFIG_PAX_RANDKSTACK
18742+ pax_erase_kstack
18743+#endif
18744+
18745 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18746 jnz sysenter_audit
18747 sysenter_do_call:
18748@@ -444,12 +616,24 @@ sysenter_do_call:
18749 testl $_TIF_ALLWORK_MASK, %ecx
18750 jne sysexit_audit
18751 sysenter_exit:
18752+
18753+#ifdef CONFIG_PAX_RANDKSTACK
18754+ pushl_cfi %eax
18755+ movl %esp, %eax
18756+ call pax_randomize_kstack
18757+ popl_cfi %eax
18758+#endif
18759+
18760+ pax_erase_kstack
18761+
18762 /* if something modifies registers it must also disable sysexit */
18763 movl PT_EIP(%esp), %edx
18764 movl PT_OLDESP(%esp), %ecx
18765 xorl %ebp,%ebp
18766 TRACE_IRQS_ON
18767 1: mov PT_FS(%esp), %fs
18768+2: mov PT_DS(%esp), %ds
18769+3: mov PT_ES(%esp), %es
18770 PTGS_TO_GS
18771 ENABLE_INTERRUPTS_SYSEXIT
18772
18773@@ -466,6 +650,9 @@ sysenter_audit:
18774 movl %eax,%edx /* 2nd arg: syscall number */
18775 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18776 call __audit_syscall_entry
18777+
18778+ pax_erase_kstack
18779+
18780 pushl_cfi %ebx
18781 movl PT_EAX(%esp),%eax /* reload syscall number */
18782 jmp sysenter_do_call
18783@@ -491,10 +678,16 @@ sysexit_audit:
18784
18785 CFI_ENDPROC
18786 .pushsection .fixup,"ax"
18787-2: movl $0,PT_FS(%esp)
18788+4: movl $0,PT_FS(%esp)
18789+ jmp 1b
18790+5: movl $0,PT_DS(%esp)
18791+ jmp 1b
18792+6: movl $0,PT_ES(%esp)
18793 jmp 1b
18794 .popsection
18795- _ASM_EXTABLE(1b,2b)
18796+ _ASM_EXTABLE(1b,4b)
18797+ _ASM_EXTABLE(2b,5b)
18798+ _ASM_EXTABLE(3b,6b)
18799 PTGS_TO_GS_EX
18800 ENDPROC(ia32_sysenter_target)
18801
18802@@ -509,6 +702,11 @@ ENTRY(system_call)
18803 pushl_cfi %eax # save orig_eax
18804 SAVE_ALL
18805 GET_THREAD_INFO(%ebp)
18806+
18807+#ifdef CONFIG_PAX_RANDKSTACK
18808+ pax_erase_kstack
18809+#endif
18810+
18811 # system call tracing in operation / emulation
18812 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18813 jnz syscall_trace_entry
18814@@ -527,6 +725,15 @@ syscall_exit:
18815 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18816 jne syscall_exit_work
18817
18818+restore_all_pax:
18819+
18820+#ifdef CONFIG_PAX_RANDKSTACK
18821+ movl %esp, %eax
18822+ call pax_randomize_kstack
18823+#endif
18824+
18825+ pax_erase_kstack
18826+
18827 restore_all:
18828 TRACE_IRQS_IRET
18829 restore_all_notrace:
18830@@ -583,14 +790,34 @@ ldt_ss:
18831 * compensating for the offset by changing to the ESPFIX segment with
18832 * a base address that matches for the difference.
18833 */
18834-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18835+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18836 mov %esp, %edx /* load kernel esp */
18837 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18838 mov %dx, %ax /* eax: new kernel esp */
18839 sub %eax, %edx /* offset (low word is 0) */
18840+#ifdef CONFIG_SMP
18841+ movl PER_CPU_VAR(cpu_number), %ebx
18842+ shll $PAGE_SHIFT_asm, %ebx
18843+ addl $cpu_gdt_table, %ebx
18844+#else
18845+ movl $cpu_gdt_table, %ebx
18846+#endif
18847 shr $16, %edx
18848- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18849- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18850+
18851+#ifdef CONFIG_PAX_KERNEXEC
18852+ mov %cr0, %esi
18853+ btr $16, %esi
18854+ mov %esi, %cr0
18855+#endif
18856+
18857+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18858+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18859+
18860+#ifdef CONFIG_PAX_KERNEXEC
18861+ bts $16, %esi
18862+ mov %esi, %cr0
18863+#endif
18864+
18865 pushl_cfi $__ESPFIX_SS
18866 pushl_cfi %eax /* new kernel esp */
18867 /* Disable interrupts, but do not irqtrace this section: we
18868@@ -619,20 +846,18 @@ work_resched:
18869 movl TI_flags(%ebp), %ecx
18870 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18871 # than syscall tracing?
18872- jz restore_all
18873+ jz restore_all_pax
18874 testb $_TIF_NEED_RESCHED, %cl
18875 jnz work_resched
18876
18877 work_notifysig: # deal with pending signals and
18878 # notify-resume requests
18879+ movl %esp, %eax
18880 #ifdef CONFIG_VM86
18881 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18882- movl %esp, %eax
18883 jne work_notifysig_v86 # returning to kernel-space or
18884 # vm86-space
18885 1:
18886-#else
18887- movl %esp, %eax
18888 #endif
18889 TRACE_IRQS_ON
18890 ENABLE_INTERRUPTS(CLBR_NONE)
18891@@ -653,7 +878,7 @@ work_notifysig_v86:
18892 movl %eax, %esp
18893 jmp 1b
18894 #endif
18895-END(work_pending)
18896+ENDPROC(work_pending)
18897
18898 # perform syscall exit tracing
18899 ALIGN
18900@@ -661,11 +886,14 @@ syscall_trace_entry:
18901 movl $-ENOSYS,PT_EAX(%esp)
18902 movl %esp, %eax
18903 call syscall_trace_enter
18904+
18905+ pax_erase_kstack
18906+
18907 /* What it returned is what we'll actually use. */
18908 cmpl $(NR_syscalls), %eax
18909 jnae syscall_call
18910 jmp syscall_exit
18911-END(syscall_trace_entry)
18912+ENDPROC(syscall_trace_entry)
18913
18914 # perform syscall exit tracing
18915 ALIGN
18916@@ -678,21 +906,25 @@ syscall_exit_work:
18917 movl %esp, %eax
18918 call syscall_trace_leave
18919 jmp resume_userspace
18920-END(syscall_exit_work)
18921+ENDPROC(syscall_exit_work)
18922 CFI_ENDPROC
18923
18924 RING0_INT_FRAME # can't unwind into user space anyway
18925 syscall_fault:
18926+#ifdef CONFIG_PAX_MEMORY_UDEREF
18927+ push %ss
18928+ pop %ds
18929+#endif
18930 ASM_CLAC
18931 GET_THREAD_INFO(%ebp)
18932 movl $-EFAULT,PT_EAX(%esp)
18933 jmp resume_userspace
18934-END(syscall_fault)
18935+ENDPROC(syscall_fault)
18936
18937 syscall_badsys:
18938 movl $-ENOSYS,PT_EAX(%esp)
18939 jmp resume_userspace
18940-END(syscall_badsys)
18941+ENDPROC(syscall_badsys)
18942 CFI_ENDPROC
18943 /*
18944 * End of kprobes section
18945@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
18946 * normal stack and adjusts ESP with the matching offset.
18947 */
18948 /* fixup the stack */
18949- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18950- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18951+#ifdef CONFIG_SMP
18952+ movl PER_CPU_VAR(cpu_number), %ebx
18953+ shll $PAGE_SHIFT_asm, %ebx
18954+ addl $cpu_gdt_table, %ebx
18955+#else
18956+ movl $cpu_gdt_table, %ebx
18957+#endif
18958+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18959+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18960 shl $16, %eax
18961 addl %esp, %eax /* the adjusted stack pointer */
18962 pushl_cfi $__KERNEL_DS
18963@@ -807,7 +1046,7 @@ vector=vector+1
18964 .endr
18965 2: jmp common_interrupt
18966 .endr
18967-END(irq_entries_start)
18968+ENDPROC(irq_entries_start)
18969
18970 .previous
18971 END(interrupt)
18972@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
18973 pushl_cfi $do_coprocessor_error
18974 jmp error_code
18975 CFI_ENDPROC
18976-END(coprocessor_error)
18977+ENDPROC(coprocessor_error)
18978
18979 ENTRY(simd_coprocessor_error)
18980 RING0_INT_FRAME
18981@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
18982 #endif
18983 jmp error_code
18984 CFI_ENDPROC
18985-END(simd_coprocessor_error)
18986+ENDPROC(simd_coprocessor_error)
18987
18988 ENTRY(device_not_available)
18989 RING0_INT_FRAME
18990@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
18991 pushl_cfi $do_device_not_available
18992 jmp error_code
18993 CFI_ENDPROC
18994-END(device_not_available)
18995+ENDPROC(device_not_available)
18996
18997 #ifdef CONFIG_PARAVIRT
18998 ENTRY(native_iret)
18999 iret
19000 _ASM_EXTABLE(native_iret, iret_exc)
19001-END(native_iret)
19002+ENDPROC(native_iret)
19003
19004 ENTRY(native_irq_enable_sysexit)
19005 sti
19006 sysexit
19007-END(native_irq_enable_sysexit)
19008+ENDPROC(native_irq_enable_sysexit)
19009 #endif
19010
19011 ENTRY(overflow)
19012@@ -910,7 +1149,7 @@ ENTRY(overflow)
19013 pushl_cfi $do_overflow
19014 jmp error_code
19015 CFI_ENDPROC
19016-END(overflow)
19017+ENDPROC(overflow)
19018
19019 ENTRY(bounds)
19020 RING0_INT_FRAME
19021@@ -919,7 +1158,7 @@ ENTRY(bounds)
19022 pushl_cfi $do_bounds
19023 jmp error_code
19024 CFI_ENDPROC
19025-END(bounds)
19026+ENDPROC(bounds)
19027
19028 ENTRY(invalid_op)
19029 RING0_INT_FRAME
19030@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
19031 pushl_cfi $do_invalid_op
19032 jmp error_code
19033 CFI_ENDPROC
19034-END(invalid_op)
19035+ENDPROC(invalid_op)
19036
19037 ENTRY(coprocessor_segment_overrun)
19038 RING0_INT_FRAME
19039@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
19040 pushl_cfi $do_coprocessor_segment_overrun
19041 jmp error_code
19042 CFI_ENDPROC
19043-END(coprocessor_segment_overrun)
19044+ENDPROC(coprocessor_segment_overrun)
19045
19046 ENTRY(invalid_TSS)
19047 RING0_EC_FRAME
19048@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
19049 pushl_cfi $do_invalid_TSS
19050 jmp error_code
19051 CFI_ENDPROC
19052-END(invalid_TSS)
19053+ENDPROC(invalid_TSS)
19054
19055 ENTRY(segment_not_present)
19056 RING0_EC_FRAME
19057@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
19058 pushl_cfi $do_segment_not_present
19059 jmp error_code
19060 CFI_ENDPROC
19061-END(segment_not_present)
19062+ENDPROC(segment_not_present)
19063
19064 ENTRY(stack_segment)
19065 RING0_EC_FRAME
19066@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
19067 pushl_cfi $do_stack_segment
19068 jmp error_code
19069 CFI_ENDPROC
19070-END(stack_segment)
19071+ENDPROC(stack_segment)
19072
19073 ENTRY(alignment_check)
19074 RING0_EC_FRAME
19075@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
19076 pushl_cfi $do_alignment_check
19077 jmp error_code
19078 CFI_ENDPROC
19079-END(alignment_check)
19080+ENDPROC(alignment_check)
19081
19082 ENTRY(divide_error)
19083 RING0_INT_FRAME
19084@@ -978,7 +1217,7 @@ ENTRY(divide_error)
19085 pushl_cfi $do_divide_error
19086 jmp error_code
19087 CFI_ENDPROC
19088-END(divide_error)
19089+ENDPROC(divide_error)
19090
19091 #ifdef CONFIG_X86_MCE
19092 ENTRY(machine_check)
19093@@ -988,7 +1227,7 @@ ENTRY(machine_check)
19094 pushl_cfi machine_check_vector
19095 jmp error_code
19096 CFI_ENDPROC
19097-END(machine_check)
19098+ENDPROC(machine_check)
19099 #endif
19100
19101 ENTRY(spurious_interrupt_bug)
19102@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
19103 pushl_cfi $do_spurious_interrupt_bug
19104 jmp error_code
19105 CFI_ENDPROC
19106-END(spurious_interrupt_bug)
19107+ENDPROC(spurious_interrupt_bug)
19108 /*
19109 * End of kprobes section
19110 */
19111@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
19112
19113 ENTRY(mcount)
19114 ret
19115-END(mcount)
19116+ENDPROC(mcount)
19117
19118 ENTRY(ftrace_caller)
19119 cmpl $0, function_trace_stop
19120@@ -1134,7 +1373,7 @@ ftrace_graph_call:
19121 .globl ftrace_stub
19122 ftrace_stub:
19123 ret
19124-END(ftrace_caller)
19125+ENDPROC(ftrace_caller)
19126
19127 ENTRY(ftrace_regs_caller)
19128 pushf /* push flags before compare (in cs location) */
19129@@ -1235,7 +1474,7 @@ trace:
19130 popl %ecx
19131 popl %eax
19132 jmp ftrace_stub
19133-END(mcount)
19134+ENDPROC(mcount)
19135 #endif /* CONFIG_DYNAMIC_FTRACE */
19136 #endif /* CONFIG_FUNCTION_TRACER */
19137
19138@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
19139 popl %ecx
19140 popl %eax
19141 ret
19142-END(ftrace_graph_caller)
19143+ENDPROC(ftrace_graph_caller)
19144
19145 .globl return_to_handler
19146 return_to_handler:
19147@@ -1309,15 +1548,18 @@ error_code:
19148 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
19149 REG_TO_PTGS %ecx
19150 SET_KERNEL_GS %ecx
19151- movl $(__USER_DS), %ecx
19152+ movl $(__KERNEL_DS), %ecx
19153 movl %ecx, %ds
19154 movl %ecx, %es
19155+
19156+ pax_enter_kernel
19157+
19158 TRACE_IRQS_OFF
19159 movl %esp,%eax # pt_regs pointer
19160 call *%edi
19161 jmp ret_from_exception
19162 CFI_ENDPROC
19163-END(page_fault)
19164+ENDPROC(page_fault)
19165
19166 /*
19167 * Debug traps and NMI can happen at the one SYSENTER instruction
19168@@ -1360,7 +1602,7 @@ debug_stack_correct:
19169 call do_debug
19170 jmp ret_from_exception
19171 CFI_ENDPROC
19172-END(debug)
19173+ENDPROC(debug)
19174
19175 /*
19176 * NMI is doubly nasty. It can happen _while_ we're handling
19177@@ -1398,6 +1640,9 @@ nmi_stack_correct:
19178 xorl %edx,%edx # zero error code
19179 movl %esp,%eax # pt_regs pointer
19180 call do_nmi
19181+
19182+ pax_exit_kernel
19183+
19184 jmp restore_all_notrace
19185 CFI_ENDPROC
19186
19187@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
19188 FIXUP_ESPFIX_STACK # %eax == %esp
19189 xorl %edx,%edx # zero error code
19190 call do_nmi
19191+
19192+ pax_exit_kernel
19193+
19194 RESTORE_REGS
19195 lss 12+4(%esp), %esp # back to espfix stack
19196 CFI_ADJUST_CFA_OFFSET -24
19197 jmp irq_return
19198 CFI_ENDPROC
19199-END(nmi)
19200+ENDPROC(nmi)
19201
19202 ENTRY(int3)
19203 RING0_INT_FRAME
19204@@ -1452,14 +1700,14 @@ ENTRY(int3)
19205 call do_int3
19206 jmp ret_from_exception
19207 CFI_ENDPROC
19208-END(int3)
19209+ENDPROC(int3)
19210
19211 ENTRY(general_protection)
19212 RING0_EC_FRAME
19213 pushl_cfi $do_general_protection
19214 jmp error_code
19215 CFI_ENDPROC
19216-END(general_protection)
19217+ENDPROC(general_protection)
19218
19219 #ifdef CONFIG_KVM_GUEST
19220 ENTRY(async_page_fault)
19221@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
19222 pushl_cfi $do_async_page_fault
19223 jmp error_code
19224 CFI_ENDPROC
19225-END(async_page_fault)
19226+ENDPROC(async_page_fault)
19227 #endif
19228
19229 /*
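
Most of the bulk added to entry_32.S is three mechanisms. __SAVE_ALL is parameterized so the PaX configurations enter the kernel with __KERNEL_DS in the data segments; pax_enter_kernel/pax_exit_kernel flip CR0.WP and hop through __KERNEXEC_KERNEL_CS so kernel text can stay read-only the rest of the time; and pax_erase_kstack implements the STACKLEAK countermeasure: on the way out of a syscall it scans down from the lowest point the stack reached, looks for a run of 2*16 untouched poison words (-0xBEEF), and re-poisons everything between there and the live frame so stale kernel data cannot leak into the next syscall. The scan is the subtle part; a userspace C model of it, with the edge handling simplified:

#include <stdio.h>

#define POISON 0xffff4111UL     /* -0xBEEF as a 32-bit value */
#define RUN    32               /* poison words that mark "never written" */

/* model of pax_erase_kstack's core loop: find the boundary between the
 * region a syscall dirtied and the still-poisoned region below it,
 * then re-poison everything up to the current stack pointer */
static void erase_kstack_sketch(unsigned long *stack_bottom,
                                unsigned long *lowest, unsigned long *sp)
{
        unsigned long *p = lowest;

        while (p > stack_bottom) {
                while (p > stack_bottom && *p != POISON)
                        p--;                    /* repne scasl, downward */
                if (p - stack_bottom < RUN)
                        break;                  /* too close to the bottom */
                unsigned long *q = p;
                while (q > p - RUN && *q == POISON)
                        q--;                    /* repe scasl */
                if (q == p - RUN)
                        break;                  /* a full run: boundary found */
                p = q;                          /* lone poison word, keep going */
        }

        while (p < sp)                          /* rep stosl */
                *p++ = POISON;
}

int main(void)
{
        static unsigned long stk[512];
        unsigned long *sp = stk + 400;          /* pretend live frame */
        size_t i;

        for (i = 0; i < 512; i++)
                stk[i] = POISON;
        for (i = 100; i < 400; i++)             /* dirty the used region */
                stk[i] = i;

        erase_kstack_sketch(stk, stk + 100, sp);
        printf("slot 250 after erase: %#lx\n", stk[250]);  /* poison again */
        return 0;
}

The 64-bit twin added below in entry_64.S is the same algorithm widened to quadwords (repne scasq, run length 2*8).
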
19230diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
19231index cb3c591..bc63707 100644
19232--- a/arch/x86/kernel/entry_64.S
19233+++ b/arch/x86/kernel/entry_64.S
19234@@ -59,6 +59,8 @@
19235 #include <asm/context_tracking.h>
19236 #include <asm/smap.h>
19237 #include <linux/err.h>
19238+#include <asm/pgtable.h>
19239+#include <asm/alternative-asm.h>
19240
19241 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
19242 #include <linux/elf-em.h>
19243@@ -80,8 +82,9 @@
19244 #ifdef CONFIG_DYNAMIC_FTRACE
19245
19246 ENTRY(function_hook)
19247+ pax_force_retaddr
19248 retq
19249-END(function_hook)
19250+ENDPROC(function_hook)
19251
19252 /* skip is set if stack has been adjusted */
19253 .macro ftrace_caller_setup skip=0
19254@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
19255 #endif
19256
19257 GLOBAL(ftrace_stub)
19258+ pax_force_retaddr
19259 retq
19260-END(ftrace_caller)
19261+ENDPROC(ftrace_caller)
19262
19263 ENTRY(ftrace_regs_caller)
19264 /* Save the current flags before compare (in SS location)*/
19265@@ -191,7 +195,7 @@ ftrace_restore_flags:
19266 popfq
19267 jmp ftrace_stub
19268
19269-END(ftrace_regs_caller)
19270+ENDPROC(ftrace_regs_caller)
19271
19272
19273 #else /* ! CONFIG_DYNAMIC_FTRACE */
19274@@ -212,6 +216,7 @@ ENTRY(function_hook)
19275 #endif
19276
19277 GLOBAL(ftrace_stub)
19278+ pax_force_retaddr
19279 retq
19280
19281 trace:
19282@@ -225,12 +230,13 @@ trace:
19283 #endif
19284 subq $MCOUNT_INSN_SIZE, %rdi
19285
19286+ pax_force_fptr ftrace_trace_function
19287 call *ftrace_trace_function
19288
19289 MCOUNT_RESTORE_FRAME
19290
19291 jmp ftrace_stub
19292-END(function_hook)
19293+ENDPROC(function_hook)
19294 #endif /* CONFIG_DYNAMIC_FTRACE */
19295 #endif /* CONFIG_FUNCTION_TRACER */
19296
19297@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
19298
19299 MCOUNT_RESTORE_FRAME
19300
19301+ pax_force_retaddr
19302 retq
19303-END(ftrace_graph_caller)
19304+ENDPROC(ftrace_graph_caller)
19305
19306 GLOBAL(return_to_handler)
19307 subq $24, %rsp
19308@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
19309 movq 8(%rsp), %rdx
19310 movq (%rsp), %rax
19311 addq $24, %rsp
19312+ pax_force_fptr %rdi
19313 jmp *%rdi
19314+ENDPROC(return_to_handler)
19315 #endif
19316
19317
19318@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
19319 ENDPROC(native_usergs_sysret64)
19320 #endif /* CONFIG_PARAVIRT */
19321
19322+ .macro ljmpq sel, off
19323+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
19324+ .byte 0x48; ljmp *1234f(%rip)
19325+ .pushsection .rodata
19326+ .align 16
19327+ 1234: .quad \off; .word \sel
19328+ .popsection
19329+#else
19330+ pushq $\sel
19331+ pushq $\off
19332+ lretq
19333+#endif
19334+ .endm
19335+
19336+ .macro pax_enter_kernel
19337+ pax_set_fptr_mask
19338+#ifdef CONFIG_PAX_KERNEXEC
19339+ call pax_enter_kernel
19340+#endif
19341+ .endm
19342+
19343+ .macro pax_exit_kernel
19344+#ifdef CONFIG_PAX_KERNEXEC
19345+ call pax_exit_kernel
19346+#endif
19347+ .endm
19348+
19349+#ifdef CONFIG_PAX_KERNEXEC
19350+ENTRY(pax_enter_kernel)
19351+ pushq %rdi
19352+
19353+#ifdef CONFIG_PARAVIRT
19354+ PV_SAVE_REGS(CLBR_RDI)
19355+#endif
19356+
19357+ GET_CR0_INTO_RDI
19358+ bts $16,%rdi
19359+ jnc 3f
19360+ mov %cs,%edi
19361+ cmp $__KERNEL_CS,%edi
19362+ jnz 2f
19363+1:
19364+
19365+#ifdef CONFIG_PARAVIRT
19366+ PV_RESTORE_REGS(CLBR_RDI)
19367+#endif
19368+
19369+ popq %rdi
19370+ pax_force_retaddr
19371+ retq
19372+
19373+2: ljmpq __KERNEL_CS,1f
19374+3: ljmpq __KERNEXEC_KERNEL_CS,4f
19375+4: SET_RDI_INTO_CR0
19376+ jmp 1b
19377+ENDPROC(pax_enter_kernel)
19378+
19379+ENTRY(pax_exit_kernel)
19380+ pushq %rdi
19381+
19382+#ifdef CONFIG_PARAVIRT
19383+ PV_SAVE_REGS(CLBR_RDI)
19384+#endif
19385+
19386+ mov %cs,%rdi
19387+ cmp $__KERNEXEC_KERNEL_CS,%edi
19388+ jz 2f
19389+1:
19390+
19391+#ifdef CONFIG_PARAVIRT
19392+ PV_RESTORE_REGS(CLBR_RDI);
19393+#endif
19394+
19395+ popq %rdi
19396+ pax_force_retaddr
19397+ retq
19398+
19399+2: GET_CR0_INTO_RDI
19400+ btr $16,%rdi
19401+ ljmpq __KERNEL_CS,3f
19402+3: SET_RDI_INTO_CR0
19403+ jmp 1b
19404+ENDPROC(pax_exit_kernel)
19405+#endif
19406+
19407+ .macro pax_enter_kernel_user
19408+ pax_set_fptr_mask
19409+#ifdef CONFIG_PAX_MEMORY_UDEREF
19410+ call pax_enter_kernel_user
19411+#endif
19412+ .endm
19413+
19414+ .macro pax_exit_kernel_user
19415+#ifdef CONFIG_PAX_MEMORY_UDEREF
19416+ call pax_exit_kernel_user
19417+#endif
19418+#ifdef CONFIG_PAX_RANDKSTACK
19419+ pushq %rax
19420+ call pax_randomize_kstack
19421+ popq %rax
19422+#endif
19423+ .endm
19424+
19425+#ifdef CONFIG_PAX_MEMORY_UDEREF
19426+ENTRY(pax_enter_kernel_user)
19427+ pushq %rdi
19428+ pushq %rbx
19429+
19430+#ifdef CONFIG_PARAVIRT
19431+ PV_SAVE_REGS(CLBR_RDI)
19432+#endif
19433+
19434+ GET_CR3_INTO_RDI
19435+ mov %rdi,%rbx
19436+ add $__START_KERNEL_map,%rbx
19437+ sub phys_base(%rip),%rbx
19438+
19439+#ifdef CONFIG_PARAVIRT
19440+ pushq %rdi
19441+ cmpl $0, pv_info+PARAVIRT_enabled
19442+ jz 1f
19443+ i = 0
19444+ .rept USER_PGD_PTRS
19445+ mov i*8(%rbx),%rsi
19446+ mov $0,%sil
19447+ lea i*8(%rbx),%rdi
19448+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19449+ i = i + 1
19450+ .endr
19451+ jmp 2f
19452+1:
19453+#endif
19454+
19455+ i = 0
19456+ .rept USER_PGD_PTRS
19457+ movb $0,i*8(%rbx)
19458+ i = i + 1
19459+ .endr
19460+
19461+#ifdef CONFIG_PARAVIRT
19462+2: popq %rdi
19463+#endif
19464+ SET_RDI_INTO_CR3
19465+
19466+#ifdef CONFIG_PAX_KERNEXEC
19467+ GET_CR0_INTO_RDI
19468+ bts $16,%rdi
19469+ SET_RDI_INTO_CR0
19470+#endif
19471+
19472+#ifdef CONFIG_PARAVIRT
19473+ PV_RESTORE_REGS(CLBR_RDI)
19474+#endif
19475+
19476+ popq %rbx
19477+ popq %rdi
19478+ pax_force_retaddr
19479+ retq
19480+ENDPROC(pax_enter_kernel_user)
19481+
19482+ENTRY(pax_exit_kernel_user)
19483+ push %rdi
19484+
19485+#ifdef CONFIG_PARAVIRT
19486+ pushq %rbx
19487+ PV_SAVE_REGS(CLBR_RDI)
19488+#endif
19489+
19490+#ifdef CONFIG_PAX_KERNEXEC
19491+ GET_CR0_INTO_RDI
19492+ btr $16,%rdi
19493+ SET_RDI_INTO_CR0
19494+#endif
19495+
19496+ GET_CR3_INTO_RDI
19497+ add $__START_KERNEL_map,%rdi
19498+ sub phys_base(%rip),%rdi
19499+
19500+#ifdef CONFIG_PARAVIRT
19501+ cmpl $0, pv_info+PARAVIRT_enabled
19502+ jz 1f
19503+ mov %rdi,%rbx
19504+ i = 0
19505+ .rept USER_PGD_PTRS
19506+ mov i*8(%rbx),%rsi
19507+ mov $0x67,%sil
19508+ lea i*8(%rbx),%rdi
19509+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19510+ i = i + 1
19511+ .endr
19512+ jmp 2f
19513+1:
19514+#endif
19515+
19516+ i = 0
19517+ .rept USER_PGD_PTRS
19518+ movb $0x67,i*8(%rdi)
19519+ i = i + 1
19520+ .endr
19521+
19522+#ifdef CONFIG_PARAVIRT
19523+2: PV_RESTORE_REGS(CLBR_RDI)
19524+ popq %rbx
19525+#endif
19526+
19527+ popq %rdi
19528+ pax_force_retaddr
19529+ retq
19530+ENDPROC(pax_exit_kernel_user)
19531+#endif
19532+
19533+.macro pax_erase_kstack
19534+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19535+ call pax_erase_kstack
19536+#endif
19537+.endm
19538+
19539+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19540+ENTRY(pax_erase_kstack)
19541+ pushq %rdi
19542+ pushq %rcx
19543+ pushq %rax
19544+ pushq %r11
19545+
19546+ GET_THREAD_INFO(%r11)
19547+ mov TI_lowest_stack(%r11), %rdi
19548+ mov $-0xBEEF, %rax
19549+ std
19550+
19551+1: mov %edi, %ecx
19552+ and $THREAD_SIZE_asm - 1, %ecx
19553+ shr $3, %ecx
19554+ repne scasq
19555+ jecxz 2f
19556+
19557+ cmp $2*8, %ecx
19558+ jc 2f
19559+
19560+ mov $2*8, %ecx
19561+ repe scasq
19562+ jecxz 2f
19563+ jne 1b
19564+
19565+2: cld
19566+ mov %esp, %ecx
19567+ sub %edi, %ecx
19568+
19569+ cmp $THREAD_SIZE_asm, %rcx
19570+ jb 3f
19571+ ud2
19572+3:
19573+
19574+ shr $3, %ecx
19575+ rep stosq
19576+
19577+ mov TI_task_thread_sp0(%r11), %rdi
19578+ sub $256, %rdi
19579+ mov %rdi, TI_lowest_stack(%r11)
19580+
19581+ popq %r11
19582+ popq %rax
19583+ popq %rcx
19584+ popq %rdi
19585+ pax_force_retaddr
19586+ ret
19587+ENDPROC(pax_erase_kstack)
19588+#endif
19589
19590 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19591 #ifdef CONFIG_TRACE_IRQFLAGS
19592@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
19593 .endm
19594
19595 .macro UNFAKE_STACK_FRAME
19596- addq $8*6, %rsp
19597- CFI_ADJUST_CFA_OFFSET -(6*8)
19598+ addq $8*6 + ARG_SKIP, %rsp
19599+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19600 .endm
19601
19602 /*
19603@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
19604 movq %rsp, %rsi
19605
19606 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19607- testl $3, CS-RBP(%rsi)
19608+ testb $3, CS-RBP(%rsi)
19609 je 1f
19610 SWAPGS
19611 /*
19612@@ -498,9 +774,10 @@ ENTRY(save_rest)
19613 movq_cfi r15, R15+16
19614 movq %r11, 8(%rsp) /* return address */
19615 FIXUP_TOP_OF_STACK %r11, 16
19616+ pax_force_retaddr
19617 ret
19618 CFI_ENDPROC
19619-END(save_rest)
19620+ENDPROC(save_rest)
19621
19622 /* save complete stack frame */
19623 .pushsection .kprobes.text, "ax"
19624@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
19625 js 1f /* negative -> in kernel */
19626 SWAPGS
19627 xorl %ebx,%ebx
19628-1: ret
19629+1: pax_force_retaddr_bts
19630+ ret
19631 CFI_ENDPROC
19632-END(save_paranoid)
19633+ENDPROC(save_paranoid)
19634 .popsection
19635
19636 /*
19637@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
19638
19639 RESTORE_REST
19640
19641- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19642+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19643 jz 1f
19644
19645 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19646@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
19647 RESTORE_REST
19648 jmp int_ret_from_sys_call
19649 CFI_ENDPROC
19650-END(ret_from_fork)
19651+ENDPROC(ret_from_fork)
19652
19653 /*
19654 * System call entry. Up to 6 arguments in registers are supported.
19655@@ -608,7 +886,7 @@ END(ret_from_fork)
19656 ENTRY(system_call)
19657 CFI_STARTPROC simple
19658 CFI_SIGNAL_FRAME
19659- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19660+ CFI_DEF_CFA rsp,0
19661 CFI_REGISTER rip,rcx
19662 /*CFI_REGISTER rflags,r11*/
19663 SWAPGS_UNSAFE_STACK
19664@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
19665
19666 movq %rsp,PER_CPU_VAR(old_rsp)
19667 movq PER_CPU_VAR(kernel_stack),%rsp
19668+ SAVE_ARGS 8*6,0
19669+ pax_enter_kernel_user
19670+
19671+#ifdef CONFIG_PAX_RANDKSTACK
19672+ pax_erase_kstack
19673+#endif
19674+
19675 /*
19676 * No need to follow this irqs off/on section - it's straight
19677 * and short:
19678 */
19679 ENABLE_INTERRUPTS(CLBR_NONE)
19680- SAVE_ARGS 8,0
19681 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19682 movq %rcx,RIP-ARGOFFSET(%rsp)
19683 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19684- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19685+ GET_THREAD_INFO(%rcx)
19686+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19687 jnz tracesys
19688 system_call_fastpath:
19689 #if __SYSCALL_MASK == ~0
19690@@ -640,7 +925,7 @@ system_call_fastpath:
19691 cmpl $__NR_syscall_max,%eax
19692 #endif
19693 ja badsys
19694- movq %r10,%rcx
19695+ movq R10-ARGOFFSET(%rsp),%rcx
19696 call *sys_call_table(,%rax,8) # XXX: rip relative
19697 movq %rax,RAX-ARGOFFSET(%rsp)
19698 /*
19699@@ -654,10 +939,13 @@ sysret_check:
19700 LOCKDEP_SYS_EXIT
19701 DISABLE_INTERRUPTS(CLBR_NONE)
19702 TRACE_IRQS_OFF
19703- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19704+ GET_THREAD_INFO(%rcx)
19705+ movl TI_flags(%rcx),%edx
19706 andl %edi,%edx
19707 jnz sysret_careful
19708 CFI_REMEMBER_STATE
19709+ pax_exit_kernel_user
19710+ pax_erase_kstack
19711 /*
19712 * sysretq will re-enable interrupts:
19713 */
19714@@ -709,14 +997,18 @@ badsys:
19715 * jump back to the normal fast path.
19716 */
19717 auditsys:
19718- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19719+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19720 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19721 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19722 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19723 movq %rax,%rsi /* 2nd arg: syscall number */
19724 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19725 call __audit_syscall_entry
19726+
19727+ pax_erase_kstack
19728+
19729 LOAD_ARGS 0 /* reload call-clobbered registers */
19730+ pax_set_fptr_mask
19731 jmp system_call_fastpath
19732
19733 /*
19734@@ -737,7 +1029,7 @@ sysret_audit:
19735 /* Do syscall tracing */
19736 tracesys:
19737 #ifdef CONFIG_AUDITSYSCALL
19738- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19739+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19740 jz auditsys
19741 #endif
19742 SAVE_REST
19743@@ -745,12 +1037,16 @@ tracesys:
19744 FIXUP_TOP_OF_STACK %rdi
19745 movq %rsp,%rdi
19746 call syscall_trace_enter
19747+
19748+ pax_erase_kstack
19749+
19750 /*
19751 * Reload arg registers from stack in case ptrace changed them.
19752 * We don't reload %rax because syscall_trace_enter() returned
19753 * the value it wants us to use in the table lookup.
19754 */
19755 LOAD_ARGS ARGOFFSET, 1
19756+ pax_set_fptr_mask
19757 RESTORE_REST
19758 #if __SYSCALL_MASK == ~0
19759 cmpq $__NR_syscall_max,%rax
19760@@ -759,7 +1055,7 @@ tracesys:
19761 cmpl $__NR_syscall_max,%eax
19762 #endif
19763 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19764- movq %r10,%rcx /* fixup for C */
19765+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19766 call *sys_call_table(,%rax,8)
19767 movq %rax,RAX-ARGOFFSET(%rsp)
19768 /* Use IRET because user could have changed frame */
19769@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
19770 andl %edi,%edx
19771 jnz int_careful
19772 andl $~TS_COMPAT,TI_status(%rcx)
19773- jmp retint_swapgs
19774+ pax_exit_kernel_user
19775+ pax_erase_kstack
19776+ jmp retint_swapgs_pax
19777
19778 /* Either reschedule or signal or syscall exit tracking needed. */
19779 /* First do a reschedule test. */
19780@@ -826,7 +1124,7 @@ int_restore_rest:
19781 TRACE_IRQS_OFF
19782 jmp int_with_check
19783 CFI_ENDPROC
19784-END(system_call)
19785+ENDPROC(system_call)
19786
19787 /*
19788 * Certain special system calls that need to save a complete full stack frame.
19789@@ -842,7 +1140,7 @@ ENTRY(\label)
19790 call \func
19791 jmp ptregscall_common
19792 CFI_ENDPROC
19793-END(\label)
19794+ENDPROC(\label)
19795 .endm
19796
19797 .macro FORK_LIKE func
19798@@ -856,9 +1154,10 @@ ENTRY(stub_\func)
19799 DEFAULT_FRAME 0 8 /* offset 8: return address */
19800 call sys_\func
19801 RESTORE_TOP_OF_STACK %r11, 8
19802+ pax_force_retaddr
19803 ret $REST_SKIP /* pop extended registers */
19804 CFI_ENDPROC
19805-END(stub_\func)
19806+ENDPROC(stub_\func)
19807 .endm
19808
19809 FORK_LIKE clone
19810@@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
19811 movq_cfi_restore R12+8, r12
19812 movq_cfi_restore RBP+8, rbp
19813 movq_cfi_restore RBX+8, rbx
19814+ pax_force_retaddr
19815 ret $REST_SKIP /* pop extended registers */
19816 CFI_ENDPROC
19817-END(ptregscall_common)
19818+ENDPROC(ptregscall_common)
19819
19820 ENTRY(stub_execve)
19821 CFI_STARTPROC
19822@@ -891,7 +1191,7 @@ ENTRY(stub_execve)
19823 RESTORE_REST
19824 jmp int_ret_from_sys_call
19825 CFI_ENDPROC
19826-END(stub_execve)
19827+ENDPROC(stub_execve)
19828
19829 /*
19830 * sigreturn is special because it needs to restore all registers on return.
19831@@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
19832 RESTORE_REST
19833 jmp int_ret_from_sys_call
19834 CFI_ENDPROC
19835-END(stub_rt_sigreturn)
19836+ENDPROC(stub_rt_sigreturn)
19837
19838 #ifdef CONFIG_X86_X32_ABI
19839 ENTRY(stub_x32_rt_sigreturn)
19840@@ -975,7 +1275,7 @@ vector=vector+1
19841 2: jmp common_interrupt
19842 .endr
19843 CFI_ENDPROC
19844-END(irq_entries_start)
19845+ENDPROC(irq_entries_start)
19846
19847 .previous
19848 END(interrupt)
19849@@ -995,6 +1295,16 @@ END(interrupt)
19850 subq $ORIG_RAX-RBP, %rsp
19851 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19852 SAVE_ARGS_IRQ
19853+#ifdef CONFIG_PAX_MEMORY_UDEREF
19854+ testb $3, CS(%rdi)
19855+ jnz 1f
19856+ pax_enter_kernel
19857+ jmp 2f
19858+1: pax_enter_kernel_user
19859+2:
19860+#else
19861+ pax_enter_kernel
19862+#endif
19863 call \func
19864 .endm
19865
19866@@ -1027,7 +1337,7 @@ ret_from_intr:
19867
19868 exit_intr:
19869 GET_THREAD_INFO(%rcx)
19870- testl $3,CS-ARGOFFSET(%rsp)
19871+ testb $3,CS-ARGOFFSET(%rsp)
19872 je retint_kernel
19873
19874 /* Interrupt came from user space */
19875@@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
19876 * The iretq could re-enable interrupts:
19877 */
19878 DISABLE_INTERRUPTS(CLBR_ANY)
19879+ pax_exit_kernel_user
19880+retint_swapgs_pax:
19881 TRACE_IRQS_IRETQ
19882 SWAPGS
19883 jmp restore_args
19884
19885 retint_restore_args: /* return to kernel space */
19886 DISABLE_INTERRUPTS(CLBR_ANY)
19887+ pax_exit_kernel
19888+ pax_force_retaddr (RIP-ARGOFFSET)
19889 /*
19890 * The iretq could re-enable interrupts:
19891 */
19892@@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
19893 #endif
19894
19895 CFI_ENDPROC
19896-END(common_interrupt)
19897+ENDPROC(common_interrupt)
19898 /*
19899 * End of kprobes section
19900 */
19901@@ -1155,7 +1469,7 @@ ENTRY(\sym)
19902 interrupt \do_sym
19903 jmp ret_from_intr
19904 CFI_ENDPROC
19905-END(\sym)
19906+ENDPROC(\sym)
19907 .endm
19908
19909 #ifdef CONFIG_SMP
19910@@ -1211,12 +1525,22 @@ ENTRY(\sym)
19911 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19912 call error_entry
19913 DEFAULT_FRAME 0
19914+#ifdef CONFIG_PAX_MEMORY_UDEREF
19915+ testb $3, CS(%rsp)
19916+ jnz 1f
19917+ pax_enter_kernel
19918+ jmp 2f
19919+1: pax_enter_kernel_user
19920+2:
19921+#else
19922+ pax_enter_kernel
19923+#endif
19924 movq %rsp,%rdi /* pt_regs pointer */
19925 xorl %esi,%esi /* no error code */
19926 call \do_sym
19927 jmp error_exit /* %ebx: no swapgs flag */
19928 CFI_ENDPROC
19929-END(\sym)
19930+ENDPROC(\sym)
19931 .endm
19932
19933 .macro paranoidzeroentry sym do_sym
19934@@ -1229,15 +1553,25 @@ ENTRY(\sym)
19935 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19936 call save_paranoid
19937 TRACE_IRQS_OFF
19938+#ifdef CONFIG_PAX_MEMORY_UDEREF
19939+ testb $3, CS(%rsp)
19940+ jnz 1f
19941+ pax_enter_kernel
19942+ jmp 2f
19943+1: pax_enter_kernel_user
19944+2:
19945+#else
19946+ pax_enter_kernel
19947+#endif
19948 movq %rsp,%rdi /* pt_regs pointer */
19949 xorl %esi,%esi /* no error code */
19950 call \do_sym
19951 jmp paranoid_exit /* %ebx: no swapgs flag */
19952 CFI_ENDPROC
19953-END(\sym)
19954+ENDPROC(\sym)
19955 .endm
19956
19957-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19958+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19959 .macro paranoidzeroentry_ist sym do_sym ist
19960 ENTRY(\sym)
19961 INTR_FRAME
19962@@ -1248,14 +1582,30 @@ ENTRY(\sym)
19963 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19964 call save_paranoid
19965 TRACE_IRQS_OFF_DEBUG
19966+#ifdef CONFIG_PAX_MEMORY_UDEREF
19967+ testb $3, CS(%rsp)
19968+ jnz 1f
19969+ pax_enter_kernel
19970+ jmp 2f
19971+1: pax_enter_kernel_user
19972+2:
19973+#else
19974+ pax_enter_kernel
19975+#endif
19976 movq %rsp,%rdi /* pt_regs pointer */
19977 xorl %esi,%esi /* no error code */
19978+#ifdef CONFIG_SMP
19979+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19980+ lea init_tss(%r12), %r12
19981+#else
19982+ lea init_tss(%rip), %r12
19983+#endif
19984 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19985 call \do_sym
19986 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19987 jmp paranoid_exit /* %ebx: no swapgs flag */
19988 CFI_ENDPROC
19989-END(\sym)
19990+ENDPROC(\sym)
19991 .endm
19992
19993 .macro errorentry sym do_sym
19994@@ -1267,13 +1617,23 @@ ENTRY(\sym)
19995 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19996 call error_entry
19997 DEFAULT_FRAME 0
19998+#ifdef CONFIG_PAX_MEMORY_UDEREF
19999+ testb $3, CS(%rsp)
20000+ jnz 1f
20001+ pax_enter_kernel
20002+ jmp 2f
20003+1: pax_enter_kernel_user
20004+2:
20005+#else
20006+ pax_enter_kernel
20007+#endif
20008 movq %rsp,%rdi /* pt_regs pointer */
20009 movq ORIG_RAX(%rsp),%rsi /* get error code */
20010 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
20011 call \do_sym
20012 jmp error_exit /* %ebx: no swapgs flag */
20013 CFI_ENDPROC
20014-END(\sym)
20015+ENDPROC(\sym)
20016 .endm
20017
20018 /* error code is on the stack already */
20019@@ -1287,13 +1647,23 @@ ENTRY(\sym)
20020 call save_paranoid
20021 DEFAULT_FRAME 0
20022 TRACE_IRQS_OFF
20023+#ifdef CONFIG_PAX_MEMORY_UDEREF
20024+ testb $3, CS(%rsp)
20025+ jnz 1f
20026+ pax_enter_kernel
20027+ jmp 2f
20028+1: pax_enter_kernel_user
20029+2:
20030+#else
20031+ pax_enter_kernel
20032+#endif
20033 movq %rsp,%rdi /* pt_regs pointer */
20034 movq ORIG_RAX(%rsp),%rsi /* get error code */
20035 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
20036 call \do_sym
20037 jmp paranoid_exit /* %ebx: no swapgs flag */
20038 CFI_ENDPROC
20039-END(\sym)
20040+ENDPROC(\sym)
20041 .endm
20042
20043 zeroentry divide_error do_divide_error
20044@@ -1323,9 +1693,10 @@ gs_change:
20045 2: mfence /* workaround */
20046 SWAPGS
20047 popfq_cfi
20048+ pax_force_retaddr
20049 ret
20050 CFI_ENDPROC
20051-END(native_load_gs_index)
20052+ENDPROC(native_load_gs_index)
20053
20054 _ASM_EXTABLE(gs_change,bad_gs)
20055 .section .fixup,"ax"
20056@@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
20057 CFI_DEF_CFA_REGISTER rsp
20058 CFI_ADJUST_CFA_OFFSET -8
20059 decl PER_CPU_VAR(irq_count)
20060+ pax_force_retaddr
20061 ret
20062 CFI_ENDPROC
20063-END(call_softirq)
20064+ENDPROC(call_softirq)
20065
20066 #ifdef CONFIG_XEN
20067 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
20068@@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
20069 decl PER_CPU_VAR(irq_count)
20070 jmp error_exit
20071 CFI_ENDPROC
20072-END(xen_do_hypervisor_callback)
20073+ENDPROC(xen_do_hypervisor_callback)
20074
20075 /*
20076 * Hypervisor uses this for application faults while it executes.
20077@@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
20078 SAVE_ALL
20079 jmp error_exit
20080 CFI_ENDPROC
20081-END(xen_failsafe_callback)
20082+ENDPROC(xen_failsafe_callback)
20083
20084 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
20085 xen_hvm_callback_vector xen_evtchn_do_upcall
20086@@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
20087 TRACE_IRQS_OFF_DEBUG
20088 testl %ebx,%ebx /* swapgs needed? */
20089 jnz paranoid_restore
20090- testl $3,CS(%rsp)
20091+ testb $3,CS(%rsp)
20092 jnz paranoid_userspace
20093+#ifdef CONFIG_PAX_MEMORY_UDEREF
20094+ pax_exit_kernel
20095+ TRACE_IRQS_IRETQ 0
20096+ SWAPGS_UNSAFE_STACK
20097+ RESTORE_ALL 8
20098+ pax_force_retaddr_bts
20099+ jmp irq_return
20100+#endif
20101 paranoid_swapgs:
20102+#ifdef CONFIG_PAX_MEMORY_UDEREF
20103+ pax_exit_kernel_user
20104+#else
20105+ pax_exit_kernel
20106+#endif
20107 TRACE_IRQS_IRETQ 0
20108 SWAPGS_UNSAFE_STACK
20109 RESTORE_ALL 8
20110 jmp irq_return
20111 paranoid_restore:
20112+ pax_exit_kernel
20113 TRACE_IRQS_IRETQ_DEBUG 0
20114 RESTORE_ALL 8
20115+ pax_force_retaddr_bts
20116 jmp irq_return
20117 paranoid_userspace:
20118 GET_THREAD_INFO(%rcx)
20119@@ -1539,7 +1926,7 @@ paranoid_schedule:
20120 TRACE_IRQS_OFF
20121 jmp paranoid_userspace
20122 CFI_ENDPROC
20123-END(paranoid_exit)
20124+ENDPROC(paranoid_exit)
20125
20126 /*
20127 * Exception entry point. This expects an error code/orig_rax on the stack.
20128@@ -1566,12 +1953,13 @@ ENTRY(error_entry)
20129 movq_cfi r14, R14+8
20130 movq_cfi r15, R15+8
20131 xorl %ebx,%ebx
20132- testl $3,CS+8(%rsp)
20133+ testb $3,CS+8(%rsp)
20134 je error_kernelspace
20135 error_swapgs:
20136 SWAPGS
20137 error_sti:
20138 TRACE_IRQS_OFF
20139+ pax_force_retaddr_bts
20140 ret
20141
20142 /*
20143@@ -1598,7 +1986,7 @@ bstep_iret:
20144 movq %rcx,RIP+8(%rsp)
20145 jmp error_swapgs
20146 CFI_ENDPROC
20147-END(error_entry)
20148+ENDPROC(error_entry)
20149
20150
20151 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
20152@@ -1618,7 +2006,7 @@ ENTRY(error_exit)
20153 jnz retint_careful
20154 jmp retint_swapgs
20155 CFI_ENDPROC
20156-END(error_exit)
20157+ENDPROC(error_exit)
20158
20159 /*
20160 * Test if a given stack is an NMI stack or not.
20161@@ -1676,9 +2064,11 @@ ENTRY(nmi)
20162 * If %cs was not the kernel segment, then the NMI triggered in user
20163 * space, which means it is definitely not nested.
20164 */
20165+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
20166+ je 1f
20167 cmpl $__KERNEL_CS, 16(%rsp)
20168 jne first_nmi
20169-
20170+1:
20171 /*
20172 * Check the special variable on the stack to see if NMIs are
20173 * executing.
20174@@ -1847,6 +2237,17 @@ end_repeat_nmi:
20175 */
20176 movq %cr2, %r12
20177
20178+#ifdef CONFIG_PAX_MEMORY_UDEREF
20179+ testb $3, CS(%rsp)
20180+ jnz 1f
20181+ pax_enter_kernel
20182+ jmp 2f
20183+1: pax_enter_kernel_user
20184+2:
20185+#else
20186+ pax_enter_kernel
20187+#endif
20188+
20189 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
20190 movq %rsp,%rdi
20191 movq $-1,%rsi
20192@@ -1862,23 +2263,34 @@ end_repeat_nmi:
20193 testl %ebx,%ebx /* swapgs needed? */
20194 jnz nmi_restore
20195 nmi_swapgs:
20196+#ifdef CONFIG_PAX_MEMORY_UDEREF
20197+ pax_exit_kernel_user
20198+#else
20199+ pax_exit_kernel
20200+#endif
20201 SWAPGS_UNSAFE_STACK
20202+ RESTORE_ALL 6*8
20203+ /* Clear the NMI executing stack variable */
20204+ movq $0, 5*8(%rsp)
20205+ jmp irq_return
20206 nmi_restore:
20207+ pax_exit_kernel
20208 /* Pop the extra iret frame at once */
20209 RESTORE_ALL 6*8
20210+ pax_force_retaddr_bts
20211
20212 /* Clear the NMI executing stack variable */
20213 movq $0, 5*8(%rsp)
20214 jmp irq_return
20215 CFI_ENDPROC
20216-END(nmi)
20217+ENDPROC(nmi)
20218
20219 ENTRY(ignore_sysret)
20220 CFI_STARTPROC
20221 mov $-ENOSYS,%eax
20222 sysret
20223 CFI_ENDPROC
20224-END(ignore_sysret)
20225+ENDPROC(ignore_sysret)
20226
20227 /*
20228 * End of kprobes section
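
The paranoid and NMI exit paths above pair each pax_enter_* with a matching
pax_exit_* depending on where the CPU is returning to. A conceptual C sketch
of that policy follows (illustrative names only; the real helpers are
assembler macros defined elsewhere in this patch set):

/* Returning to ring 3 undoes the UDEREF entry work; returning to the kernel
 * undoes the KERNEXEC entry work. The CS RPL test stands in for user_mode(). */
struct regs_sketch { unsigned long cs; };

static void pax_exit_kernel_sketch(void)      { /* restore KERNEXEC CS state */ }
static void pax_exit_kernel_user_sketch(void) { /* restore UDEREF segment/PGD view */ }

static void paranoid_exit_policy(const struct regs_sketch *regs)
{
	if (regs->cs & 3)
		pax_exit_kernel_user_sketch();
	else
		pax_exit_kernel_sketch();
}
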
20229diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
20230index 1d41402..af9a46a 100644
20231--- a/arch/x86/kernel/ftrace.c
20232+++ b/arch/x86/kernel/ftrace.c
20233@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
20234 {
20235 unsigned char replaced[MCOUNT_INSN_SIZE];
20236
20237+ ip = ktla_ktva(ip);
20238+
20239 /*
20240 * Note: Due to modules and __init, code can
20241 * disappear and change; we need to protect against faulting
20242@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20243 unsigned char old[MCOUNT_INSN_SIZE], *new;
20244 int ret;
20245
20246- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
20247+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
20248 new = ftrace_call_replace(ip, (unsigned long)func);
20249
20250 /* See comment above by declaration of modifying_ftrace_code */
20251@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20252 /* Also update the regs callback function */
20253 if (!ret) {
20254 ip = (unsigned long)(&ftrace_regs_call);
20255- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
20256+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
20257 new = ftrace_call_replace(ip, (unsigned long)func);
20258 ret = ftrace_modify_code(ip, old, new);
20259 }
20260@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
20261 * kernel identity mapping to modify code.
20262 */
20263 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
20264- ip = (unsigned long)__va(__pa(ip));
20265+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
20266
20267 return probe_kernel_write((void *)ip, val, size);
20268 }
20269@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
20270 unsigned char replaced[MCOUNT_INSN_SIZE];
20271 unsigned char brk = BREAKPOINT_INSTRUCTION;
20272
20273- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
20274+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
20275 return -EFAULT;
20276
20277 /* Make sure it is what we expect it to be */
20278@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20279 return ret;
20280
20281 fail_update:
20282- probe_kernel_write((void *)ip, &old_code[0], 1);
20283+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20284 goto out;
20285 }
20286
20287@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20288 {
20289 unsigned char code[MCOUNT_INSN_SIZE];
20290
20291+ ip = ktla_ktva(ip);
20292+
20293 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20294 return -EFAULT;
20295
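
The ktla_ktva()/ktva_ktla() conversions sprinkled through the ftrace hunks
translate between the kernel text's linked address and its writable alias
under PaX KERNEXEC. A minimal sketch of the idea, assuming the i386 layout
where the two views differ by __KERNEL_TEXT_OFFSET (the authoritative macros
are defined elsewhere in this patch):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)	/* linked -> alias */
#define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)	/* alias -> linked */
#else
#define ktla_ktva(addr)	(addr)				/* the two views coincide */
#define ktva_ktla(addr)	(addr)
#endif
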
20296diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
20297index c18f59d..9c0c9f6 100644
20298--- a/arch/x86/kernel/head32.c
20299+++ b/arch/x86/kernel/head32.c
20300@@ -18,6 +18,7 @@
20301 #include <asm/io_apic.h>
20302 #include <asm/bios_ebda.h>
20303 #include <asm/tlbflush.h>
20304+#include <asm/boot.h>
20305
20306 static void __init i386_default_early_setup(void)
20307 {
20308@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
20309
20310 void __init i386_start_kernel(void)
20311 {
20312- memblock_reserve(__pa_symbol(&_text),
20313- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
20314+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
20315
20316 #ifdef CONFIG_BLK_DEV_INITRD
20317 /* Reserve INITRD */
20318diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20319index c8932c7..d56b622 100644
20320--- a/arch/x86/kernel/head_32.S
20321+++ b/arch/x86/kernel/head_32.S
20322@@ -26,6 +26,12 @@
20323 /* Physical address */
20324 #define pa(X) ((X) - __PAGE_OFFSET)
20325
20326+#ifdef CONFIG_PAX_KERNEXEC
20327+#define ta(X) (X)
20328+#else
20329+#define ta(X) ((X) - __PAGE_OFFSET)
20330+#endif
20331+
20332 /*
20333 * References to members of the new_cpu_data structure.
20334 */
20335@@ -55,11 +61,7 @@
20336 * and smaller than max_low_pfn, otherwise we will waste some page table entries
20337 */
20338
20339-#if PTRS_PER_PMD > 1
20340-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20341-#else
20342-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20343-#endif
20344+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20345
20346 /* Number of possible pages in the lowmem region */
20347 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20348@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20349 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20350
20351 /*
20352+ * Real beginning of normal "text" segment
20353+ */
20354+ENTRY(stext)
20355+ENTRY(_stext)
20356+
20357+/*
20358 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20359 * %esi points to the real-mode code as a 32-bit pointer.
20360 * CS and DS must be 4 GB flat segments, but we don't depend on
20361@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20362 * can.
20363 */
20364 __HEAD
20365+
20366+#ifdef CONFIG_PAX_KERNEXEC
20367+ jmp startup_32
20368+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
20369+.fill PAGE_SIZE-5,1,0xcc
20370+#endif
20371+
20372 ENTRY(startup_32)
20373 movl pa(stack_start),%ecx
20374
20375@@ -106,6 +121,59 @@ ENTRY(startup_32)
20376 2:
20377 leal -__PAGE_OFFSET(%ecx),%esp
20378
20379+#ifdef CONFIG_SMP
20380+ movl $pa(cpu_gdt_table),%edi
20381+ movl $__per_cpu_load,%eax
20382+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20383+ rorl $16,%eax
20384+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20385+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20386+ movl $__per_cpu_end - 1,%eax
20387+ subl $__per_cpu_start,%eax
20388+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20389+#endif
20390+
20391+#ifdef CONFIG_PAX_MEMORY_UDEREF
20392+ movl $NR_CPUS,%ecx
20393+ movl $pa(cpu_gdt_table),%edi
20394+1:
20395+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20396+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20397+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20398+ addl $PAGE_SIZE_asm,%edi
20399+ loop 1b
20400+#endif
20401+
20402+#ifdef CONFIG_PAX_KERNEXEC
20403+ movl $pa(boot_gdt),%edi
20404+ movl $__LOAD_PHYSICAL_ADDR,%eax
20405+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20406+ rorl $16,%eax
20407+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20408+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20409+ rorl $16,%eax
20410+
20411+ ljmp $(__BOOT_CS),$1f
20412+1:
20413+
20414+ movl $NR_CPUS,%ecx
20415+ movl $pa(cpu_gdt_table),%edi
20416+ addl $__PAGE_OFFSET,%eax
20417+1:
20418+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20419+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20420+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20421+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20422+ rorl $16,%eax
20423+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20424+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20425+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20426+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20427+ rorl $16,%eax
20428+ addl $PAGE_SIZE_asm,%edi
20429+ loop 1b
20430+#endif
20431+
20432 /*
20433 * Clear BSS first so that there are no surprises...
20434 */
20435@@ -196,8 +264,11 @@ ENTRY(startup_32)
20436 movl %eax, pa(max_pfn_mapped)
20437
20438 /* Do early initialization of the fixmap area */
20439- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20440- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20441+#ifdef CONFIG_COMPAT_VDSO
20442+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20443+#else
20444+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20445+#endif
20446 #else /* Not PAE */
20447
20448 page_pde_offset = (__PAGE_OFFSET >> 20);
20449@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20450 movl %eax, pa(max_pfn_mapped)
20451
20452 /* Do early initialization of the fixmap area */
20453- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20454- movl %eax,pa(initial_page_table+0xffc)
20455+#ifdef CONFIG_COMPAT_VDSO
20456+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20457+#else
20458+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20459+#endif
20460 #endif
20461
20462 #ifdef CONFIG_PARAVIRT
20463@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20464 cmpl $num_subarch_entries, %eax
20465 jae bad_subarch
20466
20467- movl pa(subarch_entries)(,%eax,4), %eax
20468- subl $__PAGE_OFFSET, %eax
20469- jmp *%eax
20470+ jmp *pa(subarch_entries)(,%eax,4)
20471
20472 bad_subarch:
20473 WEAK(lguest_entry)
20474@@ -256,10 +328,10 @@ WEAK(xen_entry)
20475 __INITDATA
20476
20477 subarch_entries:
20478- .long default_entry /* normal x86/PC */
20479- .long lguest_entry /* lguest hypervisor */
20480- .long xen_entry /* Xen hypervisor */
20481- .long default_entry /* Moorestown MID */
20482+ .long ta(default_entry) /* normal x86/PC */
20483+ .long ta(lguest_entry) /* lguest hypervisor */
20484+ .long ta(xen_entry) /* Xen hypervisor */
20485+ .long ta(default_entry) /* Moorestown MID */
20486 num_subarch_entries = (. - subarch_entries) / 4
20487 .previous
20488 #else
20489@@ -335,6 +407,7 @@ default_entry:
20490 movl pa(mmu_cr4_features),%eax
20491 movl %eax,%cr4
20492
20493+#ifdef CONFIG_X86_PAE
20494 testb $X86_CR4_PAE, %al # check if PAE is enabled
20495 jz 6f
20496
20497@@ -363,6 +436,9 @@ default_entry:
20498 /* Make changes effective */
20499 wrmsr
20500
20501+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20502+#endif
20503+
20504 6:
20505
20506 /*
20507@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
20508 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20509 movl %eax,%ss # after changing gdt.
20510
20511- movl $(__USER_DS),%eax # DS/ES contains default USER segment
20512+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20513 movl %eax,%ds
20514 movl %eax,%es
20515
20516 movl $(__KERNEL_PERCPU), %eax
20517 movl %eax,%fs # set this cpu's percpu
20518
20519+#ifdef CONFIG_CC_STACKPROTECTOR
20520 movl $(__KERNEL_STACK_CANARY),%eax
20521+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20522+ movl $(__USER_DS),%eax
20523+#else
20524+ xorl %eax,%eax
20525+#endif
20526 movl %eax,%gs
20527
20528 xorl %eax,%eax # Clear LDT
20529@@ -544,8 +626,11 @@ setup_once:
20530 * relocation. Manually set base address in stack canary
20531 * segment descriptor.
20532 */
20533- movl $gdt_page,%eax
20534+ movl $cpu_gdt_table,%eax
20535 movl $stack_canary,%ecx
20536+#ifdef CONFIG_SMP
20537+ addl $__per_cpu_load,%ecx
20538+#endif
20539 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20540 shrl $16, %ecx
20541 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20542@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
20543 /* This is global to keep gas from relaxing the jumps */
20544 ENTRY(early_idt_handler)
20545 cld
20546- cmpl $2,%ss:early_recursion_flag
20547+ cmpl $1,%ss:early_recursion_flag
20548 je hlt_loop
20549 incl %ss:early_recursion_flag
20550
20551@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
20552 pushl (20+6*4)(%esp) /* trapno */
20553 pushl $fault_msg
20554 call printk
20555-#endif
20556 call dump_stack
20557+#endif
20558 hlt_loop:
20559 hlt
20560 jmp hlt_loop
20561@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
20562 /* This is the default interrupt "handler" :-) */
20563 ALIGN
20564 ignore_int:
20565- cld
20566 #ifdef CONFIG_PRINTK
20567+ cmpl $2,%ss:early_recursion_flag
20568+ je hlt_loop
20569+ incl %ss:early_recursion_flag
20570+ cld
20571 pushl %eax
20572 pushl %ecx
20573 pushl %edx
20574@@ -644,9 +732,6 @@ ignore_int:
20575 movl $(__KERNEL_DS),%eax
20576 movl %eax,%ds
20577 movl %eax,%es
20578- cmpl $2,early_recursion_flag
20579- je hlt_loop
20580- incl early_recursion_flag
20581 pushl 16(%esp)
20582 pushl 24(%esp)
20583 pushl 32(%esp)
20584@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
20585 /*
20586 * BSS section
20587 */
20588-__PAGE_ALIGNED_BSS
20589- .align PAGE_SIZE
20590 #ifdef CONFIG_X86_PAE
20591+.section .initial_pg_pmd,"a",@progbits
20592 initial_pg_pmd:
20593 .fill 1024*KPMDS,4,0
20594 #else
20595+.section .initial_page_table,"a",@progbits
20596 ENTRY(initial_page_table)
20597 .fill 1024,4,0
20598 #endif
20599+.section .initial_pg_fixmap,"a",@progbits
20600 initial_pg_fixmap:
20601 .fill 1024,4,0
20602+.section .empty_zero_page,"a",@progbits
20603 ENTRY(empty_zero_page)
20604 .fill 4096,1,0
20605+.section .swapper_pg_dir,"a",@progbits
20606 ENTRY(swapper_pg_dir)
20607+#ifdef CONFIG_X86_PAE
20608+ .fill 4,8,0
20609+#else
20610 .fill 1024,4,0
20611+#endif
20612+
20613+/*
20614+ * The IDT has to be page-aligned to simplify the Pentium
20615+ * F0 0F bug workaround. We have a special link segment
20616+ * for this.
20617+ */
20618+.section .idt,"a",@progbits
20619+ENTRY(idt_table)
20620+ .fill 256,8,0
20621
20622 /*
20623 * This starts the data section.
20624 */
20625 #ifdef CONFIG_X86_PAE
20626-__PAGE_ALIGNED_DATA
20627- /* Page-aligned for the benefit of paravirt? */
20628- .align PAGE_SIZE
20629+.section .initial_page_table,"a",@progbits
20630 ENTRY(initial_page_table)
20631 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20632 # if KPMDS == 3
20633@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
20634 # error "Kernel PMDs should be 1, 2 or 3"
20635 # endif
20636 .align PAGE_SIZE /* needs to be page-sized too */
20637+
20638+#ifdef CONFIG_PAX_PER_CPU_PGD
20639+ENTRY(cpu_pgd)
20640+ .rept NR_CPUS
20641+ .fill 4,8,0
20642+ .endr
20643+#endif
20644+
20645 #endif
20646
20647 .data
20648 .balign 4
20649 ENTRY(stack_start)
20650- .long init_thread_union+THREAD_SIZE
20651+ .long init_thread_union+THREAD_SIZE-8
20652
20653 __INITRODATA
20654 int_msg:
20655@@ -754,7 +861,7 @@ fault_msg:
20656 * segment size, and 32-bit linear address value:
20657 */
20658
20659- .data
20660+.section .rodata,"a",@progbits
20661 .globl boot_gdt_descr
20662 .globl idt_descr
20663
20664@@ -763,7 +870,7 @@ fault_msg:
20665 .word 0 # 32 bit align gdt_desc.address
20666 boot_gdt_descr:
20667 .word __BOOT_DS+7
20668- .long boot_gdt - __PAGE_OFFSET
20669+ .long pa(boot_gdt)
20670
20671 .word 0 # 32-bit align idt_desc.address
20672 idt_descr:
20673@@ -774,7 +881,7 @@ idt_descr:
20674 .word 0 # 32 bit align gdt_desc.address
20675 ENTRY(early_gdt_descr)
20676 .word GDT_ENTRIES*8-1
20677- .long gdt_page /* Overwritten for secondary CPUs */
20678+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
20679
20680 /*
20681 * The boot_gdt must mirror the equivalent in setup.S and is
20682@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
20683 .align L1_CACHE_BYTES
20684 ENTRY(boot_gdt)
20685 .fill GDT_ENTRY_BOOT_CS,8,0
20686- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20687- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20688+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20689+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20690+
20691+ .align PAGE_SIZE_asm
20692+ENTRY(cpu_gdt_table)
20693+ .rept NR_CPUS
20694+ .quad 0x0000000000000000 /* NULL descriptor */
20695+ .quad 0x0000000000000000 /* 0x0b reserved */
20696+ .quad 0x0000000000000000 /* 0x13 reserved */
20697+ .quad 0x0000000000000000 /* 0x1b reserved */
20698+
20699+#ifdef CONFIG_PAX_KERNEXEC
20700+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20701+#else
20702+ .quad 0x0000000000000000 /* 0x20 unused */
20703+#endif
20704+
20705+ .quad 0x0000000000000000 /* 0x28 unused */
20706+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20707+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20708+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20709+ .quad 0x0000000000000000 /* 0x4b reserved */
20710+ .quad 0x0000000000000000 /* 0x53 reserved */
20711+ .quad 0x0000000000000000 /* 0x5b reserved */
20712+
20713+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20714+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20715+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20716+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20717+
20718+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20719+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20720+
20721+ /*
20722+ * Segments used for calling PnP BIOS have byte granularity.
20723+ * The code segments and data segments have fixed 64k limits,
20724+ * the transfer segment sizes are set at run time.
20725+ */
20726+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20727+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20728+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20729+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20730+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20731+
20732+ /*
20733+ * The APM segments have byte granularity and their bases
20734+ * are set at run time. All have 64k limits.
20735+ */
20736+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20737+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20738+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20739+
20740+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20741+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20742+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20743+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20744+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20745+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20746+
20747+ /* Be sure this is zeroed to avoid false validations in Xen */
20748+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20749+ .endr
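
The .quad values in cpu_gdt_table above are packed x86 segment descriptors.
The helper below (illustrative, not part of the patch) shows the encoding:
pack_desc(0, 0xfffff, 0x9b, 0xc) yields 0x00cf9b000000ffff, the kernel 4GB
code segment, while access bytes 0xfb and 0xf3 give the user code and data
variants.

#include <stdint.h>

static uint64_t pack_desc(uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
{
	uint64_t d = 0;

	d |= limit & 0xffffu;				/* limit 15:0 */
	d |= (uint64_t)(base & 0xffffffu) << 16;	/* base 23:0 */
	d |= (uint64_t)access << 40;			/* type, S, DPL, P */
	d |= (uint64_t)((limit >> 16) & 0xfu) << 48;	/* limit 19:16 */
	d |= (uint64_t)(flags & 0xfu) << 52;		/* AVL, L, D/B, G */
	d |= (uint64_t)((base >> 24) & 0xffu) << 56;	/* base 31:24 */
	return d;
}
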
20750diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20751index 980053c..74d3b44 100644
20752--- a/arch/x86/kernel/head_64.S
20753+++ b/arch/x86/kernel/head_64.S
20754@@ -20,6 +20,8 @@
20755 #include <asm/processor-flags.h>
20756 #include <asm/percpu.h>
20757 #include <asm/nops.h>
20758+#include <asm/cpufeature.h>
20759+#include <asm/alternative-asm.h>
20760
20761 #ifdef CONFIG_PARAVIRT
20762 #include <asm/asm-offsets.h>
20763@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20764 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20765 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20766 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20767+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20768+L3_VMALLOC_START = pud_index(VMALLOC_START)
20769+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20770+L3_VMALLOC_END = pud_index(VMALLOC_END)
20771+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20772+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20773
20774 .text
20775 __HEAD
20776@@ -88,35 +96,23 @@ startup_64:
20777 */
20778 addq %rbp, init_level4_pgt + 0(%rip)
20779 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20780+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20781+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20782+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20783 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20784
20785 addq %rbp, level3_ident_pgt + 0(%rip)
20786+#ifndef CONFIG_XEN
20787+ addq %rbp, level3_ident_pgt + 8(%rip)
20788+#endif
20789
20790- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20791- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20792+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20793+
20794+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20795+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
20796
20797 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20798-
20799- /* Add an Identity mapping if I am above 1G */
20800- leaq _text(%rip), %rdi
20801- andq $PMD_PAGE_MASK, %rdi
20802-
20803- movq %rdi, %rax
20804- shrq $PUD_SHIFT, %rax
20805- andq $(PTRS_PER_PUD - 1), %rax
20806- jz ident_complete
20807-
20808- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
20809- leaq level3_ident_pgt(%rip), %rbx
20810- movq %rdx, 0(%rbx, %rax, 8)
20811-
20812- movq %rdi, %rax
20813- shrq $PMD_SHIFT, %rax
20814- andq $(PTRS_PER_PMD - 1), %rax
20815- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
20816- leaq level2_spare_pgt(%rip), %rbx
20817- movq %rdx, 0(%rbx, %rax, 8)
20818-ident_complete:
20819+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20820
20821 /*
20822 * Fixup the kernel text+data virtual addresses. Note that
20823@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
20824 * after the boot processor executes this code.
20825 */
20826
20827- /* Enable PAE mode and PGE */
20828- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
20829+ /* Enable PAE mode and PSE/PGE */
20830+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20831 movq %rax, %cr4
20832
20833 /* Setup early boot stage 4 level pagetables. */
20834@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
20835 movl $MSR_EFER, %ecx
20836 rdmsr
20837 btsl $_EFER_SCE, %eax /* Enable System Call */
20838- btl $20,%edi /* No Execute supported? */
20839+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20840 jnc 1f
20841 btsl $_EFER_NX, %eax
20842+ leaq init_level4_pgt(%rip), %rdi
20843+#ifndef CONFIG_EFI
20844+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20845+#endif
20846+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20847+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20848+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20849+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20850 1: wrmsr /* Make changes effective */
20851
20852 /* Setup cr0 */
20853@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
20854 * jump. In addition we need to ensure %cs is set so we make this
20855 * a far return.
20856 */
20857+ pax_set_fptr_mask
20858 movq initial_code(%rip),%rax
20859 pushq $0 # fake return address to stop unwinder
20860 pushq $__KERNEL_CS # set correct cs
20861@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
20862 bad_address:
20863 jmp bad_address
20864
20865- .section ".init.text","ax"
20866+ __INIT
20867 .globl early_idt_handlers
20868 early_idt_handlers:
20869 # 104(%rsp) %rflags
20870@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
20871 call dump_stack
20872 #ifdef CONFIG_KALLSYMS
20873 leaq early_idt_ripmsg(%rip),%rdi
20874- movq 40(%rsp),%rsi # %rip again
20875+ movq 88(%rsp),%rsi # %rip again
20876 call __print_symbol
20877 #endif
20878 #endif /* EARLY_PRINTK */
20879@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
20880 addq $16,%rsp # drop vector number and error code
20881 decl early_recursion_flag(%rip)
20882 INTERRUPT_RETURN
20883+ .previous
20884
20885+ __INITDATA
20886 .balign 4
20887 early_recursion_flag:
20888 .long 0
20889+ .previous
20890
20891+ .section .rodata,"a",@progbits
20892 #ifdef CONFIG_EARLY_PRINTK
20893 early_idt_msg:
20894 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20895@@ -376,6 +385,7 @@ early_idt_ripmsg:
20896 #endif /* CONFIG_EARLY_PRINTK */
20897 .previous
20898
20899+ .section .rodata,"a",@progbits
20900 #define NEXT_PAGE(name) \
20901 .balign PAGE_SIZE; \
20902 ENTRY(name)
20903@@ -388,7 +398,6 @@ ENTRY(name)
20904 i = i + 1 ; \
20905 .endr
20906
20907- .data
20908 /*
20909 * This default setting generates an ident mapping at address 0x100000
20910 * and a mapping for the kernel that precisely maps virtual address
20911@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
20912 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20913 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20914 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20915+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20916+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20917+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20918+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20919+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20920+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20921 .org init_level4_pgt + L4_START_KERNEL*8, 0
20922 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20923 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20924
20925+#ifdef CONFIG_PAX_PER_CPU_PGD
20926+NEXT_PAGE(cpu_pgd)
20927+ .rept NR_CPUS
20928+ .fill 512,8,0
20929+ .endr
20930+#endif
20931+
20932 NEXT_PAGE(level3_ident_pgt)
20933 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20934+#ifdef CONFIG_XEN
20935 .fill 511,8,0
20936+#else
20937+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20938+ .fill 510,8,0
20939+#endif
20940+
20941+NEXT_PAGE(level3_vmalloc_start_pgt)
20942+ .fill 512,8,0
20943+
20944+NEXT_PAGE(level3_vmalloc_end_pgt)
20945+ .fill 512,8,0
20946+
20947+NEXT_PAGE(level3_vmemmap_pgt)
20948+ .fill L3_VMEMMAP_START,8,0
20949+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20950
20951 NEXT_PAGE(level3_kernel_pgt)
20952 .fill L3_START_KERNEL,8,0
20953@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
20954 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20955 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20956
20957+NEXT_PAGE(level2_vmemmap_pgt)
20958+ .fill 512,8,0
20959+
20960 NEXT_PAGE(level2_fixmap_pgt)
20961- .fill 506,8,0
20962- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20963- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20964- .fill 5,8,0
20965+ .fill 507,8,0
20966+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20967+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20968+ .fill 4,8,0
20969
20970-NEXT_PAGE(level1_fixmap_pgt)
20971+NEXT_PAGE(level1_vsyscall_pgt)
20972 .fill 512,8,0
20973
20974-NEXT_PAGE(level2_ident_pgt)
20975- /* Since I easily can, map the first 1G.
20976+ /* Since I easily can, map the first 2G.
20977 * Don't set NX because code runs from these pages.
20978 */
20979- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20980+NEXT_PAGE(level2_ident_pgt)
20981+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20982
20983 NEXT_PAGE(level2_kernel_pgt)
20984 /*
20985@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
20986 * If you want to increase this then increase MODULES_VADDR
20987 * too.)
20988 */
20989- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
20990- KERNEL_IMAGE_SIZE/PMD_SIZE)
20991-
20992-NEXT_PAGE(level2_spare_pgt)
20993- .fill 512, 8, 0
20994+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
20995
20996 #undef PMDS
20997 #undef NEXT_PAGE
20998
20999- .data
21000+ .align PAGE_SIZE
21001+ENTRY(cpu_gdt_table)
21002+ .rept NR_CPUS
21003+ .quad 0x0000000000000000 /* NULL descriptor */
21004+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
21005+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
21006+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
21007+ .quad 0x00cffb000000ffff /* __USER32_CS */
21008+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
21009+ .quad 0x00affb000000ffff /* __USER_CS */
21010+
21011+#ifdef CONFIG_PAX_KERNEXEC
21012+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
21013+#else
21014+ .quad 0x0 /* unused */
21015+#endif
21016+
21017+ .quad 0,0 /* TSS */
21018+ .quad 0,0 /* LDT */
21019+ .quad 0,0,0 /* three TLS descriptors */
21020+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
21021+ /* asm/segment.h:GDT_ENTRIES must match this */
21022+
21023+ /* zero the remaining page */
21024+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
21025+ .endr
21026+
21027 .align 16
21028 .globl early_gdt_descr
21029 early_gdt_descr:
21030 .word GDT_ENTRIES*8-1
21031 early_gdt_descr_base:
21032- .quad INIT_PER_CPU_VAR(gdt_page)
21033+ .quad cpu_gdt_table
21034
21035 ENTRY(phys_base)
21036 /* This must match the first entry in level2_kernel_pgt */
21037 .quad 0x0000000000000000
21038
21039 #include "../../x86/xen/xen-head.S"
21040-
21041- .section .bss, "aw", @nobits
21042+
21043+ .section .rodata,"a",@progbits
21044 .align L1_CACHE_BYTES
21045 ENTRY(idt_table)
21046- .skip IDT_ENTRIES * 16
21047+ .fill 512,8,0
21048
21049 .align L1_CACHE_BYTES
21050 ENTRY(nmi_idt_table)
21051- .skip IDT_ENTRIES * 16
21052+ .fill 512,8,0
21053
21054 __PAGE_ALIGNED_BSS
21055 .align PAGE_SIZE
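
The btsq sequence above sets the NX bit on top-level entries that should
never map code, once EFER.NX is enabled. The same operation in C terms
(illustrative; the real code runs in early assembly before C is available):

#include <stdint.h>

#define PAGE_BIT_NX 63

static void enable_nx(uint64_t *pgd_slot, uint64_t *supported_pte_mask)
{
	*pgd_slot |= 1ULL << PAGE_BIT_NX;		/* mapping is non-executable */
	*supported_pte_mask |= 1ULL << PAGE_BIT_NX;	/* NX usable in later PTEs */
}
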
21056diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
21057index 9c3bd4a..e1d9b35 100644
21058--- a/arch/x86/kernel/i386_ksyms_32.c
21059+++ b/arch/x86/kernel/i386_ksyms_32.c
21060@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
21061 EXPORT_SYMBOL(cmpxchg8b_emu);
21062 #endif
21063
21064+EXPORT_SYMBOL_GPL(cpu_gdt_table);
21065+
21066 /* Networking helper routines. */
21067 EXPORT_SYMBOL(csum_partial_copy_generic);
21068+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
21069+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
21070
21071 EXPORT_SYMBOL(__get_user_1);
21072 EXPORT_SYMBOL(__get_user_2);
21073@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
21074
21075 EXPORT_SYMBOL(csum_partial);
21076 EXPORT_SYMBOL(empty_zero_page);
21077+
21078+#ifdef CONFIG_PAX_KERNEXEC
21079+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
21080+#endif
21081diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
21082index 245a71d..89d9ce4 100644
21083--- a/arch/x86/kernel/i387.c
21084+++ b/arch/x86/kernel/i387.c
21085@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
21086 static inline bool interrupted_user_mode(void)
21087 {
21088 struct pt_regs *regs = get_irq_regs();
21089- return regs && user_mode_vm(regs);
21090+ return regs && user_mode(regs);
21091 }
21092
21093 /*
21094diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
21095index 9a5c460..b332a4b 100644
21096--- a/arch/x86/kernel/i8259.c
21097+++ b/arch/x86/kernel/i8259.c
21098@@ -209,7 +209,7 @@ spurious_8259A_irq:
21099 "spurious 8259A interrupt: IRQ%d.\n", irq);
21100 spurious_irq_mask |= irqmask;
21101 }
21102- atomic_inc(&irq_err_count);
21103+ atomic_inc_unchecked(&irq_err_count);
21104 /*
21105 * Theoretically we do not have to handle this IRQ,
21106 * but in Linux this does not cause problems and is
21107@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
21108 /* (slave's support for AEOI in flat mode is to be investigated) */
21109 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
21110
21111+ pax_open_kernel();
21112 if (auto_eoi)
21113 /*
21114 * In AEOI mode we just have to mask the interrupt
21115 * when acking.
21116 */
21117- i8259A_chip.irq_mask_ack = disable_8259A_irq;
21118+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
21119 else
21120- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21121+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21122+ pax_close_kernel();
21123
21124 udelay(100); /* wait for 8259A to initialize */
21125
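
pax_open_kernel()/pax_close_kernel() above bracket a write to an otherwise
read-only structure (the irq_chip ends up in read-only memory under
KERNEXEC). A minimal sketch of one common implementation strategy, assuming
the CR0.WP toggle on x86; the patch's real version is config-dependent and
also deals with preemption:

#define X86_CR0_WP (1UL << 16)

static inline unsigned long pax_open_kernel_sketch(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~X86_CR0_WP)); /* allow RO writes */
	return cr0;
}

static inline void pax_close_kernel_sketch(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0)); /* re-arm write protection */
}
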
21126diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
21127index a979b5b..1d6db75 100644
21128--- a/arch/x86/kernel/io_delay.c
21129+++ b/arch/x86/kernel/io_delay.c
21130@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
21131 * Quirk table for systems that misbehave (lock up, etc.) if port
21132 * 0x80 is used:
21133 */
21134-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
21135+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
21136 {
21137 .callback = dmi_io_delay_0xed_port,
21138 .ident = "Compaq Presario V6000",
21139diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
21140index 8c96897..be66bfa 100644
21141--- a/arch/x86/kernel/ioport.c
21142+++ b/arch/x86/kernel/ioport.c
21143@@ -6,6 +6,7 @@
21144 #include <linux/sched.h>
21145 #include <linux/kernel.h>
21146 #include <linux/capability.h>
21147+#include <linux/security.h>
21148 #include <linux/errno.h>
21149 #include <linux/types.h>
21150 #include <linux/ioport.h>
21151@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21152
21153 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
21154 return -EINVAL;
21155+#ifdef CONFIG_GRKERNSEC_IO
21156+ if (turn_on && grsec_disable_privio) {
21157+ gr_handle_ioperm();
21158+ return -EPERM;
21159+ }
21160+#endif
21161 if (turn_on && !capable(CAP_SYS_RAWIO))
21162 return -EPERM;
21163
21164@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21165 * because the ->io_bitmap_max value must match the bitmap
21166 * contents:
21167 */
21168- tss = &per_cpu(init_tss, get_cpu());
21169+ tss = init_tss + get_cpu();
21170
21171 if (turn_on)
21172 bitmap_clear(t->io_bitmap_ptr, from, num);
21173@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
21174 return -EINVAL;
21175 /* Trying to gain more privileges? */
21176 if (level > old) {
21177+#ifdef CONFIG_GRKERNSEC_IO
21178+ if (grsec_disable_privio) {
21179+ gr_handle_iopl();
21180+ return -EPERM;
21181+ }
21182+#endif
21183 if (!capable(CAP_SYS_RAWIO))
21184 return -EPERM;
21185 }
21186diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
21187index e4595f1..ee3bfb8 100644
21188--- a/arch/x86/kernel/irq.c
21189+++ b/arch/x86/kernel/irq.c
21190@@ -18,7 +18,7 @@
21191 #include <asm/mce.h>
21192 #include <asm/hw_irq.h>
21193
21194-atomic_t irq_err_count;
21195+atomic_unchecked_t irq_err_count;
21196
21197 /* Function pointer for generic interrupt vector handling */
21198 void (*x86_platform_ipi_callback)(void) = NULL;
21199@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
21200 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
21201 seq_printf(p, " Machine check polls\n");
21202 #endif
21203- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
21204+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
21205 #if defined(CONFIG_X86_IO_APIC)
21206- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
21207+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
21208 #endif
21209 return 0;
21210 }
21211@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
21212
21213 u64 arch_irq_stat(void)
21214 {
21215- u64 sum = atomic_read(&irq_err_count);
21216+ u64 sum = atomic_read_unchecked(&irq_err_count);
21217
21218 #ifdef CONFIG_X86_IO_APIC
21219- sum += atomic_read(&irq_mis_count);
21220+ sum += atomic_read_unchecked(&irq_mis_count);
21221 #endif
21222 return sum;
21223 }
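
atomic_unchecked_t is this patch set's opt-out from the REFCOUNT overflow
trap: irq_err_count is a statistic, so wrapping is harmless and must not
panic the box. A sketch of the shape of the _unchecked variants (assumed
definitions; the real ones live in the patched atomic headers):

typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(const volatile int *)&v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain locked inc; the checked variant would add an overflow trap */
	asm volatile("lock incl %0" : "+m" (v->counter));
}
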
21224diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
21225index 344faf8..355f60d 100644
21226--- a/arch/x86/kernel/irq_32.c
21227+++ b/arch/x86/kernel/irq_32.c
21228@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
21229 __asm__ __volatile__("andl %%esp,%0" :
21230 "=r" (sp) : "0" (THREAD_SIZE - 1));
21231
21232- return sp < (sizeof(struct thread_info) + STACK_WARN);
21233+ return sp < STACK_WARN;
21234 }
21235
21236 static void print_stack_overflow(void)
21237@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
21238 * per-CPU IRQ handling contexts (thread information and stack)
21239 */
21240 union irq_ctx {
21241- struct thread_info tinfo;
21242- u32 stack[THREAD_SIZE/sizeof(u32)];
21243+ unsigned long previous_esp;
21244+ u32 stack[THREAD_SIZE/sizeof(u32)];
21245 } __attribute__((aligned(THREAD_SIZE)));
21246
21247 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
21248@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
21249 static inline int
21250 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21251 {
21252- union irq_ctx *curctx, *irqctx;
21253+ union irq_ctx *irqctx;
21254 u32 *isp, arg1, arg2;
21255
21256- curctx = (union irq_ctx *) current_thread_info();
21257 irqctx = __this_cpu_read(hardirq_ctx);
21258
21259 /*
21260@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21261 * handler) we can't do that and just have to keep using the
21262 * current stack (which is the irq stack already after all)
21263 */
21264- if (unlikely(curctx == irqctx))
21265+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
21266 return 0;
21267
21268 /* build the stack frame on the IRQ stack */
21269- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21270- irqctx->tinfo.task = curctx->tinfo.task;
21271- irqctx->tinfo.previous_esp = current_stack_pointer;
21272+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21273+ irqctx->previous_esp = current_stack_pointer;
21274
21275- /* Copy the preempt_count so that the [soft]irq checks work. */
21276- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
21277+#ifdef CONFIG_PAX_MEMORY_UDEREF
21278+ __set_fs(MAKE_MM_SEG(0));
21279+#endif
21280
21281 if (unlikely(overflow))
21282 call_on_stack(print_stack_overflow, isp);
21283@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21284 : "0" (irq), "1" (desc), "2" (isp),
21285 "D" (desc->handle_irq)
21286 : "memory", "cc", "ecx");
21287+
21288+#ifdef CONFIG_PAX_MEMORY_UDEREF
21289+ __set_fs(current_thread_info()->addr_limit);
21290+#endif
21291+
21292 return 1;
21293 }
21294
21295@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21296 */
21297 void __cpuinit irq_ctx_init(int cpu)
21298 {
21299- union irq_ctx *irqctx;
21300-
21301 if (per_cpu(hardirq_ctx, cpu))
21302 return;
21303
21304- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21305- THREADINFO_GFP,
21306- THREAD_SIZE_ORDER));
21307- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21308- irqctx->tinfo.cpu = cpu;
21309- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
21310- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21311-
21312- per_cpu(hardirq_ctx, cpu) = irqctx;
21313-
21314- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21315- THREADINFO_GFP,
21316- THREAD_SIZE_ORDER));
21317- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21318- irqctx->tinfo.cpu = cpu;
21319- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21320-
21321- per_cpu(softirq_ctx, cpu) = irqctx;
21322+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21323+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21324+
21325+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21326+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21327
21328 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21329 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21330@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
21331 asmlinkage void do_softirq(void)
21332 {
21333 unsigned long flags;
21334- struct thread_info *curctx;
21335 union irq_ctx *irqctx;
21336 u32 *isp;
21337
21338@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
21339 local_irq_save(flags);
21340
21341 if (local_softirq_pending()) {
21342- curctx = current_thread_info();
21343 irqctx = __this_cpu_read(softirq_ctx);
21344- irqctx->tinfo.task = curctx->task;
21345- irqctx->tinfo.previous_esp = current_stack_pointer;
21346+ irqctx->previous_esp = current_stack_pointer;
21347
21348 /* build the stack frame on the softirq stack */
21349- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21350+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21351+
21352+#ifdef CONFIG_PAX_MEMORY_UDEREF
21353+ __set_fs(MAKE_MM_SEG(0));
21354+#endif
21355
21356 call_on_stack(__do_softirq, isp);
21357+
21358+#ifdef CONFIG_PAX_MEMORY_UDEREF
21359+ __set_fs(current_thread_info()->addr_limit);
21360+#endif
21361+
21362 /*
21363 * Shouldn't happen, we returned above if in_interrupt():
21364 */
21365@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
21366 if (unlikely(!desc))
21367 return false;
21368
21369- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21370+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21371 if (unlikely(overflow))
21372 print_stack_overflow();
21373 desc->handle_irq(irq, desc);
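
With thread_info gone from the bottom of the stack, the hunk above detects
irq-stack nesting by pointer distance instead of comparing context pointers.
The test, isolated (THREAD_SIZE value assumed for illustration):

#define THREAD_SIZE_SKETCH 8192UL

static int already_on_irq_stack(unsigned long sp, unsigned long irqctx_base)
{
	/* unsigned compare: also false when sp lies below the irq stack */
	return sp - irqctx_base < THREAD_SIZE_SKETCH;
}
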
21374diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
21375index d04d3ec..ea4b374 100644
21376--- a/arch/x86/kernel/irq_64.c
21377+++ b/arch/x86/kernel/irq_64.c
21378@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21379 u64 estack_top, estack_bottom;
21380 u64 curbase = (u64)task_stack_page(current);
21381
21382- if (user_mode_vm(regs))
21383+ if (user_mode(regs))
21384 return;
21385
21386 if (regs->sp >= curbase + sizeof(struct thread_info) +
21387diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21388index dc1404b..bbc43e7 100644
21389--- a/arch/x86/kernel/kdebugfs.c
21390+++ b/arch/x86/kernel/kdebugfs.c
21391@@ -27,7 +27,7 @@ struct setup_data_node {
21392 u32 len;
21393 };
21394
21395-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21396+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21397 size_t count, loff_t *ppos)
21398 {
21399 struct setup_data_node *node = file->private_data;
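
__size_overflow(3) above is an annotation consumed by the size_overflow GCC
plugin shipped with this patch set: it marks the third parameter (count) for
integer-overflow instrumentation. Assumed form of the marker; builds without
the plugin simply drop it:

#ifdef SIZE_OVERFLOW_PLUGIN
#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
#else
#define __size_overflow(...)
#endif
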
21400diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21401index 836f832..a8bda67 100644
21402--- a/arch/x86/kernel/kgdb.c
21403+++ b/arch/x86/kernel/kgdb.c
21404@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21405 #ifdef CONFIG_X86_32
21406 switch (regno) {
21407 case GDB_SS:
21408- if (!user_mode_vm(regs))
21409+ if (!user_mode(regs))
21410 *(unsigned long *)mem = __KERNEL_DS;
21411 break;
21412 case GDB_SP:
21413- if (!user_mode_vm(regs))
21414+ if (!user_mode(regs))
21415 *(unsigned long *)mem = kernel_stack_pointer(regs);
21416 break;
21417 case GDB_GS:
21418@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21419 bp->attr.bp_addr = breakinfo[breakno].addr;
21420 bp->attr.bp_len = breakinfo[breakno].len;
21421 bp->attr.bp_type = breakinfo[breakno].type;
21422- info->address = breakinfo[breakno].addr;
21423+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21424+ info->address = ktla_ktva(breakinfo[breakno].addr);
21425+ else
21426+ info->address = breakinfo[breakno].addr;
21427 info->len = breakinfo[breakno].len;
21428 info->type = breakinfo[breakno].type;
21429 val = arch_install_hw_breakpoint(bp);
21430@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21431 case 'k':
21432 /* clear the trace bit */
21433 linux_regs->flags &= ~X86_EFLAGS_TF;
21434- atomic_set(&kgdb_cpu_doing_single_step, -1);
21435+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21436
21437 /* set the trace bit if we're stepping */
21438 if (remcomInBuffer[0] == 's') {
21439 linux_regs->flags |= X86_EFLAGS_TF;
21440- atomic_set(&kgdb_cpu_doing_single_step,
21441+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21442 raw_smp_processor_id());
21443 }
21444
21445@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21446
21447 switch (cmd) {
21448 case DIE_DEBUG:
21449- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21450+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21451 if (user_mode(regs))
21452 return single_step_cont(regs, args);
21453 break;
21454@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21455 #endif /* CONFIG_DEBUG_RODATA */
21456
21457 bpt->type = BP_BREAKPOINT;
21458- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21459+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21460 BREAK_INSTR_SIZE);
21461 if (err)
21462 return err;
21463- err = probe_kernel_write((char *)bpt->bpt_addr,
21464+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21465 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21466 #ifdef CONFIG_DEBUG_RODATA
21467 if (!err)
21468@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21469 return -EBUSY;
21470 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21471 BREAK_INSTR_SIZE);
21472- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21473+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21474 if (err)
21475 return err;
21476 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21477@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21478 if (mutex_is_locked(&text_mutex))
21479 goto knl_write;
21480 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21481- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21482+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21483 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21484 goto knl_write;
21485 return err;
21486 knl_write:
21487 #endif /* CONFIG_DEBUG_RODATA */
21488- return probe_kernel_write((char *)bpt->bpt_addr,
21489+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21490 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21491 }
21492
21493diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
21494index c5e410e..ed5a7f0 100644
21495--- a/arch/x86/kernel/kprobes-opt.c
21496+++ b/arch/x86/kernel/kprobes-opt.c
21497@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21498 * Verify if the address gap is in 2GB range, because this uses
21499 * a relative jump.
21500 */
21501- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21502+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21503 if (abs(rel) > 0x7fffffff)
21504 return -ERANGE;
21505
21506@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21507 op->optinsn.size = ret;
21508
21509 /* Copy arch-dep-instance from template */
21510- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21511+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21512
21513 /* Set probe information */
21514 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21515
21516 /* Set probe function call */
21517- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21518+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21519
21520 /* Set returning jmp instruction at the tail of out-of-line buffer */
21521- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21522+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21523 (u8 *)op->kp.addr + op->optinsn.size);
21524
21525 flush_icache_range((unsigned long) buf,
21526@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21527 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21528
21529 /* Backup instructions which will be replaced by jump address */
21530- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21531+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21532 RELATIVE_ADDR_SIZE);
21533
21534 insn_buf[0] = RELATIVEJUMP_OPCODE;
21535@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21536 /* This kprobe is really able to run optimized path. */
21537 op = container_of(p, struct optimized_kprobe, kp);
21538 /* Detour through copied instructions */
21539- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21540+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21541 if (!reenter)
21542 reset_current_kprobe();
21543 preempt_enable_no_resched();
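
The abs(rel) check above rejects detour buffers outside rel32 reach of the
probed address, since optimized kprobes patch in a relative jump. The
reachability rule as a standalone helper (illustrative):

#include <stdint.h>

static int in_rel32_range(intptr_t from, intptr_t to, int insn_len)
{
	intptr_t rel = to - (from + insn_len);	/* measured past the insn */

	return rel >= INT32_MIN && rel <= INT32_MAX;
}
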
21544diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
21545index 57916c0..9e0b9d0 100644
21546--- a/arch/x86/kernel/kprobes.c
21547+++ b/arch/x86/kernel/kprobes.c
21548@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21549 s32 raddr;
21550 } __attribute__((packed)) *insn;
21551
21552- insn = (struct __arch_relative_insn *)from;
21553+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
21554+
21555+ pax_open_kernel();
21556 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21557 insn->op = op;
21558+ pax_close_kernel();
21559 }
21560
21561 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21562@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21563 kprobe_opcode_t opcode;
21564 kprobe_opcode_t *orig_opcodes = opcodes;
21565
21566- if (search_exception_tables((unsigned long)opcodes))
21567+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21568 return 0; /* Page fault may occur on this address. */
21569
21570 retry:
21571@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21572 * for the first byte, we can recover the original instruction
21573 * from it and kp->opcode.
21574 */
21575- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21576+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21577 buf[0] = kp->opcode;
21578- return (unsigned long)buf;
21579+ return ktva_ktla((unsigned long)buf);
21580 }
21581
21582 /*
21583@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21584 /* Another subsystem puts a breakpoint, failed to recover */
21585 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21586 return 0;
21587+ pax_open_kernel();
21588 memcpy(dest, insn.kaddr, insn.length);
21589+ pax_close_kernel();
21590
21591 #ifdef CONFIG_X86_64
21592 if (insn_rip_relative(&insn)) {
21593@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21594 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
21595 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
21596 disp = (u8 *) dest + insn_offset_displacement(&insn);
21597+ pax_open_kernel();
21598 *(s32 *) disp = (s32) newdisp;
21599+ pax_close_kernel();
21600 }
21601 #endif
21602 return insn.length;
21603@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21604 * nor set current_kprobe, because it doesn't use single
21605 * stepping.
21606 */
21607- regs->ip = (unsigned long)p->ainsn.insn;
21608+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21609 preempt_enable_no_resched();
21610 return;
21611 }
21612@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21613 regs->flags &= ~X86_EFLAGS_IF;
21614 /* single step inline if the instruction is an int3 */
21615 if (p->opcode == BREAKPOINT_INSTRUCTION)
21616- regs->ip = (unsigned long)p->addr;
21617+ regs->ip = ktla_ktva((unsigned long)p->addr);
21618 else
21619- regs->ip = (unsigned long)p->ainsn.insn;
21620+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21621 }
21622
21623 /*
21624@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21625 setup_singlestep(p, regs, kcb, 0);
21626 return 1;
21627 }
21628- } else if (*addr != BREAKPOINT_INSTRUCTION) {
21629+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21630 /*
21631 * The breakpoint instruction was removed right
21632 * after we hit it. Another cpu has removed
21633@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21634 " movq %rax, 152(%rsp)\n"
21635 RESTORE_REGS_STRING
21636 " popfq\n"
21637+#ifdef KERNEXEC_PLUGIN
21638+ " btsq $63,(%rsp)\n"
21639+#endif
21640 #else
21641 " pushf\n"
21642 SAVE_REGS_STRING
21643@@ -788,7 +798,7 @@ static void __kprobes
21644 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21645 {
21646 unsigned long *tos = stack_addr(regs);
21647- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21648+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21649 unsigned long orig_ip = (unsigned long)p->addr;
21650 kprobe_opcode_t *insn = p->ainsn.insn;
21651
21652@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21653 struct die_args *args = data;
21654 int ret = NOTIFY_DONE;
21655
21656- if (args->regs && user_mode_vm(args->regs))
21657+ if (args->regs && user_mode(args->regs))
21658 return ret;
21659
21660 switch (val) {
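
__synthesize_relative_insn above writes a jmp/call rel32: one opcode byte
plus a 32-bit displacement measured from the end of the 5-byte instruction;
the patch additionally routes the store through the KERNEXEC alias inside an
open/close pair. The bare encoding, as a sketch:

#include <stdint.h>
#include <string.h>

static void synthesize_rel_insn(uint8_t *from, const uint8_t *to, uint8_t op)
{
	int32_t raddr = (int32_t)((intptr_t)to - ((intptr_t)from + 5));

	from[0] = op;				/* 0xe9 = jmp, 0xe8 = call */
	memcpy(from + 1, &raddr, sizeof(raddr));
}
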
21661diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21662index 9c2bd8b..bb1131c 100644
21663--- a/arch/x86/kernel/kvm.c
21664+++ b/arch/x86/kernel/kvm.c
21665@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21666 return NOTIFY_OK;
21667 }
21668
21669-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21670+static struct notifier_block kvm_cpu_notifier = {
21671 .notifier_call = kvm_cpu_notify,
21672 };
21673 #endif
21674diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21675index ebc9873..1b9724b 100644
21676--- a/arch/x86/kernel/ldt.c
21677+++ b/arch/x86/kernel/ldt.c
21678@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21679 if (reload) {
21680 #ifdef CONFIG_SMP
21681 preempt_disable();
21682- load_LDT(pc);
21683+ load_LDT_nolock(pc);
21684 if (!cpumask_equal(mm_cpumask(current->mm),
21685 cpumask_of(smp_processor_id())))
21686 smp_call_function(flush_ldt, current->mm, 1);
21687 preempt_enable();
21688 #else
21689- load_LDT(pc);
21690+ load_LDT_nolock(pc);
21691 #endif
21692 }
21693 if (oldsize) {
21694@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21695 return err;
21696
21697 for (i = 0; i < old->size; i++)
21698- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21699+ write_ldt_entry(new->ldt, i, old->ldt + i);
21700 return 0;
21701 }
21702
21703@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21704 retval = copy_ldt(&mm->context, &old_mm->context);
21705 mutex_unlock(&old_mm->context.lock);
21706 }
21707+
21708+ if (tsk == current) {
21709+ mm->context.vdso = 0;
21710+
21711+#ifdef CONFIG_X86_32
21712+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21713+ mm->context.user_cs_base = 0UL;
21714+ mm->context.user_cs_limit = ~0UL;
21715+
21716+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21717+ cpus_clear(mm->context.cpu_user_cs_mask);
21718+#endif
21719+
21720+#endif
21721+#endif
21722+
21723+ }
21724+
21725 return retval;
21726 }
21727
21728@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21729 }
21730 }
21731
21732+#ifdef CONFIG_PAX_SEGMEXEC
21733+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21734+ error = -EINVAL;
21735+ goto out_unlock;
21736+ }
21737+#endif
21738+
21739 fill_ldt(&ldt, &ldt_info);
21740 if (oldmode)
21741 ldt.avl = 0;
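
The copy-loop change from 'old->ldt + i * LDT_ENTRY_SIZE' to 'old->ldt + i'
only makes sense if the patch also retypes the ldt field from void * to a
typed descriptor pointer (an assumption from context): typed pointer
arithmetic already scales by the element size, so the explicit multiply
would index eight times too far. Illustrative types:

struct desc_sketch { unsigned int a, b; };	/* 8 bytes, like LDT_ENTRY_SIZE */

static void copy_ldt_entries(struct desc_sketch *dst, const struct desc_sketch *src, int n)
{
	for (int i = 0; i < n; i++)
		dst[i] = src[i];		/* dst + i scales by sizeof(*dst) */
}
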
21742diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21743index 5b19e4d..6476a76 100644
21744--- a/arch/x86/kernel/machine_kexec_32.c
21745+++ b/arch/x86/kernel/machine_kexec_32.c
21746@@ -26,7 +26,7 @@
21747 #include <asm/cacheflush.h>
21748 #include <asm/debugreg.h>
21749
21750-static void set_idt(void *newidt, __u16 limit)
21751+static void set_idt(struct desc_struct *newidt, __u16 limit)
21752 {
21753 struct desc_ptr curidt;
21754
21755@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21756 }
21757
21758
21759-static void set_gdt(void *newgdt, __u16 limit)
21760+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21761 {
21762 struct desc_ptr curgdt;
21763
21764@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21765 }
21766
21767 control_page = page_address(image->control_code_page);
21768- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21769+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21770
21771 relocate_kernel_ptr = control_page;
21772 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21773diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21774index 3a04b22..1d2eb09 100644
21775--- a/arch/x86/kernel/microcode_core.c
21776+++ b/arch/x86/kernel/microcode_core.c
21777@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21778 return NOTIFY_OK;
21779 }
21780
21781-static struct notifier_block __refdata mc_cpu_notifier = {
21782+static struct notifier_block mc_cpu_notifier = {
21783 .notifier_call = mc_cpu_callback,
21784 };
21785
21786diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21787index 3544aed..01ddc1c 100644
21788--- a/arch/x86/kernel/microcode_intel.c
21789+++ b/arch/x86/kernel/microcode_intel.c
21790@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21791
21792 static int get_ucode_user(void *to, const void *from, size_t n)
21793 {
21794- return copy_from_user(to, from, n);
21795+ return copy_from_user(to, (const void __force_user *)from, n);
21796 }
21797
21798 static enum ucode_state
21799 request_microcode_user(int cpu, const void __user *buf, size_t size)
21800 {
21801- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21802+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21803 }
21804
21805 static void microcode_fini_cpu(int cpu)
21806diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21807index 216a4d7..228255a 100644
21808--- a/arch/x86/kernel/module.c
21809+++ b/arch/x86/kernel/module.c
21810@@ -43,15 +43,60 @@ do { \
21811 } while (0)
21812 #endif
21813
21814-void *module_alloc(unsigned long size)
21815+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21816 {
21817- if (PAGE_ALIGN(size) > MODULES_LEN)
21818+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21819 return NULL;
21820 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21821- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21822+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21823 -1, __builtin_return_address(0));
21824 }
21825
21826+void *module_alloc(unsigned long size)
21827+{
21828+
21829+#ifdef CONFIG_PAX_KERNEXEC
21830+ return __module_alloc(size, PAGE_KERNEL);
21831+#else
21832+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21833+#endif
21834+
21835+}
21836+
21837+#ifdef CONFIG_PAX_KERNEXEC
21838+#ifdef CONFIG_X86_32
21839+void *module_alloc_exec(unsigned long size)
21840+{
21841+ struct vm_struct *area;
21842+
21843+ if (size == 0)
21844+ return NULL;
21845+
21846+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21847+ return area ? area->addr : NULL;
21848+}
21849+EXPORT_SYMBOL(module_alloc_exec);
21850+
21851+void module_free_exec(struct module *mod, void *module_region)
21852+{
21853+ vunmap(module_region);
21854+}
21855+EXPORT_SYMBOL(module_free_exec);
21856+#else
21857+void module_free_exec(struct module *mod, void *module_region)
21858+{
21859+ module_free(mod, module_region);
21860+}
21861+EXPORT_SYMBOL(module_free_exec);
21862+
21863+void *module_alloc_exec(unsigned long size)
21864+{
21865+ return __module_alloc(size, PAGE_KERNEL_RX);
21866+}
21867+EXPORT_SYMBOL(module_alloc_exec);
21868+#endif
21869+#endif
21870+
21871 #ifdef CONFIG_X86_32
21872 int apply_relocate(Elf32_Shdr *sechdrs,
21873 const char *strtab,
21874@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21875 unsigned int i;
21876 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21877 Elf32_Sym *sym;
21878- uint32_t *location;
21879+ uint32_t *plocation, location;
21880
21881 DEBUGP("Applying relocate section %u to %u\n",
21882 relsec, sechdrs[relsec].sh_info);
21883 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21884 /* This is where to make the change */
21885- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21886- + rel[i].r_offset;
21887+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21888+ location = (uint32_t)plocation;
21889+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21890+ plocation = ktla_ktva((void *)plocation);
21891 /* This is the symbol it is referring to. Note that all
21892 undefined symbols have been resolved. */
21893 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21894@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21895 switch (ELF32_R_TYPE(rel[i].r_info)) {
21896 case R_386_32:
21897 /* We add the value into the location given */
21898- *location += sym->st_value;
21899+ pax_open_kernel();
21900+ *plocation += sym->st_value;
21901+ pax_close_kernel();
21902 break;
21903 case R_386_PC32:
21904 /* Add the value, subtract its position */
21905- *location += sym->st_value - (uint32_t)location;
21906+ pax_open_kernel();
21907+ *plocation += sym->st_value - location;
21908+ pax_close_kernel();
21909 break;
21910 default:
21911 pr_err("%s: Unknown relocation: %u\n",
21912@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21913 case R_X86_64_NONE:
21914 break;
21915 case R_X86_64_64:
21916+ pax_open_kernel();
21917 *(u64 *)loc = val;
21918+ pax_close_kernel();
21919 break;
21920 case R_X86_64_32:
21921+ pax_open_kernel();
21922 *(u32 *)loc = val;
21923+ pax_close_kernel();
21924 if (val != *(u32 *)loc)
21925 goto overflow;
21926 break;
21927 case R_X86_64_32S:
21928+ pax_open_kernel();
21929 *(s32 *)loc = val;
21930+ pax_close_kernel();
21931 if ((s64)val != *(s32 *)loc)
21932 goto overflow;
21933 break;
21934 case R_X86_64_PC32:
21935 val -= (u64)loc;
21936+ pax_open_kernel();
21937 *(u32 *)loc = val;
21938+ pax_close_kernel();
21939+
21940 #if 0
21941 if ((s64)val != *(s32 *)loc)
21942 goto overflow;
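
All of the relocation writes above are bracketed by pax_open_kernel() and
pax_close_kernel() because under KERNEXEC module text is mapped read-only:
module_alloc() now hands back PAGE_KERNEL mappings, with the RX mapping
coming from module_alloc_exec(). A hedged reconstruction of the open/close
idea on x86, assuming the usual CR0.WP approach; the patch's own
native_pax_open_kernel() is defined elsewhere and may differ in detail:

#include <linux/compiler.h>
#include <linux/preempt.h>
#include <asm/processor-flags.h>
#include <asm/special_insns.h>

static inline unsigned long pax_open_kernel_sketch(void)
{
        unsigned long cr0;

        preempt_disable();              /* the matching close re-enables it */
        barrier();
        cr0 = read_cr0() ^ X86_CR0_WP;  /* assuming WP was set, this clears it */
        write_cr0(cr0);                 /* ring 0 may now write through RO PTEs */
        return cr0 ^ X86_CR0_WP;        /* original CR0, restored on close */
}
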
21943diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21944index 4929502..686c291 100644
21945--- a/arch/x86/kernel/msr.c
21946+++ b/arch/x86/kernel/msr.c
21947@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21948 return notifier_from_errno(err);
21949 }
21950
21951-static struct notifier_block __refdata msr_class_cpu_notifier = {
21952+static struct notifier_block msr_class_cpu_notifier = {
21953 .notifier_call = msr_class_cpu_callback,
21954 };
21955
21956diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21957index f84f5c5..f404e81 100644
21958--- a/arch/x86/kernel/nmi.c
21959+++ b/arch/x86/kernel/nmi.c
21960@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21961 return handled;
21962 }
21963
21964-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21965+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21966 {
21967 struct nmi_desc *desc = nmi_to_desc(type);
21968 unsigned long flags;
21969@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21970 * event confuses some handlers (kdump uses this flag)
21971 */
21972 if (action->flags & NMI_FLAG_FIRST)
21973- list_add_rcu(&action->list, &desc->head);
21974+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21975 else
21976- list_add_tail_rcu(&action->list, &desc->head);
21977+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21978
21979 spin_unlock_irqrestore(&desc->lock, flags);
21980 return 0;
21981@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21982 if (!strcmp(n->name, name)) {
21983 WARN(in_nmi(),
21984 "Trying to free NMI (%s) from NMI context!\n", n->name);
21985- list_del_rcu(&n->list);
21986+ pax_list_del_rcu((struct list_head *)&n->list);
21987 break;
21988 }
21989 }
21990@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21991 dotraplinkage notrace __kprobes void
21992 do_nmi(struct pt_regs *regs, long error_code)
21993 {
21994+
21995+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21996+ if (!user_mode(regs)) {
21997+ unsigned long cs = regs->cs & 0xFFFF;
21998+ unsigned long ip = ktva_ktla(regs->ip);
21999+
22000+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
22001+ regs->ip = ip;
22002+ }
22003+#endif
22004+
22005 nmi_nesting_preprocess(regs);
22006
22007 nmi_enter();
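
struct nmiaction becomes const, read-only data in this patch family, so the
plain list primitives can no longer write the node's link pointers. The
pax_list_*() calls above wrap the same primitives in the KERNEXEC open/close
pair; a sketch of the likely shape, assuming pax_open_kernel() and
pax_close_kernel() from the patch:

#include <linux/rculist.h>

static inline void pax_list_add_rcu_sketch(struct list_head *new,
                                           struct list_head *head)
{
        pax_open_kernel();              /* the node lives in read-only data */
        list_add_rcu(new, head);
        pax_close_kernel();
}
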
22008diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
22009index 6d9582e..f746287 100644
22010--- a/arch/x86/kernel/nmi_selftest.c
22011+++ b/arch/x86/kernel/nmi_selftest.c
22012@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
22013 {
22014 /* trap all the unknown NMIs we may generate */
22015 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
22016- __initdata);
22017+ __initconst);
22018 }
22019
22020 static void __init cleanup_nmi_testsuite(void)
22021@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
22022 unsigned long timeout;
22023
22024 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
22025- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
22026+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
22027 nmi_fail = FAILURE;
22028 return;
22029 }
22030diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
22031index 676b8c7..870ba04 100644
22032--- a/arch/x86/kernel/paravirt-spinlocks.c
22033+++ b/arch/x86/kernel/paravirt-spinlocks.c
22034@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
22035 arch_spin_lock(lock);
22036 }
22037
22038-struct pv_lock_ops pv_lock_ops = {
22039+struct pv_lock_ops pv_lock_ops __read_only = {
22040 #ifdef CONFIG_SMP
22041 .spin_is_locked = __ticket_spin_is_locked,
22042 .spin_is_contended = __ticket_spin_is_contended,
22043diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
22044index 17fff18..5cfa0f4 100644
22045--- a/arch/x86/kernel/paravirt.c
22046+++ b/arch/x86/kernel/paravirt.c
22047@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
22048 {
22049 return x;
22050 }
22051+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22052+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
22053+#endif
22054
22055 void __init default_banner(void)
22056 {
22057@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
22058 if (opfunc == NULL)
22059 /* If there's no function, patch it with a ud2a (BUG) */
22060 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
22061- else if (opfunc == _paravirt_nop)
22062+ else if (opfunc == (void *)_paravirt_nop)
22063 /* If the operation is a nop, then nop the callsite */
22064 ret = paravirt_patch_nop();
22065
22066 /* identity functions just return their single argument */
22067- else if (opfunc == _paravirt_ident_32)
22068+ else if (opfunc == (void *)_paravirt_ident_32)
22069 ret = paravirt_patch_ident_32(insnbuf, len);
22070- else if (opfunc == _paravirt_ident_64)
22071+ else if (opfunc == (void *)_paravirt_ident_64)
22072 ret = paravirt_patch_ident_64(insnbuf, len);
22073+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22074+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
22075+ ret = paravirt_patch_ident_64(insnbuf, len);
22076+#endif
22077
22078 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
22079 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
22080@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
22081 if (insn_len > len || start == NULL)
22082 insn_len = len;
22083 else
22084- memcpy(insnbuf, start, insn_len);
22085+ memcpy(insnbuf, ktla_ktva(start), insn_len);
22086
22087 return insn_len;
22088 }
22089@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
22090 preempt_enable();
22091 }
22092
22093-struct pv_info pv_info = {
22094+struct pv_info pv_info __read_only = {
22095 .name = "bare hardware",
22096 .paravirt_enabled = 0,
22097 .kernel_rpl = 0,
22098@@ -315,16 +322,16 @@ struct pv_info pv_info = {
22099 #endif
22100 };
22101
22102-struct pv_init_ops pv_init_ops = {
22103+struct pv_init_ops pv_init_ops __read_only = {
22104 .patch = native_patch,
22105 };
22106
22107-struct pv_time_ops pv_time_ops = {
22108+struct pv_time_ops pv_time_ops __read_only = {
22109 .sched_clock = native_sched_clock,
22110 .steal_clock = native_steal_clock,
22111 };
22112
22113-struct pv_irq_ops pv_irq_ops = {
22114+struct pv_irq_ops pv_irq_ops __read_only = {
22115 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
22116 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
22117 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
22118@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
22119 #endif
22120 };
22121
22122-struct pv_cpu_ops pv_cpu_ops = {
22123+struct pv_cpu_ops pv_cpu_ops __read_only = {
22124 .cpuid = native_cpuid,
22125 .get_debugreg = native_get_debugreg,
22126 .set_debugreg = native_set_debugreg,
22127@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
22128 .end_context_switch = paravirt_nop,
22129 };
22130
22131-struct pv_apic_ops pv_apic_ops = {
22132+struct pv_apic_ops pv_apic_ops __read_only = {
22133 #ifdef CONFIG_X86_LOCAL_APIC
22134 .startup_ipi_hook = paravirt_nop,
22135 #endif
22136 };
22137
22138-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
22139+#ifdef CONFIG_X86_32
22140+#ifdef CONFIG_X86_PAE
22141+/* 64-bit pagetable entries */
22142+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
22143+#else
22144 /* 32-bit pagetable entries */
22145 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
22146+#endif
22147 #else
22148 /* 64-bit pagetable entries */
22149 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
22150 #endif
22151
22152-struct pv_mmu_ops pv_mmu_ops = {
22153+struct pv_mmu_ops pv_mmu_ops __read_only = {
22154
22155 .read_cr2 = native_read_cr2,
22156 .write_cr2 = native_write_cr2,
22157@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
22158 .make_pud = PTE_IDENT,
22159
22160 .set_pgd = native_set_pgd,
22161+ .set_pgd_batched = native_set_pgd_batched,
22162 #endif
22163 #endif /* PAGETABLE_LEVELS >= 3 */
22164
22165@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
22166 },
22167
22168 .set_fixmap = native_set_fixmap,
22169+
22170+#ifdef CONFIG_PAX_KERNEXEC
22171+ .pax_open_kernel = native_pax_open_kernel,
22172+ .pax_close_kernel = native_pax_close_kernel,
22173+#endif
22174+
22175 };
22176
22177 EXPORT_SYMBOL_GPL(pv_time_ops);
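
Marking every pv_*_ops structure __read_only moves it out of writable .data,
so a kernel-mode write primitive can no longer swap in hooks after boot.
Roughly what the annotation expands to in this patch family; the exact
definition lives in the patched include/linux/compiler.h, so treat this as a
sketch:

#ifdef CONFIG_PAX_KERNEXEC
#define __read_only __attribute__((__section__(".data..read_only")))
#else
#define __read_only __read_mostly
#endif
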
22178diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
22179index 35ccf75..7a15747 100644
22180--- a/arch/x86/kernel/pci-iommu_table.c
22181+++ b/arch/x86/kernel/pci-iommu_table.c
22182@@ -2,7 +2,7 @@
22183 #include <asm/iommu_table.h>
22184 #include <linux/string.h>
22185 #include <linux/kallsyms.h>
22186-
22187+#include <linux/sched.h>
22188
22189 #define DEBUG 1
22190
22191diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
22192index 6c483ba..d10ce2f 100644
22193--- a/arch/x86/kernel/pci-swiotlb.c
22194+++ b/arch/x86/kernel/pci-swiotlb.c
22195@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
22196 void *vaddr, dma_addr_t dma_addr,
22197 struct dma_attrs *attrs)
22198 {
22199- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
22200+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
22201 }
22202
22203 static struct dma_map_ops swiotlb_dma_ops = {
22204diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
22205index 2ed787f..f70c9f6 100644
22206--- a/arch/x86/kernel/process.c
22207+++ b/arch/x86/kernel/process.c
22208@@ -36,7 +36,8 @@
22209 * section. Since TSS's are completely CPU-local, we want them
22210 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
22211 */
22212-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
22213+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
22214+EXPORT_SYMBOL(init_tss);
22215
22216 #ifdef CONFIG_X86_64
22217 static DEFINE_PER_CPU(unsigned char, is_idle);
22218@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
22219 task_xstate_cachep =
22220 kmem_cache_create("task_xstate", xstate_size,
22221 __alignof__(union thread_xstate),
22222- SLAB_PANIC | SLAB_NOTRACK, NULL);
22223+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
22224 }
22225
22226 /*
22227@@ -105,7 +106,7 @@ void exit_thread(void)
22228 unsigned long *bp = t->io_bitmap_ptr;
22229
22230 if (bp) {
22231- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
22232+ struct tss_struct *tss = init_tss + get_cpu();
22233
22234 t->io_bitmap_ptr = NULL;
22235 clear_thread_flag(TIF_IO_BITMAP);
22236@@ -136,7 +137,7 @@ void show_regs_common(void)
22237 board = dmi_get_system_info(DMI_BOARD_NAME);
22238
22239 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
22240- current->pid, current->comm, print_tainted(),
22241+ task_pid_nr(current), current->comm, print_tainted(),
22242 init_utsname()->release,
22243 (int)strcspn(init_utsname()->version, " "),
22244 init_utsname()->version,
22245@@ -149,6 +150,9 @@ void flush_thread(void)
22246 {
22247 struct task_struct *tsk = current;
22248
22249+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
22250+ loadsegment(gs, 0);
22251+#endif
22252 flush_ptrace_hw_breakpoint(tsk);
22253 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
22254 drop_init_fpu(tsk);
22255@@ -301,7 +305,7 @@ static void __exit_idle(void)
22256 void exit_idle(void)
22257 {
22258 /* idle loop has pid 0 */
22259- if (current->pid)
22260+ if (task_pid_nr(current))
22261 return;
22262 __exit_idle();
22263 }
22264@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
22265
22266 return ret;
22267 }
22268-void stop_this_cpu(void *dummy)
22269+__noreturn void stop_this_cpu(void *dummy)
22270 {
22271 local_irq_disable();
22272 /*
22273@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
22274 }
22275 early_param("idle", idle_setup);
22276
22277-unsigned long arch_align_stack(unsigned long sp)
22278+#ifdef CONFIG_PAX_RANDKSTACK
22279+void pax_randomize_kstack(struct pt_regs *regs)
22280 {
22281- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
22282- sp -= get_random_int() % 8192;
22283- return sp & ~0xf;
22284-}
22285+ struct thread_struct *thread = &current->thread;
22286+ unsigned long time;
22287
22288-unsigned long arch_randomize_brk(struct mm_struct *mm)
22289-{
22290- unsigned long range_end = mm->brk + 0x02000000;
22291- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
22292-}
22293+ if (!randomize_va_space)
22294+ return;
22295+
22296+ if (v8086_mode(regs))
22297+ return;
22298
22299+ rdtscl(time);
22300+
22301+ /* P4 seems to return a 0 LSB, ignore it */
22302+#ifdef CONFIG_MPENTIUM4
22303+ time &= 0x3EUL;
22304+ time <<= 2;
22305+#elif defined(CONFIG_X86_64)
22306+ time &= 0xFUL;
22307+ time <<= 4;
22308+#else
22309+ time &= 0x1FUL;
22310+ time <<= 3;
22311+#endif
22312+
22313+ thread->sp0 ^= time;
22314+ load_sp0(init_tss + smp_processor_id(), thread);
22315+
22316+#ifdef CONFIG_X86_64
22317+ this_cpu_write(kernel_stack, thread->sp0);
22318+#endif
22319+}
22320+#endif
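
The masks in pax_randomize_kstack() above trade TSC jitter for a bounded
shift of sp0 on every kernel entry. A standalone demo, not kernel code, of
how much entropy each configuration extracts from the timestamp counter:

#include <stdio.h>

int main(void)
{
        unsigned long tsc = 0xdeadbeefUL;               /* stand-in for rdtscl() */
        unsigned long p4  = (tsc & 0x3EUL) << 2;        /* 5 bits, 8-byte steps, 0..248 */
        unsigned long x64 = (tsc & 0x0FUL) << 4;        /* 4 bits, 16-byte steps, 0..240 */
        unsigned long x32 = (tsc & 0x1FUL) << 3;        /* 5 bits, 8-byte steps, 0..248 */

        printf("P4 %lu, x86_64 %lu, i386 %lu\n", p4, x64, x32);
        return 0;
}
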
22321diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22322index b5a8905..d9cacac 100644
22323--- a/arch/x86/kernel/process_32.c
22324+++ b/arch/x86/kernel/process_32.c
22325@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22326 unsigned long thread_saved_pc(struct task_struct *tsk)
22327 {
22328 return ((unsigned long *)tsk->thread.sp)[3];
22329+//XXX return tsk->thread.eip;
22330 }
22331
22332 void __show_regs(struct pt_regs *regs, int all)
22333@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
22334 unsigned long sp;
22335 unsigned short ss, gs;
22336
22337- if (user_mode_vm(regs)) {
22338+ if (user_mode(regs)) {
22339 sp = regs->sp;
22340 ss = regs->ss & 0xffff;
22341- gs = get_user_gs(regs);
22342 } else {
22343 sp = kernel_stack_pointer(regs);
22344 savesegment(ss, ss);
22345- savesegment(gs, gs);
22346 }
22347+ gs = get_user_gs(regs);
22348
22349 show_regs_common();
22350
22351 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
22352 (u16)regs->cs, regs->ip, regs->flags,
22353- smp_processor_id());
22354+ raw_smp_processor_id());
22355 print_symbol("EIP is at %s\n", regs->ip);
22356
22357 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
22358@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
22359 int copy_thread(unsigned long clone_flags, unsigned long sp,
22360 unsigned long arg, struct task_struct *p)
22361 {
22362- struct pt_regs *childregs = task_pt_regs(p);
22363+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
22364 struct task_struct *tsk;
22365 int err;
22366
22367 p->thread.sp = (unsigned long) childregs;
22368 p->thread.sp0 = (unsigned long) (childregs+1);
22369+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22370
22371 if (unlikely(p->flags & PF_KTHREAD)) {
22372 /* kernel thread */
22373 memset(childregs, 0, sizeof(struct pt_regs));
22374 p->thread.ip = (unsigned long) ret_from_kernel_thread;
22375- task_user_gs(p) = __KERNEL_STACK_CANARY;
22376- childregs->ds = __USER_DS;
22377- childregs->es = __USER_DS;
22378+ savesegment(gs, childregs->gs);
22379+ childregs->ds = __KERNEL_DS;
22380+ childregs->es = __KERNEL_DS;
22381 childregs->fs = __KERNEL_PERCPU;
22382 childregs->bx = sp; /* function */
22383 childregs->bp = arg;
22384@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22385 struct thread_struct *prev = &prev_p->thread,
22386 *next = &next_p->thread;
22387 int cpu = smp_processor_id();
22388- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22389+ struct tss_struct *tss = init_tss + cpu;
22390 fpu_switch_t fpu;
22391
22392 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22393@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22394 */
22395 lazy_save_gs(prev->gs);
22396
22397+#ifdef CONFIG_PAX_MEMORY_UDEREF
22398+ __set_fs(task_thread_info(next_p)->addr_limit);
22399+#endif
22400+
22401 /*
22402 * Load the per-thread Thread-Local Storage descriptor.
22403 */
22404@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22405 */
22406 arch_end_context_switch(next_p);
22407
22408+ this_cpu_write(current_task, next_p);
22409+ this_cpu_write(current_tinfo, &next_p->tinfo);
22410+
22411 /*
22412 * Restore %gs if needed (which is common)
22413 */
22414@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22415
22416 switch_fpu_finish(next_p, fpu);
22417
22418- this_cpu_write(current_task, next_p);
22419-
22420 return prev_p;
22421 }
22422
22423@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
22424 } while (count++ < 16);
22425 return 0;
22426 }
22427-
22428diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22429index 6e68a61..955a9a5 100644
22430--- a/arch/x86/kernel/process_64.c
22431+++ b/arch/x86/kernel/process_64.c
22432@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22433 struct pt_regs *childregs;
22434 struct task_struct *me = current;
22435
22436- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22437+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22438 childregs = task_pt_regs(p);
22439 p->thread.sp = (unsigned long) childregs;
22440 p->thread.usersp = me->thread.usersp;
22441+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22442 set_tsk_thread_flag(p, TIF_FORK);
22443 p->fpu_counter = 0;
22444 p->thread.io_bitmap_ptr = NULL;
22445@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22446 struct thread_struct *prev = &prev_p->thread;
22447 struct thread_struct *next = &next_p->thread;
22448 int cpu = smp_processor_id();
22449- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22450+ struct tss_struct *tss = init_tss + cpu;
22451 unsigned fsindex, gsindex;
22452 fpu_switch_t fpu;
22453
22454@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22455 prev->usersp = this_cpu_read(old_rsp);
22456 this_cpu_write(old_rsp, next->usersp);
22457 this_cpu_write(current_task, next_p);
22458+ this_cpu_write(current_tinfo, &next_p->tinfo);
22459
22460- this_cpu_write(kernel_stack,
22461- (unsigned long)task_stack_page(next_p) +
22462- THREAD_SIZE - KERNEL_STACK_OFFSET);
22463+ this_cpu_write(kernel_stack, next->sp0);
22464
22465 /*
22466 * Now maybe reload the debug registers and handle I/O bitmaps
22467@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
22468 if (!p || p == current || p->state == TASK_RUNNING)
22469 return 0;
22470 stack = (unsigned long)task_stack_page(p);
22471- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22472+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22473 return 0;
22474 fp = *(u64 *)(p->thread.sp);
22475 do {
22476- if (fp < (unsigned long)stack ||
22477- fp >= (unsigned long)stack+THREAD_SIZE)
22478+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22479 return 0;
22480 ip = *(u64 *)(fp+8);
22481 if (!in_sched_functions(ip))
22482diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22483index b629bbe..0fa615a 100644
22484--- a/arch/x86/kernel/ptrace.c
22485+++ b/arch/x86/kernel/ptrace.c
22486@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22487 {
22488 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22489 unsigned long sp = (unsigned long)&regs->sp;
22490- struct thread_info *tinfo;
22491
22492- if (context == (sp & ~(THREAD_SIZE - 1)))
22493+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22494 return sp;
22495
22496- tinfo = (struct thread_info *)context;
22497- if (tinfo->previous_esp)
22498- return tinfo->previous_esp;
22499+ sp = *(unsigned long *)context;
22500+ if (sp)
22501+ return sp;
22502
22503 return (unsigned long)regs;
22504 }
22505@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22506 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22507 {
22508 int i;
22509- int dr7 = 0;
22510+ unsigned long dr7 = 0;
22511 struct arch_hw_breakpoint *info;
22512
22513 for (i = 0; i < HBP_NUM; i++) {
22514@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22515 unsigned long addr, unsigned long data)
22516 {
22517 int ret;
22518- unsigned long __user *datap = (unsigned long __user *)data;
22519+ unsigned long __user *datap = (__force unsigned long __user *)data;
22520
22521 switch (request) {
22522 /* read the word at location addr in the USER area. */
22523@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22524 if ((int) addr < 0)
22525 return -EIO;
22526 ret = do_get_thread_area(child, addr,
22527- (struct user_desc __user *)data);
22528+ (__force struct user_desc __user *) data);
22529 break;
22530
22531 case PTRACE_SET_THREAD_AREA:
22532 if ((int) addr < 0)
22533 return -EIO;
22534 ret = do_set_thread_area(child, addr,
22535- (struct user_desc __user *)data, 0);
22536+ (__force struct user_desc __user *) data, 0);
22537 break;
22538 #endif
22539
22540@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22541
22542 #ifdef CONFIG_X86_64
22543
22544-static struct user_regset x86_64_regsets[] __read_mostly = {
22545+static user_regset_no_const x86_64_regsets[] __read_only = {
22546 [REGSET_GENERAL] = {
22547 .core_note_type = NT_PRSTATUS,
22548 .n = sizeof(struct user_regs_struct) / sizeof(long),
22549@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22550 #endif /* CONFIG_X86_64 */
22551
22552 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22553-static struct user_regset x86_32_regsets[] __read_mostly = {
22554+static user_regset_no_const x86_32_regsets[] __read_only = {
22555 [REGSET_GENERAL] = {
22556 .core_note_type = NT_PRSTATUS,
22557 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22558@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22559 */
22560 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22561
22562-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22563+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22564 {
22565 #ifdef CONFIG_X86_64
22566 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22567@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22568 memset(info, 0, sizeof(*info));
22569 info->si_signo = SIGTRAP;
22570 info->si_code = si_code;
22571- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22572+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22573 }
22574
22575 void user_single_step_siginfo(struct task_struct *tsk,
22576@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22577 # define IS_IA32 0
22578 #endif
22579
22580+#ifdef CONFIG_GRKERNSEC_SETXID
22581+extern void gr_delayed_cred_worker(void);
22582+#endif
22583+
22584 /*
22585 * We must return the syscall number to actually look up in the table.
22586 * This can be -1L to skip running any syscall at all.
22587@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22588
22589 user_exit();
22590
22591+#ifdef CONFIG_GRKERNSEC_SETXID
22592+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22593+ gr_delayed_cred_worker();
22594+#endif
22595+
22596 /*
22597 * If we stepped into a sysenter/syscall insn, it trapped in
22598 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22599@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22600 */
22601 user_exit();
22602
22603+#ifdef CONFIG_GRKERNSEC_SETXID
22604+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22605+ gr_delayed_cred_worker();
22606+#endif
22607+
22608 audit_syscall_exit(regs);
22609
22610 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
22611diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22612index 2cb9470..ff1fd80 100644
22613--- a/arch/x86/kernel/pvclock.c
22614+++ b/arch/x86/kernel/pvclock.c
22615@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22616 return pv_tsc_khz;
22617 }
22618
22619-static atomic64_t last_value = ATOMIC64_INIT(0);
22620+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22621
22622 void pvclock_resume(void)
22623 {
22624- atomic64_set(&last_value, 0);
22625+ atomic64_set_unchecked(&last_value, 0);
22626 }
22627
22628 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22629@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22630 * updating at the same time, and one of them could be slightly behind,
22631 * making the assumption that last_value always go forward fail to hold.
22632 */
22633- last = atomic64_read(&last_value);
22634+ last = atomic64_read_unchecked(&last_value);
22635 do {
22636 if (ret < last)
22637 return last;
22638- last = atomic64_cmpxchg(&last_value, last, ret);
22639+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22640 } while (unlikely(last != ret));
22641
22642 return ret;
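
last_value only ever races forward via cmpxchg, so a PAX_REFCOUNT overflow
trap would be noise here; the _unchecked variants opt this one counter out
of the overflow instrumentation. When CONFIG_PAX_REFCOUNT is off, the
unchecked API simply aliases the normal one, along these lines (a sketch of
the fallback, assuming the patch's naming):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic64_t atomic64_unchecked_t;
#define atomic64_read_unchecked(v)              atomic64_read(v)
#define atomic64_set_unchecked(v, i)            atomic64_set((v), (i))
#define atomic64_cmpxchg_unchecked(v, o, n)     atomic64_cmpxchg((v), (o), (n))
#endif
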
22643diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22644index 76fa1e9..abf09ea 100644
22645--- a/arch/x86/kernel/reboot.c
22646+++ b/arch/x86/kernel/reboot.c
22647@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22648 EXPORT_SYMBOL(pm_power_off);
22649
22650 static const struct desc_ptr no_idt = {};
22651-static int reboot_mode;
22652+static unsigned short reboot_mode;
22653 enum reboot_type reboot_type = BOOT_ACPI;
22654 int reboot_force;
22655
22656@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22657
22658 void __noreturn machine_real_restart(unsigned int type)
22659 {
22660+
22661+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22662+ struct desc_struct *gdt;
22663+#endif
22664+
22665 local_irq_disable();
22666
22667 /*
22668@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22669
22670 /* Jump to the identity-mapped low memory code */
22671 #ifdef CONFIG_X86_32
22672- asm volatile("jmpl *%0" : :
22673+
22674+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22675+ gdt = get_cpu_gdt_table(smp_processor_id());
22676+ pax_open_kernel();
22677+#ifdef CONFIG_PAX_MEMORY_UDEREF
22678+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22679+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22680+ loadsegment(ds, __KERNEL_DS);
22681+ loadsegment(es, __KERNEL_DS);
22682+ loadsegment(ss, __KERNEL_DS);
22683+#endif
22684+#ifdef CONFIG_PAX_KERNEXEC
22685+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22686+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22687+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22688+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22689+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22690+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22691+#endif
22692+ pax_close_kernel();
22693+#endif
22694+
22695+ asm volatile("ljmpl *%0" : :
22696 "rm" (real_mode_header->machine_real_restart_asm),
22697 "a" (type));
22698 #else
22699@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22700 * try to force a triple fault and then cycle between hitting the keyboard
22701 * controller and doing that
22702 */
22703-static void native_machine_emergency_restart(void)
22704+static void __noreturn native_machine_emergency_restart(void)
22705 {
22706 int i;
22707 int attempt = 0;
22708@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22709 #endif
22710 }
22711
22712-static void __machine_emergency_restart(int emergency)
22713+static void __noreturn __machine_emergency_restart(int emergency)
22714 {
22715 reboot_emergency = emergency;
22716 machine_ops.emergency_restart();
22717 }
22718
22719-static void native_machine_restart(char *__unused)
22720+static void __noreturn native_machine_restart(char *__unused)
22721 {
22722 pr_notice("machine restart\n");
22723
22724@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22725 __machine_emergency_restart(0);
22726 }
22727
22728-static void native_machine_halt(void)
22729+static void __noreturn native_machine_halt(void)
22730 {
22731 /* Stop other cpus and apics */
22732 machine_shutdown();
22733@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22734 stop_this_cpu(NULL);
22735 }
22736
22737-static void native_machine_power_off(void)
22738+static void __noreturn native_machine_power_off(void)
22739 {
22740 if (pm_power_off) {
22741 if (!reboot_force)
22742@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22743 }
22744 /* A fallback in case there is no PM info available */
22745 tboot_shutdown(TB_SHUTDOWN_HALT);
22746+ unreachable();
22747 }
22748
22749-struct machine_ops machine_ops = {
22750+struct machine_ops machine_ops __read_only = {
22751 .power_off = native_machine_power_off,
22752 .shutdown = native_machine_shutdown,
22753 .emergency_restart = native_machine_emergency_restart,
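
Before the ljmpl into the identity-mapped trampoline, the hunk above undoes
the UDEREF and KERNEXEC descriptor tweaks: KERNEL_DS gets back a writable
flat data type and KERNEL_CS gets base 0 with a page-granular limit of
0xfffff. A standalone check, not kernel code, that those limit fields really
describe a flat 4 GiB segment:

#include <stdio.h>

int main(void)
{
        unsigned long long limit_pages = 0xfffffULL;    /* limit0 = 0xffff, limit = 0xf */

        /* granularity bit g = 1: the limit counts 4 KiB pages */
        printf("%llu bytes\n", (limit_pages + 1) * 4096ULL);    /* 4294967296 */
        return 0;
}
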
22754diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22755index 7a6f3b3..bed145d7 100644
22756--- a/arch/x86/kernel/relocate_kernel_64.S
22757+++ b/arch/x86/kernel/relocate_kernel_64.S
22758@@ -11,6 +11,7 @@
22759 #include <asm/kexec.h>
22760 #include <asm/processor-flags.h>
22761 #include <asm/pgtable_types.h>
22762+#include <asm/alternative-asm.h>
22763
22764 /*
22765 * Must be relocatable PIC code callable as a C function
22766@@ -160,13 +161,14 @@ identity_mapped:
22767 xorq %rbp, %rbp
22768 xorq %r8, %r8
22769 xorq %r9, %r9
22770- xorq %r10, %r9
22771+ xorq %r10, %r10
22772 xorq %r11, %r11
22773 xorq %r12, %r12
22774 xorq %r13, %r13
22775 xorq %r14, %r14
22776 xorq %r15, %r15
22777
22778+ pax_force_retaddr 0, 1
22779 ret
22780
22781 1:
22782diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22783index 8b24289..d37b58b 100644
22784--- a/arch/x86/kernel/setup.c
22785+++ b/arch/x86/kernel/setup.c
22786@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
22787
22788 switch (data->type) {
22789 case SETUP_E820_EXT:
22790- parse_e820_ext(data);
22791+ parse_e820_ext((struct setup_data __force_kernel *)data);
22792 break;
22793 case SETUP_DTB:
22794 add_dtb(pa_data);
22795@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
22796 * area (640->1Mb) as ram even though it is not.
22797 * take them out.
22798 */
22799- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22800+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22801
22802 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22803 }
22804@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
22805
22806 if (!boot_params.hdr.root_flags)
22807 root_mountflags &= ~MS_RDONLY;
22808- init_mm.start_code = (unsigned long) _text;
22809- init_mm.end_code = (unsigned long) _etext;
22810+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22811+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22812 init_mm.end_data = (unsigned long) _edata;
22813 init_mm.brk = _brk_end;
22814
22815- code_resource.start = virt_to_phys(_text);
22816- code_resource.end = virt_to_phys(_etext)-1;
22817- data_resource.start = virt_to_phys(_etext);
22818+ code_resource.start = virt_to_phys(ktla_ktva(_text));
22819+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
22820+ data_resource.start = virt_to_phys(_sdata);
22821 data_resource.end = virt_to_phys(_edata)-1;
22822 bss_resource.start = virt_to_phys(&__bss_start);
22823 bss_resource.end = virt_to_phys(&__bss_stop)-1;
22824diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22825index 5cdff03..80fa283 100644
22826--- a/arch/x86/kernel/setup_percpu.c
22827+++ b/arch/x86/kernel/setup_percpu.c
22828@@ -21,19 +21,17 @@
22829 #include <asm/cpu.h>
22830 #include <asm/stackprotector.h>
22831
22832-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22833+#ifdef CONFIG_SMP
22834+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22835 EXPORT_PER_CPU_SYMBOL(cpu_number);
22836+#endif
22837
22838-#ifdef CONFIG_X86_64
22839 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22840-#else
22841-#define BOOT_PERCPU_OFFSET 0
22842-#endif
22843
22844 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22845 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22846
22847-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22848+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22849 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22850 };
22851 EXPORT_SYMBOL(__per_cpu_offset);
22852@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22853 {
22854 #ifdef CONFIG_NEED_MULTIPLE_NODES
22855 pg_data_t *last = NULL;
22856- unsigned int cpu;
22857+ int cpu;
22858
22859 for_each_possible_cpu(cpu) {
22860 int node = early_cpu_to_node(cpu);
22861@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22862 {
22863 #ifdef CONFIG_X86_32
22864 struct desc_struct gdt;
22865+ unsigned long base = per_cpu_offset(cpu);
22866
22867- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22868- 0x2 | DESCTYPE_S, 0x8);
22869- gdt.s = 1;
22870+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22871+ 0x83 | DESCTYPE_S, 0xC);
22872 write_gdt_entry(get_cpu_gdt_table(cpu),
22873 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22874 #endif
22875@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22876 /* alrighty, percpu areas up and running */
22877 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22878 for_each_possible_cpu(cpu) {
22879+#ifdef CONFIG_CC_STACKPROTECTOR
22880+#ifdef CONFIG_X86_32
22881+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22882+#endif
22883+#endif
22884 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22885 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22886 per_cpu(cpu_number, cpu) = cpu;
22887@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22888 */
22889 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22890 #endif
22891+#ifdef CONFIG_CC_STACKPROTECTOR
22892+#ifdef CONFIG_X86_32
22893+ if (!cpu)
22894+ per_cpu(stack_canary.canary, cpu) = canary;
22895+#endif
22896+#endif
22897 /*
22898 * Up to this point, the boot CPU has been using .init.data
22899 * area. Reload any changed state for the boot CPU.
22900diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22901index d6bf1f3..3ffce5a 100644
22902--- a/arch/x86/kernel/signal.c
22903+++ b/arch/x86/kernel/signal.c
22904@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22905 * Align the stack pointer according to the i386 ABI,
22906 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22907 */
22908- sp = ((sp + 4) & -16ul) - 4;
22909+ sp = ((sp - 12) & -16ul) - 4;
22910 #else /* !CONFIG_X86_32 */
22911 sp = round_down(sp, 16) - 8;
22912 #endif
22913@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22914 }
22915
22916 if (current->mm->context.vdso)
22917- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22918+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22919 else
22920- restorer = &frame->retcode;
22921+ restorer = (void __user *)&frame->retcode;
22922 if (ka->sa.sa_flags & SA_RESTORER)
22923 restorer = ka->sa.sa_restorer;
22924
22925@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22926 * reasons and because gdb uses it as a signature to notice
22927 * signal handler stack frames.
22928 */
22929- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22930+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22931
22932 if (err)
22933 return -EFAULT;
22934@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22935 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22936
22937 /* Set up to return from userspace. */
22938- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22939+ if (current->mm->context.vdso)
22940+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22941+ else
22942+ restorer = (void __user *)&frame->retcode;
22943 if (ka->sa.sa_flags & SA_RESTORER)
22944 restorer = ka->sa.sa_restorer;
22945 put_user_ex(restorer, &frame->pretcode);
22946@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22947 * reasons and because gdb uses it as a signature to notice
22948 * signal handler stack frames.
22949 */
22950- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22951+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22952 } put_user_catch(err);
22953
22954 err |= copy_siginfo_to_user(&frame->info, info);
22955diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22956index 48d2b7d..90d328a 100644
22957--- a/arch/x86/kernel/smp.c
22958+++ b/arch/x86/kernel/smp.c
22959@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22960
22961 __setup("nonmi_ipi", nonmi_ipi_setup);
22962
22963-struct smp_ops smp_ops = {
22964+struct smp_ops smp_ops __read_only = {
22965 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22966 .smp_prepare_cpus = native_smp_prepare_cpus,
22967 .smp_cpus_done = native_smp_cpus_done,
22968diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22969index ed0fe38..87fc692 100644
22970--- a/arch/x86/kernel/smpboot.c
22971+++ b/arch/x86/kernel/smpboot.c
22972@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22973 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22974 (THREAD_SIZE + task_stack_page(idle))) - 1);
22975 per_cpu(current_task, cpu) = idle;
22976+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22977
22978 #ifdef CONFIG_X86_32
22979 /* Stack for startup_32 can be just as for start_secondary onwards */
22980@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22981 #else
22982 clear_tsk_thread_flag(idle, TIF_FORK);
22983 initial_gs = per_cpu_offset(cpu);
22984- per_cpu(kernel_stack, cpu) =
22985- (unsigned long)task_stack_page(idle) -
22986- KERNEL_STACK_OFFSET + THREAD_SIZE;
22987+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22988 #endif
22989+
22990+ pax_open_kernel();
22991 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22992+ pax_close_kernel();
22993+
22994 initial_code = (unsigned long)start_secondary;
22995 stack_start = idle->thread.sp;
22996
22997@@ -908,6 +911,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22998 /* the FPU context is blank, nobody can own it */
22999 __cpu_disable_lazy_restore(cpu);
23000
23001+#ifdef CONFIG_PAX_PER_CPU_PGD
23002+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
23003+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23004+ KERNEL_PGD_PTRS);
23005+#endif
23006+
23010 err = do_boot_cpu(apicid, cpu, tidle);
23011 if (err) {
23012 pr_debug("do_boot_cpu failed %d\n", err);
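
Under PAX_PER_CPU_PGD each CPU runs on its own page-global directory, so the
kernel half of the boot PGD has to be copied into it before the secondary
CPU comes up. clone_pgd_range() is plain mainline code; for reference, its
definition in arch/x86/include/asm/pgtable.h of this era is just an entry
copy:

static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));        /* copy count pgd entries */
}
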
23013diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
23014index 9b4d51d..5d28b58 100644
23015--- a/arch/x86/kernel/step.c
23016+++ b/arch/x86/kernel/step.c
23017@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
23018 struct desc_struct *desc;
23019 unsigned long base;
23020
23021- seg &= ~7UL;
23022+ seg >>= 3;
23023
23024 mutex_lock(&child->mm->context.lock);
23025- if (unlikely((seg >> 3) >= child->mm->context.size))
23026+ if (unlikely(seg >= child->mm->context.size))
23027 addr = -1L; /* bogus selector, access would fault */
23028 else {
23029 desc = child->mm->context.ldt + seg;
23030@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
23031 addr += base;
23032 }
23033 mutex_unlock(&child->mm->context.lock);
23034- }
23035+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
23036+ addr = ktla_ktva(addr);
23037
23038 return addr;
23039 }
23040@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
23041 unsigned char opcode[15];
23042 unsigned long addr = convert_ip_to_linear(child, regs);
23043
23044+ if (addr == -EINVAL)
23045+ return 0;
23046+
23047 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
23048 for (i = 0; i < copied; i++) {
23049 switch (opcode[i]) {
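
The seg >>= 3 change goes hand in hand with mm->context.ldt becoming a
struct desc_struct pointer in this patch (see the copy_ldt() hunk earlier):
the selector's low three bits are TI and RPL, so selector >> 3 is the entry
index, whereas the old selector & ~7 was a byte offset. A standalone
illustration:

#include <assert.h>

int main(void)
{
        unsigned long sel = (7UL << 3) | 4 | 3; /* index 7, TI = 1 (LDT), RPL = 3 */

        assert((sel >> 3) == 7);        /* index into a desc_struct array */
        assert((sel & ~7UL) == 56);     /* byte offset, 7 * 8 */
        return 0;
}
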
23050diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
23051new file mode 100644
23052index 0000000..207bec6
23053--- /dev/null
23054+++ b/arch/x86/kernel/sys_i386_32.c
23055@@ -0,0 +1,250 @@
23056+/*
23057+ * This file contains various random system calls that
23058+ * have a non-standard calling sequence on the Linux/i386
23059+ * platform.
23060+ */
23061+
23062+#include <linux/errno.h>
23063+#include <linux/sched.h>
23064+#include <linux/mm.h>
23065+#include <linux/fs.h>
23066+#include <linux/smp.h>
23067+#include <linux/sem.h>
23068+#include <linux/msg.h>
23069+#include <linux/shm.h>
23070+#include <linux/stat.h>
23071+#include <linux/syscalls.h>
23072+#include <linux/mman.h>
23073+#include <linux/file.h>
23074+#include <linux/utsname.h>
23075+#include <linux/ipc.h>
23076+
23077+#include <linux/uaccess.h>
23078+#include <linux/unistd.h>
23079+
23080+#include <asm/syscalls.h>
23081+
23082+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
23083+{
23084+ unsigned long pax_task_size = TASK_SIZE;
23085+
23086+#ifdef CONFIG_PAX_SEGMEXEC
23087+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
23088+ pax_task_size = SEGMEXEC_TASK_SIZE;
23089+#endif
23090+
23091+ if (flags & MAP_FIXED)
23092+ if (len > pax_task_size || addr > pax_task_size - len)
23093+ return -EINVAL;
23094+
23095+ return 0;
23096+}
23097+
23098+unsigned long
23099+arch_get_unmapped_area(struct file *filp, unsigned long addr,
23100+ unsigned long len, unsigned long pgoff, unsigned long flags)
23101+{
23102+ struct mm_struct *mm = current->mm;
23103+ struct vm_area_struct *vma;
23104+ unsigned long start_addr, pax_task_size = TASK_SIZE;
23105+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23106+
23107+#ifdef CONFIG_PAX_SEGMEXEC
23108+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23109+ pax_task_size = SEGMEXEC_TASK_SIZE;
23110+#endif
23111+
23112+ pax_task_size -= PAGE_SIZE;
23113+
23114+ if (len > pax_task_size)
23115+ return -ENOMEM;
23116+
23117+ if (flags & MAP_FIXED)
23118+ return addr;
23119+
23120+#ifdef CONFIG_PAX_RANDMMAP
23121+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23122+#endif
23123+
23124+ if (addr) {
23125+ addr = PAGE_ALIGN(addr);
23126+ if (pax_task_size - len >= addr) {
23127+ vma = find_vma(mm, addr);
23128+ if (check_heap_stack_gap(vma, addr, len, offset))
23129+ return addr;
23130+ }
23131+ }
23132+ if (len > mm->cached_hole_size) {
23133+ start_addr = addr = mm->free_area_cache;
23134+ } else {
23135+ start_addr = addr = mm->mmap_base;
23136+ mm->cached_hole_size = 0;
23137+ }
23138+
23139+#ifdef CONFIG_PAX_PAGEEXEC
23140+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
23141+ start_addr = 0x00110000UL;
23142+
23143+#ifdef CONFIG_PAX_RANDMMAP
23144+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23145+ start_addr += mm->delta_mmap & 0x03FFF000UL;
23146+#endif
23147+
23148+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
23149+ start_addr = addr = mm->mmap_base;
23150+ else
23151+ addr = start_addr;
23152+ }
23153+#endif
23154+
23155+full_search:
23156+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23157+ /* At this point: (!vma || addr < vma->vm_end). */
23158+ if (pax_task_size - len < addr) {
23159+ /*
23160+ * Start a new search - just in case we missed
23161+ * some holes.
23162+ */
23163+ if (start_addr != mm->mmap_base) {
23164+ start_addr = addr = mm->mmap_base;
23165+ mm->cached_hole_size = 0;
23166+ goto full_search;
23167+ }
23168+ return -ENOMEM;
23169+ }
23170+ if (check_heap_stack_gap(vma, addr, len, offset))
23171+ break;
23172+ if (addr + mm->cached_hole_size < vma->vm_start)
23173+ mm->cached_hole_size = vma->vm_start - addr;
23174+ addr = vma->vm_end;
23175+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
23176+ start_addr = addr = mm->mmap_base;
23177+ mm->cached_hole_size = 0;
23178+ goto full_search;
23179+ }
23180+ }
23181+
23182+ /*
23183+ * Remember the place where we stopped the search:
23184+ */
23185+ mm->free_area_cache = addr + len;
23186+ return addr;
23187+}
23188+
23189+unsigned long
23190+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23191+ const unsigned long len, const unsigned long pgoff,
23192+ const unsigned long flags)
23193+{
23194+ struct vm_area_struct *vma;
23195+ struct mm_struct *mm = current->mm;
23196+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
23197+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23198+
23199+#ifdef CONFIG_PAX_SEGMEXEC
23200+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23201+ pax_task_size = SEGMEXEC_TASK_SIZE;
23202+#endif
23203+
23204+ pax_task_size -= PAGE_SIZE;
23205+
23206+ /* requested length too big for entire address space */
23207+ if (len > pax_task_size)
23208+ return -ENOMEM;
23209+
23210+ if (flags & MAP_FIXED)
23211+ return addr;
23212+
23213+#ifdef CONFIG_PAX_PAGEEXEC
23214+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
23215+ goto bottomup;
23216+#endif
23217+
23218+#ifdef CONFIG_PAX_RANDMMAP
23219+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23220+#endif
23221+
23222+ /* requesting a specific address */
23223+ if (addr) {
23224+ addr = PAGE_ALIGN(addr);
23225+ if (pax_task_size - len >= addr) {
23226+ vma = find_vma(mm, addr);
23227+ if (check_heap_stack_gap(vma, addr, len, offset))
23228+ return addr;
23229+ }
23230+ }
23231+
23232+ /* check if free_area_cache is useful for us */
23233+ if (len <= mm->cached_hole_size) {
23234+ mm->cached_hole_size = 0;
23235+ mm->free_area_cache = mm->mmap_base;
23236+ }
23237+
23238+ /* either no address requested or can't fit in requested address hole */
23239+ addr = mm->free_area_cache;
23240+
23241+ /* make sure it can fit in the remaining address space */
23242+ if (addr > len) {
23243+ vma = find_vma(mm, addr-len);
23244+ if (check_heap_stack_gap(vma, addr - len, len, offset))
23245+ /* remember the address as a hint for next time */
23246+ return (mm->free_area_cache = addr-len);
23247+ }
23248+
23249+ if (mm->mmap_base < len)
23250+ goto bottomup;
23251+
23252+ addr = mm->mmap_base-len;
23253+
23254+ do {
23255+ /*
23256+ * Lookup failure means no vma is above this address,
23257+ * else if new region fits below vma->vm_start,
23258+ * return with success:
23259+ */
23260+ vma = find_vma(mm, addr);
23261+ if (check_heap_stack_gap(vma, addr, len, offset))
23262+ /* remember the address as a hint for next time */
23263+ return (mm->free_area_cache = addr);
23264+
23265+ /* remember the largest hole we saw so far */
23266+ if (addr + mm->cached_hole_size < vma->vm_start)
23267+ mm->cached_hole_size = vma->vm_start - addr;
23268+
23269+ /* try just below the current vma->vm_start */
23270+ addr = skip_heap_stack_gap(vma, len, offset);
23271+ } while (!IS_ERR_VALUE(addr));
23272+
23273+bottomup:
23274+ /*
23275+ * A failed mmap() very likely causes application failure,
23276+ * so fall back to the bottom-up function here. This scenario
23277+ * can happen with large stack limits and large mmap()
23278+ * allocations.
23279+ */
23280+
23281+#ifdef CONFIG_PAX_SEGMEXEC
23282+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23283+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23284+ else
23285+#endif
23286+
23287+ mm->mmap_base = TASK_UNMAPPED_BASE;
23288+
23289+#ifdef CONFIG_PAX_RANDMMAP
23290+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23291+ mm->mmap_base += mm->delta_mmap;
23292+#endif
23293+
23294+ mm->free_area_cache = mm->mmap_base;
23295+ mm->cached_hole_size = ~0UL;
23296+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23297+ /*
23298+ * Restore the topdown base:
23299+ */
23300+ mm->mmap_base = base;
23301+ mm->free_area_cache = base;
23302+ mm->cached_hole_size = ~0UL;
23303+
23304+ return addr;
23305+}
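
Every probe in the two allocators above goes through check_heap_stack_gap()
rather than the bare "addr + len <= vma->vm_start" test, so a hole only
counts if it also clears the gap below a stack-like VMA. The helper itself
is added elsewhere in the patch; a hedged sketch of the contract it
enforces, as an illustration only and not the patch's body:

#include <linux/mm.h>

static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                        unsigned long addr, unsigned long len,
                                        unsigned long offset)
{
        if (!vma)
                return true;                    /* nothing above the hole */
        if (addr + len > vma->vm_start)
                return false;                   /* overlaps the next mapping */
        if (vma->vm_flags & VM_GROWSDOWN)       /* keep clear of the stack */
                return addr + len + offset <= vma->vm_start;
        return true;
}
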
23306diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23307index 97ef74b..57a1882 100644
23308--- a/arch/x86/kernel/sys_x86_64.c
23309+++ b/arch/x86/kernel/sys_x86_64.c
23310@@ -81,8 +81,8 @@ out:
23311 return error;
23312 }
23313
23314-static void find_start_end(unsigned long flags, unsigned long *begin,
23315- unsigned long *end)
23316+static void find_start_end(struct mm_struct *mm, unsigned long flags,
23317+ unsigned long *begin, unsigned long *end)
23318 {
23319 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23320 unsigned long new_begin;
23321@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23322 *begin = new_begin;
23323 }
23324 } else {
23325- *begin = TASK_UNMAPPED_BASE;
23326+ *begin = mm->mmap_base;
23327 *end = TASK_SIZE;
23328 }
23329 }
23330@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23331 struct vm_area_struct *vma;
23332 struct vm_unmapped_area_info info;
23333 unsigned long begin, end;
23334+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23335
23336 if (flags & MAP_FIXED)
23337 return addr;
23338
23339- find_start_end(flags, &begin, &end);
23340+ find_start_end(mm, flags, &begin, &end);
23341
23342 if (len > end)
23343 return -ENOMEM;
23344
23345+#ifdef CONFIG_PAX_RANDMMAP
23346+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23347+#endif
23348+
23349 if (addr) {
23350 addr = PAGE_ALIGN(addr);
23351 vma = find_vma(mm, addr);
23352- if (end - len >= addr &&
23353- (!vma || addr + len <= vma->vm_start))
23354+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23355 return addr;
23356 }
23357
23358@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23359 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
23360 goto bottomup;
23361
23362+#ifdef CONFIG_PAX_RANDMMAP
23363+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23364+#endif
23365+
23366 /* requesting a specific address */
23367 if (addr) {
23368 addr = PAGE_ALIGN(addr);
23369diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
23370index f84fe00..f41d9f1 100644
23371--- a/arch/x86/kernel/tboot.c
23372+++ b/arch/x86/kernel/tboot.c
23373@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
23374
23375 void tboot_shutdown(u32 shutdown_type)
23376 {
23377- void (*shutdown)(void);
23378+ void (* __noreturn shutdown)(void);
23379
23380 if (!tboot_enabled())
23381 return;
23382@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23383
23384 switch_to_tboot_pt();
23385
23386- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23387+ shutdown = (void *)tboot->shutdown_entry;
23388 shutdown();
23389
23390 /* should not reach here */
23391@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23392 return 0;
23393 }
23394
23395-static atomic_t ap_wfs_count;
23396+static atomic_unchecked_t ap_wfs_count;
23397
23398 static int tboot_wait_for_aps(int num_aps)
23399 {
23400@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23401 {
23402 switch (action) {
23403 case CPU_DYING:
23404- atomic_inc(&ap_wfs_count);
23405+ atomic_inc_unchecked(&ap_wfs_count);
23406 if (num_online_cpus() == 1)
23407- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23408+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23409 return NOTIFY_BAD;
23410 break;
23411 }
23412 return NOTIFY_OK;
23413 }
23414
23415-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23416+static struct notifier_block tboot_cpu_notifier =
23417 {
23418 .notifier_call = tboot_cpu_callback,
23419 };
23420@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23421
23422 tboot_create_trampoline();
23423
23424- atomic_set(&ap_wfs_count, 0);
23425+ atomic_set_unchecked(&ap_wfs_count, 0);
23426 register_hotcpu_notifier(&tboot_cpu_notifier);
23427
23428 acpi_os_set_prepare_sleep(&tboot_sleep);
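
ap_wfs_count becomes atomic_unchecked_t because it is a plain event counter: under PAX_REFCOUNT, ordinary atomic_t operations trap on signed overflow, so counters that may legitimately wrap have to opt out through the _unchecked variants. A sketch of what the unchecked side looks like, with illustrative stand-in names (the real definitions live in the PaX atomic headers):

/* Illustrative stand-ins, not the PaX definitions. */
typedef struct { int counter; } demo_atomic_unchecked_t;

static inline void demo_atomic_inc_unchecked(demo_atomic_unchecked_t *v)
{
	/* plain wrapping add: deliberately no overflow-trap instrumentation */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}
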
23429diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23430index 24d3c91..d06b473 100644
23431--- a/arch/x86/kernel/time.c
23432+++ b/arch/x86/kernel/time.c
23433@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23434 {
23435 unsigned long pc = instruction_pointer(regs);
23436
23437- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23438+ if (!user_mode(regs) && in_lock_functions(pc)) {
23439 #ifdef CONFIG_FRAME_POINTER
23440- return *(unsigned long *)(regs->bp + sizeof(long));
23441+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23442 #else
23443 unsigned long *sp =
23444 (unsigned long *)kernel_stack_pointer(regs);
23445@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23446 * or above a saved flags. Eflags has bits 22-31 zero,
23447 * kernel addresses don't.
23448 */
23449+
23450+#ifdef CONFIG_PAX_KERNEXEC
23451+ return ktla_ktva(sp[0]);
23452+#else
23453 if (sp[0] >> 22)
23454 return sp[0];
23455 if (sp[1] >> 22)
23456 return sp[1];
23457 #endif
23458+
23459+#endif
23460 }
23461 return pc;
23462 }
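
profile_pc() now filters saved return addresses through ktla_ktva() because 32-bit KERNEXEC runs kernel text at a different virtual base than its linear-mapping alias, so a raw value pulled off the stack must be rebased before being reported as a text address. The shape of the translation, with a purely hypothetical offset:

/* Hypothetical shift between the linear alias and the executable mapping
 * of kernel text; the real value is fixed by the KERNEXEC layout. */
#define DEMO_TEXT_SHIFT 0x00400000UL

#define demo_ktla_ktva(addr) ((addr) + DEMO_TEXT_SHIFT)	/* linear -> text */
#define demo_ktva_ktla(addr) ((addr) - DEMO_TEXT_SHIFT)	/* text -> linear */
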
23463diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23464index 9d9d2f9..cad418a 100644
23465--- a/arch/x86/kernel/tls.c
23466+++ b/arch/x86/kernel/tls.c
23467@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23468 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23469 return -EINVAL;
23470
23471+#ifdef CONFIG_PAX_SEGMEXEC
23472+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23473+ return -EINVAL;
23474+#endif
23475+
23476 set_tls_desc(p, idx, &info, 1);
23477
23478 return 0;
23479@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23480
23481 if (kbuf)
23482 info = kbuf;
23483- else if (__copy_from_user(infobuf, ubuf, count))
23484+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23485 return -EFAULT;
23486 else
23487 info = infobuf;
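
The regset_tls_set() hunk adds 'count > sizeof infobuf' so that a caller-controlled length can never overrun the on-stack array, the classic fix for an unchecked __copy_from_user() size. The same defensive shape as a self-contained sketch, with memcpy standing in for the uaccess helper:

#include <string.h>
#include <errno.h>

struct user_desc_demo { unsigned int entry_number, base, limit, flags; };

static int copy_descs(struct user_desc_demo *dst, size_t dst_size,
		      const void *src, size_t count)
{
	if (count > dst_size)	/* reject before copying, not after */
		return -EFAULT;
	memcpy(dst, src, count);
	return 0;
}
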
23488diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23489index ecffca1..95c4d13 100644
23490--- a/arch/x86/kernel/traps.c
23491+++ b/arch/x86/kernel/traps.c
23492@@ -68,12 +68,6 @@
23493 #include <asm/setup.h>
23494
23495 asmlinkage int system_call(void);
23496-
23497-/*
23498- * The IDT has to be page-aligned to simplify the Pentium
23499- * F0 0F bug workaround.
23500- */
23501-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23502 #endif
23503
23504 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23505@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23506 }
23507
23508 static int __kprobes
23509-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23510+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23511 struct pt_regs *regs, long error_code)
23512 {
23513 #ifdef CONFIG_X86_32
23514- if (regs->flags & X86_VM_MASK) {
23515+ if (v8086_mode(regs)) {
23516 /*
23517 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23518 * On nmi (interrupt 2), do_trap should not be called.
23519@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23520 return -1;
23521 }
23522 #endif
23523- if (!user_mode(regs)) {
23524+ if (!user_mode_novm(regs)) {
23525 if (!fixup_exception(regs)) {
23526 tsk->thread.error_code = error_code;
23527 tsk->thread.trap_nr = trapnr;
23528+
23529+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23530+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23531+ str = "PAX: suspicious stack segment fault";
23532+#endif
23533+
23534 die(str, regs, error_code);
23535 }
23536+
23537+#ifdef CONFIG_PAX_REFCOUNT
23538+ if (trapnr == 4)
23539+ pax_report_refcount_overflow(regs);
23540+#endif
23541+
23542 return 0;
23543 }
23544
23545@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23546 }
23547
23548 static void __kprobes
23549-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23550+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23551 long error_code, siginfo_t *info)
23552 {
23553 struct task_struct *tsk = current;
23554@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23555 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23556 printk_ratelimit()) {
23557 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23558- tsk->comm, tsk->pid, str,
23559+ tsk->comm, task_pid_nr(tsk), str,
23560 regs->ip, regs->sp, error_code);
23561 print_vma_addr(" in ", regs->ip);
23562 pr_cont("\n");
23563@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23564 conditional_sti(regs);
23565
23566 #ifdef CONFIG_X86_32
23567- if (regs->flags & X86_VM_MASK) {
23568+ if (v8086_mode(regs)) {
23569 local_irq_enable();
23570 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23571 goto exit;
23572@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23573 #endif
23574
23575 tsk = current;
23576- if (!user_mode(regs)) {
23577+ if (!user_mode_novm(regs)) {
23578 if (fixup_exception(regs))
23579 goto exit;
23580
23581 tsk->thread.error_code = error_code;
23582 tsk->thread.trap_nr = X86_TRAP_GP;
23583 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23584- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23585+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23586+
23587+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23588+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23589+ die("PAX: suspicious general protection fault", regs, error_code);
23590+ else
23591+#endif
23592+
23593 die("general protection fault", regs, error_code);
23594+ }
23595 goto exit;
23596 }
23597
23598+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23599+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23600+ struct mm_struct *mm = tsk->mm;
23601+ unsigned long limit;
23602+
23603+ down_write(&mm->mmap_sem);
23604+ limit = mm->context.user_cs_limit;
23605+ if (limit < TASK_SIZE) {
23606+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23607+ up_write(&mm->mmap_sem);
23608+ return;
23609+ }
23610+ up_write(&mm->mmap_sem);
23611+ }
23612+#endif
23613+
23614 tsk->thread.error_code = error_code;
23615 tsk->thread.trap_nr = X86_TRAP_GP;
23616
23617@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23618 /* It's safe to allow irq's after DR6 has been saved */
23619 preempt_conditional_sti(regs);
23620
23621- if (regs->flags & X86_VM_MASK) {
23622+ if (v8086_mode(regs)) {
23623 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23624 X86_TRAP_DB);
23625 preempt_conditional_cli(regs);
23626@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23627 * We already checked v86 mode above, so we can check for kernel mode
23628 * by just checking the CPL of CS.
23629 */
23630- if ((dr6 & DR_STEP) && !user_mode(regs)) {
23631+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23632 tsk->thread.debugreg6 &= ~DR_STEP;
23633 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23634 regs->flags &= ~X86_EFLAGS_TF;
23635@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23636 return;
23637 conditional_sti(regs);
23638
23639- if (!user_mode_vm(regs))
23640+ if (!user_mode(regs))
23641 {
23642 if (!fixup_exception(regs)) {
23643 task->thread.error_code = error_code;
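
Several hunks above swap open-coded 'regs->flags & X86_VM_MASK' tests and user_mode()/user_mode_vm() calls for v8086_mode() and user_mode_novm(). The distinction they encode: on 32-bit x86, "came from user space" has to consider both the CS privilege level and the EFLAGS VM bit. A sketch with stand-in field names:

/* Illustrative stand-in for struct pt_regs. */
struct regs_demo { unsigned long cs, flags; };
#define DEMO_VM_FLAG (1UL << 17)	/* EFLAGS.VM */

static int demo_v8086_mode(const struct regs_demo *r)
{
	return (r->flags & DEMO_VM_FLAG) != 0;
}

static int demo_user_mode(const struct regs_demo *r)
{
	/* CPL 3 in CS, or running in virtual-8086 mode */
	return (r->cs & 3) == 3 || demo_v8086_mode(r);
}
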
23644diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23645index c71025b..b117501 100644
23646--- a/arch/x86/kernel/uprobes.c
23647+++ b/arch/x86/kernel/uprobes.c
23648@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23649 int ret = NOTIFY_DONE;
23650
23651 /* We are only interested in userspace traps */
23652- if (regs && !user_mode_vm(regs))
23653+ if (regs && !user_mode(regs))
23654 return NOTIFY_DONE;
23655
23656 switch (val) {
23657diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23658index b9242ba..50c5edd 100644
23659--- a/arch/x86/kernel/verify_cpu.S
23660+++ b/arch/x86/kernel/verify_cpu.S
23661@@ -20,6 +20,7 @@
23662 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23663 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23664 * arch/x86/kernel/head_32.S: processor startup
23665+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23666 *
23667 * verify_cpu, returns the status of longmode and SSE in register %eax.
23668 * 0: Success 1: Failure
23669diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23670index 1dfe69c..a3df6f6 100644
23671--- a/arch/x86/kernel/vm86_32.c
23672+++ b/arch/x86/kernel/vm86_32.c
23673@@ -43,6 +43,7 @@
23674 #include <linux/ptrace.h>
23675 #include <linux/audit.h>
23676 #include <linux/stddef.h>
23677+#include <linux/grsecurity.h>
23678
23679 #include <asm/uaccess.h>
23680 #include <asm/io.h>
23681@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23682 do_exit(SIGSEGV);
23683 }
23684
23685- tss = &per_cpu(init_tss, get_cpu());
23686+ tss = init_tss + get_cpu();
23687 current->thread.sp0 = current->thread.saved_sp0;
23688 current->thread.sysenter_cs = __KERNEL_CS;
23689 load_sp0(tss, &current->thread);
23690@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
23691 struct task_struct *tsk;
23692 int tmp, ret = -EPERM;
23693
23694+#ifdef CONFIG_GRKERNSEC_VM86
23695+ if (!capable(CAP_SYS_RAWIO)) {
23696+ gr_handle_vm86();
23697+ goto out;
23698+ }
23699+#endif
23700+
23701 tsk = current;
23702 if (tsk->thread.saved_sp0)
23703 goto out;
23704@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
23705 int tmp, ret;
23706 struct vm86plus_struct __user *v86;
23707
23708+#ifdef CONFIG_GRKERNSEC_VM86
23709+ if (!capable(CAP_SYS_RAWIO)) {
23710+ gr_handle_vm86();
23711+ ret = -EPERM;
23712+ goto out;
23713+ }
23714+#endif
23715+
23716 tsk = current;
23717 switch (cmd) {
23718 case VM86_REQUEST_IRQ:
23719@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23720 tsk->thread.saved_fs = info->regs32->fs;
23721 tsk->thread.saved_gs = get_user_gs(info->regs32);
23722
23723- tss = &per_cpu(init_tss, get_cpu());
23724+ tss = init_tss + get_cpu();
23725 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23726 if (cpu_has_sep)
23727 tsk->thread.sysenter_cs = 0;
23728@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23729 goto cannot_handle;
23730 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23731 goto cannot_handle;
23732- intr_ptr = (unsigned long __user *) (i << 2);
23733+ intr_ptr = (__force unsigned long __user *) (i << 2);
23734 if (get_user(segoffs, intr_ptr))
23735 goto cannot_handle;
23736 if ((segoffs >> 16) == BIOSSEG)
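
Both vm86 entry points now share one gate: without CAP_SYS_RAWIO the request is logged through gr_handle_vm86() and refused. The shape of that check in isolation, with a boolean standing in for capable() and the logging hook reduced to a comment:

#include <errno.h>

static int demo_vm86_gate(int has_cap_sys_rawio)
{
	if (!has_cap_sys_rawio) {
		/* gr_handle_vm86() would record the denied attempt here */
		return -EPERM;
	}
	return 0;	/* fall through to the real vm86 setup */
}
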
23737diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23738index 22a1530..8fbaaad 100644
23739--- a/arch/x86/kernel/vmlinux.lds.S
23740+++ b/arch/x86/kernel/vmlinux.lds.S
23741@@ -26,6 +26,13 @@
23742 #include <asm/page_types.h>
23743 #include <asm/cache.h>
23744 #include <asm/boot.h>
23745+#include <asm/segment.h>
23746+
23747+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23748+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23749+#else
23750+#define __KERNEL_TEXT_OFFSET 0
23751+#endif
23752
23753 #undef i386 /* in case the preprocessor is a 32bit one */
23754
23755@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23756
23757 PHDRS {
23758 text PT_LOAD FLAGS(5); /* R_E */
23759+#ifdef CONFIG_X86_32
23760+ module PT_LOAD FLAGS(5); /* R_E */
23761+#endif
23762+#ifdef CONFIG_XEN
23763+ rodata PT_LOAD FLAGS(5); /* R_E */
23764+#else
23765+ rodata PT_LOAD FLAGS(4); /* R__ */
23766+#endif
23767 data PT_LOAD FLAGS(6); /* RW_ */
23768-#ifdef CONFIG_X86_64
23769+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23770 #ifdef CONFIG_SMP
23771 percpu PT_LOAD FLAGS(6); /* RW_ */
23772 #endif
23773+ text.init PT_LOAD FLAGS(5); /* R_E */
23774+ text.exit PT_LOAD FLAGS(5); /* R_E */
23775 init PT_LOAD FLAGS(7); /* RWE */
23776-#endif
23777 note PT_NOTE FLAGS(0); /* ___ */
23778 }
23779
23780 SECTIONS
23781 {
23782 #ifdef CONFIG_X86_32
23783- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23784- phys_startup_32 = startup_32 - LOAD_OFFSET;
23785+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23786 #else
23787- . = __START_KERNEL;
23788- phys_startup_64 = startup_64 - LOAD_OFFSET;
23789+ . = __START_KERNEL;
23790 #endif
23791
23792 /* Text and read-only data */
23793- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23794- _text = .;
23795+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23796 /* bootstrapping code */
23797+#ifdef CONFIG_X86_32
23798+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23799+#else
23800+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23801+#endif
23802+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23803+ _text = .;
23804 HEAD_TEXT
23805 #ifdef CONFIG_X86_32
23806 . = ALIGN(PAGE_SIZE);
23807@@ -108,13 +128,48 @@ SECTIONS
23808 IRQENTRY_TEXT
23809 *(.fixup)
23810 *(.gnu.warning)
23811- /* End of text section */
23812- _etext = .;
23813 } :text = 0x9090
23814
23815- NOTES :text :note
23816+ . += __KERNEL_TEXT_OFFSET;
23817
23818- EXCEPTION_TABLE(16) :text = 0x9090
23819+#ifdef CONFIG_X86_32
23820+ . = ALIGN(PAGE_SIZE);
23821+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23822+
23823+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
23824+ MODULES_EXEC_VADDR = .;
23825+ BYTE(0)
23826+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23827+ . = ALIGN(HPAGE_SIZE) - 1;
23828+ MODULES_EXEC_END = .;
23829+#endif
23830+
23831+ } :module
23832+#endif
23833+
23834+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23835+ /* End of text section */
23836+ BYTE(0)
23837+ _etext = . - __KERNEL_TEXT_OFFSET;
23838+ }
23839+
23840+#ifdef CONFIG_X86_32
23841+ . = ALIGN(PAGE_SIZE);
23842+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23843+ *(.idt)
23844+ . = ALIGN(PAGE_SIZE);
23845+ *(.empty_zero_page)
23846+ *(.initial_pg_fixmap)
23847+ *(.initial_pg_pmd)
23848+ *(.initial_page_table)
23849+ *(.swapper_pg_dir)
23850+ } :rodata
23851+#endif
23852+
23853+ . = ALIGN(PAGE_SIZE);
23854+ NOTES :rodata :note
23855+
23856+ EXCEPTION_TABLE(16) :rodata
23857
23858 #if defined(CONFIG_DEBUG_RODATA)
23859 /* .text should occupy whole number of pages */
23860@@ -126,16 +181,20 @@ SECTIONS
23861
23862 /* Data */
23863 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23864+
23865+#ifdef CONFIG_PAX_KERNEXEC
23866+ . = ALIGN(HPAGE_SIZE);
23867+#else
23868+ . = ALIGN(PAGE_SIZE);
23869+#endif
23870+
23871 /* Start of data section */
23872 _sdata = .;
23873
23874 /* init_task */
23875 INIT_TASK_DATA(THREAD_SIZE)
23876
23877-#ifdef CONFIG_X86_32
23878- /* 32 bit has nosave before _edata */
23879 NOSAVE_DATA
23880-#endif
23881
23882 PAGE_ALIGNED_DATA(PAGE_SIZE)
23883
23884@@ -176,12 +235,19 @@ SECTIONS
23885 #endif /* CONFIG_X86_64 */
23886
23887 /* Init code and data - will be freed after init */
23888- . = ALIGN(PAGE_SIZE);
23889 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23890+ BYTE(0)
23891+
23892+#ifdef CONFIG_PAX_KERNEXEC
23893+ . = ALIGN(HPAGE_SIZE);
23894+#else
23895+ . = ALIGN(PAGE_SIZE);
23896+#endif
23897+
23898 __init_begin = .; /* paired with __init_end */
23899- }
23900+ } :init.begin
23901
23902-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23903+#ifdef CONFIG_SMP
23904 /*
23905 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23906 * output PHDR, so the next output section - .init.text - should
23907@@ -190,12 +256,27 @@ SECTIONS
23908 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23909 #endif
23910
23911- INIT_TEXT_SECTION(PAGE_SIZE)
23912-#ifdef CONFIG_X86_64
23913- :init
23914-#endif
23915+ . = ALIGN(PAGE_SIZE);
23916+ init_begin = .;
23917+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23918+ VMLINUX_SYMBOL(_sinittext) = .;
23919+ INIT_TEXT
23920+ VMLINUX_SYMBOL(_einittext) = .;
23921+ . = ALIGN(PAGE_SIZE);
23922+ } :text.init
23923
23924- INIT_DATA_SECTION(16)
23925+ /*
23926+ * .exit.text is discarded at runtime, not link time, to deal with
23927+ * references from .altinstructions and .eh_frame
23928+ */
23929+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23930+ EXIT_TEXT
23931+ . = ALIGN(16);
23932+ } :text.exit
23933+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23934+
23935+ . = ALIGN(PAGE_SIZE);
23936+ INIT_DATA_SECTION(16) :init
23937
23938 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23939 __x86_cpu_dev_start = .;
23940@@ -257,19 +338,12 @@ SECTIONS
23941 }
23942
23943 . = ALIGN(8);
23944- /*
23945- * .exit.text is discard at runtime, not link time, to deal with
23946- * references from .altinstructions and .eh_frame
23947- */
23948- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23949- EXIT_TEXT
23950- }
23951
23952 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23953 EXIT_DATA
23954 }
23955
23956-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23957+#ifndef CONFIG_SMP
23958 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23959 #endif
23960
23961@@ -288,16 +362,10 @@ SECTIONS
23962 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23963 __smp_locks = .;
23964 *(.smp_locks)
23965- . = ALIGN(PAGE_SIZE);
23966 __smp_locks_end = .;
23967+ . = ALIGN(PAGE_SIZE);
23968 }
23969
23970-#ifdef CONFIG_X86_64
23971- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23972- NOSAVE_DATA
23973- }
23974-#endif
23975-
23976 /* BSS */
23977 . = ALIGN(PAGE_SIZE);
23978 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23979@@ -313,6 +381,7 @@ SECTIONS
23980 __brk_base = .;
23981 . += 64 * 1024; /* 64k alignment slop space */
23982 *(.brk_reservation) /* areas brk users have reserved */
23983+ . = ALIGN(HPAGE_SIZE);
23984 __brk_limit = .;
23985 }
23986
23987@@ -339,13 +408,12 @@ SECTIONS
23988 * for the boot processor.
23989 */
23990 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23991-INIT_PER_CPU(gdt_page);
23992 INIT_PER_CPU(irq_stack_union);
23993
23994 /*
23995 * Build-time check on the image size:
23996 */
23997-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23998+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23999 "kernel image bigger than KERNEL_IMAGE_SIZE");
24000
24001 #ifdef CONFIG_SMP
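
The PHDRS rework above splits the image so that, apart from the init segment discarded after boot, no program header is both writable and executable. A userspace analogue that dumps PT_LOAD permissions for the running process via glibc's dl_iterate_phdr() (illustrative only):

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int show(struct dl_phdr_info *info, size_t size, void *data)
{
	for (int i = 0; i < info->dlpi_phnum; i++) {
		const ElfW(Phdr) *ph = &info->dlpi_phdr[i];
		if (ph->p_type == PT_LOAD)
			printf("%s: LOAD %c%c%c\n",
			       info->dlpi_name[0] ? info->dlpi_name : "main",
			       ph->p_flags & PF_R ? 'R' : '-',
			       ph->p_flags & PF_W ? 'W' : '-',
			       ph->p_flags & PF_X ? 'X' : '-');
	}
	return 0;
}

int main(void)
{
	dl_iterate_phdr(show, NULL);	/* expect no W+X LOAD segments */
	return 0;
}
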
24002diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
24003index 9a907a6..f83f921 100644
24004--- a/arch/x86/kernel/vsyscall_64.c
24005+++ b/arch/x86/kernel/vsyscall_64.c
24006@@ -56,15 +56,13 @@
24007 DEFINE_VVAR(int, vgetcpu_mode);
24008 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
24009
24010-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
24011+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
24012
24013 static int __init vsyscall_setup(char *str)
24014 {
24015 if (str) {
24016 if (!strcmp("emulate", str))
24017 vsyscall_mode = EMULATE;
24018- else if (!strcmp("native", str))
24019- vsyscall_mode = NATIVE;
24020 else if (!strcmp("none", str))
24021 vsyscall_mode = NONE;
24022 else
24023@@ -323,8 +321,7 @@ do_ret:
24024 return true;
24025
24026 sigsegv:
24027- force_sig(SIGSEGV, current);
24028- return true;
24029+ do_group_exit(SIGKILL);
24030 }
24031
24032 /*
24033@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
24034 extern char __vvar_page;
24035 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
24036
24037- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
24038- vsyscall_mode == NATIVE
24039- ? PAGE_KERNEL_VSYSCALL
24040- : PAGE_KERNEL_VVAR);
24041+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
24042 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
24043 (unsigned long)VSYSCALL_START);
24044
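
With the NATIVE option removed, the legacy vsyscall page is always mapped non-executable and calls into it are trapped and emulated; a failed emulation now kills the whole thread group rather than delivering SIGSEGV. A userspace probe of the fixed-address entry (illustrative; x86-64 only, and it will terminate the process on a kernel booted with vsyscall=none):

#include <stdio.h>
#include <sys/time.h>

/* Fixed architectural address of the legacy vsyscall gettimeofday entry. */
#define VSYS_GTOD 0xffffffffff600000UL

int main(void)
{
	int (*gtod)(struct timeval *, void *) =
		(int (*)(struct timeval *, void *))VSYS_GTOD;
	struct timeval tv;

	if (gtod(&tv, NULL) == 0)	/* trapped and emulated by the kernel */
		printf("vsyscall gettimeofday: %ld\n", (long)tv.tv_sec);
	return 0;
}
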
24045diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
24046index 1330dd1..d220b99 100644
24047--- a/arch/x86/kernel/x8664_ksyms_64.c
24048+++ b/arch/x86/kernel/x8664_ksyms_64.c
24049@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
24050 EXPORT_SYMBOL(copy_user_generic_unrolled);
24051 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
24052 EXPORT_SYMBOL(__copy_user_nocache);
24053-EXPORT_SYMBOL(_copy_from_user);
24054-EXPORT_SYMBOL(_copy_to_user);
24055
24056 EXPORT_SYMBOL(copy_page);
24057 EXPORT_SYMBOL(clear_page);
24058diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
24059index 7a3d075..6cb373d 100644
24060--- a/arch/x86/kernel/x86_init.c
24061+++ b/arch/x86/kernel/x86_init.c
24062@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
24063 },
24064 };
24065
24066-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24067+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
24068 .early_percpu_clock_init = x86_init_noop,
24069 .setup_percpu_clockev = setup_secondary_APIC_clock,
24070 };
24071@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24072 static void default_nmi_init(void) { };
24073 static int default_i8042_detect(void) { return 1; };
24074
24075-struct x86_platform_ops x86_platform = {
24076+struct x86_platform_ops x86_platform __read_only = {
24077 .calibrate_tsc = native_calibrate_tsc,
24078 .get_wallclock = mach_get_cmos_time,
24079 .set_wallclock = mach_set_rtc_mmss,
24080@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
24081 };
24082
24083 EXPORT_SYMBOL_GPL(x86_platform);
24084-struct x86_msi_ops x86_msi = {
24085+struct x86_msi_ops x86_msi __read_only = {
24086 .setup_msi_irqs = native_setup_msi_irqs,
24087 .teardown_msi_irq = native_teardown_msi_irq,
24088 .teardown_msi_irqs = default_teardown_msi_irqs,
24089 .restore_msi_irqs = default_restore_msi_irqs,
24090 };
24091
24092-struct x86_io_apic_ops x86_io_apic_ops = {
24093+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
24094 .init = native_io_apic_init_mappings,
24095 .read = native_io_apic_read,
24096 .write = native_io_apic_write,
24097diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
24098index ada87a3..afea76d 100644
24099--- a/arch/x86/kernel/xsave.c
24100+++ b/arch/x86/kernel/xsave.c
24101@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
24102 {
24103 int err;
24104
24105+ buf = (struct xsave_struct __user *)____m(buf);
24106 if (use_xsave())
24107 err = xsave_user(buf);
24108 else if (use_fxsr())
24109@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
24110 */
24111 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
24112 {
24113+ buf = (void __user *)____m(buf);
24114 if (use_xsave()) {
24115 if ((unsigned long)buf % 64 || fx_only) {
24116 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
24117diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
24118index a20ecb5..d0e2194 100644
24119--- a/arch/x86/kvm/cpuid.c
24120+++ b/arch/x86/kvm/cpuid.c
24121@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
24122 struct kvm_cpuid2 *cpuid,
24123 struct kvm_cpuid_entry2 __user *entries)
24124 {
24125- int r;
24126+ int r, i;
24127
24128 r = -E2BIG;
24129 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
24130 goto out;
24131 r = -EFAULT;
24132- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
24133- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24134+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24135 goto out;
24136+ for (i = 0; i < cpuid->nent; ++i) {
24137+ struct kvm_cpuid_entry2 cpuid_entry;
24138+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
24139+ goto out;
24140+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
24141+ }
24142 vcpu->arch.cpuid_nent = cpuid->nent;
24143 kvm_apic_set_version(vcpu);
24144 kvm_x86_ops->cpuid_update(vcpu);
24145@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
24146 struct kvm_cpuid2 *cpuid,
24147 struct kvm_cpuid_entry2 __user *entries)
24148 {
24149- int r;
24150+ int r, i;
24151
24152 r = -E2BIG;
24153 if (cpuid->nent < vcpu->arch.cpuid_nent)
24154 goto out;
24155 r = -EFAULT;
24156- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
24157- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24158+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24159 goto out;
24160+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
24161+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
24162+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
24163+ goto out;
24164+ }
24165 return 0;
24166
24167 out:
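
The cpuid ioctl hunks replace a single copy over 'nent * sizeof(struct kvm_cpuid_entry2)' bytes with one access_ok() range check plus a per-entry bounce copy, so every individual user access has a fixed, compile-time-constant size. The same structure as a sketch, with memcpy standing in for __copy_from_user():

#include <string.h>
#include <errno.h>

struct entry_demo { unsigned int fn, idx, eax, ebx, ecx, edx; };

static int copy_entries(struct entry_demo *dst, unsigned int nent,
			const struct entry_demo *usrc, unsigned int max)
{
	if (nent > max)		/* bound the count first (KVM_MAX_CPUID_ENTRIES) */
		return -E2BIG;
	for (unsigned int i = 0; i < nent; i++) {
		struct entry_demo e;
		/* each copy has a constant size, via a bounce buffer */
		memcpy(&e, &usrc[i], sizeof(e));
		dst[i] = e;
	}
	return 0;
}
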
24168diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
24169index a27e763..54bfe43 100644
24170--- a/arch/x86/kvm/emulate.c
24171+++ b/arch/x86/kvm/emulate.c
24172@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24173
24174 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
24175 do { \
24176+ unsigned long _tmp; \
24177 __asm__ __volatile__ ( \
24178 _PRE_EFLAGS("0", "4", "2") \
24179 _op _suffix " %"_x"3,%1; " \
24180@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24181 /* Raw emulation: instruction has two explicit operands. */
24182 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
24183 do { \
24184- unsigned long _tmp; \
24185- \
24186 switch ((ctxt)->dst.bytes) { \
24187 case 2: \
24188 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
24189@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24190
24191 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
24192 do { \
24193- unsigned long _tmp; \
24194 switch ((ctxt)->dst.bytes) { \
24195 case 1: \
24196 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
24197diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
24198index 9392f52..0e56d77 100644
24199--- a/arch/x86/kvm/lapic.c
24200+++ b/arch/x86/kvm/lapic.c
24201@@ -55,7 +55,7 @@
24202 #define APIC_BUS_CYCLE_NS 1
24203
24204 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
24205-#define apic_debug(fmt, arg...)
24206+#define apic_debug(fmt, arg...) do {} while (0)
24207
24208 #define APIC_LVT_NUM 6
24209 /* 14 is the version for Xeon and Pentium 8.4.8*/
24210diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
24211index 891eb6d..e027900 100644
24212--- a/arch/x86/kvm/paging_tmpl.h
24213+++ b/arch/x86/kvm/paging_tmpl.h
24214@@ -208,7 +208,7 @@ retry_walk:
24215 if (unlikely(kvm_is_error_hva(host_addr)))
24216 goto error;
24217
24218- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
24219+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
24220 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
24221 goto error;
24222 walker->ptep_user[walker->level - 1] = ptep_user;
24223diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
24224index d29d3cd..ec9d522 100644
24225--- a/arch/x86/kvm/svm.c
24226+++ b/arch/x86/kvm/svm.c
24227@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
24228 int cpu = raw_smp_processor_id();
24229
24230 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
24231+
24232+ pax_open_kernel();
24233 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
24234+ pax_close_kernel();
24235+
24236 load_TR_desc();
24237 }
24238
24239@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24240 #endif
24241 #endif
24242
24243+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24244+ __set_fs(current_thread_info()->addr_limit);
24245+#endif
24246+
24247 reload_tss(vcpu);
24248
24249 local_irq_disable();
24250diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24251index 9120ae1..238abc0 100644
24252--- a/arch/x86/kvm/vmx.c
24253+++ b/arch/x86/kvm/vmx.c
24254@@ -1370,7 +1370,11 @@ static void reload_tss(void)
24255 struct desc_struct *descs;
24256
24257 descs = (void *)gdt->address;
24258+
24259+ pax_open_kernel();
24260 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24261+ pax_close_kernel();
24262+
24263 load_TR_desc();
24264 }
24265
24266@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24267 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24268 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24269
24270+#ifdef CONFIG_PAX_PER_CPU_PGD
24271+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24272+#endif
24273+
24274 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24275 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24276 vmx->loaded_vmcs->cpu = cpu;
24277@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
24278 if (!cpu_has_vmx_flexpriority())
24279 flexpriority_enabled = 0;
24280
24281- if (!cpu_has_vmx_tpr_shadow())
24282- kvm_x86_ops->update_cr8_intercept = NULL;
24283+ if (!cpu_has_vmx_tpr_shadow()) {
24284+ pax_open_kernel();
24285+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24286+ pax_close_kernel();
24287+ }
24288
24289 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24290 kvm_disable_largepages();
24291@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
24292
24293 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24294 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24295+
24296+#ifndef CONFIG_PAX_PER_CPU_PGD
24297 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24298+#endif
24299
24300 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24301 #ifdef CONFIG_X86_64
24302@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
24303 native_store_idt(&dt);
24304 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24305
24306- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24307+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24308
24309 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24310 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24311@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24312 "jmp 2f \n\t"
24313 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24314 "2: "
24315+
24316+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24317+ "ljmp %[cs],$3f\n\t"
24318+ "3: "
24319+#endif
24320+
24321 /* Save guest registers, load host registers, keep flags */
24322 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24323 "pop %0 \n\t"
24324@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24325 #endif
24326 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24327 [wordsize]"i"(sizeof(ulong))
24328+
24329+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24330+ ,[cs]"i"(__KERNEL_CS)
24331+#endif
24332+
24333 : "cc", "memory"
24334 #ifdef CONFIG_X86_64
24335 , "rax", "rbx", "rdi", "rsi"
24336@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24337 if (debugctlmsr)
24338 update_debugctlmsr(debugctlmsr);
24339
24340-#ifndef CONFIG_X86_64
24341+#ifdef CONFIG_X86_32
24342 /*
24343 * The sysexit path does not restore ds/es, so we must set them to
24344 * a reasonable value ourselves.
24345@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24346 * may be executed in interrupt context, which saves and restore segments
24347 * around it, nullifying its effect.
24348 */
24349- loadsegment(ds, __USER_DS);
24350- loadsegment(es, __USER_DS);
24351+ loadsegment(ds, __KERNEL_DS);
24352+ loadsegment(es, __KERNEL_DS);
24353+ loadsegment(ss, __KERNEL_DS);
24354+
24355+#ifdef CONFIG_PAX_KERNEXEC
24356+ loadsegment(fs, __KERNEL_PERCPU);
24357+#endif
24358+
24359+#ifdef CONFIG_PAX_MEMORY_UDEREF
24360+ __set_fs(current_thread_info()->addr_limit);
24361+#endif
24362+
24363 #endif
24364
24365 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
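
reload_tss() in both svm.c and vmx.c now brackets the descriptor write with pax_open_kernel()/pax_close_kernel(): KERNEXEC keeps such tables write-protected, and the pair opens a narrow write window around the one sanctioned store. A userspace analogue of that discipline using mprotect():

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "initial");
	mprotect(p, 4096, PROT_READ);			/* steady state: read-only */

	mprotect(p, 4096, PROT_READ | PROT_WRITE);	/* pax_open_kernel() analogue */
	strcpy(p, "patched");				/* the one sanctioned write */
	mprotect(p, 4096, PROT_READ);			/* pax_close_kernel() analogue */

	puts(p);
	return 0;
}
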
24366diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
24367index c243b81..b692af3 100644
24368--- a/arch/x86/kvm/x86.c
24369+++ b/arch/x86/kvm/x86.c
24370@@ -1408,10 +1408,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24371 unsigned long flags, this_tsc_khz;
24372 struct kvm_vcpu_arch *vcpu = &v->arch;
24373 struct kvm_arch *ka = &v->kvm->arch;
24374- void *shared_kaddr;
24375 s64 kernel_ns, max_kernel_ns;
24376 u64 tsc_timestamp, host_tsc;
24377- struct pvclock_vcpu_time_info *guest_hv_clock;
24378+ struct pvclock_vcpu_time_info guest_hv_clock;
24379 u8 pvclock_flags;
24380 bool use_master_clock;
24381
24382@@ -1465,7 +1464,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24383
24384 local_irq_restore(flags);
24385
24386- if (!vcpu->time_page)
24387+ if (!vcpu->pv_time_enabled)
24388 return 0;
24389
24390 /*
24391@@ -1527,12 +1526,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24392 */
24393 vcpu->hv_clock.version += 2;
24394
24395- shared_kaddr = kmap_atomic(vcpu->time_page);
24396-
24397- guest_hv_clock = shared_kaddr + vcpu->time_offset;
24398+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
24399+ &guest_hv_clock, sizeof(guest_hv_clock))))
24400+ return 0;
24401
24402 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
24403- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
24404+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
24405
24406 if (vcpu->pvclock_set_guest_stopped_request) {
24407 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
24408@@ -1545,12 +1544,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24409
24410 vcpu->hv_clock.flags = pvclock_flags;
24411
24412- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
24413- sizeof(vcpu->hv_clock));
24414-
24415- kunmap_atomic(shared_kaddr);
24416-
24417- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
24418+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
24419+ &vcpu->hv_clock,
24420+ sizeof(vcpu->hv_clock));
24421 return 0;
24422 }
24423
24424@@ -1692,8 +1688,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24425 {
24426 struct kvm *kvm = vcpu->kvm;
24427 int lm = is_long_mode(vcpu);
24428- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24429- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24430+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24431+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24432 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24433 : kvm->arch.xen_hvm_config.blob_size_32;
24434 u32 page_num = data & ~PAGE_MASK;
24435@@ -1839,10 +1835,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
24436
24437 static void kvmclock_reset(struct kvm_vcpu *vcpu)
24438 {
24439- if (vcpu->arch.time_page) {
24440- kvm_release_page_dirty(vcpu->arch.time_page);
24441- vcpu->arch.time_page = NULL;
24442- }
24443+ vcpu->arch.pv_time_enabled = false;
24444 }
24445
24446 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
24447@@ -1948,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24448 break;
24449 case MSR_KVM_SYSTEM_TIME_NEW:
24450 case MSR_KVM_SYSTEM_TIME: {
24451+ u64 gpa_offset;
24452 kvmclock_reset(vcpu);
24453
24454 vcpu->arch.time = data;
24455@@ -1957,14 +1951,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24456 if (!(data & 1))
24457 break;
24458
24459- /* ...but clean it before doing the actual write */
24460- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
24461+ gpa_offset = data & ~(PAGE_MASK | 1);
24462
24463- vcpu->arch.time_page =
24464- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
24465+ /* Check that the address is 32-byte aligned. */
24466+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
24467+ break;
24468
24469- if (is_error_page(vcpu->arch.time_page))
24470- vcpu->arch.time_page = NULL;
24471+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
24472+ &vcpu->arch.pv_time, data & ~1ULL))
24473+ vcpu->arch.pv_time_enabled = false;
24474+ else
24475+ vcpu->arch.pv_time_enabled = true;
24476
24477 break;
24478 }
24479@@ -2571,6 +2568,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24480 if (n < msr_list.nmsrs)
24481 goto out;
24482 r = -EFAULT;
24483+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24484+ goto out;
24485 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24486 num_msrs_to_save * sizeof(u32)))
24487 goto out;
24488@@ -2700,7 +2699,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
24489 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
24490 struct kvm_interrupt *irq)
24491 {
24492- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
24493+ if (irq->irq >= KVM_NR_INTERRUPTS)
24494 return -EINVAL;
24495 if (irqchip_in_kernel(vcpu->kvm))
24496 return -ENXIO;
24497@@ -2967,7 +2966,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
24498 */
24499 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
24500 {
24501- if (!vcpu->arch.time_page)
24502+ if (!vcpu->arch.pv_time_enabled)
24503 return -EINVAL;
24504 vcpu->arch.pvclock_set_guest_stopped_request = true;
24505 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
24506@@ -5213,7 +5212,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24507 };
24508 #endif
24509
24510-int kvm_arch_init(void *opaque)
24511+int kvm_arch_init(const void *opaque)
24512 {
24513 int r;
24514 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24515@@ -6661,6 +6660,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
24516 goto fail_free_wbinvd_dirty_mask;
24517
24518 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
24519+ vcpu->arch.pv_time_enabled = false;
24520 kvm_async_pf_hash_reset(vcpu);
24521 kvm_pmu_init(vcpu);
24522
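
The MSR_KVM_SYSTEM_TIME path now insists the guest-supplied offset be aligned to sizeof(struct pvclock_vcpu_time_info), 32 bytes, before caching the mapping; for a power-of-two size the single mask test below is equivalent to that alignment check and also keeps the structure from straddling a page. The test in isolation:

#include <assert.h>

/* size must be a power of two for this to be an alignment test */
#define DEMO_ALIGNED(off, size) (((off) & ((size) - 1)) == 0)

int main(void)
{
	assert(DEMO_ALIGNED(0x40, 32));		/* 64 is 32-byte aligned */
	assert(!DEMO_ALIGNED(0x28, 32));	/* 40 is not */
	return 0;
}
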
24523diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24524index df4176c..23ce092 100644
24525--- a/arch/x86/lguest/boot.c
24526+++ b/arch/x86/lguest/boot.c
24527@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24528 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24529 * Launcher to reboot us.
24530 */
24531-static void lguest_restart(char *reason)
24532+static __noreturn void lguest_restart(char *reason)
24533 {
24534 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24535+ BUG();
24536 }
24537
24538 /*G:050
24539diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24540index 00933d5..3a64af9 100644
24541--- a/arch/x86/lib/atomic64_386_32.S
24542+++ b/arch/x86/lib/atomic64_386_32.S
24543@@ -48,6 +48,10 @@ BEGIN(read)
24544 movl (v), %eax
24545 movl 4(v), %edx
24546 RET_ENDP
24547+BEGIN(read_unchecked)
24548+ movl (v), %eax
24549+ movl 4(v), %edx
24550+RET_ENDP
24551 #undef v
24552
24553 #define v %esi
24554@@ -55,6 +59,10 @@ BEGIN(set)
24555 movl %ebx, (v)
24556 movl %ecx, 4(v)
24557 RET_ENDP
24558+BEGIN(set_unchecked)
24559+ movl %ebx, (v)
24560+ movl %ecx, 4(v)
24561+RET_ENDP
24562 #undef v
24563
24564 #define v %esi
24565@@ -70,6 +78,20 @@ RET_ENDP
24566 BEGIN(add)
24567 addl %eax, (v)
24568 adcl %edx, 4(v)
24569+
24570+#ifdef CONFIG_PAX_REFCOUNT
24571+ jno 0f
24572+ subl %eax, (v)
24573+ sbbl %edx, 4(v)
24574+ int $4
24575+0:
24576+ _ASM_EXTABLE(0b, 0b)
24577+#endif
24578+
24579+RET_ENDP
24580+BEGIN(add_unchecked)
24581+ addl %eax, (v)
24582+ adcl %edx, 4(v)
24583 RET_ENDP
24584 #undef v
24585
24586@@ -77,6 +99,24 @@ RET_ENDP
24587 BEGIN(add_return)
24588 addl (v), %eax
24589 adcl 4(v), %edx
24590+
24591+#ifdef CONFIG_PAX_REFCOUNT
24592+ into
24593+1234:
24594+ _ASM_EXTABLE(1234b, 2f)
24595+#endif
24596+
24597+ movl %eax, (v)
24598+ movl %edx, 4(v)
24599+
24600+#ifdef CONFIG_PAX_REFCOUNT
24601+2:
24602+#endif
24603+
24604+RET_ENDP
24605+BEGIN(add_return_unchecked)
24606+ addl (v), %eax
24607+ adcl 4(v), %edx
24608 movl %eax, (v)
24609 movl %edx, 4(v)
24610 RET_ENDP
24611@@ -86,6 +126,20 @@ RET_ENDP
24612 BEGIN(sub)
24613 subl %eax, (v)
24614 sbbl %edx, 4(v)
24615+
24616+#ifdef CONFIG_PAX_REFCOUNT
24617+ jno 0f
24618+ addl %eax, (v)
24619+ adcl %edx, 4(v)
24620+ int $4
24621+0:
24622+ _ASM_EXTABLE(0b, 0b)
24623+#endif
24624+
24625+RET_ENDP
24626+BEGIN(sub_unchecked)
24627+ subl %eax, (v)
24628+ sbbl %edx, 4(v)
24629 RET_ENDP
24630 #undef v
24631
24632@@ -96,6 +150,27 @@ BEGIN(sub_return)
24633 sbbl $0, %edx
24634 addl (v), %eax
24635 adcl 4(v), %edx
24636+
24637+#ifdef CONFIG_PAX_REFCOUNT
24638+ into
24639+1234:
24640+ _ASM_EXTABLE(1234b, 2f)
24641+#endif
24642+
24643+ movl %eax, (v)
24644+ movl %edx, 4(v)
24645+
24646+#ifdef CONFIG_PAX_REFCOUNT
24647+2:
24648+#endif
24649+
24650+RET_ENDP
24651+BEGIN(sub_return_unchecked)
24652+ negl %edx
24653+ negl %eax
24654+ sbbl $0, %edx
24655+ addl (v), %eax
24656+ adcl 4(v), %edx
24657 movl %eax, (v)
24658 movl %edx, 4(v)
24659 RET_ENDP
24660@@ -105,6 +180,20 @@ RET_ENDP
24661 BEGIN(inc)
24662 addl $1, (v)
24663 adcl $0, 4(v)
24664+
24665+#ifdef CONFIG_PAX_REFCOUNT
24666+ jno 0f
24667+ subl $1, (v)
24668+ sbbl $0, 4(v)
24669+ int $4
24670+0:
24671+ _ASM_EXTABLE(0b, 0b)
24672+#endif
24673+
24674+RET_ENDP
24675+BEGIN(inc_unchecked)
24676+ addl $1, (v)
24677+ adcl $0, 4(v)
24678 RET_ENDP
24679 #undef v
24680
24681@@ -114,6 +203,26 @@ BEGIN(inc_return)
24682 movl 4(v), %edx
24683 addl $1, %eax
24684 adcl $0, %edx
24685+
24686+#ifdef CONFIG_PAX_REFCOUNT
24687+ into
24688+1234:
24689+ _ASM_EXTABLE(1234b, 2f)
24690+#endif
24691+
24692+ movl %eax, (v)
24693+ movl %edx, 4(v)
24694+
24695+#ifdef CONFIG_PAX_REFCOUNT
24696+2:
24697+#endif
24698+
24699+RET_ENDP
24700+BEGIN(inc_return_unchecked)
24701+ movl (v), %eax
24702+ movl 4(v), %edx
24703+ addl $1, %eax
24704+ adcl $0, %edx
24705 movl %eax, (v)
24706 movl %edx, 4(v)
24707 RET_ENDP
24708@@ -123,6 +232,20 @@ RET_ENDP
24709 BEGIN(dec)
24710 subl $1, (v)
24711 sbbl $0, 4(v)
24712+
24713+#ifdef CONFIG_PAX_REFCOUNT
24714+ jno 0f
24715+ addl $1, (v)
24716+ adcl $0, 4(v)
24717+ int $4
24718+0:
24719+ _ASM_EXTABLE(0b, 0b)
24720+#endif
24721+
24722+RET_ENDP
24723+BEGIN(dec_unchecked)
24724+ subl $1, (v)
24725+ sbbl $0, 4(v)
24726 RET_ENDP
24727 #undef v
24728
24729@@ -132,6 +255,26 @@ BEGIN(dec_return)
24730 movl 4(v), %edx
24731 subl $1, %eax
24732 sbbl $0, %edx
24733+
24734+#ifdef CONFIG_PAX_REFCOUNT
24735+ into
24736+1234:
24737+ _ASM_EXTABLE(1234b, 2f)
24738+#endif
24739+
24740+ movl %eax, (v)
24741+ movl %edx, 4(v)
24742+
24743+#ifdef CONFIG_PAX_REFCOUNT
24744+2:
24745+#endif
24746+
24747+RET_ENDP
24748+BEGIN(dec_return_unchecked)
24749+ movl (v), %eax
24750+ movl 4(v), %edx
24751+ subl $1, %eax
24752+ sbbl $0, %edx
24753 movl %eax, (v)
24754 movl %edx, 4(v)
24755 RET_ENDP
24756@@ -143,6 +286,13 @@ BEGIN(add_unless)
24757 adcl %edx, %edi
24758 addl (v), %eax
24759 adcl 4(v), %edx
24760+
24761+#ifdef CONFIG_PAX_REFCOUNT
24762+ into
24763+1234:
24764+ _ASM_EXTABLE(1234b, 2f)
24765+#endif
24766+
24767 cmpl %eax, %ecx
24768 je 3f
24769 1:
24770@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24771 1:
24772 addl $1, %eax
24773 adcl $0, %edx
24774+
24775+#ifdef CONFIG_PAX_REFCOUNT
24776+ into
24777+1234:
24778+ _ASM_EXTABLE(1234b, 2f)
24779+#endif
24780+
24781 movl %eax, (v)
24782 movl %edx, 4(v)
24783 movl $1, %eax
24784@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24785 movl 4(v), %edx
24786 subl $1, %eax
24787 sbbl $0, %edx
24788+
24789+#ifdef CONFIG_PAX_REFCOUNT
24790+ into
24791+1234:
24792+ _ASM_EXTABLE(1234b, 1f)
24793+#endif
24794+
24795 js 1f
24796 movl %eax, (v)
24797 movl %edx, 4(v)
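
Every checked operation added above follows one pattern: do the 64-bit arithmetic, trap ('into', or 'jno 0f; int $4') if the signed result overflowed, and undo the partial update in the exception path so the counter saturates instead of wrapping. The same semantics in C, with a compiler builtin standing in for the OF-flag test:

#include <stdio.h>
#include <stdint.h>

static int64_t checked_inc64(int64_t v)
{
	int64_t r;

	if (__builtin_add_overflow(v, 1, &r)) {
		/* the kernel would report and saturate here */
		fprintf(stderr, "refcount overflow detected\n");
		return v;	/* keep the old value instead of wrapping */
	}
	return r;
}

int main(void)
{
	printf("%lld\n", (long long)checked_inc64(INT64_MAX));
	return 0;
}
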
24798diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24799index f5cc9eb..51fa319 100644
24800--- a/arch/x86/lib/atomic64_cx8_32.S
24801+++ b/arch/x86/lib/atomic64_cx8_32.S
24802@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24803 CFI_STARTPROC
24804
24805 read64 %ecx
24806+ pax_force_retaddr
24807 ret
24808 CFI_ENDPROC
24809 ENDPROC(atomic64_read_cx8)
24810
24811+ENTRY(atomic64_read_unchecked_cx8)
24812+ CFI_STARTPROC
24813+
24814+ read64 %ecx
24815+ pax_force_retaddr
24816+ ret
24817+ CFI_ENDPROC
24818+ENDPROC(atomic64_read_unchecked_cx8)
24819+
24820 ENTRY(atomic64_set_cx8)
24821 CFI_STARTPROC
24822
24823@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24824 cmpxchg8b (%esi)
24825 jne 1b
24826
24827+ pax_force_retaddr
24828 ret
24829 CFI_ENDPROC
24830 ENDPROC(atomic64_set_cx8)
24831
24832+ENTRY(atomic64_set_unchecked_cx8)
24833+ CFI_STARTPROC
24834+
24835+1:
24836+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24837+ * are atomic on 586 and newer */
24838+ cmpxchg8b (%esi)
24839+ jne 1b
24840+
24841+ pax_force_retaddr
24842+ ret
24843+ CFI_ENDPROC
24844+ENDPROC(atomic64_set_unchecked_cx8)
24845+
24846 ENTRY(atomic64_xchg_cx8)
24847 CFI_STARTPROC
24848
24849@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24850 cmpxchg8b (%esi)
24851 jne 1b
24852
24853+ pax_force_retaddr
24854 ret
24855 CFI_ENDPROC
24856 ENDPROC(atomic64_xchg_cx8)
24857
24858-.macro addsub_return func ins insc
24859-ENTRY(atomic64_\func\()_return_cx8)
24860+.macro addsub_return func ins insc unchecked=""
24861+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24862 CFI_STARTPROC
24863 SAVE ebp
24864 SAVE ebx
24865@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24866 movl %edx, %ecx
24867 \ins\()l %esi, %ebx
24868 \insc\()l %edi, %ecx
24869+
24870+.ifb \unchecked
24871+#ifdef CONFIG_PAX_REFCOUNT
24872+ into
24873+2:
24874+ _ASM_EXTABLE(2b, 3f)
24875+#endif
24876+.endif
24877+
24878 LOCK_PREFIX
24879 cmpxchg8b (%ebp)
24880 jne 1b
24881-
24882-10:
24883 movl %ebx, %eax
24884 movl %ecx, %edx
24885+
24886+.ifb \unchecked
24887+#ifdef CONFIG_PAX_REFCOUNT
24888+3:
24889+#endif
24890+.endif
24891+
24892 RESTORE edi
24893 RESTORE esi
24894 RESTORE ebx
24895 RESTORE ebp
24896+ pax_force_retaddr
24897 ret
24898 CFI_ENDPROC
24899-ENDPROC(atomic64_\func\()_return_cx8)
24900+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24901 .endm
24902
24903 addsub_return add add adc
24904 addsub_return sub sub sbb
24905+addsub_return add add adc _unchecked
24906+addsub_return sub sub sbb _unchecked
24907
24908-.macro incdec_return func ins insc
24909-ENTRY(atomic64_\func\()_return_cx8)
24910+.macro incdec_return func ins insc unchecked=""
24911+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24912 CFI_STARTPROC
24913 SAVE ebx
24914
24915@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24916 movl %edx, %ecx
24917 \ins\()l $1, %ebx
24918 \insc\()l $0, %ecx
24919+
24920+.ifb \unchecked
24921+#ifdef CONFIG_PAX_REFCOUNT
24922+ into
24923+2:
24924+ _ASM_EXTABLE(2b, 3f)
24925+#endif
24926+.endif
24927+
24928 LOCK_PREFIX
24929 cmpxchg8b (%esi)
24930 jne 1b
24931
24932-10:
24933 movl %ebx, %eax
24934 movl %ecx, %edx
24935+
24936+.ifb \unchecked
24937+#ifdef CONFIG_PAX_REFCOUNT
24938+3:
24939+#endif
24940+.endif
24941+
24942 RESTORE ebx
24943+ pax_force_retaddr
24944 ret
24945 CFI_ENDPROC
24946-ENDPROC(atomic64_\func\()_return_cx8)
24947+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24948 .endm
24949
24950 incdec_return inc add adc
24951 incdec_return dec sub sbb
24952+incdec_return inc add adc _unchecked
24953+incdec_return dec sub sbb _unchecked
24954
24955 ENTRY(atomic64_dec_if_positive_cx8)
24956 CFI_STARTPROC
24957@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24958 movl %edx, %ecx
24959 subl $1, %ebx
24960 sbb $0, %ecx
24961+
24962+#ifdef CONFIG_PAX_REFCOUNT
24963+ into
24964+1234:
24965+ _ASM_EXTABLE(1234b, 2f)
24966+#endif
24967+
24968 js 2f
24969 LOCK_PREFIX
24970 cmpxchg8b (%esi)
24971@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24972 movl %ebx, %eax
24973 movl %ecx, %edx
24974 RESTORE ebx
24975+ pax_force_retaddr
24976 ret
24977 CFI_ENDPROC
24978 ENDPROC(atomic64_dec_if_positive_cx8)
24979@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24980 movl %edx, %ecx
24981 addl %ebp, %ebx
24982 adcl %edi, %ecx
24983+
24984+#ifdef CONFIG_PAX_REFCOUNT
24985+ into
24986+1234:
24987+ _ASM_EXTABLE(1234b, 3f)
24988+#endif
24989+
24990 LOCK_PREFIX
24991 cmpxchg8b (%esi)
24992 jne 1b
24993@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24994 CFI_ADJUST_CFA_OFFSET -8
24995 RESTORE ebx
24996 RESTORE ebp
24997+ pax_force_retaddr
24998 ret
24999 4:
25000 cmpl %edx, 4(%esp)
25001@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
25002 xorl %ecx, %ecx
25003 addl $1, %ebx
25004 adcl %edx, %ecx
25005+
25006+#ifdef CONFIG_PAX_REFCOUNT
25007+ into
25008+1234:
25009+ _ASM_EXTABLE(1234b, 3f)
25010+#endif
25011+
25012 LOCK_PREFIX
25013 cmpxchg8b (%esi)
25014 jne 1b
25015@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
25016 movl $1, %eax
25017 3:
25018 RESTORE ebx
25019+ pax_force_retaddr
25020 ret
25021 CFI_ENDPROC
25022 ENDPROC(atomic64_inc_not_zero_cx8)
25023diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
25024index 2af5df3..62b1a5a 100644
25025--- a/arch/x86/lib/checksum_32.S
25026+++ b/arch/x86/lib/checksum_32.S
25027@@ -29,7 +29,8 @@
25028 #include <asm/dwarf2.h>
25029 #include <asm/errno.h>
25030 #include <asm/asm.h>
25031-
25032+#include <asm/segment.h>
25033+
25034 /*
25035 * computes a partial checksum, e.g. for TCP/UDP fragments
25036 */
25037@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
25038
25039 #define ARGBASE 16
25040 #define FP 12
25041-
25042-ENTRY(csum_partial_copy_generic)
25043+
25044+ENTRY(csum_partial_copy_generic_to_user)
25045 CFI_STARTPROC
25046+
25047+#ifdef CONFIG_PAX_MEMORY_UDEREF
25048+ pushl_cfi %gs
25049+ popl_cfi %es
25050+ jmp csum_partial_copy_generic
25051+#endif
25052+
25053+ENTRY(csum_partial_copy_generic_from_user)
25054+
25055+#ifdef CONFIG_PAX_MEMORY_UDEREF
25056+ pushl_cfi %gs
25057+ popl_cfi %ds
25058+#endif
25059+
25060+ENTRY(csum_partial_copy_generic)
25061 subl $4,%esp
25062 CFI_ADJUST_CFA_OFFSET 4
25063 pushl_cfi %edi
25064@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
25065 jmp 4f
25066 SRC(1: movw (%esi), %bx )
25067 addl $2, %esi
25068-DST( movw %bx, (%edi) )
25069+DST( movw %bx, %es:(%edi) )
25070 addl $2, %edi
25071 addw %bx, %ax
25072 adcl $0, %eax
25073@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
25074 SRC(1: movl (%esi), %ebx )
25075 SRC( movl 4(%esi), %edx )
25076 adcl %ebx, %eax
25077-DST( movl %ebx, (%edi) )
25078+DST( movl %ebx, %es:(%edi) )
25079 adcl %edx, %eax
25080-DST( movl %edx, 4(%edi) )
25081+DST( movl %edx, %es:4(%edi) )
25082
25083 SRC( movl 8(%esi), %ebx )
25084 SRC( movl 12(%esi), %edx )
25085 adcl %ebx, %eax
25086-DST( movl %ebx, 8(%edi) )
25087+DST( movl %ebx, %es:8(%edi) )
25088 adcl %edx, %eax
25089-DST( movl %edx, 12(%edi) )
25090+DST( movl %edx, %es:12(%edi) )
25091
25092 SRC( movl 16(%esi), %ebx )
25093 SRC( movl 20(%esi), %edx )
25094 adcl %ebx, %eax
25095-DST( movl %ebx, 16(%edi) )
25096+DST( movl %ebx, %es:16(%edi) )
25097 adcl %edx, %eax
25098-DST( movl %edx, 20(%edi) )
25099+DST( movl %edx, %es:20(%edi) )
25100
25101 SRC( movl 24(%esi), %ebx )
25102 SRC( movl 28(%esi), %edx )
25103 adcl %ebx, %eax
25104-DST( movl %ebx, 24(%edi) )
25105+DST( movl %ebx, %es:24(%edi) )
25106 adcl %edx, %eax
25107-DST( movl %edx, 28(%edi) )
25108+DST( movl %edx, %es:28(%edi) )
25109
25110 lea 32(%esi), %esi
25111 lea 32(%edi), %edi
25112@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
25113 shrl $2, %edx # This clears CF
25114 SRC(3: movl (%esi), %ebx )
25115 adcl %ebx, %eax
25116-DST( movl %ebx, (%edi) )
25117+DST( movl %ebx, %es:(%edi) )
25118 lea 4(%esi), %esi
25119 lea 4(%edi), %edi
25120 dec %edx
25121@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
25122 jb 5f
25123 SRC( movw (%esi), %cx )
25124 leal 2(%esi), %esi
25125-DST( movw %cx, (%edi) )
25126+DST( movw %cx, %es:(%edi) )
25127 leal 2(%edi), %edi
25128 je 6f
25129 shll $16,%ecx
25130 SRC(5: movb (%esi), %cl )
25131-DST( movb %cl, (%edi) )
25132+DST( movb %cl, %es:(%edi) )
25133 6: addl %ecx, %eax
25134 adcl $0, %eax
25135 7:
25136@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
25137
25138 6001:
25139 movl ARGBASE+20(%esp), %ebx # src_err_ptr
25140- movl $-EFAULT, (%ebx)
25141+ movl $-EFAULT, %ss:(%ebx)
25142
25143 # zero the complete destination - computing the rest
25144 # is too much work
25145@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
25146
25147 6002:
25148 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25149- movl $-EFAULT,(%ebx)
25150+ movl $-EFAULT,%ss:(%ebx)
25151 jmp 5000b
25152
25153 .previous
25154
25155+ pushl_cfi %ss
25156+ popl_cfi %ds
25157+ pushl_cfi %ss
25158+ popl_cfi %es
25159 popl_cfi %ebx
25160 CFI_RESTORE ebx
25161 popl_cfi %esi
25162@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
25163 popl_cfi %ecx # equivalent to addl $4,%esp
25164 ret
25165 CFI_ENDPROC
25166-ENDPROC(csum_partial_copy_generic)
25167+ENDPROC(csum_partial_copy_generic_to_user)
25168
25169 #else
25170
25171 /* Version for PentiumII/PPro */
25172
25173 #define ROUND1(x) \
25174+ nop; nop; nop; \
25175 SRC(movl x(%esi), %ebx ) ; \
25176 addl %ebx, %eax ; \
25177- DST(movl %ebx, x(%edi) ) ;
25178+ DST(movl %ebx, %es:x(%edi)) ;
25179
25180 #define ROUND(x) \
25181+ nop; nop; nop; \
25182 SRC(movl x(%esi), %ebx ) ; \
25183 adcl %ebx, %eax ; \
25184- DST(movl %ebx, x(%edi) ) ;
25185+ DST(movl %ebx, %es:x(%edi)) ;
25186
25187 #define ARGBASE 12
25188-
25189-ENTRY(csum_partial_copy_generic)
25190+
25191+ENTRY(csum_partial_copy_generic_to_user)
25192 CFI_STARTPROC
25193+
25194+#ifdef CONFIG_PAX_MEMORY_UDEREF
25195+ pushl_cfi %gs
25196+ popl_cfi %es
25197+ jmp csum_partial_copy_generic
25198+#endif
25199+
25200+ENTRY(csum_partial_copy_generic_from_user)
25201+
25202+#ifdef CONFIG_PAX_MEMORY_UDEREF
25203+ pushl_cfi %gs
25204+ popl_cfi %ds
25205+#endif
25206+
25207+ENTRY(csum_partial_copy_generic)
25208 pushl_cfi %ebx
25209 CFI_REL_OFFSET ebx, 0
25210 pushl_cfi %edi
25211@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
25212 subl %ebx, %edi
25213 lea -1(%esi),%edx
25214 andl $-32,%edx
25215- lea 3f(%ebx,%ebx), %ebx
25216+ lea 3f(%ebx,%ebx,2), %ebx
25217 testl %esi, %esi
25218 jmp *%ebx
25219 1: addl $64,%esi
25220@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
25221 jb 5f
25222 SRC( movw (%esi), %dx )
25223 leal 2(%esi), %esi
25224-DST( movw %dx, (%edi) )
25225+DST( movw %dx, %es:(%edi) )
25226 leal 2(%edi), %edi
25227 je 6f
25228 shll $16,%edx
25229 5:
25230 SRC( movb (%esi), %dl )
25231-DST( movb %dl, (%edi) )
25232+DST( movb %dl, %es:(%edi) )
25233 6: addl %edx, %eax
25234 adcl $0, %eax
25235 7:
25236 .section .fixup, "ax"
25237 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
25238- movl $-EFAULT, (%ebx)
25239+ movl $-EFAULT, %ss:(%ebx)
25240 # zero the complete destination (computing the rest is too much work)
25241 movl ARGBASE+8(%esp),%edi # dst
25242 movl ARGBASE+12(%esp),%ecx # len
25243@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
25244 rep; stosb
25245 jmp 7b
25246 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25247- movl $-EFAULT, (%ebx)
25248+ movl $-EFAULT, %ss:(%ebx)
25249 jmp 7b
25250 .previous
25251
25252+#ifdef CONFIG_PAX_MEMORY_UDEREF
25253+ pushl_cfi %ss
25254+ popl_cfi %ds
25255+ pushl_cfi %ss
25256+ popl_cfi %es
25257+#endif
25258+
25259 popl_cfi %esi
25260 CFI_RESTORE esi
25261 popl_cfi %edi
25262@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
25263 CFI_RESTORE ebx
25264 ret
25265 CFI_ENDPROC
25266-ENDPROC(csum_partial_copy_generic)
25267+ENDPROC(csum_partial_copy_generic_to_user)
25268
25269 #undef ROUND
25270 #undef ROUND1
25271diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
25272index f2145cf..cea889d 100644
25273--- a/arch/x86/lib/clear_page_64.S
25274+++ b/arch/x86/lib/clear_page_64.S
25275@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
25276 movl $4096/8,%ecx
25277 xorl %eax,%eax
25278 rep stosq
25279+ pax_force_retaddr
25280 ret
25281 CFI_ENDPROC
25282 ENDPROC(clear_page_c)
25283@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
25284 movl $4096,%ecx
25285 xorl %eax,%eax
25286 rep stosb
25287+ pax_force_retaddr
25288 ret
25289 CFI_ENDPROC
25290 ENDPROC(clear_page_c_e)
25291@@ -43,6 +45,7 @@ ENTRY(clear_page)
25292 leaq 64(%rdi),%rdi
25293 jnz .Lloop
25294 nop
25295+ pax_force_retaddr
25296 ret
25297 CFI_ENDPROC
25298 .Lclear_page_end:
25299@@ -58,7 +61,7 @@ ENDPROC(clear_page)
25300
25301 #include <asm/cpufeature.h>
25302
25303- .section .altinstr_replacement,"ax"
25304+ .section .altinstr_replacement,"a"
25305 1: .byte 0xeb /* jmp <disp8> */
25306 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
25307 2: .byte 0xeb /* jmp <disp8> */
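
Two recurring changes begin in clear_page_64.S. First, pax_force_retaddr before each ret is the KERNEXEC return-address mask: it sets bit 63 of the saved return address, which leaves a genuine kernel-text address (already in the upper half) unaffected but turns an overwritten, userland-pointing address non-canonical, so the ret faults instead of transferring control into attacker-mapped memory. Second, .altinstr_replacement drops its flags from "ax" to "a": the replacement bytes are only ever copied over the original instructions by the alternatives patcher, never executed in place, so the section stays allocated but need not be mapped executable. A user-space model of the masking (addresses illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t pax_force_retaddr(uint64_t ra)
{
        return ra | (1ULL << 63);   /* the btsq $63,(%rsp) / or-mask methods */
}

int main(void)
{
        /* kernel text address: unchanged in effect, bit 63 already set */
        printf("%llx\n", (unsigned long long)pax_force_retaddr(0xffffffff81000000ULL));
        /* forged userland address: becomes non-canonical, faults on ret */
        printf("%llx\n", (unsigned long long)pax_force_retaddr(0x00007f1234561000ULL));
        return 0;
}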
25308diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
25309index 1e572c5..2a162cd 100644
25310--- a/arch/x86/lib/cmpxchg16b_emu.S
25311+++ b/arch/x86/lib/cmpxchg16b_emu.S
25312@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
25313
25314 popf
25315 mov $1, %al
25316+ pax_force_retaddr
25317 ret
25318
25319 not_same:
25320 popf
25321 xor %al,%al
25322+ pax_force_retaddr
25323 ret
25324
25325 CFI_ENDPROC
25326diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25327index 176cca6..1166c50 100644
25328--- a/arch/x86/lib/copy_page_64.S
25329+++ b/arch/x86/lib/copy_page_64.S
25330@@ -9,6 +9,7 @@ copy_page_rep:
25331 CFI_STARTPROC
25332 movl $4096/8, %ecx
25333 rep movsq
25334+ pax_force_retaddr
25335 ret
25336 CFI_ENDPROC
25337 ENDPROC(copy_page_rep)
25338@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25339
25340 ENTRY(copy_page)
25341 CFI_STARTPROC
25342- subq $2*8, %rsp
25343- CFI_ADJUST_CFA_OFFSET 2*8
25344+ subq $3*8, %rsp
25345+ CFI_ADJUST_CFA_OFFSET 3*8
25346 movq %rbx, (%rsp)
25347 CFI_REL_OFFSET rbx, 0
25348 movq %r12, 1*8(%rsp)
25349 CFI_REL_OFFSET r12, 1*8
25350+ movq %r13, 2*8(%rsp)
25351+ CFI_REL_OFFSET r13, 2*8
25352
25353 movl $(4096/64)-5, %ecx
25354 .p2align 4
25355@@ -36,7 +39,7 @@ ENTRY(copy_page)
25356 movq 0x8*2(%rsi), %rdx
25357 movq 0x8*3(%rsi), %r8
25358 movq 0x8*4(%rsi), %r9
25359- movq 0x8*5(%rsi), %r10
25360+ movq 0x8*5(%rsi), %r13
25361 movq 0x8*6(%rsi), %r11
25362 movq 0x8*7(%rsi), %r12
25363
25364@@ -47,7 +50,7 @@ ENTRY(copy_page)
25365 movq %rdx, 0x8*2(%rdi)
25366 movq %r8, 0x8*3(%rdi)
25367 movq %r9, 0x8*4(%rdi)
25368- movq %r10, 0x8*5(%rdi)
25369+ movq %r13, 0x8*5(%rdi)
25370 movq %r11, 0x8*6(%rdi)
25371 movq %r12, 0x8*7(%rdi)
25372
25373@@ -66,7 +69,7 @@ ENTRY(copy_page)
25374 movq 0x8*2(%rsi), %rdx
25375 movq 0x8*3(%rsi), %r8
25376 movq 0x8*4(%rsi), %r9
25377- movq 0x8*5(%rsi), %r10
25378+ movq 0x8*5(%rsi), %r13
25379 movq 0x8*6(%rsi), %r11
25380 movq 0x8*7(%rsi), %r12
25381
25382@@ -75,7 +78,7 @@ ENTRY(copy_page)
25383 movq %rdx, 0x8*2(%rdi)
25384 movq %r8, 0x8*3(%rdi)
25385 movq %r9, 0x8*4(%rdi)
25386- movq %r10, 0x8*5(%rdi)
25387+ movq %r13, 0x8*5(%rdi)
25388 movq %r11, 0x8*6(%rdi)
25389 movq %r12, 0x8*7(%rdi)
25390
25391@@ -87,8 +90,11 @@ ENTRY(copy_page)
25392 CFI_RESTORE rbx
25393 movq 1*8(%rsp), %r12
25394 CFI_RESTORE r12
25395- addq $2*8, %rsp
25396- CFI_ADJUST_CFA_OFFSET -2*8
25397+ movq 2*8(%rsp), %r13
25398+ CFI_RESTORE r13
25399+ addq $3*8, %rsp
25400+ CFI_ADJUST_CFA_OFFSET -3*8
25401+ pax_force_retaddr
25402 ret
25403 .Lcopy_page_end:
25404 CFI_ENDPROC
25405@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25406
25407 #include <asm/cpufeature.h>
25408
25409- .section .altinstr_replacement,"ax"
25410+ .section .altinstr_replacement,"a"
25411 1: .byte 0xeb /* jmp <disp8> */
25412 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25413 2:
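
The %r10 -> %r13 substitution in copy_page, and the frame grown by one slot to spill the callee-saved %r13, follow from the same KERNEXEC instrumentation: its 'or' variant keeps the bit-63 mask resident in a reserved general-purpose register (%r10, judging by how systematically these hunks vacate it; pax_force_retaddr 0, 1 in csum-copy_64.S below appears to request a reload of that mask), so hand-written assembly must stop using the register. The later memcpy, memmove, memset and msr-reg hunks make the same swap with caller-saved scratch registers (%rcx, %r9, %r11) where one is free, avoiding any extra spill. A user-space model of such a reservation, using GCC's global register variables:

#include <stdint.h>
#include <stdio.h>

/* reserved for this translation unit: the compiler will neither
 * allocate nor spill it */
register uint64_t kernexec_mask asm("r10");

int main(void)
{
        uint64_t masked;

        kernexec_mask = 1ULL << 63;
        masked = 0x0000000000401000ULL | kernexec_mask;  /* or %r10,(%rsp) analogue */
        /* note: libc is not built with this reservation and may clobber
         * r10 internally, which mirrors why the kernel has to patch its
         * own hand-written assembly rather than trust the convention */
        printf("%llx\n", (unsigned long long)masked);
        return 0;
}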
25414diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25415index a30ca15..d25fab6 100644
25416--- a/arch/x86/lib/copy_user_64.S
25417+++ b/arch/x86/lib/copy_user_64.S
25418@@ -18,6 +18,7 @@
25419 #include <asm/alternative-asm.h>
25420 #include <asm/asm.h>
25421 #include <asm/smap.h>
25422+#include <asm/pgtable.h>
25423
25424 /*
25425 * By placing feature2 after feature1 in altinstructions section, we logically
25426@@ -31,7 +32,7 @@
25427 .byte 0xe9 /* 32bit jump */
25428 .long \orig-1f /* by default jump to orig */
25429 1:
25430- .section .altinstr_replacement,"ax"
25431+ .section .altinstr_replacement,"a"
25432 2: .byte 0xe9 /* near jump with 32bit immediate */
25433 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25434 3: .byte 0xe9 /* near jump with 32bit immediate */
25435@@ -70,47 +71,20 @@
25436 #endif
25437 .endm
25438
25439-/* Standard copy_to_user with segment limit checking */
25440-ENTRY(_copy_to_user)
25441- CFI_STARTPROC
25442- GET_THREAD_INFO(%rax)
25443- movq %rdi,%rcx
25444- addq %rdx,%rcx
25445- jc bad_to_user
25446- cmpq TI_addr_limit(%rax),%rcx
25447- ja bad_to_user
25448- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25449- copy_user_generic_unrolled,copy_user_generic_string, \
25450- copy_user_enhanced_fast_string
25451- CFI_ENDPROC
25452-ENDPROC(_copy_to_user)
25453-
25454-/* Standard copy_from_user with segment limit checking */
25455-ENTRY(_copy_from_user)
25456- CFI_STARTPROC
25457- GET_THREAD_INFO(%rax)
25458- movq %rsi,%rcx
25459- addq %rdx,%rcx
25460- jc bad_from_user
25461- cmpq TI_addr_limit(%rax),%rcx
25462- ja bad_from_user
25463- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25464- copy_user_generic_unrolled,copy_user_generic_string, \
25465- copy_user_enhanced_fast_string
25466- CFI_ENDPROC
25467-ENDPROC(_copy_from_user)
25468-
25469 .section .fixup,"ax"
25470 /* must zero dest */
25471 ENTRY(bad_from_user)
25472 bad_from_user:
25473 CFI_STARTPROC
25474+ testl %edx,%edx
25475+ js bad_to_user
25476 movl %edx,%ecx
25477 xorl %eax,%eax
25478 rep
25479 stosb
25480 bad_to_user:
25481 movl %edx,%eax
25482+ pax_force_retaddr
25483 ret
25484 CFI_ENDPROC
25485 ENDPROC(bad_from_user)
25486@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25487 jz 17f
25488 1: movq (%rsi),%r8
25489 2: movq 1*8(%rsi),%r9
25490-3: movq 2*8(%rsi),%r10
25491+3: movq 2*8(%rsi),%rax
25492 4: movq 3*8(%rsi),%r11
25493 5: movq %r8,(%rdi)
25494 6: movq %r9,1*8(%rdi)
25495-7: movq %r10,2*8(%rdi)
25496+7: movq %rax,2*8(%rdi)
25497 8: movq %r11,3*8(%rdi)
25498 9: movq 4*8(%rsi),%r8
25499 10: movq 5*8(%rsi),%r9
25500-11: movq 6*8(%rsi),%r10
25501+11: movq 6*8(%rsi),%rax
25502 12: movq 7*8(%rsi),%r11
25503 13: movq %r8,4*8(%rdi)
25504 14: movq %r9,5*8(%rdi)
25505-15: movq %r10,6*8(%rdi)
25506+15: movq %rax,6*8(%rdi)
25507 16: movq %r11,7*8(%rdi)
25508 leaq 64(%rsi),%rsi
25509 leaq 64(%rdi),%rdi
25510@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25511 jnz 21b
25512 23: xor %eax,%eax
25513 ASM_CLAC
25514+ pax_force_retaddr
25515 ret
25516
25517 .section .fixup,"ax"
25518@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25519 movsb
25520 4: xorl %eax,%eax
25521 ASM_CLAC
25522+ pax_force_retaddr
25523 ret
25524
25525 .section .fixup,"ax"
25526@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25527 movsb
25528 2: xorl %eax,%eax
25529 ASM_CLAC
25530+ pax_force_retaddr
25531 ret
25532
25533 .section .fixup,"ax"
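
copy_user_64.S loses its _copy_to_user/_copy_from_user stubs (in this patch set the limit checking moves into C, where the size-overflow instrumentation can see it), and the shared fixup gains a guard: testl %edx,%edx; js bad_to_user skips the destination-zeroing rep stosb when the residual count has gone negative, since interpreted as an unsigned count it would otherwise sweep an enormous range of kernel memory with zeroes. A C model of the hardened fixup:

#include <stdio.h>
#include <string.h>

static long bad_from_user(void *dst, long len)
{
        if (len > 0)                    /* asm: testl %edx,%edx; js bad_to_user */
                memset(dst, 0, (size_t)len);
        return len;                     /* bytes not copied */
}

int main(void)
{
        char buf[8] = "junkjunk";

        printf("%ld\n", bad_from_user(buf, (long)sizeof buf)); /* zeroes buf */
        printf("%ld\n", bad_from_user(buf, -1));               /* skips memset */
        return 0;
}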
25534diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25535index 6a4f43c..f5f9e26 100644
25536--- a/arch/x86/lib/copy_user_nocache_64.S
25537+++ b/arch/x86/lib/copy_user_nocache_64.S
25538@@ -8,6 +8,7 @@
25539
25540 #include <linux/linkage.h>
25541 #include <asm/dwarf2.h>
25542+#include <asm/alternative-asm.h>
25543
25544 #define FIX_ALIGNMENT 1
25545
25546@@ -16,6 +17,7 @@
25547 #include <asm/thread_info.h>
25548 #include <asm/asm.h>
25549 #include <asm/smap.h>
25550+#include <asm/pgtable.h>
25551
25552 .macro ALIGN_DESTINATION
25553 #ifdef FIX_ALIGNMENT
25554@@ -49,6 +51,15 @@
25555 */
25556 ENTRY(__copy_user_nocache)
25557 CFI_STARTPROC
25558+
25559+#ifdef CONFIG_PAX_MEMORY_UDEREF
25560+ mov $PAX_USER_SHADOW_BASE,%rcx
25561+ cmp %rcx,%rsi
25562+ jae 1f
25563+ add %rcx,%rsi
25564+1:
25565+#endif
25566+
25567 ASM_STAC
25568 cmpl $8,%edx
25569 jb 20f /* less than 8 bytes, go to byte copy loop */
25570@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25571 jz 17f
25572 1: movq (%rsi),%r8
25573 2: movq 1*8(%rsi),%r9
25574-3: movq 2*8(%rsi),%r10
25575+3: movq 2*8(%rsi),%rax
25576 4: movq 3*8(%rsi),%r11
25577 5: movnti %r8,(%rdi)
25578 6: movnti %r9,1*8(%rdi)
25579-7: movnti %r10,2*8(%rdi)
25580+7: movnti %rax,2*8(%rdi)
25581 8: movnti %r11,3*8(%rdi)
25582 9: movq 4*8(%rsi),%r8
25583 10: movq 5*8(%rsi),%r9
25584-11: movq 6*8(%rsi),%r10
25585+11: movq 6*8(%rsi),%rax
25586 12: movq 7*8(%rsi),%r11
25587 13: movnti %r8,4*8(%rdi)
25588 14: movnti %r9,5*8(%rdi)
25589-15: movnti %r10,6*8(%rdi)
25590+15: movnti %rax,6*8(%rdi)
25591 16: movnti %r11,7*8(%rdi)
25592 leaq 64(%rsi),%rsi
25593 leaq 64(%rdi),%rdi
25594@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25595 23: xorl %eax,%eax
25596 ASM_CLAC
25597 sfence
25598+ pax_force_retaddr
25599 ret
25600
25601 .section .fixup,"ax"
25602diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25603index 2419d5f..953ee51 100644
25604--- a/arch/x86/lib/csum-copy_64.S
25605+++ b/arch/x86/lib/csum-copy_64.S
25606@@ -9,6 +9,7 @@
25607 #include <asm/dwarf2.h>
25608 #include <asm/errno.h>
25609 #include <asm/asm.h>
25610+#include <asm/alternative-asm.h>
25611
25612 /*
25613 * Checksum copy with exception handling.
25614@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25615 CFI_RESTORE rbp
25616 addq $7*8, %rsp
25617 CFI_ADJUST_CFA_OFFSET -7*8
25618+ pax_force_retaddr 0, 1
25619 ret
25620 CFI_RESTORE_STATE
25621
25622diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25623index 25b7ae8..169fafc 100644
25624--- a/arch/x86/lib/csum-wrappers_64.c
25625+++ b/arch/x86/lib/csum-wrappers_64.c
25626@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25627 len -= 2;
25628 }
25629 }
25630- isum = csum_partial_copy_generic((__force const void *)src,
25631+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25632 dst, len, isum, errp, NULL);
25633 if (unlikely(*errp))
25634 goto out_err;
25635@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25636 }
25637
25638 *errp = 0;
25639- return csum_partial_copy_generic(src, (void __force *)dst,
25640+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25641 len, isum, NULL, errp);
25642 }
25643 EXPORT_SYMBOL(csum_partial_copy_to_user);
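
The prologue added to __copy_user_nocache above and the ____m() casts in csum-wrappers_64.c are the amd64 half of UDEREF: kernel code may not dereference a raw userland pointer, legitimate accesses go through a shifted shadow mapping instead, so any address below PAX_USER_SHADOW_BASE is rebased by adding the base (the cmp/jae/add sequence in the asm); __force_kernel is the accompanying sparse address-space cast. A sketch of the rebase (the base value here is illustrative, not the patch's constant):

#include <stdint.h>
#include <stdio.h>

#define PAX_USER_SHADOW_BASE 0xffff880000000000ULL   /* illustrative */

static uint64_t uderef_rebase(uint64_t addr)
{
        if (addr < PAX_USER_SHADOW_BASE)   /* cmp %rcx,%rsi; jae 1f */
                addr += PAX_USER_SHADOW_BASE;
        return addr;
}

int main(void)
{
        printf("%llx\n", (unsigned long long)uderef_rebase(0x00007f0000001000ULL));
        return 0;
}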
25644diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25645index 156b9c8..b144132 100644
25646--- a/arch/x86/lib/getuser.S
25647+++ b/arch/x86/lib/getuser.S
25648@@ -34,17 +34,40 @@
25649 #include <asm/thread_info.h>
25650 #include <asm/asm.h>
25651 #include <asm/smap.h>
25652+#include <asm/segment.h>
25653+#include <asm/pgtable.h>
25654+#include <asm/alternative-asm.h>
25655+
25656+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25657+#define __copyuser_seg gs;
25658+#else
25659+#define __copyuser_seg
25660+#endif
25661
25662 .text
25663 ENTRY(__get_user_1)
25664 CFI_STARTPROC
25665+
25666+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25667 GET_THREAD_INFO(%_ASM_DX)
25668 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25669 jae bad_get_user
25670 ASM_STAC
25671-1: movzb (%_ASM_AX),%edx
25672+
25673+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25674+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25675+ cmp %_ASM_DX,%_ASM_AX
25676+ jae 1234f
25677+ add %_ASM_DX,%_ASM_AX
25678+1234:
25679+#endif
25680+
25681+#endif
25682+
25683+1: __copyuser_seg movzb (%_ASM_AX),%edx
25684 xor %eax,%eax
25685 ASM_CLAC
25686+ pax_force_retaddr
25687 ret
25688 CFI_ENDPROC
25689 ENDPROC(__get_user_1)
25690@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
25691 ENTRY(__get_user_2)
25692 CFI_STARTPROC
25693 add $1,%_ASM_AX
25694+
25695+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25696 jc bad_get_user
25697 GET_THREAD_INFO(%_ASM_DX)
25698 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25699 jae bad_get_user
25700 ASM_STAC
25701-2: movzwl -1(%_ASM_AX),%edx
25702+
25703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25704+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25705+ cmp %_ASM_DX,%_ASM_AX
25706+ jae 1234f
25707+ add %_ASM_DX,%_ASM_AX
25708+1234:
25709+#endif
25710+
25711+#endif
25712+
25713+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25714 xor %eax,%eax
25715 ASM_CLAC
25716+ pax_force_retaddr
25717 ret
25718 CFI_ENDPROC
25719 ENDPROC(__get_user_2)
25720@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
25721 ENTRY(__get_user_4)
25722 CFI_STARTPROC
25723 add $3,%_ASM_AX
25724+
25725+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25726 jc bad_get_user
25727 GET_THREAD_INFO(%_ASM_DX)
25728 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25729 jae bad_get_user
25730 ASM_STAC
25731-3: mov -3(%_ASM_AX),%edx
25732+
25733+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25734+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25735+ cmp %_ASM_DX,%_ASM_AX
25736+ jae 1234f
25737+ add %_ASM_DX,%_ASM_AX
25738+1234:
25739+#endif
25740+
25741+#endif
25742+
25743+3: __copyuser_seg mov -3(%_ASM_AX),%edx
25744 xor %eax,%eax
25745 ASM_CLAC
25746+ pax_force_retaddr
25747 ret
25748 CFI_ENDPROC
25749 ENDPROC(__get_user_4)
25750@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
25751 GET_THREAD_INFO(%_ASM_DX)
25752 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25753 jae bad_get_user
25754+
25755+#ifdef CONFIG_PAX_MEMORY_UDEREF
25756+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25757+ cmp %_ASM_DX,%_ASM_AX
25758+ jae 1234f
25759+ add %_ASM_DX,%_ASM_AX
25760+1234:
25761+#endif
25762+
25763 ASM_STAC
25764 4: movq -7(%_ASM_AX),%_ASM_DX
25765 xor %eax,%eax
25766 ASM_CLAC
25767+ pax_force_retaddr
25768 ret
25769 CFI_ENDPROC
25770 ENDPROC(__get_user_8)
25771@@ -101,6 +162,7 @@ bad_get_user:
25772 xor %edx,%edx
25773 mov $(-EFAULT),%_ASM_AX
25774 ASM_CLAC
25775+ pax_force_retaddr
25776 ret
25777 CFI_ENDPROC
25778 END(bad_get_user)
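
getuser.S now composes three layers: the classic software limit check (compiled out entirely on i386-UDEREF, where the %gs-based __copyuser_seg override makes the segment limit enforce the boundary in hardware), the amd64-UDEREF shadow rebase from the previous sketch, and the single faulting load with its exception-table entry. The common path, as a minimal C model (names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define EFAULT 14

static int get_user_u8(const uint8_t *uaddr, uintptr_t addr_limit,
                       uint8_t *out)
{
        if ((uintptr_t)uaddr > addr_limit)  /* cmp TI_addr_limit; jae bad */
                return -EFAULT;
        *out = *uaddr;                      /* 1: movzb (%_ASM_AX),%edx */
        return 0;
}

int main(void)
{
        uint8_t out, src = 42;

        if (get_user_u8(&src, ~(uintptr_t)0, &out) == 0)
                printf("%u\n", out);
        return 0;
}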
25779diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25780index 54fcffe..7be149e 100644
25781--- a/arch/x86/lib/insn.c
25782+++ b/arch/x86/lib/insn.c
25783@@ -20,8 +20,10 @@
25784
25785 #ifdef __KERNEL__
25786 #include <linux/string.h>
25787+#include <asm/pgtable_types.h>
25788 #else
25789 #include <string.h>
25790+#define ktla_ktva(addr) addr
25791 #endif
25792 #include <asm/inat.h>
25793 #include <asm/insn.h>
25794@@ -53,8 +55,8 @@
25795 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25796 {
25797 memset(insn, 0, sizeof(*insn));
25798- insn->kaddr = kaddr;
25799- insn->next_byte = kaddr;
25800+ insn->kaddr = ktla_ktva(kaddr);
25801+ insn->next_byte = ktla_ktva(kaddr);
25802 insn->x86_64 = x86_64 ? 1 : 0;
25803 insn->opnd_bytes = 4;
25804 if (x86_64)
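
insn.c runs the decoder's pointers through ktla_ktva() because i386 KERNEXEC relocates the kernel text mapping, so the linear address handed in must be translated to the virtual mapping the decoder can actually read; user-space builds of the decoder get the identity definition added above. A hedged model that treats the translation as a constant offset (both bases invented purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define KTEXT_LINEAR 0xc1000000UL   /* illustrative linear text base */
#define KTEXT_VIRT   0xc1400000UL   /* illustrative relocated mapping */

static uintptr_t ktla_ktva_model(uintptr_t addr)
{
        return addr - KTEXT_LINEAR + KTEXT_VIRT;
}

int main(void)
{
        printf("%lx\n", (unsigned long)ktla_ktva_model(0xc1001234UL));
        return 0;
}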
25805diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25806index 05a95e7..326f2fa 100644
25807--- a/arch/x86/lib/iomap_copy_64.S
25808+++ b/arch/x86/lib/iomap_copy_64.S
25809@@ -17,6 +17,7 @@
25810
25811 #include <linux/linkage.h>
25812 #include <asm/dwarf2.h>
25813+#include <asm/alternative-asm.h>
25814
25815 /*
25816 * override generic version in lib/iomap_copy.c
25817@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25818 CFI_STARTPROC
25819 movl %edx,%ecx
25820 rep movsd
25821+ pax_force_retaddr
25822 ret
25823 CFI_ENDPROC
25824 ENDPROC(__iowrite32_copy)
25825diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25826index 1c273be..da9cc0e 100644
25827--- a/arch/x86/lib/memcpy_64.S
25828+++ b/arch/x86/lib/memcpy_64.S
25829@@ -33,6 +33,7 @@
25830 rep movsq
25831 movl %edx, %ecx
25832 rep movsb
25833+ pax_force_retaddr
25834 ret
25835 .Lmemcpy_e:
25836 .previous
25837@@ -49,6 +50,7 @@
25838 movq %rdi, %rax
25839 movq %rdx, %rcx
25840 rep movsb
25841+ pax_force_retaddr
25842 ret
25843 .Lmemcpy_e_e:
25844 .previous
25845@@ -76,13 +78,13 @@ ENTRY(memcpy)
25846 */
25847 movq 0*8(%rsi), %r8
25848 movq 1*8(%rsi), %r9
25849- movq 2*8(%rsi), %r10
25850+ movq 2*8(%rsi), %rcx
25851 movq 3*8(%rsi), %r11
25852 leaq 4*8(%rsi), %rsi
25853
25854 movq %r8, 0*8(%rdi)
25855 movq %r9, 1*8(%rdi)
25856- movq %r10, 2*8(%rdi)
25857+ movq %rcx, 2*8(%rdi)
25858 movq %r11, 3*8(%rdi)
25859 leaq 4*8(%rdi), %rdi
25860 jae .Lcopy_forward_loop
25861@@ -105,12 +107,12 @@ ENTRY(memcpy)
25862 subq $0x20, %rdx
25863 movq -1*8(%rsi), %r8
25864 movq -2*8(%rsi), %r9
25865- movq -3*8(%rsi), %r10
25866+ movq -3*8(%rsi), %rcx
25867 movq -4*8(%rsi), %r11
25868 leaq -4*8(%rsi), %rsi
25869 movq %r8, -1*8(%rdi)
25870 movq %r9, -2*8(%rdi)
25871- movq %r10, -3*8(%rdi)
25872+ movq %rcx, -3*8(%rdi)
25873 movq %r11, -4*8(%rdi)
25874 leaq -4*8(%rdi), %rdi
25875 jae .Lcopy_backward_loop
25876@@ -130,12 +132,13 @@ ENTRY(memcpy)
25877 */
25878 movq 0*8(%rsi), %r8
25879 movq 1*8(%rsi), %r9
25880- movq -2*8(%rsi, %rdx), %r10
25881+ movq -2*8(%rsi, %rdx), %rcx
25882 movq -1*8(%rsi, %rdx), %r11
25883 movq %r8, 0*8(%rdi)
25884 movq %r9, 1*8(%rdi)
25885- movq %r10, -2*8(%rdi, %rdx)
25886+ movq %rcx, -2*8(%rdi, %rdx)
25887 movq %r11, -1*8(%rdi, %rdx)
25888+ pax_force_retaddr
25889 retq
25890 .p2align 4
25891 .Lless_16bytes:
25892@@ -148,6 +151,7 @@ ENTRY(memcpy)
25893 movq -1*8(%rsi, %rdx), %r9
25894 movq %r8, 0*8(%rdi)
25895 movq %r9, -1*8(%rdi, %rdx)
25896+ pax_force_retaddr
25897 retq
25898 .p2align 4
25899 .Lless_8bytes:
25900@@ -161,6 +165,7 @@ ENTRY(memcpy)
25901 movl -4(%rsi, %rdx), %r8d
25902 movl %ecx, (%rdi)
25903 movl %r8d, -4(%rdi, %rdx)
25904+ pax_force_retaddr
25905 retq
25906 .p2align 4
25907 .Lless_3bytes:
25908@@ -179,6 +184,7 @@ ENTRY(memcpy)
25909 movb %cl, (%rdi)
25910
25911 .Lend:
25912+ pax_force_retaddr
25913 retq
25914 CFI_ENDPROC
25915 ENDPROC(memcpy)
25916diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25917index ee16461..c39c199 100644
25918--- a/arch/x86/lib/memmove_64.S
25919+++ b/arch/x86/lib/memmove_64.S
25920@@ -61,13 +61,13 @@ ENTRY(memmove)
25921 5:
25922 sub $0x20, %rdx
25923 movq 0*8(%rsi), %r11
25924- movq 1*8(%rsi), %r10
25925+ movq 1*8(%rsi), %rcx
25926 movq 2*8(%rsi), %r9
25927 movq 3*8(%rsi), %r8
25928 leaq 4*8(%rsi), %rsi
25929
25930 movq %r11, 0*8(%rdi)
25931- movq %r10, 1*8(%rdi)
25932+ movq %rcx, 1*8(%rdi)
25933 movq %r9, 2*8(%rdi)
25934 movq %r8, 3*8(%rdi)
25935 leaq 4*8(%rdi), %rdi
25936@@ -81,10 +81,10 @@ ENTRY(memmove)
25937 4:
25938 movq %rdx, %rcx
25939 movq -8(%rsi, %rdx), %r11
25940- lea -8(%rdi, %rdx), %r10
25941+ lea -8(%rdi, %rdx), %r9
25942 shrq $3, %rcx
25943 rep movsq
25944- movq %r11, (%r10)
25945+ movq %r11, (%r9)
25946 jmp 13f
25947 .Lmemmove_end_forward:
25948
25949@@ -95,14 +95,14 @@ ENTRY(memmove)
25950 7:
25951 movq %rdx, %rcx
25952 movq (%rsi), %r11
25953- movq %rdi, %r10
25954+ movq %rdi, %r9
25955 leaq -8(%rsi, %rdx), %rsi
25956 leaq -8(%rdi, %rdx), %rdi
25957 shrq $3, %rcx
25958 std
25959 rep movsq
25960 cld
25961- movq %r11, (%r10)
25962+ movq %r11, (%r9)
25963 jmp 13f
25964
25965 /*
25966@@ -127,13 +127,13 @@ ENTRY(memmove)
25967 8:
25968 subq $0x20, %rdx
25969 movq -1*8(%rsi), %r11
25970- movq -2*8(%rsi), %r10
25971+ movq -2*8(%rsi), %rcx
25972 movq -3*8(%rsi), %r9
25973 movq -4*8(%rsi), %r8
25974 leaq -4*8(%rsi), %rsi
25975
25976 movq %r11, -1*8(%rdi)
25977- movq %r10, -2*8(%rdi)
25978+ movq %rcx, -2*8(%rdi)
25979 movq %r9, -3*8(%rdi)
25980 movq %r8, -4*8(%rdi)
25981 leaq -4*8(%rdi), %rdi
25982@@ -151,11 +151,11 @@ ENTRY(memmove)
25983 * Move data from 16 bytes to 31 bytes.
25984 */
25985 movq 0*8(%rsi), %r11
25986- movq 1*8(%rsi), %r10
25987+ movq 1*8(%rsi), %rcx
25988 movq -2*8(%rsi, %rdx), %r9
25989 movq -1*8(%rsi, %rdx), %r8
25990 movq %r11, 0*8(%rdi)
25991- movq %r10, 1*8(%rdi)
25992+ movq %rcx, 1*8(%rdi)
25993 movq %r9, -2*8(%rdi, %rdx)
25994 movq %r8, -1*8(%rdi, %rdx)
25995 jmp 13f
25996@@ -167,9 +167,9 @@ ENTRY(memmove)
25997 * Move data from 8 bytes to 15 bytes.
25998 */
25999 movq 0*8(%rsi), %r11
26000- movq -1*8(%rsi, %rdx), %r10
26001+ movq -1*8(%rsi, %rdx), %r9
26002 movq %r11, 0*8(%rdi)
26003- movq %r10, -1*8(%rdi, %rdx)
26004+ movq %r9, -1*8(%rdi, %rdx)
26005 jmp 13f
26006 10:
26007 cmpq $4, %rdx
26008@@ -178,9 +178,9 @@ ENTRY(memmove)
26009 * Move data from 4 bytes to 7 bytes.
26010 */
26011 movl (%rsi), %r11d
26012- movl -4(%rsi, %rdx), %r10d
26013+ movl -4(%rsi, %rdx), %r9d
26014 movl %r11d, (%rdi)
26015- movl %r10d, -4(%rdi, %rdx)
26016+ movl %r9d, -4(%rdi, %rdx)
26017 jmp 13f
26018 11:
26019 cmp $2, %rdx
26020@@ -189,9 +189,9 @@ ENTRY(memmove)
26021 * Move data from 2 bytes to 3 bytes.
26022 */
26023 movw (%rsi), %r11w
26024- movw -2(%rsi, %rdx), %r10w
26025+ movw -2(%rsi, %rdx), %r9w
26026 movw %r11w, (%rdi)
26027- movw %r10w, -2(%rdi, %rdx)
26028+ movw %r9w, -2(%rdi, %rdx)
26029 jmp 13f
26030 12:
26031 cmp $1, %rdx
26032@@ -202,6 +202,7 @@ ENTRY(memmove)
26033 movb (%rsi), %r11b
26034 movb %r11b, (%rdi)
26035 13:
26036+ pax_force_retaddr
26037 retq
26038 CFI_ENDPROC
26039
26040@@ -210,6 +211,7 @@ ENTRY(memmove)
26041 /* Forward moving data. */
26042 movq %rdx, %rcx
26043 rep movsb
26044+ pax_force_retaddr
26045 retq
26046 .Lmemmove_end_forward_efs:
26047 .previous
26048diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
26049index 2dcb380..963660a 100644
26050--- a/arch/x86/lib/memset_64.S
26051+++ b/arch/x86/lib/memset_64.S
26052@@ -30,6 +30,7 @@
26053 movl %edx,%ecx
26054 rep stosb
26055 movq %r9,%rax
26056+ pax_force_retaddr
26057 ret
26058 .Lmemset_e:
26059 .previous
26060@@ -52,6 +53,7 @@
26061 movq %rdx,%rcx
26062 rep stosb
26063 movq %r9,%rax
26064+ pax_force_retaddr
26065 ret
26066 .Lmemset_e_e:
26067 .previous
26068@@ -59,7 +61,7 @@
26069 ENTRY(memset)
26070 ENTRY(__memset)
26071 CFI_STARTPROC
26072- movq %rdi,%r10
26073+ movq %rdi,%r11
26074
26075 /* expand byte value */
26076 movzbl %sil,%ecx
26077@@ -117,7 +119,8 @@ ENTRY(__memset)
26078 jnz .Lloop_1
26079
26080 .Lende:
26081- movq %r10,%rax
26082+ movq %r11,%rax
26083+ pax_force_retaddr
26084 ret
26085
26086 CFI_RESTORE_STATE
26087diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
26088index c9f2d9b..e7fd2c0 100644
26089--- a/arch/x86/lib/mmx_32.c
26090+++ b/arch/x86/lib/mmx_32.c
26091@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26092 {
26093 void *p;
26094 int i;
26095+ unsigned long cr0;
26096
26097 if (unlikely(in_interrupt()))
26098 return __memcpy(to, from, len);
26099@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26100 kernel_fpu_begin();
26101
26102 __asm__ __volatile__ (
26103- "1: prefetch (%0)\n" /* This set is 28 bytes */
26104- " prefetch 64(%0)\n"
26105- " prefetch 128(%0)\n"
26106- " prefetch 192(%0)\n"
26107- " prefetch 256(%0)\n"
26108+ "1: prefetch (%1)\n" /* This set is 28 bytes */
26109+ " prefetch 64(%1)\n"
26110+ " prefetch 128(%1)\n"
26111+ " prefetch 192(%1)\n"
26112+ " prefetch 256(%1)\n"
26113 "2: \n"
26114 ".section .fixup, \"ax\"\n"
26115- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26116+ "3: \n"
26117+
26118+#ifdef CONFIG_PAX_KERNEXEC
26119+ " movl %%cr0, %0\n"
26120+ " movl %0, %%eax\n"
26121+ " andl $0xFFFEFFFF, %%eax\n"
26122+ " movl %%eax, %%cr0\n"
26123+#endif
26124+
26125+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26126+
26127+#ifdef CONFIG_PAX_KERNEXEC
26128+ " movl %0, %%cr0\n"
26129+#endif
26130+
26131 " jmp 2b\n"
26132 ".previous\n"
26133 _ASM_EXTABLE(1b, 3b)
26134- : : "r" (from));
26135+ : "=&r" (cr0) : "r" (from) : "ax");
26136
26137 for ( ; i > 5; i--) {
26138 __asm__ __volatile__ (
26139- "1: prefetch 320(%0)\n"
26140- "2: movq (%0), %%mm0\n"
26141- " movq 8(%0), %%mm1\n"
26142- " movq 16(%0), %%mm2\n"
26143- " movq 24(%0), %%mm3\n"
26144- " movq %%mm0, (%1)\n"
26145- " movq %%mm1, 8(%1)\n"
26146- " movq %%mm2, 16(%1)\n"
26147- " movq %%mm3, 24(%1)\n"
26148- " movq 32(%0), %%mm0\n"
26149- " movq 40(%0), %%mm1\n"
26150- " movq 48(%0), %%mm2\n"
26151- " movq 56(%0), %%mm3\n"
26152- " movq %%mm0, 32(%1)\n"
26153- " movq %%mm1, 40(%1)\n"
26154- " movq %%mm2, 48(%1)\n"
26155- " movq %%mm3, 56(%1)\n"
26156+ "1: prefetch 320(%1)\n"
26157+ "2: movq (%1), %%mm0\n"
26158+ " movq 8(%1), %%mm1\n"
26159+ " movq 16(%1), %%mm2\n"
26160+ " movq 24(%1), %%mm3\n"
26161+ " movq %%mm0, (%2)\n"
26162+ " movq %%mm1, 8(%2)\n"
26163+ " movq %%mm2, 16(%2)\n"
26164+ " movq %%mm3, 24(%2)\n"
26165+ " movq 32(%1), %%mm0\n"
26166+ " movq 40(%1), %%mm1\n"
26167+ " movq 48(%1), %%mm2\n"
26168+ " movq 56(%1), %%mm3\n"
26169+ " movq %%mm0, 32(%2)\n"
26170+ " movq %%mm1, 40(%2)\n"
26171+ " movq %%mm2, 48(%2)\n"
26172+ " movq %%mm3, 56(%2)\n"
26173 ".section .fixup, \"ax\"\n"
26174- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26175+ "3:\n"
26176+
26177+#ifdef CONFIG_PAX_KERNEXEC
26178+ " movl %%cr0, %0\n"
26179+ " movl %0, %%eax\n"
26180+ " andl $0xFFFEFFFF, %%eax\n"
26181+ " movl %%eax, %%cr0\n"
26182+#endif
26183+
26184+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26185+
26186+#ifdef CONFIG_PAX_KERNEXEC
26187+ " movl %0, %%cr0\n"
26188+#endif
26189+
26190 " jmp 2b\n"
26191 ".previous\n"
26192 _ASM_EXTABLE(1b, 3b)
26193- : : "r" (from), "r" (to) : "memory");
26194+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26195
26196 from += 64;
26197 to += 64;
26198@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
26199 static void fast_copy_page(void *to, void *from)
26200 {
26201 int i;
26202+ unsigned long cr0;
26203
26204 kernel_fpu_begin();
26205
26206@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
26207 * but that is for later. -AV
26208 */
26209 __asm__ __volatile__(
26210- "1: prefetch (%0)\n"
26211- " prefetch 64(%0)\n"
26212- " prefetch 128(%0)\n"
26213- " prefetch 192(%0)\n"
26214- " prefetch 256(%0)\n"
26215+ "1: prefetch (%1)\n"
26216+ " prefetch 64(%1)\n"
26217+ " prefetch 128(%1)\n"
26218+ " prefetch 192(%1)\n"
26219+ " prefetch 256(%1)\n"
26220 "2: \n"
26221 ".section .fixup, \"ax\"\n"
26222- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26223+ "3: \n"
26224+
26225+#ifdef CONFIG_PAX_KERNEXEC
26226+ " movl %%cr0, %0\n"
26227+ " movl %0, %%eax\n"
26228+ " andl $0xFFFEFFFF, %%eax\n"
26229+ " movl %%eax, %%cr0\n"
26230+#endif
26231+
26232+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26233+
26234+#ifdef CONFIG_PAX_KERNEXEC
26235+ " movl %0, %%cr0\n"
26236+#endif
26237+
26238 " jmp 2b\n"
26239 ".previous\n"
26240- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26241+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26242
26243 for (i = 0; i < (4096-320)/64; i++) {
26244 __asm__ __volatile__ (
26245- "1: prefetch 320(%0)\n"
26246- "2: movq (%0), %%mm0\n"
26247- " movntq %%mm0, (%1)\n"
26248- " movq 8(%0), %%mm1\n"
26249- " movntq %%mm1, 8(%1)\n"
26250- " movq 16(%0), %%mm2\n"
26251- " movntq %%mm2, 16(%1)\n"
26252- " movq 24(%0), %%mm3\n"
26253- " movntq %%mm3, 24(%1)\n"
26254- " movq 32(%0), %%mm4\n"
26255- " movntq %%mm4, 32(%1)\n"
26256- " movq 40(%0), %%mm5\n"
26257- " movntq %%mm5, 40(%1)\n"
26258- " movq 48(%0), %%mm6\n"
26259- " movntq %%mm6, 48(%1)\n"
26260- " movq 56(%0), %%mm7\n"
26261- " movntq %%mm7, 56(%1)\n"
26262+ "1: prefetch 320(%1)\n"
26263+ "2: movq (%1), %%mm0\n"
26264+ " movntq %%mm0, (%2)\n"
26265+ " movq 8(%1), %%mm1\n"
26266+ " movntq %%mm1, 8(%2)\n"
26267+ " movq 16(%1), %%mm2\n"
26268+ " movntq %%mm2, 16(%2)\n"
26269+ " movq 24(%1), %%mm3\n"
26270+ " movntq %%mm3, 24(%2)\n"
26271+ " movq 32(%1), %%mm4\n"
26272+ " movntq %%mm4, 32(%2)\n"
26273+ " movq 40(%1), %%mm5\n"
26274+ " movntq %%mm5, 40(%2)\n"
26275+ " movq 48(%1), %%mm6\n"
26276+ " movntq %%mm6, 48(%2)\n"
26277+ " movq 56(%1), %%mm7\n"
26278+ " movntq %%mm7, 56(%2)\n"
26279 ".section .fixup, \"ax\"\n"
26280- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26281+ "3:\n"
26282+
26283+#ifdef CONFIG_PAX_KERNEXEC
26284+ " movl %%cr0, %0\n"
26285+ " movl %0, %%eax\n"
26286+ " andl $0xFFFEFFFF, %%eax\n"
26287+ " movl %%eax, %%cr0\n"
26288+#endif
26289+
26290+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26291+
26292+#ifdef CONFIG_PAX_KERNEXEC
26293+ " movl %0, %%cr0\n"
26294+#endif
26295+
26296 " jmp 2b\n"
26297 ".previous\n"
26298- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
26299+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26300
26301 from += 64;
26302 to += 64;
26303@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
26304 static void fast_copy_page(void *to, void *from)
26305 {
26306 int i;
26307+ unsigned long cr0;
26308
26309 kernel_fpu_begin();
26310
26311 __asm__ __volatile__ (
26312- "1: prefetch (%0)\n"
26313- " prefetch 64(%0)\n"
26314- " prefetch 128(%0)\n"
26315- " prefetch 192(%0)\n"
26316- " prefetch 256(%0)\n"
26317+ "1: prefetch (%1)\n"
26318+ " prefetch 64(%1)\n"
26319+ " prefetch 128(%1)\n"
26320+ " prefetch 192(%1)\n"
26321+ " prefetch 256(%1)\n"
26322 "2: \n"
26323 ".section .fixup, \"ax\"\n"
26324- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26325+ "3: \n"
26326+
26327+#ifdef CONFIG_PAX_KERNEXEC
26328+ " movl %%cr0, %0\n"
26329+ " movl %0, %%eax\n"
26330+ " andl $0xFFFEFFFF, %%eax\n"
26331+ " movl %%eax, %%cr0\n"
26332+#endif
26333+
26334+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26335+
26336+#ifdef CONFIG_PAX_KERNEXEC
26337+ " movl %0, %%cr0\n"
26338+#endif
26339+
26340 " jmp 2b\n"
26341 ".previous\n"
26342- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26343+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26344
26345 for (i = 0; i < 4096/64; i++) {
26346 __asm__ __volatile__ (
26347- "1: prefetch 320(%0)\n"
26348- "2: movq (%0), %%mm0\n"
26349- " movq 8(%0), %%mm1\n"
26350- " movq 16(%0), %%mm2\n"
26351- " movq 24(%0), %%mm3\n"
26352- " movq %%mm0, (%1)\n"
26353- " movq %%mm1, 8(%1)\n"
26354- " movq %%mm2, 16(%1)\n"
26355- " movq %%mm3, 24(%1)\n"
26356- " movq 32(%0), %%mm0\n"
26357- " movq 40(%0), %%mm1\n"
26358- " movq 48(%0), %%mm2\n"
26359- " movq 56(%0), %%mm3\n"
26360- " movq %%mm0, 32(%1)\n"
26361- " movq %%mm1, 40(%1)\n"
26362- " movq %%mm2, 48(%1)\n"
26363- " movq %%mm3, 56(%1)\n"
26364+ "1: prefetch 320(%1)\n"
26365+ "2: movq (%1), %%mm0\n"
26366+ " movq 8(%1), %%mm1\n"
26367+ " movq 16(%1), %%mm2\n"
26368+ " movq 24(%1), %%mm3\n"
26369+ " movq %%mm0, (%2)\n"
26370+ " movq %%mm1, 8(%2)\n"
26371+ " movq %%mm2, 16(%2)\n"
26372+ " movq %%mm3, 24(%2)\n"
26373+ " movq 32(%1), %%mm0\n"
26374+ " movq 40(%1), %%mm1\n"
26375+ " movq 48(%1), %%mm2\n"
26376+ " movq 56(%1), %%mm3\n"
26377+ " movq %%mm0, 32(%2)\n"
26378+ " movq %%mm1, 40(%2)\n"
26379+ " movq %%mm2, 48(%2)\n"
26380+ " movq %%mm3, 56(%2)\n"
26381 ".section .fixup, \"ax\"\n"
26382- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26383+ "3:\n"
26384+
26385+#ifdef CONFIG_PAX_KERNEXEC
26386+ " movl %%cr0, %0\n"
26387+ " movl %0, %%eax\n"
26388+ " andl $0xFFFEFFFF, %%eax\n"
26389+ " movl %%eax, %%cr0\n"
26390+#endif
26391+
26392+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26393+
26394+#ifdef CONFIG_PAX_KERNEXEC
26395+ " movl %0, %%cr0\n"
26396+#endif
26397+
26398 " jmp 2b\n"
26399 ".previous\n"
26400 _ASM_EXTABLE(1b, 3b)
26401- : : "r" (from), "r" (to) : "memory");
26402+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26403
26404 from += 64;
26405 to += 64;
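
Three things happen in mmx_32.c. The asm operands are renumbered because cr0 becomes output operand %0 (pushing from/to out to %1/%2) and %eax joins the clobber list. The fixup at label 3 is self-modifying code: it stores 0x1AEB over label 1, little-endian bytes EB 1A, i.e. a two-byte jmp +26 that permanently skips the prefetch block on CPUs whose prefetch instruction faults. Under KERNEXEC that text is write-protected, so the store is bracketed by clearing and restoring CR0.WP, bit 16, hence the andl $0xFFFEFFFF. A demo of both constants:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t patch = 0x1AEB;
        uint8_t *b = (uint8_t *)&patch;   /* x86 is little-endian */

        printf("opcode %02x disp %02x (jmp +%d)\n", b[0], b[1], b[1]);

        uint32_t cr0 = 0x80050033;        /* a typical CR0 value, WP set */
        printf("WP cleared: %08x\n", cr0 & 0xFFFEFFFF);  /* bit 16 off */
        return 0;
}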
26406diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26407index f6d13ee..aca5f0b 100644
26408--- a/arch/x86/lib/msr-reg.S
26409+++ b/arch/x86/lib/msr-reg.S
26410@@ -3,6 +3,7 @@
26411 #include <asm/dwarf2.h>
26412 #include <asm/asm.h>
26413 #include <asm/msr.h>
26414+#include <asm/alternative-asm.h>
26415
26416 #ifdef CONFIG_X86_64
26417 /*
26418@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26419 CFI_STARTPROC
26420 pushq_cfi %rbx
26421 pushq_cfi %rbp
26422- movq %rdi, %r10 /* Save pointer */
26423+ movq %rdi, %r9 /* Save pointer */
26424 xorl %r11d, %r11d /* Return value */
26425 movl (%rdi), %eax
26426 movl 4(%rdi), %ecx
26427@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26428 movl 28(%rdi), %edi
26429 CFI_REMEMBER_STATE
26430 1: \op
26431-2: movl %eax, (%r10)
26432+2: movl %eax, (%r9)
26433 movl %r11d, %eax /* Return value */
26434- movl %ecx, 4(%r10)
26435- movl %edx, 8(%r10)
26436- movl %ebx, 12(%r10)
26437- movl %ebp, 20(%r10)
26438- movl %esi, 24(%r10)
26439- movl %edi, 28(%r10)
26440+ movl %ecx, 4(%r9)
26441+ movl %edx, 8(%r9)
26442+ movl %ebx, 12(%r9)
26443+ movl %ebp, 20(%r9)
26444+ movl %esi, 24(%r9)
26445+ movl %edi, 28(%r9)
26446 popq_cfi %rbp
26447 popq_cfi %rbx
26448+ pax_force_retaddr
26449 ret
26450 3:
26451 CFI_RESTORE_STATE
26452diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26453index fc6ba17..04471c5 100644
26454--- a/arch/x86/lib/putuser.S
26455+++ b/arch/x86/lib/putuser.S
26456@@ -16,7 +16,9 @@
26457 #include <asm/errno.h>
26458 #include <asm/asm.h>
26459 #include <asm/smap.h>
26460-
26461+#include <asm/segment.h>
26462+#include <asm/pgtable.h>
26463+#include <asm/alternative-asm.h>
26464
26465 /*
26466 * __put_user_X
26467@@ -30,57 +32,125 @@
26468 * as they get called from within inline assembly.
26469 */
26470
26471-#define ENTER CFI_STARTPROC ; \
26472- GET_THREAD_INFO(%_ASM_BX)
26473-#define EXIT ASM_CLAC ; \
26474- ret ; \
26475+#define ENTER CFI_STARTPROC
26476+#define EXIT ASM_CLAC ; \
26477+ pax_force_retaddr ; \
26478+ ret ; \
26479 CFI_ENDPROC
26480
26481+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26482+#define _DEST %_ASM_CX,%_ASM_BX
26483+#else
26484+#define _DEST %_ASM_CX
26485+#endif
26486+
26487+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26488+#define __copyuser_seg gs;
26489+#else
26490+#define __copyuser_seg
26491+#endif
26492+
26493 .text
26494 ENTRY(__put_user_1)
26495 ENTER
26496+
26497+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26498+ GET_THREAD_INFO(%_ASM_BX)
26499 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26500 jae bad_put_user
26501 ASM_STAC
26502-1: movb %al,(%_ASM_CX)
26503+
26504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26505+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26506+ cmp %_ASM_BX,%_ASM_CX
26507+ jb 1234f
26508+ xor %ebx,%ebx
26509+1234:
26510+#endif
26511+
26512+#endif
26513+
26514+1: __copyuser_seg movb %al,(_DEST)
26515 xor %eax,%eax
26516 EXIT
26517 ENDPROC(__put_user_1)
26518
26519 ENTRY(__put_user_2)
26520 ENTER
26521+
26522+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26523+ GET_THREAD_INFO(%_ASM_BX)
26524 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26525 sub $1,%_ASM_BX
26526 cmp %_ASM_BX,%_ASM_CX
26527 jae bad_put_user
26528 ASM_STAC
26529-2: movw %ax,(%_ASM_CX)
26530+
26531+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26532+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26533+ cmp %_ASM_BX,%_ASM_CX
26534+ jb 1234f
26535+ xor %ebx,%ebx
26536+1234:
26537+#endif
26538+
26539+#endif
26540+
26541+2: __copyuser_seg movw %ax,(_DEST)
26542 xor %eax,%eax
26543 EXIT
26544 ENDPROC(__put_user_2)
26545
26546 ENTRY(__put_user_4)
26547 ENTER
26548+
26549+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26550+ GET_THREAD_INFO(%_ASM_BX)
26551 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26552 sub $3,%_ASM_BX
26553 cmp %_ASM_BX,%_ASM_CX
26554 jae bad_put_user
26555 ASM_STAC
26556-3: movl %eax,(%_ASM_CX)
26557+
26558+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26559+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26560+ cmp %_ASM_BX,%_ASM_CX
26561+ jb 1234f
26562+ xor %ebx,%ebx
26563+1234:
26564+#endif
26565+
26566+#endif
26567+
26568+3: __copyuser_seg movl %eax,(_DEST)
26569 xor %eax,%eax
26570 EXIT
26571 ENDPROC(__put_user_4)
26572
26573 ENTRY(__put_user_8)
26574 ENTER
26575+
26576+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26577+ GET_THREAD_INFO(%_ASM_BX)
26578 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26579 sub $7,%_ASM_BX
26580 cmp %_ASM_BX,%_ASM_CX
26581 jae bad_put_user
26582 ASM_STAC
26583-4: mov %_ASM_AX,(%_ASM_CX)
26584+
26585+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26586+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26587+ cmp %_ASM_BX,%_ASM_CX
26588+ jb 1234f
26589+ xor %ebx,%ebx
26590+1234:
26591+#endif
26592+
26593+#endif
26594+
26595+4: __copyuser_seg mov %_ASM_AX,(_DEST)
26596 #ifdef CONFIG_X86_32
26597-5: movl %edx,4(%_ASM_CX)
26598+5: __copyuser_seg movl %edx,4(_DEST)
26599 #endif
26600 xor %eax,%eax
26601 EXIT
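
putuser.S folds the amd64 rebase into the store's addressing mode instead of patching the pointer: _DEST expands to (%_ASM_CX,%_ASM_BX), %_ASM_BX is preloaded with PAX_USER_SHADOW_BASE, and it is zeroed (xor %ebx,%ebx) only when the target already lies at or above the base, so the store itself needs no extra branch. ENTER also stops loading thread_info up front, since the i386-UDEREF configuration performs no software limit check at all. The base selection in C (constant illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAX_USER_SHADOW_BASE 0xffff880000000000ULL   /* illustrative */

int main(void)
{
        uint64_t addr = 0x00007f0000002000ULL;       /* user pointer */
        /* jb 1234f / xor %ebx,%ebx: keep the base only for low pointers */
        uint64_t base = addr < PAX_USER_SHADOW_BASE ? PAX_USER_SHADOW_BASE : 0;

        printf("store goes to %llx\n", (unsigned long long)(addr + base));
        return 0;                                    /* movb %al,(_DEST) */
}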
26602diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26603index 1cad221..de671ee 100644
26604--- a/arch/x86/lib/rwlock.S
26605+++ b/arch/x86/lib/rwlock.S
26606@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26607 FRAME
26608 0: LOCK_PREFIX
26609 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26610+
26611+#ifdef CONFIG_PAX_REFCOUNT
26612+ jno 1234f
26613+ LOCK_PREFIX
26614+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26615+ int $4
26616+1234:
26617+ _ASM_EXTABLE(1234b, 1234b)
26618+#endif
26619+
26620 1: rep; nop
26621 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26622 jne 1b
26623 LOCK_PREFIX
26624 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26625+
26626+#ifdef CONFIG_PAX_REFCOUNT
26627+ jno 1234f
26628+ LOCK_PREFIX
26629+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26630+ int $4
26631+1234:
26632+ _ASM_EXTABLE(1234b, 1234b)
26633+#endif
26634+
26635 jnz 0b
26636 ENDFRAME
26637+ pax_force_retaddr
26638 ret
26639 CFI_ENDPROC
26640 END(__write_lock_failed)
26641@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26642 FRAME
26643 0: LOCK_PREFIX
26644 READ_LOCK_SIZE(inc) (%__lock_ptr)
26645+
26646+#ifdef CONFIG_PAX_REFCOUNT
26647+ jno 1234f
26648+ LOCK_PREFIX
26649+ READ_LOCK_SIZE(dec) (%__lock_ptr)
26650+ int $4
26651+1234:
26652+ _ASM_EXTABLE(1234b, 1234b)
26653+#endif
26654+
26655 1: rep; nop
26656 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26657 js 1b
26658 LOCK_PREFIX
26659 READ_LOCK_SIZE(dec) (%__lock_ptr)
26660+
26661+#ifdef CONFIG_PAX_REFCOUNT
26662+ jno 1234f
26663+ LOCK_PREFIX
26664+ READ_LOCK_SIZE(inc) (%__lock_ptr)
26665+ int $4
26666+1234:
26667+ _ASM_EXTABLE(1234b, 1234b)
26668+#endif
26669+
26670 js 0b
26671 ENDFRAME
26672+ pax_force_retaddr
26673 ret
26674 CFI_ENDPROC
26675 END(__read_lock_failed)
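
The rwlock.S additions are the PAX_REFCOUNT pattern: after each locked add or inc, jno falls through when no signed overflow occurred; on overflow the operation is undone and int $4 raises the overflow exception (vector 4, the INTO trap), whose handler reports the event, while the _ASM_EXTABLE(1234b, 1234b) entry lets execution resume at that same spot afterwards. A portable model of a trapping increment:

#include <limits.h>
#include <stdio.h>

static int refcount_inc(int *v)
{
        int old = *v;

        if (__builtin_add_overflow(old, 1, v)) {  /* jno taken = fine */
                *v = old;                         /* undo, as the asm does */
                __builtin_trap();                 /* stand-in for int $4 */
        }
        return *v;
}

int main(void)
{
        int v = INT_MAX - 1;

        printf("%d\n", refcount_inc(&v));   /* INT_MAX */
        /* a second refcount_inc(&v) would restore v and trap */
        return 0;
}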
26676diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26677index 5dff5f0..cadebf4 100644
26678--- a/arch/x86/lib/rwsem.S
26679+++ b/arch/x86/lib/rwsem.S
26680@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26681 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26682 CFI_RESTORE __ASM_REG(dx)
26683 restore_common_regs
26684+ pax_force_retaddr
26685 ret
26686 CFI_ENDPROC
26687 ENDPROC(call_rwsem_down_read_failed)
26688@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26689 movq %rax,%rdi
26690 call rwsem_down_write_failed
26691 restore_common_regs
26692+ pax_force_retaddr
26693 ret
26694 CFI_ENDPROC
26695 ENDPROC(call_rwsem_down_write_failed)
26696@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26697 movq %rax,%rdi
26698 call rwsem_wake
26699 restore_common_regs
26700-1: ret
26701+1: pax_force_retaddr
26702+ ret
26703 CFI_ENDPROC
26704 ENDPROC(call_rwsem_wake)
26705
26706@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26707 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26708 CFI_RESTORE __ASM_REG(dx)
26709 restore_common_regs
26710+ pax_force_retaddr
26711 ret
26712 CFI_ENDPROC
26713 ENDPROC(call_rwsem_downgrade_wake)
26714diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26715index a63efd6..ccecad8 100644
26716--- a/arch/x86/lib/thunk_64.S
26717+++ b/arch/x86/lib/thunk_64.S
26718@@ -8,6 +8,7 @@
26719 #include <linux/linkage.h>
26720 #include <asm/dwarf2.h>
26721 #include <asm/calling.h>
26722+#include <asm/alternative-asm.h>
26723
26724 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26725 .macro THUNK name, func, put_ret_addr_in_rdi=0
26726@@ -41,5 +42,6 @@
26727 SAVE_ARGS
26728 restore:
26729 RESTORE_ARGS
26730+ pax_force_retaddr
26731 ret
26732 CFI_ENDPROC
26733diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26734index f0312d7..9c39d63 100644
26735--- a/arch/x86/lib/usercopy_32.c
26736+++ b/arch/x86/lib/usercopy_32.c
26737@@ -42,11 +42,13 @@ do { \
26738 int __d0; \
26739 might_fault(); \
26740 __asm__ __volatile__( \
26741+ __COPYUSER_SET_ES \
26742 ASM_STAC "\n" \
26743 "0: rep; stosl\n" \
26744 " movl %2,%0\n" \
26745 "1: rep; stosb\n" \
26746 "2: " ASM_CLAC "\n" \
26747+ __COPYUSER_RESTORE_ES \
26748 ".section .fixup,\"ax\"\n" \
26749 "3: lea 0(%2,%0,4),%0\n" \
26750 " jmp 2b\n" \
26751@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26752
26753 #ifdef CONFIG_X86_INTEL_USERCOPY
26754 static unsigned long
26755-__copy_user_intel(void __user *to, const void *from, unsigned long size)
26756+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26757 {
26758 int d0, d1;
26759 __asm__ __volatile__(
26760@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26761 " .align 2,0x90\n"
26762 "3: movl 0(%4), %%eax\n"
26763 "4: movl 4(%4), %%edx\n"
26764- "5: movl %%eax, 0(%3)\n"
26765- "6: movl %%edx, 4(%3)\n"
26766+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26767+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26768 "7: movl 8(%4), %%eax\n"
26769 "8: movl 12(%4),%%edx\n"
26770- "9: movl %%eax, 8(%3)\n"
26771- "10: movl %%edx, 12(%3)\n"
26772+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26773+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26774 "11: movl 16(%4), %%eax\n"
26775 "12: movl 20(%4), %%edx\n"
26776- "13: movl %%eax, 16(%3)\n"
26777- "14: movl %%edx, 20(%3)\n"
26778+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26779+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26780 "15: movl 24(%4), %%eax\n"
26781 "16: movl 28(%4), %%edx\n"
26782- "17: movl %%eax, 24(%3)\n"
26783- "18: movl %%edx, 28(%3)\n"
26784+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26785+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26786 "19: movl 32(%4), %%eax\n"
26787 "20: movl 36(%4), %%edx\n"
26788- "21: movl %%eax, 32(%3)\n"
26789- "22: movl %%edx, 36(%3)\n"
26790+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26791+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26792 "23: movl 40(%4), %%eax\n"
26793 "24: movl 44(%4), %%edx\n"
26794- "25: movl %%eax, 40(%3)\n"
26795- "26: movl %%edx, 44(%3)\n"
26796+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26797+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26798 "27: movl 48(%4), %%eax\n"
26799 "28: movl 52(%4), %%edx\n"
26800- "29: movl %%eax, 48(%3)\n"
26801- "30: movl %%edx, 52(%3)\n"
26802+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26803+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26804 "31: movl 56(%4), %%eax\n"
26805 "32: movl 60(%4), %%edx\n"
26806- "33: movl %%eax, 56(%3)\n"
26807- "34: movl %%edx, 60(%3)\n"
26808+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26809+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26810 " addl $-64, %0\n"
26811 " addl $64, %4\n"
26812 " addl $64, %3\n"
26813@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26814 " shrl $2, %0\n"
26815 " andl $3, %%eax\n"
26816 " cld\n"
26817+ __COPYUSER_SET_ES
26818 "99: rep; movsl\n"
26819 "36: movl %%eax, %0\n"
26820 "37: rep; movsb\n"
26821 "100:\n"
26822+ __COPYUSER_RESTORE_ES
26823 ".section .fixup,\"ax\"\n"
26824 "101: lea 0(%%eax,%0,4),%0\n"
26825 " jmp 100b\n"
26826@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26827 }
26828
26829 static unsigned long
26830+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26831+{
26832+ int d0, d1;
26833+ __asm__ __volatile__(
26834+ " .align 2,0x90\n"
26835+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26836+ " cmpl $67, %0\n"
26837+ " jbe 3f\n"
26838+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26839+ " .align 2,0x90\n"
26840+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26841+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26842+ "5: movl %%eax, 0(%3)\n"
26843+ "6: movl %%edx, 4(%3)\n"
26844+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26845+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26846+ "9: movl %%eax, 8(%3)\n"
26847+ "10: movl %%edx, 12(%3)\n"
26848+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26849+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26850+ "13: movl %%eax, 16(%3)\n"
26851+ "14: movl %%edx, 20(%3)\n"
26852+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26853+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26854+ "17: movl %%eax, 24(%3)\n"
26855+ "18: movl %%edx, 28(%3)\n"
26856+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26857+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26858+ "21: movl %%eax, 32(%3)\n"
26859+ "22: movl %%edx, 36(%3)\n"
26860+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26861+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26862+ "25: movl %%eax, 40(%3)\n"
26863+ "26: movl %%edx, 44(%3)\n"
26864+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26865+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26866+ "29: movl %%eax, 48(%3)\n"
26867+ "30: movl %%edx, 52(%3)\n"
26868+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26869+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26870+ "33: movl %%eax, 56(%3)\n"
26871+ "34: movl %%edx, 60(%3)\n"
26872+ " addl $-64, %0\n"
26873+ " addl $64, %4\n"
26874+ " addl $64, %3\n"
26875+ " cmpl $63, %0\n"
26876+ " ja 1b\n"
26877+ "35: movl %0, %%eax\n"
26878+ " shrl $2, %0\n"
26879+ " andl $3, %%eax\n"
26880+ " cld\n"
26881+ "99: rep; "__copyuser_seg" movsl\n"
26882+ "36: movl %%eax, %0\n"
26883+ "37: rep; "__copyuser_seg" movsb\n"
26884+ "100:\n"
26885+ ".section .fixup,\"ax\"\n"
26886+ "101: lea 0(%%eax,%0,4),%0\n"
26887+ " jmp 100b\n"
26888+ ".previous\n"
26889+ _ASM_EXTABLE(1b,100b)
26890+ _ASM_EXTABLE(2b,100b)
26891+ _ASM_EXTABLE(3b,100b)
26892+ _ASM_EXTABLE(4b,100b)
26893+ _ASM_EXTABLE(5b,100b)
26894+ _ASM_EXTABLE(6b,100b)
26895+ _ASM_EXTABLE(7b,100b)
26896+ _ASM_EXTABLE(8b,100b)
26897+ _ASM_EXTABLE(9b,100b)
26898+ _ASM_EXTABLE(10b,100b)
26899+ _ASM_EXTABLE(11b,100b)
26900+ _ASM_EXTABLE(12b,100b)
26901+ _ASM_EXTABLE(13b,100b)
26902+ _ASM_EXTABLE(14b,100b)
26903+ _ASM_EXTABLE(15b,100b)
26904+ _ASM_EXTABLE(16b,100b)
26905+ _ASM_EXTABLE(17b,100b)
26906+ _ASM_EXTABLE(18b,100b)
26907+ _ASM_EXTABLE(19b,100b)
26908+ _ASM_EXTABLE(20b,100b)
26909+ _ASM_EXTABLE(21b,100b)
26910+ _ASM_EXTABLE(22b,100b)
26911+ _ASM_EXTABLE(23b,100b)
26912+ _ASM_EXTABLE(24b,100b)
26913+ _ASM_EXTABLE(25b,100b)
26914+ _ASM_EXTABLE(26b,100b)
26915+ _ASM_EXTABLE(27b,100b)
26916+ _ASM_EXTABLE(28b,100b)
26917+ _ASM_EXTABLE(29b,100b)
26918+ _ASM_EXTABLE(30b,100b)
26919+ _ASM_EXTABLE(31b,100b)
26920+ _ASM_EXTABLE(32b,100b)
26921+ _ASM_EXTABLE(33b,100b)
26922+ _ASM_EXTABLE(34b,100b)
26923+ _ASM_EXTABLE(35b,100b)
26924+ _ASM_EXTABLE(36b,100b)
26925+ _ASM_EXTABLE(37b,100b)
26926+ _ASM_EXTABLE(99b,101b)
26927+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26928+ : "1"(to), "2"(from), "0"(size)
26929+ : "eax", "edx", "memory");
26930+ return size;
26931+}
26932+
26933+static unsigned long __size_overflow(3)
26934 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26935 {
26936 int d0, d1;
26937 __asm__ __volatile__(
26938 " .align 2,0x90\n"
26939- "0: movl 32(%4), %%eax\n"
26940+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26941 " cmpl $67, %0\n"
26942 " jbe 2f\n"
26943- "1: movl 64(%4), %%eax\n"
26944+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26945 " .align 2,0x90\n"
26946- "2: movl 0(%4), %%eax\n"
26947- "21: movl 4(%4), %%edx\n"
26948+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26949+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26950 " movl %%eax, 0(%3)\n"
26951 " movl %%edx, 4(%3)\n"
26952- "3: movl 8(%4), %%eax\n"
26953- "31: movl 12(%4),%%edx\n"
26954+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26955+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26956 " movl %%eax, 8(%3)\n"
26957 " movl %%edx, 12(%3)\n"
26958- "4: movl 16(%4), %%eax\n"
26959- "41: movl 20(%4), %%edx\n"
26960+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26961+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26962 " movl %%eax, 16(%3)\n"
26963 " movl %%edx, 20(%3)\n"
26964- "10: movl 24(%4), %%eax\n"
26965- "51: movl 28(%4), %%edx\n"
26966+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26967+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26968 " movl %%eax, 24(%3)\n"
26969 " movl %%edx, 28(%3)\n"
26970- "11: movl 32(%4), %%eax\n"
26971- "61: movl 36(%4), %%edx\n"
26972+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26973+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26974 " movl %%eax, 32(%3)\n"
26975 " movl %%edx, 36(%3)\n"
26976- "12: movl 40(%4), %%eax\n"
26977- "71: movl 44(%4), %%edx\n"
26978+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26979+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26980 " movl %%eax, 40(%3)\n"
26981 " movl %%edx, 44(%3)\n"
26982- "13: movl 48(%4), %%eax\n"
26983- "81: movl 52(%4), %%edx\n"
26984+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26985+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26986 " movl %%eax, 48(%3)\n"
26987 " movl %%edx, 52(%3)\n"
26988- "14: movl 56(%4), %%eax\n"
26989- "91: movl 60(%4), %%edx\n"
26990+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26991+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26992 " movl %%eax, 56(%3)\n"
26993 " movl %%edx, 60(%3)\n"
26994 " addl $-64, %0\n"
26995@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26996 " shrl $2, %0\n"
26997 " andl $3, %%eax\n"
26998 " cld\n"
26999- "6: rep; movsl\n"
27000+ "6: rep; "__copyuser_seg" movsl\n"
27001 " movl %%eax,%0\n"
27002- "7: rep; movsb\n"
27003+ "7: rep; "__copyuser_seg" movsb\n"
27004 "8:\n"
27005 ".section .fixup,\"ax\"\n"
27006 "9: lea 0(%%eax,%0,4),%0\n"
27007@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
27008 * hyoshiok@miraclelinux.com
27009 */
27010
27011-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27012+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
27013 const void __user *from, unsigned long size)
27014 {
27015 int d0, d1;
27016
27017 __asm__ __volatile__(
27018 " .align 2,0x90\n"
27019- "0: movl 32(%4), %%eax\n"
27020+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27021 " cmpl $67, %0\n"
27022 " jbe 2f\n"
27023- "1: movl 64(%4), %%eax\n"
27024+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27025 " .align 2,0x90\n"
27026- "2: movl 0(%4), %%eax\n"
27027- "21: movl 4(%4), %%edx\n"
27028+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27029+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27030 " movnti %%eax, 0(%3)\n"
27031 " movnti %%edx, 4(%3)\n"
27032- "3: movl 8(%4), %%eax\n"
27033- "31: movl 12(%4),%%edx\n"
27034+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27035+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27036 " movnti %%eax, 8(%3)\n"
27037 " movnti %%edx, 12(%3)\n"
27038- "4: movl 16(%4), %%eax\n"
27039- "41: movl 20(%4), %%edx\n"
27040+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27041+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27042 " movnti %%eax, 16(%3)\n"
27043 " movnti %%edx, 20(%3)\n"
27044- "10: movl 24(%4), %%eax\n"
27045- "51: movl 28(%4), %%edx\n"
27046+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27047+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27048 " movnti %%eax, 24(%3)\n"
27049 " movnti %%edx, 28(%3)\n"
27050- "11: movl 32(%4), %%eax\n"
27051- "61: movl 36(%4), %%edx\n"
27052+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27053+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27054 " movnti %%eax, 32(%3)\n"
27055 " movnti %%edx, 36(%3)\n"
27056- "12: movl 40(%4), %%eax\n"
27057- "71: movl 44(%4), %%edx\n"
27058+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27059+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27060 " movnti %%eax, 40(%3)\n"
27061 " movnti %%edx, 44(%3)\n"
27062- "13: movl 48(%4), %%eax\n"
27063- "81: movl 52(%4), %%edx\n"
27064+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27065+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27066 " movnti %%eax, 48(%3)\n"
27067 " movnti %%edx, 52(%3)\n"
27068- "14: movl 56(%4), %%eax\n"
27069- "91: movl 60(%4), %%edx\n"
27070+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27071+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27072 " movnti %%eax, 56(%3)\n"
27073 " movnti %%edx, 60(%3)\n"
27074 " addl $-64, %0\n"
27075@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27076 " shrl $2, %0\n"
27077 " andl $3, %%eax\n"
27078 " cld\n"
27079- "6: rep; movsl\n"
27080+ "6: rep; "__copyuser_seg" movsl\n"
27081 " movl %%eax,%0\n"
27082- "7: rep; movsb\n"
27083+ "7: rep; "__copyuser_seg" movsb\n"
27084 "8:\n"
27085 ".section .fixup,\"ax\"\n"
27086 "9: lea 0(%%eax,%0,4),%0\n"
27087@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27088 return size;
27089 }
27090
27091-static unsigned long __copy_user_intel_nocache(void *to,
27092+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
27093 const void __user *from, unsigned long size)
27094 {
27095 int d0, d1;
27096
27097 __asm__ __volatile__(
27098 " .align 2,0x90\n"
27099- "0: movl 32(%4), %%eax\n"
27100+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27101 " cmpl $67, %0\n"
27102 " jbe 2f\n"
27103- "1: movl 64(%4), %%eax\n"
27104+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27105 " .align 2,0x90\n"
27106- "2: movl 0(%4), %%eax\n"
27107- "21: movl 4(%4), %%edx\n"
27108+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27109+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27110 " movnti %%eax, 0(%3)\n"
27111 " movnti %%edx, 4(%3)\n"
27112- "3: movl 8(%4), %%eax\n"
27113- "31: movl 12(%4),%%edx\n"
27114+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27115+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27116 " movnti %%eax, 8(%3)\n"
27117 " movnti %%edx, 12(%3)\n"
27118- "4: movl 16(%4), %%eax\n"
27119- "41: movl 20(%4), %%edx\n"
27120+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27121+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27122 " movnti %%eax, 16(%3)\n"
27123 " movnti %%edx, 20(%3)\n"
27124- "10: movl 24(%4), %%eax\n"
27125- "51: movl 28(%4), %%edx\n"
27126+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27127+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27128 " movnti %%eax, 24(%3)\n"
27129 " movnti %%edx, 28(%3)\n"
27130- "11: movl 32(%4), %%eax\n"
27131- "61: movl 36(%4), %%edx\n"
27132+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27133+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27134 " movnti %%eax, 32(%3)\n"
27135 " movnti %%edx, 36(%3)\n"
27136- "12: movl 40(%4), %%eax\n"
27137- "71: movl 44(%4), %%edx\n"
27138+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27139+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27140 " movnti %%eax, 40(%3)\n"
27141 " movnti %%edx, 44(%3)\n"
27142- "13: movl 48(%4), %%eax\n"
27143- "81: movl 52(%4), %%edx\n"
27144+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27145+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27146 " movnti %%eax, 48(%3)\n"
27147 " movnti %%edx, 52(%3)\n"
27148- "14: movl 56(%4), %%eax\n"
27149- "91: movl 60(%4), %%edx\n"
27150+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27151+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27152 " movnti %%eax, 56(%3)\n"
27153 " movnti %%edx, 60(%3)\n"
27154 " addl $-64, %0\n"
27155@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
27156 " shrl $2, %0\n"
27157 " andl $3, %%eax\n"
27158 " cld\n"
27159- "6: rep; movsl\n"
27160+ "6: rep; "__copyuser_seg" movsl\n"
27161 " movl %%eax,%0\n"
27162- "7: rep; movsb\n"
27163+ "7: rep; "__copyuser_seg" movsb\n"
27164 "8:\n"
27165 ".section .fixup,\"ax\"\n"
27166 "9: lea 0(%%eax,%0,4),%0\n"
27167@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
27168 */
27169 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
27170 unsigned long size);
27171-unsigned long __copy_user_intel(void __user *to, const void *from,
27172+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
27173+ unsigned long size);
27174+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
27175 unsigned long size);
27176 unsigned long __copy_user_zeroing_intel_nocache(void *to,
27177 const void __user *from, unsigned long size);
27178 #endif /* CONFIG_X86_INTEL_USERCOPY */
27179
27180 /* Generic arbitrary sized copy. */
27181-#define __copy_user(to, from, size) \
27182+#define __copy_user(to, from, size, prefix, set, restore) \
27183 do { \
27184 int __d0, __d1, __d2; \
27185 __asm__ __volatile__( \
27186+ set \
27187 " cmp $7,%0\n" \
27188 " jbe 1f\n" \
27189 " movl %1,%0\n" \
27190 " negl %0\n" \
27191 " andl $7,%0\n" \
27192 " subl %0,%3\n" \
27193- "4: rep; movsb\n" \
27194+ "4: rep; "prefix"movsb\n" \
27195 " movl %3,%0\n" \
27196 " shrl $2,%0\n" \
27197 " andl $3,%3\n" \
27198 " .align 2,0x90\n" \
27199- "0: rep; movsl\n" \
27200+ "0: rep; "prefix"movsl\n" \
27201 " movl %3,%0\n" \
27202- "1: rep; movsb\n" \
27203+ "1: rep; "prefix"movsb\n" \
27204 "2:\n" \
27205+ restore \
27206 ".section .fixup,\"ax\"\n" \
27207 "5: addl %3,%0\n" \
27208 " jmp 2b\n" \
27209@@ -538,14 +650,14 @@ do { \
27210 " negl %0\n" \
27211 " andl $7,%0\n" \
27212 " subl %0,%3\n" \
27213- "4: rep; movsb\n" \
27214+ "4: rep; "__copyuser_seg"movsb\n" \
27215 " movl %3,%0\n" \
27216 " shrl $2,%0\n" \
27217 " andl $3,%3\n" \
27218 " .align 2,0x90\n" \
27219- "0: rep; movsl\n" \
27220+ "0: rep; "__copyuser_seg"movsl\n" \
27221 " movl %3,%0\n" \
27222- "1: rep; movsb\n" \
27223+ "1: rep; "__copyuser_seg"movsb\n" \
27224 "2:\n" \
27225 ".section .fixup,\"ax\"\n" \
27226 "5: addl %3,%0\n" \
27227@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
27228 {
27229 stac();
27230 if (movsl_is_ok(to, from, n))
27231- __copy_user(to, from, n);
27232+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
27233 else
27234- n = __copy_user_intel(to, from, n);
27235+ n = __generic_copy_to_user_intel(to, from, n);
27236 clac();
27237 return n;
27238 }
27239@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
27240 {
27241 stac();
27242 if (movsl_is_ok(to, from, n))
27243- __copy_user(to, from, n);
27244+ __copy_user(to, from, n, __copyuser_seg, "", "");
27245 else
27246- n = __copy_user_intel((void __user *)to,
27247- (const void *)from, n);
27248+ n = __generic_copy_from_user_intel(to, from, n);
27249 clac();
27250 return n;
27251 }
27252@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
27253 if (n > 64 && cpu_has_xmm2)
27254 n = __copy_user_intel_nocache(to, from, n);
27255 else
27256- __copy_user(to, from, n);
27257+ __copy_user(to, from, n, __copyuser_seg, "", "");
27258 #else
27259- __copy_user(to, from, n);
27260+ __copy_user(to, from, n, __copyuser_seg, "", "");
27261 #endif
27262 clac();
27263 return n;
27264 }
27265 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
27266
27267-/**
27268- * copy_to_user: - Copy a block of data into user space.
27269- * @to: Destination address, in user space.
27270- * @from: Source address, in kernel space.
27271- * @n: Number of bytes to copy.
27272- *
27273- * Context: User context only. This function may sleep.
27274- *
27275- * Copy data from kernel space to user space.
27276- *
27277- * Returns number of bytes that could not be copied.
27278- * On success, this will be zero.
27279- */
27280-unsigned long
27281-copy_to_user(void __user *to, const void *from, unsigned long n)
27282-{
27283- if (access_ok(VERIFY_WRITE, to, n))
27284- n = __copy_to_user(to, from, n);
27285- return n;
27286-}
27287-EXPORT_SYMBOL(copy_to_user);
27288-
27289-/**
27290- * copy_from_user: - Copy a block of data from user space.
27291- * @to: Destination address, in kernel space.
27292- * @from: Source address, in user space.
27293- * @n: Number of bytes to copy.
27294- *
27295- * Context: User context only. This function may sleep.
27296- *
27297- * Copy data from user space to kernel space.
27298- *
27299- * Returns number of bytes that could not be copied.
27300- * On success, this will be zero.
27301- *
27302- * If some data could not be copied, this function will pad the copied
27303- * data to the requested size using zero bytes.
27304- */
27305-unsigned long
27306-_copy_from_user(void *to, const void __user *from, unsigned long n)
27307-{
27308- if (access_ok(VERIFY_READ, from, n))
27309- n = __copy_from_user(to, from, n);
27310- else
27311- memset(to, 0, n);
27312- return n;
27313-}
27314-EXPORT_SYMBOL(_copy_from_user);
27315-
27316 void copy_from_user_overflow(void)
27317 {
27318 WARN(1, "Buffer overflow detected!\n");
27319 }
27320 EXPORT_SYMBOL(copy_from_user_overflow);
27321+
27322+void copy_to_user_overflow(void)
27323+{
27324+ WARN(1, "Buffer overflow detected!\n");
27325+}
27326+EXPORT_SYMBOL(copy_to_user_overflow);
27327+
27328+#ifdef CONFIG_PAX_MEMORY_UDEREF
27329+void __set_fs(mm_segment_t x)
27330+{
27331+ switch (x.seg) {
27332+ case 0:
27333+ loadsegment(gs, 0);
27334+ break;
27335+ case TASK_SIZE_MAX:
27336+ loadsegment(gs, __USER_DS);
27337+ break;
27338+ case -1UL:
27339+ loadsegment(gs, __KERNEL_DS);
27340+ break;
27341+ default:
27342+ BUG();
27343+ }
27344+ return;
27345+}
27346+EXPORT_SYMBOL(__set_fs);
27347+
27348+void set_fs(mm_segment_t x)
27349+{
27350+ current_thread_info()->addr_limit = x;
27351+ __set_fs(x);
27352+}
27353+EXPORT_SYMBOL(set_fs);
27354+#endif
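
The usercopy_32.c changes thread a string parameter through each asm template so a segment override can be pasted in front of every string instruction. __copyuser_seg is defined elsewhere in the patch; under PAX_MEMORY_UDEREF on i386 it is assumed here to expand to a "gs;" prefix. A tiny demo of the string pasting (the macro names below are stand-ins):

#include <stdio.h>

#define COPYUSER_SEG "gs;"   /* assumption: UDEREF's __copyuser_seg on i386 */
#define COPY_TEMPLATE(prefix) \
        "0: rep; " prefix "movsl\n" "1: rep; " prefix "movsb\n"

int main(void)
{
    /* C string pasting splices the override into the template, exactly
     * as the reworked __copy_user(to, from, size, prefix, ...) does. */
    fputs(COPY_TEMPLATE(COPYUSER_SEG), stdout);  /* "0: rep; gs;movsl" ... */
    fputs(COPY_TEMPLATE(""), stdout);            /* unprefixed variant     */
    return 0;
}
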
27355diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27356index 906fea3..ee8a097 100644
27357--- a/arch/x86/lib/usercopy_64.c
27358+++ b/arch/x86/lib/usercopy_64.c
27359@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27360 _ASM_EXTABLE(0b,3b)
27361 _ASM_EXTABLE(1b,2b)
27362 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27363- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27364+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27365 [zero] "r" (0UL), [eight] "r" (8UL));
27366 clac();
27367 return size;
27368@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27369 }
27370 EXPORT_SYMBOL(clear_user);
27371
27372-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27373+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27374 {
27375- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27376- return copy_user_generic((__force void *)to, (__force void *)from, len);
27377- }
27378- return len;
27379+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27380+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27381+ return len;
27382 }
27383 EXPORT_SYMBOL(copy_in_user);
27384
27385@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27386 * it is not necessary to optimize tail handling.
27387 */
27388 unsigned long
27389-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27390+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27391 {
27392 char c;
27393 unsigned zero_len;
27394@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27395 clac();
27396 return len;
27397 }
27398+
27399+void copy_from_user_overflow(void)
27400+{
27401+ WARN(1, "Buffer overflow detected!\n");
27402+}
27403+EXPORT_SYMBOL(copy_from_user_overflow);
27404+
27405+void copy_to_user_overflow(void)
27406+{
27407+ WARN(1, "Buffer overflow detected!\n");
27408+}
27409+EXPORT_SYMBOL(copy_to_user_overflow);
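
copy_from_user_overflow()/copy_to_user_overflow() exist as link targets for compile-time size checking: when the copy length provably exceeds the destination object, the call site is redirected to the stub, which warns at runtime. A loose userspace analogue built on __builtin_object_size (the kernel's real wiring lives in the uaccess headers and the size_overflow plugin; everything below is a sketch):

#include <stdio.h>
#include <string.h>

static void copy_overflow_detected(void)
{
    fprintf(stderr, "Buffer overflow detected!\n");
}

/* Sketch: gate the copy on the provable destination size. */
static void checked_copy(void *dst, const void *src, size_t n, size_t dst_sz)
{
    if (dst_sz != (size_t)-1 && n > dst_sz) {   /* -1: size not provable */
        copy_overflow_detected();
        return;
    }
    memcpy(dst, src, n);
}
#define CHECKED_COPY(dst, src, n) \
    checked_copy((dst), (src), (n), __builtin_object_size((dst), 0))

int main(void)
{
    char buf[8];
    CHECKED_COPY(buf, "0123456789ABCDEF", 16);  /* 16 > 8: triggers the stub */
    return 0;
}
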
27410diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27411index 903ec1e..c4166b2 100644
27412--- a/arch/x86/mm/extable.c
27413+++ b/arch/x86/mm/extable.c
27414@@ -6,12 +6,24 @@
27415 static inline unsigned long
27416 ex_insn_addr(const struct exception_table_entry *x)
27417 {
27418- return (unsigned long)&x->insn + x->insn;
27419+ unsigned long reloc = 0;
27420+
27421+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27422+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27423+#endif
27424+
27425+ return (unsigned long)&x->insn + x->insn + reloc;
27426 }
27427 static inline unsigned long
27428 ex_fixup_addr(const struct exception_table_entry *x)
27429 {
27430- return (unsigned long)&x->fixup + x->fixup;
27431+ unsigned long reloc = 0;
27432+
27433+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27434+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27435+#endif
27436+
27437+ return (unsigned long)&x->fixup + x->fixup + reloc;
27438 }
27439
27440 int fixup_exception(struct pt_regs *regs)
27441@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27442 unsigned long new_ip;
27443
27444 #ifdef CONFIG_PNPBIOS
27445- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27446+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27447 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27448 extern u32 pnp_bios_is_utter_crap;
27449 pnp_bios_is_utter_crap = 1;
27450@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27451 i += 4;
27452 p->fixup -= i;
27453 i += 4;
27454+
27455+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27456+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27457+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27458+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27459+#endif
27460+
27461 }
27462 }
27463
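
ex_insn_addr()/ex_fixup_addr() decode relative exception-table entries: each field stores a signed offset from its own address, and under KERNEXEC on i386 the kernel text is additionally shifted by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR, so that delta is re-added at decode time (and subtracted once by sort_extable()). A self-contained round trip of the encoding (reloc shown as zero):

#include <stdio.h>
#include <stdint.h>

struct extable_entry { int32_t insn; };  /* relative, as in the kernel */

static uintptr_t decode(const struct extable_entry *e, intptr_t reloc)
{
    /* mirrors: (unsigned long)&x->insn + x->insn + reloc */
    return (uintptr_t)&e->insn + e->insn + reloc;
}

int main(void)
{
    char target;                        /* pretend faulting instruction */
    struct extable_entry e;
    e.insn = (int32_t)((uintptr_t)&target - (uintptr_t)&e.insn); /* encode */
    printf("round trip ok: %d\n", decode(&e, 0) == (uintptr_t)&target);
    return 0;
}
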
27464diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27465index fb674fd..223a693 100644
27466--- a/arch/x86/mm/fault.c
27467+++ b/arch/x86/mm/fault.c
27468@@ -13,12 +13,19 @@
27469 #include <linux/perf_event.h> /* perf_sw_event */
27470 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27471 #include <linux/prefetch.h> /* prefetchw */
27472+#include <linux/unistd.h>
27473+#include <linux/compiler.h>
27474
27475 #include <asm/traps.h> /* dotraplinkage, ... */
27476 #include <asm/pgalloc.h> /* pgd_*(), ... */
27477 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27478 #include <asm/fixmap.h> /* VSYSCALL_START */
27479 #include <asm/context_tracking.h> /* exception_enter(), ... */
27480+#include <asm/tlbflush.h>
27481+
27482+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27483+#include <asm/stacktrace.h>
27484+#endif
27485
27486 /*
27487 * Page fault error code bits:
27488@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27489 int ret = 0;
27490
27491 /* kprobe_running() needs smp_processor_id() */
27492- if (kprobes_built_in() && !user_mode_vm(regs)) {
27493+ if (kprobes_built_in() && !user_mode(regs)) {
27494 preempt_disable();
27495 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27496 ret = 1;
27497@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27498 return !instr_lo || (instr_lo>>1) == 1;
27499 case 0x00:
27500 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27501- if (probe_kernel_address(instr, opcode))
27502+ if (user_mode(regs)) {
27503+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27504+ return 0;
27505+ } else if (probe_kernel_address(instr, opcode))
27506 return 0;
27507
27508 *prefetch = (instr_lo == 0xF) &&
27509@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27510 while (instr < max_instr) {
27511 unsigned char opcode;
27512
27513- if (probe_kernel_address(instr, opcode))
27514+ if (user_mode(regs)) {
27515+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27516+ break;
27517+ } else if (probe_kernel_address(instr, opcode))
27518 break;
27519
27520 instr++;
27521@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27522 force_sig_info(si_signo, &info, tsk);
27523 }
27524
27525+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27526+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27527+#endif
27528+
27529+#ifdef CONFIG_PAX_EMUTRAMP
27530+static int pax_handle_fetch_fault(struct pt_regs *regs);
27531+#endif
27532+
27533+#ifdef CONFIG_PAX_PAGEEXEC
27534+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27535+{
27536+ pgd_t *pgd;
27537+ pud_t *pud;
27538+ pmd_t *pmd;
27539+
27540+ pgd = pgd_offset(mm, address);
27541+ if (!pgd_present(*pgd))
27542+ return NULL;
27543+ pud = pud_offset(pgd, address);
27544+ if (!pud_present(*pud))
27545+ return NULL;
27546+ pmd = pmd_offset(pud, address);
27547+ if (!pmd_present(*pmd))
27548+ return NULL;
27549+ return pmd;
27550+}
27551+#endif
27552+
27553 DEFINE_SPINLOCK(pgd_lock);
27554 LIST_HEAD(pgd_list);
27555
27556@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27557 for (address = VMALLOC_START & PMD_MASK;
27558 address >= TASK_SIZE && address < FIXADDR_TOP;
27559 address += PMD_SIZE) {
27560+
27561+#ifdef CONFIG_PAX_PER_CPU_PGD
27562+ unsigned long cpu;
27563+#else
27564 struct page *page;
27565+#endif
27566
27567 spin_lock(&pgd_lock);
27568+
27569+#ifdef CONFIG_PAX_PER_CPU_PGD
27570+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27571+ pgd_t *pgd = get_cpu_pgd(cpu);
27572+ pmd_t *ret;
27573+#else
27574 list_for_each_entry(page, &pgd_list, lru) {
27575+ pgd_t *pgd;
27576 spinlock_t *pgt_lock;
27577 pmd_t *ret;
27578
27579@@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
27580 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27581
27582 spin_lock(pgt_lock);
27583- ret = vmalloc_sync_one(page_address(page), address);
27584+ pgd = page_address(page);
27585+#endif
27586+
27587+ ret = vmalloc_sync_one(pgd, address);
27588+
27589+#ifndef CONFIG_PAX_PER_CPU_PGD
27590 spin_unlock(pgt_lock);
27591+#endif
27592
27593 if (!ret)
27594 break;
27595@@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27596 * an interrupt in the middle of a task switch..
27597 */
27598 pgd_paddr = read_cr3();
27599+
27600+#ifdef CONFIG_PAX_PER_CPU_PGD
27601+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27602+#endif
27603+
27604 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27605 if (!pmd_k)
27606 return -1;
27607@@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27608 * happen within a race in page table update. In the later
27609 * case just flush:
27610 */
27611+
27612+#ifdef CONFIG_PAX_PER_CPU_PGD
27613+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27614+ pgd = pgd_offset_cpu(smp_processor_id(), address);
27615+#else
27616 pgd = pgd_offset(current->active_mm, address);
27617+#endif
27618+
27619 pgd_ref = pgd_offset_k(address);
27620 if (pgd_none(*pgd_ref))
27621 return -1;
27622@@ -541,7 +612,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27623 static int is_errata100(struct pt_regs *regs, unsigned long address)
27624 {
27625 #ifdef CONFIG_X86_64
27626- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27627+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27628 return 1;
27629 #endif
27630 return 0;
27631@@ -568,7 +639,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27632 }
27633
27634 static const char nx_warning[] = KERN_CRIT
27635-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27636+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27637
27638 static void
27639 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27640@@ -577,15 +648,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27641 if (!oops_may_print())
27642 return;
27643
27644- if (error_code & PF_INSTR) {
27645+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27646 unsigned int level;
27647
27648 pte_t *pte = lookup_address(address, &level);
27649
27650 if (pte && pte_present(*pte) && !pte_exec(*pte))
27651- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27652+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27653 }
27654
27655+#ifdef CONFIG_PAX_KERNEXEC
27656+ if (init_mm.start_code <= address && address < init_mm.end_code) {
27657+ if (current->signal->curr_ip)
27658+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27659+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
27660+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27661+ else
27662+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27663+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27664+ }
27665+#endif
27666+
27667 printk(KERN_ALERT "BUG: unable to handle kernel ");
27668 if (address < PAGE_SIZE)
27669 printk(KERN_CONT "NULL pointer dereference");
27670@@ -748,6 +831,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27671 return;
27672 }
27673 #endif
27674+
27675+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27676+ if (pax_is_fetch_fault(regs, error_code, address)) {
27677+
27678+#ifdef CONFIG_PAX_EMUTRAMP
27679+ switch (pax_handle_fetch_fault(regs)) {
27680+ case 2:
27681+ return;
27682+ }
27683+#endif
27684+
27685+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27686+ do_group_exit(SIGKILL);
27687+ }
27688+#endif
27689+
27690 /* Kernel addresses are always protection faults: */
27691 if (address >= TASK_SIZE)
27692 error_code |= PF_PROT;
27693@@ -833,7 +932,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27694 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27695 printk(KERN_ERR
27696 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27697- tsk->comm, tsk->pid, address);
27698+ tsk->comm, task_pid_nr(tsk), address);
27699 code = BUS_MCEERR_AR;
27700 }
27701 #endif
27702@@ -896,6 +995,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27703 return 1;
27704 }
27705
27706+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27707+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27708+{
27709+ pte_t *pte;
27710+ pmd_t *pmd;
27711+ spinlock_t *ptl;
27712+ unsigned char pte_mask;
27713+
27714+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27715+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
27716+ return 0;
27717+
27718+ /* PaX: it's our fault, let's handle it if we can */
27719+
27720+ /* PaX: take a look at read faults before acquiring any locks */
27721+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27722+ /* instruction fetch attempt from a protected page in user mode */
27723+ up_read(&mm->mmap_sem);
27724+
27725+#ifdef CONFIG_PAX_EMUTRAMP
27726+ switch (pax_handle_fetch_fault(regs)) {
27727+ case 2:
27728+ return 1;
27729+ }
27730+#endif
27731+
27732+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27733+ do_group_exit(SIGKILL);
27734+ }
27735+
27736+ pmd = pax_get_pmd(mm, address);
27737+ if (unlikely(!pmd))
27738+ return 0;
27739+
27740+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27741+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27742+ pte_unmap_unlock(pte, ptl);
27743+ return 0;
27744+ }
27745+
27746+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27747+ /* write attempt to a protected page in user mode */
27748+ pte_unmap_unlock(pte, ptl);
27749+ return 0;
27750+ }
27751+
27752+#ifdef CONFIG_SMP
27753+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27754+#else
27755+ if (likely(address > get_limit(regs->cs)))
27756+#endif
27757+ {
27758+ set_pte(pte, pte_mkread(*pte));
27759+ __flush_tlb_one(address);
27760+ pte_unmap_unlock(pte, ptl);
27761+ up_read(&mm->mmap_sem);
27762+ return 1;
27763+ }
27764+
27765+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27766+
27767+ /*
27768+ * PaX: fill DTLB with user rights and retry
27769+ */
27770+ __asm__ __volatile__ (
27771+ "orb %2,(%1)\n"
27772+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27773+/*
27774+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
27775+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
27776+ * page fault when examined during a TLB load attempt. this is true not only
27777+ * for PTEs holding a non-present entry but also present entries that will
27778+ * raise a page fault (such as those set up by PaX, or the copy-on-write
27779+ * mechanism). in effect it means that we do *not* need to flush the TLBs
27780+ * for our target pages since their PTEs are simply not in the TLBs at all.
27781+ *
27782+ * the best thing about omitting it is that we gain around 15-20% speed in the
27783+ * fast path of the page fault handler and can get rid of tracing since we
27784+ * can no longer flush unintended entries.
27785+ */
27786+ "invlpg (%0)\n"
27787+#endif
27788+ __copyuser_seg"testb $0,(%0)\n"
27789+ "xorb %3,(%1)\n"
27790+ :
27791+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27792+ : "memory", "cc");
27793+ pte_unmap_unlock(pte, ptl);
27794+ up_read(&mm->mmap_sem);
27795+ return 1;
27796+}
27797+#endif
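
The pte_mask computation above is compact bit arithmetic: with the standard x86 values PF_WRITE = 1 << 1 and _PAGE_BIT_DIRTY = 6, a write fault gives (error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1) = 2 << 5 = _PAGE_DIRTY. The orb thus temporarily grants ACCESSED|USER (plus DIRTY on writes), the segment-prefixed testb loads the PTE into the DTLB with those rights, and the xorb strips USER again. A two-line check of the arithmetic:

#include <assert.h>

int main(void)
{
    unsigned long PF_WRITE = 1UL << 1, _PAGE_BIT_DIRTY = 6;
    unsigned long _PAGE_DIRTY = 1UL << _PAGE_BIT_DIRTY;      /* 0x40 */
    unsigned long error_code = PF_WRITE;                     /* write fault */
    assert(((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1)) == _PAGE_DIRTY);
    return 0;
}
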
27798+
27799 /*
27800 * Handle a spurious fault caused by a stale TLB entry.
27801 *
27802@@ -968,6 +1160,9 @@ int show_unhandled_signals = 1;
27803 static inline int
27804 access_error(unsigned long error_code, struct vm_area_struct *vma)
27805 {
27806+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27807+ return 1;
27808+
27809 if (error_code & PF_WRITE) {
27810 /* write, present and write, not present: */
27811 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27812@@ -996,7 +1191,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27813 if (error_code & PF_USER)
27814 return false;
27815
27816- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27817+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27818 return false;
27819
27820 return true;
27821@@ -1012,18 +1207,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27822 {
27823 struct vm_area_struct *vma;
27824 struct task_struct *tsk;
27825- unsigned long address;
27826 struct mm_struct *mm;
27827 int fault;
27828 int write = error_code & PF_WRITE;
27829 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27830 (write ? FAULT_FLAG_WRITE : 0);
27831
27832- tsk = current;
27833- mm = tsk->mm;
27834-
27835 /* Get the faulting address: */
27836- address = read_cr2();
27837+ unsigned long address = read_cr2();
27838+
27839+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27840+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
27841+ if (!search_exception_tables(regs->ip)) {
27842+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27843+ bad_area_nosemaphore(regs, error_code, address);
27844+ return;
27845+ }
27846+ if (address < PAX_USER_SHADOW_BASE) {
27847+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27848+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27849+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27850+ } else
27851+ address -= PAX_USER_SHADOW_BASE;
27852+ }
27853+#endif
27854+
27855+ tsk = current;
27856+ mm = tsk->mm;
27857
27858 /*
27859 * Detect and handle instructions that would cause a page fault for
27860@@ -1084,7 +1294,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27861 * User-mode registers count as a user access even for any
27862 * potential system fault or CPU buglet:
27863 */
27864- if (user_mode_vm(regs)) {
27865+ if (user_mode(regs)) {
27866 local_irq_enable();
27867 error_code |= PF_USER;
27868 } else {
27869@@ -1146,6 +1356,11 @@ retry:
27870 might_sleep();
27871 }
27872
27873+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27874+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27875+ return;
27876+#endif
27877+
27878 vma = find_vma(mm, address);
27879 if (unlikely(!vma)) {
27880 bad_area(regs, error_code, address);
27881@@ -1157,18 +1372,24 @@ retry:
27882 bad_area(regs, error_code, address);
27883 return;
27884 }
27885- if (error_code & PF_USER) {
27886- /*
27887- * Accessing the stack below %sp is always a bug.
27888- * The large cushion allows instructions like enter
27889- * and pusha to work. ("enter $65535, $31" pushes
27890- * 32 pointers and then decrements %sp by 65535.)
27891- */
27892- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27893- bad_area(regs, error_code, address);
27894- return;
27895- }
27896+ /*
27897+ * Accessing the stack below %sp is always a bug.
27898+ * The large cushion allows instructions like enter
27899+ * and pusha to work. ("enter $65535, $31" pushes
27900+ * 32 pointers and then decrements %sp by 65535.)
27901+ */
27902+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27903+ bad_area(regs, error_code, address);
27904+ return;
27905 }
27906+
27907+#ifdef CONFIG_PAX_SEGMEXEC
27908+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27909+ bad_area(regs, error_code, address);
27910+ return;
27911+ }
27912+#endif
27913+
27914 if (unlikely(expand_stack(vma, address))) {
27915 bad_area(regs, error_code, address);
27916 return;
27917@@ -1232,3 +1453,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27918 __do_page_fault(regs, error_code);
27919 exception_exit(regs);
27920 }
27921+
27922+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27923+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27924+{
27925+ struct mm_struct *mm = current->mm;
27926+ unsigned long ip = regs->ip;
27927+
27928+ if (v8086_mode(regs))
27929+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27930+
27931+#ifdef CONFIG_PAX_PAGEEXEC
27932+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27933+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27934+ return true;
27935+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27936+ return true;
27937+ return false;
27938+ }
27939+#endif
27940+
27941+#ifdef CONFIG_PAX_SEGMEXEC
27942+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27943+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27944+ return true;
27945+ return false;
27946+ }
27947+#endif
27948+
27949+ return false;
27950+}
27951+#endif
27952+
27953+#ifdef CONFIG_PAX_EMUTRAMP
27954+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27955+{
27956+ int err;
27957+
27958+ do { /* PaX: libffi trampoline emulation */
27959+ unsigned char mov, jmp;
27960+ unsigned int addr1, addr2;
27961+
27962+#ifdef CONFIG_X86_64
27963+ if ((regs->ip + 9) >> 32)
27964+ break;
27965+#endif
27966+
27967+ err = get_user(mov, (unsigned char __user *)regs->ip);
27968+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27969+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27970+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27971+
27972+ if (err)
27973+ break;
27974+
27975+ if (mov == 0xB8 && jmp == 0xE9) {
27976+ regs->ax = addr1;
27977+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27978+ return 2;
27979+ }
27980+ } while (0);
27981+
27982+ do { /* PaX: gcc trampoline emulation #1 */
27983+ unsigned char mov1, mov2;
27984+ unsigned short jmp;
27985+ unsigned int addr1, addr2;
27986+
27987+#ifdef CONFIG_X86_64
27988+ if ((regs->ip + 11) >> 32)
27989+ break;
27990+#endif
27991+
27992+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27993+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27994+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27995+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27996+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27997+
27998+ if (err)
27999+ break;
28000+
28001+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
28002+ regs->cx = addr1;
28003+ regs->ax = addr2;
28004+ regs->ip = addr2;
28005+ return 2;
28006+ }
28007+ } while (0);
28008+
28009+ do { /* PaX: gcc trampoline emulation #2 */
28010+ unsigned char mov, jmp;
28011+ unsigned int addr1, addr2;
28012+
28013+#ifdef CONFIG_X86_64
28014+ if ((regs->ip + 9) >> 32)
28015+ break;
28016+#endif
28017+
28018+ err = get_user(mov, (unsigned char __user *)regs->ip);
28019+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
28020+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
28021+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
28022+
28023+ if (err)
28024+ break;
28025+
28026+ if (mov == 0xB9 && jmp == 0xE9) {
28027+ regs->cx = addr1;
28028+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
28029+ return 2;
28030+ }
28031+ } while (0);
28032+
28033+ return 1; /* PaX in action */
28034+}
28035+
28036+#ifdef CONFIG_X86_64
28037+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
28038+{
28039+ int err;
28040+
28041+ do { /* PaX: libffi trampoline emulation */
28042+ unsigned short mov1, mov2, jmp1;
28043+ unsigned char stcclc, jmp2;
28044+ unsigned long addr1, addr2;
28045+
28046+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28047+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28048+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28049+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28050+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
28051+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
28052+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
28053+
28054+ if (err)
28055+ break;
28056+
28057+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28058+ regs->r11 = addr1;
28059+ regs->r10 = addr2;
28060+ if (stcclc == 0xF8)
28061+ regs->flags &= ~X86_EFLAGS_CF;
28062+ else
28063+ regs->flags |= X86_EFLAGS_CF;
28064+ regs->ip = addr1;
28065+ return 2;
28066+ }
28067+ } while (0);
28068+
28069+ do { /* PaX: gcc trampoline emulation #1 */
28070+ unsigned short mov1, mov2, jmp1;
28071+ unsigned char jmp2;
28072+ unsigned int addr1;
28073+ unsigned long addr2;
28074+
28075+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28076+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
28077+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
28078+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
28079+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
28080+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
28081+
28082+ if (err)
28083+ break;
28084+
28085+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28086+ regs->r11 = addr1;
28087+ regs->r10 = addr2;
28088+ regs->ip = addr1;
28089+ return 2;
28090+ }
28091+ } while (0);
28092+
28093+ do { /* PaX: gcc trampoline emulation #2 */
28094+ unsigned short mov1, mov2, jmp1;
28095+ unsigned char jmp2;
28096+ unsigned long addr1, addr2;
28097+
28098+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28099+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28100+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28101+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28102+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
28103+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
28104+
28105+ if (err)
28106+ break;
28107+
28108+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28109+ regs->r11 = addr1;
28110+ regs->r10 = addr2;
28111+ regs->ip = addr1;
28112+ return 2;
28113+ }
28114+ } while (0);
28115+
28116+ return 1; /* PaX in action */
28117+}
28118+#endif
28119+
28120+/*
28121+ * PaX: decide what to do with offenders (regs->ip = fault address)
28122+ *
28123+ * returns 1 when task should be killed
28124+ * 2 when gcc trampoline was detected
28125+ */
28126+static int pax_handle_fetch_fault(struct pt_regs *regs)
28127+{
28128+ if (v8086_mode(regs))
28129+ return 1;
28130+
28131+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
28132+ return 1;
28133+
28134+#ifdef CONFIG_X86_32
28135+ return pax_handle_fetch_fault_32(regs);
28136+#else
28137+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
28138+ return pax_handle_fetch_fault_32(regs);
28139+ else
28140+ return pax_handle_fetch_fault_64(regs);
28141+#endif
28142+}
28143+#endif
28144+
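
Each do { } while (0) block above fetches a few bytes at the faulting ip and matches a known trampoline byte pattern; gcc trampoline #2 on i386, for instance, is mov $imm32,%ecx (opcode B9) followed by jmp rel32 (opcode E9), and emulation loads %ecx and sets ip = ip + disp + 10, the trampoline's length. A userspace decoder for that one pattern (memcpy stands in for get_user):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Match "B9 imm32 E9 disp32" at 'code' and compute the emulated state,
 * as pax_handle_fetch_fault_32()'s trampoline #2 case does. */
static int emulate_tramp2(const uint8_t *code, uint32_t ip,
                          uint32_t *ecx, uint32_t *new_ip)
{
    uint32_t addr1, disp;
    if (code[0] != 0xB9 || code[5] != 0xE9)
        return 0;
    memcpy(&addr1, code + 1, 4);
    memcpy(&disp,  code + 6, 4);
    *ecx = addr1;
    *new_ip = ip + disp + 10;  /* 10 = sizeof(mov imm32) + sizeof(jmp rel32) */
    return 1;
}

int main(void)
{
    const uint8_t t[10] = { 0xB9, 0x78,0x56,0x34,0x12,   /* mov $0x12345678,%ecx */
                            0xE9, 0x00,0x10,0x00,0x00 }; /* jmp rel32 0x1000     */
    uint32_t ecx, nip;
    if (emulate_tramp2(t, 0x8048000, &ecx, &nip))
        printf("ecx=%#x new_ip=%#x\n", ecx, nip);  /* ecx=0x12345678 */
    return 0;
}
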
28145+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28146+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
28147+{
28148+ long i;
28149+
28150+ printk(KERN_ERR "PAX: bytes at PC: ");
28151+ for (i = 0; i < 20; i++) {
28152+ unsigned char c;
28153+ if (get_user(c, (unsigned char __force_user *)pc+i))
28154+ printk(KERN_CONT "?? ");
28155+ else
28156+ printk(KERN_CONT "%02x ", c);
28157+ }
28158+ printk("\n");
28159+
28160+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
28161+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
28162+ unsigned long c;
28163+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
28164+#ifdef CONFIG_X86_32
28165+ printk(KERN_CONT "???????? ");
28166+#else
28167+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
28168+ printk(KERN_CONT "???????? ???????? ");
28169+ else
28170+ printk(KERN_CONT "???????????????? ");
28171+#endif
28172+ } else {
28173+#ifdef CONFIG_X86_64
28174+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
28175+ printk(KERN_CONT "%08x ", (unsigned int)c);
28176+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
28177+ } else
28178+#endif
28179+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
28180+ }
28181+ }
28182+ printk("\n");
28183+}
28184+#endif
28185+
28186+/**
28187+ * probe_kernel_write(): safely attempt to write to a location
28188+ * @dst: address to write to
28189+ * @src: pointer to the data that shall be written
28190+ * @size: size of the data chunk
28191+ *
28192+ * Safely write to address @dst from the buffer at @src. If a kernel fault
28193+ * happens, handle that and return -EFAULT.
28194+ */
28195+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
28196+{
28197+ long ret;
28198+ mm_segment_t old_fs = get_fs();
28199+
28200+ set_fs(KERNEL_DS);
28201+ pagefault_disable();
28202+ pax_open_kernel();
28203+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
28204+ pax_close_kernel();
28205+ pagefault_enable();
28206+ set_fs(old_fs);
28207+
28208+ return ret ? -EFAULT : 0;
28209+}
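
probe_kernel_write() follows the classic set_fs() discipline: save the current limit, widen it to KERNEL_DS, perform the copy with pagefaults disabled so a fault resolves through the exception table, then restore. A userspace analogue of the save/override/restore ordering (addr_limit, USER_DS and KERNEL_DS below are stand-ins, not kernel symbols):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uintptr_t addr_limit;
#define USER_DS   (UINTPTR_MAX / 2)
#define KERNEL_DS UINTPTR_MAX

static int checked_write(void *dst, const void *src, size_t n)
{
    if ((uintptr_t)dst > addr_limit)   /* the access_ok()-style gate */
        return -1;                     /* -EFAULT in the real thing  */
    memcpy(dst, src, n);
    return 0;
}

int main(void)
{
    char target[3];
    addr_limit = USER_DS;
    uintptr_t old_limit = addr_limit;  /* get_fs()          */
    addr_limit = KERNEL_DS;            /* set_fs(KERNEL_DS) */
    int ret = checked_write(target, "hi", 3);
    addr_limit = old_limit;            /* set_fs(old_fs)    */
    printf("ret=%d target=%s\n", ret, target);
    return 0;
}
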
28210diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
28211index dd74e46..7d26398 100644
28212--- a/arch/x86/mm/gup.c
28213+++ b/arch/x86/mm/gup.c
28214@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
28215 addr = start;
28216 len = (unsigned long) nr_pages << PAGE_SHIFT;
28217 end = start + len;
28218- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28219+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28220 (void __user *)start, len)))
28221 return 0;
28222
28223diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
28224index 6f31ee5..8ee4164 100644
28225--- a/arch/x86/mm/highmem_32.c
28226+++ b/arch/x86/mm/highmem_32.c
28227@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
28228 idx = type + KM_TYPE_NR*smp_processor_id();
28229 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28230 BUG_ON(!pte_none(*(kmap_pte-idx)));
28231+
28232+ pax_open_kernel();
28233 set_pte(kmap_pte-idx, mk_pte(page, prot));
28234+ pax_close_kernel();
28235+
28236 arch_flush_lazy_mmu_mode();
28237
28238 return (void *)vaddr;
28239diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
28240index ae1aa71..56316db 100644
28241--- a/arch/x86/mm/hugetlbpage.c
28242+++ b/arch/x86/mm/hugetlbpage.c
28243@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
28244 info.flags = 0;
28245 info.length = len;
28246 info.low_limit = TASK_UNMAPPED_BASE;
28247+
28248+#ifdef CONFIG_PAX_RANDMMAP
28249+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28250+ info.low_limit += current->mm->delta_mmap;
28251+#endif
28252+
28253 info.high_limit = TASK_SIZE;
28254 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28255 info.align_offset = 0;
28256@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28257 VM_BUG_ON(addr != -ENOMEM);
28258 info.flags = 0;
28259 info.low_limit = TASK_UNMAPPED_BASE;
28260+
28261+#ifdef CONFIG_PAX_RANDMMAP
28262+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28263+ info.low_limit += current->mm->delta_mmap;
28264+#endif
28265+
28266 info.high_limit = TASK_SIZE;
28267 addr = vm_unmapped_area(&info);
28268 }
28269@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28270 struct hstate *h = hstate_file(file);
28271 struct mm_struct *mm = current->mm;
28272 struct vm_area_struct *vma;
28273+ unsigned long pax_task_size = TASK_SIZE;
28274+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
28275
28276 if (len & ~huge_page_mask(h))
28277 return -EINVAL;
28278- if (len > TASK_SIZE)
28279+
28280+#ifdef CONFIG_PAX_SEGMEXEC
28281+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28282+ pax_task_size = SEGMEXEC_TASK_SIZE;
28283+#endif
28284+
28285+ pax_task_size -= PAGE_SIZE;
28286+
28287+ if (len > pax_task_size)
28288 return -ENOMEM;
28289
28290 if (flags & MAP_FIXED) {
28291@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28292 return addr;
28293 }
28294
28295+#ifdef CONFIG_PAX_RANDMMAP
28296+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28297+#endif
28298+
28299 if (addr) {
28300 addr = ALIGN(addr, huge_page_size(h));
28301 vma = find_vma(mm, addr);
28302- if (TASK_SIZE - len >= addr &&
28303- (!vma || addr + len <= vma->vm_start))
28304+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28305 return addr;
28306 }
28307 if (mm->get_unmapped_area == arch_get_unmapped_area)
28308diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28309index d7aea41..0fc945b 100644
28310--- a/arch/x86/mm/init.c
28311+++ b/arch/x86/mm/init.c
28312@@ -4,6 +4,7 @@
28313 #include <linux/swap.h>
28314 #include <linux/memblock.h>
28315 #include <linux/bootmem.h> /* for max_low_pfn */
28316+#include <linux/tboot.h>
28317
28318 #include <asm/cacheflush.h>
28319 #include <asm/e820.h>
28320@@ -16,6 +17,8 @@
28321 #include <asm/tlb.h>
28322 #include <asm/proto.h>
28323 #include <asm/dma.h> /* for MAX_DMA_PFN */
28324+#include <asm/desc.h>
28325+#include <asm/bios_ebda.h>
28326
28327 unsigned long __initdata pgt_buf_start;
28328 unsigned long __meminitdata pgt_buf_end;
28329@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
28330 {
28331 int i;
28332 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
28333- unsigned long start = 0, good_end;
28334+ unsigned long start = 0x100000, good_end;
28335 phys_addr_t base;
28336
28337 for (i = 0; i < nr_range; i++) {
28338@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
28339 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28340 * mmio resources as well as potential bios/acpi data regions.
28341 */
28342+
28343+#ifdef CONFIG_GRKERNSEC_KMEM
28344+static unsigned int ebda_start __read_only;
28345+static unsigned int ebda_end __read_only;
28346+#endif
28347+
28348 int devmem_is_allowed(unsigned long pagenr)
28349 {
28350- if (pagenr < 256)
28351+#ifdef CONFIG_GRKERNSEC_KMEM
28352+ /* allow BDA */
28353+ if (!pagenr)
28354 return 1;
28355+ /* allow EBDA */
28356+ if (pagenr >= ebda_start && pagenr < ebda_end)
28357+ return 1;
28358+ /* if tboot is in use, allow access to its hardcoded serial log range */
28359+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28360+ return 1;
28361+#else
28362+ if (!pagenr)
28363+ return 1;
28364+#ifdef CONFIG_VM86
28365+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28366+ return 1;
28367+#endif
28368+#endif
28369+
28370+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28371+ return 1;
28372+#ifdef CONFIG_GRKERNSEC_KMEM
28373+ /* throw out everything else below 1MB */
28374+ if (pagenr <= 256)
28375+ return 0;
28376+#endif
28377 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28378 return 0;
28379 if (!page_is_ram(pagenr))
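
devmem_is_allowed() reasons in page frames (PAGE_SHIFT = 12): frame 0 is the BDA, the EBDA normally sits just below frame 0xA0, the ISA hole covers frames 0xA0-0xFF, and under GRKERNSEC_KMEM everything else below 1 MiB is refused. The frame arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
    const unsigned PAGE_SHIFT = 12;
    printf("ISA hole:  frames %#x..%#x\n",
           0xa0000 >> PAGE_SHIFT, (0x100000 >> PAGE_SHIFT) - 1); /* 0xa0..0xff */
    printf("tboot log: frames %#x..%#x\n",
           0x60000 >> PAGE_SHIFT, (0x68000 >> PAGE_SHIFT) - 1);  /* 0x60..0x67 */
    return 0;
}
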
28380@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28381 #endif
28382 }
28383
28384+#ifdef CONFIG_GRKERNSEC_KMEM
28385+static inline void gr_init_ebda(void)
28386+{
28387+ unsigned int ebda_addr;
28388+ unsigned int ebda_size = 0;
28389+
28390+ ebda_addr = get_bios_ebda();
28391+ if (ebda_addr) {
28392+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28393+ ebda_size <<= 10;
28394+ }
28395+ if (ebda_addr && ebda_size) {
28396+ ebda_start = ebda_addr >> PAGE_SHIFT;
28397+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28398+ } else {
28399+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28400+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28401+ }
28402+}
28403+#else
28404+static inline void gr_init_ebda(void) { }
28405+#endif
28406+
28407 void free_initmem(void)
28408 {
28409+#ifdef CONFIG_PAX_KERNEXEC
28410+#ifdef CONFIG_X86_32
28411+ /* PaX: limit KERNEL_CS to actual size */
28412+ unsigned long addr, limit;
28413+ struct desc_struct d;
28414+ int cpu;
28415+#else
28416+ pgd_t *pgd;
28417+ pud_t *pud;
28418+ pmd_t *pmd;
28419+ unsigned long addr, end;
28420+#endif
28421+#endif
28422+
28423+ gr_init_ebda();
28424+
28425+#ifdef CONFIG_PAX_KERNEXEC
28426+#ifdef CONFIG_X86_32
28427+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28428+ limit = (limit - 1UL) >> PAGE_SHIFT;
28429+
28430+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28431+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28432+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28433+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28434+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28435+ }
28436+
28437+ /* PaX: make KERNEL_CS read-only */
28438+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28439+ if (!paravirt_enabled())
28440+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28441+/*
28442+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28443+ pgd = pgd_offset_k(addr);
28444+ pud = pud_offset(pgd, addr);
28445+ pmd = pmd_offset(pud, addr);
28446+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28447+ }
28448+*/
28449+#ifdef CONFIG_X86_PAE
28450+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28451+/*
28452+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28453+ pgd = pgd_offset_k(addr);
28454+ pud = pud_offset(pgd, addr);
28455+ pmd = pmd_offset(pud, addr);
28456+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28457+ }
28458+*/
28459+#endif
28460+
28461+#ifdef CONFIG_MODULES
28462+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28463+#endif
28464+
28465+#else
28466+ /* PaX: make kernel code/rodata read-only, rest non-executable */
28467+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28468+ pgd = pgd_offset_k(addr);
28469+ pud = pud_offset(pgd, addr);
28470+ pmd = pmd_offset(pud, addr);
28471+ if (!pmd_present(*pmd))
28472+ continue;
28473+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28474+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28475+ else
28476+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28477+ }
28478+
28479+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28480+ end = addr + KERNEL_IMAGE_SIZE;
28481+ for (; addr < end; addr += PMD_SIZE) {
28482+ pgd = pgd_offset_k(addr);
28483+ pud = pud_offset(pgd, addr);
28484+ pmd = pmd_offset(pud, addr);
28485+ if (!pmd_present(*pmd))
28486+ continue;
28487+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28488+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28489+ }
28490+#endif
28491+
28492+ flush_tlb_all();
28493+#endif
28494+
28495 free_init_pages("unused kernel memory",
28496 (unsigned long)(&__init_begin),
28497 (unsigned long)(&__init_end));
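
gr_init_ebda() above derives its lockdown range from firmware data: get_bios_ebda() reads the BDA word at physical 0x40E and shifts it left four bits to get the EBDA base, and the EBDA's first byte holds its size in KiB. The arithmetic with a typical firmware value (0x9FC0 is a sample, not a constant from the patch):

#include <stdio.h>

int main(void)
{
    unsigned short bda_word = 0x9FC0;              /* sample BDA[0x40E]    */
    unsigned ebda_addr = (unsigned)bda_word << 4;  /* 0x9FC00              */
    unsigned char size_kib = 1;                    /* sample first EBDA byte */
    unsigned ebda_size = (unsigned)size_kib << 10; /* bytes                */

    printf("EBDA frames: %#x..%#x\n",
           ebda_addr >> 12,
           ((ebda_addr + ebda_size + 0xfff) >> 12) - 1);  /* 0x9f..0x9f */
    return 0;
}
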
28498diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28499index 745d66b..56bf568 100644
28500--- a/arch/x86/mm/init_32.c
28501+++ b/arch/x86/mm/init_32.c
28502@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
28503 }
28504
28505 /*
28506- * Creates a middle page table and puts a pointer to it in the
28507- * given global directory entry. This only returns the gd entry
28508- * in non-PAE compilation mode, since the middle layer is folded.
28509- */
28510-static pmd_t * __init one_md_table_init(pgd_t *pgd)
28511-{
28512- pud_t *pud;
28513- pmd_t *pmd_table;
28514-
28515-#ifdef CONFIG_X86_PAE
28516- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28517- if (after_bootmem)
28518- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
28519- else
28520- pmd_table = (pmd_t *)alloc_low_page();
28521- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28522- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28523- pud = pud_offset(pgd, 0);
28524- BUG_ON(pmd_table != pmd_offset(pud, 0));
28525-
28526- return pmd_table;
28527- }
28528-#endif
28529- pud = pud_offset(pgd, 0);
28530- pmd_table = pmd_offset(pud, 0);
28531-
28532- return pmd_table;
28533-}
28534-
28535-/*
28536 * Create a page table and place a pointer to it in a middle page
28537 * directory entry:
28538 */
28539@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28540 page_table = (pte_t *)alloc_low_page();
28541
28542 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28543+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28544+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28545+#else
28546 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28547+#endif
28548 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28549 }
28550
28551 return pte_offset_kernel(pmd, 0);
28552 }
28553
28554+static pmd_t * __init one_md_table_init(pgd_t *pgd)
28555+{
28556+ pud_t *pud;
28557+ pmd_t *pmd_table;
28558+
28559+ pud = pud_offset(pgd, 0);
28560+ pmd_table = pmd_offset(pud, 0);
28561+
28562+ return pmd_table;
28563+}
28564+
28565 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28566 {
28567 int pgd_idx = pgd_index(vaddr);
28568@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28569 int pgd_idx, pmd_idx;
28570 unsigned long vaddr;
28571 pgd_t *pgd;
28572+ pud_t *pud;
28573 pmd_t *pmd;
28574 pte_t *pte = NULL;
28575
28576@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28577 pgd = pgd_base + pgd_idx;
28578
28579 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28580- pmd = one_md_table_init(pgd);
28581- pmd = pmd + pmd_index(vaddr);
28582+ pud = pud_offset(pgd, vaddr);
28583+ pmd = pmd_offset(pud, vaddr);
28584+
28585+#ifdef CONFIG_X86_PAE
28586+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28587+#endif
28588+
28589 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28590 pmd++, pmd_idx++) {
28591 pte = page_table_kmap_check(one_page_table_init(pmd),
28592@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28593 }
28594 }
28595
28596-static inline int is_kernel_text(unsigned long addr)
28597+static inline int is_kernel_text(unsigned long start, unsigned long end)
28598 {
28599- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28600- return 1;
28601- return 0;
28602+ if ((start > ktla_ktva((unsigned long)_etext) ||
28603+ end <= ktla_ktva((unsigned long)_stext)) &&
28604+ (start > ktla_ktva((unsigned long)_einittext) ||
28605+ end <= ktla_ktva((unsigned long)_sinittext)) &&
28606+
28607+#ifdef CONFIG_ACPI_SLEEP
28608+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28609+#endif
28610+
28611+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28612+ return 0;
28613+ return 1;
28614 }
28615
28616 /*
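
The rewritten is_kernel_text() takes a whole [start, end) range and chains "lies entirely outside" tests against each protected region; inverted, that is the usual half-open interval intersection predicate, shown below:

#include <stdio.h>

/* Half-open ranges [s1,e1) and [s2,e2) intersect iff neither lies
 * entirely before the other -- the inverse of the tests chained in
 * the rewritten is_kernel_text(). */
static int intersects(unsigned long s1, unsigned long e1,
                      unsigned long s2, unsigned long e2)
{
    return !(s1 >= e2 || e1 <= s2);
}

int main(void)
{
    printf("%d %d\n",
           intersects(0xc0400000, 0xc0600000, 0xc0100000, 0xc0400000),  /* 0 */
           intersects(0xc0200000, 0xc0400000, 0xc0100000, 0xc0300000)); /* 1 */
    return 0;
}
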
28617@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
28618 unsigned long last_map_addr = end;
28619 unsigned long start_pfn, end_pfn;
28620 pgd_t *pgd_base = swapper_pg_dir;
28621- int pgd_idx, pmd_idx, pte_ofs;
28622+ unsigned int pgd_idx, pmd_idx, pte_ofs;
28623 unsigned long pfn;
28624 pgd_t *pgd;
28625+ pud_t *pud;
28626 pmd_t *pmd;
28627 pte_t *pte;
28628 unsigned pages_2m, pages_4k;
28629@@ -280,8 +281,13 @@ repeat:
28630 pfn = start_pfn;
28631 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28632 pgd = pgd_base + pgd_idx;
28633- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28634- pmd = one_md_table_init(pgd);
28635+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28636+ pud = pud_offset(pgd, 0);
28637+ pmd = pmd_offset(pud, 0);
28638+
28639+#ifdef CONFIG_X86_PAE
28640+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28641+#endif
28642
28643 if (pfn >= end_pfn)
28644 continue;
28645@@ -293,14 +299,13 @@ repeat:
28646 #endif
28647 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28648 pmd++, pmd_idx++) {
28649- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28650+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28651
28652 /*
28653 * Map with big pages if possible, otherwise
28654 * create normal page tables:
28655 */
28656 if (use_pse) {
28657- unsigned int addr2;
28658 pgprot_t prot = PAGE_KERNEL_LARGE;
28659 /*
28660 * first pass will use the same initial
28661@@ -310,11 +315,7 @@ repeat:
28662 __pgprot(PTE_IDENT_ATTR |
28663 _PAGE_PSE);
28664
28665- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28666- PAGE_OFFSET + PAGE_SIZE-1;
28667-
28668- if (is_kernel_text(addr) ||
28669- is_kernel_text(addr2))
28670+ if (is_kernel_text(address, address + PMD_SIZE))
28671 prot = PAGE_KERNEL_LARGE_EXEC;
28672
28673 pages_2m++;
28674@@ -331,7 +332,7 @@ repeat:
28675 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28676 pte += pte_ofs;
28677 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28678- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28679+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28680 pgprot_t prot = PAGE_KERNEL;
28681 /*
28682 * first pass will use the same initial
28683@@ -339,7 +340,7 @@ repeat:
28684 */
28685 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28686
28687- if (is_kernel_text(addr))
28688+ if (is_kernel_text(address, address + PAGE_SIZE))
28689 prot = PAGE_KERNEL_EXEC;
28690
28691 pages_4k++;
28692@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
28693
28694 pud = pud_offset(pgd, va);
28695 pmd = pmd_offset(pud, va);
28696- if (!pmd_present(*pmd))
28697+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
28698 break;
28699
28700 pte = pte_offset_kernel(pmd, va);
28701@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
28702
28703 static void __init pagetable_init(void)
28704 {
28705- pgd_t *pgd_base = swapper_pg_dir;
28706-
28707- permanent_kmaps_init(pgd_base);
28708+ permanent_kmaps_init(swapper_pg_dir);
28709 }
28710
28711-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28712+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28713 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28714
28715 /* user-defined highmem size */
28716@@ -728,6 +727,12 @@ void __init mem_init(void)
28717
28718 pci_iommu_alloc();
28719
28720+#ifdef CONFIG_PAX_PER_CPU_PGD
28721+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28722+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28723+ KERNEL_PGD_PTRS);
28724+#endif
28725+
28726 #ifdef CONFIG_FLATMEM
28727 BUG_ON(!mem_map);
28728 #endif
28729@@ -754,7 +759,7 @@ void __init mem_init(void)
28730 reservedpages++;
28731
28732 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28733- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28734+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28735 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28736
28737 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28738@@ -795,10 +800,10 @@ void __init mem_init(void)
28739 ((unsigned long)&__init_end -
28740 (unsigned long)&__init_begin) >> 10,
28741
28742- (unsigned long)&_etext, (unsigned long)&_edata,
28743- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28744+ (unsigned long)&_sdata, (unsigned long)&_edata,
28745+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28746
28747- (unsigned long)&_text, (unsigned long)&_etext,
28748+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28749 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28750
28751 /*
28752@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
28753 if (!kernel_set_to_readonly)
28754 return;
28755
28756+ start = ktla_ktva(start);
28757 pr_debug("Set kernel text: %lx - %lx for read write\n",
28758 start, start+size);
28759
28760@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
28761 if (!kernel_set_to_readonly)
28762 return;
28763
28764+ start = ktla_ktva(start);
28765 pr_debug("Set kernel text: %lx - %lx for read only\n",
28766 start, start+size);
28767
28768@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
28769 unsigned long start = PFN_ALIGN(_text);
28770 unsigned long size = PFN_ALIGN(_etext) - start;
28771
28772+ start = ktla_ktva(start);
28773 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28774 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28775 size >> 10);
28776diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28777index 75c9a6a..498d677 100644
28778--- a/arch/x86/mm/init_64.c
28779+++ b/arch/x86/mm/init_64.c
28780@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28781 * around without checking the pgd every time.
28782 */
28783
28784-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28785+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28786 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28787
28788 int force_personality32;
28789@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28790
28791 for (address = start; address <= end; address += PGDIR_SIZE) {
28792 const pgd_t *pgd_ref = pgd_offset_k(address);
28793+
28794+#ifdef CONFIG_PAX_PER_CPU_PGD
28795+ unsigned long cpu;
28796+#else
28797 struct page *page;
28798+#endif
28799
28800 if (pgd_none(*pgd_ref))
28801 continue;
28802
28803 spin_lock(&pgd_lock);
28804+
28805+#ifdef CONFIG_PAX_PER_CPU_PGD
28806+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28807+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
28808+#else
28809 list_for_each_entry(page, &pgd_list, lru) {
28810 pgd_t *pgd;
28811 spinlock_t *pgt_lock;
28812@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28813 /* the pgt_lock only for Xen */
28814 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28815 spin_lock(pgt_lock);
28816+#endif
28817
28818 if (pgd_none(*pgd))
28819 set_pgd(pgd, *pgd_ref);
28820@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28821 BUG_ON(pgd_page_vaddr(*pgd)
28822 != pgd_page_vaddr(*pgd_ref));
28823
28824+#ifndef CONFIG_PAX_PER_CPU_PGD
28825 spin_unlock(pgt_lock);
28826+#endif
28827+
28828 }
28829 spin_unlock(&pgd_lock);
28830 }
28831@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28832 {
28833 if (pgd_none(*pgd)) {
28834 pud_t *pud = (pud_t *)spp_getpage();
28835- pgd_populate(&init_mm, pgd, pud);
28836+ pgd_populate_kernel(&init_mm, pgd, pud);
28837 if (pud != pud_offset(pgd, 0))
28838 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28839 pud, pud_offset(pgd, 0));
28840@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28841 {
28842 if (pud_none(*pud)) {
28843 pmd_t *pmd = (pmd_t *) spp_getpage();
28844- pud_populate(&init_mm, pud, pmd);
28845+ pud_populate_kernel(&init_mm, pud, pmd);
28846 if (pmd != pmd_offset(pud, 0))
28847 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28848 pmd, pmd_offset(pud, 0));
28849@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28850 pmd = fill_pmd(pud, vaddr);
28851 pte = fill_pte(pmd, vaddr);
28852
28853+ pax_open_kernel();
28854 set_pte(pte, new_pte);
28855+ pax_close_kernel();
28856
28857 /*
28858 * It's enough to flush this one mapping.
28859@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28860 pgd = pgd_offset_k((unsigned long)__va(phys));
28861 if (pgd_none(*pgd)) {
28862 pud = (pud_t *) spp_getpage();
28863- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28864- _PAGE_USER));
28865+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28866 }
28867 pud = pud_offset(pgd, (unsigned long)__va(phys));
28868 if (pud_none(*pud)) {
28869 pmd = (pmd_t *) spp_getpage();
28870- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28871- _PAGE_USER));
28872+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28873 }
28874 pmd = pmd_offset(pud, phys);
28875 BUG_ON(!pmd_none(*pmd));
28876@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
28877 if (pfn >= pgt_buf_top)
28878 panic("alloc_low_page: ran out of memory");
28879
28880- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28881+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28882 clear_page(adr);
28883 *phys = pfn * PAGE_SIZE;
28884 return adr;
28885@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
28886
28887 phys = __pa(virt);
28888 left = phys & (PAGE_SIZE - 1);
28889- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28890+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28891 adr = (void *)(((unsigned long)adr) | left);
28892
28893 return adr;
28894@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28895 unmap_low_page(pmd);
28896
28897 spin_lock(&init_mm.page_table_lock);
28898- pud_populate(&init_mm, pud, __va(pmd_phys));
28899+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
28900 spin_unlock(&init_mm.page_table_lock);
28901 }
28902 __flush_tlb_all();
28903@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
28904 unmap_low_page(pud);
28905
28906 spin_lock(&init_mm.page_table_lock);
28907- pgd_populate(&init_mm, pgd, __va(pud_phys));
28908+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
28909 spin_unlock(&init_mm.page_table_lock);
28910 pgd_changed = true;
28911 }
28912@@ -693,6 +707,12 @@ void __init mem_init(void)
28913
28914 pci_iommu_alloc();
28915
28916+#ifdef CONFIG_PAX_PER_CPU_PGD
28917+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28918+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28919+ KERNEL_PGD_PTRS);
28920+#endif
28921+
28922 /* clear_bss() already clear the empty_zero_page */
28923
28924 reservedpages = 0;
28925@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
28926 static struct vm_area_struct gate_vma = {
28927 .vm_start = VSYSCALL_START,
28928 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28929- .vm_page_prot = PAGE_READONLY_EXEC,
28930- .vm_flags = VM_READ | VM_EXEC
28931+ .vm_page_prot = PAGE_READONLY,
28932+ .vm_flags = VM_READ
28933 };
28934
28935 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28936@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
28937
28938 const char *arch_vma_name(struct vm_area_struct *vma)
28939 {
28940- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28941+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28942 return "[vdso]";
28943 if (vma == &gate_vma)
28944 return "[vsyscall]";
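
The init_64.c hunks above switch sync_global_pgds() from walking pgd_list to iterating every per-CPU page directory when CONFIG_PAX_PER_CPU_PGD is enabled: with one PGD per CPU there is no shared list to traverse, and each CPU's copy must receive the new kernel entry individually. A minimal kernel-context sketch of that propagation loop (get_cpu_pgd()/pgd_offset_cpu() are helpers this patch introduces elsewhere):

    /* Sketch: propagate one kernel PGD entry to every per-CPU PGD.
     * Mirrors the CONFIG_PAX_PER_CPU_PGD branch in sync_global_pgds(). */
    static void sync_pgd_entry_percpu(unsigned long address, const pgd_t *pgd_ref)
    {
        unsigned long cpu;

        for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
            pgd_t *pgd = pgd_offset_cpu(cpu, address);

            if (pgd_none(*pgd))
                set_pgd(pgd, *pgd_ref);    /* copy the kernel mapping in */
        }
    }
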
28945diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28946index 7b179b4..6bd1777 100644
28947--- a/arch/x86/mm/iomap_32.c
28948+++ b/arch/x86/mm/iomap_32.c
28949@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28950 type = kmap_atomic_idx_push();
28951 idx = type + KM_TYPE_NR * smp_processor_id();
28952 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28953+
28954+ pax_open_kernel();
28955 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28956+ pax_close_kernel();
28957+
28958 arch_flush_lazy_mmu_mode();
28959
28960 return (void *)vaddr;
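
kmap_atomic_prot_pfn() now brackets its PTE write with pax_open_kernel()/pax_close_kernel(). Under PaX KERNEXEC the kernel's page tables are themselves mapped read-only, so writes to them must temporarily lift write protection; a common implementation (an assumption here, not spelled out in this hunk) toggles CR0.WP:

    /* Conceptual sketch only, assuming a CR0.WP-based implementation;
     * the real helpers pair with preemption/IRQ discipline. */
    static inline void pax_open_kernel_sketch(void)
    {
        write_cr0(read_cr0() & ~X86_CR0_WP);  /* allow writes to RO pages */
        barrier();
    }

    static inline void pax_close_kernel_sketch(void)
    {
        barrier();
        write_cr0(read_cr0() | X86_CR0_WP);   /* restore write protection */
    }
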
28961diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28962index 78fe3f1..73b95e2 100644
28963--- a/arch/x86/mm/ioremap.c
28964+++ b/arch/x86/mm/ioremap.c
28965@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28966 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28967 int is_ram = page_is_ram(pfn);
28968
28969- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28970+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28971 return NULL;
28972 WARN_ON_ONCE(is_ram);
28973 }
28974@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28975 *
28976 * Caller must ensure there is only one unmapping for the same pointer.
28977 */
28978-void iounmap(volatile void __iomem *addr)
28979+void iounmap(const volatile void __iomem *addr)
28980 {
28981 struct vm_struct *p, *o;
28982
28983@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28984
28985 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28986 if (page_is_ram(start >> PAGE_SHIFT))
28987+#ifdef CONFIG_HIGHMEM
28988+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28989+#endif
28990 return __va(phys);
28991
28992 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28993@@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28994 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
28995 {
28996 if (page_is_ram(phys >> PAGE_SHIFT))
28997+#ifdef CONFIG_HIGHMEM
28998+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
28999+#endif
29000 return;
29001
29002 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
29003@@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
29004 early_param("early_ioremap_debug", early_ioremap_debug_setup);
29005
29006 static __initdata int after_paging_init;
29007-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
29008+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
29009
29010 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
29011 {
29012@@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
29013 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
29014
29015 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
29016- memset(bm_pte, 0, sizeof(bm_pte));
29017- pmd_populate_kernel(&init_mm, pmd, bm_pte);
29018+ pmd_populate_user(&init_mm, pmd, bm_pte);
29019
29020 /*
29021 * The boot-ioremap range spans multiple pmds, for which
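
The xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() hunks add a HIGHMEM guard: only RAM below max_low_pfn has a permanent linear mapping, so the __va() shortcut (and the matching early return on unmap) is valid only for lowmem pages; highmem RAM still takes the ioremap_cache() path. The added condition is equivalent to this sketch:

    /* Sketch of the test the two hunks introduce (kernel context). */
    static bool can_use_linear_map(unsigned long pfn)
    {
        if (!page_is_ram(pfn))
            return false;
    #ifdef CONFIG_HIGHMEM
        if (pfn >= max_low_pfn)        /* highmem RAM has no __va() alias */
            return false;
    #endif
        return true;
    }
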
29022diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
29023index d87dd6d..bf3fa66 100644
29024--- a/arch/x86/mm/kmemcheck/kmemcheck.c
29025+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
29026@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
29027 * memory (e.g. tracked pages)? For now, we need this to avoid
29028 * invoking kmemcheck for PnP BIOS calls.
29029 */
29030- if (regs->flags & X86_VM_MASK)
29031+ if (v8086_mode(regs))
29032 return false;
29033- if (regs->cs != __KERNEL_CS)
29034+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
29035 return false;
29036
29037 pte = kmemcheck_pte_lookup(address);
29038diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
29039index 845df68..1d8d29f 100644
29040--- a/arch/x86/mm/mmap.c
29041+++ b/arch/x86/mm/mmap.c
29042@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
29043 * Leave an at least ~128 MB hole with possible stack randomization.
29044 */
29045 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
29046-#define MAX_GAP (TASK_SIZE/6*5)
29047+#define MAX_GAP (pax_task_size/6*5)
29048
29049 static int mmap_is_legacy(void)
29050 {
29051@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
29052 return rnd << PAGE_SHIFT;
29053 }
29054
29055-static unsigned long mmap_base(void)
29056+static unsigned long mmap_base(struct mm_struct *mm)
29057 {
29058 unsigned long gap = rlimit(RLIMIT_STACK);
29059+ unsigned long pax_task_size = TASK_SIZE;
29060+
29061+#ifdef CONFIG_PAX_SEGMEXEC
29062+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29063+ pax_task_size = SEGMEXEC_TASK_SIZE;
29064+#endif
29065
29066 if (gap < MIN_GAP)
29067 gap = MIN_GAP;
29068 else if (gap > MAX_GAP)
29069 gap = MAX_GAP;
29070
29071- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
29072+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
29073 }
29074
29075 /*
29076 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
29077 * does, but not when emulating X86_32
29078 */
29079-static unsigned long mmap_legacy_base(void)
29080+static unsigned long mmap_legacy_base(struct mm_struct *mm)
29081 {
29082- if (mmap_is_ia32())
29083+ if (mmap_is_ia32()) {
29084+
29085+#ifdef CONFIG_PAX_SEGMEXEC
29086+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29087+ return SEGMEXEC_TASK_UNMAPPED_BASE;
29088+ else
29089+#endif
29090+
29091 return TASK_UNMAPPED_BASE;
29092- else
29093+ } else
29094 return TASK_UNMAPPED_BASE + mmap_rnd();
29095 }
29096
29097@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
29098 void arch_pick_mmap_layout(struct mm_struct *mm)
29099 {
29100 if (mmap_is_legacy()) {
29101- mm->mmap_base = mmap_legacy_base();
29102+ mm->mmap_base = mmap_legacy_base(mm);
29103+
29104+#ifdef CONFIG_PAX_RANDMMAP
29105+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29106+ mm->mmap_base += mm->delta_mmap;
29107+#endif
29108+
29109 mm->get_unmapped_area = arch_get_unmapped_area;
29110 mm->unmap_area = arch_unmap_area;
29111 } else {
29112- mm->mmap_base = mmap_base();
29113+ mm->mmap_base = mmap_base(mm);
29114+
29115+#ifdef CONFIG_PAX_RANDMMAP
29116+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29117+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
29118+#endif
29119+
29120 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
29121 mm->unmap_area = arch_unmap_area_topdown;
29122 }
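
Under MF_PAX_RANDMMAP, arch_pick_mmap_layout() perturbs the chosen base with per-mm deltas: the legacy bottom-up base moves up by delta_mmap, the top-down base moves down by delta_mmap + delta_stack, and under SEGMEXEC both calculations use SEGMEXEC_TASK_SIZE instead of TASK_SIZE. A userspace sketch of the top-down arithmetic (all constants illustrative, not the kernel's actual values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE     4096ULL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t task_size   = 0x7ffffffff000ULL; /* stand-in for TASK_SIZE */
        uint64_t gap         = 8ULL << 20;        /* rlimit(RLIMIT_STACK) */
        uint64_t min_gap     = 128ULL << 20;      /* MIN_GAP, randomness omitted */
        uint64_t max_gap     = task_size / 6 * 5; /* MAX_GAP */
        uint64_t rnd         = 0x1f3000;          /* sample mmap_rnd() output */
        uint64_t delta_mmap  = 0x200000;          /* mm->delta_mmap (illustrative) */
        uint64_t delta_stack = 0x10000;           /* mm->delta_stack (illustrative) */

        if (gap < min_gap)
            gap = min_gap;
        else if (gap > max_gap)
            gap = max_gap;

        uint64_t base = PAGE_ALIGN(task_size - gap - rnd);
        base -= delta_mmap + delta_stack;         /* the RANDMMAP shift */
        printf("top-down mmap_base = %#llx\n", (unsigned long long)base);
        return 0;
    }
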
29123diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
29124index dc0b727..f612039 100644
29125--- a/arch/x86/mm/mmio-mod.c
29126+++ b/arch/x86/mm/mmio-mod.c
29127@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
29128 break;
29129 default:
29130 {
29131- unsigned char *ip = (unsigned char *)instptr;
29132+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
29133 my_trace->opcode = MMIO_UNKNOWN_OP;
29134 my_trace->width = 0;
29135 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
29136@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
29137 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29138 void __iomem *addr)
29139 {
29140- static atomic_t next_id;
29141+ static atomic_unchecked_t next_id;
29142 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
29143 /* These are page-unaligned. */
29144 struct mmiotrace_map map = {
29145@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29146 .private = trace
29147 },
29148 .phys = offset,
29149- .id = atomic_inc_return(&next_id)
29150+ .id = atomic_inc_return_unchecked(&next_id)
29151 };
29152 map.map_id = trace->id;
29153
29154@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
29155 ioremap_trace_core(offset, size, addr);
29156 }
29157
29158-static void iounmap_trace_core(volatile void __iomem *addr)
29159+static void iounmap_trace_core(const volatile void __iomem *addr)
29160 {
29161 struct mmiotrace_map map = {
29162 .phys = 0,
29163@@ -328,7 +328,7 @@ not_enabled:
29164 }
29165 }
29166
29167-void mmiotrace_iounmap(volatile void __iomem *addr)
29168+void mmiotrace_iounmap(const volatile void __iomem *addr)
29169 {
29170 might_sleep();
29171 if (is_enabled()) /* recheck and proper locking in *_core() */
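
mmiotrace's next_id becomes atomic_unchecked_t: it is a plain ID counter, so wrap-around is harmless, and the conversion opts it out of PaX REFCOUNT's overflow checking, which would otherwise flag a wrapping atomic_t as a reference-count bug. A rough userspace model of the checked/unchecked split (GCC atomic builtins; the kernel's real response is to report and saturate rather than trap):

    #include <limits.h>
    #include <stdio.h>

    /* Checked increment: refuse to wrap (models atomic_inc_return). */
    static unsigned checked_inc_return(unsigned *v)
    {
        unsigned old = __atomic_fetch_add(v, 1, __ATOMIC_SEQ_CST);
        if (old == UINT_MAX)
            __builtin_trap();          /* the add just wrapped */
        return old + 1;
    }

    /* Unchecked increment: wrapping is fine for IDs and statistics
     * (models atomic_inc_return_unchecked). */
    static unsigned unchecked_inc_return(unsigned *v)
    {
        return __atomic_add_fetch(v, 1, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        unsigned id = UINT_MAX;
        printf("wrapped id: %u\n", unchecked_inc_return(&id)); /* 0, by design */
        (void)checked_inc_return;
        return 0;
    }
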
29172diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
29173index 8504f36..5fc68f2 100644
29174--- a/arch/x86/mm/numa.c
29175+++ b/arch/x86/mm/numa.c
29176@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
29177 return true;
29178 }
29179
29180-static int __init numa_register_memblks(struct numa_meminfo *mi)
29181+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
29182 {
29183 unsigned long uninitialized_var(pfn_align);
29184 int i, nid;
29185diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
29186index b008656..773eac2 100644
29187--- a/arch/x86/mm/pageattr-test.c
29188+++ b/arch/x86/mm/pageattr-test.c
29189@@ -36,7 +36,7 @@ enum {
29190
29191 static int pte_testbit(pte_t pte)
29192 {
29193- return pte_flags(pte) & _PAGE_UNUSED1;
29194+ return pte_flags(pte) & _PAGE_CPA_TEST;
29195 }
29196
29197 struct split_state {
29198diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
29199index a718e0d..77419bc 100644
29200--- a/arch/x86/mm/pageattr.c
29201+++ b/arch/x86/mm/pageattr.c
29202@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29203 */
29204 #ifdef CONFIG_PCI_BIOS
29205 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
29206- pgprot_val(forbidden) |= _PAGE_NX;
29207+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29208 #endif
29209
29210 /*
29211@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29212 * Does not cover __inittext since that is gone later on. On
29213 * 64bit we do not enforce !NX on the low mapping
29214 */
29215- if (within(address, (unsigned long)_text, (unsigned long)_etext))
29216- pgprot_val(forbidden) |= _PAGE_NX;
29217+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
29218+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29219
29220+#ifdef CONFIG_DEBUG_RODATA
29221 /*
29222 * The .rodata section needs to be read-only. Using the pfn
29223 * catches all aliases.
29224@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29225 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
29226 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
29227 pgprot_val(forbidden) |= _PAGE_RW;
29228+#endif
29229
29230 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
29231 /*
29232@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29233 }
29234 #endif
29235
29236+#ifdef CONFIG_PAX_KERNEXEC
29237+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
29238+ pgprot_val(forbidden) |= _PAGE_RW;
29239+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29240+ }
29241+#endif
29242+
29243 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
29244
29245 return prot;
29246@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
29247 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
29248 {
29249 /* change init_mm */
29250+ pax_open_kernel();
29251 set_pte_atomic(kpte, pte);
29252+
29253 #ifdef CONFIG_X86_32
29254 if (!SHARED_KERNEL_PMD) {
29255+
29256+#ifdef CONFIG_PAX_PER_CPU_PGD
29257+ unsigned long cpu;
29258+#else
29259 struct page *page;
29260+#endif
29261
29262+#ifdef CONFIG_PAX_PER_CPU_PGD
29263+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29264+ pgd_t *pgd = get_cpu_pgd(cpu);
29265+#else
29266 list_for_each_entry(page, &pgd_list, lru) {
29267- pgd_t *pgd;
29268+ pgd_t *pgd = (pgd_t *)page_address(page);
29269+#endif
29270+
29271 pud_t *pud;
29272 pmd_t *pmd;
29273
29274- pgd = (pgd_t *)page_address(page) + pgd_index(address);
29275+ pgd += pgd_index(address);
29276 pud = pud_offset(pgd, address);
29277 pmd = pmd_offset(pud, address);
29278 set_pte_atomic((pte_t *)pmd, pte);
29279 }
29280 }
29281 #endif
29282+ pax_close_kernel();
29283 }
29284
29285 static int
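
The KERNEXEC block added to static_protections() pins the [_text, _sdata) range: any caller-requested protection change has _PAGE_RW and _PAGE_NX stripped, so kernel text and rodata can never be made writable or non-executable through pageattr. Note the recurring `_PAGE_NX & __supported_pte_mask` idiom, which keeps the NX bit out of PTEs on CPUs without NX support. Condensed into a kernel-context sketch:

    /* Sketch of the added forbidden-bits rule. */
    static pgprot_t kernexec_protect(pgprot_t prot, unsigned long pfn)
    {
        pgprotval_t forbidden = 0;

        if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)),
                   __pa((unsigned long)&_sdata))) {
            forbidden |= _PAGE_RW;                        /* may not become writable */
            forbidden |= _PAGE_NX & __supported_pte_mask; /* may not become non-exec */
        }
        return __pgprot(pgprot_val(prot) & ~forbidden);
    }
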
29286diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
29287index 0eb572e..92f5c1e 100644
29288--- a/arch/x86/mm/pat.c
29289+++ b/arch/x86/mm/pat.c
29290@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
29291
29292 if (!entry) {
29293 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
29294- current->comm, current->pid, start, end - 1);
29295+ current->comm, task_pid_nr(current), start, end - 1);
29296 return -EINVAL;
29297 }
29298
29299@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29300
29301 while (cursor < to) {
29302 if (!devmem_is_allowed(pfn)) {
29303- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29304- current->comm, from, to - 1);
29305+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29306+ current->comm, from, to - 1, cursor);
29307 return 0;
29308 }
29309 cursor += PAGE_SIZE;
29310@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29311 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29312 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29313 "for [mem %#010Lx-%#010Lx]\n",
29314- current->comm, current->pid,
29315+ current->comm, task_pid_nr(current),
29316 cattr_name(flags),
29317 base, (unsigned long long)(base + size-1));
29318 return -EINVAL;
29319@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29320 flags = lookup_memtype(paddr);
29321 if (want_flags != flags) {
29322 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29323- current->comm, current->pid,
29324+ current->comm, task_pid_nr(current),
29325 cattr_name(want_flags),
29326 (unsigned long long)paddr,
29327 (unsigned long long)(paddr + size - 1),
29328@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29329 free_memtype(paddr, paddr + size);
29330 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29331 " for [mem %#010Lx-%#010Lx], got %s\n",
29332- current->comm, current->pid,
29333+ current->comm, task_pid_nr(current),
29334 cattr_name(want_flags),
29335 (unsigned long long)paddr,
29336 (unsigned long long)(paddr + size - 1),
29337diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29338index 9f0614d..92ae64a 100644
29339--- a/arch/x86/mm/pf_in.c
29340+++ b/arch/x86/mm/pf_in.c
29341@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29342 int i;
29343 enum reason_type rv = OTHERS;
29344
29345- p = (unsigned char *)ins_addr;
29346+ p = (unsigned char *)ktla_ktva(ins_addr);
29347 p += skip_prefix(p, &prf);
29348 p += get_opcode(p, &opcode);
29349
29350@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29351 struct prefix_bits prf;
29352 int i;
29353
29354- p = (unsigned char *)ins_addr;
29355+ p = (unsigned char *)ktla_ktva(ins_addr);
29356 p += skip_prefix(p, &prf);
29357 p += get_opcode(p, &opcode);
29358
29359@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29360 struct prefix_bits prf;
29361 int i;
29362
29363- p = (unsigned char *)ins_addr;
29364+ p = (unsigned char *)ktla_ktva(ins_addr);
29365 p += skip_prefix(p, &prf);
29366 p += get_opcode(p, &opcode);
29367
29368@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29369 struct prefix_bits prf;
29370 int i;
29371
29372- p = (unsigned char *)ins_addr;
29373+ p = (unsigned char *)ktla_ktva(ins_addr);
29374 p += skip_prefix(p, &prf);
29375 p += get_opcode(p, &opcode);
29376 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29377@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29378 struct prefix_bits prf;
29379 int i;
29380
29381- p = (unsigned char *)ins_addr;
29382+ p = (unsigned char *)ktla_ktva(ins_addr);
29383 p += skip_prefix(p, &prf);
29384 p += get_opcode(p, &opcode);
29385 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
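
Every instruction-decoding path in pf_in.c now reads through ktla_ktva(ins_addr). Under KERNEXEC, kernel text is not necessarily readable at its execution address; ktla_ktva() ("kernel text linear address to kernel virtual address") yields the readable alias. Without KERNEXEC it degenerates to the identity, and where active it is a fixed-offset translation; a sketch with a placeholder offset:

    /* Sketch; the real offset comes from the KERNEXEC layout, and
     * without KERNEXEC ktla_ktva() is simply (addr). */
    #define KERNEXEC_TEXT_DELTA 0UL    /* placeholder value */

    static inline unsigned long ktla_ktva_sketch(unsigned long addr)
    {
        return addr + KERNEXEC_TEXT_DELTA;
    }
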
29386diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29387index e27fbf8..8b56dc9 100644
29388--- a/arch/x86/mm/pgtable.c
29389+++ b/arch/x86/mm/pgtable.c
29390@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29391 list_del(&page->lru);
29392 }
29393
29394-#define UNSHARED_PTRS_PER_PGD \
29395- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29396+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29397+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29398
29399+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29400+{
29401+ unsigned int count = USER_PGD_PTRS;
29402
29403+ while (count--)
29404+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29405+}
29406+#endif
29407+
29408+#ifdef CONFIG_PAX_PER_CPU_PGD
29409+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29410+{
29411+ unsigned int count = USER_PGD_PTRS;
29412+
29413+ while (count--) {
29414+ pgd_t pgd;
29415+
29416+#ifdef CONFIG_X86_64
29417+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29418+#else
29419+ pgd = *src++;
29420+#endif
29421+
29422+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29423+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29424+#endif
29425+
29426+ *dst++ = pgd;
29427+ }
29428+
29429+}
29430+#endif
29431+
29432+#ifdef CONFIG_X86_64
29433+#define pxd_t pud_t
29434+#define pyd_t pgd_t
29435+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29436+#define pxd_free(mm, pud) pud_free((mm), (pud))
29437+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29438+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29439+#define PYD_SIZE PGDIR_SIZE
29440+#else
29441+#define pxd_t pmd_t
29442+#define pyd_t pud_t
29443+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29444+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29445+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29446+#define pyd_offset(mm, address) pud_offset((mm), (address))
29447+#define PYD_SIZE PUD_SIZE
29448+#endif
29449+
29450+#ifdef CONFIG_PAX_PER_CPU_PGD
29451+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29452+static inline void pgd_dtor(pgd_t *pgd) {}
29453+#else
29454 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29455 {
29456 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29457@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
29458 pgd_list_del(pgd);
29459 spin_unlock(&pgd_lock);
29460 }
29461+#endif
29462
29463 /*
29464 * List of all pgd's needed for non-PAE so it can invalidate entries
29465@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
29466 * -- nyc
29467 */
29468
29469-#ifdef CONFIG_X86_PAE
29470+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29471 /*
29472 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29473 * updating the top-level pagetable entries to guarantee the
29474@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
29475 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29476 * and initialize the kernel pmds here.
29477 */
29478-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29479+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29480
29481 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29482 {
29483@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29484 */
29485 flush_tlb_mm(mm);
29486 }
29487+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29488+#define PREALLOCATED_PXDS USER_PGD_PTRS
29489 #else /* !CONFIG_X86_PAE */
29490
29491 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29492-#define PREALLOCATED_PMDS 0
29493+#define PREALLOCATED_PXDS 0
29494
29495 #endif /* CONFIG_X86_PAE */
29496
29497-static void free_pmds(pmd_t *pmds[])
29498+static void free_pxds(pxd_t *pxds[])
29499 {
29500 int i;
29501
29502- for(i = 0; i < PREALLOCATED_PMDS; i++)
29503- if (pmds[i])
29504- free_page((unsigned long)pmds[i]);
29505+ for(i = 0; i < PREALLOCATED_PXDS; i++)
29506+ if (pxds[i])
29507+ free_page((unsigned long)pxds[i]);
29508 }
29509
29510-static int preallocate_pmds(pmd_t *pmds[])
29511+static int preallocate_pxds(pxd_t *pxds[])
29512 {
29513 int i;
29514 bool failed = false;
29515
29516- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29517- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29518- if (pmd == NULL)
29519+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29520+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29521+ if (pxd == NULL)
29522 failed = true;
29523- pmds[i] = pmd;
29524+ pxds[i] = pxd;
29525 }
29526
29527 if (failed) {
29528- free_pmds(pmds);
29529+ free_pxds(pxds);
29530 return -ENOMEM;
29531 }
29532
29533@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29534 * preallocate which never got a corresponding vma will need to be
29535 * freed manually.
29536 */
29537-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29538+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29539 {
29540 int i;
29541
29542- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29543+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29544 pgd_t pgd = pgdp[i];
29545
29546 if (pgd_val(pgd) != 0) {
29547- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29548+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29549
29550- pgdp[i] = native_make_pgd(0);
29551+ set_pgd(pgdp + i, native_make_pgd(0));
29552
29553- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29554- pmd_free(mm, pmd);
29555+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29556+ pxd_free(mm, pxd);
29557 }
29558 }
29559 }
29560
29561-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29562+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29563 {
29564- pud_t *pud;
29565+ pyd_t *pyd;
29566 unsigned long addr;
29567 int i;
29568
29569- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29570+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29571 return;
29572
29573- pud = pud_offset(pgd, 0);
29574+#ifdef CONFIG_X86_64
29575+ pyd = pyd_offset(mm, 0L);
29576+#else
29577+ pyd = pyd_offset(pgd, 0L);
29578+#endif
29579
29580- for (addr = i = 0; i < PREALLOCATED_PMDS;
29581- i++, pud++, addr += PUD_SIZE) {
29582- pmd_t *pmd = pmds[i];
29583+ for (addr = i = 0; i < PREALLOCATED_PXDS;
29584+ i++, pyd++, addr += PYD_SIZE) {
29585+ pxd_t *pxd = pxds[i];
29586
29587 if (i >= KERNEL_PGD_BOUNDARY)
29588- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29589- sizeof(pmd_t) * PTRS_PER_PMD);
29590+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29591+ sizeof(pxd_t) * PTRS_PER_PMD);
29592
29593- pud_populate(mm, pud, pmd);
29594+ pyd_populate(mm, pyd, pxd);
29595 }
29596 }
29597
29598 pgd_t *pgd_alloc(struct mm_struct *mm)
29599 {
29600 pgd_t *pgd;
29601- pmd_t *pmds[PREALLOCATED_PMDS];
29602+ pxd_t *pxds[PREALLOCATED_PXDS];
29603
29604 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29605
29606@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29607
29608 mm->pgd = pgd;
29609
29610- if (preallocate_pmds(pmds) != 0)
29611+ if (preallocate_pxds(pxds) != 0)
29612 goto out_free_pgd;
29613
29614 if (paravirt_pgd_alloc(mm) != 0)
29615- goto out_free_pmds;
29616+ goto out_free_pxds;
29617
29618 /*
29619 * Make sure that pre-populating the pmds is atomic with
29620@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29621 spin_lock(&pgd_lock);
29622
29623 pgd_ctor(mm, pgd);
29624- pgd_prepopulate_pmd(mm, pgd, pmds);
29625+ pgd_prepopulate_pxd(mm, pgd, pxds);
29626
29627 spin_unlock(&pgd_lock);
29628
29629 return pgd;
29630
29631-out_free_pmds:
29632- free_pmds(pmds);
29633+out_free_pxds:
29634+ free_pxds(pxds);
29635 out_free_pgd:
29636 free_page((unsigned long)pgd);
29637 out:
29638@@ -295,7 +356,7 @@ out:
29639
29640 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29641 {
29642- pgd_mop_up_pmds(mm, pgd);
29643+ pgd_mop_up_pxds(mm, pgd);
29644 pgd_dtor(pgd);
29645 paravirt_pgd_free(mm, pgd);
29646 free_page((unsigned long)pgd);
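
The pgtable.c rework parameterizes the old pmd-preallocation code over a "pxd/pyd" pair so a single implementation covers both cases: on 32-bit PAE the preallocated level is the pmd (populated into puds), while on x86_64 with PAX_PER_CPU_PGD it is the pud (populated into the pgd, covering the USER_PGD_PTRS user slots). The allocation/cleanup skeleton both instantiations share, as a userspace model:

    #include <stdlib.h>

    /* Userspace model of preallocate_pxds()/free_pxds(); pxd_t stands
     * for pmd_t (PAE) or pud_t (x86_64 + PER_CPU_PGD). */
    typedef struct { unsigned long val; } pxd_t;

    static void free_pxds(pxd_t *pxds[], int n)
    {
        int i;

        for (i = 0; i < n; i++)
            free(pxds[i]);
    }

    static int preallocate_pxds(pxd_t *pxds[], int n)
    {
        int i, failed = 0;

        for (i = 0; i < n; i++) {
            pxds[i] = calloc(1, 4096);   /* __get_free_page() analogue */
            if (!pxds[i])
                failed = 1;
        }
        if (failed) {
            free_pxds(pxds, n);
            return -1;                   /* -ENOMEM in the kernel */
        }
        return 0;
    }
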
29647diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29648index a69bcb8..19068ab 100644
29649--- a/arch/x86/mm/pgtable_32.c
29650+++ b/arch/x86/mm/pgtable_32.c
29651@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29652 return;
29653 }
29654 pte = pte_offset_kernel(pmd, vaddr);
29655+
29656+ pax_open_kernel();
29657 if (pte_val(pteval))
29658 set_pte_at(&init_mm, vaddr, pte, pteval);
29659 else
29660 pte_clear(&init_mm, vaddr, pte);
29661+ pax_close_kernel();
29662
29663 /*
29664 * It's enough to flush this one mapping.
29665diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29666index d2e2735..5c6586f 100644
29667--- a/arch/x86/mm/physaddr.c
29668+++ b/arch/x86/mm/physaddr.c
29669@@ -8,7 +8,7 @@
29670
29671 #ifdef CONFIG_X86_64
29672
29673-unsigned long __phys_addr(unsigned long x)
29674+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29675 {
29676 if (x >= __START_KERNEL_map) {
29677 x -= __START_KERNEL_map;
29678@@ -45,7 +45,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29679 #else
29680
29681 #ifdef CONFIG_DEBUG_VIRTUAL
29682-unsigned long __phys_addr(unsigned long x)
29683+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29684 {
29685 /* VMALLOC_* aren't constants */
29686 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
29687diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29688index 410531d..0f16030 100644
29689--- a/arch/x86/mm/setup_nx.c
29690+++ b/arch/x86/mm/setup_nx.c
29691@@ -5,8 +5,10 @@
29692 #include <asm/pgtable.h>
29693 #include <asm/proto.h>
29694
29695+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29696 static int disable_nx __cpuinitdata;
29697
29698+#ifndef CONFIG_PAX_PAGEEXEC
29699 /*
29700 * noexec = on|off
29701 *
29702@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29703 return 0;
29704 }
29705 early_param("noexec", noexec_setup);
29706+#endif
29707+
29708+#endif
29709
29710 void __cpuinit x86_configure_nx(void)
29711 {
29712+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29713 if (cpu_has_nx && !disable_nx)
29714 __supported_pte_mask |= _PAGE_NX;
29715 else
29716+#endif
29717 __supported_pte_mask &= ~_PAGE_NX;
29718 }
29719
29720diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29721index 13a6b29..c2fff23 100644
29722--- a/arch/x86/mm/tlb.c
29723+++ b/arch/x86/mm/tlb.c
29724@@ -48,7 +48,11 @@ void leave_mm(int cpu)
29725 BUG();
29726 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29727 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29728+
29729+#ifndef CONFIG_PAX_PER_CPU_PGD
29730 load_cr3(swapper_pg_dir);
29731+#endif
29732+
29733 }
29734 }
29735 EXPORT_SYMBOL_GPL(leave_mm);
29736diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29737index 877b9a1..a8ecf42 100644
29738--- a/arch/x86/net/bpf_jit.S
29739+++ b/arch/x86/net/bpf_jit.S
29740@@ -9,6 +9,7 @@
29741 */
29742 #include <linux/linkage.h>
29743 #include <asm/dwarf2.h>
29744+#include <asm/alternative-asm.h>
29745
29746 /*
29747 * Calling convention :
29748@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29749 jle bpf_slow_path_word
29750 mov (SKBDATA,%rsi),%eax
29751 bswap %eax /* ntohl() */
29752+ pax_force_retaddr
29753 ret
29754
29755 sk_load_half:
29756@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29757 jle bpf_slow_path_half
29758 movzwl (SKBDATA,%rsi),%eax
29759 rol $8,%ax # ntohs()
29760+ pax_force_retaddr
29761 ret
29762
29763 sk_load_byte:
29764@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29765 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29766 jle bpf_slow_path_byte
29767 movzbl (SKBDATA,%rsi),%eax
29768+ pax_force_retaddr
29769 ret
29770
29771 /**
29772@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29773 movzbl (SKBDATA,%rsi),%ebx
29774 and $15,%bl
29775 shl $2,%bl
29776+ pax_force_retaddr
29777 ret
29778
29779 /* rsi contains offset and can be scratched */
29780@@ -109,6 +114,7 @@ bpf_slow_path_word:
29781 js bpf_error
29782 mov -12(%rbp),%eax
29783 bswap %eax
29784+ pax_force_retaddr
29785 ret
29786
29787 bpf_slow_path_half:
29788@@ -117,12 +123,14 @@ bpf_slow_path_half:
29789 mov -12(%rbp),%ax
29790 rol $8,%ax
29791 movzwl %ax,%eax
29792+ pax_force_retaddr
29793 ret
29794
29795 bpf_slow_path_byte:
29796 bpf_slow_path_common(1)
29797 js bpf_error
29798 movzbl -12(%rbp),%eax
29799+ pax_force_retaddr
29800 ret
29801
29802 bpf_slow_path_byte_msh:
29803@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29804 and $15,%al
29805 shl $2,%al
29806 xchg %eax,%ebx
29807+ pax_force_retaddr
29808 ret
29809
29810 #define sk_negative_common(SIZE) \
29811@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29812 sk_negative_common(4)
29813 mov (%rax), %eax
29814 bswap %eax
29815+ pax_force_retaddr
29816 ret
29817
29818 bpf_slow_path_half_neg:
29819@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29820 mov (%rax),%ax
29821 rol $8,%ax
29822 movzwl %ax,%eax
29823+ pax_force_retaddr
29824 ret
29825
29826 bpf_slow_path_byte_neg:
29827@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29828 .globl sk_load_byte_negative_offset
29829 sk_negative_common(1)
29830 movzbl (%rax), %eax
29831+ pax_force_retaddr
29832 ret
29833
29834 bpf_slow_path_byte_msh_neg:
29835@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29836 and $15,%al
29837 shl $2,%al
29838 xchg %eax,%ebx
29839+ pax_force_retaddr
29840 ret
29841
29842 bpf_error:
29843@@ -197,4 +210,5 @@ bpf_error:
29844 xor %eax,%eax
29845 mov -8(%rbp),%rbx
29846 leaveq
29847+ pax_force_retaddr
29848 ret
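
Each `ret` in the JIT helper stubs gains a pax_force_retaddr. Under KERNEXEC on amd64 this macro sanitizes the saved return address just before returning, e.g. by forcing the top address bit so a return address corrupted to point at user space becomes non-canonical and faults instead of executing (the exact mechanism varies by KERNEXEC method; this is an inference from the PaX design, not from this hunk). A userspace model of the address clamp:

    #include <stdint.h>
    #include <stdio.h>

    /* Model: with bit 63 forced on, a smuggled user-space return target
     * is no longer a canonical address and would fault on use. */
    static uint64_t force_kernel_retaddr(uint64_t ra)
    {
        return ra | (1ULL << 63);
    }

    int main(void)
    {
        uint64_t forged = 0x00007f0012345678ULL;  /* user-space gadget */
        printf("%#llx -> %#llx (non-canonical)\n",
               (unsigned long long)forged,
               (unsigned long long)force_kernel_retaddr(forged));
        return 0;
    }
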
29849diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29850index d11a470..3f9adff3 100644
29851--- a/arch/x86/net/bpf_jit_comp.c
29852+++ b/arch/x86/net/bpf_jit_comp.c
29853@@ -12,6 +12,7 @@
29854 #include <linux/netdevice.h>
29855 #include <linux/filter.h>
29856 #include <linux/if_vlan.h>
29857+#include <linux/random.h>
29858
29859 /*
29860 * Conventions :
29861@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29862 return ptr + len;
29863 }
29864
29865+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29866+#define MAX_INSTR_CODE_SIZE 96
29867+#else
29868+#define MAX_INSTR_CODE_SIZE 64
29869+#endif
29870+
29871 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29872
29873 #define EMIT1(b1) EMIT(b1, 1)
29874 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29875 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29876 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29877+
29878+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29879+/* original constant will appear in ecx */
29880+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29881+do { \
29882+ /* mov ecx, randkey */ \
29883+ EMIT1(0xb9); \
29884+ EMIT(_key, 4); \
29885+ /* xor ecx, randkey ^ off */ \
29886+ EMIT2(0x81, 0xf1); \
29887+ EMIT((_key) ^ (_off), 4); \
29888+} while (0)
29889+
29890+#define EMIT1_off32(b1, _off) \
29891+do { \
29892+ switch (b1) { \
29893+ case 0x05: /* add eax, imm32 */ \
29894+ case 0x2d: /* sub eax, imm32 */ \
29895+ case 0x25: /* and eax, imm32 */ \
29896+ case 0x0d: /* or eax, imm32 */ \
29897+ case 0xb8: /* mov eax, imm32 */ \
29898+ case 0x3d: /* cmp eax, imm32 */ \
29899+ case 0xa9: /* test eax, imm32 */ \
29900+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29901+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29902+ break; \
29903+ case 0xbb: /* mov ebx, imm32 */ \
29904+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29905+ /* mov ebx, ecx */ \
29906+ EMIT2(0x89, 0xcb); \
29907+ break; \
29908+ case 0xbe: /* mov esi, imm32 */ \
29909+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29910+ /* mov esi, ecx */ \
29911+ EMIT2(0x89, 0xce); \
29912+ break; \
29913+ case 0xe9: /* jmp rel imm32 */ \
29914+ EMIT1(b1); \
29915+ EMIT(_off, 4); \
29916+ /* prevent fall-through; we're not called if off == 0 */ \
29917+ EMIT(0xcccccccc, 4); \
29918+ EMIT(0xcccccccc, 4); \
29919+ break; \
29920+ default: \
29921+ EMIT1(b1); \
29922+ EMIT(_off, 4); \
29923+ } \
29924+} while (0)
29925+
29926+#define EMIT2_off32(b1, b2, _off) \
29927+do { \
29928+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29929+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29930+ EMIT(randkey, 4); \
29931+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29932+ EMIT((_off) - randkey, 4); \
29933+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29934+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29935+ /* imul eax, ecx */ \
29936+ EMIT3(0x0f, 0xaf, 0xc1); \
29937+ } else { \
29938+ EMIT2(b1, b2); \
29939+ EMIT(_off, 4); \
29940+ } \
29941+} while (0)
29942+#else
29943 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29944+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29945+#endif
29946
29947 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29948 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29949@@ -90,6 +165,24 @@ do { \
29950 #define X86_JBE 0x76
29951 #define X86_JA 0x77
29952
29953+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29954+#define APPEND_FLOW_VERIFY() \
29955+do { \
29956+ /* mov ecx, randkey */ \
29957+ EMIT1(0xb9); \
29958+ EMIT(randkey, 4); \
29959+ /* cmp ecx, randkey */ \
29960+ EMIT2(0x81, 0xf9); \
29961+ EMIT(randkey, 4); \
29962+ /* jz after 8 int 3s */ \
29963+ EMIT2(0x74, 0x08); \
29964+ EMIT(0xcccccccc, 4); \
29965+ EMIT(0xcccccccc, 4); \
29966+} while (0)
29967+#else
29968+#define APPEND_FLOW_VERIFY() do { } while (0)
29969+#endif
29970+
29971 #define EMIT_COND_JMP(op, offset) \
29972 do { \
29973 if (is_near(offset)) \
29974@@ -97,6 +190,7 @@ do { \
29975 else { \
29976 EMIT2(0x0f, op + 0x10); \
29977 EMIT(offset, 4); /* jxx .+off32 */ \
29978+ APPEND_FLOW_VERIFY(); \
29979 } \
29980 } while (0)
29981
29982@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
29983 set_fs(old_fs);
29984 }
29985
29986+struct bpf_jit_work {
29987+ struct work_struct work;
29988+ void *image;
29989+};
29990+
29991 #define CHOOSE_LOAD_FUNC(K, func) \
29992 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29993
29994 void bpf_jit_compile(struct sk_filter *fp)
29995 {
29996- u8 temp[64];
29997+ u8 temp[MAX_INSTR_CODE_SIZE];
29998 u8 *prog;
29999 unsigned int proglen, oldproglen = 0;
30000 int ilen, i;
30001@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
30002 unsigned int *addrs;
30003 const struct sock_filter *filter = fp->insns;
30004 int flen = fp->len;
30005+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30006+ unsigned int randkey;
30007+#endif
30008
30009 if (!bpf_jit_enable)
30010 return;
30011@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
30012 if (addrs == NULL)
30013 return;
30014
30015+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
30016+ if (!fp->work)
30017+ goto out;
30018+
30019+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30020+ randkey = get_random_int();
30021+#endif
30022+
30023 /* Before first pass, make a rough estimation of addrs[]
30024- * each bpf instruction is translated to less than 64 bytes
30025+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
30026 */
30027 for (proglen = 0, i = 0; i < flen; i++) {
30028- proglen += 64;
30029+ proglen += MAX_INSTR_CODE_SIZE;
30030 addrs[i] = proglen;
30031 }
30032 cleanup_addr = proglen; /* epilogue address */
30033@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
30034 case BPF_S_ALU_MUL_K: /* A *= K */
30035 if (is_imm8(K))
30036 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
30037- else {
30038- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
30039- EMIT(K, 4);
30040- }
30041+ else
30042+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
30043 break;
30044 case BPF_S_ALU_DIV_X: /* A /= X; */
30045 seen |= SEEN_XREG;
30046@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
30047 break;
30048 case BPF_S_ALU_MOD_K: /* A %= K; */
30049 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
30050+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30051+ DILUTE_CONST_SEQUENCE(K, randkey);
30052+#else
30053 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
30054+#endif
30055 EMIT2(0xf7, 0xf1); /* div %ecx */
30056 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
30057 break;
30058 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
30059+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30060+ DILUTE_CONST_SEQUENCE(K, randkey);
30061+ // imul rax, rcx
30062+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
30063+#else
30064 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
30065 EMIT(K, 4);
30066+#endif
30067 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
30068 break;
30069 case BPF_S_ALU_AND_X:
30070@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
30071 if (is_imm8(K)) {
30072 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
30073 } else {
30074- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
30075- EMIT(K, 4);
30076+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
30077 }
30078 } else {
30079 EMIT2(0x89,0xde); /* mov %ebx,%esi */
30080@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30081 break;
30082 default:
30083 /* hmm, too complex filter, give up with jit compiler */
30084- goto out;
30085+ goto error;
30086 }
30087 ilen = prog - temp;
30088 if (image) {
30089 if (unlikely(proglen + ilen > oldproglen)) {
30090 pr_err("bpb_jit_compile fatal error\n");
30091- kfree(addrs);
30092- module_free(NULL, image);
30093- return;
30094+ module_free_exec(NULL, image);
30095+ goto error;
30096 }
30097+ pax_open_kernel();
30098 memcpy(image + proglen, temp, ilen);
30099+ pax_close_kernel();
30100 }
30101 proglen += ilen;
30102 addrs[i] = proglen;
30103@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30104 break;
30105 }
30106 if (proglen == oldproglen) {
30107- image = module_alloc(max_t(unsigned int,
30108- proglen,
30109- sizeof(struct work_struct)));
30110+ image = module_alloc_exec(proglen);
30111 if (!image)
30112- goto out;
30113+ goto error;
30114 }
30115 oldproglen = proglen;
30116 }
30117@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30118 bpf_flush_icache(image, image + proglen);
30119
30120 fp->bpf_func = (void *)image;
30121- }
30122+ } else
30123+error:
30124+ kfree(fp->work);
30125+
30126 out:
30127 kfree(addrs);
30128 return;
30129@@ -707,18 +826,20 @@ out:
30130
30131 static void jit_free_defer(struct work_struct *arg)
30132 {
30133- module_free(NULL, arg);
30134+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
30135+ kfree(arg);
30136 }
30137
30138 /* run from softirq, we must use a work_struct to call
30139- * module_free() from process context
30140+ * module_free_exec() from process context
30141 */
30142 void bpf_jit_free(struct sk_filter *fp)
30143 {
30144 if (fp->bpf_func != sk_run_filter) {
30145- struct work_struct *work = (struct work_struct *)fp->bpf_func;
30146+ struct work_struct *work = &fp->work->work;
30147
30148 INIT_WORK(work, jit_free_defer);
30149+ fp->work->image = fp->bpf_func;
30150 schedule_work(work);
30151 }
30152 }
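
The bpf_jit_comp.c changes do two things. First, GRKERNSEC_JIT_HARDEN blinds constants against JIT spraying: a filter's 32-bit immediates would otherwise appear verbatim in executable memory, handing an attacker choosable instruction bytes. DILUTE_CONST_SEQUENCE instead emits `mov ecx, key` followed by `xor ecx, key ^ imm` with a random per-compile key, so neither emitted dword equals the attacker's constant yet %ecx still receives it (hence the larger MAX_INSTR_CODE_SIZE estimate). Second, freeing moves to a dedicated struct bpf_jit_work, since the image now lives in module_alloc_exec() memory and can no longer double as its own work_struct. The blinding identity, checked in userspace:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t imm = 0xdeadbeef;  /* attacker-chosen filter constant */
        uint32_t key = 0x8badf00d;  /* random per-compile key (fixed here) */

        /* Emitted sequence: mov ecx, key ; xor ecx, (key ^ imm) */
        uint32_t ecx = key;
        ecx ^= key ^ imm;

        printf("ecx = %#x (== imm); emitted dwords: %#x, %#x\n",
               ecx, key, key ^ imm);
        return 0;
    }
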
30153diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
30154index d6aa6e8..266395a 100644
30155--- a/arch/x86/oprofile/backtrace.c
30156+++ b/arch/x86/oprofile/backtrace.c
30157@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
30158 struct stack_frame_ia32 *fp;
30159 unsigned long bytes;
30160
30161- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30162+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30163 if (bytes != sizeof(bufhead))
30164 return NULL;
30165
30166- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
30167+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
30168
30169 oprofile_add_trace(bufhead[0].return_address);
30170
30171@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
30172 struct stack_frame bufhead[2];
30173 unsigned long bytes;
30174
30175- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30176+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30177 if (bytes != sizeof(bufhead))
30178 return NULL;
30179
30180@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
30181 {
30182 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
30183
30184- if (!user_mode_vm(regs)) {
30185+ if (!user_mode(regs)) {
30186 unsigned long stack = kernel_stack_pointer(regs);
30187 if (depth)
30188 dump_trace(NULL, regs, (unsigned long *)stack, 0,
30189diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
30190index 48768df..ba9143c 100644
30191--- a/arch/x86/oprofile/nmi_int.c
30192+++ b/arch/x86/oprofile/nmi_int.c
30193@@ -23,6 +23,7 @@
30194 #include <asm/nmi.h>
30195 #include <asm/msr.h>
30196 #include <asm/apic.h>
30197+#include <asm/pgtable.h>
30198
30199 #include "op_counter.h"
30200 #include "op_x86_model.h"
30201@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
30202 if (ret)
30203 return ret;
30204
30205- if (!model->num_virt_counters)
30206- model->num_virt_counters = model->num_counters;
30207+ if (!model->num_virt_counters) {
30208+ pax_open_kernel();
30209+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
30210+ pax_close_kernel();
30211+ }
30212
30213 mux_init(ops);
30214
30215diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
30216index b2b9443..be58856 100644
30217--- a/arch/x86/oprofile/op_model_amd.c
30218+++ b/arch/x86/oprofile/op_model_amd.c
30219@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
30220 num_counters = AMD64_NUM_COUNTERS;
30221 }
30222
30223- op_amd_spec.num_counters = num_counters;
30224- op_amd_spec.num_controls = num_counters;
30225- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30226+ pax_open_kernel();
30227+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
30228+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
30229+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30230+ pax_close_kernel();
30231
30232 return 0;
30233 }
30234diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
30235index d90528e..0127e2b 100644
30236--- a/arch/x86/oprofile/op_model_ppro.c
30237+++ b/arch/x86/oprofile/op_model_ppro.c
30238@@ -19,6 +19,7 @@
30239 #include <asm/msr.h>
30240 #include <asm/apic.h>
30241 #include <asm/nmi.h>
30242+#include <asm/pgtable.h>
30243
30244 #include "op_x86_model.h"
30245 #include "op_counter.h"
30246@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
30247
30248 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
30249
30250- op_arch_perfmon_spec.num_counters = num_counters;
30251- op_arch_perfmon_spec.num_controls = num_counters;
30252+ pax_open_kernel();
30253+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
30254+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
30255+ pax_close_kernel();
30256 }
30257
30258 static int arch_perfmon_init(struct oprofile_operations *ignore)
30259diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
30260index 71e8a67..6a313bb 100644
30261--- a/arch/x86/oprofile/op_x86_model.h
30262+++ b/arch/x86/oprofile/op_x86_model.h
30263@@ -52,7 +52,7 @@ struct op_x86_model_spec {
30264 void (*switch_ctrl)(struct op_x86_model_spec const *model,
30265 struct op_msrs const * const msrs);
30266 #endif
30267-};
30268+} __do_const;
30269
30270 struct op_counter_config;
30271
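
The oprofile hunks follow one pattern: op tables full of function pointers become effectively const (`__do_const` is a PaX gcc-plugin attribute that forces such structs into read-only memory), and the few legitimate one-time initializations are rewritten as const-cast stores bracketed by pax_open_kernel()/pax_close_kernel(). In miniature:

    /* Kernel-context sketch of the write-through-const pattern; the
     * open/close helpers are the PaX primitives seen earlier. */
    struct model_spec {
        unsigned int num_counters;
        int (*init)(void);
    };

    static const struct model_spec spec; /* __do_const: lands in .rodata */

    static void late_setup(unsigned int n)
    {
        pax_open_kernel();
        *(unsigned int *)&spec.num_counters = n; /* deliberate const cast */
        pax_close_kernel();
    }
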
30272diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
30273index e9e6ed5..e47ae67 100644
30274--- a/arch/x86/pci/amd_bus.c
30275+++ b/arch/x86/pci/amd_bus.c
30276@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
30277 return NOTIFY_OK;
30278 }
30279
30280-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
30281+static struct notifier_block amd_cpu_notifier = {
30282 .notifier_call = amd_cpu_notify,
30283 };
30284
30285diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
30286index 372e9b8..e775a6c 100644
30287--- a/arch/x86/pci/irq.c
30288+++ b/arch/x86/pci/irq.c
30289@@ -50,7 +50,7 @@ struct irq_router {
30290 struct irq_router_handler {
30291 u16 vendor;
30292 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
30293-};
30294+} __do_const;
30295
30296 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
30297 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30298@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30299 return 0;
30300 }
30301
30302-static __initdata struct irq_router_handler pirq_routers[] = {
30303+static __initconst const struct irq_router_handler pirq_routers[] = {
30304 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30305 { PCI_VENDOR_ID_AL, ali_router_probe },
30306 { PCI_VENDOR_ID_ITE, ite_router_probe },
30307@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30308 static void __init pirq_find_router(struct irq_router *r)
30309 {
30310 struct irq_routing_table *rt = pirq_table;
30311- struct irq_router_handler *h;
30312+ const struct irq_router_handler *h;
30313
30314 #ifdef CONFIG_PCI_BIOS
30315 if (!rt->signature) {
30316@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30317 return 0;
30318 }
30319
30320-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30321+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30322 {
30323 .callback = fix_broken_hp_bios_irq9,
30324 .ident = "HP Pavilion N5400 Series Laptop",
30325diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30326index 6eb18c4..20d83de 100644
30327--- a/arch/x86/pci/mrst.c
30328+++ b/arch/x86/pci/mrst.c
30329@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30330 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30331 pci_mmcfg_late_init();
30332 pcibios_enable_irq = mrst_pci_irq_enable;
30333- pci_root_ops = pci_mrst_ops;
30334+ pax_open_kernel();
30335+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30336+ pax_close_kernel();
30337 pci_soc_mode = 1;
30338 /* Continue with standard init */
30339 return 1;
30340diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30341index c77b24a..c979855 100644
30342--- a/arch/x86/pci/pcbios.c
30343+++ b/arch/x86/pci/pcbios.c
30344@@ -79,7 +79,7 @@ union bios32 {
30345 static struct {
30346 unsigned long address;
30347 unsigned short segment;
30348-} bios32_indirect = { 0, __KERNEL_CS };
30349+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30350
30351 /*
30352 * Returns the entry point for the given service, NULL on error
30353@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30354 unsigned long length; /* %ecx */
30355 unsigned long entry; /* %edx */
30356 unsigned long flags;
30357+ struct desc_struct d, *gdt;
30358
30359 local_irq_save(flags);
30360- __asm__("lcall *(%%edi); cld"
30361+
30362+ gdt = get_cpu_gdt_table(smp_processor_id());
30363+
30364+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30365+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30366+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30367+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30368+
30369+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30370 : "=a" (return_code),
30371 "=b" (address),
30372 "=c" (length),
30373 "=d" (entry)
30374 : "0" (service),
30375 "1" (0),
30376- "D" (&bios32_indirect));
30377+ "D" (&bios32_indirect),
30378+ "r"(__PCIBIOS_DS)
30379+ : "memory");
30380+
30381+ pax_open_kernel();
30382+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30383+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30384+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30385+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30386+ pax_close_kernel();
30387+
30388 local_irq_restore(flags);
30389
30390 switch (return_code) {
30391- case 0:
30392- return address + entry;
30393- case 0x80: /* Not present */
30394- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30395- return 0;
30396- default: /* Shouldn't happen */
30397- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30398- service, return_code);
30399+ case 0: {
30400+ int cpu;
30401+ unsigned char flags;
30402+
30403+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30404+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30405+ printk(KERN_WARNING "bios32_service: not valid\n");
30406 return 0;
30407+ }
30408+ address = address + PAGE_OFFSET;
30409+ length += 16UL; /* some BIOSs underreport this... */
30410+ flags = 4;
30411+ if (length >= 64*1024*1024) {
30412+ length >>= PAGE_SHIFT;
30413+ flags |= 8;
30414+ }
30415+
30416+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30417+ gdt = get_cpu_gdt_table(cpu);
30418+ pack_descriptor(&d, address, length, 0x9b, flags);
30419+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30420+ pack_descriptor(&d, address, length, 0x93, flags);
30421+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30422+ }
30423+ return entry;
30424+ }
30425+ case 0x80: /* Not present */
30426+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30427+ return 0;
30428+ default: /* Shouldn't happen */
30429+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30430+ service, return_code);
30431+ return 0;
30432 }
30433 }
30434
30435 static struct {
30436 unsigned long address;
30437 unsigned short segment;
30438-} pci_indirect = { 0, __KERNEL_CS };
30439+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30440
30441-static int pci_bios_present;
30442+static int pci_bios_present __read_only;
30443
30444 static int check_pcibios(void)
30445 {
30446@@ -131,11 +174,13 @@ static int check_pcibios(void)
30447 unsigned long flags, pcibios_entry;
30448
30449 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30450- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30451+ pci_indirect.address = pcibios_entry;
30452
30453 local_irq_save(flags);
30454- __asm__(
30455- "lcall *(%%edi); cld\n\t"
30456+ __asm__("movw %w6, %%ds\n\t"
30457+ "lcall *%%ss:(%%edi); cld\n\t"
30458+ "push %%ss\n\t"
30459+ "pop %%ds\n\t"
30460 "jc 1f\n\t"
30461 "xor %%ah, %%ah\n"
30462 "1:"
30463@@ -144,7 +189,8 @@ static int check_pcibios(void)
30464 "=b" (ebx),
30465 "=c" (ecx)
30466 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30467- "D" (&pci_indirect)
30468+ "D" (&pci_indirect),
30469+ "r" (__PCIBIOS_DS)
30470 : "memory");
30471 local_irq_restore(flags);
30472
30473@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30474
30475 switch (len) {
30476 case 1:
30477- __asm__("lcall *(%%esi); cld\n\t"
30478+ __asm__("movw %w6, %%ds\n\t"
30479+ "lcall *%%ss:(%%esi); cld\n\t"
30480+ "push %%ss\n\t"
30481+ "pop %%ds\n\t"
30482 "jc 1f\n\t"
30483 "xor %%ah, %%ah\n"
30484 "1:"
30485@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30486 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30487 "b" (bx),
30488 "D" ((long)reg),
30489- "S" (&pci_indirect));
30490+ "S" (&pci_indirect),
30491+ "r" (__PCIBIOS_DS));
30492 /*
30493 * Zero-extend the result beyond 8 bits, do not trust the
30494 * BIOS having done it:
30495@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30496 *value &= 0xff;
30497 break;
30498 case 2:
30499- __asm__("lcall *(%%esi); cld\n\t"
30500+ __asm__("movw %w6, %%ds\n\t"
30501+ "lcall *%%ss:(%%esi); cld\n\t"
30502+ "push %%ss\n\t"
30503+ "pop %%ds\n\t"
30504 "jc 1f\n\t"
30505 "xor %%ah, %%ah\n"
30506 "1:"
30507@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30508 : "1" (PCIBIOS_READ_CONFIG_WORD),
30509 "b" (bx),
30510 "D" ((long)reg),
30511- "S" (&pci_indirect));
30512+ "S" (&pci_indirect),
30513+ "r" (__PCIBIOS_DS));
30514 /*
30515 * Zero-extend the result beyond 16 bits, do not trust the
30516 * BIOS having done it:
30517@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30518 *value &= 0xffff;
30519 break;
30520 case 4:
30521- __asm__("lcall *(%%esi); cld\n\t"
30522+ __asm__("movw %w6, %%ds\n\t"
30523+ "lcall *%%ss:(%%esi); cld\n\t"
30524+ "push %%ss\n\t"
30525+ "pop %%ds\n\t"
30526 "jc 1f\n\t"
30527 "xor %%ah, %%ah\n"
30528 "1:"
30529@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30530 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30531 "b" (bx),
30532 "D" ((long)reg),
30533- "S" (&pci_indirect));
30534+ "S" (&pci_indirect),
30535+ "r" (__PCIBIOS_DS));
30536 break;
30537 }
30538
30539@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30540
30541 switch (len) {
30542 case 1:
30543- __asm__("lcall *(%%esi); cld\n\t"
30544+ __asm__("movw %w6, %%ds\n\t"
30545+ "lcall *%%ss:(%%esi); cld\n\t"
30546+ "push %%ss\n\t"
30547+ "pop %%ds\n\t"
30548 "jc 1f\n\t"
30549 "xor %%ah, %%ah\n"
30550 "1:"
30551@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30552 "c" (value),
30553 "b" (bx),
30554 "D" ((long)reg),
30555- "S" (&pci_indirect));
30556+ "S" (&pci_indirect),
30557+ "r" (__PCIBIOS_DS));
30558 break;
30559 case 2:
30560- __asm__("lcall *(%%esi); cld\n\t"
30561+ __asm__("movw %w6, %%ds\n\t"
30562+ "lcall *%%ss:(%%esi); cld\n\t"
30563+ "push %%ss\n\t"
30564+ "pop %%ds\n\t"
30565 "jc 1f\n\t"
30566 "xor %%ah, %%ah\n"
30567 "1:"
30568@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30569 "c" (value),
30570 "b" (bx),
30571 "D" ((long)reg),
30572- "S" (&pci_indirect));
30573+ "S" (&pci_indirect),
30574+ "r" (__PCIBIOS_DS));
30575 break;
30576 case 4:
30577- __asm__("lcall *(%%esi); cld\n\t"
30578+ __asm__("movw %w6, %%ds\n\t"
30579+ "lcall *%%ss:(%%esi); cld\n\t"
30580+ "push %%ss\n\t"
30581+ "pop %%ds\n\t"
30582 "jc 1f\n\t"
30583 "xor %%ah, %%ah\n"
30584 "1:"
30585@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30586 "c" (value),
30587 "b" (bx),
30588 "D" ((long)reg),
30589- "S" (&pci_indirect));
30590+ "S" (&pci_indirect),
30591+ "r" (__PCIBIOS_DS));
30592 break;
30593 }
30594
30595@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30596
30597 DBG("PCI: Fetching IRQ routing table... ");
30598 __asm__("push %%es\n\t"
30599+ "movw %w8, %%ds\n\t"
30600 "push %%ds\n\t"
30601 "pop %%es\n\t"
30602- "lcall *(%%esi); cld\n\t"
30603+ "lcall *%%ss:(%%esi); cld\n\t"
30604 "pop %%es\n\t"
30605+ "push %%ss\n\t"
30606+ "pop %%ds\n"
30607 "jc 1f\n\t"
30608 "xor %%ah, %%ah\n"
30609 "1:"
30610@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30611 "1" (0),
30612 "D" ((long) &opt),
30613 "S" (&pci_indirect),
30614- "m" (opt)
30615+ "m" (opt),
30616+ "r" (__PCIBIOS_DS)
30617 : "memory");
30618 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30619 if (ret & 0xff00)
30620@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30621 {
30622 int ret;
30623
30624- __asm__("lcall *(%%esi); cld\n\t"
30625+ __asm__("movw %w5, %%ds\n\t"
30626+ "lcall *%%ss:(%%esi); cld\n\t"
30627+ "push %%ss\n\t"
30628+ "pop %%ds\n"
30629 "jc 1f\n\t"
30630 "xor %%ah, %%ah\n"
30631 "1:"
30632@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30633 : "0" (PCIBIOS_SET_PCI_HW_INT),
30634 "b" ((dev->bus->number << 8) | dev->devfn),
30635 "c" ((irq << 8) | (pin + 10)),
30636- "S" (&pci_indirect));
30637+ "S" (&pci_indirect),
30638+ "r" (__PCIBIOS_DS));
30639 return !(ret & 0xff00);
30640 }
30641 EXPORT_SYMBOL(pcibios_set_irq_routing);
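
The repeated change across pci_bios_read()/pci_bios_write() above is mechanical: each lcall through pci_indirect gains an extra "r" input operand carrying the __PCIBIOS_DS selector, a new "movw %w6, %%ds" loads it before the BIOS call, and "push %%ss; pop %%ds" restores a sane %ds afterwards. Loading segment registers is privileged-context work, but the operand plumbing can be shown in a user-space sketch (x86 only; the selector value below is made up, and the asm only moves it between general registers):

#include <stdio.h>

int main(void)
{
    unsigned short sel = 0x007b;  /* stand-in for the __PCIBIOS_DS selector */
    unsigned short out;

    /* "%w1" forces the 16-bit name of operand 1 (%dx rather than %edx),
     * the same modifier the patch uses in "movw %w6, %%ds" for its
     * seventh operand; here the destination is a plain register so the
     * program runs unprivileged. */
    __asm__("movw %w1, %w0" : "=r" (out) : "r" (sel));

    printf("selector value moved through a register: %#x\n", out);
    return 0;
}
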
30642diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30643index 40e4469..1ab536e 100644
30644--- a/arch/x86/platform/efi/efi_32.c
30645+++ b/arch/x86/platform/efi/efi_32.c
30646@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30647 {
30648 struct desc_ptr gdt_descr;
30649
30650+#ifdef CONFIG_PAX_KERNEXEC
30651+ struct desc_struct d;
30652+#endif
30653+
30654 local_irq_save(efi_rt_eflags);
30655
30656 load_cr3(initial_page_table);
30657 __flush_tlb_all();
30658
30659+#ifdef CONFIG_PAX_KERNEXEC
30660+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30661+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30662+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30663+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30664+#endif
30665+
30666 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30667 gdt_descr.size = GDT_SIZE - 1;
30668 load_gdt(&gdt_descr);
30669@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
30670 {
30671 struct desc_ptr gdt_descr;
30672
30673+#ifdef CONFIG_PAX_KERNEXEC
30674+ struct desc_struct d;
30675+
30676+ memset(&d, 0, sizeof d);
30677+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30678+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30679+#endif
30680+
30681 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30682 gdt_descr.size = GDT_SIZE - 1;
30683 load_gdt(&gdt_descr);
30684diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30685index fbe66e6..eae5e38 100644
30686--- a/arch/x86/platform/efi/efi_stub_32.S
30687+++ b/arch/x86/platform/efi/efi_stub_32.S
30688@@ -6,7 +6,9 @@
30689 */
30690
30691 #include <linux/linkage.h>
30692+#include <linux/init.h>
30693 #include <asm/page_types.h>
30694+#include <asm/segment.h>
30695
30696 /*
30697 * efi_call_phys(void *, ...) is a function with variable parameters.
30698@@ -20,7 +22,7 @@
30699 * service functions will comply with gcc calling convention, too.
30700 */
30701
30702-.text
30703+__INIT
30704 ENTRY(efi_call_phys)
30705 /*
30706 * 0. The function can only be called in Linux kernel. So CS has been
30707@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30708 * The mapping of lower virtual memory has been created in prelog and
30709 * epilog.
30710 */
30711- movl $1f, %edx
30712- subl $__PAGE_OFFSET, %edx
30713- jmp *%edx
30714+#ifdef CONFIG_PAX_KERNEXEC
30715+ movl $(__KERNEXEC_EFI_DS), %edx
30716+ mov %edx, %ds
30717+ mov %edx, %es
30718+ mov %edx, %ss
30719+ addl $2f,(1f)
30720+ ljmp *(1f)
30721+
30722+__INITDATA
30723+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30724+.previous
30725+
30726+2:
30727+ subl $2b,(1b)
30728+#else
30729+ jmp 1f-__PAGE_OFFSET
30730 1:
30731+#endif
30732
30733 /*
30734 * 2. Now on the top of stack is the return
30735@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30736 * parameter 2, ..., param n. To make things easy, we save the return
30737 * address of efi_call_phys in a global variable.
30738 */
30739- popl %edx
30740- movl %edx, saved_return_addr
30741- /* get the function pointer into ECX*/
30742- popl %ecx
30743- movl %ecx, efi_rt_function_ptr
30744- movl $2f, %edx
30745- subl $__PAGE_OFFSET, %edx
30746- pushl %edx
30747+ popl (saved_return_addr)
30748+ popl (efi_rt_function_ptr)
30749
30750 /*
30751 * 3. Clear PG bit in %CR0.
30752@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30753 /*
30754 * 5. Call the physical function.
30755 */
30756- jmp *%ecx
30757+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
30758
30759-2:
30760 /*
30761 * 6. After EFI runtime service returns, control will return to
30762 * following instruction. We'd better readjust stack pointer first.
30763@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30764 movl %cr0, %edx
30765 orl $0x80000000, %edx
30766 movl %edx, %cr0
30767- jmp 1f
30768-1:
30769+
30770 /*
30771 * 8. Now restore the virtual mode from flat mode by
30772 * adding EIP with PAGE_OFFSET.
30773 */
30774- movl $1f, %edx
30775- jmp *%edx
30776+#ifdef CONFIG_PAX_KERNEXEC
30777+ movl $(__KERNEL_DS), %edx
30778+ mov %edx, %ds
30779+ mov %edx, %es
30780+ mov %edx, %ss
30781+ ljmp $(__KERNEL_CS),$1f
30782+#else
30783+ jmp 1f+__PAGE_OFFSET
30784+#endif
30785 1:
30786
30787 /*
30788 * 9. Balance the stack. And because EAX contain the return value,
30789 * we'd better not clobber it.
30790 */
30791- leal efi_rt_function_ptr, %edx
30792- movl (%edx), %ecx
30793- pushl %ecx
30794+ pushl (efi_rt_function_ptr)
30795
30796 /*
30797- * 10. Push the saved return address onto the stack and return.
30798+ * 10. Return to the saved return address.
30799 */
30800- leal saved_return_addr, %edx
30801- movl (%edx), %ecx
30802- pushl %ecx
30803- ret
30804+ jmpl *(saved_return_addr)
30805 ENDPROC(efi_call_phys)
30806 .previous
30807
30808-.data
30809+__INITDATA
30810 saved_return_addr:
30811 .long 0
30812 efi_rt_function_ptr:
30813diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30814index 4c07cca..2c8427d 100644
30815--- a/arch/x86/platform/efi/efi_stub_64.S
30816+++ b/arch/x86/platform/efi/efi_stub_64.S
30817@@ -7,6 +7,7 @@
30818 */
30819
30820 #include <linux/linkage.h>
30821+#include <asm/alternative-asm.h>
30822
30823 #define SAVE_XMM \
30824 mov %rsp, %rax; \
30825@@ -40,6 +41,7 @@ ENTRY(efi_call0)
30826 call *%rdi
30827 addq $32, %rsp
30828 RESTORE_XMM
30829+ pax_force_retaddr 0, 1
30830 ret
30831 ENDPROC(efi_call0)
30832
30833@@ -50,6 +52,7 @@ ENTRY(efi_call1)
30834 call *%rdi
30835 addq $32, %rsp
30836 RESTORE_XMM
30837+ pax_force_retaddr 0, 1
30838 ret
30839 ENDPROC(efi_call1)
30840
30841@@ -60,6 +63,7 @@ ENTRY(efi_call2)
30842 call *%rdi
30843 addq $32, %rsp
30844 RESTORE_XMM
30845+ pax_force_retaddr 0, 1
30846 ret
30847 ENDPROC(efi_call2)
30848
30849@@ -71,6 +75,7 @@ ENTRY(efi_call3)
30850 call *%rdi
30851 addq $32, %rsp
30852 RESTORE_XMM
30853+ pax_force_retaddr 0, 1
30854 ret
30855 ENDPROC(efi_call3)
30856
30857@@ -83,6 +88,7 @@ ENTRY(efi_call4)
30858 call *%rdi
30859 addq $32, %rsp
30860 RESTORE_XMM
30861+ pax_force_retaddr 0, 1
30862 ret
30863 ENDPROC(efi_call4)
30864
30865@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30866 call *%rdi
30867 addq $48, %rsp
30868 RESTORE_XMM
30869+ pax_force_retaddr 0, 1
30870 ret
30871 ENDPROC(efi_call5)
30872
30873@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30874 call *%rdi
30875 addq $48, %rsp
30876 RESTORE_XMM
30877+ pax_force_retaddr 0, 1
30878 ret
30879 ENDPROC(efi_call6)
30880diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30881index e31bcd8..f12dc46 100644
30882--- a/arch/x86/platform/mrst/mrst.c
30883+++ b/arch/x86/platform/mrst/mrst.c
30884@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30885 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30886 int sfi_mrtc_num;
30887
30888-static void mrst_power_off(void)
30889+static __noreturn void mrst_power_off(void)
30890 {
30891+ BUG();
30892 }
30893
30894-static void mrst_reboot(void)
30895+static __noreturn void mrst_reboot(void)
30896 {
30897 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30898+ BUG();
30899 }
30900
30901 /* parse all the mtimer info to a static mtimer array */
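
mrst_power_off() and mrst_reboot() are now annotated __noreturn and end in BUG(): once a machine_ops power-off or reboot hook is typed as never returning, falling off the end of the function would be undefined behavior, so the BUG() both documents and enforces the contract. A minimal user-space sketch of the same idea, with abort() standing in for BUG():

#include <stdio.h>
#include <stdlib.h>

static __attribute__((noreturn)) void power_off_stub(void)
{
    fprintf(stderr, "power off requested\n");
    abort();  /* without this, a "noreturn" function could still return */
}

int main(void)
{
    power_off_stub();
    /* the compiler knows this point is unreachable */
}
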
30902diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30903index d6ee929..3637cb5 100644
30904--- a/arch/x86/platform/olpc/olpc_dt.c
30905+++ b/arch/x86/platform/olpc/olpc_dt.c
30906@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30907 return res;
30908 }
30909
30910-static struct of_pdt_ops prom_olpc_ops __initdata = {
30911+static struct of_pdt_ops prom_olpc_ops __initconst = {
30912 .nextprop = olpc_dt_nextprop,
30913 .getproplen = olpc_dt_getproplen,
30914 .getproperty = olpc_dt_getproperty,
30915diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30916index 3c68768..07e82b8 100644
30917--- a/arch/x86/power/cpu.c
30918+++ b/arch/x86/power/cpu.c
30919@@ -134,7 +134,7 @@ static void do_fpu_end(void)
30920 static void fix_processor_context(void)
30921 {
30922 int cpu = smp_processor_id();
30923- struct tss_struct *t = &per_cpu(init_tss, cpu);
30924+ struct tss_struct *t = init_tss + cpu;
30925
30926 set_tss_desc(cpu, t); /*
30927 * This just modifies memory; should not be
30928@@ -144,8 +144,6 @@ static void fix_processor_context(void)
30929 */
30930
30931 #ifdef CONFIG_X86_64
30932- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30933-
30934 syscall_init(); /* This sets MSR_*STAR and related */
30935 #endif
30936 load_TR_desc(); /* This does ltr */
30937diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30938index cbca565..bae7133 100644
30939--- a/arch/x86/realmode/init.c
30940+++ b/arch/x86/realmode/init.c
30941@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
30942 __va(real_mode_header->trampoline_header);
30943
30944 #ifdef CONFIG_X86_32
30945- trampoline_header->start = __pa(startup_32_smp);
30946+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
30947+
30948+#ifdef CONFIG_PAX_KERNEXEC
30949+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30950+#endif
30951+
30952+ trampoline_header->boot_cs = __BOOT_CS;
30953 trampoline_header->gdt_limit = __BOOT_DS + 7;
30954 trampoline_header->gdt_base = __pa(boot_gdt);
30955 #else
30956diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30957index 8869287..d577672 100644
30958--- a/arch/x86/realmode/rm/Makefile
30959+++ b/arch/x86/realmode/rm/Makefile
30960@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30961 $(call cc-option, -fno-unit-at-a-time)) \
30962 $(call cc-option, -fno-stack-protector) \
30963 $(call cc-option, -mpreferred-stack-boundary=2)
30964+ifdef CONSTIFY_PLUGIN
30965+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30966+endif
30967 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30968 GCOV_PROFILE := n
30969diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30970index a28221d..93c40f1 100644
30971--- a/arch/x86/realmode/rm/header.S
30972+++ b/arch/x86/realmode/rm/header.S
30973@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30974 #endif
30975 /* APM/BIOS reboot */
30976 .long pa_machine_real_restart_asm
30977-#ifdef CONFIG_X86_64
30978+#ifdef CONFIG_X86_32
30979+ .long __KERNEL_CS
30980+#else
30981 .long __KERNEL32_CS
30982 #endif
30983 END(real_mode_header)
30984diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30985index c1b2791..f9e31c7 100644
30986--- a/arch/x86/realmode/rm/trampoline_32.S
30987+++ b/arch/x86/realmode/rm/trampoline_32.S
30988@@ -25,6 +25,12 @@
30989 #include <asm/page_types.h>
30990 #include "realmode.h"
30991
30992+#ifdef CONFIG_PAX_KERNEXEC
30993+#define ta(X) (X)
30994+#else
30995+#define ta(X) (pa_ ## X)
30996+#endif
30997+
30998 .text
30999 .code16
31000
31001@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
31002
31003 cli # We should be safe anyway
31004
31005- movl tr_start, %eax # where we need to go
31006-
31007 movl $0xA5A5A5A5, trampoline_status
31008 # write marker for master knows we're running
31009
31010@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
31011 movw $1, %dx # protected mode (PE) bit
31012 lmsw %dx # into protected mode
31013
31014- ljmpl $__BOOT_CS, $pa_startup_32
31015+ ljmpl *(trampoline_header)
31016
31017 .section ".text32","ax"
31018 .code32
31019@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
31020 .balign 8
31021 GLOBAL(trampoline_header)
31022 tr_start: .space 4
31023- tr_gdt_pad: .space 2
31024+ tr_boot_cs: .space 2
31025 tr_gdt: .space 6
31026 END(trampoline_header)
31027
31028diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
31029index bb360dc..3e5945f 100644
31030--- a/arch/x86/realmode/rm/trampoline_64.S
31031+++ b/arch/x86/realmode/rm/trampoline_64.S
31032@@ -107,7 +107,7 @@ ENTRY(startup_32)
31033 wrmsr
31034
31035 # Enable paging and in turn activate Long Mode
31036- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
31037+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
31038 movl %eax, %cr0
31039
31040 /*
31041diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
31042index 79d67bd..c7e1b90 100644
31043--- a/arch/x86/tools/relocs.c
31044+++ b/arch/x86/tools/relocs.c
31045@@ -12,10 +12,13 @@
31046 #include <regex.h>
31047 #include <tools/le_byteshift.h>
31048
31049+#include "../../../include/generated/autoconf.h"
31050+
31051 static void die(char *fmt, ...);
31052
31053 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
31054 static Elf32_Ehdr ehdr;
31055+static Elf32_Phdr *phdr;
31056 static unsigned long reloc_count, reloc_idx;
31057 static unsigned long *relocs;
31058 static unsigned long reloc16_count, reloc16_idx;
31059@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
31060 }
31061 }
31062
31063+static void read_phdrs(FILE *fp)
31064+{
31065+ unsigned int i;
31066+
31067+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
31068+ if (!phdr) {
31069+ die("Unable to allocate %d program headers\n",
31070+ ehdr.e_phnum);
31071+ }
31072+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
31073+ die("Seek to %d failed: %s\n",
31074+ ehdr.e_phoff, strerror(errno));
31075+ }
31076+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
31077+ die("Cannot read ELF program headers: %s\n",
31078+ strerror(errno));
31079+ }
31080+ for(i = 0; i < ehdr.e_phnum; i++) {
31081+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
31082+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
31083+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
31084+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
31085+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
31086+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
31087+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
31088+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
31089+ }
31090+
31091+}
31092+
31093 static void read_shdrs(FILE *fp)
31094 {
31095- int i;
31096+ unsigned int i;
31097 Elf32_Shdr shdr;
31098
31099 secs = calloc(ehdr.e_shnum, sizeof(struct section));
31100@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
31101
31102 static void read_strtabs(FILE *fp)
31103 {
31104- int i;
31105+ unsigned int i;
31106 for (i = 0; i < ehdr.e_shnum; i++) {
31107 struct section *sec = &secs[i];
31108 if (sec->shdr.sh_type != SHT_STRTAB) {
31109@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
31110
31111 static void read_symtabs(FILE *fp)
31112 {
31113- int i,j;
31114+ unsigned int i,j;
31115 for (i = 0; i < ehdr.e_shnum; i++) {
31116 struct section *sec = &secs[i];
31117 if (sec->shdr.sh_type != SHT_SYMTAB) {
31118@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
31119 }
31120
31121
31122-static void read_relocs(FILE *fp)
31123+static void read_relocs(FILE *fp, int use_real_mode)
31124 {
31125- int i,j;
31126+ unsigned int i,j;
31127+ uint32_t base;
31128+
31129 for (i = 0; i < ehdr.e_shnum; i++) {
31130 struct section *sec = &secs[i];
31131 if (sec->shdr.sh_type != SHT_REL) {
31132@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
31133 die("Cannot read symbol table: %s\n",
31134 strerror(errno));
31135 }
31136+ base = 0;
31137+
31138+#ifdef CONFIG_X86_32
31139+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
31140+ if (phdr[j].p_type != PT_LOAD )
31141+ continue;
31142+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
31143+ continue;
31144+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
31145+ break;
31146+ }
31147+#endif
31148+
31149 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
31150 Elf32_Rel *rel = &sec->reltab[j];
31151- rel->r_offset = elf32_to_cpu(rel->r_offset);
31152+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
31153 rel->r_info = elf32_to_cpu(rel->r_info);
31154 }
31155 }
31156@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
31157
31158 static void print_absolute_symbols(void)
31159 {
31160- int i;
31161+ unsigned int i;
31162 printf("Absolute symbols\n");
31163 printf(" Num: Value Size Type Bind Visibility Name\n");
31164 for (i = 0; i < ehdr.e_shnum; i++) {
31165 struct section *sec = &secs[i];
31166 char *sym_strtab;
31167- int j;
31168+ unsigned int j;
31169
31170 if (sec->shdr.sh_type != SHT_SYMTAB) {
31171 continue;
31172@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
31173
31174 static void print_absolute_relocs(void)
31175 {
31176- int i, printed = 0;
31177+ unsigned int i, printed = 0;
31178
31179 for (i = 0; i < ehdr.e_shnum; i++) {
31180 struct section *sec = &secs[i];
31181 struct section *sec_applies, *sec_symtab;
31182 char *sym_strtab;
31183 Elf32_Sym *sh_symtab;
31184- int j;
31185+ unsigned int j;
31186 if (sec->shdr.sh_type != SHT_REL) {
31187 continue;
31188 }
31189@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
31190 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31191 int use_real_mode)
31192 {
31193- int i;
31194+ unsigned int i;
31195 /* Walk through the relocations */
31196 for (i = 0; i < ehdr.e_shnum; i++) {
31197 char *sym_strtab;
31198 Elf32_Sym *sh_symtab;
31199 struct section *sec_applies, *sec_symtab;
31200- int j;
31201+ unsigned int j;
31202 struct section *sec = &secs[i];
31203
31204 if (sec->shdr.sh_type != SHT_REL) {
31205@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31206 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
31207 r_type = ELF32_R_TYPE(rel->r_info);
31208
31209+ if (!use_real_mode) {
31210+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
31211+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
31212+ continue;
31213+
31214+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
31215+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
31216+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
31217+ continue;
31218+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
31219+ continue;
31220+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
31221+ continue;
31222+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
31223+ continue;
31224+#endif
31225+ }
31226+
31227 shn_abs = sym->st_shndx == SHN_ABS;
31228
31229 switch (r_type) {
31230@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
31231
31232 static void emit_relocs(int as_text, int use_real_mode)
31233 {
31234- int i;
31235+ unsigned int i;
31236 /* Count how many relocations I have and allocate space for them. */
31237 reloc_count = 0;
31238 walk_relocs(count_reloc, use_real_mode);
31239@@ -808,10 +874,11 @@ int main(int argc, char **argv)
31240 fname, strerror(errno));
31241 }
31242 read_ehdr(fp);
31243+ read_phdrs(fp);
31244 read_shdrs(fp);
31245 read_strtabs(fp);
31246 read_symtabs(fp);
31247- read_relocs(fp);
31248+ read_relocs(fp, use_real_mode);
31249 if (show_absolute_syms) {
31250 print_absolute_symbols();
31251 goto out;
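
read_phdrs() added above follows the same calloc/fseek/fread pattern the tool already uses for section headers, then byte-swaps every field so the PT_LOAD comparisons later added to read_relocs() work on any host endianness; the blanket int-to-unsigned-int conversion of the loop indices keeps them the same signedness as the ELF count fields they are compared against. A stripped-down, same-endian sketch of the loader half (it skips the elf32_to_cpu() conversions and the magic-number validation the real tool performs):

#include <elf.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read the ELF32 header, then every program header, and print the
 * PT_LOAD ranges - the information read_relocs() needs to compute its
 * relocation base. */
int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <elf32-file>\n", argv[0]);
        return 1;
    }
    FILE *fp = fopen(argv[1], "rb");
    if (!fp) {
        fprintf(stderr, "%s: %s\n", argv[1], strerror(errno));
        return 1;
    }

    Elf32_Ehdr ehdr;
    if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
        fprintf(stderr, "cannot read ELF header\n");
        return 1;
    }

    Elf32_Phdr *phdr = calloc(ehdr.e_phnum, sizeof(*phdr));
    if (!phdr) {
        fprintf(stderr, "cannot allocate %u program headers\n", ehdr.e_phnum);
        return 1;
    }
    if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0 ||
        fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
        fprintf(stderr, "cannot read program headers: %s\n", strerror(errno));
        return 1;
    }

    for (unsigned int i = 0; i < ehdr.e_phnum; i++)
        if (phdr[i].p_type == PT_LOAD)
            printf("PT_LOAD: offset %#x vaddr %#x filesz %#x\n",
                   (unsigned)phdr[i].p_offset, (unsigned)phdr[i].p_vaddr,
                   (unsigned)phdr[i].p_filesz);

    free(phdr);
    fclose(fp);
    return 0;
}
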
31252diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
31253index fd14be1..e3c79c0 100644
31254--- a/arch/x86/vdso/Makefile
31255+++ b/arch/x86/vdso/Makefile
31256@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
31257 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
31258 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
31259
31260-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31261+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31262 GCOV_PROFILE := n
31263
31264 #
31265diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
31266index 0faad64..39ef157 100644
31267--- a/arch/x86/vdso/vdso32-setup.c
31268+++ b/arch/x86/vdso/vdso32-setup.c
31269@@ -25,6 +25,7 @@
31270 #include <asm/tlbflush.h>
31271 #include <asm/vdso.h>
31272 #include <asm/proto.h>
31273+#include <asm/mman.h>
31274
31275 enum {
31276 VDSO_DISABLED = 0,
31277@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
31278 void enable_sep_cpu(void)
31279 {
31280 int cpu = get_cpu();
31281- struct tss_struct *tss = &per_cpu(init_tss, cpu);
31282+ struct tss_struct *tss = init_tss + cpu;
31283
31284 if (!boot_cpu_has(X86_FEATURE_SEP)) {
31285 put_cpu();
31286@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
31287 gate_vma.vm_start = FIXADDR_USER_START;
31288 gate_vma.vm_end = FIXADDR_USER_END;
31289 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31290- gate_vma.vm_page_prot = __P101;
31291+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31292
31293 return 0;
31294 }
31295@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31296 if (compat)
31297 addr = VDSO_HIGH_BASE;
31298 else {
31299- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31300+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31301 if (IS_ERR_VALUE(addr)) {
31302 ret = addr;
31303 goto up_fail;
31304 }
31305 }
31306
31307- current->mm->context.vdso = (void *)addr;
31308+ current->mm->context.vdso = addr;
31309
31310 if (compat_uses_vma || !compat) {
31311 /*
31312@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31313 }
31314
31315 current_thread_info()->sysenter_return =
31316- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31317+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31318
31319 up_fail:
31320 if (ret)
31321- current->mm->context.vdso = NULL;
31322+ current->mm->context.vdso = 0;
31323
31324 up_write(&mm->mmap_sem);
31325
31326@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31327
31328 const char *arch_vma_name(struct vm_area_struct *vma)
31329 {
31330- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31331+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31332 return "[vdso]";
31333+
31334+#ifdef CONFIG_PAX_SEGMEXEC
31335+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31336+ return "[vdso]";
31337+#endif
31338+
31339 return NULL;
31340 }
31341
31342@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31343 * Check to see if the corresponding task was created in compat vdso
31344 * mode.
31345 */
31346- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31347+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31348 return &gate_vma;
31349 return NULL;
31350 }
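
Replacing the hard-coded __P101 with vm_get_page_prot(gate_vma.vm_flags) derives the page protection from the VMA's own VM_READ|VM_EXEC flags, so the two can no longer drift apart. The lookup is essentially a table indexed by the permission bits; everything below (flag values, table contents) is illustrative rather than the kernel's real protection map:

#include <stdio.h>

#define VM_READ  0x1
#define VM_WRITE 0x2
#define VM_EXEC  0x4

/* toy protection map: index by the low three permission bits */
static const char *prot_map[8] = {
    "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx",
};

static const char *vm_get_page_prot(unsigned long vm_flags)
{
    return prot_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC)];
}

int main(void)
{
    /* the gate VMA's flags feed the lookup, yielding r-x here */
    printf("%s\n", vm_get_page_prot(VM_READ | VM_EXEC));
    return 0;
}
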
31351diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31352index 431e875..cbb23f3 100644
31353--- a/arch/x86/vdso/vma.c
31354+++ b/arch/x86/vdso/vma.c
31355@@ -16,8 +16,6 @@
31356 #include <asm/vdso.h>
31357 #include <asm/page.h>
31358
31359-unsigned int __read_mostly vdso_enabled = 1;
31360-
31361 extern char vdso_start[], vdso_end[];
31362 extern unsigned short vdso_sync_cpuid;
31363
31364@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31365 * unaligned here as a result of stack start randomization.
31366 */
31367 addr = PAGE_ALIGN(addr);
31368- addr = align_vdso_addr(addr);
31369
31370 return addr;
31371 }
31372@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31373 unsigned size)
31374 {
31375 struct mm_struct *mm = current->mm;
31376- unsigned long addr;
31377+ unsigned long addr = 0;
31378 int ret;
31379
31380- if (!vdso_enabled)
31381- return 0;
31382-
31383 down_write(&mm->mmap_sem);
31384+
31385+#ifdef CONFIG_PAX_RANDMMAP
31386+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31387+#endif
31388+
31389 addr = vdso_addr(mm->start_stack, size);
31390+ addr = align_vdso_addr(addr);
31391 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31392 if (IS_ERR_VALUE(addr)) {
31393 ret = addr;
31394 goto up_fail;
31395 }
31396
31397- current->mm->context.vdso = (void *)addr;
31398+ mm->context.vdso = addr;
31399
31400 ret = install_special_mapping(mm, addr, size,
31401 VM_READ|VM_EXEC|
31402 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31403 pages);
31404- if (ret) {
31405- current->mm->context.vdso = NULL;
31406- goto up_fail;
31407- }
31408+ if (ret)
31409+ mm->context.vdso = 0;
31410
31411 up_fail:
31412 up_write(&mm->mmap_sem);
31413@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31414 vdsox32_size);
31415 }
31416 #endif
31417-
31418-static __init int vdso_setup(char *s)
31419-{
31420- vdso_enabled = simple_strtoul(s, NULL, 0);
31421- return 0;
31422-}
31423-__setup("vdso=", vdso_setup);
31424diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31425index 2262003..f229ced 100644
31426--- a/arch/x86/xen/enlighten.c
31427+++ b/arch/x86/xen/enlighten.c
31428@@ -100,8 +100,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31429
31430 struct shared_info xen_dummy_shared_info;
31431
31432-void *xen_initial_gdt;
31433-
31434 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31435 __read_mostly int xen_have_vector_callback;
31436 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31437@@ -496,8 +494,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31438 {
31439 unsigned long va = dtr->address;
31440 unsigned int size = dtr->size + 1;
31441- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31442- unsigned long frames[pages];
31443+ unsigned long frames[65536 / PAGE_SIZE];
31444 int f;
31445
31446 /*
31447@@ -545,8 +542,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31448 {
31449 unsigned long va = dtr->address;
31450 unsigned int size = dtr->size + 1;
31451- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31452- unsigned long frames[pages];
31453+ unsigned long frames[65536 / PAGE_SIZE];
31454 int f;
31455
31456 /*
31457@@ -939,7 +935,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31458 return 0;
31459 }
31460
31461-static void set_xen_basic_apic_ops(void)
31462+static void __init set_xen_basic_apic_ops(void)
31463 {
31464 apic->read = xen_apic_read;
31465 apic->write = xen_apic_write;
31466@@ -1245,30 +1241,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31467 #endif
31468 };
31469
31470-static void xen_reboot(int reason)
31471+static __noreturn void xen_reboot(int reason)
31472 {
31473 struct sched_shutdown r = { .reason = reason };
31474
31475- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31476- BUG();
31477+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31478+ BUG();
31479 }
31480
31481-static void xen_restart(char *msg)
31482+static __noreturn void xen_restart(char *msg)
31483 {
31484 xen_reboot(SHUTDOWN_reboot);
31485 }
31486
31487-static void xen_emergency_restart(void)
31488+static __noreturn void xen_emergency_restart(void)
31489 {
31490 xen_reboot(SHUTDOWN_reboot);
31491 }
31492
31493-static void xen_machine_halt(void)
31494+static __noreturn void xen_machine_halt(void)
31495 {
31496 xen_reboot(SHUTDOWN_poweroff);
31497 }
31498
31499-static void xen_machine_power_off(void)
31500+static __noreturn void xen_machine_power_off(void)
31501 {
31502 if (pm_power_off)
31503 pm_power_off();
31504@@ -1370,7 +1366,17 @@ asmlinkage void __init xen_start_kernel(void)
31505 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31506
31507 /* Work out if we support NX */
31508- x86_configure_nx();
31509+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31510+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31511+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31512+ unsigned l, h;
31513+
31514+ __supported_pte_mask |= _PAGE_NX;
31515+ rdmsr(MSR_EFER, l, h);
31516+ l |= EFER_NX;
31517+ wrmsr(MSR_EFER, l, h);
31518+ }
31519+#endif
31520
31521 xen_setup_features();
31522
31523@@ -1399,14 +1405,7 @@ asmlinkage void __init xen_start_kernel(void)
31524 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
31525 }
31526
31527- machine_ops = xen_machine_ops;
31528-
31529- /*
31530- * The only reliable way to retain the initial address of the
31531- * percpu gdt_page is to remember it here, so we can go and
31532- * mark it RW later, when the initial percpu area is freed.
31533- */
31534- xen_initial_gdt = &per_cpu(gdt_page, 0);
31535+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
31536
31537 xen_smp_init();
31538
31539@@ -1598,7 +1597,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31540 return NOTIFY_OK;
31541 }
31542
31543-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31544+static struct notifier_block xen_hvm_cpu_notifier = {
31545 .notifier_call = xen_hvm_cpu_notify,
31546 };
31547
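
The open-coded NX probe that replaces x86_configure_nx() in xen_start_kernel() first checks that the extended CPUID leaves exist (EAX of leaf 0x80000000 must report back in the 0x80000000 range), then tests the NX bit, which sits at bit 20 of EDX in leaf 0x80000001 (X86_FEATURE_NX & 31 == 20). Setting EFER.NX via rdmsr/wrmsr is privileged, but the detection half runs fine as a user-space sketch:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* check that extended leaves exist, as the patch does with
     * cpuid_eax(0x80000000) & 0xffff0000 */
    if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
        (eax & 0xffff0000) != 0x80000000) {
        puts("no extended CPUID leaves");
        return 0;
    }
    __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
    printf("NX supported: %s\n", (edx & (1u << 20)) ? "yes" : "no");
    return 0;
}
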
31548diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31549index 01de35c..0bda07b 100644
31550--- a/arch/x86/xen/mmu.c
31551+++ b/arch/x86/xen/mmu.c
31552@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31553 /* L3_k[510] -> level2_kernel_pgt
31554 * L3_i[511] -> level2_fixmap_pgt */
31555 convert_pfn_mfn(level3_kernel_pgt);
31556+ convert_pfn_mfn(level3_vmalloc_start_pgt);
31557+ convert_pfn_mfn(level3_vmalloc_end_pgt);
31558+ convert_pfn_mfn(level3_vmemmap_pgt);
31559
31560 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31561 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31562@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31563 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31564 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31565 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31566+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31567+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31568+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31569 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31570 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31571+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31572 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31573 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31574
31575@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
31576 pv_mmu_ops.set_pud = xen_set_pud;
31577 #if PAGETABLE_LEVELS == 4
31578 pv_mmu_ops.set_pgd = xen_set_pgd;
31579+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31580 #endif
31581
31582 /* This will work as long as patching hasn't happened yet
31583@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31584 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31585 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31586 .set_pgd = xen_set_pgd_hyper,
31587+ .set_pgd_batched = xen_set_pgd_hyper,
31588
31589 .alloc_pud = xen_alloc_pmd_init,
31590 .release_pud = xen_release_pmd_init,
31591diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31592index 34bc4ce..c34aa24 100644
31593--- a/arch/x86/xen/smp.c
31594+++ b/arch/x86/xen/smp.c
31595@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31596 {
31597 BUG_ON(smp_processor_id() != 0);
31598 native_smp_prepare_boot_cpu();
31599-
31600- /* We've switched to the "real" per-cpu gdt, so make sure the
31601- old memory can be recycled */
31602- make_lowmem_page_readwrite(xen_initial_gdt);
31603-
31604 xen_filter_cpu_maps();
31605 xen_setup_vcpu_info_placement();
31606 }
31607@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31608 gdt = get_cpu_gdt_table(cpu);
31609
31610 ctxt->flags = VGCF_IN_KERNEL;
31611- ctxt->user_regs.ds = __USER_DS;
31612- ctxt->user_regs.es = __USER_DS;
31613+ ctxt->user_regs.ds = __KERNEL_DS;
31614+ ctxt->user_regs.es = __KERNEL_DS;
31615 ctxt->user_regs.ss = __KERNEL_DS;
31616 #ifdef CONFIG_X86_32
31617 ctxt->user_regs.fs = __KERNEL_PERCPU;
31618- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31619+ savesegment(gs, ctxt->user_regs.gs);
31620 #else
31621 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31622 #endif
31623@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31624 int rc;
31625
31626 per_cpu(current_task, cpu) = idle;
31627+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
31628 #ifdef CONFIG_X86_32
31629 irq_ctx_init(cpu);
31630 #else
31631 clear_tsk_thread_flag(idle, TIF_FORK);
31632- per_cpu(kernel_stack, cpu) =
31633- (unsigned long)task_stack_page(idle) -
31634- KERNEL_STACK_OFFSET + THREAD_SIZE;
31635+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31636 #endif
31637 xen_setup_runstate_info(cpu);
31638 xen_setup_timer(cpu);
31639@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31640
31641 void __init xen_smp_init(void)
31642 {
31643- smp_ops = xen_smp_ops;
31644+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31645 xen_fill_possible_map();
31646 xen_init_spinlocks();
31647 }
31648diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31649index 33ca6e4..0ded929 100644
31650--- a/arch/x86/xen/xen-asm_32.S
31651+++ b/arch/x86/xen/xen-asm_32.S
31652@@ -84,14 +84,14 @@ ENTRY(xen_iret)
31653 ESP_OFFSET=4 # bytes pushed onto stack
31654
31655 /*
31656- * Store vcpu_info pointer for easy access. Do it this way to
31657- * avoid having to reload %fs
31658+ * Store vcpu_info pointer for easy access.
31659 */
31660 #ifdef CONFIG_SMP
31661- GET_THREAD_INFO(%eax)
31662- movl %ss:TI_cpu(%eax), %eax
31663- movl %ss:__per_cpu_offset(,%eax,4), %eax
31664- mov %ss:xen_vcpu(%eax), %eax
31665+ push %fs
31666+ mov $(__KERNEL_PERCPU), %eax
31667+ mov %eax, %fs
31668+ mov PER_CPU_VAR(xen_vcpu), %eax
31669+ pop %fs
31670 #else
31671 movl %ss:xen_vcpu, %eax
31672 #endif
31673diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31674index 7faed58..ba4427c 100644
31675--- a/arch/x86/xen/xen-head.S
31676+++ b/arch/x86/xen/xen-head.S
31677@@ -19,6 +19,17 @@ ENTRY(startup_xen)
31678 #ifdef CONFIG_X86_32
31679 mov %esi,xen_start_info
31680 mov $init_thread_union+THREAD_SIZE,%esp
31681+#ifdef CONFIG_SMP
31682+ movl $cpu_gdt_table,%edi
31683+ movl $__per_cpu_load,%eax
31684+ movw %ax,__KERNEL_PERCPU + 2(%edi)
31685+ rorl $16,%eax
31686+ movb %al,__KERNEL_PERCPU + 4(%edi)
31687+ movb %ah,__KERNEL_PERCPU + 7(%edi)
31688+ movl $__per_cpu_end - 1,%eax
31689+ subl $__per_cpu_start,%eax
31690+ movw %ax,__KERNEL_PERCPU + 0(%edi)
31691+#endif
31692 #else
31693 mov %rsi,xen_start_info
31694 mov $init_thread_union+THREAD_SIZE,%rsp
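
The SMP block added to startup_xen hand-patches the __KERNEL_PERCPU GDT entry: the 32-bit base (__per_cpu_load) is spliced into descriptor bytes 2-4 and 7, and the 16-bit limit (__per_cpu_end - 1 - __per_cpu_start) into bytes 0-1, matching the x86 segment-descriptor layout. A C rendering of the same byte surgery, with made-up values, assuming as the asm does that the limit fits in 16 bits (the flags/limit-high byte at offset 6 is untouched):

#include <stdint.h>
#include <stdio.h>

static void set_desc_base_limit(uint8_t desc[8], uint32_t base, uint16_t limit)
{
    desc[0] = limit & 0xff;         /* movw %ax, 0(%edi): limit[15:0] */
    desc[1] = limit >> 8;
    desc[2] = base & 0xff;          /* movw %ax, 2(%edi): base[15:0] */
    desc[3] = (base >> 8) & 0xff;
    desc[4] = (base >> 16) & 0xff;  /* movb %al, 4(%edi) after rorl $16 */
    desc[7] = (base >> 24) & 0xff;  /* movb %ah, 7(%edi) */
}

int main(void)
{
    uint8_t desc[8] = {0};

    set_desc_base_limit(desc, 0xdeadbeef, 0x1fff);  /* illustrative values */
    for (int i = 0; i < 8; i++)
        printf("%02x ", desc[i]);
    putchar('\n');
    return 0;
}
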
31695diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31696index a95b417..b6dbd0b 100644
31697--- a/arch/x86/xen/xen-ops.h
31698+++ b/arch/x86/xen/xen-ops.h
31699@@ -10,8 +10,6 @@
31700 extern const char xen_hypervisor_callback[];
31701 extern const char xen_failsafe_callback[];
31702
31703-extern void *xen_initial_gdt;
31704-
31705 struct trap_info;
31706 void xen_copy_trap_info(struct trap_info *traps);
31707
31708diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31709index 525bd3d..ef888b1 100644
31710--- a/arch/xtensa/variants/dc232b/include/variant/core.h
31711+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31712@@ -119,9 +119,9 @@
31713 ----------------------------------------------------------------------*/
31714
31715 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31716-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31717 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31718 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31719+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31720
31721 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31722 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31723diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31724index 2f33760..835e50a 100644
31725--- a/arch/xtensa/variants/fsf/include/variant/core.h
31726+++ b/arch/xtensa/variants/fsf/include/variant/core.h
31727@@ -11,6 +11,7 @@
31728 #ifndef _XTENSA_CORE_H
31729 #define _XTENSA_CORE_H
31730
31731+#include <linux/const.h>
31732
31733 /****************************************************************************
31734 Parameters Useful for Any Code, USER or PRIVILEGED
31735@@ -112,9 +113,9 @@
31736 ----------------------------------------------------------------------*/
31737
31738 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31739-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31740 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31741 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31742+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31743
31744 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31745 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31746diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31747index af00795..2bb8105 100644
31748--- a/arch/xtensa/variants/s6000/include/variant/core.h
31749+++ b/arch/xtensa/variants/s6000/include/variant/core.h
31750@@ -11,6 +11,7 @@
31751 #ifndef _XTENSA_CORE_CONFIGURATION_H
31752 #define _XTENSA_CORE_CONFIGURATION_H
31753
31754+#include <linux/const.h>
31755
31756 /****************************************************************************
31757 Parameters Useful for Any Code, USER or PRIVILEGED
31758@@ -118,9 +119,9 @@
31759 ----------------------------------------------------------------------*/
31760
31761 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31762-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31763 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31764 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31765+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31766
31767 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31768 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
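
All three xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE is now derived from its log2 width rather than hard-coded, so the two constants can never disagree, and <linux/const.h> supplies _AC(), which is just an assembler-safe spelling of the 1UL literal. The relationship in isolation:

#include <stdio.h>

#define XCHAL_DCACHE_LINEWIDTH 5
#define XCHAL_DCACHE_LINESIZE (1UL << XCHAL_DCACHE_LINEWIDTH)

int main(void)
{
    /* 1 << 5 == 32, matching the value the old hard-coded define carried */
    printf("D-cache line: %lu bytes\n", XCHAL_DCACHE_LINESIZE);
    return 0;
}
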
31769diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31770index 58916af..eb9dbcf6 100644
31771--- a/block/blk-iopoll.c
31772+++ b/block/blk-iopoll.c
31773@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31774 }
31775 EXPORT_SYMBOL(blk_iopoll_complete);
31776
31777-static void blk_iopoll_softirq(struct softirq_action *h)
31778+static void blk_iopoll_softirq(void)
31779 {
31780 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31781 int rearm = 0, budget = blk_iopoll_budget;
31782@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31783 return NOTIFY_OK;
31784 }
31785
31786-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31787+static struct notifier_block blk_iopoll_cpu_notifier = {
31788 .notifier_call = blk_iopoll_cpu_notify,
31789 };
31790
31791diff --git a/block/blk-map.c b/block/blk-map.c
31792index 623e1cd..ca1e109 100644
31793--- a/block/blk-map.c
31794+++ b/block/blk-map.c
31795@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31796 if (!len || !kbuf)
31797 return -EINVAL;
31798
31799- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31800+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31801 if (do_copy)
31802 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31803 else
31804diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31805index 467c8de..f3628c5 100644
31806--- a/block/blk-softirq.c
31807+++ b/block/blk-softirq.c
31808@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31809 * Softirq action handler - move entries to local list and loop over them
31810 * while passing them to the queue registered handler.
31811 */
31812-static void blk_done_softirq(struct softirq_action *h)
31813+static void blk_done_softirq(void)
31814 {
31815 struct list_head *cpu_list, local_list;
31816
31817@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31818 return NOTIFY_OK;
31819 }
31820
31821-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31822+static struct notifier_block blk_cpu_notifier = {
31823 .notifier_call = blk_cpu_notify,
31824 };
31825
31826diff --git a/block/bsg.c b/block/bsg.c
31827index ff64ae3..593560c 100644
31828--- a/block/bsg.c
31829+++ b/block/bsg.c
31830@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31831 struct sg_io_v4 *hdr, struct bsg_device *bd,
31832 fmode_t has_write_perm)
31833 {
31834+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31835+ unsigned char *cmdptr;
31836+
31837 if (hdr->request_len > BLK_MAX_CDB) {
31838 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31839 if (!rq->cmd)
31840 return -ENOMEM;
31841- }
31842+ cmdptr = rq->cmd;
31843+ } else
31844+ cmdptr = tmpcmd;
31845
31846- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31847+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31848 hdr->request_len))
31849 return -EFAULT;
31850
31851+ if (cmdptr != rq->cmd)
31852+ memcpy(rq->cmd, cmdptr, hdr->request_len);
31853+
31854 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31855 if (blk_verify_command(rq->cmd, has_write_perm))
31856 return -EPERM;
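
The bsg change (and the matching scsi_ioctl.c hunks further down) routes the user-supplied CDB through a bounce buffer: copy_from_user() targets either the separately kmalloc'd command or a fixed-size stack temporary, never the request's embedded __cmd array directly, and only a fully successful copy is memcpy'd into place; under grsecurity this also keeps the user copy out of the request slab object. A user-space sketch of the flow, with memcpy standing in for copy_from_user() and all names modelled on, not taken from, the kernel structures:

#include <stdio.h>
#include <string.h>

#define BLK_MAX_CDB 16

struct fake_request {
    unsigned char __cmd[BLK_MAX_CDB];
    unsigned char *cmd;   /* points at __cmd or a separately allocated buffer */
};

static int fill_cmd(struct fake_request *rq,
                    const unsigned char *user_buf, size_t len)
{
    unsigned char tmpcmd[sizeof(rq->__cmd)];
    unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

    if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
        return -1;                     /* would overflow the stack temporary */

    memcpy(cmdptr, user_buf, len);     /* stands in for copy_from_user() */
    if (cmdptr != rq->cmd)
        memcpy(rq->cmd, cmdptr, len);  /* commit only after a full copy */
    return 0;
}

int main(void)
{
    struct fake_request rq;
    const unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };  /* INQUIRY */

    rq.cmd = rq.__cmd;   /* short command: the embedded array is used */
    if (fill_cmd(&rq, cdb, sizeof(cdb)) == 0)
        printf("opcode %#x landed via the bounce buffer\n", rq.cmd[0]);
    return 0;
}
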
31857diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31858index 7c668c8..db3521c 100644
31859--- a/block/compat_ioctl.c
31860+++ b/block/compat_ioctl.c
31861@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31862 err |= __get_user(f->spec1, &uf->spec1);
31863 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31864 err |= __get_user(name, &uf->name);
31865- f->name = compat_ptr(name);
31866+ f->name = (void __force_kernel *)compat_ptr(name);
31867 if (err) {
31868 err = -EFAULT;
31869 goto out;
31870diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31871index b62fb88..bdab4c4 100644
31872--- a/block/partitions/efi.c
31873+++ b/block/partitions/efi.c
31874@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31875 if (!gpt)
31876 return NULL;
31877
31878+ if (!le32_to_cpu(gpt->num_partition_entries))
31879+ return NULL;
31880+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31881+ if (!pte)
31882+ return NULL;
31883+
31884 count = le32_to_cpu(gpt->num_partition_entries) *
31885 le32_to_cpu(gpt->sizeof_partition_entry);
31886- if (!count)
31887- return NULL;
31888- pte = kzalloc(count, GFP_KERNEL);
31889- if (!pte)
31890- return NULL;
31891-
31892 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31893 (u8 *) pte,
31894 count) < count) {
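
The GPT hunk replaces kzalloc(count), where count is the product of two fields read straight from the on-disk header, with kcalloc(entries, entry_size): calloc-style allocators reject a multiplication that overflows, while the open-coded product silently wraps and yields an undersized buffer that the following read_lba() would overrun. The difference is easy to demonstrate in user space (64-bit host assumed so the wrap is visible):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* "attacker-controlled" fields, chosen so the product wraps */
    size_t nr_entries = (size_t)1 << 33;
    size_t entry_size = (size_t)1 << 32;

    size_t wrapped = nr_entries * entry_size;     /* 2^65 mod 2^64 == 0 */
    void *bad = malloc(wrapped);                  /* tiny (zero-size) buffer */
    void *good = calloc(nr_entries, entry_size);  /* overflow detected: NULL */

    printf("wrapped size: %zu -> malloc %p, calloc %p\n", wrapped, bad, good);
    free(bad);
    free(good);
    return 0;
}
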
31895diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31896index 9a87daa..fb17486 100644
31897--- a/block/scsi_ioctl.c
31898+++ b/block/scsi_ioctl.c
31899@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31900 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31901 struct sg_io_hdr *hdr, fmode_t mode)
31902 {
31903- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31904+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31905+ unsigned char *cmdptr;
31906+
31907+ if (rq->cmd != rq->__cmd)
31908+ cmdptr = rq->cmd;
31909+ else
31910+ cmdptr = tmpcmd;
31911+
31912+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31913 return -EFAULT;
31914+
31915+ if (cmdptr != rq->cmd)
31916+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31917+
31918 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31919 return -EPERM;
31920
31921@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31922 int err;
31923 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31924 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31925+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31926+ unsigned char *cmdptr;
31927
31928 if (!sic)
31929 return -EINVAL;
31930@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31931 */
31932 err = -EFAULT;
31933 rq->cmd_len = cmdlen;
31934- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31935+
31936+ if (rq->cmd != rq->__cmd)
31937+ cmdptr = rq->cmd;
31938+ else
31939+ cmdptr = tmpcmd;
31940+
31941+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31942 goto error;
31943
31944+ if (rq->cmd != cmdptr)
31945+ memcpy(rq->cmd, cmdptr, cmdlen);
31946+
31947 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31948 goto error;
31949
31950diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31951index 7bdd61b..afec999 100644
31952--- a/crypto/cryptd.c
31953+++ b/crypto/cryptd.c
31954@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31955
31956 struct cryptd_blkcipher_request_ctx {
31957 crypto_completion_t complete;
31958-};
31959+} __no_const;
31960
31961 struct cryptd_hash_ctx {
31962 struct crypto_shash *child;
31963@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31964
31965 struct cryptd_aead_request_ctx {
31966 crypto_completion_t complete;
31967-};
31968+} __no_const;
31969
31970 static void cryptd_queue_worker(struct work_struct *work);
31971
31972diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
31973index f6d9baf..dfd511f 100644
31974--- a/crypto/crypto_user.c
31975+++ b/crypto/crypto_user.c
31976@@ -30,6 +30,8 @@
31977
31978 #include "internal.h"
31979
31980+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
31981+
31982 static DEFINE_MUTEX(crypto_cfg_mutex);
31983
31984 /* The crypto netlink socket */
31985@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
31986 struct crypto_dump_info info;
31987 int err;
31988
31989- if (!p->cru_driver_name)
31990+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31991+ return -EINVAL;
31992+
31993+ if (!p->cru_driver_name[0])
31994 return -EINVAL;
31995
31996 alg = crypto_alg_match(p, 1);
31997@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31998 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31999 LIST_HEAD(list);
32000
32001+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32002+ return -EINVAL;
32003+
32004 if (priority && !strlen(p->cru_driver_name))
32005 return -EINVAL;
32006
32007@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32008 struct crypto_alg *alg;
32009 struct crypto_user_alg *p = nlmsg_data(nlh);
32010
32011+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32012+ return -EINVAL;
32013+
32014 alg = crypto_alg_match(p, 1);
32015 if (!alg)
32016 return -ENOENT;
32017@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32018 struct crypto_user_alg *p = nlmsg_data(nlh);
32019 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
32020
32021+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32022+ return -EINVAL;
32023+
32024 if (strlen(p->cru_driver_name))
32025 exact = 1;
32026
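
Every crypto_user handler now starts with the same guard: null_terminated() uses strnlen() to prove a NUL exists inside the fixed-size cru_name/cru_driver_name arrays before anything calls strlen() or strcmp() on them, since a netlink peer controls those bytes. Note the macro only works on true arrays, where sizeof yields the buffer size rather than a pointer size:

#include <stdio.h>
#include <string.h>

#define CRYPTO_MAX_ALG_NAME 64

#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))

int main(void)
{
    char good[CRYPTO_MAX_ALG_NAME] = "cbc(aes)";
    char bad[CRYPTO_MAX_ALG_NAME];

    memset(bad, 'A', sizeof(bad));      /* no terminator anywhere */

    /* strnlen never runs past the array, so the check itself is safe */
    printf("good: %d, bad: %d\n", null_terminated(good), null_terminated(bad));
    return 0;
}
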
32027diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
32028index f220d64..d359ad6 100644
32029--- a/drivers/acpi/apei/apei-internal.h
32030+++ b/drivers/acpi/apei/apei-internal.h
32031@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
32032 struct apei_exec_ins_type {
32033 u32 flags;
32034 apei_exec_ins_func_t run;
32035-};
32036+} __do_const;
32037
32038 struct apei_exec_context {
32039 u32 ip;
32040diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
32041index e6defd8..c26a225 100644
32042--- a/drivers/acpi/apei/cper.c
32043+++ b/drivers/acpi/apei/cper.c
32044@@ -38,12 +38,12 @@
32045 */
32046 u64 cper_next_record_id(void)
32047 {
32048- static atomic64_t seq;
32049+ static atomic64_unchecked_t seq;
32050
32051- if (!atomic64_read(&seq))
32052- atomic64_set(&seq, ((u64)get_seconds()) << 32);
32053+ if (!atomic64_read_unchecked(&seq))
32054+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
32055
32056- return atomic64_inc_return(&seq);
32057+ return atomic64_inc_return_unchecked(&seq);
32058 }
32059 EXPORT_SYMBOL_GPL(cper_next_record_id);
32060
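
cper_next_record_id() keeps its shape but moves to the *_unchecked atomics, the PaX/grsecurity variants that opt out of reference-counter overflow trapping; that is appropriate here because the value is a plain sequence number, expected to keep incrementing, rather than a refcount. The logic itself, rendered with C11 atomics (the lazy seeding is as racy as in the original, which is tolerable for an id generator):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t seq;

static uint64_t next_record_id(void)
{
    /* seed the high 32 bits with wall-clock seconds on first use */
    if (atomic_load(&seq) == 0)
        atomic_store(&seq, (uint64_t)time(NULL) << 32);
    /* atomic_inc_return semantics: fetch_add returns the old value */
    return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
    printf("%#llx\n%#llx\n",
           (unsigned long long)next_record_id(),
           (unsigned long long)next_record_id());
    return 0;
}
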
32061diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
32062index be60399..778b33e8 100644
32063--- a/drivers/acpi/bgrt.c
32064+++ b/drivers/acpi/bgrt.c
32065@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
32066 return -ENODEV;
32067
32068 sysfs_bin_attr_init(&image_attr);
32069- image_attr.private = bgrt_image;
32070- image_attr.size = bgrt_image_size;
32071+ pax_open_kernel();
32072+ *(void **)&image_attr.private = bgrt_image;
32073+ *(size_t *)&image_attr.size = bgrt_image_size;
32074+ pax_close_kernel();
32075
32076 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
32077 if (!bgrt_kobj)
32078diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
32079index cb96296..b81293b 100644
32080--- a/drivers/acpi/blacklist.c
32081+++ b/drivers/acpi/blacklist.c
32082@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
32083 u32 is_critical_error;
32084 };
32085
32086-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
32087+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
32088
32089 /*
32090 * POLICY: If *anything* doesn't work, put it on the blacklist.
32091@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
32092 return 0;
32093 }
32094
32095-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
32096+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
32097 {
32098 .callback = dmi_disable_osi_vista,
32099 .ident = "Fujitsu Siemens",
32100diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
32101index 7586544..636a2f0 100644
32102--- a/drivers/acpi/ec_sys.c
32103+++ b/drivers/acpi/ec_sys.c
32104@@ -12,6 +12,7 @@
32105 #include <linux/acpi.h>
32106 #include <linux/debugfs.h>
32107 #include <linux/module.h>
32108+#include <linux/uaccess.h>
32109 #include "internal.h"
32110
32111 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
32112@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32113 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
32114 */
32115 unsigned int size = EC_SPACE_SIZE;
32116- u8 *data = (u8 *) buf;
32117+ u8 data;
32118 loff_t init_off = *off;
32119 int err = 0;
32120
32121@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32122 size = count;
32123
32124 while (size) {
32125- err = ec_read(*off, &data[*off - init_off]);
32126+ err = ec_read(*off, &data);
32127 if (err)
32128 return err;
32129+ if (put_user(data, &buf[*off - init_off]))
32130+ return -EFAULT;
32131 *off += 1;
32132 size--;
32133 }
32134@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32135
32136 unsigned int size = count;
32137 loff_t init_off = *off;
32138- u8 *data = (u8 *) buf;
32139 int err = 0;
32140
32141 if (*off >= EC_SPACE_SIZE)
32142@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32143 }
32144
32145 while (size) {
32146- u8 byte_write = data[*off - init_off];
32147+ u8 byte_write;
32148+ if (get_user(byte_write, &buf[*off - init_off]))
32149+ return -EFAULT;
32150 err = ec_write(*off, byte_write);
32151 if (err)
32152 return err;
32153diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
32154index e83311b..142b5cc 100644
32155--- a/drivers/acpi/processor_driver.c
32156+++ b/drivers/acpi/processor_driver.c
32157@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
32158 return 0;
32159 #endif
32160
32161- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
32162+ BUG_ON(pr->id >= nr_cpu_ids);
32163
32164 /*
32165 * Buggy BIOS check
32166diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
32167index ed9a1cc..f4a354c 100644
32168--- a/drivers/acpi/processor_idle.c
32169+++ b/drivers/acpi/processor_idle.c
32170@@ -1005,7 +1005,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
32171 {
32172 int i, count = CPUIDLE_DRIVER_STATE_START;
32173 struct acpi_processor_cx *cx;
32174- struct cpuidle_state *state;
32175+ cpuidle_state_no_const *state;
32176 struct cpuidle_driver *drv = &acpi_idle_driver;
32177
32178 if (!pr->flags.power_setup_done)
32179diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
32180index ea61ca9..3fdd70d 100644
32181--- a/drivers/acpi/sysfs.c
32182+++ b/drivers/acpi/sysfs.c
32183@@ -420,11 +420,11 @@ static u32 num_counters;
32184 static struct attribute **all_attrs;
32185 static u32 acpi_gpe_count;
32186
32187-static struct attribute_group interrupt_stats_attr_group = {
32188+static attribute_group_no_const interrupt_stats_attr_group = {
32189 .name = "interrupts",
32190 };
32191
32192-static struct kobj_attribute *counter_attrs;
32193+static kobj_attribute_no_const *counter_attrs;
32194
32195 static void delete_gpe_attr_array(void)
32196 {
32197diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
32198index 6cd7805..07facb3 100644
32199--- a/drivers/ata/libahci.c
32200+++ b/drivers/ata/libahci.c
32201@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
32202 }
32203 EXPORT_SYMBOL_GPL(ahci_kick_engine);
32204
32205-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32206+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32207 struct ata_taskfile *tf, int is_cmd, u16 flags,
32208 unsigned long timeout_msec)
32209 {
32210diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
32211index 46cd3f4..0871ad0 100644
32212--- a/drivers/ata/libata-core.c
32213+++ b/drivers/ata/libata-core.c
32214@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
32215 struct ata_port *ap;
32216 unsigned int tag;
32217
32218- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32219+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32220 ap = qc->ap;
32221
32222 qc->flags = 0;
32223@@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
32224 struct ata_port *ap;
32225 struct ata_link *link;
32226
32227- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32228+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32229 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
32230 ap = qc->ap;
32231 link = qc->dev->link;
32232@@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32233 return;
32234
32235 spin_lock(&lock);
32236+ pax_open_kernel();
32237
32238 for (cur = ops->inherits; cur; cur = cur->inherits) {
32239 void **inherit = (void **)cur;
32240@@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32241 if (IS_ERR(*pp))
32242 *pp = NULL;
32243
32244- ops->inherits = NULL;
32245+ *(struct ata_port_operations **)&ops->inherits = NULL;
32246
32247+ pax_close_kernel();
32248 spin_unlock(&lock);
32249 }
32250
32251diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
32252index 405022d..fb70e53 100644
32253--- a/drivers/ata/pata_arasan_cf.c
32254+++ b/drivers/ata/pata_arasan_cf.c
32255@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
32256 /* Handle platform specific quirks */
32257 if (pdata->quirk) {
32258 if (pdata->quirk & CF_BROKEN_PIO) {
32259- ap->ops->set_piomode = NULL;
32260+ pax_open_kernel();
32261+ *(void **)&ap->ops->set_piomode = NULL;
32262+ pax_close_kernel();
32263 ap->pio_mask = 0;
32264 }
32265 if (pdata->quirk & CF_BROKEN_MWDMA)
32266diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
32267index f9b983a..887b9d8 100644
32268--- a/drivers/atm/adummy.c
32269+++ b/drivers/atm/adummy.c
32270@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
32271 vcc->pop(vcc, skb);
32272 else
32273 dev_kfree_skb_any(skb);
32274- atomic_inc(&vcc->stats->tx);
32275+ atomic_inc_unchecked(&vcc->stats->tx);
32276
32277 return 0;
32278 }
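
From here down through zatm.c, the ATM driver hunks are one mechanical rewrite: atomic_inc/atomic_add/atomic_read/atomic_set on vcc->stats counters become their _unchecked counterparts. PaX's REFCOUNT hardening makes ordinary atomic_t operations trap on overflow to kill refcount-overflow exploits; statistics counters are expected to wrap, so they are moved to a parallel atomic_unchecked_t type that keeps the old wrapping semantics. A compact userspace model of the two flavours, with C11 atomics standing in for the kernel primitives and an assert standing in for the trap:

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int  v; } atomic_checked_t;   /* refcounts */
typedef struct { atomic_uint v; } atomic_unchecked_t; /* statistics */

static void atomic_inc(atomic_checked_t *a)
{
    int old = atomic_fetch_add(&a->v, 1);
    assert(old != INT_MAX); /* models the PaX REFCOUNT overflow trap */
}

static void atomic_inc_unchecked(atomic_unchecked_t *a)
{
    atomic_fetch_add(&a->v, 1); /* wraps silently, by design */
}

int main(void)
{
    atomic_checked_t   refs = { 0 };
    atomic_unchecked_t rx   = { UINT_MAX };

    atomic_inc(&refs);          /* fine: 0 -> 1 */
    atomic_inc_unchecked(&rx);  /* wraps: UINT_MAX -> 0, no trap */
    printf("refs=%d rx=%u\n", atomic_load(&refs.v), atomic_load(&rx.v));
    return 0;
}
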
32279diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
32280index 77a7480..05cde58 100644
32281--- a/drivers/atm/ambassador.c
32282+++ b/drivers/atm/ambassador.c
32283@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
32284 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
32285
32286 // VC layer stats
32287- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32288+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32289
32290 // free the descriptor
32291 kfree (tx_descr);
32292@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32293 dump_skb ("<<<", vc, skb);
32294
32295 // VC layer stats
32296- atomic_inc(&atm_vcc->stats->rx);
32297+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32298 __net_timestamp(skb);
32299 // end of our responsibility
32300 atm_vcc->push (atm_vcc, skb);
32301@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32302 } else {
32303 PRINTK (KERN_INFO, "dropped over-size frame");
32304 // should we count this?
32305- atomic_inc(&atm_vcc->stats->rx_drop);
32306+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32307 }
32308
32309 } else {
32310@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
32311 }
32312
32313 if (check_area (skb->data, skb->len)) {
32314- atomic_inc(&atm_vcc->stats->tx_err);
32315+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
32316 return -ENOMEM; // ?
32317 }
32318
32319diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
32320index b22d71c..d6e1049 100644
32321--- a/drivers/atm/atmtcp.c
32322+++ b/drivers/atm/atmtcp.c
32323@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32324 if (vcc->pop) vcc->pop(vcc,skb);
32325 else dev_kfree_skb(skb);
32326 if (dev_data) return 0;
32327- atomic_inc(&vcc->stats->tx_err);
32328+ atomic_inc_unchecked(&vcc->stats->tx_err);
32329 return -ENOLINK;
32330 }
32331 size = skb->len+sizeof(struct atmtcp_hdr);
32332@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32333 if (!new_skb) {
32334 if (vcc->pop) vcc->pop(vcc,skb);
32335 else dev_kfree_skb(skb);
32336- atomic_inc(&vcc->stats->tx_err);
32337+ atomic_inc_unchecked(&vcc->stats->tx_err);
32338 return -ENOBUFS;
32339 }
32340 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
32341@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32342 if (vcc->pop) vcc->pop(vcc,skb);
32343 else dev_kfree_skb(skb);
32344 out_vcc->push(out_vcc,new_skb);
32345- atomic_inc(&vcc->stats->tx);
32346- atomic_inc(&out_vcc->stats->rx);
32347+ atomic_inc_unchecked(&vcc->stats->tx);
32348+ atomic_inc_unchecked(&out_vcc->stats->rx);
32349 return 0;
32350 }
32351
32352@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32353 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32354 read_unlock(&vcc_sklist_lock);
32355 if (!out_vcc) {
32356- atomic_inc(&vcc->stats->tx_err);
32357+ atomic_inc_unchecked(&vcc->stats->tx_err);
32358 goto done;
32359 }
32360 skb_pull(skb,sizeof(struct atmtcp_hdr));
32361@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32362 __net_timestamp(new_skb);
32363 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32364 out_vcc->push(out_vcc,new_skb);
32365- atomic_inc(&vcc->stats->tx);
32366- atomic_inc(&out_vcc->stats->rx);
32367+ atomic_inc_unchecked(&vcc->stats->tx);
32368+ atomic_inc_unchecked(&out_vcc->stats->rx);
32369 done:
32370 if (vcc->pop) vcc->pop(vcc,skb);
32371 else dev_kfree_skb(skb);
32372diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32373index c1eb6fa..4c71be9 100644
32374--- a/drivers/atm/eni.c
32375+++ b/drivers/atm/eni.c
32376@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32377 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32378 vcc->dev->number);
32379 length = 0;
32380- atomic_inc(&vcc->stats->rx_err);
32381+ atomic_inc_unchecked(&vcc->stats->rx_err);
32382 }
32383 else {
32384 length = ATM_CELL_SIZE-1; /* no HEC */
32385@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32386 size);
32387 }
32388 eff = length = 0;
32389- atomic_inc(&vcc->stats->rx_err);
32390+ atomic_inc_unchecked(&vcc->stats->rx_err);
32391 }
32392 else {
32393 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32394@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32395 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32396 vcc->dev->number,vcc->vci,length,size << 2,descr);
32397 length = eff = 0;
32398- atomic_inc(&vcc->stats->rx_err);
32399+ atomic_inc_unchecked(&vcc->stats->rx_err);
32400 }
32401 }
32402 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32403@@ -767,7 +767,7 @@ rx_dequeued++;
32404 vcc->push(vcc,skb);
32405 pushed++;
32406 }
32407- atomic_inc(&vcc->stats->rx);
32408+ atomic_inc_unchecked(&vcc->stats->rx);
32409 }
32410 wake_up(&eni_dev->rx_wait);
32411 }
32412@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32413 PCI_DMA_TODEVICE);
32414 if (vcc->pop) vcc->pop(vcc,skb);
32415 else dev_kfree_skb_irq(skb);
32416- atomic_inc(&vcc->stats->tx);
32417+ atomic_inc_unchecked(&vcc->stats->tx);
32418 wake_up(&eni_dev->tx_wait);
32419 dma_complete++;
32420 }
32421diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32422index b41c948..a002b17 100644
32423--- a/drivers/atm/firestream.c
32424+++ b/drivers/atm/firestream.c
32425@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32426 }
32427 }
32428
32429- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32430+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32431
32432 fs_dprintk (FS_DEBUG_TXMEM, "i");
32433 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32434@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32435 #endif
32436 skb_put (skb, qe->p1 & 0xffff);
32437 ATM_SKB(skb)->vcc = atm_vcc;
32438- atomic_inc(&atm_vcc->stats->rx);
32439+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32440 __net_timestamp(skb);
32441 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32442 atm_vcc->push (atm_vcc, skb);
32443@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32444 kfree (pe);
32445 }
32446 if (atm_vcc)
32447- atomic_inc(&atm_vcc->stats->rx_drop);
32448+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32449 break;
32450 case 0x1f: /* Reassembly abort: no buffers. */
32451 /* Silently increment error counter. */
32452 if (atm_vcc)
32453- atomic_inc(&atm_vcc->stats->rx_drop);
32454+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32455 break;
32456 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32457 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32458diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32459index 204814e..cede831 100644
32460--- a/drivers/atm/fore200e.c
32461+++ b/drivers/atm/fore200e.c
32462@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32463 #endif
32464 /* check error condition */
32465 if (*entry->status & STATUS_ERROR)
32466- atomic_inc(&vcc->stats->tx_err);
32467+ atomic_inc_unchecked(&vcc->stats->tx_err);
32468 else
32469- atomic_inc(&vcc->stats->tx);
32470+ atomic_inc_unchecked(&vcc->stats->tx);
32471 }
32472 }
32473
32474@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32475 if (skb == NULL) {
32476 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32477
32478- atomic_inc(&vcc->stats->rx_drop);
32479+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32480 return -ENOMEM;
32481 }
32482
32483@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32484
32485 dev_kfree_skb_any(skb);
32486
32487- atomic_inc(&vcc->stats->rx_drop);
32488+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32489 return -ENOMEM;
32490 }
32491
32492 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32493
32494 vcc->push(vcc, skb);
32495- atomic_inc(&vcc->stats->rx);
32496+ atomic_inc_unchecked(&vcc->stats->rx);
32497
32498 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32499
32500@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32501 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32502 fore200e->atm_dev->number,
32503 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32504- atomic_inc(&vcc->stats->rx_err);
32505+ atomic_inc_unchecked(&vcc->stats->rx_err);
32506 }
32507 }
32508
32509@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32510 goto retry_here;
32511 }
32512
32513- atomic_inc(&vcc->stats->tx_err);
32514+ atomic_inc_unchecked(&vcc->stats->tx_err);
32515
32516 fore200e->tx_sat++;
32517 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32518diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32519index 72b6960..cf9167a 100644
32520--- a/drivers/atm/he.c
32521+++ b/drivers/atm/he.c
32522@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32523
32524 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32525 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32526- atomic_inc(&vcc->stats->rx_drop);
32527+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32528 goto return_host_buffers;
32529 }
32530
32531@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32532 RBRQ_LEN_ERR(he_dev->rbrq_head)
32533 ? "LEN_ERR" : "",
32534 vcc->vpi, vcc->vci);
32535- atomic_inc(&vcc->stats->rx_err);
32536+ atomic_inc_unchecked(&vcc->stats->rx_err);
32537 goto return_host_buffers;
32538 }
32539
32540@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32541 vcc->push(vcc, skb);
32542 spin_lock(&he_dev->global_lock);
32543
32544- atomic_inc(&vcc->stats->rx);
32545+ atomic_inc_unchecked(&vcc->stats->rx);
32546
32547 return_host_buffers:
32548 ++pdus_assembled;
32549@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32550 tpd->vcc->pop(tpd->vcc, tpd->skb);
32551 else
32552 dev_kfree_skb_any(tpd->skb);
32553- atomic_inc(&tpd->vcc->stats->tx_err);
32554+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32555 }
32556 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32557 return;
32558@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32559 vcc->pop(vcc, skb);
32560 else
32561 dev_kfree_skb_any(skb);
32562- atomic_inc(&vcc->stats->tx_err);
32563+ atomic_inc_unchecked(&vcc->stats->tx_err);
32564 return -EINVAL;
32565 }
32566
32567@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32568 vcc->pop(vcc, skb);
32569 else
32570 dev_kfree_skb_any(skb);
32571- atomic_inc(&vcc->stats->tx_err);
32572+ atomic_inc_unchecked(&vcc->stats->tx_err);
32573 return -EINVAL;
32574 }
32575 #endif
32576@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32577 vcc->pop(vcc, skb);
32578 else
32579 dev_kfree_skb_any(skb);
32580- atomic_inc(&vcc->stats->tx_err);
32581+ atomic_inc_unchecked(&vcc->stats->tx_err);
32582 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32583 return -ENOMEM;
32584 }
32585@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32586 vcc->pop(vcc, skb);
32587 else
32588 dev_kfree_skb_any(skb);
32589- atomic_inc(&vcc->stats->tx_err);
32590+ atomic_inc_unchecked(&vcc->stats->tx_err);
32591 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32592 return -ENOMEM;
32593 }
32594@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32595 __enqueue_tpd(he_dev, tpd, cid);
32596 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32597
32598- atomic_inc(&vcc->stats->tx);
32599+ atomic_inc_unchecked(&vcc->stats->tx);
32600
32601 return 0;
32602 }
32603diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32604index 1dc0519..1aadaf7 100644
32605--- a/drivers/atm/horizon.c
32606+++ b/drivers/atm/horizon.c
32607@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32608 {
32609 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32610 // VC layer stats
32611- atomic_inc(&vcc->stats->rx);
32612+ atomic_inc_unchecked(&vcc->stats->rx);
32613 __net_timestamp(skb);
32614 // end of our responsibility
32615 vcc->push (vcc, skb);
32616@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32617 dev->tx_iovec = NULL;
32618
32619 // VC layer stats
32620- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32621+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32622
32623 // free the skb
32624 hrz_kfree_skb (skb);
32625diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32626index 272f009..a18ba55 100644
32627--- a/drivers/atm/idt77252.c
32628+++ b/drivers/atm/idt77252.c
32629@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32630 else
32631 dev_kfree_skb(skb);
32632
32633- atomic_inc(&vcc->stats->tx);
32634+ atomic_inc_unchecked(&vcc->stats->tx);
32635 }
32636
32637 atomic_dec(&scq->used);
32638@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32639 if ((sb = dev_alloc_skb(64)) == NULL) {
32640 printk("%s: Can't allocate buffers for aal0.\n",
32641 card->name);
32642- atomic_add(i, &vcc->stats->rx_drop);
32643+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32644 break;
32645 }
32646 if (!atm_charge(vcc, sb->truesize)) {
32647 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32648 card->name);
32649- atomic_add(i - 1, &vcc->stats->rx_drop);
32650+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32651 dev_kfree_skb(sb);
32652 break;
32653 }
32654@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32655 ATM_SKB(sb)->vcc = vcc;
32656 __net_timestamp(sb);
32657 vcc->push(vcc, sb);
32658- atomic_inc(&vcc->stats->rx);
32659+ atomic_inc_unchecked(&vcc->stats->rx);
32660
32661 cell += ATM_CELL_PAYLOAD;
32662 }
32663@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32664 "(CDC: %08x)\n",
32665 card->name, len, rpp->len, readl(SAR_REG_CDC));
32666 recycle_rx_pool_skb(card, rpp);
32667- atomic_inc(&vcc->stats->rx_err);
32668+ atomic_inc_unchecked(&vcc->stats->rx_err);
32669 return;
32670 }
32671 if (stat & SAR_RSQE_CRC) {
32672 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32673 recycle_rx_pool_skb(card, rpp);
32674- atomic_inc(&vcc->stats->rx_err);
32675+ atomic_inc_unchecked(&vcc->stats->rx_err);
32676 return;
32677 }
32678 if (skb_queue_len(&rpp->queue) > 1) {
32679@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32680 RXPRINTK("%s: Can't alloc RX skb.\n",
32681 card->name);
32682 recycle_rx_pool_skb(card, rpp);
32683- atomic_inc(&vcc->stats->rx_err);
32684+ atomic_inc_unchecked(&vcc->stats->rx_err);
32685 return;
32686 }
32687 if (!atm_charge(vcc, skb->truesize)) {
32688@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32689 __net_timestamp(skb);
32690
32691 vcc->push(vcc, skb);
32692- atomic_inc(&vcc->stats->rx);
32693+ atomic_inc_unchecked(&vcc->stats->rx);
32694
32695 return;
32696 }
32697@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32698 __net_timestamp(skb);
32699
32700 vcc->push(vcc, skb);
32701- atomic_inc(&vcc->stats->rx);
32702+ atomic_inc_unchecked(&vcc->stats->rx);
32703
32704 if (skb->truesize > SAR_FB_SIZE_3)
32705 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32706@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32707 if (vcc->qos.aal != ATM_AAL0) {
32708 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32709 card->name, vpi, vci);
32710- atomic_inc(&vcc->stats->rx_drop);
32711+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32712 goto drop;
32713 }
32714
32715 if ((sb = dev_alloc_skb(64)) == NULL) {
32716 printk("%s: Can't allocate buffers for AAL0.\n",
32717 card->name);
32718- atomic_inc(&vcc->stats->rx_err);
32719+ atomic_inc_unchecked(&vcc->stats->rx_err);
32720 goto drop;
32721 }
32722
32723@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32724 ATM_SKB(sb)->vcc = vcc;
32725 __net_timestamp(sb);
32726 vcc->push(vcc, sb);
32727- atomic_inc(&vcc->stats->rx);
32728+ atomic_inc_unchecked(&vcc->stats->rx);
32729
32730 drop:
32731 skb_pull(queue, 64);
32732@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32733
32734 if (vc == NULL) {
32735 printk("%s: NULL connection in send().\n", card->name);
32736- atomic_inc(&vcc->stats->tx_err);
32737+ atomic_inc_unchecked(&vcc->stats->tx_err);
32738 dev_kfree_skb(skb);
32739 return -EINVAL;
32740 }
32741 if (!test_bit(VCF_TX, &vc->flags)) {
32742 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32743- atomic_inc(&vcc->stats->tx_err);
32744+ atomic_inc_unchecked(&vcc->stats->tx_err);
32745 dev_kfree_skb(skb);
32746 return -EINVAL;
32747 }
32748@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32749 break;
32750 default:
32751 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32752- atomic_inc(&vcc->stats->tx_err);
32753+ atomic_inc_unchecked(&vcc->stats->tx_err);
32754 dev_kfree_skb(skb);
32755 return -EINVAL;
32756 }
32757
32758 if (skb_shinfo(skb)->nr_frags != 0) {
32759 printk("%s: No scatter-gather yet.\n", card->name);
32760- atomic_inc(&vcc->stats->tx_err);
32761+ atomic_inc_unchecked(&vcc->stats->tx_err);
32762 dev_kfree_skb(skb);
32763 return -EINVAL;
32764 }
32765@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32766
32767 err = queue_skb(card, vc, skb, oam);
32768 if (err) {
32769- atomic_inc(&vcc->stats->tx_err);
32770+ atomic_inc_unchecked(&vcc->stats->tx_err);
32771 dev_kfree_skb(skb);
32772 return err;
32773 }
32774@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32775 skb = dev_alloc_skb(64);
32776 if (!skb) {
32777 printk("%s: Out of memory in send_oam().\n", card->name);
32778- atomic_inc(&vcc->stats->tx_err);
32779+ atomic_inc_unchecked(&vcc->stats->tx_err);
32780 return -ENOMEM;
32781 }
32782 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32783diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32784index 4217f29..88f547a 100644
32785--- a/drivers/atm/iphase.c
32786+++ b/drivers/atm/iphase.c
32787@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32788 status = (u_short) (buf_desc_ptr->desc_mode);
32789 if (status & (RX_CER | RX_PTE | RX_OFL))
32790 {
32791- atomic_inc(&vcc->stats->rx_err);
32792+ atomic_inc_unchecked(&vcc->stats->rx_err);
32793 IF_ERR(printk("IA: bad packet, dropping it");)
32794 if (status & RX_CER) {
32795 IF_ERR(printk(" cause: packet CRC error\n");)
32796@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32797 len = dma_addr - buf_addr;
32798 if (len > iadev->rx_buf_sz) {
32799 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32800- atomic_inc(&vcc->stats->rx_err);
32801+ atomic_inc_unchecked(&vcc->stats->rx_err);
32802 goto out_free_desc;
32803 }
32804
32805@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32806 ia_vcc = INPH_IA_VCC(vcc);
32807 if (ia_vcc == NULL)
32808 {
32809- atomic_inc(&vcc->stats->rx_err);
32810+ atomic_inc_unchecked(&vcc->stats->rx_err);
32811 atm_return(vcc, skb->truesize);
32812 dev_kfree_skb_any(skb);
32813 goto INCR_DLE;
32814@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32815 if ((length > iadev->rx_buf_sz) || (length >
32816 (skb->len - sizeof(struct cpcs_trailer))))
32817 {
32818- atomic_inc(&vcc->stats->rx_err);
32819+ atomic_inc_unchecked(&vcc->stats->rx_err);
32820 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32821 length, skb->len);)
32822 atm_return(vcc, skb->truesize);
32823@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32824
32825 IF_RX(printk("rx_dle_intr: skb push");)
32826 vcc->push(vcc,skb);
32827- atomic_inc(&vcc->stats->rx);
32828+ atomic_inc_unchecked(&vcc->stats->rx);
32829 iadev->rx_pkt_cnt++;
32830 }
32831 INCR_DLE:
32832@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32833 {
32834 struct k_sonet_stats *stats;
32835 stats = &PRIV(_ia_dev[board])->sonet_stats;
32836- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32837- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32838- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32839- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32840- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32841- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32842- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32843- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32844- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32845+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32846+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32847+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32848+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32849+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32850+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32851+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32852+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32853+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32854 }
32855 ia_cmds.status = 0;
32856 break;
32857@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32858 if ((desc == 0) || (desc > iadev->num_tx_desc))
32859 {
32860 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32861- atomic_inc(&vcc->stats->tx);
32862+ atomic_inc_unchecked(&vcc->stats->tx);
32863 if (vcc->pop)
32864 vcc->pop(vcc, skb);
32865 else
32866@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32867 ATM_DESC(skb) = vcc->vci;
32868 skb_queue_tail(&iadev->tx_dma_q, skb);
32869
32870- atomic_inc(&vcc->stats->tx);
32871+ atomic_inc_unchecked(&vcc->stats->tx);
32872 iadev->tx_pkt_cnt++;
32873 /* Increment transaction counter */
32874 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32875
32876 #if 0
32877 /* add flow control logic */
32878- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32879+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32880 if (iavcc->vc_desc_cnt > 10) {
32881 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32882 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32883diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32884index fa7d701..1e404c7 100644
32885--- a/drivers/atm/lanai.c
32886+++ b/drivers/atm/lanai.c
32887@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32888 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32889 lanai_endtx(lanai, lvcc);
32890 lanai_free_skb(lvcc->tx.atmvcc, skb);
32891- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32892+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32893 }
32894
32895 /* Try to fill the buffer - don't call unless there is backlog */
32896@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32897 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32898 __net_timestamp(skb);
32899 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32900- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32901+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32902 out:
32903 lvcc->rx.buf.ptr = end;
32904 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32905@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32906 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32907 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32908 lanai->stats.service_rxnotaal5++;
32909- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32910+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32911 return 0;
32912 }
32913 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32914@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32915 int bytes;
32916 read_unlock(&vcc_sklist_lock);
32917 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32918- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32919+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32920 lvcc->stats.x.aal5.service_trash++;
32921 bytes = (SERVICE_GET_END(s) * 16) -
32922 (((unsigned long) lvcc->rx.buf.ptr) -
32923@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32924 }
32925 if (s & SERVICE_STREAM) {
32926 read_unlock(&vcc_sklist_lock);
32927- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32928+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32929 lvcc->stats.x.aal5.service_stream++;
32930 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32931 "PDU on VCI %d!\n", lanai->number, vci);
32932@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32933 return 0;
32934 }
32935 DPRINTK("got rx crc error on vci %d\n", vci);
32936- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32937+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32938 lvcc->stats.x.aal5.service_rxcrc++;
32939 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32940 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32941diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32942index ed1d2b7..8cffc1f 100644
32943--- a/drivers/atm/nicstar.c
32944+++ b/drivers/atm/nicstar.c
32945@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32946 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32947 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32948 card->index);
32949- atomic_inc(&vcc->stats->tx_err);
32950+ atomic_inc_unchecked(&vcc->stats->tx_err);
32951 dev_kfree_skb_any(skb);
32952 return -EINVAL;
32953 }
32954@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32955 if (!vc->tx) {
32956 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32957 card->index);
32958- atomic_inc(&vcc->stats->tx_err);
32959+ atomic_inc_unchecked(&vcc->stats->tx_err);
32960 dev_kfree_skb_any(skb);
32961 return -EINVAL;
32962 }
32963@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32964 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32965 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32966 card->index);
32967- atomic_inc(&vcc->stats->tx_err);
32968+ atomic_inc_unchecked(&vcc->stats->tx_err);
32969 dev_kfree_skb_any(skb);
32970 return -EINVAL;
32971 }
32972
32973 if (skb_shinfo(skb)->nr_frags != 0) {
32974 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32975- atomic_inc(&vcc->stats->tx_err);
32976+ atomic_inc_unchecked(&vcc->stats->tx_err);
32977 dev_kfree_skb_any(skb);
32978 return -EINVAL;
32979 }
32980@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32981 }
32982
32983 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32984- atomic_inc(&vcc->stats->tx_err);
32985+ atomic_inc_unchecked(&vcc->stats->tx_err);
32986 dev_kfree_skb_any(skb);
32987 return -EIO;
32988 }
32989- atomic_inc(&vcc->stats->tx);
32990+ atomic_inc_unchecked(&vcc->stats->tx);
32991
32992 return 0;
32993 }
32994@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32995 printk
32996 ("nicstar%d: Can't allocate buffers for aal0.\n",
32997 card->index);
32998- atomic_add(i, &vcc->stats->rx_drop);
32999+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
33000 break;
33001 }
33002 if (!atm_charge(vcc, sb->truesize)) {
33003 RXPRINTK
33004 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
33005 card->index);
33006- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
33007+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
33008 dev_kfree_skb_any(sb);
33009 break;
33010 }
33011@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33012 ATM_SKB(sb)->vcc = vcc;
33013 __net_timestamp(sb);
33014 vcc->push(vcc, sb);
33015- atomic_inc(&vcc->stats->rx);
33016+ atomic_inc_unchecked(&vcc->stats->rx);
33017 cell += ATM_CELL_PAYLOAD;
33018 }
33019
33020@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33021 if (iovb == NULL) {
33022 printk("nicstar%d: Out of iovec buffers.\n",
33023 card->index);
33024- atomic_inc(&vcc->stats->rx_drop);
33025+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33026 recycle_rx_buf(card, skb);
33027 return;
33028 }
33029@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33030 small or large buffer itself. */
33031 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
33032 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
33033- atomic_inc(&vcc->stats->rx_err);
33034+ atomic_inc_unchecked(&vcc->stats->rx_err);
33035 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33036 NS_MAX_IOVECS);
33037 NS_PRV_IOVCNT(iovb) = 0;
33038@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33039 ("nicstar%d: Expected a small buffer, and this is not one.\n",
33040 card->index);
33041 which_list(card, skb);
33042- atomic_inc(&vcc->stats->rx_err);
33043+ atomic_inc_unchecked(&vcc->stats->rx_err);
33044 recycle_rx_buf(card, skb);
33045 vc->rx_iov = NULL;
33046 recycle_iov_buf(card, iovb);
33047@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33048 ("nicstar%d: Expected a large buffer, and this is not one.\n",
33049 card->index);
33050 which_list(card, skb);
33051- atomic_inc(&vcc->stats->rx_err);
33052+ atomic_inc_unchecked(&vcc->stats->rx_err);
33053 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33054 NS_PRV_IOVCNT(iovb));
33055 vc->rx_iov = NULL;
33056@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33057 printk(" - PDU size mismatch.\n");
33058 else
33059 printk(".\n");
33060- atomic_inc(&vcc->stats->rx_err);
33061+ atomic_inc_unchecked(&vcc->stats->rx_err);
33062 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33063 NS_PRV_IOVCNT(iovb));
33064 vc->rx_iov = NULL;
33065@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33066 /* skb points to a small buffer */
33067 if (!atm_charge(vcc, skb->truesize)) {
33068 push_rxbufs(card, skb);
33069- atomic_inc(&vcc->stats->rx_drop);
33070+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33071 } else {
33072 skb_put(skb, len);
33073 dequeue_sm_buf(card, skb);
33074@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33075 ATM_SKB(skb)->vcc = vcc;
33076 __net_timestamp(skb);
33077 vcc->push(vcc, skb);
33078- atomic_inc(&vcc->stats->rx);
33079+ atomic_inc_unchecked(&vcc->stats->rx);
33080 }
33081 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
33082 struct sk_buff *sb;
33083@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33084 if (len <= NS_SMBUFSIZE) {
33085 if (!atm_charge(vcc, sb->truesize)) {
33086 push_rxbufs(card, sb);
33087- atomic_inc(&vcc->stats->rx_drop);
33088+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33089 } else {
33090 skb_put(sb, len);
33091 dequeue_sm_buf(card, sb);
33092@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33093 ATM_SKB(sb)->vcc = vcc;
33094 __net_timestamp(sb);
33095 vcc->push(vcc, sb);
33096- atomic_inc(&vcc->stats->rx);
33097+ atomic_inc_unchecked(&vcc->stats->rx);
33098 }
33099
33100 push_rxbufs(card, skb);
33101@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33102
33103 if (!atm_charge(vcc, skb->truesize)) {
33104 push_rxbufs(card, skb);
33105- atomic_inc(&vcc->stats->rx_drop);
33106+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33107 } else {
33108 dequeue_lg_buf(card, skb);
33109 #ifdef NS_USE_DESTRUCTORS
33110@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33111 ATM_SKB(skb)->vcc = vcc;
33112 __net_timestamp(skb);
33113 vcc->push(vcc, skb);
33114- atomic_inc(&vcc->stats->rx);
33115+ atomic_inc_unchecked(&vcc->stats->rx);
33116 }
33117
33118 push_rxbufs(card, sb);
33119@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33120 printk
33121 ("nicstar%d: Out of huge buffers.\n",
33122 card->index);
33123- atomic_inc(&vcc->stats->rx_drop);
33124+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33125 recycle_iovec_rx_bufs(card,
33126 (struct iovec *)
33127 iovb->data,
33128@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33129 card->hbpool.count++;
33130 } else
33131 dev_kfree_skb_any(hb);
33132- atomic_inc(&vcc->stats->rx_drop);
33133+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33134 } else {
33135 /* Copy the small buffer to the huge buffer */
33136 sb = (struct sk_buff *)iov->iov_base;
33137@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33138 #endif /* NS_USE_DESTRUCTORS */
33139 __net_timestamp(hb);
33140 vcc->push(vcc, hb);
33141- atomic_inc(&vcc->stats->rx);
33142+ atomic_inc_unchecked(&vcc->stats->rx);
33143 }
33144 }
33145
33146diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
33147index 0474a89..06ea4a1 100644
33148--- a/drivers/atm/solos-pci.c
33149+++ b/drivers/atm/solos-pci.c
33150@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
33151 }
33152 atm_charge(vcc, skb->truesize);
33153 vcc->push(vcc, skb);
33154- atomic_inc(&vcc->stats->rx);
33155+ atomic_inc_unchecked(&vcc->stats->rx);
33156 break;
33157
33158 case PKT_STATUS:
33159@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
33160 vcc = SKB_CB(oldskb)->vcc;
33161
33162 if (vcc) {
33163- atomic_inc(&vcc->stats->tx);
33164+ atomic_inc_unchecked(&vcc->stats->tx);
33165 solos_pop(vcc, oldskb);
33166 } else {
33167 dev_kfree_skb_irq(oldskb);
33168diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
33169index 0215934..ce9f5b1 100644
33170--- a/drivers/atm/suni.c
33171+++ b/drivers/atm/suni.c
33172@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
33173
33174
33175 #define ADD_LIMITED(s,v) \
33176- atomic_add((v),&stats->s); \
33177- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
33178+ atomic_add_unchecked((v),&stats->s); \
33179+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
33180
33181
33182 static void suni_hz(unsigned long from_timer)
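
suni.c's ADD_LIMITED saturates a SONET counter at INT_MAX instead of letting it go negative, so it only needs the wrapping (_unchecked) primitives — the saturation check itself handles the overflow. uPD98402.c just below carries the same macro and gets the same treatment. Its logic, rendered as plain C with an int modelling the atomic (the add is done in unsigned space, since the kernel's atomic_add wraps while naked signed overflow would be undefined behaviour here):

#include <limits.h>
#include <stdio.h>

static void add_limited(int *counter, int v)
{
    *counter = (int)((unsigned int)*counter + (unsigned int)v);
    if (*counter < 0)
        *counter = INT_MAX; /* saturate rather than wrap */
}

int main(void)
{
    int cells = INT_MAX - 2;
    add_limited(&cells, 10);
    printf("cells = %d\n", cells); /* clamped to INT_MAX */
    return 0;
}
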
33183diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
33184index 5120a96..e2572bd 100644
33185--- a/drivers/atm/uPD98402.c
33186+++ b/drivers/atm/uPD98402.c
33187@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
33188 struct sonet_stats tmp;
33189 int error = 0;
33190
33191- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33192+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33193 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
33194 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
33195 if (zero && !error) {
33196@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
33197
33198
33199 #define ADD_LIMITED(s,v) \
33200- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
33201- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
33202- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33203+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
33204+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
33205+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33206
33207
33208 static void stat_event(struct atm_dev *dev)
33209@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
33210 if (reason & uPD98402_INT_PFM) stat_event(dev);
33211 if (reason & uPD98402_INT_PCO) {
33212 (void) GET(PCOCR); /* clear interrupt cause */
33213- atomic_add(GET(HECCT),
33214+ atomic_add_unchecked(GET(HECCT),
33215 &PRIV(dev)->sonet_stats.uncorr_hcs);
33216 }
33217 if ((reason & uPD98402_INT_RFO) &&
33218@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
33219 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
33220 uPD98402_INT_LOS),PIMR); /* enable them */
33221 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
33222- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33223- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
33224- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
33225+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33226+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
33227+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
33228 return 0;
33229 }
33230
33231diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
33232index 969c3c2..9b72956 100644
33233--- a/drivers/atm/zatm.c
33234+++ b/drivers/atm/zatm.c
33235@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33236 }
33237 if (!size) {
33238 dev_kfree_skb_irq(skb);
33239- if (vcc) atomic_inc(&vcc->stats->rx_err);
33240+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
33241 continue;
33242 }
33243 if (!atm_charge(vcc,skb->truesize)) {
33244@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33245 skb->len = size;
33246 ATM_SKB(skb)->vcc = vcc;
33247 vcc->push(vcc,skb);
33248- atomic_inc(&vcc->stats->rx);
33249+ atomic_inc_unchecked(&vcc->stats->rx);
33250 }
33251 zout(pos & 0xffff,MTA(mbx));
33252 #if 0 /* probably a stupid idea */
33253@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
33254 skb_queue_head(&zatm_vcc->backlog,skb);
33255 break;
33256 }
33257- atomic_inc(&vcc->stats->tx);
33258+ atomic_inc_unchecked(&vcc->stats->tx);
33259 wake_up(&zatm_vcc->tx_wait);
33260 }
33261
33262diff --git a/drivers/base/bus.c b/drivers/base/bus.c
33263index 6856303..0602d70 100644
33264--- a/drivers/base/bus.c
33265+++ b/drivers/base/bus.c
33266@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
33267 return -EINVAL;
33268
33269 mutex_lock(&subsys->p->mutex);
33270- list_add_tail(&sif->node, &subsys->p->interfaces);
33271+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
33272 if (sif->add_dev) {
33273 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33274 while ((dev = subsys_dev_iter_next(&iter)))
33275@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
33276 subsys = sif->subsys;
33277
33278 mutex_lock(&subsys->p->mutex);
33279- list_del_init(&sif->node);
33280+ pax_list_del_init((struct list_head *)&sif->node);
33281 if (sif->remove_dev) {
33282 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33283 while ((dev = subsys_dev_iter_next(&iter)))
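
subsys_interface lives in a structure the constify plugin marks read-only, so its embedded list_head cannot be linked with a plain list_add_tail; the pax_list_add_tail/pax_list_del_init wrappers presumably perform the ordinary list operation inside an open/close-kernel bracket (the same pattern recurs for syscore_ops further down). A self-contained sketch of that presumed wrapper shape — the list code mirrors the kernel's, but the pax_* bodies here are my assumption, stubbed for userspace:

#include <stdio.h>

/* Minimal doubly-linked list, modelled on the kernel's list.h. */
struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
    entry->prev = head->prev;
    entry->next = head;
    head->prev->next = entry;
    head->prev = entry;
}

/* Stubs for the write-protection toggles; in the kernel these flip
 * CR0.WP or per-CPU page-table state. */
static void pax_open_kernel(void)  { }
static void pax_close_kernel(void) { }

/* Presumed shape behind the hunk above: the same list operation,
 * bracketed so the read-only node may be written. */
static void pax_list_add_tail(struct list_head *entry, struct list_head *head)
{
    pax_open_kernel();
    list_add_tail(entry, head);
    pax_close_kernel();
}

int main(void)
{
    struct list_head head = { &head, &head }, node;
    pax_list_add_tail(&node, &head);
    printf("tail is node: %d\n", head.prev == &node);
    return 0;
}
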
33284diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
33285index 17cf7ca..7e553e1 100644
33286--- a/drivers/base/devtmpfs.c
33287+++ b/drivers/base/devtmpfs.c
33288@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
33289 if (!thread)
33290 return 0;
33291
33292- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
33293+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
33294 if (err)
33295 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
33296 else
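
sys_mount() takes __user pointers, and with PaX's separation of kernel and user address spaces the sparse checker will reject handing it kernel-resident string literals; __force_user is the cast that declares the crossing deliberate. Since __user/__force are sparse annotations that compile away in a normal build, the sketch below is mostly about the annotation plumbing — the __CHECKER__ fallbacks mirror the kernel's compiler headers, but treat them as illustrative:

#include <stdio.h>

/* Sparse-style address-space annotations; no-ops unless the sparse
 * checker (which defines __CHECKER__) is run. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

/* Stand-in for a syscall that expects a user-space string. */
static int fake_sys_mount(const char __user *type)
{
    (void)type;
    return 0;
}

int main(void)
{
    /* Kernel-resident literal: the __force_user cast tells the
     * checker this __user-typed argument is intentional. */
    int err = fake_sys_mount((const char __force_user *)"devtmpfs");
    printf("err = %d\n", err);
    return 0;
}
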
33297diff --git a/drivers/base/node.c b/drivers/base/node.c
33298index fac124a..66bd4ab 100644
33299--- a/drivers/base/node.c
33300+++ b/drivers/base/node.c
33301@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
33302 struct node_attr {
33303 struct device_attribute attr;
33304 enum node_states state;
33305-};
33306+} __do_const;
33307
33308 static ssize_t show_node_state(struct device *dev,
33309 struct device_attribute *attr, char *buf)
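
__do_const is the opt-in counterpart to the _no_const typedefs seen earlier: struct node_attr mixes a function-pointer-bearing member with plain data, so the constify plugin's heuristic would skip it, and the attribute asks for constification anyway. A trivial sketch with an assumed empty fallback for non-plugin builds:

/* Assumption: outside a plugin build the marker must vanish. */
#ifndef __do_const
#define __do_const
#endif

struct node_attr {
    int attr;  /* stand-ins for the real device_attribute/state */
    int state;
} __do_const;

int main(void)
{
    /* with the plugin, every instance would be read-only after init */
    struct node_attr a = { 1, 2 };
    return a.attr + a.state - 3; /* exit status 0 */
}
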
33310diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
33311index acc3a8d..981c236 100644
33312--- a/drivers/base/power/domain.c
33313+++ b/drivers/base/power/domain.c
33314@@ -1851,7 +1851,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
33315 {
33316 struct cpuidle_driver *cpuidle_drv;
33317 struct gpd_cpu_data *cpu_data;
33318- struct cpuidle_state *idle_state;
33319+ cpuidle_state_no_const *idle_state;
33320 int ret = 0;
33321
33322 if (IS_ERR_OR_NULL(genpd) || state < 0)
33323@@ -1919,7 +1919,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
33324 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
33325 {
33326 struct gpd_cpu_data *cpu_data;
33327- struct cpuidle_state *idle_state;
33328+ cpuidle_state_no_const *idle_state;
33329 int ret = 0;
33330
33331 if (IS_ERR_OR_NULL(genpd))
33332diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
33333index e6ee5e8..98ad7fc 100644
33334--- a/drivers/base/power/wakeup.c
33335+++ b/drivers/base/power/wakeup.c
33336@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
33337 * They need to be modified together atomically, so it's better to use one
33338 * atomic variable to hold them both.
33339 */
33340-static atomic_t combined_event_count = ATOMIC_INIT(0);
33341+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
33342
33343 #define IN_PROGRESS_BITS (sizeof(int) * 4)
33344 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
33345
33346 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33347 {
33348- unsigned int comb = atomic_read(&combined_event_count);
33349+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
33350
33351 *cnt = (comb >> IN_PROGRESS_BITS);
33352 *inpr = comb & MAX_IN_PROGRESS;
33353@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33354 ws->start_prevent_time = ws->last_time;
33355
33356 /* Increment the counter of events in progress. */
33357- cec = atomic_inc_return(&combined_event_count);
33358+ cec = atomic_inc_return_unchecked(&combined_event_count);
33359
33360 trace_wakeup_source_activate(ws->name, cec);
33361 }
33362@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33363 * Increment the counter of registered wakeup events and decrement the
33364 * couter of wakeup events in progress simultaneously.
33365 */
33366- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33367+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33368 trace_wakeup_source_deactivate(ws->name, cec);
33369
33370 split_counters(&cnt, &inpr);
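
combined_event_count packs two counters into one atomic word — the low IN_PROGRESS_BITS hold wakeup events in progress, the high bits the total registered events — so both can move together in a single atomic operation; the hunk only swaps the type to atomic_unchecked_t, since the packed counter is allowed to wrap. The packing arithmetic is worth seeing worked through; a runnable model:

#include <stdatomic.h>
#include <stdio.h>

/* Low half = events in progress, high half = total events. */
#define IN_PROGRESS_BITS (sizeof(int) * 4)          /* 16 for 32-bit int */
#define MAX_IN_PROGRESS  ((1u << IN_PROGRESS_BITS) - 1)

static atomic_uint combined_event_count;

static void split_counters(unsigned *cnt, unsigned *inpr)
{
    unsigned comb = atomic_load(&combined_event_count);
    *cnt  = comb >> IN_PROGRESS_BITS;
    *inpr = comb & MAX_IN_PROGRESS;
}

int main(void)
{
    unsigned cnt, inpr;

    atomic_fetch_add(&combined_event_count, 1);  /* activate: inpr++ */
    /* deactivate: adding MAX_IN_PROGRESS does cnt++ and inpr-- in
     * one atomic step, exactly as in wakeup_source_deactivate() */
    atomic_fetch_add(&combined_event_count, MAX_IN_PROGRESS);

    split_counters(&cnt, &inpr);
    printf("cnt=%u inpr=%u\n", cnt, inpr);       /* cnt=1 inpr=0 */
    return 0;
}
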
33371diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33372index e8d11b6..7b1b36f 100644
33373--- a/drivers/base/syscore.c
33374+++ b/drivers/base/syscore.c
33375@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33376 void register_syscore_ops(struct syscore_ops *ops)
33377 {
33378 mutex_lock(&syscore_ops_lock);
33379- list_add_tail(&ops->node, &syscore_ops_list);
33380+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33381 mutex_unlock(&syscore_ops_lock);
33382 }
33383 EXPORT_SYMBOL_GPL(register_syscore_ops);
33384@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33385 void unregister_syscore_ops(struct syscore_ops *ops)
33386 {
33387 mutex_lock(&syscore_ops_lock);
33388- list_del(&ops->node);
33389+ pax_list_del((struct list_head *)&ops->node);
33390 mutex_unlock(&syscore_ops_lock);
33391 }
33392 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33393diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33394index ade58bc..867143d 100644
33395--- a/drivers/block/cciss.c
33396+++ b/drivers/block/cciss.c
33397@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33398 int err;
33399 u32 cp;
33400
33401+ memset(&arg64, 0, sizeof(arg64));
33402+
33403 err = 0;
33404 err |=
33405 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33406@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
33407 while (!list_empty(&h->reqQ)) {
33408 c = list_entry(h->reqQ.next, CommandList_struct, list);
33409 /* can't do anything if fifo is full */
33410- if ((h->access.fifo_full(h))) {
33411+ if ((h->access->fifo_full(h))) {
33412 dev_warn(&h->pdev->dev, "fifo full\n");
33413 break;
33414 }
33415@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
33416 h->Qdepth--;
33417
33418 /* Tell the controller execute command */
33419- h->access.submit_command(h, c);
33420+ h->access->submit_command(h, c);
33421
33422 /* Put job onto the completed Q */
33423 addQ(&h->cmpQ, c);
33424@@ -3441,17 +3443,17 @@ startio:
33425
33426 static inline unsigned long get_next_completion(ctlr_info_t *h)
33427 {
33428- return h->access.command_completed(h);
33429+ return h->access->command_completed(h);
33430 }
33431
33432 static inline int interrupt_pending(ctlr_info_t *h)
33433 {
33434- return h->access.intr_pending(h);
33435+ return h->access->intr_pending(h);
33436 }
33437
33438 static inline long interrupt_not_for_us(ctlr_info_t *h)
33439 {
33440- return ((h->access.intr_pending(h) == 0) ||
33441+ return ((h->access->intr_pending(h) == 0) ||
33442 (h->interrupts_enabled == 0));
33443 }
33444
33445@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
33446 u32 a;
33447
33448 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33449- return h->access.command_completed(h);
33450+ return h->access->command_completed(h);
33451
33452 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33453 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33454@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33455 trans_support & CFGTBL_Trans_use_short_tags);
33456
33457 /* Change the access methods to the performant access methods */
33458- h->access = SA5_performant_access;
33459+ h->access = &SA5_performant_access;
33460 h->transMethod = CFGTBL_Trans_Performant;
33461
33462 return;
33463@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33464 if (prod_index < 0)
33465 return -ENODEV;
33466 h->product_name = products[prod_index].product_name;
33467- h->access = *(products[prod_index].access);
33468+ h->access = products[prod_index].access;
33469
33470 if (cciss_board_disabled(h)) {
33471 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33472@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
33473 }
33474
33475 /* make sure the board interrupts are off */
33476- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33477+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33478 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33479 if (rc)
33480 goto clean2;
33481@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
33482 * fake ones to scoop up any residual completions.
33483 */
33484 spin_lock_irqsave(&h->lock, flags);
33485- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33486+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33487 spin_unlock_irqrestore(&h->lock, flags);
33488 free_irq(h->intr[h->intr_mode], h);
33489 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33490@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
33491 dev_info(&h->pdev->dev, "Board READY.\n");
33492 dev_info(&h->pdev->dev,
33493 "Waiting for stale completions to drain.\n");
33494- h->access.set_intr_mask(h, CCISS_INTR_ON);
33495+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33496 msleep(10000);
33497- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33498+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33499
33500 rc = controller_reset_failed(h->cfgtable);
33501 if (rc)
33502@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
33503 cciss_scsi_setup(h);
33504
33505 /* Turn the interrupts on so we can service requests */
33506- h->access.set_intr_mask(h, CCISS_INTR_ON);
33507+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33508
33509 /* Get the firmware version */
33510 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33511@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33512 kfree(flush_buf);
33513 if (return_code != IO_OK)
33514 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33515- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33516+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33517 free_irq(h->intr[h->intr_mode], h);
33518 }
33519
33520diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33521index 7fda30e..eb5dfe0 100644
33522--- a/drivers/block/cciss.h
33523+++ b/drivers/block/cciss.h
33524@@ -101,7 +101,7 @@ struct ctlr_info
33525 /* information about each logical volume */
33526 drive_info_struct *drv[CISS_MAX_LUN];
33527
33528- struct access_method access;
33529+ struct access_method *access;
33530
33531 /* queue and queue Info */
33532 struct list_head reqQ;
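
The header change above is what drives all the h->access. to h->access-> edits in cciss.c (and in cpqarray.c next): instead of copying a struct access_method — a table of function pointers — by value into each writable controller structure, the controller now points at the static per-board table, which can then be constified. A sketch of the before/after shape:

#include <stdio.h>

struct access_method {
    void (*submit)(int cmd);
};

static void board_submit(int cmd) { printf("submit %d\n", cmd); }

/* constifiable static table, one per board type */
static const struct access_method SA5_access = { board_submit };

struct ctlr_info {
    /* before: struct access_method access;   (writable copy) */
    const struct access_method *access; /* after: pointer to const table */
};

int main(void)
{
    struct ctlr_info h = { .access = &SA5_access };
    h.access->submit(42); /* was: h.access.submit(42) */
    return 0;
}

The payoff is that a corrupted heap-resident ctlr_info no longer hands an attacker a writable function-pointer table; the indirection costs one extra dereference per call.
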
33533diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33534index 3f08713..56a586a 100644
33535--- a/drivers/block/cpqarray.c
33536+++ b/drivers/block/cpqarray.c
33537@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33538 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33539 goto Enomem4;
33540 }
33541- hba[i]->access.set_intr_mask(hba[i], 0);
33542+ hba[i]->access->set_intr_mask(hba[i], 0);
33543 if (request_irq(hba[i]->intr, do_ida_intr,
33544 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33545 {
33546@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33547 add_timer(&hba[i]->timer);
33548
33549 /* Enable IRQ now that spinlock and rate limit timer are set up */
33550- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33551+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33552
33553 for(j=0; j<NWD; j++) {
33554 struct gendisk *disk = ida_gendisk[i][j];
33555@@ -694,7 +694,7 @@ DBGINFO(
33556 for(i=0; i<NR_PRODUCTS; i++) {
33557 if (board_id == products[i].board_id) {
33558 c->product_name = products[i].product_name;
33559- c->access = *(products[i].access);
33560+ c->access = products[i].access;
33561 break;
33562 }
33563 }
33564@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33565 hba[ctlr]->intr = intr;
33566 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33567 hba[ctlr]->product_name = products[j].product_name;
33568- hba[ctlr]->access = *(products[j].access);
33569+ hba[ctlr]->access = products[j].access;
33570 hba[ctlr]->ctlr = ctlr;
33571 hba[ctlr]->board_id = board_id;
33572 hba[ctlr]->pci_dev = NULL; /* not PCI */
33573@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
33574
33575 while((c = h->reqQ) != NULL) {
33576 /* Can't do anything if we're busy */
33577- if (h->access.fifo_full(h) == 0)
33578+ if (h->access->fifo_full(h) == 0)
33579 return;
33580
33581 /* Get the first entry from the request Q */
33582@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
33583 h->Qdepth--;
33584
33585 /* Tell the controller to do our bidding */
33586- h->access.submit_command(h, c);
33587+ h->access->submit_command(h, c);
33588
33589 /* Get onto the completion Q */
33590 addQ(&h->cmpQ, c);
33591@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33592 unsigned long flags;
33593 __u32 a,a1;
33594
33595- istat = h->access.intr_pending(h);
33596+ istat = h->access->intr_pending(h);
33597 /* Is this interrupt for us? */
33598 if (istat == 0)
33599 return IRQ_NONE;
33600@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33601 */
33602 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33603 if (istat & FIFO_NOT_EMPTY) {
33604- while((a = h->access.command_completed(h))) {
33605+ while((a = h->access->command_completed(h))) {
33606 a1 = a; a &= ~3;
33607 if ((c = h->cmpQ) == NULL)
33608 {
33609@@ -1449,11 +1449,11 @@ static int sendcmd(
33610 /*
33611 * Disable interrupt
33612 */
33613- info_p->access.set_intr_mask(info_p, 0);
33614+ info_p->access->set_intr_mask(info_p, 0);
33615 /* Make sure there is room in the command FIFO */
33616 /* Actually it should be completely empty at this time. */
33617 for (i = 200000; i > 0; i--) {
33618- temp = info_p->access.fifo_full(info_p);
33619+ temp = info_p->access->fifo_full(info_p);
33620 if (temp != 0) {
33621 break;
33622 }
33623@@ -1466,7 +1466,7 @@ DBG(
33624 /*
33625 * Send the cmd
33626 */
33627- info_p->access.submit_command(info_p, c);
33628+ info_p->access->submit_command(info_p, c);
33629 complete = pollcomplete(ctlr);
33630
33631 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33632@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33633 * we check the new geometry. Then turn interrupts back on when
33634 * we're done.
33635 */
33636- host->access.set_intr_mask(host, 0);
33637+ host->access->set_intr_mask(host, 0);
33638 getgeometry(ctlr);
33639- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33640+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33641
33642 for(i=0; i<NWD; i++) {
33643 struct gendisk *disk = ida_gendisk[ctlr][i];
33644@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
33645 /* Wait (up to 2 seconds) for a command to complete */
33646
33647 for (i = 200000; i > 0; i--) {
33648- done = hba[ctlr]->access.command_completed(hba[ctlr]);
33649+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
33650 if (done == 0) {
33651 udelay(10); /* a short fixed delay */
33652 } else
33653diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33654index be73e9d..7fbf140 100644
33655--- a/drivers/block/cpqarray.h
33656+++ b/drivers/block/cpqarray.h
33657@@ -99,7 +99,7 @@ struct ctlr_info {
33658 drv_info_t drv[NWD];
33659 struct proc_dir_entry *proc;
33660
33661- struct access_method access;
33662+ struct access_method *access;
33663
33664 cmdlist_t *reqQ;
33665 cmdlist_t *cmpQ;
33666diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33667index 6b51afa..17e1191 100644
33668--- a/drivers/block/drbd/drbd_int.h
33669+++ b/drivers/block/drbd/drbd_int.h
33670@@ -582,7 +582,7 @@ struct drbd_epoch {
33671 struct drbd_tconn *tconn;
33672 struct list_head list;
33673 unsigned int barrier_nr;
33674- atomic_t epoch_size; /* increased on every request added. */
33675+ atomic_unchecked_t epoch_size; /* increased on every request added. */
33676 atomic_t active; /* increased on every req. added, and dec on every finished. */
33677 unsigned long flags;
33678 };
33679@@ -1011,7 +1011,7 @@ struct drbd_conf {
33680 int al_tr_cycle;
33681 int al_tr_pos; /* position of the next transaction in the journal */
33682 wait_queue_head_t seq_wait;
33683- atomic_t packet_seq;
33684+ atomic_unchecked_t packet_seq;
33685 unsigned int peer_seq;
33686 spinlock_t peer_seq_lock;
33687 unsigned int minor;
33688@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33689 char __user *uoptval;
33690 int err;
33691
33692- uoptval = (char __user __force *)optval;
33693+ uoptval = (char __force_user *)optval;
33694
33695 set_fs(KERNEL_DS);
33696 if (level == SOL_SOCKET)
33697diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33698index 8c13eeb..217adee 100644
33699--- a/drivers/block/drbd/drbd_main.c
33700+++ b/drivers/block/drbd/drbd_main.c
33701@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33702 p->sector = sector;
33703 p->block_id = block_id;
33704 p->blksize = blksize;
33705- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33706+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33707 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33708 }
33709
33710@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33711 return -EIO;
33712 p->sector = cpu_to_be64(req->i.sector);
33713 p->block_id = (unsigned long)req;
33714- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33715+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33716 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33717 if (mdev->state.conn >= C_SYNC_SOURCE &&
33718 mdev->state.conn <= C_PAUSED_SYNC_T)
33719@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33720 {
33721 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33722
33723- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33724- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33725+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33726+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33727 kfree(tconn->current_epoch);
33728
33729 idr_destroy(&tconn->volumes);
33730diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33731index a9eccfc..f5efe87 100644
33732--- a/drivers/block/drbd/drbd_receiver.c
33733+++ b/drivers/block/drbd/drbd_receiver.c
33734@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33735 {
33736 int err;
33737
33738- atomic_set(&mdev->packet_seq, 0);
33739+ atomic_set_unchecked(&mdev->packet_seq, 0);
33740 mdev->peer_seq = 0;
33741
33742 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33743@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33744 do {
33745 next_epoch = NULL;
33746
33747- epoch_size = atomic_read(&epoch->epoch_size);
33748+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33749
33750 switch (ev & ~EV_CLEANUP) {
33751 case EV_PUT:
33752@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33753 rv = FE_DESTROYED;
33754 } else {
33755 epoch->flags = 0;
33756- atomic_set(&epoch->epoch_size, 0);
33757+ atomic_set_unchecked(&epoch->epoch_size, 0);
33758 /* atomic_set(&epoch->active, 0); is already zero */
33759 if (rv == FE_STILL_LIVE)
33760 rv = FE_RECYCLED;
33761@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33762 conn_wait_active_ee_empty(tconn);
33763 drbd_flush(tconn);
33764
33765- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33766+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33767 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33768 if (epoch)
33769 break;
33770@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33771 }
33772
33773 epoch->flags = 0;
33774- atomic_set(&epoch->epoch_size, 0);
33775+ atomic_set_unchecked(&epoch->epoch_size, 0);
33776 atomic_set(&epoch->active, 0);
33777
33778 spin_lock(&tconn->epoch_lock);
33779- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33780+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33781 list_add(&epoch->list, &tconn->current_epoch->list);
33782 tconn->current_epoch = epoch;
33783 tconn->epochs++;
33784@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33785
33786 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33787 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33788- atomic_inc(&tconn->current_epoch->epoch_size);
33789+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33790 err2 = drbd_drain_block(mdev, pi->size);
33791 if (!err)
33792 err = err2;
33793@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33794
33795 spin_lock(&tconn->epoch_lock);
33796 peer_req->epoch = tconn->current_epoch;
33797- atomic_inc(&peer_req->epoch->epoch_size);
33798+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33799 atomic_inc(&peer_req->epoch->active);
33800 spin_unlock(&tconn->epoch_lock);
33801
33802@@ -4346,7 +4346,7 @@ struct data_cmd {
33803 int expect_payload;
33804 size_t pkt_size;
33805 int (*fn)(struct drbd_tconn *, struct packet_info *);
33806-};
33807+} __do_const;
33808
33809 static struct data_cmd drbd_cmd_handler[] = {
33810 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33811@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33812 if (!list_empty(&tconn->current_epoch->list))
33813 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33814 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33815- atomic_set(&tconn->current_epoch->epoch_size, 0);
33816+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33817 tconn->send.seen_any_write_yet = false;
33818
33819 conn_info(tconn, "Connection closed\n");
33820@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33821 struct asender_cmd {
33822 size_t pkt_size;
33823 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33824-};
33825+} __do_const;
33826
33827 static struct asender_cmd asender_tbl[] = {
33828 [P_PING] = { 0, got_Ping },
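Appending __do_const to data_cmd and asender_cmd feeds grsecurity's constify GCC plugin, which forces structures consisting of function pointers into read-only memory, so a kernel write primitive cannot retarget the drbd_cmd_handler[]/asender_tbl[] dispatch. Written out in plain C, the effective result looks like this (handler body and the size constant are hypothetical):

#include <stddef.h>

struct drbd_tconn;
struct packet_info;

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *, struct packet_info *);
};

static int receive_Data(struct drbd_tconn *t, struct packet_info *p)
{
	(void)t; (void)p;
	return 0;
}

/* in effect what the plugin emits: a dispatch table living in .rodata */
static const struct data_cmd drbd_cmd_handler[] = {
	{ 1, 24 /* ~ sizeof(struct p_data) */, receive_Data },
};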
33829diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33830index f74f2c0..bb668af 100644
33831--- a/drivers/block/loop.c
33832+++ b/drivers/block/loop.c
33833@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
33834 mm_segment_t old_fs = get_fs();
33835
33836 set_fs(get_ds());
33837- bw = file->f_op->write(file, buf, len, &pos);
33838+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33839 set_fs(old_fs);
33840 if (likely(bw == len))
33841 return 0;
33842diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33843index d620b44..587561e 100644
33844--- a/drivers/cdrom/cdrom.c
33845+++ b/drivers/cdrom/cdrom.c
33846@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33847 ENSURE(reset, CDC_RESET);
33848 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33849 cdi->mc_flags = 0;
33850- cdo->n_minors = 0;
33851 cdi->options = CDO_USE_FFLAGS;
33852
33853 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33854@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33855 else
33856 cdi->cdda_method = CDDA_OLD;
33857
33858- if (!cdo->generic_packet)
33859- cdo->generic_packet = cdrom_dummy_generic_packet;
33860+ if (!cdo->generic_packet) {
33861+ pax_open_kernel();
33862+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33863+ pax_close_kernel();
33864+ }
33865
33866 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33867 mutex_lock(&cdrom_mutex);
33868@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33869 if (cdi->exit)
33870 cdi->exit(cdi);
33871
33872- cdi->ops->n_minors--;
33873 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33874 }
33875
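Because the constify plugin makes cdrom_device_ops read-only, register_cdrom() can no longer simply assign a default ->generic_packet; the store is bracketed with pax_open_kernel()/pax_close_kernel() and goes through a void ** cast to strip the compiler-visible const. On x86 the bracket essentially toggles CR0.WP; a hedged sketch of the mechanism (not the exact PaX implementation, which also covers other architectures):

static inline void pax_open_kernel_sketch(void)
{
	preempt_disable();
	barrier();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* ring 0 may now write RO pages */
}

static inline void pax_close_kernel_sketch(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);
	barrier();
	preempt_enable();
}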
33876diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33877index d59cdcb..11afddf 100644
33878--- a/drivers/cdrom/gdrom.c
33879+++ b/drivers/cdrom/gdrom.c
33880@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33881 .audio_ioctl = gdrom_audio_ioctl,
33882 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33883 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33884- .n_minors = 1,
33885 };
33886
33887 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33888diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33889index 72bedad..8181ce1 100644
33890--- a/drivers/char/Kconfig
33891+++ b/drivers/char/Kconfig
33892@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33893
33894 config DEVKMEM
33895 bool "/dev/kmem virtual device support"
33896- default y
33897+ default n
33898+ depends on !GRKERNSEC_KMEM
33899 help
33900 Say Y here if you want to support the /dev/kmem device. The
33901 /dev/kmem device is rarely used, but can be used for certain
33902@@ -581,6 +582,7 @@ config DEVPORT
33903 bool
33904 depends on !M68K
33905 depends on ISA || PCI
33906+ depends on !GRKERNSEC_KMEM
33907 default y
33908
33909 source "drivers/s390/char/Kconfig"
33910diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33911index 2e04433..22afc64 100644
33912--- a/drivers/char/agp/frontend.c
33913+++ b/drivers/char/agp/frontend.c
33914@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33915 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33916 return -EFAULT;
33917
33918- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33919+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33920 return -EFAULT;
33921
33922 client = agp_find_client_by_pid(reserve.pid);
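The AGP fix changes which element size bounds seg_count: the buffer allocated from it holds struct agp_segment_priv entries, so guarding against ~0U/sizeof(struct agp_segment) still lets the later count * sizeof(struct agp_segment_priv) multiplication overflow. The general idiom, as a standalone sketch:

#include <stdlib.h>

/* Allocate count elements of size bytes, refusing multiplications that
 * would wrap -- the same idea as bounding seg_count by ~0U / sizeof(elem).
 * (Userspace calloc() performs this check internally.) */
static void *alloc_array_checked(size_t count, size_t size)
{
	if (size && count > (size_t)-1 / size)
		return NULL;		/* count * size would overflow */
	return malloc(count * size);
}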
33923diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33924index 21cb980..f15107c 100644
33925--- a/drivers/char/genrtc.c
33926+++ b/drivers/char/genrtc.c
33927@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33928 switch (cmd) {
33929
33930 case RTC_PLL_GET:
33931+ memset(&pll, 0, sizeof(pll));
33932 if (get_rtc_pll(&pll))
33933 return -EINVAL;
33934 else
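The one-line genrtc change is the standard stack-infoleak fix: struct rtc_pll_info is filled field by field, so compiler padding (and any field get_rtc_pll() leaves untouched) would otherwise reach userspace via the later copy_to_user(). Zeroing the whole struct first makes the copy deterministic. The shape, with a reduced hypothetical struct:

#include <string.h>

struct pll_info {		/* stand-in for struct rtc_pll_info */
	int pll_ctrl;
	long pll_value;		/* padding likely precedes this member */
};

static void fill_for_user(struct pll_info *out)
{
	struct pll_info pll;

	memset(&pll, 0, sizeof(pll));	/* clear padding and skipped fields */
	pll.pll_ctrl  = 1;
	pll.pll_value = 42;
	*out = pll;			/* stands in for copy_to_user() */
}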
33935diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33936index fe6d4be..89f32100 100644
33937--- a/drivers/char/hpet.c
33938+++ b/drivers/char/hpet.c
33939@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33940 }
33941
33942 static int
33943-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33944+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33945 struct hpet_info *info)
33946 {
33947 struct hpet_timer __iomem *timer;
33948diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33949index 053201b0..8335cce 100644
33950--- a/drivers/char/ipmi/ipmi_msghandler.c
33951+++ b/drivers/char/ipmi/ipmi_msghandler.c
33952@@ -420,7 +420,7 @@ struct ipmi_smi {
33953 struct proc_dir_entry *proc_dir;
33954 char proc_dir_name[10];
33955
33956- atomic_t stats[IPMI_NUM_STATS];
33957+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33958
33959 /*
33960 * run_to_completion duplicate of smb_info, smi_info
33961@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33962
33963
33964 #define ipmi_inc_stat(intf, stat) \
33965- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33966+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33967 #define ipmi_get_stat(intf, stat) \
33968- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33969+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33970
33971 static int is_lan_addr(struct ipmi_addr *addr)
33972 {
33973@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33974 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33975 init_waitqueue_head(&intf->waitq);
33976 for (i = 0; i < IPMI_NUM_STATS; i++)
33977- atomic_set(&intf->stats[i], 0);
33978+ atomic_set_unchecked(&intf->stats[i], 0);
33979
33980 intf->proc_dir = NULL;
33981
33982diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33983index 1c7fdcd..4899100 100644
33984--- a/drivers/char/ipmi/ipmi_si_intf.c
33985+++ b/drivers/char/ipmi/ipmi_si_intf.c
33986@@ -275,7 +275,7 @@ struct smi_info {
33987 unsigned char slave_addr;
33988
33989 /* Counters and things for the proc filesystem. */
33990- atomic_t stats[SI_NUM_STATS];
33991+ atomic_unchecked_t stats[SI_NUM_STATS];
33992
33993 struct task_struct *thread;
33994
33995@@ -284,9 +284,9 @@ struct smi_info {
33996 };
33997
33998 #define smi_inc_stat(smi, stat) \
33999- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
34000+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
34001 #define smi_get_stat(smi, stat) \
34002- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
34003+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
34004
34005 #define SI_MAX_PARMS 4
34006
34007@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
34008 atomic_set(&new_smi->req_events, 0);
34009 new_smi->run_to_completion = 0;
34010 for (i = 0; i < SI_NUM_STATS; i++)
34011- atomic_set(&new_smi->stats[i], 0);
34012+ atomic_set_unchecked(&new_smi->stats[i], 0);
34013
34014 new_smi->interrupt_disabled = 1;
34015 atomic_set(&new_smi->stop_operation, 0);
34016diff --git a/drivers/char/mem.c b/drivers/char/mem.c
34017index c6fa3bc..4ca3e42 100644
34018--- a/drivers/char/mem.c
34019+++ b/drivers/char/mem.c
34020@@ -18,6 +18,7 @@
34021 #include <linux/raw.h>
34022 #include <linux/tty.h>
34023 #include <linux/capability.h>
34024+#include <linux/security.h>
34025 #include <linux/ptrace.h>
34026 #include <linux/device.h>
34027 #include <linux/highmem.h>
34028@@ -37,6 +38,10 @@
34029
34030 #define DEVPORT_MINOR 4
34031
34032+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34033+extern const struct file_operations grsec_fops;
34034+#endif
34035+
34036 static inline unsigned long size_inside_page(unsigned long start,
34037 unsigned long size)
34038 {
34039@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34040
34041 while (cursor < to) {
34042 if (!devmem_is_allowed(pfn)) {
34043+#ifdef CONFIG_GRKERNSEC_KMEM
34044+ gr_handle_mem_readwrite(from, to);
34045+#else
34046 printk(KERN_INFO
34047 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
34048 current->comm, from, to);
34049+#endif
34050 return 0;
34051 }
34052 cursor += PAGE_SIZE;
34053@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34054 }
34055 return 1;
34056 }
34057+#elif defined(CONFIG_GRKERNSEC_KMEM)
34058+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34059+{
34060+ return 0;
34061+}
34062 #else
34063 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34064 {
34065@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34066
34067 while (count > 0) {
34068 unsigned long remaining;
34069+ char *temp;
34070
34071 sz = size_inside_page(p, count);
34072
34073@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34074 if (!ptr)
34075 return -EFAULT;
34076
34077- remaining = copy_to_user(buf, ptr, sz);
34078+#ifdef CONFIG_PAX_USERCOPY
34079+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34080+ if (!temp) {
34081+ unxlate_dev_mem_ptr(p, ptr);
34082+ return -ENOMEM;
34083+ }
34084+ memcpy(temp, ptr, sz);
34085+#else
34086+ temp = ptr;
34087+#endif
34088+
34089+ remaining = copy_to_user(buf, temp, sz);
34090+
34091+#ifdef CONFIG_PAX_USERCOPY
34092+ kfree(temp);
34093+#endif
34094+
34095 unxlate_dev_mem_ptr(p, ptr);
34096 if (remaining)
34097 return -EFAULT;
34098@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34099 size_t count, loff_t *ppos)
34100 {
34101 unsigned long p = *ppos;
34102- ssize_t low_count, read, sz;
34103+ ssize_t low_count, read, sz, err = 0;
34104 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
34105- int err = 0;
34106
34107 read = 0;
34108 if (p < (unsigned long) high_memory) {
34109@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34110 }
34111 #endif
34112 while (low_count > 0) {
34113+ char *temp;
34114+
34115 sz = size_inside_page(p, low_count);
34116
34117 /*
34118@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34119 */
34120 kbuf = xlate_dev_kmem_ptr((char *)p);
34121
34122- if (copy_to_user(buf, kbuf, sz))
34123+#ifdef CONFIG_PAX_USERCOPY
34124+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34125+ if (!temp)
34126+ return -ENOMEM;
34127+ memcpy(temp, kbuf, sz);
34128+#else
34129+ temp = kbuf;
34130+#endif
34131+
34132+ err = copy_to_user(buf, temp, sz);
34133+
34134+#ifdef CONFIG_PAX_USERCOPY
34135+ kfree(temp);
34136+#endif
34137+
34138+ if (err)
34139 return -EFAULT;
34140 buf += sz;
34141 p += sz;
34142@@ -833,6 +880,9 @@ static const struct memdev {
34143 #ifdef CONFIG_CRASH_DUMP
34144 [12] = { "oldmem", 0, &oldmem_fops, NULL },
34145 #endif
34146+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34147+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
34148+#endif
34149 };
34150
34151 static int memory_open(struct inode *inode, struct file *filp)
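The read_mem()/read_kmem() rewrites support PAX_USERCOPY, which validates every copy_to_user() source against a known object. A pointer produced by xlate_dev_mem_ptr()/xlate_dev_kmem_ptr() has no such object, so the data is first bounced through a kmalloc() buffer allocated with GFP_USERCOPY (a grsecurity-specific flag marking the slab as a sanctioned usercopy source), letting the checker bound the copy by the slab object size. Boiled down to a helper (a sketch of the pattern, not a kernel API):

static ssize_t bounce_to_user(char __user *buf, const void *ptr, size_t sz)
{
	char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);
	ssize_t ret = 0;

	if (!temp)
		return -ENOMEM;
	memcpy(temp, ptr, sz);			/* kernel-to-kernel, unchecked */
	if (copy_to_user(buf, temp, sz))	/* bounded by the slab object */
		ret = -EFAULT;
	kfree(temp);
	return ret;
}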
34152diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
34153index 9df78e2..01ba9ae 100644
34154--- a/drivers/char/nvram.c
34155+++ b/drivers/char/nvram.c
34156@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
34157
34158 spin_unlock_irq(&rtc_lock);
34159
34160- if (copy_to_user(buf, contents, tmp - contents))
34161+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
34162 return -EFAULT;
34163
34164 *ppos = i;
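The nvram_read() change adds a defensive clamp: even if the fill loop's cursor somehow ran past the on-stack contents[] buffer, the length is rejected before copy_to_user() can leak adjacent stack. The || short-circuit keeps it a single test:

	if (tmp - contents > sizeof(contents) ||	/* never trust the cursor */
	    copy_to_user(buf, contents, tmp - contents))
		return -EFAULT;

Note the signed pointer difference is converted to size_t by the comparison, so a cursor behind the buffer would also be caught as a huge value. The same clamp-before-copy pattern appears below in random.c's extract_entropy_user().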
34165diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
34166index b66eaa0..2619d1b 100644
34167--- a/drivers/char/pcmcia/synclink_cs.c
34168+++ b/drivers/char/pcmcia/synclink_cs.c
34169@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34170
34171 if (debug_level >= DEBUG_LEVEL_INFO)
34172 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
34173- __FILE__,__LINE__, info->device_name, port->count);
34174+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
34175
34176- WARN_ON(!port->count);
34177+ WARN_ON(!atomic_read(&port->count));
34178
34179 if (tty_port_close_start(port, tty, filp) == 0)
34180 goto cleanup;
34181@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34182 cleanup:
34183 if (debug_level >= DEBUG_LEVEL_INFO)
34184 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
34185- tty->driver->name, port->count);
34186+ tty->driver->name, atomic_read(&port->count));
34187 }
34188
34189 /* Wait until the transmitter is empty.
34190@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34191
34192 if (debug_level >= DEBUG_LEVEL_INFO)
34193 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
34194- __FILE__,__LINE__,tty->driver->name, port->count);
34195+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
34196
34197 /* If port is closing, signal caller to try again */
34198 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
34199@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34200 goto cleanup;
34201 }
34202 spin_lock(&port->lock);
34203- port->count++;
34204+ atomic_inc(&port->count);
34205 spin_unlock(&port->lock);
34206 spin_unlock_irqrestore(&info->netlock, flags);
34207
34208- if (port->count == 1) {
34209+ if (atomic_read(&port->count) == 1) {
34210 /* 1st open on this device, init hardware */
34211 retval = startup(info, tty);
34212 if (retval < 0)
34213@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
34214 unsigned short new_crctype;
34215
34216 /* return error if TTY interface open */
34217- if (info->port.count)
34218+ if (atomic_read(&info->port.count))
34219 return -EBUSY;
34220
34221 switch (encoding)
34222@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
34223
34224 /* arbitrate between network and tty opens */
34225 spin_lock_irqsave(&info->netlock, flags);
34226- if (info->port.count != 0 || info->netcount != 0) {
34227+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
34228 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
34229 spin_unlock_irqrestore(&info->netlock, flags);
34230 return -EBUSY;
34231@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34232 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
34233
34234 /* return error if TTY interface open */
34235- if (info->port.count)
34236+ if (atomic_read(&info->port.count))
34237 return -EBUSY;
34238
34239 if (cmd != SIOCWANDEV)
34240diff --git a/drivers/char/random.c b/drivers/char/random.c
34241index 57d4b15..253207b 100644
34242--- a/drivers/char/random.c
34243+++ b/drivers/char/random.c
34244@@ -272,8 +272,13 @@
34245 /*
34246 * Configuration information
34247 */
34248+#ifdef CONFIG_GRKERNSEC_RANDNET
34249+#define INPUT_POOL_WORDS 512
34250+#define OUTPUT_POOL_WORDS 128
34251+#else
34252 #define INPUT_POOL_WORDS 128
34253 #define OUTPUT_POOL_WORDS 32
34254+#endif
34255 #define SEC_XFER_SIZE 512
34256 #define EXTRACT_SIZE 10
34257
34258@@ -313,10 +318,17 @@ static struct poolinfo {
34259 int poolwords;
34260 int tap1, tap2, tap3, tap4, tap5;
34261 } poolinfo_table[] = {
34262+#ifdef CONFIG_GRKERNSEC_RANDNET
34263+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
34264+ { 512, 411, 308, 208, 104, 1 },
34265+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
34266+ { 128, 103, 76, 51, 25, 1 },
34267+#else
34268 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
34269 { 128, 103, 76, 51, 25, 1 },
34270 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
34271 { 32, 26, 20, 14, 7, 1 },
34272+#endif
34273 #if 0
34274 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
34275 { 2048, 1638, 1231, 819, 411, 1 },
34276@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
34277 input_rotate += i ? 7 : 14;
34278 }
34279
34280- ACCESS_ONCE(r->input_rotate) = input_rotate;
34281- ACCESS_ONCE(r->add_ptr) = i;
34282+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
34283+ ACCESS_ONCE_RW(r->add_ptr) = i;
34284 smp_wmb();
34285
34286 if (out)
34287@@ -1024,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
34288
34289 extract_buf(r, tmp);
34290 i = min_t(int, nbytes, EXTRACT_SIZE);
34291- if (copy_to_user(buf, tmp, i)) {
34292+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
34293 ret = -EFAULT;
34294 break;
34295 }
34296@@ -1360,7 +1372,7 @@ EXPORT_SYMBOL(generate_random_uuid);
34297 #include <linux/sysctl.h>
34298
34299 static int min_read_thresh = 8, min_write_thresh;
34300-static int max_read_thresh = INPUT_POOL_WORDS * 32;
34301+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
34302 static int max_write_thresh = INPUT_POOL_WORDS * 32;
34303 static char sysctl_bootid[16];
34304
34305@@ -1376,7 +1388,7 @@ static char sysctl_bootid[16];
34306 static int proc_do_uuid(ctl_table *table, int write,
34307 void __user *buffer, size_t *lenp, loff_t *ppos)
34308 {
34309- ctl_table fake_table;
34310+ ctl_table_no_const fake_table;
34311 unsigned char buf[64], tmp_uuid[16], *uuid;
34312
34313 uuid = table->data;
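Under GRKERNSEC_RANDNET both entropy pools are quadrupled, and each poolinfo entry lists the taps of that pool's feedback polynomial, which is why the 512-word pool arrives with its own primitive polynomial x^512 + x^411 + x^308 + x^208 + x^104 + x + 1. Pool words are 32 bits, so the sizes work out as:

#define INPUT_POOL_WORDS	512	/* was 128 */
#define OUTPUT_POOL_WORDS	128	/* was 32 */

/* 512 words * 4 bytes = 2048-byte input pool   (was  512 bytes)
 * 128 words * 4 bytes =  512-byte output pool  (was  128 bytes) */

The max_read_thresh ceiling is correspondingly re-based on OUTPUT_POOL_WORDS, and proc_do_uuid()'s on-stack fake_table becomes ctl_table_no_const because the constify plugin has made ctl_table itself read-only.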
34314diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
34315index d780295..b29f3a8 100644
34316--- a/drivers/char/sonypi.c
34317+++ b/drivers/char/sonypi.c
34318@@ -54,6 +54,7 @@
34319
34320 #include <asm/uaccess.h>
34321 #include <asm/io.h>
34322+#include <asm/local.h>
34323
34324 #include <linux/sonypi.h>
34325
34326@@ -490,7 +491,7 @@ static struct sonypi_device {
34327 spinlock_t fifo_lock;
34328 wait_queue_head_t fifo_proc_list;
34329 struct fasync_struct *fifo_async;
34330- int open_count;
34331+ local_t open_count;
34332 int model;
34333 struct input_dev *input_jog_dev;
34334 struct input_dev *input_key_dev;
34335@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34336 static int sonypi_misc_release(struct inode *inode, struct file *file)
34337 {
34338 mutex_lock(&sonypi_device.lock);
34339- sonypi_device.open_count--;
34340+ local_dec(&sonypi_device.open_count);
34341 mutex_unlock(&sonypi_device.lock);
34342 return 0;
34343 }
34344@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34345 {
34346 mutex_lock(&sonypi_device.lock);
34347 /* Flush input queue on first open */
34348- if (!sonypi_device.open_count)
34349+ if (!local_read(&sonypi_device.open_count))
34350 kfifo_reset(&sonypi_device.fifo);
34351- sonypi_device.open_count++;
34352+ local_inc(&sonypi_device.open_count);
34353 mutex_unlock(&sonypi_device.lock);
34354
34355 return 0;
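The sonypi open_count moves from a plain int to local_t, accessed with local_read()/local_inc()/local_dec(), whose single-word update primitives are atomic with respect to interrupts on the owning CPU; the same conversion is applied to drm's dev->open_count further down. Pattern (a hypothetical driver sketch):

#include <asm/local.h>

static local_t open_count;

static int dev_open_sketch(void)
{
	if (!local_read(&open_count)) {
		/* first opener: reset queues, init hardware, ... */
	}
	local_inc(&open_count);
	return 0;
}

static int dev_release_sketch(void)
{
	local_dec(&open_count);
	return 0;
}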
34356diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
34357index 93211df..c7805f7 100644
34358--- a/drivers/char/tpm/tpm.c
34359+++ b/drivers/char/tpm/tpm.c
34360@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
34361 chip->vendor.req_complete_val)
34362 goto out_recv;
34363
34364- if ((status == chip->vendor.req_canceled)) {
34365+ if (status == chip->vendor.req_canceled) {
34366 dev_err(chip->dev, "Operation Canceled\n");
34367 rc = -ECANCELED;
34368 goto out;
34369diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34370index 56051d0..11cf3b7 100644
34371--- a/drivers/char/tpm/tpm_acpi.c
34372+++ b/drivers/char/tpm/tpm_acpi.c
34373@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34374 virt = acpi_os_map_memory(start, len);
34375 if (!virt) {
34376 kfree(log->bios_event_log);
34377+ log->bios_event_log = NULL;
34378 printk("%s: ERROR - Unable to map memory\n", __func__);
34379 return -EIO;
34380 }
34381
34382- memcpy_fromio(log->bios_event_log, virt, len);
34383+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34384
34385 acpi_os_unmap_memory(virt, len);
34386 return 0;
34387diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34388index 84ddc55..1d32f1e 100644
34389--- a/drivers/char/tpm/tpm_eventlog.c
34390+++ b/drivers/char/tpm/tpm_eventlog.c
34391@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34392 event = addr;
34393
34394 if ((event->event_type == 0 && event->event_size == 0) ||
34395- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34396+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34397 return NULL;
34398
34399 return addr;
34400@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34401 return NULL;
34402
34403 if ((event->event_type == 0 && event->event_size == 0) ||
34404- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34405+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34406 return NULL;
34407
34408 (*pos)++;
34409@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34410 int i;
34411
34412 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34413- seq_putc(m, data[i]);
34414+ if (!seq_putc(m, data[i]))
34415+ return -EFAULT;
34416
34417 return 0;
34418 }
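The tpm_eventlog bounds checks are rewritten to dodge integer overflow: event->event_size is attacker-influenced (it comes from the log), so computing addr + sizeof(struct tcpa_event) + event_size can wrap and slip past the >= limit test, while comparing event_size against the remaining room keeps the untrusted term alone on one side. (The seq_putc() return-value check in the same file separately turns silent truncation into -EFAULT.) A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

/* 16 stands in for sizeof(struct tcpa_event); addr + 16 <= limit holds. */
static int naive_ok(uintptr_t addr, uintptr_t limit, size_t size)
{
	return addr + 16 + size < limit;	/* sum can wrap */
}

static int safe_ok(uintptr_t addr, uintptr_t limit, size_t size)
{
	return size < limit - addr - 16;	/* untrusted term isolated */
}

int main(void)
{
	uintptr_t addr = 0x1000, limit = 0x2000;
	size_t huge = SIZE_MAX - 8;		/* wraps the naive sum */

	printf("naive=%d safe=%d\n",
	       naive_ok(addr, limit, huge),	/* 1: accepted -- the bug  */
	       safe_ok(addr, limit, huge));	/* 0: rejected correctly */
	return 0;
}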
34419diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34420index a4b7aa0..2faa0bc 100644
34421--- a/drivers/char/virtio_console.c
34422+++ b/drivers/char/virtio_console.c
34423@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34424 if (to_user) {
34425 ssize_t ret;
34426
34427- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34428+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34429 if (ret)
34430 return -EFAULT;
34431 } else {
34432@@ -784,7 +784,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34433 if (!port_has_data(port) && !port->host_connected)
34434 return 0;
34435
34436- return fill_readbuf(port, ubuf, count, true);
34437+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34438 }
34439
34440 static int wait_port_writable(struct port *port, bool nonblock)
34441diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
34442index 8ae1a61..9c00613 100644
34443--- a/drivers/clocksource/arm_generic.c
34444+++ b/drivers/clocksource/arm_generic.c
34445@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34446 return NOTIFY_OK;
34447 }
34448
34449-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34450+static struct notifier_block arch_timer_cpu_nb = {
34451 .notifier_call = arch_timer_cpu_notify,
34452 };
34453
34454diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34455index 7b0d49d..134fac9 100644
34456--- a/drivers/cpufreq/acpi-cpufreq.c
34457+++ b/drivers/cpufreq/acpi-cpufreq.c
34458@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34459 return sprintf(buf, "%u\n", boost_enabled);
34460 }
34461
34462-static struct global_attr global_boost = __ATTR(boost, 0644,
34463+static global_attr_no_const global_boost = __ATTR(boost, 0644,
34464 show_global_boost,
34465 store_global_boost);
34466
34467@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34468 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34469 per_cpu(acfreq_data, cpu) = data;
34470
34471- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34472- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34473+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34474+ pax_open_kernel();
34475+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34476+ pax_close_kernel();
34477+ }
34478
34479 result = acpi_processor_register_performance(data->acpi_data, cpu);
34480 if (result)
34481@@ -835,7 +838,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34482 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34483 break;
34484 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34485- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34486+ pax_open_kernel();
34487+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34488+ pax_close_kernel();
34489 policy->cur = get_cur_freq_on_cpu(cpu);
34490 break;
34491 default:
34492@@ -846,8 +851,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34493 acpi_processor_notify_smm(THIS_MODULE);
34494
34495 /* Check for APERF/MPERF support in hardware */
34496- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34497- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34498+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34499+ pax_open_kernel();
34500+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34501+ pax_close_kernel();
34502+ }
34503
34504 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34505 for (i = 0; i < perf->state_count; i++)
34506diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34507index 1f93dbd..305cef1 100644
34508--- a/drivers/cpufreq/cpufreq.c
34509+++ b/drivers/cpufreq/cpufreq.c
34510@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34511 return NOTIFY_OK;
34512 }
34513
34514-static struct notifier_block __refdata cpufreq_cpu_notifier = {
34515+static struct notifier_block cpufreq_cpu_notifier = {
34516 .notifier_call = cpufreq_cpu_callback,
34517 };
34518
34519@@ -1875,8 +1875,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34520
34521 pr_debug("trying to register driver %s\n", driver_data->name);
34522
34523- if (driver_data->setpolicy)
34524- driver_data->flags |= CPUFREQ_CONST_LOOPS;
34525+ if (driver_data->setpolicy) {
34526+ pax_open_kernel();
34527+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34528+ pax_close_kernel();
34529+ }
34530
34531 spin_lock_irqsave(&cpufreq_driver_lock, flags);
34532 if (cpufreq_driver) {
34533diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34534index 6c5f1d3..c7e2f35e 100644
34535--- a/drivers/cpufreq/cpufreq_governor.c
34536+++ b/drivers/cpufreq/cpufreq_governor.c
34537@@ -243,7 +243,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34538 * governor, thus we are bound to jiffes/HZ
34539 */
34540 if (dbs_data->governor == GOV_CONSERVATIVE) {
34541- struct cs_ops *ops = dbs_data->gov_ops;
34542+ const struct cs_ops *ops = dbs_data->gov_ops;
34543
34544 cpufreq_register_notifier(ops->notifier_block,
34545 CPUFREQ_TRANSITION_NOTIFIER);
34546@@ -251,7 +251,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34547 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
34548 jiffies_to_usecs(10);
34549 } else {
34550- struct od_ops *ops = dbs_data->gov_ops;
34551+ const struct od_ops *ops = dbs_data->gov_ops;
34552
34553 od_tuners->io_is_busy = ops->io_busy();
34554 }
34555@@ -268,7 +268,7 @@ second_time:
34556 cs_dbs_info->enable = 1;
34557 cs_dbs_info->requested_freq = policy->cur;
34558 } else {
34559- struct od_ops *ops = dbs_data->gov_ops;
34560+ const struct od_ops *ops = dbs_data->gov_ops;
34561 od_dbs_info->rate_mult = 1;
34562 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
34563 ops->powersave_bias_init_cpu(cpu);
34564@@ -289,7 +289,7 @@ second_time:
34565 mutex_destroy(&cpu_cdbs->timer_mutex);
34566 dbs_data->enable--;
34567 if (!dbs_data->enable) {
34568- struct cs_ops *ops = dbs_data->gov_ops;
34569+ const struct cs_ops *ops = dbs_data->gov_ops;
34570
34571 sysfs_remove_group(cpufreq_global_kobject,
34572 dbs_data->attr_group);
34573diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34574index f661654..6c8e638 100644
34575--- a/drivers/cpufreq/cpufreq_governor.h
34576+++ b/drivers/cpufreq/cpufreq_governor.h
34577@@ -142,7 +142,7 @@ struct dbs_data {
34578 void (*gov_check_cpu)(int cpu, unsigned int load);
34579
34580 /* Governor specific ops, see below */
34581- void *gov_ops;
34582+ const void *gov_ops;
34583 };
34584
34585 /* Governor specific ops, will be passed to dbs_data->gov_ops */
34586diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34587index 9d7732b..0b1a793 100644
34588--- a/drivers/cpufreq/cpufreq_stats.c
34589+++ b/drivers/cpufreq/cpufreq_stats.c
34590@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34591 }
34592
34593 /* priority=1 so this will get called before cpufreq_remove_dev */
34594-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34595+static struct notifier_block cpufreq_stat_cpu_notifier = {
34596 .notifier_call = cpufreq_stat_cpu_callback,
34597 .priority = 1,
34598 };
34599diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34600index 827629c9..0bc6a03 100644
34601--- a/drivers/cpufreq/p4-clockmod.c
34602+++ b/drivers/cpufreq/p4-clockmod.c
34603@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34604 case 0x0F: /* Core Duo */
34605 case 0x16: /* Celeron Core */
34606 case 0x1C: /* Atom */
34607- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34608+ pax_open_kernel();
34609+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34610+ pax_close_kernel();
34611 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34612 case 0x0D: /* Pentium M (Dothan) */
34613- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34614+ pax_open_kernel();
34615+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34616+ pax_close_kernel();
34617 /* fall through */
34618 case 0x09: /* Pentium M (Banias) */
34619 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34620@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34621
34622 /* on P-4s, the TSC runs with constant frequency independent whether
34623 * throttling is active or not. */
34624- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34625+ pax_open_kernel();
34626+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34627+ pax_close_kernel();
34628
34629 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34630 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34631diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34632index 3a953d5..f5993f6 100644
34633--- a/drivers/cpufreq/speedstep-centrino.c
34634+++ b/drivers/cpufreq/speedstep-centrino.c
34635@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34636 !cpu_has(cpu, X86_FEATURE_EST))
34637 return -ENODEV;
34638
34639- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34640- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34641+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34642+ pax_open_kernel();
34643+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34644+ pax_close_kernel();
34645+ }
34646
34647 if (policy->cpu != 0)
34648 return -ENODEV;
34649diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34650index e1f6860..f8de20b 100644
34651--- a/drivers/cpuidle/cpuidle.c
34652+++ b/drivers/cpuidle/cpuidle.c
34653@@ -279,7 +279,7 @@ static int poll_idle(struct cpuidle_device *dev,
34654
34655 static void poll_idle_init(struct cpuidle_driver *drv)
34656 {
34657- struct cpuidle_state *state = &drv->states[0];
34658+ cpuidle_state_no_const *state = &drv->states[0];
34659
34660 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34661 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34662diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34663index ea2f8e7..70ac501 100644
34664--- a/drivers/cpuidle/governor.c
34665+++ b/drivers/cpuidle/governor.c
34666@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34667 mutex_lock(&cpuidle_lock);
34668 if (__cpuidle_find_governor(gov->name) == NULL) {
34669 ret = 0;
34670- list_add_tail(&gov->governor_list, &cpuidle_governors);
34671+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34672 if (!cpuidle_curr_governor ||
34673 cpuidle_curr_governor->rating < gov->rating)
34674 cpuidle_switch_governor(gov);
34675@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34676 new_gov = cpuidle_replace_governor(gov->rating);
34677 cpuidle_switch_governor(new_gov);
34678 }
34679- list_del(&gov->governor_list);
34680+ pax_list_del((struct list_head *)&gov->governor_list);
34681 mutex_unlock(&cpuidle_lock);
34682 }
34683
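cpuidle governors link themselves through a list_head embedded in a structure the constify plugin has made read-only, so plain list_add_tail()/list_del() would fault; the pax_list_* replacements perform the pointer writes inside an open/close-kernel window, and the cast to struct list_head * strips the plugin-added const. A hedged sketch of what such a helper must do (name approximate, not the exact grsecurity code):

static void pax_list_add_tail_sketch(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();
	__list_add(new, head->prev, head);	/* these stores may land in RO data */
	pax_close_kernel();
}

The same substitution appears in devfreq_add_governor()/devfreq_remove_governor() below.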
34684diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34685index 428754a..8bdf9cc 100644
34686--- a/drivers/cpuidle/sysfs.c
34687+++ b/drivers/cpuidle/sysfs.c
34688@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34689 NULL
34690 };
34691
34692-static struct attribute_group cpuidle_attr_group = {
34693+static attribute_group_no_const cpuidle_attr_group = {
34694 .attrs = cpuidle_default_attrs,
34695 .name = "cpuidle",
34696 };
34697diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34698index 3b36797..289c16a 100644
34699--- a/drivers/devfreq/devfreq.c
34700+++ b/drivers/devfreq/devfreq.c
34701@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34702 goto err_out;
34703 }
34704
34705- list_add(&governor->node, &devfreq_governor_list);
34706+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34707
34708 list_for_each_entry(devfreq, &devfreq_list, node) {
34709 int ret = 0;
34710@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34711 }
34712 }
34713
34714- list_del(&governor->node);
34715+ pax_list_del((struct list_head *)&governor->node);
34716 err_out:
34717 mutex_unlock(&devfreq_list_lock);
34718
34719diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34720index b70709b..1d8d02a 100644
34721--- a/drivers/dma/sh/shdma.c
34722+++ b/drivers/dma/sh/shdma.c
34723@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34724 return ret;
34725 }
34726
34727-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34728+static struct notifier_block sh_dmae_nmi_notifier = {
34729 .notifier_call = sh_dmae_nmi_handler,
34730
34731 /* Run before NMI debug handler and KGDB */
34732diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34733index 0ca1ca7..6e6f454 100644
34734--- a/drivers/edac/edac_mc_sysfs.c
34735+++ b/drivers/edac/edac_mc_sysfs.c
34736@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34737 struct dev_ch_attribute {
34738 struct device_attribute attr;
34739 int channel;
34740-};
34741+} __do_const;
34742
34743 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34744 struct dev_ch_attribute dev_attr_legacy_##_name = \
34745diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34746index 0056c4d..23b54d9 100644
34747--- a/drivers/edac/edac_pci_sysfs.c
34748+++ b/drivers/edac/edac_pci_sysfs.c
34749@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34750 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34751 static int edac_pci_poll_msec = 1000; /* one second workq period */
34752
34753-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34754-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34755+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34756+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34757
34758 static struct kobject *edac_pci_top_main_kobj;
34759 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34760@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34761 void *value;
34762 ssize_t(*show) (void *, char *);
34763 ssize_t(*store) (void *, const char *, size_t);
34764-};
34765+} __do_const;
34766
34767 /* Set of show/store abstract level functions for PCI Parity object */
34768 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34769@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34770 edac_printk(KERN_CRIT, EDAC_PCI,
34771 "Signaled System Error on %s\n",
34772 pci_name(dev));
34773- atomic_inc(&pci_nonparity_count);
34774+ atomic_inc_unchecked(&pci_nonparity_count);
34775 }
34776
34777 if (status & (PCI_STATUS_PARITY)) {
34778@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34779 "Master Data Parity Error on %s\n",
34780 pci_name(dev));
34781
34782- atomic_inc(&pci_parity_count);
34783+ atomic_inc_unchecked(&pci_parity_count);
34784 }
34785
34786 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34787@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34788 "Detected Parity Error on %s\n",
34789 pci_name(dev));
34790
34791- atomic_inc(&pci_parity_count);
34792+ atomic_inc_unchecked(&pci_parity_count);
34793 }
34794 }
34795
34796@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34797 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34798 "Signaled System Error on %s\n",
34799 pci_name(dev));
34800- atomic_inc(&pci_nonparity_count);
34801+ atomic_inc_unchecked(&pci_nonparity_count);
34802 }
34803
34804 if (status & (PCI_STATUS_PARITY)) {
34805@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34806 "Master Data Parity Error on "
34807 "%s\n", pci_name(dev));
34808
34809- atomic_inc(&pci_parity_count);
34810+ atomic_inc_unchecked(&pci_parity_count);
34811 }
34812
34813 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34814@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34815 "Detected Parity Error on %s\n",
34816 pci_name(dev));
34817
34818- atomic_inc(&pci_parity_count);
34819+ atomic_inc_unchecked(&pci_parity_count);
34820 }
34821 }
34822 }
34823@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34824 if (!check_pci_errors)
34825 return;
34826
34827- before_count = atomic_read(&pci_parity_count);
34828+ before_count = atomic_read_unchecked(&pci_parity_count);
34829
34830 /* scan all PCI devices looking for a Parity Error on devices and
34831 * bridges.
34832@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34833 /* Only if operator has selected panic on PCI Error */
34834 if (edac_pci_get_panic_on_pe()) {
34835 /* If the count is different 'after' from 'before' */
34836- if (before_count != atomic_read(&pci_parity_count))
34837+ if (before_count != atomic_read_unchecked(&pci_parity_count))
34838 panic("EDAC: PCI Parity Error");
34839 }
34840 }
34841diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34842index 6796799..99e8377 100644
34843--- a/drivers/edac/mce_amd.h
34844+++ b/drivers/edac/mce_amd.h
34845@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
34846 struct amd_decoder_ops {
34847 bool (*mc0_mce)(u16, u8);
34848 bool (*mc1_mce)(u16, u8);
34849-};
34850+} __no_const;
34851
34852 void amd_report_gart_errors(bool);
34853 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34854diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34855index 57ea7f4..789e3c3 100644
34856--- a/drivers/firewire/core-card.c
34857+++ b/drivers/firewire/core-card.c
34858@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34859
34860 void fw_core_remove_card(struct fw_card *card)
34861 {
34862- struct fw_card_driver dummy_driver = dummy_driver_template;
34863+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34864
34865 card->driver->update_phy_reg(card, 4,
34866 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34867diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34868index f8d2287..5aaf4db 100644
34869--- a/drivers/firewire/core-cdev.c
34870+++ b/drivers/firewire/core-cdev.c
34871@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
34872 int ret;
34873
34874 if ((request->channels == 0 && request->bandwidth == 0) ||
34875- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34876- request->bandwidth < 0)
34877+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34878 return -EINVAL;
34879
34880 r = kmalloc(sizeof(*r), GFP_KERNEL);
34881diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34882index af3e8aa..eb2f227 100644
34883--- a/drivers/firewire/core-device.c
34884+++ b/drivers/firewire/core-device.c
34885@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34886 struct config_rom_attribute {
34887 struct device_attribute attr;
34888 u32 key;
34889-};
34890+} __do_const;
34891
34892 static ssize_t show_immediate(struct device *dev,
34893 struct device_attribute *dattr, char *buf)
34894diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34895index 28a94c7..58da63a 100644
34896--- a/drivers/firewire/core-transaction.c
34897+++ b/drivers/firewire/core-transaction.c
34898@@ -38,6 +38,7 @@
34899 #include <linux/timer.h>
34900 #include <linux/types.h>
34901 #include <linux/workqueue.h>
34902+#include <linux/sched.h>
34903
34904 #include <asm/byteorder.h>
34905
34906diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34907index 515a42c..5ecf3ba 100644
34908--- a/drivers/firewire/core.h
34909+++ b/drivers/firewire/core.h
34910@@ -111,6 +111,7 @@ struct fw_card_driver {
34911
34912 int (*stop_iso)(struct fw_iso_context *ctx);
34913 };
34914+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34915
34916 void fw_card_initialize(struct fw_card *card,
34917 const struct fw_card_driver *driver, struct device *device);
34918diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34919index 94a58a0..f5eba42 100644
34920--- a/drivers/firmware/dmi-id.c
34921+++ b/drivers/firmware/dmi-id.c
34922@@ -16,7 +16,7 @@
34923 struct dmi_device_attribute{
34924 struct device_attribute dev_attr;
34925 int field;
34926-};
34927+} __do_const;
34928 #define to_dmi_dev_attr(_dev_attr) \
34929 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
34930
34931diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34932index 4cd392d..4b629e1 100644
34933--- a/drivers/firmware/dmi_scan.c
34934+++ b/drivers/firmware/dmi_scan.c
34935@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
34936 }
34937 }
34938 else {
34939- /*
34940- * no iounmap() for that ioremap(); it would be a no-op, but
34941- * it's so early in setup that sucker gets confused into doing
34942- * what it shouldn't if we actually call it.
34943- */
34944 p = dmi_ioremap(0xF0000, 0x10000);
34945 if (p == NULL)
34946 goto error;
34947@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34948 if (buf == NULL)
34949 return -1;
34950
34951- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34952+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34953
34954 iounmap(buf);
34955 return 0;
34956diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34957index b07cb37..2a51037 100644
34958--- a/drivers/firmware/efivars.c
34959+++ b/drivers/firmware/efivars.c
34960@@ -138,7 +138,7 @@ struct efivar_attribute {
34961 };
34962
34963 static struct efivars __efivars;
34964-static struct efivar_operations ops;
34965+static efivar_operations_no_const ops __read_only;
34966
34967 #define PSTORE_EFI_ATTRIBUTES \
34968 (EFI_VARIABLE_NON_VOLATILE | \
34969@@ -1834,7 +1834,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
34970 static int
34971 create_efivars_bin_attributes(struct efivars *efivars)
34972 {
34973- struct bin_attribute *attr;
34974+ bin_attribute_no_const *attr;
34975 int error;
34976
34977 /* new_var */
34978diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
34979index 2a90ba6..07f3733 100644
34980--- a/drivers/firmware/google/memconsole.c
34981+++ b/drivers/firmware/google/memconsole.c
34982@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
34983 if (!found_memconsole())
34984 return -ENODEV;
34985
34986- memconsole_bin_attr.size = memconsole_length;
34987+ pax_open_kernel();
34988+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
34989+ pax_close_kernel();
34990
34991 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
34992
34993diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
34994index 6f2306d..af9476a 100644
34995--- a/drivers/gpio/gpio-ich.c
34996+++ b/drivers/gpio/gpio-ich.c
34997@@ -69,7 +69,7 @@ struct ichx_desc {
34998 /* Some chipsets have quirks, let these use their own request/get */
34999 int (*request)(struct gpio_chip *chip, unsigned offset);
35000 int (*get)(struct gpio_chip *chip, unsigned offset);
35001-};
35002+} __do_const;
35003
35004 static struct {
35005 spinlock_t lock;
35006diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
35007index 9902732..64b62dd 100644
35008--- a/drivers/gpio/gpio-vr41xx.c
35009+++ b/drivers/gpio/gpio-vr41xx.c
35010@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
35011 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
35012 maskl, pendl, maskh, pendh);
35013
35014- atomic_inc(&irq_err_count);
35015+ atomic_inc_unchecked(&irq_err_count);
35016
35017 return -EINVAL;
35018 }
35019diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
35020index 7b2d378..cc947ea 100644
35021--- a/drivers/gpu/drm/drm_crtc_helper.c
35022+++ b/drivers/gpu/drm/drm_crtc_helper.c
35023@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
35024 struct drm_crtc *tmp;
35025 int crtc_mask = 1;
35026
35027- WARN(!crtc, "checking null crtc?\n");
35028+ BUG_ON(!crtc);
35029
35030 dev = crtc->dev;
35031
35032diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
35033index be174ca..7f38143 100644
35034--- a/drivers/gpu/drm/drm_drv.c
35035+++ b/drivers/gpu/drm/drm_drv.c
35036@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
35037 /**
35038 * Copy and IOCTL return string to user space
35039 */
35040-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
35041+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
35042 {
35043 int len;
35044
35045@@ -377,7 +377,7 @@ long drm_ioctl(struct file *filp,
35046 struct drm_file *file_priv = filp->private_data;
35047 struct drm_device *dev;
35048 struct drm_ioctl_desc *ioctl;
35049- drm_ioctl_t *func;
35050+ drm_ioctl_no_const_t func;
35051 unsigned int nr = DRM_IOCTL_NR(cmd);
35052 int retcode = -EINVAL;
35053 char stack_kdata[128];
35054@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
35055 return -ENODEV;
35056
35057 atomic_inc(&dev->ioctl_count);
35058- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
35059+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
35060 ++file_priv->ioctl_count;
35061
35062 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
35063diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
35064index 133b413..fd68225 100644
35065--- a/drivers/gpu/drm/drm_fops.c
35066+++ b/drivers/gpu/drm/drm_fops.c
35067@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
35068 }
35069
35070 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
35071- atomic_set(&dev->counts[i], 0);
35072+ atomic_set_unchecked(&dev->counts[i], 0);
35073
35074 dev->sigdata.lock = NULL;
35075
35076@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
35077 if (drm_device_is_unplugged(dev))
35078 return -ENODEV;
35079
35080- if (!dev->open_count++)
35081+ if (local_inc_return(&dev->open_count) == 1)
35082 need_setup = 1;
35083 mutex_lock(&dev->struct_mutex);
35084 old_mapping = dev->dev_mapping;
35085@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
35086 retcode = drm_open_helper(inode, filp, dev);
35087 if (retcode)
35088 goto err_undo;
35089- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
35090+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
35091 if (need_setup) {
35092 retcode = drm_setup(dev);
35093 if (retcode)
35094@@ -164,7 +164,7 @@ err_undo:
35095 iput(container_of(dev->dev_mapping, struct inode, i_data));
35096 dev->dev_mapping = old_mapping;
35097 mutex_unlock(&dev->struct_mutex);
35098- dev->open_count--;
35099+ local_dec(&dev->open_count);
35100 return retcode;
35101 }
35102 EXPORT_SYMBOL(drm_open);
35103@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
35104
35105 mutex_lock(&drm_global_mutex);
35106
35107- DRM_DEBUG("open_count = %d\n", dev->open_count);
35108+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
35109
35110 if (dev->driver->preclose)
35111 dev->driver->preclose(dev, file_priv);
35112@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
35113 * Begin inline drm_release
35114 */
35115
35116- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
35117+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
35118 task_pid_nr(current),
35119 (long)old_encode_dev(file_priv->minor->device),
35120- dev->open_count);
35121+ local_read(&dev->open_count));
35122
35123 /* Release any auth tokens that might point to this file_priv,
35124 (do that under the drm_global_mutex) */
35125@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
35126 * End inline drm_release
35127 */
35128
35129- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
35130- if (!--dev->open_count) {
35131+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
35132+ if (local_dec_and_test(&dev->open_count)) {
35133 if (atomic_read(&dev->ioctl_count)) {
35134 DRM_ERROR("Device busy: %d\n",
35135 atomic_read(&dev->ioctl_count));
35136diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
35137index f731116..629842c 100644
35138--- a/drivers/gpu/drm/drm_global.c
35139+++ b/drivers/gpu/drm/drm_global.c
35140@@ -36,7 +36,7 @@
35141 struct drm_global_item {
35142 struct mutex mutex;
35143 void *object;
35144- int refcount;
35145+ atomic_t refcount;
35146 };
35147
35148 static struct drm_global_item glob[DRM_GLOBAL_NUM];
35149@@ -49,7 +49,7 @@ void drm_global_init(void)
35150 struct drm_global_item *item = &glob[i];
35151 mutex_init(&item->mutex);
35152 item->object = NULL;
35153- item->refcount = 0;
35154+ atomic_set(&item->refcount, 0);
35155 }
35156 }
35157
35158@@ -59,7 +59,7 @@ void drm_global_release(void)
35159 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
35160 struct drm_global_item *item = &glob[i];
35161 BUG_ON(item->object != NULL);
35162- BUG_ON(item->refcount != 0);
35163+ BUG_ON(atomic_read(&item->refcount) != 0);
35164 }
35165 }
35166
35167@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35168 void *object;
35169
35170 mutex_lock(&item->mutex);
35171- if (item->refcount == 0) {
35172+ if (atomic_read(&item->refcount) == 0) {
35173 item->object = kzalloc(ref->size, GFP_KERNEL);
35174 if (unlikely(item->object == NULL)) {
35175 ret = -ENOMEM;
35176@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35177 goto out_err;
35178
35179 }
35180- ++item->refcount;
35181+ atomic_inc(&item->refcount);
35182 ref->object = item->object;
35183 object = item->object;
35184 mutex_unlock(&item->mutex);
35185@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
35186 struct drm_global_item *item = &glob[ref->global_type];
35187
35188 mutex_lock(&item->mutex);
35189- BUG_ON(item->refcount == 0);
35190+ BUG_ON(atomic_read(&item->refcount) == 0);
35191 BUG_ON(ref->object != item->object);
35192- if (--item->refcount == 0) {
35193+ if (atomic_dec_and_test(&item->refcount)) {
35194 ref->release(ref);
35195 item->object = NULL;
35196 }
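drm_global_item's refcount was a plain int; making it atomic_t is less about locking, since item->mutex already serializes the users, than about coverage: as an atomic_t it participates in REFCOUNT overflow trapping, and the --refcount == 0 release test becomes atomic_dec_and_test(). Before/after shape (a sketch using the hunk's field names):

struct item {
	struct mutex mutex;
	void *object;
	atomic_t refcount;		/* was: int refcount */
};

static void item_ref_sketch(struct item *it)
{
	atomic_inc(&it->refcount);	/* overflow-checked under REFCOUNT */
}

static void item_unref_sketch(struct item *it)
{
	if (atomic_dec_and_test(&it->refcount))
		it->object = NULL;	/* last reference dropped */
}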
35197diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
35198index d4b20ce..77a8d41 100644
35199--- a/drivers/gpu/drm/drm_info.c
35200+++ b/drivers/gpu/drm/drm_info.c
35201@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
35202 struct drm_local_map *map;
35203 struct drm_map_list *r_list;
35204
35205- /* Hardcoded from _DRM_FRAME_BUFFER,
35206- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
35207- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
35208- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
35209+ static const char * const types[] = {
35210+ [_DRM_FRAME_BUFFER] = "FB",
35211+ [_DRM_REGISTERS] = "REG",
35212+ [_DRM_SHM] = "SHM",
35213+ [_DRM_AGP] = "AGP",
35214+ [_DRM_SCATTER_GATHER] = "SG",
35215+ [_DRM_CONSISTENT] = "PCI",
35216+ [_DRM_GEM] = "GEM" };
35217 const char *type;
35218 int i;
35219
35220@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
35221 map = r_list->map;
35222 if (!map)
35223 continue;
35224- if (map->type < 0 || map->type > 5)
35225+ if (map->type >= ARRAY_SIZE(types))
35226 type = "??";
35227 else
35228 type = types[map->type];
35229@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
35230 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
35231 vma->vm_flags & VM_LOCKED ? 'l' : '-',
35232 vma->vm_flags & VM_IO ? 'i' : '-',
35233+#ifdef CONFIG_GRKERNSEC_HIDESYM
35234+ 0);
35235+#else
35236 vma->vm_pgoff);
35237+#endif
35238
35239 #if defined(__i386__)
35240 pgprot = pgprot_val(vma->vm_page_prot);
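
[Editor's note] The drm_info.c hunk replaces a bare string array and a hard-coded "type > 5" bound with designated initializers indexed by the _DRM_* map types plus an ARRAY_SIZE() check, so the table and its bound can no longer drift apart (the old table had no entry for _DRM_GEM at all). A minimal sketch of the same pattern, with invented enum values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_GEM };

static const char * const type_names[] = {
    [MAP_FB]  = "FB",
    [MAP_REG] = "REG",
    [MAP_SHM] = "SHM",
    [MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int type)
{
    /* bound derived from the table itself; NULL catches gaps */
    if (type >= ARRAY_SIZE(type_names) || !type_names[type])
        return "??";
    return type_names[type];
}

int main(void)
{
    printf("%s %s\n", type_name(MAP_GEM), type_name(42));
    return 0;
}
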
35241diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
35242index 2f4c434..dd12cd2 100644
35243--- a/drivers/gpu/drm/drm_ioc32.c
35244+++ b/drivers/gpu/drm/drm_ioc32.c
35245@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
35246 request = compat_alloc_user_space(nbytes);
35247 if (!access_ok(VERIFY_WRITE, request, nbytes))
35248 return -EFAULT;
35249- list = (struct drm_buf_desc *) (request + 1);
35250+ list = (struct drm_buf_desc __user *) (request + 1);
35251
35252 if (__put_user(count, &request->count)
35253 || __put_user(list, &request->list))
35254@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
35255 request = compat_alloc_user_space(nbytes);
35256 if (!access_ok(VERIFY_WRITE, request, nbytes))
35257 return -EFAULT;
35258- list = (struct drm_buf_pub *) (request + 1);
35259+ list = (struct drm_buf_pub __user *) (request + 1);
35260
35261 if (__put_user(count, &request->count)
35262 || __put_user(list, &request->list))
35263@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
35264 return 0;
35265 }
35266
35267-drm_ioctl_compat_t *drm_compat_ioctls[] = {
35268+drm_ioctl_compat_t drm_compat_ioctls[] = {
35269 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
35270 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
35271 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
35272@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
35273 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35274 {
35275 unsigned int nr = DRM_IOCTL_NR(cmd);
35276- drm_ioctl_compat_t *fn;
35277 int ret;
35278
35279 /* Assume that ioctls without an explicit compat routine will just
35280@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35281 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
35282 return drm_ioctl(filp, cmd, arg);
35283
35284- fn = drm_compat_ioctls[nr];
35285-
35286- if (fn != NULL)
35287- ret = (*fn) (filp, cmd, arg);
35288+ if (drm_compat_ioctls[nr] != NULL)
35289+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
35290 else
35291 ret = drm_ioctl(filp, cmd, arg);
35292
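
[Editor's note] Elsewhere in this patch drm_ioctl_compat_t appears to be redefined from a function type to a function-pointer type, so the table above holds pointer values directly and the writable temporary "fn" can be dropped; the apparent goal is to let such dispatch tables be placed in read-only memory by the constification machinery. That reading is an inference, not something visible in this hunk. As a generic sketch of a hardened, bounds-checked dispatch table (all names invented):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

typedef long (*compat_fn)(unsigned int cmd, unsigned long arg);

static long do_version(unsigned int cmd, unsigned long arg) { (void)cmd; (void)arg; return 0; }
static long do_unique(unsigned int cmd, unsigned long arg)  { (void)cmd; (void)arg; return 0; }

/* const: the table lands in .rodata, so a kernel write primitive
 * cannot redirect the handlers. */
static const compat_fn compat_table[] = {
    do_version,
    do_unique,
};

static long dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
{
    if (nr >= ARRAY_SIZE(compat_table) || !compat_table[nr])
        return -1;      /* fall back, -ENOTTY territory in the real code */
    return compat_table[nr](cmd, arg);
}

int main(void)
{
    printf("%ld\n", dispatch(0, 0, 0));
    return 0;
}
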
35293diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
35294index e77bd8b..1571b85 100644
35295--- a/drivers/gpu/drm/drm_ioctl.c
35296+++ b/drivers/gpu/drm/drm_ioctl.c
35297@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
35298 stats->data[i].value =
35299 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
35300 else
35301- stats->data[i].value = atomic_read(&dev->counts[i]);
35302+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
35303 stats->data[i].type = dev->types[i];
35304 }
35305
35306diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
35307index d752c96..fe08455 100644
35308--- a/drivers/gpu/drm/drm_lock.c
35309+++ b/drivers/gpu/drm/drm_lock.c
35310@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35311 if (drm_lock_take(&master->lock, lock->context)) {
35312 master->lock.file_priv = file_priv;
35313 master->lock.lock_time = jiffies;
35314- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
35315+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
35316 break; /* Got lock */
35317 }
35318
35319@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35320 return -EINVAL;
35321 }
35322
35323- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
35324+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
35325
35326 if (drm_lock_free(&master->lock, lock->context)) {
35327 /* FIXME: Should really bail out here. */
35328diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
35329index 200e104..59facda 100644
35330--- a/drivers/gpu/drm/drm_stub.c
35331+++ b/drivers/gpu/drm/drm_stub.c
35332@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
35333
35334 drm_device_set_unplugged(dev);
35335
35336- if (dev->open_count == 0) {
35337+ if (local_read(&dev->open_count) == 0) {
35338 drm_put_dev(dev);
35339 }
35340 mutex_unlock(&drm_global_mutex);
35341diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35342index 004ecdf..db1f6e0 100644
35343--- a/drivers/gpu/drm/i810/i810_dma.c
35344+++ b/drivers/gpu/drm/i810/i810_dma.c
35345@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35346 dma->buflist[vertex->idx],
35347 vertex->discard, vertex->used);
35348
35349- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35350- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35351+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35352+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35353 sarea_priv->last_enqueue = dev_priv->counter - 1;
35354 sarea_priv->last_dispatch = (int)hw_status[5];
35355
35356@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35357 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35358 mc->last_render);
35359
35360- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35361- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35362+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35363+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35364 sarea_priv->last_enqueue = dev_priv->counter - 1;
35365 sarea_priv->last_dispatch = (int)hw_status[5];
35366
35367diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35368index 6e0acad..93c8289 100644
35369--- a/drivers/gpu/drm/i810/i810_drv.h
35370+++ b/drivers/gpu/drm/i810/i810_drv.h
35371@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35372 int page_flipping;
35373
35374 wait_queue_head_t irq_queue;
35375- atomic_t irq_received;
35376- atomic_t irq_emitted;
35377+ atomic_unchecked_t irq_received;
35378+ atomic_unchecked_t irq_emitted;
35379
35380 int front_offset;
35381 } drm_i810_private_t;
35382diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35383index 261efc8e..27af8a5 100644
35384--- a/drivers/gpu/drm/i915/i915_debugfs.c
35385+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35386@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35387 I915_READ(GTIMR));
35388 }
35389 seq_printf(m, "Interrupts received: %d\n",
35390- atomic_read(&dev_priv->irq_received));
35391+ atomic_read_unchecked(&dev_priv->irq_received));
35392 for_each_ring(ring, dev_priv, i) {
35393 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35394 seq_printf(m,
35395diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35396index 99daa89..84ebd44 100644
35397--- a/drivers/gpu/drm/i915/i915_dma.c
35398+++ b/drivers/gpu/drm/i915/i915_dma.c
35399@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35400 bool can_switch;
35401
35402 spin_lock(&dev->count_lock);
35403- can_switch = (dev->open_count == 0);
35404+ can_switch = (local_read(&dev->open_count) == 0);
35405 spin_unlock(&dev->count_lock);
35406 return can_switch;
35407 }
35408diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35409index 7339a4b..445aaba 100644
35410--- a/drivers/gpu/drm/i915/i915_drv.h
35411+++ b/drivers/gpu/drm/i915/i915_drv.h
35412@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
35413 drm_dma_handle_t *status_page_dmah;
35414 struct resource mch_res;
35415
35416- atomic_t irq_received;
35417+ atomic_unchecked_t irq_received;
35418
35419 /* protects the irq masks */
35420 spinlock_t irq_lock;
35421@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
35422 * will be page flipped away on the next vblank. When it
35423 * reaches 0, dev_priv->pending_flip_queue will be woken up.
35424 */
35425- atomic_t pending_flip;
35426+ atomic_unchecked_t pending_flip;
35427 };
35428 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
35429
35430@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35431 struct drm_i915_private *dev_priv, unsigned port);
35432 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35433 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35434-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35435+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35436 {
35437 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35438 }
35439diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35440index 7adf5a7..e24fb51 100644
35441--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35442+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35443@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
35444 i915_gem_clflush_object(obj);
35445
35446 if (obj->base.pending_write_domain)
35447- flips |= atomic_read(&obj->pending_flip);
35448+ flips |= atomic_read_unchecked(&obj->pending_flip);
35449
35450 flush_domains |= obj->base.write_domain;
35451 }
35452@@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35453
35454 static int
35455 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35456- int count)
35457+ unsigned int count)
35458 {
35459- int i;
35460+ unsigned int i;
35461 int relocs_total = 0;
35462 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35463
35464@@ -1202,7 +1202,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
35465 return -ENOMEM;
35466 }
35467 ret = copy_from_user(exec2_list,
35468- (struct drm_i915_relocation_entry __user *)
35469+ (struct drm_i915_gem_exec_object2 __user *)
35470 (uintptr_t) args->buffers_ptr,
35471 sizeof(*exec2_list) * args->buffer_count);
35472 if (ret != 0) {
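
[Editor's note] validate_exec_list()'s count parameter derives from a user-controlled buffer_count; making it (and the loop index) unsigned removes the case where a negative int slips past upper-bound comparisons and is later reinterpreted as a huge size. A userspace sketch of the signed/unsigned pitfall, with invented numbers:

#include <stdio.h>

#define LIMIT 1024

static int validate_signed(int count)
{
    return count < LIMIT;           /* -1 passes! */
}

static int validate_unsigned(unsigned int count)
{
    return count < LIMIT;           /* 0xffffffff is rejected */
}

int main(void)
{
    int evil = -1;

    printf("signed:   %s\n", validate_signed(evil) ? "accepted" : "rejected");
    printf("unsigned: %s\n",
           validate_unsigned((unsigned int)evil) ? "accepted" : "rejected");
    return 0;
}
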
35473diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35474index 3c59584..500f2e9 100644
35475--- a/drivers/gpu/drm/i915/i915_ioc32.c
35476+++ b/drivers/gpu/drm/i915/i915_ioc32.c
35477@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35478 (unsigned long)request);
35479 }
35480
35481-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35482+static drm_ioctl_compat_t i915_compat_ioctls[] = {
35483 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35484 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35485 [DRM_I915_GETPARAM] = compat_i915_getparam,
35486@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35487 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35488 {
35489 unsigned int nr = DRM_IOCTL_NR(cmd);
35490- drm_ioctl_compat_t *fn = NULL;
35491 int ret;
35492
35493 if (nr < DRM_COMMAND_BASE)
35494 return drm_compat_ioctl(filp, cmd, arg);
35495
35496- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35497- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35498-
35499- if (fn != NULL)
35500+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35501+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35502 ret = (*fn) (filp, cmd, arg);
35503- else
35504+ } else
35505 ret = drm_ioctl(filp, cmd, arg);
35506
35507 return ret;
35508diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35509index fe84338..a863190 100644
35510--- a/drivers/gpu/drm/i915/i915_irq.c
35511+++ b/drivers/gpu/drm/i915/i915_irq.c
35512@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35513 u32 pipe_stats[I915_MAX_PIPES];
35514 bool blc_event;
35515
35516- atomic_inc(&dev_priv->irq_received);
35517+ atomic_inc_unchecked(&dev_priv->irq_received);
35518
35519 while (true) {
35520 iir = I915_READ(VLV_IIR);
35521@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35522 irqreturn_t ret = IRQ_NONE;
35523 int i;
35524
35525- atomic_inc(&dev_priv->irq_received);
35526+ atomic_inc_unchecked(&dev_priv->irq_received);
35527
35528 /* disable master interrupt before clearing iir */
35529 de_ier = I915_READ(DEIER);
35530@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35531 int ret = IRQ_NONE;
35532 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
35533
35534- atomic_inc(&dev_priv->irq_received);
35535+ atomic_inc_unchecked(&dev_priv->irq_received);
35536
35537 /* disable master interrupt before clearing iir */
35538 de_ier = I915_READ(DEIER);
35539@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35540 {
35541 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35542
35543- atomic_set(&dev_priv->irq_received, 0);
35544+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35545
35546 I915_WRITE(HWSTAM, 0xeffe);
35547
35548@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35549 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35550 int pipe;
35551
35552- atomic_set(&dev_priv->irq_received, 0);
35553+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35554
35555 /* VLV magic */
35556 I915_WRITE(VLV_IMR, 0);
35557@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35558 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35559 int pipe;
35560
35561- atomic_set(&dev_priv->irq_received, 0);
35562+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35563
35564 for_each_pipe(pipe)
35565 I915_WRITE(PIPESTAT(pipe), 0);
35566@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35567 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35568 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35569
35570- atomic_inc(&dev_priv->irq_received);
35571+ atomic_inc_unchecked(&dev_priv->irq_received);
35572
35573 iir = I915_READ16(IIR);
35574 if (iir == 0)
35575@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35576 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35577 int pipe;
35578
35579- atomic_set(&dev_priv->irq_received, 0);
35580+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35581
35582 if (I915_HAS_HOTPLUG(dev)) {
35583 I915_WRITE(PORT_HOTPLUG_EN, 0);
35584@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35585 };
35586 int pipe, ret = IRQ_NONE;
35587
35588- atomic_inc(&dev_priv->irq_received);
35589+ atomic_inc_unchecked(&dev_priv->irq_received);
35590
35591 iir = I915_READ(IIR);
35592 do {
35593@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35594 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35595 int pipe;
35596
35597- atomic_set(&dev_priv->irq_received, 0);
35598+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35599
35600 I915_WRITE(PORT_HOTPLUG_EN, 0);
35601 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35602@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35603 int irq_received;
35604 int ret = IRQ_NONE, pipe;
35605
35606- atomic_inc(&dev_priv->irq_received);
35607+ atomic_inc_unchecked(&dev_priv->irq_received);
35608
35609 iir = I915_READ(IIR);
35610
35611diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35612index e6e4df7..6a9a1bd 100644
35613--- a/drivers/gpu/drm/i915/intel_display.c
35614+++ b/drivers/gpu/drm/i915/intel_display.c
35615@@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
35616
35617 wait_event(dev_priv->pending_flip_queue,
35618 atomic_read(&dev_priv->mm.wedged) ||
35619- atomic_read(&obj->pending_flip) == 0);
35620+ atomic_read_unchecked(&obj->pending_flip) == 0);
35621
35622 /* Big Hammer, we also need to ensure that any pending
35623 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
35624@@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
35625
35626 obj = work->old_fb_obj;
35627
35628- atomic_clear_mask(1 << intel_crtc->plane,
35629- &obj->pending_flip.counter);
35630+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
35631 wake_up(&dev_priv->pending_flip_queue);
35632
35633 queue_work(dev_priv->wq, &work->work);
35634@@ -7486,7 +7485,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35635 /* Block clients from rendering to the new back buffer until
35636 * the flip occurs and the object is no longer visible.
35637 */
35638- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35639+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35640 atomic_inc(&intel_crtc->unpin_work_count);
35641
35642 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
35643@@ -7504,7 +7503,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35644 cleanup_pending:
35645 atomic_dec(&intel_crtc->unpin_work_count);
35646 crtc->fb = old_fb;
35647- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35648+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35649 drm_gem_object_unreference(&work->old_fb_obj->base);
35650 drm_gem_object_unreference(&obj->base);
35651 mutex_unlock(&dev->struct_mutex);
35652@@ -8846,13 +8845,13 @@ struct intel_quirk {
35653 int subsystem_vendor;
35654 int subsystem_device;
35655 void (*hook)(struct drm_device *dev);
35656-};
35657+} __do_const;
35658
35659 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35660 struct intel_dmi_quirk {
35661 void (*hook)(struct drm_device *dev);
35662 const struct dmi_system_id (*dmi_id_list)[];
35663-};
35664+} __do_const;
35665
35666 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35667 {
35668@@ -8860,18 +8859,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35669 return 1;
35670 }
35671
35672+static const struct dmi_system_id intel_dmi_quirks_table[] = {
35673+ {
35674+ .callback = intel_dmi_reverse_brightness,
35675+ .ident = "NCR Corporation",
35676+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35677+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
35678+ },
35679+ },
35680+ { } /* terminating entry */
35681+};
35682+
35683 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35684 {
35685- .dmi_id_list = &(const struct dmi_system_id[]) {
35686- {
35687- .callback = intel_dmi_reverse_brightness,
35688- .ident = "NCR Corporation",
35689- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35690- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35691- },
35692- },
35693- { } /* terminating entry */
35694- },
35695+ .dmi_id_list = &intel_dmi_quirks_table,
35696 .hook = quirk_invert_brightness,
35697 },
35698 };
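
[Editor's note] __do_const is an annotation consumed by grsecurity's constify GCC plugin: structure types consisting essentially of function pointers are forced into read-only memory so they cannot be overwritten to hijack control flow. The same hunk hoists the anonymous compound-literal DMI table into a named static const array for the same reason. A plain-C sketch of the underlying idea (the plugin enforces what "static const" expresses voluntarily here):

#include <stdio.h>

struct quirk_ops {
    void (*hook)(void);
};

static void invert_brightness(void) { puts("quirk applied"); }

/* 'static const' places the table in .rodata; __do_const in the
 * patch makes the compiler enforce this for every instance of the
 * struct type. */
static const struct quirk_ops quirks[] = {
    { .hook = invert_brightness },
};

int main(void)
{
    quirks[0].hook();
    return 0;
}
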
35699diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35700index 54558a0..2d97005 100644
35701--- a/drivers/gpu/drm/mga/mga_drv.h
35702+++ b/drivers/gpu/drm/mga/mga_drv.h
35703@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35704 u32 clear_cmd;
35705 u32 maccess;
35706
35707- atomic_t vbl_received; /**< Number of vblanks received. */
35708+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35709 wait_queue_head_t fence_queue;
35710- atomic_t last_fence_retired;
35711+ atomic_unchecked_t last_fence_retired;
35712 u32 next_fence_to_post;
35713
35714 unsigned int fb_cpp;
35715diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35716index 709e90d..89a1c0d 100644
35717--- a/drivers/gpu/drm/mga/mga_ioc32.c
35718+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35719@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35720 return 0;
35721 }
35722
35723-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35724+drm_ioctl_compat_t mga_compat_ioctls[] = {
35725 [DRM_MGA_INIT] = compat_mga_init,
35726 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35727 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35728@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35729 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35730 {
35731 unsigned int nr = DRM_IOCTL_NR(cmd);
35732- drm_ioctl_compat_t *fn = NULL;
35733 int ret;
35734
35735 if (nr < DRM_COMMAND_BASE)
35736 return drm_compat_ioctl(filp, cmd, arg);
35737
35738- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35739- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35740-
35741- if (fn != NULL)
35742+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35743+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35744 ret = (*fn) (filp, cmd, arg);
35745- else
35746+ } else
35747 ret = drm_ioctl(filp, cmd, arg);
35748
35749 return ret;
35750diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35751index 598c281..60d590e 100644
35752--- a/drivers/gpu/drm/mga/mga_irq.c
35753+++ b/drivers/gpu/drm/mga/mga_irq.c
35754@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35755 if (crtc != 0)
35756 return 0;
35757
35758- return atomic_read(&dev_priv->vbl_received);
35759+ return atomic_read_unchecked(&dev_priv->vbl_received);
35760 }
35761
35762
35763@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35764 /* VBLANK interrupt */
35765 if (status & MGA_VLINEPEN) {
35766 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35767- atomic_inc(&dev_priv->vbl_received);
35768+ atomic_inc_unchecked(&dev_priv->vbl_received);
35769 drm_handle_vblank(dev, 0);
35770 handled = 1;
35771 }
35772@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35773 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35774 MGA_WRITE(MGA_PRIMEND, prim_end);
35775
35776- atomic_inc(&dev_priv->last_fence_retired);
35777+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35778 DRM_WAKEUP(&dev_priv->fence_queue);
35779 handled = 1;
35780 }
35781@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35782 * using fences.
35783 */
35784 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35785- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35786+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35787 - *sequence) <= (1 << 23)));
35788
35789 *sequence = cur_fence;
35790diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35791index 865eddf..62c4cc3 100644
35792--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35793+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35794@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35795 struct bit_table {
35796 const char id;
35797 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35798-};
35799+} __no_const;
35800
35801 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35802
35803diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35804index aa89eb9..d45d38b 100644
35805--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35806+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35807@@ -80,7 +80,7 @@ struct nouveau_drm {
35808 struct drm_global_reference mem_global_ref;
35809 struct ttm_bo_global_ref bo_global_ref;
35810 struct ttm_bo_device bdev;
35811- atomic_t validate_sequence;
35812+ atomic_unchecked_t validate_sequence;
35813 int (*move)(struct nouveau_channel *,
35814 struct ttm_buffer_object *,
35815 struct ttm_mem_reg *, struct ttm_mem_reg *);
35816diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
35817index cdb83ac..27f0a16 100644
35818--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
35819+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
35820@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
35821 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
35822 struct nouveau_channel *);
35823 u32 (*read)(struct nouveau_channel *);
35824-};
35825+} __no_const;
35826
35827 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
35828
35829diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35830index 8bf695c..9fbc90a 100644
35831--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35832+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35833@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35834 int trycnt = 0;
35835 int ret, i;
35836
35837- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35838+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35839 retry:
35840 if (++trycnt > 100000) {
35841 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
35842diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35843index 08214bc..9208577 100644
35844--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35845+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35846@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35847 unsigned long arg)
35848 {
35849 unsigned int nr = DRM_IOCTL_NR(cmd);
35850- drm_ioctl_compat_t *fn = NULL;
35851+ drm_ioctl_compat_t fn = NULL;
35852 int ret;
35853
35854 if (nr < DRM_COMMAND_BASE)
35855diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35856index 25d3495..d81aaf6 100644
35857--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35858+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35859@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35860 bool can_switch;
35861
35862 spin_lock(&dev->count_lock);
35863- can_switch = (dev->open_count == 0);
35864+ can_switch = (local_read(&dev->open_count) == 0);
35865 spin_unlock(&dev->count_lock);
35866 return can_switch;
35867 }
35868diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35869index d4660cf..70dbe65 100644
35870--- a/drivers/gpu/drm/r128/r128_cce.c
35871+++ b/drivers/gpu/drm/r128/r128_cce.c
35872@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35873
35874 /* GH: Simple idle check.
35875 */
35876- atomic_set(&dev_priv->idle_count, 0);
35877+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35878
35879 /* We don't support anything other than bus-mastering ring mode,
35880 * but the ring can be in either AGP or PCI space for the ring
35881diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35882index 930c71b..499aded 100644
35883--- a/drivers/gpu/drm/r128/r128_drv.h
35884+++ b/drivers/gpu/drm/r128/r128_drv.h
35885@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35886 int is_pci;
35887 unsigned long cce_buffers_offset;
35888
35889- atomic_t idle_count;
35890+ atomic_unchecked_t idle_count;
35891
35892 int page_flipping;
35893 int current_page;
35894 u32 crtc_offset;
35895 u32 crtc_offset_cntl;
35896
35897- atomic_t vbl_received;
35898+ atomic_unchecked_t vbl_received;
35899
35900 u32 color_fmt;
35901 unsigned int front_offset;
35902diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35903index a954c54..9cc595c 100644
35904--- a/drivers/gpu/drm/r128/r128_ioc32.c
35905+++ b/drivers/gpu/drm/r128/r128_ioc32.c
35906@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35907 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35908 }
35909
35910-drm_ioctl_compat_t *r128_compat_ioctls[] = {
35911+drm_ioctl_compat_t r128_compat_ioctls[] = {
35912 [DRM_R128_INIT] = compat_r128_init,
35913 [DRM_R128_DEPTH] = compat_r128_depth,
35914 [DRM_R128_STIPPLE] = compat_r128_stipple,
35915@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35916 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35917 {
35918 unsigned int nr = DRM_IOCTL_NR(cmd);
35919- drm_ioctl_compat_t *fn = NULL;
35920 int ret;
35921
35922 if (nr < DRM_COMMAND_BASE)
35923 return drm_compat_ioctl(filp, cmd, arg);
35924
35925- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35926- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35927-
35928- if (fn != NULL)
35929+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35930+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35931 ret = (*fn) (filp, cmd, arg);
35932- else
35933+ } else
35934 ret = drm_ioctl(filp, cmd, arg);
35935
35936 return ret;
35937diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35938index 2ea4f09..d391371 100644
35939--- a/drivers/gpu/drm/r128/r128_irq.c
35940+++ b/drivers/gpu/drm/r128/r128_irq.c
35941@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35942 if (crtc != 0)
35943 return 0;
35944
35945- return atomic_read(&dev_priv->vbl_received);
35946+ return atomic_read_unchecked(&dev_priv->vbl_received);
35947 }
35948
35949 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35950@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35951 /* VBLANK interrupt */
35952 if (status & R128_CRTC_VBLANK_INT) {
35953 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35954- atomic_inc(&dev_priv->vbl_received);
35955+ atomic_inc_unchecked(&dev_priv->vbl_received);
35956 drm_handle_vblank(dev, 0);
35957 return IRQ_HANDLED;
35958 }
35959diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35960index 19bb7e6..de7e2a2 100644
35961--- a/drivers/gpu/drm/r128/r128_state.c
35962+++ b/drivers/gpu/drm/r128/r128_state.c
35963@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
35964
35965 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
35966 {
35967- if (atomic_read(&dev_priv->idle_count) == 0)
35968+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
35969 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35970 else
35971- atomic_set(&dev_priv->idle_count, 0);
35972+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35973 }
35974
35975 #endif
35976diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35977index 5a82b6b..9e69c73 100644
35978--- a/drivers/gpu/drm/radeon/mkregtable.c
35979+++ b/drivers/gpu/drm/radeon/mkregtable.c
35980@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35981 regex_t mask_rex;
35982 regmatch_t match[4];
35983 char buf[1024];
35984- size_t end;
35985+ long end;
35986 int len;
35987 int done = 0;
35988 int r;
35989 unsigned o;
35990 struct offset *offset;
35991 char last_reg_s[10];
35992- int last_reg;
35993+ unsigned long last_reg;
35994
35995 if (regcomp
35996 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35997diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
35998index 0d6562b..a154330 100644
35999--- a/drivers/gpu/drm/radeon/radeon_device.c
36000+++ b/drivers/gpu/drm/radeon/radeon_device.c
36001@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
36002 bool can_switch;
36003
36004 spin_lock(&dev->count_lock);
36005- can_switch = (dev->open_count == 0);
36006+ can_switch = (local_read(&dev->open_count) == 0);
36007 spin_unlock(&dev->count_lock);
36008 return can_switch;
36009 }
36010diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
36011index e7fdf16..f4f6490 100644
36012--- a/drivers/gpu/drm/radeon/radeon_drv.h
36013+++ b/drivers/gpu/drm/radeon/radeon_drv.h
36014@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
36015
36016 /* SW interrupt */
36017 wait_queue_head_t swi_queue;
36018- atomic_t swi_emitted;
36019+ atomic_unchecked_t swi_emitted;
36020 int vblank_crtc;
36021 uint32_t irq_enable_reg;
36022 uint32_t r500_disp_irq_reg;
36023diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
36024index c180df8..5fd8186 100644
36025--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
36026+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
36027@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36028 request = compat_alloc_user_space(sizeof(*request));
36029 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
36030 || __put_user(req32.param, &request->param)
36031- || __put_user((void __user *)(unsigned long)req32.value,
36032+ || __put_user((unsigned long)req32.value,
36033 &request->value))
36034 return -EFAULT;
36035
36036@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36037 #define compat_radeon_cp_setparam NULL
36038 #endif /* X86_64 || IA64 */
36039
36040-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36041+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
36042 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
36043 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
36044 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
36045@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36046 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36047 {
36048 unsigned int nr = DRM_IOCTL_NR(cmd);
36049- drm_ioctl_compat_t *fn = NULL;
36050 int ret;
36051
36052 if (nr < DRM_COMMAND_BASE)
36053 return drm_compat_ioctl(filp, cmd, arg);
36054
36055- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
36056- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36057-
36058- if (fn != NULL)
36059+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
36060+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36061 ret = (*fn) (filp, cmd, arg);
36062- else
36063+ } else
36064 ret = drm_ioctl(filp, cmd, arg);
36065
36066 return ret;
36067diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
36068index e771033..a0bc6b3 100644
36069--- a/drivers/gpu/drm/radeon/radeon_irq.c
36070+++ b/drivers/gpu/drm/radeon/radeon_irq.c
36071@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
36072 unsigned int ret;
36073 RING_LOCALS;
36074
36075- atomic_inc(&dev_priv->swi_emitted);
36076- ret = atomic_read(&dev_priv->swi_emitted);
36077+ atomic_inc_unchecked(&dev_priv->swi_emitted);
36078+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
36079
36080 BEGIN_RING(4);
36081 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
36082@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
36083 drm_radeon_private_t *dev_priv =
36084 (drm_radeon_private_t *) dev->dev_private;
36085
36086- atomic_set(&dev_priv->swi_emitted, 0);
36087+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
36088 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
36089
36090 dev->max_vblank_count = 0x001fffff;
36091diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
36092index 8e9057b..af6dacb 100644
36093--- a/drivers/gpu/drm/radeon/radeon_state.c
36094+++ b/drivers/gpu/drm/radeon/radeon_state.c
36095@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
36096 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
36097 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
36098
36099- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36100+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36101 sarea_priv->nbox * sizeof(depth_boxes[0])))
36102 return -EFAULT;
36103
36104@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
36105 {
36106 drm_radeon_private_t *dev_priv = dev->dev_private;
36107 drm_radeon_getparam_t *param = data;
36108- int value;
36109+ int value = 0;
36110
36111 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
36112
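
[Editor's note] The "int value = 0;" change in radeon_cp_getparam() reads as an information-leak fix: value is copied back to userspace, and a request that matched no assignment path would otherwise copy whatever happened to be on the kernel stack. A userspace illustration of the pattern, with invented parameter numbers:

#include <stdio.h>

static int getparam(int param, int *out)
{
    int value = 0;  /* without this, unmatched params leak stack data */

    switch (param) {
    case 1:
        value = 42;
        break;
    /* other cases ... */
    }
    *out = value;   /* stands in for DRM_COPY_TO_USER() */
    return 0;
}

int main(void)
{
    int v;

    getparam(7, &v);
    printf("%d\n", v);  /* deterministic 0, not garbage */
    return 0;
}
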
36113diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
36114index 93f760e..8088227 100644
36115--- a/drivers/gpu/drm/radeon/radeon_ttm.c
36116+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
36117@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
36118 man->size = size >> PAGE_SHIFT;
36119 }
36120
36121-static struct vm_operations_struct radeon_ttm_vm_ops;
36122+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
36123 static const struct vm_operations_struct *ttm_vm_ops = NULL;
36124
36125 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36126@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
36127 }
36128 if (unlikely(ttm_vm_ops == NULL)) {
36129 ttm_vm_ops = vma->vm_ops;
36130+ pax_open_kernel();
36131 radeon_ttm_vm_ops = *ttm_vm_ops;
36132 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
36133+ pax_close_kernel();
36134 }
36135 vma->vm_ops = &radeon_ttm_vm_ops;
36136 return 0;
36137@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
36138 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
36139 else
36140 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
36141- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36142- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36143- radeon_mem_types_list[i].driver_features = 0;
36144+ pax_open_kernel();
36145+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36146+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36147+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36148 if (i == 0)
36149- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36150+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36151 else
36152- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36153-
36154+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36155+ pax_close_kernel();
36156 }
36157 /* Add ttm page pool to debugfs */
36158 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
36159- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36160- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36161- radeon_mem_types_list[i].driver_features = 0;
36162- radeon_mem_types_list[i++].data = NULL;
36163+ pax_open_kernel();
36164+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36165+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36166+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36167+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36168+ pax_close_kernel();
36169 #ifdef CONFIG_SWIOTLB
36170 if (swiotlb_nr_tbl()) {
36171 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
36172- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36173- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36174- radeon_mem_types_list[i].driver_features = 0;
36175- radeon_mem_types_list[i++].data = NULL;
36176+ pax_open_kernel();
36177+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36178+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36179+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36180+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36181+ pax_close_kernel();
36182 }
36183 #endif
36184 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
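
[Editor's note] pax_open_kernel()/pax_close_kernel() bracket the few legitimate writes to objects the patch otherwise marks __read_only (here the cloned radeon_ttm_vm_ops and the debugfs table entries); outside such windows the data is write-protected. The in-kernel mechanism is architecture-specific (on x86 it toggles CR0.WP); the sketch below is only a rough userspace analogue using mprotect():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "vm_ops template");
    mprotect(p, pagesz, PROT_READ);               /* normally read-only */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
    strcpy(p, "patched .fault handler");          /* the one sanctioned write */
    mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel() */

    puts(p);
    return 0;
}
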
36185diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
36186index 5706d2a..17aedaa 100644
36187--- a/drivers/gpu/drm/radeon/rs690.c
36188+++ b/drivers/gpu/drm/radeon/rs690.c
36189@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
36190 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
36191 rdev->pm.sideport_bandwidth.full)
36192 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
36193- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
36194+ read_delay_latency.full = dfixed_const(800 * 1000);
36195 read_delay_latency.full = dfixed_div(read_delay_latency,
36196 rdev->pm.igp_sideport_mclk);
36197+ a.full = dfixed_const(370);
36198+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
36199 } else {
36200 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
36201 rdev->pm.k8_bandwidth.full)
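
[Editor's note] The rs690 change is an overflow fix in the 20.12 fixed-point helpers: dfixed_const(x) stores roughly x << 12 in a 32-bit word, and 370 * 800 * 1000 = 296,000,000, which shifted left by 12 needs about 1.21e12 and silently truncates. Dividing 800 * 1000 by the sideport clock first and multiplying by 370 afterwards keeps every intermediate in range. A quick arithmetic check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t wide = (uint64_t)370 * 800 * 1000 << 12;

    printf("needed:     %llu\n", (unsigned long long)wide); /* ~1.21e12 */
    printf("32-bit max: %u\n", UINT32_MAX);                 /* ~4.29e9 */
    printf("800*1000 << 12 = %llu (fits)\n",
           (unsigned long long)(800ULL * 1000 << 12));
    return 0;
}
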
36202diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36203index bd2a3b4..122d9ad 100644
36204--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
36205+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36206@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
36207 static int ttm_pool_mm_shrink(struct shrinker *shrink,
36208 struct shrink_control *sc)
36209 {
36210- static atomic_t start_pool = ATOMIC_INIT(0);
36211+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
36212 unsigned i;
36213- unsigned pool_offset = atomic_add_return(1, &start_pool);
36214+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
36215 struct ttm_page_pool *pool;
36216 int shrink_pages = sc->nr_to_scan;
36217
36218diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
36219index 1eb060c..188b1fc 100644
36220--- a/drivers/gpu/drm/udl/udl_fb.c
36221+++ b/drivers/gpu/drm/udl/udl_fb.c
36222@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
36223 fb_deferred_io_cleanup(info);
36224 kfree(info->fbdefio);
36225 info->fbdefio = NULL;
36226- info->fbops->fb_mmap = udl_fb_mmap;
36227 }
36228
36229 pr_warn("released /dev/fb%d user=%d count=%d\n",
36230diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
36231index 893a650..6190d3b 100644
36232--- a/drivers/gpu/drm/via/via_drv.h
36233+++ b/drivers/gpu/drm/via/via_drv.h
36234@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
36235 typedef uint32_t maskarray_t[5];
36236
36237 typedef struct drm_via_irq {
36238- atomic_t irq_received;
36239+ atomic_unchecked_t irq_received;
36240 uint32_t pending_mask;
36241 uint32_t enable_mask;
36242 wait_queue_head_t irq_queue;
36243@@ -75,7 +75,7 @@ typedef struct drm_via_private {
36244 struct timeval last_vblank;
36245 int last_vblank_valid;
36246 unsigned usec_per_vblank;
36247- atomic_t vbl_received;
36248+ atomic_unchecked_t vbl_received;
36249 drm_via_state_t hc_state;
36250 char pci_buf[VIA_PCI_BUF_SIZE];
36251 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
36252diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
36253index ac98964..5dbf512 100644
36254--- a/drivers/gpu/drm/via/via_irq.c
36255+++ b/drivers/gpu/drm/via/via_irq.c
36256@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
36257 if (crtc != 0)
36258 return 0;
36259
36260- return atomic_read(&dev_priv->vbl_received);
36261+ return atomic_read_unchecked(&dev_priv->vbl_received);
36262 }
36263
36264 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36265@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36266
36267 status = VIA_READ(VIA_REG_INTERRUPT);
36268 if (status & VIA_IRQ_VBLANK_PENDING) {
36269- atomic_inc(&dev_priv->vbl_received);
36270- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
36271+ atomic_inc_unchecked(&dev_priv->vbl_received);
36272+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
36273 do_gettimeofday(&cur_vblank);
36274 if (dev_priv->last_vblank_valid) {
36275 dev_priv->usec_per_vblank =
36276@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36277 dev_priv->last_vblank = cur_vblank;
36278 dev_priv->last_vblank_valid = 1;
36279 }
36280- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
36281+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
36282 DRM_DEBUG("US per vblank is: %u\n",
36283 dev_priv->usec_per_vblank);
36284 }
36285@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36286
36287 for (i = 0; i < dev_priv->num_irqs; ++i) {
36288 if (status & cur_irq->pending_mask) {
36289- atomic_inc(&cur_irq->irq_received);
36290+ atomic_inc_unchecked(&cur_irq->irq_received);
36291 DRM_WAKEUP(&cur_irq->irq_queue);
36292 handled = 1;
36293 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
36294@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
36295 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36296 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
36297 masks[irq][4]));
36298- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
36299+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
36300 } else {
36301 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36302 (((cur_irq_sequence =
36303- atomic_read(&cur_irq->irq_received)) -
36304+ atomic_read_unchecked(&cur_irq->irq_received)) -
36305 *sequence) <= (1 << 23)));
36306 }
36307 *sequence = cur_irq_sequence;
36308@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
36309 }
36310
36311 for (i = 0; i < dev_priv->num_irqs; ++i) {
36312- atomic_set(&cur_irq->irq_received, 0);
36313+ atomic_set_unchecked(&cur_irq->irq_received, 0);
36314 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
36315 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
36316 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
36317@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
36318 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
36319 case VIA_IRQ_RELATIVE:
36320 irqwait->request.sequence +=
36321- atomic_read(&cur_irq->irq_received);
36322+ atomic_read_unchecked(&cur_irq->irq_received);
36323 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
36324 case VIA_IRQ_ABSOLUTE:
36325 break;
36326diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36327index 13aeda7..4a952d1 100644
36328--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36329+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36330@@ -290,7 +290,7 @@ struct vmw_private {
36331 * Fencing and IRQs.
36332 */
36333
36334- atomic_t marker_seq;
36335+ atomic_unchecked_t marker_seq;
36336 wait_queue_head_t fence_queue;
36337 wait_queue_head_t fifo_queue;
36338 int fence_queue_waiters; /* Protected by hw_mutex */
36339diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36340index 3eb1486..0a47ee9 100644
36341--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36342+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36343@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
36344 (unsigned int) min,
36345 (unsigned int) fifo->capabilities);
36346
36347- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36348+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36349 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
36350 vmw_marker_queue_init(&fifo->marker_queue);
36351 return vmw_fifo_send_fence(dev_priv, &dummy);
36352@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36353 if (reserveable)
36354 iowrite32(bytes, fifo_mem +
36355 SVGA_FIFO_RESERVED);
36356- return fifo_mem + (next_cmd >> 2);
36357+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36358 } else {
36359 need_bounce = true;
36360 }
36361@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36362
36363 fm = vmw_fifo_reserve(dev_priv, bytes);
36364 if (unlikely(fm == NULL)) {
36365- *seqno = atomic_read(&dev_priv->marker_seq);
36366+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36367 ret = -ENOMEM;
36368 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36369 false, 3*HZ);
36370@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36371 }
36372
36373 do {
36374- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36375+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36376 } while (*seqno == 0);
36377
36378 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
36379diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36380index 4640adb..e1384ed 100644
36381--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36382+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36383@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36384 * emitted. Then the fence is stale and signaled.
36385 */
36386
36387- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36388+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36389 > VMW_FENCE_WRAP);
36390
36391 return ret;
36392@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
36393
36394 if (fifo_idle)
36395 down_read(&fifo_state->rwsem);
36396- signal_seq = atomic_read(&dev_priv->marker_seq);
36397+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36398 ret = 0;
36399
36400 for (;;) {
36401diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36402index 8a8725c..afed796 100644
36403--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36404+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36405@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36406 while (!vmw_lag_lt(queue, us)) {
36407 spin_lock(&queue->lock);
36408 if (list_empty(&queue->head))
36409- seqno = atomic_read(&dev_priv->marker_seq);
36410+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36411 else {
36412 marker = list_first_entry(&queue->head,
36413 struct vmw_marker, head);
36414diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36415index ceb3040..6160c5c 100644
36416--- a/drivers/hid/hid-core.c
36417+++ b/drivers/hid/hid-core.c
36418@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36419
36420 int hid_add_device(struct hid_device *hdev)
36421 {
36422- static atomic_t id = ATOMIC_INIT(0);
36423+ static atomic_unchecked_t id = ATOMIC_INIT(0);
36424 int ret;
36425
36426 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36427@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
36428 /* XXX hack, any other cleaner solution after the driver core
36429 * is converted to allow more than 20 bytes as the device name? */
36430 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36431- hdev->vendor, hdev->product, atomic_inc_return(&id));
36432+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36433
36434 hid_debug_register(hdev, dev_name(&hdev->dev));
36435 ret = device_add(&hdev->dev);
36436diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36437index eec3291..8ed706b 100644
36438--- a/drivers/hid/hid-wiimote-debug.c
36439+++ b/drivers/hid/hid-wiimote-debug.c
36440@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36441 else if (size == 0)
36442 return -EIO;
36443
36444- if (copy_to_user(u, buf, size))
36445+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
36446 return -EFAULT;
36447
36448 *off += size;
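
[Editor's note] The wiimote debugfs fix bounds the user-requested read against the on-stack staging buffer before copy_to_user(), closing a stack over-read. A userspace sketch of the same clamp, with an invented buffer size and memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

#define EFAULT 14

static int debugfs_read(char *user, size_t want)
{
    char buf[16];

    memset(buf, 'A', sizeof(buf));
    if (want > sizeof(buf))     /* the added bound */
        return -EFAULT;         /* instead of leaking adjacent stack bytes */
    memcpy(user, buf, want);    /* stands in for copy_to_user() */
    return (int)want;
}

int main(void)
{
    char out[64];

    printf("%d\n", debugfs_read(out, 8));
    printf("%d\n", debugfs_read(out, 64));  /* rejected */
    return 0;
}
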
36449diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36450index 773a2f2..7ce08bc 100644
36451--- a/drivers/hv/channel.c
36452+++ b/drivers/hv/channel.c
36453@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36454 int ret = 0;
36455 int t;
36456
36457- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36458- atomic_inc(&vmbus_connection.next_gpadl_handle);
36459+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36460+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36461
36462 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36463 if (ret)
36464diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36465index 3648f8f..30ef30d 100644
36466--- a/drivers/hv/hv.c
36467+++ b/drivers/hv/hv.c
36468@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36469 u64 output_address = (output) ? virt_to_phys(output) : 0;
36470 u32 output_address_hi = output_address >> 32;
36471 u32 output_address_lo = output_address & 0xFFFFFFFF;
36472- void *hypercall_page = hv_context.hypercall_page;
36473+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36474
36475 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36476 "=a"(hv_status_lo) : "d" (control_hi),
36477diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36478index d8d1fad..b91caf7 100644
36479--- a/drivers/hv/hyperv_vmbus.h
36480+++ b/drivers/hv/hyperv_vmbus.h
36481@@ -594,7 +594,7 @@ enum vmbus_connect_state {
36482 struct vmbus_connection {
36483 enum vmbus_connect_state conn_state;
36484
36485- atomic_t next_gpadl_handle;
36486+ atomic_unchecked_t next_gpadl_handle;
36487
36488 /*
36489 * Represents channel interrupts. Each bit position represents a
36490diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36491index 8e1a9ec..4687821 100644
36492--- a/drivers/hv/vmbus_drv.c
36493+++ b/drivers/hv/vmbus_drv.c
36494@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36495 {
36496 int ret = 0;
36497
36498- static atomic_t device_num = ATOMIC_INIT(0);
36499+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36500
36501 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36502- atomic_inc_return(&device_num));
36503+ atomic_inc_return_unchecked(&device_num));
36504
36505 child_device_obj->device.bus = &hv_bus;
36506 child_device_obj->device.parent = &hv_acpi_dev->dev;
36507diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36508index 1672e2a..4a6297c 100644
36509--- a/drivers/hwmon/acpi_power_meter.c
36510+++ b/drivers/hwmon/acpi_power_meter.c
36511@@ -117,7 +117,7 @@ struct sensor_template {
36512 struct device_attribute *devattr,
36513 const char *buf, size_t count);
36514 int index;
36515-};
36516+} __do_const;
36517
36518 /* Averaging interval */
36519 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36520@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36521 struct sensor_template *attrs)
36522 {
36523 struct device *dev = &resource->acpi_dev->dev;
36524- struct sensor_device_attribute *sensors =
36525+ sensor_device_attribute_no_const *sensors =
36526 &resource->sensors[resource->num_sensors];
36527 int res = 0;
36528
36529diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36530index b41baff..4953e4d 100644
36531--- a/drivers/hwmon/applesmc.c
36532+++ b/drivers/hwmon/applesmc.c
36533@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36534 {
36535 struct applesmc_node_group *grp;
36536 struct applesmc_dev_attr *node;
36537- struct attribute *attr;
36538+ attribute_no_const *attr;
36539 int ret, i;
36540
36541 for (grp = groups; grp->format; grp++) {
36542diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36543index 56dbcfb..9874bf1 100644
36544--- a/drivers/hwmon/asus_atk0110.c
36545+++ b/drivers/hwmon/asus_atk0110.c
36546@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36547 struct atk_sensor_data {
36548 struct list_head list;
36549 struct atk_data *data;
36550- struct device_attribute label_attr;
36551- struct device_attribute input_attr;
36552- struct device_attribute limit1_attr;
36553- struct device_attribute limit2_attr;
36554+ device_attribute_no_const label_attr;
36555+ device_attribute_no_const input_attr;
36556+ device_attribute_no_const limit1_attr;
36557+ device_attribute_no_const limit2_attr;
36558 char label_attr_name[ATTR_NAME_SIZE];
36559 char input_attr_name[ATTR_NAME_SIZE];
36560 char limit1_attr_name[ATTR_NAME_SIZE];
36561@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36562 static struct device_attribute atk_name_attr =
36563 __ATTR(name, 0444, atk_name_show, NULL);
36564
36565-static void atk_init_attribute(struct device_attribute *attr, char *name,
36566+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36567 sysfs_show_func show)
36568 {
36569 sysfs_attr_init(&attr->attr);
36570diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36571index d64923d..72591e8 100644
36572--- a/drivers/hwmon/coretemp.c
36573+++ b/drivers/hwmon/coretemp.c
36574@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36575 return NOTIFY_OK;
36576 }
36577
36578-static struct notifier_block coretemp_cpu_notifier __refdata = {
36579+static struct notifier_block coretemp_cpu_notifier = {
36580 .notifier_call = coretemp_cpu_callback,
36581 };
36582
36583diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36584index a14f634..2916ee2 100644
36585--- a/drivers/hwmon/ibmaem.c
36586+++ b/drivers/hwmon/ibmaem.c
36587@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
36588 struct aem_rw_sensor_template *rw)
36589 {
36590 struct device *dev = &data->pdev->dev;
36591- struct sensor_device_attribute *sensors = data->sensors;
36592+ sensor_device_attribute_no_const *sensors = data->sensors;
36593 int err;
36594
36595 /* Set up read-only sensors */
36596diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36597index 7d19b1b..8fdaaac 100644
36598--- a/drivers/hwmon/pmbus/pmbus_core.c
36599+++ b/drivers/hwmon/pmbus/pmbus_core.c
36600@@ -811,7 +811,7 @@ static ssize_t pmbus_show_label(struct device *dev,
36601
36602 #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
36603 do { \
36604- struct sensor_device_attribute *a \
36605+ sensor_device_attribute_no_const *a \
36606 = &data->_type##s[data->num_##_type##s].attribute; \
36607 BUG_ON(data->num_attributes >= data->max_attributes); \
36608 sysfs_attr_init(&a->dev_attr.attr); \
36609diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36610index 8047fed..1e956f0 100644
36611--- a/drivers/hwmon/sht15.c
36612+++ b/drivers/hwmon/sht15.c
36613@@ -169,7 +169,7 @@ struct sht15_data {
36614 int supply_uV;
36615 bool supply_uV_valid;
36616 struct work_struct update_supply_work;
36617- atomic_t interrupt_handled;
36618+ atomic_unchecked_t interrupt_handled;
36619 };
36620
36621 /**
36622@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
36623 return ret;
36624
36625 gpio_direction_input(data->pdata->gpio_data);
36626- atomic_set(&data->interrupt_handled, 0);
36627+ atomic_set_unchecked(&data->interrupt_handled, 0);
36628
36629 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36630 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36631 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36632 /* Only relevant if the interrupt hasn't occurred. */
36633- if (!atomic_read(&data->interrupt_handled))
36634+ if (!atomic_read_unchecked(&data->interrupt_handled))
36635 schedule_work(&data->read_work);
36636 }
36637 ret = wait_event_timeout(data->wait_queue,
36638@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36639
36640 /* First disable the interrupt */
36641 disable_irq_nosync(irq);
36642- atomic_inc(&data->interrupt_handled);
36643+ atomic_inc_unchecked(&data->interrupt_handled);
36644 /* Then schedule a reading work struct */
36645 if (data->state != SHT15_READING_NOTHING)
36646 schedule_work(&data->read_work);
36647@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36648 * If not, then start the interrupt again - care here as could
36649 * have gone low in meantime so verify it hasn't!
36650 */
36651- atomic_set(&data->interrupt_handled, 0);
36652+ atomic_set_unchecked(&data->interrupt_handled, 0);
36653 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36654 /* If still not occurred or another handler was scheduled */
36655 if (gpio_get_value(data->pdata->gpio_data)
36656- || atomic_read(&data->interrupt_handled))
36657+ || atomic_read_unchecked(&data->interrupt_handled))
36658 return;
36659 }
36660
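The sht15 hunks introduce the other conversion that dominates this patch. PaX's REFCOUNT hardening makes the regular `atomic_t` operations trap on signed overflow so that reference-count bugs cannot wrap a counter; counters where wrapping is harmless, like this "interrupt seen" flag or the InfiniBand and nes statistics further down, are moved to `atomic_unchecked_t` and the matching `*_unchecked()` accessors, which skip the check. A userspace approximation of the two flavours, using GCC builtins where the real kernel code uses asm:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_model_t;
    typedef struct { int counter; } atomic_unchecked_model_t;

    /* Checked flavour: detect signed overflow and abort, roughly what
     * PaX's REFCOUNT does with a trapping "jo" path in asm. */
    static int atomic_inc_return_model(atomic_model_t *v)
    {
            int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
            int new;
            if (__builtin_add_overflow(old, 1, &new))
                    abort();        /* refcount overflow caught */
            return new;
    }

    /* Unchecked flavour: plain wrapping increment, fine for stats. */
    static int atomic_inc_return_unchecked_model(atomic_unchecked_model_t *v)
    {
            return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            atomic_model_t ref = { 0 };
            atomic_unchecked_model_t seen = { 0 };
            atomic_inc_return_model(&ref);
            atomic_inc_return_unchecked_model(&seen);
            printf("%d %d\n", ref.counter, seen.counter);
            return 0;
    }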
36661diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36662index 76f157b..9c0db1b 100644
36663--- a/drivers/hwmon/via-cputemp.c
36664+++ b/drivers/hwmon/via-cputemp.c
36665@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36666 return NOTIFY_OK;
36667 }
36668
36669-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36670+static struct notifier_block via_cputemp_cpu_notifier = {
36671 .notifier_call = via_cputemp_cpu_callback,
36672 };
36673
36674diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36675index 378fcb5..5e91fa8 100644
36676--- a/drivers/i2c/busses/i2c-amd756-s4882.c
36677+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
36678@@ -43,7 +43,7 @@
36679 extern struct i2c_adapter amd756_smbus;
36680
36681 static struct i2c_adapter *s4882_adapter;
36682-static struct i2c_algorithm *s4882_algo;
36683+static i2c_algorithm_no_const *s4882_algo;
36684
36685 /* Wrapper access functions for multiplexed SMBus */
36686 static DEFINE_MUTEX(amd756_lock);
36687diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
36688index 29015eb..af2d8e9 100644
36689--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
36690+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36691@@ -41,7 +41,7 @@
36692 extern struct i2c_adapter *nforce2_smbus;
36693
36694 static struct i2c_adapter *s4985_adapter;
36695-static struct i2c_algorithm *s4985_algo;
36696+static i2c_algorithm_no_const *s4985_algo;
36697
36698 /* Wrapper access functions for multiplexed SMBus */
36699 static DEFINE_MUTEX(nforce2_lock);
36700diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36701index 8126824..55a2798 100644
36702--- a/drivers/ide/ide-cd.c
36703+++ b/drivers/ide/ide-cd.c
36704@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36705 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36706 if ((unsigned long)buf & alignment
36707 || blk_rq_bytes(rq) & q->dma_pad_mask
36708- || object_is_on_stack(buf))
36709+ || object_starts_on_stack(buf))
36710 drive->dma = 0;
36711 }
36712 }
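The ide-cd hunk swaps the stock `object_is_on_stack()` for the patch's `object_starts_on_stack()`. Both answer the same question for this caller, namely whether the DMA buffer lives on the current task's kernel stack (in which case DMA is refused); the renamed helper is the variant the rest of the patch uses for its stack checks. The shape of the range test, with the stack base passed in explicitly since userspace has no `current`:

    #include <stdio.h>
    #include <stdint.h>

    #define STACK_SIZE_MODEL (8 * 1024)   /* stand-in for THREAD_SIZE */

    /* Does obj point into [stack, stack + STACK_SIZE_MODEL)?  The kernel
     * helper derives the stack base from the current task instead. */
    static int starts_on_stack_model(const void *obj, const void *stack)
    {
            uintptr_t o = (uintptr_t)obj, s = (uintptr_t)stack;
            return o >= s && o - s < STACK_SIZE_MODEL;
    }

    int main(void)
    {
            char stack_region[STACK_SIZE_MODEL];
            char elsewhere[64];
            printf("inside: %d, outside: %d\n",
                   starts_on_stack_model(stack_region + 100, stack_region),
                   starts_on_stack_model(elsewhere, stack_region));
            return 0;
    }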
36713diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36714index 8848f16..f8e6dd8 100644
36715--- a/drivers/iio/industrialio-core.c
36716+++ b/drivers/iio/industrialio-core.c
36717@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36718 }
36719
36720 static
36721-int __iio_device_attr_init(struct device_attribute *dev_attr,
36722+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36723 const char *postfix,
36724 struct iio_chan_spec const *chan,
36725 ssize_t (*readfunc)(struct device *dev,
36726diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36727index 394fea2..c833880 100644
36728--- a/drivers/infiniband/core/cm.c
36729+++ b/drivers/infiniband/core/cm.c
36730@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36731
36732 struct cm_counter_group {
36733 struct kobject obj;
36734- atomic_long_t counter[CM_ATTR_COUNT];
36735+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36736 };
36737
36738 struct cm_counter_attribute {
36739@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36740 struct ib_mad_send_buf *msg = NULL;
36741 int ret;
36742
36743- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36744+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36745 counter[CM_REQ_COUNTER]);
36746
36747 /* Quick state check to discard duplicate REQs. */
36748@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36749 if (!cm_id_priv)
36750 return;
36751
36752- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36753+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36754 counter[CM_REP_COUNTER]);
36755 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36756 if (ret)
36757@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
36758 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36759 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36760 spin_unlock_irq(&cm_id_priv->lock);
36761- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36762+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36763 counter[CM_RTU_COUNTER]);
36764 goto out;
36765 }
36766@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
36767 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36768 dreq_msg->local_comm_id);
36769 if (!cm_id_priv) {
36770- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36771+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36772 counter[CM_DREQ_COUNTER]);
36773 cm_issue_drep(work->port, work->mad_recv_wc);
36774 return -EINVAL;
36775@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
36776 case IB_CM_MRA_REP_RCVD:
36777 break;
36778 case IB_CM_TIMEWAIT:
36779- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36780+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36781 counter[CM_DREQ_COUNTER]);
36782 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36783 goto unlock;
36784@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
36785 cm_free_msg(msg);
36786 goto deref;
36787 case IB_CM_DREQ_RCVD:
36788- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36789+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36790 counter[CM_DREQ_COUNTER]);
36791 goto unlock;
36792 default:
36793@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
36794 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36795 cm_id_priv->msg, timeout)) {
36796 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36797- atomic_long_inc(&work->port->
36798+ atomic_long_inc_unchecked(&work->port->
36799 counter_group[CM_RECV_DUPLICATES].
36800 counter[CM_MRA_COUNTER]);
36801 goto out;
36802@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
36803 break;
36804 case IB_CM_MRA_REQ_RCVD:
36805 case IB_CM_MRA_REP_RCVD:
36806- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36807+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36808 counter[CM_MRA_COUNTER]);
36809 /* fall through */
36810 default:
36811@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
36812 case IB_CM_LAP_IDLE:
36813 break;
36814 case IB_CM_MRA_LAP_SENT:
36815- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36816+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36817 counter[CM_LAP_COUNTER]);
36818 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36819 goto unlock;
36820@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
36821 cm_free_msg(msg);
36822 goto deref;
36823 case IB_CM_LAP_RCVD:
36824- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36825+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36826 counter[CM_LAP_COUNTER]);
36827 goto unlock;
36828 default:
36829@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36830 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36831 if (cur_cm_id_priv) {
36832 spin_unlock_irq(&cm.lock);
36833- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36834+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36835 counter[CM_SIDR_REQ_COUNTER]);
36836 goto out; /* Duplicate message. */
36837 }
36838@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36839 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36840 msg->retries = 1;
36841
36842- atomic_long_add(1 + msg->retries,
36843+ atomic_long_add_unchecked(1 + msg->retries,
36844 &port->counter_group[CM_XMIT].counter[attr_index]);
36845 if (msg->retries)
36846- atomic_long_add(msg->retries,
36847+ atomic_long_add_unchecked(msg->retries,
36848 &port->counter_group[CM_XMIT_RETRIES].
36849 counter[attr_index]);
36850
36851@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36852 }
36853
36854 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36855- atomic_long_inc(&port->counter_group[CM_RECV].
36856+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36857 counter[attr_id - CM_ATTR_ID_OFFSET]);
36858
36859 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36860@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36861 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36862
36863 return sprintf(buf, "%ld\n",
36864- atomic_long_read(&group->counter[cm_attr->index]));
36865+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36866 }
36867
36868 static const struct sysfs_ops cm_counter_ops = {
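Every counter touched in cm.c above is a duplicate/retransmit statistic kept per port and exported read-only through sysfs; wrap-around of such a statistic is harmless, so the whole group moves to `atomic_long_unchecked_t` and the `atomic_long_*_unchecked()` accessors. The structure is an array of counters indexed by attribute, sketched here with GCC atomics standing in for the kernel API (the layout is simplified; the real group also embeds a kobject):

    #include <stdio.h>

    enum { CM_RECV, CM_XMIT, CM_XMIT_RETRIES, CM_NGROUPS };

    static long counter[CM_NGROUPS];  /* model of a cm_counter_group */

    int main(void)
    {
            int retries = 3;

            /* atomic_long_add_unchecked(1 + msg->retries, ...) */
            __atomic_fetch_add(&counter[CM_XMIT], 1 + retries, __ATOMIC_RELAXED);
            if (retries)
                    __atomic_fetch_add(&counter[CM_XMIT_RETRIES], retries,
                                       __ATOMIC_RELAXED);

            /* the sysfs show path: atomic_long_read_unchecked() */
            printf("%ld\n", __atomic_load_n(&counter[CM_XMIT], __ATOMIC_RELAXED));
            return 0;
    }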
36869diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36870index 176c8f9..2627b62 100644
36871--- a/drivers/infiniband/core/fmr_pool.c
36872+++ b/drivers/infiniband/core/fmr_pool.c
36873@@ -98,8 +98,8 @@ struct ib_fmr_pool {
36874
36875 struct task_struct *thread;
36876
36877- atomic_t req_ser;
36878- atomic_t flush_ser;
36879+ atomic_unchecked_t req_ser;
36880+ atomic_unchecked_t flush_ser;
36881
36882 wait_queue_head_t force_wait;
36883 };
36884@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36885 struct ib_fmr_pool *pool = pool_ptr;
36886
36887 do {
36888- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36889+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36890 ib_fmr_batch_release(pool);
36891
36892- atomic_inc(&pool->flush_ser);
36893+ atomic_inc_unchecked(&pool->flush_ser);
36894 wake_up_interruptible(&pool->force_wait);
36895
36896 if (pool->flush_function)
36897@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36898 }
36899
36900 set_current_state(TASK_INTERRUPTIBLE);
36901- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36902+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36903 !kthread_should_stop())
36904 schedule();
36905 __set_current_state(TASK_RUNNING);
36906@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36907 pool->dirty_watermark = params->dirty_watermark;
36908 pool->dirty_len = 0;
36909 spin_lock_init(&pool->pool_lock);
36910- atomic_set(&pool->req_ser, 0);
36911- atomic_set(&pool->flush_ser, 0);
36912+ atomic_set_unchecked(&pool->req_ser, 0);
36913+ atomic_set_unchecked(&pool->flush_ser, 0);
36914 init_waitqueue_head(&pool->force_wait);
36915
36916 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36917@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36918 }
36919 spin_unlock_irq(&pool->pool_lock);
36920
36921- serial = atomic_inc_return(&pool->req_ser);
36922+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36923 wake_up_process(pool->thread);
36924
36925 if (wait_event_interruptible(pool->force_wait,
36926- atomic_read(&pool->flush_ser) - serial >= 0))
36927+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36928 return -EINTR;
36929
36930 return 0;
36931@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36932 } else {
36933 list_add_tail(&fmr->list, &pool->dirty_list);
36934 if (++pool->dirty_len >= pool->dirty_watermark) {
36935- atomic_inc(&pool->req_ser);
36936+ atomic_inc_unchecked(&pool->req_ser);
36937 wake_up_process(pool->thread);
36938 }
36939 }
36940diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
36941index afd8179..598063f 100644
36942--- a/drivers/infiniband/hw/cxgb4/mem.c
36943+++ b/drivers/infiniband/hw/cxgb4/mem.c
36944@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36945 int err;
36946 struct fw_ri_tpte tpt;
36947 u32 stag_idx;
36948- static atomic_t key;
36949+ static atomic_unchecked_t key;
36950
36951 if (c4iw_fatal_error(rdev))
36952 return -EIO;
36953@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36954 if (rdev->stats.stag.cur > rdev->stats.stag.max)
36955 rdev->stats.stag.max = rdev->stats.stag.cur;
36956 mutex_unlock(&rdev->stats.lock);
36957- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
36958+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
36959 }
36960 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
36961 __func__, stag_state, type, pdid, stag_idx);
36962diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
36963index 79b3dbc..96e5fcc 100644
36964--- a/drivers/infiniband/hw/ipath/ipath_rc.c
36965+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
36966@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36967 struct ib_atomic_eth *ateth;
36968 struct ipath_ack_entry *e;
36969 u64 vaddr;
36970- atomic64_t *maddr;
36971+ atomic64_unchecked_t *maddr;
36972 u64 sdata;
36973 u32 rkey;
36974 u8 next;
36975@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36976 IB_ACCESS_REMOTE_ATOMIC)))
36977 goto nack_acc_unlck;
36978 /* Perform atomic OP and save result. */
36979- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36980+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36981 sdata = be64_to_cpu(ateth->swap_data);
36982 e = &qp->s_ack_queue[qp->r_head_ack_queue];
36983 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
36984- (u64) atomic64_add_return(sdata, maddr) - sdata :
36985+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36986 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36987 be64_to_cpu(ateth->compare_data),
36988 sdata);
36989diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
36990index 1f95bba..9530f87 100644
36991--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
36992+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
36993@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
36994 unsigned long flags;
36995 struct ib_wc wc;
36996 u64 sdata;
36997- atomic64_t *maddr;
36998+ atomic64_unchecked_t *maddr;
36999 enum ib_wc_status send_status;
37000
37001 /*
37002@@ -382,11 +382,11 @@ again:
37003 IB_ACCESS_REMOTE_ATOMIC)))
37004 goto acc_err;
37005 /* Perform atomic OP and save result. */
37006- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37007+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37008 sdata = wqe->wr.wr.atomic.compare_add;
37009 *(u64 *) sqp->s_sge.sge.vaddr =
37010 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
37011- (u64) atomic64_add_return(sdata, maddr) - sdata :
37012+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37013 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37014 sdata, wqe->wr.wr.atomic.swap);
37015 goto send_comp;
37016diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
37017index 9d3e5c1..d9afe4a 100644
37018--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
37019+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
37020@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
37021 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
37022 }
37023
37024-int mthca_QUERY_FW(struct mthca_dev *dev)
37025+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
37026 {
37027 struct mthca_mailbox *mailbox;
37028 u32 *outbox;
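`__intentional_overflow(-1)` is an annotation for the size_overflow GCC plugin that ships with this patch set: it marks arithmetic that is allowed to wrap so the plugin's overflow instrumentation leaves it alone, with -1 denoting the function's return value. To an ordinary compiler the macro expands to nothing, the usual attribute-shim pattern; this sketch fakes the plugin side with a guard macro:

    #include <stdio.h>

    /* Shim: only the analysis plugin gives the attribute meaning. */
    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    /* -1: the return value may wrap by design. */
    static unsigned int __intentional_overflow(-1) next_seq(unsigned int s)
    {
            return s + 1;
    }

    int main(void)
    {
            printf("%u\n", next_seq(0xffffffffu));  /* prints 0 */
            return 0;
    }

The same annotation recurs on mthca_mr_alloc() in the next diff and on lguest's release_pmd() later in this section.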
37029diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
37030index ed9a989..e0c5871 100644
37031--- a/drivers/infiniband/hw/mthca/mthca_mr.c
37032+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
37033@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
37034 return key;
37035 }
37036
37037-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37038+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37039 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
37040 {
37041 struct mthca_mailbox *mailbox;
37042diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
37043index 5b152a3..c1f3e83 100644
37044--- a/drivers/infiniband/hw/nes/nes.c
37045+++ b/drivers/infiniband/hw/nes/nes.c
37046@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
37047 LIST_HEAD(nes_adapter_list);
37048 static LIST_HEAD(nes_dev_list);
37049
37050-atomic_t qps_destroyed;
37051+atomic_unchecked_t qps_destroyed;
37052
37053 static unsigned int ee_flsh_adapter;
37054 static unsigned int sysfs_nonidx_addr;
37055@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
37056 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
37057 struct nes_adapter *nesadapter = nesdev->nesadapter;
37058
37059- atomic_inc(&qps_destroyed);
37060+ atomic_inc_unchecked(&qps_destroyed);
37061
37062 /* Free the control structures */
37063
37064diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
37065index 33cc589..3bd6538 100644
37066--- a/drivers/infiniband/hw/nes/nes.h
37067+++ b/drivers/infiniband/hw/nes/nes.h
37068@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
37069 extern unsigned int wqm_quanta;
37070 extern struct list_head nes_adapter_list;
37071
37072-extern atomic_t cm_connects;
37073-extern atomic_t cm_accepts;
37074-extern atomic_t cm_disconnects;
37075-extern atomic_t cm_closes;
37076-extern atomic_t cm_connecteds;
37077-extern atomic_t cm_connect_reqs;
37078-extern atomic_t cm_rejects;
37079-extern atomic_t mod_qp_timouts;
37080-extern atomic_t qps_created;
37081-extern atomic_t qps_destroyed;
37082-extern atomic_t sw_qps_destroyed;
37083+extern atomic_unchecked_t cm_connects;
37084+extern atomic_unchecked_t cm_accepts;
37085+extern atomic_unchecked_t cm_disconnects;
37086+extern atomic_unchecked_t cm_closes;
37087+extern atomic_unchecked_t cm_connecteds;
37088+extern atomic_unchecked_t cm_connect_reqs;
37089+extern atomic_unchecked_t cm_rejects;
37090+extern atomic_unchecked_t mod_qp_timouts;
37091+extern atomic_unchecked_t qps_created;
37092+extern atomic_unchecked_t qps_destroyed;
37093+extern atomic_unchecked_t sw_qps_destroyed;
37094 extern u32 mh_detected;
37095 extern u32 mh_pauses_sent;
37096 extern u32 cm_packets_sent;
37097@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
37098 extern u32 cm_packets_received;
37099 extern u32 cm_packets_dropped;
37100 extern u32 cm_packets_retrans;
37101-extern atomic_t cm_listens_created;
37102-extern atomic_t cm_listens_destroyed;
37103+extern atomic_unchecked_t cm_listens_created;
37104+extern atomic_unchecked_t cm_listens_destroyed;
37105 extern u32 cm_backlog_drops;
37106-extern atomic_t cm_loopbacks;
37107-extern atomic_t cm_nodes_created;
37108-extern atomic_t cm_nodes_destroyed;
37109-extern atomic_t cm_accel_dropped_pkts;
37110-extern atomic_t cm_resets_recvd;
37111-extern atomic_t pau_qps_created;
37112-extern atomic_t pau_qps_destroyed;
37113+extern atomic_unchecked_t cm_loopbacks;
37114+extern atomic_unchecked_t cm_nodes_created;
37115+extern atomic_unchecked_t cm_nodes_destroyed;
37116+extern atomic_unchecked_t cm_accel_dropped_pkts;
37117+extern atomic_unchecked_t cm_resets_recvd;
37118+extern atomic_unchecked_t pau_qps_created;
37119+extern atomic_unchecked_t pau_qps_destroyed;
37120
37121 extern u32 int_mod_timer_init;
37122 extern u32 int_mod_cq_depth_256;
37123diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
37124index 22ea67e..dcbe3bc 100644
37125--- a/drivers/infiniband/hw/nes/nes_cm.c
37126+++ b/drivers/infiniband/hw/nes/nes_cm.c
37127@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
37128 u32 cm_packets_retrans;
37129 u32 cm_packets_created;
37130 u32 cm_packets_received;
37131-atomic_t cm_listens_created;
37132-atomic_t cm_listens_destroyed;
37133+atomic_unchecked_t cm_listens_created;
37134+atomic_unchecked_t cm_listens_destroyed;
37135 u32 cm_backlog_drops;
37136-atomic_t cm_loopbacks;
37137-atomic_t cm_nodes_created;
37138-atomic_t cm_nodes_destroyed;
37139-atomic_t cm_accel_dropped_pkts;
37140-atomic_t cm_resets_recvd;
37141+atomic_unchecked_t cm_loopbacks;
37142+atomic_unchecked_t cm_nodes_created;
37143+atomic_unchecked_t cm_nodes_destroyed;
37144+atomic_unchecked_t cm_accel_dropped_pkts;
37145+atomic_unchecked_t cm_resets_recvd;
37146
37147 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
37148 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
37149@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
37150
37151 static struct nes_cm_core *g_cm_core;
37152
37153-atomic_t cm_connects;
37154-atomic_t cm_accepts;
37155-atomic_t cm_disconnects;
37156-atomic_t cm_closes;
37157-atomic_t cm_connecteds;
37158-atomic_t cm_connect_reqs;
37159-atomic_t cm_rejects;
37160+atomic_unchecked_t cm_connects;
37161+atomic_unchecked_t cm_accepts;
37162+atomic_unchecked_t cm_disconnects;
37163+atomic_unchecked_t cm_closes;
37164+atomic_unchecked_t cm_connecteds;
37165+atomic_unchecked_t cm_connect_reqs;
37166+atomic_unchecked_t cm_rejects;
37167
37168 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
37169 {
37170@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
37171 kfree(listener);
37172 listener = NULL;
37173 ret = 0;
37174- atomic_inc(&cm_listens_destroyed);
37175+ atomic_inc_unchecked(&cm_listens_destroyed);
37176 } else {
37177 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
37178 }
37179@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
37180 cm_node->rem_mac);
37181
37182 add_hte_node(cm_core, cm_node);
37183- atomic_inc(&cm_nodes_created);
37184+ atomic_inc_unchecked(&cm_nodes_created);
37185
37186 return cm_node;
37187 }
37188@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
37189 }
37190
37191 atomic_dec(&cm_core->node_cnt);
37192- atomic_inc(&cm_nodes_destroyed);
37193+ atomic_inc_unchecked(&cm_nodes_destroyed);
37194 nesqp = cm_node->nesqp;
37195 if (nesqp) {
37196 nesqp->cm_node = NULL;
37197@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
37198
37199 static void drop_packet(struct sk_buff *skb)
37200 {
37201- atomic_inc(&cm_accel_dropped_pkts);
37202+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37203 dev_kfree_skb_any(skb);
37204 }
37205
37206@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
37207 {
37208
37209 int reset = 0; /* whether to send reset in case of err.. */
37210- atomic_inc(&cm_resets_recvd);
37211+ atomic_inc_unchecked(&cm_resets_recvd);
37212 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
37213 " refcnt=%d\n", cm_node, cm_node->state,
37214 atomic_read(&cm_node->ref_count));
37215@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
37216 rem_ref_cm_node(cm_node->cm_core, cm_node);
37217 return NULL;
37218 }
37219- atomic_inc(&cm_loopbacks);
37220+ atomic_inc_unchecked(&cm_loopbacks);
37221 loopbackremotenode->loopbackpartner = cm_node;
37222 loopbackremotenode->tcp_cntxt.rcv_wscale =
37223 NES_CM_DEFAULT_RCV_WND_SCALE;
37224@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
37225 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
37226 else {
37227 rem_ref_cm_node(cm_core, cm_node);
37228- atomic_inc(&cm_accel_dropped_pkts);
37229+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37230 dev_kfree_skb_any(skb);
37231 }
37232 break;
37233@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37234
37235 if ((cm_id) && (cm_id->event_handler)) {
37236 if (issue_disconn) {
37237- atomic_inc(&cm_disconnects);
37238+ atomic_inc_unchecked(&cm_disconnects);
37239 cm_event.event = IW_CM_EVENT_DISCONNECT;
37240 cm_event.status = disconn_status;
37241 cm_event.local_addr = cm_id->local_addr;
37242@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37243 }
37244
37245 if (issue_close) {
37246- atomic_inc(&cm_closes);
37247+ atomic_inc_unchecked(&cm_closes);
37248 nes_disconnect(nesqp, 1);
37249
37250 cm_id->provider_data = nesqp;
37251@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37252
37253 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
37254 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
37255- atomic_inc(&cm_accepts);
37256+ atomic_inc_unchecked(&cm_accepts);
37257
37258 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
37259 netdev_refcnt_read(nesvnic->netdev));
37260@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
37261 struct nes_cm_core *cm_core;
37262 u8 *start_buff;
37263
37264- atomic_inc(&cm_rejects);
37265+ atomic_inc_unchecked(&cm_rejects);
37266 cm_node = (struct nes_cm_node *)cm_id->provider_data;
37267 loopback = cm_node->loopbackpartner;
37268 cm_core = cm_node->cm_core;
37269@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37270 ntohl(cm_id->local_addr.sin_addr.s_addr),
37271 ntohs(cm_id->local_addr.sin_port));
37272
37273- atomic_inc(&cm_connects);
37274+ atomic_inc_unchecked(&cm_connects);
37275 nesqp->active_conn = 1;
37276
37277 /* cache the cm_id in the qp */
37278@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
37279 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
37280 return err;
37281 }
37282- atomic_inc(&cm_listens_created);
37283+ atomic_inc_unchecked(&cm_listens_created);
37284 }
37285
37286 cm_id->add_ref(cm_id);
37287@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
37288
37289 if (nesqp->destroyed)
37290 return;
37291- atomic_inc(&cm_connecteds);
37292+ atomic_inc_unchecked(&cm_connecteds);
37293 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
37294 " local port 0x%04X. jiffies = %lu.\n",
37295 nesqp->hwqp.qp_id,
37296@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
37297
37298 cm_id->add_ref(cm_id);
37299 ret = cm_id->event_handler(cm_id, &cm_event);
37300- atomic_inc(&cm_closes);
37301+ atomic_inc_unchecked(&cm_closes);
37302 cm_event.event = IW_CM_EVENT_CLOSE;
37303 cm_event.status = 0;
37304 cm_event.provider_data = cm_id->provider_data;
37305@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
37306 return;
37307 cm_id = cm_node->cm_id;
37308
37309- atomic_inc(&cm_connect_reqs);
37310+ atomic_inc_unchecked(&cm_connect_reqs);
37311 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37312 cm_node, cm_id, jiffies);
37313
37314@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
37315 return;
37316 cm_id = cm_node->cm_id;
37317
37318- atomic_inc(&cm_connect_reqs);
37319+ atomic_inc_unchecked(&cm_connect_reqs);
37320 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37321 cm_node, cm_id, jiffies);
37322
37323diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
37324index 4166452..fc952c3 100644
37325--- a/drivers/infiniband/hw/nes/nes_mgt.c
37326+++ b/drivers/infiniband/hw/nes/nes_mgt.c
37327@@ -40,8 +40,8 @@
37328 #include "nes.h"
37329 #include "nes_mgt.h"
37330
37331-atomic_t pau_qps_created;
37332-atomic_t pau_qps_destroyed;
37333+atomic_unchecked_t pau_qps_created;
37334+atomic_unchecked_t pau_qps_destroyed;
37335
37336 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
37337 {
37338@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
37339 {
37340 struct sk_buff *skb;
37341 unsigned long flags;
37342- atomic_inc(&pau_qps_destroyed);
37343+ atomic_inc_unchecked(&pau_qps_destroyed);
37344
37345 /* Free packets that have not yet been forwarded */
37346 /* Lock is acquired by skb_dequeue when removing the skb */
37347@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
37348 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
37349 skb_queue_head_init(&nesqp->pau_list);
37350 spin_lock_init(&nesqp->pau_lock);
37351- atomic_inc(&pau_qps_created);
37352+ atomic_inc_unchecked(&pau_qps_created);
37353 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37354 }
37355
37356diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37357index 9542e16..a008c40 100644
37358--- a/drivers/infiniband/hw/nes/nes_nic.c
37359+++ b/drivers/infiniband/hw/nes/nes_nic.c
37360@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37361 target_stat_values[++index] = mh_detected;
37362 target_stat_values[++index] = mh_pauses_sent;
37363 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37364- target_stat_values[++index] = atomic_read(&cm_connects);
37365- target_stat_values[++index] = atomic_read(&cm_accepts);
37366- target_stat_values[++index] = atomic_read(&cm_disconnects);
37367- target_stat_values[++index] = atomic_read(&cm_connecteds);
37368- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37369- target_stat_values[++index] = atomic_read(&cm_rejects);
37370- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37371- target_stat_values[++index] = atomic_read(&qps_created);
37372- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37373- target_stat_values[++index] = atomic_read(&qps_destroyed);
37374- target_stat_values[++index] = atomic_read(&cm_closes);
37375+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37376+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37377+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37378+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37379+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37380+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37381+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37382+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37383+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37384+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37385+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37386 target_stat_values[++index] = cm_packets_sent;
37387 target_stat_values[++index] = cm_packets_bounced;
37388 target_stat_values[++index] = cm_packets_created;
37389 target_stat_values[++index] = cm_packets_received;
37390 target_stat_values[++index] = cm_packets_dropped;
37391 target_stat_values[++index] = cm_packets_retrans;
37392- target_stat_values[++index] = atomic_read(&cm_listens_created);
37393- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37394+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37395+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37396 target_stat_values[++index] = cm_backlog_drops;
37397- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37398- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37399- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37400- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37401- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37402+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37403+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37404+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37405+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37406+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37407 target_stat_values[++index] = nesadapter->free_4kpbl;
37408 target_stat_values[++index] = nesadapter->free_256pbl;
37409 target_stat_values[++index] = int_mod_timer_init;
37410 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37411 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37412 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37413- target_stat_values[++index] = atomic_read(&pau_qps_created);
37414- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37415+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37416+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37417 }
37418
37419 /**
37420diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37421index 07e4fba..685f041 100644
37422--- a/drivers/infiniband/hw/nes/nes_verbs.c
37423+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37424@@ -46,9 +46,9 @@
37425
37426 #include <rdma/ib_umem.h>
37427
37428-atomic_t mod_qp_timouts;
37429-atomic_t qps_created;
37430-atomic_t sw_qps_destroyed;
37431+atomic_unchecked_t mod_qp_timouts;
37432+atomic_unchecked_t qps_created;
37433+atomic_unchecked_t sw_qps_destroyed;
37434
37435 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37436
37437@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37438 if (init_attr->create_flags)
37439 return ERR_PTR(-EINVAL);
37440
37441- atomic_inc(&qps_created);
37442+ atomic_inc_unchecked(&qps_created);
37443 switch (init_attr->qp_type) {
37444 case IB_QPT_RC:
37445 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37446@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37447 struct iw_cm_event cm_event;
37448 int ret = 0;
37449
37450- atomic_inc(&sw_qps_destroyed);
37451+ atomic_inc_unchecked(&sw_qps_destroyed);
37452 nesqp->destroyed = 1;
37453
37454 /* Blow away the connection if it exists. */
37455diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37456index 4d11575..3e890e5 100644
37457--- a/drivers/infiniband/hw/qib/qib.h
37458+++ b/drivers/infiniband/hw/qib/qib.h
37459@@ -51,6 +51,7 @@
37460 #include <linux/completion.h>
37461 #include <linux/kref.h>
37462 #include <linux/sched.h>
37463+#include <linux/slab.h>
37464
37465 #include "qib_common.h"
37466 #include "qib_verbs.h"
37467diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37468index da739d9..da1c7f4 100644
37469--- a/drivers/input/gameport/gameport.c
37470+++ b/drivers/input/gameport/gameport.c
37471@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37472 */
37473 static void gameport_init_port(struct gameport *gameport)
37474 {
37475- static atomic_t gameport_no = ATOMIC_INIT(0);
37476+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37477
37478 __module_get(THIS_MODULE);
37479
37480 mutex_init(&gameport->drv_mutex);
37481 device_initialize(&gameport->dev);
37482 dev_set_name(&gameport->dev, "gameport%lu",
37483- (unsigned long)atomic_inc_return(&gameport_no) - 1);
37484+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37485 gameport->dev.bus = &gameport_bus;
37486 gameport->dev.release = gameport_release_port;
37487 if (gameport->parent)
37488diff --git a/drivers/input/input.c b/drivers/input/input.c
37489index c044699..174d71a 100644
37490--- a/drivers/input/input.c
37491+++ b/drivers/input/input.c
37492@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37493 */
37494 int input_register_device(struct input_dev *dev)
37495 {
37496- static atomic_t input_no = ATOMIC_INIT(0);
37497+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37498 struct input_devres *devres = NULL;
37499 struct input_handler *handler;
37500 unsigned int packet_size;
37501@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37502 dev->setkeycode = input_default_setkeycode;
37503
37504 dev_set_name(&dev->dev, "input%ld",
37505- (unsigned long) atomic_inc_return(&input_no) - 1);
37506+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37507
37508 error = device_add(&dev->dev);
37509 if (error)
37510diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37511index 04c69af..5f92d00 100644
37512--- a/drivers/input/joystick/sidewinder.c
37513+++ b/drivers/input/joystick/sidewinder.c
37514@@ -30,6 +30,7 @@
37515 #include <linux/kernel.h>
37516 #include <linux/module.h>
37517 #include <linux/slab.h>
37518+#include <linux/sched.h>
37519 #include <linux/init.h>
37520 #include <linux/input.h>
37521 #include <linux/gameport.h>
37522diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37523index d6cbfe9..6225402 100644
37524--- a/drivers/input/joystick/xpad.c
37525+++ b/drivers/input/joystick/xpad.c
37526@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37527
37528 static int xpad_led_probe(struct usb_xpad *xpad)
37529 {
37530- static atomic_t led_seq = ATOMIC_INIT(0);
37531+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37532 long led_no;
37533 struct xpad_led *led;
37534 struct led_classdev *led_cdev;
37535@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37536 if (!led)
37537 return -ENOMEM;
37538
37539- led_no = (long)atomic_inc_return(&led_seq) - 1;
37540+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37541
37542 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37543 led->xpad = xpad;
37544diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37545index fe1df23..5b710f3 100644
37546--- a/drivers/input/mouse/psmouse.h
37547+++ b/drivers/input/mouse/psmouse.h
37548@@ -115,7 +115,7 @@ struct psmouse_attribute {
37549 ssize_t (*set)(struct psmouse *psmouse, void *data,
37550 const char *buf, size_t count);
37551 bool protect;
37552-};
37553+} __do_const;
37554 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37555
37556 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37557diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37558index 4c842c3..590b0bf 100644
37559--- a/drivers/input/mousedev.c
37560+++ b/drivers/input/mousedev.c
37561@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37562
37563 spin_unlock_irq(&client->packet_lock);
37564
37565- if (copy_to_user(buffer, data, count))
37566+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
37567 return -EFAULT;
37568
37569 return count;
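The mousedev change puts an explicit bound check (`count > sizeof(data)`) in front of `copy_to_user()` from a fixed on-stack buffer; the ISDN drivers below (avm/b1.c, icn.c) get the mirror-image guard ahead of `copy_from_user()`. The point is to make the relation between the user-supplied length and the kernel buffer size checked unconditionally at the copy site rather than trusted from earlier logic. A userspace model, with memcpy standing in for the uaccess helpers:

    #include <stdio.h>
    #include <string.h>

    /* memcpy stands in for copy_to_user(); 0 means success, matching
     * the kernel helper, which returns the bytes NOT copied. */
    static unsigned long copy_out_model(char *ubuf, const char *kbuf, size_t n)
    {
            memcpy(ubuf, kbuf, n);
            return 0;
    }

    static long read_model(char *ubuf, size_t count)
    {
            char data[8] = "payload";

            /* Hardened form: refuse lengths beyond the kernel buffer
             * before copying anything. */
            if (count > sizeof(data) || copy_out_model(ubuf, data, count))
                    return -14;     /* -EFAULT */
            return (long)count;
    }

    int main(void)
    {
            char ubuf[64];
            printf("%ld\n", read_model(ubuf, sizeof(ubuf)));  /* -14 */
            printf("%ld\n", read_model(ubuf, 8));             /* 8 */
            return 0;
    }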
37570diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37571index 25fc597..558bf3b 100644
37572--- a/drivers/input/serio/serio.c
37573+++ b/drivers/input/serio/serio.c
37574@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37575 */
37576 static void serio_init_port(struct serio *serio)
37577 {
37578- static atomic_t serio_no = ATOMIC_INIT(0);
37579+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37580
37581 __module_get(THIS_MODULE);
37582
37583@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37584 mutex_init(&serio->drv_mutex);
37585 device_initialize(&serio->dev);
37586 dev_set_name(&serio->dev, "serio%ld",
37587- (long)atomic_inc_return(&serio_no) - 1);
37588+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37589 serio->dev.bus = &serio_bus;
37590 serio->dev.release = serio_release_port;
37591 serio->dev.groups = serio_device_attr_groups;
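serio's device numbering, like gameport, input and xpad earlier, hands out a monotonically increasing ID with `atomic_inc_return(&no) - 1`; an allocator like this only ever grows, so it is exactly the kind of counter the patch exempts from overflow trapping via the `_unchecked` variant. The idiom itself:

    #include <stdio.h>

    /* Fetch-and-add ID allocation: each caller receives the old value,
     * so numbering starts at 0.  Same effect as inc_return(&no) - 1. */
    static long alloc_id(long *next)
    {
            return __atomic_fetch_add(next, 1, __ATOMIC_RELAXED);
    }

    int main(void)
    {
            static long serio_no;   /* model of the static counter */
            printf("serio%ld\n", alloc_id(&serio_no));
            printf("serio%ld\n", alloc_id(&serio_no));
            return 0;
    }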
37592diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37593index ddbdaca..be18a78 100644
37594--- a/drivers/iommu/iommu.c
37595+++ b/drivers/iommu/iommu.c
37596@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
37597 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37598 {
37599 bus_register_notifier(bus, &iommu_bus_nb);
37600- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37601+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37602 }
37603
37604 /**
37605diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37606index 89562a8..218999b 100644
37607--- a/drivers/isdn/capi/capi.c
37608+++ b/drivers/isdn/capi/capi.c
37609@@ -81,8 +81,8 @@ struct capiminor {
37610
37611 struct capi20_appl *ap;
37612 u32 ncci;
37613- atomic_t datahandle;
37614- atomic_t msgid;
37615+ atomic_unchecked_t datahandle;
37616+ atomic_unchecked_t msgid;
37617
37618 struct tty_port port;
37619 int ttyinstop;
37620@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
37621 capimsg_setu16(s, 2, mp->ap->applid);
37622 capimsg_setu8 (s, 4, CAPI_DATA_B3);
37623 capimsg_setu8 (s, 5, CAPI_RESP);
37624- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
37625+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
37626 capimsg_setu32(s, 8, mp->ncci);
37627 capimsg_setu16(s, 12, datahandle);
37628 }
37629@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
37630 mp->outbytes -= len;
37631 spin_unlock_bh(&mp->outlock);
37632
37633- datahandle = atomic_inc_return(&mp->datahandle);
37634+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
37635 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
37636 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37637 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37638 capimsg_setu16(skb->data, 2, mp->ap->applid);
37639 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
37640 capimsg_setu8 (skb->data, 5, CAPI_REQ);
37641- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
37642+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
37643 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
37644 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
37645 capimsg_setu16(skb->data, 16, len); /* Data length */
37646diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37647index 67abf3f..076b3a6 100644
37648--- a/drivers/isdn/gigaset/interface.c
37649+++ b/drivers/isdn/gigaset/interface.c
37650@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37651 }
37652 tty->driver_data = cs;
37653
37654- ++cs->port.count;
37655+ atomic_inc(&cs->port.count);
37656
37657- if (cs->port.count == 1) {
37658+ if (atomic_read(&cs->port.count) == 1) {
37659 tty_port_tty_set(&cs->port, tty);
37660 tty->low_latency = 1;
37661 }
37662@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37663
37664 if (!cs->connected)
37665 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37666- else if (!cs->port.count)
37667+ else if (!atomic_read(&cs->port.count))
37668 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37669- else if (!--cs->port.count)
37670+ else if (!atomic_dec_return(&cs->port.count))
37671 tty_port_tty_set(&cs->port, NULL);
37672
37673 mutex_unlock(&cs->mutex);
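The gigaset hunks (and the larger isdn_tty.c conversion below) change the tty port's open count from a plain int into an atomic counter: `++count` becomes `atomic_inc()`, the `!--count` last-close test becomes `!atomic_dec_return()`, and direct reads and assignments go through `atomic_read()`/`atomic_set()`. The first-open/last-close pairing that must survive the conversion, sketched with GCC atomics:

    #include <stdio.h>

    static int port_count;  /* model of the tty_port count as atomic */

    static void port_open(void)
    {
            if (__atomic_add_fetch(&port_count, 1, __ATOMIC_SEQ_CST) == 1)
                    printf("first open: attach tty\n");
    }

    static void port_close(void)
    {
            if (__atomic_sub_fetch(&port_count, 1, __ATOMIC_SEQ_CST) == 0)
                    printf("last close: detach tty\n");
    }

    int main(void)
    {
            port_open();
            port_open();    /* nested open: no message */
            port_close();
            port_close();
            return 0;
    }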
37674diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37675index 821f7ac..28d4030 100644
37676--- a/drivers/isdn/hardware/avm/b1.c
37677+++ b/drivers/isdn/hardware/avm/b1.c
37678@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
37679 }
37680 if (left) {
37681 if (t4file->user) {
37682- if (copy_from_user(buf, dp, left))
37683+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37684 return -EFAULT;
37685 } else {
37686 memcpy(buf, dp, left);
37687@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
37688 }
37689 if (left) {
37690 if (config->user) {
37691- if (copy_from_user(buf, dp, left))
37692+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37693 return -EFAULT;
37694 } else {
37695 memcpy(buf, dp, left);
37696diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
37697index e09dc8a..15e2efb 100644
37698--- a/drivers/isdn/i4l/isdn_tty.c
37699+++ b/drivers/isdn/i4l/isdn_tty.c
37700@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
37701
37702 #ifdef ISDN_DEBUG_MODEM_OPEN
37703 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
37704- port->count);
37705+ atomic_read(&port->count));
37706 #endif
37707- port->count++;
37708+ atomic_inc(&port->count);
37709 port->tty = tty;
37710 /*
37711 * Start up serial port
37712@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37713 #endif
37714 return;
37715 }
37716- if ((tty->count == 1) && (port->count != 1)) {
37717+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
37718 /*
37719 * Uh, oh. tty->count is 1, which means that the tty
37720 * structure will be freed. Info->count should always
37721@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37722 * serial port won't be shutdown.
37723 */
37724 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
37725- "info->count is %d\n", port->count);
37726- port->count = 1;
37727+ "info->count is %d\n", atomic_read(&port->count));
37728+ atomic_set(&port->count, 1);
37729 }
37730- if (--port->count < 0) {
37731+ if (atomic_dec_return(&port->count) < 0) {
37732 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
37733- info->line, port->count);
37734- port->count = 0;
37735+ info->line, atomic_read(&port->count));
37736+ atomic_set(&port->count, 0);
37737 }
37738- if (port->count) {
37739+ if (atomic_read(&port->count)) {
37740 #ifdef ISDN_DEBUG_MODEM_OPEN
37741 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37742 #endif
37743@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37744 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37745 return;
37746 isdn_tty_shutdown(info);
37747- port->count = 0;
37748+ atomic_set(&port->count, 0);
37749 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37750 port->tty = NULL;
37751 wake_up_interruptible(&port->open_wait);
37752@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37753 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37754 modem_info *info = &dev->mdm.info[i];
37755
37756- if (info->port.count == 0)
37757+ if (atomic_read(&info->port.count) == 0)
37758 continue;
37759 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37760 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
37761diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37762index e74df7c..03a03ba 100644
37763--- a/drivers/isdn/icn/icn.c
37764+++ b/drivers/isdn/icn/icn.c
37765@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37766 if (count > len)
37767 count = len;
37768 if (user) {
37769- if (copy_from_user(msg, buf, count))
37770+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37771 return -EFAULT;
37772 } else
37773 memcpy(msg, buf, count);
37774diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
37775index 6a8405d..0bd1c7e 100644
37776--- a/drivers/leds/leds-clevo-mail.c
37777+++ b/drivers/leds/leds-clevo-mail.c
37778@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
37779 * detected as working, but in reality it is not) as low as
37780 * possible.
37781 */
37782-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
37783+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
37784 {
37785 .callback = clevo_mail_led_dmi_callback,
37786 .ident = "Clevo D410J",
37787diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
37788index ec9b287..65c9bf4 100644
37789--- a/drivers/leds/leds-ss4200.c
37790+++ b/drivers/leds/leds-ss4200.c
37791@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
37792 * detected as working, but in reality it is not) as low as
37793 * possible.
37794 */
37795-static struct dmi_system_id __initdata nas_led_whitelist[] = {
37796+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
37797 {
37798 .callback = ss4200_led_dmi_callback,
37799 .ident = "Intel SS4200-E",
37800diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37801index a5ebc00..982886f 100644
37802--- a/drivers/lguest/core.c
37803+++ b/drivers/lguest/core.c
37804@@ -92,9 +92,17 @@ static __init int map_switcher(void)
37805 * it's worked so far. The end address needs +1 because __get_vm_area
37806 * allocates an extra guard page, so we need space for that.
37807 */
37808+
37809+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37810+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37811+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37812+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37813+#else
37814 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37815 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37816 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37817+#endif
37818+
37819 if (!switcher_vma) {
37820 err = -ENOMEM;
37821 printk("lguest: could not map switcher pages high\n");
37822@@ -119,7 +127,7 @@ static __init int map_switcher(void)
37823 * Now the Switcher is mapped at the right address, we can't fail!
37824 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
37825 */
37826- memcpy(switcher_vma->addr, start_switcher_text,
37827+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37828 end_switcher_text - start_switcher_text);
37829
37830 printk(KERN_INFO "lguest: mapped switcher at %p\n",
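The lguest changes are driven by PaX KERNEXEC on i386: kernel text becomes read-only, executable mappings can sit at a different virtual address than the writable/linear view, so the high switcher mapping must be requested with `VM_KERNEXEC` and `start_switcher_text` translated through `ktla_ktva()` before being used as a copy source. The later x86/core.c hunks apply the same translation to the IDT entries and the far-call target, and read CR3 directly when PaX's per-CPU page directories are in use. A toy model of the address translation; the direction and the 16 MiB delta are illustrative only, not PaX's real arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Model: the executable view of kernel text sits at a fixed offset
     * from the linear view; ktla_ktva()/ktva_ktla() convert between
     * them.  Offset value invented for the demo. */
    #define KERNEXEC_DELTA_MODEL 0x1000000UL

    static uintptr_t ktla_ktva_model(uintptr_t a) { return a + KERNEXEC_DELTA_MODEL; }
    static uintptr_t ktva_ktla_model(uintptr_t a) { return a - KERNEXEC_DELTA_MODEL; }

    int main(void)
    {
            uintptr_t start_switcher_text = 0xc1000000UL;  /* made up */
            uintptr_t src = ktla_ktva_model(start_switcher_text);

            printf("copy source %#lx, round trip ok: %d\n",
                   (unsigned long)src,
                   ktva_ktla_model(src) == start_switcher_text);
            return 0;
    }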
37831diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
37832index 3b62be16..e33134a 100644
37833--- a/drivers/lguest/page_tables.c
37834+++ b/drivers/lguest/page_tables.c
37835@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
37836 /*:*/
37837
37838 #ifdef CONFIG_X86_PAE
37839-static void release_pmd(pmd_t *spmd)
37840+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
37841 {
37842 /* If the entry's not present, there's nothing to release. */
37843 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
37844diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37845index 4af12e1..0e89afe 100644
37846--- a/drivers/lguest/x86/core.c
37847+++ b/drivers/lguest/x86/core.c
37848@@ -59,7 +59,7 @@ static struct {
37849 /* Offset from where switcher.S was compiled to where we've copied it */
37850 static unsigned long switcher_offset(void)
37851 {
37852- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37853+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37854 }
37855
37856 /* This cpu's struct lguest_pages. */
37857@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37858 * These copies are pretty cheap, so we do them unconditionally: */
37859 /* Save the current Host top-level page directory.
37860 */
37861+
37862+#ifdef CONFIG_PAX_PER_CPU_PGD
37863+ pages->state.host_cr3 = read_cr3();
37864+#else
37865 pages->state.host_cr3 = __pa(current->mm->pgd);
37866+#endif
37867+
37868 /*
37869 * Set up the Guest's page tables to see this CPU's pages (and no
37870 * other CPU's pages).
37871@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
37872 * compiled-in switcher code and the high-mapped copy we just made.
37873 */
37874 for (i = 0; i < IDT_ENTRIES; i++)
37875- default_idt_entries[i] += switcher_offset();
37876+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37877
37878 /*
37879 * Set up the Switcher's per-cpu areas.
37880@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
37881 * it will be undisturbed when we switch. To change %cs and jump we
37882 * need this structure to feed to Intel's "lcall" instruction.
37883 */
37884- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37885+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37886 lguest_entry.segment = LGUEST_CS;
37887
37888 /*
37889diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37890index 40634b0..4f5855e 100644
37891--- a/drivers/lguest/x86/switcher_32.S
37892+++ b/drivers/lguest/x86/switcher_32.S
37893@@ -87,6 +87,7 @@
37894 #include <asm/page.h>
37895 #include <asm/segment.h>
37896 #include <asm/lguest.h>
37897+#include <asm/processor-flags.h>
37898
37899 // We mark the start of the code to copy
37900 // It's placed in .text tho it's never run here
37901@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37902 // Changes type when we load it: damn Intel!
37903 // For after we switch over our page tables
37904 // That entry will be read-only: we'd crash.
37905+
37906+#ifdef CONFIG_PAX_KERNEXEC
37907+ mov %cr0, %edx
37908+ xor $X86_CR0_WP, %edx
37909+ mov %edx, %cr0
37910+#endif
37911+
37912 movl $(GDT_ENTRY_TSS*8), %edx
37913 ltr %dx
37914
37915@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37916 // Let's clear it again for our return.
37917 // The GDT descriptor of the Host
37918 // Points to the table after two "size" bytes
37919- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37920+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37921 // Clear "used" from type field (byte 5, bit 2)
37922- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37923+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37924+
37925+#ifdef CONFIG_PAX_KERNEXEC
37926+ mov %cr0, %eax
37927+ xor $X86_CR0_WP, %eax
37928+ mov %eax, %cr0
37929+#endif
37930
37931 // Once our page table's switched, the Guest is live!
37932 // The Host fades as we run this final step.
37933@@ -295,13 +309,12 @@ deliver_to_host:
37934 // I consulted gcc, and it gave
37935 // These instructions, which I gladly credit:
37936 leal (%edx,%ebx,8), %eax
37937- movzwl (%eax),%edx
37938- movl 4(%eax), %eax
37939- xorw %ax, %ax
37940- orl %eax, %edx
37941+ movl 4(%eax), %edx
37942+ movw (%eax), %dx
37943 // Now the address of the handler's in %edx
37944 // We call it now: its "iret" drops us home.
37945- jmp *%edx
37946+ ljmp $__KERNEL_CS, $1f
37947+1: jmp *%edx
37948
37949 // Every interrupt can come to us here
37950 // But we must truly tell each apart.
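The two KERNEXEC blocks added to switcher_32.S above bracket a write to a GDT entry that is read-only under KERNEXEC: CR0.WP is cleared with an XOR so the store to the TSS descriptor is permitted, then XOR-ed again afterwards to restore write protection. The same toggle rendered in C, with the CR0 accessors stubbed out for illustration (the in-kernel versions are privileged instructions):

#define X86_CR0_WP (1UL << 16)

static unsigned long fake_cr0 = X86_CR0_WP;     /* stand-in register */
static unsigned long read_cr0(void) { return fake_cr0; }
static void write_cr0(unsigned long v) { fake_cr0 = v; }

static void write_protected_byte(unsigned char *p, unsigned char v)
{
        write_cr0(read_cr0() ^ X86_CR0_WP);  /* clear WP: RO pages writable */
        *p = v;                              /* the protected write */
        write_cr0(read_cr0() ^ X86_CR0_WP);  /* second XOR restores WP */
}

int main(void)
{
        unsigned char b = 0;
        write_protected_byte(&b, 1);
        return b == 1 ? 0 : 1;
}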
37951diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
37952index 7155945..4bcc562 100644
37953--- a/drivers/md/bitmap.c
37954+++ b/drivers/md/bitmap.c
37955@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
37956 chunk_kb ? "KB" : "B");
37957 if (bitmap->storage.file) {
37958 seq_printf(seq, ", file: ");
37959- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
37960+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
37961 }
37962
37963 seq_printf(seq, "\n");
37964diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37965index eee353d..74504c4 100644
37966--- a/drivers/md/dm-ioctl.c
37967+++ b/drivers/md/dm-ioctl.c
37968@@ -1632,7 +1632,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37969 cmd == DM_LIST_VERSIONS_CMD)
37970 return 0;
37971
37972- if ((cmd == DM_DEV_CREATE_CMD)) {
37973+ if (cmd == DM_DEV_CREATE_CMD) {
37974 if (!*param->name) {
37975 DMWARN("name not supplied when creating device");
37976 return -EINVAL;
37977diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37978index 7f24190..0e18099 100644
37979--- a/drivers/md/dm-raid1.c
37980+++ b/drivers/md/dm-raid1.c
37981@@ -40,7 +40,7 @@ enum dm_raid1_error {
37982
37983 struct mirror {
37984 struct mirror_set *ms;
37985- atomic_t error_count;
37986+ atomic_unchecked_t error_count;
37987 unsigned long error_type;
37988 struct dm_dev *dev;
37989 sector_t offset;
37990@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
37991 struct mirror *m;
37992
37993 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
37994- if (!atomic_read(&m->error_count))
37995+ if (!atomic_read_unchecked(&m->error_count))
37996 return m;
37997
37998 return NULL;
37999@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
38000 * simple way to tell if a device has encountered
38001 * errors.
38002 */
38003- atomic_inc(&m->error_count);
38004+ atomic_inc_unchecked(&m->error_count);
38005
38006 if (test_and_set_bit(error_type, &m->error_type))
38007 return;
38008@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
38009 struct mirror *m = get_default_mirror(ms);
38010
38011 do {
38012- if (likely(!atomic_read(&m->error_count)))
38013+ if (likely(!atomic_read_unchecked(&m->error_count)))
38014 return m;
38015
38016 if (m-- == ms->mirror)
38017@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
38018 {
38019 struct mirror *default_mirror = get_default_mirror(m->ms);
38020
38021- return !atomic_read(&default_mirror->error_count);
38022+ return !atomic_read_unchecked(&default_mirror->error_count);
38023 }
38024
38025 static int mirror_available(struct mirror_set *ms, struct bio *bio)
38026@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
38027 */
38028 if (likely(region_in_sync(ms, region, 1)))
38029 m = choose_mirror(ms, bio->bi_sector);
38030- else if (m && atomic_read(&m->error_count))
38031+ else if (m && atomic_read_unchecked(&m->error_count))
38032 m = NULL;
38033
38034 if (likely(m))
38035@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
38036 }
38037
38038 ms->mirror[mirror].ms = ms;
38039- atomic_set(&(ms->mirror[mirror].error_count), 0);
38040+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
38041 ms->mirror[mirror].error_type = 0;
38042 ms->mirror[mirror].offset = offset;
38043
38044@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
38045 */
38046 static char device_status_char(struct mirror *m)
38047 {
38048- if (!atomic_read(&(m->error_count)))
38049+ if (!atomic_read_unchecked(&(m->error_count)))
38050 return 'A';
38051
38052 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
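The dm-raid1 conversion above is one instance of a pattern repeated throughout this patch: counters that are plain statistics rather than object reference counts (error_count here; the md event and read-error counters and the GRU statistics below) move from atomic_t to atomic_unchecked_t, the PaX REFCOUNT companion type whose operations are exempt from overflow detection, so a counter that may legitimately saturate does not trigger a false refcount-overflow report. A userspace model of the distinction, illustrative only since the real type and accessors come from PaX:

#include <stdatomic.h>
#include <stdio.h>

/* Same storage as a checked counter, but by convention exempt from
 * reference-count overflow checking. */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        atomic_fetch_add(&v->counter, 1);   /* may wrap; acceptable for stats */
}

static int atomic_read_unchecked(atomic_unchecked_t *v)
{
        return atomic_load(&v->counter);
}

int main(void)
{
        atomic_unchecked_t error_count = { 0 };
        atomic_inc_unchecked(&error_count);
        printf("errors: %d\n", atomic_read_unchecked(&error_count));
        return 0;
}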
38053diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
38054index aaecefa..23b3026 100644
38055--- a/drivers/md/dm-stripe.c
38056+++ b/drivers/md/dm-stripe.c
38057@@ -20,7 +20,7 @@ struct stripe {
38058 struct dm_dev *dev;
38059 sector_t physical_start;
38060
38061- atomic_t error_count;
38062+ atomic_unchecked_t error_count;
38063 };
38064
38065 struct stripe_c {
38066@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38067 kfree(sc);
38068 return r;
38069 }
38070- atomic_set(&(sc->stripe[i].error_count), 0);
38071+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
38072 }
38073
38074 ti->private = sc;
38075@@ -325,7 +325,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
38076 DMEMIT("%d ", sc->stripes);
38077 for (i = 0; i < sc->stripes; i++) {
38078 DMEMIT("%s ", sc->stripe[i].dev->name);
38079- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
38080+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
38081 'D' : 'A';
38082 }
38083 buffer[i] = '\0';
38084@@ -370,8 +370,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
38085 */
38086 for (i = 0; i < sc->stripes; i++)
38087 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
38088- atomic_inc(&(sc->stripe[i].error_count));
38089- if (atomic_read(&(sc->stripe[i].error_count)) <
38090+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
38091+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
38092 DM_IO_ERROR_THRESHOLD)
38093 schedule_work(&sc->trigger_event);
38094 }
38095diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
38096index daf25d0..d74f49f 100644
38097--- a/drivers/md/dm-table.c
38098+++ b/drivers/md/dm-table.c
38099@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
38100 if (!dev_size)
38101 return 0;
38102
38103- if ((start >= dev_size) || (start + len > dev_size)) {
38104+ if ((start >= dev_size) || (len > dev_size - start)) {
38105 DMWARN("%s: %s too small for target: "
38106 "start=%llu, len=%llu, dev_size=%llu",
38107 dm_device_name(ti->table->md), bdevname(bdev, b),
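The dm-table change above looks cosmetic but is an overflow fix: (start + len > dev_size) can wrap when start and len are both large 64-bit sector counts, letting an out-of-range target pass validation, whereas (len > dev_size - start) keeps the arithmetic on the side already proven in range by the preceding start >= dev_size test. A small worked example with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when [start, start + len) does NOT fit in dev_size sectors. */
static int range_is_invalid(uint64_t start, uint64_t len, uint64_t dev_size)
{
        /* start >= dev_size is tested first, so dev_size - start
         * below cannot underflow. */
        return start >= dev_size || len > dev_size - start;
}

int main(void)
{
        uint64_t dev = 1000;

        /* start + len wraps to 4, so the naive test would accept this: */
        printf("%d\n", range_is_invalid(10, UINT64_MAX - 5, dev)); /* 1 */
        printf("%d\n", range_is_invalid(10, 990, dev));            /* 0 */
        return 0;
}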
38108diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
38109index 4d6e853..a234157 100644
38110--- a/drivers/md/dm-thin-metadata.c
38111+++ b/drivers/md/dm-thin-metadata.c
38112@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38113 {
38114 pmd->info.tm = pmd->tm;
38115 pmd->info.levels = 2;
38116- pmd->info.value_type.context = pmd->data_sm;
38117+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38118 pmd->info.value_type.size = sizeof(__le64);
38119 pmd->info.value_type.inc = data_block_inc;
38120 pmd->info.value_type.dec = data_block_dec;
38121@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38122
38123 pmd->bl_info.tm = pmd->tm;
38124 pmd->bl_info.levels = 1;
38125- pmd->bl_info.value_type.context = pmd->data_sm;
38126+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38127 pmd->bl_info.value_type.size = sizeof(__le64);
38128 pmd->bl_info.value_type.inc = data_block_inc;
38129 pmd->bl_info.value_type.dec = data_block_dec;
38130diff --git a/drivers/md/dm.c b/drivers/md/dm.c
38131index 0d8f086..f5a91d5 100644
38132--- a/drivers/md/dm.c
38133+++ b/drivers/md/dm.c
38134@@ -170,9 +170,9 @@ struct mapped_device {
38135 /*
38136 * Event handling.
38137 */
38138- atomic_t event_nr;
38139+ atomic_unchecked_t event_nr;
38140 wait_queue_head_t eventq;
38141- atomic_t uevent_seq;
38142+ atomic_unchecked_t uevent_seq;
38143 struct list_head uevent_list;
38144 spinlock_t uevent_lock; /* Protect access to uevent_list */
38145
38146@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
38147 rwlock_init(&md->map_lock);
38148 atomic_set(&md->holders, 1);
38149 atomic_set(&md->open_count, 0);
38150- atomic_set(&md->event_nr, 0);
38151- atomic_set(&md->uevent_seq, 0);
38152+ atomic_set_unchecked(&md->event_nr, 0);
38153+ atomic_set_unchecked(&md->uevent_seq, 0);
38154 INIT_LIST_HEAD(&md->uevent_list);
38155 spin_lock_init(&md->uevent_lock);
38156
38157@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
38158
38159 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
38160
38161- atomic_inc(&md->event_nr);
38162+ atomic_inc_unchecked(&md->event_nr);
38163 wake_up(&md->eventq);
38164 }
38165
38166@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
38167
38168 uint32_t dm_next_uevent_seq(struct mapped_device *md)
38169 {
38170- return atomic_add_return(1, &md->uevent_seq);
38171+ return atomic_add_return_unchecked(1, &md->uevent_seq);
38172 }
38173
38174 uint32_t dm_get_event_nr(struct mapped_device *md)
38175 {
38176- return atomic_read(&md->event_nr);
38177+ return atomic_read_unchecked(&md->event_nr);
38178 }
38179
38180 int dm_wait_event(struct mapped_device *md, int event_nr)
38181 {
38182 return wait_event_interruptible(md->eventq,
38183- (event_nr != atomic_read(&md->event_nr)));
38184+ (event_nr != atomic_read_unchecked(&md->event_nr)));
38185 }
38186
38187 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
38188diff --git a/drivers/md/md.c b/drivers/md/md.c
38189index f363135..9b38815 100644
38190--- a/drivers/md/md.c
38191+++ b/drivers/md/md.c
38192@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
38193 * start build, activate spare
38194 */
38195 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
38196-static atomic_t md_event_count;
38197+static atomic_unchecked_t md_event_count;
38198 void md_new_event(struct mddev *mddev)
38199 {
38200- atomic_inc(&md_event_count);
38201+ atomic_inc_unchecked(&md_event_count);
38202 wake_up(&md_event_waiters);
38203 }
38204 EXPORT_SYMBOL_GPL(md_new_event);
38205@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38206 */
38207 static void md_new_event_inintr(struct mddev *mddev)
38208 {
38209- atomic_inc(&md_event_count);
38210+ atomic_inc_unchecked(&md_event_count);
38211 wake_up(&md_event_waiters);
38212 }
38213
38214@@ -1507,7 +1507,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
38215 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
38216 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
38217 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
38218- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38219+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38220
38221 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38222 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38223@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
38224 else
38225 sb->resync_offset = cpu_to_le64(0);
38226
38227- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38228+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38229
38230 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38231 sb->size = cpu_to_le64(mddev->dev_sectors);
38232@@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38233 static ssize_t
38234 errors_show(struct md_rdev *rdev, char *page)
38235 {
38236- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38237+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38238 }
38239
38240 static ssize_t
38241@@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
38242 char *e;
38243 unsigned long n = simple_strtoul(buf, &e, 10);
38244 if (*buf && (*e == 0 || *e == '\n')) {
38245- atomic_set(&rdev->corrected_errors, n);
38246+ atomic_set_unchecked(&rdev->corrected_errors, n);
38247 return len;
38248 }
38249 return -EINVAL;
38250@@ -3210,8 +3210,8 @@ int md_rdev_init(struct md_rdev *rdev)
38251 rdev->sb_loaded = 0;
38252 rdev->bb_page = NULL;
38253 atomic_set(&rdev->nr_pending, 0);
38254- atomic_set(&rdev->read_errors, 0);
38255- atomic_set(&rdev->corrected_errors, 0);
38256+ atomic_set_unchecked(&rdev->read_errors, 0);
38257+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38258
38259 INIT_LIST_HEAD(&rdev->same_set);
38260 init_waitqueue_head(&rdev->blocked_wait);
38261@@ -6987,7 +6987,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38262
38263 spin_unlock(&pers_lock);
38264 seq_printf(seq, "\n");
38265- seq->poll_event = atomic_read(&md_event_count);
38266+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38267 return 0;
38268 }
38269 if (v == (void*)2) {
38270@@ -7090,7 +7090,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38271 return error;
38272
38273 seq = file->private_data;
38274- seq->poll_event = atomic_read(&md_event_count);
38275+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38276 return error;
38277 }
38278
38279@@ -7104,7 +7104,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38280 /* always allow read */
38281 mask = POLLIN | POLLRDNORM;
38282
38283- if (seq->poll_event != atomic_read(&md_event_count))
38284+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
38285 mask |= POLLERR | POLLPRI;
38286 return mask;
38287 }
38288@@ -7148,7 +7148,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
38289 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38290 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38291 (int)part_stat_read(&disk->part0, sectors[1]) -
38292- atomic_read(&disk->sync_io);
38293+ atomic_read_unchecked(&disk->sync_io);
38294 /* sync IO will cause sync_io to increase before the disk_stats
38295 * as sync_io is counted when a request starts, and
38296 * disk_stats is counted when it completes.
38297diff --git a/drivers/md/md.h b/drivers/md/md.h
38298index eca59c3..7c42285 100644
38299--- a/drivers/md/md.h
38300+++ b/drivers/md/md.h
38301@@ -94,13 +94,13 @@ struct md_rdev {
38302 * only maintained for arrays that
38303 * support hot removal
38304 */
38305- atomic_t read_errors; /* number of consecutive read errors that
38306+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38307 * we have tried to ignore.
38308 */
38309 struct timespec last_read_error; /* monotonic time since our
38310 * last read error
38311 */
38312- atomic_t corrected_errors; /* number of corrected read errors,
38313+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38314 * for reporting to userspace and storing
38315 * in superblock.
38316 */
38317@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38318
38319 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38320 {
38321- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38322+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38323 }
38324
38325 struct md_personality
38326diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38327index 1cbfc6b..56e1dbb 100644
38328--- a/drivers/md/persistent-data/dm-space-map.h
38329+++ b/drivers/md/persistent-data/dm-space-map.h
38330@@ -60,6 +60,7 @@ struct dm_space_map {
38331 int (*root_size)(struct dm_space_map *sm, size_t *result);
38332 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
38333 };
38334+typedef struct dm_space_map __no_const dm_space_map_no_const;
38335
38336 /*----------------------------------------------------------------*/
38337
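The dm_space_map_no_const typedef added above exists because PaX constification treats a structure consisting only of function pointers as const data and places it in read-only memory; the two value_type.context assignments patched in dm-thin-metadata.c earlier store such a pointer into a writable generic slot, so they cast through the __no_const spelling instead. A compile-level sketch of the pattern, with __no_const reduced to a no-op since the real attribute is provided by the grsecurity GCC plugin:

/* Illustrative: __no_const is a plugin attribute in grsecurity;
 * a no-op here so the example stands alone. */
#define __no_const

struct dm_space_map {
        /* function pointers only: the plugin would constify this type */
        int (*get_count)(struct dm_space_map *sm, unsigned long b, int *res);
};
typedef struct dm_space_map __no_const dm_space_map_no_const;

struct value_type {
        dm_space_map_no_const *context;   /* writable slot */
};

static int get_count_impl(struct dm_space_map *sm, unsigned long b, int *res)
{
        (void)sm; (void)b;
        *res = 0;
        return 0;
}

int main(void)
{
        struct dm_space_map sm = { get_count_impl };
        struct value_type vt = { (dm_space_map_no_const *)&sm };
        int r;
        return vt.context->get_count(vt.context, 0, &r);
}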
38338diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38339index 75b1f89..00ba344 100644
38340--- a/drivers/md/raid1.c
38341+++ b/drivers/md/raid1.c
38342@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38343 if (r1_sync_page_io(rdev, sect, s,
38344 bio->bi_io_vec[idx].bv_page,
38345 READ) != 0)
38346- atomic_add(s, &rdev->corrected_errors);
38347+ atomic_add_unchecked(s, &rdev->corrected_errors);
38348 }
38349 sectors -= s;
38350 sect += s;
38351@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38352 test_bit(In_sync, &rdev->flags)) {
38353 if (r1_sync_page_io(rdev, sect, s,
38354 conf->tmppage, READ)) {
38355- atomic_add(s, &rdev->corrected_errors);
38356+ atomic_add_unchecked(s, &rdev->corrected_errors);
38357 printk(KERN_INFO
38358 "md/raid1:%s: read error corrected "
38359 "(%d sectors at %llu on %s)\n",
38360diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38361index 8d925dc..11d674f 100644
38362--- a/drivers/md/raid10.c
38363+++ b/drivers/md/raid10.c
38364@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
38365 /* The write handler will notice the lack of
38366 * R10BIO_Uptodate and record any errors etc
38367 */
38368- atomic_add(r10_bio->sectors,
38369+ atomic_add_unchecked(r10_bio->sectors,
38370 &conf->mirrors[d].rdev->corrected_errors);
38371
38372 /* for reconstruct, we always reschedule after a read.
38373@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38374 {
38375 struct timespec cur_time_mon;
38376 unsigned long hours_since_last;
38377- unsigned int read_errors = atomic_read(&rdev->read_errors);
38378+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38379
38380 ktime_get_ts(&cur_time_mon);
38381
38382@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38383 * overflowing the shift of read_errors by hours_since_last.
38384 */
38385 if (hours_since_last >= 8 * sizeof(read_errors))
38386- atomic_set(&rdev->read_errors, 0);
38387+ atomic_set_unchecked(&rdev->read_errors, 0);
38388 else
38389- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38390+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38391 }
38392
38393 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38394@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38395 return;
38396
38397 check_decay_read_errors(mddev, rdev);
38398- atomic_inc(&rdev->read_errors);
38399- if (atomic_read(&rdev->read_errors) > max_read_errors) {
38400+ atomic_inc_unchecked(&rdev->read_errors);
38401+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38402 char b[BDEVNAME_SIZE];
38403 bdevname(rdev->bdev, b);
38404
38405@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38406 "md/raid10:%s: %s: Raid device exceeded "
38407 "read_error threshold [cur %d:max %d]\n",
38408 mdname(mddev), b,
38409- atomic_read(&rdev->read_errors), max_read_errors);
38410+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38411 printk(KERN_NOTICE
38412 "md/raid10:%s: %s: Failing raid device\n",
38413 mdname(mddev), b);
38414@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38415 sect +
38416 choose_data_offset(r10_bio, rdev)),
38417 bdevname(rdev->bdev, b));
38418- atomic_add(s, &rdev->corrected_errors);
38419+ atomic_add_unchecked(s, &rdev->corrected_errors);
38420 }
38421
38422 rdev_dec_pending(rdev, mddev);
38423diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38424index 94ce78e..df99e24 100644
38425--- a/drivers/md/raid5.c
38426+++ b/drivers/md/raid5.c
38427@@ -1800,21 +1800,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38428 mdname(conf->mddev), STRIPE_SECTORS,
38429 (unsigned long long)s,
38430 bdevname(rdev->bdev, b));
38431- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38432+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38433 clear_bit(R5_ReadError, &sh->dev[i].flags);
38434 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38435 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38436 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38437
38438- if (atomic_read(&rdev->read_errors))
38439- atomic_set(&rdev->read_errors, 0);
38440+ if (atomic_read_unchecked(&rdev->read_errors))
38441+ atomic_set_unchecked(&rdev->read_errors, 0);
38442 } else {
38443 const char *bdn = bdevname(rdev->bdev, b);
38444 int retry = 0;
38445 int set_bad = 0;
38446
38447 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38448- atomic_inc(&rdev->read_errors);
38449+ atomic_inc_unchecked(&rdev->read_errors);
38450 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38451 printk_ratelimited(
38452 KERN_WARNING
38453@@ -1842,7 +1842,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38454 mdname(conf->mddev),
38455 (unsigned long long)s,
38456 bdn);
38457- } else if (atomic_read(&rdev->read_errors)
38458+ } else if (atomic_read_unchecked(&rdev->read_errors)
38459 > conf->max_nr_stripes)
38460 printk(KERN_WARNING
38461 "md/raid:%s: Too many read errors, failing device %s.\n",
38462diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38463index d33101a..6b13069 100644
38464--- a/drivers/media/dvb-core/dvbdev.c
38465+++ b/drivers/media/dvb-core/dvbdev.c
38466@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38467 const struct dvb_device *template, void *priv, int type)
38468 {
38469 struct dvb_device *dvbdev;
38470- struct file_operations *dvbdevfops;
38471+ file_operations_no_const *dvbdevfops;
38472 struct device *clsdev;
38473 int minor;
38474 int id;
38475diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38476index 404f63a..4796533 100644
38477--- a/drivers/media/dvb-frontends/dib3000.h
38478+++ b/drivers/media/dvb-frontends/dib3000.h
38479@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38480 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38481 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38482 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38483-};
38484+} __no_const;
38485
38486 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38487 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38488diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
38489index bc78354..42c9459 100644
38490--- a/drivers/media/pci/cx88/cx88-video.c
38491+++ b/drivers/media/pci/cx88/cx88-video.c
38492@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
38493
38494 /* ------------------------------------------------------------------ */
38495
38496-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38497-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38498-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38499+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38500+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38501+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38502
38503 module_param_array(video_nr, int, NULL, 0444);
38504 module_param_array(vbi_nr, int, NULL, 0444);
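The cx88 hunk above only changes unsigned int to int, but it matters: module_param_array(video_nr, int, NULL, 0444) tells the module loader to parse and store the parameter as int, so the backing array must really be int or the declaration is a type mismatch. The matching declaration in miniature, as it would appear in a kernel module (the CX88_MAXBOARDS value here is a stand-in):

#include <linux/module.h>
#include <linux/moduleparam.h>

#define CX88_MAXBOARDS 8   /* stand-in for the driver's constant */

/* Storage type matches the 'int' named in module_param_array(). */
static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = -1};
module_param_array(video_nr, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "video device number(s)");

MODULE_LICENSE("GPL");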
38505diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38506index 8e9a668..78d6310 100644
38507--- a/drivers/media/platform/omap/omap_vout.c
38508+++ b/drivers/media/platform/omap/omap_vout.c
38509@@ -63,7 +63,6 @@ enum omap_vout_channels {
38510 OMAP_VIDEO2,
38511 };
38512
38513-static struct videobuf_queue_ops video_vbq_ops;
38514 /* Variables configurable through module params*/
38515 static u32 video1_numbuffers = 3;
38516 static u32 video2_numbuffers = 3;
38517@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
38518 {
38519 struct videobuf_queue *q;
38520 struct omap_vout_device *vout = NULL;
38521+ static struct videobuf_queue_ops video_vbq_ops = {
38522+ .buf_setup = omap_vout_buffer_setup,
38523+ .buf_prepare = omap_vout_buffer_prepare,
38524+ .buf_release = omap_vout_buffer_release,
38525+ .buf_queue = omap_vout_buffer_queue,
38526+ };
38527
38528 vout = video_drvdata(file);
38529 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38530@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
38531 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38532
38533 q = &vout->vbq;
38534- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38535- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38536- video_vbq_ops.buf_release = omap_vout_buffer_release;
38537- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38538 spin_lock_init(&vout->vbq_lock);
38539
38540 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
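The omap_vout refactor above serves the same constification goal: the file-scope video_vbq_ops used to be filled in field by field inside open(), which forces the ops table to stay writable forever; declaring it static inside the function with a full initializer means every member is set exactly once and the table never needs a post-initialization write. The shape in miniature, with hypothetical ops names:

struct ops {
        int (*setup)(void);
        int (*queue)(void);
};

static int my_setup(void) { return 0; }
static int my_queue(void) { return 0; }

static int device_open(void)
{
        /* Fully initialized at definition: no later field-by-field
         * writes, so nothing stops the table from being read-only. */
        static struct ops vbq_ops = {
                .setup = my_setup,
                .queue = my_queue,
        };

        return vbq_ops.setup() + vbq_ops.queue();
}

int main(void)
{
        return device_open();
}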
38541diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38542index b671e20..34088b7 100644
38543--- a/drivers/media/platform/s5p-tv/mixer.h
38544+++ b/drivers/media/platform/s5p-tv/mixer.h
38545@@ -155,7 +155,7 @@ struct mxr_layer {
38546 /** layer index (unique identifier) */
38547 int idx;
38548 /** callbacks for layer methods */
38549- struct mxr_layer_ops ops;
38550+ struct mxr_layer_ops *ops;
38551 /** format array */
38552 const struct mxr_format **fmt_array;
38553 /** size of format array */
38554diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38555index b93a21f..2535195 100644
38556--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38557+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38558@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38559 {
38560 struct mxr_layer *layer;
38561 int ret;
38562- struct mxr_layer_ops ops = {
38563+ static struct mxr_layer_ops ops = {
38564 .release = mxr_graph_layer_release,
38565 .buffer_set = mxr_graph_buffer_set,
38566 .stream_set = mxr_graph_stream_set,
38567diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38568index 3b1670a..595c939 100644
38569--- a/drivers/media/platform/s5p-tv/mixer_reg.c
38570+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38571@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38572 layer->update_buf = next;
38573 }
38574
38575- layer->ops.buffer_set(layer, layer->update_buf);
38576+ layer->ops->buffer_set(layer, layer->update_buf);
38577
38578 if (done && done != layer->shadow_buf)
38579 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38580diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38581index 1f3b743..e839271 100644
38582--- a/drivers/media/platform/s5p-tv/mixer_video.c
38583+++ b/drivers/media/platform/s5p-tv/mixer_video.c
38584@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38585 layer->geo.src.height = layer->geo.src.full_height;
38586
38587 mxr_geometry_dump(mdev, &layer->geo);
38588- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38589+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38590 mxr_geometry_dump(mdev, &layer->geo);
38591 }
38592
38593@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38594 layer->geo.dst.full_width = mbus_fmt.width;
38595 layer->geo.dst.full_height = mbus_fmt.height;
38596 layer->geo.dst.field = mbus_fmt.field;
38597- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38598+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38599
38600 mxr_geometry_dump(mdev, &layer->geo);
38601 }
38602@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
38603 /* set source size to highest accepted value */
38604 geo->src.full_width = max(geo->dst.full_width, pix->width);
38605 geo->src.full_height = max(geo->dst.full_height, pix->height);
38606- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38607+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38608 mxr_geometry_dump(mdev, &layer->geo);
38609 /* set cropping to total visible screen */
38610 geo->src.width = pix->width;
38611@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
38612 geo->src.x_offset = 0;
38613 geo->src.y_offset = 0;
38614 /* assure consistency of geometry */
38615- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38616+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38617 mxr_geometry_dump(mdev, &layer->geo);
38618 /* set full size to lowest possible value */
38619 geo->src.full_width = 0;
38620 geo->src.full_height = 0;
38621- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38622+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38623 mxr_geometry_dump(mdev, &layer->geo);
38624
38625 /* returning results */
38626@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
38627 target->width = s->r.width;
38628 target->height = s->r.height;
38629
38630- layer->ops.fix_geometry(layer, stage, s->flags);
38631+ layer->ops->fix_geometry(layer, stage, s->flags);
38632
38633 /* retrieve update selection rectangle */
38634 res.left = target->x_offset;
38635@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
38636 mxr_output_get(mdev);
38637
38638 mxr_layer_update_output(layer);
38639- layer->ops.format_set(layer);
38640+ layer->ops->format_set(layer);
38641 /* enabling layer in hardware */
38642 spin_lock_irqsave(&layer->enq_slock, flags);
38643 layer->state = MXR_LAYER_STREAMING;
38644 spin_unlock_irqrestore(&layer->enq_slock, flags);
38645
38646- layer->ops.stream_set(layer, MXR_ENABLE);
38647+ layer->ops->stream_set(layer, MXR_ENABLE);
38648 mxr_streamer_get(mdev);
38649
38650 return 0;
38651@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
38652 spin_unlock_irqrestore(&layer->enq_slock, flags);
38653
38654 /* disabling layer in hardware */
38655- layer->ops.stream_set(layer, MXR_DISABLE);
38656+ layer->ops->stream_set(layer, MXR_DISABLE);
38657 /* remove one streamer */
38658 mxr_streamer_put(mdev);
38659 /* allow changes in output configuration */
38660@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
38661
38662 void mxr_layer_release(struct mxr_layer *layer)
38663 {
38664- if (layer->ops.release)
38665- layer->ops.release(layer);
38666+ if (layer->ops->release)
38667+ layer->ops->release(layer);
38668 }
38669
38670 void mxr_base_layer_release(struct mxr_layer *layer)
38671@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
38672
38673 layer->mdev = mdev;
38674 layer->idx = idx;
38675- layer->ops = *ops;
38676+ layer->ops = ops;
38677
38678 spin_lock_init(&layer->enq_slock);
38679 INIT_LIST_HEAD(&layer->enq_list);
38680diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38681index 3d13a63..da31bf1 100644
38682--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38683+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38684@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
38685 {
38686 struct mxr_layer *layer;
38687 int ret;
38688- struct mxr_layer_ops ops = {
38689+ static struct mxr_layer_ops ops = {
38690 .release = mxr_vp_layer_release,
38691 .buffer_set = mxr_vp_buffer_set,
38692 .stream_set = mxr_vp_stream_set,
38693diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38694index 643d80a..56bb96b 100644
38695--- a/drivers/media/radio/radio-cadet.c
38696+++ b/drivers/media/radio/radio-cadet.c
38697@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38698 unsigned char readbuf[RDS_BUFFER];
38699 int i = 0;
38700
38701+ if (count > RDS_BUFFER)
38702+ return -EFAULT;
38703 mutex_lock(&dev->lock);
38704 if (dev->rdsstat == 0)
38705 cadet_start_rds(dev);
38706@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38707 while (i < count && dev->rdsin != dev->rdsout)
38708 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38709
38710- if (i && copy_to_user(data, readbuf, i))
38711+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
38712 i = -EFAULT;
38713 unlock:
38714 mutex_unlock(&dev->lock);
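The radio-cadet hunks above close a stack overflow: readbuf is a fixed RDS_BUFFER-byte array on the stack, but the drain loop was bounded only by the user-supplied count, so a large read() could run i past the end of the buffer; the patch rejects oversized counts up front and re-checks i against sizeof(readbuf) before copy_to_user(). (Returning -EINVAL for a bad length would arguably be more conventional than -EFAULT, but the bound is what matters.) A userspace model of the fix, with buffer size and data source as stand-ins:

#include <stdio.h>
#include <string.h>

#define RDS_BUFFER 256

/* Copies at most RDS_BUFFER bytes no matter what count is requested. */
static long bounded_read(char *dst, size_t count)
{
        char readbuf[RDS_BUFFER];
        size_t i = 0;

        if (count > sizeof(readbuf))
                return -1;              /* reject oversized requests up front */
        while (i < count)
                readbuf[i++] = 'r';     /* stand-in for draining dev->rdsbuf */
        memcpy(dst, readbuf, i);        /* stand-in for copy_to_user() */
        return (long)i;
}

int main(void)
{
        char out[RDS_BUFFER];

        printf("%ld\n", bounded_read(out, 16));     /* 16 */
        printf("%ld\n", bounded_read(out, 100000)); /* -1: rejected */
        return 0;
}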
38715diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
38716index 3940bb0..fb3952a 100644
38717--- a/drivers/media/usb/dvb-usb/cxusb.c
38718+++ b/drivers/media/usb/dvb-usb/cxusb.c
38719@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38720
38721 struct dib0700_adapter_state {
38722 int (*set_param_save) (struct dvb_frontend *);
38723-};
38724+} __no_const;
38725
38726 static int dib7070_set_param_override(struct dvb_frontend *fe)
38727 {
38728diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
38729index 9382895..ac8093c 100644
38730--- a/drivers/media/usb/dvb-usb/dw2102.c
38731+++ b/drivers/media/usb/dvb-usb/dw2102.c
38732@@ -95,7 +95,7 @@ struct su3000_state {
38733
38734 struct s6x0_state {
38735 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
38736-};
38737+} __no_const;
38738
38739 /* debug */
38740 static int dvb_usb_dw2102_debug;
38741diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
38742index aa6e7c7..4cd8061 100644
38743--- a/drivers/media/v4l2-core/v4l2-ioctl.c
38744+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
38745@@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
38746 struct file *file, void *fh, void *p);
38747 } u;
38748 void (*debug)(const void *arg, bool write_only);
38749-};
38750+} __do_const;
38751+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
38752
38753 /* This control needs a priority check */
38754 #define INFO_FL_PRIO (1 << 0)
38755@@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
38756 struct video_device *vfd = video_devdata(file);
38757 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
38758 bool write_only = false;
38759- struct v4l2_ioctl_info default_info;
38760+ v4l2_ioctl_info_no_const default_info;
38761 const struct v4l2_ioctl_info *info;
38762 void *fh = file->private_data;
38763 struct v4l2_fh *vfh = NULL;
38764diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
38765index 29b2172..a7c5b31 100644
38766--- a/drivers/memstick/host/r592.c
38767+++ b/drivers/memstick/host/r592.c
38768@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
38769 /* Executes one TPC (data is read/written from small or large fifo) */
38770 static void r592_execute_tpc(struct r592_device *dev)
38771 {
38772- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38773+ bool is_write;
38774 int len, error;
38775 u32 status, reg;
38776
38777@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
38778 return;
38779 }
38780
38781+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38782 len = dev->req->long_data ?
38783 dev->req->sg.length : dev->req->data_len;
38784
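The r592 hunk above is a small ordering fix: is_write was initialized from dev->req in its declaration, before the early-return check in the elided context had a chance to validate the request; moving the assignment below the check means the pointer is only dereferenced once it is known good. The pattern in miniature, with a hypothetical request type:

#include <stddef.h>
#include <stdio.h>

struct request { int tpc; };
#define MS_TPC_SET_RW_REG_ADRS 8   /* stand-in threshold */

static int execute_tpc(struct request *req)
{
        int is_write;                  /* not initialized from req yet */

        if (req == NULL)               /* validate first */
                return -1;

        is_write = req->tpc >= MS_TPC_SET_RW_REG_ADRS; /* safe: req checked */
        return is_write;
}

int main(void)
{
        struct request req = { 9 };

        printf("%d %d\n", execute_tpc(NULL), execute_tpc(&req)); /* -1 1 */
        return 0;
}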
38785diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38786index fb69baa..3aeea2e 100644
38787--- a/drivers/message/fusion/mptbase.c
38788+++ b/drivers/message/fusion/mptbase.c
38789@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38790 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38791 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38792
38793+#ifdef CONFIG_GRKERNSEC_HIDESYM
38794+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
38795+#else
38796 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38797 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38798+#endif
38799+
38800 /*
38801 * Rounding UP to nearest 4-kB boundary here...
38802 */
38803@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38804 ioc->facts.GlobalCredits);
38805
38806 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
38807+#ifdef CONFIG_GRKERNSEC_HIDESYM
38808+ NULL, NULL);
38809+#else
38810 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
38811+#endif
38812 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
38813 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
38814 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
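The mptbase hunks above are GRKERNSEC_HIDESYM at work: with that option enabled, /proc output that would reveal kernel virtual and DMA addresses prints NULL instead, removing an infoleak that helps defeat kernel address-space randomization for any local reader; mainline later addressed the same class of leak with the %pK format specifier and kptr_restrict. A sketch of the conditional, with seq_printf() stubbed for illustration:

#include <stdio.h>

#define seq_printf(m, ...) ((void)(m), printf(__VA_ARGS__)) /* stand-in */

static void show_frames(void *m, void *req_frames, unsigned long frames_dma)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
        /* hide kernel/DMA addresses from /proc readers */
        seq_printf(m, "RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
#else
        seq_printf(m, "RequestFrames @ 0x%p (Dma @ 0x%p)\n",
                   req_frames, (void *)frames_dma);
#endif
}

int main(void)
{
        show_frames(NULL, (void *)0x1000, 0x2000UL);
        return 0;
}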
38815diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38816index fa43c39..daeb158 100644
38817--- a/drivers/message/fusion/mptsas.c
38818+++ b/drivers/message/fusion/mptsas.c
38819@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38820 return 0;
38821 }
38822
38823+static inline void
38824+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38825+{
38826+ if (phy_info->port_details) {
38827+ phy_info->port_details->rphy = rphy;
38828+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38829+ ioc->name, rphy));
38830+ }
38831+
38832+ if (rphy) {
38833+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38834+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38835+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38836+ ioc->name, rphy, rphy->dev.release));
38837+ }
38838+}
38839+
38840 /* no mutex */
38841 static void
38842 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38843@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38844 return NULL;
38845 }
38846
38847-static inline void
38848-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38849-{
38850- if (phy_info->port_details) {
38851- phy_info->port_details->rphy = rphy;
38852- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38853- ioc->name, rphy));
38854- }
38855-
38856- if (rphy) {
38857- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38858- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38859- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38860- ioc->name, rphy, rphy->dev.release));
38861- }
38862-}
38863-
38864 static inline struct sas_port *
38865 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38866 {
38867diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38868index 164afa7..b6b2e74 100644
38869--- a/drivers/message/fusion/mptscsih.c
38870+++ b/drivers/message/fusion/mptscsih.c
38871@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38872
38873 h = shost_priv(SChost);
38874
38875- if (h) {
38876- if (h->info_kbuf == NULL)
38877- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38878- return h->info_kbuf;
38879- h->info_kbuf[0] = '\0';
38880+ if (!h)
38881+ return NULL;
38882
38883- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38884- h->info_kbuf[size-1] = '\0';
38885- }
38886+ if (h->info_kbuf == NULL)
38887+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38888+ return h->info_kbuf;
38889+ h->info_kbuf[0] = '\0';
38890+
38891+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38892+ h->info_kbuf[size-1] = '\0';
38893
38894 return h->info_kbuf;
38895 }
38896diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38897index 8001aa6..b137580 100644
38898--- a/drivers/message/i2o/i2o_proc.c
38899+++ b/drivers/message/i2o/i2o_proc.c
38900@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
38901 "Array Controller Device"
38902 };
38903
38904-static char *chtostr(char *tmp, u8 *chars, int n)
38905-{
38906- tmp[0] = 0;
38907- return strncat(tmp, (char *)chars, n);
38908-}
38909-
38910 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38911 char *group)
38912 {
38913@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38914 } *result;
38915
38916 i2o_exec_execute_ddm_table ddm_table;
38917- char tmp[28 + 1];
38918
38919 result = kmalloc(sizeof(*result), GFP_KERNEL);
38920 if (!result)
38921@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38922
38923 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38924 seq_printf(seq, "%-#8x", ddm_table.module_id);
38925- seq_printf(seq, "%-29s",
38926- chtostr(tmp, ddm_table.module_name_version, 28));
38927+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38928 seq_printf(seq, "%9d ", ddm_table.data_size);
38929 seq_printf(seq, "%8d", ddm_table.code_size);
38930
38931@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38932
38933 i2o_driver_result_table *result;
38934 i2o_driver_store_table *dst;
38935- char tmp[28 + 1];
38936
38937 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
38938 if (result == NULL)
38939@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38940
38941 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38942 seq_printf(seq, "%-#8x", dst->module_id);
38943- seq_printf(seq, "%-29s",
38944- chtostr(tmp, dst->module_name_version, 28));
38945- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
38946+ seq_printf(seq, "%-.28s", dst->module_name_version);
38947+ seq_printf(seq, "%-.8s", dst->date);
38948 seq_printf(seq, "%8d ", dst->module_size);
38949 seq_printf(seq, "%8d ", dst->mpb_size);
38950 seq_printf(seq, "0x%04x", dst->module_flags);
38951@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38952 // == (allow) 512d bytes (max)
38953 static u16 *work16 = (u16 *) work32;
38954 int token;
38955- char tmp[16 + 1];
38956
38957 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
38958
38959@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38960 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38961 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38962 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38963- seq_printf(seq, "Vendor info : %s\n",
38964- chtostr(tmp, (u8 *) (work32 + 2), 16));
38965- seq_printf(seq, "Product info : %s\n",
38966- chtostr(tmp, (u8 *) (work32 + 6), 16));
38967- seq_printf(seq, "Description : %s\n",
38968- chtostr(tmp, (u8 *) (work32 + 10), 16));
38969- seq_printf(seq, "Product rev. : %s\n",
38970- chtostr(tmp, (u8 *) (work32 + 14), 8));
38971+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38972+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38973+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38974+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38975
38976 seq_printf(seq, "Serial number : ");
38977 print_serial_number(seq, (u8 *) (work32 + 16),
38978@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38979 u8 pad[256]; // allow up to 256 byte (max) serial number
38980 } result;
38981
38982- char tmp[24 + 1];
38983-
38984 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
38985
38986 if (token < 0) {
38987@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38988 }
38989
38990 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38991- seq_printf(seq, "Module name : %s\n",
38992- chtostr(tmp, result.module_name, 24));
38993- seq_printf(seq, "Module revision : %s\n",
38994- chtostr(tmp, result.module_rev, 8));
38995+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38996+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38997
38998 seq_printf(seq, "Serial number : ");
38999 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
39000@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39001 u8 instance_number[4];
39002 } result;
39003
39004- char tmp[64 + 1];
39005-
39006 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
39007
39008 if (token < 0) {
39009@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39010 return 0;
39011 }
39012
39013- seq_printf(seq, "Device name : %s\n",
39014- chtostr(tmp, result.device_name, 64));
39015- seq_printf(seq, "Service name : %s\n",
39016- chtostr(tmp, result.service_name, 64));
39017- seq_printf(seq, "Physical name : %s\n",
39018- chtostr(tmp, result.physical_location, 64));
39019- seq_printf(seq, "Instance number : %s\n",
39020- chtostr(tmp, result.instance_number, 4));
39021+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
39022+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
39023+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
39024+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
39025
39026 return 0;
39027 }
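The i2o_proc changes above retire the chtostr() helper, which strncat'ed possibly non-NUL-terminated firmware strings into a stack temporary, in favor of printf precision: with "%.28s" and friends the precision caps how many bytes seq_printf() may read from the source, so the fields print safely without a bounce buffer even when they lack a terminator. A short demonstration of the precision semantics:

#include <stdio.h>

int main(void)
{
        /* 8 bytes, deliberately not NUL-terminated */
        char module_rev[8] = { 'v', '1', '.', '2', '.', '3', '4', '5' };

        /* "%.8s" reads at most 8 bytes, so the missing NUL is harmless */
        printf("Module revision : %.8s\n", module_rev);
        return 0;
}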
39028diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
39029index a8c08f3..155fe3d 100644
39030--- a/drivers/message/i2o/iop.c
39031+++ b/drivers/message/i2o/iop.c
39032@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
39033
39034 spin_lock_irqsave(&c->context_list_lock, flags);
39035
39036- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
39037- atomic_inc(&c->context_list_counter);
39038+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
39039+ atomic_inc_unchecked(&c->context_list_counter);
39040
39041- entry->context = atomic_read(&c->context_list_counter);
39042+ entry->context = atomic_read_unchecked(&c->context_list_counter);
39043
39044 list_add(&entry->list, &c->context_list);
39045
39046@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
39047
39048 #if BITS_PER_LONG == 64
39049 spin_lock_init(&c->context_list_lock);
39050- atomic_set(&c->context_list_counter, 0);
39051+ atomic_set_unchecked(&c->context_list_counter, 0);
39052 INIT_LIST_HEAD(&c->context_list);
39053 #endif
39054
39055diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
39056index 45ece11..8efa218 100644
39057--- a/drivers/mfd/janz-cmodio.c
39058+++ b/drivers/mfd/janz-cmodio.c
39059@@ -13,6 +13,7 @@
39060
39061 #include <linux/kernel.h>
39062 #include <linux/module.h>
39063+#include <linux/slab.h>
39064 #include <linux/init.h>
39065 #include <linux/pci.h>
39066 #include <linux/interrupt.h>
39067diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
39068index a5f9888..1c0ed56 100644
39069--- a/drivers/mfd/twl4030-irq.c
39070+++ b/drivers/mfd/twl4030-irq.c
39071@@ -35,6 +35,7 @@
39072 #include <linux/of.h>
39073 #include <linux/irqdomain.h>
39074 #include <linux/i2c/twl.h>
39075+#include <asm/pgtable.h>
39076
39077 #include "twl-core.h"
39078
39079@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
39080 * Install an irq handler for each of the SIH modules;
39081 * clone dummy irq_chip since PIH can't *do* anything
39082 */
39083- twl4030_irq_chip = dummy_irq_chip;
39084- twl4030_irq_chip.name = "twl4030";
39085+ pax_open_kernel();
39086+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
39087+ *(const char **)&twl4030_irq_chip.name = "twl4030";
39088
39089- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39090+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39091+ pax_close_kernel();
39092
39093 for (i = irq_base; i < irq_end; i++) {
39094 irq_set_chip_and_handler(i, &twl4030_irq_chip,
39095diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
39096index 277a8db..0e0b754 100644
39097--- a/drivers/mfd/twl6030-irq.c
39098+++ b/drivers/mfd/twl6030-irq.c
39099@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
39100 * install an irq handler for each of the modules;
39101 * clone dummy irq_chip since PIH can't *do* anything
39102 */
39103- twl6030_irq_chip = dummy_irq_chip;
39104- twl6030_irq_chip.name = "twl6030";
39105- twl6030_irq_chip.irq_set_type = NULL;
39106- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39107+ pax_open_kernel();
39108+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
39109+ *(const char **)&twl6030_irq_chip.name = "twl6030";
39110+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
39111+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39112+ pax_close_kernel();
39113
39114 for (i = irq_base; i < irq_end; i++) {
39115 irq_set_chip_and_handler(i, &twl6030_irq_chip,
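The twl4030 and twl6030 hunks above show the write side of constification: irq_chip structures are read-only under PaX, so the one-time setup that clones dummy_irq_chip and patches its name and callbacks is wrapped in pax_open_kernel()/pax_close_kernel(), and the individual stores cast through (const char **) and (void **) to defeat the constified field types. A stand-alone model with the brackets stubbed to no-ops (the real ones temporarily lift write protection, e.g. by toggling CR0.WP):

#include <stdio.h>
#include <string.h>

/* Stubs: the real PaX helpers temporarily make rodata writable. */
static void pax_open_kernel(void)  { }
static void pax_close_kernel(void) { }

struct irq_chip {
        const char *name;
        void (*irq_ack)(void);
};

static void dummy_ack(void) { }
static struct irq_chip dummy_irq_chip = { "dummy", dummy_ack };
static struct irq_chip twl4030_irq_chip;  /* constified in the real kernel */

int main(void)
{
        pax_open_kernel();
        memcpy(&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
        *(const char **)&twl4030_irq_chip.name = "twl4030"; /* defeat const */
        pax_close_kernel();

        printf("%s\n", twl4030_irq_chip.name);
        return 0;
}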
39116diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
39117index f428d86..274c368 100644
39118--- a/drivers/misc/c2port/core.c
39119+++ b/drivers/misc/c2port/core.c
39120@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
39121 mutex_init(&c2dev->mutex);
39122
39123 /* Create binary file */
39124- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39125+ pax_open_kernel();
39126+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39127+ pax_close_kernel();
39128 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
39129 if (unlikely(ret))
39130 goto error_device_create_bin_file;
39131diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39132index 3aa9a96..59cf685 100644
39133--- a/drivers/misc/kgdbts.c
39134+++ b/drivers/misc/kgdbts.c
39135@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
39136 char before[BREAK_INSTR_SIZE];
39137 char after[BREAK_INSTR_SIZE];
39138
39139- probe_kernel_read(before, (char *)kgdbts_break_test,
39140+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
39141 BREAK_INSTR_SIZE);
39142 init_simple_test();
39143 ts.tst = plant_and_detach_test;
39144@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
39145 /* Activate test with initial breakpoint */
39146 if (!is_early)
39147 kgdb_breakpoint();
39148- probe_kernel_read(after, (char *)kgdbts_break_test,
39149+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
39150 BREAK_INSTR_SIZE);
39151 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
39152 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
39153diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39154index 4a87e5c..76bdf5c 100644
39155--- a/drivers/misc/lis3lv02d/lis3lv02d.c
39156+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39157@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
39158 * the lid is closed. This leads to interrupts as soon as a little move
39159 * is done.
39160 */
39161- atomic_inc(&lis3->count);
39162+ atomic_inc_unchecked(&lis3->count);
39163
39164 wake_up_interruptible(&lis3->misc_wait);
39165 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
39166@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
39167 if (lis3->pm_dev)
39168 pm_runtime_get_sync(lis3->pm_dev);
39169
39170- atomic_set(&lis3->count, 0);
39171+ atomic_set_unchecked(&lis3->count, 0);
39172 return 0;
39173 }
39174
39175@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
39176 add_wait_queue(&lis3->misc_wait, &wait);
39177 while (true) {
39178 set_current_state(TASK_INTERRUPTIBLE);
39179- data = atomic_xchg(&lis3->count, 0);
39180+ data = atomic_xchg_unchecked(&lis3->count, 0);
39181 if (data)
39182 break;
39183
39184@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
39185 struct lis3lv02d, miscdev);
39186
39187 poll_wait(file, &lis3->misc_wait, wait);
39188- if (atomic_read(&lis3->count))
39189+ if (atomic_read_unchecked(&lis3->count))
39190 return POLLIN | POLLRDNORM;
39191 return 0;
39192 }
39193diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
39194index c439c82..1f20f57 100644
39195--- a/drivers/misc/lis3lv02d/lis3lv02d.h
39196+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
39197@@ -297,7 +297,7 @@ struct lis3lv02d {
39198 struct input_polled_dev *idev; /* input device */
39199 struct platform_device *pdev; /* platform device */
39200 struct regulator_bulk_data regulators[2];
39201- atomic_t count; /* interrupt count after last read */
39202+ atomic_unchecked_t count; /* interrupt count after last read */
39203 union axis_conversion ac; /* hw -> logical axis */
39204 int mapped_btns[3];
39205
39206diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39207index 2f30bad..c4c13d0 100644
39208--- a/drivers/misc/sgi-gru/gruhandles.c
39209+++ b/drivers/misc/sgi-gru/gruhandles.c
39210@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39211 unsigned long nsec;
39212
39213 nsec = CLKS2NSEC(clks);
39214- atomic_long_inc(&mcs_op_statistics[op].count);
39215- atomic_long_add(nsec, &mcs_op_statistics[op].total);
39216+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39217+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
39218 if (mcs_op_statistics[op].max < nsec)
39219 mcs_op_statistics[op].max = nsec;
39220 }
39221diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39222index 950dbe9..eeef0f8 100644
39223--- a/drivers/misc/sgi-gru/gruprocfs.c
39224+++ b/drivers/misc/sgi-gru/gruprocfs.c
39225@@ -32,9 +32,9 @@
39226
39227 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39228
39229-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39230+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39231 {
39232- unsigned long val = atomic_long_read(v);
39233+ unsigned long val = atomic_long_read_unchecked(v);
39234
39235 seq_printf(s, "%16lu %s\n", val, id);
39236 }
39237@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39238
39239 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
39240 for (op = 0; op < mcsop_last; op++) {
39241- count = atomic_long_read(&mcs_op_statistics[op].count);
39242- total = atomic_long_read(&mcs_op_statistics[op].total);
39243+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39244+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39245 max = mcs_op_statistics[op].max;
39246 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39247 count ? total / count : 0, max);
39248diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39249index 5c3ce24..4915ccb 100644
39250--- a/drivers/misc/sgi-gru/grutables.h
39251+++ b/drivers/misc/sgi-gru/grutables.h
39252@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
39253 * GRU statistics.
39254 */
39255 struct gru_stats_s {
39256- atomic_long_t vdata_alloc;
39257- atomic_long_t vdata_free;
39258- atomic_long_t gts_alloc;
39259- atomic_long_t gts_free;
39260- atomic_long_t gms_alloc;
39261- atomic_long_t gms_free;
39262- atomic_long_t gts_double_allocate;
39263- atomic_long_t assign_context;
39264- atomic_long_t assign_context_failed;
39265- atomic_long_t free_context;
39266- atomic_long_t load_user_context;
39267- atomic_long_t load_kernel_context;
39268- atomic_long_t lock_kernel_context;
39269- atomic_long_t unlock_kernel_context;
39270- atomic_long_t steal_user_context;
39271- atomic_long_t steal_kernel_context;
39272- atomic_long_t steal_context_failed;
39273- atomic_long_t nopfn;
39274- atomic_long_t asid_new;
39275- atomic_long_t asid_next;
39276- atomic_long_t asid_wrap;
39277- atomic_long_t asid_reuse;
39278- atomic_long_t intr;
39279- atomic_long_t intr_cbr;
39280- atomic_long_t intr_tfh;
39281- atomic_long_t intr_spurious;
39282- atomic_long_t intr_mm_lock_failed;
39283- atomic_long_t call_os;
39284- atomic_long_t call_os_wait_queue;
39285- atomic_long_t user_flush_tlb;
39286- atomic_long_t user_unload_context;
39287- atomic_long_t user_exception;
39288- atomic_long_t set_context_option;
39289- atomic_long_t check_context_retarget_intr;
39290- atomic_long_t check_context_unload;
39291- atomic_long_t tlb_dropin;
39292- atomic_long_t tlb_preload_page;
39293- atomic_long_t tlb_dropin_fail_no_asid;
39294- atomic_long_t tlb_dropin_fail_upm;
39295- atomic_long_t tlb_dropin_fail_invalid;
39296- atomic_long_t tlb_dropin_fail_range_active;
39297- atomic_long_t tlb_dropin_fail_idle;
39298- atomic_long_t tlb_dropin_fail_fmm;
39299- atomic_long_t tlb_dropin_fail_no_exception;
39300- atomic_long_t tfh_stale_on_fault;
39301- atomic_long_t mmu_invalidate_range;
39302- atomic_long_t mmu_invalidate_page;
39303- atomic_long_t flush_tlb;
39304- atomic_long_t flush_tlb_gru;
39305- atomic_long_t flush_tlb_gru_tgh;
39306- atomic_long_t flush_tlb_gru_zero_asid;
39307+ atomic_long_unchecked_t vdata_alloc;
39308+ atomic_long_unchecked_t vdata_free;
39309+ atomic_long_unchecked_t gts_alloc;
39310+ atomic_long_unchecked_t gts_free;
39311+ atomic_long_unchecked_t gms_alloc;
39312+ atomic_long_unchecked_t gms_free;
39313+ atomic_long_unchecked_t gts_double_allocate;
39314+ atomic_long_unchecked_t assign_context;
39315+ atomic_long_unchecked_t assign_context_failed;
39316+ atomic_long_unchecked_t free_context;
39317+ atomic_long_unchecked_t load_user_context;
39318+ atomic_long_unchecked_t load_kernel_context;
39319+ atomic_long_unchecked_t lock_kernel_context;
39320+ atomic_long_unchecked_t unlock_kernel_context;
39321+ atomic_long_unchecked_t steal_user_context;
39322+ atomic_long_unchecked_t steal_kernel_context;
39323+ atomic_long_unchecked_t steal_context_failed;
39324+ atomic_long_unchecked_t nopfn;
39325+ atomic_long_unchecked_t asid_new;
39326+ atomic_long_unchecked_t asid_next;
39327+ atomic_long_unchecked_t asid_wrap;
39328+ atomic_long_unchecked_t asid_reuse;
39329+ atomic_long_unchecked_t intr;
39330+ atomic_long_unchecked_t intr_cbr;
39331+ atomic_long_unchecked_t intr_tfh;
39332+ atomic_long_unchecked_t intr_spurious;
39333+ atomic_long_unchecked_t intr_mm_lock_failed;
39334+ atomic_long_unchecked_t call_os;
39335+ atomic_long_unchecked_t call_os_wait_queue;
39336+ atomic_long_unchecked_t user_flush_tlb;
39337+ atomic_long_unchecked_t user_unload_context;
39338+ atomic_long_unchecked_t user_exception;
39339+ atomic_long_unchecked_t set_context_option;
39340+ atomic_long_unchecked_t check_context_retarget_intr;
39341+ atomic_long_unchecked_t check_context_unload;
39342+ atomic_long_unchecked_t tlb_dropin;
39343+ atomic_long_unchecked_t tlb_preload_page;
39344+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39345+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39346+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39347+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39348+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39349+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39350+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39351+ atomic_long_unchecked_t tfh_stale_on_fault;
39352+ atomic_long_unchecked_t mmu_invalidate_range;
39353+ atomic_long_unchecked_t mmu_invalidate_page;
39354+ atomic_long_unchecked_t flush_tlb;
39355+ atomic_long_unchecked_t flush_tlb_gru;
39356+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39357+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39358
39359- atomic_long_t copy_gpa;
39360- atomic_long_t read_gpa;
39361+ atomic_long_unchecked_t copy_gpa;
39362+ atomic_long_unchecked_t read_gpa;
39363
39364- atomic_long_t mesq_receive;
39365- atomic_long_t mesq_receive_none;
39366- atomic_long_t mesq_send;
39367- atomic_long_t mesq_send_failed;
39368- atomic_long_t mesq_noop;
39369- atomic_long_t mesq_send_unexpected_error;
39370- atomic_long_t mesq_send_lb_overflow;
39371- atomic_long_t mesq_send_qlimit_reached;
39372- atomic_long_t mesq_send_amo_nacked;
39373- atomic_long_t mesq_send_put_nacked;
39374- atomic_long_t mesq_page_overflow;
39375- atomic_long_t mesq_qf_locked;
39376- atomic_long_t mesq_qf_noop_not_full;
39377- atomic_long_t mesq_qf_switch_head_failed;
39378- atomic_long_t mesq_qf_unexpected_error;
39379- atomic_long_t mesq_noop_unexpected_error;
39380- atomic_long_t mesq_noop_lb_overflow;
39381- atomic_long_t mesq_noop_qlimit_reached;
39382- atomic_long_t mesq_noop_amo_nacked;
39383- atomic_long_t mesq_noop_put_nacked;
39384- atomic_long_t mesq_noop_page_overflow;
39385+ atomic_long_unchecked_t mesq_receive;
39386+ atomic_long_unchecked_t mesq_receive_none;
39387+ atomic_long_unchecked_t mesq_send;
39388+ atomic_long_unchecked_t mesq_send_failed;
39389+ atomic_long_unchecked_t mesq_noop;
39390+ atomic_long_unchecked_t mesq_send_unexpected_error;
39391+ atomic_long_unchecked_t mesq_send_lb_overflow;
39392+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39393+ atomic_long_unchecked_t mesq_send_amo_nacked;
39394+ atomic_long_unchecked_t mesq_send_put_nacked;
39395+ atomic_long_unchecked_t mesq_page_overflow;
39396+ atomic_long_unchecked_t mesq_qf_locked;
39397+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39398+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39399+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39400+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39401+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39402+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39403+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39404+ atomic_long_unchecked_t mesq_noop_put_nacked;
39405+ atomic_long_unchecked_t mesq_noop_page_overflow;
39406
39407 };
39408
39409@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39410 tghop_invalidate, mcsop_last};
39411
39412 struct mcs_op_statistic {
39413- atomic_long_t count;
39414- atomic_long_t total;
39415+ atomic_long_unchecked_t count;
39416+ atomic_long_unchecked_t total;
39417 unsigned long max;
39418 };
39419
39420@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39421
39422 #define STAT(id) do { \
39423 if (gru_options & OPT_STATS) \
39424- atomic_long_inc(&gru_stats.id); \
39425+ atomic_long_inc_unchecked(&gru_stats.id); \
39426 } while (0)
39427
39428 #ifdef CONFIG_SGI_GRU_DEBUG
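
[annotation] The sgi-gru hunks above convert every statistics counter from atomic_long_t to atomic_long_unchecked_t, and the STAT() macro to the matching _unchecked increment. Under PaX's REFCOUNT hardening, ordinary atomic types trap on overflow to catch reference-count bugs; pure statistics counters are expected to wrap, so they move to the unchecked variants. A minimal userspace sketch of the idea follows; the names mirror the patch, but this standalone C11 emulation is an assumption, not the kernel implementation.

#include <stdatomic.h>
#include <stdio.h>

/* Unchecked counter: wrapping is fine, no overflow trap wanted. */
typedef struct { atomic_long counter; } atomic_long_unchecked_t;

static atomic_long_unchecked_t hits;	/* zero-initialized */

static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
	/* relaxed is enough for a statistic nobody synchronizes on */
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

int main(void)
{
	atomic_long_inc_unchecked(&hits);
	printf("%ld\n", atomic_long_read_unchecked(&hits));
	return 0;
}
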
39429diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39430index c862cd4..0d176fe 100644
39431--- a/drivers/misc/sgi-xp/xp.h
39432+++ b/drivers/misc/sgi-xp/xp.h
39433@@ -288,7 +288,7 @@ struct xpc_interface {
39434 xpc_notify_func, void *);
39435 void (*received) (short, int, void *);
39436 enum xp_retval (*partid_to_nasids) (short, void *);
39437-};
39438+} __no_const;
39439
39440 extern struct xpc_interface xpc_interface;
39441
39442diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39443index b94d5f7..7f494c5 100644
39444--- a/drivers/misc/sgi-xp/xpc.h
39445+++ b/drivers/misc/sgi-xp/xpc.h
39446@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39447 void (*received_payload) (struct xpc_channel *, void *);
39448 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39449 };
39450+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39451
39452 /* struct xpc_partition act_state values (for XPC HB) */
39453
39454@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39455 /* found in xpc_main.c */
39456 extern struct device *xpc_part;
39457 extern struct device *xpc_chan;
39458-extern struct xpc_arch_operations xpc_arch_ops;
39459+extern xpc_arch_operations_no_const xpc_arch_ops;
39460 extern int xpc_disengage_timelimit;
39461 extern int xpc_disengage_timedout;
39462 extern int xpc_activate_IRQ_rcvd;
39463diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39464index d971817..33bdca5 100644
39465--- a/drivers/misc/sgi-xp/xpc_main.c
39466+++ b/drivers/misc/sgi-xp/xpc_main.c
39467@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39468 .notifier_call = xpc_system_die,
39469 };
39470
39471-struct xpc_arch_operations xpc_arch_ops;
39472+xpc_arch_operations_no_const xpc_arch_ops;
39473
39474 /*
39475 * Timer function to enforce the timelimit on the partition disengage.
39476@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39477
39478 if (((die_args->trapnr == X86_TRAP_MF) ||
39479 (die_args->trapnr == X86_TRAP_XF)) &&
39480- !user_mode_vm(die_args->regs))
39481+ !user_mode(die_args->regs))
39482 xpc_die_deactivate();
39483
39484 break;
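
[annotation] The three sgi-xp hunks above are about the constify gcc plugin: grsecurity makes structures consisting only of function pointers const by default, and __no_const opts a type out when it is legitimately reassigned at runtime, either directly on the struct (xp.h) or via a typedef when the attribute cannot sit on the extern declaration (xpc.h, xpc_main.c). A sketch of the annotation pattern, with the attribute stubbed out so it compiles without the plugin; the stub and the _sketch names are assumptions for illustration.

#include <stdio.h>

/* Without the constify plugin the attribute does not exist; stub it. */
#ifndef __no_const
#define __no_const /* constify plugin: leave this type writable */
#endif

struct xpc_arch_operations_sketch {
	void (*setup_ch_structures)(void *part);
	void (*teardown_ch_structures)(void *part);
};
/* The attribute goes on a typedef because it cannot be attached
 * to the extern declaration of the variable itself. */
typedef struct xpc_arch_operations_sketch __no_const
	xpc_arch_operations_sketch_no_const;

static xpc_arch_operations_sketch_no_const xpc_arch_ops_sketch;

static void setup_stub(void *part) { (void)part; puts("setup"); }

int main(void)
{
	/* legal precisely because the type is not constified */
	xpc_arch_ops_sketch.setup_ch_structures = setup_stub;
	xpc_arch_ops_sketch.setup_ch_structures(0);
	return 0;
}
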
39485diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39486index 6d8f701..35b6369 100644
39487--- a/drivers/mmc/core/mmc_ops.c
39488+++ b/drivers/mmc/core/mmc_ops.c
39489@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39490 void *data_buf;
39491 int is_on_stack;
39492
39493- is_on_stack = object_is_on_stack(buf);
39494+ is_on_stack = object_starts_on_stack(buf);
39495 if (is_on_stack) {
39496 /*
39497 * dma onto stack is unsafe/nonportable, but callers to this
39498diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39499index 53b8fd9..615b462 100644
39500--- a/drivers/mmc/host/dw_mmc.h
39501+++ b/drivers/mmc/host/dw_mmc.h
39502@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
39503 int (*parse_dt)(struct dw_mci *host);
39504 int (*setup_bus)(struct dw_mci *host,
39505 struct device_node *slot_np, u8 bus_width);
39506-};
39507+} __do_const;
39508 #endif /* _DW_MMC_H_ */
39509diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39510index 82a8de1..3c56ccb 100644
39511--- a/drivers/mmc/host/sdhci-s3c.c
39512+++ b/drivers/mmc/host/sdhci-s3c.c
39513@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39514 * we can use overriding functions instead of default.
39515 */
39516 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39517- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39518- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39519- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39520+ pax_open_kernel();
39521+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39522+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39523+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39524+ pax_close_kernel();
39525 }
39526
39527 /* It supports additional host capabilities if needed */
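
[annotation] The sdhci-s3c hunk shows the standard pattern this patch uses for writing to an ops structure that constify/KERNEXEC has made read-only: pax_open_kernel() temporarily permits the write, the assignment goes through a *(void **)& cast so the compiler accepts storing through a const lvalue, and pax_close_kernel() restores protection. The same pattern recurs below in macvlan, mac80211_hwsim, wl1251, wl12xx, wl18xx and iwlegacy. A rough userspace analogue of the open/write/close window using mprotect(2); purely illustrative, error handling omitted and a 4 KiB page size assumed.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *page;

static void open_window(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strcpy(page, "ops table");
	mprotect(page, 4096, PROT_READ);	/* "constified" */

	open_window();				/* pax_open_kernel() analogue */
	memcpy(page, "patched ok", 11);
	close_window();				/* pax_close_kernel() analogue */

	puts((char *)page);
	return 0;
}
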
39528diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39529index a4eb8b5..8c0628f 100644
39530--- a/drivers/mtd/devices/doc2000.c
39531+++ b/drivers/mtd/devices/doc2000.c
39532@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39533
39534 /* The ECC will not be calculated correctly if less than 512 is written */
39535 /* DBB-
39536- if (len != 0x200 && eccbuf)
39537+ if (len != 0x200)
39538 printk(KERN_WARNING
39539 "ECC needs a full sector write (adr: %lx size %lx)\n",
39540 (long) to, (long) len);
39541diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39542index 0c8bb6b..6f35deb 100644
39543--- a/drivers/mtd/nand/denali.c
39544+++ b/drivers/mtd/nand/denali.c
39545@@ -24,6 +24,7 @@
39546 #include <linux/slab.h>
39547 #include <linux/mtd/mtd.h>
39548 #include <linux/module.h>
39549+#include <linux/slab.h>
39550
39551 #include "denali.h"
39552
39553diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39554index 51b9d6a..52af9a7 100644
39555--- a/drivers/mtd/nftlmount.c
39556+++ b/drivers/mtd/nftlmount.c
39557@@ -24,6 +24,7 @@
39558 #include <asm/errno.h>
39559 #include <linux/delay.h>
39560 #include <linux/slab.h>
39561+#include <linux/sched.h>
39562 #include <linux/mtd/mtd.h>
39563 #include <linux/mtd/nand.h>
39564 #include <linux/mtd/nftl.h>
39565diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39566index 8dd6ba5..419cc1d 100644
39567--- a/drivers/mtd/sm_ftl.c
39568+++ b/drivers/mtd/sm_ftl.c
39569@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39570 #define SM_CIS_VENDOR_OFFSET 0x59
39571 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39572 {
39573- struct attribute_group *attr_group;
39574+ attribute_group_no_const *attr_group;
39575 struct attribute **attributes;
39576 struct sm_sysfs_attribute *vendor_attribute;
39577
39578diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
39579index 27cdf1f..8c37357 100644
39580--- a/drivers/net/bonding/bond_main.c
39581+++ b/drivers/net/bonding/bond_main.c
39582@@ -4859,7 +4859,7 @@ static unsigned int bond_get_num_tx_queues(void)
39583 return tx_queues;
39584 }
39585
39586-static struct rtnl_link_ops bond_link_ops __read_mostly = {
39587+static struct rtnl_link_ops bond_link_ops = {
39588 .kind = "bond",
39589 .priv_size = sizeof(struct bonding),
39590 .setup = bond_setup,
39591@@ -4975,8 +4975,8 @@ static void __exit bonding_exit(void)
39592
39593 bond_destroy_debugfs();
39594
39595- rtnl_link_unregister(&bond_link_ops);
39596 unregister_pernet_subsys(&bond_net_ops);
39597+ rtnl_link_unregister(&bond_link_ops);
39598
39599 #ifdef CONFIG_NET_POLL_CONTROLLER
39600 /*
39601diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
39602index 70dba5d..11a0919 100644
39603--- a/drivers/net/ethernet/8390/ax88796.c
39604+++ b/drivers/net/ethernet/8390/ax88796.c
39605@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
39606 if (ax->plat->reg_offsets)
39607 ei_local->reg_offset = ax->plat->reg_offsets;
39608 else {
39609+ resource_size_t _mem_size = mem_size;
39610+ do_div(_mem_size, 0x18);
39611 ei_local->reg_offset = ax->reg_offsets;
39612 for (ret = 0; ret < 0x18; ret++)
39613- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
39614+ ax->reg_offsets[ret] = _mem_size * ret;
39615 }
39616
39617 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
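
[annotation] The ax88796 hunk replaces a plain division of mem_size (a resource_size_t, which can be 64-bit even on a 32-bit platform) by 0x18 with do_div(). A C-level 64/32 division on a 32-bit kernel would pull in libgcc's __udivdi3, which the kernel does not link against; do_div() divides in place and returns the remainder, and the patch works on a copy so mem_size itself is preserved. A userspace emulation of those semantics; the macro body here is a sketch, but the quotient-in-place / return-remainder contract matches the kernel's documented behaviour (base must fit in 32 bits).

#include <stdint.h>
#include <stdio.h>

/* do_div(n, base): n becomes the quotient, the remainder is returned. */
#define do_div_sketch(n, base) ({			\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) /= (base);					\
	__rem;						\
})

int main(void)
{
	uint64_t mem_size = 0x4000;	/* hypothetical resource size */
	uint64_t per_reg = mem_size;	/* work on a copy, as the patch does */
	do_div_sketch(per_reg, 0x18);

	for (int i = 0; i < 0x18; i++)
		printf("reg_offset[%d] = %llu\n", i,
		       (unsigned long long)(per_reg * i));
	return 0;
}
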
39618diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39619index 0991534..8098e92 100644
39620--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39621+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39622@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
39623 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
39624 {
39625 /* RX_MODE controlling object */
39626- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
39627+ bnx2x_init_rx_mode_obj(bp);
39628
39629 /* multicast configuration controlling object */
39630 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
39631diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39632index 10bc093..a2fb42a 100644
39633--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39634+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39635@@ -2136,12 +2136,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
39636 break;
39637 default:
39638 BNX2X_ERR("Non valid capability ID\n");
39639- rval = -EINVAL;
39640+ rval = 1;
39641 break;
39642 }
39643 } else {
39644 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39645- rval = -EINVAL;
39646+ rval = 1;
39647 }
39648
39649 DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
39650@@ -2167,12 +2167,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
39651 break;
39652 default:
39653 BNX2X_ERR("Non valid TC-ID\n");
39654- rval = -EINVAL;
39655+ rval = 1;
39656 break;
39657 }
39658 } else {
39659 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39660- rval = -EINVAL;
39661+ rval = 1;
39662 }
39663
39664 return rval;
39665@@ -2185,7 +2185,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
39666 return -EINVAL;
39667 }
39668
39669-static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
39670+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
39671 {
39672 struct bnx2x *bp = netdev_priv(netdev);
39673 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
39674@@ -2387,12 +2387,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
39675 break;
39676 default:
39677 BNX2X_ERR("Non valid featrue-ID\n");
39678- rval = -EINVAL;
39679+ rval = 1;
39680 break;
39681 }
39682 } else {
39683 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39684- rval = -EINVAL;
39685+ rval = 1;
39686 }
39687
39688 return rval;
39689@@ -2428,12 +2428,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
39690 break;
39691 default:
39692 BNX2X_ERR("Non valid featrue-ID\n");
39693- rval = -EINVAL;
39694+ rval = 1;
39695 break;
39696 }
39697 } else {
39698 DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
39699- rval = -EINVAL;
39700+ rval = 1;
39701 }
39702
39703 return rval;
39704diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39705index 09b625e..15b16fe 100644
39706--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39707+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39708@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
39709 return rc;
39710 }
39711
39712-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39713- struct bnx2x_rx_mode_obj *o)
39714+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
39715 {
39716 if (CHIP_IS_E1x(bp)) {
39717- o->wait_comp = bnx2x_empty_rx_mode_wait;
39718- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
39719+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
39720+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
39721 } else {
39722- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
39723- o->config_rx_mode = bnx2x_set_rx_mode_e2;
39724+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
39725+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
39726 }
39727 }
39728
39729diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39730index adbd91b..58ec94a 100644
39731--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39732+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39733@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
39734
39735 /********************* RX MODE ****************/
39736
39737-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39738- struct bnx2x_rx_mode_obj *o);
39739+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
39740
39741 /**
39742 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
39743diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
39744index d330e81..ce1fb9a 100644
39745--- a/drivers/net/ethernet/broadcom/tg3.h
39746+++ b/drivers/net/ethernet/broadcom/tg3.h
39747@@ -146,6 +146,7 @@
39748 #define CHIPREV_ID_5750_A0 0x4000
39749 #define CHIPREV_ID_5750_A1 0x4001
39750 #define CHIPREV_ID_5750_A3 0x4003
39751+#define CHIPREV_ID_5750_C1 0x4201
39752 #define CHIPREV_ID_5750_C2 0x4202
39753 #define CHIPREV_ID_5752_A0_HW 0x5000
39754 #define CHIPREV_ID_5752_A0 0x6000
39755diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39756index 8cffcdf..aadf043 100644
39757--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39758+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39759@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
39760 */
39761 struct l2t_skb_cb {
39762 arp_failure_handler_func arp_failure_handler;
39763-};
39764+} __no_const;
39765
39766 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
39767
39768diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
39769index 4c83003..2a2a5b9 100644
39770--- a/drivers/net/ethernet/dec/tulip/de4x5.c
39771+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
39772@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39773 for (i=0; i<ETH_ALEN; i++) {
39774 tmp.addr[i] = dev->dev_addr[i];
39775 }
39776- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39777+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39778 break;
39779
39780 case DE4X5_SET_HWADDR: /* Set the hardware address */
39781@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39782 spin_lock_irqsave(&lp->lock, flags);
39783 memcpy(&statbuf, &lp->pktStats, ioc->len);
39784 spin_unlock_irqrestore(&lp->lock, flags);
39785- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39786+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39787 return -EFAULT;
39788 break;
39789 }
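
[annotation] The de4x5 ioctl hunk closes a kernel infoleak: ioc->len comes straight from userspace, while the copy_to_user() source is a fixed-size object on the kernel stack, so an oversized length would copy adjacent stack memory out to the caller. The patch simply refuses lengths larger than the object. The same discipline in a self-contained sketch; names and the memcpy stand-in are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_user(): plain memcpy in this sketch. */
static int copy_out(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int get_hwaddr(void *user_buf, size_t user_len)
{
	unsigned char addr[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	if (user_len > sizeof(addr))	/* the fix: never copy past the object */
		return -EFAULT;
	return copy_out(user_buf, addr, user_len);
}

int main(void)
{
	unsigned char buf[64];
	printf("len 6:  %d\n", get_hwaddr(buf, 6));	/* ok */
	printf("len 64: %d\n", get_hwaddr(buf, 64));	/* rejected */
	return 0;
}
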
39790diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
39791index 4d6f3c5..6169e60 100644
39792--- a/drivers/net/ethernet/emulex/benet/be_main.c
39793+++ b/drivers/net/ethernet/emulex/benet/be_main.c
39794@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
39795
39796 if (wrapped)
39797 newacc += 65536;
39798- ACCESS_ONCE(*acc) = newacc;
39799+ ACCESS_ONCE_RW(*acc) = newacc;
39800 }
39801
39802 void be_parse_stats(struct be_adapter *adapter)
39803diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
39804index 74d749e..eefb1bd 100644
39805--- a/drivers/net/ethernet/faraday/ftgmac100.c
39806+++ b/drivers/net/ethernet/faraday/ftgmac100.c
39807@@ -31,6 +31,8 @@
39808 #include <linux/netdevice.h>
39809 #include <linux/phy.h>
39810 #include <linux/platform_device.h>
39811+#include <linux/interrupt.h>
39812+#include <linux/irqreturn.h>
39813 #include <net/ip.h>
39814
39815 #include "ftgmac100.h"
39816diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
39817index b901a01..1ff32ee 100644
39818--- a/drivers/net/ethernet/faraday/ftmac100.c
39819+++ b/drivers/net/ethernet/faraday/ftmac100.c
39820@@ -31,6 +31,8 @@
39821 #include <linux/module.h>
39822 #include <linux/netdevice.h>
39823 #include <linux/platform_device.h>
39824+#include <linux/interrupt.h>
39825+#include <linux/irqreturn.h>
39826
39827 #include "ftmac100.h"
39828
39829diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39830index bb9256a..56d8752 100644
39831--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39832+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39833@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
39834 }
39835
39836 /* update the base incval used to calculate frequency adjustment */
39837- ACCESS_ONCE(adapter->base_incval) = incval;
39838+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
39839 smp_mb();
39840
39841 /* need lock to prevent incorrect read while modifying cyclecounter */
39842diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
39843index c124e67..db9b897 100644
39844--- a/drivers/net/ethernet/lantiq_etop.c
39845+++ b/drivers/net/ethernet/lantiq_etop.c
39846@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
39847 return 0;
39848
39849 err_free:
39850- kfree(dev);
39851+ free_netdev(dev);
39852 err_out:
39853 return err;
39854 }
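
[annotation] The lantiq_etop probe error path freed a struct net_device with kfree(), but net devices come from alloc_etherdev() and carry internal state that only free_netdev() releases. A tiny userspace illustration of why an object with its own allocator must be torn down by its own destructor; the types here are invented for the sketch.

#include <stdlib.h>

struct dev_sketch {
	char *priv;		/* extra allocation owned by the object */
};

static struct dev_sketch *alloc_dev(size_t priv_size)
{
	struct dev_sketch *d = calloc(1, sizeof(*d));
	if (d)
		d->priv = calloc(1, priv_size);
	return d;
}

static void free_dev(struct dev_sketch *d)	/* the free_netdev() analogue */
{
	if (d)
		free(d->priv);
	free(d);
}

int main(void)
{
	struct dev_sketch *d = alloc_dev(128);
	/* free(d) here would leak d->priv; free_dev(d) releases both. */
	free_dev(d);
	return 0;
}
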
39855diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39856index fbe5363..266b4e3 100644
39857--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
39858+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39859@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39860 struct __vxge_hw_fifo *fifo;
39861 struct vxge_hw_fifo_config *config;
39862 u32 txdl_size, txdl_per_memblock;
39863- struct vxge_hw_mempool_cbs fifo_mp_callback;
39864+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
39865+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
39866+ };
39867+
39868 struct __vxge_hw_virtualpath *vpath;
39869
39870 if ((vp == NULL) || (attr == NULL)) {
39871@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39872 goto exit;
39873 }
39874
39875- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
39876-
39877 fifo->mempool =
39878 __vxge_hw_mempool_create(vpath->hldev,
39879 fifo->config->memblock_size,
39880diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
39881index 998974f..ecd26db 100644
39882--- a/drivers/net/ethernet/realtek/r8169.c
39883+++ b/drivers/net/ethernet/realtek/r8169.c
39884@@ -741,22 +741,22 @@ struct rtl8169_private {
39885 struct mdio_ops {
39886 void (*write)(struct rtl8169_private *, int, int);
39887 int (*read)(struct rtl8169_private *, int);
39888- } mdio_ops;
39889+ } __no_const mdio_ops;
39890
39891 struct pll_power_ops {
39892 void (*down)(struct rtl8169_private *);
39893 void (*up)(struct rtl8169_private *);
39894- } pll_power_ops;
39895+ } __no_const pll_power_ops;
39896
39897 struct jumbo_ops {
39898 void (*enable)(struct rtl8169_private *);
39899 void (*disable)(struct rtl8169_private *);
39900- } jumbo_ops;
39901+ } __no_const jumbo_ops;
39902
39903 struct csi_ops {
39904 void (*write)(struct rtl8169_private *, int, int);
39905 u32 (*read)(struct rtl8169_private *, int);
39906- } csi_ops;
39907+ } __no_const csi_ops;
39908
39909 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
39910 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
39911diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
39912index 3f93624..cf01144 100644
39913--- a/drivers/net/ethernet/sfc/ptp.c
39914+++ b/drivers/net/ethernet/sfc/ptp.c
39915@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
39916 (u32)((u64)ptp->start.dma_addr >> 32));
39917
39918 /* Clear flag that signals MC ready */
39919- ACCESS_ONCE(*start) = 0;
39920+ ACCESS_ONCE_RW(*start) = 0;
39921 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
39922 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
39923
39924diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39925index 0c74a70..3bc6f68 100644
39926--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39927+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39928@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
39929
39930 writel(value, ioaddr + MMC_CNTRL);
39931
39932- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39933- MMC_CNTRL, value);
39934+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39935+// MMC_CNTRL, value);
39936 }
39937
39938 /* To mask all all interrupts.*/
39939diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
39940index e6fe0d8..2b7d752 100644
39941--- a/drivers/net/hyperv/hyperv_net.h
39942+++ b/drivers/net/hyperv/hyperv_net.h
39943@@ -101,7 +101,7 @@ struct rndis_device {
39944
39945 enum rndis_device_state state;
39946 bool link_state;
39947- atomic_t new_req_id;
39948+ atomic_unchecked_t new_req_id;
39949
39950 spinlock_t request_lock;
39951 struct list_head req_list;
39952diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
39953index 2b657d4..9903bc0 100644
39954--- a/drivers/net/hyperv/rndis_filter.c
39955+++ b/drivers/net/hyperv/rndis_filter.c
39956@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
39957 * template
39958 */
39959 set = &rndis_msg->msg.set_req;
39960- set->req_id = atomic_inc_return(&dev->new_req_id);
39961+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39962
39963 /* Add to the request list */
39964 spin_lock_irqsave(&dev->request_lock, flags);
39965@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
39966
39967 /* Setup the rndis set */
39968 halt = &request->request_msg.msg.halt_req;
39969- halt->req_id = atomic_inc_return(&dev->new_req_id);
39970+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39971
39972 /* Ignore return since this msg is optional. */
39973 rndis_filter_send_request(dev, request);
39974diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
39975index 1e9cb0b..7839125 100644
39976--- a/drivers/net/ieee802154/fakehard.c
39977+++ b/drivers/net/ieee802154/fakehard.c
39978@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
39979 phy->transmit_power = 0xbf;
39980
39981 dev->netdev_ops = &fake_ops;
39982- dev->ml_priv = &fake_mlme;
39983+ dev->ml_priv = (void *)&fake_mlme;
39984
39985 priv = netdev_priv(dev);
39986 priv->phy = phy;
39987diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
39988index e5cb723..1fc0461 100644
39989--- a/drivers/net/macvlan.c
39990+++ b/drivers/net/macvlan.c
39991@@ -852,13 +852,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
39992 int macvlan_link_register(struct rtnl_link_ops *ops)
39993 {
39994 /* common fields */
39995- ops->priv_size = sizeof(struct macvlan_dev);
39996- ops->validate = macvlan_validate;
39997- ops->maxtype = IFLA_MACVLAN_MAX;
39998- ops->policy = macvlan_policy;
39999- ops->changelink = macvlan_changelink;
40000- ops->get_size = macvlan_get_size;
40001- ops->fill_info = macvlan_fill_info;
40002+ pax_open_kernel();
40003+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
40004+ *(void **)&ops->validate = macvlan_validate;
40005+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
40006+ *(const void **)&ops->policy = macvlan_policy;
40007+ *(void **)&ops->changelink = macvlan_changelink;
40008+ *(void **)&ops->get_size = macvlan_get_size;
40009+ *(void **)&ops->fill_info = macvlan_fill_info;
40010+ pax_close_kernel();
40011
40012 return rtnl_link_register(ops);
40013 };
40014@@ -914,7 +916,7 @@ static int macvlan_device_event(struct notifier_block *unused,
40015 return NOTIFY_DONE;
40016 }
40017
40018-static struct notifier_block macvlan_notifier_block __read_mostly = {
40019+static struct notifier_block macvlan_notifier_block = {
40020 .notifier_call = macvlan_device_event,
40021 };
40022
40023diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
40024index 0f0f9ce..0ca5819 100644
40025--- a/drivers/net/macvtap.c
40026+++ b/drivers/net/macvtap.c
40027@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
40028 return NOTIFY_DONE;
40029 }
40030
40031-static struct notifier_block macvtap_notifier_block __read_mostly = {
40032+static struct notifier_block macvtap_notifier_block = {
40033 .notifier_call = macvtap_device_event,
40034 };
40035
40036diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
40037index daec9b0..6428fcb 100644
40038--- a/drivers/net/phy/mdio-bitbang.c
40039+++ b/drivers/net/phy/mdio-bitbang.c
40040@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
40041 struct mdiobb_ctrl *ctrl = bus->priv;
40042
40043 module_put(ctrl->ops->owner);
40044+ mdiobus_unregister(bus);
40045 mdiobus_free(bus);
40046 }
40047 EXPORT_SYMBOL(free_mdio_bitbang);
40048diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
40049index 508570e..f706dc7 100644
40050--- a/drivers/net/ppp/ppp_generic.c
40051+++ b/drivers/net/ppp/ppp_generic.c
40052@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40053 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
40054 struct ppp_stats stats;
40055 struct ppp_comp_stats cstats;
40056- char *vers;
40057
40058 switch (cmd) {
40059 case SIOCGPPPSTATS:
40060@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40061 break;
40062
40063 case SIOCGPPPVER:
40064- vers = PPP_VERSION;
40065- if (copy_to_user(addr, vers, strlen(vers) + 1))
40066+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
40067 break;
40068 err = 0;
40069 break;
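
[annotation] The ppp_generic SIOCGPPPVER hunk drops the intermediate vers pointer and copies the string literal directly. Because PPP_VERSION is a literal, sizeof(PPP_VERSION) is a compile-time constant that already includes the terminating NUL, so the runtime strlen()+1 was redundant. Demonstrated standalone; the version string is the one ppp_generic used at the time.

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"	/* value from ppp_generic.c */

int main(void)
{
	/* sizeof counts the NUL; strlen does not. */
	printf("sizeof = %zu, strlen + 1 = %zu\n",
	       sizeof(PPP_VERSION), strlen(PPP_VERSION) + 1);
	return 0;
}
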
40070diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
40071index 8efe47a..a8075c5 100644
40072--- a/drivers/net/team/team.c
40073+++ b/drivers/net/team/team.c
40074@@ -2603,7 +2603,7 @@ static int team_device_event(struct notifier_block *unused,
40075 return NOTIFY_DONE;
40076 }
40077
40078-static struct notifier_block team_notifier_block __read_mostly = {
40079+static struct notifier_block team_notifier_block = {
40080 .notifier_call = team_device_event,
40081 };
40082
40083diff --git a/drivers/net/tun.c b/drivers/net/tun.c
40084index cb95fe5..a5bdab5 100644
40085--- a/drivers/net/tun.c
40086+++ b/drivers/net/tun.c
40087@@ -1838,7 +1838,7 @@ unlock:
40088 }
40089
40090 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40091- unsigned long arg, int ifreq_len)
40092+ unsigned long arg, size_t ifreq_len)
40093 {
40094 struct tun_file *tfile = file->private_data;
40095 struct tun_struct *tun;
40096@@ -1850,6 +1850,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40097 int vnet_hdr_sz;
40098 int ret;
40099
40100+ if (ifreq_len > sizeof ifr)
40101+ return -EFAULT;
40102+
40103 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
40104 if (copy_from_user(&ifr, argp, ifreq_len))
40105 return -EFAULT;
40106diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40107index cd8ccb2..cff5144 100644
40108--- a/drivers/net/usb/hso.c
40109+++ b/drivers/net/usb/hso.c
40110@@ -71,7 +71,7 @@
40111 #include <asm/byteorder.h>
40112 #include <linux/serial_core.h>
40113 #include <linux/serial.h>
40114-
40115+#include <asm/local.h>
40116
40117 #define MOD_AUTHOR "Option Wireless"
40118 #define MOD_DESCRIPTION "USB High Speed Option driver"
40119@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
40120 struct urb *urb;
40121
40122 urb = serial->rx_urb[0];
40123- if (serial->port.count > 0) {
40124+ if (atomic_read(&serial->port.count) > 0) {
40125 count = put_rxbuf_data(urb, serial);
40126 if (count == -1)
40127 return;
40128@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
40129 DUMP1(urb->transfer_buffer, urb->actual_length);
40130
40131 /* Anyone listening? */
40132- if (serial->port.count == 0)
40133+ if (atomic_read(&serial->port.count) == 0)
40134 return;
40135
40136 if (status == 0) {
40137@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40138 tty_port_tty_set(&serial->port, tty);
40139
40140 /* check for port already opened, if not set the termios */
40141- serial->port.count++;
40142- if (serial->port.count == 1) {
40143+ if (atomic_inc_return(&serial->port.count) == 1) {
40144 serial->rx_state = RX_IDLE;
40145 /* Force default termio settings */
40146 _hso_serial_set_termios(tty, NULL);
40147@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40148 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40149 if (result) {
40150 hso_stop_serial_device(serial->parent);
40151- serial->port.count--;
40152+ atomic_dec(&serial->port.count);
40153 kref_put(&serial->parent->ref, hso_serial_ref_free);
40154 }
40155 } else {
40156@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40157
40158 /* reset the rts and dtr */
40159 /* do the actual close */
40160- serial->port.count--;
40161+ atomic_dec(&serial->port.count);
40162
40163- if (serial->port.count <= 0) {
40164- serial->port.count = 0;
40165+ if (atomic_read(&serial->port.count) <= 0) {
40166+ atomic_set(&serial->port.count, 0);
40167 tty_port_tty_set(&serial->port, NULL);
40168 if (!usb_gone)
40169 hso_stop_serial_device(serial->parent);
40170@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40171
40172 /* the actual setup */
40173 spin_lock_irqsave(&serial->serial_lock, flags);
40174- if (serial->port.count)
40175+ if (atomic_read(&serial->port.count))
40176 _hso_serial_set_termios(tty, old);
40177 else
40178 tty->termios = *old;
40179@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
40180 D1("Pending read interrupt on port %d\n", i);
40181 spin_lock(&serial->serial_lock);
40182 if (serial->rx_state == RX_IDLE &&
40183- serial->port.count > 0) {
40184+ atomic_read(&serial->port.count) > 0) {
40185 /* Setup and send a ctrl req read on
40186 * port i */
40187 if (!serial->rx_urb_filled[0]) {
40188@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
40189 /* Start all serial ports */
40190 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40191 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40192- if (dev2ser(serial_table[i])->port.count) {
40193+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
40194 result =
40195 hso_start_serial_device(serial_table[i], GFP_NOIO);
40196 hso_kick_transmit(dev2ser(serial_table[i]));
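
[annotation] The hso hunks convert serial->port.count from a plain int to an atomic_t. open(), close(), resume and the URB callbacks all touch the count from different contexts, so the unlocked count++ / count-- pairs were racy; atomic_inc_return() makes the increment and the "am I the first opener?" test a single indivisible step. The shape of that conversion in portable C11, simplified and without the driver's surrounding locking.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void port_open(void)
{
	/* increment and first-opener test are one atomic step */
	if (atomic_fetch_add(&port_count, 1) + 1 == 1)
		puts("first open: initialise hardware");
}

static void port_close(void)
{
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
		atomic_store(&port_count, 0);	/* mirrors the <= 0 reset above */
		puts("last close: shut hardware down");
	}
}

int main(void)
{
	port_open();
	port_open();
	port_close();
	port_close();
	return 0;
}
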
40197diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
40198index 6993bfa..9053a34 100644
40199--- a/drivers/net/vxlan.c
40200+++ b/drivers/net/vxlan.c
40201@@ -1428,7 +1428,7 @@ nla_put_failure:
40202 return -EMSGSIZE;
40203 }
40204
40205-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
40206+static struct rtnl_link_ops vxlan_link_ops = {
40207 .kind = "vxlan",
40208 .maxtype = IFLA_VXLAN_MAX,
40209 .policy = vxlan_policy,
40210diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
40211index 77fa428..996b355 100644
40212--- a/drivers/net/wireless/at76c50x-usb.c
40213+++ b/drivers/net/wireless/at76c50x-usb.c
40214@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
40215 }
40216
40217 /* Convert timeout from the DFU status to jiffies */
40218-static inline unsigned long at76_get_timeout(struct dfu_status *s)
40219+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
40220 {
40221 return msecs_to_jiffies((s->poll_timeout[2] << 16)
40222 | (s->poll_timeout[1] << 8)
40223diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40224index 8d78253..bebbb68 100644
40225--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40226+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40227@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40228 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
40229 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
40230
40231- ACCESS_ONCE(ads->ds_link) = i->link;
40232- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
40233+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
40234+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
40235
40236 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
40237 ctl6 = SM(i->keytype, AR_EncrType);
40238@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40239
40240 if ((i->is_first || i->is_last) &&
40241 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
40242- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
40243+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
40244 | set11nTries(i->rates, 1)
40245 | set11nTries(i->rates, 2)
40246 | set11nTries(i->rates, 3)
40247 | (i->dur_update ? AR_DurUpdateEna : 0)
40248 | SM(0, AR_BurstDur);
40249
40250- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
40251+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
40252 | set11nRate(i->rates, 1)
40253 | set11nRate(i->rates, 2)
40254 | set11nRate(i->rates, 3);
40255 } else {
40256- ACCESS_ONCE(ads->ds_ctl2) = 0;
40257- ACCESS_ONCE(ads->ds_ctl3) = 0;
40258+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
40259+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
40260 }
40261
40262 if (!i->is_first) {
40263- ACCESS_ONCE(ads->ds_ctl0) = 0;
40264- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40265- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40266+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
40267+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40268+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40269 return;
40270 }
40271
40272@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40273 break;
40274 }
40275
40276- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40277+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40278 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40279 | SM(i->txpower, AR_XmitPower)
40280 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40281@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40282 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
40283 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
40284
40285- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40286- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40287+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40288+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40289
40290 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
40291 return;
40292
40293- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40294+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40295 | set11nPktDurRTSCTS(i->rates, 1);
40296
40297- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40298+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40299 | set11nPktDurRTSCTS(i->rates, 3);
40300
40301- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40302+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40303 | set11nRateFlags(i->rates, 1)
40304 | set11nRateFlags(i->rates, 2)
40305 | set11nRateFlags(i->rates, 3)
40306diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40307index 301bf72..3f5654f 100644
40308--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40309+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40310@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40311 (i->qcu << AR_TxQcuNum_S) | desc_len;
40312
40313 checksum += val;
40314- ACCESS_ONCE(ads->info) = val;
40315+ ACCESS_ONCE_RW(ads->info) = val;
40316
40317 checksum += i->link;
40318- ACCESS_ONCE(ads->link) = i->link;
40319+ ACCESS_ONCE_RW(ads->link) = i->link;
40320
40321 checksum += i->buf_addr[0];
40322- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
40323+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
40324 checksum += i->buf_addr[1];
40325- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
40326+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
40327 checksum += i->buf_addr[2];
40328- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
40329+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
40330 checksum += i->buf_addr[3];
40331- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
40332+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
40333
40334 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
40335- ACCESS_ONCE(ads->ctl3) = val;
40336+ ACCESS_ONCE_RW(ads->ctl3) = val;
40337 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
40338- ACCESS_ONCE(ads->ctl5) = val;
40339+ ACCESS_ONCE_RW(ads->ctl5) = val;
40340 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
40341- ACCESS_ONCE(ads->ctl7) = val;
40342+ ACCESS_ONCE_RW(ads->ctl7) = val;
40343 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
40344- ACCESS_ONCE(ads->ctl9) = val;
40345+ ACCESS_ONCE_RW(ads->ctl9) = val;
40346
40347 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
40348- ACCESS_ONCE(ads->ctl10) = checksum;
40349+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
40350
40351 if (i->is_first || i->is_last) {
40352- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
40353+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
40354 | set11nTries(i->rates, 1)
40355 | set11nTries(i->rates, 2)
40356 | set11nTries(i->rates, 3)
40357 | (i->dur_update ? AR_DurUpdateEna : 0)
40358 | SM(0, AR_BurstDur);
40359
40360- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
40361+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
40362 | set11nRate(i->rates, 1)
40363 | set11nRate(i->rates, 2)
40364 | set11nRate(i->rates, 3);
40365 } else {
40366- ACCESS_ONCE(ads->ctl13) = 0;
40367- ACCESS_ONCE(ads->ctl14) = 0;
40368+ ACCESS_ONCE_RW(ads->ctl13) = 0;
40369+ ACCESS_ONCE_RW(ads->ctl14) = 0;
40370 }
40371
40372 ads->ctl20 = 0;
40373@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40374
40375 ctl17 = SM(i->keytype, AR_EncrType);
40376 if (!i->is_first) {
40377- ACCESS_ONCE(ads->ctl11) = 0;
40378- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40379- ACCESS_ONCE(ads->ctl15) = 0;
40380- ACCESS_ONCE(ads->ctl16) = 0;
40381- ACCESS_ONCE(ads->ctl17) = ctl17;
40382- ACCESS_ONCE(ads->ctl18) = 0;
40383- ACCESS_ONCE(ads->ctl19) = 0;
40384+ ACCESS_ONCE_RW(ads->ctl11) = 0;
40385+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40386+ ACCESS_ONCE_RW(ads->ctl15) = 0;
40387+ ACCESS_ONCE_RW(ads->ctl16) = 0;
40388+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40389+ ACCESS_ONCE_RW(ads->ctl18) = 0;
40390+ ACCESS_ONCE_RW(ads->ctl19) = 0;
40391 return;
40392 }
40393
40394- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40395+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40396 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40397 | SM(i->txpower, AR_XmitPower)
40398 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40399@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40400 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
40401 ctl12 |= SM(val, AR_PAPRDChainMask);
40402
40403- ACCESS_ONCE(ads->ctl12) = ctl12;
40404- ACCESS_ONCE(ads->ctl17) = ctl17;
40405+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
40406+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40407
40408- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40409+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40410 | set11nPktDurRTSCTS(i->rates, 1);
40411
40412- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40413+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40414 | set11nPktDurRTSCTS(i->rates, 3);
40415
40416- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
40417+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
40418 | set11nRateFlags(i->rates, 1)
40419 | set11nRateFlags(i->rates, 2)
40420 | set11nRateFlags(i->rates, 3)
40421 | SM(i->rtscts_rate, AR_RTSCTSRate);
40422
40423- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
40424+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
40425 }
40426
40427 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
40428diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
40429index 9d26fc5..60d9f14 100644
40430--- a/drivers/net/wireless/ath/ath9k/hw.h
40431+++ b/drivers/net/wireless/ath/ath9k/hw.h
40432@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
40433
40434 /* ANI */
40435 void (*ani_cache_ini_regs)(struct ath_hw *ah);
40436-};
40437+} __no_const;
40438
40439 /**
40440 * struct ath_hw_ops - callbacks used by hardware code and driver code
40441@@ -688,7 +688,7 @@ struct ath_hw_ops {
40442 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
40443 struct ath_hw_antcomb_conf *antconf);
40444 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
40445-};
40446+} __no_const;
40447
40448 struct ath_nf_limits {
40449 s16 max;
40450diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40451index 3726cd6..b655808 100644
40452--- a/drivers/net/wireless/iwlegacy/3945-mac.c
40453+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40454@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40455 */
40456 if (il3945_mod_params.disable_hw_scan) {
40457 D_INFO("Disabling hw_scan\n");
40458- il3945_mac_ops.hw_scan = NULL;
40459+ pax_open_kernel();
40460+ *(void **)&il3945_mac_ops.hw_scan = NULL;
40461+ pax_close_kernel();
40462 }
40463
40464 D_INFO("*** LOAD DRIVER ***\n");
40465diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40466index 5b9533e..7733880 100644
40467--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40468+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40469@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40470 {
40471 struct iwl_priv *priv = file->private_data;
40472 char buf[64];
40473- int buf_size;
40474+ size_t buf_size;
40475 u32 offset, len;
40476
40477 memset(buf, 0, sizeof(buf));
40478@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40479 struct iwl_priv *priv = file->private_data;
40480
40481 char buf[8];
40482- int buf_size;
40483+ size_t buf_size;
40484 u32 reset_flag;
40485
40486 memset(buf, 0, sizeof(buf));
40487@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40488 {
40489 struct iwl_priv *priv = file->private_data;
40490 char buf[8];
40491- int buf_size;
40492+ size_t buf_size;
40493 int ht40;
40494
40495 memset(buf, 0, sizeof(buf));
40496@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40497 {
40498 struct iwl_priv *priv = file->private_data;
40499 char buf[8];
40500- int buf_size;
40501+ size_t buf_size;
40502 int value;
40503
40504 memset(buf, 0, sizeof(buf));
40505@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40506 {
40507 struct iwl_priv *priv = file->private_data;
40508 char buf[8];
40509- int buf_size;
40510+ size_t buf_size;
40511 int clear;
40512
40513 memset(buf, 0, sizeof(buf));
40514@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40515 {
40516 struct iwl_priv *priv = file->private_data;
40517 char buf[8];
40518- int buf_size;
40519+ size_t buf_size;
40520 int trace;
40521
40522 memset(buf, 0, sizeof(buf));
40523@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40524 {
40525 struct iwl_priv *priv = file->private_data;
40526 char buf[8];
40527- int buf_size;
40528+ size_t buf_size;
40529 int missed;
40530
40531 memset(buf, 0, sizeof(buf));
40532@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40533
40534 struct iwl_priv *priv = file->private_data;
40535 char buf[8];
40536- int buf_size;
40537+ size_t buf_size;
40538 int plcp;
40539
40540 memset(buf, 0, sizeof(buf));
40541@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40542
40543 struct iwl_priv *priv = file->private_data;
40544 char buf[8];
40545- int buf_size;
40546+ size_t buf_size;
40547 int flush;
40548
40549 memset(buf, 0, sizeof(buf));
40550@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40551
40552 struct iwl_priv *priv = file->private_data;
40553 char buf[8];
40554- int buf_size;
40555+ size_t buf_size;
40556 int rts;
40557
40558 if (!priv->cfg->ht_params)
40559@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
40560 {
40561 struct iwl_priv *priv = file->private_data;
40562 char buf[8];
40563- int buf_size;
40564+ size_t buf_size;
40565
40566 memset(buf, 0, sizeof(buf));
40567 buf_size = min(count, sizeof(buf) - 1);
40568@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
40569 struct iwl_priv *priv = file->private_data;
40570 u32 event_log_flag;
40571 char buf[8];
40572- int buf_size;
40573+ size_t buf_size;
40574
40575 /* check that the interface is up */
40576 if (!iwl_is_ready(priv))
40577@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
40578 struct iwl_priv *priv = file->private_data;
40579 char buf[8];
40580 u32 calib_disabled;
40581- int buf_size;
40582+ size_t buf_size;
40583
40584 memset(buf, 0, sizeof(buf));
40585 buf_size = min(count, sizeof(buf) - 1);
40586diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
40587index 35708b9..31f7754 100644
40588--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
40589+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
40590@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
40591 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
40592
40593 char buf[8];
40594- int buf_size;
40595+ size_t buf_size;
40596 u32 reset_flag;
40597
40598 memset(buf, 0, sizeof(buf));
40599@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
40600 {
40601 struct iwl_trans *trans = file->private_data;
40602 char buf[8];
40603- int buf_size;
40604+ size_t buf_size;
40605 int csr;
40606
40607 memset(buf, 0, sizeof(buf));
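
[annotation] The two iwlwifi files above repeat one mechanical change a dozen times: debugfs write handlers stored min(count, sizeof(buf) - 1) into an int buf_size. The clamp keeps the value small in these particular handlers, but mixing a user-supplied size_t count into signed int arithmetic is the pattern grsecurity's size_overflow instrumentation flags, since in the general case the narrowing can yield a negative length that later passes a signed bounds check. The hazard in isolation (the narrowing itself, not the iwlwifi code path); the narrowed value is implementation-defined, typically negative on two's-complement targets.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char buf[8];
	size_t count = (size_t)INT_MAX + 2;	/* attacker-chosen length */

	int bad = (int)count;			/* narrows, typically to a negative value */
	if (bad < (int)sizeof(buf))		/* signed check passes! */
		printf("signed path would 'accept' len=%d\n", bad);

	size_t good = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
	printf("size_t path clamps to %zu\n", good);
	return 0;
}
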
40608diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
40609index ff90855..e46d223 100644
40610--- a/drivers/net/wireless/mac80211_hwsim.c
40611+++ b/drivers/net/wireless/mac80211_hwsim.c
40612@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
40613
40614 if (channels > 1) {
40615 hwsim_if_comb.num_different_channels = channels;
40616- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40617- mac80211_hwsim_ops.cancel_hw_scan =
40618- mac80211_hwsim_cancel_hw_scan;
40619- mac80211_hwsim_ops.sw_scan_start = NULL;
40620- mac80211_hwsim_ops.sw_scan_complete = NULL;
40621- mac80211_hwsim_ops.remain_on_channel =
40622- mac80211_hwsim_roc;
40623- mac80211_hwsim_ops.cancel_remain_on_channel =
40624- mac80211_hwsim_croc;
40625- mac80211_hwsim_ops.add_chanctx =
40626- mac80211_hwsim_add_chanctx;
40627- mac80211_hwsim_ops.remove_chanctx =
40628- mac80211_hwsim_remove_chanctx;
40629- mac80211_hwsim_ops.change_chanctx =
40630- mac80211_hwsim_change_chanctx;
40631- mac80211_hwsim_ops.assign_vif_chanctx =
40632- mac80211_hwsim_assign_vif_chanctx;
40633- mac80211_hwsim_ops.unassign_vif_chanctx =
40634- mac80211_hwsim_unassign_vif_chanctx;
40635+ pax_open_kernel();
40636+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40637+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
40638+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
40639+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
40640+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
40641+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
40642+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
40643+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
40644+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
40645+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
40646+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
40647+ pax_close_kernel();
40648 }
40649
40650 spin_lock_init(&hwsim_radio_lock);
40651diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
40652index cdb11b3..3eca710 100644
40653--- a/drivers/net/wireless/mwifiex/cfg80211.c
40654+++ b/drivers/net/wireless/mwifiex/cfg80211.c
40655@@ -1846,7 +1846,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
40656 }
40657 }
40658
40659- for (i = 0; i < request->n_channels; i++) {
40660+ for (i = 0; i < min_t(u32, request->n_channels,
40661+ MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
40662 chan = request->channels[i];
40663 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
40664 priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
40665diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40666index abe1d03..fb02c22 100644
40667--- a/drivers/net/wireless/rndis_wlan.c
40668+++ b/drivers/net/wireless/rndis_wlan.c
40669@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40670
40671 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
40672
40673- if (rts_threshold < 0 || rts_threshold > 2347)
40674+ if (rts_threshold > 2347)
40675 rts_threshold = 2347;
40676
40677 tmp = cpu_to_le32(rts_threshold);
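
[annotation] The rndis_wlan hunk removes rts_threshold < 0: rts_threshold is a u32, so that comparison is always false and only provokes a compiler warning; the upper-bound clamp is the only half with any effect. In miniature:

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_rts(uint32_t rts)
{
	/* "rts < 0" on an unsigned type can never be true; drop it */
	if (rts > 2347)
		rts = 2347;
	return rts;
}

int main(void)
{
	printf("%u %u\n", clamp_rts(100), clamp_rts(999999));
	return 0;
}
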
40678diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
40679index 0751b35..246ba3e 100644
40680--- a/drivers/net/wireless/rt2x00/rt2x00.h
40681+++ b/drivers/net/wireless/rt2x00/rt2x00.h
40682@@ -398,7 +398,7 @@ struct rt2x00_intf {
40683 * for hardware which doesn't support hardware
40684 * sequence counting.
40685 */
40686- atomic_t seqno;
40687+ atomic_unchecked_t seqno;
40688 };
40689
40690 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
40691diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
40692index e488b94..14b6a0c 100644
40693--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
40694+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
40695@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
40696 * sequence counter given by mac80211.
40697 */
40698 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
40699- seqno = atomic_add_return(0x10, &intf->seqno);
40700+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
40701 else
40702- seqno = atomic_read(&intf->seqno);
40703+ seqno = atomic_read_unchecked(&intf->seqno);
40704
40705 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
40706 hdr->seq_ctrl |= cpu_to_le16(seqno);
40707diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
40708index e57ee48..541cf6c 100644
40709--- a/drivers/net/wireless/ti/wl1251/sdio.c
40710+++ b/drivers/net/wireless/ti/wl1251/sdio.c
40711@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
40712
40713 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
40714
40715- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40716- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40717+ pax_open_kernel();
40718+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40719+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40720+ pax_close_kernel();
40721
40722 wl1251_info("using dedicated interrupt line");
40723 } else {
40724- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40725- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40726+ pax_open_kernel();
40727+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40728+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40729+ pax_close_kernel();
40730
40731 wl1251_info("using SDIO interrupt");
40732 }
40733diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
40734index e5f5f8f..fdf15b7 100644
40735--- a/drivers/net/wireless/ti/wl12xx/main.c
40736+++ b/drivers/net/wireless/ti/wl12xx/main.c
40737@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40738 sizeof(wl->conf.mem));
40739
40740 /* read data preparation is only needed by wl127x */
40741- wl->ops->prepare_read = wl127x_prepare_read;
40742+ pax_open_kernel();
40743+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40744+ pax_close_kernel();
40745
40746 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40747 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40748@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40749 sizeof(wl->conf.mem));
40750
40751 /* read data preparation is only needed by wl127x */
40752- wl->ops->prepare_read = wl127x_prepare_read;
40753+ pax_open_kernel();
40754+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40755+ pax_close_kernel();
40756
40757 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40758 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40759diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
40760index 8d8c1f8..e754844 100644
40761--- a/drivers/net/wireless/ti/wl18xx/main.c
40762+++ b/drivers/net/wireless/ti/wl18xx/main.c
40763@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
40764 }
40765
40766 if (!checksum_param) {
40767- wl18xx_ops.set_rx_csum = NULL;
40768- wl18xx_ops.init_vif = NULL;
40769+ pax_open_kernel();
40770+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
40771+ *(void **)&wl18xx_ops.init_vif = NULL;
40772+ pax_close_kernel();
40773 }
40774
40775 /* Enable 11a Band only if we have 5G antennas */
40776diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
40777index ef2b171..bb513a6 100644
40778--- a/drivers/net/wireless/zd1211rw/zd_usb.c
40779+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
40780@@ -387,7 +387,7 @@ static inline void handle_regs_int(struct urb *urb)
40781 {
40782 struct zd_usb *usb = urb->context;
40783 struct zd_usb_interrupt *intr = &usb->intr;
40784- int len;
40785+ unsigned int len;
40786 u16 int_num;
40787
40788 ZD_ASSERT(in_interrupt());
40789diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40790index d93b2b6..ae50401 100644
40791--- a/drivers/oprofile/buffer_sync.c
40792+++ b/drivers/oprofile/buffer_sync.c
40793@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40794 if (cookie == NO_COOKIE)
40795 offset = pc;
40796 if (cookie == INVALID_COOKIE) {
40797- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40798+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40799 offset = pc;
40800 }
40801 if (cookie != last_cookie) {
40802@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40803 /* add userspace sample */
40804
40805 if (!mm) {
40806- atomic_inc(&oprofile_stats.sample_lost_no_mm);
40807+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40808 return 0;
40809 }
40810
40811 cookie = lookup_dcookie(mm, s->eip, &offset);
40812
40813 if (cookie == INVALID_COOKIE) {
40814- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40815+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40816 return 0;
40817 }
40818
40819@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
40820 /* ignore backtraces if failed to add a sample */
40821 if (state == sb_bt_start) {
40822 state = sb_bt_ignore;
40823- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40824+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40825 }
40826 }
40827 release_mm(mm);
40828diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40829index c0cc4e7..44d4e54 100644
40830--- a/drivers/oprofile/event_buffer.c
40831+++ b/drivers/oprofile/event_buffer.c
40832@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40833 }
40834
40835 if (buffer_pos == buffer_size) {
40836- atomic_inc(&oprofile_stats.event_lost_overflow);
40837+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40838 return;
40839 }
40840
40841diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40842index ed2c3ec..deda85a 100644
40843--- a/drivers/oprofile/oprof.c
40844+++ b/drivers/oprofile/oprof.c
40845@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40846 if (oprofile_ops.switch_events())
40847 return;
40848
40849- atomic_inc(&oprofile_stats.multiplex_counter);
40850+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40851 start_switch_worker();
40852 }
40853
40854diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
40855index 84a208d..d61b0a1 100644
40856--- a/drivers/oprofile/oprofile_files.c
40857+++ b/drivers/oprofile/oprofile_files.c
40858@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
40859
40860 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
40861
40862-static ssize_t timeout_read(struct file *file, char __user *buf,
40863+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
40864 size_t count, loff_t *offset)
40865 {
40866 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
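
__intentional_overflow(-1) is an annotation consumed by grsecurity's size_overflow GCC plugin: it marks the function's return value (parameter -1) as deliberately allowed to overflow, so the plugin skips instrumenting it. A hedged sketch of how such a macro is typically guarded (the exact definition is assumed, not copied from this patch):

#include <stdio.h>

/* Assumed fallback, mirroring how plugin attributes are usually guarded:
 * with the size_overflow plugin the attribute flags parameter -1 (the
 * return value) as intentionally wrappable; without the plugin the
 * annotation compiles away to nothing. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

static __intentional_overflow(-1) unsigned long jiffies_ms(unsigned long j)
{
	return j * 1000;	/* may wrap: that is the point of the marker */
}

int main(void)
{
	printf("%lu\n", jiffies_ms(5));
	return 0;
}
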
40867diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40868index 917d28e..d62d981 100644
40869--- a/drivers/oprofile/oprofile_stats.c
40870+++ b/drivers/oprofile/oprofile_stats.c
40871@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40872 cpu_buf->sample_invalid_eip = 0;
40873 }
40874
40875- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40876- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40877- atomic_set(&oprofile_stats.event_lost_overflow, 0);
40878- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40879- atomic_set(&oprofile_stats.multiplex_counter, 0);
40880+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40881+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40882+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40883+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40884+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40885 }
40886
40887
40888diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40889index 38b6fc0..b5cbfce 100644
40890--- a/drivers/oprofile/oprofile_stats.h
40891+++ b/drivers/oprofile/oprofile_stats.h
40892@@ -13,11 +13,11 @@
40893 #include <linux/atomic.h>
40894
40895 struct oprofile_stat_struct {
40896- atomic_t sample_lost_no_mm;
40897- atomic_t sample_lost_no_mapping;
40898- atomic_t bt_lost_no_mapping;
40899- atomic_t event_lost_overflow;
40900- atomic_t multiplex_counter;
40901+ atomic_unchecked_t sample_lost_no_mm;
40902+ atomic_unchecked_t sample_lost_no_mapping;
40903+ atomic_unchecked_t bt_lost_no_mapping;
40904+ atomic_unchecked_t event_lost_overflow;
40905+ atomic_unchecked_t multiplex_counter;
40906 };
40907
40908 extern struct oprofile_stat_struct oprofile_stats;
40909diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40910index 849357c..b83c1e0 100644
40911--- a/drivers/oprofile/oprofilefs.c
40912+++ b/drivers/oprofile/oprofilefs.c
40913@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
40914
40915
40916 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40917- char const *name, atomic_t *val)
40918+ char const *name, atomic_unchecked_t *val)
40919 {
40920 return __oprofilefs_create_file(sb, root, name,
40921 &atomic_ro_fops, 0444, val);
40922diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
40923index 93404f7..4a313d8 100644
40924--- a/drivers/oprofile/timer_int.c
40925+++ b/drivers/oprofile/timer_int.c
40926@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
40927 return NOTIFY_OK;
40928 }
40929
40930-static struct notifier_block __refdata oprofile_cpu_notifier = {
40931+static struct notifier_block oprofile_cpu_notifier = {
40932 .notifier_call = oprofile_cpu_notify,
40933 };
40934
40935diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40936index 3f56bc0..707d642 100644
40937--- a/drivers/parport/procfs.c
40938+++ b/drivers/parport/procfs.c
40939@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40940
40941 *ppos += len;
40942
40943- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40944+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40945 }
40946
40947 #ifdef CONFIG_PARPORT_1284
40948@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40949
40950 *ppos += len;
40951
40952- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40953+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40954 }
40955 #endif /* IEEE1284.3 support. */
40956
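
Both parport procfs hunks put a compile-time bound in front of copy_to_user(): if a miscomputed len ever exceeded the on-stack buffer, the copy would disclose adjacent stack memory to userspace, so oversize lengths now fail with -EFAULT before any copy happens. A standalone sketch of the guard (copy_to_user replaced here by a stand-in):

#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_user(): returns nonzero on failure. */
static int copy_to_user_stub(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int read_status(char *result, size_t len)
{
	char buffer[256];

	snprintf(buffer, sizeof buffer, "status: ok\n");
	/* The added clause: refuse before copying if len could run past
	 * the source buffer, rather than trusting the caller's length. */
	return (len > sizeof buffer ||
		copy_to_user_stub(result, buffer, len)) ? -14 /* -EFAULT */ : 0;
}

int main(void)
{
	char out[256];
	printf("%d %d\n", read_status(out, 12), read_status(out, 1024));
	return 0;
}
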
40957diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
40958index c35e8ad..fc33beb 100644
40959--- a/drivers/pci/hotplug/acpiphp_ibm.c
40960+++ b/drivers/pci/hotplug/acpiphp_ibm.c
40961@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
40962 goto init_cleanup;
40963 }
40964
40965- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40966+ pax_open_kernel();
40967+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40968+ pax_close_kernel();
40969 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
40970
40971 return retval;
40972diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
40973index a6a71c4..c91097b 100644
40974--- a/drivers/pci/hotplug/cpcihp_generic.c
40975+++ b/drivers/pci/hotplug/cpcihp_generic.c
40976@@ -73,7 +73,6 @@ static u16 port;
40977 static unsigned int enum_bit;
40978 static u8 enum_mask;
40979
40980-static struct cpci_hp_controller_ops generic_hpc_ops;
40981 static struct cpci_hp_controller generic_hpc;
40982
40983 static int __init validate_parameters(void)
40984@@ -139,6 +138,10 @@ static int query_enum(void)
40985 return ((value & enum_mask) == enum_mask);
40986 }
40987
40988+static struct cpci_hp_controller_ops generic_hpc_ops = {
40989+ .query_enum = query_enum,
40990+};
40991+
40992 static int __init cpcihp_generic_init(void)
40993 {
40994 int status;
40995@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
40996 pci_dev_put(dev);
40997
40998 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
40999- generic_hpc_ops.query_enum = query_enum;
41000 generic_hpc.ops = &generic_hpc_ops;
41001
41002 status = cpci_hp_register_controller(&generic_hpc);
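
Rather than filling in generic_hpc_ops at init time, the table is now built with a designated initializer, moved below query_enum() so the callback is in scope; with the constify plugin this lets the ops live in read-only memory from the start. The zt5550, pda_power and power_supply hunks below apply the same rework. A condensed before/after sketch:

#include <stdio.h>

struct hp_ops {
	int (*query_enum)(void);
};

static int query_enum(void) { return 1; }

/* After: a designated initializer binds the callback at compile time,
 * so nothing ever needs to write to the structure at runtime.
 * Before (removed by the patch):
 *	static struct hp_ops generic_hpc_ops;
 *	... generic_hpc_ops.query_enum = query_enum;  at init time */
static struct hp_ops generic_hpc_ops = {
	.query_enum = query_enum,
};

int main(void)
{
	printf("ENUM# asserted: %d\n", generic_hpc_ops.query_enum());
	return 0;
}
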
41003diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
41004index 449b4bb..257e2e8 100644
41005--- a/drivers/pci/hotplug/cpcihp_zt5550.c
41006+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
41007@@ -59,7 +59,6 @@
41008 /* local variables */
41009 static bool debug;
41010 static bool poll;
41011-static struct cpci_hp_controller_ops zt5550_hpc_ops;
41012 static struct cpci_hp_controller zt5550_hpc;
41013
41014 /* Primary cPCI bus bridge device */
41015@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
41016 return 0;
41017 }
41018
41019+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
41020+ .query_enum = zt5550_hc_query_enum,
41021+};
41022+
41023 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
41024 {
41025 int status;
41026@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
41027 dbg("returned from zt5550_hc_config");
41028
41029 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
41030- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
41031 zt5550_hpc.ops = &zt5550_hpc_ops;
41032 if(!poll) {
41033 zt5550_hpc.irq = hc_dev->irq;
41034 zt5550_hpc.irq_flags = IRQF_SHARED;
41035 zt5550_hpc.dev_id = hc_dev;
41036
41037- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41038- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41039- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41040+ pax_open_kernel();
41041+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41042+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41043+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41044+	pax_close_kernel();
41045 } else {
41046 info("using ENUM# polling mode");
41047 }
41048diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
41049index 76ba8a1..20ca857 100644
41050--- a/drivers/pci/hotplug/cpqphp_nvram.c
41051+++ b/drivers/pci/hotplug/cpqphp_nvram.c
41052@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
41053
41054 void compaq_nvram_init (void __iomem *rom_start)
41055 {
41056+
41057+#ifndef CONFIG_PAX_KERNEXEC
41058 if (rom_start) {
41059 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
41060 }
41061+#endif
41062+
41063 dbg("int15 entry = %p\n", compaq_int15_entry_point);
41064
41065 /* initialize our int15 lock */
41066diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
41067index 202f4a9..8ee47d0 100644
41068--- a/drivers/pci/hotplug/pci_hotplug_core.c
41069+++ b/drivers/pci/hotplug/pci_hotplug_core.c
41070@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
41071 return -EINVAL;
41072 }
41073
41074- slot->ops->owner = owner;
41075- slot->ops->mod_name = mod_name;
41076+ pax_open_kernel();
41077+ *(struct module **)&slot->ops->owner = owner;
41078+ *(const char **)&slot->ops->mod_name = mod_name;
41079+ pax_close_kernel();
41080
41081 mutex_lock(&pci_hp_mutex);
41082 /*
41083diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
41084index 939bd1d..a1459c9 100644
41085--- a/drivers/pci/hotplug/pciehp_core.c
41086+++ b/drivers/pci/hotplug/pciehp_core.c
41087@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
41088 struct slot *slot = ctrl->slot;
41089 struct hotplug_slot *hotplug = NULL;
41090 struct hotplug_slot_info *info = NULL;
41091- struct hotplug_slot_ops *ops = NULL;
41092+ hotplug_slot_ops_no_const *ops = NULL;
41093 char name[SLOT_NAME_SIZE];
41094 int retval = -ENOMEM;
41095
41096diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
41097index 9c6e9bb..2916736 100644
41098--- a/drivers/pci/pci-sysfs.c
41099+++ b/drivers/pci/pci-sysfs.c
41100@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
41101 {
41102 /* allocate attribute structure, piggyback attribute name */
41103 int name_len = write_combine ? 13 : 10;
41104- struct bin_attribute *res_attr;
41105+ bin_attribute_no_const *res_attr;
41106 int retval;
41107
41108 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
41109@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
41110 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
41111 {
41112 int retval;
41113- struct bin_attribute *attr;
41114+ bin_attribute_no_const *attr;
41115
41116 /* If the device has VPD, try to expose it in sysfs. */
41117 if (dev->vpd) {
41118@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
41119 {
41120 int retval;
41121 int rom_size = 0;
41122- struct bin_attribute *attr;
41123+ bin_attribute_no_const *attr;
41124
41125 if (!sysfs_initialized)
41126 return -EACCES;
41127diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
41128index e851829..a1a7196 100644
41129--- a/drivers/pci/pci.h
41130+++ b/drivers/pci/pci.h
41131@@ -98,7 +98,7 @@ struct pci_vpd_ops {
41132 struct pci_vpd {
41133 unsigned int len;
41134 const struct pci_vpd_ops *ops;
41135- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
41136+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
41137 };
41138
41139 extern int pci_vpd_pci22_init(struct pci_dev *dev);
41140diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
41141index 8474b6a..ee81993 100644
41142--- a/drivers/pci/pcie/aspm.c
41143+++ b/drivers/pci/pcie/aspm.c
41144@@ -27,9 +27,9 @@
41145 #define MODULE_PARAM_PREFIX "pcie_aspm."
41146
41147 /* Note: those are not register definitions */
41148-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
41149-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
41150-#define ASPM_STATE_L1 (4) /* L1 state */
41151+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
41152+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
41153+#define ASPM_STATE_L1 (4U) /* L1 state */
41154 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41155 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
41156
41157diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
41158index 6186f03..1a78714 100644
41159--- a/drivers/pci/probe.c
41160+++ b/drivers/pci/probe.c
41161@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
41162 struct pci_bus_region region;
41163 bool bar_too_big = false, bar_disabled = false;
41164
41165- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
41166+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
41167
41168 /* No printks while decoding is disabled! */
41169 if (!dev->mmio_always_on) {
41170diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
41171index 9b8505c..f00870a 100644
41172--- a/drivers/pci/proc.c
41173+++ b/drivers/pci/proc.c
41174@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
41175 static int __init pci_proc_init(void)
41176 {
41177 struct pci_dev *dev = NULL;
41178+
41179+#ifdef CONFIG_GRKERNSEC_PROC_ADD
41180+#ifdef CONFIG_GRKERNSEC_PROC_USER
41181+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
41182+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41183+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
41184+#endif
41185+#else
41186 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
41187+#endif
41188 proc_create("devices", 0, proc_bus_pci_dir,
41189 &proc_bus_pci_dev_operations);
41190 proc_initialized = 1;
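
With GRKERNSEC_PROC_ADD, /proc/bus/pci is created with restricted permissions instead of proc_mkdir()'s default world-readable 0555: owner-only 0500 under GRKERNSEC_PROC_USER, or 0550 when a trusted group is configured via GRKERNSEC_PROC_USERGROUP, hiding PCI topology from unprivileged users. A small sketch of the resulting mode bits:

#include <stdio.h>

/* Values as defined in the kernel's <linux/stat.h>. */
#define S_IRUSR 0400
#define S_IXUSR 0100
#define S_IRGRP 0040
#define S_IXGRP 0010

int main(void)
{
	/* CONFIG_GRKERNSEC_PROC_USER: owner (root) only. */
	printf("user-only  : %04o\n", S_IRUSR | S_IXUSR);
	/* CONFIG_GRKERNSEC_PROC_USERGROUP: owner plus a trusted group. */
	printf("user+group : %04o\n", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP);
	return 0;
}
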
41191diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41192index 2111dbb..79e434b 100644
41193--- a/drivers/platform/x86/msi-laptop.c
41194+++ b/drivers/platform/x86/msi-laptop.c
41195@@ -820,12 +820,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
41196 int result;
41197
41198 /* allow userland write sysfs file */
41199- dev_attr_bluetooth.store = store_bluetooth;
41200- dev_attr_wlan.store = store_wlan;
41201- dev_attr_threeg.store = store_threeg;
41202- dev_attr_bluetooth.attr.mode |= S_IWUSR;
41203- dev_attr_wlan.attr.mode |= S_IWUSR;
41204- dev_attr_threeg.attr.mode |= S_IWUSR;
41205+ pax_open_kernel();
41206+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
41207+ *(void **)&dev_attr_wlan.store = store_wlan;
41208+ *(void **)&dev_attr_threeg.store = store_threeg;
41209+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
41210+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
41211+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
41212+ pax_close_kernel();
41213
41214 /* disable hardware control by fn key */
41215 result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
41216diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
41217index 0fe987f..6f3d5c3 100644
41218--- a/drivers/platform/x86/sony-laptop.c
41219+++ b/drivers/platform/x86/sony-laptop.c
41220@@ -2356,7 +2356,7 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
41221 }
41222
41223 /* High speed charging function */
41224-static struct device_attribute *hsc_handle;
41225+static device_attribute_no_const *hsc_handle;
41226
41227 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
41228 struct device_attribute *attr,
41229diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
41230index f946ca7..f25c833 100644
41231--- a/drivers/platform/x86/thinkpad_acpi.c
41232+++ b/drivers/platform/x86/thinkpad_acpi.c
41233@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
41234 return 0;
41235 }
41236
41237-void static hotkey_mask_warn_incomplete_mask(void)
41238+static void hotkey_mask_warn_incomplete_mask(void)
41239 {
41240 /* log only what the user can fix... */
41241 const u32 wantedmask = hotkey_driver_mask &
41242@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
41243 }
41244 }
41245
41246-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41247- struct tp_nvram_state *newn,
41248- const u32 event_mask)
41249-{
41250-
41251 #define TPACPI_COMPARE_KEY(__scancode, __member) \
41252 do { \
41253 if ((event_mask & (1 << __scancode)) && \
41254@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41255 tpacpi_hotkey_send_key(__scancode); \
41256 } while (0)
41257
41258- void issue_volchange(const unsigned int oldvol,
41259- const unsigned int newvol)
41260- {
41261- unsigned int i = oldvol;
41262+static void issue_volchange(const unsigned int oldvol,
41263+ const unsigned int newvol,
41264+ const u32 event_mask)
41265+{
41266+ unsigned int i = oldvol;
41267
41268- while (i > newvol) {
41269- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41270- i--;
41271- }
41272- while (i < newvol) {
41273- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41274- i++;
41275- }
41276+ while (i > newvol) {
41277+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41278+ i--;
41279 }
41280+ while (i < newvol) {
41281+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41282+ i++;
41283+ }
41284+}
41285
41286- void issue_brightnesschange(const unsigned int oldbrt,
41287- const unsigned int newbrt)
41288- {
41289- unsigned int i = oldbrt;
41290+static void issue_brightnesschange(const unsigned int oldbrt,
41291+ const unsigned int newbrt,
41292+ const u32 event_mask)
41293+{
41294+ unsigned int i = oldbrt;
41295
41296- while (i > newbrt) {
41297- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41298- i--;
41299- }
41300- while (i < newbrt) {
41301- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41302- i++;
41303- }
41304+ while (i > newbrt) {
41305+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41306+ i--;
41307+ }
41308+ while (i < newbrt) {
41309+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41310+ i++;
41311 }
41312+}
41313
41314+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41315+ struct tp_nvram_state *newn,
41316+ const u32 event_mask)
41317+{
41318 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
41319 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
41320 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
41321@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41322 oldn->volume_level != newn->volume_level) {
41323 /* recently muted, or repeated mute keypress, or
41324 * multiple presses ending in mute */
41325- issue_volchange(oldn->volume_level, newn->volume_level);
41326+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41327 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
41328 }
41329 } else {
41330@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41331 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41332 }
41333 if (oldn->volume_level != newn->volume_level) {
41334- issue_volchange(oldn->volume_level, newn->volume_level);
41335+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41336 } else if (oldn->volume_toggle != newn->volume_toggle) {
41337 /* repeated vol up/down keypress at end of scale ? */
41338 if (newn->volume_level == 0)
41339@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41340 /* handle brightness */
41341 if (oldn->brightness_level != newn->brightness_level) {
41342 issue_brightnesschange(oldn->brightness_level,
41343- newn->brightness_level);
41344+ newn->brightness_level,
41345+ event_mask);
41346 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
41347 /* repeated key presses that didn't change state */
41348 if (newn->brightness_level == 0)
41349@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41350 && !tp_features.bright_unkfw)
41351 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41352 }
41353+}
41354
41355 #undef TPACPI_COMPARE_KEY
41356 #undef TPACPI_MAY_SEND_KEY
41357-}
41358
41359 /*
41360 * Polling driver
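
The thinkpad_acpi rework eliminates GCC nested functions: issue_volchange() and issue_brightnesschange() were defined inside hotkey_compare_and_issue_event() and captured event_mask from the enclosing frame, a construct that may require executable stack trampolines and that the hardening plugins handle poorly. They become ordinary static functions taking event_mask explicitly. A minimal sketch of the same transformation:

#include <stdio.h>

/* After: a file-scope static helper with the captured variable passed
 * explicitly, replacing a GCC nested function that implicitly read
 * event_mask from the enclosing stack frame. */
static void issue_volchange(unsigned int oldvol, unsigned int newvol,
			    unsigned int event_mask)
{
	while (oldvol < newvol) {
		printf("volume up (mask %#x)\n", event_mask);
		oldvol++;
	}
}

static void compare_and_issue(unsigned int oldvol, unsigned int newvol,
			      unsigned int event_mask)
{
	if (oldvol != newvol)
		issue_volchange(oldvol, newvol, event_mask);
}

int main(void)
{
	compare_and_issue(3, 5, 0x10);
	return 0;
}
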
41361diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
41362index 769d265..a3a05ca 100644
41363--- a/drivers/pnp/pnpbios/bioscalls.c
41364+++ b/drivers/pnp/pnpbios/bioscalls.c
41365@@ -58,7 +58,7 @@ do { \
41366 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
41367 } while(0)
41368
41369-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41370+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41371 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41372
41373 /*
41374@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41375
41376 cpu = get_cpu();
41377 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41378+
41379+ pax_open_kernel();
41380 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41381+ pax_close_kernel();
41382
41383 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41384 spin_lock_irqsave(&pnp_bios_lock, flags);
41385@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41386 :"memory");
41387 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41388
41389+ pax_open_kernel();
41390 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41391+ pax_close_kernel();
41392+
41393 put_cpu();
41394
41395 /* If we get here and this is set then the PnP BIOS faulted on us. */
41396@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41397 return status;
41398 }
41399
41400-void pnpbios_calls_init(union pnp_bios_install_struct *header)
41401+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41402 {
41403 int i;
41404
41405@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41406 pnp_bios_callpoint.offset = header->fields.pm16offset;
41407 pnp_bios_callpoint.segment = PNP_CS16;
41408
41409+ pax_open_kernel();
41410+
41411 for_each_possible_cpu(i) {
41412 struct desc_struct *gdt = get_cpu_gdt_table(i);
41413 if (!gdt)
41414@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41415 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41416 (unsigned long)__va(header->fields.pm16dseg));
41417 }
41418+
41419+ pax_close_kernel();
41420 }
41421diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41422index 3e6db1c..1fbbdae 100644
41423--- a/drivers/pnp/resource.c
41424+++ b/drivers/pnp/resource.c
41425@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41426 return 1;
41427
41428 /* check if the resource is valid */
41429- if (*irq < 0 || *irq > 15)
41430+ if (*irq > 15)
41431 return 0;
41432
41433 /* check if the resource is reserved */
41434@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41435 return 1;
41436
41437 /* check if the resource is valid */
41438- if (*dma < 0 || *dma == 4 || *dma > 7)
41439+ if (*dma == 4 || *dma > 7)
41440 return 0;
41441
41442 /* check if the resource is reserved */
41443diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
41444index 7df7c5f..bd48c47 100644
41445--- a/drivers/power/pda_power.c
41446+++ b/drivers/power/pda_power.c
41447@@ -37,7 +37,11 @@ static int polling;
41448
41449 #ifdef CONFIG_USB_OTG_UTILS
41450 static struct usb_phy *transceiver;
41451-static struct notifier_block otg_nb;
41452+static int otg_handle_notification(struct notifier_block *nb,
41453+ unsigned long event, void *unused);
41454+static struct notifier_block otg_nb = {
41455+ .notifier_call = otg_handle_notification
41456+};
41457 #endif
41458
41459 static struct regulator *ac_draw;
41460@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
41461
41462 #ifdef CONFIG_USB_OTG_UTILS
41463 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41464- otg_nb.notifier_call = otg_handle_notification;
41465 ret = usb_register_notifier(transceiver, &otg_nb);
41466 if (ret) {
41467 dev_err(dev, "failure to register otg notifier\n");
41468diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41469index cc439fd..8fa30df 100644
41470--- a/drivers/power/power_supply.h
41471+++ b/drivers/power/power_supply.h
41472@@ -16,12 +16,12 @@ struct power_supply;
41473
41474 #ifdef CONFIG_SYSFS
41475
41476-extern void power_supply_init_attrs(struct device_type *dev_type);
41477+extern void power_supply_init_attrs(void);
41478 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41479
41480 #else
41481
41482-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41483+static inline void power_supply_init_attrs(void) {}
41484 #define power_supply_uevent NULL
41485
41486 #endif /* CONFIG_SYSFS */
41487diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41488index 8a7cfb3..72e6e9b 100644
41489--- a/drivers/power/power_supply_core.c
41490+++ b/drivers/power/power_supply_core.c
41491@@ -24,7 +24,10 @@
41492 struct class *power_supply_class;
41493 EXPORT_SYMBOL_GPL(power_supply_class);
41494
41495-static struct device_type power_supply_dev_type;
41496+extern const struct attribute_group *power_supply_attr_groups[];
41497+static struct device_type power_supply_dev_type = {
41498+ .groups = power_supply_attr_groups,
41499+};
41500
41501 static int __power_supply_changed_work(struct device *dev, void *data)
41502 {
41503@@ -393,7 +396,7 @@ static int __init power_supply_class_init(void)
41504 return PTR_ERR(power_supply_class);
41505
41506 power_supply_class->dev_uevent = power_supply_uevent;
41507- power_supply_init_attrs(&power_supply_dev_type);
41508+ power_supply_init_attrs();
41509
41510 return 0;
41511 }
41512diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41513index 40fa3b7..d9c2e0e 100644
41514--- a/drivers/power/power_supply_sysfs.c
41515+++ b/drivers/power/power_supply_sysfs.c
41516@@ -229,17 +229,15 @@ static struct attribute_group power_supply_attr_group = {
41517 .is_visible = power_supply_attr_is_visible,
41518 };
41519
41520-static const struct attribute_group *power_supply_attr_groups[] = {
41521+const struct attribute_group *power_supply_attr_groups[] = {
41522 &power_supply_attr_group,
41523 NULL,
41524 };
41525
41526-void power_supply_init_attrs(struct device_type *dev_type)
41527+void power_supply_init_attrs(void)
41528 {
41529 int i;
41530
41531- dev_type->groups = power_supply_attr_groups;
41532-
41533 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41534 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41535 }
41536diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41537index 4d7c635..9860196 100644
41538--- a/drivers/regulator/max8660.c
41539+++ b/drivers/regulator/max8660.c
41540@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41541 max8660->shadow_regs[MAX8660_OVER1] = 5;
41542 } else {
41543 /* Otherwise devices can be toggled via software */
41544- max8660_dcdc_ops.enable = max8660_dcdc_enable;
41545- max8660_dcdc_ops.disable = max8660_dcdc_disable;
41546+ pax_open_kernel();
41547+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41548+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41549+ pax_close_kernel();
41550 }
41551
41552 /*
41553diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41554index 9a8ea91..c483dd9 100644
41555--- a/drivers/regulator/max8973-regulator.c
41556+++ b/drivers/regulator/max8973-regulator.c
41557@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
41558 if (!pdata->enable_ext_control) {
41559 max->desc.enable_reg = MAX8973_VOUT;
41560 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
41561- max8973_dcdc_ops.enable = regulator_enable_regmap;
41562- max8973_dcdc_ops.disable = regulator_disable_regmap;
41563- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41564+ pax_open_kernel();
41565+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
41566+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
41567+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41568+ pax_close_kernel();
41569 }
41570
41571 max->enable_external_control = pdata->enable_ext_control;
41572diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
41573index 0d84b1f..c2da6ac 100644
41574--- a/drivers/regulator/mc13892-regulator.c
41575+++ b/drivers/regulator/mc13892-regulator.c
41576@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
41577 }
41578 mc13xxx_unlock(mc13892);
41579
41580- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41581+ pax_open_kernel();
41582+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41583 = mc13892_vcam_set_mode;
41584- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41585+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41586 = mc13892_vcam_get_mode;
41587+ pax_close_kernel();
41588
41589 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
41590 ARRAY_SIZE(mc13892_regulators));
41591diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
41592index 16630aa..6afc992 100644
41593--- a/drivers/rtc/rtc-cmos.c
41594+++ b/drivers/rtc/rtc-cmos.c
41595@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
41596 hpet_rtc_timer_init();
41597
41598 /* export at least the first block of NVRAM */
41599- nvram.size = address_space - NVRAM_OFFSET;
41600+ pax_open_kernel();
41601+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
41602+ pax_close_kernel();
41603 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
41604 if (retval < 0) {
41605 dev_dbg(dev, "can't create nvram file? %d\n", retval);
41606diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
41607index 9a86b4b..3a383dc 100644
41608--- a/drivers/rtc/rtc-dev.c
41609+++ b/drivers/rtc/rtc-dev.c
41610@@ -14,6 +14,7 @@
41611 #include <linux/module.h>
41612 #include <linux/rtc.h>
41613 #include <linux/sched.h>
41614+#include <linux/grsecurity.h>
41615 #include "rtc-core.h"
41616
41617 static dev_t rtc_devt;
41618@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
41619 if (copy_from_user(&tm, uarg, sizeof(tm)))
41620 return -EFAULT;
41621
41622+ gr_log_timechange();
41623+
41624 return rtc_set_time(rtc, &tm);
41625
41626 case RTC_PIE_ON:
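
gr_log_timechange() is grsecurity's audit hook for clock changes: every RTC_SET_TIME ioctl now leaves a log record naming the calling task, since resetting the clock is a classic way to muddy audit trails. A stub sketch of such a hook (the real implementation lives under grsecurity/ and is assumed, not shown, here):

#include <stdio.h>
#include <unistd.h>

/* Stub standing in for the grsecurity hook: the real one records the
 * current task's credentials through the kernel's logging machinery. */
static void gr_log_timechange(void)
{
	fprintf(stderr, "grsec: time set by pid %d\n", (int)getpid());
}

static int rtc_set_time_stub(void)
{
	gr_log_timechange();	/* audit first, then change the clock */
	return 0;
}

int main(void)
{
	return rtc_set_time_stub();
}
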
41627diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
41628index e0d0ba4..3c65868 100644
41629--- a/drivers/rtc/rtc-ds1307.c
41630+++ b/drivers/rtc/rtc-ds1307.c
41631@@ -106,7 +106,7 @@ struct ds1307 {
41632 u8 offset; /* register's offset */
41633 u8 regs[11];
41634 u16 nvram_offset;
41635- struct bin_attribute *nvram;
41636+ bin_attribute_no_const *nvram;
41637 enum ds_type type;
41638 unsigned long flags;
41639 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
41640diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
41641index 130f29a..6179d03 100644
41642--- a/drivers/rtc/rtc-m48t59.c
41643+++ b/drivers/rtc/rtc-m48t59.c
41644@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
41645 goto out;
41646 }
41647
41648- m48t59_nvram_attr.size = pdata->offset;
41649+ pax_open_kernel();
41650+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
41651+ pax_close_kernel();
41652
41653 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
41654 if (ret) {
41655diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
41656index e693af6..2e525b6 100644
41657--- a/drivers/scsi/bfa/bfa_fcpim.h
41658+++ b/drivers/scsi/bfa/bfa_fcpim.h
41659@@ -36,7 +36,7 @@ struct bfa_iotag_s {
41660
41661 struct bfa_itn_s {
41662 bfa_isr_func_t isr;
41663-};
41664+} __no_const;
41665
41666 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
41667 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
41668diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
41669index 23a90e7..9cf04ee 100644
41670--- a/drivers/scsi/bfa/bfa_ioc.h
41671+++ b/drivers/scsi/bfa/bfa_ioc.h
41672@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
41673 bfa_ioc_disable_cbfn_t disable_cbfn;
41674 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
41675 bfa_ioc_reset_cbfn_t reset_cbfn;
41676-};
41677+} __no_const;
41678
41679 /*
41680 * IOC event notification mechanism.
41681@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
41682 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
41683 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
41684 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
41685-};
41686+} __no_const;
41687
41688 /*
41689 * Queue element to wait for room in request queue. FIFO order is
41690diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41691index 593085a..47aa999 100644
41692--- a/drivers/scsi/hosts.c
41693+++ b/drivers/scsi/hosts.c
41694@@ -42,7 +42,7 @@
41695 #include "scsi_logging.h"
41696
41697
41698-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41699+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41700
41701
41702 static void scsi_host_cls_release(struct device *dev)
41703@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41704 * subtract one because we increment first then return, but we need to
41705 * know what the next host number was before increment
41706 */
41707- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41708+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41709 shost->dma_channel = 0xff;
41710
41711 /* These three are default values which can be overridden */
41712diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
41713index 4f33806..afd6f60 100644
41714--- a/drivers/scsi/hpsa.c
41715+++ b/drivers/scsi/hpsa.c
41716@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
41717 unsigned long flags;
41718
41719 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
41720- return h->access.command_completed(h, q);
41721+ return h->access->command_completed(h, q);
41722
41723 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
41724 a = rq->head[rq->current_entry];
41725@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
41726 while (!list_empty(&h->reqQ)) {
41727 c = list_entry(h->reqQ.next, struct CommandList, list);
41728 /* can't do anything if fifo is full */
41729- if ((h->access.fifo_full(h))) {
41730+ if ((h->access->fifo_full(h))) {
41731 dev_warn(&h->pdev->dev, "fifo full\n");
41732 break;
41733 }
41734@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
41735
41736 /* Tell the controller execute command */
41737 spin_unlock_irqrestore(&h->lock, flags);
41738- h->access.submit_command(h, c);
41739+ h->access->submit_command(h, c);
41740 spin_lock_irqsave(&h->lock, flags);
41741 }
41742 spin_unlock_irqrestore(&h->lock, flags);
41743@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
41744
41745 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
41746 {
41747- return h->access.command_completed(h, q);
41748+ return h->access->command_completed(h, q);
41749 }
41750
41751 static inline bool interrupt_pending(struct ctlr_info *h)
41752 {
41753- return h->access.intr_pending(h);
41754+ return h->access->intr_pending(h);
41755 }
41756
41757 static inline long interrupt_not_for_us(struct ctlr_info *h)
41758 {
41759- return (h->access.intr_pending(h) == 0) ||
41760+ return (h->access->intr_pending(h) == 0) ||
41761 (h->interrupts_enabled == 0);
41762 }
41763
41764@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
41765 if (prod_index < 0)
41766 return -ENODEV;
41767 h->product_name = products[prod_index].product_name;
41768- h->access = *(products[prod_index].access);
41769+ h->access = products[prod_index].access;
41770
41771 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
41772 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
41773@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
41774
41775 assert_spin_locked(&lockup_detector_lock);
41776 remove_ctlr_from_lockup_detector_list(h);
41777- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41778+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41779 spin_lock_irqsave(&h->lock, flags);
41780 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
41781 spin_unlock_irqrestore(&h->lock, flags);
41782@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
41783 }
41784
41785 /* make sure the board interrupts are off */
41786- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41787+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41788
41789 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
41790 goto clean2;
41791@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
41792 * fake ones to scoop up any residual completions.
41793 */
41794 spin_lock_irqsave(&h->lock, flags);
41795- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41796+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41797 spin_unlock_irqrestore(&h->lock, flags);
41798 free_irqs(h);
41799 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
41800@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
41801 dev_info(&h->pdev->dev, "Board READY.\n");
41802 dev_info(&h->pdev->dev,
41803 "Waiting for stale completions to drain.\n");
41804- h->access.set_intr_mask(h, HPSA_INTR_ON);
41805+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41806 msleep(10000);
41807- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41808+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41809
41810 rc = controller_reset_failed(h->cfgtable);
41811 if (rc)
41812@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
41813 }
41814
41815 /* Turn the interrupts on so we can service requests */
41816- h->access.set_intr_mask(h, HPSA_INTR_ON);
41817+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41818
41819 hpsa_hba_inquiry(h);
41820 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
41821@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
41822 * To write all data in the battery backed cache to disks
41823 */
41824 hpsa_flush_cache(h);
41825- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41826+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41827 hpsa_free_irqs_and_disable_msix(h);
41828 }
41829
41830@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
41831 return;
41832 }
41833 /* Change the access methods to the performant access methods */
41834- h->access = SA5_performant_access;
41835+ h->access = &SA5_performant_access;
41836 h->transMethod = CFGTBL_Trans_Performant;
41837 }
41838
41839diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
41840index 9816479..c5d4e97 100644
41841--- a/drivers/scsi/hpsa.h
41842+++ b/drivers/scsi/hpsa.h
41843@@ -79,7 +79,7 @@ struct ctlr_info {
41844 unsigned int msix_vector;
41845 unsigned int msi_vector;
41846 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
41847- struct access_method access;
41848+ struct access_method *access;
41849
41850 /* queue and queue Info */
41851 struct list_head reqQ;
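
The hpsa conversion turns the embedded `struct access_method access` into a pointer: instead of copying a products[] method table into every controller, which keeps a writable copy per device, each ctlr_info now points at the one shared static table, allowing the tables themselves to be constified; all the h->access. call sites become h->access->. A compact sketch of the refactor:

#include <stdio.h>

struct access_method {
	int (*intr_pending)(void);
};

static int sa5_intr_pending(void) { return 0; }

/* Shared, read-only method table: one instance for all controllers. */
static const struct access_method SA5_access = {
	.intr_pending = sa5_intr_pending,
};

struct ctlr_info {
	/* was: struct access_method access;  (a writable per-device copy) */
	const struct access_method *access;
};

int main(void)
{
	struct ctlr_info h = { .access = &SA5_access };
	printf("intr_pending = %d\n", h.access->intr_pending());
	return 0;
}
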
41852diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41853index c772d8d..35c362c 100644
41854--- a/drivers/scsi/libfc/fc_exch.c
41855+++ b/drivers/scsi/libfc/fc_exch.c
41856@@ -100,12 +100,12 @@ struct fc_exch_mgr {
41857 u16 pool_max_index;
41858
41859 struct {
41860- atomic_t no_free_exch;
41861- atomic_t no_free_exch_xid;
41862- atomic_t xid_not_found;
41863- atomic_t xid_busy;
41864- atomic_t seq_not_found;
41865- atomic_t non_bls_resp;
41866+ atomic_unchecked_t no_free_exch;
41867+ atomic_unchecked_t no_free_exch_xid;
41868+ atomic_unchecked_t xid_not_found;
41869+ atomic_unchecked_t xid_busy;
41870+ atomic_unchecked_t seq_not_found;
41871+ atomic_unchecked_t non_bls_resp;
41872 } stats;
41873 };
41874
41875@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41876 /* allocate memory for exchange */
41877 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41878 if (!ep) {
41879- atomic_inc(&mp->stats.no_free_exch);
41880+ atomic_inc_unchecked(&mp->stats.no_free_exch);
41881 goto out;
41882 }
41883 memset(ep, 0, sizeof(*ep));
41884@@ -786,7 +786,7 @@ out:
41885 return ep;
41886 err:
41887 spin_unlock_bh(&pool->lock);
41888- atomic_inc(&mp->stats.no_free_exch_xid);
41889+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41890 mempool_free(ep, mp->ep_pool);
41891 return NULL;
41892 }
41893@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41894 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41895 ep = fc_exch_find(mp, xid);
41896 if (!ep) {
41897- atomic_inc(&mp->stats.xid_not_found);
41898+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41899 reject = FC_RJT_OX_ID;
41900 goto out;
41901 }
41902@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41903 ep = fc_exch_find(mp, xid);
41904 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41905 if (ep) {
41906- atomic_inc(&mp->stats.xid_busy);
41907+ atomic_inc_unchecked(&mp->stats.xid_busy);
41908 reject = FC_RJT_RX_ID;
41909 goto rel;
41910 }
41911@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41912 }
41913 xid = ep->xid; /* get our XID */
41914 } else if (!ep) {
41915- atomic_inc(&mp->stats.xid_not_found);
41916+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41917 reject = FC_RJT_RX_ID; /* XID not found */
41918 goto out;
41919 }
41920@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41921 } else {
41922 sp = &ep->seq;
41923 if (sp->id != fh->fh_seq_id) {
41924- atomic_inc(&mp->stats.seq_not_found);
41925+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41926 if (f_ctl & FC_FC_END_SEQ) {
41927 /*
41928 * Update sequence_id based on incoming last
41929@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41930
41931 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41932 if (!ep) {
41933- atomic_inc(&mp->stats.xid_not_found);
41934+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41935 goto out;
41936 }
41937 if (ep->esb_stat & ESB_ST_COMPLETE) {
41938- atomic_inc(&mp->stats.xid_not_found);
41939+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41940 goto rel;
41941 }
41942 if (ep->rxid == FC_XID_UNKNOWN)
41943 ep->rxid = ntohs(fh->fh_rx_id);
41944 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41945- atomic_inc(&mp->stats.xid_not_found);
41946+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41947 goto rel;
41948 }
41949 if (ep->did != ntoh24(fh->fh_s_id) &&
41950 ep->did != FC_FID_FLOGI) {
41951- atomic_inc(&mp->stats.xid_not_found);
41952+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41953 goto rel;
41954 }
41955 sof = fr_sof(fp);
41956@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41957 sp->ssb_stat |= SSB_ST_RESP;
41958 sp->id = fh->fh_seq_id;
41959 } else if (sp->id != fh->fh_seq_id) {
41960- atomic_inc(&mp->stats.seq_not_found);
41961+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41962 goto rel;
41963 }
41964
41965@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41966 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41967
41968 if (!sp)
41969- atomic_inc(&mp->stats.xid_not_found);
41970+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41971 else
41972- atomic_inc(&mp->stats.non_bls_resp);
41973+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
41974
41975 fc_frame_free(fp);
41976 }
41977@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
41978
41979 list_for_each_entry(ema, &lport->ema_list, ema_list) {
41980 mp = ema->mp;
41981- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
41982+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
41983 st->fc_no_free_exch_xid +=
41984- atomic_read(&mp->stats.no_free_exch_xid);
41985- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
41986- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
41987- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
41988- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
41989+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
41990+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
41991+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
41992+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
41993+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
41994 }
41995 }
41996 EXPORT_SYMBOL(fc_exch_update_stats);
41997diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41998index bdb81cd..d3c7c2c 100644
41999--- a/drivers/scsi/libsas/sas_ata.c
42000+++ b/drivers/scsi/libsas/sas_ata.c
42001@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
42002 .postreset = ata_std_postreset,
42003 .error_handler = ata_std_error_handler,
42004 .post_internal_cmd = sas_ata_post_internal,
42005- .qc_defer = ata_std_qc_defer,
42006+ .qc_defer = ata_std_qc_defer,
42007 .qc_prep = ata_noop_qc_prep,
42008 .qc_issue = sas_ata_qc_issue,
42009 .qc_fill_rtf = sas_ata_qc_fill_rtf,
42010diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
42011index df4c13a..a51e90c 100644
42012--- a/drivers/scsi/lpfc/lpfc.h
42013+++ b/drivers/scsi/lpfc/lpfc.h
42014@@ -424,7 +424,7 @@ struct lpfc_vport {
42015 struct dentry *debug_nodelist;
42016 struct dentry *vport_debugfs_root;
42017 struct lpfc_debugfs_trc *disc_trc;
42018- atomic_t disc_trc_cnt;
42019+ atomic_unchecked_t disc_trc_cnt;
42020 #endif
42021 uint8_t stat_data_enabled;
42022 uint8_t stat_data_blocked;
42023@@ -842,8 +842,8 @@ struct lpfc_hba {
42024 struct timer_list fabric_block_timer;
42025 unsigned long bit_flags;
42026 #define FABRIC_COMANDS_BLOCKED 0
42027- atomic_t num_rsrc_err;
42028- atomic_t num_cmd_success;
42029+ atomic_unchecked_t num_rsrc_err;
42030+ atomic_unchecked_t num_cmd_success;
42031 unsigned long last_rsrc_error_time;
42032 unsigned long last_ramp_down_time;
42033 unsigned long last_ramp_up_time;
42034@@ -879,7 +879,7 @@ struct lpfc_hba {
42035
42036 struct dentry *debug_slow_ring_trc;
42037 struct lpfc_debugfs_trc *slow_ring_trc;
42038- atomic_t slow_ring_trc_cnt;
42039+ atomic_unchecked_t slow_ring_trc_cnt;
42040 /* iDiag debugfs sub-directory */
42041 struct dentry *idiag_root;
42042 struct dentry *idiag_pci_cfg;
42043diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
42044index f63f5ff..de29189 100644
42045--- a/drivers/scsi/lpfc/lpfc_debugfs.c
42046+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
42047@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
42048
42049 #include <linux/debugfs.h>
42050
42051-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42052+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42053 static unsigned long lpfc_debugfs_start_time = 0L;
42054
42055 /* iDiag */
42056@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
42057 lpfc_debugfs_enable = 0;
42058
42059 len = 0;
42060- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
42061+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
42062 (lpfc_debugfs_max_disc_trc - 1);
42063 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
42064 dtp = vport->disc_trc + i;
42065@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
42066 lpfc_debugfs_enable = 0;
42067
42068 len = 0;
42069- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
42070+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
42071 (lpfc_debugfs_max_slow_ring_trc - 1);
42072 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
42073 dtp = phba->slow_ring_trc + i;
42074@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
42075 !vport || !vport->disc_trc)
42076 return;
42077
42078- index = atomic_inc_return(&vport->disc_trc_cnt) &
42079+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
42080 (lpfc_debugfs_max_disc_trc - 1);
42081 dtp = vport->disc_trc + index;
42082 dtp->fmt = fmt;
42083 dtp->data1 = data1;
42084 dtp->data2 = data2;
42085 dtp->data3 = data3;
42086- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42087+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42088 dtp->jif = jiffies;
42089 #endif
42090 return;
42091@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
42092 !phba || !phba->slow_ring_trc)
42093 return;
42094
42095- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
42096+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
42097 (lpfc_debugfs_max_slow_ring_trc - 1);
42098 dtp = phba->slow_ring_trc + index;
42099 dtp->fmt = fmt;
42100 dtp->data1 = data1;
42101 dtp->data2 = data2;
42102 dtp->data3 = data3;
42103- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42104+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42105 dtp->jif = jiffies;
42106 #endif
42107 return;
42108@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42109 "slow_ring buffer\n");
42110 goto debug_failed;
42111 }
42112- atomic_set(&phba->slow_ring_trc_cnt, 0);
42113+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
42114 memset(phba->slow_ring_trc, 0,
42115 (sizeof(struct lpfc_debugfs_trc) *
42116 lpfc_debugfs_max_slow_ring_trc));
42117@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42118 "buffer\n");
42119 goto debug_failed;
42120 }
42121- atomic_set(&vport->disc_trc_cnt, 0);
42122+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
42123
42124 snprintf(name, sizeof(name), "discovery_trace");
42125 vport->debug_disc_trc =
42126diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
42127index 89ad558..76956c4 100644
42128--- a/drivers/scsi/lpfc/lpfc_init.c
42129+++ b/drivers/scsi/lpfc/lpfc_init.c
42130@@ -10618,8 +10618,10 @@ lpfc_init(void)
42131 "misc_register returned with status %d", error);
42132
42133 if (lpfc_enable_npiv) {
42134- lpfc_transport_functions.vport_create = lpfc_vport_create;
42135- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42136+ pax_open_kernel();
42137+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
42138+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42139+ pax_close_kernel();
42140 }
42141 lpfc_transport_template =
42142 fc_attach_transport(&lpfc_transport_functions);
42143diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
42144index 60e5a17..ff7a793 100644
42145--- a/drivers/scsi/lpfc/lpfc_scsi.c
42146+++ b/drivers/scsi/lpfc/lpfc_scsi.c
42147@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
42148 uint32_t evt_posted;
42149
42150 spin_lock_irqsave(&phba->hbalock, flags);
42151- atomic_inc(&phba->num_rsrc_err);
42152+ atomic_inc_unchecked(&phba->num_rsrc_err);
42153 phba->last_rsrc_error_time = jiffies;
42154
42155 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
42156@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
42157 unsigned long flags;
42158 struct lpfc_hba *phba = vport->phba;
42159 uint32_t evt_posted;
42160- atomic_inc(&phba->num_cmd_success);
42161+ atomic_inc_unchecked(&phba->num_cmd_success);
42162
42163 if (vport->cfg_lun_queue_depth <= queue_depth)
42164 return;
42165@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42166 unsigned long num_rsrc_err, num_cmd_success;
42167 int i;
42168
42169- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
42170- num_cmd_success = atomic_read(&phba->num_cmd_success);
42171+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
42172+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
42173
42174 /*
42175 * The error and success command counters are global per
42176@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42177 }
42178 }
42179 lpfc_destroy_vport_work_array(phba, vports);
42180- atomic_set(&phba->num_rsrc_err, 0);
42181- atomic_set(&phba->num_cmd_success, 0);
42182+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42183+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42184 }
42185
42186 /**
42187@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
42188 }
42189 }
42190 lpfc_destroy_vport_work_array(phba, vports);
42191- atomic_set(&phba->num_rsrc_err, 0);
42192- atomic_set(&phba->num_cmd_success, 0);
42193+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42194+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42195 }
42196
42197 /**
42198diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
42199index b46f5e9..c4c4ccb 100644
42200--- a/drivers/scsi/pmcraid.c
42201+++ b/drivers/scsi/pmcraid.c
42202@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
42203 res->scsi_dev = scsi_dev;
42204 scsi_dev->hostdata = res;
42205 res->change_detected = 0;
42206- atomic_set(&res->read_failures, 0);
42207- atomic_set(&res->write_failures, 0);
42208+ atomic_set_unchecked(&res->read_failures, 0);
42209+ atomic_set_unchecked(&res->write_failures, 0);
42210 rc = 0;
42211 }
42212 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
42213@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
42214
42215 /* If this was a SCSI read/write command keep count of errors */
42216 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
42217- atomic_inc(&res->read_failures);
42218+ atomic_inc_unchecked(&res->read_failures);
42219 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
42220- atomic_inc(&res->write_failures);
42221+ atomic_inc_unchecked(&res->write_failures);
42222
42223 if (!RES_IS_GSCSI(res->cfg_entry) &&
42224 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
42225@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
42226 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42227 * hrrq_id assigned here in queuecommand
42228 */
42229- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42230+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42231 pinstance->num_hrrq;
42232 cmd->cmd_done = pmcraid_io_done;
42233
42234@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
42235 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42236 * hrrq_id assigned here in queuecommand
42237 */
42238- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42239+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42240 pinstance->num_hrrq;
42241
42242 if (request_size) {
42243@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
42244
42245 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
42246 /* add resources only after host is added into system */
42247- if (!atomic_read(&pinstance->expose_resources))
42248+ if (!atomic_read_unchecked(&pinstance->expose_resources))
42249 return;
42250
42251 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
42252@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
42253 init_waitqueue_head(&pinstance->reset_wait_q);
42254
42255 atomic_set(&pinstance->outstanding_cmds, 0);
42256- atomic_set(&pinstance->last_message_id, 0);
42257- atomic_set(&pinstance->expose_resources, 0);
42258+ atomic_set_unchecked(&pinstance->last_message_id, 0);
42259+ atomic_set_unchecked(&pinstance->expose_resources, 0);
42260
42261 INIT_LIST_HEAD(&pinstance->free_res_q);
42262 INIT_LIST_HEAD(&pinstance->used_res_q);
42263@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
42264 /* Schedule worker thread to handle CCN and take care of adding and
42265 * removing devices to OS
42266 */
42267- atomic_set(&pinstance->expose_resources, 1);
42268+ atomic_set_unchecked(&pinstance->expose_resources, 1);
42269 schedule_work(&pinstance->worker_q);
42270 return rc;
42271
42272diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
42273index e1d150f..6c6df44 100644
42274--- a/drivers/scsi/pmcraid.h
42275+++ b/drivers/scsi/pmcraid.h
42276@@ -748,7 +748,7 @@ struct pmcraid_instance {
42277 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
42278
42279 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
42280- atomic_t last_message_id;
42281+ atomic_unchecked_t last_message_id;
42282
42283 /* configuration table */
42284 struct pmcraid_config_table *cfg_table;
42285@@ -777,7 +777,7 @@ struct pmcraid_instance {
42286 atomic_t outstanding_cmds;
42287
42288 /* should add/delete resources to mid-layer now ?*/
42289- atomic_t expose_resources;
42290+ atomic_unchecked_t expose_resources;
42291
42292
42293
42294@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
42295 struct pmcraid_config_table_entry_ext cfg_entry_ext;
42296 };
42297 struct scsi_device *scsi_dev; /* Link scsi_device structure */
42298- atomic_t read_failures; /* count of failed READ commands */
42299- atomic_t write_failures; /* count of failed WRITE commands */
42300+ atomic_unchecked_t read_failures; /* count of failed READ commands */
42301+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
42302
42303 /* To indicate add/delete/modify during CCN */
42304 u8 change_detected;
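
The pmcraid fields above illustrate the pattern behind most of this section: counters that are pure statistics or sequence numbers (message ids, read/write failure tallies, the expose_resources gate) can wrap without harm, so they become atomic_unchecked_t. Under PAX_REFCOUNT the ordinary atomic_t operations trap on overflow to catch reference-count bugs; the unchecked variants opt out of that instrumentation. With the feature compiled out, the unchecked API collapses to plain aliases, roughly (a sketch paraphrasing the fallback definitions this patch adds):

    typedef atomic_t atomic_unchecked_t;

    #define atomic_read_unchecked(v)           atomic_read(v)
    #define atomic_set_unchecked(v, i)         atomic_set((v), (i))
    #define atomic_inc_unchecked(v)            atomic_inc(v)
    #define atomic_add_return_unchecked(i, v)  atomic_add_return((i), (v))
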
42305diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
42306index 83d7984..a27d947 100644
42307--- a/drivers/scsi/qla2xxx/qla_attr.c
42308+++ b/drivers/scsi/qla2xxx/qla_attr.c
42309@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
42310 return 0;
42311 }
42312
42313-struct fc_function_template qla2xxx_transport_functions = {
42314+fc_function_template_no_const qla2xxx_transport_functions = {
42315
42316 .show_host_node_name = 1,
42317 .show_host_port_name = 1,
42318@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
42319 .bsg_timeout = qla24xx_bsg_timeout,
42320 };
42321
42322-struct fc_function_template qla2xxx_transport_vport_functions = {
42323+fc_function_template_no_const qla2xxx_transport_vport_functions = {
42324
42325 .show_host_node_name = 1,
42326 .show_host_port_name = 1,
42327diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
42328index 2411d1a..4673766 100644
42329--- a/drivers/scsi/qla2xxx/qla_gbl.h
42330+++ b/drivers/scsi/qla2xxx/qla_gbl.h
42331@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
42332 struct device_attribute;
42333 extern struct device_attribute *qla2x00_host_attrs[];
42334 struct fc_function_template;
42335-extern struct fc_function_template qla2xxx_transport_functions;
42336-extern struct fc_function_template qla2xxx_transport_vport_functions;
42337+extern fc_function_template_no_const qla2xxx_transport_functions;
42338+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
42339 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
42340 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
42341 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
42342diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
42343index 10d23f8..a7d5d4c 100644
42344--- a/drivers/scsi/qla2xxx/qla_os.c
42345+++ b/drivers/scsi/qla2xxx/qla_os.c
42346@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
42347 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
42348 /* Ok, a 64bit DMA mask is applicable. */
42349 ha->flags.enable_64bit_addressing = 1;
42350- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42351- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42352+ pax_open_kernel();
42353+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42354+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42355+ pax_close_kernel();
42356 return;
42357 }
42358 }
42359diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
42360index 329d553..f20d31d 100644
42361--- a/drivers/scsi/qla4xxx/ql4_def.h
42362+++ b/drivers/scsi/qla4xxx/ql4_def.h
42363@@ -273,7 +273,7 @@ struct ddb_entry {
42364 * (4000 only) */
42365 atomic_t relogin_timer; /* Max Time to wait for
42366 * relogin to complete */
42367- atomic_t relogin_retry_count; /* Num of times relogin has been
42368+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42369 * retried */
42370 uint32_t default_time2wait; /* Default Min time between
42371 * relogins (+aens) */
42372diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42373index 4cec123..7c1329f 100644
42374--- a/drivers/scsi/qla4xxx/ql4_os.c
42375+++ b/drivers/scsi/qla4xxx/ql4_os.c
42376@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
42377 */
42378 if (!iscsi_is_session_online(cls_sess)) {
42379 /* Reset retry relogin timer */
42380- atomic_inc(&ddb_entry->relogin_retry_count);
42381+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42382 DEBUG2(ql4_printk(KERN_INFO, ha,
42383 "%s: index[%d] relogin timed out-retrying"
42384 " relogin (%d), retry (%d)\n", __func__,
42385 ddb_entry->fw_ddb_index,
42386- atomic_read(&ddb_entry->relogin_retry_count),
42387+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
42388 ddb_entry->default_time2wait + 4));
42389 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
42390 atomic_set(&ddb_entry->retry_relogin_timer,
42391@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
42392
42393 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42394 atomic_set(&ddb_entry->relogin_timer, 0);
42395- atomic_set(&ddb_entry->relogin_retry_count, 0);
42396+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42397 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
42398 ddb_entry->default_relogin_timeout =
42399 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
42400diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42401index 2c0d0ec..4e8681a 100644
42402--- a/drivers/scsi/scsi.c
42403+++ b/drivers/scsi/scsi.c
42404@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42405 unsigned long timeout;
42406 int rtn = 0;
42407
42408- atomic_inc(&cmd->device->iorequest_cnt);
42409+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42410
42411 /* check if the device is still usable */
42412 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42413diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42414index f1bf5af..f67e943 100644
42415--- a/drivers/scsi/scsi_lib.c
42416+++ b/drivers/scsi/scsi_lib.c
42417@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42418 shost = sdev->host;
42419 scsi_init_cmd_errh(cmd);
42420 cmd->result = DID_NO_CONNECT << 16;
42421- atomic_inc(&cmd->device->iorequest_cnt);
42422+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42423
42424 /*
42425 * SCSI request completion path will do scsi_device_unbusy(),
42426@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
42427
42428 INIT_LIST_HEAD(&cmd->eh_entry);
42429
42430- atomic_inc(&cmd->device->iodone_cnt);
42431+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
42432 if (cmd->result)
42433- atomic_inc(&cmd->device->ioerr_cnt);
42434+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42435
42436 disposition = scsi_decide_disposition(cmd);
42437 if (disposition != SUCCESS &&
42438diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42439index 931a7d9..0c2a754 100644
42440--- a/drivers/scsi/scsi_sysfs.c
42441+++ b/drivers/scsi/scsi_sysfs.c
42442@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42443 char *buf) \
42444 { \
42445 struct scsi_device *sdev = to_scsi_device(dev); \
42446- unsigned long long count = atomic_read(&sdev->field); \
42447+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
42448 return snprintf(buf, 20, "0x%llx\n", count); \
42449 } \
42450 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
42451diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42452index 84a1fdf..693b0d6 100644
42453--- a/drivers/scsi/scsi_tgt_lib.c
42454+++ b/drivers/scsi/scsi_tgt_lib.c
42455@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42456 int err;
42457
42458 dprintk("%lx %u\n", uaddr, len);
42459- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42460+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42461 if (err) {
42462 /*
42463 * TODO: need to fixup sg_tablesize, max_segment_size,
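
The only change in that hunk is the __user qualifier on the cast: uaddr genuinely holds a userspace address, and annotating it lets sparse track the pointer's address space through blk_rq_map_user(). The qualifier costs nothing at run time; abridged from include/linux/compiler.h:

    #ifdef __CHECKER__                      /* defined while sparse is checking */
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user                         /* expands to nothing under gcc */
    #endif
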
42464diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42465index e894ca7..de9d7660 100644
42466--- a/drivers/scsi/scsi_transport_fc.c
42467+++ b/drivers/scsi/scsi_transport_fc.c
42468@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42469 * Netlink Infrastructure
42470 */
42471
42472-static atomic_t fc_event_seq;
42473+static atomic_unchecked_t fc_event_seq;
42474
42475 /**
42476 * fc_get_event_number - Obtain the next sequential FC event number
42477@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
42478 u32
42479 fc_get_event_number(void)
42480 {
42481- return atomic_add_return(1, &fc_event_seq);
42482+ return atomic_add_return_unchecked(1, &fc_event_seq);
42483 }
42484 EXPORT_SYMBOL(fc_get_event_number);
42485
42486@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
42487 {
42488 int error;
42489
42490- atomic_set(&fc_event_seq, 0);
42491+ atomic_set_unchecked(&fc_event_seq, 0);
42492
42493 error = transport_class_register(&fc_host_class);
42494 if (error)
42495@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42496 char *cp;
42497
42498 *val = simple_strtoul(buf, &cp, 0);
42499- if ((*cp && (*cp != '\n')) || (*val < 0))
42500+ if (*cp && (*cp != '\n'))
42501 return -EINVAL;
42502 /*
42503 * Check for overflow; dev_loss_tmo is u32
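
The deleted `*val < 0` test was dead code: simple_strtoul() stores into an unsigned long, and an unsigned value can never be less than zero, so the branch could not fire and only provoked compiler warnings. In miniature:

    #include <stdio.h>

    int main(void)
    {
            unsigned long val = (unsigned long)-1;  /* ULONG_MAX, not a negative number */
            printf("%d\n", val < 0);                /* prints 0: the test is always false */
            return 0;
    }
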
42504diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42505index 31969f2..2b348f0 100644
42506--- a/drivers/scsi/scsi_transport_iscsi.c
42507+++ b/drivers/scsi/scsi_transport_iscsi.c
42508@@ -79,7 +79,7 @@ struct iscsi_internal {
42509 struct transport_container session_cont;
42510 };
42511
42512-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42513+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42514 static struct workqueue_struct *iscsi_eh_timer_workq;
42515
42516 static DEFINE_IDA(iscsi_sess_ida);
42517@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42518 int err;
42519
42520 ihost = shost->shost_data;
42521- session->sid = atomic_add_return(1, &iscsi_session_nr);
42522+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42523
42524 if (target_id == ISCSI_MAX_TARGET) {
42525 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42526@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
42527 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42528 ISCSI_TRANSPORT_VERSION);
42529
42530- atomic_set(&iscsi_session_nr, 0);
42531+ atomic_set_unchecked(&iscsi_session_nr, 0);
42532
42533 err = class_register(&iscsi_transport_class);
42534 if (err)
42535diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42536index f379c7f..e8fc69c 100644
42537--- a/drivers/scsi/scsi_transport_srp.c
42538+++ b/drivers/scsi/scsi_transport_srp.c
42539@@ -33,7 +33,7 @@
42540 #include "scsi_transport_srp_internal.h"
42541
42542 struct srp_host_attrs {
42543- atomic_t next_port_id;
42544+ atomic_unchecked_t next_port_id;
42545 };
42546 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42547
42548@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42549 struct Scsi_Host *shost = dev_to_shost(dev);
42550 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42551
42552- atomic_set(&srp_host->next_port_id, 0);
42553+ atomic_set_unchecked(&srp_host->next_port_id, 0);
42554 return 0;
42555 }
42556
42557@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42558 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42559 rport->roles = ids->roles;
42560
42561- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42562+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42563 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42564
42565 transport_setup_device(&rport->dev);
42566diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
42567index 7992635..609faf8 100644
42568--- a/drivers/scsi/sd.c
42569+++ b/drivers/scsi/sd.c
42570@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
42571 sdkp->disk = gd;
42572 sdkp->index = index;
42573 atomic_set(&sdkp->openers, 0);
42574- atomic_set(&sdkp->device->ioerr_cnt, 0);
42575+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
42576
42577 if (!sdp->request_queue->rq_timeout) {
42578 if (sdp->type != TYPE_MOD)
42579diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42580index be2c9a6..275525c 100644
42581--- a/drivers/scsi/sg.c
42582+++ b/drivers/scsi/sg.c
42583@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
42584 sdp->disk->disk_name,
42585 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42586 NULL,
42587- (char *)arg);
42588+ (char __user *)arg);
42589 case BLKTRACESTART:
42590 return blk_trace_startstop(sdp->device->request_queue, 1);
42591 case BLKTRACESTOP:
42592diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42593index 19ee901..6e8c2ef 100644
42594--- a/drivers/spi/spi.c
42595+++ b/drivers/spi/spi.c
42596@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
42597 EXPORT_SYMBOL_GPL(spi_bus_unlock);
42598
42599 /* portable code must never pass more than 32 bytes */
42600-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
42601+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
42602
42603 static u8 *buf;
42604
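
The UL suffix matters because the kernel's max() is type-checked: if SMP_CACHE_BYTES expands to an unsigned long on some architecture, comparing it against a plain int literal fails the check and breaks the build. The 3.8-era macro, slightly abridged, shows the mechanism:

    #define max(x, y) ({                                                    \
            typeof(x) _max1 = (x);                                          \
            typeof(y) _max2 = (y);                                          \
            (void) (&_max1 == &_max2); /* warns: comparison of distinct pointer types */ \
            _max1 > _max2 ? _max1 : _max2; })
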
42605diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
42606index c7a5f97..71ecd35 100644
42607--- a/drivers/staging/iio/iio_hwmon.c
42608+++ b/drivers/staging/iio/iio_hwmon.c
42609@@ -72,7 +72,7 @@ static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
42610 static int iio_hwmon_probe(struct platform_device *pdev)
42611 {
42612 struct iio_hwmon_state *st;
42613- struct sensor_device_attribute *a;
42614+ sensor_device_attribute_no_const *a;
42615 int ret, i;
42616 int in_i = 1, temp_i = 1, curr_i = 1;
42617 enum iio_chan_type type;
42618diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42619index 34afc16..ffe44dd 100644
42620--- a/drivers/staging/octeon/ethernet-rx.c
42621+++ b/drivers/staging/octeon/ethernet-rx.c
42622@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42623 /* Increment RX stats for virtual ports */
42624 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42625 #ifdef CONFIG_64BIT
42626- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42627- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42628+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42629+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42630 #else
42631- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42632- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42633+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42634+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42635 #endif
42636 }
42637 netif_receive_skb(skb);
42638@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42639 dev->name);
42640 */
42641 #ifdef CONFIG_64BIT
42642- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42643+		atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42644 #else
42645- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42646+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
42647 #endif
42648 dev_kfree_skb_irq(skb);
42649 }
42650diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42651index ef32dc1..a159d68 100644
42652--- a/drivers/staging/octeon/ethernet.c
42653+++ b/drivers/staging/octeon/ethernet.c
42654@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42655 * since the RX tasklet also increments it.
42656 */
42657 #ifdef CONFIG_64BIT
42658- atomic64_add(rx_status.dropped_packets,
42659- (atomic64_t *)&priv->stats.rx_dropped);
42660+ atomic64_add_unchecked(rx_status.dropped_packets,
42661+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42662 #else
42663- atomic_add(rx_status.dropped_packets,
42664- (atomic_t *)&priv->stats.rx_dropped);
42665+ atomic_add_unchecked(rx_status.dropped_packets,
42666+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42667 #endif
42668 }
42669
42670diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
42671index a2b7e03..aaf3630 100644
42672--- a/drivers/staging/ramster/tmem.c
42673+++ b/drivers/staging/ramster/tmem.c
42674@@ -50,25 +50,25 @@
42675 * A tmem host implementation must use this function to register callbacks
42676 * for memory allocation.
42677 */
42678-static struct tmem_hostops tmem_hostops;
42679+static struct tmem_hostops *tmem_hostops;
42680
42681 static void tmem_objnode_tree_init(void);
42682
42683 void tmem_register_hostops(struct tmem_hostops *m)
42684 {
42685 tmem_objnode_tree_init();
42686- tmem_hostops = *m;
42687+ tmem_hostops = m;
42688 }
42689
42690 /*
42691 * A tmem host implementation must use this function to register
42692 * callbacks for a page-accessible memory (PAM) implementation.
42693 */
42694-static struct tmem_pamops tmem_pamops;
42695+static struct tmem_pamops *tmem_pamops;
42696
42697 void tmem_register_pamops(struct tmem_pamops *m)
42698 {
42699- tmem_pamops = *m;
42700+ tmem_pamops = m;
42701 }
42702
42703 /*
42704@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
42705 obj->pampd_count = 0;
42706 #ifdef CONFIG_RAMSTER
42707-	if (tmem_pamops.new_obj != NULL)
42707+	if (tmem_pamops->new_obj != NULL)
42708- (*tmem_pamops.new_obj)(obj);
42709+ (tmem_pamops->new_obj)(obj);
42710 #endif
42711 SET_SENTINEL(obj, OBJ);
42712
42713@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
42714 rbnode = rb_next(rbnode);
42715 tmem_pampd_destroy_all_in_obj(obj, true);
42716 tmem_obj_free(obj, hb);
42717- (*tmem_hostops.obj_free)(obj, pool);
42718+ (tmem_hostops->obj_free)(obj, pool);
42719 }
42720 spin_unlock(&hb->lock);
42721 }
42722@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
42723 ASSERT_SENTINEL(obj, OBJ);
42724 BUG_ON(obj->pool == NULL);
42725 ASSERT_SENTINEL(obj->pool, POOL);
42726- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
42727+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
42728 if (unlikely(objnode == NULL))
42729 goto out;
42730 objnode->obj = obj;
42731@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
42732 ASSERT_SENTINEL(pool, POOL);
42733 objnode->obj->objnode_count--;
42734 objnode->obj = NULL;
42735- (*tmem_hostops.objnode_free)(objnode, pool);
42736+ (tmem_hostops->objnode_free)(objnode, pool);
42737 }
42738
42739 /*
42740@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
42741 void *old_pampd = *(void **)slot;
42742 *(void **)slot = new_pampd;
42743 if (!no_free)
42744- (*tmem_pamops.free)(old_pampd, obj->pool,
42745+ (tmem_pamops->free)(old_pampd, obj->pool,
42746 NULL, 0, false);
42747 ret = new_pampd;
42748 }
42749@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
42750 if (objnode->slots[i]) {
42751 if (ht == 1) {
42752 obj->pampd_count--;
42753- (*tmem_pamops.free)(objnode->slots[i],
42754+ (tmem_pamops->free)(objnode->slots[i],
42755 obj->pool, NULL, 0, true);
42756 objnode->slots[i] = NULL;
42757 continue;
42758@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42759 return;
42760 if (obj->objnode_tree_height == 0) {
42761 obj->pampd_count--;
42762- (*tmem_pamops.free)(obj->objnode_tree_root,
42763+ (tmem_pamops->free)(obj->objnode_tree_root,
42764 obj->pool, NULL, 0, true);
42765 } else {
42766 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
42767@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42768 obj->objnode_tree_root = NULL;
42769 #ifdef CONFIG_RAMSTER
42770-	if (tmem_pamops.free_obj != NULL)
42770+	if (tmem_pamops->free_obj != NULL)
42771- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
42772+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
42773 #endif
42774 }
42775
42776@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42777 /* if found, is a dup put, flush the old one */
42778 pampd_del = tmem_pampd_delete_from_obj(obj, index);
42779 BUG_ON(pampd_del != pampd);
42780- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42781+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42782 if (obj->pampd_count == 0) {
42783 objnew = obj;
42784 objfound = NULL;
42785@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42786 pampd = NULL;
42787 }
42788 } else {
42789- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
42790+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
42791 if (unlikely(obj == NULL)) {
42792 ret = -ENOMEM;
42793 goto out;
42794@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42795 if (unlikely(ret == -ENOMEM))
42796 /* may have partially built objnode tree ("stump") */
42797 goto delete_and_free;
42798- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
42799+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
42800 goto out;
42801
42802 delete_and_free:
42803 (void)tmem_pampd_delete_from_obj(obj, index);
42804 if (pampd)
42805- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
42806+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
42807 if (objnew) {
42808 tmem_obj_free(objnew, hb);
42809- (*tmem_hostops.obj_free)(objnew, pool);
42810+ (tmem_hostops->obj_free)(objnew, pool);
42811 }
42812 out:
42813 spin_unlock(&hb->lock);
42814@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
42815 if (pampd != NULL) {
42816 BUG_ON(obj == NULL);
42817 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
42818- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
42819+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
42820 } else if (delete) {
42821 BUG_ON(obj == NULL);
42822 (void)tmem_pampd_delete_from_obj(obj, index);
42823@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42824 int ret = 0;
42825
42826 if (!is_ephemeral(pool))
42827- new_pampd = (*tmem_pamops.repatriate_preload)(
42828+ new_pampd = (tmem_pamops->repatriate_preload)(
42829 old_pampd, pool, oidp, index, &intransit);
42830 if (intransit)
42831 ret = -EAGAIN;
42832@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42833 /* must release the hb->lock else repatriate can't sleep */
42834 spin_unlock(&hb->lock);
42835 if (!intransit)
42836- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
42837+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
42838 oidp, index, free, data);
42839 if (ret == -EAGAIN) {
42840 /* rare I think, but should cond_resched()??? */
42841@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
42842 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
42843 /* if we bug here, pamops wasn't properly set up for ramster */
42844-	BUG_ON(tmem_pamops.replace_in_obj == NULL);
42844+	BUG_ON(tmem_pamops->replace_in_obj == NULL);
42845- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
42846+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
42847 out:
42848 spin_unlock(&hb->lock);
42849 return ret;
42850@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42851 if (free) {
42852 if (obj->pampd_count == 0) {
42853 tmem_obj_free(obj, hb);
42854- (*tmem_hostops.obj_free)(obj, pool);
42855+ (tmem_hostops->obj_free)(obj, pool);
42856 obj = NULL;
42857 }
42858 }
42859 if (free)
42860- ret = (*tmem_pamops.get_data_and_free)(
42861+ ret = (tmem_pamops->get_data_and_free)(
42862 data, sizep, raw, pampd, pool, oidp, index);
42863 else
42864- ret = (*tmem_pamops.get_data)(
42865+ ret = (tmem_pamops->get_data)(
42866 data, sizep, raw, pampd, pool, oidp, index);
42867 if (ret < 0)
42868 goto out;
42869@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
42870 pampd = tmem_pampd_delete_from_obj(obj, index);
42871 if (pampd == NULL)
42872 goto out;
42873- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42874+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42875 if (obj->pampd_count == 0) {
42876 tmem_obj_free(obj, hb);
42877- (*tmem_hostops.obj_free)(obj, pool);
42878+ (tmem_hostops->obj_free)(obj, pool);
42879 }
42880 ret = 0;
42881
42882@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
42883 goto out;
42884 tmem_pampd_destroy_all_in_obj(obj, false);
42885 tmem_obj_free(obj, hb);
42886- (*tmem_hostops.obj_free)(obj, pool);
42887+ (tmem_hostops->obj_free)(obj, pool);
42888 ret = 0;
42889
42890 out:
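
The theme of the ramster hunk: rather than copying the caller's ops into writable file-scope structs (which would have to stay non-const forever), tmem now stores only pointers, and every `(*tmem_xxxops.fn)(...)` call site becomes `(tmem_xxxops->fn)(...)`. Condensed, the registration side changes like this (sketch; tmem_objnode_tree_init() elided):

    /* before: writable copy, blocks const-ification of the ops type */
    static struct tmem_hostops tmem_hostops;
    void tmem_register_hostops(struct tmem_hostops *m) { tmem_hostops = *m; }

    /* after: only the pointer is mutable; the registered struct can be RO */
    static struct tmem_hostops *tmem_hostops;
    void tmem_register_hostops(struct tmem_hostops *m) { tmem_hostops = m; }

The zcache copy of tmem.c below takes the other route: it keeps the struct copies but retypes them with __no_const so the constify plugin leaves them writable.
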
42891diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
42892index dc23395..cf7e9b1 100644
42893--- a/drivers/staging/rtl8712/rtl871x_io.h
42894+++ b/drivers/staging/rtl8712/rtl871x_io.h
42895@@ -108,7 +108,7 @@ struct _io_ops {
42896 u8 *pmem);
42897 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
42898 u8 *pmem);
42899-};
42900+} __no_const;
42901
42902 struct io_req {
42903 struct list_head list;
42904diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
42905index 1f5088b..0e59820 100644
42906--- a/drivers/staging/sbe-2t3e3/netdev.c
42907+++ b/drivers/staging/sbe-2t3e3/netdev.c
42908@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42909 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
42910
42911 if (rlen)
42912- if (copy_to_user(data, &resp, rlen))
42913+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
42914 return -EFAULT;
42915
42916 return 0;
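
Aside from the counter churn, this is one of the few substantive hardening fixes in the section: rlen is produced by t3e3_if_config() from device state, and without the `rlen > sizeof resp` guard an oversized length would let copy_to_user() read past the on-stack resp buffer and leak kernel stack memory to userspace.
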
42917diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42918index 5dddc4d..34fcb2f 100644
42919--- a/drivers/staging/usbip/vhci.h
42920+++ b/drivers/staging/usbip/vhci.h
42921@@ -83,7 +83,7 @@ struct vhci_hcd {
42922 unsigned resuming:1;
42923 unsigned long re_timeout;
42924
42925- atomic_t seqnum;
42926+ atomic_unchecked_t seqnum;
42927
42928 /*
42929 * NOTE:
42930diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42931index c3aa219..bf8b3de 100644
42932--- a/drivers/staging/usbip/vhci_hcd.c
42933+++ b/drivers/staging/usbip/vhci_hcd.c
42934@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
42935 return;
42936 }
42937
42938- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42939+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42940 if (priv->seqnum == 0xffff)
42941 dev_info(&urb->dev->dev, "seqnum max\n");
42942
42943@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42944 return -ENOMEM;
42945 }
42946
42947- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42948+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42949 if (unlink->seqnum == 0xffff)
42950 pr_info("seqnum max\n");
42951
42952@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
42953 vdev->rhport = rhport;
42954 }
42955
42956- atomic_set(&vhci->seqnum, 0);
42957+ atomic_set_unchecked(&vhci->seqnum, 0);
42958 spin_lock_init(&vhci->lock);
42959
42960 hcd->power_budget = 0; /* no limit */
42961diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42962index ba5f1c0..11d8122 100644
42963--- a/drivers/staging/usbip/vhci_rx.c
42964+++ b/drivers/staging/usbip/vhci_rx.c
42965@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42966 if (!urb) {
42967 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
42968 pr_info("max seqnum %d\n",
42969- atomic_read(&the_controller->seqnum));
42970+ atomic_read_unchecked(&the_controller->seqnum));
42971 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42972 return;
42973 }
42974diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42975index 5f13890..36a044b 100644
42976--- a/drivers/staging/vt6655/hostap.c
42977+++ b/drivers/staging/vt6655/hostap.c
42978@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
42979 *
42980 */
42981
42982+static net_device_ops_no_const apdev_netdev_ops;
42983+
42984 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42985 {
42986 PSDevice apdev_priv;
42987 struct net_device *dev = pDevice->dev;
42988 int ret;
42989- const struct net_device_ops apdev_netdev_ops = {
42990- .ndo_start_xmit = pDevice->tx_80211,
42991- };
42992
42993 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42994
42995@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42996 *apdev_priv = *pDevice;
42997 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42998
42999+ /* only half broken now */
43000+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43001 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43002
43003 pDevice->apdev->type = ARPHRD_IEEE80211;
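
vt6655 (and vt6656 just below) used to build a const net_device_ops on the stack with a per-device ndo_start_xmit; once ops structures are const-ified that has to become a single writable file-scope instance, which is what the wry "only half broken now" comment concedes: all AP devices now share one xmit hook. The writable type relies on the __no_const escape hatch, the same idiom spelled out in the zcache tmem.h hunk further down; the typedef it presumes (added to netdevice.h elsewhere in this patch, sketched here) is:

    /* __no_const exempts this one typedef from the constify plugin, so
     * instances of it stay writable while plain struct net_device_ops
     * instances become const. */
    typedef struct net_device_ops __no_const net_device_ops_no_const;
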
43004diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
43005index 26a7d0e..897b083 100644
43006--- a/drivers/staging/vt6656/hostap.c
43007+++ b/drivers/staging/vt6656/hostap.c
43008@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
43009 *
43010 */
43011
43012+static net_device_ops_no_const apdev_netdev_ops;
43013+
43014 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43015 {
43016 PSDevice apdev_priv;
43017 struct net_device *dev = pDevice->dev;
43018 int ret;
43019- const struct net_device_ops apdev_netdev_ops = {
43020- .ndo_start_xmit = pDevice->tx_80211,
43021- };
43022
43023 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43024
43025@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43026 *apdev_priv = *pDevice;
43027 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43028
43029+ /* only half broken now */
43030+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43031 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43032
43033 pDevice->apdev->type = ARPHRD_IEEE80211;
43034diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
43035index 56c8e60..1920c63 100644
43036--- a/drivers/staging/zcache/tmem.c
43037+++ b/drivers/staging/zcache/tmem.c
43038@@ -39,7 +39,7 @@
43039 * A tmem host implementation must use this function to register callbacks
43040 * for memory allocation.
43041 */
43042-static struct tmem_hostops tmem_hostops;
43043+static tmem_hostops_no_const tmem_hostops;
43044
43045 static void tmem_objnode_tree_init(void);
43046
43047@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
43048 * A tmem host implementation must use this function to register
43049 * callbacks for a page-accessible memory (PAM) implementation
43050 */
43051-static struct tmem_pamops tmem_pamops;
43052+static tmem_pamops_no_const tmem_pamops;
43053
43054 void tmem_register_pamops(struct tmem_pamops *m)
43055 {
43056diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
43057index 0d4aa82..f7832d4 100644
43058--- a/drivers/staging/zcache/tmem.h
43059+++ b/drivers/staging/zcache/tmem.h
43060@@ -180,6 +180,7 @@ struct tmem_pamops {
43061 void (*new_obj)(struct tmem_obj *);
43062 int (*replace_in_obj)(void *, struct tmem_obj *);
43063 };
43064+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
43065 extern void tmem_register_pamops(struct tmem_pamops *m);
43066
43067 /* memory allocation methods provided by the host implementation */
43068@@ -189,6 +190,7 @@ struct tmem_hostops {
43069 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
43070 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
43071 };
43072+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
43073 extern void tmem_register_hostops(struct tmem_hostops *m);
43074
43075 /* core tmem accessor functions */
43076diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
43077index 96f4981..4daaa7e 100644
43078--- a/drivers/target/target_core_device.c
43079+++ b/drivers/target/target_core_device.c
43080@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
43081 spin_lock_init(&dev->se_port_lock);
43082 spin_lock_init(&dev->se_tmr_lock);
43083 spin_lock_init(&dev->qf_cmd_lock);
43084- atomic_set(&dev->dev_ordered_id, 0);
43085+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
43086 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
43087 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
43088 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
43089diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
43090index fcf880f..a4d1e8f 100644
43091--- a/drivers/target/target_core_transport.c
43092+++ b/drivers/target/target_core_transport.c
43093@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
43094 * Used to determine when ORDERED commands should go from
43095 * Dormant to Active status.
43096 */
43097- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
43098+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
43099 smp_mb__after_atomic_inc();
43100 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
43101 cmd->se_ordered_id, cmd->sam_task_attr,
43102diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
43103index b09c8d1f..c4225c0 100644
43104--- a/drivers/tty/cyclades.c
43105+++ b/drivers/tty/cyclades.c
43106@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
43107 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
43108 info->port.count);
43109 #endif
43110- info->port.count++;
43111+ atomic_inc(&info->port.count);
43112 #ifdef CY_DEBUG_COUNT
43113 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
43114- current->pid, info->port.count);
43115+ current->pid, atomic_read(&info->port.count));
43116 #endif
43117
43118 /*
43119@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
43120 for (j = 0; j < cy_card[i].nports; j++) {
43121 info = &cy_card[i].ports[j];
43122
43123- if (info->port.count) {
43124+ if (atomic_read(&info->port.count)) {
43125 /* XXX is the ldisc num worth this? */
43126 struct tty_struct *tty;
43127 struct tty_ldisc *ld;
43128diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
43129index 13ee53b..418d164 100644
43130--- a/drivers/tty/hvc/hvc_console.c
43131+++ b/drivers/tty/hvc/hvc_console.c
43132@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
43133
43134 spin_lock_irqsave(&hp->port.lock, flags);
43135 /* Check and then increment for fast path open. */
43136- if (hp->port.count++ > 0) {
43137+ if (atomic_inc_return(&hp->port.count) > 1) {
43138 spin_unlock_irqrestore(&hp->port.lock, flags);
43139 hvc_kick();
43140 return 0;
43141@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43142
43143 spin_lock_irqsave(&hp->port.lock, flags);
43144
43145- if (--hp->port.count == 0) {
43146+ if (atomic_dec_return(&hp->port.count) == 0) {
43147 spin_unlock_irqrestore(&hp->port.lock, flags);
43148 /* We are done with the tty pointer now. */
43149 tty_port_tty_set(&hp->port, NULL);
43150@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43151 */
43152 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
43153 } else {
43154- if (hp->port.count < 0)
43155+ if (atomic_read(&hp->port.count) < 0)
43156 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
43157- hp->vtermno, hp->port.count);
43158+ hp->vtermno, atomic_read(&hp->port.count));
43159 spin_unlock_irqrestore(&hp->port.lock, flags);
43160 }
43161 }
43162@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
43163 * open->hangup case this can be called after the final close so prevent
43164 * that from happening for now.
43165 */
43166- if (hp->port.count <= 0) {
43167+ if (atomic_read(&hp->port.count) <= 0) {
43168 spin_unlock_irqrestore(&hp->port.lock, flags);
43169 return;
43170 }
43171
43172- hp->port.count = 0;
43173+ atomic_set(&hp->port.count, 0);
43174 spin_unlock_irqrestore(&hp->port.lock, flags);
43175 tty_port_tty_set(&hp->port, NULL);
43176
43177@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
43178 return -EPIPE;
43179
43180 /* FIXME what's this (unprotected) check for? */
43181- if (hp->port.count <= 0)
43182+ if (atomic_read(&hp->port.count) <= 0)
43183 return -EIO;
43184
43185 spin_lock_irqsave(&hp->lock, flags);
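
The tty_port.count conversions here and below change what the fast-path tests see: post-increment yields the value before the bump, while atomic_inc_return() yields the value after it, so `count++ > 0` becomes `inc_return > 1`, and `--count == 0` becomes atomic_dec_return() == 0 or atomic_dec_and_test(). In miniature:

    static int port_already_open(atomic_t *count)
    {
            /* was:  if (count++ > 0) ...   -- tests the pre-increment value
             * now:  inc_return gives old + 1, so the threshold shifts by one */
            return atomic_inc_return(count) > 1;
    }
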
43186diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
43187index 8776357..b2d4afd 100644
43188--- a/drivers/tty/hvc/hvcs.c
43189+++ b/drivers/tty/hvc/hvcs.c
43190@@ -83,6 +83,7 @@
43191 #include <asm/hvcserver.h>
43192 #include <asm/uaccess.h>
43193 #include <asm/vio.h>
43194+#include <asm/local.h>
43195
43196 /*
43197 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
43198@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
43199
43200 spin_lock_irqsave(&hvcsd->lock, flags);
43201
43202- if (hvcsd->port.count > 0) {
43203+ if (atomic_read(&hvcsd->port.count) > 0) {
43204 spin_unlock_irqrestore(&hvcsd->lock, flags);
43205 printk(KERN_INFO "HVCS: vterm state unchanged. "
43206 "The hvcs device node is still in use.\n");
43207@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
43208 }
43209 }
43210
43211- hvcsd->port.count = 0;
43212+ atomic_set(&hvcsd->port.count, 0);
43213 hvcsd->port.tty = tty;
43214 tty->driver_data = hvcsd;
43215
43216@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
43217 unsigned long flags;
43218
43219 spin_lock_irqsave(&hvcsd->lock, flags);
43220- hvcsd->port.count++;
43221+ atomic_inc(&hvcsd->port.count);
43222 hvcsd->todo_mask |= HVCS_SCHED_READ;
43223 spin_unlock_irqrestore(&hvcsd->lock, flags);
43224
43225@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43226 hvcsd = tty->driver_data;
43227
43228 spin_lock_irqsave(&hvcsd->lock, flags);
43229- if (--hvcsd->port.count == 0) {
43230+ if (atomic_dec_and_test(&hvcsd->port.count)) {
43231
43232 vio_disable_interrupts(hvcsd->vdev);
43233
43234@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43235
43236 free_irq(irq, hvcsd);
43237 return;
43238- } else if (hvcsd->port.count < 0) {
43239+ } else if (atomic_read(&hvcsd->port.count) < 0) {
43240 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
43241 " is missmanaged.\n",
43242- hvcsd->vdev->unit_address, hvcsd->port.count);
43243+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
43244 }
43245
43246 spin_unlock_irqrestore(&hvcsd->lock, flags);
43247@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43248
43249 spin_lock_irqsave(&hvcsd->lock, flags);
43250 /* Preserve this so that we know how many kref refs to put */
43251- temp_open_count = hvcsd->port.count;
43252+ temp_open_count = atomic_read(&hvcsd->port.count);
43253
43254 /*
43255 * Don't kref put inside the spinlock because the destruction
43256@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43257 tty->driver_data = NULL;
43258 hvcsd->port.tty = NULL;
43259
43260- hvcsd->port.count = 0;
43261+ atomic_set(&hvcsd->port.count, 0);
43262
43263 /* This will drop any buffered data on the floor which is OK in a hangup
43264 * scenario. */
43265@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
43266 * the middle of a write operation? This is a crummy place to do this
43267 * but we want to keep it all in the spinlock.
43268 */
43269- if (hvcsd->port.count <= 0) {
43270+ if (atomic_read(&hvcsd->port.count) <= 0) {
43271 spin_unlock_irqrestore(&hvcsd->lock, flags);
43272 return -ENODEV;
43273 }
43274@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
43275 {
43276 struct hvcs_struct *hvcsd = tty->driver_data;
43277
43278- if (!hvcsd || hvcsd->port.count <= 0)
43279+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
43280 return 0;
43281
43282 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
43283diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
43284index 2cde13d..645d78f 100644
43285--- a/drivers/tty/ipwireless/tty.c
43286+++ b/drivers/tty/ipwireless/tty.c
43287@@ -29,6 +29,7 @@
43288 #include <linux/tty_driver.h>
43289 #include <linux/tty_flip.h>
43290 #include <linux/uaccess.h>
43291+#include <asm/local.h>
43292
43293 #include "tty.h"
43294 #include "network.h"
43295@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43296 mutex_unlock(&tty->ipw_tty_mutex);
43297 return -ENODEV;
43298 }
43299- if (tty->port.count == 0)
43300+ if (atomic_read(&tty->port.count) == 0)
43301 tty->tx_bytes_queued = 0;
43302
43303- tty->port.count++;
43304+ atomic_inc(&tty->port.count);
43305
43306 tty->port.tty = linux_tty;
43307 linux_tty->driver_data = tty;
43308@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43309
43310 static void do_ipw_close(struct ipw_tty *tty)
43311 {
43312- tty->port.count--;
43313-
43314- if (tty->port.count == 0) {
43315+ if (atomic_dec_return(&tty->port.count) == 0) {
43316 struct tty_struct *linux_tty = tty->port.tty;
43317
43318 if (linux_tty != NULL) {
43319@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
43320 return;
43321
43322 mutex_lock(&tty->ipw_tty_mutex);
43323- if (tty->port.count == 0) {
43324+ if (atomic_read(&tty->port.count) == 0) {
43325 mutex_unlock(&tty->ipw_tty_mutex);
43326 return;
43327 }
43328@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
43329 return;
43330 }
43331
43332- if (!tty->port.count) {
43333+ if (!atomic_read(&tty->port.count)) {
43334 mutex_unlock(&tty->ipw_tty_mutex);
43335 return;
43336 }
43337@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
43338 return -ENODEV;
43339
43340 mutex_lock(&tty->ipw_tty_mutex);
43341- if (!tty->port.count) {
43342+ if (!atomic_read(&tty->port.count)) {
43343 mutex_unlock(&tty->ipw_tty_mutex);
43344 return -EINVAL;
43345 }
43346@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
43347 if (!tty)
43348 return -ENODEV;
43349
43350- if (!tty->port.count)
43351+ if (!atomic_read(&tty->port.count))
43352 return -EINVAL;
43353
43354 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
43355@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
43356 if (!tty)
43357 return 0;
43358
43359- if (!tty->port.count)
43360+ if (!atomic_read(&tty->port.count))
43361 return 0;
43362
43363 return tty->tx_bytes_queued;
43364@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
43365 if (!tty)
43366 return -ENODEV;
43367
43368- if (!tty->port.count)
43369+ if (!atomic_read(&tty->port.count))
43370 return -EINVAL;
43371
43372 return get_control_lines(tty);
43373@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
43374 if (!tty)
43375 return -ENODEV;
43376
43377- if (!tty->port.count)
43378+ if (!atomic_read(&tty->port.count))
43379 return -EINVAL;
43380
43381 return set_control_lines(tty, set, clear);
43382@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
43383 if (!tty)
43384 return -ENODEV;
43385
43386- if (!tty->port.count)
43387+ if (!atomic_read(&tty->port.count))
43388 return -EINVAL;
43389
43390 /* FIXME: Exactly how is the tty object locked here .. */
43391@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
43392 * are gone */
43393 mutex_lock(&ttyj->ipw_tty_mutex);
43394 }
43395- while (ttyj->port.count)
43396+ while (atomic_read(&ttyj->port.count))
43397 do_ipw_close(ttyj);
43398 ipwireless_disassociate_network_ttys(network,
43399 ttyj->channel_idx);
43400diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
43401index f9d2850..b006f04 100644
43402--- a/drivers/tty/moxa.c
43403+++ b/drivers/tty/moxa.c
43404@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
43405 }
43406
43407 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
43408- ch->port.count++;
43409+ atomic_inc(&ch->port.count);
43410 tty->driver_data = ch;
43411 tty_port_tty_set(&ch->port, tty);
43412 mutex_lock(&ch->port.mutex);
43413diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
43414index bfd6771..e0d93c4 100644
43415--- a/drivers/tty/n_gsm.c
43416+++ b/drivers/tty/n_gsm.c
43417@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
43418 spin_lock_init(&dlci->lock);
43419 mutex_init(&dlci->mutex);
43420 dlci->fifo = &dlci->_fifo;
43421- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
43422+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
43423 kfree(dlci);
43424 return NULL;
43425 }
43426@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
43427 struct gsm_dlci *dlci = tty->driver_data;
43428 struct tty_port *port = &dlci->port;
43429
43430- port->count++;
43431+ atomic_inc(&port->count);
43432 dlci_get(dlci);
43433 dlci_get(dlci->gsm->dlci[0]);
43434 mux_get(dlci->gsm);
43435diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
43436index 19083ef..6e34e97 100644
43437--- a/drivers/tty/n_tty.c
43438+++ b/drivers/tty/n_tty.c
43439@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
43440 {
43441 *ops = tty_ldisc_N_TTY;
43442 ops->owner = NULL;
43443- ops->refcount = ops->flags = 0;
43444+ atomic_set(&ops->refcount, 0);
43445+ ops->flags = 0;
43446 }
43447 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
43448diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
43449index ac35c90..c47deac 100644
43450--- a/drivers/tty/pty.c
43451+++ b/drivers/tty/pty.c
43452@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
43453 panic("Couldn't register Unix98 pts driver");
43454
43455 /* Now create the /dev/ptmx special device */
43456+ pax_open_kernel();
43457 tty_default_fops(&ptmx_fops);
43458- ptmx_fops.open = ptmx_open;
43459+ *(void **)&ptmx_fops.open = ptmx_open;
43460+ pax_close_kernel();
43461
43462 cdev_init(&ptmx_cdev, &ptmx_fops);
43463 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
43464diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
43465index e42009a..566a036 100644
43466--- a/drivers/tty/rocket.c
43467+++ b/drivers/tty/rocket.c
43468@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43469 tty->driver_data = info;
43470 tty_port_tty_set(port, tty);
43471
43472- if (port->count++ == 0) {
43473+ if (atomic_inc_return(&port->count) == 1) {
43474 atomic_inc(&rp_num_ports_open);
43475
43476 #ifdef ROCKET_DEBUG_OPEN
43477@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43478 #endif
43479 }
43480 #ifdef ROCKET_DEBUG_OPEN
43481- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
43482+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
43483 #endif
43484
43485 /*
43486@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
43487 spin_unlock_irqrestore(&info->port.lock, flags);
43488 return;
43489 }
43490- if (info->port.count)
43491+ if (atomic_read(&info->port.count))
43492 atomic_dec(&rp_num_ports_open);
43493 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
43494 spin_unlock_irqrestore(&info->port.lock, flags);
43495diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
43496index 1002054..dd644a8 100644
43497--- a/drivers/tty/serial/kgdboc.c
43498+++ b/drivers/tty/serial/kgdboc.c
43499@@ -24,8 +24,9 @@
43500 #define MAX_CONFIG_LEN 40
43501
43502 static struct kgdb_io kgdboc_io_ops;
43503+static struct kgdb_io kgdboc_io_ops_console;
43504
43505-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
43506+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
43507 static int configured = -1;
43508
43509 static char config[MAX_CONFIG_LEN];
43510@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
43511 kgdboc_unregister_kbd();
43512 if (configured == 1)
43513 kgdb_unregister_io_module(&kgdboc_io_ops);
43514+ else if (configured == 2)
43515+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
43516 }
43517
43518 static int configure_kgdboc(void)
43519@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
43520 int err;
43521 char *cptr = config;
43522 struct console *cons;
43523+ int is_console = 0;
43524
43525 err = kgdboc_option_setup(config);
43526 if (err || !strlen(config) || isspace(config[0]))
43527 goto noconfig;
43528
43529 err = -ENODEV;
43530- kgdboc_io_ops.is_console = 0;
43531 kgdb_tty_driver = NULL;
43532
43533 kgdboc_use_kms = 0;
43534@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
43535 int idx;
43536 if (cons->device && cons->device(cons, &idx) == p &&
43537 idx == tty_line) {
43538- kgdboc_io_ops.is_console = 1;
43539+ is_console = 1;
43540 break;
43541 }
43542 cons = cons->next;
43543@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
43544 kgdb_tty_line = tty_line;
43545
43546 do_register:
43547- err = kgdb_register_io_module(&kgdboc_io_ops);
43548+ if (is_console) {
43549+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
43550+ configured = 2;
43551+ } else {
43552+ err = kgdb_register_io_module(&kgdboc_io_ops);
43553+ configured = 1;
43554+ }
43555 if (err)
43556 goto noconfig;
43557
43558@@ -205,8 +214,6 @@ do_register:
43559 if (err)
43560 goto nmi_con_failed;
43561
43562- configured = 1;
43563-
43564 return 0;
43565
43566 nmi_con_failed:
43567@@ -223,7 +230,7 @@ noconfig:
43568 static int __init init_kgdboc(void)
43569 {
43570 /* Already configured? */
43571- if (configured == 1)
43572+ if (configured >= 1)
43573 return 0;
43574
43575 return configure_kgdboc();
43576@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
43577 if (config[len - 1] == '\n')
43578 config[len - 1] = '\0';
43579
43580- if (configured == 1)
43581+ if (configured >= 1)
43582 cleanup_kgdboc();
43583
43584 /* Go and configure with the new params. */
43585@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
43586 .post_exception = kgdboc_post_exp_handler,
43587 };
43588
43589+static struct kgdb_io kgdboc_io_ops_console = {
43590+ .name = "kgdboc",
43591+ .read_char = kgdboc_get_char,
43592+ .write_char = kgdboc_put_char,
43593+ .pre_exception = kgdboc_pre_exp_handler,
43594+ .post_exception = kgdboc_post_exp_handler,
43595+ .is_console = 1
43596+};
43597+
43598 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43599 /* This is only available if kgdboc is a built in for early debugging */
43600 static int __init kgdboc_early_init(char *opt)
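
kgdboc is the one spot in this section where a flag rather than a function pointer was written at runtime (kgdboc_io_ops.is_console). With struct kgdb_io const-ified, the patch instead pre-builds two instances differing only in .is_console and widens the `configured` state to 1-or-2 so that cleanup_kgdboc() knows which instance to unregister.
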
43601diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43602index e514b3a..c73d614 100644
43603--- a/drivers/tty/serial/samsung.c
43604+++ b/drivers/tty/serial/samsung.c
43605@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43606 }
43607 }
43608
43609+static int s3c64xx_serial_startup(struct uart_port *port);
43610 static int s3c24xx_serial_startup(struct uart_port *port)
43611 {
43612 struct s3c24xx_uart_port *ourport = to_ourport(port);
43613 int ret;
43614
43615+ /* Startup sequence is different for s3c64xx and higher SoC's */
43616+ if (s3c24xx_serial_has_interrupt_mask(port))
43617+ return s3c64xx_serial_startup(port);
43618+
43619 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43620 port->mapbase, port->membase);
43621
43622@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43623 /* setup info for port */
43624 port->dev = &platdev->dev;
43625
43626- /* Startup sequence is different for s3c64xx and higher SoC's */
43627- if (s3c24xx_serial_has_interrupt_mask(port))
43628- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43629-
43630 port->uartclk = 1;
43631
43632 if (cfg->uart_flags & UPF_CONS_FLOW) {
43633diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43634index 2c7230a..2104f16 100644
43635--- a/drivers/tty/serial/serial_core.c
43636+++ b/drivers/tty/serial/serial_core.c
43637@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
43638 uart_flush_buffer(tty);
43639 uart_shutdown(tty, state);
43640 spin_lock_irqsave(&port->lock, flags);
43641- port->count = 0;
43642+ atomic_set(&port->count, 0);
43643 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43644 spin_unlock_irqrestore(&port->lock, flags);
43645 tty_port_tty_set(port, NULL);
43646@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43647 goto end;
43648 }
43649
43650- port->count++;
43651+ atomic_inc(&port->count);
43652 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43653 retval = -ENXIO;
43654 goto err_dec_count;
43655@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43656 /*
43657 * Make sure the device is in D0 state.
43658 */
43659- if (port->count == 1)
43660+ if (atomic_read(&port->count) == 1)
43661 uart_change_pm(state, 0);
43662
43663 /*
43664@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43665 end:
43666 return retval;
43667 err_dec_count:
43668- port->count--;
43669+ atomic_dec(&port->count);
43670 mutex_unlock(&port->mutex);
43671 goto end;
43672 }
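
From here on, the recurring transformation is the port reference count (tty_port.count and the synclink drivers' copies of it) moving from a plain int to atomic_t, with every `++`/`--`/read/assignment rewritten through the atomic accessors; the same rewrite repeats in synclink.c, synclink_gt.c, synclinkmp.c, tty_ldisc.c, tty_port.c, u_serial.c and serial/console.c below. A compact user-space model of the accessor mapping, using C11 atomics as a stand-in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count; /* tty_port.count analogue */

static void port_open(void)
{
    atomic_fetch_add(&port_count, 1);            /* atomic_inc()      */
    if (atomic_load(&port_count) == 1)           /* atomic_read()==1  */
        puts("first open: power up the device");
}

static void port_open_failed(void)
{
    /* The error path must drop the reference taken above, hence
     * atomic_dec() in uart_open()'s err_dec_count label. */
    atomic_fetch_sub(&port_count, 1);            /* atomic_dec()      */
}

static void port_hangup(void)
{
    atomic_store(&port_count, 0);                /* atomic_set(.., 0) */
}

int main(void)
{
    port_open();
    port_open_failed();
    port_hangup();
    printf("count=%d\n", atomic_load(&port_count));
    return 0;
}

Note that the spinlocks around these counters are kept throughout: the atomics harden the counter itself against concurrent corruption but do not replace the locking that orders it against the rest of the port state.
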
43673diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43674index 9e071f6..f30ae69 100644
43675--- a/drivers/tty/synclink.c
43676+++ b/drivers/tty/synclink.c
43677@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43678
43679 if (debug_level >= DEBUG_LEVEL_INFO)
43680 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43681- __FILE__,__LINE__, info->device_name, info->port.count);
43682+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43683
43684 if (tty_port_close_start(&info->port, tty, filp) == 0)
43685 goto cleanup;
43686@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43687 cleanup:
43688 if (debug_level >= DEBUG_LEVEL_INFO)
43689 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43690- tty->driver->name, info->port.count);
43691+ tty->driver->name, atomic_read(&info->port.count));
43692
43693 } /* end of mgsl_close() */
43694
43695@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43696
43697 mgsl_flush_buffer(tty);
43698 shutdown(info);
43699-
43700- info->port.count = 0;
43701+
43702+ atomic_set(&info->port.count, 0);
43703 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43704 info->port.tty = NULL;
43705
43706@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43707
43708 if (debug_level >= DEBUG_LEVEL_INFO)
43709 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43710- __FILE__,__LINE__, tty->driver->name, port->count );
43711+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43712
43713 spin_lock_irqsave(&info->irq_spinlock, flags);
43714 if (!tty_hung_up_p(filp)) {
43715 extra_count = true;
43716- port->count--;
43717+ atomic_dec(&port->count);
43718 }
43719 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43720 port->blocked_open++;
43721@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43722
43723 if (debug_level >= DEBUG_LEVEL_INFO)
43724 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43725- __FILE__,__LINE__, tty->driver->name, port->count );
43726+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43727
43728 tty_unlock(tty);
43729 schedule();
43730@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43731
43732 /* FIXME: Racy on hangup during close wait */
43733 if (extra_count)
43734- port->count++;
43735+ atomic_inc(&port->count);
43736 port->blocked_open--;
43737
43738 if (debug_level >= DEBUG_LEVEL_INFO)
43739 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43740- __FILE__,__LINE__, tty->driver->name, port->count );
43741+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43742
43743 if (!retval)
43744 port->flags |= ASYNC_NORMAL_ACTIVE;
43745@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43746
43747 if (debug_level >= DEBUG_LEVEL_INFO)
43748 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43749- __FILE__,__LINE__,tty->driver->name, info->port.count);
43750+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43751
43752 /* If port is closing, signal caller to try again */
43753 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43754@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43755 spin_unlock_irqrestore(&info->netlock, flags);
43756 goto cleanup;
43757 }
43758- info->port.count++;
43759+ atomic_inc(&info->port.count);
43760 spin_unlock_irqrestore(&info->netlock, flags);
43761
43762- if (info->port.count == 1) {
43763+ if (atomic_read(&info->port.count) == 1) {
43764 /* 1st open on this device, init hardware */
43765 retval = startup(info);
43766 if (retval < 0)
43767@@ -3451,8 +3451,8 @@ cleanup:
43768 if (retval) {
43769 if (tty->count == 1)
43770 info->port.tty = NULL; /* tty layer will release tty struct */
43771- if(info->port.count)
43772- info->port.count--;
43773+ if (atomic_read(&info->port.count))
43774+ atomic_dec(&info->port.count);
43775 }
43776
43777 return retval;
43778@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43779 unsigned short new_crctype;
43780
43781 /* return error if TTY interface open */
43782- if (info->port.count)
43783+ if (atomic_read(&info->port.count))
43784 return -EBUSY;
43785
43786 switch (encoding)
43787@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
43788
43789 /* arbitrate between network and tty opens */
43790 spin_lock_irqsave(&info->netlock, flags);
43791- if (info->port.count != 0 || info->netcount != 0) {
43792+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43793 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43794 spin_unlock_irqrestore(&info->netlock, flags);
43795 return -EBUSY;
43796@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43797 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43798
43799 /* return error if TTY interface open */
43800- if (info->port.count)
43801+ if (atomic_read(&info->port.count))
43802 return -EBUSY;
43803
43804 if (cmd != SIOCWANDEV)
43805diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
43806index aba1e59..877ac33 100644
43807--- a/drivers/tty/synclink_gt.c
43808+++ b/drivers/tty/synclink_gt.c
43809@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43810 tty->driver_data = info;
43811 info->port.tty = tty;
43812
43813- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
43814+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
43815
43816 /* If port is closing, signal caller to try again */
43817 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43818@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43819 mutex_unlock(&info->port.mutex);
43820 goto cleanup;
43821 }
43822- info->port.count++;
43823+ atomic_inc(&info->port.count);
43824 spin_unlock_irqrestore(&info->netlock, flags);
43825
43826- if (info->port.count == 1) {
43827+ if (atomic_read(&info->port.count) == 1) {
43828 /* 1st open on this device, init hardware */
43829 retval = startup(info);
43830 if (retval < 0) {
43831@@ -716,8 +716,8 @@ cleanup:
43832 if (retval) {
43833 if (tty->count == 1)
43834 info->port.tty = NULL; /* tty layer will release tty struct */
43835- if(info->port.count)
43836- info->port.count--;
43837+ if (atomic_read(&info->port.count))
43838+ atomic_dec(&info->port.count);
43839 }
43840
43841 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
43842@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43843
43844 if (sanity_check(info, tty->name, "close"))
43845 return;
43846- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
43847+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
43848
43849 if (tty_port_close_start(&info->port, tty, filp) == 0)
43850 goto cleanup;
43851@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43852 tty_port_close_end(&info->port, tty);
43853 info->port.tty = NULL;
43854 cleanup:
43855- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
43856+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
43857 }
43858
43859 static void hangup(struct tty_struct *tty)
43860@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
43861 shutdown(info);
43862
43863 spin_lock_irqsave(&info->port.lock, flags);
43864- info->port.count = 0;
43865+ atomic_set(&info->port.count, 0);
43866 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43867 info->port.tty = NULL;
43868 spin_unlock_irqrestore(&info->port.lock, flags);
43869@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43870 unsigned short new_crctype;
43871
43872 /* return error if TTY interface open */
43873- if (info->port.count)
43874+ if (atomic_read(&info->port.count))
43875 return -EBUSY;
43876
43877 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
43878@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
43879
43880 /* arbitrate between network and tty opens */
43881 spin_lock_irqsave(&info->netlock, flags);
43882- if (info->port.count != 0 || info->netcount != 0) {
43883+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43884 DBGINFO(("%s hdlc_open busy\n", dev->name));
43885 spin_unlock_irqrestore(&info->netlock, flags);
43886 return -EBUSY;
43887@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43888 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
43889
43890 /* return error if TTY interface open */
43891- if (info->port.count)
43892+ if (atomic_read(&info->port.count))
43893 return -EBUSY;
43894
43895 if (cmd != SIOCWANDEV)
43896@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
43897 if (port == NULL)
43898 continue;
43899 spin_lock(&port->lock);
43900- if ((port->port.count || port->netcount) &&
43901+ if ((atomic_read(&port->port.count) || port->netcount) &&
43902 port->pending_bh && !port->bh_running &&
43903 !port->bh_requested) {
43904 DBGISR(("%s bh queued\n", port->device_name));
43905@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43906 spin_lock_irqsave(&info->lock, flags);
43907 if (!tty_hung_up_p(filp)) {
43908 extra_count = true;
43909- port->count--;
43910+ atomic_dec(&port->count);
43911 }
43912 spin_unlock_irqrestore(&info->lock, flags);
43913 port->blocked_open++;
43914@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43915 remove_wait_queue(&port->open_wait, &wait);
43916
43917 if (extra_count)
43918- port->count++;
43919+ atomic_inc(&port->count);
43920 port->blocked_open--;
43921
43922 if (!retval)
43923diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
43924index fd43fb6..34704ad 100644
43925--- a/drivers/tty/synclinkmp.c
43926+++ b/drivers/tty/synclinkmp.c
43927@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43928
43929 if (debug_level >= DEBUG_LEVEL_INFO)
43930 printk("%s(%d):%s open(), old ref count = %d\n",
43931- __FILE__,__LINE__,tty->driver->name, info->port.count);
43932+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43933
43934 /* If port is closing, signal caller to try again */
43935 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43936@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43937 spin_unlock_irqrestore(&info->netlock, flags);
43938 goto cleanup;
43939 }
43940- info->port.count++;
43941+ atomic_inc(&info->port.count);
43942 spin_unlock_irqrestore(&info->netlock, flags);
43943
43944- if (info->port.count == 1) {
43945+ if (atomic_read(&info->port.count) == 1) {
43946 /* 1st open on this device, init hardware */
43947 retval = startup(info);
43948 if (retval < 0)
43949@@ -797,8 +797,8 @@ cleanup:
43950 if (retval) {
43951 if (tty->count == 1)
43952 info->port.tty = NULL; /* tty layer will release tty struct */
43953- if(info->port.count)
43954- info->port.count--;
43955+ if (atomic_read(&info->port.count))
43956+ atomic_dec(&info->port.count);
43957 }
43958
43959 return retval;
43960@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43961
43962 if (debug_level >= DEBUG_LEVEL_INFO)
43963 printk("%s(%d):%s close() entry, count=%d\n",
43964- __FILE__,__LINE__, info->device_name, info->port.count);
43965+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43966
43967 if (tty_port_close_start(&info->port, tty, filp) == 0)
43968 goto cleanup;
43969@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43970 cleanup:
43971 if (debug_level >= DEBUG_LEVEL_INFO)
43972 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
43973- tty->driver->name, info->port.count);
43974+ tty->driver->name, atomic_read(&info->port.count));
43975 }
43976
43977 /* Called by tty_hangup() when a hangup is signaled.
43978@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
43979 shutdown(info);
43980
43981 spin_lock_irqsave(&info->port.lock, flags);
43982- info->port.count = 0;
43983+ atomic_set(&info->port.count, 0);
43984 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43985 info->port.tty = NULL;
43986 spin_unlock_irqrestore(&info->port.lock, flags);
43987@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43988 unsigned short new_crctype;
43989
43990 /* return error if TTY interface open */
43991- if (info->port.count)
43992+ if (atomic_read(&info->port.count))
43993 return -EBUSY;
43994
43995 switch (encoding)
43996@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
43997
43998 /* arbitrate between network and tty opens */
43999 spin_lock_irqsave(&info->netlock, flags);
44000- if (info->port.count != 0 || info->netcount != 0) {
44001+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44002 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44003 spin_unlock_irqrestore(&info->netlock, flags);
44004 return -EBUSY;
44005@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44006 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44007
44008 /* return error if TTY interface open */
44009- if (info->port.count)
44010+ if (atomic_read(&info->port.count))
44011 return -EBUSY;
44012
44013 if (cmd != SIOCWANDEV)
44014@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
44015 * do not request bottom half processing if the
44016 * device is not open in a normal mode.
44017 */
44018- if ( port && (port->port.count || port->netcount) &&
44019+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
44020 port->pending_bh && !port->bh_running &&
44021 !port->bh_requested ) {
44022 if ( debug_level >= DEBUG_LEVEL_ISR )
44023@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44024
44025 if (debug_level >= DEBUG_LEVEL_INFO)
44026 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
44027- __FILE__,__LINE__, tty->driver->name, port->count );
44028+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44029
44030 spin_lock_irqsave(&info->lock, flags);
44031 if (!tty_hung_up_p(filp)) {
44032 extra_count = true;
44033- port->count--;
44034+ atomic_dec(&port->count);
44035 }
44036 spin_unlock_irqrestore(&info->lock, flags);
44037 port->blocked_open++;
44038@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44039
44040 if (debug_level >= DEBUG_LEVEL_INFO)
44041 printk("%s(%d):%s block_til_ready() count=%d\n",
44042- __FILE__,__LINE__, tty->driver->name, port->count );
44043+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44044
44045 tty_unlock(tty);
44046 schedule();
44047@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44048 remove_wait_queue(&port->open_wait, &wait);
44049
44050 if (extra_count)
44051- port->count++;
44052+ atomic_inc(&port->count);
44053 port->blocked_open--;
44054
44055 if (debug_level >= DEBUG_LEVEL_INFO)
44056 printk("%s(%d):%s block_til_ready() after, count=%d\n",
44057- __FILE__,__LINE__, tty->driver->name, port->count );
44058+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44059
44060 if (!retval)
44061 port->flags |= ASYNC_NORMAL_ACTIVE;
44062diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
44063index b3c4a25..723916f 100644
44064--- a/drivers/tty/sysrq.c
44065+++ b/drivers/tty/sysrq.c
44066@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
44067 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
44068 size_t count, loff_t *ppos)
44069 {
44070- if (count) {
44071+ if (count && capable(CAP_SYS_ADMIN)) {
44072 char c;
44073
44074 if (get_user(c, buf))
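
The sysrq hunk gates writes to /proc/sysrq-trigger on CAP_SYS_ADMIN in addition to file permissions. An unprivileged write is silently swallowed (the function still returns `count`) rather than failing with -EPERM. A rough user-space analogue, with geteuid()==0 standing in for capable(CAP_SYS_ADMIN):

#include <stdio.h>
#include <unistd.h>

static long write_trigger(const char *buf, unsigned long count)
{
    if (count && geteuid() == 0) {
        char c = buf[0];                 /* get_user(c, buf) analogue */
        printf("__handle_sysrq('%c')\n", c);
    }
    return (long)count; /* consume the write whether or not it acted */
}

int main(void)
{
    return write_trigger("h", 1) == 1 ? 0 : 1;
}
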
44075diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
44076index da9fde8..c07975f 100644
44077--- a/drivers/tty/tty_io.c
44078+++ b/drivers/tty/tty_io.c
44079@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
44080
44081 void tty_default_fops(struct file_operations *fops)
44082 {
44083- *fops = tty_fops;
44084+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
44085 }
44086
44087 /*
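
tty_default_fops() switches from structure assignment to memcpy() through a (void *) cast. Under grsecurity's constification plugin, file_operations instances are presumably const-qualified, so a plain `*fops = tty_fops;` through the qualified type would no longer compile; the cast-and-copy form sidesteps the qualifier while leaving the caller responsible for the memory actually being writable at that point. A hedged illustration (struct layout invented):

#include <stdio.h>
#include <string.h>

struct file_operations { int (*open)(void); };

static int my_open(void) { return 42; }
static const struct file_operations tty_fops = { .open = my_open };

/* With a const-qualified parameter type, assignment through "fops"
 * would be rejected by the compiler; memcpy via void * is
 * qualifier-blind. */
static void tty_default_fops(const struct file_operations *fops)
{
    memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
}

int main(void)
{
    struct file_operations f = { 0 };   /* genuinely writable object */
    tty_default_fops(&f);
    printf("%d\n", f.open());
    return 0;
}
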
44088diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
44089index c578229..45aa9ee 100644
44090--- a/drivers/tty/tty_ldisc.c
44091+++ b/drivers/tty/tty_ldisc.c
44092@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
44093 if (atomic_dec_and_test(&ld->users)) {
44094 struct tty_ldisc_ops *ldo = ld->ops;
44095
44096- ldo->refcount--;
44097+ atomic_dec(&ldo->refcount);
44098 module_put(ldo->owner);
44099 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44100
44101@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
44102 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44103 tty_ldiscs[disc] = new_ldisc;
44104 new_ldisc->num = disc;
44105- new_ldisc->refcount = 0;
44106+ atomic_set(&new_ldisc->refcount, 0);
44107 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44108
44109 return ret;
44110@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
44111 return -EINVAL;
44112
44113 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44114- if (tty_ldiscs[disc]->refcount)
44115+ if (atomic_read(&tty_ldiscs[disc]->refcount))
44116 ret = -EBUSY;
44117 else
44118 tty_ldiscs[disc] = NULL;
44119@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
44120 if (ldops) {
44121 ret = ERR_PTR(-EAGAIN);
44122 if (try_module_get(ldops->owner)) {
44123- ldops->refcount++;
44124+ atomic_inc(&ldops->refcount);
44125 ret = ldops;
44126 }
44127 }
44128@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
44129 unsigned long flags;
44130
44131 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44132- ldops->refcount--;
44133+ atomic_dec(&ldops->refcount);
44134 module_put(ldops->owner);
44135 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44136 }
44137diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
44138index b7ff59d..7c6105e 100644
44139--- a/drivers/tty/tty_port.c
44140+++ b/drivers/tty/tty_port.c
44141@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
44142 unsigned long flags;
44143
44144 spin_lock_irqsave(&port->lock, flags);
44145- port->count = 0;
44146+ atomic_set(&port->count, 0);
44147 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44148 if (port->tty) {
44149 set_bit(TTY_IO_ERROR, &port->tty->flags);
44150@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44151 /* The port lock protects the port counts */
44152 spin_lock_irqsave(&port->lock, flags);
44153 if (!tty_hung_up_p(filp))
44154- port->count--;
44155+ atomic_dec(&port->count);
44156 port->blocked_open++;
44157 spin_unlock_irqrestore(&port->lock, flags);
44158
44159@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44160 we must not mess that up further */
44161 spin_lock_irqsave(&port->lock, flags);
44162 if (!tty_hung_up_p(filp))
44163- port->count++;
44164+ atomic_inc(&port->count);
44165 port->blocked_open--;
44166 if (retval == 0)
44167 port->flags |= ASYNC_NORMAL_ACTIVE;
44168@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
44169 return 0;
44170 }
44171
44172- if (tty->count == 1 && port->count != 1) {
44173+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
44174 printk(KERN_WARNING
44175 "tty_port_close_start: tty->count = 1 port count = %d.\n",
44176- port->count);
44177- port->count = 1;
44178+ atomic_read(&port->count));
44179+ atomic_set(&port->count, 1);
44180 }
44181- if (--port->count < 0) {
44182+ if (atomic_dec_return(&port->count) < 0) {
44183 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
44184- port->count);
44185- port->count = 0;
44186+ atomic_read(&port->count));
44187+ atomic_set(&port->count, 0);
44188 }
44189
44190- if (port->count) {
44191+ if (atomic_read(&port->count)) {
44192 spin_unlock_irqrestore(&port->lock, flags);
44193 if (port->ops->drop)
44194 port->ops->drop(port);
44195@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
44196 {
44197 spin_lock_irq(&port->lock);
44198 if (!tty_hung_up_p(filp))
44199- ++port->count;
44200+ atomic_inc(&port->count);
44201 spin_unlock_irq(&port->lock);
44202 tty_port_tty_set(port, tty);
44203
44204diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
44205index 681765b..d3ccdf2 100644
44206--- a/drivers/tty/vt/keyboard.c
44207+++ b/drivers/tty/vt/keyboard.c
44208@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
44209 kbd->kbdmode == VC_OFF) &&
44210 value != KVAL(K_SAK))
44211 return; /* SAK is allowed even in raw mode */
44212+
44213+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44214+ {
44215+ void *func = fn_handler[value];
44216+ if (func == fn_show_state || func == fn_show_ptregs ||
44217+ func == fn_show_mem)
44218+ return;
44219+ }
44220+#endif
44221+
44222 fn_handler[value](vc);
44223 }
44224
44225@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44226 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
44227 return -EFAULT;
44228
44229- if (!capable(CAP_SYS_TTY_CONFIG))
44230- perm = 0;
44231-
44232 switch (cmd) {
44233 case KDGKBENT:
44234 /* Ensure another thread doesn't free it under us */
44235@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44236 spin_unlock_irqrestore(&kbd_event_lock, flags);
44237 return put_user(val, &user_kbe->kb_value);
44238 case KDSKBENT:
44239+ if (!capable(CAP_SYS_TTY_CONFIG))
44240+ perm = 0;
44241+
44242 if (!perm)
44243 return -EPERM;
44244 if (!i && v == K_NOSUCHMAP) {
44245@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44246 int i, j, k;
44247 int ret;
44248
44249- if (!capable(CAP_SYS_TTY_CONFIG))
44250- perm = 0;
44251-
44252 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
44253 if (!kbs) {
44254 ret = -ENOMEM;
44255@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44256 kfree(kbs);
44257 return ((p && *p) ? -EOVERFLOW : 0);
44258 case KDSKBSENT:
44259+ if (!capable(CAP_SYS_TTY_CONFIG))
44260+ perm = 0;
44261+
44262 if (!perm) {
44263 ret = -EPERM;
44264 goto reterr;
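
The keyboard.c hunks move the CAP_SYS_TTY_CONFIG test from the top of vt_do_kdsk_ioctl()/vt_do_kdgkb_ioctl() into the set-entry branches, so querying a keymap entry needs no capability while modifying one still does. Sketch of the per-command gate (user-space; geteuid()==0 is a placeholder for the capability check):

#include <stdio.h>
#include <unistd.h>

enum { KDGKBENT, KDSKBENT };

static int kbd_ioctl(int cmd, int perm)
{
    switch (cmd) {
    case KDGKBENT:
        puts("read entry: no capability required");
        return 0;
    case KDSKBENT:
        if (geteuid() != 0)    /* capable(CAP_SYS_TTY_CONFIG) stand-in */
            perm = 0;          /* strip permission on the write path   */
        if (!perm)
            return -1;         /* -EPERM in the kernel                 */
        puts("write entry");
        return 0;
    }
    return -1;
}

int main(void)
{
    kbd_ioctl(KDGKBENT, 0);
    kbd_ioctl(KDSKBENT, 1);
    return 0;
}
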
44265diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
44266index 5110f36..8dc0a74 100644
44267--- a/drivers/uio/uio.c
44268+++ b/drivers/uio/uio.c
44269@@ -25,6 +25,7 @@
44270 #include <linux/kobject.h>
44271 #include <linux/cdev.h>
44272 #include <linux/uio_driver.h>
44273+#include <asm/local.h>
44274
44275 #define UIO_MAX_DEVICES (1U << MINORBITS)
44276
44277@@ -32,10 +33,10 @@ struct uio_device {
44278 struct module *owner;
44279 struct device *dev;
44280 int minor;
44281- atomic_t event;
44282+ atomic_unchecked_t event;
44283 struct fasync_struct *async_queue;
44284 wait_queue_head_t wait;
44285- int vma_count;
44286+ local_t vma_count;
44287 struct uio_info *info;
44288 struct kobject *map_dir;
44289 struct kobject *portio_dir;
44290@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
44291 struct device_attribute *attr, char *buf)
44292 {
44293 struct uio_device *idev = dev_get_drvdata(dev);
44294- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
44295+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
44296 }
44297
44298 static struct device_attribute uio_class_attributes[] = {
44299@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
44300 {
44301 struct uio_device *idev = info->uio_dev;
44302
44303- atomic_inc(&idev->event);
44304+ atomic_inc_unchecked(&idev->event);
44305 wake_up_interruptible(&idev->wait);
44306 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
44307 }
44308@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
44309 }
44310
44311 listener->dev = idev;
44312- listener->event_count = atomic_read(&idev->event);
44313+ listener->event_count = atomic_read_unchecked(&idev->event);
44314 filep->private_data = listener;
44315
44316 if (idev->info->open) {
44317@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
44318 return -EIO;
44319
44320 poll_wait(filep, &idev->wait, wait);
44321- if (listener->event_count != atomic_read(&idev->event))
44322+ if (listener->event_count != atomic_read_unchecked(&idev->event))
44323 return POLLIN | POLLRDNORM;
44324 return 0;
44325 }
44326@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
44327 do {
44328 set_current_state(TASK_INTERRUPTIBLE);
44329
44330- event_count = atomic_read(&idev->event);
44331+ event_count = atomic_read_unchecked(&idev->event);
44332 if (event_count != listener->event_count) {
44333 if (copy_to_user(buf, &event_count, count))
44334 retval = -EFAULT;
44335@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
44336 static void uio_vma_open(struct vm_area_struct *vma)
44337 {
44338 struct uio_device *idev = vma->vm_private_data;
44339- idev->vma_count++;
44340+ local_inc(&idev->vma_count);
44341 }
44342
44343 static void uio_vma_close(struct vm_area_struct *vma)
44344 {
44345 struct uio_device *idev = vma->vm_private_data;
44346- idev->vma_count--;
44347+ local_dec(&idev->vma_count);
44348 }
44349
44350 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
44351@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
44352 idev->owner = owner;
44353 idev->info = info;
44354 init_waitqueue_head(&idev->wait);
44355- atomic_set(&idev->event, 0);
44356+ atomic_set_unchecked(&idev->event, 0);
44357
44358 ret = uio_get_minor(idev);
44359 if (ret)
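
In uio.c the event counter becomes atomic_unchecked_t. Under the PaX overflow instrumentation that grsecurity builds with, ordinary atomic_t increments are checked for wrap-around; the *_unchecked variants opt a counter out of that check, which fits a pure event sequence number whose only use is inequality comparison and which may legitimately wrap. vma_count likewise moves to local_t, an atomics-on-this-CPU counter type. A user-space model of the wrapping event counter, with unsigned C11 atomics playing the unchecked role:

#include <stdatomic.h>
#include <stdio.h>

/* Unsigned atomics have defined wrap-around, mirroring what the
 * kernel's *_unchecked accessors express: overflow here is harmless. */
static atomic_uint event;

static void event_notify(void)              /* uio_event_notify()    */
{
    atomic_fetch_add(&event, 1u);
}

static int poll_changed(unsigned int seen)  /* uio_poll() comparison */
{
    return atomic_load(&event) != seen;
}

int main(void)
{
    unsigned int seen = atomic_load(&event);
    event_notify();
    printf("changed=%d\n", poll_changed(seen));
    return 0;
}

The same atomic_unchecked_t rewrite recurs below for the USB devices.c connect counter, urbnum, the ATM statistics, and the wusbcore xfer_id_count: all monotonically advancing sequence numbers, none of them reference counts.
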
44360diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
44361index b7eb86a..36d28af 100644
44362--- a/drivers/usb/atm/cxacru.c
44363+++ b/drivers/usb/atm/cxacru.c
44364@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
44365 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
44366 if (ret < 2)
44367 return -EINVAL;
44368- if (index < 0 || index > 0x7f)
44369+ if (index > 0x7f)
44370 return -EINVAL;
44371 pos += tmp;
44372
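In cxacru.c, `index` is parsed with the unsigned %x conversion; with an unsigned type the `< 0` half of the range check is always false, so the hunk drops the dead comparison and keeps the live `> 0x7f` bound. The pitfall in miniature:

#include <stdio.h>

int main(void)
{
    unsigned int index = 0xffffffffu;  /* a "negative" bit pattern */

    /* Always false for an unsigned type; gcc -Wtype-limits flags
     * this as a comparison that can never be true. */
    if (index < 0)
        puts("unreachable");

    /* The upper bound is the only meaningful check. */
    if (index > 0x7f)
        puts("rejected: out of range");
    return 0;
}
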
44373diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
44374index 35f10bf..6a38a0b 100644
44375--- a/drivers/usb/atm/usbatm.c
44376+++ b/drivers/usb/atm/usbatm.c
44377@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44378 if (printk_ratelimit())
44379 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
44380 __func__, vpi, vci);
44381- atomic_inc(&vcc->stats->rx_err);
44382+ atomic_inc_unchecked(&vcc->stats->rx_err);
44383 return;
44384 }
44385
44386@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44387 if (length > ATM_MAX_AAL5_PDU) {
44388 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
44389 __func__, length, vcc);
44390- atomic_inc(&vcc->stats->rx_err);
44391+ atomic_inc_unchecked(&vcc->stats->rx_err);
44392 goto out;
44393 }
44394
44395@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44396 if (sarb->len < pdu_length) {
44397 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
44398 __func__, pdu_length, sarb->len, vcc);
44399- atomic_inc(&vcc->stats->rx_err);
44400+ atomic_inc_unchecked(&vcc->stats->rx_err);
44401 goto out;
44402 }
44403
44404 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
44405 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
44406 __func__, vcc);
44407- atomic_inc(&vcc->stats->rx_err);
44408+ atomic_inc_unchecked(&vcc->stats->rx_err);
44409 goto out;
44410 }
44411
44412@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44413 if (printk_ratelimit())
44414 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
44415 __func__, length);
44416- atomic_inc(&vcc->stats->rx_drop);
44417+ atomic_inc_unchecked(&vcc->stats->rx_drop);
44418 goto out;
44419 }
44420
44421@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44422
44423 vcc->push(vcc, skb);
44424
44425- atomic_inc(&vcc->stats->rx);
44426+ atomic_inc_unchecked(&vcc->stats->rx);
44427 out:
44428 skb_trim(sarb, 0);
44429 }
44430@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
44431 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
44432
44433 usbatm_pop(vcc, skb);
44434- atomic_inc(&vcc->stats->tx);
44435+ atomic_inc_unchecked(&vcc->stats->tx);
44436
44437 skb = skb_dequeue(&instance->sndqueue);
44438 }
44439@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
44440 if (!left--)
44441 return sprintf(page,
44442 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
44443- atomic_read(&atm_dev->stats.aal5.tx),
44444- atomic_read(&atm_dev->stats.aal5.tx_err),
44445- atomic_read(&atm_dev->stats.aal5.rx),
44446- atomic_read(&atm_dev->stats.aal5.rx_err),
44447- atomic_read(&atm_dev->stats.aal5.rx_drop));
44448+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
44449+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
44450+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
44451+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
44452+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
44453
44454 if (!left--) {
44455 if (instance->disconnected)
44456diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
44457index cbacea9..246cccd 100644
44458--- a/drivers/usb/core/devices.c
44459+++ b/drivers/usb/core/devices.c
44460@@ -126,7 +126,7 @@ static const char format_endpt[] =
44461 * time it gets called.
44462 */
44463 static struct device_connect_event {
44464- atomic_t count;
44465+ atomic_unchecked_t count;
44466 wait_queue_head_t wait;
44467 } device_event = {
44468 .count = ATOMIC_INIT(1),
44469@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
44470
44471 void usbfs_conn_disc_event(void)
44472 {
44473- atomic_add(2, &device_event.count);
44474+ atomic_add_unchecked(2, &device_event.count);
44475 wake_up(&device_event.wait);
44476 }
44477
44478@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
44479
44480 poll_wait(file, &device_event.wait, wait);
44481
44482- event_count = atomic_read(&device_event.count);
44483+ event_count = atomic_read_unchecked(&device_event.count);
44484 if (file->f_version != event_count) {
44485 file->f_version = event_count;
44486 return POLLIN | POLLRDNORM;
44487diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
44488index 8e64adf..9a33a3c 100644
44489--- a/drivers/usb/core/hcd.c
44490+++ b/drivers/usb/core/hcd.c
44491@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44492 */
44493 usb_get_urb(urb);
44494 atomic_inc(&urb->use_count);
44495- atomic_inc(&urb->dev->urbnum);
44496+ atomic_inc_unchecked(&urb->dev->urbnum);
44497 usbmon_urb_submit(&hcd->self, urb);
44498
44499 /* NOTE requirements on root-hub callers (usbfs and the hub
44500@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44501 urb->hcpriv = NULL;
44502 INIT_LIST_HEAD(&urb->urb_list);
44503 atomic_dec(&urb->use_count);
44504- atomic_dec(&urb->dev->urbnum);
44505+ atomic_dec_unchecked(&urb->dev->urbnum);
44506 if (atomic_read(&urb->reject))
44507 wake_up(&usb_kill_urb_queue);
44508 usb_put_urb(urb);
44509diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
44510index 131f736..99004c3 100644
44511--- a/drivers/usb/core/message.c
44512+++ b/drivers/usb/core/message.c
44513@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
44514 * method can wait for it to complete. Since you don't have a handle on the
44515 * URB used, you can't cancel the request.
44516 */
44517-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44518+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44519 __u8 requesttype, __u16 value, __u16 index, void *data,
44520 __u16 size, int timeout)
44521 {
44522diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
44523index 818e4a0..0fc9589 100644
44524--- a/drivers/usb/core/sysfs.c
44525+++ b/drivers/usb/core/sysfs.c
44526@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
44527 struct usb_device *udev;
44528
44529 udev = to_usb_device(dev);
44530- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
44531+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
44532 }
44533 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
44534
44535diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
44536index f81b925..78d22ec 100644
44537--- a/drivers/usb/core/usb.c
44538+++ b/drivers/usb/core/usb.c
44539@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
44540 set_dev_node(&dev->dev, dev_to_node(bus->controller));
44541 dev->state = USB_STATE_ATTACHED;
44542 dev->lpm_disable_count = 1;
44543- atomic_set(&dev->urbnum, 0);
44544+ atomic_set_unchecked(&dev->urbnum, 0);
44545
44546 INIT_LIST_HEAD(&dev->ep0.urb_list);
44547 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
44548diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
44549index 5e29dde..eca992f 100644
44550--- a/drivers/usb/early/ehci-dbgp.c
44551+++ b/drivers/usb/early/ehci-dbgp.c
44552@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
44553
44554 #ifdef CONFIG_KGDB
44555 static struct kgdb_io kgdbdbgp_io_ops;
44556-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
44557+static struct kgdb_io kgdbdbgp_io_ops_console;
44558+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
44559 #else
44560 #define dbgp_kgdb_mode (0)
44561 #endif
44562@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
44563 .write_char = kgdbdbgp_write_char,
44564 };
44565
44566+static struct kgdb_io kgdbdbgp_io_ops_console = {
44567+ .name = "kgdbdbgp",
44568+ .read_char = kgdbdbgp_read_char,
44569+ .write_char = kgdbdbgp_write_char,
44570+ .is_console = 1
44571+};
44572+
44573 static int kgdbdbgp_wait_time;
44574
44575 static int __init kgdbdbgp_parse_config(char *str)
44576@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
44577 ptr++;
44578 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
44579 }
44580- kgdb_register_io_module(&kgdbdbgp_io_ops);
44581- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
44582+ if (early_dbgp_console.index != -1)
44583+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
44584+ else
44585+ kgdb_register_io_module(&kgdbdbgp_io_ops);
44586
44587 return 0;
44588 }
44589diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44590index 598dcc1..032dd4f 100644
44591--- a/drivers/usb/gadget/u_serial.c
44592+++ b/drivers/usb/gadget/u_serial.c
44593@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44594 spin_lock_irq(&port->port_lock);
44595
44596 /* already open? Great. */
44597- if (port->port.count) {
44598+ if (atomic_read(&port->port.count)) {
44599 status = 0;
44600- port->port.count++;
44601+ atomic_inc(&port->port.count);
44602
44603 /* currently opening/closing? wait ... */
44604 } else if (port->openclose) {
44605@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44606 tty->driver_data = port;
44607 port->port.tty = tty;
44608
44609- port->port.count = 1;
44610+ atomic_set(&port->port.count, 1);
44611 port->openclose = false;
44612
44613 /* if connected, start the I/O stream */
44614@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44615
44616 spin_lock_irq(&port->port_lock);
44617
44618- if (port->port.count != 1) {
44619- if (port->port.count == 0)
44620+ if (atomic_read(&port->port.count) != 1) {
44621+ if (atomic_read(&port->port.count) == 0)
44622 WARN_ON(1);
44623 else
44624- --port->port.count;
44625+ atomic_dec(&port->port.count);
44626 goto exit;
44627 }
44628
44629@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44630 * and sleep if necessary
44631 */
44632 port->openclose = true;
44633- port->port.count = 0;
44634+ atomic_set(&port->port.count, 0);
44635
44636 gser = port->port_usb;
44637 if (gser && gser->disconnect)
44638@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
44639 int cond;
44640
44641 spin_lock_irq(&port->port_lock);
44642- cond = (port->port.count == 0) && !port->openclose;
44643+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44644 spin_unlock_irq(&port->port_lock);
44645 return cond;
44646 }
44647@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44648 /* if it's already open, start I/O ... and notify the serial
44649 * protocol about open/close status (connect/disconnect).
44650 */
44651- if (port->port.count) {
44652+ if (atomic_read(&port->port.count)) {
44653 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44654 gs_start_io(port);
44655 if (gser->connect)
44656@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
44657
44658 port->port_usb = NULL;
44659 gser->ioport = NULL;
44660- if (port->port.count > 0 || port->openclose) {
44661+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
44662 wake_up_interruptible(&port->drain_wait);
44663 if (port->port.tty)
44664 tty_hangup(port->port.tty);
44665@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
44666
44667 /* finally, free any unused/unusable I/O buffers */
44668 spin_lock_irqsave(&port->port_lock, flags);
44669- if (port->port.count == 0 && !port->openclose)
44670+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
44671 gs_buf_free(&port->port_write_buf);
44672 gs_free_requests(gser->out, &port->read_pool, NULL);
44673 gs_free_requests(gser->out, &port->read_queue, NULL);
44674diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44675index 5f3bcd3..bfca43f 100644
44676--- a/drivers/usb/serial/console.c
44677+++ b/drivers/usb/serial/console.c
44678@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44679
44680 info->port = port;
44681
44682- ++port->port.count;
44683+ atomic_inc(&port->port.count);
44684 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44685 if (serial->type->set_termios) {
44686 /*
44687@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44688 }
44689 /* Now that any required fake tty operations are completed restore
44690 * the tty port count */
44691- --port->port.count;
44692+ atomic_dec(&port->port.count);
44693 /* The console is special in terms of closing the device so
44694 * indicate this port is now acting as a system console. */
44695 port->port.console = 1;
44696@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44697 free_tty:
44698 kfree(tty);
44699 reset_open_count:
44700- port->port.count = 0;
44701+ atomic_set(&port->port.count, 0);
44702 usb_autopm_put_interface(serial->interface);
44703 error_get_interface:
44704 usb_serial_put(serial);
44705diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
44706index 6c3586a..a94e621 100644
44707--- a/drivers/usb/storage/realtek_cr.c
44708+++ b/drivers/usb/storage/realtek_cr.c
44709@@ -429,7 +429,7 @@ static int rts51x_read_status(struct us_data *us,
44710
44711 buf = kmalloc(len, GFP_NOIO);
44712 if (buf == NULL)
44713- return USB_STOR_TRANSPORT_ERROR;
44714+ return -ENOMEM;
44715
44716 US_DEBUGP("%s, lun = %d\n", __func__, lun);
44717
44718diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44719index 75f70f0..d467e1a 100644
44720--- a/drivers/usb/storage/usb.h
44721+++ b/drivers/usb/storage/usb.h
44722@@ -63,7 +63,7 @@ struct us_unusual_dev {
44723 __u8 useProtocol;
44724 __u8 useTransport;
44725 int (*initFunction)(struct us_data *);
44726-};
44727+} __do_const;
44728
44729
44730 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
44731diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44732index d6bea3e..60b250e 100644
44733--- a/drivers/usb/wusbcore/wa-hc.h
44734+++ b/drivers/usb/wusbcore/wa-hc.h
44735@@ -192,7 +192,7 @@ struct wahc {
44736 struct list_head xfer_delayed_list;
44737 spinlock_t xfer_list_lock;
44738 struct work_struct xfer_work;
44739- atomic_t xfer_id_count;
44740+ atomic_unchecked_t xfer_id_count;
44741 };
44742
44743
44744@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44745 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44746 spin_lock_init(&wa->xfer_list_lock);
44747 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44748- atomic_set(&wa->xfer_id_count, 1);
44749+ atomic_set_unchecked(&wa->xfer_id_count, 1);
44750 }
44751
44752 /**
44753diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44754index 57c01ab..8a05959 100644
44755--- a/drivers/usb/wusbcore/wa-xfer.c
44756+++ b/drivers/usb/wusbcore/wa-xfer.c
44757@@ -296,7 +296,7 @@ out:
44758 */
44759 static void wa_xfer_id_init(struct wa_xfer *xfer)
44760 {
44761- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44762+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44763 }
44764
44765 /*
44766diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
44767index 8c55011..eed4ae1a 100644
44768--- a/drivers/video/aty/aty128fb.c
44769+++ b/drivers/video/aty/aty128fb.c
44770@@ -149,7 +149,7 @@ enum {
44771 };
44772
44773 /* Must match above enum */
44774-static char * const r128_family[] = {
44775+static const char * const r128_family[] = {
44776 "AGP",
44777 "PCI",
44778 "PRO AGP",
44779diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
44780index 4f27fdc..d3537e6 100644
44781--- a/drivers/video/aty/atyfb_base.c
44782+++ b/drivers/video/aty/atyfb_base.c
44783@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
44784 par->accel_flags = var->accel_flags; /* hack */
44785
44786 if (var->accel_flags) {
44787- info->fbops->fb_sync = atyfb_sync;
44788+ pax_open_kernel();
44789+ *(void **)&info->fbops->fb_sync = atyfb_sync;
44790+ pax_close_kernel();
44791 info->flags &= ~FBINFO_HWACCEL_DISABLED;
44792 } else {
44793- info->fbops->fb_sync = NULL;
44794+ pax_open_kernel();
44795+ *(void **)&info->fbops->fb_sync = NULL;
44796+ pax_close_kernel();
44797 info->flags |= FBINFO_HWACCEL_DISABLED;
44798 }
44799
44800diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
44801index 95ec042..e6affdd 100644
44802--- a/drivers/video/aty/mach64_cursor.c
44803+++ b/drivers/video/aty/mach64_cursor.c
44804@@ -7,6 +7,7 @@
44805 #include <linux/string.h>
44806
44807 #include <asm/io.h>
44808+#include <asm/pgtable.h>
44809
44810 #ifdef __sparc__
44811 #include <asm/fbio.h>
44812@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
44813 info->sprite.buf_align = 16; /* and 64 lines tall. */
44814 info->sprite.flags = FB_PIXMAP_IO;
44815
44816- info->fbops->fb_cursor = atyfb_cursor;
44817+ pax_open_kernel();
44818+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
44819+ pax_close_kernel();
44820
44821 return 0;
44822 }
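
The two aty hunks above, and the fb_defio change below, all write function pointers into fb_ops tables that grsecurity constifies into read-only memory. pax_open_kernel()/pax_close_kernel() temporarily lift the write protection, and the `*(void **)&` cast silences the const qualifier on the slot. A rough user-space analogue using mprotect(2) in place of the PaX helpers (layout and names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct fb_ops { void (*fb_sync)(void); };

static void my_sync(void) { puts("sync"); }

int main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    /* A page-aligned ops table, made read-only to model a constified
     * kernel object. */
    struct fb_ops *ops = aligned_alloc(pagesz, pagesz);
    if (!ops)
        return 1;
    memset(ops, 0, pagesz);
    mprotect(ops, pagesz, PROT_READ);

    /* pax_open_kernel() analogue: briefly restore write access. */
    mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
    ops->fb_sync = my_sync;   /* the kernel hunks also need the
                                 *(void **)& cast to defeat const */
    mprotect(ops, pagesz, PROT_READ);   /* pax_close_kernel() */

    ops->fb_sync();           /* reading through the table is fine */
    return 0;
}
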
44823diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
44824index 6c5ed6b..b727c88 100644
44825--- a/drivers/video/backlight/kb3886_bl.c
44826+++ b/drivers/video/backlight/kb3886_bl.c
44827@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
44828 static unsigned long kb3886bl_flags;
44829 #define KB3886BL_SUSPENDED 0x01
44830
44831-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
44832+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
44833 {
44834 .ident = "Sahara Touch-iT",
44835 .matches = {
44836diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
44837index 88cad6b..dd746c7 100644
44838--- a/drivers/video/fb_defio.c
44839+++ b/drivers/video/fb_defio.c
44840@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
44841
44842 BUG_ON(!fbdefio);
44843 mutex_init(&fbdefio->lock);
44844- info->fbops->fb_mmap = fb_deferred_io_mmap;
44845+ pax_open_kernel();
44846+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
44847+ pax_close_kernel();
44848 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
44849 INIT_LIST_HEAD(&fbdefio->pagelist);
44850 if (fbdefio->delay == 0) /* set a default of 1 s */
44851@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
44852 page->mapping = NULL;
44853 }
44854
44855- info->fbops->fb_mmap = NULL;
44856+ *(void **)&info->fbops->fb_mmap = NULL;
44857 mutex_destroy(&fbdefio->lock);
44858 }
44859 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
44860diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
44861index 5c3960d..15cf8fc 100644
44862--- a/drivers/video/fbcmap.c
44863+++ b/drivers/video/fbcmap.c
44864@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
44865 rc = -ENODEV;
44866 goto out;
44867 }
44868- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
44869- !info->fbops->fb_setcmap)) {
44870+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
44871 rc = -EINVAL;
44872 goto out1;
44873 }
44874diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
44875index dc61c12..e29796e 100644
44876--- a/drivers/video/fbmem.c
44877+++ b/drivers/video/fbmem.c
44878@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44879 image->dx += image->width + 8;
44880 }
44881 } else if (rotate == FB_ROTATE_UD) {
44882- for (x = 0; x < num && image->dx >= 0; x++) {
44883+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
44884 info->fbops->fb_imageblit(info, image);
44885 image->dx -= image->width + 8;
44886 }
44887@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44888 image->dy += image->height + 8;
44889 }
44890 } else if (rotate == FB_ROTATE_CCW) {
44891- for (x = 0; x < num && image->dy >= 0; x++) {
44892+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
44893 info->fbops->fb_imageblit(info, image);
44894 image->dy -= image->height + 8;
44895 }
44896@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
44897 return -EFAULT;
44898 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
44899 return -EINVAL;
44900- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
44901+ if (con2fb.framebuffer >= FB_MAX)
44902 return -EINVAL;
44903 if (!registered_fb[con2fb.framebuffer])
44904 request_module("fb%d", con2fb.framebuffer);
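
In fb_do_show_logo(), image->dx and image->dy are unsigned, so after the subtraction in the FB_ROTATE_UD/CCW loops they wrap instead of going negative and a bare `>= 0` test is vacuously true; the `(__s32)` cast reinterprets the wrapped value as negative so the loop actually terminates. (The con2fb.framebuffer hunk above drops a `< 0` test on an unsigned field for the same reason as the cxacru change earlier.) The behaviour in miniature:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t dx = 10;  /* unsigned, like fb_image.dx */
    int steps = 0;

    /* "dx >= 0" would always hold for an unsigned type; the signed
     * reinterpretation (two's complement assumed) catches the wrap. */
    while ((int32_t)dx >= 0 && steps < 10) {
        dx -= 16;      /* image->width + 8, with width = 8 here */
        steps++;
    }
    printf("stopped after %d step(s), dx=0x%x\n", steps, dx);
    return 0;
}
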
44905diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
44906index 7672d2e..b56437f 100644
44907--- a/drivers/video/i810/i810_accel.c
44908+++ b/drivers/video/i810/i810_accel.c
44909@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
44910 }
44911 }
44912 printk("ringbuffer lockup!!!\n");
44913+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
44914 i810_report_error(mmio);
44915 par->dev_flags |= LOCKUP;
44916 info->pixmap.scan_align = 1;
44917diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
44918index 3c14e43..eafa544 100644
44919--- a/drivers/video/logo/logo_linux_clut224.ppm
44920+++ b/drivers/video/logo/logo_linux_clut224.ppm
44921@@ -1,1604 +1,1123 @@
44922 P3
44923-# Standard 224-color Linux logo
44924 80 80
44925 255
44926- 0 0 0 0 0 0 0 0 0 0 0 0
44927- 0 0 0 0 0 0 0 0 0 0 0 0
44928- 0 0 0 0 0 0 0 0 0 0 0 0
44929- 0 0 0 0 0 0 0 0 0 0 0 0
44930- 0 0 0 0 0 0 0 0 0 0 0 0
44931- 0 0 0 0 0 0 0 0 0 0 0 0
44932- 0 0 0 0 0 0 0 0 0 0 0 0
44933- 0 0 0 0 0 0 0 0 0 0 0 0
44934- 0 0 0 0 0 0 0 0 0 0 0 0
44935- 6 6 6 6 6 6 10 10 10 10 10 10
44936- 10 10 10 6 6 6 6 6 6 6 6 6
44937- 0 0 0 0 0 0 0 0 0 0 0 0
44938- 0 0 0 0 0 0 0 0 0 0 0 0
44939- 0 0 0 0 0 0 0 0 0 0 0 0
44940- 0 0 0 0 0 0 0 0 0 0 0 0
44941- 0 0 0 0 0 0 0 0 0 0 0 0
44942- 0 0 0 0 0 0 0 0 0 0 0 0
44943- 0 0 0 0 0 0 0 0 0 0 0 0
44944- 0 0 0 0 0 0 0 0 0 0 0 0
44945- 0 0 0 0 0 0 0 0 0 0 0 0
44946- 0 0 0 0 0 0 0 0 0 0 0 0
44947- 0 0 0 0 0 0 0 0 0 0 0 0
44948- 0 0 0 0 0 0 0 0 0 0 0 0
44949- 0 0 0 0 0 0 0 0 0 0 0 0
44950- 0 0 0 0 0 0 0 0 0 0 0 0
44951- 0 0 0 0 0 0 0 0 0 0 0 0
44952- 0 0 0 0 0 0 0 0 0 0 0 0
44953- 0 0 0 0 0 0 0 0 0 0 0 0
44954- 0 0 0 6 6 6 10 10 10 14 14 14
44955- 22 22 22 26 26 26 30 30 30 34 34 34
44956- 30 30 30 30 30 30 26 26 26 18 18 18
44957- 14 14 14 10 10 10 6 6 6 0 0 0
44958- 0 0 0 0 0 0 0 0 0 0 0 0
44959- 0 0 0 0 0 0 0 0 0 0 0 0
44960- 0 0 0 0 0 0 0 0 0 0 0 0
44961- 0 0 0 0 0 0 0 0 0 0 0 0
44962- 0 0 0 0 0 0 0 0 0 0 0 0
44963- 0 0 0 0 0 0 0 0 0 0 0 0
44964- 0 0 0 0 0 0 0 0 0 0 0 0
44965- 0 0 0 0 0 0 0 0 0 0 0 0
44966- 0 0 0 0 0 0 0 0 0 0 0 0
44967- 0 0 0 0 0 1 0 0 1 0 0 0
44968- 0 0 0 0 0 0 0 0 0 0 0 0
44969- 0 0 0 0 0 0 0 0 0 0 0 0
44970- 0 0 0 0 0 0 0 0 0 0 0 0
44971- 0 0 0 0 0 0 0 0 0 0 0 0
44972- 0 0 0 0 0 0 0 0 0 0 0 0
44973- 0 0 0 0 0 0 0 0 0 0 0 0
44974- 6 6 6 14 14 14 26 26 26 42 42 42
44975- 54 54 54 66 66 66 78 78 78 78 78 78
44976- 78 78 78 74 74 74 66 66 66 54 54 54
44977- 42 42 42 26 26 26 18 18 18 10 10 10
44978- 6 6 6 0 0 0 0 0 0 0 0 0
44979- 0 0 0 0 0 0 0 0 0 0 0 0
44980- 0 0 0 0 0 0 0 0 0 0 0 0
44981- 0 0 0 0 0 0 0 0 0 0 0 0
44982- 0 0 0 0 0 0 0 0 0 0 0 0
44983- 0 0 0 0 0 0 0 0 0 0 0 0
44984- 0 0 0 0 0 0 0 0 0 0 0 0
44985- 0 0 0 0 0 0 0 0 0 0 0 0
44986- 0 0 0 0 0 0 0 0 0 0 0 0
44987- 0 0 1 0 0 0 0 0 0 0 0 0
44988- 0 0 0 0 0 0 0 0 0 0 0 0
44989- 0 0 0 0 0 0 0 0 0 0 0 0
44990- 0 0 0 0 0 0 0 0 0 0 0 0
44991- 0 0 0 0 0 0 0 0 0 0 0 0
44992- 0 0 0 0 0 0 0 0 0 0 0 0
44993- 0 0 0 0 0 0 0 0 0 10 10 10
44994- 22 22 22 42 42 42 66 66 66 86 86 86
44995- 66 66 66 38 38 38 38 38 38 22 22 22
44996- 26 26 26 34 34 34 54 54 54 66 66 66
44997- 86 86 86 70 70 70 46 46 46 26 26 26
44998- 14 14 14 6 6 6 0 0 0 0 0 0
44999- 0 0 0 0 0 0 0 0 0 0 0 0
45000- 0 0 0 0 0 0 0 0 0 0 0 0
45001- 0 0 0 0 0 0 0 0 0 0 0 0
45002- 0 0 0 0 0 0 0 0 0 0 0 0
45003- 0 0 0 0 0 0 0 0 0 0 0 0
45004- 0 0 0 0 0 0 0 0 0 0 0 0
45005- 0 0 0 0 0 0 0 0 0 0 0 0
45006- 0 0 0 0 0 0 0 0 0 0 0 0
45007- 0 0 1 0 0 1 0 0 1 0 0 0
45008- 0 0 0 0 0 0 0 0 0 0 0 0
45009- 0 0 0 0 0 0 0 0 0 0 0 0
45010- 0 0 0 0 0 0 0 0 0 0 0 0
45011- 0 0 0 0 0 0 0 0 0 0 0 0
45012- 0 0 0 0 0 0 0 0 0 0 0 0
45013- 0 0 0 0 0 0 10 10 10 26 26 26
45014- 50 50 50 82 82 82 58 58 58 6 6 6
45015- 2 2 6 2 2 6 2 2 6 2 2 6
45016- 2 2 6 2 2 6 2 2 6 2 2 6
45017- 6 6 6 54 54 54 86 86 86 66 66 66
45018- 38 38 38 18 18 18 6 6 6 0 0 0
45019- 0 0 0 0 0 0 0 0 0 0 0 0
45020- 0 0 0 0 0 0 0 0 0 0 0 0
45021- 0 0 0 0 0 0 0 0 0 0 0 0
45022- 0 0 0 0 0 0 0 0 0 0 0 0
45023- 0 0 0 0 0 0 0 0 0 0 0 0
45024- 0 0 0 0 0 0 0 0 0 0 0 0
45025- 0 0 0 0 0 0 0 0 0 0 0 0
45026- 0 0 0 0 0 0 0 0 0 0 0 0
45027- 0 0 0 0 0 0 0 0 0 0 0 0
45028- 0 0 0 0 0 0 0 0 0 0 0 0
45029- 0 0 0 0 0 0 0 0 0 0 0 0
45030- 0 0 0 0 0 0 0 0 0 0 0 0
45031- 0 0 0 0 0 0 0 0 0 0 0 0
45032- 0 0 0 0 0 0 0 0 0 0 0 0
45033- 0 0 0 6 6 6 22 22 22 50 50 50
45034- 78 78 78 34 34 34 2 2 6 2 2 6
45035- 2 2 6 2 2 6 2 2 6 2 2 6
45036- 2 2 6 2 2 6 2 2 6 2 2 6
45037- 2 2 6 2 2 6 6 6 6 70 70 70
45038- 78 78 78 46 46 46 22 22 22 6 6 6
45039- 0 0 0 0 0 0 0 0 0 0 0 0
45040- 0 0 0 0 0 0 0 0 0 0 0 0
45041- 0 0 0 0 0 0 0 0 0 0 0 0
45042- 0 0 0 0 0 0 0 0 0 0 0 0
45043- 0 0 0 0 0 0 0 0 0 0 0 0
45044- 0 0 0 0 0 0 0 0 0 0 0 0
45045- 0 0 0 0 0 0 0 0 0 0 0 0
45046- 0 0 0 0 0 0 0 0 0 0 0 0
45047- 0 0 1 0 0 1 0 0 1 0 0 0
45048- 0 0 0 0 0 0 0 0 0 0 0 0
45049- 0 0 0 0 0 0 0 0 0 0 0 0
45050- 0 0 0 0 0 0 0 0 0 0 0 0
45051- 0 0 0 0 0 0 0 0 0 0 0 0
45052- 0 0 0 0 0 0 0 0 0 0 0 0
45053- 6 6 6 18 18 18 42 42 42 82 82 82
45054- 26 26 26 2 2 6 2 2 6 2 2 6
45055- 2 2 6 2 2 6 2 2 6 2 2 6
45056- 2 2 6 2 2 6 2 2 6 14 14 14
45057- 46 46 46 34 34 34 6 6 6 2 2 6
45058- 42 42 42 78 78 78 42 42 42 18 18 18
45059- 6 6 6 0 0 0 0 0 0 0 0 0
45060- 0 0 0 0 0 0 0 0 0 0 0 0
45061- 0 0 0 0 0 0 0 0 0 0 0 0
45062- 0 0 0 0 0 0 0 0 0 0 0 0
45063- 0 0 0 0 0 0 0 0 0 0 0 0
45064- 0 0 0 0 0 0 0 0 0 0 0 0
45065- 0 0 0 0 0 0 0 0 0 0 0 0
45066- 0 0 0 0 0 0 0 0 0 0 0 0
45067- 0 0 1 0 0 0 0 0 1 0 0 0
45068- 0 0 0 0 0 0 0 0 0 0 0 0
45069- 0 0 0 0 0 0 0 0 0 0 0 0
45070- 0 0 0 0 0 0 0 0 0 0 0 0
45071- 0 0 0 0 0 0 0 0 0 0 0 0
45072- 0 0 0 0 0 0 0 0 0 0 0 0
45073- 10 10 10 30 30 30 66 66 66 58 58 58
45074- 2 2 6 2 2 6 2 2 6 2 2 6
45075- 2 2 6 2 2 6 2 2 6 2 2 6
45076- 2 2 6 2 2 6 2 2 6 26 26 26
45077- 86 86 86 101 101 101 46 46 46 10 10 10
45078- 2 2 6 58 58 58 70 70 70 34 34 34
45079- 10 10 10 0 0 0 0 0 0 0 0 0
45080- 0 0 0 0 0 0 0 0 0 0 0 0
45081- 0 0 0 0 0 0 0 0 0 0 0 0
45082- 0 0 0 0 0 0 0 0 0 0 0 0
45083- 0 0 0 0 0 0 0 0 0 0 0 0
45084- 0 0 0 0 0 0 0 0 0 0 0 0
45085- 0 0 0 0 0 0 0 0 0 0 0 0
45086- 0 0 0 0 0 0 0 0 0 0 0 0
45087- 0 0 1 0 0 1 0 0 1 0 0 0
45088- 0 0 0 0 0 0 0 0 0 0 0 0
45089- 0 0 0 0 0 0 0 0 0 0 0 0
45090- 0 0 0 0 0 0 0 0 0 0 0 0
45091- 0 0 0 0 0 0 0 0 0 0 0 0
45092- 0 0 0 0 0 0 0 0 0 0 0 0
45093- 14 14 14 42 42 42 86 86 86 10 10 10
45094- 2 2 6 2 2 6 2 2 6 2 2 6
45095- 2 2 6 2 2 6 2 2 6 2 2 6
45096- 2 2 6 2 2 6 2 2 6 30 30 30
45097- 94 94 94 94 94 94 58 58 58 26 26 26
45098- 2 2 6 6 6 6 78 78 78 54 54 54
45099- 22 22 22 6 6 6 0 0 0 0 0 0
45100- 0 0 0 0 0 0 0 0 0 0 0 0
45101- 0 0 0 0 0 0 0 0 0 0 0 0
45102- 0 0 0 0 0 0 0 0 0 0 0 0
45103- 0 0 0 0 0 0 0 0 0 0 0 0
45104- 0 0 0 0 0 0 0 0 0 0 0 0
45105- 0 0 0 0 0 0 0 0 0 0 0 0
45106- 0 0 0 0 0 0 0 0 0 0 0 0
45107- 0 0 0 0 0 0 0 0 0 0 0 0
45108- 0 0 0 0 0 0 0 0 0 0 0 0
45109- 0 0 0 0 0 0 0 0 0 0 0 0
45110- 0 0 0 0 0 0 0 0 0 0 0 0
45111- 0 0 0 0 0 0 0 0 0 0 0 0
45112- 0 0 0 0 0 0 0 0 0 6 6 6
45113- 22 22 22 62 62 62 62 62 62 2 2 6
45114- 2 2 6 2 2 6 2 2 6 2 2 6
45115- 2 2 6 2 2 6 2 2 6 2 2 6
45116- 2 2 6 2 2 6 2 2 6 26 26 26
45117- 54 54 54 38 38 38 18 18 18 10 10 10
45118- 2 2 6 2 2 6 34 34 34 82 82 82
45119- 38 38 38 14 14 14 0 0 0 0 0 0
45120- 0 0 0 0 0 0 0 0 0 0 0 0
45121- 0 0 0 0 0 0 0 0 0 0 0 0
45122- 0 0 0 0 0 0 0 0 0 0 0 0
45123- 0 0 0 0 0 0 0 0 0 0 0 0
45124- 0 0 0 0 0 0 0 0 0 0 0 0
45125- 0 0 0 0 0 0 0 0 0 0 0 0
45126- 0 0 0 0 0 0 0 0 0 0 0 0
45127- 0 0 0 0 0 1 0 0 1 0 0 0
45128- 0 0 0 0 0 0 0 0 0 0 0 0
45129- 0 0 0 0 0 0 0 0 0 0 0 0
45130- 0 0 0 0 0 0 0 0 0 0 0 0
45131- 0 0 0 0 0 0 0 0 0 0 0 0
45132- 0 0 0 0 0 0 0 0 0 6 6 6
45133- 30 30 30 78 78 78 30 30 30 2 2 6
45134- 2 2 6 2 2 6 2 2 6 2 2 6
45135- 2 2 6 2 2 6 2 2 6 2 2 6
45136- 2 2 6 2 2 6 2 2 6 10 10 10
45137- 10 10 10 2 2 6 2 2 6 2 2 6
45138- 2 2 6 2 2 6 2 2 6 78 78 78
45139- 50 50 50 18 18 18 6 6 6 0 0 0
45140- 0 0 0 0 0 0 0 0 0 0 0 0
45141- 0 0 0 0 0 0 0 0 0 0 0 0
45142- 0 0 0 0 0 0 0 0 0 0 0 0
45143- 0 0 0 0 0 0 0 0 0 0 0 0
45144- 0 0 0 0 0 0 0 0 0 0 0 0
45145- 0 0 0 0 0 0 0 0 0 0 0 0
45146- 0 0 0 0 0 0 0 0 0 0 0 0
45147- 0 0 1 0 0 0 0 0 0 0 0 0
45148- 0 0 0 0 0 0 0 0 0 0 0 0
45149- 0 0 0 0 0 0 0 0 0 0 0 0
45150- 0 0 0 0 0 0 0 0 0 0 0 0
45151- 0 0 0 0 0 0 0 0 0 0 0 0
45152- 0 0 0 0 0 0 0 0 0 10 10 10
45153- 38 38 38 86 86 86 14 14 14 2 2 6
45154- 2 2 6 2 2 6 2 2 6 2 2 6
45155- 2 2 6 2 2 6 2 2 6 2 2 6
45156- 2 2 6 2 2 6 2 2 6 2 2 6
45157- 2 2 6 2 2 6 2 2 6 2 2 6
45158- 2 2 6 2 2 6 2 2 6 54 54 54
45159- 66 66 66 26 26 26 6 6 6 0 0 0
45160- 0 0 0 0 0 0 0 0 0 0 0 0
45161- 0 0 0 0 0 0 0 0 0 0 0 0
45162- 0 0 0 0 0 0 0 0 0 0 0 0
45163- 0 0 0 0 0 0 0 0 0 0 0 0
45164- 0 0 0 0 0 0 0 0 0 0 0 0
45165- 0 0 0 0 0 0 0 0 0 0 0 0
45166- 0 0 0 0 0 0 0 0 0 0 0 0
45167- 0 0 0 0 0 1 0 0 1 0 0 0
45168- 0 0 0 0 0 0 0 0 0 0 0 0
45169- 0 0 0 0 0 0 0 0 0 0 0 0
45170- 0 0 0 0 0 0 0 0 0 0 0 0
45171- 0 0 0 0 0 0 0 0 0 0 0 0
45172- 0 0 0 0 0 0 0 0 0 14 14 14
45173- 42 42 42 82 82 82 2 2 6 2 2 6
45174- 2 2 6 6 6 6 10 10 10 2 2 6
45175- 2 2 6 2 2 6 2 2 6 2 2 6
45176- 2 2 6 2 2 6 2 2 6 6 6 6
45177- 14 14 14 10 10 10 2 2 6 2 2 6
45178- 2 2 6 2 2 6 2 2 6 18 18 18
45179- 82 82 82 34 34 34 10 10 10 0 0 0
45180- 0 0 0 0 0 0 0 0 0 0 0 0
45181- 0 0 0 0 0 0 0 0 0 0 0 0
45182- 0 0 0 0 0 0 0 0 0 0 0 0
45183- 0 0 0 0 0 0 0 0 0 0 0 0
45184- 0 0 0 0 0 0 0 0 0 0 0 0
45185- 0 0 0 0 0 0 0 0 0 0 0 0
45186- 0 0 0 0 0 0 0 0 0 0 0 0
45187- 0 0 1 0 0 0 0 0 0 0 0 0
45188- 0 0 0 0 0 0 0 0 0 0 0 0
45189- 0 0 0 0 0 0 0 0 0 0 0 0
45190- 0 0 0 0 0 0 0 0 0 0 0 0
45191- 0 0 0 0 0 0 0 0 0 0 0 0
45192- 0 0 0 0 0 0 0 0 0 14 14 14
45193- 46 46 46 86 86 86 2 2 6 2 2 6
45194- 6 6 6 6 6 6 22 22 22 34 34 34
45195- 6 6 6 2 2 6 2 2 6 2 2 6
45196- 2 2 6 2 2 6 18 18 18 34 34 34
45197- 10 10 10 50 50 50 22 22 22 2 2 6
45198- 2 2 6 2 2 6 2 2 6 10 10 10
45199- 86 86 86 42 42 42 14 14 14 0 0 0
45200- 0 0 0 0 0 0 0 0 0 0 0 0
45201- 0 0 0 0 0 0 0 0 0 0 0 0
45202- 0 0 0 0 0 0 0 0 0 0 0 0
45203- 0 0 0 0 0 0 0 0 0 0 0 0
45204- 0 0 0 0 0 0 0 0 0 0 0 0
45205- 0 0 0 0 0 0 0 0 0 0 0 0
45206- 0 0 0 0 0 0 0 0 0 0 0 0
45207- 0 0 1 0 0 1 0 0 1 0 0 0
45208- 0 0 0 0 0 0 0 0 0 0 0 0
45209- 0 0 0 0 0 0 0 0 0 0 0 0
45210- 0 0 0 0 0 0 0 0 0 0 0 0
45211- 0 0 0 0 0 0 0 0 0 0 0 0
45212- 0 0 0 0 0 0 0 0 0 14 14 14
45213- 46 46 46 86 86 86 2 2 6 2 2 6
45214- 38 38 38 116 116 116 94 94 94 22 22 22
45215- 22 22 22 2 2 6 2 2 6 2 2 6
45216- 14 14 14 86 86 86 138 138 138 162 162 162
45217-154 154 154 38 38 38 26 26 26 6 6 6
45218- 2 2 6 2 2 6 2 2 6 2 2 6
45219- 86 86 86 46 46 46 14 14 14 0 0 0
45220- 0 0 0 0 0 0 0 0 0 0 0 0
45221- 0 0 0 0 0 0 0 0 0 0 0 0
45222- 0 0 0 0 0 0 0 0 0 0 0 0
45223- 0 0 0 0 0 0 0 0 0 0 0 0
45224- 0 0 0 0 0 0 0 0 0 0 0 0
45225- 0 0 0 0 0 0 0 0 0 0 0 0
45226- 0 0 0 0 0 0 0 0 0 0 0 0
45227- 0 0 0 0 0 0 0 0 0 0 0 0
45228- 0 0 0 0 0 0 0 0 0 0 0 0
45229- 0 0 0 0 0 0 0 0 0 0 0 0
45230- 0 0 0 0 0 0 0 0 0 0 0 0
45231- 0 0 0 0 0 0 0 0 0 0 0 0
45232- 0 0 0 0 0 0 0 0 0 14 14 14
45233- 46 46 46 86 86 86 2 2 6 14 14 14
45234-134 134 134 198 198 198 195 195 195 116 116 116
45235- 10 10 10 2 2 6 2 2 6 6 6 6
45236-101 98 89 187 187 187 210 210 210 218 218 218
45237-214 214 214 134 134 134 14 14 14 6 6 6
45238- 2 2 6 2 2 6 2 2 6 2 2 6
45239- 86 86 86 50 50 50 18 18 18 6 6 6
45240- 0 0 0 0 0 0 0 0 0 0 0 0
45241- 0 0 0 0 0 0 0 0 0 0 0 0
45242- 0 0 0 0 0 0 0 0 0 0 0 0
45243- 0 0 0 0 0 0 0 0 0 0 0 0
45244- 0 0 0 0 0 0 0 0 0 0 0 0
45245- 0 0 0 0 0 0 0 0 0 0 0 0
45246- 0 0 0 0 0 0 0 0 1 0 0 0
45247- 0 0 1 0 0 1 0 0 1 0 0 0
45248- 0 0 0 0 0 0 0 0 0 0 0 0
45249- 0 0 0 0 0 0 0 0 0 0 0 0
45250- 0 0 0 0 0 0 0 0 0 0 0 0
45251- 0 0 0 0 0 0 0 0 0 0 0 0
45252- 0 0 0 0 0 0 0 0 0 14 14 14
45253- 46 46 46 86 86 86 2 2 6 54 54 54
45254-218 218 218 195 195 195 226 226 226 246 246 246
45255- 58 58 58 2 2 6 2 2 6 30 30 30
45256-210 210 210 253 253 253 174 174 174 123 123 123
45257-221 221 221 234 234 234 74 74 74 2 2 6
45258- 2 2 6 2 2 6 2 2 6 2 2 6
45259- 70 70 70 58 58 58 22 22 22 6 6 6
45260- 0 0 0 0 0 0 0 0 0 0 0 0
45261- 0 0 0 0 0 0 0 0 0 0 0 0
45262- 0 0 0 0 0 0 0 0 0 0 0 0
45263- 0 0 0 0 0 0 0 0 0 0 0 0
45264- 0 0 0 0 0 0 0 0 0 0 0 0
45265- 0 0 0 0 0 0 0 0 0 0 0 0
45266- 0 0 0 0 0 0 0 0 0 0 0 0
45267- 0 0 0 0 0 0 0 0 0 0 0 0
45268- 0 0 0 0 0 0 0 0 0 0 0 0
45269- 0 0 0 0 0 0 0 0 0 0 0 0
45270- 0 0 0 0 0 0 0 0 0 0 0 0
45271- 0 0 0 0 0 0 0 0 0 0 0 0
45272- 0 0 0 0 0 0 0 0 0 14 14 14
45273- 46 46 46 82 82 82 2 2 6 106 106 106
45274-170 170 170 26 26 26 86 86 86 226 226 226
45275-123 123 123 10 10 10 14 14 14 46 46 46
45276-231 231 231 190 190 190 6 6 6 70 70 70
45277- 90 90 90 238 238 238 158 158 158 2 2 6
45278- 2 2 6 2 2 6 2 2 6 2 2 6
45279- 70 70 70 58 58 58 22 22 22 6 6 6
45280- 0 0 0 0 0 0 0 0 0 0 0 0
45281- 0 0 0 0 0 0 0 0 0 0 0 0
45282- 0 0 0 0 0 0 0 0 0 0 0 0
45283- 0 0 0 0 0 0 0 0 0 0 0 0
45284- 0 0 0 0 0 0 0 0 0 0 0 0
45285- 0 0 0 0 0 0 0 0 0 0 0 0
45286- 0 0 0 0 0 0 0 0 1 0 0 0
45287- 0 0 1 0 0 1 0 0 1 0 0 0
45288- 0 0 0 0 0 0 0 0 0 0 0 0
45289- 0 0 0 0 0 0 0 0 0 0 0 0
45290- 0 0 0 0 0 0 0 0 0 0 0 0
45291- 0 0 0 0 0 0 0 0 0 0 0 0
45292- 0 0 0 0 0 0 0 0 0 14 14 14
45293- 42 42 42 86 86 86 6 6 6 116 116 116
45294-106 106 106 6 6 6 70 70 70 149 149 149
45295-128 128 128 18 18 18 38 38 38 54 54 54
45296-221 221 221 106 106 106 2 2 6 14 14 14
45297- 46 46 46 190 190 190 198 198 198 2 2 6
45298- 2 2 6 2 2 6 2 2 6 2 2 6
45299- 74 74 74 62 62 62 22 22 22 6 6 6
45300- 0 0 0 0 0 0 0 0 0 0 0 0
45301- 0 0 0 0 0 0 0 0 0 0 0 0
45302- 0 0 0 0 0 0 0 0 0 0 0 0
45303- 0 0 0 0 0 0 0 0 0 0 0 0
45304- 0 0 0 0 0 0 0 0 0 0 0 0
45305- 0 0 0 0 0 0 0 0 0 0 0 0
45306- 0 0 0 0 0 0 0 0 1 0 0 0
45307- 0 0 1 0 0 0 0 0 1 0 0 0
45308- 0 0 0 0 0 0 0 0 0 0 0 0
45309- 0 0 0 0 0 0 0 0 0 0 0 0
45310- 0 0 0 0 0 0 0 0 0 0 0 0
45311- 0 0 0 0 0 0 0 0 0 0 0 0
45312- 0 0 0 0 0 0 0 0 0 14 14 14
45313- 42 42 42 94 94 94 14 14 14 101 101 101
45314-128 128 128 2 2 6 18 18 18 116 116 116
45315-118 98 46 121 92 8 121 92 8 98 78 10
45316-162 162 162 106 106 106 2 2 6 2 2 6
45317- 2 2 6 195 195 195 195 195 195 6 6 6
45318- 2 2 6 2 2 6 2 2 6 2 2 6
45319- 74 74 74 62 62 62 22 22 22 6 6 6
45320- 0 0 0 0 0 0 0 0 0 0 0 0
45321- 0 0 0 0 0 0 0 0 0 0 0 0
45322- 0 0 0 0 0 0 0 0 0 0 0 0
45323- 0 0 0 0 0 0 0 0 0 0 0 0
45324- 0 0 0 0 0 0 0 0 0 0 0 0
45325- 0 0 0 0 0 0 0 0 0 0 0 0
45326- 0 0 0 0 0 0 0 0 1 0 0 1
45327- 0 0 1 0 0 0 0 0 1 0 0 0
45328- 0 0 0 0 0 0 0 0 0 0 0 0
45329- 0 0 0 0 0 0 0 0 0 0 0 0
45330- 0 0 0 0 0 0 0 0 0 0 0 0
45331- 0 0 0 0 0 0 0 0 0 0 0 0
45332- 0 0 0 0 0 0 0 0 0 10 10 10
45333- 38 38 38 90 90 90 14 14 14 58 58 58
45334-210 210 210 26 26 26 54 38 6 154 114 10
45335-226 170 11 236 186 11 225 175 15 184 144 12
45336-215 174 15 175 146 61 37 26 9 2 2 6
45337- 70 70 70 246 246 246 138 138 138 2 2 6
45338- 2 2 6 2 2 6 2 2 6 2 2 6
45339- 70 70 70 66 66 66 26 26 26 6 6 6
45340- 0 0 0 0 0 0 0 0 0 0 0 0
45341- 0 0 0 0 0 0 0 0 0 0 0 0
45342- 0 0 0 0 0 0 0 0 0 0 0 0
45343- 0 0 0 0 0 0 0 0 0 0 0 0
45344- 0 0 0 0 0 0 0 0 0 0 0 0
45345- 0 0 0 0 0 0 0 0 0 0 0 0
45346- 0 0 0 0 0 0 0 0 0 0 0 0
45347- 0 0 0 0 0 0 0 0 0 0 0 0
45348- 0 0 0 0 0 0 0 0 0 0 0 0
45349- 0 0 0 0 0 0 0 0 0 0 0 0
45350- 0 0 0 0 0 0 0 0 0 0 0 0
45351- 0 0 0 0 0 0 0 0 0 0 0 0
45352- 0 0 0 0 0 0 0 0 0 10 10 10
45353- 38 38 38 86 86 86 14 14 14 10 10 10
45354-195 195 195 188 164 115 192 133 9 225 175 15
45355-239 182 13 234 190 10 232 195 16 232 200 30
45356-245 207 45 241 208 19 232 195 16 184 144 12
45357-218 194 134 211 206 186 42 42 42 2 2 6
45358- 2 2 6 2 2 6 2 2 6 2 2 6
45359- 50 50 50 74 74 74 30 30 30 6 6 6
45360- 0 0 0 0 0 0 0 0 0 0 0 0
45361- 0 0 0 0 0 0 0 0 0 0 0 0
45362- 0 0 0 0 0 0 0 0 0 0 0 0
45363- 0 0 0 0 0 0 0 0 0 0 0 0
45364- 0 0 0 0 0 0 0 0 0 0 0 0
45365- 0 0 0 0 0 0 0 0 0 0 0 0
45366- 0 0 0 0 0 0 0 0 0 0 0 0
45367- 0 0 0 0 0 0 0 0 0 0 0 0
45368- 0 0 0 0 0 0 0 0 0 0 0 0
45369- 0 0 0 0 0 0 0 0 0 0 0 0
45370- 0 0 0 0 0 0 0 0 0 0 0 0
45371- 0 0 0 0 0 0 0 0 0 0 0 0
45372- 0 0 0 0 0 0 0 0 0 10 10 10
45373- 34 34 34 86 86 86 14 14 14 2 2 6
45374-121 87 25 192 133 9 219 162 10 239 182 13
45375-236 186 11 232 195 16 241 208 19 244 214 54
45376-246 218 60 246 218 38 246 215 20 241 208 19
45377-241 208 19 226 184 13 121 87 25 2 2 6
45378- 2 2 6 2 2 6 2 2 6 2 2 6
45379- 50 50 50 82 82 82 34 34 34 10 10 10
45380- 0 0 0 0 0 0 0 0 0 0 0 0
45381- 0 0 0 0 0 0 0 0 0 0 0 0
45382- 0 0 0 0 0 0 0 0 0 0 0 0
45383- 0 0 0 0 0 0 0 0 0 0 0 0
45384- 0 0 0 0 0 0 0 0 0 0 0 0
45385- 0 0 0 0 0 0 0 0 0 0 0 0
45386- 0 0 0 0 0 0 0 0 0 0 0 0
45387- 0 0 0 0 0 0 0 0 0 0 0 0
45388- 0 0 0 0 0 0 0 0 0 0 0 0
45389- 0 0 0 0 0 0 0 0 0 0 0 0
45390- 0 0 0 0 0 0 0 0 0 0 0 0
45391- 0 0 0 0 0 0 0 0 0 0 0 0
45392- 0 0 0 0 0 0 0 0 0 10 10 10
45393- 34 34 34 82 82 82 30 30 30 61 42 6
45394-180 123 7 206 145 10 230 174 11 239 182 13
45395-234 190 10 238 202 15 241 208 19 246 218 74
45396-246 218 38 246 215 20 246 215 20 246 215 20
45397-226 184 13 215 174 15 184 144 12 6 6 6
45398- 2 2 6 2 2 6 2 2 6 2 2 6
45399- 26 26 26 94 94 94 42 42 42 14 14 14
45400- 0 0 0 0 0 0 0 0 0 0 0 0
45401- 0 0 0 0 0 0 0 0 0 0 0 0
45402- 0 0 0 0 0 0 0 0 0 0 0 0
45403- 0 0 0 0 0 0 0 0 0 0 0 0
45404- 0 0 0 0 0 0 0 0 0 0 0 0
45405- 0 0 0 0 0 0 0 0 0 0 0 0
45406- 0 0 0 0 0 0 0 0 0 0 0 0
45407- 0 0 0 0 0 0 0 0 0 0 0 0
45408- 0 0 0 0 0 0 0 0 0 0 0 0
45409- 0 0 0 0 0 0 0 0 0 0 0 0
45410- 0 0 0 0 0 0 0 0 0 0 0 0
45411- 0 0 0 0 0 0 0 0 0 0 0 0
45412- 0 0 0 0 0 0 0 0 0 10 10 10
45413- 30 30 30 78 78 78 50 50 50 104 69 6
45414-192 133 9 216 158 10 236 178 12 236 186 11
45415-232 195 16 241 208 19 244 214 54 245 215 43
45416-246 215 20 246 215 20 241 208 19 198 155 10
45417-200 144 11 216 158 10 156 118 10 2 2 6
45418- 2 2 6 2 2 6 2 2 6 2 2 6
45419- 6 6 6 90 90 90 54 54 54 18 18 18
45420- 6 6 6 0 0 0 0 0 0 0 0 0
45421- 0 0 0 0 0 0 0 0 0 0 0 0
45422- 0 0 0 0 0 0 0 0 0 0 0 0
45423- 0 0 0 0 0 0 0 0 0 0 0 0
45424- 0 0 0 0 0 0 0 0 0 0 0 0
45425- 0 0 0 0 0 0 0 0 0 0 0 0
45426- 0 0 0 0 0 0 0 0 0 0 0 0
45427- 0 0 0 0 0 0 0 0 0 0 0 0
45428- 0 0 0 0 0 0 0 0 0 0 0 0
45429- 0 0 0 0 0 0 0 0 0 0 0 0
45430- 0 0 0 0 0 0 0 0 0 0 0 0
45431- 0 0 0 0 0 0 0 0 0 0 0 0
45432- 0 0 0 0 0 0 0 0 0 10 10 10
45433- 30 30 30 78 78 78 46 46 46 22 22 22
45434-137 92 6 210 162 10 239 182 13 238 190 10
45435-238 202 15 241 208 19 246 215 20 246 215 20
45436-241 208 19 203 166 17 185 133 11 210 150 10
45437-216 158 10 210 150 10 102 78 10 2 2 6
45438- 6 6 6 54 54 54 14 14 14 2 2 6
45439- 2 2 6 62 62 62 74 74 74 30 30 30
45440- 10 10 10 0 0 0 0 0 0 0 0 0
45441- 0 0 0 0 0 0 0 0 0 0 0 0
45442- 0 0 0 0 0 0 0 0 0 0 0 0
45443- 0 0 0 0 0 0 0 0 0 0 0 0
45444- 0 0 0 0 0 0 0 0 0 0 0 0
45445- 0 0 0 0 0 0 0 0 0 0 0 0
45446- 0 0 0 0 0 0 0 0 0 0 0 0
45447- 0 0 0 0 0 0 0 0 0 0 0 0
45448- 0 0 0 0 0 0 0 0 0 0 0 0
45449- 0 0 0 0 0 0 0 0 0 0 0 0
45450- 0 0 0 0 0 0 0 0 0 0 0 0
45451- 0 0 0 0 0 0 0 0 0 0 0 0
45452- 0 0 0 0 0 0 0 0 0 10 10 10
45453- 34 34 34 78 78 78 50 50 50 6 6 6
45454- 94 70 30 139 102 15 190 146 13 226 184 13
45455-232 200 30 232 195 16 215 174 15 190 146 13
45456-168 122 10 192 133 9 210 150 10 213 154 11
45457-202 150 34 182 157 106 101 98 89 2 2 6
45458- 2 2 6 78 78 78 116 116 116 58 58 58
45459- 2 2 6 22 22 22 90 90 90 46 46 46
45460- 18 18 18 6 6 6 0 0 0 0 0 0
45461- 0 0 0 0 0 0 0 0 0 0 0 0
45462- 0 0 0 0 0 0 0 0 0 0 0 0
45463- 0 0 0 0 0 0 0 0 0 0 0 0
45464- 0 0 0 0 0 0 0 0 0 0 0 0
45465- 0 0 0 0 0 0 0 0 0 0 0 0
45466- 0 0 0 0 0 0 0 0 0 0 0 0
45467- 0 0 0 0 0 0 0 0 0 0 0 0
45468- 0 0 0 0 0 0 0 0 0 0 0 0
45469- 0 0 0 0 0 0 0 0 0 0 0 0
45470- 0 0 0 0 0 0 0 0 0 0 0 0
45471- 0 0 0 0 0 0 0 0 0 0 0 0
45472- 0 0 0 0 0 0 0 0 0 10 10 10
45473- 38 38 38 86 86 86 50 50 50 6 6 6
45474-128 128 128 174 154 114 156 107 11 168 122 10
45475-198 155 10 184 144 12 197 138 11 200 144 11
45476-206 145 10 206 145 10 197 138 11 188 164 115
45477-195 195 195 198 198 198 174 174 174 14 14 14
45478- 2 2 6 22 22 22 116 116 116 116 116 116
45479- 22 22 22 2 2 6 74 74 74 70 70 70
45480- 30 30 30 10 10 10 0 0 0 0 0 0
45481- 0 0 0 0 0 0 0 0 0 0 0 0
45482- 0 0 0 0 0 0 0 0 0 0 0 0
45483- 0 0 0 0 0 0 0 0 0 0 0 0
45484- 0 0 0 0 0 0 0 0 0 0 0 0
45485- 0 0 0 0 0 0 0 0 0 0 0 0
45486- 0 0 0 0 0 0 0 0 0 0 0 0
45487- 0 0 0 0 0 0 0 0 0 0 0 0
45488- 0 0 0 0 0 0 0 0 0 0 0 0
45489- 0 0 0 0 0 0 0 0 0 0 0 0
45490- 0 0 0 0 0 0 0 0 0 0 0 0
45491- 0 0 0 0 0 0 0 0 0 0 0 0
45492- 0 0 0 0 0 0 6 6 6 18 18 18
45493- 50 50 50 101 101 101 26 26 26 10 10 10
45494-138 138 138 190 190 190 174 154 114 156 107 11
45495-197 138 11 200 144 11 197 138 11 192 133 9
45496-180 123 7 190 142 34 190 178 144 187 187 187
45497-202 202 202 221 221 221 214 214 214 66 66 66
45498- 2 2 6 2 2 6 50 50 50 62 62 62
45499- 6 6 6 2 2 6 10 10 10 90 90 90
45500- 50 50 50 18 18 18 6 6 6 0 0 0
45501- 0 0 0 0 0 0 0 0 0 0 0 0
45502- 0 0 0 0 0 0 0 0 0 0 0 0
45503- 0 0 0 0 0 0 0 0 0 0 0 0
45504- 0 0 0 0 0 0 0 0 0 0 0 0
45505- 0 0 0 0 0 0 0 0 0 0 0 0
45506- 0 0 0 0 0 0 0 0 0 0 0 0
45507- 0 0 0 0 0 0 0 0 0 0 0 0
45508- 0 0 0 0 0 0 0 0 0 0 0 0
45509- 0 0 0 0 0 0 0 0 0 0 0 0
45510- 0 0 0 0 0 0 0 0 0 0 0 0
45511- 0 0 0 0 0 0 0 0 0 0 0 0
45512- 0 0 0 0 0 0 10 10 10 34 34 34
45513- 74 74 74 74 74 74 2 2 6 6 6 6
45514-144 144 144 198 198 198 190 190 190 178 166 146
45515-154 121 60 156 107 11 156 107 11 168 124 44
45516-174 154 114 187 187 187 190 190 190 210 210 210
45517-246 246 246 253 253 253 253 253 253 182 182 182
45518- 6 6 6 2 2 6 2 2 6 2 2 6
45519- 2 2 6 2 2 6 2 2 6 62 62 62
45520- 74 74 74 34 34 34 14 14 14 0 0 0
45521- 0 0 0 0 0 0 0 0 0 0 0 0
45522- 0 0 0 0 0 0 0 0 0 0 0 0
45523- 0 0 0 0 0 0 0 0 0 0 0 0
45524- 0 0 0 0 0 0 0 0 0 0 0 0
45525- 0 0 0 0 0 0 0 0 0 0 0 0
45526- 0 0 0 0 0 0 0 0 0 0 0 0
45527- 0 0 0 0 0 0 0 0 0 0 0 0
45528- 0 0 0 0 0 0 0 0 0 0 0 0
45529- 0 0 0 0 0 0 0 0 0 0 0 0
45530- 0 0 0 0 0 0 0 0 0 0 0 0
45531- 0 0 0 0 0 0 0 0 0 0 0 0
45532- 0 0 0 10 10 10 22 22 22 54 54 54
45533- 94 94 94 18 18 18 2 2 6 46 46 46
45534-234 234 234 221 221 221 190 190 190 190 190 190
45535-190 190 190 187 187 187 187 187 187 190 190 190
45536-190 190 190 195 195 195 214 214 214 242 242 242
45537-253 253 253 253 253 253 253 253 253 253 253 253
45538- 82 82 82 2 2 6 2 2 6 2 2 6
45539- 2 2 6 2 2 6 2 2 6 14 14 14
45540- 86 86 86 54 54 54 22 22 22 6 6 6
45541- 0 0 0 0 0 0 0 0 0 0 0 0
45542- 0 0 0 0 0 0 0 0 0 0 0 0
45543- 0 0 0 0 0 0 0 0 0 0 0 0
45544- 0 0 0 0 0 0 0 0 0 0 0 0
45545- 0 0 0 0 0 0 0 0 0 0 0 0
45546- 0 0 0 0 0 0 0 0 0 0 0 0
45547- 0 0 0 0 0 0 0 0 0 0 0 0
45548- 0 0 0 0 0 0 0 0 0 0 0 0
45549- 0 0 0 0 0 0 0 0 0 0 0 0
45550- 0 0 0 0 0 0 0 0 0 0 0 0
45551- 0 0 0 0 0 0 0 0 0 0 0 0
45552- 6 6 6 18 18 18 46 46 46 90 90 90
45553- 46 46 46 18 18 18 6 6 6 182 182 182
45554-253 253 253 246 246 246 206 206 206 190 190 190
45555-190 190 190 190 190 190 190 190 190 190 190 190
45556-206 206 206 231 231 231 250 250 250 253 253 253
45557-253 253 253 253 253 253 253 253 253 253 253 253
45558-202 202 202 14 14 14 2 2 6 2 2 6
45559- 2 2 6 2 2 6 2 2 6 2 2 6
45560- 42 42 42 86 86 86 42 42 42 18 18 18
45561- 6 6 6 0 0 0 0 0 0 0 0 0
45562- 0 0 0 0 0 0 0 0 0 0 0 0
45563- 0 0 0 0 0 0 0 0 0 0 0 0
45564- 0 0 0 0 0 0 0 0 0 0 0 0
45565- 0 0 0 0 0 0 0 0 0 0 0 0
45566- 0 0 0 0 0 0 0 0 0 0 0 0
45567- 0 0 0 0 0 0 0 0 0 0 0 0
45568- 0 0 0 0 0 0 0 0 0 0 0 0
45569- 0 0 0 0 0 0 0 0 0 0 0 0
45570- 0 0 0 0 0 0 0 0 0 0 0 0
45571- 0 0 0 0 0 0 0 0 0 6 6 6
45572- 14 14 14 38 38 38 74 74 74 66 66 66
45573- 2 2 6 6 6 6 90 90 90 250 250 250
45574-253 253 253 253 253 253 238 238 238 198 198 198
45575-190 190 190 190 190 190 195 195 195 221 221 221
45576-246 246 246 253 253 253 253 253 253 253 253 253
45577-253 253 253 253 253 253 253 253 253 253 253 253
45578-253 253 253 82 82 82 2 2 6 2 2 6
45579- 2 2 6 2 2 6 2 2 6 2 2 6
45580- 2 2 6 78 78 78 70 70 70 34 34 34
45581- 14 14 14 6 6 6 0 0 0 0 0 0
45582- 0 0 0 0 0 0 0 0 0 0 0 0
45583- 0 0 0 0 0 0 0 0 0 0 0 0
45584- 0 0 0 0 0 0 0 0 0 0 0 0
45585- 0 0 0 0 0 0 0 0 0 0 0 0
45586- 0 0 0 0 0 0 0 0 0 0 0 0
45587- 0 0 0 0 0 0 0 0 0 0 0 0
45588- 0 0 0 0 0 0 0 0 0 0 0 0
45589- 0 0 0 0 0 0 0 0 0 0 0 0
45590- 0 0 0 0 0 0 0 0 0 0 0 0
45591- 0 0 0 0 0 0 0 0 0 14 14 14
45592- 34 34 34 66 66 66 78 78 78 6 6 6
45593- 2 2 6 18 18 18 218 218 218 253 253 253
45594-253 253 253 253 253 253 253 253 253 246 246 246
45595-226 226 226 231 231 231 246 246 246 253 253 253
45596-253 253 253 253 253 253 253 253 253 253 253 253
45597-253 253 253 253 253 253 253 253 253 253 253 253
45598-253 253 253 178 178 178 2 2 6 2 2 6
45599- 2 2 6 2 2 6 2 2 6 2 2 6
45600- 2 2 6 18 18 18 90 90 90 62 62 62
45601- 30 30 30 10 10 10 0 0 0 0 0 0
45602- 0 0 0 0 0 0 0 0 0 0 0 0
45603- 0 0 0 0 0 0 0 0 0 0 0 0
45604- 0 0 0 0 0 0 0 0 0 0 0 0
45605- 0 0 0 0 0 0 0 0 0 0 0 0
45606- 0 0 0 0 0 0 0 0 0 0 0 0
45607- 0 0 0 0 0 0 0 0 0 0 0 0
45608- 0 0 0 0 0 0 0 0 0 0 0 0
45609- 0 0 0 0 0 0 0 0 0 0 0 0
45610- 0 0 0 0 0 0 0 0 0 0 0 0
45611- 0 0 0 0 0 0 10 10 10 26 26 26
45612- 58 58 58 90 90 90 18 18 18 2 2 6
45613- 2 2 6 110 110 110 253 253 253 253 253 253
45614-253 253 253 253 253 253 253 253 253 253 253 253
45615-250 250 250 253 253 253 253 253 253 253 253 253
45616-253 253 253 253 253 253 253 253 253 253 253 253
45617-253 253 253 253 253 253 253 253 253 253 253 253
45618-253 253 253 231 231 231 18 18 18 2 2 6
45619- 2 2 6 2 2 6 2 2 6 2 2 6
45620- 2 2 6 2 2 6 18 18 18 94 94 94
45621- 54 54 54 26 26 26 10 10 10 0 0 0
45622- 0 0 0 0 0 0 0 0 0 0 0 0
45623- 0 0 0 0 0 0 0 0 0 0 0 0
45624- 0 0 0 0 0 0 0 0 0 0 0 0
45625- 0 0 0 0 0 0 0 0 0 0 0 0
45626- 0 0 0 0 0 0 0 0 0 0 0 0
45627- 0 0 0 0 0 0 0 0 0 0 0 0
45628- 0 0 0 0 0 0 0 0 0 0 0 0
45629- 0 0 0 0 0 0 0 0 0 0 0 0
45630- 0 0 0 0 0 0 0 0 0 0 0 0
45631- 0 0 0 6 6 6 22 22 22 50 50 50
45632- 90 90 90 26 26 26 2 2 6 2 2 6
45633- 14 14 14 195 195 195 250 250 250 253 253 253
45634-253 253 253 253 253 253 253 253 253 253 253 253
45635-253 253 253 253 253 253 253 253 253 253 253 253
45636-253 253 253 253 253 253 253 253 253 253 253 253
45637-253 253 253 253 253 253 253 253 253 253 253 253
45638-250 250 250 242 242 242 54 54 54 2 2 6
45639- 2 2 6 2 2 6 2 2 6 2 2 6
45640- 2 2 6 2 2 6 2 2 6 38 38 38
45641- 86 86 86 50 50 50 22 22 22 6 6 6
45642- 0 0 0 0 0 0 0 0 0 0 0 0
45643- 0 0 0 0 0 0 0 0 0 0 0 0
45644- 0 0 0 0 0 0 0 0 0 0 0 0
45645- 0 0 0 0 0 0 0 0 0 0 0 0
45646- 0 0 0 0 0 0 0 0 0 0 0 0
45647- 0 0 0 0 0 0 0 0 0 0 0 0
45648- 0 0 0 0 0 0 0 0 0 0 0 0
45649- 0 0 0 0 0 0 0 0 0 0 0 0
45650- 0 0 0 0 0 0 0 0 0 0 0 0
45651- 6 6 6 14 14 14 38 38 38 82 82 82
45652- 34 34 34 2 2 6 2 2 6 2 2 6
45653- 42 42 42 195 195 195 246 246 246 253 253 253
45654-253 253 253 253 253 253 253 253 253 250 250 250
45655-242 242 242 242 242 242 250 250 250 253 253 253
45656-253 253 253 253 253 253 253 253 253 253 253 253
45657-253 253 253 250 250 250 246 246 246 238 238 238
45658-226 226 226 231 231 231 101 101 101 6 6 6
45659- 2 2 6 2 2 6 2 2 6 2 2 6
45660- 2 2 6 2 2 6 2 2 6 2 2 6
45661- 38 38 38 82 82 82 42 42 42 14 14 14
45662- 6 6 6 0 0 0 0 0 0 0 0 0
45663- 0 0 0 0 0 0 0 0 0 0 0 0
45664- 0 0 0 0 0 0 0 0 0 0 0 0
45665- 0 0 0 0 0 0 0 0 0 0 0 0
45666- 0 0 0 0 0 0 0 0 0 0 0 0
45667- 0 0 0 0 0 0 0 0 0 0 0 0
45668- 0 0 0 0 0 0 0 0 0 0 0 0
45669- 0 0 0 0 0 0 0 0 0 0 0 0
45670- 0 0 0 0 0 0 0 0 0 0 0 0
45671- 10 10 10 26 26 26 62 62 62 66 66 66
45672- 2 2 6 2 2 6 2 2 6 6 6 6
45673- 70 70 70 170 170 170 206 206 206 234 234 234
45674-246 246 246 250 250 250 250 250 250 238 238 238
45675-226 226 226 231 231 231 238 238 238 250 250 250
45676-250 250 250 250 250 250 246 246 246 231 231 231
45677-214 214 214 206 206 206 202 202 202 202 202 202
45678-198 198 198 202 202 202 182 182 182 18 18 18
45679- 2 2 6 2 2 6 2 2 6 2 2 6
45680- 2 2 6 2 2 6 2 2 6 2 2 6
45681- 2 2 6 62 62 62 66 66 66 30 30 30
45682- 10 10 10 0 0 0 0 0 0 0 0 0
45683- 0 0 0 0 0 0 0 0 0 0 0 0
45684- 0 0 0 0 0 0 0 0 0 0 0 0
45685- 0 0 0 0 0 0 0 0 0 0 0 0
45686- 0 0 0 0 0 0 0 0 0 0 0 0
45687- 0 0 0 0 0 0 0 0 0 0 0 0
45688- 0 0 0 0 0 0 0 0 0 0 0 0
45689- 0 0 0 0 0 0 0 0 0 0 0 0
45690- 0 0 0 0 0 0 0 0 0 0 0 0
45691- 14 14 14 42 42 42 82 82 82 18 18 18
45692- 2 2 6 2 2 6 2 2 6 10 10 10
45693- 94 94 94 182 182 182 218 218 218 242 242 242
45694-250 250 250 253 253 253 253 253 253 250 250 250
45695-234 234 234 253 253 253 253 253 253 253 253 253
45696-253 253 253 253 253 253 253 253 253 246 246 246
45697-238 238 238 226 226 226 210 210 210 202 202 202
45698-195 195 195 195 195 195 210 210 210 158 158 158
45699- 6 6 6 14 14 14 50 50 50 14 14 14
45700- 2 2 6 2 2 6 2 2 6 2 2 6
45701- 2 2 6 6 6 6 86 86 86 46 46 46
45702- 18 18 18 6 6 6 0 0 0 0 0 0
45703- 0 0 0 0 0 0 0 0 0 0 0 0
45704- 0 0 0 0 0 0 0 0 0 0 0 0
45705- 0 0 0 0 0 0 0 0 0 0 0 0
45706- 0 0 0 0 0 0 0 0 0 0 0 0
45707- 0 0 0 0 0 0 0 0 0 0 0 0
45708- 0 0 0 0 0 0 0 0 0 0 0 0
45709- 0 0 0 0 0 0 0 0 0 0 0 0
45710- 0 0 0 0 0 0 0 0 0 6 6 6
45711- 22 22 22 54 54 54 70 70 70 2 2 6
45712- 2 2 6 10 10 10 2 2 6 22 22 22
45713-166 166 166 231 231 231 250 250 250 253 253 253
45714-253 253 253 253 253 253 253 253 253 250 250 250
45715-242 242 242 253 253 253 253 253 253 253 253 253
45716-253 253 253 253 253 253 253 253 253 253 253 253
45717-253 253 253 253 253 253 253 253 253 246 246 246
45718-231 231 231 206 206 206 198 198 198 226 226 226
45719- 94 94 94 2 2 6 6 6 6 38 38 38
45720- 30 30 30 2 2 6 2 2 6 2 2 6
45721- 2 2 6 2 2 6 62 62 62 66 66 66
45722- 26 26 26 10 10 10 0 0 0 0 0 0
45723- 0 0 0 0 0 0 0 0 0 0 0 0
45724- 0 0 0 0 0 0 0 0 0 0 0 0
45725- 0 0 0 0 0 0 0 0 0 0 0 0
45726- 0 0 0 0 0 0 0 0 0 0 0 0
45727- 0 0 0 0 0 0 0 0 0 0 0 0
45728- 0 0 0 0 0 0 0 0 0 0 0 0
45729- 0 0 0 0 0 0 0 0 0 0 0 0
45730- 0 0 0 0 0 0 0 0 0 10 10 10
45731- 30 30 30 74 74 74 50 50 50 2 2 6
45732- 26 26 26 26 26 26 2 2 6 106 106 106
45733-238 238 238 253 253 253 253 253 253 253 253 253
45734-253 253 253 253 253 253 253 253 253 253 253 253
45735-253 253 253 253 253 253 253 253 253 253 253 253
45736-253 253 253 253 253 253 253 253 253 253 253 253
45737-253 253 253 253 253 253 253 253 253 253 253 253
45738-253 253 253 246 246 246 218 218 218 202 202 202
45739-210 210 210 14 14 14 2 2 6 2 2 6
45740- 30 30 30 22 22 22 2 2 6 2 2 6
45741- 2 2 6 2 2 6 18 18 18 86 86 86
45742- 42 42 42 14 14 14 0 0 0 0 0 0
45743- 0 0 0 0 0 0 0 0 0 0 0 0
45744- 0 0 0 0 0 0 0 0 0 0 0 0
45745- 0 0 0 0 0 0 0 0 0 0 0 0
45746- 0 0 0 0 0 0 0 0 0 0 0 0
45747- 0 0 0 0 0 0 0 0 0 0 0 0
45748- 0 0 0 0 0 0 0 0 0 0 0 0
45749- 0 0 0 0 0 0 0 0 0 0 0 0
45750- 0 0 0 0 0 0 0 0 0 14 14 14
45751- 42 42 42 90 90 90 22 22 22 2 2 6
45752- 42 42 42 2 2 6 18 18 18 218 218 218
45753-253 253 253 253 253 253 253 253 253 253 253 253
45754-253 253 253 253 253 253 253 253 253 253 253 253
45755-253 253 253 253 253 253 253 253 253 253 253 253
45756-253 253 253 253 253 253 253 253 253 253 253 253
45757-253 253 253 253 253 253 253 253 253 253 253 253
45758-253 253 253 253 253 253 250 250 250 221 221 221
45759-218 218 218 101 101 101 2 2 6 14 14 14
45760- 18 18 18 38 38 38 10 10 10 2 2 6
45761- 2 2 6 2 2 6 2 2 6 78 78 78
45762- 58 58 58 22 22 22 6 6 6 0 0 0
45763- 0 0 0 0 0 0 0 0 0 0 0 0
45764- 0 0 0 0 0 0 0 0 0 0 0 0
45765- 0 0 0 0 0 0 0 0 0 0 0 0
45766- 0 0 0 0 0 0 0 0 0 0 0 0
45767- 0 0 0 0 0 0 0 0 0 0 0 0
45768- 0 0 0 0 0 0 0 0 0 0 0 0
45769- 0 0 0 0 0 0 0 0 0 0 0 0
45770- 0 0 0 0 0 0 6 6 6 18 18 18
45771- 54 54 54 82 82 82 2 2 6 26 26 26
45772- 22 22 22 2 2 6 123 123 123 253 253 253
45773-253 253 253 253 253 253 253 253 253 253 253 253
45774-253 253 253 253 253 253 253 253 253 253 253 253
45775-253 253 253 253 253 253 253 253 253 253 253 253
45776-253 253 253 253 253 253 253 253 253 253 253 253
45777-253 253 253 253 253 253 253 253 253 253 253 253
45778-253 253 253 253 253 253 253 253 253 250 250 250
45779-238 238 238 198 198 198 6 6 6 38 38 38
45780- 58 58 58 26 26 26 38 38 38 2 2 6
45781- 2 2 6 2 2 6 2 2 6 46 46 46
45782- 78 78 78 30 30 30 10 10 10 0 0 0
45783- 0 0 0 0 0 0 0 0 0 0 0 0
45784- 0 0 0 0 0 0 0 0 0 0 0 0
45785- 0 0 0 0 0 0 0 0 0 0 0 0
45786- 0 0 0 0 0 0 0 0 0 0 0 0
45787- 0 0 0 0 0 0 0 0 0 0 0 0
45788- 0 0 0 0 0 0 0 0 0 0 0 0
45789- 0 0 0 0 0 0 0 0 0 0 0 0
45790- 0 0 0 0 0 0 10 10 10 30 30 30
45791- 74 74 74 58 58 58 2 2 6 42 42 42
45792- 2 2 6 22 22 22 231 231 231 253 253 253
45793-253 253 253 253 253 253 253 253 253 253 253 253
45794-253 253 253 253 253 253 253 253 253 250 250 250
45795-253 253 253 253 253 253 253 253 253 253 253 253
45796-253 253 253 253 253 253 253 253 253 253 253 253
45797-253 253 253 253 253 253 253 253 253 253 253 253
45798-253 253 253 253 253 253 253 253 253 253 253 253
45799-253 253 253 246 246 246 46 46 46 38 38 38
45800- 42 42 42 14 14 14 38 38 38 14 14 14
45801- 2 2 6 2 2 6 2 2 6 6 6 6
45802- 86 86 86 46 46 46 14 14 14 0 0 0
45803- 0 0 0 0 0 0 0 0 0 0 0 0
45804- 0 0 0 0 0 0 0 0 0 0 0 0
45805- 0 0 0 0 0 0 0 0 0 0 0 0
45806- 0 0 0 0 0 0 0 0 0 0 0 0
45807- 0 0 0 0 0 0 0 0 0 0 0 0
45808- 0 0 0 0 0 0 0 0 0 0 0 0
45809- 0 0 0 0 0 0 0 0 0 0 0 0
45810- 0 0 0 6 6 6 14 14 14 42 42 42
45811- 90 90 90 18 18 18 18 18 18 26 26 26
45812- 2 2 6 116 116 116 253 253 253 253 253 253
45813-253 253 253 253 253 253 253 253 253 253 253 253
45814-253 253 253 253 253 253 250 250 250 238 238 238
45815-253 253 253 253 253 253 253 253 253 253 253 253
45816-253 253 253 253 253 253 253 253 253 253 253 253
45817-253 253 253 253 253 253 253 253 253 253 253 253
45818-253 253 253 253 253 253 253 253 253 253 253 253
45819-253 253 253 253 253 253 94 94 94 6 6 6
45820- 2 2 6 2 2 6 10 10 10 34 34 34
45821- 2 2 6 2 2 6 2 2 6 2 2 6
45822- 74 74 74 58 58 58 22 22 22 6 6 6
45823- 0 0 0 0 0 0 0 0 0 0 0 0
45824- 0 0 0 0 0 0 0 0 0 0 0 0
45825- 0 0 0 0 0 0 0 0 0 0 0 0
45826- 0 0 0 0 0 0 0 0 0 0 0 0
45827- 0 0 0 0 0 0 0 0 0 0 0 0
45828- 0 0 0 0 0 0 0 0 0 0 0 0
45829- 0 0 0 0 0 0 0 0 0 0 0 0
45830- 0 0 0 10 10 10 26 26 26 66 66 66
45831- 82 82 82 2 2 6 38 38 38 6 6 6
45832- 14 14 14 210 210 210 253 253 253 253 253 253
45833-253 253 253 253 253 253 253 253 253 253 253 253
45834-253 253 253 253 253 253 246 246 246 242 242 242
45835-253 253 253 253 253 253 253 253 253 253 253 253
45836-253 253 253 253 253 253 253 253 253 253 253 253
45837-253 253 253 253 253 253 253 253 253 253 253 253
45838-253 253 253 253 253 253 253 253 253 253 253 253
45839-253 253 253 253 253 253 144 144 144 2 2 6
45840- 2 2 6 2 2 6 2 2 6 46 46 46
45841- 2 2 6 2 2 6 2 2 6 2 2 6
45842- 42 42 42 74 74 74 30 30 30 10 10 10
45843- 0 0 0 0 0 0 0 0 0 0 0 0
45844- 0 0 0 0 0 0 0 0 0 0 0 0
45845- 0 0 0 0 0 0 0 0 0 0 0 0
45846- 0 0 0 0 0 0 0 0 0 0 0 0
45847- 0 0 0 0 0 0 0 0 0 0 0 0
45848- 0 0 0 0 0 0 0 0 0 0 0 0
45849- 0 0 0 0 0 0 0 0 0 0 0 0
45850- 6 6 6 14 14 14 42 42 42 90 90 90
45851- 26 26 26 6 6 6 42 42 42 2 2 6
45852- 74 74 74 250 250 250 253 253 253 253 253 253
45853-253 253 253 253 253 253 253 253 253 253 253 253
45854-253 253 253 253 253 253 242 242 242 242 242 242
45855-253 253 253 253 253 253 253 253 253 253 253 253
45856-253 253 253 253 253 253 253 253 253 253 253 253
45857-253 253 253 253 253 253 253 253 253 253 253 253
45858-253 253 253 253 253 253 253 253 253 253 253 253
45859-253 253 253 253 253 253 182 182 182 2 2 6
45860- 2 2 6 2 2 6 2 2 6 46 46 46
45861- 2 2 6 2 2 6 2 2 6 2 2 6
45862- 10 10 10 86 86 86 38 38 38 10 10 10
45863- 0 0 0 0 0 0 0 0 0 0 0 0
45864- 0 0 0 0 0 0 0 0 0 0 0 0
45865- 0 0 0 0 0 0 0 0 0 0 0 0
45866- 0 0 0 0 0 0 0 0 0 0 0 0
45867- 0 0 0 0 0 0 0 0 0 0 0 0
45868- 0 0 0 0 0 0 0 0 0 0 0 0
45869- 0 0 0 0 0 0 0 0 0 0 0 0
45870- 10 10 10 26 26 26 66 66 66 82 82 82
45871- 2 2 6 22 22 22 18 18 18 2 2 6
45872-149 149 149 253 253 253 253 253 253 253 253 253
45873-253 253 253 253 253 253 253 253 253 253 253 253
45874-253 253 253 253 253 253 234 234 234 242 242 242
45875-253 253 253 253 253 253 253 253 253 253 253 253
45876-253 253 253 253 253 253 253 253 253 253 253 253
45877-253 253 253 253 253 253 253 253 253 253 253 253
45878-253 253 253 253 253 253 253 253 253 253 253 253
45879-253 253 253 253 253 253 206 206 206 2 2 6
45880- 2 2 6 2 2 6 2 2 6 38 38 38
45881- 2 2 6 2 2 6 2 2 6 2 2 6
45882- 6 6 6 86 86 86 46 46 46 14 14 14
45883- 0 0 0 0 0 0 0 0 0 0 0 0
45884- 0 0 0 0 0 0 0 0 0 0 0 0
45885- 0 0 0 0 0 0 0 0 0 0 0 0
45886- 0 0 0 0 0 0 0 0 0 0 0 0
45887- 0 0 0 0 0 0 0 0 0 0 0 0
45888- 0 0 0 0 0 0 0 0 0 0 0 0
45889- 0 0 0 0 0 0 0 0 0 6 6 6
45890- 18 18 18 46 46 46 86 86 86 18 18 18
45891- 2 2 6 34 34 34 10 10 10 6 6 6
45892-210 210 210 253 253 253 253 253 253 253 253 253
45893-253 253 253 253 253 253 253 253 253 253 253 253
45894-253 253 253 253 253 253 234 234 234 242 242 242
45895-253 253 253 253 253 253 253 253 253 253 253 253
45896-253 253 253 253 253 253 253 253 253 253 253 253
45897-253 253 253 253 253 253 253 253 253 253 253 253
45898-253 253 253 253 253 253 253 253 253 253 253 253
45899-253 253 253 253 253 253 221 221 221 6 6 6
45900- 2 2 6 2 2 6 6 6 6 30 30 30
45901- 2 2 6 2 2 6 2 2 6 2 2 6
45902- 2 2 6 82 82 82 54 54 54 18 18 18
45903- 6 6 6 0 0 0 0 0 0 0 0 0
45904- 0 0 0 0 0 0 0 0 0 0 0 0
45905- 0 0 0 0 0 0 0 0 0 0 0 0
45906- 0 0 0 0 0 0 0 0 0 0 0 0
45907- 0 0 0 0 0 0 0 0 0 0 0 0
45908- 0 0 0 0 0 0 0 0 0 0 0 0
45909- 0 0 0 0 0 0 0 0 0 10 10 10
45910- 26 26 26 66 66 66 62 62 62 2 2 6
45911- 2 2 6 38 38 38 10 10 10 26 26 26
45912-238 238 238 253 253 253 253 253 253 253 253 253
45913-253 253 253 253 253 253 253 253 253 253 253 253
45914-253 253 253 253 253 253 231 231 231 238 238 238
45915-253 253 253 253 253 253 253 253 253 253 253 253
45916-253 253 253 253 253 253 253 253 253 253 253 253
45917-253 253 253 253 253 253 253 253 253 253 253 253
45918-253 253 253 253 253 253 253 253 253 253 253 253
45919-253 253 253 253 253 253 231 231 231 6 6 6
45920- 2 2 6 2 2 6 10 10 10 30 30 30
45921- 2 2 6 2 2 6 2 2 6 2 2 6
45922- 2 2 6 66 66 66 58 58 58 22 22 22
45923- 6 6 6 0 0 0 0 0 0 0 0 0
45924- 0 0 0 0 0 0 0 0 0 0 0 0
45925- 0 0 0 0 0 0 0 0 0 0 0 0
45926- 0 0 0 0 0 0 0 0 0 0 0 0
45927- 0 0 0 0 0 0 0 0 0 0 0 0
45928- 0 0 0 0 0 0 0 0 0 0 0 0
45929- 0 0 0 0 0 0 0 0 0 10 10 10
45930- 38 38 38 78 78 78 6 6 6 2 2 6
45931- 2 2 6 46 46 46 14 14 14 42 42 42
45932-246 246 246 253 253 253 253 253 253 253 253 253
45933-253 253 253 253 253 253 253 253 253 253 253 253
45934-253 253 253 253 253 253 231 231 231 242 242 242
45935-253 253 253 253 253 253 253 253 253 253 253 253
45936-253 253 253 253 253 253 253 253 253 253 253 253
45937-253 253 253 253 253 253 253 253 253 253 253 253
45938-253 253 253 253 253 253 253 253 253 253 253 253
45939-253 253 253 253 253 253 234 234 234 10 10 10
45940- 2 2 6 2 2 6 22 22 22 14 14 14
45941- 2 2 6 2 2 6 2 2 6 2 2 6
45942- 2 2 6 66 66 66 62 62 62 22 22 22
45943- 6 6 6 0 0 0 0 0 0 0 0 0
45944- 0 0 0 0 0 0 0 0 0 0 0 0
45945- 0 0 0 0 0 0 0 0 0 0 0 0
45946- 0 0 0 0 0 0 0 0 0 0 0 0
45947- 0 0 0 0 0 0 0 0 0 0 0 0
45948- 0 0 0 0 0 0 0 0 0 0 0 0
45949- 0 0 0 0 0 0 6 6 6 18 18 18
45950- 50 50 50 74 74 74 2 2 6 2 2 6
45951- 14 14 14 70 70 70 34 34 34 62 62 62
45952-250 250 250 253 253 253 253 253 253 253 253 253
45953-253 253 253 253 253 253 253 253 253 253 253 253
45954-253 253 253 253 253 253 231 231 231 246 246 246
45955-253 253 253 253 253 253 253 253 253 253 253 253
45956-253 253 253 253 253 253 253 253 253 253 253 253
45957-253 253 253 253 253 253 253 253 253 253 253 253
45958-253 253 253 253 253 253 253 253 253 253 253 253
45959-253 253 253 253 253 253 234 234 234 14 14 14
45960- 2 2 6 2 2 6 30 30 30 2 2 6
45961- 2 2 6 2 2 6 2 2 6 2 2 6
45962- 2 2 6 66 66 66 62 62 62 22 22 22
45963- 6 6 6 0 0 0 0 0 0 0 0 0
45964- 0 0 0 0 0 0 0 0 0 0 0 0
45965- 0 0 0 0 0 0 0 0 0 0 0 0
45966- 0 0 0 0 0 0 0 0 0 0 0 0
45967- 0 0 0 0 0 0 0 0 0 0 0 0
45968- 0 0 0 0 0 0 0 0 0 0 0 0
45969- 0 0 0 0 0 0 6 6 6 18 18 18
45970- 54 54 54 62 62 62 2 2 6 2 2 6
45971- 2 2 6 30 30 30 46 46 46 70 70 70
45972-250 250 250 253 253 253 253 253 253 253 253 253
45973-253 253 253 253 253 253 253 253 253 253 253 253
45974-253 253 253 253 253 253 231 231 231 246 246 246
45975-253 253 253 253 253 253 253 253 253 253 253 253
45976-253 253 253 253 253 253 253 253 253 253 253 253
45977-253 253 253 253 253 253 253 253 253 253 253 253
45978-253 253 253 253 253 253 253 253 253 253 253 253
45979-253 253 253 253 253 253 226 226 226 10 10 10
45980- 2 2 6 6 6 6 30 30 30 2 2 6
45981- 2 2 6 2 2 6 2 2 6 2 2 6
45982- 2 2 6 66 66 66 58 58 58 22 22 22
45983- 6 6 6 0 0 0 0 0 0 0 0 0
45984- 0 0 0 0 0 0 0 0 0 0 0 0
45985- 0 0 0 0 0 0 0 0 0 0 0 0
45986- 0 0 0 0 0 0 0 0 0 0 0 0
45987- 0 0 0 0 0 0 0 0 0 0 0 0
45988- 0 0 0 0 0 0 0 0 0 0 0 0
45989- 0 0 0 0 0 0 6 6 6 22 22 22
45990- 58 58 58 62 62 62 2 2 6 2 2 6
45991- 2 2 6 2 2 6 30 30 30 78 78 78
45992-250 250 250 253 253 253 253 253 253 253 253 253
45993-253 253 253 253 253 253 253 253 253 253 253 253
45994-253 253 253 253 253 253 231 231 231 246 246 246
45995-253 253 253 253 253 253 253 253 253 253 253 253
45996-253 253 253 253 253 253 253 253 253 253 253 253
45997-253 253 253 253 253 253 253 253 253 253 253 253
45998-253 253 253 253 253 253 253 253 253 253 253 253
45999-253 253 253 253 253 253 206 206 206 2 2 6
46000- 22 22 22 34 34 34 18 14 6 22 22 22
46001- 26 26 26 18 18 18 6 6 6 2 2 6
46002- 2 2 6 82 82 82 54 54 54 18 18 18
46003- 6 6 6 0 0 0 0 0 0 0 0 0
46004- 0 0 0 0 0 0 0 0 0 0 0 0
46005- 0 0 0 0 0 0 0 0 0 0 0 0
46006- 0 0 0 0 0 0 0 0 0 0 0 0
46007- 0 0 0 0 0 0 0 0 0 0 0 0
46008- 0 0 0 0 0 0 0 0 0 0 0 0
46009- 0 0 0 0 0 0 6 6 6 26 26 26
46010- 62 62 62 106 106 106 74 54 14 185 133 11
46011-210 162 10 121 92 8 6 6 6 62 62 62
46012-238 238 238 253 253 253 253 253 253 253 253 253
46013-253 253 253 253 253 253 253 253 253 253 253 253
46014-253 253 253 253 253 253 231 231 231 246 246 246
46015-253 253 253 253 253 253 253 253 253 253 253 253
46016-253 253 253 253 253 253 253 253 253 253 253 253
46017-253 253 253 253 253 253 253 253 253 253 253 253
46018-253 253 253 253 253 253 253 253 253 253 253 253
46019-253 253 253 253 253 253 158 158 158 18 18 18
46020- 14 14 14 2 2 6 2 2 6 2 2 6
46021- 6 6 6 18 18 18 66 66 66 38 38 38
46022- 6 6 6 94 94 94 50 50 50 18 18 18
46023- 6 6 6 0 0 0 0 0 0 0 0 0
46024- 0 0 0 0 0 0 0 0 0 0 0 0
46025- 0 0 0 0 0 0 0 0 0 0 0 0
46026- 0 0 0 0 0 0 0 0 0 0 0 0
46027- 0 0 0 0 0 0 0 0 0 0 0 0
46028- 0 0 0 0 0 0 0 0 0 6 6 6
46029- 10 10 10 10 10 10 18 18 18 38 38 38
46030- 78 78 78 142 134 106 216 158 10 242 186 14
46031-246 190 14 246 190 14 156 118 10 10 10 10
46032- 90 90 90 238 238 238 253 253 253 253 253 253
46033-253 253 253 253 253 253 253 253 253 253 253 253
46034-253 253 253 253 253 253 231 231 231 250 250 250
46035-253 253 253 253 253 253 253 253 253 253 253 253
46036-253 253 253 253 253 253 253 253 253 253 253 253
46037-253 253 253 253 253 253 253 253 253 253 253 253
46038-253 253 253 253 253 253 253 253 253 246 230 190
46039-238 204 91 238 204 91 181 142 44 37 26 9
46040- 2 2 6 2 2 6 2 2 6 2 2 6
46041- 2 2 6 2 2 6 38 38 38 46 46 46
46042- 26 26 26 106 106 106 54 54 54 18 18 18
46043- 6 6 6 0 0 0 0 0 0 0 0 0
46044- 0 0 0 0 0 0 0 0 0 0 0 0
46045- 0 0 0 0 0 0 0 0 0 0 0 0
46046- 0 0 0 0 0 0 0 0 0 0 0 0
46047- 0 0 0 0 0 0 0 0 0 0 0 0
46048- 0 0 0 6 6 6 14 14 14 22 22 22
46049- 30 30 30 38 38 38 50 50 50 70 70 70
46050-106 106 106 190 142 34 226 170 11 242 186 14
46051-246 190 14 246 190 14 246 190 14 154 114 10
46052- 6 6 6 74 74 74 226 226 226 253 253 253
46053-253 253 253 253 253 253 253 253 253 253 253 253
46054-253 253 253 253 253 253 231 231 231 250 250 250
46055-253 253 253 253 253 253 253 253 253 253 253 253
46056-253 253 253 253 253 253 253 253 253 253 253 253
46057-253 253 253 253 253 253 253 253 253 253 253 253
46058-253 253 253 253 253 253 253 253 253 228 184 62
46059-241 196 14 241 208 19 232 195 16 38 30 10
46060- 2 2 6 2 2 6 2 2 6 2 2 6
46061- 2 2 6 6 6 6 30 30 30 26 26 26
46062-203 166 17 154 142 90 66 66 66 26 26 26
46063- 6 6 6 0 0 0 0 0 0 0 0 0
46064- 0 0 0 0 0 0 0 0 0 0 0 0
46065- 0 0 0 0 0 0 0 0 0 0 0 0
46066- 0 0 0 0 0 0 0 0 0 0 0 0
46067- 0 0 0 0 0 0 0 0 0 0 0 0
46068- 6 6 6 18 18 18 38 38 38 58 58 58
46069- 78 78 78 86 86 86 101 101 101 123 123 123
46070-175 146 61 210 150 10 234 174 13 246 186 14
46071-246 190 14 246 190 14 246 190 14 238 190 10
46072-102 78 10 2 2 6 46 46 46 198 198 198
46073-253 253 253 253 253 253 253 253 253 253 253 253
46074-253 253 253 253 253 253 234 234 234 242 242 242
46075-253 253 253 253 253 253 253 253 253 253 253 253
46076-253 253 253 253 253 253 253 253 253 253 253 253
46077-253 253 253 253 253 253 253 253 253 253 253 253
46078-253 253 253 253 253 253 253 253 253 224 178 62
46079-242 186 14 241 196 14 210 166 10 22 18 6
46080- 2 2 6 2 2 6 2 2 6 2 2 6
46081- 2 2 6 2 2 6 6 6 6 121 92 8
46082-238 202 15 232 195 16 82 82 82 34 34 34
46083- 10 10 10 0 0 0 0 0 0 0 0 0
46084- 0 0 0 0 0 0 0 0 0 0 0 0
46085- 0 0 0 0 0 0 0 0 0 0 0 0
46086- 0 0 0 0 0 0 0 0 0 0 0 0
46087- 0 0 0 0 0 0 0 0 0 0 0 0
46088- 14 14 14 38 38 38 70 70 70 154 122 46
46089-190 142 34 200 144 11 197 138 11 197 138 11
46090-213 154 11 226 170 11 242 186 14 246 190 14
46091-246 190 14 246 190 14 246 190 14 246 190 14
46092-225 175 15 46 32 6 2 2 6 22 22 22
46093-158 158 158 250 250 250 253 253 253 253 253 253
46094-253 253 253 253 253 253 253 253 253 253 253 253
46095-253 253 253 253 253 253 253 253 253 253 253 253
46096-253 253 253 253 253 253 253 253 253 253 253 253
46097-253 253 253 253 253 253 253 253 253 253 253 253
46098-253 253 253 250 250 250 242 242 242 224 178 62
46099-239 182 13 236 186 11 213 154 11 46 32 6
46100- 2 2 6 2 2 6 2 2 6 2 2 6
46101- 2 2 6 2 2 6 61 42 6 225 175 15
46102-238 190 10 236 186 11 112 100 78 42 42 42
46103- 14 14 14 0 0 0 0 0 0 0 0 0
46104- 0 0 0 0 0 0 0 0 0 0 0 0
46105- 0 0 0 0 0 0 0 0 0 0 0 0
46106- 0 0 0 0 0 0 0 0 0 0 0 0
46107- 0 0 0 0 0 0 0 0 0 6 6 6
46108- 22 22 22 54 54 54 154 122 46 213 154 11
46109-226 170 11 230 174 11 226 170 11 226 170 11
46110-236 178 12 242 186 14 246 190 14 246 190 14
46111-246 190 14 246 190 14 246 190 14 246 190 14
46112-241 196 14 184 144 12 10 10 10 2 2 6
46113- 6 6 6 116 116 116 242 242 242 253 253 253
46114-253 253 253 253 253 253 253 253 253 253 253 253
46115-253 253 253 253 253 253 253 253 253 253 253 253
46116-253 253 253 253 253 253 253 253 253 253 253 253
46117-253 253 253 253 253 253 253 253 253 253 253 253
46118-253 253 253 231 231 231 198 198 198 214 170 54
46119-236 178 12 236 178 12 210 150 10 137 92 6
46120- 18 14 6 2 2 6 2 2 6 2 2 6
46121- 6 6 6 70 47 6 200 144 11 236 178 12
46122-239 182 13 239 182 13 124 112 88 58 58 58
46123- 22 22 22 6 6 6 0 0 0 0 0 0
46124- 0 0 0 0 0 0 0 0 0 0 0 0
46125- 0 0 0 0 0 0 0 0 0 0 0 0
46126- 0 0 0 0 0 0 0 0 0 0 0 0
46127- 0 0 0 0 0 0 0 0 0 10 10 10
46128- 30 30 30 70 70 70 180 133 36 226 170 11
46129-239 182 13 242 186 14 242 186 14 246 186 14
46130-246 190 14 246 190 14 246 190 14 246 190 14
46131-246 190 14 246 190 14 246 190 14 246 190 14
46132-246 190 14 232 195 16 98 70 6 2 2 6
46133- 2 2 6 2 2 6 66 66 66 221 221 221
46134-253 253 253 253 253 253 253 253 253 253 253 253
46135-253 253 253 253 253 253 253 253 253 253 253 253
46136-253 253 253 253 253 253 253 253 253 253 253 253
46137-253 253 253 253 253 253 253 253 253 253 253 253
46138-253 253 253 206 206 206 198 198 198 214 166 58
46139-230 174 11 230 174 11 216 158 10 192 133 9
46140-163 110 8 116 81 8 102 78 10 116 81 8
46141-167 114 7 197 138 11 226 170 11 239 182 13
46142-242 186 14 242 186 14 162 146 94 78 78 78
46143- 34 34 34 14 14 14 6 6 6 0 0 0
46144- 0 0 0 0 0 0 0 0 0 0 0 0
46145- 0 0 0 0 0 0 0 0 0 0 0 0
46146- 0 0 0 0 0 0 0 0 0 0 0 0
46147- 0 0 0 0 0 0 0 0 0 6 6 6
46148- 30 30 30 78 78 78 190 142 34 226 170 11
46149-239 182 13 246 190 14 246 190 14 246 190 14
46150-246 190 14 246 190 14 246 190 14 246 190 14
46151-246 190 14 246 190 14 246 190 14 246 190 14
46152-246 190 14 241 196 14 203 166 17 22 18 6
46153- 2 2 6 2 2 6 2 2 6 38 38 38
46154-218 218 218 253 253 253 253 253 253 253 253 253
46155-253 253 253 253 253 253 253 253 253 253 253 253
46156-253 253 253 253 253 253 253 253 253 253 253 253
46157-253 253 253 253 253 253 253 253 253 253 253 253
46158-250 250 250 206 206 206 198 198 198 202 162 69
46159-226 170 11 236 178 12 224 166 10 210 150 10
46160-200 144 11 197 138 11 192 133 9 197 138 11
46161-210 150 10 226 170 11 242 186 14 246 190 14
46162-246 190 14 246 186 14 225 175 15 124 112 88
46163- 62 62 62 30 30 30 14 14 14 6 6 6
46164- 0 0 0 0 0 0 0 0 0 0 0 0
46165- 0 0 0 0 0 0 0 0 0 0 0 0
46166- 0 0 0 0 0 0 0 0 0 0 0 0
46167- 0 0 0 0 0 0 0 0 0 10 10 10
46168- 30 30 30 78 78 78 174 135 50 224 166 10
46169-239 182 13 246 190 14 246 190 14 246 190 14
46170-246 190 14 246 190 14 246 190 14 246 190 14
46171-246 190 14 246 190 14 246 190 14 246 190 14
46172-246 190 14 246 190 14 241 196 14 139 102 15
46173- 2 2 6 2 2 6 2 2 6 2 2 6
46174- 78 78 78 250 250 250 253 253 253 253 253 253
46175-253 253 253 253 253 253 253 253 253 253 253 253
46176-253 253 253 253 253 253 253 253 253 253 253 253
46177-253 253 253 253 253 253 253 253 253 253 253 253
46178-250 250 250 214 214 214 198 198 198 190 150 46
46179-219 162 10 236 178 12 234 174 13 224 166 10
46180-216 158 10 213 154 11 213 154 11 216 158 10
46181-226 170 11 239 182 13 246 190 14 246 190 14
46182-246 190 14 246 190 14 242 186 14 206 162 42
46183-101 101 101 58 58 58 30 30 30 14 14 14
46184- 6 6 6 0 0 0 0 0 0 0 0 0
46185- 0 0 0 0 0 0 0 0 0 0 0 0
46186- 0 0 0 0 0 0 0 0 0 0 0 0
46187- 0 0 0 0 0 0 0 0 0 10 10 10
46188- 30 30 30 74 74 74 174 135 50 216 158 10
46189-236 178 12 246 190 14 246 190 14 246 190 14
46190-246 190 14 246 190 14 246 190 14 246 190 14
46191-246 190 14 246 190 14 246 190 14 246 190 14
46192-246 190 14 246 190 14 241 196 14 226 184 13
46193- 61 42 6 2 2 6 2 2 6 2 2 6
46194- 22 22 22 238 238 238 253 253 253 253 253 253
46195-253 253 253 253 253 253 253 253 253 253 253 253
46196-253 253 253 253 253 253 253 253 253 253 253 253
46197-253 253 253 253 253 253 253 253 253 253 253 253
46198-253 253 253 226 226 226 187 187 187 180 133 36
46199-216 158 10 236 178 12 239 182 13 236 178 12
46200-230 174 11 226 170 11 226 170 11 230 174 11
46201-236 178 12 242 186 14 246 190 14 246 190 14
46202-246 190 14 246 190 14 246 186 14 239 182 13
46203-206 162 42 106 106 106 66 66 66 34 34 34
46204- 14 14 14 6 6 6 0 0 0 0 0 0
46205- 0 0 0 0 0 0 0 0 0 0 0 0
46206- 0 0 0 0 0 0 0 0 0 0 0 0
46207- 0 0 0 0 0 0 0 0 0 6 6 6
46208- 26 26 26 70 70 70 163 133 67 213 154 11
46209-236 178 12 246 190 14 246 190 14 246 190 14
46210-246 190 14 246 190 14 246 190 14 246 190 14
46211-246 190 14 246 190 14 246 190 14 246 190 14
46212-246 190 14 246 190 14 246 190 14 241 196 14
46213-190 146 13 18 14 6 2 2 6 2 2 6
46214- 46 46 46 246 246 246 253 253 253 253 253 253
46215-253 253 253 253 253 253 253 253 253 253 253 253
46216-253 253 253 253 253 253 253 253 253 253 253 253
46217-253 253 253 253 253 253 253 253 253 253 253 253
46218-253 253 253 221 221 221 86 86 86 156 107 11
46219-216 158 10 236 178 12 242 186 14 246 186 14
46220-242 186 14 239 182 13 239 182 13 242 186 14
46221-242 186 14 246 186 14 246 190 14 246 190 14
46222-246 190 14 246 190 14 246 190 14 246 190 14
46223-242 186 14 225 175 15 142 122 72 66 66 66
46224- 30 30 30 10 10 10 0 0 0 0 0 0
46225- 0 0 0 0 0 0 0 0 0 0 0 0
46226- 0 0 0 0 0 0 0 0 0 0 0 0
46227- 0 0 0 0 0 0 0 0 0 6 6 6
46228- 26 26 26 70 70 70 163 133 67 210 150 10
46229-236 178 12 246 190 14 246 190 14 246 190 14
46230-246 190 14 246 190 14 246 190 14 246 190 14
46231-246 190 14 246 190 14 246 190 14 246 190 14
46232-246 190 14 246 190 14 246 190 14 246 190 14
46233-232 195 16 121 92 8 34 34 34 106 106 106
46234-221 221 221 253 253 253 253 253 253 253 253 253
46235-253 253 253 253 253 253 253 253 253 253 253 253
46236-253 253 253 253 253 253 253 253 253 253 253 253
46237-253 253 253 253 253 253 253 253 253 253 253 253
46238-242 242 242 82 82 82 18 14 6 163 110 8
46239-216 158 10 236 178 12 242 186 14 246 190 14
46240-246 190 14 246 190 14 246 190 14 246 190 14
46241-246 190 14 246 190 14 246 190 14 246 190 14
46242-246 190 14 246 190 14 246 190 14 246 190 14
46243-246 190 14 246 190 14 242 186 14 163 133 67
46244- 46 46 46 18 18 18 6 6 6 0 0 0
46245- 0 0 0 0 0 0 0 0 0 0 0 0
46246- 0 0 0 0 0 0 0 0 0 0 0 0
46247- 0 0 0 0 0 0 0 0 0 10 10 10
46248- 30 30 30 78 78 78 163 133 67 210 150 10
46249-236 178 12 246 186 14 246 190 14 246 190 14
46250-246 190 14 246 190 14 246 190 14 246 190 14
46251-246 190 14 246 190 14 246 190 14 246 190 14
46252-246 190 14 246 190 14 246 190 14 246 190 14
46253-241 196 14 215 174 15 190 178 144 253 253 253
46254-253 253 253 253 253 253 253 253 253 253 253 253
46255-253 253 253 253 253 253 253 253 253 253 253 253
46256-253 253 253 253 253 253 253 253 253 253 253 253
46257-253 253 253 253 253 253 253 253 253 218 218 218
46258- 58 58 58 2 2 6 22 18 6 167 114 7
46259-216 158 10 236 178 12 246 186 14 246 190 14
46260-246 190 14 246 190 14 246 190 14 246 190 14
46261-246 190 14 246 190 14 246 190 14 246 190 14
46262-246 190 14 246 190 14 246 190 14 246 190 14
46263-246 190 14 246 186 14 242 186 14 190 150 46
46264- 54 54 54 22 22 22 6 6 6 0 0 0
46265- 0 0 0 0 0 0 0 0 0 0 0 0
46266- 0 0 0 0 0 0 0 0 0 0 0 0
46267- 0 0 0 0 0 0 0 0 0 14 14 14
46268- 38 38 38 86 86 86 180 133 36 213 154 11
46269-236 178 12 246 186 14 246 190 14 246 190 14
46270-246 190 14 246 190 14 246 190 14 246 190 14
46271-246 190 14 246 190 14 246 190 14 246 190 14
46272-246 190 14 246 190 14 246 190 14 246 190 14
46273-246 190 14 232 195 16 190 146 13 214 214 214
46274-253 253 253 253 253 253 253 253 253 253 253 253
46275-253 253 253 253 253 253 253 253 253 253 253 253
46276-253 253 253 253 253 253 253 253 253 253 253 253
46277-253 253 253 250 250 250 170 170 170 26 26 26
46278- 2 2 6 2 2 6 37 26 9 163 110 8
46279-219 162 10 239 182 13 246 186 14 246 190 14
46280-246 190 14 246 190 14 246 190 14 246 190 14
46281-246 190 14 246 190 14 246 190 14 246 190 14
46282-246 190 14 246 190 14 246 190 14 246 190 14
46283-246 186 14 236 178 12 224 166 10 142 122 72
46284- 46 46 46 18 18 18 6 6 6 0 0 0
46285- 0 0 0 0 0 0 0 0 0 0 0 0
46286- 0 0 0 0 0 0 0 0 0 0 0 0
46287- 0 0 0 0 0 0 6 6 6 18 18 18
46288- 50 50 50 109 106 95 192 133 9 224 166 10
46289-242 186 14 246 190 14 246 190 14 246 190 14
46290-246 190 14 246 190 14 246 190 14 246 190 14
46291-246 190 14 246 190 14 246 190 14 246 190 14
46292-246 190 14 246 190 14 246 190 14 246 190 14
46293-242 186 14 226 184 13 210 162 10 142 110 46
46294-226 226 226 253 253 253 253 253 253 253 253 253
46295-253 253 253 253 253 253 253 253 253 253 253 253
46296-253 253 253 253 253 253 253 253 253 253 253 253
46297-198 198 198 66 66 66 2 2 6 2 2 6
46298- 2 2 6 2 2 6 50 34 6 156 107 11
46299-219 162 10 239 182 13 246 186 14 246 190 14
46300-246 190 14 246 190 14 246 190 14 246 190 14
46301-246 190 14 246 190 14 246 190 14 246 190 14
46302-246 190 14 246 190 14 246 190 14 242 186 14
46303-234 174 13 213 154 11 154 122 46 66 66 66
46304- 30 30 30 10 10 10 0 0 0 0 0 0
46305- 0 0 0 0 0 0 0 0 0 0 0 0
46306- 0 0 0 0 0 0 0 0 0 0 0 0
46307- 0 0 0 0 0 0 6 6 6 22 22 22
46308- 58 58 58 154 121 60 206 145 10 234 174 13
46309-242 186 14 246 186 14 246 190 14 246 190 14
46310-246 190 14 246 190 14 246 190 14 246 190 14
46311-246 190 14 246 190 14 246 190 14 246 190 14
46312-246 190 14 246 190 14 246 190 14 246 190 14
46313-246 186 14 236 178 12 210 162 10 163 110 8
46314- 61 42 6 138 138 138 218 218 218 250 250 250
46315-253 253 253 253 253 253 253 253 253 250 250 250
46316-242 242 242 210 210 210 144 144 144 66 66 66
46317- 6 6 6 2 2 6 2 2 6 2 2 6
46318- 2 2 6 2 2 6 61 42 6 163 110 8
46319-216 158 10 236 178 12 246 190 14 246 190 14
46320-246 190 14 246 190 14 246 190 14 246 190 14
46321-246 190 14 246 190 14 246 190 14 246 190 14
46322-246 190 14 239 182 13 230 174 11 216 158 10
46323-190 142 34 124 112 88 70 70 70 38 38 38
46324- 18 18 18 6 6 6 0 0 0 0 0 0
46325- 0 0 0 0 0 0 0 0 0 0 0 0
46326- 0 0 0 0 0 0 0 0 0 0 0 0
46327- 0 0 0 0 0 0 6 6 6 22 22 22
46328- 62 62 62 168 124 44 206 145 10 224 166 10
46329-236 178 12 239 182 13 242 186 14 242 186 14
46330-246 186 14 246 190 14 246 190 14 246 190 14
46331-246 190 14 246 190 14 246 190 14 246 190 14
46332-246 190 14 246 190 14 246 190 14 246 190 14
46333-246 190 14 236 178 12 216 158 10 175 118 6
46334- 80 54 7 2 2 6 6 6 6 30 30 30
46335- 54 54 54 62 62 62 50 50 50 38 38 38
46336- 14 14 14 2 2 6 2 2 6 2 2 6
46337- 2 2 6 2 2 6 2 2 6 2 2 6
46338- 2 2 6 6 6 6 80 54 7 167 114 7
46339-213 154 11 236 178 12 246 190 14 246 190 14
46340-246 190 14 246 190 14 246 190 14 246 190 14
46341-246 190 14 242 186 14 239 182 13 239 182 13
46342-230 174 11 210 150 10 174 135 50 124 112 88
46343- 82 82 82 54 54 54 34 34 34 18 18 18
46344- 6 6 6 0 0 0 0 0 0 0 0 0
46345- 0 0 0 0 0 0 0 0 0 0 0 0
46346- 0 0 0 0 0 0 0 0 0 0 0 0
46347- 0 0 0 0 0 0 6 6 6 18 18 18
46348- 50 50 50 158 118 36 192 133 9 200 144 11
46349-216 158 10 219 162 10 224 166 10 226 170 11
46350-230 174 11 236 178 12 239 182 13 239 182 13
46351-242 186 14 246 186 14 246 190 14 246 190 14
46352-246 190 14 246 190 14 246 190 14 246 190 14
46353-246 186 14 230 174 11 210 150 10 163 110 8
46354-104 69 6 10 10 10 2 2 6 2 2 6
46355- 2 2 6 2 2 6 2 2 6 2 2 6
46356- 2 2 6 2 2 6 2 2 6 2 2 6
46357- 2 2 6 2 2 6 2 2 6 2 2 6
46358- 2 2 6 6 6 6 91 60 6 167 114 7
46359-206 145 10 230 174 11 242 186 14 246 190 14
46360-246 190 14 246 190 14 246 186 14 242 186 14
46361-239 182 13 230 174 11 224 166 10 213 154 11
46362-180 133 36 124 112 88 86 86 86 58 58 58
46363- 38 38 38 22 22 22 10 10 10 6 6 6
46364- 0 0 0 0 0 0 0 0 0 0 0 0
46365- 0 0 0 0 0 0 0 0 0 0 0 0
46366- 0 0 0 0 0 0 0 0 0 0 0 0
46367- 0 0 0 0 0 0 0 0 0 14 14 14
46368- 34 34 34 70 70 70 138 110 50 158 118 36
46369-167 114 7 180 123 7 192 133 9 197 138 11
46370-200 144 11 206 145 10 213 154 11 219 162 10
46371-224 166 10 230 174 11 239 182 13 242 186 14
46372-246 186 14 246 186 14 246 186 14 246 186 14
46373-239 182 13 216 158 10 185 133 11 152 99 6
46374-104 69 6 18 14 6 2 2 6 2 2 6
46375- 2 2 6 2 2 6 2 2 6 2 2 6
46376- 2 2 6 2 2 6 2 2 6 2 2 6
46377- 2 2 6 2 2 6 2 2 6 2 2 6
46378- 2 2 6 6 6 6 80 54 7 152 99 6
46379-192 133 9 219 162 10 236 178 12 239 182 13
46380-246 186 14 242 186 14 239 182 13 236 178 12
46381-224 166 10 206 145 10 192 133 9 154 121 60
46382- 94 94 94 62 62 62 42 42 42 22 22 22
46383- 14 14 14 6 6 6 0 0 0 0 0 0
46384- 0 0 0 0 0 0 0 0 0 0 0 0
46385- 0 0 0 0 0 0 0 0 0 0 0 0
46386- 0 0 0 0 0 0 0 0 0 0 0 0
46387- 0 0 0 0 0 0 0 0 0 6 6 6
46388- 18 18 18 34 34 34 58 58 58 78 78 78
46389-101 98 89 124 112 88 142 110 46 156 107 11
46390-163 110 8 167 114 7 175 118 6 180 123 7
46391-185 133 11 197 138 11 210 150 10 219 162 10
46392-226 170 11 236 178 12 236 178 12 234 174 13
46393-219 162 10 197 138 11 163 110 8 130 83 6
46394- 91 60 6 10 10 10 2 2 6 2 2 6
46395- 18 18 18 38 38 38 38 38 38 38 38 38
46396- 38 38 38 38 38 38 38 38 38 38 38 38
46397- 38 38 38 38 38 38 26 26 26 2 2 6
46398- 2 2 6 6 6 6 70 47 6 137 92 6
46399-175 118 6 200 144 11 219 162 10 230 174 11
46400-234 174 13 230 174 11 219 162 10 210 150 10
46401-192 133 9 163 110 8 124 112 88 82 82 82
46402- 50 50 50 30 30 30 14 14 14 6 6 6
46403- 0 0 0 0 0 0 0 0 0 0 0 0
46404- 0 0 0 0 0 0 0 0 0 0 0 0
46405- 0 0 0 0 0 0 0 0 0 0 0 0
46406- 0 0 0 0 0 0 0 0 0 0 0 0
46407- 0 0 0 0 0 0 0 0 0 0 0 0
46408- 6 6 6 14 14 14 22 22 22 34 34 34
46409- 42 42 42 58 58 58 74 74 74 86 86 86
46410-101 98 89 122 102 70 130 98 46 121 87 25
46411-137 92 6 152 99 6 163 110 8 180 123 7
46412-185 133 11 197 138 11 206 145 10 200 144 11
46413-180 123 7 156 107 11 130 83 6 104 69 6
46414- 50 34 6 54 54 54 110 110 110 101 98 89
46415- 86 86 86 82 82 82 78 78 78 78 78 78
46416- 78 78 78 78 78 78 78 78 78 78 78 78
46417- 78 78 78 82 82 82 86 86 86 94 94 94
46418-106 106 106 101 101 101 86 66 34 124 80 6
46419-156 107 11 180 123 7 192 133 9 200 144 11
46420-206 145 10 200 144 11 192 133 9 175 118 6
46421-139 102 15 109 106 95 70 70 70 42 42 42
46422- 22 22 22 10 10 10 0 0 0 0 0 0
46423- 0 0 0 0 0 0 0 0 0 0 0 0
46424- 0 0 0 0 0 0 0 0 0 0 0 0
46425- 0 0 0 0 0 0 0 0 0 0 0 0
46426- 0 0 0 0 0 0 0 0 0 0 0 0
46427- 0 0 0 0 0 0 0 0 0 0 0 0
46428- 0 0 0 0 0 0 6 6 6 10 10 10
46429- 14 14 14 22 22 22 30 30 30 38 38 38
46430- 50 50 50 62 62 62 74 74 74 90 90 90
46431-101 98 89 112 100 78 121 87 25 124 80 6
46432-137 92 6 152 99 6 152 99 6 152 99 6
46433-138 86 6 124 80 6 98 70 6 86 66 30
46434-101 98 89 82 82 82 58 58 58 46 46 46
46435- 38 38 38 34 34 34 34 34 34 34 34 34
46436- 34 34 34 34 34 34 34 34 34 34 34 34
46437- 34 34 34 34 34 34 38 38 38 42 42 42
46438- 54 54 54 82 82 82 94 86 76 91 60 6
46439-134 86 6 156 107 11 167 114 7 175 118 6
46440-175 118 6 167 114 7 152 99 6 121 87 25
46441-101 98 89 62 62 62 34 34 34 18 18 18
46442- 6 6 6 0 0 0 0 0 0 0 0 0
46443- 0 0 0 0 0 0 0 0 0 0 0 0
46444- 0 0 0 0 0 0 0 0 0 0 0 0
46445- 0 0 0 0 0 0 0 0 0 0 0 0
46446- 0 0 0 0 0 0 0 0 0 0 0 0
46447- 0 0 0 0 0 0 0 0 0 0 0 0
46448- 0 0 0 0 0 0 0 0 0 0 0 0
46449- 0 0 0 6 6 6 6 6 6 10 10 10
46450- 18 18 18 22 22 22 30 30 30 42 42 42
46451- 50 50 50 66 66 66 86 86 86 101 98 89
46452-106 86 58 98 70 6 104 69 6 104 69 6
46453-104 69 6 91 60 6 82 62 34 90 90 90
46454- 62 62 62 38 38 38 22 22 22 14 14 14
46455- 10 10 10 10 10 10 10 10 10 10 10 10
46456- 10 10 10 10 10 10 6 6 6 10 10 10
46457- 10 10 10 10 10 10 10 10 10 14 14 14
46458- 22 22 22 42 42 42 70 70 70 89 81 66
46459- 80 54 7 104 69 6 124 80 6 137 92 6
46460-134 86 6 116 81 8 100 82 52 86 86 86
46461- 58 58 58 30 30 30 14 14 14 6 6 6
46462- 0 0 0 0 0 0 0 0 0 0 0 0
46463- 0 0 0 0 0 0 0 0 0 0 0 0
46464- 0 0 0 0 0 0 0 0 0 0 0 0
46465- 0 0 0 0 0 0 0 0 0 0 0 0
46466- 0 0 0 0 0 0 0 0 0 0 0 0
46467- 0 0 0 0 0 0 0 0 0 0 0 0
46468- 0 0 0 0 0 0 0 0 0 0 0 0
46469- 0 0 0 0 0 0 0 0 0 0 0 0
46470- 0 0 0 6 6 6 10 10 10 14 14 14
46471- 18 18 18 26 26 26 38 38 38 54 54 54
46472- 70 70 70 86 86 86 94 86 76 89 81 66
46473- 89 81 66 86 86 86 74 74 74 50 50 50
46474- 30 30 30 14 14 14 6 6 6 0 0 0
46475- 0 0 0 0 0 0 0 0 0 0 0 0
46476- 0 0 0 0 0 0 0 0 0 0 0 0
46477- 0 0 0 0 0 0 0 0 0 0 0 0
46478- 6 6 6 18 18 18 34 34 34 58 58 58
46479- 82 82 82 89 81 66 89 81 66 89 81 66
46480- 94 86 66 94 86 76 74 74 74 50 50 50
46481- 26 26 26 14 14 14 6 6 6 0 0 0
46482- 0 0 0 0 0 0 0 0 0 0 0 0
46483- 0 0 0 0 0 0 0 0 0 0 0 0
46484- 0 0 0 0 0 0 0 0 0 0 0 0
46485- 0 0 0 0 0 0 0 0 0 0 0 0
46486- 0 0 0 0 0 0 0 0 0 0 0 0
46487- 0 0 0 0 0 0 0 0 0 0 0 0
46488- 0 0 0 0 0 0 0 0 0 0 0 0
46489- 0 0 0 0 0 0 0 0 0 0 0 0
46490- 0 0 0 0 0 0 0 0 0 0 0 0
[~1,150 lines of raw RGB pixel triplets omitted: this hunk swaps the old boot-logo bitmap rows (-) for new ones (+); the data carries no human-readable content]
47646diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47647index fe92eed..106e085 100644
47648--- a/drivers/video/mb862xx/mb862xxfb_accel.c
47649+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47650@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47651 struct mb862xxfb_par *par = info->par;
47652
47653 if (info->var.bits_per_pixel == 32) {
47654- info->fbops->fb_fillrect = cfb_fillrect;
47655- info->fbops->fb_copyarea = cfb_copyarea;
47656- info->fbops->fb_imageblit = cfb_imageblit;
47657+ pax_open_kernel();
47658+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47659+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47660+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47661+ pax_close_kernel();
47662 } else {
47663 outreg(disp, GC_L0EM, 3);
47664- info->fbops->fb_fillrect = mb86290fb_fillrect;
47665- info->fbops->fb_copyarea = mb86290fb_copyarea;
47666- info->fbops->fb_imageblit = mb86290fb_imageblit;
47667+ pax_open_kernel();
47668+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47669+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47670+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47671+ pax_close_kernel();
47672 }
47673 outreg(draw, GDC_REG_DRAW_BASE, 0);
47674 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
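The mb862xx hunk above, and the nvidia, s1d13xxx, smscufx and udlfb hunks below, all apply the same idiom: the drivers used to assign accelerated routines straight into info->fbops, but under this patch set the fb_ops structures are constified (read-only after boot), so every runtime assignment is bracketed by pax_open_kernel()/pax_close_kernel() and written through a *(void **)& cast to defeat the const qualifier. A minimal sketch of the idiom, assuming a PaX-patched kernel (pax_open_kernel()/pax_close_kernel() are supplied by the patch itself; the helper name fbops_set_fillrect is hypothetical, for illustration only):

#include <linux/fb.h>

/* Illustrative only -- not part of the patch. Updates one slot of a
 * constified fb_ops instance. pax_open_kernel() temporarily permits
 * writes to read-only kernel data; pax_close_kernel() restores the
 * protection. The (void **) cast strips the const qualifier so the
 * assignment compiles against the constified structure definition. */
static void fbops_set_fillrect(struct fb_info *info,
			       void (*fr)(struct fb_info *,
					  const struct fb_fillrect *))
{
	pax_open_kernel();
	*(void **)&info->fbops->fb_fillrect = fr;
	pax_close_kernel();
}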
47675diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47676index ff22871..b129bed 100644
47677--- a/drivers/video/nvidia/nvidia.c
47678+++ b/drivers/video/nvidia/nvidia.c
47679@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47680 info->fix.line_length = (info->var.xres_virtual *
47681 info->var.bits_per_pixel) >> 3;
47682 if (info->var.accel_flags) {
47683- info->fbops->fb_imageblit = nvidiafb_imageblit;
47684- info->fbops->fb_fillrect = nvidiafb_fillrect;
47685- info->fbops->fb_copyarea = nvidiafb_copyarea;
47686- info->fbops->fb_sync = nvidiafb_sync;
47687+ pax_open_kernel();
47688+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47689+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47690+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47691+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47692+ pax_close_kernel();
47693 info->pixmap.scan_align = 4;
47694 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47695 info->flags |= FBINFO_READS_FAST;
47696 NVResetGraphics(info);
47697 } else {
47698- info->fbops->fb_imageblit = cfb_imageblit;
47699- info->fbops->fb_fillrect = cfb_fillrect;
47700- info->fbops->fb_copyarea = cfb_copyarea;
47701- info->fbops->fb_sync = NULL;
47702+ pax_open_kernel();
47703+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47704+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47705+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47706+ *(void **)&info->fbops->fb_sync = NULL;
47707+ pax_close_kernel();
47708 info->pixmap.scan_align = 1;
47709 info->flags |= FBINFO_HWACCEL_DISABLED;
47710 info->flags &= ~FBINFO_READS_FAST;
47711@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47712 info->pixmap.size = 8 * 1024;
47713 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47714
47715- if (!hwcur)
47716- info->fbops->fb_cursor = NULL;
47717+ if (!hwcur) {
47718+ pax_open_kernel();
47719+ *(void **)&info->fbops->fb_cursor = NULL;
47720+ pax_close_kernel();
47721+ }
47722
47723 info->var.accel_flags = (!noaccel);
47724
47725diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47726index 76d9053..dec2bfd 100644
47727--- a/drivers/video/s1d13xxxfb.c
47728+++ b/drivers/video/s1d13xxxfb.c
47729@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
47730
47731 switch(prod_id) {
47732 case S1D13506_PROD_ID: /* activate acceleration */
47733- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47734- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47735+ pax_open_kernel();
47736+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47737+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47738+ pax_close_kernel();
47739 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
47740 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
47741 break;
47742diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
47743index 97bd662..39fab85 100644
47744--- a/drivers/video/smscufx.c
47745+++ b/drivers/video/smscufx.c
47746@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
47747 fb_deferred_io_cleanup(info);
47748 kfree(info->fbdefio);
47749 info->fbdefio = NULL;
47750- info->fbops->fb_mmap = ufx_ops_mmap;
47751+ pax_open_kernel();
47752+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
47753+ pax_close_kernel();
47754 }
47755
47756 pr_debug("released /dev/fb%d user=%d count=%d",
47757diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
47758index 86d449e..8e04dc5 100644
47759--- a/drivers/video/udlfb.c
47760+++ b/drivers/video/udlfb.c
47761@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
47762 dlfb_urb_completion(urb);
47763
47764 error:
47765- atomic_add(bytes_sent, &dev->bytes_sent);
47766- atomic_add(bytes_identical, &dev->bytes_identical);
47767- atomic_add(width*height*2, &dev->bytes_rendered);
47768+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47769+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47770+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
47771 end_cycles = get_cycles();
47772- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47773+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47774 >> 10)), /* Kcycles */
47775 &dev->cpu_kcycles_used);
47776
47777@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
47778 dlfb_urb_completion(urb);
47779
47780 error:
47781- atomic_add(bytes_sent, &dev->bytes_sent);
47782- atomic_add(bytes_identical, &dev->bytes_identical);
47783- atomic_add(bytes_rendered, &dev->bytes_rendered);
47784+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47785+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47786+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
47787 end_cycles = get_cycles();
47788- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47789+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47790 >> 10)), /* Kcycles */
47791 &dev->cpu_kcycles_used);
47792 }
47793@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
47794 fb_deferred_io_cleanup(info);
47795 kfree(info->fbdefio);
47796 info->fbdefio = NULL;
47797- info->fbops->fb_mmap = dlfb_ops_mmap;
47798+ pax_open_kernel();
47799+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
47800+ pax_close_kernel();
47801 }
47802
47803 pr_warn("released /dev/fb%d user=%d count=%d\n",
47804@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
47805 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47806 struct dlfb_data *dev = fb_info->par;
47807 return snprintf(buf, PAGE_SIZE, "%u\n",
47808- atomic_read(&dev->bytes_rendered));
47809+ atomic_read_unchecked(&dev->bytes_rendered));
47810 }
47811
47812 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47813@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47814 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47815 struct dlfb_data *dev = fb_info->par;
47816 return snprintf(buf, PAGE_SIZE, "%u\n",
47817- atomic_read(&dev->bytes_identical));
47818+ atomic_read_unchecked(&dev->bytes_identical));
47819 }
47820
47821 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47822@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47823 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47824 struct dlfb_data *dev = fb_info->par;
47825 return snprintf(buf, PAGE_SIZE, "%u\n",
47826- atomic_read(&dev->bytes_sent));
47827+ atomic_read_unchecked(&dev->bytes_sent));
47828 }
47829
47830 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47831@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47832 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47833 struct dlfb_data *dev = fb_info->par;
47834 return snprintf(buf, PAGE_SIZE, "%u\n",
47835- atomic_read(&dev->cpu_kcycles_used));
47836+ atomic_read_unchecked(&dev->cpu_kcycles_used));
47837 }
47838
47839 static ssize_t edid_show(
47840@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
47841 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47842 struct dlfb_data *dev = fb_info->par;
47843
47844- atomic_set(&dev->bytes_rendered, 0);
47845- atomic_set(&dev->bytes_identical, 0);
47846- atomic_set(&dev->bytes_sent, 0);
47847- atomic_set(&dev->cpu_kcycles_used, 0);
47848+ atomic_set_unchecked(&dev->bytes_rendered, 0);
47849+ atomic_set_unchecked(&dev->bytes_identical, 0);
47850+ atomic_set_unchecked(&dev->bytes_sent, 0);
47851+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
47852
47853 return count;
47854 }
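The udlfb conversions above follow the PAX_REFCOUNT split: atomic_t gains overflow detection to stop reference-count wraps, so counters that are purely statistical and may legitimately wrap (bytes_sent, bytes_rendered, kcycle accounting) are moved to the unchecked variants. A sketch of the pattern, assuming the atomic_unchecked_t type and *_unchecked helpers introduced elsewhere in this patch (xfer_stats is a hypothetical example struct):

struct xfer_stats {
	atomic_unchecked_t bytes_sent;		/* wrap here is harmless */
};

static void account_xfer(struct xfer_stats *s, unsigned int n)
{
	atomic_add_unchecked(n, &s->bytes_sent);	/* no overflow trap */
}

static unsigned int read_xfer(struct xfer_stats *s)
{
	return atomic_read_unchecked(&s->bytes_sent);
}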
47855diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
47856index b75db01..ad2f34a 100644
47857--- a/drivers/video/uvesafb.c
47858+++ b/drivers/video/uvesafb.c
47859@@ -19,6 +19,7 @@
47860 #include <linux/io.h>
47861 #include <linux/mutex.h>
47862 #include <linux/slab.h>
47863+#include <linux/moduleloader.h>
47864 #include <video/edid.h>
47865 #include <video/uvesafb.h>
47866 #ifdef CONFIG_X86
47867@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
47868 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
47869 par->pmi_setpal = par->ypan = 0;
47870 } else {
47871+
47872+#ifdef CONFIG_PAX_KERNEXEC
47873+#ifdef CONFIG_MODULES
47874+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
47875+#endif
47876+ if (!par->pmi_code) {
47877+ par->pmi_setpal = par->ypan = 0;
47878+ return 0;
47879+ }
47880+#endif
47881+
47882 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
47883 + task->t.regs.edi);
47884+
47885+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47886+ pax_open_kernel();
47887+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
47888+ pax_close_kernel();
47889+
47890+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
47891+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
47892+#else
47893 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
47894 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
47895+#endif
47896+
47897 printk(KERN_INFO "uvesafb: protected mode interface info at "
47898 "%04x:%04x\n",
47899 (u16)task->t.regs.es, (u16)task->t.regs.edi);
47900@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
47901 par->ypan = ypan;
47902
47903 if (par->pmi_setpal || par->ypan) {
47904+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
47905 if (__supported_pte_mask & _PAGE_NX) {
47906 par->pmi_setpal = par->ypan = 0;
47907 printk(KERN_WARNING "uvesafb: NX protection is active. "
47908 "We had better not use the PMI.\n");
47909- } else {
47910+ } else
47911+#endif
47912 uvesafb_vbe_getpmi(task, par);
47913- }
47914 }
47915 #else
47916 /* The protected mode interface is not available on non-x86. */
47917@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47918 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
47919
47920 /* Disable blanking if the user requested so. */
47921- if (!blank)
47922- info->fbops->fb_blank = NULL;
47923+ if (!blank) {
47924+ pax_open_kernel();
47925+ *(void **)&info->fbops->fb_blank = NULL;
47926+ pax_close_kernel();
47927+ }
47928
47929 /*
47930 * Find out how much IO memory is required for the mode with
47931@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47932 info->flags = FBINFO_FLAG_DEFAULT |
47933 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
47934
47935- if (!par->ypan)
47936- info->fbops->fb_pan_display = NULL;
47937+ if (!par->ypan) {
47938+ pax_open_kernel();
47939+ *(void **)&info->fbops->fb_pan_display = NULL;
47940+ pax_close_kernel();
47941+ }
47942 }
47943
47944 static void uvesafb_init_mtrr(struct fb_info *info)
47945@@ -1836,6 +1866,11 @@ out:
47946 if (par->vbe_modes)
47947 kfree(par->vbe_modes);
47948
47949+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47950+ if (par->pmi_code)
47951+ module_free_exec(NULL, par->pmi_code);
47952+#endif
47953+
47954 framebuffer_release(info);
47955 return err;
47956 }
47957@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
47958 kfree(par->vbe_state_orig);
47959 if (par->vbe_state_saved)
47960 kfree(par->vbe_state_saved);
47961+
47962+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47963+ if (par->pmi_code)
47964+ module_free_exec(NULL, par->pmi_code);
47965+#endif
47966+
47967 }
47968
47969 framebuffer_release(info);
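The uvesafb changes above deal with KERNEXEC and the x86 VESA protected-mode interface: the PMI code located via the v86 task lives in a writable data mapping, which a non-executable kernel cannot call into, so the patch copies it into executable module space and fixes up the entry points. A sketch of the copy step, assuming the module_alloc_exec()/module_free_exec() allocator and the ktva_ktla() address translation provided elsewhere in this patch (install_fw_code is hypothetical):

static void *install_fw_code(const void *src, size_t len)
{
	void *code = module_alloc_exec(len);	/* executable allocation */

	if (!code)
		return NULL;
	pax_open_kernel();		/* exec memory is mapped read-only */
	memcpy(code, src, len);
	pax_close_kernel();
	return code;	/* callers derive entry points via ktva_ktla(code + off) */
}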
47970diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
47971index 501b340..d80aa17 100644
47972--- a/drivers/video/vesafb.c
47973+++ b/drivers/video/vesafb.c
47974@@ -9,6 +9,7 @@
47975 */
47976
47977 #include <linux/module.h>
47978+#include <linux/moduleloader.h>
47979 #include <linux/kernel.h>
47980 #include <linux/errno.h>
47981 #include <linux/string.h>
47982@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
47983 static int vram_total __initdata; /* Set total amount of memory */
47984 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
47985 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
47986-static void (*pmi_start)(void) __read_mostly;
47987-static void (*pmi_pal) (void) __read_mostly;
47988+static void (*pmi_start)(void) __read_only;
47989+static void (*pmi_pal) (void) __read_only;
47990 static int depth __read_mostly;
47991 static int vga_compat __read_mostly;
47992 /* --------------------------------------------------------------------- */
47993@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
47994 unsigned int size_vmode;
47995 unsigned int size_remap;
47996 unsigned int size_total;
47997+ void *pmi_code = NULL;
47998
47999 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
48000 return -ENODEV;
48001@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
48002 size_remap = size_total;
48003 vesafb_fix.smem_len = size_remap;
48004
48005-#ifndef __i386__
48006- screen_info.vesapm_seg = 0;
48007-#endif
48008-
48009 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
48010 printk(KERN_WARNING
48011 "vesafb: cannot reserve video memory at 0x%lx\n",
48012@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
48013 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
48014 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
48015
48016+#ifdef __i386__
48017+
48018+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48019+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
48020+ if (!pmi_code)
48021+#elif !defined(CONFIG_PAX_KERNEXEC)
48022+ if (0)
48023+#endif
48024+
48025+#endif
48026+ screen_info.vesapm_seg = 0;
48027+
48028 if (screen_info.vesapm_seg) {
48029- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
48030- screen_info.vesapm_seg,screen_info.vesapm_off);
48031+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
48032+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
48033 }
48034
48035 if (screen_info.vesapm_seg < 0xc000)
48036@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
48037
48038 if (ypan || pmi_setpal) {
48039 unsigned short *pmi_base;
48040+
48041 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
48042- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
48043- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
48044+
48045+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48046+ pax_open_kernel();
48047+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
48048+#else
48049+ pmi_code = pmi_base;
48050+#endif
48051+
48052+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
48053+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
48054+
48055+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48056+ pmi_start = ktva_ktla(pmi_start);
48057+ pmi_pal = ktva_ktla(pmi_pal);
48058+ pax_close_kernel();
48059+#endif
48060+
48061 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
48062 if (pmi_base[3]) {
48063 printk(KERN_INFO "vesafb: pmi: ports = ");
48064@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48065 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
48066 (ypan ? FBINFO_HWACCEL_YPAN : 0);
48067
48068- if (!ypan)
48069- info->fbops->fb_pan_display = NULL;
48070+ if (!ypan) {
48071+ pax_open_kernel();
48072+ *(void **)&info->fbops->fb_pan_display = NULL;
48073+ pax_close_kernel();
48074+ }
48075
48076 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
48077 err = -ENOMEM;
48078@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48079 info->node, info->fix.id);
48080 return 0;
48081 err:
48082+
48083+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48084+ module_free_exec(NULL, pmi_code);
48085+#endif
48086+
48087 if (info->screen_base)
48088 iounmap(info->screen_base);
48089 framebuffer_release(info);
48090diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
48091index 88714ae..16c2e11 100644
48092--- a/drivers/video/via/via_clock.h
48093+++ b/drivers/video/via/via_clock.h
48094@@ -56,7 +56,7 @@ struct via_clock {
48095
48096 void (*set_engine_pll_state)(u8 state);
48097 void (*set_engine_pll)(struct via_pll_config config);
48098-};
48099+} __no_const;
48100
48101
48102 static inline u32 get_pll_internal_frequency(u32 ref_freq,
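The via_clock.h change is a one-line opt-out from the constify gcc plugin: structures consisting only of function pointers are made const automatically, and __no_const (defined in this patch's compiler support headers) marks the ones that must remain writable because they are filled in at runtime. A hypothetical example, not patch code:

struct chip_ops {
	void (*power_on)(void);
	void (*power_off)(void);
} __no_const;		/* members assigned during probe, so not constified */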
48103diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
48104index fef20db..d28b1ab 100644
48105--- a/drivers/xen/xenfs/xenstored.c
48106+++ b/drivers/xen/xenfs/xenstored.c
48107@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
48108 static int xsd_kva_open(struct inode *inode, struct file *file)
48109 {
48110 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
48111+#ifdef CONFIG_GRKERNSEC_HIDESYM
48112+ NULL);
48113+#else
48114 xen_store_interface);
48115+#endif
48116+
48117 if (!file->private_data)
48118 return -ENOMEM;
48119 return 0;
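xenstored.c above is a GRKERNSEC_HIDESYM case: the file exposes a formatted kernel pointer to userland, which would undermine address-space secrecy, so the hidden-symbol configuration formats a NULL instead. The same decision recurs across the patch wherever %p output can reach userspace; a compact way to express it (hide_ptr is a hypothetical macro, not from the patch):

#ifdef CONFIG_GRKERNSEC_HIDESYM
#define hide_ptr(p) NULL		/* never reveal kernel addresses */
#else
#define hide_ptr(p) (p)
#endif

/* usage sketch: kasprintf(GFP_KERNEL, "0x%p", hide_ptr(xen_store_interface)); */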
48120diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
48121index 890bed5..17ae73e 100644
48122--- a/fs/9p/vfs_inode.c
48123+++ b/fs/9p/vfs_inode.c
48124@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48125 void
48126 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48127 {
48128- char *s = nd_get_link(nd);
48129+ const char *s = nd_get_link(nd);
48130
48131 p9_debug(P9_DEBUG_VFS, " %s %s\n",
48132 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
48133diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
48134index 0efd152..b5802ad 100644
48135--- a/fs/Kconfig.binfmt
48136+++ b/fs/Kconfig.binfmt
48137@@ -89,7 +89,7 @@ config HAVE_AOUT
48138
48139 config BINFMT_AOUT
48140 tristate "Kernel support for a.out and ECOFF binaries"
48141- depends on HAVE_AOUT
48142+ depends on HAVE_AOUT && BROKEN
48143 ---help---
48144 A.out (Assembler.OUTput) is a set of formats for libraries and
48145 executables used in the earliest versions of UNIX. Linux used
48146diff --git a/fs/aio.c b/fs/aio.c
48147index 71f613c..9d01f1f 100644
48148--- a/fs/aio.c
48149+++ b/fs/aio.c
48150@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
48151 size += sizeof(struct io_event) * nr_events;
48152 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
48153
48154- if (nr_pages < 0)
48155+ if (nr_pages <= 0)
48156 return -EINVAL;
48157
48158 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
48159@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
48160 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48161 {
48162 ssize_t ret;
48163+ struct iovec iovstack;
48164
48165 #ifdef CONFIG_COMPAT
48166 if (compat)
48167 ret = compat_rw_copy_check_uvector(type,
48168 (struct compat_iovec __user *)kiocb->ki_buf,
48169- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48170+ kiocb->ki_nbytes, 1, &iovstack,
48171 &kiocb->ki_iovec);
48172 else
48173 #endif
48174 ret = rw_copy_check_uvector(type,
48175 (struct iovec __user *)kiocb->ki_buf,
48176- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48177+ kiocb->ki_nbytes, 1, &iovstack,
48178 &kiocb->ki_iovec);
48179 if (ret < 0)
48180 goto out;
48181@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48182 if (ret < 0)
48183 goto out;
48184
48185+ if (kiocb->ki_iovec == &iovstack) {
48186+ kiocb->ki_inline_vec = iovstack;
48187+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
48188+ }
48189 kiocb->ki_nr_segs = kiocb->ki_nbytes;
48190 kiocb->ki_cur_seg = 0;
48191 /* ki_nbytes/left now reflect bytes instead of segs */
48192diff --git a/fs/attr.c b/fs/attr.c
48193index 1449adb..a2038c2 100644
48194--- a/fs/attr.c
48195+++ b/fs/attr.c
48196@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
48197 unsigned long limit;
48198
48199 limit = rlimit(RLIMIT_FSIZE);
48200+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
48201 if (limit != RLIM_INFINITY && offset > limit)
48202 goto out_sig;
48203 if (offset > inode->i_sb->s_maxbytes)
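The attr.c hunk shows the grsecurity learning-mode hook pattern, which recurs below in binfmt_aout and the ELF core dumper: immediately before each rlimit comparison, the requested amount is reported so the RBAC learning mode can record real resource usage and later generate policies with tight limits. The hook itself is defined in the grsecurity core added elsewhere by this patch; its shape, mirrored from the call sites here (the final argument is assumed to distinguish the comparison style), would be:

/* assumed prototype, not reproduced from the patch */
void gr_learn_resource(const struct task_struct *task, const int res,
		       const unsigned long wanted, const int gt);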
48204diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
48205index 03bc1d3..6205356 100644
48206--- a/fs/autofs4/waitq.c
48207+++ b/fs/autofs4/waitq.c
48208@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
48209 {
48210 unsigned long sigpipe, flags;
48211 mm_segment_t fs;
48212- const char *data = (const char *)addr;
48213+ const char __user *data = (const char __force_user *)addr;
48214 ssize_t wr = 0;
48215
48216 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
48217@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
48218 return 1;
48219 }
48220
48221+#ifdef CONFIG_GRKERNSEC_HIDESYM
48222+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
48223+#endif
48224+
48225 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48226 enum autofs_notify notify)
48227 {
48228@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48229
48230 /* If this is a direct mount request create a dummy name */
48231 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
48232+#ifdef CONFIG_GRKERNSEC_HIDESYM
48233+ /* this name does get written to userland via autofs4_write() */
48234+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
48235+#else
48236 qstr.len = sprintf(name, "%p", dentry);
48237+#endif
48238 else {
48239 qstr.len = autofs4_getpath(sbi, dentry, &name);
48240 if (!qstr.len) {
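autofs4 above needs a userland-visible dummy name for direct mount triggers and previously derived it from the dentry address with %p; under HIDESYM the patch substitutes a monotonically increasing counter, which keeps the name unique without leaking heap layout. A sketch of the substitution (make_dummy_name is a hypothetical helper):

static atomic_unchecked_t dummy_name_id = ATOMIC_INIT(0);

static int make_dummy_name(char *name)
{
	/* unique per request, reveals nothing about kernel addresses */
	return sprintf(name, "%08x", atomic_inc_return_unchecked(&dummy_name_id));
}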
48241diff --git a/fs/befs/endian.h b/fs/befs/endian.h
48242index 2722387..c8dd2a7 100644
48243--- a/fs/befs/endian.h
48244+++ b/fs/befs/endian.h
48245@@ -11,7 +11,7 @@
48246
48247 #include <asm/byteorder.h>
48248
48249-static inline u64
48250+static inline u64 __intentional_overflow(-1)
48251 fs64_to_cpu(const struct super_block *sb, fs64 n)
48252 {
48253 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48254@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
48255 return (__force fs64)cpu_to_be64(n);
48256 }
48257
48258-static inline u32
48259+static inline u32 __intentional_overflow(-1)
48260 fs32_to_cpu(const struct super_block *sb, fs32 n)
48261 {
48262 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
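befs/endian.h shows the size_overflow plugin's escape hatch: the plugin instruments integer arithmetic and traps unexpected overflow, and __intentional_overflow(-1) (defined by the plugin support elsewhere in this patch) whitelists functions, here byte-order converters, whose results flow through arithmetic that may wrap by design. A hypothetical example of the annotation:

/* a helper whose result intentionally wraps */
static inline u32 __intentional_overflow(-1) ring_advance(u32 head, u32 n)
{
	return head + n;	/* modulo-2^32 arithmetic is the intent */
}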
48263diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
48264index 2b3bda8..6a2d4be 100644
48265--- a/fs/befs/linuxvfs.c
48266+++ b/fs/befs/linuxvfs.c
48267@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48268 {
48269 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
48270 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
48271- char *link = nd_get_link(nd);
48272+ const char *link = nd_get_link(nd);
48273 if (!IS_ERR(link))
48274 kfree(link);
48275 }
48276diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
48277index 6043567..16a9239 100644
48278--- a/fs/binfmt_aout.c
48279+++ b/fs/binfmt_aout.c
48280@@ -16,6 +16,7 @@
48281 #include <linux/string.h>
48282 #include <linux/fs.h>
48283 #include <linux/file.h>
48284+#include <linux/security.h>
48285 #include <linux/stat.h>
48286 #include <linux/fcntl.h>
48287 #include <linux/ptrace.h>
48288@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
48289 #endif
48290 # define START_STACK(u) ((void __user *)u.start_stack)
48291
48292+ memset(&dump, 0, sizeof(dump));
48293+
48294 fs = get_fs();
48295 set_fs(KERNEL_DS);
48296 has_dumped = 1;
48297@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
48298
48299 /* If the size of the dump file exceeds the rlimit, then see what would happen
48300 if we wrote the stack, but not the data area. */
48301+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
48302 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
48303 dump.u_dsize = 0;
48304
48305 /* Make sure we have enough room to write the stack and data areas. */
48306+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
48307 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
48308 dump.u_ssize = 0;
48309
48310@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
48311 rlim = rlimit(RLIMIT_DATA);
48312 if (rlim >= RLIM_INFINITY)
48313 rlim = ~0;
48314+
48315+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
48316 if (ex.a_data + ex.a_bss > rlim)
48317 return -ENOMEM;
48318
48319@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
48320
48321 install_exec_creds(bprm);
48322
48323+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48324+ current->mm->pax_flags = 0UL;
48325+#endif
48326+
48327+#ifdef CONFIG_PAX_PAGEEXEC
48328+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
48329+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
48330+
48331+#ifdef CONFIG_PAX_EMUTRAMP
48332+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
48333+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
48334+#endif
48335+
48336+#ifdef CONFIG_PAX_MPROTECT
48337+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
48338+ current->mm->pax_flags |= MF_PAX_MPROTECT;
48339+#endif
48340+
48341+ }
48342+#endif
48343+
48344 if (N_MAGIC(ex) == OMAGIC) {
48345 unsigned long text_addr, map_size;
48346 loff_t pos;
48347@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
48348 }
48349
48350 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
48351- PROT_READ | PROT_WRITE | PROT_EXEC,
48352+ PROT_READ | PROT_WRITE,
48353 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
48354 fd_offset + ex.a_text);
48355 if (error != N_DATADDR(ex)) {
48356diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
48357index 0c42cdb..b62581e9 100644
48358--- a/fs/binfmt_elf.c
48359+++ b/fs/binfmt_elf.c
48360@@ -33,6 +33,7 @@
48361 #include <linux/elf.h>
48362 #include <linux/utsname.h>
48363 #include <linux/coredump.h>
48364+#include <linux/xattr.h>
48365 #include <asm/uaccess.h>
48366 #include <asm/param.h>
48367 #include <asm/page.h>
48368@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
48369 #define elf_core_dump NULL
48370 #endif
48371
48372+#ifdef CONFIG_PAX_MPROTECT
48373+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
48374+#endif
48375+
48376 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
48377 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
48378 #else
48379@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
48380 .load_binary = load_elf_binary,
48381 .load_shlib = load_elf_library,
48382 .core_dump = elf_core_dump,
48383+
48384+#ifdef CONFIG_PAX_MPROTECT
48385+ .handle_mprotect= elf_handle_mprotect,
48386+#endif
48387+
48388 .min_coredump = ELF_EXEC_PAGESIZE,
48389 };
48390
48391@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
48392
48393 static int set_brk(unsigned long start, unsigned long end)
48394 {
48395+ unsigned long e = end;
48396+
48397 start = ELF_PAGEALIGN(start);
48398 end = ELF_PAGEALIGN(end);
48399 if (end > start) {
48400@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
48401 if (BAD_ADDR(addr))
48402 return addr;
48403 }
48404- current->mm->start_brk = current->mm->brk = end;
48405+ current->mm->start_brk = current->mm->brk = e;
48406 return 0;
48407 }
48408
48409@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48410 elf_addr_t __user *u_rand_bytes;
48411 const char *k_platform = ELF_PLATFORM;
48412 const char *k_base_platform = ELF_BASE_PLATFORM;
48413- unsigned char k_rand_bytes[16];
48414+ u32 k_rand_bytes[4];
48415 int items;
48416 elf_addr_t *elf_info;
48417 int ei_index = 0;
48418 const struct cred *cred = current_cred();
48419 struct vm_area_struct *vma;
48420+ unsigned long saved_auxv[AT_VECTOR_SIZE];
48421
48422 /*
48423 * In some cases (e.g. Hyper-Threading), we want to avoid L1
48424@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48425 * Generate 16 random bytes for userspace PRNG seeding.
48426 */
48427 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
48428- u_rand_bytes = (elf_addr_t __user *)
48429- STACK_ALLOC(p, sizeof(k_rand_bytes));
48430+ srandom32(k_rand_bytes[0] ^ random32());
48431+ srandom32(k_rand_bytes[1] ^ random32());
48432+ srandom32(k_rand_bytes[2] ^ random32());
48433+ srandom32(k_rand_bytes[3] ^ random32());
48434+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
48435+ u_rand_bytes = (elf_addr_t __user *) p;
48436 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
48437 return -EFAULT;
48438
48439@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48440 return -EFAULT;
48441 current->mm->env_end = p;
48442
48443+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
48444+
48445 /* Put the elf_info on the stack in the right place. */
48446 sp = (elf_addr_t __user *)envp + 1;
48447- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
48448+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
48449 return -EFAULT;
48450 return 0;
48451 }
48452@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
48453 an ELF header */
48454
48455 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48456- struct file *interpreter, unsigned long *interp_map_addr,
48457- unsigned long no_base)
48458+ struct file *interpreter, unsigned long no_base)
48459 {
48460 struct elf_phdr *elf_phdata;
48461 struct elf_phdr *eppnt;
48462- unsigned long load_addr = 0;
48463+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
48464 int load_addr_set = 0;
48465 unsigned long last_bss = 0, elf_bss = 0;
48466- unsigned long error = ~0UL;
48467+ unsigned long error = -EINVAL;
48468 unsigned long total_size;
48469 int retval, i, size;
48470
48471@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48472 goto out_close;
48473 }
48474
48475+#ifdef CONFIG_PAX_SEGMEXEC
48476+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
48477+ pax_task_size = SEGMEXEC_TASK_SIZE;
48478+#endif
48479+
48480 eppnt = elf_phdata;
48481 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
48482 if (eppnt->p_type == PT_LOAD) {
48483@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48484 map_addr = elf_map(interpreter, load_addr + vaddr,
48485 eppnt, elf_prot, elf_type, total_size);
48486 total_size = 0;
48487- if (!*interp_map_addr)
48488- *interp_map_addr = map_addr;
48489 error = map_addr;
48490 if (BAD_ADDR(map_addr))
48491 goto out_close;
48492@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48493 k = load_addr + eppnt->p_vaddr;
48494 if (BAD_ADDR(k) ||
48495 eppnt->p_filesz > eppnt->p_memsz ||
48496- eppnt->p_memsz > TASK_SIZE ||
48497- TASK_SIZE - eppnt->p_memsz < k) {
48498+ eppnt->p_memsz > pax_task_size ||
48499+ pax_task_size - eppnt->p_memsz < k) {
48500 error = -ENOMEM;
48501 goto out_close;
48502 }
48503@@ -530,6 +551,315 @@ out:
48504 return error;
48505 }
48506
48507+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48508+#ifdef CONFIG_PAX_SOFTMODE
48509+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
48510+{
48511+ unsigned long pax_flags = 0UL;
48512+
48513+#ifdef CONFIG_PAX_PAGEEXEC
48514+ if (elf_phdata->p_flags & PF_PAGEEXEC)
48515+ pax_flags |= MF_PAX_PAGEEXEC;
48516+#endif
48517+
48518+#ifdef CONFIG_PAX_SEGMEXEC
48519+ if (elf_phdata->p_flags & PF_SEGMEXEC)
48520+ pax_flags |= MF_PAX_SEGMEXEC;
48521+#endif
48522+
48523+#ifdef CONFIG_PAX_EMUTRAMP
48524+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48525+ pax_flags |= MF_PAX_EMUTRAMP;
48526+#endif
48527+
48528+#ifdef CONFIG_PAX_MPROTECT
48529+ if (elf_phdata->p_flags & PF_MPROTECT)
48530+ pax_flags |= MF_PAX_MPROTECT;
48531+#endif
48532+
48533+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48534+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
48535+ pax_flags |= MF_PAX_RANDMMAP;
48536+#endif
48537+
48538+ return pax_flags;
48539+}
48540+#endif
48541+
48542+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
48543+{
48544+ unsigned long pax_flags = 0UL;
48545+
48546+#ifdef CONFIG_PAX_PAGEEXEC
48547+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
48548+ pax_flags |= MF_PAX_PAGEEXEC;
48549+#endif
48550+
48551+#ifdef CONFIG_PAX_SEGMEXEC
48552+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
48553+ pax_flags |= MF_PAX_SEGMEXEC;
48554+#endif
48555+
48556+#ifdef CONFIG_PAX_EMUTRAMP
48557+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
48558+ pax_flags |= MF_PAX_EMUTRAMP;
48559+#endif
48560+
48561+#ifdef CONFIG_PAX_MPROTECT
48562+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
48563+ pax_flags |= MF_PAX_MPROTECT;
48564+#endif
48565+
48566+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48567+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
48568+ pax_flags |= MF_PAX_RANDMMAP;
48569+#endif
48570+
48571+ return pax_flags;
48572+}
48573+#endif
48574+
48575+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48576+#ifdef CONFIG_PAX_SOFTMODE
48577+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
48578+{
48579+ unsigned long pax_flags = 0UL;
48580+
48581+#ifdef CONFIG_PAX_PAGEEXEC
48582+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
48583+ pax_flags |= MF_PAX_PAGEEXEC;
48584+#endif
48585+
48586+#ifdef CONFIG_PAX_SEGMEXEC
48587+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
48588+ pax_flags |= MF_PAX_SEGMEXEC;
48589+#endif
48590+
48591+#ifdef CONFIG_PAX_EMUTRAMP
48592+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
48593+ pax_flags |= MF_PAX_EMUTRAMP;
48594+#endif
48595+
48596+#ifdef CONFIG_PAX_MPROTECT
48597+ if (pax_flags_softmode & MF_PAX_MPROTECT)
48598+ pax_flags |= MF_PAX_MPROTECT;
48599+#endif
48600+
48601+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48602+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48603+ pax_flags |= MF_PAX_RANDMMAP;
48604+#endif
48605+
48606+ return pax_flags;
48607+}
48608+#endif
48609+
48610+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48611+{
48612+ unsigned long pax_flags = 0UL;
48613+
48614+#ifdef CONFIG_PAX_PAGEEXEC
48615+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48616+ pax_flags |= MF_PAX_PAGEEXEC;
48617+#endif
48618+
48619+#ifdef CONFIG_PAX_SEGMEXEC
48620+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48621+ pax_flags |= MF_PAX_SEGMEXEC;
48622+#endif
48623+
48624+#ifdef CONFIG_PAX_EMUTRAMP
48625+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48626+ pax_flags |= MF_PAX_EMUTRAMP;
48627+#endif
48628+
48629+#ifdef CONFIG_PAX_MPROTECT
48630+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48631+ pax_flags |= MF_PAX_MPROTECT;
48632+#endif
48633+
48634+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48635+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48636+ pax_flags |= MF_PAX_RANDMMAP;
48637+#endif
48638+
48639+ return pax_flags;
48640+}
48641+#endif
48642+
48643+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48644+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48645+{
48646+ unsigned long pax_flags = 0UL;
48647+
48648+#ifdef CONFIG_PAX_EI_PAX
48649+
48650+#ifdef CONFIG_PAX_PAGEEXEC
48651+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48652+ pax_flags |= MF_PAX_PAGEEXEC;
48653+#endif
48654+
48655+#ifdef CONFIG_PAX_SEGMEXEC
48656+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48657+ pax_flags |= MF_PAX_SEGMEXEC;
48658+#endif
48659+
48660+#ifdef CONFIG_PAX_EMUTRAMP
48661+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48662+ pax_flags |= MF_PAX_EMUTRAMP;
48663+#endif
48664+
48665+#ifdef CONFIG_PAX_MPROTECT
48666+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48667+ pax_flags |= MF_PAX_MPROTECT;
48668+#endif
48669+
48670+#ifdef CONFIG_PAX_ASLR
48671+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48672+ pax_flags |= MF_PAX_RANDMMAP;
48673+#endif
48674+
48675+#else
48676+
48677+#ifdef CONFIG_PAX_PAGEEXEC
48678+ pax_flags |= MF_PAX_PAGEEXEC;
48679+#endif
48680+
48681+#ifdef CONFIG_PAX_SEGMEXEC
48682+ pax_flags |= MF_PAX_SEGMEXEC;
48683+#endif
48684+
48685+#ifdef CONFIG_PAX_MPROTECT
48686+ pax_flags |= MF_PAX_MPROTECT;
48687+#endif
48688+
48689+#ifdef CONFIG_PAX_RANDMMAP
48690+ if (randomize_va_space)
48691+ pax_flags |= MF_PAX_RANDMMAP;
48692+#endif
48693+
48694+#endif
48695+
48696+ return pax_flags;
48697+}
48698+
48699+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48700+{
48701+
48702+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48703+ unsigned long i;
48704+
48705+ for (i = 0UL; i < elf_ex->e_phnum; i++)
48706+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48707+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48708+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48709+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48710+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48711+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48712+ return ~0UL;
48713+
48714+#ifdef CONFIG_PAX_SOFTMODE
48715+ if (pax_softmode)
48716+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
48717+ else
48718+#endif
48719+
48720+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
48721+ break;
48722+ }
48723+#endif
48724+
48725+ return ~0UL;
48726+}
48727+
48728+static unsigned long pax_parse_xattr_pax(struct file * const file)
48729+{
48730+
48731+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48732+ ssize_t xattr_size, i;
48733+ unsigned char xattr_value[5];
48734+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
48735+
48736+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
48737+ if (xattr_size <= 0 || xattr_size > 5)
48738+ return ~0UL;
48739+
48740+ for (i = 0; i < xattr_size; i++)
48741+ switch (xattr_value[i]) {
48742+ default:
48743+ return ~0UL;
48744+
48745+#define parse_flag(option1, option2, flag) \
48746+ case option1: \
48747+ if (pax_flags_hardmode & MF_PAX_##flag) \
48748+ return ~0UL; \
48749+ pax_flags_hardmode |= MF_PAX_##flag; \
48750+ break; \
48751+ case option2: \
48752+ if (pax_flags_softmode & MF_PAX_##flag) \
48753+ return ~0UL; \
48754+ pax_flags_softmode |= MF_PAX_##flag; \
48755+ break;
48756+
48757+ parse_flag('p', 'P', PAGEEXEC);
48758+ parse_flag('e', 'E', EMUTRAMP);
48759+ parse_flag('m', 'M', MPROTECT);
48760+ parse_flag('r', 'R', RANDMMAP);
48761+ parse_flag('s', 'S', SEGMEXEC);
48762+
48763+#undef parse_flag
48764+ }
48765+
48766+ if (pax_flags_hardmode & pax_flags_softmode)
48767+ return ~0UL;
48768+
48769+#ifdef CONFIG_PAX_SOFTMODE
48770+ if (pax_softmode)
48771+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
48772+ else
48773+#endif
48774+
48775+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
48776+#else
48777+ return ~0UL;
48778+#endif
48779+
48780+}
48781+
48782+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
48783+{
48784+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
48785+
48786+ pax_flags = pax_parse_ei_pax(elf_ex);
48787+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
48788+ xattr_pax_flags = pax_parse_xattr_pax(file);
48789+
48790+ if (pt_pax_flags == ~0UL)
48791+ pt_pax_flags = xattr_pax_flags;
48792+ else if (xattr_pax_flags == ~0UL)
48793+ xattr_pax_flags = pt_pax_flags;
48794+ if (pt_pax_flags != xattr_pax_flags)
48795+ return -EINVAL;
48796+ if (pt_pax_flags != ~0UL)
48797+ pax_flags = pt_pax_flags;
48798+
48799+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
48800+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48801+ if ((__supported_pte_mask & _PAGE_NX))
48802+ pax_flags &= ~MF_PAX_SEGMEXEC;
48803+ else
48804+ pax_flags &= ~MF_PAX_PAGEEXEC;
48805+ }
48806+#endif
48807+
48808+ if (0 > pax_check_flags(&pax_flags))
48809+ return -EINVAL;
48810+
48811+ current->mm->pax_flags = pax_flags;
48812+ return 0;
48813+}
48814+#endif
48815+
48816 /*
48817 * These are the functions used to load ELF style executables and shared
48818 * libraries. There is no binary dependent code anywhere else.
48819@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
48820 {
48821 unsigned int random_variable = 0;
48822
48823+#ifdef CONFIG_PAX_RANDUSTACK
48824+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
48825+ return stack_top - current->mm->delta_stack;
48826+#endif
48827+
48828 if ((current->flags & PF_RANDOMIZE) &&
48829 !(current->personality & ADDR_NO_RANDOMIZE)) {
48830 random_variable = get_random_int() & STACK_RND_MASK;
48831@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
48832 unsigned long load_addr = 0, load_bias = 0;
48833 int load_addr_set = 0;
48834 char * elf_interpreter = NULL;
48835- unsigned long error;
48836+ unsigned long error = 0;
48837 struct elf_phdr *elf_ppnt, *elf_phdata;
48838 unsigned long elf_bss, elf_brk;
48839 int retval, i;
48840@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
48841 unsigned long start_code, end_code, start_data, end_data;
48842 unsigned long reloc_func_desc __maybe_unused = 0;
48843 int executable_stack = EXSTACK_DEFAULT;
48844- unsigned long def_flags = 0;
48845 struct pt_regs *regs = current_pt_regs();
48846 struct {
48847 struct elfhdr elf_ex;
48848 struct elfhdr interp_elf_ex;
48849 } *loc;
48850+ unsigned long pax_task_size = TASK_SIZE;
48851
48852 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
48853 if (!loc) {
48854@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
48855 goto out_free_dentry;
48856
48857 /* OK, This is the point of no return */
48858- current->mm->def_flags = def_flags;
48859+
48860+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48861+ current->mm->pax_flags = 0UL;
48862+#endif
48863+
48864+#ifdef CONFIG_PAX_DLRESOLVE
48865+ current->mm->call_dl_resolve = 0UL;
48866+#endif
48867+
48868+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
48869+ current->mm->call_syscall = 0UL;
48870+#endif
48871+
48872+#ifdef CONFIG_PAX_ASLR
48873+ current->mm->delta_mmap = 0UL;
48874+ current->mm->delta_stack = 0UL;
48875+#endif
48876+
48877+ current->mm->def_flags = 0;
48878+
48879+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48880+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
48881+ send_sig(SIGKILL, current, 0);
48882+ goto out_free_dentry;
48883+ }
48884+#endif
48885+
48886+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
48887+ pax_set_initial_flags(bprm);
48888+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
48889+ if (pax_set_initial_flags_func)
48890+ (pax_set_initial_flags_func)(bprm);
48891+#endif
48892+
48893+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48894+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
48895+ current->mm->context.user_cs_limit = PAGE_SIZE;
48896+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
48897+ }
48898+#endif
48899+
48900+#ifdef CONFIG_PAX_SEGMEXEC
48901+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
48902+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
48903+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
48904+ pax_task_size = SEGMEXEC_TASK_SIZE;
48905+ current->mm->def_flags |= VM_NOHUGEPAGE;
48906+ }
48907+#endif
48908+
48909+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
48910+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48911+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
48912+ put_cpu();
48913+ }
48914+#endif
48915
48916 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
48917 may depend on the personality. */
48918 SET_PERSONALITY(loc->elf_ex);
48919+
48920+#ifdef CONFIG_PAX_ASLR
48921+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48922+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
48923+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
48924+ }
48925+#endif
48926+
48927+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48928+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48929+ executable_stack = EXSTACK_DISABLE_X;
48930+ current->personality &= ~READ_IMPLIES_EXEC;
48931+ } else
48932+#endif
48933+
48934 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
48935 current->personality |= READ_IMPLIES_EXEC;
48936
48937@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
48938 #else
48939 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
48940 #endif
48941+
48942+#ifdef CONFIG_PAX_RANDMMAP
48943+ /* PaX: randomize base address at the default exe base if requested */
48944+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
48945+#ifdef CONFIG_SPARC64
48946+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
48947+#else
48948+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
48949+#endif
48950+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
48951+ elf_flags |= MAP_FIXED;
48952+ }
48953+#endif
48954+
48955 }
48956
48957 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
48958@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
48959 * allowed task size. Note that p_filesz must always be
48960 * <= p_memsz so it is only necessary to check p_memsz.
48961 */
48962- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48963- elf_ppnt->p_memsz > TASK_SIZE ||
48964- TASK_SIZE - elf_ppnt->p_memsz < k) {
48965+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48966+ elf_ppnt->p_memsz > pax_task_size ||
48967+ pax_task_size - elf_ppnt->p_memsz < k) {
48968 /* set_brk can never work. Avoid overflows. */
48969 send_sig(SIGKILL, current, 0);
48970 retval = -EINVAL;
48971@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
48972 goto out_free_dentry;
48973 }
48974 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
48975- send_sig(SIGSEGV, current, 0);
48976- retval = -EFAULT; /* Nobody gets to see this, but.. */
48977- goto out_free_dentry;
48978+ /*
48979+ * This bss-zeroing can fail if the ELF
48980+ * file specifies odd protections. So
48981+ * we don't check the return value
48982+ */
48983 }
48984
48985+#ifdef CONFIG_PAX_RANDMMAP
48986+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48987+ unsigned long start, size, flags, vm_flags;
48988+
48989+ start = ELF_PAGEALIGN(elf_brk);
48990+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
48991+ flags = MAP_FIXED | MAP_PRIVATE;
48992+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
48993+
48994+ down_write(&current->mm->mmap_sem);
48995+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
48996+ retval = -ENOMEM;
48997+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
48998+// if (current->personality & ADDR_NO_RANDOMIZE)
48999+// vm_flags |= VM_READ | VM_MAYREAD;
49000+ start = mmap_region(NULL, start, PAGE_ALIGN(size), flags, vm_flags, 0);
49001+ retval = IS_ERR_VALUE(start) ? start : 0;
49002+ }
49003+ up_write(&current->mm->mmap_sem);
49004+ if (retval == 0)
49005+ retval = set_brk(start + size, start + size + PAGE_SIZE);
49006+ if (retval < 0) {
49007+ send_sig(SIGKILL, current, 0);
49008+ goto out_free_dentry;
49009+ }
49010+ }
49011+#endif
49012+
49013 if (elf_interpreter) {
49014- unsigned long interp_map_addr = 0;
49015-
49016 elf_entry = load_elf_interp(&loc->interp_elf_ex,
49017 interpreter,
49018- &interp_map_addr,
49019 load_bias);
49020 if (!IS_ERR((void *)elf_entry)) {
49021 /*
49022@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
49023 * Decide what to dump of a segment, part, all or none.
49024 */
49025 static unsigned long vma_dump_size(struct vm_area_struct *vma,
49026- unsigned long mm_flags)
49027+ unsigned long mm_flags, long signr)
49028 {
49029 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
49030
49031@@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
49032 if (vma->vm_file == NULL)
49033 return 0;
49034
49035- if (FILTER(MAPPED_PRIVATE))
49036+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
49037 goto whole;
49038
49039 /*
49040@@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
49041 {
49042 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
49043 int i = 0;
49044- do
49045+ do {
49046 i += 2;
49047- while (auxv[i - 2] != AT_NULL);
49048+ } while (auxv[i - 2] != AT_NULL);
49049 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
49050 }
49051
49052@@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
49053 }
49054
49055 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
49056- unsigned long mm_flags)
49057+ struct coredump_params *cprm)
49058 {
49059 struct vm_area_struct *vma;
49060 size_t size = 0;
49061
49062 for (vma = first_vma(current, gate_vma); vma != NULL;
49063 vma = next_vma(vma, gate_vma))
49064- size += vma_dump_size(vma, mm_flags);
49065+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49066 return size;
49067 }
49068
49069@@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49070
49071 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
49072
49073- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
49074+ offset += elf_core_vma_data_size(gate_vma, cprm);
49075 offset += elf_core_extra_data_size();
49076 e_shoff = offset;
49077
49078@@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
49079 offset = dataoff;
49080
49081 size += sizeof(*elf);
49082+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49083 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
49084 goto end_coredump;
49085
49086 size += sizeof(*phdr4note);
49087+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49088 if (size > cprm->limit
49089 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
49090 goto end_coredump;
49091@@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49092 phdr.p_offset = offset;
49093 phdr.p_vaddr = vma->vm_start;
49094 phdr.p_paddr = 0;
49095- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
49096+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49097 phdr.p_memsz = vma->vm_end - vma->vm_start;
49098 offset += phdr.p_filesz;
49099 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
49100@@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49101 phdr.p_align = ELF_EXEC_PAGESIZE;
49102
49103 size += sizeof(phdr);
49104+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49105 if (size > cprm->limit
49106 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
49107 goto end_coredump;
49108@@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49109 unsigned long addr;
49110 unsigned long end;
49111
49112- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
49113+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49114
49115 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
49116 struct page *page;
49117@@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49118 page = get_dump_page(addr);
49119 if (page) {
49120 void *kaddr = kmap(page);
49121+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
49122 stop = ((size += PAGE_SIZE) > cprm->limit) ||
49123 !dump_write(cprm->file, kaddr,
49124 PAGE_SIZE);
49125@@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49126
49127 if (e_phnum == PN_XNUM) {
49128 size += sizeof(*shdr4extnum);
49129+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49130 if (size > cprm->limit
49131 || !dump_write(cprm->file, shdr4extnum,
49132 sizeof(*shdr4extnum)))
49133@@ -2219,6 +2670,97 @@ out:
49134
49135 #endif /* CONFIG_ELF_CORE */
49136
49137+#ifdef CONFIG_PAX_MPROTECT
49138+/* PaX: non-PIC ELF libraries need relocations on their executable segments
49139+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
49140+ * we'll remove VM_MAYWRITE for good on RELRO segments.
49141+ *
49142+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
49143+ * basis because we want to allow the common case and not the special ones.
49144+ */
49145+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
49146+{
49147+ struct elfhdr elf_h;
49148+ struct elf_phdr elf_p;
49149+ unsigned long i;
49150+ unsigned long oldflags;
49151+ bool is_textrel_rw, is_textrel_rx, is_relro;
49152+
49153+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
49154+ return;
49155+
49156+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
49157+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
49158+
49159+#ifdef CONFIG_PAX_ELFRELOCS
49160+ /* possible TEXTREL */
49161+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
49162+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
49163+#else
49164+ is_textrel_rw = false;
49165+ is_textrel_rx = false;
49166+#endif
49167+
49168+ /* possible RELRO */
49169+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
49170+
49171+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
49172+ return;
49173+
49174+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
49175+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
49176+
49177+#ifdef CONFIG_PAX_ETEXECRELOCS
49178+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49179+#else
49180+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
49181+#endif
49182+
49183+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49184+ !elf_check_arch(&elf_h) ||
49185+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
49186+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
49187+ return;
49188+
49189+ for (i = 0UL; i < elf_h.e_phnum; i++) {
49190+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
49191+ return;
49192+ switch (elf_p.p_type) {
49193+ case PT_DYNAMIC:
49194+ if (!is_textrel_rw && !is_textrel_rx)
49195+ continue;
49196+ i = 0UL;
49197+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
49198+ elf_dyn dyn;
49199+
49200+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
49201+ return;
49202+ if (dyn.d_tag == DT_NULL)
49203+ return;
49204+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
49205+ gr_log_textrel(vma);
49206+ if (is_textrel_rw)
49207+ vma->vm_flags |= VM_MAYWRITE;
49208+ else
49209+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
49210+ vma->vm_flags &= ~VM_MAYWRITE;
49211+ return;
49212+ }
49213+ i++;
49214+ }
49215+ return;
49216+
49217+ case PT_GNU_RELRO:
49218+ if (!is_relro)
49219+ continue;
49220+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
49221+ vma->vm_flags &= ~VM_MAYWRITE;
49222+ return;
49223+ }
49224+ }
49225+}
49226+#endif
49227+
49228 static int __init init_elf_binfmt(void)
49229 {
49230 register_binfmt(&elf_format);
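The binfmt_elf block above merges PaX flags from three sources — the legacy EI_PAX ident bytes, a PT_PAX_FLAGS program header, and the user.pax.flags extended attribute — and refuses the exec when the explicit sources disagree. In the xattr encoding, each feature gets a letter pair (p/P PAGEEXEC, e/E EMUTRAMP, m/M MPROTECT, r/R RANDMMAP, s/S SEGMEXEC); per the parser above, the lowercase set is consulted on hard-mode kernels (a letter present switches the feature off) and the uppercase set under CONFIG_PAX_SOFTMODE (a letter present switches it on), with any conflicting or unknown character making the parser return ~0UL so the other sources decide. A hypothetical userspace marking that disables MPROTECT for one binary on a hard-mode kernel:

#include <sys/xattr.h>

int mark_no_mprotect(const char *path)
{
	/* lowercase 'm' = opt out of MPROTECT under hard mode */
	return setxattr(path, "user.pax.flags", "m", 1, 0);
}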
49231diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
49232index b563719..3868998 100644
49233--- a/fs/binfmt_flat.c
49234+++ b/fs/binfmt_flat.c
49235@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
49236 realdatastart = (unsigned long) -ENOMEM;
49237 printk("Unable to allocate RAM for process data, errno %d\n",
49238 (int)-realdatastart);
49239+ down_write(&current->mm->mmap_sem);
49240 vm_munmap(textpos, text_len);
49241+ up_write(&current->mm->mmap_sem);
49242 ret = realdatastart;
49243 goto err;
49244 }
49245@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49246 }
49247 if (IS_ERR_VALUE(result)) {
49248 printk("Unable to read data+bss, errno %d\n", (int)-result);
49249+ down_write(&current->mm->mmap_sem);
49250 vm_munmap(textpos, text_len);
49251 vm_munmap(realdatastart, len);
49252+ up_write(&current->mm->mmap_sem);
49253 ret = result;
49254 goto err;
49255 }
49256@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49257 }
49258 if (IS_ERR_VALUE(result)) {
49259 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
49260+ down_write(&current->mm->mmap_sem);
49261 vm_munmap(textpos, text_len + data_len + extra +
49262 MAX_SHARED_LIBS * sizeof(unsigned long));
49263+ up_write(&current->mm->mmap_sem);
49264 ret = result;
49265 goto err;
49266 }
49267diff --git a/fs/bio.c b/fs/bio.c
49268index b96fc6c..431d628 100644
49269--- a/fs/bio.c
49270+++ b/fs/bio.c
49271@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
49272 /*
49273 * Overflow, abort
49274 */
49275- if (end < start)
49276+ if (end < start || end - start > INT_MAX - nr_pages)
49277 return ERR_PTR(-EINVAL);
49278
49279 nr_pages += end - start;
49280@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
49281 /*
49282 * Overflow, abort
49283 */
49284- if (end < start)
49285+ if (end < start || end - start > INT_MAX - nr_pages)
49286 return ERR_PTR(-EINVAL);
49287
49288 nr_pages += end - start;
49289@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
49290 const int read = bio_data_dir(bio) == READ;
49291 struct bio_map_data *bmd = bio->bi_private;
49292 int i;
49293- char *p = bmd->sgvecs[0].iov_base;
49294+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
49295
49296 __bio_for_each_segment(bvec, bio, i, 0) {
49297 char *addr = page_address(bvec->bv_page);
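
The two bio.c hunks harden the page-count accumulation: nr_pages is a plain int, so an attacker-supplied iovec could previously wrap it even when each range passed the end < start test. A small standalone sketch of the added guard (function name is illustrative):

#include <limits.h>
#include <stdio.h>

/* Accumulates (end - start) pages into *total, or returns -1 if the
 * addition would overflow a signed int -- the same shape as the
 * "end - start > INT_MAX - nr_pages" check added above. *total is
 * assumed non-negative. */
static int add_pages_checked(int *total, unsigned long start, unsigned long end)
{
	if (end < start || end - start > (unsigned long)(INT_MAX - *total))
		return -1;
	*total += (int)(end - start);
	return 0;
}

int main(void)
{
	int nr_pages = 0;

	printf("%d\n", add_pages_checked(&nr_pages, 0, 100));        /* 0  */
	printf("%d\n", add_pages_checked(&nr_pages, 0, 1UL << 40));  /* -1 */
	printf("nr_pages=%d\n", nr_pages);                           /* 100 */
	return 0;
}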
49298diff --git a/fs/block_dev.c b/fs/block_dev.c
49299index 883dc49..f27794a 100644
49300--- a/fs/block_dev.c
49301+++ b/fs/block_dev.c
49302@@ -652,7 +652,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
49303 else if (bdev->bd_contains == bdev)
49304 return true; /* is a whole device which isn't held */
49305
49306- else if (whole->bd_holder == bd_may_claim)
49307+ else if (whole->bd_holder == (void *)bd_may_claim)
49308 return true; /* is a partition of a device that is being partitioned */
49309 else if (whole->bd_holder != NULL)
49310 return false; /* is a partition of a held device */
49311diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
49312index ce1c169..1ef484f 100644
49313--- a/fs/btrfs/ctree.c
49314+++ b/fs/btrfs/ctree.c
49315@@ -1036,9 +1036,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
49316 free_extent_buffer(buf);
49317 add_root_to_dirty_list(root);
49318 } else {
49319- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
49320- parent_start = parent->start;
49321- else
49322+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
49323+ if (parent)
49324+ parent_start = parent->start;
49325+ else
49326+ parent_start = 0;
49327+ } else
49328 parent_start = 0;
49329
49330 WARN_ON(trans->transid != btrfs_header_generation(parent));
49331diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
49332index 7c4e6cc..27bd5c2 100644
49333--- a/fs/btrfs/inode.c
49334+++ b/fs/btrfs/inode.c
49335@@ -7314,7 +7314,7 @@ fail:
49336 return -ENOMEM;
49337 }
49338
49339-static int btrfs_getattr(struct vfsmount *mnt,
49340+int btrfs_getattr(struct vfsmount *mnt,
49341 struct dentry *dentry, struct kstat *stat)
49342 {
49343 struct inode *inode = dentry->d_inode;
49344@@ -7328,6 +7328,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
49345 return 0;
49346 }
49347
49348+EXPORT_SYMBOL(btrfs_getattr);
49349+
49350+dev_t get_btrfs_dev_from_inode(struct inode *inode)
49351+{
49352+ return BTRFS_I(inode)->root->anon_dev;
49353+}
49354+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
49355+
49356 /*
49357 * If a file is moved, it will inherit the cow and compression flags of the new
49358 * directory.
49359diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
49360index 338f259..b657640 100644
49361--- a/fs/btrfs/ioctl.c
49362+++ b/fs/btrfs/ioctl.c
49363@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49364 for (i = 0; i < num_types; i++) {
49365 struct btrfs_space_info *tmp;
49366
49367+ /* Don't copy in more than we allocated */
49368 if (!slot_count)
49369 break;
49370
49371+ slot_count--;
49372+
49373 info = NULL;
49374 rcu_read_lock();
49375 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
49376@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49377 memcpy(dest, &space, sizeof(space));
49378 dest++;
49379 space_args.total_spaces++;
49380- slot_count--;
49381 }
49382- if (!slot_count)
49383- break;
49384 }
49385 up_read(&info->groups_sem);
49386 }
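
The ioctl hunk moves the slot budget decrement ahead of the copy, so the loop can never emit more entries than were allocated for the user buffer. A toy version of the reserve-before-write pattern (names are hypothetical):

#include <stdio.h>

/* Copies at most 'slots' entries from src into dest; the budget is
 * consumed up front, mirroring the relocated slot_count-- above. */
static size_t copy_bounded(int *dest, size_t slots, const int *src, size_t nsrc)
{
	size_t i, out = 0;

	for (i = 0; i < nsrc; i++) {
		if (!slots)
			break;
		slots--;               /* reserve the slot before writing it */
		dest[out++] = src[i];
	}
	return out;
}

int main(void)
{
	int src[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, dest[4];

	printf("copied %zu entries\n", copy_bounded(dest, 4, src, 10));
	return 0;
}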
49387diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
49388index 300e09a..9fe4539 100644
49389--- a/fs/btrfs/relocation.c
49390+++ b/fs/btrfs/relocation.c
49391@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
49392 }
49393 spin_unlock(&rc->reloc_root_tree.lock);
49394
49395- BUG_ON((struct btrfs_root *)node->data != root);
49396+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
49397
49398 if (!del) {
49399 spin_lock(&rc->reloc_root_tree.lock);
49400diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
49401index d8982e9..29a85fa 100644
49402--- a/fs/btrfs/super.c
49403+++ b/fs/btrfs/super.c
49404@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
49405 function, line, errstr);
49406 return;
49407 }
49408- ACCESS_ONCE(trans->transaction->aborted) = errno;
49409+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
49410 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
49411 }
49412 /*
49413diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
49414index 622f469..e8d2d55 100644
49415--- a/fs/cachefiles/bind.c
49416+++ b/fs/cachefiles/bind.c
49417@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
49418 args);
49419
49420 /* start by checking things over */
49421- ASSERT(cache->fstop_percent >= 0 &&
49422- cache->fstop_percent < cache->fcull_percent &&
49423+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
49424 cache->fcull_percent < cache->frun_percent &&
49425 cache->frun_percent < 100);
49426
49427- ASSERT(cache->bstop_percent >= 0 &&
49428- cache->bstop_percent < cache->bcull_percent &&
49429+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
49430 cache->bcull_percent < cache->brun_percent &&
49431 cache->brun_percent < 100);
49432
49433diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
49434index 0a1467b..6a53245 100644
49435--- a/fs/cachefiles/daemon.c
49436+++ b/fs/cachefiles/daemon.c
49437@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
49438 if (n > buflen)
49439 return -EMSGSIZE;
49440
49441- if (copy_to_user(_buffer, buffer, n) != 0)
49442+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
49443 return -EFAULT;
49444
49445 return n;
49446@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
49447 if (test_bit(CACHEFILES_DEAD, &cache->flags))
49448 return -EIO;
49449
49450- if (datalen < 0 || datalen > PAGE_SIZE - 1)
49451+ if (datalen > PAGE_SIZE - 1)
49452 return -EOPNOTSUPP;
49453
49454 /* drag the command string into the kernel so we can parse it */
49455@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
49456 if (args[0] != '%' || args[1] != '\0')
49457 return -EINVAL;
49458
49459- if (fstop < 0 || fstop >= cache->fcull_percent)
49460+ if (fstop >= cache->fcull_percent)
49461 return cachefiles_daemon_range_error(cache, args);
49462
49463 cache->fstop_percent = fstop;
49464@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
49465 if (args[0] != '%' || args[1] != '\0')
49466 return -EINVAL;
49467
49468- if (bstop < 0 || bstop >= cache->bcull_percent)
49469+ if (bstop >= cache->bcull_percent)
49470 return cachefiles_daemon_range_error(cache, args);
49471
49472 cache->bstop_percent = bstop;
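
The cachefiles hunks above drop `>= 0` tests on unsigned percent fields, which could never fire, and instead bound the reply length against the stack buffer before copy_to_user(). A compact userspace demonstration of both points (values are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int fstop = (unsigned int)-5;   /* wraps to 4294967291 */
	char buffer[150], out[4096];
	size_t n;

	/* An unsigned value is never < 0, so tests like the removed
	 * "fstop < 0" are dead code that only hides the real issue. */
	if (fstop < 0)
		puts("unreachable");
	else
		printf("fstop = %u\n", fstop);

	/* The effective fix is an upper bound against the buffer the
	 * data actually lives in, as in the added n > sizeof(buffer). */
	n = (size_t)snprintf(buffer, sizeof(buffer), "cull=%c frun=%x", 'y', 80);
	if (n > sizeof(buffer))
		return 1;                /* refuse rather than overread */
	memcpy(out, buffer, n);
	printf("copied %zu bytes\n", n);
	return 0;
}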
49473diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
49474index 4938251..7e01445 100644
49475--- a/fs/cachefiles/internal.h
49476+++ b/fs/cachefiles/internal.h
49477@@ -59,7 +59,7 @@ struct cachefiles_cache {
49478 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
49479 struct rb_root active_nodes; /* active nodes (can't be culled) */
49480 rwlock_t active_lock; /* lock for active_nodes */
49481- atomic_t gravecounter; /* graveyard uniquifier */
49482+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
49483 unsigned frun_percent; /* when to stop culling (% files) */
49484 unsigned fcull_percent; /* when to start culling (% files) */
49485 unsigned fstop_percent; /* when to stop allocating (% files) */
49486@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
49487 * proc.c
49488 */
49489 #ifdef CONFIG_CACHEFILES_HISTOGRAM
49490-extern atomic_t cachefiles_lookup_histogram[HZ];
49491-extern atomic_t cachefiles_mkdir_histogram[HZ];
49492-extern atomic_t cachefiles_create_histogram[HZ];
49493+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49494+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49495+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
49496
49497 extern int __init cachefiles_proc_init(void);
49498 extern void cachefiles_proc_cleanup(void);
49499 static inline
49500-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
49501+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
49502 {
49503 unsigned long jif = jiffies - start_jif;
49504 if (jif >= HZ)
49505 jif = HZ - 1;
49506- atomic_inc(&histogram[jif]);
49507+ atomic_inc_unchecked(&histogram[jif]);
49508 }
49509
49510 #else
49511diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
49512index 8c01c5fc..15f982e 100644
49513--- a/fs/cachefiles/namei.c
49514+++ b/fs/cachefiles/namei.c
49515@@ -317,7 +317,7 @@ try_again:
49516 /* first step is to make up a grave dentry in the graveyard */
49517 sprintf(nbuffer, "%08x%08x",
49518 (uint32_t) get_seconds(),
49519- (uint32_t) atomic_inc_return(&cache->gravecounter));
49520+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
49521
49522 /* do the multiway lock magic */
49523 trap = lock_rename(cache->graveyard, dir);
49524diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
49525index eccd339..4c1d995 100644
49526--- a/fs/cachefiles/proc.c
49527+++ b/fs/cachefiles/proc.c
49528@@ -14,9 +14,9 @@
49529 #include <linux/seq_file.h>
49530 #include "internal.h"
49531
49532-atomic_t cachefiles_lookup_histogram[HZ];
49533-atomic_t cachefiles_mkdir_histogram[HZ];
49534-atomic_t cachefiles_create_histogram[HZ];
49535+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49536+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49537+atomic_unchecked_t cachefiles_create_histogram[HZ];
49538
49539 /*
49540 * display the latency histogram
49541@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
49542 return 0;
49543 default:
49544 index = (unsigned long) v - 3;
49545- x = atomic_read(&cachefiles_lookup_histogram[index]);
49546- y = atomic_read(&cachefiles_mkdir_histogram[index]);
49547- z = atomic_read(&cachefiles_create_histogram[index]);
49548+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
49549+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
49550+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
49551 if (x == 0 && y == 0 && z == 0)
49552 return 0;
49553
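
The atomic_unchecked_t conversions seen here and throughout the patch are PaX's REFCOUNT opt-out: counters that are pure statistics may wrap without harm, so they move to a type whose operations skip the overflow trap. A rough userspace analogue of the two flavours (atomic_unchecked_t itself is defined elsewhere in this patch; this is only a sketch of the idea):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_like_t;

/* checked flavour: stand-in for PAX_REFCOUNT's overflow-trapping
 * atomic_inc(); the real patch detects the wrap inside the inc and
 * reports it instead of letting a refcount silently go negative */
static void checked_inc(atomic_like_t *v)
{
	if (v->counter == INT_MAX)
		abort();
	v->counter++;
}

/* unchecked flavour: wrapping is harmless for pure statistics, which
 * is what atomic_unchecked_t marks -- the histogram and grave-counter
 * fields above fall in this bucket */
static void unchecked_inc(atomic_like_t *v)
{
	v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
	atomic_like_t ref = { 0 }, stat = { INT_MAX };

	checked_inc(&ref);      /* fine; would abort only at INT_MAX */
	unchecked_inc(&stat);   /* wraps, and that is acceptable here */
	printf("ref=%d stat=%d\n", ref.counter, stat.counter);
	return 0;
}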
49554diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
49555index 4809922..aab2c39 100644
49556--- a/fs/cachefiles/rdwr.c
49557+++ b/fs/cachefiles/rdwr.c
49558@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
49559 old_fs = get_fs();
49560 set_fs(KERNEL_DS);
49561 ret = file->f_op->write(
49562- file, (const void __user *) data, len, &pos);
49563+ file, (const void __force_user *) data, len, &pos);
49564 set_fs(old_fs);
49565 kunmap(page);
49566 if (ret != len)
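
The rdwr.c hunk only changes an annotation: with set_fs(KERNEL_DS) in force, handing a kernel buffer to an f_op->write typed for user pointers is intentional, and the __force_user cast (grsecurity's spelling of `__force __user`) tells sparse so. A minimal sketch of the pattern on the 3.8-era API (set_fs() has since been removed from mainline):

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Writes a kernel buffer through a write method that expects a user
 * pointer. set_fs(KERNEL_DS) widens the access_ok() window so the
 * kernel address passes the user-pointer checks; the cast documents
 * the deliberate address-space violation for sparse. */
static ssize_t write_kernel_buf(struct file *file, const char *data,
				size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);
	ret = file->f_op->write(file, (const char __force __user *)data,
				len, pos);
	set_fs(old_fs);
	return ret;
}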
49567diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
49568index 8c1aabe..bbf856a 100644
49569--- a/fs/ceph/dir.c
49570+++ b/fs/ceph/dir.c
49571@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
49572 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
49573 struct ceph_mds_client *mdsc = fsc->mdsc;
49574 unsigned frag = fpos_frag(filp->f_pos);
49575- int off = fpos_off(filp->f_pos);
49576+ unsigned int off = fpos_off(filp->f_pos);
49577 int err;
49578 u32 ftype;
49579 struct ceph_mds_reply_info_parsed *rinfo;
49580diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
49581index d9ea6ed..1e6c8ac 100644
49582--- a/fs/cifs/cifs_debug.c
49583+++ b/fs/cifs/cifs_debug.c
49584@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49585
49586 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
49587 #ifdef CONFIG_CIFS_STATS2
49588- atomic_set(&totBufAllocCount, 0);
49589- atomic_set(&totSmBufAllocCount, 0);
49590+ atomic_set_unchecked(&totBufAllocCount, 0);
49591+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49592 #endif /* CONFIG_CIFS_STATS2 */
49593 spin_lock(&cifs_tcp_ses_lock);
49594 list_for_each(tmp1, &cifs_tcp_ses_list) {
49595@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49596 tcon = list_entry(tmp3,
49597 struct cifs_tcon,
49598 tcon_list);
49599- atomic_set(&tcon->num_smbs_sent, 0);
49600+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
49601 if (server->ops->clear_stats)
49602 server->ops->clear_stats(tcon);
49603 }
49604@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49605 smBufAllocCount.counter, cifs_min_small);
49606 #ifdef CONFIG_CIFS_STATS2
49607 seq_printf(m, "Total Large %d Small %d Allocations\n",
49608- atomic_read(&totBufAllocCount),
49609- atomic_read(&totSmBufAllocCount));
49610+ atomic_read_unchecked(&totBufAllocCount),
49611+ atomic_read_unchecked(&totSmBufAllocCount));
49612 #endif /* CONFIG_CIFS_STATS2 */
49613
49614 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
49615@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49616 if (tcon->need_reconnect)
49617 seq_puts(m, "\tDISCONNECTED ");
49618 seq_printf(m, "\nSMBs: %d",
49619- atomic_read(&tcon->num_smbs_sent));
49620+ atomic_read_unchecked(&tcon->num_smbs_sent));
49621 if (server->ops->print_stats)
49622 server->ops->print_stats(m, tcon);
49623 }
49624diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
49625index b9db388..9a73d6d 100644
49626--- a/fs/cifs/cifsfs.c
49627+++ b/fs/cifs/cifsfs.c
49628@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
49629 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
49630 cifs_req_cachep = kmem_cache_create("cifs_request",
49631 CIFSMaxBufSize + max_hdr_size, 0,
49632- SLAB_HWCACHE_ALIGN, NULL);
49633+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49634 if (cifs_req_cachep == NULL)
49635 return -ENOMEM;
49636
49637@@ -1053,7 +1053,7 @@ cifs_init_request_bufs(void)
49638 efficient to alloc 1 per page off the slab compared to 17K (5page)
49639 alloc of large cifs buffers even when page debugging is on */
49640 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49641- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49642+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49643 NULL);
49644 if (cifs_sm_req_cachep == NULL) {
49645 mempool_destroy(cifs_req_poolp);
49646@@ -1138,8 +1138,8 @@ init_cifs(void)
49647 atomic_set(&bufAllocCount, 0);
49648 atomic_set(&smBufAllocCount, 0);
49649 #ifdef CONFIG_CIFS_STATS2
49650- atomic_set(&totBufAllocCount, 0);
49651- atomic_set(&totSmBufAllocCount, 0);
49652+ atomic_set_unchecked(&totBufAllocCount, 0);
49653+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49654 #endif /* CONFIG_CIFS_STATS2 */
49655
49656 atomic_set(&midCount, 0);
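
SLAB_USERCOPY, added to the cifs request caches above, is the PAX_USERCOPY whitelist flag: only objects from caches that opt in may be copied to or from userspace in full. A sketch of creating such a cache (the flag comes from this patch, not from mainline 3.8; cache name and size are illustrative):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;

/* Buffers from this cache legitimately travel to userspace, so the
 * cache is whitelisted; unflagged caches would trip the PAX_USERCOPY
 * size/type checks on copy_{to,from}_user(). */
static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_rq", 4096, 0,
					   SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
					   NULL);
	return example_cachep ? 0 : -ENOMEM;
}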
49657diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49658index e6899ce..d6b2920 100644
49659--- a/fs/cifs/cifsglob.h
49660+++ b/fs/cifs/cifsglob.h
49661@@ -751,35 +751,35 @@ struct cifs_tcon {
49662 __u16 Flags; /* optional support bits */
49663 enum statusEnum tidStatus;
49664 #ifdef CONFIG_CIFS_STATS
49665- atomic_t num_smbs_sent;
49666+ atomic_unchecked_t num_smbs_sent;
49667 union {
49668 struct {
49669- atomic_t num_writes;
49670- atomic_t num_reads;
49671- atomic_t num_flushes;
49672- atomic_t num_oplock_brks;
49673- atomic_t num_opens;
49674- atomic_t num_closes;
49675- atomic_t num_deletes;
49676- atomic_t num_mkdirs;
49677- atomic_t num_posixopens;
49678- atomic_t num_posixmkdirs;
49679- atomic_t num_rmdirs;
49680- atomic_t num_renames;
49681- atomic_t num_t2renames;
49682- atomic_t num_ffirst;
49683- atomic_t num_fnext;
49684- atomic_t num_fclose;
49685- atomic_t num_hardlinks;
49686- atomic_t num_symlinks;
49687- atomic_t num_locks;
49688- atomic_t num_acl_get;
49689- atomic_t num_acl_set;
49690+ atomic_unchecked_t num_writes;
49691+ atomic_unchecked_t num_reads;
49692+ atomic_unchecked_t num_flushes;
49693+ atomic_unchecked_t num_oplock_brks;
49694+ atomic_unchecked_t num_opens;
49695+ atomic_unchecked_t num_closes;
49696+ atomic_unchecked_t num_deletes;
49697+ atomic_unchecked_t num_mkdirs;
49698+ atomic_unchecked_t num_posixopens;
49699+ atomic_unchecked_t num_posixmkdirs;
49700+ atomic_unchecked_t num_rmdirs;
49701+ atomic_unchecked_t num_renames;
49702+ atomic_unchecked_t num_t2renames;
49703+ atomic_unchecked_t num_ffirst;
49704+ atomic_unchecked_t num_fnext;
49705+ atomic_unchecked_t num_fclose;
49706+ atomic_unchecked_t num_hardlinks;
49707+ atomic_unchecked_t num_symlinks;
49708+ atomic_unchecked_t num_locks;
49709+ atomic_unchecked_t num_acl_get;
49710+ atomic_unchecked_t num_acl_set;
49711 } cifs_stats;
49712 #ifdef CONFIG_CIFS_SMB2
49713 struct {
49714- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49715- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49716+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49717+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49718 } smb2_stats;
49719 #endif /* CONFIG_CIFS_SMB2 */
49720 } stats;
49721@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49722 }
49723
49724 #ifdef CONFIG_CIFS_STATS
49725-#define cifs_stats_inc atomic_inc
49726+#define cifs_stats_inc atomic_inc_unchecked
49727
49728 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49729 unsigned int bytes)
49730@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49731 /* Various Debug counters */
49732 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49733 #ifdef CONFIG_CIFS_STATS2
49734-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
49735-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
49736+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
49737+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
49738 #endif
49739 GLOBAL_EXTERN atomic_t smBufAllocCount;
49740 GLOBAL_EXTERN atomic_t midCount;
49741diff --git a/fs/cifs/link.c b/fs/cifs/link.c
49742index 51dc2fb..1e12a33 100644
49743--- a/fs/cifs/link.c
49744+++ b/fs/cifs/link.c
49745@@ -616,7 +616,7 @@ symlink_exit:
49746
49747 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
49748 {
49749- char *p = nd_get_link(nd);
49750+ const char *p = nd_get_link(nd);
49751 if (!IS_ERR(p))
49752 kfree(p);
49753 }
49754diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
49755index 3a00c0d..42d901c 100644
49756--- a/fs/cifs/misc.c
49757+++ b/fs/cifs/misc.c
49758@@ -169,7 +169,7 @@ cifs_buf_get(void)
49759 memset(ret_buf, 0, buf_size + 3);
49760 atomic_inc(&bufAllocCount);
49761 #ifdef CONFIG_CIFS_STATS2
49762- atomic_inc(&totBufAllocCount);
49763+ atomic_inc_unchecked(&totBufAllocCount);
49764 #endif /* CONFIG_CIFS_STATS2 */
49765 }
49766
49767@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
49768 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
49769 atomic_inc(&smBufAllocCount);
49770 #ifdef CONFIG_CIFS_STATS2
49771- atomic_inc(&totSmBufAllocCount);
49772+ atomic_inc_unchecked(&totSmBufAllocCount);
49773 #endif /* CONFIG_CIFS_STATS2 */
49774
49775 }
49776diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
49777index 47bc5a8..10decbe 100644
49778--- a/fs/cifs/smb1ops.c
49779+++ b/fs/cifs/smb1ops.c
49780@@ -586,27 +586,27 @@ static void
49781 cifs_clear_stats(struct cifs_tcon *tcon)
49782 {
49783 #ifdef CONFIG_CIFS_STATS
49784- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
49785- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
49786- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
49787- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49788- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
49789- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
49790- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49791- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
49792- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
49793- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
49794- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
49795- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
49796- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
49797- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
49798- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
49799- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
49800- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
49801- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
49802- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
49803- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
49804- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
49805+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
49806+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
49807+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
49808+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49809+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
49810+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
49811+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49812+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
49813+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
49814+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
49815+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
49816+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
49817+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
49818+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
49819+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
49820+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
49821+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
49822+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
49823+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
49824+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
49825+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
49826 #endif
49827 }
49828
49829@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49830 {
49831 #ifdef CONFIG_CIFS_STATS
49832 seq_printf(m, " Oplocks breaks: %d",
49833- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
49834+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
49835 seq_printf(m, "\nReads: %d Bytes: %llu",
49836- atomic_read(&tcon->stats.cifs_stats.num_reads),
49837+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
49838 (long long)(tcon->bytes_read));
49839 seq_printf(m, "\nWrites: %d Bytes: %llu",
49840- atomic_read(&tcon->stats.cifs_stats.num_writes),
49841+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
49842 (long long)(tcon->bytes_written));
49843 seq_printf(m, "\nFlushes: %d",
49844- atomic_read(&tcon->stats.cifs_stats.num_flushes));
49845+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
49846 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
49847- atomic_read(&tcon->stats.cifs_stats.num_locks),
49848- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
49849- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
49850+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
49851+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
49852+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
49853 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
49854- atomic_read(&tcon->stats.cifs_stats.num_opens),
49855- atomic_read(&tcon->stats.cifs_stats.num_closes),
49856- atomic_read(&tcon->stats.cifs_stats.num_deletes));
49857+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
49858+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
49859+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
49860 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
49861- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
49862- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
49863+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
49864+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
49865 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
49866- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
49867- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
49868+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
49869+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
49870 seq_printf(m, "\nRenames: %d T2 Renames %d",
49871- atomic_read(&tcon->stats.cifs_stats.num_renames),
49872- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
49873+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
49874+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
49875 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
49876- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
49877- atomic_read(&tcon->stats.cifs_stats.num_fnext),
49878- atomic_read(&tcon->stats.cifs_stats.num_fclose));
49879+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
49880+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
49881+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
49882 #endif
49883 }
49884
49885diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
49886index bceffe7..cd1ae59 100644
49887--- a/fs/cifs/smb2ops.c
49888+++ b/fs/cifs/smb2ops.c
49889@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
49890 #ifdef CONFIG_CIFS_STATS
49891 int i;
49892 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
49893- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49894- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49895+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49896+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49897 }
49898 #endif
49899 }
49900@@ -284,66 +284,66 @@ static void
49901 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49902 {
49903 #ifdef CONFIG_CIFS_STATS
49904- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49905- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49906+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49907+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49908 seq_printf(m, "\nNegotiates: %d sent %d failed",
49909- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
49910- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
49911+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
49912+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
49913 seq_printf(m, "\nSessionSetups: %d sent %d failed",
49914- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
49915- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
49916+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
49917+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
49918 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
49919 seq_printf(m, "\nLogoffs: %d sent %d failed",
49920- atomic_read(&sent[SMB2_LOGOFF_HE]),
49921- atomic_read(&failed[SMB2_LOGOFF_HE]));
49922+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
49923+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
49924 seq_printf(m, "\nTreeConnects: %d sent %d failed",
49925- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
49926- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
49927+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
49928+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
49929 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
49930- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
49931- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
49932+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
49933+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
49934 seq_printf(m, "\nCreates: %d sent %d failed",
49935- atomic_read(&sent[SMB2_CREATE_HE]),
49936- atomic_read(&failed[SMB2_CREATE_HE]));
49937+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
49938+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
49939 seq_printf(m, "\nCloses: %d sent %d failed",
49940- atomic_read(&sent[SMB2_CLOSE_HE]),
49941- atomic_read(&failed[SMB2_CLOSE_HE]));
49942+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
49943+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
49944 seq_printf(m, "\nFlushes: %d sent %d failed",
49945- atomic_read(&sent[SMB2_FLUSH_HE]),
49946- atomic_read(&failed[SMB2_FLUSH_HE]));
49947+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
49948+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
49949 seq_printf(m, "\nReads: %d sent %d failed",
49950- atomic_read(&sent[SMB2_READ_HE]),
49951- atomic_read(&failed[SMB2_READ_HE]));
49952+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
49953+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
49954 seq_printf(m, "\nWrites: %d sent %d failed",
49955- atomic_read(&sent[SMB2_WRITE_HE]),
49956- atomic_read(&failed[SMB2_WRITE_HE]));
49957+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
49958+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
49959 seq_printf(m, "\nLocks: %d sent %d failed",
49960- atomic_read(&sent[SMB2_LOCK_HE]),
49961- atomic_read(&failed[SMB2_LOCK_HE]));
49962+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
49963+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
49964 seq_printf(m, "\nIOCTLs: %d sent %d failed",
49965- atomic_read(&sent[SMB2_IOCTL_HE]),
49966- atomic_read(&failed[SMB2_IOCTL_HE]));
49967+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
49968+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
49969 seq_printf(m, "\nCancels: %d sent %d failed",
49970- atomic_read(&sent[SMB2_CANCEL_HE]),
49971- atomic_read(&failed[SMB2_CANCEL_HE]));
49972+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
49973+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
49974 seq_printf(m, "\nEchos: %d sent %d failed",
49975- atomic_read(&sent[SMB2_ECHO_HE]),
49976- atomic_read(&failed[SMB2_ECHO_HE]));
49977+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
49978+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
49979 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
49980- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
49981- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
49982+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
49983+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
49984 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
49985- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
49986- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
49987+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
49988+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
49989 seq_printf(m, "\nQueryInfos: %d sent %d failed",
49990- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
49991- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
49992+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
49993+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
49994 seq_printf(m, "\nSetInfos: %d sent %d failed",
49995- atomic_read(&sent[SMB2_SET_INFO_HE]),
49996- atomic_read(&failed[SMB2_SET_INFO_HE]));
49997+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
49998+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
49999 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
50000- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
50001- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
50002+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
50003+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
50004 #endif
50005 }
50006
50007diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
50008index 41d9d07..dbb4772 100644
50009--- a/fs/cifs/smb2pdu.c
50010+++ b/fs/cifs/smb2pdu.c
50011@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
50012 default:
50013 cERROR(1, "info level %u isn't supported",
50014 srch_inf->info_level);
50015- rc = -EINVAL;
50016- goto qdir_exit;
50017+ return -EINVAL;
50018 }
50019
50020 req->FileIndex = cpu_to_le32(index);
50021diff --git a/fs/coda/cache.c b/fs/coda/cache.c
50022index 958ae0e..505c9d0 100644
50023--- a/fs/coda/cache.c
50024+++ b/fs/coda/cache.c
50025@@ -24,7 +24,7 @@
50026 #include "coda_linux.h"
50027 #include "coda_cache.h"
50028
50029-static atomic_t permission_epoch = ATOMIC_INIT(0);
50030+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
50031
50032 /* replace or extend an acl cache hit */
50033 void coda_cache_enter(struct inode *inode, int mask)
50034@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
50035 struct coda_inode_info *cii = ITOC(inode);
50036
50037 spin_lock(&cii->c_lock);
50038- cii->c_cached_epoch = atomic_read(&permission_epoch);
50039+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
50040 if (cii->c_uid != current_fsuid()) {
50041 cii->c_uid = current_fsuid();
50042 cii->c_cached_perm = mask;
50043@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
50044 {
50045 struct coda_inode_info *cii = ITOC(inode);
50046 spin_lock(&cii->c_lock);
50047- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
50048+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
50049 spin_unlock(&cii->c_lock);
50050 }
50051
50052 /* remove all acl caches */
50053 void coda_cache_clear_all(struct super_block *sb)
50054 {
50055- atomic_inc(&permission_epoch);
50056+ atomic_inc_unchecked(&permission_epoch);
50057 }
50058
50059
50060@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
50061 spin_lock(&cii->c_lock);
50062 hit = (mask & cii->c_cached_perm) == mask &&
50063 cii->c_uid == current_fsuid() &&
50064- cii->c_cached_epoch == atomic_read(&permission_epoch);
50065+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
50066 spin_unlock(&cii->c_lock);
50067
50068 return hit;
50069diff --git a/fs/compat.c b/fs/compat.c
50070index a06dcbc..dacb6d3 100644
50071--- a/fs/compat.c
50072+++ b/fs/compat.c
50073@@ -54,7 +54,7 @@
50074 #include <asm/ioctls.h>
50075 #include "internal.h"
50076
50077-int compat_log = 1;
50078+int compat_log = 0;
50079
50080 int compat_printk(const char *fmt, ...)
50081 {
50082@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
50083
50084 set_fs(KERNEL_DS);
50085 /* The __user pointer cast is valid because of the set_fs() */
50086- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
50087+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
50088 set_fs(oldfs);
50089 /* truncating is ok because it's a user address */
50090 if (!ret)
50091@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
50092 goto out;
50093
50094 ret = -EINVAL;
50095- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
50096+ if (nr_segs > UIO_MAXIOV)
50097 goto out;
50098 if (nr_segs > fast_segs) {
50099 ret = -ENOMEM;
50100@@ -835,6 +835,7 @@ struct compat_old_linux_dirent {
50101
50102 struct compat_readdir_callback {
50103 struct compat_old_linux_dirent __user *dirent;
50104+ struct file * file;
50105 int result;
50106 };
50107
50108@@ -852,6 +853,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
50109 buf->result = -EOVERFLOW;
50110 return -EOVERFLOW;
50111 }
50112+
50113+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50114+ return 0;
50115+
50116 buf->result++;
50117 dirent = buf->dirent;
50118 if (!access_ok(VERIFY_WRITE, dirent,
50119@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
50120
50121 buf.result = 0;
50122 buf.dirent = dirent;
50123+ buf.file = f.file;
50124
50125 error = vfs_readdir(f.file, compat_fillonedir, &buf);
50126 if (buf.result)
50127@@ -901,6 +907,7 @@ struct compat_linux_dirent {
50128 struct compat_getdents_callback {
50129 struct compat_linux_dirent __user *current_dir;
50130 struct compat_linux_dirent __user *previous;
50131+ struct file * file;
50132 int count;
50133 int error;
50134 };
50135@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
50136 buf->error = -EOVERFLOW;
50137 return -EOVERFLOW;
50138 }
50139+
50140+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50141+ return 0;
50142+
50143 dirent = buf->previous;
50144 if (dirent) {
50145 if (__put_user(offset, &dirent->d_off))
50146@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50147 buf.previous = NULL;
50148 buf.count = count;
50149 buf.error = 0;
50150+ buf.file = f.file;
50151
50152 error = vfs_readdir(f.file, compat_filldir, &buf);
50153 if (error >= 0)
50154@@ -987,6 +999,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50155 struct compat_getdents_callback64 {
50156 struct linux_dirent64 __user *current_dir;
50157 struct linux_dirent64 __user *previous;
50158+ struct file * file;
50159 int count;
50160 int error;
50161 };
50162@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
50163 buf->error = -EINVAL; /* only used if we fail.. */
50164 if (reclen > buf->count)
50165 return -EINVAL;
50166+
50167+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50168+ return 0;
50169+
50170 dirent = buf->previous;
50171
50172 if (dirent) {
50173@@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
50174 buf.previous = NULL;
50175 buf.count = count;
50176 buf.error = 0;
50177+ buf.file = f.file;
50178
50179 error = vfs_readdir(f.file, compat_filldir64, &buf);
50180 if (error >= 0)
50181 error = buf.error;
50182 lastdirent = buf.previous;
50183 if (lastdirent) {
50184- typeof(lastdirent->d_off) d_off = f.file->f_pos;
50185+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
50186 if (__put_user_unaligned(d_off, &lastdirent->d_off))
50187 error = -EFAULT;
50188 else
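
All three compat readdir callbacks gain a struct file in their context so the per-entry gr_acl_handle_filldir() check can run; vfs_readdir() passes the callback nothing but an opaque cookie, so any extra context has to ride inside it. A sketch of the pattern (entry_visible() is a hypothetical stand-in for the grsecurity hook):

#include <linux/fs.h>

/* hypothetical policy hook standing in for gr_acl_handle_filldir() */
static bool entry_visible(struct file *dir, const char *name,
			  int namlen, u64 ino)
{
	return true;
}

struct filtered_readdir_ctx {
	struct file *file;     /* the directory, stashed for the filter */
	int count;
};

/* Returning 0 quietly skips an entry while letting iteration
 * continue -- exactly how the hunks above hide filtered names. */
static int filtered_filldir(void *__buf, const char *name, int namlen,
			    loff_t offset, u64 ino, unsigned int d_type)
{
	struct filtered_readdir_ctx *ctx = __buf;

	if (!entry_visible(ctx->file, name, namlen, ino))
		return 0;

	ctx->count++;
	return 0;
}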
50189diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
50190index a81147e..20bf2b5 100644
50191--- a/fs/compat_binfmt_elf.c
50192+++ b/fs/compat_binfmt_elf.c
50193@@ -30,11 +30,13 @@
50194 #undef elf_phdr
50195 #undef elf_shdr
50196 #undef elf_note
50197+#undef elf_dyn
50198 #undef elf_addr_t
50199 #define elfhdr elf32_hdr
50200 #define elf_phdr elf32_phdr
50201 #define elf_shdr elf32_shdr
50202 #define elf_note elf32_note
50203+#define elf_dyn Elf32_Dyn
50204 #define elf_addr_t Elf32_Addr
50205
50206 /*
50207diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
50208index e2f57a0..3c78771 100644
50209--- a/fs/compat_ioctl.c
50210+++ b/fs/compat_ioctl.c
50211@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
50212 return -EFAULT;
50213 if (__get_user(udata, &ss32->iomem_base))
50214 return -EFAULT;
50215- ss.iomem_base = compat_ptr(udata);
50216+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
50217 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
50218 __get_user(ss.port_high, &ss32->port_high))
50219 return -EFAULT;
50220@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
50221 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
50222 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
50223 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
50224- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50225+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50226 return -EFAULT;
50227
50228 return ioctl_preallocate(file, p);
50229@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
50230 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
50231 {
50232 unsigned int a, b;
50233- a = *(unsigned int *)p;
50234- b = *(unsigned int *)q;
50235+ a = *(const unsigned int *)p;
50236+ b = *(const unsigned int *)q;
50237 if (a > b)
50238 return 1;
50239 if (a < b)
50240diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
50241index 712b10f..c33c4ca 100644
50242--- a/fs/configfs/dir.c
50243+++ b/fs/configfs/dir.c
50244@@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
50245 static int configfs_depend_prep(struct dentry *origin,
50246 struct config_item *target)
50247 {
50248- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
50249+ struct configfs_dirent *child_sd, *sd;
50250 int ret = 0;
50251
50252- BUG_ON(!origin || !sd);
50253+ BUG_ON(!origin || !origin->d_fsdata);
50254+ sd = origin->d_fsdata;
50255
50256 if (sd->s_element == target) /* Boo-yah */
50257 goto out;
50258@@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50259 }
50260 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
50261 struct configfs_dirent *next;
50262- const char * name;
50263+ const unsigned char * name;
50264+ char d_name[sizeof(next->s_dentry->d_iname)];
50265 int len;
50266 struct inode *inode = NULL;
50267
50268@@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50269 continue;
50270
50271 name = configfs_get_name(next);
50272- len = strlen(name);
50273+ if (next->s_dentry && name == next->s_dentry->d_iname) {
50274+ len = next->s_dentry->d_name.len;
50275+ memcpy(d_name, name, len);
50276+ name = d_name;
50277+ } else
50278+ len = strlen(name);
50279
50280 /*
50281 * We'll have a dentry and an inode for
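
The configfs_readdir() change snapshots names that live in a dentry's embedded d_iname into a stack buffer, using the recorded length rather than strlen() on storage that a concurrent rename may rewrite. A userspace sketch of the snapshot-by-length idea (the struct layout is illustrative):

#include <stdio.h>
#include <string.h>

#define INAME_LEN 32            /* stand-in for sizeof(dentry.d_iname) */

struct mini_dentry {
	unsigned int name_len;  /* maintained alongside iname */
	char iname[INAME_LEN];
};

/* Copy using the recorded length instead of strlen() on a buffer
 * another thread may be rewriting mid-scan, then emit the stable copy. */
static void emit_name(const struct mini_dentry *d)
{
	char copy[INAME_LEN];
	unsigned int len = d->name_len;

	if (len >= sizeof(copy))
		len = sizeof(copy) - 1;
	memcpy(copy, d->iname, len);
	copy[len] = '\0';
	printf("%s (%u)\n", copy, len);
}

int main(void)
{
	struct mini_dentry d = { 5, "hello" };

	emit_name(&d);
	return 0;
}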
50282diff --git a/fs/coredump.c b/fs/coredump.c
50283index 1774932..5812106 100644
50284--- a/fs/coredump.c
50285+++ b/fs/coredump.c
50286@@ -52,7 +52,7 @@ struct core_name {
50287 char *corename;
50288 int used, size;
50289 };
50290-static atomic_t call_count = ATOMIC_INIT(1);
50291+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
50292
50293 /* The maximal length of core_pattern is also specified in sysctl.c */
50294
50295@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
50296 {
50297 char *old_corename = cn->corename;
50298
50299- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
50300+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
50301 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
50302
50303 if (!cn->corename) {
50304@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
50305 int pid_in_pattern = 0;
50306 int err = 0;
50307
50308- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
50309+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
50310 cn->corename = kmalloc(cn->size, GFP_KERNEL);
50311 cn->used = 0;
50312
50313@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
50314 pipe = file->f_path.dentry->d_inode->i_pipe;
50315
50316 pipe_lock(pipe);
50317- pipe->readers++;
50318- pipe->writers--;
50319+ atomic_inc(&pipe->readers);
50320+ atomic_dec(&pipe->writers);
50321
50322- while ((pipe->readers > 1) && (!signal_pending(current))) {
50323+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
50324 wake_up_interruptible_sync(&pipe->wait);
50325 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50326 pipe_wait(pipe);
50327 }
50328
50329- pipe->readers--;
50330- pipe->writers++;
50331+ atomic_dec(&pipe->readers);
50332+ atomic_inc(&pipe->writers);
50333 pipe_unlock(pipe);
50334
50335 }
50336@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
50337 int ispipe;
50338 struct files_struct *displaced;
50339 bool need_nonrelative = false;
50340- static atomic_t core_dump_count = ATOMIC_INIT(0);
50341+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
50342+ long signr = siginfo->si_signo;
50343 struct coredump_params cprm = {
50344 .siginfo = siginfo,
50345 .regs = signal_pt_regs(),
50346@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
50347 .mm_flags = mm->flags,
50348 };
50349
50350- audit_core_dumps(siginfo->si_signo);
50351+ audit_core_dumps(signr);
50352+
50353+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
50354+ gr_handle_brute_attach(cprm.mm_flags);
50355
50356 binfmt = mm->binfmt;
50357 if (!binfmt || !binfmt->core_dump)
50358@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
50359 need_nonrelative = true;
50360 }
50361
50362- retval = coredump_wait(siginfo->si_signo, &core_state);
50363+ retval = coredump_wait(signr, &core_state);
50364 if (retval < 0)
50365 goto fail_creds;
50366
50367@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
50368 }
50369 cprm.limit = RLIM_INFINITY;
50370
50371- dump_count = atomic_inc_return(&core_dump_count);
50372+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
50373 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
50374 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
50375 task_tgid_vnr(current), current->comm);
50376@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
50377 } else {
50378 struct inode *inode;
50379
50380+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
50381+
50382 if (cprm.limit < binfmt->min_coredump)
50383 goto fail_unlock;
50384
50385@@ -640,7 +646,7 @@ close_fail:
50386 filp_close(cprm.file, NULL);
50387 fail_dropcount:
50388 if (ispipe)
50389- atomic_dec(&core_dump_count);
50390+ atomic_dec_unchecked(&core_dump_count);
50391 fail_unlock:
50392 kfree(cn.corename);
50393 fail_corename:
50394@@ -659,7 +665,7 @@ fail:
50395 */
50396 int dump_write(struct file *file, const void *addr, int nr)
50397 {
50398- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
50399+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
50400 }
50401 EXPORT_SYMBOL(dump_write);
50402
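
Alongside the counter conversions, expand_corename() keeps its growth scheme: the buffer is resized to CORENAME_MAX_SIZE times an ever-increasing call counter, so each expansion is strictly larger than the last even with concurrent dumps bumping the counter. A userspace stand-in (a plain counter replaces atomic_inc_return_unchecked(), and realloc() replaces krealloc()):

#include <stdlib.h>

#define CORENAME_MAX_SIZE 128

struct core_name { char *corename; int used, size; };

/* On failure the old buffer survives, matching krealloc() semantics. */
static int expand_corename(struct core_name *cn, unsigned int *call_count)
{
	char *p = realloc(cn->corename,
			  (size_t)CORENAME_MAX_SIZE * (*call_count + 1));

	if (!p)
		return -1;
	(*call_count)++;
	cn->size = CORENAME_MAX_SIZE * (int)*call_count;
	cn->corename = p;
	return 0;
}

int main(void)
{
	struct core_name cn = { NULL, 0, 0 };
	unsigned int calls = 0;

	while (cn.size < 512)           /* grows 128, 256, 384, 512 */
		if (expand_corename(&cn, &calls))
			return 1;
	free(cn.corename);
	return 0;
}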
50403diff --git a/fs/dcache.c b/fs/dcache.c
50404index c3bbf85..5b71101 100644
50405--- a/fs/dcache.c
50406+++ b/fs/dcache.c
50407@@ -3139,7 +3139,7 @@ void __init vfs_caches_init(unsigned long mempages)
50408 mempages -= reserve;
50409
50410 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
50411- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
50412+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
50413
50414 dcache_init();
50415 inode_init();
50416diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
50417index a5f12b7..4ee8a6f 100644
50418--- a/fs/debugfs/inode.c
50419+++ b/fs/debugfs/inode.c
50420@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
50421 */
50422 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
50423 {
50424+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50425+ return __create_file(name, S_IFDIR | S_IRWXU,
50426+#else
50427 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50428+#endif
50429 parent, NULL, NULL);
50430 }
50431 EXPORT_SYMBOL_GPL(debugfs_create_dir);
50432diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
50433index cc7709e..7e7211f 100644
50434--- a/fs/ecryptfs/inode.c
50435+++ b/fs/ecryptfs/inode.c
50436@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
50437 old_fs = get_fs();
50438 set_fs(get_ds());
50439 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
50440- (char __user *)lower_buf,
50441+ (char __force_user *)lower_buf,
50442 PATH_MAX);
50443 set_fs(old_fs);
50444 if (rc < 0)
50445@@ -706,7 +706,7 @@ out:
50446 static void
50447 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
50448 {
50449- char *buf = nd_get_link(nd);
50450+ const char *buf = nd_get_link(nd);
50451 if (!IS_ERR(buf)) {
50452 /* Free the char* */
50453 kfree(buf);
50454diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
50455index 412e6ed..4292d22 100644
50456--- a/fs/ecryptfs/miscdev.c
50457+++ b/fs/ecryptfs/miscdev.c
50458@@ -315,7 +315,7 @@ check_list:
50459 goto out_unlock_msg_ctx;
50460 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
50461 if (msg_ctx->msg) {
50462- if (copy_to_user(&buf[i], packet_length, packet_length_size))
50463+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
50464 goto out_unlock_msg_ctx;
50465 i += packet_length_size;
50466 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
50467diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
50468index b2a34a1..162fa69 100644
50469--- a/fs/ecryptfs/read_write.c
50470+++ b/fs/ecryptfs/read_write.c
50471@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
50472 return -EIO;
50473 fs_save = get_fs();
50474 set_fs(get_ds());
50475- rc = vfs_write(lower_file, data, size, &offset);
50476+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
50477 set_fs(fs_save);
50478 mark_inode_dirty_sync(ecryptfs_inode);
50479 return rc;
50480@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
50481 return -EIO;
50482 fs_save = get_fs();
50483 set_fs(get_ds());
50484- rc = vfs_read(lower_file, data, size, &offset);
50485+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
50486 set_fs(fs_save);
50487 return rc;
50488 }
50489diff --git a/fs/exec.c b/fs/exec.c
50490index 20df02c..1b1d946 100644
50491--- a/fs/exec.c
50492+++ b/fs/exec.c
50493@@ -55,6 +55,17 @@
50494 #include <linux/pipe_fs_i.h>
50495 #include <linux/oom.h>
50496 #include <linux/compat.h>
50497+#include <linux/random.h>
50498+#include <linux/seq_file.h>
50499+#include <linux/coredump.h>
50500+#include <linux/mman.h>
50501+
50502+#ifdef CONFIG_PAX_REFCOUNT
50503+#include <linux/kallsyms.h>
50504+#include <linux/kdebug.h>
50505+#endif
50506+
50507+#include <trace/events/fs.h>
50508
50509 #include <asm/uaccess.h>
50510 #include <asm/mmu_context.h>
50511@@ -66,6 +77,18 @@
50512
50513 #include <trace/events/sched.h>
50514
50515+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50516+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
50517+{
50518+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
50519+}
50520+#endif
50521+
50522+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
50523+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
50524+EXPORT_SYMBOL(pax_set_initial_flags_func);
50525+#endif
50526+
50527 int suid_dumpable = 0;
50528
50529 static LIST_HEAD(formats);
50530@@ -75,8 +98,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
50531 {
50532 BUG_ON(!fmt);
50533 write_lock(&binfmt_lock);
50534- insert ? list_add(&fmt->lh, &formats) :
50535- list_add_tail(&fmt->lh, &formats);
50536+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
50537+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
50538 write_unlock(&binfmt_lock);
50539 }
50540
50541@@ -85,7 +108,7 @@ EXPORT_SYMBOL(__register_binfmt);
50542 void unregister_binfmt(struct linux_binfmt * fmt)
50543 {
50544 write_lock(&binfmt_lock);
50545- list_del(&fmt->lh);
50546+ pax_list_del((struct list_head *)&fmt->lh);
50547 write_unlock(&binfmt_lock);
50548 }
50549
50550@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50551 int write)
50552 {
50553 struct page *page;
50554- int ret;
50555
50556-#ifdef CONFIG_STACK_GROWSUP
50557- if (write) {
50558- ret = expand_downwards(bprm->vma, pos);
50559- if (ret < 0)
50560- return NULL;
50561- }
50562-#endif
50563- ret = get_user_pages(current, bprm->mm, pos,
50564- 1, write, 1, &page, NULL);
50565- if (ret <= 0)
50566+ if (0 > expand_downwards(bprm->vma, pos))
50567+ return NULL;
50568+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
50569 return NULL;
50570
50571 if (write) {
50572@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50573 if (size <= ARG_MAX)
50574 return page;
50575
50576+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50577+ // only allow 512KB for argv+env on suid/sgid binaries
50578+ // to prevent easy ASLR exhaustion
50579+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
50580+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
50581+ (size > (512 * 1024))) {
50582+ put_page(page);
50583+ return NULL;
50584+ }
50585+#endif
50586+
50587 /*
50588 * Limit to 1/4-th the stack size for the argv+env strings.
50589 * This ensures that:
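
The GRKERNSEC_PROC_MEMMAP block above caps argv+env at 512KB once the exec crosses a privilege boundary, so a huge environment cannot chew through the randomized stack region of a setuid target. The check reduces to the following (helper name is hypothetical):

#include <stdbool.h>
#include <stddef.h>

#define SUID_ARG_CAP (512 * 1024)

/* 'size' is how much argument-string space has been consumed so far,
 * measured from the top of the arg area as in get_arg_page(). */
static bool arg_space_ok(bool crosses_priv_boundary, size_t size)
{
	if (crosses_priv_boundary && size > SUID_ARG_CAP)
		return false;
	return true;
}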
50590@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50591 vma->vm_end = STACK_TOP_MAX;
50592 vma->vm_start = vma->vm_end - PAGE_SIZE;
50593 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
50594+
50595+#ifdef CONFIG_PAX_SEGMEXEC
50596+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50597+#endif
50598+
50599 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50600 INIT_LIST_HEAD(&vma->anon_vma_chain);
50601
50602@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50603 mm->stack_vm = mm->total_vm = 1;
50604 up_write(&mm->mmap_sem);
50605 bprm->p = vma->vm_end - sizeof(void *);
50606+
50607+#ifdef CONFIG_PAX_RANDUSTACK
50608+ if (randomize_va_space)
50609+ bprm->p ^= random32() & ~PAGE_MASK;
50610+#endif
50611+
50612 return 0;
50613 err:
50614 up_write(&mm->mmap_sem);
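
PAX_RANDUSTACK, enabled in __bprm_mm_init() above, XORs bprm->p with random bits below the page boundary, shifting where the argument strings start within their page. The arithmetic, demonstrated standalone (4K pages assumed; the kernel uses random32() and applies this only when randomize_va_space is set):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t p = 0x7ffffffff000;   /* stand-in for bprm->p */

	srandom(42);
	/* ~PAGE_MASK == PAGE_SIZE - 1, so only the intra-page offset
	 * (the low 12 bits here) is randomized */
	p ^= (uintptr_t)random() & ~PAGE_MASK;
	printf("p = %#lx (offset %lu within page)\n",
	       (unsigned long)p, (unsigned long)(p & ~PAGE_MASK));
	return 0;
}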
50615@@ -396,7 +433,7 @@ struct user_arg_ptr {
50616 } ptr;
50617 };
50618
50619-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50620+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50621 {
50622 const char __user *native;
50623
50624@@ -405,14 +442,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50625 compat_uptr_t compat;
50626
50627 if (get_user(compat, argv.ptr.compat + nr))
50628- return ERR_PTR(-EFAULT);
50629+ return (const char __force_user *)ERR_PTR(-EFAULT);
50630
50631 return compat_ptr(compat);
50632 }
50633 #endif
50634
50635 if (get_user(native, argv.ptr.native + nr))
50636- return ERR_PTR(-EFAULT);
50637+ return (const char __force_user *)ERR_PTR(-EFAULT);
50638
50639 return native;
50640 }
50641@@ -431,7 +468,7 @@ static int count(struct user_arg_ptr argv, int max)
50642 if (!p)
50643 break;
50644
50645- if (IS_ERR(p))
50646+ if (IS_ERR((const char __force_kernel *)p))
50647 return -EFAULT;
50648
50649 if (i >= max)
50650@@ -466,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50651
50652 ret = -EFAULT;
50653 str = get_user_arg_ptr(argv, argc);
50654- if (IS_ERR(str))
50655+ if (IS_ERR((const char __force_kernel *)str))
50656 goto out;
50657
50658 len = strnlen_user(str, MAX_ARG_STRLEN);
50659@@ -548,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50660 int r;
50661 mm_segment_t oldfs = get_fs();
50662 struct user_arg_ptr argv = {
50663- .ptr.native = (const char __user *const __user *)__argv,
50664+ .ptr.native = (const char __force_user *const __force_user *)__argv,
50665 };
50666
50667 set_fs(KERNEL_DS);
50668@@ -583,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50669 unsigned long new_end = old_end - shift;
50670 struct mmu_gather tlb;
50671
50672- BUG_ON(new_start > new_end);
50673+ if (new_start >= new_end || new_start < mmap_min_addr)
50674+ return -ENOMEM;
50675
50676 /*
50677 * ensure there are no vmas between where we want to go
50678@@ -592,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50679 if (vma != find_vma(mm, new_start))
50680 return -EFAULT;
50681
50682+#ifdef CONFIG_PAX_SEGMEXEC
50683+ BUG_ON(pax_find_mirror_vma(vma));
50684+#endif
50685+
50686 /*
50687 * cover the whole range: [new_start, old_end)
50688 */
50689@@ -672,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50690 stack_top = arch_align_stack(stack_top);
50691 stack_top = PAGE_ALIGN(stack_top);
50692
50693- if (unlikely(stack_top < mmap_min_addr) ||
50694- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50695- return -ENOMEM;
50696-
50697 stack_shift = vma->vm_end - stack_top;
50698
50699 bprm->p -= stack_shift;
50700@@ -687,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50701 bprm->exec -= stack_shift;
50702
50703 down_write(&mm->mmap_sem);
50704+
50705+ /* Move stack pages down in memory. */
50706+ if (stack_shift) {
50707+ ret = shift_arg_pages(vma, stack_shift);
50708+ if (ret)
50709+ goto out_unlock;
50710+ }
50711+
50712 vm_flags = VM_STACK_FLAGS;
50713
50714+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50715+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50716+ vm_flags &= ~VM_EXEC;
50717+
50718+#ifdef CONFIG_PAX_MPROTECT
50719+ if (mm->pax_flags & MF_PAX_MPROTECT)
50720+ vm_flags &= ~VM_MAYEXEC;
50721+#endif
50722+
50723+ }
50724+#endif
50725+
50726 /*
50727 * Adjust stack execute permissions; explicitly enable for
50728 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50729@@ -707,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50730 goto out_unlock;
50731 BUG_ON(prev != vma);
50732
50733- /* Move stack pages down in memory. */
50734- if (stack_shift) {
50735- ret = shift_arg_pages(vma, stack_shift);
50736- if (ret)
50737- goto out_unlock;
50738- }
50739-
50740 /* mprotect_fixup is overkill to remove the temporary stack flags */
50741 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50742
50743@@ -737,6 +788,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50744 #endif
50745 current->mm->start_stack = bprm->p;
50746 ret = expand_stack(vma, stack_base);
50747+
50748+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
50749+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
50750+ unsigned long size, flags, vm_flags;
50751+
50752+ size = STACK_TOP - vma->vm_end;
50753+ flags = MAP_FIXED | MAP_PRIVATE;
50754+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
50755+
50756+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
50757+
50758+#ifdef CONFIG_X86
50759+ if (!ret) {
50760+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
50761+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), flags, vm_flags, 0);
50762+ }
50763+#endif
50764+
50765+ }
50766+#endif
50767+
50768 if (ret)
50769 ret = -EFAULT;
50770
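Two things happen in the setup_arg_pages() hunks above: shift_arg_pages() is moved ahead of the point where the stack's permanent flags are computed, and under PAGEEXEC/SEGMEXEC the stack loses VM_EXEC (and, with MPROTECT, VM_MAYEXEC, so it can never be re-enabled via mprotect()); the RANDMMAP branch additionally plugs the region between the stack and STACK_TOP with a reserved mapping so nothing can be mapped over the randomization gap. A sketch of the flag composition, with illustrative bit values in place of the kernel's VM_* constants:

#define VM_EXEC        0x00000004UL
#define VM_MAYEXEC     0x00000040UL
#define VM_STACK_FLAGS (0x00000100UL | VM_EXEC | VM_MAYEXEC) /* illustrative */

unsigned long stack_vm_flags(int nx_active, int mprotect_active)
{
    unsigned long vm_flags = VM_STACK_FLAGS;

    if (nx_active) {
        vm_flags &= ~VM_EXEC;        /* stack starts non-executable */
        if (mprotect_active)
            vm_flags &= ~VM_MAYEXEC; /* and cannot be made executable later */
    }
    return vm_flags;
}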
50771@@ -772,6 +844,8 @@ struct file *open_exec(const char *name)
50772
50773 fsnotify_open(file);
50774
50775+ trace_open_exec(name);
50776+
50777 err = deny_write_access(file);
50778 if (err)
50779 goto exit;
50780@@ -795,7 +869,7 @@ int kernel_read(struct file *file, loff_t offset,
50781 old_fs = get_fs();
50782 set_fs(get_ds());
50783 /* The cast to a user pointer is valid due to the set_fs() */
50784- result = vfs_read(file, (void __user *)addr, count, &pos);
50785+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
50786 set_fs(old_fs);
50787 return result;
50788 }
50789@@ -1247,7 +1321,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
50790 }
50791 rcu_read_unlock();
50792
50793- if (p->fs->users > n_fs) {
50794+ if (atomic_read(&p->fs->users) > n_fs) {
50795 bprm->unsafe |= LSM_UNSAFE_SHARE;
50796 } else {
50797 res = -EAGAIN;
50798@@ -1447,6 +1521,31 @@ int search_binary_handler(struct linux_binprm *bprm)
50799
50800 EXPORT_SYMBOL(search_binary_handler);
50801
50802+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50803+static DEFINE_PER_CPU(u64, exec_counter);
50804+static int __init init_exec_counters(void)
50805+{
50806+ unsigned int cpu;
50807+
50808+ for_each_possible_cpu(cpu) {
50809+ per_cpu(exec_counter, cpu) = (u64)cpu;
50810+ }
50811+
50812+ return 0;
50813+}
50814+early_initcall(init_exec_counters);
50815+static inline void increment_exec_counter(void)
50816+{
50817+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
50818+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
50819+}
50820+#else
50821+static inline void increment_exec_counter(void) {}
50822+#endif
50823+
50824+extern void gr_handle_exec_args(struct linux_binprm *bprm,
50825+ struct user_arg_ptr argv);
50826+
50827 /*
50828 * sys_execve() executes a new program.
50829 */
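The GRKERNSEC_PROC_MEMMAP counter above hands every successful execve() a 64-bit ID that is unique across CPUs without locking: the low 16 bits are seeded with the CPU number at boot, each exec adds 1 << 16, so counting happens entirely in the upper bits (the BUILD_BUG_ON guards the 16-bit CPU field). A userspace sketch with a plain array standing in for the per-CPU variable:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4 /* illustrative */

static uint64_t exec_counter[NR_CPUS];

static uint64_t next_exec_id(unsigned int cpu)
{
    exec_counter[cpu] += 1u << 16; /* this_cpu_add_return(..., 1 << 16) analogue */
    return exec_counter[cpu];
}

int main(void)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        exec_counter[cpu] = cpu; /* per_cpu seed: CPU number in the low bits */

    printf("%#llx %#llx %#llx\n",
           (unsigned long long)next_exec_id(0),  /* 0x10000 */
           (unsigned long long)next_exec_id(0),  /* 0x20000 */
           (unsigned long long)next_exec_id(1)); /* 0x10001 */
    return 0;
}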
50830@@ -1454,6 +1553,11 @@ static int do_execve_common(const char *filename,
50831 struct user_arg_ptr argv,
50832 struct user_arg_ptr envp)
50833 {
50834+#ifdef CONFIG_GRKERNSEC
50835+ struct file *old_exec_file;
50836+ struct acl_subject_label *old_acl;
50837+ struct rlimit old_rlim[RLIM_NLIMITS];
50838+#endif
50839 struct linux_binprm *bprm;
50840 struct file *file;
50841 struct files_struct *displaced;
50842@@ -1461,6 +1565,8 @@ static int do_execve_common(const char *filename,
50843 int retval;
50844 const struct cred *cred = current_cred();
50845
50846+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
50847+
50848 /*
50849 * We move the actual failure in case of RLIMIT_NPROC excess from
50850 * set*uid() to execve() because too many poorly written programs
50851@@ -1501,12 +1607,27 @@ static int do_execve_common(const char *filename,
50852 if (IS_ERR(file))
50853 goto out_unmark;
50854
50855+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
50856+ retval = -EPERM;
50857+ goto out_file;
50858+ }
50859+
50860 sched_exec();
50861
50862 bprm->file = file;
50863 bprm->filename = filename;
50864 bprm->interp = filename;
50865
50866+ if (gr_process_user_ban()) {
50867+ retval = -EPERM;
50868+ goto out_file;
50869+ }
50870+
50871+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
50872+ retval = -EACCES;
50873+ goto out_file;
50874+ }
50875+
50876 retval = bprm_mm_init(bprm);
50877 if (retval)
50878 goto out_file;
50879@@ -1523,24 +1644,65 @@ static int do_execve_common(const char *filename,
50880 if (retval < 0)
50881 goto out;
50882
50883+#ifdef CONFIG_GRKERNSEC
50884+ old_acl = current->acl;
50885+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
50886+ old_exec_file = current->exec_file;
50887+ get_file(file);
50888+ current->exec_file = file;
50889+#endif
50890+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50891+ /* limit suid stack to 8MB
50892+ * we saved the old limits above and will restore them if this exec fails
50893+ */
50894+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
50895+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
50896+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
50897+#endif
50898+
50899+ if (!gr_tpe_allow(file)) {
50900+ retval = -EACCES;
50901+ goto out_fail;
50902+ }
50903+
50904+ if (gr_check_crash_exec(file)) {
50905+ retval = -EACCES;
50906+ goto out_fail;
50907+ }
50908+
50909+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
50910+ bprm->unsafe);
50911+ if (retval < 0)
50912+ goto out_fail;
50913+
50914 retval = copy_strings_kernel(1, &bprm->filename, bprm);
50915 if (retval < 0)
50916- goto out;
50917+ goto out_fail;
50918
50919 bprm->exec = bprm->p;
50920 retval = copy_strings(bprm->envc, envp, bprm);
50921 if (retval < 0)
50922- goto out;
50923+ goto out_fail;
50924
50925 retval = copy_strings(bprm->argc, argv, bprm);
50926 if (retval < 0)
50927- goto out;
50928+ goto out_fail;
50929+
50930+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
50931+
50932+ gr_handle_exec_args(bprm, argv);
50933
50934 retval = search_binary_handler(bprm);
50935 if (retval < 0)
50936- goto out;
50937+ goto out_fail;
50938+#ifdef CONFIG_GRKERNSEC
50939+ if (old_exec_file)
50940+ fput(old_exec_file);
50941+#endif
50942
50943 /* execve succeeded */
50944+
50945+ increment_exec_counter();
50946 current->fs->in_exec = 0;
50947 current->in_execve = 0;
50948 acct_update_integrals(current);
50949@@ -1549,6 +1711,14 @@ static int do_execve_common(const char *filename,
50950 put_files_struct(displaced);
50951 return retval;
50952
50953+out_fail:
50954+#ifdef CONFIG_GRKERNSEC
50955+ current->acl = old_acl;
50956+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
50957+ fput(current->exec_file);
50958+ current->exec_file = old_exec_file;
50959+#endif
50960+
50961 out:
50962 if (bprm->mm) {
50963 acct_arg_size(bprm, 0);
50964@@ -1697,3 +1867,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
50965 return error;
50966 }
50967 #endif
50968+
50969+int pax_check_flags(unsigned long *flags)
50970+{
50971+ int retval = 0;
50972+
50973+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
50974+ if (*flags & MF_PAX_SEGMEXEC)
50975+ {
50976+ *flags &= ~MF_PAX_SEGMEXEC;
50977+ retval = -EINVAL;
50978+ }
50979+#endif
50980+
50981+ if ((*flags & MF_PAX_PAGEEXEC)
50982+
50983+#ifdef CONFIG_PAX_PAGEEXEC
50984+ && (*flags & MF_PAX_SEGMEXEC)
50985+#endif
50986+
50987+ )
50988+ {
50989+ *flags &= ~MF_PAX_PAGEEXEC;
50990+ retval = -EINVAL;
50991+ }
50992+
50993+ if ((*flags & MF_PAX_MPROTECT)
50994+
50995+#ifdef CONFIG_PAX_MPROTECT
50996+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50997+#endif
50998+
50999+ )
51000+ {
51001+ *flags &= ~MF_PAX_MPROTECT;
51002+ retval = -EINVAL;
51003+ }
51004+
51005+ if ((*flags & MF_PAX_EMUTRAMP)
51006+
51007+#ifdef CONFIG_PAX_EMUTRAMP
51008+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51009+#endif
51010+
51011+ )
51012+ {
51013+ *flags &= ~MF_PAX_EMUTRAMP;
51014+ retval = -EINVAL;
51015+ }
51016+
51017+ return retval;
51018+}
51019+
51020+EXPORT_SYMBOL(pax_check_flags);
51021+
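pax_check_flags() above enforces the dependency rules between the per-task PaX flags: SEGMEXEC is only valid on x86-32 kernels built with it, PAGEEXEC and SEGMEXEC are mutually exclusive, and MPROTECT and EMUTRAMP each require one of the two NX schemes. A condensed userspace restatement of those rules; the bit values are illustrative, not the kernel's MF_PAX_* constants, and the config conditionals are assumed enabled:

enum { PAGEEXEC = 1, SEGMEXEC = 2, MPROTECT = 4, EMUTRAMP = 8 };

int check_flags(unsigned long *flags)
{
    int ret = 0;

    if ((*flags & PAGEEXEC) && (*flags & SEGMEXEC)) {
        *flags &= ~PAGEEXEC;  /* the two NX schemes cannot coexist */
        ret = -1;
    }
    if ((*flags & MPROTECT) && !(*flags & (PAGEEXEC | SEGMEXEC))) {
        *flags &= ~MPROTECT;  /* MPROTECT needs an NX scheme underneath */
        ret = -1;
    }
    if ((*flags & EMUTRAMP) && !(*flags & (PAGEEXEC | SEGMEXEC))) {
        *flags &= ~EMUTRAMP;  /* trampoline emulation does too */
        ret = -1;
    }
    return ret; /* -1 mirrors the kernel's -EINVAL */
}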
51022+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51023+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
51024+{
51025+ struct task_struct *tsk = current;
51026+ struct mm_struct *mm = current->mm;
51027+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
51028+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
51029+ char *path_exec = NULL;
51030+ char *path_fault = NULL;
51031+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
51032+ siginfo_t info = { };
51033+
51034+ if (buffer_exec && buffer_fault) {
51035+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
51036+
51037+ down_read(&mm->mmap_sem);
51038+ vma = mm->mmap;
51039+ while (vma && (!vma_exec || !vma_fault)) {
51040+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
51041+ vma_exec = vma;
51042+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
51043+ vma_fault = vma;
51044+ vma = vma->vm_next;
51045+ }
51046+ if (vma_exec) {
51047+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
51048+ if (IS_ERR(path_exec))
51049+ path_exec = "<path too long>";
51050+ else {
51051+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
51052+ if (path_exec) {
51053+ *path_exec = 0;
51054+ path_exec = buffer_exec;
51055+ } else
51056+ path_exec = "<path too long>";
51057+ }
51058+ }
51059+ if (vma_fault) {
51060+ start = vma_fault->vm_start;
51061+ end = vma_fault->vm_end;
51062+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
51063+ if (vma_fault->vm_file) {
51064+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
51065+ if (IS_ERR(path_fault))
51066+ path_fault = "<path too long>";
51067+ else {
51068+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
51069+ if (path_fault) {
51070+ *path_fault = 0;
51071+ path_fault = buffer_fault;
51072+ } else
51073+ path_fault = "<path too long>";
51074+ }
51075+ } else
51076+ path_fault = "<anonymous mapping>";
51077+ }
51078+ up_read(&mm->mmap_sem);
51079+ }
51080+ if (tsk->signal->curr_ip)
51081+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
51082+ else
51083+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
51084+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
51085+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
51086+ free_page((unsigned long)buffer_exec);
51087+ free_page((unsigned long)buffer_fault);
51088+ pax_report_insns(regs, pc, sp);
51089+ info.si_signo = SIGKILL;
51090+ info.si_errno = 0;
51091+ info.si_code = SI_KERNEL;
51092+ info.si_pid = 0;
51093+ info.si_uid = 0;
51094+ do_coredump(&info);
51095+}
51096+#endif
51097+
51098+#ifdef CONFIG_PAX_REFCOUNT
51099+void pax_report_refcount_overflow(struct pt_regs *regs)
51100+{
51101+ if (current->signal->curr_ip)
51102+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
51103+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
51104+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51105+ else
51106+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
51107+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51108+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
51109+ show_regs(regs);
51110+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
51111+}
51112+#endif
51113+
51114+#ifdef CONFIG_PAX_USERCOPY
51115+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
51116+static noinline int check_stack_object(const void *obj, unsigned long len)
51117+{
51118+ const void * const stack = task_stack_page(current);
51119+ const void * const stackend = stack + THREAD_SIZE;
51120+
51121+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51122+ const void *frame = NULL;
51123+ const void *oldframe;
51124+#endif
51125+
51126+ if (obj + len < obj)
51127+ return -1;
51128+
51129+ if (obj + len <= stack || stackend <= obj)
51130+ return 0;
51131+
51132+ if (obj < stack || stackend < obj + len)
51133+ return -1;
51134+
51135+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51136+ oldframe = __builtin_frame_address(1);
51137+ if (oldframe)
51138+ frame = __builtin_frame_address(2);
51139+ /*
51140+ low ----------------------------------------------> high
51141+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
51142+ ^----------------^
51143+ allow copies only within here
51144+ */
51145+ while (stack <= frame && frame < stackend) {
51146+ /* if obj + len extends past the last frame, this
51147+ check won't pass and the next frame will be 0,
51148+ causing us to bail out and correctly report
51149+ the copy as invalid
51150+ */
51151+ if (obj + len <= frame)
51152+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
51153+ oldframe = frame;
51154+ frame = *(const void * const *)frame;
51155+ }
51156+ return -1;
51157+#else
51158+ return 1;
51159+#endif
51160+}
51161+
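check_stack_object() above answers one question for PAX_USERCOPY: does the buffer lie entirely outside the current kernel stack (0), entirely inside it (1, or 2 when the frame walk can pin it within a single live frame), or does it straddle a boundary (-1, always an error)? The frame walk needs x86 frame pointers; the boundary classification it starts from can be restated in plain userspace C:

#include <stdio.h>

/* 0: outside, 1: fully inside, -1: wraps or straddles a boundary */
static int classify(const char *obj, unsigned long len,
                    const char *stack, const char *stackend)
{
    if (obj + len < obj)
        return -1;                        /* pointer arithmetic wrapped */
    if (obj + len <= stack || stackend <= obj)
        return 0;                         /* entirely outside the stack */
    if (obj < stack || stackend < obj + len)
        return -1;                        /* partial overlap: reject */
    return 1;
}

int main(void)
{
    char stack[256], elsewhere[64];

    printf("%d %d %d\n",
           classify(stack + 16, 32, stack, stack + 256),   /* 1 */
           classify(elsewhere, 32, stack, stack + 256),    /* 0 */
           classify(stack + 240, 32, stack, stack + 256)); /* -1 */
    return 0;
}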
51162+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
51163+{
51164+ if (current->signal->curr_ip)
51165+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51166+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51167+ else
51168+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51169+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51170+ dump_stack();
51171+ gr_handle_kernel_exploit();
51172+ do_group_exit(SIGKILL);
51173+}
51174+#endif
51175+
51176+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
51177+{
51178+
51179+#ifdef CONFIG_PAX_USERCOPY
51180+ const char *type;
51181+
51182+ if (!n)
51183+ return;
51184+
51185+ type = check_heap_object(ptr, n);
51186+ if (!type) {
51187+ if (check_stack_object(ptr, n) != -1)
51188+ return;
51189+ type = "<process stack>";
51190+ }
51191+
51192+ pax_report_usercopy(ptr, n, to_user, type);
51193+#endif
51194+
51195+}
51196+EXPORT_SYMBOL(__check_object_size);
51197+
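__check_object_size() is the patch's single entry point for usercopy validation: heap objects are vetted by check_heap_object() (the copy must fit within one slab object), anything else must pass the stack check above, and a violation kills the task. A hedged sketch of how a uaccess path would invoke it; the wrapper name is illustrative, and the real hook sites live in the per-architecture uaccess code elsewhere in this patch:

/* Illustrative only: not a function from this patch. */
static inline unsigned long
checked_copy_to_user(void __user *to, const void *from, unsigned long n)
{
    __check_object_size(from, n, true); /* aborts the copy, kills on violation */
    return copy_to_user(to, from, n);
}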
51198+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
51199+void pax_track_stack(void)
51200+{
51201+ unsigned long sp = (unsigned long)&sp;
51202+ if (sp < current_thread_info()->lowest_stack &&
51203+ sp > (unsigned long)task_stack_page(current))
51204+ current_thread_info()->lowest_stack = sp;
51205+}
51206+EXPORT_SYMBOL(pax_track_stack);
51207+#endif
51208+
51209+#ifdef CONFIG_PAX_SIZE_OVERFLOW
51210+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
51211+{
51212+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
51213+ dump_stack();
51214+ do_group_exit(SIGKILL);
51215+}
51216+EXPORT_SYMBOL(report_size_overflow);
51217+#endif
51218diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
51219index 2616d0e..2ffdec9 100644
51220--- a/fs/ext2/balloc.c
51221+++ b/fs/ext2/balloc.c
51222@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
51223
51224 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51225 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51226- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51227+ if (free_blocks < root_blocks + 1 &&
51228 !uid_eq(sbi->s_resuid, current_fsuid()) &&
51229 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51230- !in_group_p (sbi->s_resgid))) {
51231+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51232 return 0;
51233 }
51234 return 1;
51235diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
51236index 22548f5..41521d8 100644
51237--- a/fs/ext3/balloc.c
51238+++ b/fs/ext3/balloc.c
51239@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
51240
51241 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51242 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51243- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51244+ if (free_blocks < root_blocks + 1 &&
51245 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
51246 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51247- !in_group_p (sbi->s_resgid))) {
51248+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51249 return 0;
51250 }
51251 return 1;
51252diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
51253index 92e68b3..115d987 100644
51254--- a/fs/ext4/balloc.c
51255+++ b/fs/ext4/balloc.c
51256@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
51257 /* Hm, nope. Are (enough) root reserved clusters available? */
51258 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
51259 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
51260- capable(CAP_SYS_RESOURCE) ||
51261- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
51262+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
51263+ capable_nolog(CAP_SYS_RESOURCE)) {
51264
51265 if (free_clusters >= (nclusters + dirty_clusters))
51266 return 1;
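All three balloc hunks above apply the same transformation: the CAP_SYS_RESOURCE test moves to the end of the condition and switches to capable_nolog(), a grsecurity variant that grants the capability without generating an audit/learning event. Because && short-circuits, the capability is now consulted only after every cheaper ownership check has failed, so routine allocations by the reserved user no longer spam the capability log. The pattern in miniature, with illustrative stand-ins:

int may_dip_into_reserve(int is_resuid, int in_resgid,
                         int (*cap_sys_resource_nolog)(void))
{
    /* cheap identity checks first; the audited capability check last */
    return is_resuid || in_resgid || cap_sys_resource_nolog();
}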
51267diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
51268index bbcd6a0..2824592 100644
51269--- a/fs/ext4/ext4.h
51270+++ b/fs/ext4/ext4.h
51271@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
51272 unsigned long s_mb_last_start;
51273
51274 /* stats for buddy allocator */
51275- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
51276- atomic_t s_bal_success; /* we found long enough chunks */
51277- atomic_t s_bal_allocated; /* in blocks */
51278- atomic_t s_bal_ex_scanned; /* total extents scanned */
51279- atomic_t s_bal_goals; /* goal hits */
51280- atomic_t s_bal_breaks; /* too long searches */
51281- atomic_t s_bal_2orders; /* 2^order hits */
51282+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
51283+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
51284+ atomic_unchecked_t s_bal_allocated; /* in blocks */
51285+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
51286+ atomic_unchecked_t s_bal_goals; /* goal hits */
51287+ atomic_unchecked_t s_bal_breaks; /* too long searches */
51288+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
51289 spinlock_t s_bal_lock;
51290 unsigned long s_mb_buddies_generated;
51291 unsigned long long s_mb_generation_time;
51292- atomic_t s_mb_lost_chunks;
51293- atomic_t s_mb_preallocated;
51294- atomic_t s_mb_discarded;
51295+ atomic_unchecked_t s_mb_lost_chunks;
51296+ atomic_unchecked_t s_mb_preallocated;
51297+ atomic_unchecked_t s_mb_discarded;
51298 atomic_t s_lock_busy;
51299
51300 /* locality groups */
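The s_bal_* fields above are pure statistics: nothing is freed or dereferenced based on their values, so wraparound is harmless. Under PAX_REFCOUNT (defined earlier in this patch), ordinary atomic_t operations trap on overflow to block refcount-overflow exploits; counters that may legitimately wrap are therefore migrated to the unchecked twin type. A minimal stand-in for the pair, using GCC atomic builtins rather than the kernel's implementation:

typedef struct { volatile int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    /* plain wrapping increment: no overflow trap, unlike checked
       atomic_t under PAX_REFCOUNT */
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}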
51301diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
51302index 82f8c2d..ce7c889 100644
51303--- a/fs/ext4/mballoc.c
51304+++ b/fs/ext4/mballoc.c
51305@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
51306 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
51307
51308 if (EXT4_SB(sb)->s_mb_stats)
51309- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
51310+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
51311
51312 break;
51313 }
51314@@ -2044,7 +2044,7 @@ repeat:
51315 ac->ac_status = AC_STATUS_CONTINUE;
51316 ac->ac_flags |= EXT4_MB_HINT_FIRST;
51317 cr = 3;
51318- atomic_inc(&sbi->s_mb_lost_chunks);
51319+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
51320 goto repeat;
51321 }
51322 }
51323@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
51324 if (sbi->s_mb_stats) {
51325 ext4_msg(sb, KERN_INFO,
51326 "mballoc: %u blocks %u reqs (%u success)",
51327- atomic_read(&sbi->s_bal_allocated),
51328- atomic_read(&sbi->s_bal_reqs),
51329- atomic_read(&sbi->s_bal_success));
51330+ atomic_read_unchecked(&sbi->s_bal_allocated),
51331+ atomic_read_unchecked(&sbi->s_bal_reqs),
51332+ atomic_read_unchecked(&sbi->s_bal_success));
51333 ext4_msg(sb, KERN_INFO,
51334 "mballoc: %u extents scanned, %u goal hits, "
51335 "%u 2^N hits, %u breaks, %u lost",
51336- atomic_read(&sbi->s_bal_ex_scanned),
51337- atomic_read(&sbi->s_bal_goals),
51338- atomic_read(&sbi->s_bal_2orders),
51339- atomic_read(&sbi->s_bal_breaks),
51340- atomic_read(&sbi->s_mb_lost_chunks));
51341+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
51342+ atomic_read_unchecked(&sbi->s_bal_goals),
51343+ atomic_read_unchecked(&sbi->s_bal_2orders),
51344+ atomic_read_unchecked(&sbi->s_bal_breaks),
51345+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
51346 ext4_msg(sb, KERN_INFO,
51347 "mballoc: %lu generated and it took %Lu",
51348 sbi->s_mb_buddies_generated,
51349 sbi->s_mb_generation_time);
51350 ext4_msg(sb, KERN_INFO,
51351 "mballoc: %u preallocated, %u discarded",
51352- atomic_read(&sbi->s_mb_preallocated),
51353- atomic_read(&sbi->s_mb_discarded));
51354+ atomic_read_unchecked(&sbi->s_mb_preallocated),
51355+ atomic_read_unchecked(&sbi->s_mb_discarded));
51356 }
51357
51358 free_percpu(sbi->s_locality_groups);
51359@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
51360 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
51361
51362 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
51363- atomic_inc(&sbi->s_bal_reqs);
51364- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51365+ atomic_inc_unchecked(&sbi->s_bal_reqs);
51366+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51367 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
51368- atomic_inc(&sbi->s_bal_success);
51369- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
51370+ atomic_inc_unchecked(&sbi->s_bal_success);
51371+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
51372 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
51373 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
51374- atomic_inc(&sbi->s_bal_goals);
51375+ atomic_inc_unchecked(&sbi->s_bal_goals);
51376 if (ac->ac_found > sbi->s_mb_max_to_scan)
51377- atomic_inc(&sbi->s_bal_breaks);
51378+ atomic_inc_unchecked(&sbi->s_bal_breaks);
51379 }
51380
51381 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
51382@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
51383 trace_ext4_mb_new_inode_pa(ac, pa);
51384
51385 ext4_mb_use_inode_pa(ac, pa);
51386- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
51387+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
51388
51389 ei = EXT4_I(ac->ac_inode);
51390 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51391@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
51392 trace_ext4_mb_new_group_pa(ac, pa);
51393
51394 ext4_mb_use_group_pa(ac, pa);
51395- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51396+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51397
51398 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51399 lg = ac->ac_lg;
51400@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
51401 * from the bitmap and continue.
51402 */
51403 }
51404- atomic_add(free, &sbi->s_mb_discarded);
51405+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
51406
51407 return err;
51408 }
51409@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
51410 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
51411 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
51412 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
51413- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51414+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51415 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
51416
51417 return 0;
51418diff --git a/fs/ext4/super.c b/fs/ext4/super.c
51419index 24c767d..893aa55 100644
51420--- a/fs/ext4/super.c
51421+++ b/fs/ext4/super.c
51422@@ -2429,7 +2429,7 @@ struct ext4_attr {
51423 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
51424 const char *, size_t);
51425 int offset;
51426-};
51427+} __do_const;
51428
51429 static int parse_strtoul(const char *buf,
51430 unsigned long max, unsigned long *value)
51431diff --git a/fs/fcntl.c b/fs/fcntl.c
51432index 71a600a..20d87b1 100644
51433--- a/fs/fcntl.c
51434+++ b/fs/fcntl.c
51435@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
51436 if (err)
51437 return err;
51438
51439+ if (gr_handle_chroot_fowner(pid, type))
51440+ return -ENOENT;
51441+ if (gr_check_protected_task_fowner(pid, type))
51442+ return -EACCES;
51443+
51444 f_modown(filp, pid, type, force);
51445 return 0;
51446 }
51447diff --git a/fs/fhandle.c b/fs/fhandle.c
51448index 999ff5c..41f4109 100644
51449--- a/fs/fhandle.c
51450+++ b/fs/fhandle.c
51451@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
51452 } else
51453 retval = 0;
51454 /* copy the mount id */
51455- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
51456- sizeof(*mnt_id)) ||
51457+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
51458 copy_to_user(ufh, handle,
51459 sizeof(struct file_handle) + handle_bytes))
51460 retval = -EFAULT;
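The fhandle.c change swaps a one-field copy_to_user() for put_user(): for a single scalar, put_user() is the simpler primitive and, likely relevant here, it bypasses the whole-object size check that this patch's PAX_USERCOPY adds to copy_to_user() on a bare struct field. The pattern in miniature; this is a hedged kernel-style sketch, not a function from the patch:

/* illustrative only */
static int export_mnt_id(int __user *mnt_id, int id)
{
    return put_user(id, mnt_id) ? -EFAULT : 0;
}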
51461diff --git a/fs/fifo.c b/fs/fifo.c
51462index cf6f434..3d7942c 100644
51463--- a/fs/fifo.c
51464+++ b/fs/fifo.c
51465@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
51466 */
51467 filp->f_op = &read_pipefifo_fops;
51468 pipe->r_counter++;
51469- if (pipe->readers++ == 0)
51470+ if (atomic_inc_return(&pipe->readers) == 1)
51471 wake_up_partner(inode);
51472
51473- if (!pipe->writers) {
51474+ if (!atomic_read(&pipe->writers)) {
51475 if ((filp->f_flags & O_NONBLOCK)) {
51476 /* suppress POLLHUP until we have
51477 * seen a writer */
51478@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
51479 * errno=ENXIO when there is no process reading the FIFO.
51480 */
51481 ret = -ENXIO;
51482- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
51483+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
51484 goto err;
51485
51486 filp->f_op = &write_pipefifo_fops;
51487 pipe->w_counter++;
51488- if (!pipe->writers++)
51489+ if (atomic_inc_return(&pipe->writers) == 1)
51490 wake_up_partner(inode);
51491
51492- if (!pipe->readers) {
51493+ if (!atomic_read(&pipe->readers)) {
51494 if (wait_for_partner(inode, &pipe->r_counter))
51495 goto err_wr;
51496 }
51497@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
51498 */
51499 filp->f_op = &rdwr_pipefifo_fops;
51500
51501- pipe->readers++;
51502- pipe->writers++;
51503+ atomic_inc(&pipe->readers);
51504+ atomic_inc(&pipe->writers);
51505 pipe->r_counter++;
51506 pipe->w_counter++;
51507- if (pipe->readers == 1 || pipe->writers == 1)
51508+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
51509 wake_up_partner(inode);
51510 break;
51511
51512@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
51513 return 0;
51514
51515 err_rd:
51516- if (!--pipe->readers)
51517+ if (atomic_dec_and_test(&pipe->readers))
51518 wake_up_interruptible(&pipe->wait);
51519 ret = -ERESTARTSYS;
51520 goto err;
51521
51522 err_wr:
51523- if (!--pipe->writers)
51524+ if (atomic_dec_and_test(&pipe->writers))
51525 wake_up_interruptible(&pipe->wait);
51526 ret = -ERESTARTSYS;
51527 goto err;
51528
51529 err:
51530- if (!pipe->readers && !pipe->writers)
51531+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
51532 free_pipe_info(inode);
51533
51534 err_nocleanup:
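The fifo.c conversions preserve the original semantics exactly: the old-value test "pipe->readers++ == 0" becomes the new-value test "atomic_inc_return(&pipe->readers) == 1", and "!--pipe->writers" becomes "atomic_dec_and_test(...)". A userspace check of the first equivalence, with a GCC builtin standing in for atomic_inc_return():

#include <assert.h>

int main(void)
{
    int plain = 0, shared = 0;

    int first_plain  = (plain++ == 0);
    int first_atomic = (__atomic_add_fetch(&shared, 1, __ATOMIC_SEQ_CST) == 1);

    assert(first_plain && first_atomic); /* both report "I was first" */
    return 0;
}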
51535diff --git a/fs/file.c b/fs/file.c
51536index 2b3570b..c57924b 100644
51537--- a/fs/file.c
51538+++ b/fs/file.c
51539@@ -16,6 +16,7 @@
51540 #include <linux/slab.h>
51541 #include <linux/vmalloc.h>
51542 #include <linux/file.h>
51543+#include <linux/security.h>
51544 #include <linux/fdtable.h>
51545 #include <linux/bitops.h>
51546 #include <linux/interrupt.h>
51547@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
51548 if (!file)
51549 return __close_fd(files, fd);
51550
51551+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
51552 if (fd >= rlimit(RLIMIT_NOFILE))
51553 return -EBADF;
51554
51555@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
51556 if (unlikely(oldfd == newfd))
51557 return -EINVAL;
51558
51559+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
51560 if (newfd >= rlimit(RLIMIT_NOFILE))
51561 return -EBADF;
51562
51563@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
51564 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
51565 {
51566 int err;
51567+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
51568 if (from >= rlimit(RLIMIT_NOFILE))
51569 return -EINVAL;
51570 err = alloc_fd(from, flags);
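The gr_learn_resource() calls sprinkled through file.c feed grsecurity's RBAC learning mode: the requested descriptor number is observed before each RLIMIT_NOFILE comparison, so a policy generator can later emit tight per-subject limits from the recorded high-water marks. A sketch of the idea only, not the patch's implementation:

/* illustrative learning hook: track the largest value ever requested */
static unsigned long nofile_high_water;

static void learn_resource(unsigned long requested)
{
    if (requested > nofile_high_water)
        nofile_high_water = requested;
}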
51571diff --git a/fs/filesystems.c b/fs/filesystems.c
51572index da165f6..3671bdb 100644
51573--- a/fs/filesystems.c
51574+++ b/fs/filesystems.c
51575@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
51576 int len = dot ? dot - name : strlen(name);
51577
51578 fs = __get_fs_type(name, len);
51579+
51580+#ifdef CONFIG_GRKERNSEC_MODHARDEN
51581+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
51582+#else
51583 if (!fs && (request_module("%.*s", len, name) == 0))
51584+#endif
51585 fs = __get_fs_type(name, len);
51586
51587 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
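The MODHARDEN hunk above tags kernel-initiated module auto-loading: instead of requesting the filesystem module by bare name, the request goes through ___request_module() (a variant added elsewhere in this patch) with a "grsec_modharden_fs" marker. That lets the hardened module loader distinguish an auto-load triggered by, say, an unprivileged mount attempt from an explicit request by root, and refuse the former.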
51588diff --git a/fs/fs_struct.c b/fs/fs_struct.c
51589index fe6ca58..65318cf 100644
51590--- a/fs/fs_struct.c
51591+++ b/fs/fs_struct.c
51592@@ -4,6 +4,7 @@
51593 #include <linux/path.h>
51594 #include <linux/slab.h>
51595 #include <linux/fs_struct.h>
51596+#include <linux/grsecurity.h>
51597 #include "internal.h"
51598
51599 /*
51600@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
51601 write_seqcount_begin(&fs->seq);
51602 old_root = fs->root;
51603 fs->root = *path;
51604+ gr_set_chroot_entries(current, path);
51605 write_seqcount_end(&fs->seq);
51606 spin_unlock(&fs->lock);
51607 if (old_root.dentry)
51608@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
51609 return 1;
51610 }
51611
51612+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
51613+{
51614+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
51615+ return 0;
51616+ *p = *new;
51617+
51618+ /* This function is only called from pivot_root(). Leave our
51619+ gr_chroot_dentry and is_chrooted flags as-is, so that a
51620+ pivoted root isn't treated as a chroot
51621+ */
51622+ //gr_set_chroot_entries(task, new);
51623+
51624+ return 1;
51625+}
51626+
51627 void chroot_fs_refs(struct path *old_root, struct path *new_root)
51628 {
51629 struct task_struct *g, *p;
51630@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
51631 int hits = 0;
51632 spin_lock(&fs->lock);
51633 write_seqcount_begin(&fs->seq);
51634- hits += replace_path(&fs->root, old_root, new_root);
51635+ hits += replace_root_path(p, &fs->root, old_root, new_root);
51636 hits += replace_path(&fs->pwd, old_root, new_root);
51637 write_seqcount_end(&fs->seq);
51638 while (hits--) {
51639@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
51640 task_lock(tsk);
51641 spin_lock(&fs->lock);
51642 tsk->fs = NULL;
51643- kill = !--fs->users;
51644+ gr_clear_chroot_entries(tsk);
51645+ kill = !atomic_dec_return(&fs->users);
51646 spin_unlock(&fs->lock);
51647 task_unlock(tsk);
51648 if (kill)
51649@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51650 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51651 /* We don't need to lock fs - think why ;-) */
51652 if (fs) {
51653- fs->users = 1;
51654+ atomic_set(&fs->users, 1);
51655 fs->in_exec = 0;
51656 spin_lock_init(&fs->lock);
51657 seqcount_init(&fs->seq);
51658@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51659 spin_lock(&old->lock);
51660 fs->root = old->root;
51661 path_get(&fs->root);
51662+ /* instead of calling gr_set_chroot_entries here,
51663+ we call it from every caller of this function
51664+ */
51665 fs->pwd = old->pwd;
51666 path_get(&fs->pwd);
51667 spin_unlock(&old->lock);
51668@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
51669
51670 task_lock(current);
51671 spin_lock(&fs->lock);
51672- kill = !--fs->users;
51673+ kill = !atomic_dec_return(&fs->users);
51674 current->fs = new_fs;
51675+ gr_set_chroot_entries(current, &new_fs->root);
51676 spin_unlock(&fs->lock);
51677 task_unlock(current);
51678
51679@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51680
51681 int current_umask(void)
51682 {
51683- return current->fs->umask;
51684+ return current->fs->umask | gr_acl_umask();
51685 }
51686 EXPORT_SYMBOL(current_umask);
51687
51688 /* to be mentioned only in INIT_TASK */
51689 struct fs_struct init_fs = {
51690- .users = 1,
51691+ .users = ATOMIC_INIT(1),
51692 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51693 .seq = SEQCNT_ZERO,
51694 .umask = 0022,
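current_umask() above ORs the grsecurity ACL umask into the process umask, so RBAC policy can only ever tighten file-creation modes, never loosen them: extra set bits can only clear more permission bits. Worked arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned int requested  = 0666; /* mode passed to open()/creat() */
    unsigned int proc_umask = 0022;
    unsigned int acl_umask  = 0077; /* imposed by RBAC policy */

    unsigned int effective = requested & ~(proc_umask | acl_umask);
    printf("%04o\n", effective);    /* 0600: the policy only tightens */
    return 0;
}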
51695diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51696index 8dcb114..b1072e2 100644
51697--- a/fs/fscache/cookie.c
51698+++ b/fs/fscache/cookie.c
51699@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51700 parent ? (char *) parent->def->name : "<no-parent>",
51701 def->name, netfs_data);
51702
51703- fscache_stat(&fscache_n_acquires);
51704+ fscache_stat_unchecked(&fscache_n_acquires);
51705
51706 /* if there's no parent cookie, then we don't create one here either */
51707 if (!parent) {
51708- fscache_stat(&fscache_n_acquires_null);
51709+ fscache_stat_unchecked(&fscache_n_acquires_null);
51710 _leave(" [no parent]");
51711 return NULL;
51712 }
51713@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51714 /* allocate and initialise a cookie */
51715 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51716 if (!cookie) {
51717- fscache_stat(&fscache_n_acquires_oom);
51718+ fscache_stat_unchecked(&fscache_n_acquires_oom);
51719 _leave(" [ENOMEM]");
51720 return NULL;
51721 }
51722@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51723
51724 switch (cookie->def->type) {
51725 case FSCACHE_COOKIE_TYPE_INDEX:
51726- fscache_stat(&fscache_n_cookie_index);
51727+ fscache_stat_unchecked(&fscache_n_cookie_index);
51728 break;
51729 case FSCACHE_COOKIE_TYPE_DATAFILE:
51730- fscache_stat(&fscache_n_cookie_data);
51731+ fscache_stat_unchecked(&fscache_n_cookie_data);
51732 break;
51733 default:
51734- fscache_stat(&fscache_n_cookie_special);
51735+ fscache_stat_unchecked(&fscache_n_cookie_special);
51736 break;
51737 }
51738
51739@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51740 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51741 atomic_dec(&parent->n_children);
51742 __fscache_cookie_put(cookie);
51743- fscache_stat(&fscache_n_acquires_nobufs);
51744+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51745 _leave(" = NULL");
51746 return NULL;
51747 }
51748 }
51749
51750- fscache_stat(&fscache_n_acquires_ok);
51751+ fscache_stat_unchecked(&fscache_n_acquires_ok);
51752 _leave(" = %p", cookie);
51753 return cookie;
51754 }
51755@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
51756 cache = fscache_select_cache_for_object(cookie->parent);
51757 if (!cache) {
51758 up_read(&fscache_addremove_sem);
51759- fscache_stat(&fscache_n_acquires_no_cache);
51760+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
51761 _leave(" = -ENOMEDIUM [no cache]");
51762 return -ENOMEDIUM;
51763 }
51764@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
51765 object = cache->ops->alloc_object(cache, cookie);
51766 fscache_stat_d(&fscache_n_cop_alloc_object);
51767 if (IS_ERR(object)) {
51768- fscache_stat(&fscache_n_object_no_alloc);
51769+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
51770 ret = PTR_ERR(object);
51771 goto error;
51772 }
51773
51774- fscache_stat(&fscache_n_object_alloc);
51775+ fscache_stat_unchecked(&fscache_n_object_alloc);
51776
51777 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
51778
51779@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
51780
51781 _enter("{%s}", cookie->def->name);
51782
51783- fscache_stat(&fscache_n_invalidates);
51784+ fscache_stat_unchecked(&fscache_n_invalidates);
51785
51786 /* Only permit invalidation of data files. Invalidating an index will
51787 * require the caller to release all its attachments to the tree rooted
51788@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
51789 struct fscache_object *object;
51790 struct hlist_node *_p;
51791
51792- fscache_stat(&fscache_n_updates);
51793+ fscache_stat_unchecked(&fscache_n_updates);
51794
51795 if (!cookie) {
51796- fscache_stat(&fscache_n_updates_null);
51797+ fscache_stat_unchecked(&fscache_n_updates_null);
51798 _leave(" [no cookie]");
51799 return;
51800 }
51801@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51802 struct fscache_object *object;
51803 unsigned long event;
51804
51805- fscache_stat(&fscache_n_relinquishes);
51806+ fscache_stat_unchecked(&fscache_n_relinquishes);
51807 if (retire)
51808- fscache_stat(&fscache_n_relinquishes_retire);
51809+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
51810
51811 if (!cookie) {
51812- fscache_stat(&fscache_n_relinquishes_null);
51813+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
51814 _leave(" [no cookie]");
51815 return;
51816 }
51817@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51818
51819 /* wait for the cookie to finish being instantiated (or to fail) */
51820 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
51821- fscache_stat(&fscache_n_relinquishes_waitcrt);
51822+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
51823 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
51824 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
51825 }
51826diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
51827index ee38fef..0a326d4 100644
51828--- a/fs/fscache/internal.h
51829+++ b/fs/fscache/internal.h
51830@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
51831 * stats.c
51832 */
51833 #ifdef CONFIG_FSCACHE_STATS
51834-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51835-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51836+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51837+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51838
51839-extern atomic_t fscache_n_op_pend;
51840-extern atomic_t fscache_n_op_run;
51841-extern atomic_t fscache_n_op_enqueue;
51842-extern atomic_t fscache_n_op_deferred_release;
51843-extern atomic_t fscache_n_op_release;
51844-extern atomic_t fscache_n_op_gc;
51845-extern atomic_t fscache_n_op_cancelled;
51846-extern atomic_t fscache_n_op_rejected;
51847+extern atomic_unchecked_t fscache_n_op_pend;
51848+extern atomic_unchecked_t fscache_n_op_run;
51849+extern atomic_unchecked_t fscache_n_op_enqueue;
51850+extern atomic_unchecked_t fscache_n_op_deferred_release;
51851+extern atomic_unchecked_t fscache_n_op_release;
51852+extern atomic_unchecked_t fscache_n_op_gc;
51853+extern atomic_unchecked_t fscache_n_op_cancelled;
51854+extern atomic_unchecked_t fscache_n_op_rejected;
51855
51856-extern atomic_t fscache_n_attr_changed;
51857-extern atomic_t fscache_n_attr_changed_ok;
51858-extern atomic_t fscache_n_attr_changed_nobufs;
51859-extern atomic_t fscache_n_attr_changed_nomem;
51860-extern atomic_t fscache_n_attr_changed_calls;
51861+extern atomic_unchecked_t fscache_n_attr_changed;
51862+extern atomic_unchecked_t fscache_n_attr_changed_ok;
51863+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
51864+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
51865+extern atomic_unchecked_t fscache_n_attr_changed_calls;
51866
51867-extern atomic_t fscache_n_allocs;
51868-extern atomic_t fscache_n_allocs_ok;
51869-extern atomic_t fscache_n_allocs_wait;
51870-extern atomic_t fscache_n_allocs_nobufs;
51871-extern atomic_t fscache_n_allocs_intr;
51872-extern atomic_t fscache_n_allocs_object_dead;
51873-extern atomic_t fscache_n_alloc_ops;
51874-extern atomic_t fscache_n_alloc_op_waits;
51875+extern atomic_unchecked_t fscache_n_allocs;
51876+extern atomic_unchecked_t fscache_n_allocs_ok;
51877+extern atomic_unchecked_t fscache_n_allocs_wait;
51878+extern atomic_unchecked_t fscache_n_allocs_nobufs;
51879+extern atomic_unchecked_t fscache_n_allocs_intr;
51880+extern atomic_unchecked_t fscache_n_allocs_object_dead;
51881+extern atomic_unchecked_t fscache_n_alloc_ops;
51882+extern atomic_unchecked_t fscache_n_alloc_op_waits;
51883
51884-extern atomic_t fscache_n_retrievals;
51885-extern atomic_t fscache_n_retrievals_ok;
51886-extern atomic_t fscache_n_retrievals_wait;
51887-extern atomic_t fscache_n_retrievals_nodata;
51888-extern atomic_t fscache_n_retrievals_nobufs;
51889-extern atomic_t fscache_n_retrievals_intr;
51890-extern atomic_t fscache_n_retrievals_nomem;
51891-extern atomic_t fscache_n_retrievals_object_dead;
51892-extern atomic_t fscache_n_retrieval_ops;
51893-extern atomic_t fscache_n_retrieval_op_waits;
51894+extern atomic_unchecked_t fscache_n_retrievals;
51895+extern atomic_unchecked_t fscache_n_retrievals_ok;
51896+extern atomic_unchecked_t fscache_n_retrievals_wait;
51897+extern atomic_unchecked_t fscache_n_retrievals_nodata;
51898+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
51899+extern atomic_unchecked_t fscache_n_retrievals_intr;
51900+extern atomic_unchecked_t fscache_n_retrievals_nomem;
51901+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
51902+extern atomic_unchecked_t fscache_n_retrieval_ops;
51903+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
51904
51905-extern atomic_t fscache_n_stores;
51906-extern atomic_t fscache_n_stores_ok;
51907-extern atomic_t fscache_n_stores_again;
51908-extern atomic_t fscache_n_stores_nobufs;
51909-extern atomic_t fscache_n_stores_oom;
51910-extern atomic_t fscache_n_store_ops;
51911-extern atomic_t fscache_n_store_calls;
51912-extern atomic_t fscache_n_store_pages;
51913-extern atomic_t fscache_n_store_radix_deletes;
51914-extern atomic_t fscache_n_store_pages_over_limit;
51915+extern atomic_unchecked_t fscache_n_stores;
51916+extern atomic_unchecked_t fscache_n_stores_ok;
51917+extern atomic_unchecked_t fscache_n_stores_again;
51918+extern atomic_unchecked_t fscache_n_stores_nobufs;
51919+extern atomic_unchecked_t fscache_n_stores_oom;
51920+extern atomic_unchecked_t fscache_n_store_ops;
51921+extern atomic_unchecked_t fscache_n_store_calls;
51922+extern atomic_unchecked_t fscache_n_store_pages;
51923+extern atomic_unchecked_t fscache_n_store_radix_deletes;
51924+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
51925
51926-extern atomic_t fscache_n_store_vmscan_not_storing;
51927-extern atomic_t fscache_n_store_vmscan_gone;
51928-extern atomic_t fscache_n_store_vmscan_busy;
51929-extern atomic_t fscache_n_store_vmscan_cancelled;
51930-extern atomic_t fscache_n_store_vmscan_wait;
51931+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51932+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
51933+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
51934+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51935+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
51936
51937-extern atomic_t fscache_n_marks;
51938-extern atomic_t fscache_n_uncaches;
51939+extern atomic_unchecked_t fscache_n_marks;
51940+extern atomic_unchecked_t fscache_n_uncaches;
51941
51942-extern atomic_t fscache_n_acquires;
51943-extern atomic_t fscache_n_acquires_null;
51944-extern atomic_t fscache_n_acquires_no_cache;
51945-extern atomic_t fscache_n_acquires_ok;
51946-extern atomic_t fscache_n_acquires_nobufs;
51947-extern atomic_t fscache_n_acquires_oom;
51948+extern atomic_unchecked_t fscache_n_acquires;
51949+extern atomic_unchecked_t fscache_n_acquires_null;
51950+extern atomic_unchecked_t fscache_n_acquires_no_cache;
51951+extern atomic_unchecked_t fscache_n_acquires_ok;
51952+extern atomic_unchecked_t fscache_n_acquires_nobufs;
51953+extern atomic_unchecked_t fscache_n_acquires_oom;
51954
51955-extern atomic_t fscache_n_invalidates;
51956-extern atomic_t fscache_n_invalidates_run;
51957+extern atomic_unchecked_t fscache_n_invalidates;
51958+extern atomic_unchecked_t fscache_n_invalidates_run;
51959
51960-extern atomic_t fscache_n_updates;
51961-extern atomic_t fscache_n_updates_null;
51962-extern atomic_t fscache_n_updates_run;
51963+extern atomic_unchecked_t fscache_n_updates;
51964+extern atomic_unchecked_t fscache_n_updates_null;
51965+extern atomic_unchecked_t fscache_n_updates_run;
51966
51967-extern atomic_t fscache_n_relinquishes;
51968-extern atomic_t fscache_n_relinquishes_null;
51969-extern atomic_t fscache_n_relinquishes_waitcrt;
51970-extern atomic_t fscache_n_relinquishes_retire;
51971+extern atomic_unchecked_t fscache_n_relinquishes;
51972+extern atomic_unchecked_t fscache_n_relinquishes_null;
51973+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51974+extern atomic_unchecked_t fscache_n_relinquishes_retire;
51975
51976-extern atomic_t fscache_n_cookie_index;
51977-extern atomic_t fscache_n_cookie_data;
51978-extern atomic_t fscache_n_cookie_special;
51979+extern atomic_unchecked_t fscache_n_cookie_index;
51980+extern atomic_unchecked_t fscache_n_cookie_data;
51981+extern atomic_unchecked_t fscache_n_cookie_special;
51982
51983-extern atomic_t fscache_n_object_alloc;
51984-extern atomic_t fscache_n_object_no_alloc;
51985-extern atomic_t fscache_n_object_lookups;
51986-extern atomic_t fscache_n_object_lookups_negative;
51987-extern atomic_t fscache_n_object_lookups_positive;
51988-extern atomic_t fscache_n_object_lookups_timed_out;
51989-extern atomic_t fscache_n_object_created;
51990-extern atomic_t fscache_n_object_avail;
51991-extern atomic_t fscache_n_object_dead;
51992+extern atomic_unchecked_t fscache_n_object_alloc;
51993+extern atomic_unchecked_t fscache_n_object_no_alloc;
51994+extern atomic_unchecked_t fscache_n_object_lookups;
51995+extern atomic_unchecked_t fscache_n_object_lookups_negative;
51996+extern atomic_unchecked_t fscache_n_object_lookups_positive;
51997+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
51998+extern atomic_unchecked_t fscache_n_object_created;
51999+extern atomic_unchecked_t fscache_n_object_avail;
52000+extern atomic_unchecked_t fscache_n_object_dead;
52001
52002-extern atomic_t fscache_n_checkaux_none;
52003-extern atomic_t fscache_n_checkaux_okay;
52004-extern atomic_t fscache_n_checkaux_update;
52005-extern atomic_t fscache_n_checkaux_obsolete;
52006+extern atomic_unchecked_t fscache_n_checkaux_none;
52007+extern atomic_unchecked_t fscache_n_checkaux_okay;
52008+extern atomic_unchecked_t fscache_n_checkaux_update;
52009+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
52010
52011 extern atomic_t fscache_n_cop_alloc_object;
52012 extern atomic_t fscache_n_cop_lookup_object;
52013@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
52014 atomic_inc(stat);
52015 }
52016
52017+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
52018+{
52019+ atomic_inc_unchecked(stat);
52020+}
52021+
52022 static inline void fscache_stat_d(atomic_t *stat)
52023 {
52024 atomic_dec(stat);
52025@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
52026
52027 #define __fscache_stat(stat) (NULL)
52028 #define fscache_stat(stat) do {} while (0)
52029+#define fscache_stat_unchecked(stat) do {} while (0)
52030 #define fscache_stat_d(stat) do {} while (0)
52031 #endif
52032
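With CONFIG_FSCACHE_STATS disabled, the block above turns every stat call into an empty statement, so call sites need no conditional compilation; the new fscache_stat_unchecked() stub simply joins the existing pattern. The "do {} while (0)" shape matters: it keeps each expansion a single statement, which stays safe in unbraced if/else bodies:

#define my_stat(x) do {} while (0) /* same shape as the fscache stubs */

void record(int hit)
{
    if (hit)
        my_stat(n_hits);
    else
        my_stat(n_misses);
    /* had the macro expanded to a bare {} instead, the semicolon after
       my_stat(n_hits) would terminate the if and orphan the else branch */
}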
52033diff --git a/fs/fscache/object.c b/fs/fscache/object.c
52034index 50d41c1..10ee117 100644
52035--- a/fs/fscache/object.c
52036+++ b/fs/fscache/object.c
52037@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52038 /* Invalidate an object on disk */
52039 case FSCACHE_OBJECT_INVALIDATING:
52040 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
52041- fscache_stat(&fscache_n_invalidates_run);
52042+ fscache_stat_unchecked(&fscache_n_invalidates_run);
52043 fscache_stat(&fscache_n_cop_invalidate_object);
52044 fscache_invalidate_object(object);
52045 fscache_stat_d(&fscache_n_cop_invalidate_object);
52046@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52047 /* update the object metadata on disk */
52048 case FSCACHE_OBJECT_UPDATING:
52049 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
52050- fscache_stat(&fscache_n_updates_run);
52051+ fscache_stat_unchecked(&fscache_n_updates_run);
52052 fscache_stat(&fscache_n_cop_update_object);
52053 object->cache->ops->update_object(object);
52054 fscache_stat_d(&fscache_n_cop_update_object);
52055@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52056 spin_lock(&object->lock);
52057 object->state = FSCACHE_OBJECT_DEAD;
52058 spin_unlock(&object->lock);
52059- fscache_stat(&fscache_n_object_dead);
52060+ fscache_stat_unchecked(&fscache_n_object_dead);
52061 goto terminal_transit;
52062
52063 /* handle the parent cache of this object being withdrawn from
52064@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52065 spin_lock(&object->lock);
52066 object->state = FSCACHE_OBJECT_DEAD;
52067 spin_unlock(&object->lock);
52068- fscache_stat(&fscache_n_object_dead);
52069+ fscache_stat_unchecked(&fscache_n_object_dead);
52070 goto terminal_transit;
52071
52072 /* complain about the object being woken up once it is
52073@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52074 parent->cookie->def->name, cookie->def->name,
52075 object->cache->tag->name);
52076
52077- fscache_stat(&fscache_n_object_lookups);
52078+ fscache_stat_unchecked(&fscache_n_object_lookups);
52079 fscache_stat(&fscache_n_cop_lookup_object);
52080 ret = object->cache->ops->lookup_object(object);
52081 fscache_stat_d(&fscache_n_cop_lookup_object);
52082@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52083 if (ret == -ETIMEDOUT) {
52084 /* probably stuck behind another object, so move this one to
52085 * the back of the queue */
52086- fscache_stat(&fscache_n_object_lookups_timed_out);
52087+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
52088 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52089 }
52090
52091@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
52092
52093 spin_lock(&object->lock);
52094 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52095- fscache_stat(&fscache_n_object_lookups_negative);
52096+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
52097
52098 /* transit here to allow write requests to begin stacking up
52099 * and read requests to begin returning ENODATA */
52100@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
52101 * result, in which case there may be data available */
52102 spin_lock(&object->lock);
52103 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52104- fscache_stat(&fscache_n_object_lookups_positive);
52105+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
52106
52107 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
52108
52109@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
52110 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52111 } else {
52112 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
52113- fscache_stat(&fscache_n_object_created);
52114+ fscache_stat_unchecked(&fscache_n_object_created);
52115
52116 object->state = FSCACHE_OBJECT_AVAILABLE;
52117 spin_unlock(&object->lock);
52118@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
52119 fscache_enqueue_dependents(object);
52120
52121 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
52122- fscache_stat(&fscache_n_object_avail);
52123+ fscache_stat_unchecked(&fscache_n_object_avail);
52124
52125 _leave("");
52126 }
52127@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52128 enum fscache_checkaux result;
52129
52130 if (!object->cookie->def->check_aux) {
52131- fscache_stat(&fscache_n_checkaux_none);
52132+ fscache_stat_unchecked(&fscache_n_checkaux_none);
52133 return FSCACHE_CHECKAUX_OKAY;
52134 }
52135
52136@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52137 switch (result) {
52138 /* entry okay as is */
52139 case FSCACHE_CHECKAUX_OKAY:
52140- fscache_stat(&fscache_n_checkaux_okay);
52141+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
52142 break;
52143
52144 /* entry requires update */
52145 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
52146- fscache_stat(&fscache_n_checkaux_update);
52147+ fscache_stat_unchecked(&fscache_n_checkaux_update);
52148 break;
52149
52150 /* entry requires deletion */
52151 case FSCACHE_CHECKAUX_OBSOLETE:
52152- fscache_stat(&fscache_n_checkaux_obsolete);
52153+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
52154 break;
52155
52156 default:
52157diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
52158index 762a9ec..2023284 100644
52159--- a/fs/fscache/operation.c
52160+++ b/fs/fscache/operation.c
52161@@ -17,7 +17,7 @@
52162 #include <linux/slab.h>
52163 #include "internal.h"
52164
52165-atomic_t fscache_op_debug_id;
52166+atomic_unchecked_t fscache_op_debug_id;
52167 EXPORT_SYMBOL(fscache_op_debug_id);
52168
52169 /**
52170@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
52171 ASSERTCMP(atomic_read(&op->usage), >, 0);
52172 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
52173
52174- fscache_stat(&fscache_n_op_enqueue);
52175+ fscache_stat_unchecked(&fscache_n_op_enqueue);
52176 switch (op->flags & FSCACHE_OP_TYPE) {
52177 case FSCACHE_OP_ASYNC:
52178 _debug("queue async");
52179@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
52180 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
52181 if (op->processor)
52182 fscache_enqueue_operation(op);
52183- fscache_stat(&fscache_n_op_run);
52184+ fscache_stat_unchecked(&fscache_n_op_run);
52185 }
52186
52187 /*
52188@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52189 if (object->n_in_progress > 0) {
52190 atomic_inc(&op->usage);
52191 list_add_tail(&op->pend_link, &object->pending_ops);
52192- fscache_stat(&fscache_n_op_pend);
52193+ fscache_stat_unchecked(&fscache_n_op_pend);
52194 } else if (!list_empty(&object->pending_ops)) {
52195 atomic_inc(&op->usage);
52196 list_add_tail(&op->pend_link, &object->pending_ops);
52197- fscache_stat(&fscache_n_op_pend);
52198+ fscache_stat_unchecked(&fscache_n_op_pend);
52199 fscache_start_operations(object);
52200 } else {
52201 ASSERTCMP(object->n_in_progress, ==, 0);
52202@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52203 object->n_exclusive++; /* reads and writes must wait */
52204 atomic_inc(&op->usage);
52205 list_add_tail(&op->pend_link, &object->pending_ops);
52206- fscache_stat(&fscache_n_op_pend);
52207+ fscache_stat_unchecked(&fscache_n_op_pend);
52208 ret = 0;
52209 } else {
52210 /* If we're in any other state, there must have been an I/O
52211@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
52212 if (object->n_exclusive > 0) {
52213 atomic_inc(&op->usage);
52214 list_add_tail(&op->pend_link, &object->pending_ops);
52215- fscache_stat(&fscache_n_op_pend);
52216+ fscache_stat_unchecked(&fscache_n_op_pend);
52217 } else if (!list_empty(&object->pending_ops)) {
52218 atomic_inc(&op->usage);
52219 list_add_tail(&op->pend_link, &object->pending_ops);
52220- fscache_stat(&fscache_n_op_pend);
52221+ fscache_stat_unchecked(&fscache_n_op_pend);
52222 fscache_start_operations(object);
52223 } else {
52224 ASSERTCMP(object->n_exclusive, ==, 0);
52225@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
52226 object->n_ops++;
52227 atomic_inc(&op->usage);
52228 list_add_tail(&op->pend_link, &object->pending_ops);
52229- fscache_stat(&fscache_n_op_pend);
52230+ fscache_stat_unchecked(&fscache_n_op_pend);
52231 ret = 0;
52232 } else if (object->state == FSCACHE_OBJECT_DYING ||
52233 object->state == FSCACHE_OBJECT_LC_DYING ||
52234 object->state == FSCACHE_OBJECT_WITHDRAWING) {
52235- fscache_stat(&fscache_n_op_rejected);
52236+ fscache_stat_unchecked(&fscache_n_op_rejected);
52237 op->state = FSCACHE_OP_ST_CANCELLED;
52238 ret = -ENOBUFS;
52239 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
52240@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
52241 ret = -EBUSY;
52242 if (op->state == FSCACHE_OP_ST_PENDING) {
52243 ASSERT(!list_empty(&op->pend_link));
52244- fscache_stat(&fscache_n_op_cancelled);
52245+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52246 list_del_init(&op->pend_link);
52247 if (do_cancel)
52248 do_cancel(op);
52249@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
52250 while (!list_empty(&object->pending_ops)) {
52251 op = list_entry(object->pending_ops.next,
52252 struct fscache_operation, pend_link);
52253- fscache_stat(&fscache_n_op_cancelled);
52254+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52255 list_del_init(&op->pend_link);
52256
52257 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
52258@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
52259 op->state, ==, FSCACHE_OP_ST_CANCELLED);
52260 op->state = FSCACHE_OP_ST_DEAD;
52261
52262- fscache_stat(&fscache_n_op_release);
52263+ fscache_stat_unchecked(&fscache_n_op_release);
52264
52265 if (op->release) {
52266 op->release(op);
52267@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
52268 * lock, and defer it otherwise */
52269 if (!spin_trylock(&object->lock)) {
52270 _debug("defer put");
52271- fscache_stat(&fscache_n_op_deferred_release);
52272+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
52273
52274 cache = object->cache;
52275 spin_lock(&cache->op_gc_list_lock);
52276@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
52277
52278 _debug("GC DEFERRED REL OBJ%x OP%x",
52279 object->debug_id, op->debug_id);
52280- fscache_stat(&fscache_n_op_gc);
52281+ fscache_stat_unchecked(&fscache_n_op_gc);
52282
52283 ASSERTCMP(atomic_read(&op->usage), ==, 0);
52284 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
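
fscache_op_debug_id, converted at the top of this file, is the same policy in ID-dispenser form: each new operation takes the next value purely as a debug label, and recycling labels after 2^32 operations is harmless. A sketch of the idiom using the *_unchecked API this patch introduces (new_debug_id() is a made-up name for illustration; ATOMIC_INIT works for atomic_unchecked_t here just as it does in the lockd hunk later in this patch):

    static atomic_unchecked_t debug_id_seq = ATOMIC_INIT(0);

    static inline unsigned int new_debug_id(void)
    {
        /* wrapping past UINT_MAX only recycles debug labels */
        return atomic_inc_return_unchecked(&debug_id_seq);
    }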
52285diff --git a/fs/fscache/page.c b/fs/fscache/page.c
52286index ff000e5..c44ec6d 100644
52287--- a/fs/fscache/page.c
52288+++ b/fs/fscache/page.c
52289@@ -61,7 +61,7 @@ try_again:
52290 val = radix_tree_lookup(&cookie->stores, page->index);
52291 if (!val) {
52292 rcu_read_unlock();
52293- fscache_stat(&fscache_n_store_vmscan_not_storing);
52294+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
52295 __fscache_uncache_page(cookie, page);
52296 return true;
52297 }
52298@@ -91,11 +91,11 @@ try_again:
52299 spin_unlock(&cookie->stores_lock);
52300
52301 if (xpage) {
52302- fscache_stat(&fscache_n_store_vmscan_cancelled);
52303- fscache_stat(&fscache_n_store_radix_deletes);
52304+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
52305+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52306 ASSERTCMP(xpage, ==, page);
52307 } else {
52308- fscache_stat(&fscache_n_store_vmscan_gone);
52309+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
52310 }
52311
52312 wake_up_bit(&cookie->flags, 0);
52313@@ -110,11 +110,11 @@ page_busy:
52314 * sleeping on memory allocation, so we may need to impose a timeout
52315 * too. */
52316 if (!(gfp & __GFP_WAIT)) {
52317- fscache_stat(&fscache_n_store_vmscan_busy);
52318+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
52319 return false;
52320 }
52321
52322- fscache_stat(&fscache_n_store_vmscan_wait);
52323+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
52324 __fscache_wait_on_page_write(cookie, page);
52325 gfp &= ~__GFP_WAIT;
52326 goto try_again;
52327@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
52328 FSCACHE_COOKIE_STORING_TAG);
52329 if (!radix_tree_tag_get(&cookie->stores, page->index,
52330 FSCACHE_COOKIE_PENDING_TAG)) {
52331- fscache_stat(&fscache_n_store_radix_deletes);
52332+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52333 xpage = radix_tree_delete(&cookie->stores, page->index);
52334 }
52335 spin_unlock(&cookie->stores_lock);
52336@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
52337
52338 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
52339
52340- fscache_stat(&fscache_n_attr_changed_calls);
52341+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
52342
52343 if (fscache_object_is_active(object)) {
52344 fscache_stat(&fscache_n_cop_attr_changed);
52345@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52346
52347 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52348
52349- fscache_stat(&fscache_n_attr_changed);
52350+ fscache_stat_unchecked(&fscache_n_attr_changed);
52351
52352 op = kzalloc(sizeof(*op), GFP_KERNEL);
52353 if (!op) {
52354- fscache_stat(&fscache_n_attr_changed_nomem);
52355+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
52356 _leave(" = -ENOMEM");
52357 return -ENOMEM;
52358 }
52359@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52360 if (fscache_submit_exclusive_op(object, op) < 0)
52361 goto nobufs;
52362 spin_unlock(&cookie->lock);
52363- fscache_stat(&fscache_n_attr_changed_ok);
52364+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
52365 fscache_put_operation(op);
52366 _leave(" = 0");
52367 return 0;
52368@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52369 nobufs:
52370 spin_unlock(&cookie->lock);
52371 kfree(op);
52372- fscache_stat(&fscache_n_attr_changed_nobufs);
52373+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
52374 _leave(" = %d", -ENOBUFS);
52375 return -ENOBUFS;
52376 }
52377@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
52378 /* allocate a retrieval operation and attempt to submit it */
52379 op = kzalloc(sizeof(*op), GFP_NOIO);
52380 if (!op) {
52381- fscache_stat(&fscache_n_retrievals_nomem);
52382+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52383 return NULL;
52384 }
52385
52386@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
52387 return 0;
52388 }
52389
52390- fscache_stat(&fscache_n_retrievals_wait);
52391+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
52392
52393 jif = jiffies;
52394 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
52395 fscache_wait_bit_interruptible,
52396 TASK_INTERRUPTIBLE) != 0) {
52397- fscache_stat(&fscache_n_retrievals_intr);
52398+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52399 _leave(" = -ERESTARTSYS");
52400 return -ERESTARTSYS;
52401 }
52402@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
52403 */
52404 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52405 struct fscache_retrieval *op,
52406- atomic_t *stat_op_waits,
52407- atomic_t *stat_object_dead)
52408+ atomic_unchecked_t *stat_op_waits,
52409+ atomic_unchecked_t *stat_object_dead)
52410 {
52411 int ret;
52412
52413@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52414 goto check_if_dead;
52415
52416 _debug(">>> WT");
52417- fscache_stat(stat_op_waits);
52418+ fscache_stat_unchecked(stat_op_waits);
52419 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
52420 fscache_wait_bit_interruptible,
52421 TASK_INTERRUPTIBLE) != 0) {
52422@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52423
52424 check_if_dead:
52425 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
52426- fscache_stat(stat_object_dead);
52427+ fscache_stat_unchecked(stat_object_dead);
52428 _leave(" = -ENOBUFS [cancelled]");
52429 return -ENOBUFS;
52430 }
52431 if (unlikely(fscache_object_is_dead(object))) {
52432 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
52433 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
52434- fscache_stat(stat_object_dead);
52435+ fscache_stat_unchecked(stat_object_dead);
52436 return -ENOBUFS;
52437 }
52438 return 0;
52439@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52440
52441 _enter("%p,%p,,,", cookie, page);
52442
52443- fscache_stat(&fscache_n_retrievals);
52444+ fscache_stat_unchecked(&fscache_n_retrievals);
52445
52446 if (hlist_empty(&cookie->backing_objects))
52447 goto nobufs;
52448@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52449 goto nobufs_unlock_dec;
52450 spin_unlock(&cookie->lock);
52451
52452- fscache_stat(&fscache_n_retrieval_ops);
52453+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52454
52455 /* pin the netfs read context in case we need to do the actual netfs
52456 * read because we've encountered a cache read failure */
52457@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52458
52459 error:
52460 if (ret == -ENOMEM)
52461- fscache_stat(&fscache_n_retrievals_nomem);
52462+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52463 else if (ret == -ERESTARTSYS)
52464- fscache_stat(&fscache_n_retrievals_intr);
52465+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52466 else if (ret == -ENODATA)
52467- fscache_stat(&fscache_n_retrievals_nodata);
52468+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52469 else if (ret < 0)
52470- fscache_stat(&fscache_n_retrievals_nobufs);
52471+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52472 else
52473- fscache_stat(&fscache_n_retrievals_ok);
52474+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52475
52476 fscache_put_retrieval(op);
52477 _leave(" = %d", ret);
52478@@ -467,7 +467,7 @@ nobufs_unlock:
52479 spin_unlock(&cookie->lock);
52480 kfree(op);
52481 nobufs:
52482- fscache_stat(&fscache_n_retrievals_nobufs);
52483+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52484 _leave(" = -ENOBUFS");
52485 return -ENOBUFS;
52486 }
52487@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52488
52489 _enter("%p,,%d,,,", cookie, *nr_pages);
52490
52491- fscache_stat(&fscache_n_retrievals);
52492+ fscache_stat_unchecked(&fscache_n_retrievals);
52493
52494 if (hlist_empty(&cookie->backing_objects))
52495 goto nobufs;
52496@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52497 goto nobufs_unlock_dec;
52498 spin_unlock(&cookie->lock);
52499
52500- fscache_stat(&fscache_n_retrieval_ops);
52501+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52502
52503 /* pin the netfs read context in case we need to do the actual netfs
52504 * read because we've encountered a cache read failure */
52505@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52506
52507 error:
52508 if (ret == -ENOMEM)
52509- fscache_stat(&fscache_n_retrievals_nomem);
52510+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52511 else if (ret == -ERESTARTSYS)
52512- fscache_stat(&fscache_n_retrievals_intr);
52513+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52514 else if (ret == -ENODATA)
52515- fscache_stat(&fscache_n_retrievals_nodata);
52516+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52517 else if (ret < 0)
52518- fscache_stat(&fscache_n_retrievals_nobufs);
52519+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52520 else
52521- fscache_stat(&fscache_n_retrievals_ok);
52522+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52523
52524 fscache_put_retrieval(op);
52525 _leave(" = %d", ret);
52526@@ -591,7 +591,7 @@ nobufs_unlock:
52527 spin_unlock(&cookie->lock);
52528 kfree(op);
52529 nobufs:
52530- fscache_stat(&fscache_n_retrievals_nobufs);
52531+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52532 _leave(" = -ENOBUFS");
52533 return -ENOBUFS;
52534 }
52535@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52536
52537 _enter("%p,%p,,,", cookie, page);
52538
52539- fscache_stat(&fscache_n_allocs);
52540+ fscache_stat_unchecked(&fscache_n_allocs);
52541
52542 if (hlist_empty(&cookie->backing_objects))
52543 goto nobufs;
52544@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52545 goto nobufs_unlock;
52546 spin_unlock(&cookie->lock);
52547
52548- fscache_stat(&fscache_n_alloc_ops);
52549+ fscache_stat_unchecked(&fscache_n_alloc_ops);
52550
52551 ret = fscache_wait_for_retrieval_activation(
52552 object, op,
52553@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52554
52555 error:
52556 if (ret == -ERESTARTSYS)
52557- fscache_stat(&fscache_n_allocs_intr);
52558+ fscache_stat_unchecked(&fscache_n_allocs_intr);
52559 else if (ret < 0)
52560- fscache_stat(&fscache_n_allocs_nobufs);
52561+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52562 else
52563- fscache_stat(&fscache_n_allocs_ok);
52564+ fscache_stat_unchecked(&fscache_n_allocs_ok);
52565
52566 fscache_put_retrieval(op);
52567 _leave(" = %d", ret);
52568@@ -677,7 +677,7 @@ nobufs_unlock:
52569 spin_unlock(&cookie->lock);
52570 kfree(op);
52571 nobufs:
52572- fscache_stat(&fscache_n_allocs_nobufs);
52573+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52574 _leave(" = -ENOBUFS");
52575 return -ENOBUFS;
52576 }
52577@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52578
52579 spin_lock(&cookie->stores_lock);
52580
52581- fscache_stat(&fscache_n_store_calls);
52582+ fscache_stat_unchecked(&fscache_n_store_calls);
52583
52584 /* find a page to store */
52585 page = NULL;
52586@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52587 page = results[0];
52588 _debug("gang %d [%lx]", n, page->index);
52589 if (page->index > op->store_limit) {
52590- fscache_stat(&fscache_n_store_pages_over_limit);
52591+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
52592 goto superseded;
52593 }
52594
52595@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52596 spin_unlock(&cookie->stores_lock);
52597 spin_unlock(&object->lock);
52598
52599- fscache_stat(&fscache_n_store_pages);
52600+ fscache_stat_unchecked(&fscache_n_store_pages);
52601 fscache_stat(&fscache_n_cop_write_page);
52602 ret = object->cache->ops->write_page(op, page);
52603 fscache_stat_d(&fscache_n_cop_write_page);
52604@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52605 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52606 ASSERT(PageFsCache(page));
52607
52608- fscache_stat(&fscache_n_stores);
52609+ fscache_stat_unchecked(&fscache_n_stores);
52610
52611 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
52612 _leave(" = -ENOBUFS [invalidating]");
52613@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52614 spin_unlock(&cookie->stores_lock);
52615 spin_unlock(&object->lock);
52616
52617- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
52618+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52619 op->store_limit = object->store_limit;
52620
52621 if (fscache_submit_op(object, &op->op) < 0)
52622@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52623
52624 spin_unlock(&cookie->lock);
52625 radix_tree_preload_end();
52626- fscache_stat(&fscache_n_store_ops);
52627- fscache_stat(&fscache_n_stores_ok);
52628+ fscache_stat_unchecked(&fscache_n_store_ops);
52629+ fscache_stat_unchecked(&fscache_n_stores_ok);
52630
52631 /* the work queue now carries its own ref on the object */
52632 fscache_put_operation(&op->op);
52633@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52634 return 0;
52635
52636 already_queued:
52637- fscache_stat(&fscache_n_stores_again);
52638+ fscache_stat_unchecked(&fscache_n_stores_again);
52639 already_pending:
52640 spin_unlock(&cookie->stores_lock);
52641 spin_unlock(&object->lock);
52642 spin_unlock(&cookie->lock);
52643 radix_tree_preload_end();
52644 kfree(op);
52645- fscache_stat(&fscache_n_stores_ok);
52646+ fscache_stat_unchecked(&fscache_n_stores_ok);
52647 _leave(" = 0");
52648 return 0;
52649
52650@@ -959,14 +959,14 @@ nobufs:
52651 spin_unlock(&cookie->lock);
52652 radix_tree_preload_end();
52653 kfree(op);
52654- fscache_stat(&fscache_n_stores_nobufs);
52655+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
52656 _leave(" = -ENOBUFS");
52657 return -ENOBUFS;
52658
52659 nomem_free:
52660 kfree(op);
52661 nomem:
52662- fscache_stat(&fscache_n_stores_oom);
52663+ fscache_stat_unchecked(&fscache_n_stores_oom);
52664 _leave(" = -ENOMEM");
52665 return -ENOMEM;
52666 }
52667@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52668 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52669 ASSERTCMP(page, !=, NULL);
52670
52671- fscache_stat(&fscache_n_uncaches);
52672+ fscache_stat_unchecked(&fscache_n_uncaches);
52673
52674 /* cache withdrawal may beat us to it */
52675 if (!PageFsCache(page))
52676@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52677 struct fscache_cookie *cookie = op->op.object->cookie;
52678
52679 #ifdef CONFIG_FSCACHE_STATS
52680- atomic_inc(&fscache_n_marks);
52681+ atomic_inc_unchecked(&fscache_n_marks);
52682 #endif
52683
52684 _debug("- mark %p{%lx}", page, page->index);
52685diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52686index 8179e8b..5072cc7 100644
52687--- a/fs/fscache/stats.c
52688+++ b/fs/fscache/stats.c
52689@@ -18,99 +18,99 @@
52690 /*
52691 * operation counters
52692 */
52693-atomic_t fscache_n_op_pend;
52694-atomic_t fscache_n_op_run;
52695-atomic_t fscache_n_op_enqueue;
52696-atomic_t fscache_n_op_requeue;
52697-atomic_t fscache_n_op_deferred_release;
52698-atomic_t fscache_n_op_release;
52699-atomic_t fscache_n_op_gc;
52700-atomic_t fscache_n_op_cancelled;
52701-atomic_t fscache_n_op_rejected;
52702+atomic_unchecked_t fscache_n_op_pend;
52703+atomic_unchecked_t fscache_n_op_run;
52704+atomic_unchecked_t fscache_n_op_enqueue;
52705+atomic_unchecked_t fscache_n_op_requeue;
52706+atomic_unchecked_t fscache_n_op_deferred_release;
52707+atomic_unchecked_t fscache_n_op_release;
52708+atomic_unchecked_t fscache_n_op_gc;
52709+atomic_unchecked_t fscache_n_op_cancelled;
52710+atomic_unchecked_t fscache_n_op_rejected;
52711
52712-atomic_t fscache_n_attr_changed;
52713-atomic_t fscache_n_attr_changed_ok;
52714-atomic_t fscache_n_attr_changed_nobufs;
52715-atomic_t fscache_n_attr_changed_nomem;
52716-atomic_t fscache_n_attr_changed_calls;
52717+atomic_unchecked_t fscache_n_attr_changed;
52718+atomic_unchecked_t fscache_n_attr_changed_ok;
52719+atomic_unchecked_t fscache_n_attr_changed_nobufs;
52720+atomic_unchecked_t fscache_n_attr_changed_nomem;
52721+atomic_unchecked_t fscache_n_attr_changed_calls;
52722
52723-atomic_t fscache_n_allocs;
52724-atomic_t fscache_n_allocs_ok;
52725-atomic_t fscache_n_allocs_wait;
52726-atomic_t fscache_n_allocs_nobufs;
52727-atomic_t fscache_n_allocs_intr;
52728-atomic_t fscache_n_allocs_object_dead;
52729-atomic_t fscache_n_alloc_ops;
52730-atomic_t fscache_n_alloc_op_waits;
52731+atomic_unchecked_t fscache_n_allocs;
52732+atomic_unchecked_t fscache_n_allocs_ok;
52733+atomic_unchecked_t fscache_n_allocs_wait;
52734+atomic_unchecked_t fscache_n_allocs_nobufs;
52735+atomic_unchecked_t fscache_n_allocs_intr;
52736+atomic_unchecked_t fscache_n_allocs_object_dead;
52737+atomic_unchecked_t fscache_n_alloc_ops;
52738+atomic_unchecked_t fscache_n_alloc_op_waits;
52739
52740-atomic_t fscache_n_retrievals;
52741-atomic_t fscache_n_retrievals_ok;
52742-atomic_t fscache_n_retrievals_wait;
52743-atomic_t fscache_n_retrievals_nodata;
52744-atomic_t fscache_n_retrievals_nobufs;
52745-atomic_t fscache_n_retrievals_intr;
52746-atomic_t fscache_n_retrievals_nomem;
52747-atomic_t fscache_n_retrievals_object_dead;
52748-atomic_t fscache_n_retrieval_ops;
52749-atomic_t fscache_n_retrieval_op_waits;
52750+atomic_unchecked_t fscache_n_retrievals;
52751+atomic_unchecked_t fscache_n_retrievals_ok;
52752+atomic_unchecked_t fscache_n_retrievals_wait;
52753+atomic_unchecked_t fscache_n_retrievals_nodata;
52754+atomic_unchecked_t fscache_n_retrievals_nobufs;
52755+atomic_unchecked_t fscache_n_retrievals_intr;
52756+atomic_unchecked_t fscache_n_retrievals_nomem;
52757+atomic_unchecked_t fscache_n_retrievals_object_dead;
52758+atomic_unchecked_t fscache_n_retrieval_ops;
52759+atomic_unchecked_t fscache_n_retrieval_op_waits;
52760
52761-atomic_t fscache_n_stores;
52762-atomic_t fscache_n_stores_ok;
52763-atomic_t fscache_n_stores_again;
52764-atomic_t fscache_n_stores_nobufs;
52765-atomic_t fscache_n_stores_oom;
52766-atomic_t fscache_n_store_ops;
52767-atomic_t fscache_n_store_calls;
52768-atomic_t fscache_n_store_pages;
52769-atomic_t fscache_n_store_radix_deletes;
52770-atomic_t fscache_n_store_pages_over_limit;
52771+atomic_unchecked_t fscache_n_stores;
52772+atomic_unchecked_t fscache_n_stores_ok;
52773+atomic_unchecked_t fscache_n_stores_again;
52774+atomic_unchecked_t fscache_n_stores_nobufs;
52775+atomic_unchecked_t fscache_n_stores_oom;
52776+atomic_unchecked_t fscache_n_store_ops;
52777+atomic_unchecked_t fscache_n_store_calls;
52778+atomic_unchecked_t fscache_n_store_pages;
52779+atomic_unchecked_t fscache_n_store_radix_deletes;
52780+atomic_unchecked_t fscache_n_store_pages_over_limit;
52781
52782-atomic_t fscache_n_store_vmscan_not_storing;
52783-atomic_t fscache_n_store_vmscan_gone;
52784-atomic_t fscache_n_store_vmscan_busy;
52785-atomic_t fscache_n_store_vmscan_cancelled;
52786-atomic_t fscache_n_store_vmscan_wait;
52787+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52788+atomic_unchecked_t fscache_n_store_vmscan_gone;
52789+atomic_unchecked_t fscache_n_store_vmscan_busy;
52790+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52791+atomic_unchecked_t fscache_n_store_vmscan_wait;
52792
52793-atomic_t fscache_n_marks;
52794-atomic_t fscache_n_uncaches;
52795+atomic_unchecked_t fscache_n_marks;
52796+atomic_unchecked_t fscache_n_uncaches;
52797
52798-atomic_t fscache_n_acquires;
52799-atomic_t fscache_n_acquires_null;
52800-atomic_t fscache_n_acquires_no_cache;
52801-atomic_t fscache_n_acquires_ok;
52802-atomic_t fscache_n_acquires_nobufs;
52803-atomic_t fscache_n_acquires_oom;
52804+atomic_unchecked_t fscache_n_acquires;
52805+atomic_unchecked_t fscache_n_acquires_null;
52806+atomic_unchecked_t fscache_n_acquires_no_cache;
52807+atomic_unchecked_t fscache_n_acquires_ok;
52808+atomic_unchecked_t fscache_n_acquires_nobufs;
52809+atomic_unchecked_t fscache_n_acquires_oom;
52810
52811-atomic_t fscache_n_invalidates;
52812-atomic_t fscache_n_invalidates_run;
52813+atomic_unchecked_t fscache_n_invalidates;
52814+atomic_unchecked_t fscache_n_invalidates_run;
52815
52816-atomic_t fscache_n_updates;
52817-atomic_t fscache_n_updates_null;
52818-atomic_t fscache_n_updates_run;
52819+atomic_unchecked_t fscache_n_updates;
52820+atomic_unchecked_t fscache_n_updates_null;
52821+atomic_unchecked_t fscache_n_updates_run;
52822
52823-atomic_t fscache_n_relinquishes;
52824-atomic_t fscache_n_relinquishes_null;
52825-atomic_t fscache_n_relinquishes_waitcrt;
52826-atomic_t fscache_n_relinquishes_retire;
52827+atomic_unchecked_t fscache_n_relinquishes;
52828+atomic_unchecked_t fscache_n_relinquishes_null;
52829+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52830+atomic_unchecked_t fscache_n_relinquishes_retire;
52831
52832-atomic_t fscache_n_cookie_index;
52833-atomic_t fscache_n_cookie_data;
52834-atomic_t fscache_n_cookie_special;
52835+atomic_unchecked_t fscache_n_cookie_index;
52836+atomic_unchecked_t fscache_n_cookie_data;
52837+atomic_unchecked_t fscache_n_cookie_special;
52838
52839-atomic_t fscache_n_object_alloc;
52840-atomic_t fscache_n_object_no_alloc;
52841-atomic_t fscache_n_object_lookups;
52842-atomic_t fscache_n_object_lookups_negative;
52843-atomic_t fscache_n_object_lookups_positive;
52844-atomic_t fscache_n_object_lookups_timed_out;
52845-atomic_t fscache_n_object_created;
52846-atomic_t fscache_n_object_avail;
52847-atomic_t fscache_n_object_dead;
52848+atomic_unchecked_t fscache_n_object_alloc;
52849+atomic_unchecked_t fscache_n_object_no_alloc;
52850+atomic_unchecked_t fscache_n_object_lookups;
52851+atomic_unchecked_t fscache_n_object_lookups_negative;
52852+atomic_unchecked_t fscache_n_object_lookups_positive;
52853+atomic_unchecked_t fscache_n_object_lookups_timed_out;
52854+atomic_unchecked_t fscache_n_object_created;
52855+atomic_unchecked_t fscache_n_object_avail;
52856+atomic_unchecked_t fscache_n_object_dead;
52857
52858-atomic_t fscache_n_checkaux_none;
52859-atomic_t fscache_n_checkaux_okay;
52860-atomic_t fscache_n_checkaux_update;
52861-atomic_t fscache_n_checkaux_obsolete;
52862+atomic_unchecked_t fscache_n_checkaux_none;
52863+atomic_unchecked_t fscache_n_checkaux_okay;
52864+atomic_unchecked_t fscache_n_checkaux_update;
52865+atomic_unchecked_t fscache_n_checkaux_obsolete;
52866
52867 atomic_t fscache_n_cop_alloc_object;
52868 atomic_t fscache_n_cop_lookup_object;
52869@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
52870 seq_puts(m, "FS-Cache statistics\n");
52871
52872 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
52873- atomic_read(&fscache_n_cookie_index),
52874- atomic_read(&fscache_n_cookie_data),
52875- atomic_read(&fscache_n_cookie_special));
52876+ atomic_read_unchecked(&fscache_n_cookie_index),
52877+ atomic_read_unchecked(&fscache_n_cookie_data),
52878+ atomic_read_unchecked(&fscache_n_cookie_special));
52879
52880 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
52881- atomic_read(&fscache_n_object_alloc),
52882- atomic_read(&fscache_n_object_no_alloc),
52883- atomic_read(&fscache_n_object_avail),
52884- atomic_read(&fscache_n_object_dead));
52885+ atomic_read_unchecked(&fscache_n_object_alloc),
52886+ atomic_read_unchecked(&fscache_n_object_no_alloc),
52887+ atomic_read_unchecked(&fscache_n_object_avail),
52888+ atomic_read_unchecked(&fscache_n_object_dead));
52889 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
52890- atomic_read(&fscache_n_checkaux_none),
52891- atomic_read(&fscache_n_checkaux_okay),
52892- atomic_read(&fscache_n_checkaux_update),
52893- atomic_read(&fscache_n_checkaux_obsolete));
52894+ atomic_read_unchecked(&fscache_n_checkaux_none),
52895+ atomic_read_unchecked(&fscache_n_checkaux_okay),
52896+ atomic_read_unchecked(&fscache_n_checkaux_update),
52897+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
52898
52899 seq_printf(m, "Pages : mrk=%u unc=%u\n",
52900- atomic_read(&fscache_n_marks),
52901- atomic_read(&fscache_n_uncaches));
52902+ atomic_read_unchecked(&fscache_n_marks),
52903+ atomic_read_unchecked(&fscache_n_uncaches));
52904
52905 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
52906 " oom=%u\n",
52907- atomic_read(&fscache_n_acquires),
52908- atomic_read(&fscache_n_acquires_null),
52909- atomic_read(&fscache_n_acquires_no_cache),
52910- atomic_read(&fscache_n_acquires_ok),
52911- atomic_read(&fscache_n_acquires_nobufs),
52912- atomic_read(&fscache_n_acquires_oom));
52913+ atomic_read_unchecked(&fscache_n_acquires),
52914+ atomic_read_unchecked(&fscache_n_acquires_null),
52915+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
52916+ atomic_read_unchecked(&fscache_n_acquires_ok),
52917+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
52918+ atomic_read_unchecked(&fscache_n_acquires_oom));
52919
52920 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
52921- atomic_read(&fscache_n_object_lookups),
52922- atomic_read(&fscache_n_object_lookups_negative),
52923- atomic_read(&fscache_n_object_lookups_positive),
52924- atomic_read(&fscache_n_object_created),
52925- atomic_read(&fscache_n_object_lookups_timed_out));
52926+ atomic_read_unchecked(&fscache_n_object_lookups),
52927+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
52928+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
52929+ atomic_read_unchecked(&fscache_n_object_created),
52930+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
52931
52932 seq_printf(m, "Invals : n=%u run=%u\n",
52933- atomic_read(&fscache_n_invalidates),
52934- atomic_read(&fscache_n_invalidates_run));
52935+ atomic_read_unchecked(&fscache_n_invalidates),
52936+ atomic_read_unchecked(&fscache_n_invalidates_run));
52937
52938 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
52939- atomic_read(&fscache_n_updates),
52940- atomic_read(&fscache_n_updates_null),
52941- atomic_read(&fscache_n_updates_run));
52942+ atomic_read_unchecked(&fscache_n_updates),
52943+ atomic_read_unchecked(&fscache_n_updates_null),
52944+ atomic_read_unchecked(&fscache_n_updates_run));
52945
52946 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
52947- atomic_read(&fscache_n_relinquishes),
52948- atomic_read(&fscache_n_relinquishes_null),
52949- atomic_read(&fscache_n_relinquishes_waitcrt),
52950- atomic_read(&fscache_n_relinquishes_retire));
52951+ atomic_read_unchecked(&fscache_n_relinquishes),
52952+ atomic_read_unchecked(&fscache_n_relinquishes_null),
52953+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
52954+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
52955
52956 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
52957- atomic_read(&fscache_n_attr_changed),
52958- atomic_read(&fscache_n_attr_changed_ok),
52959- atomic_read(&fscache_n_attr_changed_nobufs),
52960- atomic_read(&fscache_n_attr_changed_nomem),
52961- atomic_read(&fscache_n_attr_changed_calls));
52962+ atomic_read_unchecked(&fscache_n_attr_changed),
52963+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
52964+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
52965+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
52966+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
52967
52968 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
52969- atomic_read(&fscache_n_allocs),
52970- atomic_read(&fscache_n_allocs_ok),
52971- atomic_read(&fscache_n_allocs_wait),
52972- atomic_read(&fscache_n_allocs_nobufs),
52973- atomic_read(&fscache_n_allocs_intr));
52974+ atomic_read_unchecked(&fscache_n_allocs),
52975+ atomic_read_unchecked(&fscache_n_allocs_ok),
52976+ atomic_read_unchecked(&fscache_n_allocs_wait),
52977+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
52978+ atomic_read_unchecked(&fscache_n_allocs_intr));
52979 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
52980- atomic_read(&fscache_n_alloc_ops),
52981- atomic_read(&fscache_n_alloc_op_waits),
52982- atomic_read(&fscache_n_allocs_object_dead));
52983+ atomic_read_unchecked(&fscache_n_alloc_ops),
52984+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
52985+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
52986
52987 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
52988 " int=%u oom=%u\n",
52989- atomic_read(&fscache_n_retrievals),
52990- atomic_read(&fscache_n_retrievals_ok),
52991- atomic_read(&fscache_n_retrievals_wait),
52992- atomic_read(&fscache_n_retrievals_nodata),
52993- atomic_read(&fscache_n_retrievals_nobufs),
52994- atomic_read(&fscache_n_retrievals_intr),
52995- atomic_read(&fscache_n_retrievals_nomem));
52996+ atomic_read_unchecked(&fscache_n_retrievals),
52997+ atomic_read_unchecked(&fscache_n_retrievals_ok),
52998+ atomic_read_unchecked(&fscache_n_retrievals_wait),
52999+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
53000+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
53001+ atomic_read_unchecked(&fscache_n_retrievals_intr),
53002+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
53003 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
53004- atomic_read(&fscache_n_retrieval_ops),
53005- atomic_read(&fscache_n_retrieval_op_waits),
53006- atomic_read(&fscache_n_retrievals_object_dead));
53007+ atomic_read_unchecked(&fscache_n_retrieval_ops),
53008+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
53009+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
53010
53011 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
53012- atomic_read(&fscache_n_stores),
53013- atomic_read(&fscache_n_stores_ok),
53014- atomic_read(&fscache_n_stores_again),
53015- atomic_read(&fscache_n_stores_nobufs),
53016- atomic_read(&fscache_n_stores_oom));
53017+ atomic_read_unchecked(&fscache_n_stores),
53018+ atomic_read_unchecked(&fscache_n_stores_ok),
53019+ atomic_read_unchecked(&fscache_n_stores_again),
53020+ atomic_read_unchecked(&fscache_n_stores_nobufs),
53021+ atomic_read_unchecked(&fscache_n_stores_oom));
53022 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
53023- atomic_read(&fscache_n_store_ops),
53024- atomic_read(&fscache_n_store_calls),
53025- atomic_read(&fscache_n_store_pages),
53026- atomic_read(&fscache_n_store_radix_deletes),
53027- atomic_read(&fscache_n_store_pages_over_limit));
53028+ atomic_read_unchecked(&fscache_n_store_ops),
53029+ atomic_read_unchecked(&fscache_n_store_calls),
53030+ atomic_read_unchecked(&fscache_n_store_pages),
53031+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
53032+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
53033
53034 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
53035- atomic_read(&fscache_n_store_vmscan_not_storing),
53036- atomic_read(&fscache_n_store_vmscan_gone),
53037- atomic_read(&fscache_n_store_vmscan_busy),
53038- atomic_read(&fscache_n_store_vmscan_cancelled),
53039- atomic_read(&fscache_n_store_vmscan_wait));
53040+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
53041+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
53042+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
53043+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
53044+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
53045
53046 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
53047- atomic_read(&fscache_n_op_pend),
53048- atomic_read(&fscache_n_op_run),
53049- atomic_read(&fscache_n_op_enqueue),
53050- atomic_read(&fscache_n_op_cancelled),
53051- atomic_read(&fscache_n_op_rejected));
53052+ atomic_read_unchecked(&fscache_n_op_pend),
53053+ atomic_read_unchecked(&fscache_n_op_run),
53054+ atomic_read_unchecked(&fscache_n_op_enqueue),
53055+ atomic_read_unchecked(&fscache_n_op_cancelled),
53056+ atomic_read_unchecked(&fscache_n_op_rejected));
53057 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
53058- atomic_read(&fscache_n_op_deferred_release),
53059- atomic_read(&fscache_n_op_release),
53060- atomic_read(&fscache_n_op_gc));
53061+ atomic_read_unchecked(&fscache_n_op_deferred_release),
53062+ atomic_read_unchecked(&fscache_n_op_release),
53063+ atomic_read_unchecked(&fscache_n_op_gc));
53064
53065 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
53066 atomic_read(&fscache_n_cop_alloc_object),
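
The declarations above flip wholesale, but note that the fscache_n_cop_* counters left as plain atomic_t just below are also decremented (via fscache_stat_d()), so they behave as in-flight gauges where overflow trapping remains meaningful. The wrapper macros themselves live in fs/fscache/internal.h, which this excerpt does not show; presumably the patch extends them along these lines (shape assumed, not quoted):

    #ifdef CONFIG_FSCACHE_STATS
    #define fscache_stat(stat)            atomic_inc(stat)
    #define fscache_stat_d(stat)          atomic_dec(stat)
    #define fscache_stat_unchecked(stat)  atomic_inc_unchecked(stat)
    #else
    #define fscache_stat(stat)            do {} while (0)
    #define fscache_stat_d(stat)          do {} while (0)
    #define fscache_stat_unchecked(stat)  do {} while (0)
    #endif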
53067diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
53068index e397b67..b0d8709 100644
53069--- a/fs/fuse/cuse.c
53070+++ b/fs/fuse/cuse.c
53071@@ -593,10 +593,12 @@ static int __init cuse_init(void)
53072 INIT_LIST_HEAD(&cuse_conntbl[i]);
53073
53074 /* inherit and extend fuse_dev_operations */
53075- cuse_channel_fops = fuse_dev_operations;
53076- cuse_channel_fops.owner = THIS_MODULE;
53077- cuse_channel_fops.open = cuse_channel_open;
53078- cuse_channel_fops.release = cuse_channel_release;
53079+ pax_open_kernel();
53080+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
53081+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
53082+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
53083+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
53084+ pax_close_kernel();
53085
53086 cuse_class = class_create(THIS_MODULE, "cuse");
53087 if (IS_ERR(cuse_class))
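
struct file_operations is constified by this patch's GCC plugin, so cuse can no longer populate cuse_channel_fops by plain assignment at init time; pax_open_kernel()/pax_close_kernel() open a brief kernel-write window, and the *(void **)& casts strip the const qualifier for the one-time setup. The general pattern, sketched with placeholder names (my_fops, my_init and my_open are illustrative; the pax_* calls are the ones used in the hunk above):

    static int my_open(struct inode *inode, struct file *file);

    static struct file_operations my_fops;  /* made const by the plugin */

    static int __init my_init(void)
    {
        pax_open_kernel();                         /* writes allowed...  */
        *(void **)&my_fops.owner = THIS_MODULE;    /* cast defeats const */
        *(void **)&my_fops.open  = my_open;
        pax_close_kernel();                        /* ...up to here      */
        return 0;
    }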
53088diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
53089index e83351a..41e3c9c 100644
53090--- a/fs/fuse/dev.c
53091+++ b/fs/fuse/dev.c
53092@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53093 ret = 0;
53094 pipe_lock(pipe);
53095
53096- if (!pipe->readers) {
53097+ if (!atomic_read(&pipe->readers)) {
53098 send_sig(SIGPIPE, current, 0);
53099 if (!ret)
53100 ret = -EPIPE;
53101diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
53102index 315e1f8..91f890c 100644
53103--- a/fs/fuse/dir.c
53104+++ b/fs/fuse/dir.c
53105@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
53106 return link;
53107 }
53108
53109-static void free_link(char *link)
53110+static void free_link(const char *link)
53111 {
53112 if (!IS_ERR(link))
53113 free_page((unsigned long) link);
53114diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
53115index 2b6f569..fcb4d1f 100644
53116--- a/fs/gfs2/inode.c
53117+++ b/fs/gfs2/inode.c
53118@@ -1499,7 +1499,7 @@ out:
53119
53120 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53121 {
53122- char *s = nd_get_link(nd);
53123+ const char *s = nd_get_link(nd);
53124 if (!IS_ERR(s))
53125 kfree(s);
53126 }
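
Both fuse's free_link() and gfs2's gfs2_put_link() gain const because the symlink body obtained via nd_get_link() is treated as read-only elsewhere in this patch; the consumers only need the pointer in order to free it, and kfree() already takes const void *, so no cast is required. A sketch of the resulting consumer shape (my_put_link is a placeholder name):

    static void my_put_link(struct dentry *dentry, struct nameidata *nd,
                            void *p)
    {
        const char *s = nd_get_link(nd);
        if (!IS_ERR(s))
            kfree(s);   /* kfree(const void *) needs no un-const cast */
    }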
53127diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
53128index 78bde32..767e906 100644
53129--- a/fs/hugetlbfs/inode.c
53130+++ b/fs/hugetlbfs/inode.c
53131@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53132 struct mm_struct *mm = current->mm;
53133 struct vm_area_struct *vma;
53134 struct hstate *h = hstate_file(file);
53135+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
53136 struct vm_unmapped_area_info info;
53137
53138 if (len & ~huge_page_mask(h))
53139@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53140 return addr;
53141 }
53142
53143+#ifdef CONFIG_PAX_RANDMMAP
53144+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
53145+#endif
53146+
53147 if (addr) {
53148 addr = ALIGN(addr, huge_page_size(h));
53149 vma = find_vma(mm, addr);
53150- if (TASK_SIZE - len >= addr &&
53151- (!vma || addr + len <= vma->vm_start))
53152+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
53153 return addr;
53154 }
53155
53156 info.flags = 0;
53157 info.length = len;
53158 info.low_limit = TASK_UNMAPPED_BASE;
53159+
53160+#ifdef CONFIG_PAX_RANDMMAP
53161+ if (mm->pax_flags & MF_PAX_RANDMMAP)
53162+ info.low_limit += mm->delta_mmap;
53163+#endif
53164+
53165 info.high_limit = TASK_SIZE;
53166 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
53167 info.align_offset = 0;
53168@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
53169 .kill_sb = kill_litter_super,
53170 };
53171
53172-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53173+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53174
53175 static int can_do_hugetlb_shm(void)
53176 {
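
Two things happen in the hugetlb hunk: the open-coded "fits below the next VMA" test becomes check_heap_stack_gap(), which additionally enforces a configurable gap when the neighbouring VMA is a downward-growing stack, and under PAX_RANDMMAP the search floor is shifted by mm->delta_mmap so that even huge-page mappings are randomized. Roughly what the helper enforces, heavily simplified from the mm/ portion of this patch (sysctl_heap_stack_gap is the grsecurity tunable it consults; the offset argument, used for thread-stack randomization, is elided here):

    /* simplified sketch, not the literal grsecurity helper */
    static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long len,
                                     unsigned long offset)
    {
        (void)offset;                      /* full version folds this in */
        if (!vma)
            return true;                   /* no mapping above us */
        if (vma->vm_flags & VM_GROWSDOWN)  /* stack: keep the sysctl gap */
            return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
        return addr + len <= vma->vm_start;
    }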
53177diff --git a/fs/inode.c b/fs/inode.c
53178index 14084b7..29af1d9 100644
53179--- a/fs/inode.c
53180+++ b/fs/inode.c
53181@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
53182
53183 #ifdef CONFIG_SMP
53184 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
53185- static atomic_t shared_last_ino;
53186- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
53187+ static atomic_unchecked_t shared_last_ino;
53188+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
53189
53190 res = next - LAST_INO_BATCH;
53191 }
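
Inode numbers handed out by get_next_ino() (tmpfs, pipes, sockets, ...) are opaque, freely reused tags: the counter is expected to wrap on a busy system, so an overflow trap here would be a false positive, which is why the batch counter becomes unchecked. The batching itself exists so that each CPU pays one shared read-modify-write per LAST_INO_BATCH allocations. A sketch (next_ino_sketch and the explicit pcpu_res parameter are illustrative; the kernel keeps the per-CPU slot in get_cpu_var(last_ino)):

    #define LAST_INO_BATCH 1024

    static atomic_unchecked_t shared_last_ino;

    static unsigned int next_ino_sketch(unsigned int *pcpu_res)
    {
        unsigned int res = *pcpu_res;

        /* batch exhausted: claim LAST_INO_BATCH numbers from the
         * shared counter, keeping its cacheline mostly cold */
        if ((res & (LAST_INO_BATCH - 1)) == 0)
            res = atomic_add_return_unchecked(LAST_INO_BATCH,
                                              &shared_last_ino)
                  - LAST_INO_BATCH;
        *pcpu_res = ++res;
        return res;
    }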
53192diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
53193index 4a6cf28..d3a29d3 100644
53194--- a/fs/jffs2/erase.c
53195+++ b/fs/jffs2/erase.c
53196@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
53197 struct jffs2_unknown_node marker = {
53198 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
53199 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53200- .totlen = cpu_to_je32(c->cleanmarker_size)
53201+ .totlen = cpu_to_je32(c->cleanmarker_size),
53202+ .hdr_crc = cpu_to_je32(0)
53203 };
53204
53205 jffs2_prealloc_raw_node_refs(c, jeb, 1);
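
Behaviourally the jffs2 change is a no-op: C99 §6.7.8p21 guarantees that members omitted from a brace-enclosed initializer are zero-initialized, so .hdr_crc was already 0. Spelling it out documents that a cleanmarker's header CRC is deliberately zero and presumably keeps the stricter grsecurity build tooling quiet; the wbuf.c hunk below makes the identical change. A standalone check of the guarantee (plain integers stand in for the je16/je32 wrapper types):

    #include <assert.h>

    struct node { unsigned short magic, nodetype; unsigned totlen, hdr_crc; };

    int main(void)
    {
        struct node a = { .magic = 0x1985, .totlen = 8 };
        struct node b = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };
        /* omitted members are zeroed, so the two are identical */
        assert(a.hdr_crc == 0 && a.hdr_crc == b.hdr_crc);
        return 0;
    }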
53206diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
53207index a6597d6..41b30ec 100644
53208--- a/fs/jffs2/wbuf.c
53209+++ b/fs/jffs2/wbuf.c
53210@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
53211 {
53212 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
53213 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53214- .totlen = constant_cpu_to_je32(8)
53215+ .totlen = constant_cpu_to_je32(8),
53216+ .hdr_crc = constant_cpu_to_je32(0)
53217 };
53218
53219 /*
53220diff --git a/fs/jfs/super.c b/fs/jfs/super.c
53221index 1a543be..a4e1363 100644
53222--- a/fs/jfs/super.c
53223+++ b/fs/jfs/super.c
53224@@ -225,7 +225,7 @@ static const match_table_t tokens = {
53225 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
53226 int *flag)
53227 {
53228- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
53229+ const void *nls_map = (const void *)-1; /* -1: no change; NULL: none */
53230 char *p;
53231 struct jfs_sb_info *sbi = JFS_SBI(sb);
53232
53233@@ -253,7 +253,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
53234 /* Don't do anything ;-) */
53235 break;
53236 case Opt_iocharset:
53237- if (nls_map && nls_map != (void *) -1)
53238+ if (nls_map && nls_map != (const void *) -1)
53239 unload_nls(nls_map);
53240 if (!strcmp(args[0].from, "none"))
53241 nls_map = NULL;
53242@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
53243
53244 jfs_inode_cachep =
53245 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
53246- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
53247+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
53248 init_once);
53249 if (jfs_inode_cachep == NULL)
53250 return -ENOMEM;
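
SLAB_USERCOPY is the whitelist flag introduced by this patch's PAX_USERCOPY feature: with it enabled, copy_to_user()/copy_from_user() against slab-backed memory is refused unless the object's cache carries the flag. jfs inodes qualify because parts of struct jfs_inode_info (such as inline symlink data) are copied to userspace. The shape of a whitelisted cache, with placeholder names (example_cache, struct example_obj and init_once are illustrative):

    static struct kmem_cache *example_cache;

    static int __init example_init(void)
    {
        example_cache = kmem_cache_create("example_obj",
                                          sizeof(struct example_obj), 0,
                                          SLAB_RECLAIM_ACCOUNT |
                                          SLAB_MEM_SPREAD |
                                          SLAB_USERCOPY, /* may feed
                                                            copy_*_user() */
                                          init_once);
        return example_cache ? 0 : -ENOMEM;
    }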
53251diff --git a/fs/libfs.c b/fs/libfs.c
53252index 916da8c..1588998 100644
53253--- a/fs/libfs.c
53254+++ b/fs/libfs.c
53255@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53256
53257 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
53258 struct dentry *next;
53259+ char d_name[sizeof(next->d_iname)];
53260+ const unsigned char *name;
53261+
53262 next = list_entry(p, struct dentry, d_u.d_child);
53263 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
53264 if (!simple_positive(next)) {
53265@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53266
53267 spin_unlock(&next->d_lock);
53268 spin_unlock(&dentry->d_lock);
53269- if (filldir(dirent, next->d_name.name,
53270+ name = next->d_name.name;
53271+ if (name == next->d_iname) {
53272+ memcpy(d_name, name, next->d_name.len);
53273+ name = d_name;
53274+ }
53275+ if (filldir(dirent, name,
53276 next->d_name.len, filp->f_pos,
53277 next->d_inode->i_ino,
53278 dt_type(next->d_inode)) < 0)
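
The libfs change closes a read-of-mutating-memory window: for short names, d_name.name points into the dentry's own inline buffer (d_iname), which a concurrent rename may rewrite the moment d_lock is dropped, so dcache_readdir() now snapshots the name onto the stack while still holding the lock before handing it to filldir(). The same discipline in self-contained form (struct dent, emit_name and INLINE_LEN are illustrative stand-ins, not kernel types):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define INLINE_LEN 32              /* stands in for DNAME_INLINE_LEN */

    struct dent {
        pthread_mutex_t lock;          /* stands in for d_lock */
        const char *name;              /* == iname for short names */
        size_t len;                    /* < INLINE_LEN when inline */
        char iname[INLINE_LEN];
    };

    static void emit_name(struct dent *d)
    {
        char buf[INLINE_LEN];
        const char *name;

        pthread_mutex_lock(&d->lock);
        name = d->name;
        if (name == d->iname) {        /* inline: copy before unlocking */
            memcpy(buf, name, d->len);
            name = buf;
        }
        pthread_mutex_unlock(&d->lock);
        printf("%.*s\n", (int)d->len, name);  /* reads the stable snapshot */
    }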
53279diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
53280index 52e5120..808936e 100644
53281--- a/fs/lockd/clntproc.c
53282+++ b/fs/lockd/clntproc.c
53283@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
53284 /*
53285 * Cookie counter for NLM requests
53286 */
53287-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
53288+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
53289
53290 void nlmclnt_next_cookie(struct nlm_cookie *c)
53291 {
53292- u32 cookie = atomic_inc_return(&nlm_cookie);
53293+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
53294
53295 memcpy(c->data, &cookie, 4);
53296 c->len=4;
53297diff --git a/fs/locks.c b/fs/locks.c
53298index a94e331..060bce3 100644
53299--- a/fs/locks.c
53300+++ b/fs/locks.c
53301@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
53302 return;
53303
53304 if (filp->f_op && filp->f_op->flock) {
53305- struct file_lock fl = {
53306+ struct file_lock flock = {
53307 .fl_pid = current->tgid,
53308 .fl_file = filp,
53309 .fl_flags = FL_FLOCK,
53310 .fl_type = F_UNLCK,
53311 .fl_end = OFFSET_MAX,
53312 };
53313- filp->f_op->flock(filp, F_SETLKW, &fl);
53314- if (fl.fl_ops && fl.fl_ops->fl_release_private)
53315- fl.fl_ops->fl_release_private(&fl);
53316+ filp->f_op->flock(filp, F_SETLKW, &flock);
53317+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
53318+ flock.fl_ops->fl_release_private(&flock);
53319 }
53320
53321 lock_flocks();
53322diff --git a/fs/namei.c b/fs/namei.c
53323index ec97aef..e67718d 100644
53324--- a/fs/namei.c
53325+++ b/fs/namei.c
53326@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
53327 if (ret != -EACCES)
53328 return ret;
53329
53330+#ifdef CONFIG_GRKERNSEC
53331+ /* we'll block if we have to log due to a denied capability use */
53332+ if (mask & MAY_NOT_BLOCK)
53333+ return -ECHILD;
53334+#endif
53335+
53336 if (S_ISDIR(inode->i_mode)) {
53337 /* DACs are overridable for directories */
53338- if (inode_capable(inode, CAP_DAC_OVERRIDE))
53339- return 0;
53340 if (!(mask & MAY_WRITE))
53341- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53342+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53343+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53344 return 0;
53345+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
53346+ return 0;
53347 return -EACCES;
53348 }
53349 /*
53350+ * Searching includes executable on directories, else just read.
53351+ */
53352+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53353+ if (mask == MAY_READ)
53354+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53355+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53356+ return 0;
53357+
53358+ /*
53359 * Read/write DACs are always overridable.
53360 * Executable DACs are overridable when there is
53361 * at least one exec bit set.
53362@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
53363 if (inode_capable(inode, CAP_DAC_OVERRIDE))
53364 return 0;
53365
53366- /*
53367- * Searching includes executable on directories, else just read.
53368- */
53369- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53370- if (mask == MAY_READ)
53371- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53372- return 0;
53373-
53374 return -EACCES;
53375 }
53376
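
Two ideas in the generic_permission() hunks above: first, under CONFIG_GRKERNSEC a denied-capability check may need to log, and logging can sleep, so an RCU-walk caller (MAY_NOT_BLOCK) is bounced with -ECHILD, the VFS convention for "retry this lookup in ref-walk mode"; second, the reordering probes the narrower CAP_DAC_READ_SEARCH before CAP_DAC_OVERRIDE (using the _nolog variant for the silent probe) so read-only access is granted, and audited, against the least powerful capability that suffices. The -ECHILD convention, sketched (my_permission is a placeholder name):

    /* RCU-walk convention: a permission hook that might sleep
     * (here, to log a denial) must punt so the VFS retries the
     * lookup in ref-walk mode, where sleeping is allowed */
    static int my_permission(struct inode *inode, int mask)
    {
        if (mask & MAY_NOT_BLOCK)
            return -ECHILD;     /* cannot sleep under rcu_read_lock() */
        /* ... slow path that may block and log ... */
        return 0;
    }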
53377@@ -824,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53378 {
53379 struct dentry *dentry = link->dentry;
53380 int error;
53381- char *s;
53382+ const char *s;
53383
53384 BUG_ON(nd->flags & LOOKUP_RCU);
53385
53386@@ -845,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53387 if (error)
53388 goto out_put_nd_path;
53389
53390+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
53391+ dentry->d_inode, dentry, nd->path.mnt)) {
53392+ error = -EACCES;
53393+ goto out_put_nd_path;
53394+ }
53395+
53396 nd->last_type = LAST_BIND;
53397 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
53398 error = PTR_ERR(*p);
53399@@ -1594,6 +1608,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
53400 break;
53401 res = walk_component(nd, path, &nd->last,
53402 nd->last_type, LOOKUP_FOLLOW);
53403+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
53404+ res = -EACCES;
53405 put_link(nd, &link, cookie);
53406 } while (res > 0);
53407
53408@@ -1692,7 +1708,7 @@ EXPORT_SYMBOL(full_name_hash);
53409 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
53410 {
53411 unsigned long a, b, adata, bdata, mask, hash, len;
53412- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53413+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53414
53415 hash = a = 0;
53416 len = -sizeof(unsigned long);
53417@@ -1977,6 +1993,8 @@ static int path_lookupat(int dfd, const char *name,
53418 if (err)
53419 break;
53420 err = lookup_last(nd, &path);
53421+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
53422+ err = -EACCES;
53423 put_link(nd, &link, cookie);
53424 }
53425 }
53426@@ -1984,6 +2002,13 @@ static int path_lookupat(int dfd, const char *name,
53427 if (!err)
53428 err = complete_walk(nd);
53429
53430+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
53431+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53432+ path_put(&nd->path);
53433+ err = -ENOENT;
53434+ }
53435+ }
53436+
53437 if (!err && nd->flags & LOOKUP_DIRECTORY) {
53438 if (!nd->inode->i_op->lookup) {
53439 path_put(&nd->path);
53440@@ -2011,8 +2036,15 @@ static int filename_lookup(int dfd, struct filename *name,
53441 retval = path_lookupat(dfd, name->name,
53442 flags | LOOKUP_REVAL, nd);
53443
53444- if (likely(!retval))
53445+ if (likely(!retval)) {
53446 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
53447+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
53448+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
53449+ path_put(&nd->path);
53450+ return -ENOENT;
53451+ }
53452+ }
53453+ }
53454 return retval;
53455 }
53456
53457@@ -2390,6 +2422,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
53458 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
53459 return -EPERM;
53460
53461+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
53462+ return -EPERM;
53463+ if (gr_handle_rawio(inode))
53464+ return -EPERM;
53465+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
53466+ return -EACCES;
53467+
53468 return 0;
53469 }
53470
53471@@ -2611,7 +2650,7 @@ looked_up:
53472 * cleared otherwise prior to returning.
53473 */
53474 static int lookup_open(struct nameidata *nd, struct path *path,
53475- struct file *file,
53476+ struct path *link, struct file *file,
53477 const struct open_flags *op,
53478 bool got_write, int *opened)
53479 {
53480@@ -2646,6 +2685,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53481 /* Negative dentry, just create the file */
53482 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
53483 umode_t mode = op->mode;
53484+
53485+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
53486+ error = -EACCES;
53487+ goto out_dput;
53488+ }
53489+
53490+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
53491+ error = -EACCES;
53492+ goto out_dput;
53493+ }
53494+
53495 if (!IS_POSIXACL(dir->d_inode))
53496 mode &= ~current_umask();
53497 /*
53498@@ -2667,6 +2717,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53499 nd->flags & LOOKUP_EXCL);
53500 if (error)
53501 goto out_dput;
53502+ else
53503+ gr_handle_create(dentry, nd->path.mnt);
53504 }
53505 out_no_open:
53506 path->dentry = dentry;
53507@@ -2681,7 +2733,7 @@ out_dput:
53508 /*
53509 * Handle the last step of open()
53510 */
53511-static int do_last(struct nameidata *nd, struct path *path,
53512+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
53513 struct file *file, const struct open_flags *op,
53514 int *opened, struct filename *name)
53515 {
53516@@ -2710,16 +2762,32 @@ static int do_last(struct nameidata *nd, struct path *path,
53517 error = complete_walk(nd);
53518 if (error)
53519 return error;
53520+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53521+ error = -ENOENT;
53522+ goto out;
53523+ }
53524 audit_inode(name, nd->path.dentry, 0);
53525 if (open_flag & O_CREAT) {
53526 error = -EISDIR;
53527 goto out;
53528 }
53529+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53530+ error = -EACCES;
53531+ goto out;
53532+ }
53533 goto finish_open;
53534 case LAST_BIND:
53535 error = complete_walk(nd);
53536 if (error)
53537 return error;
53538+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
53539+ error = -ENOENT;
53540+ goto out;
53541+ }
53542+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53543+ error = -EACCES;
53544+ goto out;
53545+ }
53546 audit_inode(name, dir, 0);
53547 goto finish_open;
53548 }
53549@@ -2768,7 +2836,7 @@ retry_lookup:
53550 */
53551 }
53552 mutex_lock(&dir->d_inode->i_mutex);
53553- error = lookup_open(nd, path, file, op, got_write, opened);
53554+ error = lookup_open(nd, path, link, file, op, got_write, opened);
53555 mutex_unlock(&dir->d_inode->i_mutex);
53556
53557 if (error <= 0) {
53558@@ -2792,11 +2860,28 @@ retry_lookup:
53559 goto finish_open_created;
53560 }
53561
53562+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
53563+ error = -ENOENT;
53564+ goto exit_dput;
53565+ }
53566+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
53567+ error = -EACCES;
53568+ goto exit_dput;
53569+ }
53570+
53571 /*
53572 * create/update audit record if it already exists.
53573 */
53574- if (path->dentry->d_inode)
53575+ if (path->dentry->d_inode) {
53576+ /* only check if O_CREAT is specified, all other checks need to go
53577+ into may_open */
53578+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
53579+ error = -EACCES;
53580+ goto exit_dput;
53581+ }
53582+
53583 audit_inode(name, path->dentry, 0);
53584+ }
53585
53586 /*
53587 * If atomic_open() acquired write access it is dropped now due to
53588@@ -2837,6 +2922,11 @@ finish_lookup:
53589 }
53590 }
53591 BUG_ON(inode != path->dentry->d_inode);
53592+ /* if we're resolving a symlink to another symlink */
53593+ if (link && gr_handle_symlink_owner(link, inode)) {
53594+ error = -EACCES;
53595+ goto out;
53596+ }
53597 return 1;
53598 }
53599
53600@@ -2846,7 +2936,6 @@ finish_lookup:
53601 save_parent.dentry = nd->path.dentry;
53602 save_parent.mnt = mntget(path->mnt);
53603 nd->path.dentry = path->dentry;
53604-
53605 }
53606 nd->inode = inode;
53607 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
53608@@ -2855,6 +2944,16 @@ finish_lookup:
53609 path_put(&save_parent);
53610 return error;
53611 }
53612+
53613+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53614+ error = -ENOENT;
53615+ goto out;
53616+ }
53617+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53618+ error = -EACCES;
53619+ goto out;
53620+ }
53621+
53622 error = -EISDIR;
53623 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
53624 goto out;
53625@@ -2953,7 +3052,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53626 if (unlikely(error))
53627 goto out;
53628
53629- error = do_last(nd, &path, file, op, &opened, pathname);
53630+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
53631 while (unlikely(error > 0)) { /* trailing symlink */
53632 struct path link = path;
53633 void *cookie;
53634@@ -2971,7 +3070,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53635 error = follow_link(&link, nd, &cookie);
53636 if (unlikely(error))
53637 break;
53638- error = do_last(nd, &path, file, op, &opened, pathname);
53639+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
53640 put_link(nd, &link, cookie);
53641 }
53642 out:
53643@@ -3071,8 +3170,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
53644 goto unlock;
53645
53646 error = -EEXIST;
53647- if (dentry->d_inode)
53648+ if (dentry->d_inode) {
53649+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
53650+ error = -ENOENT;
53651+ }
53652 goto fail;
53653+ }
53654 /*
53655 * Special case - lookup gave negative, but... we had foo/bar/
53656 * From the vfs_mknod() POV we just have a negative dentry -
53657@@ -3124,6 +3227,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53658 }
53659 EXPORT_SYMBOL(user_path_create);
53660
53661+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53662+{
53663+ struct filename *tmp = getname(pathname);
53664+ struct dentry *res;
53665+ if (IS_ERR(tmp))
53666+ return ERR_CAST(tmp);
53667+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53668+ if (IS_ERR(res))
53669+ putname(tmp);
53670+ else
53671+ *to = tmp;
53672+ return res;
53673+}
53674+
53675 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53676 {
53677 int error = may_create(dir, dentry);
53678@@ -3186,6 +3303,17 @@ retry:
53679
53680 if (!IS_POSIXACL(path.dentry->d_inode))
53681 mode &= ~current_umask();
53682+
53683+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53684+ error = -EPERM;
53685+ goto out;
53686+ }
53687+
53688+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53689+ error = -EACCES;
53690+ goto out;
53691+ }
53692+
53693 error = security_path_mknod(&path, dentry, mode, dev);
53694 if (error)
53695 goto out;
53696@@ -3202,6 +3330,8 @@ retry:
53697 break;
53698 }
53699 out:
53700+ if (!error)
53701+ gr_handle_create(dentry, path.mnt);
53702 done_path_create(&path, dentry);
53703 if (retry_estale(error, lookup_flags)) {
53704 lookup_flags |= LOOKUP_REVAL;
53705@@ -3254,9 +3384,16 @@ retry:
53706
53707 if (!IS_POSIXACL(path.dentry->d_inode))
53708 mode &= ~current_umask();
53709+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53710+ error = -EACCES;
53711+ goto out;
53712+ }
53713 error = security_path_mkdir(&path, dentry, mode);
53714 if (!error)
53715 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53716+ if (!error)
53717+ gr_handle_create(dentry, path.mnt);
53718+out:
53719 done_path_create(&path, dentry);
53720 if (retry_estale(error, lookup_flags)) {
53721 lookup_flags |= LOOKUP_REVAL;
53722@@ -3337,6 +3474,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53723 struct filename *name;
53724 struct dentry *dentry;
53725 struct nameidata nd;
53726+ ino_t saved_ino = 0;
53727+ dev_t saved_dev = 0;
53728 unsigned int lookup_flags = 0;
53729 retry:
53730 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53731@@ -3369,10 +3508,21 @@ retry:
53732 error = -ENOENT;
53733 goto exit3;
53734 }
53735+
53736+ saved_ino = dentry->d_inode->i_ino;
53737+ saved_dev = gr_get_dev_from_dentry(dentry);
53738+
53739+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53740+ error = -EACCES;
53741+ goto exit3;
53742+ }
53743+
53744 error = security_path_rmdir(&nd.path, dentry);
53745 if (error)
53746 goto exit3;
53747 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
53748+ if (!error && (saved_dev || saved_ino))
53749+ gr_handle_delete(saved_ino, saved_dev);
53750 exit3:
53751 dput(dentry);
53752 exit2:
53753@@ -3438,6 +3588,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
53754 struct dentry *dentry;
53755 struct nameidata nd;
53756 struct inode *inode = NULL;
53757+ ino_t saved_ino = 0;
53758+ dev_t saved_dev = 0;
53759 unsigned int lookup_flags = 0;
53760 retry:
53761 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53762@@ -3464,10 +3616,22 @@ retry:
53763 if (!inode)
53764 goto slashes;
53765 ihold(inode);
53766+
53767+ if (inode->i_nlink <= 1) {
53768+ saved_ino = inode->i_ino;
53769+ saved_dev = gr_get_dev_from_dentry(dentry);
53770+ }
53771+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
53772+ error = -EACCES;
53773+ goto exit2;
53774+ }
53775+
53776 error = security_path_unlink(&nd.path, dentry);
53777 if (error)
53778 goto exit2;
53779 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
53780+ if (!error && (saved_ino || saved_dev))
53781+ gr_handle_delete(saved_ino, saved_dev);
53782 exit2:
53783 dput(dentry);
53784 }
53785@@ -3545,9 +3709,17 @@ retry:
53786 if (IS_ERR(dentry))
53787 goto out_putname;
53788
53789+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
53790+ error = -EACCES;
53791+ goto out;
53792+ }
53793+
53794 error = security_path_symlink(&path, dentry, from->name);
53795 if (!error)
53796 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
53797+ if (!error)
53798+ gr_handle_create(dentry, path.mnt);
53799+out:
53800 done_path_create(&path, dentry);
53801 if (retry_estale(error, lookup_flags)) {
53802 lookup_flags |= LOOKUP_REVAL;
53803@@ -3621,6 +3793,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
53804 {
53805 struct dentry *new_dentry;
53806 struct path old_path, new_path;
53807+ struct filename *to = NULL;
53808 int how = 0;
53809 int error;
53810
53811@@ -3644,7 +3817,7 @@ retry:
53812 if (error)
53813 return error;
53814
53815- new_dentry = user_path_create(newdfd, newname, &new_path,
53816+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
53817 (how & LOOKUP_REVAL));
53818 error = PTR_ERR(new_dentry);
53819 if (IS_ERR(new_dentry))
53820@@ -3656,11 +3829,28 @@ retry:
53821 error = may_linkat(&old_path);
53822 if (unlikely(error))
53823 goto out_dput;
53824+
53825+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
53826+ old_path.dentry->d_inode,
53827+ old_path.dentry->d_inode->i_mode, to)) {
53828+ error = -EACCES;
53829+ goto out_dput;
53830+ }
53831+
53832+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
53833+ old_path.dentry, old_path.mnt, to)) {
53834+ error = -EACCES;
53835+ goto out_dput;
53836+ }
53837+
53838 error = security_path_link(old_path.dentry, &new_path, new_dentry);
53839 if (error)
53840 goto out_dput;
53841 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
53842+ if (!error)
53843+ gr_handle_create(new_dentry, new_path.mnt);
53844 out_dput:
53845+ putname(to);
53846 done_path_create(&new_path, new_dentry);
53847 if (retry_estale(error, how)) {
53848 how |= LOOKUP_REVAL;
53849@@ -3906,12 +4096,21 @@ retry:
53850 if (new_dentry == trap)
53851 goto exit5;
53852
53853+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
53854+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
53855+ to);
53856+ if (error)
53857+ goto exit5;
53858+
53859 error = security_path_rename(&oldnd.path, old_dentry,
53860 &newnd.path, new_dentry);
53861 if (error)
53862 goto exit5;
53863 error = vfs_rename(old_dir->d_inode, old_dentry,
53864 new_dir->d_inode, new_dentry);
53865+ if (!error)
53866+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
53867+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
53868 exit5:
53869 dput(new_dentry);
53870 exit4:
53871@@ -3943,6 +4142,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
53872
53873 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
53874 {
53875+ char tmpbuf[64];
53876+ const char *newlink;
53877 int len;
53878
53879 len = PTR_ERR(link);
53880@@ -3952,7 +4153,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
53881 len = strlen(link);
53882 if (len > (unsigned) buflen)
53883 len = buflen;
53884- if (copy_to_user(buffer, link, len))
53885+
53886+ if (len < sizeof(tmpbuf)) {
53887+ memcpy(tmpbuf, link, len);
53888+ newlink = tmpbuf;
53889+ } else
53890+ newlink = link;
53891+
53892+ if (copy_to_user(buffer, newlink, len))
53893 len = -EFAULT;
53894 out:
53895 return len;
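
The vfs_readlink() hunk above bounces link targets shorter than 64 bytes through a stack buffer before copy_to_user(). The patch does not say why; a plausible reading is that the final usercopy should never read directly out of the filesystem-owned string. A minimal userspace sketch of that bounce-buffer shape (memcpy stands in for copy_to_user; all names are hypothetical):

    #include <string.h>

    #define BOUNCE_MAX 64

    /* Copy 'src' out through a fixed stack buffer when it fits, so the
     * final copy never reads the original object directly. */
    static size_t bounce_copy(char *dst, const char *src, size_t len)
    {
        char tmpbuf[BOUNCE_MAX];
        const char *from = src;

        if (len < sizeof(tmpbuf)) {
            memcpy(tmpbuf, src, len);
            from = tmpbuf;
        }
        memcpy(dst, from, len); /* stands in for copy_to_user() */
        return len;
    }
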
53896diff --git a/fs/namespace.c b/fs/namespace.c
53897index 5dd7709..0002ebe 100644
53898--- a/fs/namespace.c
53899+++ b/fs/namespace.c
53900@@ -1219,6 +1219,9 @@ static int do_umount(struct mount *mnt, int flags)
53901 if (!(sb->s_flags & MS_RDONLY))
53902 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
53903 up_write(&sb->s_umount);
53904+
53905+ gr_log_remount(mnt->mnt_devname, retval);
53906+
53907 return retval;
53908 }
53909
53910@@ -1238,6 +1241,9 @@ static int do_umount(struct mount *mnt, int flags)
53911 br_write_unlock(&vfsmount_lock);
53912 up_write(&namespace_sem);
53913 release_mounts(&umount_list);
53914+
53915+ gr_log_unmount(mnt->mnt_devname, retval);
53916+
53917 return retval;
53918 }
53919
53920@@ -2294,6 +2300,16 @@ long do_mount(const char *dev_name, const char *dir_name,
53921 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
53922 MS_STRICTATIME);
53923
53924+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
53925+ retval = -EPERM;
53926+ goto dput_out;
53927+ }
53928+
53929+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
53930+ retval = -EPERM;
53931+ goto dput_out;
53932+ }
53933+
53934 if (flags & MS_REMOUNT)
53935 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
53936 data_page);
53937@@ -2308,6 +2324,9 @@ long do_mount(const char *dev_name, const char *dir_name,
53938 dev_name, data_page);
53939 dput_out:
53940 path_put(&path);
53941+
53942+ gr_log_mount(dev_name, dir_name, retval);
53943+
53944 return retval;
53945 }
53946
53947@@ -2594,6 +2613,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
53948 if (error)
53949 goto out2;
53950
53951+ if (gr_handle_chroot_pivot()) {
53952+ error = -EPERM;
53953+ goto out2;
53954+ }
53955+
53956 get_fs_root(current->fs, &root);
53957 error = lock_mount(&old);
53958 if (error)
53959@@ -2842,7 +2866,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
53960 !nsown_capable(CAP_SYS_ADMIN))
53961 return -EPERM;
53962
53963- if (fs->users != 1)
53964+ if (atomic_read(&fs->users) != 1)
53965 return -EINVAL;
53966
53967 get_mnt_ns(mnt_ns);
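
A recurring shape in the fs/namespace.c hooks above: gr_log_mount()/gr_log_unmount()/gr_log_remount() run after the operation, with the return value in hand, so failures are audited alongside successes. A rough sketch of that placement, under the assumption that the after-the-fact ordering is the point (mount_op and the log format are stand-ins):

    #include <stdio.h>

    /* Perform the operation first, then log the names together with the
     * outcome, so both successes and failures leave a record. */
    static int mount_logged(const char *dev, const char *dir,
                            int (*mount_op)(const char *, const char *))
    {
        int retval = mount_op(dev, dir);

        fprintf(stderr, "mount %s on %s -> %d\n", dev, dir, retval);
        return retval;
    }
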
53968diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
53969index 59461c9..b17c57e 100644
53970--- a/fs/nfs/callback_xdr.c
53971+++ b/fs/nfs/callback_xdr.c
53972@@ -51,7 +51,7 @@ struct callback_op {
53973 callback_decode_arg_t decode_args;
53974 callback_encode_res_t encode_res;
53975 long res_maxsize;
53976-};
53977+} __do_const;
53978
53979 static struct callback_op callback_ops[];
53980
53981diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
53982index ebeb94c..ff35337 100644
53983--- a/fs/nfs/inode.c
53984+++ b/fs/nfs/inode.c
53985@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
53986 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
53987 }
53988
53989-static atomic_long_t nfs_attr_generation_counter;
53990+static atomic_long_unchecked_t nfs_attr_generation_counter;
53991
53992 static unsigned long nfs_read_attr_generation_counter(void)
53993 {
53994- return atomic_long_read(&nfs_attr_generation_counter);
53995+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
53996 }
53997
53998 unsigned long nfs_inc_attr_generation_counter(void)
53999 {
54000- return atomic_long_inc_return(&nfs_attr_generation_counter);
54001+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
54002 }
54003
54004 void nfs_fattr_init(struct nfs_fattr *fattr)
54005diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
54006index 9d1c5db..1e13db8 100644
54007--- a/fs/nfsd/nfs4proc.c
54008+++ b/fs/nfsd/nfs4proc.c
54009@@ -1097,7 +1097,7 @@ struct nfsd4_operation {
54010 nfsd4op_rsize op_rsize_bop;
54011 stateid_getter op_get_currentstateid;
54012 stateid_setter op_set_currentstateid;
54013-};
54014+} __do_const;
54015
54016 static struct nfsd4_operation nfsd4_ops[];
54017
54018diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
54019index d1dd710..32ac0e8 100644
54020--- a/fs/nfsd/nfs4xdr.c
54021+++ b/fs/nfsd/nfs4xdr.c
54022@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
54023
54024 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
54025
54026-static nfsd4_dec nfsd4_dec_ops[] = {
54027+static const nfsd4_dec nfsd4_dec_ops[] = {
54028 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54029 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54030 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54031@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
54032 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
54033 };
54034
54035-static nfsd4_dec nfsd41_dec_ops[] = {
54036+static const nfsd4_dec nfsd41_dec_ops[] = {
54037 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54038 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54039 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54040@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
54041 };
54042
54043 struct nfsd4_minorversion_ops {
54044- nfsd4_dec *decoders;
54045+ const nfsd4_dec *decoders;
54046 int nops;
54047 };
54048
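
Constifying nfsd4_dec_ops[] (and tagging function-pointer structs __do_const) is grsecurity's constification pattern: dispatch tables move into read-only memory, so a stray kernel write cannot redirect them. The same idea in plain C:

    typedef int (*op_fn)(void);

    static int op_noop(void) { return 0; }

    /* A const table of function pointers lands in .rodata, so its
     * targets cannot be rewritten at run time. */
    static const op_fn dec_ops[] = {
        op_noop,
    };

    int main(void)
    {
        return dec_ops[0]();
    }
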
54049diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
54050index 69c6413..c0408d2 100644
54051--- a/fs/nfsd/vfs.c
54052+++ b/fs/nfsd/vfs.c
54053@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54054 } else {
54055 oldfs = get_fs();
54056 set_fs(KERNEL_DS);
54057- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
54058+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
54059 set_fs(oldfs);
54060 }
54061
54062@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54063
54064 /* Write the data. */
54065 oldfs = get_fs(); set_fs(KERNEL_DS);
54066- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
54067+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
54068 set_fs(oldfs);
54069 if (host_err < 0)
54070 goto out_nfserr;
54071@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
54072 */
54073
54074 oldfs = get_fs(); set_fs(KERNEL_DS);
54075- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
54076+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
54077 set_fs(oldfs);
54078
54079 if (host_err < 0)
54080diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
54081index fea6bd5..8ee9d81 100644
54082--- a/fs/nls/nls_base.c
54083+++ b/fs/nls/nls_base.c
54084@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
54085
54086 int register_nls(struct nls_table * nls)
54087 {
54088- struct nls_table ** tmp = &tables;
54089+ struct nls_table *tmp = tables;
54090
54091 if (nls->next)
54092 return -EBUSY;
54093
54094 spin_lock(&nls_lock);
54095- while (*tmp) {
54096- if (nls == *tmp) {
54097+ while (tmp) {
54098+ if (nls == tmp) {
54099 spin_unlock(&nls_lock);
54100 return -EBUSY;
54101 }
54102- tmp = &(*tmp)->next;
54103+ tmp = tmp->next;
54104 }
54105- nls->next = tables;
54106+ pax_open_kernel();
54107+ *(struct nls_table **)&nls->next = tables;
54108+ pax_close_kernel();
54109 tables = nls;
54110 spin_unlock(&nls_lock);
54111 return 0;
54112@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
54113
54114 int unregister_nls(struct nls_table * nls)
54115 {
54116- struct nls_table ** tmp = &tables;
54117+ struct nls_table * const * tmp = &tables;
54118
54119 spin_lock(&nls_lock);
54120 while (*tmp) {
54121 if (nls == *tmp) {
54122- *tmp = nls->next;
54123+ pax_open_kernel();
54124+ *(struct nls_table **)tmp = nls->next;
54125+ pax_close_kernel();
54126 spin_unlock(&nls_lock);
54127 return 0;
54128 }
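
register_nls()/unregister_nls() now update the constified list links inside pax_open_kernel()/pax_close_kernel(), PaX's window for writing to otherwise read-only kernel data. As a loose userspace analogue (assumption: mprotect() stands in for the kernel write-protection toggle; error handling omitted for brevity):

    #include <stdalign.h>
    #include <sys/mman.h>

    struct node { struct node *next; };

    /* The table lives on a page we keep read-only in normal operation. */
    static alignas(4096) struct node table;

    static void link_node(struct node *n)
    {
        /* "open": grant write access only for the pointer update */
        mprotect(&table, sizeof(table), PROT_READ | PROT_WRITE);
        n->next = table.next;
        table.next = n;
        /* "close": drop back to read-only */
        mprotect(&table, sizeof(table), PROT_READ);
    }
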
54129diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
54130index 7424929..35f6be5 100644
54131--- a/fs/nls/nls_euc-jp.c
54132+++ b/fs/nls/nls_euc-jp.c
54133@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
54134 p_nls = load_nls("cp932");
54135
54136 if (p_nls) {
54137- table.charset2upper = p_nls->charset2upper;
54138- table.charset2lower = p_nls->charset2lower;
54139+ pax_open_kernel();
54140+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54141+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54142+ pax_close_kernel();
54143 return register_nls(&table);
54144 }
54145
54146diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
54147index e7bc1d7..06bd4bb 100644
54148--- a/fs/nls/nls_koi8-ru.c
54149+++ b/fs/nls/nls_koi8-ru.c
54150@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
54151 p_nls = load_nls("koi8-u");
54152
54153 if (p_nls) {
54154- table.charset2upper = p_nls->charset2upper;
54155- table.charset2lower = p_nls->charset2lower;
54156+ pax_open_kernel();
54157+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54158+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54159+ pax_close_kernel();
54160 return register_nls(&table);
54161 }
54162
54163diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
54164index 9ff4a5e..deb1f0f 100644
54165--- a/fs/notify/fanotify/fanotify_user.c
54166+++ b/fs/notify/fanotify/fanotify_user.c
54167@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
54168
54169 fd = fanotify_event_metadata.fd;
54170 ret = -EFAULT;
54171- if (copy_to_user(buf, &fanotify_event_metadata,
54172- fanotify_event_metadata.event_len))
54173+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
54174+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
54175 goto out_close_fd;
54176
54177 ret = prepare_for_access_response(group, event, fd);
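
The fanotify change refuses to copy more bytes to userspace than the kernel-side metadata object actually holds, so a too-large event_len can no longer leak adjacent stack memory. Reduced to a sketch (memcpy stands in for copy_to_user):

    #include <errno.h>
    #include <string.h>

    struct event_metadata { unsigned int event_len; char payload[24]; };

    /* Refuse to expose more bytes than the source object contains. */
    static int copy_event(void *ubuf, const struct event_metadata *ev)
    {
        if (ev->event_len > sizeof(*ev))
            return -EFAULT;
        memcpy(ubuf, ev, ev->event_len); /* stands in for copy_to_user() */
        return 0;
    }
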
54178diff --git a/fs/notify/notification.c b/fs/notify/notification.c
54179index 7b51b05..5ea5ef6 100644
54180--- a/fs/notify/notification.c
54181+++ b/fs/notify/notification.c
54182@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
54183 * get set to 0 so it will never get 'freed'
54184 */
54185 static struct fsnotify_event *q_overflow_event;
54186-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54187+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54188
54189 /**
54190 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
54191@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54192 */
54193 u32 fsnotify_get_cookie(void)
54194 {
54195- return atomic_inc_return(&fsnotify_sync_cookie);
54196+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
54197 }
54198 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
54199
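
atomic_unchecked_t is PaX's opt-out from reference-counter overflow protection: counters that are expected to wrap, such as the fsnotify sync cookie above, keep plain wrapping semantics, while real refcounts saturate on overflow. A sketch of the distinction (the real enforcement lives in the arch atomic ops; this only mirrors the intent):

    #include <limits.h>

    /* Checked flavor: saturate rather than wrap, since overflowing a
     * refcount is a bug worth trapping. */
    static int checked_inc(int *v)
    {
        if (*v == INT_MAX)
            return *v; /* saturate; PaX would also report here */
        return ++*v;
    }

    /* Unchecked flavor: plain wrap-around is fine for cookies and
     * generation counters. */
    static unsigned int unchecked_inc(unsigned int *v)
    {
        return ++*v;
    }
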
54200diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
54201index 99e3610..02c1068 100644
54202--- a/fs/ntfs/dir.c
54203+++ b/fs/ntfs/dir.c
54204@@ -1329,7 +1329,7 @@ find_next_index_buffer:
54205 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
54206 ~(s64)(ndir->itype.index.block_size - 1)));
54207 /* Bounds checks. */
54208- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54209+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54210 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
54211 "inode 0x%lx or driver bug.", vdir->i_ino);
54212 goto err_out;
54213diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
54214index 5b2d4f0..c6de396 100644
54215--- a/fs/ntfs/file.c
54216+++ b/fs/ntfs/file.c
54217@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
54218 #endif /* NTFS_RW */
54219 };
54220
54221-const struct file_operations ntfs_empty_file_ops = {};
54222+const struct file_operations ntfs_empty_file_ops __read_only;
54223
54224-const struct inode_operations ntfs_empty_inode_ops = {};
54225+const struct inode_operations ntfs_empty_inode_ops __read_only;
54226diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
54227index a9f78c7..ed8a381 100644
54228--- a/fs/ocfs2/localalloc.c
54229+++ b/fs/ocfs2/localalloc.c
54230@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
54231 goto bail;
54232 }
54233
54234- atomic_inc(&osb->alloc_stats.moves);
54235+ atomic_inc_unchecked(&osb->alloc_stats.moves);
54236
54237 bail:
54238 if (handle)
54239diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
54240index d355e6e..578d905 100644
54241--- a/fs/ocfs2/ocfs2.h
54242+++ b/fs/ocfs2/ocfs2.h
54243@@ -235,11 +235,11 @@ enum ocfs2_vol_state
54244
54245 struct ocfs2_alloc_stats
54246 {
54247- atomic_t moves;
54248- atomic_t local_data;
54249- atomic_t bitmap_data;
54250- atomic_t bg_allocs;
54251- atomic_t bg_extends;
54252+ atomic_unchecked_t moves;
54253+ atomic_unchecked_t local_data;
54254+ atomic_unchecked_t bitmap_data;
54255+ atomic_unchecked_t bg_allocs;
54256+ atomic_unchecked_t bg_extends;
54257 };
54258
54259 enum ocfs2_local_alloc_state
54260diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
54261index b7e74b5..19c6536 100644
54262--- a/fs/ocfs2/suballoc.c
54263+++ b/fs/ocfs2/suballoc.c
54264@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
54265 mlog_errno(status);
54266 goto bail;
54267 }
54268- atomic_inc(&osb->alloc_stats.bg_extends);
54269+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
54270
54271 /* You should never ask for this much metadata */
54272 BUG_ON(bits_wanted >
54273@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
54274 mlog_errno(status);
54275 goto bail;
54276 }
54277- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54278+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54279
54280 *suballoc_loc = res.sr_bg_blkno;
54281 *suballoc_bit_start = res.sr_bit_offset;
54282@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
54283 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
54284 res->sr_bits);
54285
54286- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54287+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54288
54289 BUG_ON(res->sr_bits != 1);
54290
54291@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
54292 mlog_errno(status);
54293 goto bail;
54294 }
54295- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54296+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54297
54298 BUG_ON(res.sr_bits != 1);
54299
54300@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54301 cluster_start,
54302 num_clusters);
54303 if (!status)
54304- atomic_inc(&osb->alloc_stats.local_data);
54305+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
54306 } else {
54307 if (min_clusters > (osb->bitmap_cpg - 1)) {
54308 /* The only paths asking for contiguousness
54309@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54310 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
54311 res.sr_bg_blkno,
54312 res.sr_bit_offset);
54313- atomic_inc(&osb->alloc_stats.bitmap_data);
54314+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
54315 *num_clusters = res.sr_bits;
54316 }
54317 }
54318diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
54319index 0e91ec2..f4b3fc6 100644
54320--- a/fs/ocfs2/super.c
54321+++ b/fs/ocfs2/super.c
54322@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
54323 "%10s => GlobalAllocs: %d LocalAllocs: %d "
54324 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
54325 "Stats",
54326- atomic_read(&osb->alloc_stats.bitmap_data),
54327- atomic_read(&osb->alloc_stats.local_data),
54328- atomic_read(&osb->alloc_stats.bg_allocs),
54329- atomic_read(&osb->alloc_stats.moves),
54330- atomic_read(&osb->alloc_stats.bg_extends));
54331+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
54332+ atomic_read_unchecked(&osb->alloc_stats.local_data),
54333+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
54334+ atomic_read_unchecked(&osb->alloc_stats.moves),
54335+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
54336
54337 out += snprintf(buf + out, len - out,
54338 "%10s => State: %u Descriptor: %llu Size: %u bits "
54339@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
54340 spin_lock_init(&osb->osb_xattr_lock);
54341 ocfs2_init_steal_slots(osb);
54342
54343- atomic_set(&osb->alloc_stats.moves, 0);
54344- atomic_set(&osb->alloc_stats.local_data, 0);
54345- atomic_set(&osb->alloc_stats.bitmap_data, 0);
54346- atomic_set(&osb->alloc_stats.bg_allocs, 0);
54347- atomic_set(&osb->alloc_stats.bg_extends, 0);
54348+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
54349+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
54350+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
54351+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
54352+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
54353
54354 /* Copy the blockcheck stats from the superblock probe */
54355 osb->osb_ecc_stats = *stats;
54356diff --git a/fs/open.c b/fs/open.c
54357index 9b33c0c..2ffcca2 100644
54358--- a/fs/open.c
54359+++ b/fs/open.c
54360@@ -31,6 +31,8 @@
54361 #include <linux/ima.h>
54362 #include <linux/dnotify.h>
54363
54364+#define CREATE_TRACE_POINTS
54365+#include <trace/events/fs.h>
54366 #include "internal.h"
54367
54368 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
54369@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
54370 error = locks_verify_truncate(inode, NULL, length);
54371 if (!error)
54372 error = security_path_truncate(path);
54373+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
54374+ error = -EACCES;
54375 if (!error)
54376 error = do_truncate(path->dentry, length, 0, NULL);
54377
54378@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
54379 error = locks_verify_truncate(inode, f.file, length);
54380 if (!error)
54381 error = security_path_truncate(&f.file->f_path);
54382+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
54383+ error = -EACCES;
54384 if (!error)
54385 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
54386 sb_end_write(inode->i_sb);
54387@@ -373,6 +379,9 @@ retry:
54388 if (__mnt_is_readonly(path.mnt))
54389 res = -EROFS;
54390
54391+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
54392+ res = -EACCES;
54393+
54394 out_path_release:
54395 path_put(&path);
54396 if (retry_estale(res, lookup_flags)) {
54397@@ -404,6 +413,8 @@ retry:
54398 if (error)
54399 goto dput_and_out;
54400
54401+ gr_log_chdir(path.dentry, path.mnt);
54402+
54403 set_fs_pwd(current->fs, &path);
54404
54405 dput_and_out:
54406@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
54407 goto out_putf;
54408
54409 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
54410+
54411+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
54412+ error = -EPERM;
54413+
54414+ if (!error)
54415+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
54416+
54417 if (!error)
54418 set_fs_pwd(current->fs, &f.file->f_path);
54419 out_putf:
54420@@ -462,7 +480,13 @@ retry:
54421 if (error)
54422 goto dput_and_out;
54423
54424+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
54425+ goto dput_and_out;
54426+
54427 set_fs_root(current->fs, &path);
54428+
54429+ gr_handle_chroot_chdir(&path);
54430+
54431 error = 0;
54432 dput_and_out:
54433 path_put(&path);
54434@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
54435 if (error)
54436 return error;
54437 mutex_lock(&inode->i_mutex);
54438+
54439+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
54440+ error = -EACCES;
54441+ goto out_unlock;
54442+ }
54443+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
54444+ error = -EACCES;
54445+ goto out_unlock;
54446+ }
54447+
54448 error = security_path_chmod(path, mode);
54449 if (error)
54450 goto out_unlock;
54451@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
54452 uid = make_kuid(current_user_ns(), user);
54453 gid = make_kgid(current_user_ns(), group);
54454
54455+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
54456+ return -EACCES;
54457+
54458 newattrs.ia_valid = ATTR_CTIME;
54459 if (user != (uid_t) -1) {
54460 if (!uid_valid(uid))
54461@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
54462 } else {
54463 fsnotify_open(f);
54464 fd_install(fd, f);
54465+ trace_do_sys_open(tmp->name, flags, mode);
54466 }
54467 }
54468 putname(tmp);
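
The fs/open.c hooks above all follow one pattern: once the stock permission checks pass, consult the grsecurity policy and demote success to -EACCES before any state changes. Schematically (gr_ok and do_op are hypothetical stand-ins):

    #include <errno.h>
    #include <stdbool.h>

    /* Run the extra policy check only if the prior checks succeeded,
     * and only then perform the actual operation. */
    static int checked_op(int error, bool (*gr_ok)(void), int (*do_op)(void))
    {
        if (!error && !gr_ok())
            error = -EACCES;
        if (!error)
            error = do_op();
        return error;
    }
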
54469diff --git a/fs/pipe.c b/fs/pipe.c
54470index 8e2e73f..1ef1048 100644
54471--- a/fs/pipe.c
54472+++ b/fs/pipe.c
54473@@ -438,9 +438,9 @@ redo:
54474 }
54475 if (bufs) /* More to do? */
54476 continue;
54477- if (!pipe->writers)
54478+ if (!atomic_read(&pipe->writers))
54479 break;
54480- if (!pipe->waiting_writers) {
54481+ if (!atomic_read(&pipe->waiting_writers)) {
54482 /* syscall merging: Usually we must not sleep
54483 * if O_NONBLOCK is set, or if we got some data.
54484 * But if a writer sleeps in kernel space, then
54485@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
54486 mutex_lock(&inode->i_mutex);
54487 pipe = inode->i_pipe;
54488
54489- if (!pipe->readers) {
54490+ if (!atomic_read(&pipe->readers)) {
54491 send_sig(SIGPIPE, current, 0);
54492 ret = -EPIPE;
54493 goto out;
54494@@ -553,7 +553,7 @@ redo1:
54495 for (;;) {
54496 int bufs;
54497
54498- if (!pipe->readers) {
54499+ if (!atomic_read(&pipe->readers)) {
54500 send_sig(SIGPIPE, current, 0);
54501 if (!ret)
54502 ret = -EPIPE;
54503@@ -644,9 +644,9 @@ redo2:
54504 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54505 do_wakeup = 0;
54506 }
54507- pipe->waiting_writers++;
54508+ atomic_inc(&pipe->waiting_writers);
54509 pipe_wait(pipe);
54510- pipe->waiting_writers--;
54511+ atomic_dec(&pipe->waiting_writers);
54512 }
54513 out:
54514 mutex_unlock(&inode->i_mutex);
54515@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54516 mask = 0;
54517 if (filp->f_mode & FMODE_READ) {
54518 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
54519- if (!pipe->writers && filp->f_version != pipe->w_counter)
54520+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
54521 mask |= POLLHUP;
54522 }
54523
54524@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54525 * Most Unices do not set POLLERR for FIFOs but on Linux they
54526 * behave exactly like pipes for poll().
54527 */
54528- if (!pipe->readers)
54529+ if (!atomic_read(&pipe->readers))
54530 mask |= POLLERR;
54531 }
54532
54533@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
54534
54535 mutex_lock(&inode->i_mutex);
54536 pipe = inode->i_pipe;
54537- pipe->readers -= decr;
54538- pipe->writers -= decw;
54539+ atomic_sub(decr, &pipe->readers);
54540+ atomic_sub(decw, &pipe->writers);
54541
54542- if (!pipe->readers && !pipe->writers) {
54543+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
54544 free_pipe_info(inode);
54545 } else {
54546 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
54547@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
54548
54549 if (inode->i_pipe) {
54550 ret = 0;
54551- inode->i_pipe->readers++;
54552+ atomic_inc(&inode->i_pipe->readers);
54553 }
54554
54555 mutex_unlock(&inode->i_mutex);
54556@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
54557
54558 if (inode->i_pipe) {
54559 ret = 0;
54560- inode->i_pipe->writers++;
54561+ atomic_inc(&inode->i_pipe->writers);
54562 }
54563
54564 mutex_unlock(&inode->i_mutex);
54565@@ -871,9 +871,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
54566 if (inode->i_pipe) {
54567 ret = 0;
54568 if (filp->f_mode & FMODE_READ)
54569- inode->i_pipe->readers++;
54570+ atomic_inc(&inode->i_pipe->readers);
54571 if (filp->f_mode & FMODE_WRITE)
54572- inode->i_pipe->writers++;
54573+ atomic_inc(&inode->i_pipe->writers);
54574 }
54575
54576 mutex_unlock(&inode->i_mutex);
54577@@ -965,7 +965,7 @@ void free_pipe_info(struct inode *inode)
54578 inode->i_pipe = NULL;
54579 }
54580
54581-static struct vfsmount *pipe_mnt __read_mostly;
54582+struct vfsmount *pipe_mnt __read_mostly;
54583
54584 /*
54585 * pipefs_dname() is called from d_path().
54586@@ -995,7 +995,8 @@ static struct inode * get_pipe_inode(void)
54587 goto fail_iput;
54588 inode->i_pipe = pipe;
54589
54590- pipe->readers = pipe->writers = 1;
54591+ atomic_set(&pipe->readers, 1);
54592+ atomic_set(&pipe->writers, 1);
54593 inode->i_fop = &rdwr_pipefifo_fops;
54594
54595 /*
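
fs/pipe.c converts the readers/writers/waiting_writers counters from plain ints (previously serialized by i_mutex) to atomic_t, so they can be read without the lock and, under PaX REFCOUNT, checked for overflow. A C11 sketch of the converted shape:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct pipe_counts {
        atomic_int readers;
        atomic_int writers;
    };

    static void pipe_open_reader(struct pipe_counts *p)
    {
        atomic_fetch_add(&p->readers, 1); /* was: pipe->readers++ under the mutex */
    }

    static bool pipe_is_dead(struct pipe_counts *p)
    {
        /* readable without the pipe mutex once the counters are atomic */
        return atomic_load(&p->readers) == 0 &&
               atomic_load(&p->writers) == 0;
    }
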
54596diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
54597index 15af622..0e9f4467 100644
54598--- a/fs/proc/Kconfig
54599+++ b/fs/proc/Kconfig
54600@@ -30,12 +30,12 @@ config PROC_FS
54601
54602 config PROC_KCORE
54603 bool "/proc/kcore support" if !ARM
54604- depends on PROC_FS && MMU
54605+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
54606
54607 config PROC_VMCORE
54608 bool "/proc/vmcore support"
54609- depends on PROC_FS && CRASH_DUMP
54610- default y
54611+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
54612+ default n
54613 help
54614 Exports the dump image of crashed kernel in ELF format.
54615
54616@@ -59,8 +59,8 @@ config PROC_SYSCTL
54617 limited in memory.
54618
54619 config PROC_PAGE_MONITOR
54620- default y
54621- depends on PROC_FS && MMU
54622+ default n
54623+ depends on PROC_FS && MMU && !GRKERNSEC
54624 bool "Enable /proc page monitoring" if EXPERT
54625 help
54626 Various /proc files exist to monitor process memory utilization:
54627diff --git a/fs/proc/array.c b/fs/proc/array.c
54628index 6a91e6f..e54dbc14 100644
54629--- a/fs/proc/array.c
54630+++ b/fs/proc/array.c
54631@@ -60,6 +60,7 @@
54632 #include <linux/tty.h>
54633 #include <linux/string.h>
54634 #include <linux/mman.h>
54635+#include <linux/grsecurity.h>
54636 #include <linux/proc_fs.h>
54637 #include <linux/ioport.h>
54638 #include <linux/uaccess.h>
54639@@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
54640 seq_putc(m, '\n');
54641 }
54642
54643+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54644+static inline void task_pax(struct seq_file *m, struct task_struct *p)
54645+{
54646+ if (p->mm)
54647+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
54648+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
54649+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
54650+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
54651+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
54652+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
54653+ else
54654+ seq_printf(m, "PaX:\t-----\n");
54655+}
54656+#endif
54657+
54658 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54659 struct pid *pid, struct task_struct *task)
54660 {
54661@@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54662 task_cpus_allowed(m, task);
54663 cpuset_task_status_allowed(m, task);
54664 task_context_switch_counts(m, task);
54665+
54666+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54667+ task_pax(m, task);
54668+#endif
54669+
54670+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
54671+ task_grsec_rbac(m, task);
54672+#endif
54673+
54674 return 0;
54675 }
54676
54677+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54678+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54679+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54680+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54681+#endif
54682+
54683 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54684 struct pid *pid, struct task_struct *task, int whole)
54685 {
54686@@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54687 char tcomm[sizeof(task->comm)];
54688 unsigned long flags;
54689
54690+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54691+ if (current->exec_id != m->exec_id) {
54692+ gr_log_badprocpid("stat");
54693+ return 0;
54694+ }
54695+#endif
54696+
54697 state = *get_task_state(task);
54698 vsize = eip = esp = 0;
54699 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54700@@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54701 gtime = task->gtime;
54702 }
54703
54704+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54705+ if (PAX_RAND_FLAGS(mm)) {
54706+ eip = 0;
54707+ esp = 0;
54708+ wchan = 0;
54709+ }
54710+#endif
54711+#ifdef CONFIG_GRKERNSEC_HIDESYM
54712+ wchan = 0;
54713+	eip = 0;
54714+	esp = 0;
54715+#endif
54716+
54717 /* scale priority and nice values from timeslices to -20..20 */
54718 /* to make it look like a "normal" Unix priority/nice value */
54719 priority = task_prio(task);
54720@@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54721 seq_put_decimal_ull(m, ' ', vsize);
54722 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
54723 seq_put_decimal_ull(m, ' ', rsslim);
54724+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54725+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
54726+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
54727+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
54728+#else
54729 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
54730 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
54731 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
54732+#endif
54733 seq_put_decimal_ull(m, ' ', esp);
54734 seq_put_decimal_ull(m, ' ', eip);
54735 /* The signal information here is obsolete.
54736@@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54737 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
54738 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
54739
54740- if (mm && permitted) {
54741+ if (mm && permitted
54742+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54743+ && !PAX_RAND_FLAGS(mm)
54744+#endif
54745+ ) {
54746 seq_put_decimal_ull(m, ' ', mm->start_data);
54747 seq_put_decimal_ull(m, ' ', mm->end_data);
54748 seq_put_decimal_ull(m, ' ', mm->start_brk);
54749@@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54750 struct pid *pid, struct task_struct *task)
54751 {
54752 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
54753- struct mm_struct *mm = get_task_mm(task);
54754+ struct mm_struct *mm;
54755
54756+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54757+ if (current->exec_id != m->exec_id) {
54758+ gr_log_badprocpid("statm");
54759+ return 0;
54760+ }
54761+#endif
54762+ mm = get_task_mm(task);
54763 if (mm) {
54764 size = task_statm(mm, &shared, &text, &data, &resident);
54765 mmput(mm);
54766@@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54767 return 0;
54768 }
54769
54770+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54771+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
54772+{
54773+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
54774+}
54775+#endif
54776+
54777 #ifdef CONFIG_CHECKPOINT_RESTORE
54778 static struct pid *
54779 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
54780diff --git a/fs/proc/base.c b/fs/proc/base.c
54781index 9b43ff77..0fa9564 100644
54782--- a/fs/proc/base.c
54783+++ b/fs/proc/base.c
54784@@ -111,6 +111,14 @@ struct pid_entry {
54785 union proc_op op;
54786 };
54787
54788+struct getdents_callback {
54789+ struct linux_dirent __user * current_dir;
54790+ struct linux_dirent __user * previous;
54791+ struct file * file;
54792+ int count;
54793+ int error;
54794+};
54795+
54796 #define NOD(NAME, MODE, IOP, FOP, OP) { \
54797 .name = (NAME), \
54798 .len = sizeof(NAME) - 1, \
54799@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
54800 if (!mm->arg_end)
54801 goto out_mm; /* Shh! No looking before we're done */
54802
54803+ if (gr_acl_handle_procpidmem(task))
54804+ goto out_mm;
54805+
54806 len = mm->arg_end - mm->arg_start;
54807
54808 if (len > PAGE_SIZE)
54809@@ -235,12 +246,28 @@ out:
54810 return res;
54811 }
54812
54813+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54814+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54815+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54816+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54817+#endif
54818+
54819 static int proc_pid_auxv(struct task_struct *task, char *buffer)
54820 {
54821 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
54822 int res = PTR_ERR(mm);
54823 if (mm && !IS_ERR(mm)) {
54824 unsigned int nwords = 0;
54825+
54826+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54827+ /* allow if we're currently ptracing this task */
54828+ if (PAX_RAND_FLAGS(mm) &&
54829+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
54830+ mmput(mm);
54831+ return 0;
54832+ }
54833+#endif
54834+
54835 do {
54836 nwords += 2;
54837 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
54838@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
54839 }
54840
54841
54842-#ifdef CONFIG_KALLSYMS
54843+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54844 /*
54845 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
54846 * Returns the resolved symbol. If that fails, simply return the address.
54847@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
54848 mutex_unlock(&task->signal->cred_guard_mutex);
54849 }
54850
54851-#ifdef CONFIG_STACKTRACE
54852+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54853
54854 #define MAX_STACK_TRACE_DEPTH 64
54855
54856@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
54857 return count;
54858 }
54859
54860-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54861+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54862 static int proc_pid_syscall(struct task_struct *task, char *buffer)
54863 {
54864 long nr;
54865@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
54866 /************************************************************************/
54867
54868 /* permission checks */
54869-static int proc_fd_access_allowed(struct inode *inode)
54870+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
54871 {
54872 struct task_struct *task;
54873 int allowed = 0;
54874@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
54875 */
54876 task = get_proc_task(inode);
54877 if (task) {
54878- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54879+ if (log)
54880+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54881+ else
54882+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54883 put_task_struct(task);
54884 }
54885 return allowed;
54886@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
54887 struct task_struct *task,
54888 int hide_pid_min)
54889 {
54890+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54891+ return false;
54892+
54893+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54894+ rcu_read_lock();
54895+ {
54896+ const struct cred *tmpcred = current_cred();
54897+ const struct cred *cred = __task_cred(task);
54898+
54899+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
54900+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54901+ || in_group_p(grsec_proc_gid)
54902+#endif
54903+ ) {
54904+ rcu_read_unlock();
54905+ return true;
54906+ }
54907+ }
54908+ rcu_read_unlock();
54909+
54910+ if (!pid->hide_pid)
54911+ return false;
54912+#endif
54913+
54914 if (pid->hide_pid < hide_pid_min)
54915 return true;
54916 if (in_group_p(pid->pid_gid))
54917 return true;
54918+
54919 return ptrace_may_access(task, PTRACE_MODE_READ);
54920 }
54921
54922@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
54923 put_task_struct(task);
54924
54925 if (!has_perms) {
54926+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54927+ {
54928+#else
54929 if (pid->hide_pid == 2) {
54930+#endif
54931 /*
54932 * Let's make getdents(), stat(), and open()
54933 * consistent with each other. If a process
54934@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54935 if (!task)
54936 return -ESRCH;
54937
54938+ if (gr_acl_handle_procpidmem(task)) {
54939+ put_task_struct(task);
54940+ return -EPERM;
54941+ }
54942+
54943 mm = mm_access(task, mode);
54944 put_task_struct(task);
54945
54946@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54947
54948 file->private_data = mm;
54949
54950+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54951+ file->f_version = current->exec_id;
54952+#endif
54953+
54954 return 0;
54955 }
54956
54957@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54958 ssize_t copied;
54959 char *page;
54960
54961+#ifdef CONFIG_GRKERNSEC
54962+ if (write)
54963+ return -EPERM;
54964+#endif
54965+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54966+ if (file->f_version != current->exec_id) {
54967+ gr_log_badprocpid("mem");
54968+ return 0;
54969+ }
54970+#endif
54971+
54972 if (!mm)
54973 return 0;
54974
54975@@ -722,7 +801,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54976 goto free;
54977
54978 while (count > 0) {
54979- int this_len = min_t(int, count, PAGE_SIZE);
54980+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
54981
54982 if (write && copy_from_user(page, buf, this_len)) {
54983 copied = -EFAULT;
54984@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54985 if (!mm)
54986 return 0;
54987
54988+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54989+ if (file->f_version != current->exec_id) {
54990+ gr_log_badprocpid("environ");
54991+ return 0;
54992+ }
54993+#endif
54994+
54995 page = (char *)__get_free_page(GFP_TEMPORARY);
54996 if (!page)
54997 return -ENOMEM;
54998@@ -823,7 +909,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54999 goto free;
55000 while (count > 0) {
55001 size_t this_len, max_len;
55002- int retval;
55003+ ssize_t retval;
55004
55005 if (src >= (mm->env_end - mm->env_start))
55006 break;
55007@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
55008 int error = -EACCES;
55009
55010 /* Are we allowed to snoop on the tasks file descriptors? */
55011- if (!proc_fd_access_allowed(inode))
55012+ if (!proc_fd_access_allowed(inode, 0))
55013 goto out;
55014
55015 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55016@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
55017 struct path path;
55018
55019 /* Are we allowed to snoop on the tasks file descriptors? */
55020- if (!proc_fd_access_allowed(inode))
55021- goto out;
55022+	/* logging this is needed for learning mode on chromium to work
55023+	   properly, but we don't want to flood the logs from 'ps', which does
55024+	   a readlink on /proc/<pid>/fd/2 of every task in the listing, nor do
55025+	   we want 'ps' to learn CAP_SYS_PTRACE, as it's not necessary for its
55026+	   basic functionality */
55027+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
55028+	if (!proc_fd_access_allowed(inode, 0))
55029+ goto out;
55030+ } else {
55031+	if (!proc_fd_access_allowed(inode, 1))
55032+ goto out;
55033+ }
55034
55035 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55036 if (error)
55037@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
55038 rcu_read_lock();
55039 cred = __task_cred(task);
55040 inode->i_uid = cred->euid;
55041+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55042+ inode->i_gid = grsec_proc_gid;
55043+#else
55044 inode->i_gid = cred->egid;
55045+#endif
55046 rcu_read_unlock();
55047 }
55048 security_task_to_inode(task, inode);
55049@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55050 return -ENOENT;
55051 }
55052 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55053+#ifdef CONFIG_GRKERNSEC_PROC_USER
55054+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55055+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55056+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55057+#endif
55058 task_dumpable(task)) {
55059 cred = __task_cred(task);
55060 stat->uid = cred->euid;
55061+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55062+ stat->gid = grsec_proc_gid;
55063+#else
55064 stat->gid = cred->egid;
55065+#endif
55066 }
55067 }
55068 rcu_read_unlock();
55069@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
55070
55071 if (task) {
55072 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55073+#ifdef CONFIG_GRKERNSEC_PROC_USER
55074+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55075+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55076+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55077+#endif
55078 task_dumpable(task)) {
55079 rcu_read_lock();
55080 cred = __task_cred(task);
55081 inode->i_uid = cred->euid;
55082+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55083+ inode->i_gid = grsec_proc_gid;
55084+#else
55085 inode->i_gid = cred->egid;
55086+#endif
55087 rcu_read_unlock();
55088 } else {
55089 inode->i_uid = GLOBAL_ROOT_UID;
55090@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
55091 if (!task)
55092 goto out_no_task;
55093
55094+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55095+ goto out;
55096+
55097 /*
55098 * Yes, it does not scale. And it should not. Don't add
55099 * new entries into /proc/<tgid>/ without very good reasons.
55100@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
55101 if (!task)
55102 goto out_no_task;
55103
55104+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55105+ goto out;
55106+
55107 ret = 0;
55108 i = filp->f_pos;
55109 switch (i) {
55110@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
55111 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
55112 #endif
55113 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55114-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55115+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55116 INF("syscall", S_IRUGO, proc_pid_syscall),
55117 #endif
55118 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55119@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
55120 #ifdef CONFIG_SECURITY
55121 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55122 #endif
55123-#ifdef CONFIG_KALLSYMS
55124+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55125 INF("wchan", S_IRUGO, proc_pid_wchan),
55126 #endif
55127-#ifdef CONFIG_STACKTRACE
55128+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55129 ONE("stack", S_IRUGO, proc_pid_stack),
55130 #endif
55131 #ifdef CONFIG_SCHEDSTATS
55132@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
55133 #ifdef CONFIG_HARDWALL
55134 INF("hardwall", S_IRUGO, proc_pid_hardwall),
55135 #endif
55136+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55137+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
55138+#endif
55139 #ifdef CONFIG_USER_NS
55140 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
55141 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
55142@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
55143 if (!inode)
55144 goto out;
55145
55146+#ifdef CONFIG_GRKERNSEC_PROC_USER
55147+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
55148+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55149+ inode->i_gid = grsec_proc_gid;
55150+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
55151+#else
55152 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
55153+#endif
55154 inode->i_op = &proc_tgid_base_inode_operations;
55155 inode->i_fop = &proc_tgid_base_operations;
55156 inode->i_flags|=S_IMMUTABLE;
55157@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
55158 if (!task)
55159 goto out;
55160
55161+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55162+ goto out_put_task;
55163+
55164 result = proc_pid_instantiate(dir, dentry, task, NULL);
55165+out_put_task:
55166 put_task_struct(task);
55167 out:
55168 return result;
55169@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
55170 static int fake_filldir(void *buf, const char *name, int namelen,
55171 loff_t offset, u64 ino, unsigned d_type)
55172 {
55173+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
55174+ __buf->error = -EINVAL;
55175 return 0;
55176 }
55177
55178@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
55179 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
55180 #endif
55181 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55182-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55183+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55184 INF("syscall", S_IRUGO, proc_pid_syscall),
55185 #endif
55186 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55187@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
55188 #ifdef CONFIG_SECURITY
55189 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55190 #endif
55191-#ifdef CONFIG_KALLSYMS
55192+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55193 INF("wchan", S_IRUGO, proc_pid_wchan),
55194 #endif
55195-#ifdef CONFIG_STACKTRACE
55196+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55197 ONE("stack", S_IRUGO, proc_pid_stack),
55198 #endif
55199 #ifdef CONFIG_SCHEDSTATS
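
The has_pid_permissions() changes above tighten /proc/<pid> visibility: entries for chrooted or hidden tasks vanish outright, and under PROC_USER/PROC_USERGROUP only root, the task's owner, and the configured proc group get to see other processes before the ptrace fallback. A simplified sketch of that predicate (proc_gid mirrors the patch's grsec_proc_gid; the ptrace fallback is elided):

    #include <stdbool.h>
    #include <sys/types.h>

    /* Root, the owner, and members of the configured proc group may
     * look; everyone else would fall through to the ptrace check. */
    static bool pid_visible(uid_t viewer_uid, gid_t viewer_gid,
                            uid_t task_uid, gid_t proc_gid)
    {
        return viewer_uid == 0 || viewer_uid == task_uid ||
               viewer_gid == proc_gid;
    }
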
55200diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
55201index 82676e3..5f8518a 100644
55202--- a/fs/proc/cmdline.c
55203+++ b/fs/proc/cmdline.c
55204@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
55205
55206 static int __init proc_cmdline_init(void)
55207 {
55208+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55209+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
55210+#else
55211 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
55212+#endif
55213 return 0;
55214 }
55215 module_init(proc_cmdline_init);
55216diff --git a/fs/proc/devices.c b/fs/proc/devices.c
55217index b143471..bb105e5 100644
55218--- a/fs/proc/devices.c
55219+++ b/fs/proc/devices.c
55220@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
55221
55222 static int __init proc_devices_init(void)
55223 {
55224+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55225+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
55226+#else
55227 proc_create("devices", 0, NULL, &proc_devinfo_operations);
55228+#endif
55229 return 0;
55230 }
55231 module_init(proc_devices_init);
55232diff --git a/fs/proc/fd.c b/fs/proc/fd.c
55233index d7a4a28..0201742 100644
55234--- a/fs/proc/fd.c
55235+++ b/fs/proc/fd.c
55236@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
55237 if (!task)
55238 return -ENOENT;
55239
55240- files = get_files_struct(task);
55241+ if (!gr_acl_handle_procpidmem(task))
55242+ files = get_files_struct(task);
55243 put_task_struct(task);
55244
55245 if (files) {
55246@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
55247 */
55248 int proc_fd_permission(struct inode *inode, int mask)
55249 {
55250+ struct task_struct *task;
55251 int rv = generic_permission(inode, mask);
55252- if (rv == 0)
55253- return 0;
55254+
55255 if (task_pid(current) == proc_pid(inode))
55256 rv = 0;
55257+
55258+ task = get_proc_task(inode);
55259+ if (task == NULL)
55260+ return rv;
55261+
55262+ if (gr_acl_handle_procpidmem(task))
55263+ rv = -EACCES;
55264+
55265+ put_task_struct(task);
55266+
55267 return rv;
55268 }
55269
55270diff --git a/fs/proc/inode.c b/fs/proc/inode.c
55271index 0ac1e1b..0497e58 100644
55272--- a/fs/proc/inode.c
55273+++ b/fs/proc/inode.c
55274@@ -21,11 +21,17 @@
55275 #include <linux/seq_file.h>
55276 #include <linux/slab.h>
55277 #include <linux/mount.h>
55278+#include <linux/grsecurity.h>
55279
55280 #include <asm/uaccess.h>
55281
55282 #include "internal.h"
55283
55284+#ifdef CONFIG_PROC_SYSCTL
55285+extern const struct inode_operations proc_sys_inode_operations;
55286+extern const struct inode_operations proc_sys_dir_operations;
55287+#endif
55288+
55289 static void proc_evict_inode(struct inode *inode)
55290 {
55291 struct proc_dir_entry *de;
55292@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
55293 ns = PROC_I(inode)->ns;
55294 if (ns_ops && ns)
55295 ns_ops->put(ns);
55296+
55297+#ifdef CONFIG_PROC_SYSCTL
55298+ if (inode->i_op == &proc_sys_inode_operations ||
55299+ inode->i_op == &proc_sys_dir_operations)
55300+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
55301+#endif
55302+
55303 }
55304
55305 static struct kmem_cache * proc_inode_cachep;
55306@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
55307 if (de->mode) {
55308 inode->i_mode = de->mode;
55309 inode->i_uid = de->uid;
55310+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55311+ inode->i_gid = grsec_proc_gid;
55312+#else
55313 inode->i_gid = de->gid;
55314+#endif
55315 }
55316 if (de->size)
55317 inode->i_size = de->size;
55318diff --git a/fs/proc/internal.h b/fs/proc/internal.h
55319index 252544c..04395b9 100644
55320--- a/fs/proc/internal.h
55321+++ b/fs/proc/internal.h
55322@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55323 struct pid *pid, struct task_struct *task);
55324 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55325 struct pid *pid, struct task_struct *task);
55326+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55327+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
55328+#endif
55329 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
55330
55331 extern const struct file_operations proc_tid_children_operations;
55332diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
55333index e96d4f1..8b116ed 100644
55334--- a/fs/proc/kcore.c
55335+++ b/fs/proc/kcore.c
55336@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55337 * the addresses in the elf_phdr on our list.
55338 */
55339 start = kc_offset_to_vaddr(*fpos - elf_buflen);
55340- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
55341+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
55342+ if (tsz > buflen)
55343 tsz = buflen;
55344-
55345+
55346 while (buflen) {
55347 struct kcore_list *m;
55348
55349@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55350 kfree(elf_buf);
55351 } else {
55352 if (kern_addr_valid(start)) {
55353- unsigned long n;
55354+ char *elf_buf;
55355+ mm_segment_t oldfs;
55356
55357- n = copy_to_user(buffer, (char *)start, tsz);
55358- /*
55359- * We cannot distinguish between fault on source
55360- * and fault on destination. When this happens
55361- * we clear too and hope it will trigger the
55362- * EFAULT again.
55363- */
55364- if (n) {
55365- if (clear_user(buffer + tsz - n,
55366- n))
55367+ elf_buf = kmalloc(tsz, GFP_KERNEL);
55368+ if (!elf_buf)
55369+ return -ENOMEM;
55370+ oldfs = get_fs();
55371+ set_fs(KERNEL_DS);
55372+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
55373+ set_fs(oldfs);
55374+ if (copy_to_user(buffer, elf_buf, tsz)) {
55375+ kfree(elf_buf);
55376 return -EFAULT;
55377+ }
55378 }
55379+ set_fs(oldfs);
55380+ kfree(elf_buf);
55381 } else {
55382 if (clear_user(buffer, tsz))
55383 return -EFAULT;
55384@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55385
55386 static int open_kcore(struct inode *inode, struct file *filp)
55387 {
55388+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55389+ return -EPERM;
55390+#endif
55391 if (!capable(CAP_SYS_RAWIO))
55392 return -EPERM;
55393 if (kcore_need_update)
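
The read_kcore() rewrite above replaces a direct copy_to_user() from the kernel address with a kmalloc'd bounce buffer: the bytes are first pulled into the buffer under set_fs(KERNEL_DS), then copied out to the caller, so a fault while reading the source is no longer conflated with a fault on the user destination (the ambiguity the deleted comment apologized for). The bounce-buffer pattern in plain, compilable C (the two copy helpers are hypothetical stand-ins for the fallible kernel copies):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-ins for __copy_from_user()/copy_to_user(); 0 means success. */
    static int copy_in(void *dst, const void *src, size_t n)  { memcpy(dst, src, n); return 0; }
    static int copy_out(void *dst, const void *src, size_t n) { memcpy(dst, src, n); return 0; }

    /* Staging through a private buffer splits one ambiguous fault into
     * two distinguishable ones: source-side and destination-side. */
    static int bounce_read(void *user_dst, const void *kernel_src, size_t n)
    {
        char *buf = malloc(n);
        if (!buf)
            return -ENOMEM;
        if (copy_in(buf, kernel_src, n)) {      /* fault reading the source */
            free(buf);
            return -EFAULT;
        }
        if (copy_out(user_dst, buf, n)) {       /* fault writing the destination */
            free(buf);
            return -EFAULT;
        }
        free(buf);
        return 0;
    }

    int main(void)
    {
        const char src[] = "kernel bytes";
        char dst[sizeof src];
        return bounce_read(dst, src, sizeof src) ? 1 : 0;
    }
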
55394diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
55395index 80e4645..53e5fcf 100644
55396--- a/fs/proc/meminfo.c
55397+++ b/fs/proc/meminfo.c
55398@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
55399 vmi.used >> 10,
55400 vmi.largest_chunk >> 10
55401 #ifdef CONFIG_MEMORY_FAILURE
55402- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
55403+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
55404 #endif
55405 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
55406 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
55407diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
55408index b1822dd..df622cb 100644
55409--- a/fs/proc/nommu.c
55410+++ b/fs/proc/nommu.c
55411@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
55412 if (len < 1)
55413 len = 1;
55414 seq_printf(m, "%*c", len, ' ');
55415- seq_path(m, &file->f_path, "");
55416+ seq_path(m, &file->f_path, "\n\\");
55417 }
55418
55419 seq_putc(m, '\n');
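
The small-looking change above ("" becomes "\n\\" as seq_path()'s escape set) matters because /proc map listings are line-oriented: a file name containing a raw newline could otherwise inject a forged record into output that tools parse line by line. With the escape set, such bytes are emitted as octal escapes instead. The idea, reduced to a compilable toy (this is not the kernel's mangle_path()):

    #include <stdio.h>
    #include <string.h>

    /* Emit every byte listed in `esc` as \ooo so one record stays one line. */
    static void print_escaped(const char *s, const char *esc)
    {
        for (; *s; s++) {
            if (strchr(esc, *s))
                printf("\\%03o", (unsigned)(unsigned char)*s);
            else
                putchar(*s);
        }
        putchar('\n');
    }

    int main(void)
    {
        /* A newline inside the name can no longer fake a second record. */
        print_escaped("/tmp/evil\nfake line", "\n\\");
        return 0;
    }
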
55420diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
55421index fe72cd0..21b52ff 100644
55422--- a/fs/proc/proc_net.c
55423+++ b/fs/proc/proc_net.c
55424@@ -23,6 +23,7 @@
55425 #include <linux/nsproxy.h>
55426 #include <net/net_namespace.h>
55427 #include <linux/seq_file.h>
55428+#include <linux/grsecurity.h>
55429
55430 #include "internal.h"
55431
55432@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
55433 struct task_struct *task;
55434 struct nsproxy *ns;
55435 struct net *net = NULL;
55436+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55437+ const struct cred *cred = current_cred();
55438+#endif
55439+
55440+#ifdef CONFIG_GRKERNSEC_PROC_USER
55441+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
55442+ return net;
55443+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55444+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
55445+ return net;
55446+#endif
55447
55448 rcu_read_lock();
55449 task = pid_task(proc_pid(dir), PIDTYPE_PID);
55450diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
55451index 1827d88..43b0279 100644
55452--- a/fs/proc/proc_sysctl.c
55453+++ b/fs/proc/proc_sysctl.c
55454@@ -12,11 +12,15 @@
55455 #include <linux/module.h>
55456 #include "internal.h"
55457
55458+extern int gr_handle_chroot_sysctl(const int op);
55459+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
55460+ const int op);
55461+
55462 static const struct dentry_operations proc_sys_dentry_operations;
55463 static const struct file_operations proc_sys_file_operations;
55464-static const struct inode_operations proc_sys_inode_operations;
55465+const struct inode_operations proc_sys_inode_operations;
55466 static const struct file_operations proc_sys_dir_file_operations;
55467-static const struct inode_operations proc_sys_dir_operations;
55468+const struct inode_operations proc_sys_dir_operations;
55469
55470 void proc_sys_poll_notify(struct ctl_table_poll *poll)
55471 {
55472@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
55473
55474 err = NULL;
55475 d_set_d_op(dentry, &proc_sys_dentry_operations);
55476+
55477+ gr_handle_proc_create(dentry, inode);
55478+
55479 d_add(dentry, inode);
55480
55481 out:
55482@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55483 struct inode *inode = filp->f_path.dentry->d_inode;
55484 struct ctl_table_header *head = grab_header(inode);
55485 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
55486+ int op = write ? MAY_WRITE : MAY_READ;
55487 ssize_t error;
55488 size_t res;
55489
55490@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55491 * and won't be until we finish.
55492 */
55493 error = -EPERM;
55494- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
55495+ if (sysctl_perm(head, table, op))
55496 goto out;
55497
55498 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
55499@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55500 if (!table->proc_handler)
55501 goto out;
55502
55503+#ifdef CONFIG_GRKERNSEC
55504+ error = -EPERM;
55505+ if (gr_handle_chroot_sysctl(op))
55506+ goto out;
55507+ dget(filp->f_path.dentry);
55508+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
55509+ dput(filp->f_path.dentry);
55510+ goto out;
55511+ }
55512+ dput(filp->f_path.dentry);
55513+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
55514+ goto out;
55515+ if (write && !capable(CAP_SYS_ADMIN))
55516+ goto out;
55517+#endif
55518+
55519 /* careful: calling conventions are nasty here */
55520 res = count;
55521 error = table->proc_handler(table, write, buf, &res, ppos);
55522@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
55523 return -ENOMEM;
55524 } else {
55525 d_set_d_op(child, &proc_sys_dentry_operations);
55526+
55527+ gr_handle_proc_create(child, inode);
55528+
55529 d_add(child, inode);
55530 }
55531 } else {
55532@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
55533 if ((*pos)++ < file->f_pos)
55534 return 0;
55535
55536+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
55537+ return 0;
55538+
55539 if (unlikely(S_ISLNK(table->mode)))
55540 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
55541 else
55542@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
55543 if (IS_ERR(head))
55544 return PTR_ERR(head);
55545
55546+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
55547+ return -ENOENT;
55548+
55549 generic_fillattr(inode, stat);
55550 if (table)
55551 stat->mode = (stat->mode & S_IFMT) | table->mode;
55552@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
55553 .llseek = generic_file_llseek,
55554 };
55555
55556-static const struct inode_operations proc_sys_inode_operations = {
55557+const struct inode_operations proc_sys_inode_operations = {
55558 .permission = proc_sys_permission,
55559 .setattr = proc_sys_setattr,
55560 .getattr = proc_sys_getattr,
55561 };
55562
55563-static const struct inode_operations proc_sys_dir_operations = {
55564+const struct inode_operations proc_sys_dir_operations = {
55565 .lookup = proc_sys_lookup,
55566 .permission = proc_sys_permission,
55567 .setattr = proc_sys_setattr,
55568@@ -854,7 +887,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
55569 static struct ctl_dir *new_dir(struct ctl_table_set *set,
55570 const char *name, int namelen)
55571 {
55572- struct ctl_table *table;
55573+ ctl_table_no_const *table;
55574 struct ctl_dir *new;
55575 struct ctl_node *node;
55576 char *new_name;
55577@@ -866,7 +899,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
55578 return NULL;
55579
55580 node = (struct ctl_node *)(new + 1);
55581- table = (struct ctl_table *)(node + 1);
55582+ table = (ctl_table_no_const *)(node + 1);
55583 new_name = (char *)(table + 2);
55584 memcpy(new_name, name, namelen);
55585 new_name[namelen] = '\0';
55586@@ -1035,7 +1068,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
55587 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
55588 struct ctl_table_root *link_root)
55589 {
55590- struct ctl_table *link_table, *entry, *link;
55591+ ctl_table_no_const *link_table, *link;
55592+ struct ctl_table *entry;
55593 struct ctl_table_header *links;
55594 struct ctl_node *node;
55595 char *link_name;
55596@@ -1058,7 +1092,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
55597 return NULL;
55598
55599 node = (struct ctl_node *)(links + 1);
55600- link_table = (struct ctl_table *)(node + nr_entries);
55601+ link_table = (ctl_table_no_const *)(node + nr_entries);
55602 link_name = (char *)&link_table[nr_entries + 1];
55603
55604 for (link = link_table, entry = table; entry->procname; link++, entry++) {
55605@@ -1306,8 +1340,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55606 struct ctl_table_header ***subheader, struct ctl_table_set *set,
55607 struct ctl_table *table)
55608 {
55609- struct ctl_table *ctl_table_arg = NULL;
55610- struct ctl_table *entry, *files;
55611+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
55612+ struct ctl_table *entry;
55613 int nr_files = 0;
55614 int nr_dirs = 0;
55615 int err = -ENOMEM;
55616@@ -1319,10 +1353,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55617 nr_files++;
55618 }
55619
55620- files = table;
55621 /* If there are mixed files and directories we need a new table */
55622 if (nr_dirs && nr_files) {
55623- struct ctl_table *new;
55624+ ctl_table_no_const *new;
55625 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
55626 GFP_KERNEL);
55627 if (!files)
55628@@ -1340,7 +1373,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55629 /* Register everything except a directory full of subdirectories */
55630 if (nr_files || !nr_dirs) {
55631 struct ctl_table_header *header;
55632- header = __register_sysctl_table(set, path, files);
55633+ header = __register_sysctl_table(set, path, files ? files : table);
55634 if (!header) {
55635 kfree(ctl_table_arg);
55636 goto out;
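
Two independent hardenings run through the proc_sysctl.c hunks above: the read/write handler grows grsecurity policy gates (a chroot check, a per-name modification check, an open-time ACL check, and a CAP_SYS_ADMIN requirement for writes), and several locally built tables switch from struct ctl_table to ctl_table_no_const. The latter exists because the PaX constify plugin makes such structure types read-only after initialization, so the few call sites that legitimately assemble tables at runtime need an explicitly non-constified alias. The shape of that opt-out, approximated with a typedef pair (the kernel does this with a compiler plugin, not plain typedefs):

    #include <stdio.h>

    struct ctl_entry { const char *procname; unsigned mode; };

    /* Under constification, ordinary declarations act as if read-only... */
    typedef const struct ctl_entry ctl_entry_const;
    /* ...so runtime construction sites opt out via a non-const alias. */
    typedef struct ctl_entry ctl_entry_no_const;

    int main(void)
    {
        ctl_entry_no_const e;        /* runtime-built entry: writable */
        e.procname = "demo";
        e.mode = 0644;

        ctl_entry_const frozen = e;  /* published view: writes rejected */
        /* frozen.mode = 0600;          would not compile */
        printf("%s %o\n", frozen.procname, frozen.mode);
        return 0;
    }
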
55637diff --git a/fs/proc/root.c b/fs/proc/root.c
55638index 9c7fab1..ed1c8e0 100644
55639--- a/fs/proc/root.c
55640+++ b/fs/proc/root.c
55641@@ -180,7 +180,15 @@ void __init proc_root_init(void)
55642 #ifdef CONFIG_PROC_DEVICETREE
55643 proc_device_tree_init();
55644 #endif
55645+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55646+#ifdef CONFIG_GRKERNSEC_PROC_USER
55647+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
55648+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55649+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
55650+#endif
55651+#else
55652 proc_mkdir("bus", NULL);
55653+#endif
55654 proc_sys_init();
55655 }
55656
55657diff --git a/fs/proc/self.c b/fs/proc/self.c
55658index aa5cc3b..c91a5d0 100644
55659--- a/fs/proc/self.c
55660+++ b/fs/proc/self.c
55661@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
55662 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
55663 void *cookie)
55664 {
55665- char *s = nd_get_link(nd);
55666+ const char *s = nd_get_link(nd);
55667 if (!IS_ERR(s))
55668 kfree(s);
55669 }
55670diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
55671index ca5ce7f..02c1cf0 100644
55672--- a/fs/proc/task_mmu.c
55673+++ b/fs/proc/task_mmu.c
55674@@ -11,12 +11,19 @@
55675 #include <linux/rmap.h>
55676 #include <linux/swap.h>
55677 #include <linux/swapops.h>
55678+#include <linux/grsecurity.h>
55679
55680 #include <asm/elf.h>
55681 #include <asm/uaccess.h>
55682 #include <asm/tlbflush.h>
55683 #include "internal.h"
55684
55685+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55686+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55687+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55688+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55689+#endif
55690+
55691 void task_mem(struct seq_file *m, struct mm_struct *mm)
55692 {
55693 unsigned long data, text, lib, swap;
55694@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55695 "VmExe:\t%8lu kB\n"
55696 "VmLib:\t%8lu kB\n"
55697 "VmPTE:\t%8lu kB\n"
55698- "VmSwap:\t%8lu kB\n",
55699- hiwater_vm << (PAGE_SHIFT-10),
55700+ "VmSwap:\t%8lu kB\n"
55701+
55702+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55703+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
55704+#endif
55705+
55706+ ,hiwater_vm << (PAGE_SHIFT-10),
55707 total_vm << (PAGE_SHIFT-10),
55708 mm->locked_vm << (PAGE_SHIFT-10),
55709 mm->pinned_vm << (PAGE_SHIFT-10),
55710@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55711 data << (PAGE_SHIFT-10),
55712 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
55713 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
55714- swap << (PAGE_SHIFT-10));
55715+ swap << (PAGE_SHIFT-10)
55716+
55717+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55718+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55719+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
55720+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
55721+#else
55722+ , mm->context.user_cs_base
55723+ , mm->context.user_cs_limit
55724+#endif
55725+#endif
55726+
55727+ );
55728 }
55729
55730 unsigned long task_vsize(struct mm_struct *mm)
55731@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55732 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
55733 }
55734
55735- /* We don't show the stack guard page in /proc/maps */
55736+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55737+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
55738+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
55739+#else
55740 start = vma->vm_start;
55741- if (stack_guard_page_start(vma, start))
55742- start += PAGE_SIZE;
55743 end = vma->vm_end;
55744- if (stack_guard_page_end(vma, end))
55745- end -= PAGE_SIZE;
55746+#endif
55747
55748 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
55749 start,
55750@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55751 flags & VM_WRITE ? 'w' : '-',
55752 flags & VM_EXEC ? 'x' : '-',
55753 flags & VM_MAYSHARE ? 's' : 'p',
55754+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55755+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
55756+#else
55757 pgoff,
55758+#endif
55759 MAJOR(dev), MINOR(dev), ino, &len);
55760
55761 /*
55762@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55763 */
55764 if (file) {
55765 pad_len_spaces(m, len);
55766- seq_path(m, &file->f_path, "\n");
55767+ seq_path(m, &file->f_path, "\n\\");
55768 goto done;
55769 }
55770
55771@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55772 * Thread stack in /proc/PID/task/TID/maps or
55773 * the main process stack.
55774 */
55775- if (!is_pid || (vma->vm_start <= mm->start_stack &&
55776- vma->vm_end >= mm->start_stack)) {
55777+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
55778+ (vma->vm_start <= mm->start_stack &&
55779+ vma->vm_end >= mm->start_stack)) {
55780 name = "[stack]";
55781 } else {
55782 /* Thread stack in /proc/PID/maps */
55783@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
55784 struct proc_maps_private *priv = m->private;
55785 struct task_struct *task = priv->task;
55786
55787+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55788+ if (current->exec_id != m->exec_id) {
55789+ gr_log_badprocpid("maps");
55790+ return 0;
55791+ }
55792+#endif
55793+
55794 show_map_vma(m, vma, is_pid);
55795
55796 if (m->count < m->size) /* vma is copied successfully */
55797@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55798 .private = &mss,
55799 };
55800
55801+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55802+ if (current->exec_id != m->exec_id) {
55803+ gr_log_badprocpid("smaps");
55804+ return 0;
55805+ }
55806+#endif
55807 memset(&mss, 0, sizeof mss);
55808- mss.vma = vma;
55809- /* mmap_sem is held in m_start */
55810- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55811- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55812-
55813+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55814+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
55815+#endif
55816+ mss.vma = vma;
55817+ /* mmap_sem is held in m_start */
55818+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55819+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55820+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55821+ }
55822+#endif
55823 show_map_vma(m, vma, is_pid);
55824
55825 seq_printf(m,
55826@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55827 "KernelPageSize: %8lu kB\n"
55828 "MMUPageSize: %8lu kB\n"
55829 "Locked: %8lu kB\n",
55830+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55831+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
55832+#else
55833 (vma->vm_end - vma->vm_start) >> 10,
55834+#endif
55835 mss.resident >> 10,
55836 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
55837 mss.shared_clean >> 10,
55838@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55839 int n;
55840 char buffer[50];
55841
55842+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55843+ if (current->exec_id != m->exec_id) {
55844+ gr_log_badprocpid("numa_maps");
55845+ return 0;
55846+ }
55847+#endif
55848+
55849 if (!mm)
55850 return 0;
55851
55852@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55853 mpol_to_str(buffer, sizeof(buffer), pol);
55854 mpol_cond_put(pol);
55855
55856+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55857+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
55858+#else
55859 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
55860+#endif
55861
55862 if (file) {
55863 seq_printf(m, " file=");
55864- seq_path(m, &file->f_path, "\n\t= ");
55865+ seq_path(m, &file->f_path, "\n\t\\= ");
55866 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
55867 seq_printf(m, " heap");
55868 } else {
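
The PAX_RAND_FLAGS() guard defined at the top of this file recurs through every hunk above: when a randomized mm is inspected by anyone other than the task itself, the start/end addresses, file offset, and size fields in maps/smaps/numa_maps are reported as zero, and the exec_id check additionally empties output read through a seq_file opened by a different program image (and logs the attempt). Permissions and backing paths stay visible; only layout is withheld. Schematically (a compilable toy, not the kernel's seq_file code):

    #include <stdbool.h>
    #include <stdio.h>

    struct vma { unsigned long start, end, pgoff; const char *perms; };

    /* Zero every address-revealing field for a foreign, randomized mm. */
    static void show_map_line(const struct vma *v, bool foreign_randomized)
    {
        unsigned long start = foreign_randomized ? 0UL : v->start;
        unsigned long end   = foreign_randomized ? 0UL : v->end;
        unsigned long off   = foreign_randomized ? 0UL : v->pgoff;
        printf("%08lx-%08lx %s %08lx\n", start, end, v->perms, off);
    }

    int main(void)
    {
        struct vma v = { 0x08048000UL, 0x08069000UL, 0x21000UL, "r-xp" };
        show_map_line(&v, false);  /* own task: real addresses      */
        show_map_line(&v, true);   /* other task + ASLR: all zeroed */
        return 0;
    }
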
55869diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
55870index 1ccfa53..0848f95 100644
55871--- a/fs/proc/task_nommu.c
55872+++ b/fs/proc/task_nommu.c
55873@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55874 else
55875 bytes += kobjsize(mm);
55876
55877- if (current->fs && current->fs->users > 1)
55878+ if (current->fs && atomic_read(&current->fs->users) > 1)
55879 sbytes += kobjsize(current->fs);
55880 else
55881 bytes += kobjsize(current->fs);
55882@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
55883
55884 if (file) {
55885 pad_len_spaces(m, len);
55886- seq_path(m, &file->f_path, "");
55887+ seq_path(m, &file->f_path, "\n\\");
55888 } else if (mm) {
55889 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
55890
55891diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
55892index b00fcc9..e0c6381 100644
55893--- a/fs/qnx6/qnx6.h
55894+++ b/fs/qnx6/qnx6.h
55895@@ -74,7 +74,7 @@ enum {
55896 BYTESEX_BE,
55897 };
55898
55899-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55900+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55901 {
55902 if (sbi->s_bytesex == BYTESEX_LE)
55903 return le64_to_cpu((__force __le64)n);
55904@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
55905 return (__force __fs64)cpu_to_be64(n);
55906 }
55907
55908-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55909+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55910 {
55911 if (sbi->s_bytesex == BYTESEX_LE)
55912 return le32_to_cpu((__force __le32)n);
55913diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
55914index 16e8abb..2dcf914 100644
55915--- a/fs/quota/netlink.c
55916+++ b/fs/quota/netlink.c
55917@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
55918 void quota_send_warning(struct kqid qid, dev_t dev,
55919 const char warntype)
55920 {
55921- static atomic_t seq;
55922+ static atomic_unchecked_t seq;
55923 struct sk_buff *skb;
55924 void *msg_head;
55925 int ret;
55926@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
55927 "VFS: Not enough memory to send quota warning.\n");
55928 return;
55929 }
55930- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
55931+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
55932 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
55933 if (!msg_head) {
55934 printk(KERN_ERR
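
The atomic_t to atomic_unchecked_t switch here (and in the reiserfs and sysfs hunks below) marks counters that are allowed to wrap. Under PaX's REFCOUNT hardening, arithmetic on plain atomic_t traps on overflow to stop reference-count-overflow exploits; a netlink sequence number or filesystem generation counter wraps by design, so it is moved to the unchecked variant instead of producing false positives. The behavioral difference, illustrated in userspace with a GCC/Clang builtin (not the kernel types):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* "Checked" add: trap on overflow, as REFCOUNT-hardened atomic_t does. */
    static int checked_add(int v, int d)
    {
        int r;
        if (__builtin_add_overflow(v, d, &r)) {
            fprintf(stderr, "overflow detected, aborting\n");
            abort();
        }
        return r;
    }

    /* "Unchecked" add: wraparound is fine (sequence/generation counters). */
    static unsigned unchecked_add(unsigned v, unsigned d) { return v + d; }

    int main(void)
    {
        printf("%u\n", unchecked_add(UINT_MAX, 1));  /* wraps to 0 by design */
        printf("%d\n", checked_add(1, 2));
        checked_add(INT_MAX, 1);                     /* aborts */
        return 0;
    }
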
55935diff --git a/fs/readdir.c b/fs/readdir.c
55936index 5e69ef5..e5d9099 100644
55937--- a/fs/readdir.c
55938+++ b/fs/readdir.c
55939@@ -17,6 +17,7 @@
55940 #include <linux/security.h>
55941 #include <linux/syscalls.h>
55942 #include <linux/unistd.h>
55943+#include <linux/namei.h>
55944
55945 #include <asm/uaccess.h>
55946
55947@@ -67,6 +68,7 @@ struct old_linux_dirent {
55948
55949 struct readdir_callback {
55950 struct old_linux_dirent __user * dirent;
55951+ struct file * file;
55952 int result;
55953 };
55954
55955@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
55956 buf->result = -EOVERFLOW;
55957 return -EOVERFLOW;
55958 }
55959+
55960+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55961+ return 0;
55962+
55963 buf->result++;
55964 dirent = buf->dirent;
55965 if (!access_ok(VERIFY_WRITE, dirent,
55966@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
55967
55968 buf.result = 0;
55969 buf.dirent = dirent;
55970+ buf.file = f.file;
55971
55972 error = vfs_readdir(f.file, fillonedir, &buf);
55973 if (buf.result)
55974@@ -139,6 +146,7 @@ struct linux_dirent {
55975 struct getdents_callback {
55976 struct linux_dirent __user * current_dir;
55977 struct linux_dirent __user * previous;
55978+ struct file * file;
55979 int count;
55980 int error;
55981 };
55982@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
55983 buf->error = -EOVERFLOW;
55984 return -EOVERFLOW;
55985 }
55986+
55987+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55988+ return 0;
55989+
55990 dirent = buf->previous;
55991 if (dirent) {
55992 if (__put_user(offset, &dirent->d_off))
55993@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55994 buf.previous = NULL;
55995 buf.count = count;
55996 buf.error = 0;
55997+ buf.file = f.file;
55998
55999 error = vfs_readdir(f.file, filldir, &buf);
56000 if (error >= 0)
56001@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56002 struct getdents_callback64 {
56003 struct linux_dirent64 __user * current_dir;
56004 struct linux_dirent64 __user * previous;
56005+ struct file *file;
56006 int count;
56007 int error;
56008 };
56009@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
56010 buf->error = -EINVAL; /* only used if we fail.. */
56011 if (reclen > buf->count)
56012 return -EINVAL;
56013+
56014+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56015+ return 0;
56016+
56017 dirent = buf->previous;
56018 if (dirent) {
56019 if (__put_user(offset, &dirent->d_off))
56020@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56021
56022 buf.current_dir = dirent;
56023 buf.previous = NULL;
56024+ buf.file = f.file;
56025 buf.count = count;
56026 buf.error = 0;
56027
56028@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56029 error = buf.error;
56030 lastdirent = buf.previous;
56031 if (lastdirent) {
56032- typeof(lastdirent->d_off) d_off = f.file->f_pos;
56033+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
56034 if (__put_user(d_off, &lastdirent->d_off))
56035 error = -EFAULT;
56036 else
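
All three getdents flavors above receive the same two-part change: the callback context records which struct file the listing came from, and the fill function returns 0 for names the ACL wants hidden. Returning 0 (rather than a negative error) is what makes this a silent filter: iteration continues and the entry simply never appears. The callback-filter shape in compilable C (gr_acl_handle_filldir itself is grsecurity-internal, so a trivial predicate stands in):

    #include <stdio.h>
    #include <string.h>

    struct dir_ctx { const char *hidden; };

    /* 0 = handled (shown or silently skipped); negative would abort the walk. */
    static int fill_one(struct dir_ctx *ctx, const char *name)
    {
        if (strcmp(name, ctx->hidden) == 0)
            return 0;                 /* filtered: caller keeps iterating */
        printf("%s\n", name);
        return 0;
    }

    int main(void)
    {
        const char *entries[] = { ".", "..", "secret", "README" };
        struct dir_ctx ctx = { .hidden = "secret" };
        for (unsigned i = 0; i < sizeof entries / sizeof *entries; i++)
            fill_one(&ctx, entries[i]);
        return 0;
    }
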
56037diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
56038index 2b7882b..1c5ef48 100644
56039--- a/fs/reiserfs/do_balan.c
56040+++ b/fs/reiserfs/do_balan.c
56041@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
56042 return;
56043 }
56044
56045- atomic_inc(&(fs_generation(tb->tb_sb)));
56046+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
56047 do_balance_starts(tb);
56048
56049 /* balance leaf returns 0 except if combining L R and S into
56050diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
56051index e60e870..f40ac16 100644
56052--- a/fs/reiserfs/procfs.c
56053+++ b/fs/reiserfs/procfs.c
56054@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
56055 "SMALL_TAILS " : "NO_TAILS ",
56056 replay_only(sb) ? "REPLAY_ONLY " : "",
56057 convert_reiserfs(sb) ? "CONV " : "",
56058- atomic_read(&r->s_generation_counter),
56059+ atomic_read_unchecked(&r->s_generation_counter),
56060 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
56061 SF(s_do_balance), SF(s_unneeded_left_neighbor),
56062 SF(s_good_search_by_key_reada), SF(s_bmaps),
56063diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
56064index 157e474..65a6114 100644
56065--- a/fs/reiserfs/reiserfs.h
56066+++ b/fs/reiserfs/reiserfs.h
56067@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
56068 /* Comment? -Hans */
56069 wait_queue_head_t s_wait;
56070 /* To be obsoleted soon by per buffer seals.. -Hans */
56071- atomic_t s_generation_counter; // increased by one every time the
56072+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56073 // tree gets re-balanced
56074 unsigned long s_properties; /* File system properties. Currently holds
56075 on-disk FS format */
56076@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
56077 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56078
56079 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56080-#define get_generation(s) atomic_read (&fs_generation(s))
56081+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56082 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56083 #define __fs_changed(gen,s) (gen != get_generation (s))
56084 #define fs_changed(gen,s) \
56085diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
56086index c196369..4cce1d9 100644
56087--- a/fs/reiserfs/xattr.c
56088+++ b/fs/reiserfs/xattr.c
56089@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
56090 if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
56091 return -ENOSPC;
56092
56093- if (name[0] == '.' && (name[1] == '\0' ||
56094- (name[1] == '.' && name[2] == '\0')))
56095+ if (name[0] == '.' && (namelen < 2 ||
56096+ (namelen == 2 && name[1] == '.')))
56097 return 0;
56098
56099 dentry = lookup_one_len(name, dbuf->xadir, namelen);
56100diff --git a/fs/select.c b/fs/select.c
56101index 2ef72d9..f213b17 100644
56102--- a/fs/select.c
56103+++ b/fs/select.c
56104@@ -20,6 +20,7 @@
56105 #include <linux/export.h>
56106 #include <linux/slab.h>
56107 #include <linux/poll.h>
56108+#include <linux/security.h>
56109 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
56110 #include <linux/file.h>
56111 #include <linux/fdtable.h>
56112@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
56113 struct poll_list *walk = head;
56114 unsigned long todo = nfds;
56115
56116+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
56117 if (nfds > rlimit(RLIMIT_NOFILE))
56118 return -EINVAL;
56119
56120diff --git a/fs/seq_file.c b/fs/seq_file.c
56121index f2bc3df..239d4f6 100644
56122--- a/fs/seq_file.c
56123+++ b/fs/seq_file.c
56124@@ -10,6 +10,7 @@
56125 #include <linux/seq_file.h>
56126 #include <linux/slab.h>
56127 #include <linux/cred.h>
56128+#include <linux/sched.h>
56129
56130 #include <asm/uaccess.h>
56131 #include <asm/page.h>
56132@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
56133 #ifdef CONFIG_USER_NS
56134 p->user_ns = file->f_cred->user_ns;
56135 #endif
56136+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56137+ p->exec_id = current->exec_id;
56138+#endif
56139
56140 /*
56141 * Wrappers around seq_open(e.g. swaps_open) need to be
56142@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56143 return 0;
56144 }
56145 if (!m->buf) {
56146- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56147+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56148 if (!m->buf)
56149 return -ENOMEM;
56150 }
56151@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56152 Eoverflow:
56153 m->op->stop(m, p);
56154 kfree(m->buf);
56155- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56156+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56157 return !m->buf ? -ENOMEM : -EAGAIN;
56158 }
56159
56160@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56161
56162 /* grab buffer if we didn't have one */
56163 if (!m->buf) {
56164- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56165+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56166 if (!m->buf)
56167 goto Enomem;
56168 }
56169@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56170 goto Fill;
56171 m->op->stop(m, p);
56172 kfree(m->buf);
56173- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56174+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56175 if (!m->buf)
56176 goto Enomem;
56177 m->count = 0;
56178@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
56179 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
56180 void *data)
56181 {
56182- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
56183+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
56184 int res = -ENOMEM;
56185
56186 if (op) {
56187diff --git a/fs/splice.c b/fs/splice.c
56188index 6909d89..5b2e8f9 100644
56189--- a/fs/splice.c
56190+++ b/fs/splice.c
56191@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56192 pipe_lock(pipe);
56193
56194 for (;;) {
56195- if (!pipe->readers) {
56196+ if (!atomic_read(&pipe->readers)) {
56197 send_sig(SIGPIPE, current, 0);
56198 if (!ret)
56199 ret = -EPIPE;
56200@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56201 do_wakeup = 0;
56202 }
56203
56204- pipe->waiting_writers++;
56205+ atomic_inc(&pipe->waiting_writers);
56206 pipe_wait(pipe);
56207- pipe->waiting_writers--;
56208+ atomic_dec(&pipe->waiting_writers);
56209 }
56210
56211 pipe_unlock(pipe);
56212@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
56213 old_fs = get_fs();
56214 set_fs(get_ds());
56215 /* The cast to a user pointer is valid due to the set_fs() */
56216- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
56217+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
56218 set_fs(old_fs);
56219
56220 return res;
56221@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
56222 old_fs = get_fs();
56223 set_fs(get_ds());
56224 /* The cast to a user pointer is valid due to the set_fs() */
56225- res = vfs_write(file, (const char __user *)buf, count, &pos);
56226+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
56227 set_fs(old_fs);
56228
56229 return res;
56230@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
56231 goto err;
56232
56233 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
56234- vec[i].iov_base = (void __user *) page_address(page);
56235+ vec[i].iov_base = (void __force_user *) page_address(page);
56236 vec[i].iov_len = this_len;
56237 spd.pages[i] = page;
56238 spd.nr_pages++;
56239@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
56240 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
56241 {
56242 while (!pipe->nrbufs) {
56243- if (!pipe->writers)
56244+ if (!atomic_read(&pipe->writers))
56245 return 0;
56246
56247- if (!pipe->waiting_writers && sd->num_spliced)
56248+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
56249 return 0;
56250
56251 if (sd->flags & SPLICE_F_NONBLOCK)
56252@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
56253 * out of the pipe right after the splice_to_pipe(). So set
56254 * PIPE_READERS appropriately.
56255 */
56256- pipe->readers = 1;
56257+ atomic_set(&pipe->readers, 1);
56258
56259 current->splice_pipe = pipe;
56260 }
56261@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56262 ret = -ERESTARTSYS;
56263 break;
56264 }
56265- if (!pipe->writers)
56266+ if (!atomic_read(&pipe->writers))
56267 break;
56268- if (!pipe->waiting_writers) {
56269+ if (!atomic_read(&pipe->waiting_writers)) {
56270 if (flags & SPLICE_F_NONBLOCK) {
56271 ret = -EAGAIN;
56272 break;
56273@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56274 pipe_lock(pipe);
56275
56276 while (pipe->nrbufs >= pipe->buffers) {
56277- if (!pipe->readers) {
56278+ if (!atomic_read(&pipe->readers)) {
56279 send_sig(SIGPIPE, current, 0);
56280 ret = -EPIPE;
56281 break;
56282@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56283 ret = -ERESTARTSYS;
56284 break;
56285 }
56286- pipe->waiting_writers++;
56287+ atomic_inc(&pipe->waiting_writers);
56288 pipe_wait(pipe);
56289- pipe->waiting_writers--;
56290+ atomic_dec(&pipe->waiting_writers);
56291 }
56292
56293 pipe_unlock(pipe);
56294@@ -1823,14 +1823,14 @@ retry:
56295 pipe_double_lock(ipipe, opipe);
56296
56297 do {
56298- if (!opipe->readers) {
56299+ if (!atomic_read(&opipe->readers)) {
56300 send_sig(SIGPIPE, current, 0);
56301 if (!ret)
56302 ret = -EPIPE;
56303 break;
56304 }
56305
56306- if (!ipipe->nrbufs && !ipipe->writers)
56307+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
56308 break;
56309
56310 /*
56311@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56312 pipe_double_lock(ipipe, opipe);
56313
56314 do {
56315- if (!opipe->readers) {
56316+ if (!atomic_read(&opipe->readers)) {
56317 send_sig(SIGPIPE, current, 0);
56318 if (!ret)
56319 ret = -EPIPE;
56320@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56321 * return EAGAIN if we have the potential of some data in the
56322 * future, otherwise just return 0
56323 */
56324- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
56325+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
56326 ret = -EAGAIN;
56327
56328 pipe_unlock(ipipe);
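
Throughout fs/splice.c the pipe's readers, writers, and waiting_writers counts change from plain integer fields to atomic_t, with every ++/--/plain read rewritten through the atomic API; this brings them under the same hardened, overflow-checkable counter machinery as other kernel lifetimes. The resulting access pattern, shown with C11 atomics (an illustration, not the kernel's pipe code):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int waiting_writers;

    static void writer_wait_slot(void)
    {
        atomic_fetch_add(&waiting_writers, 1);  /* was: pipe->waiting_writers++ */
        /* ... pipe_wait(pipe) would sleep here ... */
        atomic_fetch_sub(&waiting_writers, 1);  /* was: pipe->waiting_writers-- */
    }

    int main(void)
    {
        writer_wait_slot();
        /* was: if (!pipe->waiting_writers) ... */
        printf("%d\n", atomic_load(&waiting_writers));
        return 0;
    }
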
56329diff --git a/fs/stat.c b/fs/stat.c
56330index 14f4545..9b7f55b 100644
56331--- a/fs/stat.c
56332+++ b/fs/stat.c
56333@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
56334 stat->gid = inode->i_gid;
56335 stat->rdev = inode->i_rdev;
56336 stat->size = i_size_read(inode);
56337- stat->atime = inode->i_atime;
56338- stat->mtime = inode->i_mtime;
56339+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56340+ stat->atime = inode->i_ctime;
56341+ stat->mtime = inode->i_ctime;
56342+ } else {
56343+ stat->atime = inode->i_atime;
56344+ stat->mtime = inode->i_mtime;
56345+ }
56346 stat->ctime = inode->i_ctime;
56347 stat->blksize = (1 << inode->i_blkbits);
56348 stat->blocks = inode->i_blocks;
56349@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
56350 if (retval)
56351 return retval;
56352
56353- if (inode->i_op->getattr)
56354- return inode->i_op->getattr(mnt, dentry, stat);
56355+ if (inode->i_op->getattr) {
56356+ retval = inode->i_op->getattr(mnt, dentry, stat);
56357+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56358+ stat->atime = stat->ctime;
56359+ stat->mtime = stat->ctime;
56360+ }
56361+ return retval;
56362+ }
56363
56364 generic_fillattr(inode, stat);
56365 return 0;
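
Both stat paths above substitute i_ctime for the access and modification times whenever the inode is flagged as a side-channel device and the caller lacks CAP_MKNOD: live atime/mtime on shared device nodes such as terminals lets one local user watch when another is active. The substitution itself is a two-field rewrite:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct kstat_demo { time_t atime, mtime, ctime; };

    /* Hide real access/modify times from unprivileged observers. */
    static void sanitize_times(struct kstat_demo *st, bool sidechannel, bool privileged)
    {
        if (sidechannel && !privileged) {
            st->atime = st->ctime;
            st->mtime = st->ctime;
        }
    }

    int main(void)
    {
        struct kstat_demo st = { .atime = 111, .mtime = 222, .ctime = 333 };
        sanitize_times(&st, true, false);
        printf("%ld %ld %ld\n", (long)st.atime, (long)st.mtime, (long)st.ctime);
        return 0;
    }
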
56366diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
56367index 614b2b5..4d321e6 100644
56368--- a/fs/sysfs/bin.c
56369+++ b/fs/sysfs/bin.c
56370@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
56371 return ret;
56372 }
56373
56374-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
56375- void *buf, int len, int write)
56376+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
56377+ void *buf, size_t len, int write)
56378 {
56379 struct file *file = vma->vm_file;
56380 struct bin_buffer *bb = file->private_data;
56381 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
56382- int ret;
56383+ ssize_t ret;
56384
56385 if (!bb->vm_ops)
56386 return -EINVAL;
56387diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
56388index 1f8c823..ed57cfe 100644
56389--- a/fs/sysfs/dir.c
56390+++ b/fs/sysfs/dir.c
56391@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
56392 *
56393 * Returns 31 bit hash of ns + name (so it fits in an off_t )
56394 */
56395-static unsigned int sysfs_name_hash(const void *ns, const char *name)
56396+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
56397 {
56398 unsigned long hash = init_name_hash();
56399 unsigned int len = strlen(name);
56400@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
56401 struct sysfs_dirent *sd;
56402 int rc;
56403
56404+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
56405+ const char *parent_name = parent_sd->s_name;
56406+
56407+ mode = S_IFDIR | S_IRWXU;
56408+
56409+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
56410+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
56411+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
56412+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
56413+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
56414+#endif
56415+
56416 /* allocate */
56417 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
56418 if (!sd)
56419diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
56420index 602f56d..6853db8 100644
56421--- a/fs/sysfs/file.c
56422+++ b/fs/sysfs/file.c
56423@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
56424
56425 struct sysfs_open_dirent {
56426 atomic_t refcnt;
56427- atomic_t event;
56428+ atomic_unchecked_t event;
56429 wait_queue_head_t poll;
56430 struct list_head buffers; /* goes through sysfs_buffer.list */
56431 };
56432@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
56433 if (!sysfs_get_active(attr_sd))
56434 return -ENODEV;
56435
56436- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
56437+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
56438 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
56439
56440 sysfs_put_active(attr_sd);
56441@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
56442 return -ENOMEM;
56443
56444 atomic_set(&new_od->refcnt, 0);
56445- atomic_set(&new_od->event, 1);
56446+ atomic_set_unchecked(&new_od->event, 1);
56447 init_waitqueue_head(&new_od->poll);
56448 INIT_LIST_HEAD(&new_od->buffers);
56449 goto retry;
56450@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
56451
56452 sysfs_put_active(attr_sd);
56453
56454- if (buffer->event != atomic_read(&od->event))
56455+ if (buffer->event != atomic_read_unchecked(&od->event))
56456 goto trigger;
56457
56458 return DEFAULT_POLLMASK;
56459@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
56460
56461 od = sd->s_attr.open;
56462 if (od) {
56463- atomic_inc(&od->event);
56464+ atomic_inc_unchecked(&od->event);
56465 wake_up_interruptible(&od->poll);
56466 }
56467
56468diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
56469index 3c9eb56..9dea5be 100644
56470--- a/fs/sysfs/symlink.c
56471+++ b/fs/sysfs/symlink.c
56472@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
56473
56474 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
56475 {
56476- char *page = nd_get_link(nd);
56477+ const char *page = nd_get_link(nd);
56478 if (!IS_ERR(page))
56479 free_page((unsigned long)page);
56480 }
56481diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
56482index 69d4889..a810bd4 100644
56483--- a/fs/sysv/sysv.h
56484+++ b/fs/sysv/sysv.h
56485@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
56486 #endif
56487 }
56488
56489-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56490+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56491 {
56492 if (sbi->s_bytesex == BYTESEX_PDP)
56493 return PDP_swab((__force __u32)n);
56494diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
56495index e18b988..f1d4ad0f 100644
56496--- a/fs/ubifs/io.c
56497+++ b/fs/ubifs/io.c
56498@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
56499 return err;
56500 }
56501
56502-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56503+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56504 {
56505 int err;
56506
56507diff --git a/fs/udf/misc.c b/fs/udf/misc.c
56508index c175b4d..8f36a16 100644
56509--- a/fs/udf/misc.c
56510+++ b/fs/udf/misc.c
56511@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
56512
56513 u8 udf_tag_checksum(const struct tag *t)
56514 {
56515- u8 *data = (u8 *)t;
56516+ const u8 *data = (const u8 *)t;
56517 u8 checksum = 0;
56518 int i;
56519 for (i = 0; i < sizeof(struct tag); ++i)
56520diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
56521index 8d974c4..b82f6ec 100644
56522--- a/fs/ufs/swab.h
56523+++ b/fs/ufs/swab.h
56524@@ -22,7 +22,7 @@ enum {
56525 BYTESEX_BE
56526 };
56527
56528-static inline u64
56529+static inline u64 __intentional_overflow(-1)
56530 fs64_to_cpu(struct super_block *sbp, __fs64 n)
56531 {
56532 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56533@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
56534 return (__force __fs64)cpu_to_be64(n);
56535 }
56536
56537-static inline u32
56538+static inline u32 __intentional_overflow(-1)
56539 fs32_to_cpu(struct super_block *sbp, __fs32 n)
56540 {
56541 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56542diff --git a/fs/utimes.c b/fs/utimes.c
56543index f4fb7ec..3fe03c0 100644
56544--- a/fs/utimes.c
56545+++ b/fs/utimes.c
56546@@ -1,6 +1,7 @@
56547 #include <linux/compiler.h>
56548 #include <linux/file.h>
56549 #include <linux/fs.h>
56550+#include <linux/security.h>
56551 #include <linux/linkage.h>
56552 #include <linux/mount.h>
56553 #include <linux/namei.h>
56554@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
56555 goto mnt_drop_write_and_out;
56556 }
56557 }
56558+
56559+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
56560+ error = -EACCES;
56561+ goto mnt_drop_write_and_out;
56562+ }
56563+
56564 mutex_lock(&inode->i_mutex);
56565 error = notify_change(path->dentry, &newattrs);
56566 mutex_unlock(&inode->i_mutex);
56567diff --git a/fs/xattr.c b/fs/xattr.c
56568index 3377dff..4feded6 100644
56569--- a/fs/xattr.c
56570+++ b/fs/xattr.c
56571@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
56572 * Extended attribute SET operations
56573 */
56574 static long
56575-setxattr(struct dentry *d, const char __user *name, const void __user *value,
56576+setxattr(struct path *path, const char __user *name, const void __user *value,
56577 size_t size, int flags)
56578 {
56579 int error;
56580@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
56581 posix_acl_fix_xattr_from_user(kvalue, size);
56582 }
56583
56584- error = vfs_setxattr(d, kname, kvalue, size, flags);
56585+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
56586+ error = -EACCES;
56587+ goto out;
56588+ }
56589+
56590+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
56591 out:
56592 if (vvalue)
56593 vfree(vvalue);
56594@@ -377,7 +382,7 @@ retry:
56595 return error;
56596 error = mnt_want_write(path.mnt);
56597 if (!error) {
56598- error = setxattr(path.dentry, name, value, size, flags);
56599+ error = setxattr(&path, name, value, size, flags);
56600 mnt_drop_write(path.mnt);
56601 }
56602 path_put(&path);
56603@@ -401,7 +406,7 @@ retry:
56604 return error;
56605 error = mnt_want_write(path.mnt);
56606 if (!error) {
56607- error = setxattr(path.dentry, name, value, size, flags);
56608+ error = setxattr(&path, name, value, size, flags);
56609 mnt_drop_write(path.mnt);
56610 }
56611 path_put(&path);
56612@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
56613 const void __user *,value, size_t, size, int, flags)
56614 {
56615 struct fd f = fdget(fd);
56616- struct dentry *dentry;
56617 int error = -EBADF;
56618
56619 if (!f.file)
56620 return error;
56621- dentry = f.file->f_path.dentry;
56622- audit_inode(NULL, dentry, 0);
56623+ audit_inode(NULL, f.file->f_path.dentry, 0);
56624 error = mnt_want_write_file(f.file);
56625 if (!error) {
56626- error = setxattr(dentry, name, value, size, flags);
56627+ error = setxattr(&f.file->f_path, name, value, size, flags);
56628 mnt_drop_write_file(f.file);
56629 }
56630 fdput(f);
56631diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
56632index 9fbea87..6b19972 100644
56633--- a/fs/xattr_acl.c
56634+++ b/fs/xattr_acl.c
56635@@ -76,8 +76,8 @@ struct posix_acl *
56636 posix_acl_from_xattr(struct user_namespace *user_ns,
56637 const void *value, size_t size)
56638 {
56639- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
56640- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
56641+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
56642+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
56643 int count;
56644 struct posix_acl *acl;
56645 struct posix_acl_entry *acl_e;
56646diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
56647index 572a858..12a9b0d 100644
56648--- a/fs/xfs/xfs_bmap.c
56649+++ b/fs/xfs/xfs_bmap.c
56650@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
56651 int nmap,
56652 int ret_nmap);
56653 #else
56654-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
56655+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
56656 #endif /* DEBUG */
56657
56658 STATIC int
56659diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
56660index 1b9fc3e..e1bdde0 100644
56661--- a/fs/xfs/xfs_dir2_sf.c
56662+++ b/fs/xfs/xfs_dir2_sf.c
56663@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
56664 }
56665
56666 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
56667- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56668+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
56669+ char name[sfep->namelen];
56670+ memcpy(name, sfep->name, sfep->namelen);
56671+ if (filldir(dirent, name, sfep->namelen,
56672+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
56673+ *offset = off & 0x7fffffff;
56674+ return 0;
56675+ }
56676+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56677 off & 0x7fffffff, ino, DT_UNKNOWN)) {
56678 *offset = off & 0x7fffffff;
56679 return 0;
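
The xfs_dir2_sf_getdents() change above copies the entry name into a stack VLA before calling filldir() whenever the name points into the inode's inline-data fork. filldir() copies the name onward to userspace, and (plausibly, given the rest of this patch) a copy sourced directly from inside an inode slab object is exactly what PAX_USERCOPY refuses; a stack buffer is always an acceptable source. The bounce-via-stack shape, compilable:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for filldir(): sends `name` toward userspace. */
    static int emit_name(const char *name, size_t len)
    {
        printf("%.*s\n", (int)len, name);
        return 0;
    }

    static int emit_inline_name(const char *inline_data, size_t len)
    {
        char name[len];                  /* stack bounce, as in the hunk */
        memcpy(name, inline_data, len);
        return emit_name(name, len);
    }

    int main(void)
    {
        return emit_inline_name("README", 6);
    }
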
56680diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
56681index c1c3ef8..0952438 100644
56682--- a/fs/xfs/xfs_ioctl.c
56683+++ b/fs/xfs/xfs_ioctl.c
56684@@ -127,7 +127,7 @@ xfs_find_handle(
56685 }
56686
56687 error = -EFAULT;
56688- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
56689+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
56690 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
56691 goto out_put;
56692
56693diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
56694index d82efaa..0904a8e 100644
56695--- a/fs/xfs/xfs_iops.c
56696+++ b/fs/xfs/xfs_iops.c
56697@@ -395,7 +395,7 @@ xfs_vn_put_link(
56698 struct nameidata *nd,
56699 void *p)
56700 {
56701- char *s = nd_get_link(nd);
56702+ const char *s = nd_get_link(nd);
56703
56704 if (!IS_ERR(s))
56705 kfree(s);
56706diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
56707new file mode 100644
56708index 0000000..92247e4
56709--- /dev/null
56710+++ b/grsecurity/Kconfig
56711@@ -0,0 +1,1021 @@
56712+#
56713+# grsecurity configuration
56714+#
56715+menu "Memory Protections"
56716+depends on GRKERNSEC
56717+
56718+config GRKERNSEC_KMEM
56719+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56720+ default y if GRKERNSEC_CONFIG_AUTO
56721+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56722+ help
56723+	  If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
56724+	  written to or read from, closing off their use to modify or leak the
56725+	  contents of the running kernel. /dev/port will also not be allowed to be opened, and support
56726+ for /dev/cpu/*/msr will be removed. If you have module
56727+ support disabled, enabling this will close up five ways that are
56728+ currently used to insert malicious code into the running kernel.
56729+
56730+ Even with all these features enabled, we still highly recommend that
56731+ you use the RBAC system, as it is still possible for an attacker to
56732+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56733+
56734+ If you are not using XFree86, you may be able to stop this additional
56735+ case by enabling the 'Disable privileged I/O' option. Though nothing
56736+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56737+ but only to video memory, which is the only writing we allow in this
56738+	  case. If /dev/kmem or /dev/mem is mmaped without PROT_WRITE, the mapping
56739+	  will not be allowed to be mprotected with PROT_WRITE later.
56740+ Enabling this feature will prevent the "cpupower" and "powertop" tools
56741+ from working.
56742+
56743+ It is highly recommended that you say Y here if you meet all the
56744+ conditions above.
56745+
56746+config GRKERNSEC_VM86
56747+ bool "Restrict VM86 mode"
56748+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56749+ depends on X86_32
56750+
56751+ help
56752+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56753+ make use of a special execution mode on 32bit x86 processors called
56754+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56755+ video cards and will still work with this option enabled. The purpose
56756+ of the option is to prevent exploitation of emulation errors in
56757+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56758+ Nearly all users should be able to enable this option.
56759+
56760+config GRKERNSEC_IO
56761+ bool "Disable privileged I/O"
56762+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56763+ depends on X86
56764+ select RTC_CLASS
56765+ select RTC_INTF_DEV
56766+ select RTC_DRV_CMOS
56767+
56768+ help
56769+ If you say Y here, all ioperm and iopl calls will return an error.
56770+ Ioperm and iopl can be used to modify the running kernel.
56771+ Unfortunately, some programs need this access to operate properly,
56772+ the most notable of which are XFree86 and hwclock. hwclock can be
56773+ remedied by having RTC support in the kernel, so real-time
56774+ clock support is enabled if this option is enabled, to ensure
56775+ that hwclock operates correctly. XFree86 still will not
56776+ operate correctly with this option enabled, so DO NOT CHOOSE Y
56777+ IF YOU USE XFree86. If you use XFree86 and you still want to
56778+ protect your kernel against modification, use the RBAC system.
56779+
56780+config GRKERNSEC_JIT_HARDEN
56781+ bool "Harden BPF JIT against spray attacks"
56782+ default y if GRKERNSEC_CONFIG_AUTO
56783+ depends on BPF_JIT
56784+ help
56785+ If you say Y here, the native code generated by the kernel's Berkeley
56786+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
56787+ attacks that attempt to fit attacker-beneficial instructions in
56788+ 32bit immediate fields of JIT-generated native instructions. The
56789+ attacker will generally aim to cause an unintended instruction sequence
56790+ of JIT-generated native code to execute by jumping into the middle of
56791+ a generated instruction. This feature effectively randomizes the 32bit
56792+ immediate constants present in the generated code to thwart such attacks.
56793+
56794+ If you're using KERNEXEC, it's recommended that you enable this option
56795+ to supplement the hardening of the kernel.
56796+
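
The hardening described in this help text is constant blinding: instead of baking an attacker-chosen 32-bit constant verbatim into executable JIT output, the JIT emits it XORed with a random key plus a follow-up XOR that reconstructs the value at runtime, so no byte of the attacker's payload appears literally in the generated code. The arithmetic at its core (a sketch of the technique, not the kernel's BPF JIT):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        uint32_t imm = 0xdeadbeef;          /* attacker-supplied constant   */
        uint32_t key = (uint32_t)rand();    /* fresh per JIT compilation    */
        uint32_t emitted = imm ^ key;       /* what lands in executable mem */

        /* The generated code computes: mov reg, emitted; xor reg, key */
        printf("emitted=%08x runtime=%08x\n", emitted, emitted ^ key);
        return 0;
    }
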
56797+config GRKERNSEC_RAND_THREADSTACK
56798+ bool "Insert random gaps between thread stacks"
56799+ default y if GRKERNSEC_CONFIG_AUTO
56800+ depends on PAX_RANDMMAP && !PPC
56801+ help
56802+ If you say Y here, a random-sized gap will be enforced between allocated
56803+ thread stacks. Glibc's NPTL and other threading libraries that
56804+ pass MAP_STACK to the kernel for thread stack allocation are supported.
56805+ The implementation currently provides 8 bits of entropy for the gap.
56806+
56807+ Many distributions do not compile threaded remote services with the
56808+ -fstack-check argument to GCC, causing the variable-sized stack-based
56809+ allocator, alloca(), to not probe the stack on allocation. This
56810+ permits an unbounded alloca() to skip over any guard page and potentially
56811+ modify another thread's stack reliably. An enforced random gap
56812+ reduces the reliability of such an attack and increases the chance
56813+ that such a read/write to another thread's stack instead lands in
56814+ an unmapped area, causing a crash and triggering grsecurity's
56815+ anti-bruteforcing logic.
56816+
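
With the 8 bits of entropy mentioned above, the kernel in effect inserts a gap of 0 to 255 pages between one MAP_STACK allocation and the next, so a linear overwrite from an unprobed alloca() lands at an unpredictable offset instead of walking deterministically into the neighboring thread's stack. The size computation amounts to something like (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    /* 8 bits of entropy: 0..255 extra pages before each thread stack. */
    static unsigned long stack_gap(void)
    {
        return (unsigned long)(rand() & 0xff) * PAGE_SIZE;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            printf("gap=%lu bytes\n", stack_gap());
        return 0;
    }
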
56817+config GRKERNSEC_PROC_MEMMAP
56818+ bool "Harden ASLR against information leaks and entropy reduction"
56819+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
56820+ depends on PAX_NOEXEC || PAX_ASLR
56821+ help
56822+	  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56823+	  give no information about the addresses of a task's mappings if
56824+	  PaX features that rely on random addresses are enabled on that task.
56825+	  In addition to sanitizing this information and disabling other
56826+	  dangerous sources of information, this option causes reads of sensitive
56827+	  /proc/<pid> entries to return no data when the file descriptor was opened
56828+	  in a different task than the one performing the read; such attempts are logged.
56829+ This option also limits argv/env strings for suid/sgid binaries
56830+ to 512KB to prevent a complete exhaustion of the stack entropy provided
56831+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
56832+ binaries to prevent alternative mmap layouts from being abused.
56833+
56834+ If you use PaX it is essential that you say Y here as it closes up
56835+ several holes that make full ASLR useless locally.
56836+
56837+config GRKERNSEC_BRUTE
56838+ bool "Deter exploit bruteforcing"
56839+ default y if GRKERNSEC_CONFIG_AUTO
56840+ help
56841+ If you say Y here, attempts to bruteforce exploits against forking
56842+ daemons such as apache or sshd, as well as against suid/sgid binaries
56843+ will be deterred. When a child of a forking daemon is killed by PaX
56844+ or crashes due to an illegal instruction or other suspicious signal,
56845+ the parent process will be delayed 30 seconds upon every subsequent
56846+ fork until the administrator is able to assess the situation and
56847+ restart the daemon.
56848+ In the suid/sgid case, the attempt is logged, the user has all their
56849+ processes terminated, and they are prevented from executing any further
56850+ processes for 15 minutes.
56851+ It is recommended that you also enable signal logging in the auditing
56852+ section so that logs are generated when a process triggers a suspicious
56853+ signal.
56854+ If the sysctl option is enabled, a sysctl option with name
56855+ "deter_bruteforce" is created.
56856+
56857+
56858+config GRKERNSEC_MODHARDEN
56859+ bool "Harden module auto-loading"
56860+ default y if GRKERNSEC_CONFIG_AUTO
56861+ depends on MODULES
56862+ help
56863+ If you say Y here, module auto-loading in response to use of some
56864+ feature implemented by an unloaded module will be restricted to
56865+ root users. Enabling this option helps defend against attacks
56866+ by unprivileged users who abuse the auto-loading behavior to
56867+ cause a vulnerable module to load that is then exploited.
56868+
56869+ If this option prevents a legitimate use of auto-loading for a
56870+ non-root user, the administrator can execute modprobe manually
56871+ with the exact name of the module mentioned in the alert log.
56872+ Alternatively, the administrator can add the module to the list
56873+ of modules loaded at boot by modifying init scripts.
56874+
56875+ Modification of init scripts will most likely be needed on
56876+ Ubuntu servers with encrypted home directory support enabled,
56877+ as the first non-root user logging in will cause the ecb(aes),
56878+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56879+
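The auto-loading path this option restricts can be triggered from plain
unprivileged code. A minimal sketch (not part of the patch; the protocol
family is just an example of one commonly built as a module):

        #include <stdio.h>
        #include <sys/socket.h>

        int main(void)
        {
                /* On an unhardened kernel, this makes the kernel load the
                 * matching protocol module via request_module() if
                 * AF_APPLETALK is not built in; with MODHARDEN the load is
                 * denied for non-root users and the attempt is logged. */
                int fd = socket(AF_APPLETALK, SOCK_DGRAM, 0);
                if (fd < 0)
                        perror("socket");
                return 0;
        }
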
56880+config GRKERNSEC_HIDESYM
56881+ bool "Hide kernel symbols"
56882+ default y if GRKERNSEC_CONFIG_AUTO
56883+ select PAX_USERCOPY_SLABS
56884+ help
56885+ If you say Y here, getting information on loaded modules and
56886+ displaying all kernel symbols through a syscall will be restricted
56887+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56888+ /proc/kallsyms will be restricted to the root user. The RBAC
56889+ system can hide that entry even from root.
56890+
56891+ This option also prevents leaking of kernel addresses through
56892+ several /proc entries.
56893+
56894+ Note that this option is only effective provided the following
56895+ conditions are met:
56896+ 1) The kernel using grsecurity is not precompiled by some distribution
56897+ 2) You have also enabled GRKERNSEC_DMESG
56898+ 3) You are using the RBAC system and hiding other files such as your
56899+ kernel image and System.map. Alternatively, enabling this option
56900+ causes the permissions on /boot, /lib/modules, and the kernel
56901+ source directory to change at compile time to prevent
56902+ reading by non-root users.
56903+ If the above conditions are met, this option will aid in providing a
56904+ useful protection against local kernel exploitation of overflows
56905+ and arbitrary read/write vulnerabilities.
56906+
56907+config GRKERNSEC_KERN_LOCKOUT
56908+ bool "Active kernel exploit response"
56909+ default y if GRKERNSEC_CONFIG_AUTO
56910+ depends on X86 || ARM || PPC || SPARC
56911+ help
56912+ If you say Y here, when a PaX alert is triggered due to suspicious
56913+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56914+ or an OOPS occurs due to bad memory accesses, instead of just
56915+ terminating the offending process (and potentially allowing
56916+ a subsequent exploit from the same user), we will take one of two
56917+ actions:
56918+ If the user was root, we will panic the system.
56919+ If the user was non-root, we will log the attempt, terminate
56920+ all processes owned by the user, then prevent them from creating
56921+ any new processes until the system is restarted.
56922+ This deters repeated kernel exploitation/bruteforcing attempts
56923+ and is useful for later forensics.
56924+
56925+endmenu
56926+menu "Role Based Access Control Options"
56927+depends on GRKERNSEC
56928+
56929+config GRKERNSEC_RBAC_DEBUG
56930+ bool
56931+
56932+config GRKERNSEC_NO_RBAC
56933+ bool "Disable RBAC system"
56934+ help
56935+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56936+ preventing the RBAC system from being enabled. You should only say Y
56937+ here if you have no intention of using the RBAC system, so as to prevent
56938+ an attacker with root access from misusing the RBAC system to hide files
56939+ and processes when loadable module support and /dev/[k]mem have been
56940+ locked down.
56941+
56942+config GRKERNSEC_ACL_HIDEKERN
56943+ bool "Hide kernel processes"
56944+ help
56945+ If you say Y here, all kernel threads will be hidden from all
56946+ processes except those whose subject has the "view hidden processes"
56947+ flag.
56948+
56949+config GRKERNSEC_ACL_MAXTRIES
56950+ int "Maximum tries before password lockout"
56951+ default 3
56952+ help
56953+ This option enforces the maximum number of times a user can attempt
56954+ to authorize themselves with the grsecurity RBAC system before being
56955+ denied the ability to attempt authorization again for a specified time.
56956+ The lower the number, the harder it will be to brute-force a password.
56957+
56958+config GRKERNSEC_ACL_TIMEOUT
56959+ int "Time to wait after max password tries, in seconds"
56960+ default 30
56961+ help
56962+ This option specifies the time the user must wait after attempting to
56963+ authorize to the RBAC system with the maximum number of invalid
56964+ passwords. The higher the number, the harder it will be to brute-force
56965+ a password.
56966+
56967+endmenu
56968+menu "Filesystem Protections"
56969+depends on GRKERNSEC
56970+
56971+config GRKERNSEC_PROC
56972+ bool "Proc restrictions"
56973+ default y if GRKERNSEC_CONFIG_AUTO
56974+ help
56975+ If you say Y here, the permissions of the /proc filesystem
56976+ will be altered to enhance system security and privacy. You MUST
56977+ choose either a user-only restriction or a user and group restriction.
56978+ Depending upon the option you choose, you can either restrict users to
56979+ see only the processes they themselves run ("restrict to user only"),
56980+ or additionally choose a group whose members can view all processes
56981+ and files normally restricted to root. NOTE: If you're running identd or
56982+ ntpd as a non-root user, you will have to run it as the group you
56983+ specify here.
56984+
56985+config GRKERNSEC_PROC_USER
56986+ bool "Restrict /proc to user only"
56987+ depends on GRKERNSEC_PROC
56988+ help
56989+ If you say Y here, non-root users will only be able to view their own
56990+ processes, and will be restricted from viewing network-related
56991+ information and kernel symbol and module information.
56992+
56993+config GRKERNSEC_PROC_USERGROUP
56994+ bool "Allow special group"
56995+ default y if GRKERNSEC_CONFIG_AUTO
56996+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56997+ help
56998+ If you say Y here, you will be able to select a group that will be
56999+ able to view all processes and network-related information. If you've
57000+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
57001+ remain hidden. This option is useful if you want to run identd as
57002+ a non-root user. The group you select may also be chosen at boot time
57003+ via "grsec_proc_gid=" on the kernel commandline.
57004+
57005+config GRKERNSEC_PROC_GID
57006+ int "GID for special group"
57007+ depends on GRKERNSEC_PROC_USERGROUP
57008+ default 1001
57009+
57010+config GRKERNSEC_PROC_ADD
57011+ bool "Additional restrictions"
57012+ default y if GRKERNSEC_CONFIG_AUTO
57013+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
57014+ help
57015+ If you say Y here, additional restrictions will be placed on
57016+ /proc that keep normal users from viewing device information and
57017+ slabinfo information that could be useful for exploits.
57018+
57019+config GRKERNSEC_LINK
57020+ bool "Linking restrictions"
57021+ default y if GRKERNSEC_CONFIG_AUTO
57022+ help
57023+ If you say Y here, /tmp race exploits will be prevented, since users
57024+ will no longer be able to follow symlinks owned by other users in
57025+ world-writable +t directories (e.g. /tmp), unless the owner of the
57026+ symlink is the owner of the directory. Users will also not be
57027+ able to hardlink to files they do not own. If the sysctl option is
57028+ enabled, a sysctl option with name "linking_restrictions" is created.
57029+
57030+config GRKERNSEC_SYMLINKOWN
57031+ bool "Kernel-enforced SymlinksIfOwnerMatch"
57032+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57033+ help
57034+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
57035+ that prevents it from being used as a security feature. As Apache
57036+ verifies the symlink by performing a stat() against the target of
57037+ the symlink before it is followed, an attacker can set up a symlink
57038+ to point to a same-owned file, then replace the symlink with one
57039+ that targets another user's file just after Apache "validates" the
57040+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
57041+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
57042+ will be in place for the group you specify. If the sysctl option
57043+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
57044+ created.
57045+
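The race described above, sketched from the attacker's side (not part of the
patch; all paths are illustrative). Looping the swap makes hitting the narrow
check-to-open window only a matter of time:

        #include <unistd.h>

        int main(void)
        {
                const char *link = "/var/www/attacker/page.html";

                for (;;) {
                        /* Same-owned target: the stat()-based ownership
                         * check passes while this link is in place. */
                        symlink("/home/attacker/innocent.html", link);
                        unlink(link);
                        /* Swapped target: if the swap lands between the
                         * server's stat() and its open(), the victim's
                         * file is served. */
                        symlink("/home/victim/secret.html", link);
                        unlink(link);
                }
        }
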
57046+config GRKERNSEC_SYMLINKOWN_GID
57047+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
57048+ depends on GRKERNSEC_SYMLINKOWN
57049+ default 1006
57050+ help
57051+ Setting this GID determines what group kernel-enforced
57052+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
57053+ is enabled, a sysctl option with name "symlinkown_gid" is created.
57054+
57055+config GRKERNSEC_FIFO
57056+ bool "FIFO restrictions"
57057+ default y if GRKERNSEC_CONFIG_AUTO
57058+ help
57059+ If you say Y here, users will not be able to write to FIFOs they don't
57060+ own in world-writable +t directories (e.g. /tmp), unless the owner of
57061+ the FIFO is the same as the owner of the directory it's held in. If the
57062+ sysctl option is enabled, a sysctl option with name "fifo_restrictions" is
57063+ created.
57064+
57065+config GRKERNSEC_SYSFS_RESTRICT
57066+ bool "Sysfs/debugfs restriction"
57067+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57068+ depends on SYSFS
57069+ help
57070+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
57071+ any filesystem normally mounted under it (e.g. debugfs) will be
57072+ mostly accessible only by root. These filesystems generally provide access
57073+ to hardware and debug information that isn't appropriate for unprivileged
57074+ users of the system. Sysfs and debugfs have also become a large source
57075+ of new vulnerabilities, ranging from infoleaks to local compromise.
57076+ There has been very little oversight with an eye toward security involved
57077+ in adding new exporters of information to these filesystems, so their
57078+ use is discouraged.
57079+ For reasons of compatibility, a few directories have been whitelisted
57080+ for access by non-root users:
57081+ /sys/fs/selinux
57082+ /sys/fs/fuse
57083+ /sys/devices/system/cpu
57084+
57085+config GRKERNSEC_ROFS
57086+ bool "Runtime read-only mount protection"
57087+ help
57088+ If you say Y here, a sysctl option with name "romount_protect" will
57089+ be created. By setting this option to 1 at runtime, filesystems
57090+ will be protected in the following ways:
57091+ * No new writable mounts will be allowed
57092+ * Existing read-only mounts won't be able to be remounted read/write
57093+ * Write operations will be denied on all block devices
57094+ This option acts independently of grsec_lock: once it is set to 1,
57095+ it cannot be turned off. Therefore, please be mindful of the resulting
57096+ behavior if this option is enabled in an init script on a read-only
57097+ filesystem. This feature is mainly intended for secure embedded systems.
57098+
57099+config GRKERNSEC_DEVICE_SIDECHANNEL
57100+ bool "Eliminate stat/notify-based device sidechannels"
57101+ default y if GRKERNSEC_CONFIG_AUTO
57102+ help
57103+ If you say Y here, timing analyses on block or character
57104+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
57105+ will be thwarted for unprivileged users. If a process without
57106+ CAP_MKNOD stats such a device, the last access and last modify times
57107+ will match the device's create time. No access or modify events
57108+ will be triggered through inotify/dnotify/fanotify for such devices.
57109+ This feature will prevent attacks that may at a minimum
57110+ allow an attacker to determine the administrator's password length.
57111+
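A minimal sketch of the stat()-based sidechannel this option eliminates (not
part of the patch). Any user can poll the timestamps on a device like
/dev/ptmx and observe when terminal activity occurs, which is enough to
recover activity timing such as a password's length:

        #include <stdio.h>
        #include <sys/stat.h>
        #include <unistd.h>

        int main(void)
        {
                struct stat st;
                time_t last = 0;

                for (;;) {
                        /* With this option, an unprivileged stat() sees
                         * access/modify times pinned to the create time,
                         * so nothing below ever fires. */
                        if (stat("/dev/ptmx", &st) == 0 && st.st_atime != last) {
                                last = st.st_atime;
                                printf("tty activity at %ld\n", (long)last);
                        }
                        usleep(10000);  /* 10ms polling interval */
                }
        }
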
57112+config GRKERNSEC_CHROOT
57113+ bool "Chroot jail restrictions"
57114+ default y if GRKERNSEC_CONFIG_AUTO
57115+ help
57116+ If you say Y here, you will be able to choose several options that will
57117+ make breaking out of a chrooted jail much more difficult. If you
57118+ encounter no software incompatibilities with the following options, it
57119+ is recommended that you enable each one.
57120+
57121+config GRKERNSEC_CHROOT_MOUNT
57122+ bool "Deny mounts"
57123+ default y if GRKERNSEC_CONFIG_AUTO
57124+ depends on GRKERNSEC_CHROOT
57125+ help
57126+ If you say Y here, processes inside a chroot will not be able to
57127+ mount or remount filesystems. If the sysctl option is enabled, a
57128+ sysctl option with name "chroot_deny_mount" is created.
57129+
57130+config GRKERNSEC_CHROOT_DOUBLE
57131+ bool "Deny double-chroots"
57132+ default y if GRKERNSEC_CONFIG_AUTO
57133+ depends on GRKERNSEC_CHROOT
57134+ help
57135+ If you say Y here, processes inside a chroot will not be able to chroot
57136+ again outside the chroot. This is a widely used method of breaking
57137+ out of a chroot jail and should not be allowed. If the sysctl
57138+ option is enabled, a sysctl option with name
57139+ "chroot_deny_chroot" is created.
57140+
57141+config GRKERNSEC_CHROOT_PIVOT
57142+ bool "Deny pivot_root in chroot"
57143+ default y if GRKERNSEC_CONFIG_AUTO
57144+ depends on GRKERNSEC_CHROOT
57145+ help
57146+ If you say Y here, processes inside a chroot will not be able to use
57147+ a function called pivot_root() that was introduced in Linux 2.3.41. It
57148+ works similarly to chroot in that it changes the root filesystem. This
57149+ function could be misused in a chrooted process to attempt to break out
57150+ of the chroot, and therefore should not be allowed. If the sysctl
57151+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
57152+ created.
57153+
57154+config GRKERNSEC_CHROOT_CHDIR
57155+ bool "Enforce chdir(\"/\") on all chroots"
57156+ default y if GRKERNSEC_CONFIG_AUTO
57157+ depends on GRKERNSEC_CHROOT
57158+ help
57159+ If you say Y here, the current working directory of all newly-chrooted
57160+ applications will be set to the root directory of the chroot.
57161+ The man page on chroot(2) states:
57162+ Note that this call does not change the current working
57163+ directory, so that `.' can be outside the tree rooted at
57164+ `/'. In particular, the super-user can escape from a
57165+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
57166+
57167+ It is recommended that you say Y here, since it's not known to break
57168+ any software. If the sysctl option is enabled, a sysctl option with
57169+ name "chroot_enforce_chdir" is created.
57170+
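The man page excerpt above, rendered as a sketch (not part of the patch) of
what a root process inside the jail would run. The escape works only because
the working directory was left outside the new root:

        #include <unistd.h>
        #include <sys/stat.h>

        int escape(void)
        {
                int i;

                mkdir("foo", 0755);
                chroot("foo");          /* no chdir(): cwd stays outside */
                for (i = 0; i < 64; i++)
                        chdir("..");    /* climb to the host's real root */
                return chroot(".");     /* re-root at the real / */
        }

With this option enabled, the kernel performs the missing chdir to the
chroot's root itself, so `.' can never start outside the tree rooted at `/'.
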
57171+config GRKERNSEC_CHROOT_CHMOD
57172+ bool "Deny (f)chmod +s"
57173+ default y if GRKERNSEC_CONFIG_AUTO
57174+ depends on GRKERNSEC_CHROOT
57175+ help
57176+ If you say Y here, processes inside a chroot will not be able to chmod
57177+ or fchmod files to make them have suid or sgid bits. This protects
57178+ against another published method of breaking a chroot. If the sysctl
57179+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
57180+ created.
57181+
57182+config GRKERNSEC_CHROOT_FCHDIR
57183+ bool "Deny fchdir out of chroot"
57184+ default y if GRKERNSEC_CONFIG_AUTO
57185+ depends on GRKERNSEC_CHROOT
57186+ help
57187+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
57188+ to a file descriptor of the chrooting process that points to a directory
57189+ outside the filesystem will be stopped. If the sysctl option
57190+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
57191+
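A sketch of the descriptor-based variant this option stops (not part of the
patch; the jail path is illustrative). The key is that an open directory
descriptor acquired before the chroot keeps pointing outside it:

        #include <fcntl.h>
        #include <unistd.h>

        int escape_with_fd(void)
        {
                /* Opened before entering the jail (or inherited across it). */
                int outside = open("/", O_RDONLY | O_DIRECTORY);

                chroot("/some/jail");
                chdir("/");

                fchdir(outside);        /* follow the fd back outside */
                return chroot(".");     /* re-root at the real / */
        }
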
57192+config GRKERNSEC_CHROOT_MKNOD
57193+ bool "Deny mknod"
57194+ default y if GRKERNSEC_CONFIG_AUTO
57195+ depends on GRKERNSEC_CHROOT
57196+ help
57197+ If you say Y here, processes inside a chroot will not be allowed to
57198+ mknod. The problem with using mknod inside a chroot is that it
57199+ would allow an attacker to create a device entry that is the same
57200+ as one on the physical root of your system, which could be anything
57201+ from the console device to a device for your hard drive (which
57202+ they could then use to wipe the drive or steal data). It is recommended
57203+ that you say Y here, unless you run into software incompatibilities.
57204+ If the sysctl option is enabled, a sysctl option with name
57205+ "chroot_deny_mknod" is created.
57206+
57207+config GRKERNSEC_CHROOT_SHMAT
57208+ bool "Deny shmat() out of chroot"
57209+ default y if GRKERNSEC_CONFIG_AUTO
57210+ depends on GRKERNSEC_CHROOT
57211+ help
57212+ If you say Y here, processes inside a chroot will not be able to attach
57213+ to shared memory segments that were created outside of the chroot jail.
57214+ It is recommended that you say Y here. If the sysctl option is enabled,
57215+ a sysctl option with name "chroot_deny_shmat" is created.
57216+
57217+config GRKERNSEC_CHROOT_UNIX
57218+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
57219+ default y if GRKERNSEC_CONFIG_AUTO
57220+ depends on GRKERNSEC_CHROOT
57221+ help
57222+ If you say Y here, processes inside a chroot will not be able to
57223+ connect to abstract (meaning not belonging to a filesystem) Unix
57224+ domain sockets that were bound outside of a chroot. It is recommended
57225+ that you say Y here. If the sysctl option is enabled, a sysctl option
57226+ with name "chroot_deny_unix" is created.
57227+
57228+config GRKERNSEC_CHROOT_FINDTASK
57229+ bool "Protect outside processes"
57230+ default y if GRKERNSEC_CONFIG_AUTO
57231+ depends on GRKERNSEC_CHROOT
57232+ help
57233+ If you say Y here, processes inside a chroot will not be able to
57234+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
57235+ getsid, or view any process outside of the chroot. If the sysctl
57236+ option is enabled, a sysctl option with name "chroot_findtask" is
57237+ created.
57238+
57239+config GRKERNSEC_CHROOT_NICE
57240+ bool "Restrict priority changes"
57241+ default y if GRKERNSEC_CONFIG_AUTO
57242+ depends on GRKERNSEC_CHROOT
57243+ help
57244+ If you say Y here, processes inside a chroot will not be able to raise
57245+ the priority of processes in the chroot, or alter the priority of
57246+ processes outside the chroot. This provides more security than simply
57247+ removing CAP_SYS_NICE from the process' capability set. If the
57248+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
57249+ is created.
57250+
57251+config GRKERNSEC_CHROOT_SYSCTL
57252+ bool "Deny sysctl writes"
57253+ default y if GRKERNSEC_CONFIG_AUTO
57254+ depends on GRKERNSEC_CHROOT
57255+ help
57256+ If you say Y here, an attacker in a chroot will not be able to
57257+ write to sysctl entries, either by sysctl(2) or through a /proc
57258+ interface. It is strongly recommended that you say Y here. If the
57259+ sysctl option is enabled, a sysctl option with name
57260+ "chroot_deny_sysctl" is created.
57261+
57262+config GRKERNSEC_CHROOT_CAPS
57263+ bool "Capability restrictions"
57264+ default y if GRKERNSEC_CONFIG_AUTO
57265+ depends on GRKERNSEC_CHROOT
57266+ help
57267+ If you say Y here, the capabilities on all processes within a
57268+ chroot jail will be lowered to stop module insertion, raw i/o,
57269+ system and net admin tasks, rebooting the system, modifying immutable
57270+ files, modifying IPC owned by another, and changing the system time.
57271+ This is left an option because it can break some apps. Disable this
57272+ if your chrooted apps are having problems performing those kinds of
57273+ tasks. If the sysctl option is enabled, a sysctl option with
57274+ name "chroot_caps" is created.
57275+
57276+endmenu
57277+menu "Kernel Auditing"
57278+depends on GRKERNSEC
57279+
57280+config GRKERNSEC_AUDIT_GROUP
57281+ bool "Single group for auditing"
57282+ help
57283+ If you say Y here, the exec and chdir logging features will only operate
57284+ on a group you specify. This option is recommended if you only want to
57285+ watch certain users instead of having a large amount of logs from the
57286+ entire system. If the sysctl option is enabled, a sysctl option with
57287+ name "audit_group" is created.
57288+
57289+config GRKERNSEC_AUDIT_GID
57290+ int "GID for auditing"
57291+ depends on GRKERNSEC_AUDIT_GROUP
57292+ default 1007
57293+
57294+config GRKERNSEC_EXECLOG
57295+ bool "Exec logging"
57296+ help
57297+ If you say Y here, all execve() calls will be logged (since the
57298+ other exec*() calls are frontends to execve(), all execution
57299+ will be logged). Useful for shell-servers that like to keep track
57300+ of their users. If the sysctl option is enabled, a sysctl option with
57301+ name "exec_logging" is created.
57302+ WARNING: This option, when enabled, will produce a LOT of logs, especially
57303+ on an active system.
57304+
57305+config GRKERNSEC_RESLOG
57306+ bool "Resource logging"
57307+ default y if GRKERNSEC_CONFIG_AUTO
57308+ help
57309+ If you say Y here, all attempts to overstep resource limits will
57310+ be logged with the resource name, the requested size, and the current
57311+ limit. It is highly recommended that you say Y here. If the sysctl
57312+ option is enabled, a sysctl option with name "resource_logging" is
57313+ created. If the RBAC system is enabled, the sysctl value is ignored.
57314+
57315+config GRKERNSEC_CHROOT_EXECLOG
57316+ bool "Log execs within chroot"
57317+ help
57318+ If you say Y here, all executions inside a chroot jail will be logged
57319+ to syslog. This can cause a large amount of logs if certain
57320+ applications (e.g. djb's daemontools) are installed on the system, and
57321+ is therefore left as an option. If the sysctl option is enabled, a
57322+ sysctl option with name "chroot_execlog" is created.
57323+
57324+config GRKERNSEC_AUDIT_PTRACE
57325+ bool "Ptrace logging"
57326+ help
57327+ If you say Y here, all attempts to attach to a process via ptrace
57328+ will be logged. If the sysctl option is enabled, a sysctl option
57329+ with name "audit_ptrace" is created.
57330+
57331+config GRKERNSEC_AUDIT_CHDIR
57332+ bool "Chdir logging"
57333+ help
57334+ If you say Y here, all chdir() calls will be logged. If the sysctl
57335+ option is enabled, a sysctl option with name "audit_chdir" is created.
57336+
57337+config GRKERNSEC_AUDIT_MOUNT
57338+ bool "(Un)Mount logging"
57339+ help
57340+ If you say Y here, all mounts and unmounts will be logged. If the
57341+ sysctl option is enabled, a sysctl option with name "audit_mount" is
57342+ created.
57343+
57344+config GRKERNSEC_SIGNAL
57345+ bool "Signal logging"
57346+ default y if GRKERNSEC_CONFIG_AUTO
57347+ help
57348+ If you say Y here, certain important signals will be logged, such as
57349+ SIGSEGV, which will as a result inform you when an error in a program
57350+ has occurred, which in some cases could indicate an exploit attempt.
57351+ If the sysctl option is enabled, a sysctl option with name
57352+ "signal_logging" is created.
57353+
57354+config GRKERNSEC_FORKFAIL
57355+ bool "Fork failure logging"
57356+ help
57357+ If you say Y here, all failed fork() attempts will be logged.
57358+ This could suggest a fork bomb, or someone attempting to overstep
57359+ their process limit. If the sysctl option is enabled, a sysctl option
57360+ with name "forkfail_logging" is created.
57361+
57362+config GRKERNSEC_TIME
57363+ bool "Time change logging"
57364+ default y if GRKERNSEC_CONFIG_AUTO
57365+ help
57366+ If you say Y here, any changes of the system clock will be logged.
57367+ If the sysctl option is enabled, a sysctl option with name
57368+ "timechange_logging" is created.
57369+
57370+config GRKERNSEC_PROC_IPADDR
57371+ bool "/proc/<pid>/ipaddr support"
57372+ default y if GRKERNSEC_CONFIG_AUTO
57373+ help
57374+ If you say Y here, a new entry will be added to each /proc/<pid>
57375+ directory that contains the IP address of the person using the task.
57376+ The IP is carried across local TCP and AF_UNIX stream sockets.
57377+ This information can be useful for IDS/IPSes to perform remote response
57378+ to a local attack. The entry is readable by only the owner of the
57379+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57380+ the RBAC system), and thus does not create privacy concerns.
57381+
57382+config GRKERNSEC_RWXMAP_LOG
57383+ bool 'Denied RWX mmap/mprotect logging'
57384+ default y if GRKERNSEC_CONFIG_AUTO
57385+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57386+ help
57387+ If you say Y here, calls to mmap() and mprotect() with explicit
57388+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57389+ denied by the PAX_MPROTECT feature. If the sysctl option is
57390+ enabled, a sysctl option with name "rwxmap_logging" is created.
57391+
57392+config GRKERNSEC_AUDIT_TEXTREL
57393+ bool 'ELF text relocations logging (READ HELP)'
57394+ depends on PAX_MPROTECT
57395+ help
57396+ If you say Y here, text relocations will be logged with the filename
57397+ of the offending library or binary. The purpose of the feature is
57398+ to help Linux distribution developers get rid of libraries and
57399+ binaries that need text relocations which hinder the future progress
57400+ of PaX. Only Linux distribution developers should say Y here, and
57401+ never on a production machine, as this option creates an information
57402+ leak that could aid an attacker in defeating the randomization of
57403+ a single memory region. If the sysctl option is enabled, a sysctl
57404+ option with name "audit_textrel" is created.
57405+
57406+endmenu
57407+
57408+menu "Executable Protections"
57409+depends on GRKERNSEC
57410+
57411+config GRKERNSEC_DMESG
57412+ bool "Dmesg(8) restriction"
57413+ default y if GRKERNSEC_CONFIG_AUTO
57414+ help
57415+ If you say Y here, non-root users will not be able to use dmesg(8)
57416+ to view the contents of the kernel's circular log buffer.
57417+ The kernel's log buffer often contains kernel addresses and other
57418+ identifying information useful to an attacker in fingerprinting a
57419+ system for a targeted exploit.
57420+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
57421+ created.
57422+
57423+config GRKERNSEC_HARDEN_PTRACE
57424+ bool "Deter ptrace-based process snooping"
57425+ default y if GRKERNSEC_CONFIG_AUTO
57426+ help
57427+ If you say Y here, TTY sniffers and other malicious monitoring
57428+ programs implemented through ptrace will be defeated. If you
57429+ have been using the RBAC system, this option has already been
57430+ enabled for several years for all users, with the ability to make
57431+ fine-grained exceptions.
57432+
57433+ This option only affects the ability of non-root users to ptrace
57434+ processes that are not a descendant of the ptracing process.
57435+ This means that strace ./binary and gdb ./binary will still work,
57436+ but attaching to arbitrary processes will not. If the sysctl
57437+ option is enabled, a sysctl option with name "harden_ptrace" is
57438+ created.
57439+
57440+config GRKERNSEC_PTRACE_READEXEC
57441+ bool "Require read access to ptrace sensitive binaries"
57442+ default y if GRKERNSEC_CONFIG_AUTO
57443+ help
57444+ If you say Y here, unprivileged users will not be able to ptrace unreadable
57445+ binaries. This option is useful in environments that
57446+ remove the read bits (e.g. file mode 4711) from suid binaries to
57447+ prevent infoleaking of their contents. This option adds
57448+ consistency to the use of that file mode, as the binary's contents
57449+ could otherwise be read out by ptracing it while run without privileges.
57450+
57451+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
57452+ is created.
57453+
57454+config GRKERNSEC_SETXID
57455+ bool "Enforce consistent multithreaded privileges"
57456+ default y if GRKERNSEC_CONFIG_AUTO
57457+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
57458+ help
57459+ If you say Y here, a change from a root uid to a non-root uid
57460+ in a multithreaded application will cause the resulting uids,
57461+ gids, supplementary groups, and capabilities in that thread
57462+ to be propagated to the other threads of the process. In most
57463+ cases this is unnecessary, as glibc will emulate this behavior
57464+ on behalf of the application. Other libcs do not act in the
57465+ same way, allowing the other threads of the process to continue
57466+ running with root privileges. If the sysctl option is enabled,
57467+ a sysctl option with name "consistent_setxid" is created.
57468+
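The inconsistency this option removes can be seen with the raw syscall, which
changes only the calling thread's credentials; glibc's setuid() wrapper hides
this by broadcasting the change to all threads. A minimal sketch (not part of
the patch; the uid value is arbitrary, link with -pthread):

        #include <pthread.h>
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static void *worker(void *arg)
        {
                sleep(2);
                /* Without glibc's emulation or this option, a thread
                 * spawned before the drop still reports uid 0 here. */
                printf("worker uid: %ld\n", (long)syscall(SYS_getuid));
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                pthread_create(&t, NULL, worker, NULL);
                sleep(1);
                syscall(SYS_setuid, 1000);      /* this thread only */
                pthread_join(t, NULL);
                return 0;
        }
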
57469+config GRKERNSEC_TPE
57470+ bool "Trusted Path Execution (TPE)"
57471+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57472+ help
57473+ If you say Y here, you will be able to choose a gid to add to the
57474+ supplementary groups of users you want to mark as "untrusted."
57475+ These users will not be able to execute any files that are not in
57476+ root-owned directories writable only by root. If the sysctl option
57477+ is enabled, a sysctl option with name "tpe" is created.
57478+
57479+config GRKERNSEC_TPE_ALL
57480+ bool "Partially restrict all non-root users"
57481+ depends on GRKERNSEC_TPE
57482+ help
57483+ If you say Y here, all non-root users will be covered under
57484+ a weaker TPE restriction. This is separate from, and in addition to,
57485+ the main TPE options that you have selected elsewhere. Thus, if a
57486+ "trusted" GID is chosen, this restriction applies to even that GID.
57487+ Under this restriction, all non-root users will only be allowed to
57488+ execute files in directories they own that are not group or
57489+ world-writable, or in directories owned by root and writable only by
57490+ root. If the sysctl option is enabled, a sysctl option with name
57491+ "tpe_restrict_all" is created.
57492+
57493+config GRKERNSEC_TPE_INVERT
57494+ bool "Invert GID option"
57495+ depends on GRKERNSEC_TPE
57496+ help
57497+ If you say Y here, the group you specify in the TPE configuration will
57498+ decide what group TPE restrictions will be *disabled* for. This
57499+ option is useful if you want TPE restrictions to be applied to most
57500+ users on the system. If the sysctl option is enabled, a sysctl option
57501+ with name "tpe_invert" is created. Unlike other sysctl options, this
57502+ entry will default to on for backward-compatibility.
57503+
57504+config GRKERNSEC_TPE_GID
57505+ int
57506+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
57507+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
57508+
57509+config GRKERNSEC_TPE_UNTRUSTED_GID
57510+ int "GID for TPE-untrusted users"
57511+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
57512+ default 1005
57513+ help
57514+ Setting this GID determines what group TPE restrictions will be
57515+ *enabled* for. If the sysctl option is enabled, a sysctl option
57516+ with name "tpe_gid" is created.
57517+
57518+config GRKERNSEC_TPE_TRUSTED_GID
57519+ int "GID for TPE-trusted users"
57520+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
57521+ default 1005
57522+ help
57523+ Setting this GID determines what group TPE restrictions will be
57524+ *disabled* for. If the sysctl option is enabled, a sysctl option
57525+ with name "tpe_gid" is created.
57526+
57527+endmenu
57528+menu "Network Protections"
57529+depends on GRKERNSEC
57530+
57531+config GRKERNSEC_RANDNET
57532+ bool "Larger entropy pools"
57533+ default y if GRKERNSEC_CONFIG_AUTO
57534+ help
57535+ If you say Y here, the entropy pools used for many features of Linux
57536+ and grsecurity will be doubled in size. Since several grsecurity
57537+ features use additional randomness, it is recommended that you say Y
57538+ here. Saying Y here has a similar effect to modifying
57539+ /proc/sys/kernel/random/poolsize.
57540+
57541+config GRKERNSEC_BLACKHOLE
57542+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
57543+ default y if GRKERNSEC_CONFIG_AUTO
57544+ depends on NET
57545+ help
57546+ If you say Y here, neither TCP resets nor ICMP
57547+ destination-unreachable packets will be sent in response to packets
57548+ sent to ports for which no associated listening process exists.
57549+ This feature supports both IPv4 and IPv6 and exempts the
57550+ loopback interface from blackholing. Enabling this feature
57551+ makes a host more resilient to DoS attacks and reduces network
57552+ visibility against scanners.
57553+
57554+ The blackhole feature as-implemented is equivalent to the FreeBSD
57555+ blackhole feature, as it prevents RST responses to all packets, not
57556+ just SYNs. Under most application behavior this causes no
57557+ problems, but applications (like haproxy) may not close certain
57558+ connections in a way that cleanly terminates them on the remote
57559+ end, leaving the remote host in LAST_ACK state. Because of this
57560+ side-effect and to prevent intentional LAST_ACK DoSes, this
57561+ feature also adds automatic mitigation against such attacks.
57562+ The mitigation drastically reduces the amount of time a socket
57563+ can spend in LAST_ACK state. If you're using haproxy and not
57564+ all servers it connects to have this option enabled, consider
57565+ disabling this feature on the haproxy host.
57566+
57567+ If the sysctl option is enabled, two sysctl options with names
57568+ "ip_blackhole" and "lastack_retries" will be created.
57569+ While "ip_blackhole" takes the standard zero/non-zero on/off
57570+ toggle, "lastack_retries" uses the same kinds of values as
57571+ "tcp_retries1" and "tcp_retries2". The default value of 4
57572+ prevents a socket from lasting more than 45 seconds in LAST_ACK
57573+ state.
57574+
57575+config GRKERNSEC_NO_SIMULT_CONNECT
57576+ bool "Disable TCP Simultaneous Connect"
57577+ default y if GRKERNSEC_CONFIG_AUTO
57578+ depends on NET
57579+ help
57580+ If you say Y here, a feature by Willy Tarreau will be enabled that
57581+ removes a weakness in Linux's strict implementation of TCP that
57582+ allows two clients to connect to each other without either entering
57583+ a listening state. The weakness allows an attacker to easily prevent
57584+ a client from connecting to a known server provided the source port
57585+ for the connection is guessed correctly.
57586+
57587+ As the weakness could be used to prevent an antivirus or IPS from
57588+ fetching updates, or prevent an SSL gateway from fetching a CRL,
57589+ it should be eliminated by enabling this option. Though Linux is
57590+ one of few operating systems supporting simultaneous connect, it
57591+ has no legitimate use in practice and is rarely supported by firewalls.
57592+
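Simultaneous connect itself is easy to reproduce: both peers bind a fixed
source port and call connect() toward each other with no listener anywhere.
When the SYNs cross, stock Linux completes the connection; with this option
the attempt fails. A one-sided sketch (not part of the patch; addresses and
ports are illustrative, the peer runs the mirror image with ports reversed):

        #include <arpa/inet.h>
        #include <netinet/in.h>
        #include <sys/socket.h>

        int simultaneous_open(const char *peer_ip)
        {
                int fd = socket(AF_INET, SOCK_STREAM, 0);
                struct sockaddr_in me = { .sin_family = AF_INET,
                                          .sin_port = htons(4000) };
                struct sockaddr_in peer = { .sin_family = AF_INET,
                                            .sin_port = htons(4001) };

                inet_pton(AF_INET, peer_ip, &peer.sin_addr);
                /* Fix our source port so the peer knows the 4-tuple
                 * to aim its own connect() at. */
                bind(fd, (struct sockaddr *)&me, sizeof(me));
                return connect(fd, (struct sockaddr *)&peer, sizeof(peer));
        }
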
57593+config GRKERNSEC_SOCKET
57594+ bool "Socket restrictions"
57595+ depends on NET
57596+ help
57597+ If you say Y here, you will be able to choose from several options.
57598+ If you assign a GID on your system and add it to the supplementary
57599+ groups of users you want to restrict socket access to, this patch
57600+ will perform up to three things, based on the option(s) you choose.
57601+
57602+config GRKERNSEC_SOCKET_ALL
57603+ bool "Deny any sockets to group"
57604+ depends on GRKERNSEC_SOCKET
57605+ help
57606+ If you say Y here, you will be able to choose a GID whose users will
57607+ be unable to connect to other hosts from your machine or run server
57608+ applications from your machine. If the sysctl option is enabled, a
57609+ sysctl option with name "socket_all" is created.
57610+
57611+config GRKERNSEC_SOCKET_ALL_GID
57612+ int "GID to deny all sockets for"
57613+ depends on GRKERNSEC_SOCKET_ALL
57614+ default 1004
57615+ help
57616+ Here you can choose the GID to disable socket access for. Remember to
57617+ add the users you want socket access disabled for to the GID
57618+ specified here. If the sysctl option is enabled, a sysctl option
57619+ with name "socket_all_gid" is created.
57620+
57621+config GRKERNSEC_SOCKET_CLIENT
57622+ bool "Deny client sockets to group"
57623+ depends on GRKERNSEC_SOCKET
57624+ help
57625+ If you say Y here, you will be able to choose a GID whose users will
57626+ be unable to connect to other hosts from your machine, but will be
57627+ able to run servers. If this option is enabled, all users in the group
57628+ you specify will have to use passive mode when initiating ftp transfers
57629+ from the shell on your machine. If the sysctl option is enabled, a
57630+ sysctl option with name "socket_client" is created.
57631+
57632+config GRKERNSEC_SOCKET_CLIENT_GID
57633+ int "GID to deny client sockets for"
57634+ depends on GRKERNSEC_SOCKET_CLIENT
57635+ default 1003
57636+ help
57637+ Here you can choose the GID to disable client socket access for.
57638+ Remember to add the users you want client socket access disabled for to
57639+ the GID specified here. If the sysctl option is enabled, a sysctl
57640+ option with name "socket_client_gid" is created.
57641+
57642+config GRKERNSEC_SOCKET_SERVER
57643+ bool "Deny server sockets to group"
57644+ depends on GRKERNSEC_SOCKET
57645+ help
57646+ If you say Y here, you will be able to choose a GID whose users will
57647+ be unable to run server applications from your machine. If the sysctl
57648+ option is enabled, a sysctl option with name "socket_server" is created.
57649+
57650+config GRKERNSEC_SOCKET_SERVER_GID
57651+ int "GID to deny server sockets for"
57652+ depends on GRKERNSEC_SOCKET_SERVER
57653+ default 1002
57654+ help
57655+ Here you can choose the GID to disable server socket access for.
57656+ Remember to add the users you want server socket access disabled for to
57657+ the GID specified here. If the sysctl option is enabled, a sysctl
57658+ option with name "socket_server_gid" is created.
57659+
57660+endmenu
57661+menu "Sysctl Support"
57662+depends on GRKERNSEC && SYSCTL
57663+
57664+config GRKERNSEC_SYSCTL
57665+ bool "Sysctl support"
57666+ default y if GRKERNSEC_CONFIG_AUTO
57667+ help
57668+ If you say Y here, you will be able to change the options that
57669+ grsecurity runs with at bootup, without having to recompile your
57670+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57671+ to enable (1) or disable (0) various features. All the sysctl entries
57672+ are mutable until the "grsec_lock" entry is set to a non-zero value.
57673+ All features enabled in the kernel configuration are disabled at boot
57674+ if you do not say Y to the "Turn on features by default" option.
57675+ All options should be set at startup, and the grsec_lock entry should
57676+ be set to a non-zero value after all the options are set.
57677+ *THIS IS EXTREMELY IMPORTANT*
57678+
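A sketch of the startup sequence the help text calls for (not part of the
patch): enable the desired features first, then set grsec_lock last. The
entry names used are ones documented elsewhere in this menu:

        #include <stdio.h>

        static int grsec_set(const char *entry, const char *val)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path),
                         "/proc/sys/kernel/grsecurity/%s", entry);
                f = fopen(path, "w");
                if (!f)
                        return -1;
                fputs(val, f);
                return fclose(f);
        }

        int main(void)
        {
                grsec_set("deter_bruteforce", "1");
                grsec_set("audit_mount", "1");
                /* Lock last: after this, the grsecurity sysctl entries
                 * can no longer be changed until reboot. */
                return grsec_set("grsec_lock", "1");
        }
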
57679+config GRKERNSEC_SYSCTL_DISTRO
57680+ bool "Extra sysctl support for distro makers (READ HELP)"
57681+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57682+ help
57683+ If you say Y here, additional sysctl options will be created
57684+ for features that affect processes running as root. Therefore,
57685+ it is critical when using this option that the grsec_lock entry be
57686+ enabled after boot. Only distros that ship prebuilt kernel packages
57687+ with this option enabled and that can ensure grsec_lock is enabled
57688+ after boot should use this option.
57689+ *Failure to set grsec_lock after boot makes all grsec features
57690+ this option covers useless*
57691+
57692+ Currently this option creates the following sysctl entries:
57693+ "Disable Privileged I/O": "disable_priv_io"
57694+
57695+config GRKERNSEC_SYSCTL_ON
57696+ bool "Turn on features by default"
57697+ default y if GRKERNSEC_CONFIG_AUTO
57698+ depends on GRKERNSEC_SYSCTL
57699+ help
57700+ If you say Y here, instead of having all features enabled in the
57701+ kernel configuration disabled at boot time, the features will be
57702+ enabled at boot time. It is recommended you say Y here unless
57703+ there is some reason you would want all sysctl-tunable features to
57704+ be disabled by default. As mentioned elsewhere, it is important
57705+ to enable the grsec_lock entry once you have finished modifying
57706+ the sysctl entries.
57707+
57708+endmenu
57709+menu "Logging Options"
57710+depends on GRKERNSEC
57711+
57712+config GRKERNSEC_FLOODTIME
57713+ int "Seconds in between log messages (minimum)"
57714+ default 10
57715+ help
57716+ This option allows you to enforce the minimum number of seconds between
57717+ grsecurity log messages. The default should be suitable for most
57718+ people; however, if you choose to change it, choose a value small enough
57719+ to allow informative logs to be produced, but large enough to
57720+ prevent flooding.
57721+
57722+config GRKERNSEC_FLOODBURST
57723+ int "Number of messages in a burst (maximum)"
57724+ default 6
57725+ help
57726+ This option allows you to choose the maximum number of messages allowed
57727+ within the flood time interval you chose in a separate option. The
57728+ default should be suitable for most people; however, if you find that
57729+ many of your logs are being interpreted as flooding, you may want to
57730+ raise this value.
57731+
57732+endmenu
57733diff --git a/grsecurity/Makefile b/grsecurity/Makefile
57734new file mode 100644
57735index 0000000..1b9afa9
57736--- /dev/null
57737+++ b/grsecurity/Makefile
57738@@ -0,0 +1,38 @@
57739+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57740+# during 2001-2009 it was completely redesigned by Brad Spengler
57741+# into an RBAC system
57742+#
57743+# All code in this directory and various hooks inserted throughout the kernel
57744+# are copyright Brad Spengler - Open Source Security, Inc., and released
57745+# under the GPL v2 or higher
57746+
57747+KBUILD_CFLAGS += -Werror
57748+
57749+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57750+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
57751+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57752+
57753+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57754+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57755+ gracl_learn.o grsec_log.o
57756+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57757+
57758+ifdef CONFIG_NET
57759+obj-y += grsec_sock.o
57760+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57761+endif
57762+
57763+ifndef CONFIG_GRKERNSEC
57764+obj-y += grsec_disabled.o
57765+endif
57766+
57767+ifdef CONFIG_GRKERNSEC_HIDESYM
57768+extra-y := grsec_hidesym.o
57769+$(obj)/grsec_hidesym.o:
57770+ @-chmod -f 500 /boot
57771+ @-chmod -f 500 /lib/modules
57772+ @-chmod -f 500 /lib64/modules
57773+ @-chmod -f 500 /lib32/modules
57774+ @-chmod -f 700 .
57775+ @echo ' grsec: protected kernel image paths'
57776+endif
57777diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
57778new file mode 100644
57779index 0000000..b306b36
57780--- /dev/null
57781+++ b/grsecurity/gracl.c
57782@@ -0,0 +1,4071 @@
57783+#include <linux/kernel.h>
57784+#include <linux/module.h>
57785+#include <linux/sched.h>
57786+#include <linux/mm.h>
57787+#include <linux/file.h>
57788+#include <linux/fs.h>
57789+#include <linux/namei.h>
57790+#include <linux/mount.h>
57791+#include <linux/tty.h>
57792+#include <linux/proc_fs.h>
57793+#include <linux/lglock.h>
57794+#include <linux/slab.h>
57795+#include <linux/vmalloc.h>
57796+#include <linux/types.h>
57797+#include <linux/sysctl.h>
57798+#include <linux/netdevice.h>
57799+#include <linux/ptrace.h>
57800+#include <linux/gracl.h>
57801+#include <linux/gralloc.h>
57802+#include <linux/security.h>
57803+#include <linux/grinternal.h>
57804+#include <linux/pid_namespace.h>
57805+#include <linux/stop_machine.h>
57806+#include <linux/fdtable.h>
57807+#include <linux/percpu.h>
57809+#include <linux/hugetlb.h>
57810+#include <linux/posix-timers.h>
57811+#include "../fs/mount.h"
57812+
57813+#include <asm/uaccess.h>
57814+#include <asm/errno.h>
57815+#include <asm/mman.h>
57816+
57817+extern struct lglock vfsmount_lock;
57818+
57819+static struct acl_role_db acl_role_set;
57820+static struct name_db name_set;
57821+static struct inodev_db inodev_set;
57822+
57823+/* for keeping track of userspace pointers used for subjects, so we
57824+ can share references in the kernel as well
57825+*/
57826+
57827+static struct path real_root;
57828+
57829+static struct acl_subj_map_db subj_map_set;
57830+
57831+static struct acl_role_label *default_role;
57832+
57833+static struct acl_role_label *role_list;
57834+
57835+static u16 acl_sp_role_value;
57836+
57837+extern char *gr_shared_page[4];
57838+static DEFINE_MUTEX(gr_dev_mutex);
57839+DEFINE_RWLOCK(gr_inode_lock);
57840+
57841+struct gr_arg *gr_usermode;
57842+
57843+static unsigned int gr_status __read_only = GR_STATUS_INIT;
57844+
57845+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
57846+extern void gr_clear_learn_entries(void);
57847+
57848+unsigned char *gr_system_salt;
57849+unsigned char *gr_system_sum;
57850+
57851+static struct sprole_pw **acl_special_roles = NULL;
57852+static __u16 num_sprole_pws = 0;
57853+
57854+static struct acl_role_label *kernel_role = NULL;
57855+
57856+static unsigned int gr_auth_attempts = 0;
57857+static unsigned long gr_auth_expires = 0UL;
57858+
57859+#ifdef CONFIG_NET
57860+extern struct vfsmount *sock_mnt;
57861+#endif
57862+
57863+extern struct vfsmount *pipe_mnt;
57864+extern struct vfsmount *shm_mnt;
57865+
57866+#ifdef CONFIG_HUGETLBFS
57867+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
57868+#endif
57869+
57870+static struct acl_object_label *fakefs_obj_rw;
57871+static struct acl_object_label *fakefs_obj_rwx;
57872+
57873+extern int gr_init_uidset(void);
57874+extern void gr_free_uidset(void);
57875+extern void gr_remove_uid(uid_t uid);
57876+extern int gr_find_uid(uid_t uid);
57877+
57878+__inline__ int
57879+gr_acl_is_enabled(void)
57880+{
57881+ return (gr_status & GR_READY);
57882+}
57883+
57884+#ifdef CONFIG_BTRFS_FS
57885+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
57886+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
57887+#endif
57888+
57889+static inline dev_t __get_dev(const struct dentry *dentry)
57890+{
57891+#ifdef CONFIG_BTRFS_FS
57892+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
57893+ return get_btrfs_dev_from_inode(dentry->d_inode);
57894+ else
57895+#endif
57896+ return dentry->d_inode->i_sb->s_dev;
57897+}
57898+
57899+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57900+{
57901+ return __get_dev(dentry);
57902+}
57903+
57904+static char gr_task_roletype_to_char(struct task_struct *task)
57905+{
57906+ switch (task->role->roletype &
57907+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
57908+ GR_ROLE_SPECIAL)) {
57909+ case GR_ROLE_DEFAULT:
57910+ return 'D';
57911+ case GR_ROLE_USER:
57912+ return 'U';
57913+ case GR_ROLE_GROUP:
57914+ return 'G';
57915+ case GR_ROLE_SPECIAL:
57916+ return 'S';
57917+ }
57918+
57919+ return 'X';
57920+}
57921+
57922+char gr_roletype_to_char(void)
57923+{
57924+ return gr_task_roletype_to_char(current);
57925+}
57926+
57927+__inline__ int
57928+gr_acl_tpe_check(void)
57929+{
57930+ if (unlikely(!(gr_status & GR_READY)))
57931+ return 0;
57932+ if (current->role->roletype & GR_ROLE_TPE)
57933+ return 1;
57934+ else
57935+ return 0;
57936+}
57937+
57938+int
57939+gr_handle_rawio(const struct inode *inode)
57940+{
57941+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57942+ if (inode && S_ISBLK(inode->i_mode) &&
57943+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57944+ !capable(CAP_SYS_RAWIO))
57945+ return 1;
57946+#endif
57947+ return 0;
57948+}
57949+
57950+static int
57951+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
57952+{
57953+ if (likely(lena != lenb))
57954+ return 0;
57955+
57956+ return !memcmp(a, b, lena);
57957+}
57958+
57959+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
57960+{
57961+ *buflen -= namelen;
57962+ if (*buflen < 0)
57963+ return -ENAMETOOLONG;
57964+ *buffer -= namelen;
57965+ memcpy(*buffer, str, namelen);
57966+ return 0;
57967+}
57968+
57969+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
57970+{
57971+ return prepend(buffer, buflen, name->name, name->len);
57972+}
57973+
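+/* Build the path right-to-left: walk from @path up toward @root,
+ * prepending one name component (plus a '/') per iteration, and hop
+ * from the root of each mount to its mountpoint's parent. This mirrors
+ * the prepend_path() logic in fs/dcache.c. */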
57974+static int prepend_path(const struct path *path, struct path *root,
57975+ char **buffer, int *buflen)
57976+{
57977+ struct dentry *dentry = path->dentry;
57978+ struct vfsmount *vfsmnt = path->mnt;
57979+ struct mount *mnt = real_mount(vfsmnt);
57980+ bool slash = false;
57981+ int error = 0;
57982+
57983+ while (dentry != root->dentry || vfsmnt != root->mnt) {
57984+ struct dentry * parent;
57985+
57986+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
57987+ /* Global root? */
57988+ if (!mnt_has_parent(mnt)) {
57989+ goto out;
57990+ }
57991+ dentry = mnt->mnt_mountpoint;
57992+ mnt = mnt->mnt_parent;
57993+ vfsmnt = &mnt->mnt;
57994+ continue;
57995+ }
57996+ parent = dentry->d_parent;
57997+ prefetch(parent);
57998+ spin_lock(&dentry->d_lock);
57999+ error = prepend_name(buffer, buflen, &dentry->d_name);
58000+ spin_unlock(&dentry->d_lock);
58001+ if (!error)
58002+ error = prepend(buffer, buflen, "/", 1);
58003+ if (error)
58004+ break;
58005+
58006+ slash = true;
58007+ dentry = parent;
58008+ }
58009+
58010+out:
58011+ if (!error && !slash)
58012+ error = prepend(buffer, buflen, "/", 1);
58013+
58014+ return error;
58015+}
58016+
58017+/* this must be called with vfsmount_lock and rename_lock held */
58018+
58019+static char *__our_d_path(const struct path *path, struct path *root,
58020+ char *buf, int buflen)
58021+{
58022+ char *res = buf + buflen;
58023+ int error;
58024+
58025+ prepend(&res, &buflen, "\0", 1);
58026+ error = prepend_path(path, root, &res, &buflen);
58027+ if (error)
58028+ return ERR_PTR(error);
58029+
58030+ return res;
58031+}
58032+
58033+static char *
58034+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
58035+{
58036+ char *retval;
58037+
58038+ retval = __our_d_path(path, root, buf, buflen);
58039+ if (unlikely(IS_ERR(retval)))
58040+ retval = strcpy(buf, "<path too long>");
58041+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
58042+ retval[1] = '\0';
58043+
58044+ return retval;
58045+}
58046+
58047+static char *
58048+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58049+ char *buf, int buflen)
58050+{
58051+ struct path path;
58052+ char *res;
58053+
58054+ path.dentry = (struct dentry *)dentry;
58055+ path.mnt = (struct vfsmount *)vfsmnt;
58056+
58057+ /* we can use real_root.dentry, real_root.mnt, because this is only called
58058+ by the RBAC system */
58059+ res = gen_full_path(&path, &real_root, buf, buflen);
58060+
58061+ return res;
58062+}
58063+
58064+static char *
58065+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58066+ char *buf, int buflen)
58067+{
58068+ char *res;
58069+ struct path path;
58070+ struct path root;
58071+ struct task_struct *reaper = init_pid_ns.child_reaper;
58072+
58073+ path.dentry = (struct dentry *)dentry;
58074+ path.mnt = (struct vfsmount *)vfsmnt;
58075+
58076+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
58077+ get_fs_root(reaper->fs, &root);
58078+
58079+ br_read_lock(&vfsmount_lock);
58080+ write_seqlock(&rename_lock);
58081+ res = gen_full_path(&path, &root, buf, buflen);
58082+ write_sequnlock(&rename_lock);
58083+ br_read_unlock(&vfsmount_lock);
58084+
58085+ path_put(&root);
58086+ return res;
58087+}
58088+
58089+static char *
58090+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58091+{
58092+ char *ret;
58093+ br_read_lock(&vfsmount_lock);
58094+ write_seqlock(&rename_lock);
58095+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58096+ PAGE_SIZE);
58097+ write_sequnlock(&rename_lock);
58098+ br_read_unlock(&vfsmount_lock);
58099+ return ret;
58100+}
58101+
58102+static char *
58103+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58104+{
58105+ char *ret;
58106+ char *buf;
58107+ int buflen;
58108+
58109+ br_read_lock(&vfsmount_lock);
58110+ write_seqlock(&rename_lock);
58111+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58112+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
58113+ buflen = (int)(ret - buf);
58114+ if (buflen >= 5)
58115+ prepend(&ret, &buflen, "/proc", 5);
58116+ else
58117+ ret = strcpy(buf, "<path too long>");
58118+ write_sequnlock(&rename_lock);
58119+ br_read_unlock(&vfsmount_lock);
58120+ return ret;
58121+}
58122+
58123+char *
58124+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
58125+{
58126+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58127+ PAGE_SIZE);
58128+}
58129+
58130+char *
58131+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
58132+{
58133+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
58134+ PAGE_SIZE);
58135+}
58136+
58137+char *
58138+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
58139+{
58140+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
58141+ PAGE_SIZE);
58142+}
58143+
58144+char *
58145+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
58146+{
58147+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
58148+ PAGE_SIZE);
58149+}
58150+
58151+char *
58152+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
58153+{
58154+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
58155+ PAGE_SIZE);
58156+}
58157+
58158+__inline__ __u32
58159+to_gr_audit(const __u32 reqmode)
58160+{
58161+ /* masks off auditable permission flags, then shifts them to create
58162+ auditing flags, and adds the special case of append auditing if
58163+ we're requesting write */
58164+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
58165+}
58166+
58167+struct acl_subject_label *
58168+lookup_subject_map(const struct acl_subject_label *userp)
58169+{
58170+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
58171+ struct subject_map *match;
58172+
58173+ match = subj_map_set.s_hash[index];
58174+
58175+ while (match && match->user != userp)
58176+ match = match->next;
58177+
58178+ if (match != NULL)
58179+ return match->kernel;
58180+ else
58181+ return NULL;
58182+}
58183+
58184+static void
58185+insert_subj_map_entry(struct subject_map *subjmap)
58186+{
58187+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
58188+ struct subject_map **curr;
58189+
58190+ subjmap->prev = NULL;
58191+
58192+ curr = &subj_map_set.s_hash[index];
58193+ if (*curr != NULL)
58194+ (*curr)->prev = subjmap;
58195+
58196+ subjmap->next = *curr;
58197+ *curr = subjmap;
58198+
58199+ return;
58200+}
58201+
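+/* Resolve the role for a task: try a user (or user-domain) role for
+ * @uid first, then fall back to a group (or group-domain) role for
+ * @gid, and finally to the default role. A role that carries an
+ * allowed_ips list only matches when the task's recorded source IP
+ * falls within one of the listed netmasks. */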
58202+static struct acl_role_label *
58203+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
58204+ const gid_t gid)
58205+{
58206+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
58207+ struct acl_role_label *match;
58208+ struct role_allowed_ip *ipp;
58209+ unsigned int x;
58210+ u32 curr_ip = task->signal->curr_ip;
58211+
58212+ task->signal->saved_ip = curr_ip;
58213+
58214+ match = acl_role_set.r_hash[index];
58215+
58216+ while (match) {
58217+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
58218+ for (x = 0; x < match->domain_child_num; x++) {
58219+ if (match->domain_children[x] == uid)
58220+ goto found;
58221+ }
58222+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
58223+ break;
58224+ match = match->next;
58225+ }
58226+found:
58227+ if (match == NULL) {
58228+ try_group:
58229+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
58230+ match = acl_role_set.r_hash[index];
58231+
58232+ while (match) {
58233+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
58234+ for (x = 0; x < match->domain_child_num; x++) {
58235+ if (match->domain_children[x] == gid)
58236+ goto found2;
58237+ }
58238+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
58239+ break;
58240+ match = match->next;
58241+ }
58242+found2:
58243+ if (match == NULL)
58244+ match = default_role;
58245+ if (match->allowed_ips == NULL)
58246+ return match;
58247+ else {
58248+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58249+ if (likely
58250+ ((ntohl(curr_ip) & ipp->netmask) ==
58251+ (ntohl(ipp->addr) & ipp->netmask)))
58252+ return match;
58253+ }
58254+ match = default_role;
58255+ }
58256+ } else if (match->allowed_ips == NULL) {
58257+ return match;
58258+ } else {
58259+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58260+ if (likely
58261+ ((ntohl(curr_ip) & ipp->netmask) ==
58262+ (ntohl(ipp->addr) & ipp->netmask)))
58263+ return match;
58264+ }
58265+ goto try_group;
58266+ }
58267+
58268+ return match;
58269+}
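The allowed-IP walks above come down to a subnet membership test. Here it is as a standalone sketch; as in the code, the current and configured addresses are byte-swapped with ntohl() while the netmask is used as stored, so the mask being kept in host order is an assumption carried over from how the kernel expression is written.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* The comparison performed for each role_allowed_ip entry. */
static int ip_allowed(uint32_t curr_ip_be, uint32_t addr_be, uint32_t netmask)
{
	return (ntohl(curr_ip_be) & netmask) == (ntohl(addr_be) & netmask);
}

int main(void)
{
	struct in_addr peer, net;

	inet_aton("192.168.1.42", &peer);
	inet_aton("192.168.1.0", &net);
	printf("%d\n", ip_allowed(peer.s_addr, net.s_addr, 0xffffff00u)); /* 1: inside the /24 */

	inet_aton("10.0.0.5", &peer);
	printf("%d\n", ip_allowed(peer.s_addr, net.s_addr, 0xffffff00u)); /* 0: outside it */
	return 0;
}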
58270+
58271+struct acl_subject_label *
58272+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
58273+ const struct acl_role_label *role)
58274+{
58275+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58276+ struct acl_subject_label *match;
58277+
58278+ match = role->subj_hash[index];
58279+
58280+ while (match && (match->inode != ino || match->device != dev ||
58281+ (match->mode & GR_DELETED))) {
58282+ match = match->next;
58283+ }
58284+
58285+ if (match && !(match->mode & GR_DELETED))
58286+ return match;
58287+ else
58288+ return NULL;
58289+}
58290+
58291+struct acl_subject_label *
58292+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
58293+ const struct acl_role_label *role)
58294+{
58295+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58296+ struct acl_subject_label *match;
58297+
58298+ match = role->subj_hash[index];
58299+
58300+ while (match && (match->inode != ino || match->device != dev ||
58301+ !(match->mode & GR_DELETED))) {
58302+ match = match->next;
58303+ }
58304+
58305+ if (match && (match->mode & GR_DELETED))
58306+ return match;
58307+ else
58308+ return NULL;
58309+}
58310+
58311+static struct acl_object_label *
58312+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
58313+ const struct acl_subject_label *subj)
58314+{
58315+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58316+ struct acl_object_label *match;
58317+
58318+ match = subj->obj_hash[index];
58319+
58320+ while (match && (match->inode != ino || match->device != dev ||
58321+ (match->mode & GR_DELETED))) {
58322+ match = match->next;
58323+ }
58324+
58325+ if (match && !(match->mode & GR_DELETED))
58326+ return match;
58327+ else
58328+ return NULL;
58329+}
58330+
58331+static struct acl_object_label *
58332+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
58333+ const struct acl_subject_label *subj)
58334+{
58335+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58336+ struct acl_object_label *match;
58337+
58338+ match = subj->obj_hash[index];
58339+
58340+ while (match && (match->inode != ino || match->device != dev ||
58341+ !(match->mode & GR_DELETED))) {
58342+ match = match->next;
58343+ }
58344+
58345+ if (match && (match->mode & GR_DELETED))
58346+ return match; /* prefer the deleted object: a create reuses its label */
58347+
58348+ match = subj->obj_hash[index];
58349+
58350+ while (match && (match->inode != ino || match->device != dev ||
58351+ (match->mode & GR_DELETED))) {
58352+ match = match->next;
58353+ }
58354+
58355+ if (match && !(match->mode & GR_DELETED))
58356+ return match;
58357+ else
58358+ return NULL;
58359+}
58360+
58361+static struct name_entry *
58362+lookup_name_entry(const char *name)
58363+{
58364+ unsigned int len = strlen(name);
58365+ unsigned int key = full_name_hash(name, len);
58366+ unsigned int index = key % name_set.n_size;
58367+ struct name_entry *match;
58368+
58369+ match = name_set.n_hash[index];
58370+
58371+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
58372+ match = match->next;
58373+
58374+ return match;
58375+}
58376+
58377+static struct name_entry *
58378+lookup_name_entry_create(const char *name)
58379+{
58380+ unsigned int len = strlen(name);
58381+ unsigned int key = full_name_hash(name, len);
58382+ unsigned int index = key % name_set.n_size;
58383+ struct name_entry *match;
58384+
58385+ match = name_set.n_hash[index];
58386+
58387+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58388+ !match->deleted))
58389+ match = match->next;
58390+
58391+ if (match && match->deleted)
58392+ return match; /* prefer a deleted name entry: the create reuses it */
58393+
58394+ match = name_set.n_hash[index];
58395+
58396+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58397+ match->deleted))
58398+ match = match->next;
58399+
58400+ if (match && !match->deleted)
58401+ return match;
58402+ else
58403+ return NULL;
58404+}
58405+
58406+static struct inodev_entry *
58407+lookup_inodev_entry(const ino_t ino, const dev_t dev)
58408+{
58409+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
58410+ struct inodev_entry *match;
58411+
58412+ match = inodev_set.i_hash[index];
58413+
58414+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
58415+ match = match->next;
58416+
58417+ return match;
58418+}
58419+
58420+static void
58421+insert_inodev_entry(struct inodev_entry *entry)
58422+{
58423+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
58424+ inodev_set.i_size);
58425+ struct inodev_entry **curr;
58426+
58427+ entry->prev = NULL;
58428+
58429+ curr = &inodev_set.i_hash[index];
58430+ if (*curr != NULL)
58431+ (*curr)->prev = entry;
58432+
58433+ entry->next = *curr;
58434+ *curr = entry;
58435+
58436+ return;
58437+}
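insert_subj_map_entry() and insert_inodev_entry() above, like the insert_acl_*_label() helpers further down, share one pattern: head insertion into a doubly linked bucket chain, fixing up the old head's back pointer. A minimal standalone rendering with a toy node type:

#include <stdio.h>

struct node {
	int key;
	struct node *prev, *next;
};

/* Push 'entry' onto the front of the bucket chain, exactly as the
   kernel helpers do. */
static void bucket_insert(struct node **bucket, struct node *entry)
{
	entry->prev = NULL;
	if (*bucket != NULL)
		(*bucket)->prev = entry;
	entry->next = *bucket;
	*bucket = entry;
}

int main(void)
{
	struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL };
	struct node *bucket = NULL;
	struct node *n;

	bucket_insert(&bucket, &a);
	bucket_insert(&bucket, &b);
	for (n = bucket; n; n = n->next)
		printf("%d ", n->key);	/* prints: 2 1 */
	printf("\n");
	return 0;
}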
58438+
58439+static void
58440+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
58441+{
58442+ unsigned int index =
58443+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
58444+ struct acl_role_label **curr;
58445+ struct acl_role_label *tmp, *tmp2;
58446+
58447+ curr = &acl_role_set.r_hash[index];
58448+
58449+ /* simple case, slot is empty, just set it to our role */
58450+ if (*curr == NULL) {
58451+ *curr = role;
58452+ } else {
58453+ /* example:
58454+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
58455+ 2 -> 3
58456+ */
58457+ /* first check to see if we can already be reached via this slot */
58458+ tmp = *curr;
58459+ while (tmp && tmp != role)
58460+ tmp = tmp->next;
58461+ if (tmp == role) {
58462+ /* we don't need to add ourselves to this slot's chain */
58463+ return;
58464+ }
58465+ /* we need to add ourselves to this chain, two cases */
58466+ if (role->next == NULL) {
58467+ /* simple case, append the current chain to our role */
58468+ role->next = *curr;
58469+ *curr = role;
58470+ } else {
58471+ /* 1 -> 2 -> 3 -> 4
58472+ 2 -> 3 -> 4
58473+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
58474+ */
58475+ /* trickier case: walk our role's chain until we find
58476+ the role for the start of the current slot's chain */
58477+ tmp = role;
58478+ tmp2 = *curr;
58479+ while (tmp->next && tmp->next != tmp2)
58480+ tmp = tmp->next;
58481+ if (tmp->next == tmp2) {
58482+ /* from example above, we found 3, so just
58483+ replace this slot's chain with ours */
58484+ *curr = role;
58485+ } else {
58486+ /* we didn't find a subset of our role's chain
58487+ in the current slot's chain, so append their
58488+ chain to ours, and set us as the first role in
58489+ the slot's chain
58490+
58491+ we could fold this case with the case above,
58492+ but making it explicit for clarity
58493+ */
58494+ tmp->next = tmp2;
58495+ *curr = role;
58496+ }
58497+ }
58498+ }
58499+
58500+ return;
58501+}
58502+
58503+static void
58504+insert_acl_role_label(struct acl_role_label *role)
58505+{
58506+ int i;
58507+
58508+ if (role_list == NULL) {
58509+ role_list = role;
58510+ role->prev = NULL;
58511+ } else {
58512+ role->prev = role_list;
58513+ role_list = role;
58514+ }
58515+
58516+ /* used for hash chains */
58517+ role->next = NULL;
58518+
58519+ if (role->roletype & GR_ROLE_DOMAIN) {
58520+ for (i = 0; i < role->domain_child_num; i++)
58521+ __insert_acl_role_label(role, role->domain_children[i]);
58522+ } else
58523+ __insert_acl_role_label(role, role->uidgid);
58524+}
58525+
58526+static int
58527+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
58528+{
58529+ struct name_entry **curr, *nentry;
58530+ struct inodev_entry *ientry;
58531+ unsigned int len = strlen(name);
58532+ unsigned int key = full_name_hash(name, len);
58533+ unsigned int index = key % name_set.n_size;
58534+
58535+ curr = &name_set.n_hash[index];
58536+
58537+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
58538+ curr = &((*curr)->next);
58539+
58540+ if (*curr != NULL)
58541+ return 1;
58542+
58543+ nentry = acl_alloc(sizeof (struct name_entry));
58544+ if (nentry == NULL)
58545+ return 0;
58546+ ientry = acl_alloc(sizeof (struct inodev_entry));
58547+ if (ientry == NULL)
58548+ return 0;
58549+ ientry->nentry = nentry;
58550+
58551+ nentry->key = key;
58552+ nentry->name = name;
58553+ nentry->inode = inode;
58554+ nentry->device = device;
58555+ nentry->len = len;
58556+ nentry->deleted = deleted;
58557+
58558+ nentry->prev = NULL;
58559+ curr = &name_set.n_hash[index];
58560+ if (*curr != NULL)
58561+ (*curr)->prev = nentry;
58562+ nentry->next = *curr;
58563+ *curr = nentry;
58564+
58565+ /* insert us into the table searchable by inode/dev */
58566+ insert_inodev_entry(ientry);
58567+
58568+ return 1;
58569+}
58570+
58571+static void
58572+insert_acl_obj_label(struct acl_object_label *obj,
58573+ struct acl_subject_label *subj)
58574+{
58575+ unsigned int index =
58576+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
58577+ struct acl_object_label **curr;
58578+
58579+
58580+ obj->prev = NULL;
58581+
58582+ curr = &subj->obj_hash[index];
58583+ if (*curr != NULL)
58584+ (*curr)->prev = obj;
58585+
58586+ obj->next = *curr;
58587+ *curr = obj;
58588+
58589+ return;
58590+}
58591+
58592+static void
58593+insert_acl_subj_label(struct acl_subject_label *obj,
58594+ struct acl_role_label *role)
58595+{
58596+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
58597+ struct acl_subject_label **curr;
58598+
58599+ obj->prev = NULL;
58600+
58601+ curr = &role->subj_hash[index];
58602+ if (*curr != NULL)
58603+ (*curr)->prev = obj;
58604+
58605+ obj->next = *curr;
58606+ *curr = obj;
58607+
58608+ return;
58609+}
58610+
58611+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
58612+
58613+static void *
58614+create_table(__u32 * len, int elementsize)
58615+{
58616+ unsigned int table_sizes[] = {
58617+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
58618+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
58619+ 4194301, 8388593, 16777213, 33554393, 67108859
58620+ };
58621+ void *newtable = NULL;
58622+ unsigned int pwr = 0;
58623+
58624+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
58625+ table_sizes[pwr] <= *len)
58626+ pwr++;
58627+
58628+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
58629+ return newtable;
58630+
58631+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
58632+ newtable =
58633+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
58634+ else
58635+ newtable = vmalloc(table_sizes[pwr] * elementsize);
58636+
58637+ *len = table_sizes[pwr];
58638+
58639+ return newtable;
58640+}
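create_table() sizes each chained hash table with the first prime strictly larger than the requested element count, which keeps the load factor (lambda) near 1 as the comment above notes: the primes roughly double at each step. The selection logic on its own as a runnable sketch; allocation, the multiplication-overflow guard, and the kmalloc()-within-a-page versus vmalloc() split are elided here.

#include <stdio.h>

static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
};

/* First prime strictly greater than len, or 0 if len is too large. */
static unsigned int pick_size(unsigned int len)
{
	unsigned int pwr = 0;

	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= len)
		pwr++;
	return table_sizes[pwr] > len ? table_sizes[pwr] : 0;
}

int main(void)
{
	printf("%u\n", pick_size(0));    /* 7 */
	printf("%u\n", pick_size(100));  /* 127 */
	printf("%u\n", pick_size(4093)); /* 8191 */
	return 0;
}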
58641+
58642+static int
58643+init_variables(const struct gr_arg *arg)
58644+{
58645+ struct task_struct *reaper = init_pid_ns.child_reaper;
58646+ unsigned int stacksize;
58647+
58648+ subj_map_set.s_size = arg->role_db.num_subjects;
58649+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
58650+ name_set.n_size = arg->role_db.num_objects;
58651+ inodev_set.i_size = arg->role_db.num_objects;
58652+
58653+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
58654+ !name_set.n_size || !inodev_set.i_size)
58655+ return 1;
58656+
58657+ if (!gr_init_uidset())
58658+ return 1;
58659+
58660+ /* set up the stack that holds allocation info */
58661+
58662+ stacksize = arg->role_db.num_pointers + 5;
58663+
58664+ if (!acl_alloc_stack_init(stacksize))
58665+ return 1;
58666+
58667+ /* grab reference for the real root dentry and vfsmount */
58668+ get_fs_root(reaper->fs, &real_root);
58669+
58670+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58671+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
58672+#endif
58673+
58674+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
58675+ if (fakefs_obj_rw == NULL)
58676+ return 1;
58677+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
58678+
58679+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
58680+ if (fakefs_obj_rwx == NULL)
58681+ return 1;
58682+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58683+
58684+ subj_map_set.s_hash =
58685+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
58686+ acl_role_set.r_hash =
58687+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
58688+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
58689+ inodev_set.i_hash =
58690+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
58691+
58692+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
58693+ !name_set.n_hash || !inodev_set.i_hash)
58694+ return 1;
58695+
58696+ memset(subj_map_set.s_hash, 0,
58697+ sizeof(struct subject_map *) * subj_map_set.s_size);
58698+ memset(acl_role_set.r_hash, 0,
58699+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
58700+ memset(name_set.n_hash, 0,
58701+ sizeof (struct name_entry *) * name_set.n_size);
58702+ memset(inodev_set.i_hash, 0,
58703+ sizeof (struct inodev_entry *) * inodev_set.i_size);
58704+
58705+ return 0;
58706+}
58707+
58708+/* free information not needed after startup;
58709+ currently the user->kernel pointer mappings for subjects
58710+*/
58711+
58712+static void
58713+free_init_variables(void)
58714+{
58715+ __u32 i;
58716+
58717+ if (subj_map_set.s_hash) {
58718+ for (i = 0; i < subj_map_set.s_size; i++) {
58719+ while (subj_map_set.s_hash[i]) { /* free the whole chain, not just its head */
58720+ struct subject_map *next = subj_map_set.s_hash[i]->next;
58721+ kfree(subj_map_set.s_hash[i]);
58722+ subj_map_set.s_hash[i] = next;
58723+ }
58724+ }
58725+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
58726+ PAGE_SIZE)
58727+ kfree(subj_map_set.s_hash);
58728+ else
58729+ vfree(subj_map_set.s_hash);
58730+ }
58731+
58732+ return;
58733+}
58734+
58735+static void
58736+free_variables(void)
58737+{
58738+ struct acl_subject_label *s;
58739+ struct acl_role_label *r;
58740+ struct task_struct *task, *task2;
58741+ unsigned int x;
58742+
58743+ gr_clear_learn_entries();
58744+
58745+ read_lock(&tasklist_lock);
58746+ do_each_thread(task2, task) {
58747+ task->acl_sp_role = 0;
58748+ task->acl_role_id = 0;
58749+ task->acl = NULL;
58750+ task->role = NULL;
58751+ } while_each_thread(task2, task);
58752+ read_unlock(&tasklist_lock);
58753+
58754+ /* release the reference to the real root dentry and vfsmount */
58755+ path_put(&real_root);
58756+ memset(&real_root, 0, sizeof(real_root));
58757+
58758+ /* free all object hash tables */
58759+
58760+ FOR_EACH_ROLE_START(r)
58761+ if (r->subj_hash == NULL)
58762+ goto next_role;
58763+ FOR_EACH_SUBJECT_START(r, s, x)
58764+ if (s->obj_hash == NULL)
58765+ break;
58766+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58767+ kfree(s->obj_hash);
58768+ else
58769+ vfree(s->obj_hash);
58770+ FOR_EACH_SUBJECT_END(s, x)
58771+ FOR_EACH_NESTED_SUBJECT_START(r, s)
58772+ if (s->obj_hash == NULL)
58773+ break;
58774+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58775+ kfree(s->obj_hash);
58776+ else
58777+ vfree(s->obj_hash);
58778+ FOR_EACH_NESTED_SUBJECT_END(s)
58779+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
58780+ kfree(r->subj_hash);
58781+ else
58782+ vfree(r->subj_hash);
58783+ r->subj_hash = NULL;
58784+next_role:
58785+ FOR_EACH_ROLE_END(r)
58786+
58787+ acl_free_all();
58788+
58789+ if (acl_role_set.r_hash) {
58790+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
58791+ PAGE_SIZE)
58792+ kfree(acl_role_set.r_hash);
58793+ else
58794+ vfree(acl_role_set.r_hash);
58795+ }
58796+ if (name_set.n_hash) {
58797+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
58798+ PAGE_SIZE)
58799+ kfree(name_set.n_hash);
58800+ else
58801+ vfree(name_set.n_hash);
58802+ }
58803+
58804+ if (inodev_set.i_hash) {
58805+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
58806+ PAGE_SIZE)
58807+ kfree(inodev_set.i_hash);
58808+ else
58809+ vfree(inodev_set.i_hash);
58810+ }
58811+
58812+ gr_free_uidset();
58813+
58814+ memset(&name_set, 0, sizeof (struct name_db));
58815+ memset(&inodev_set, 0, sizeof (struct inodev_db));
58816+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
58817+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
58818+
58819+ default_role = NULL;
58820+ kernel_role = NULL;
58821+ role_list = NULL;
58822+
58823+ return;
58824+}
58825+
58826+static __u32
58827+count_user_objs(struct acl_object_label *userp)
58828+{
58829+ struct acl_object_label o_tmp;
58830+ __u32 num = 0;
58831+
58832+ while (userp) {
58833+ if (copy_from_user(&o_tmp, userp,
58834+ sizeof (struct acl_object_label)))
58835+ break;
58836+
58837+ userp = o_tmp.prev;
58838+ num++;
58839+ }
58840+
58841+ return num;
58842+}
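count_user_objs() sizes the object hash before anything is copied: it walks the untrusted user-space list by value, snapshotting each node with copy_from_user() and then following the snapshot's prev pointer. A userspace analogue of that walk, with memcpy() standing in for copy_from_user() (a deliberate simplification); note it trusts the list to be acyclic, as the kernel code does.

#include <stdio.h>
#include <string.h>

struct obj {
	struct obj *prev;
	/* ... payload elided ... */
};

static unsigned int count_objs(const struct obj *userp)
{
	struct obj tmp;
	unsigned int num = 0;

	while (userp) {
		memcpy(&tmp, userp, sizeof(tmp)); /* copy_from_user() in the kernel */
		userp = tmp.prev;
		num++;
	}
	return num;
}

int main(void)
{
	struct obj a = { NULL }, b = { &a }, c = { &b };

	printf("%u\n", count_objs(&c)); /* 3 */
	return 0;
}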
58843+
58844+static struct acl_subject_label *
58845+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
58846+
58847+static int
58848+copy_user_glob(struct acl_object_label *obj)
58849+{
58850+ struct acl_object_label *g_tmp, **guser;
58851+ unsigned int len;
58852+ char *tmp;
58853+
58854+ if (obj->globbed == NULL)
58855+ return 0;
58856+
58857+ guser = &obj->globbed;
58858+ while (*guser) {
58859+ g_tmp = (struct acl_object_label *)
58860+ acl_alloc(sizeof (struct acl_object_label));
58861+ if (g_tmp == NULL)
58862+ return -ENOMEM;
58863+
58864+ if (copy_from_user(g_tmp, *guser,
58865+ sizeof (struct acl_object_label)))
58866+ return -EFAULT;
58867+
58868+ len = strnlen_user(g_tmp->filename, PATH_MAX);
58869+
58870+ if (!len || len >= PATH_MAX)
58871+ return -EINVAL;
58872+
58873+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58874+ return -ENOMEM;
58875+
58876+ if (copy_from_user(tmp, g_tmp->filename, len))
58877+ return -EFAULT;
58878+ tmp[len-1] = '\0';
58879+ g_tmp->filename = tmp;
58880+
58881+ *guser = g_tmp;
58882+ guser = &(g_tmp->next);
58883+ }
58884+
58885+ return 0;
58886+}
58887+
58888+static int
58889+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
58890+ struct acl_role_label *role)
58891+{
58892+ struct acl_object_label *o_tmp;
58893+ unsigned int len;
58894+ int ret;
58895+ char *tmp;
58896+
58897+ while (userp) {
58898+ if ((o_tmp = (struct acl_object_label *)
58899+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
58900+ return -ENOMEM;
58901+
58902+ if (copy_from_user(o_tmp, userp,
58903+ sizeof (struct acl_object_label)))
58904+ return -EFAULT;
58905+
58906+ userp = o_tmp->prev;
58907+
58908+ len = strnlen_user(o_tmp->filename, PATH_MAX);
58909+
58910+ if (!len || len >= PATH_MAX)
58911+ return -EINVAL;
58912+
58913+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58914+ return -ENOMEM;
58915+
58916+ if (copy_from_user(tmp, o_tmp->filename, len))
58917+ return -EFAULT;
58918+ tmp[len-1] = '\0';
58919+ o_tmp->filename = tmp;
58920+
58921+ insert_acl_obj_label(o_tmp, subj);
58922+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
58923+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
58924+ return -ENOMEM;
58925+
58926+ ret = copy_user_glob(o_tmp);
58927+ if (ret)
58928+ return ret;
58929+
58930+ if (o_tmp->nested) {
58931+ int already_copied;
58932+
58933+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
58934+ if (IS_ERR(o_tmp->nested))
58935+ return PTR_ERR(o_tmp->nested);
58936+
58937+ /* insert into nested subject list if we haven't copied this one yet
58938+ to prevent duplicate entries */
58939+ if (!already_copied) {
58940+ o_tmp->nested->next = role->hash->first;
58941+ role->hash->first = o_tmp->nested;
58942+ }
58943+ }
58944+ }
58945+
58946+ return 0;
58947+}
58948+
58949+static __u32
58950+count_user_subjs(struct acl_subject_label *userp)
58951+{
58952+ struct acl_subject_label s_tmp;
58953+ __u32 num = 0;
58954+
58955+ while (userp) {
58956+ if (copy_from_user(&s_tmp, userp,
58957+ sizeof (struct acl_subject_label)))
58958+ break;
58959+ num++; /* was missing; mirrors count_user_objs() so the subject hash is sized correctly */
58960+ userp = s_tmp.prev;
58961+ }
58962+
58963+ return num;
58964+}
58965+
58966+static int
58967+copy_user_allowedips(struct acl_role_label *rolep)
58968+{
58969+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
58970+
58971+ ruserip = rolep->allowed_ips;
58972+
58973+ while (ruserip) {
58974+ rlast = rtmp;
58975+
58976+ if ((rtmp = (struct role_allowed_ip *)
58977+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
58978+ return -ENOMEM;
58979+
58980+ if (copy_from_user(rtmp, ruserip,
58981+ sizeof (struct role_allowed_ip)))
58982+ return -EFAULT;
58983+
58984+ ruserip = rtmp->prev;
58985+
58986+ if (!rlast) {
58987+ rtmp->prev = NULL;
58988+ rolep->allowed_ips = rtmp;
58989+ } else {
58990+ rlast->next = rtmp;
58991+ rtmp->prev = rlast;
58992+ }
58993+
58994+ if (!ruserip)
58995+ rtmp->next = NULL;
58996+ }
58997+
58998+ return 0;
58999+}
59000+
59001+static int
59002+copy_user_transitions(struct acl_role_label *rolep)
59003+{
59004+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
59005+
59006+ unsigned int len;
59007+ char *tmp;
59008+
59009+ rusertp = rolep->transitions;
59010+
59011+ while (rusertp) {
59012+ rlast = rtmp;
59013+
59014+ if ((rtmp = (struct role_transition *)
59015+ acl_alloc(sizeof (struct role_transition))) == NULL)
59016+ return -ENOMEM;
59017+
59018+ if (copy_from_user(rtmp, rusertp,
59019+ sizeof (struct role_transition)))
59020+ return -EFAULT;
59021+
59022+ rusertp = rtmp->prev;
59023+
59024+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
59025+
59026+ if (!len || len >= GR_SPROLE_LEN)
59027+ return -EINVAL;
59028+
59029+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59030+ return -ENOMEM;
59031+
59032+ if (copy_from_user(tmp, rtmp->rolename, len))
59033+ return -EFAULT;
59034+ tmp[len-1] = '\0';
59035+ rtmp->rolename = tmp;
59036+
59037+ if (!rlast) {
59038+ rtmp->prev = NULL;
59039+ rolep->transitions = rtmp;
59040+ } else {
59041+ rlast->next = rtmp;
59042+ rtmp->prev = rlast;
59043+ }
59044+
59045+ if (!rusertp)
59046+ rtmp->next = NULL;
59047+ }
59048+
59049+ return 0;
59050+}
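copy_user_allowedips() and copy_user_transitions() rebuild a user-space linked list inside the kernel: copy a node, splice it onto the tail of the kernel-side list, and patch prev/next as the walk proceeds. The relinking distilled into a standalone sketch, with malloc()/memcpy() standing in for acl_alloc()/copy_from_user():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item {
	int val;
	struct item *prev, *next;
};

/* Rebuild the list headed at userp (followed via ->prev, as above) and
   return the new head, or NULL on allocation failure. */
static struct item *copy_list(const struct item *userp)
{
	struct item *head = NULL, *last = NULL, *tmp;

	while (userp) {
		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			return NULL;	/* the kernel code returns -ENOMEM */
		memcpy(tmp, userp, sizeof(*tmp));
		userp = tmp->prev;

		if (last == NULL) {
			tmp->prev = NULL;
			head = tmp;
		} else {
			last->next = tmp;
			tmp->prev = last;
		}
		last = tmp;
		if (userp == NULL)
			tmp->next = NULL;
	}
	return head;
}

int main(void)
{
	struct item a = { 1, NULL, NULL }, b = { 2, &a, NULL }, c = { 3, &b, NULL };
	struct item *i;

	for (i = copy_list(&c); i; i = i->next)
		printf("%d ", i->val);	/* prints: 3 2 1 */
	printf("\n");
	return 0;
}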
59051+
59052+static struct acl_subject_label *
59053+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
59054+{
59055+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
59056+ unsigned int len;
59057+ char *tmp;
59058+ __u32 num_objs;
59059+ struct acl_ip_label **i_tmp, *i_utmp2;
59060+ struct gr_hash_struct ghash;
59061+ struct subject_map *subjmap;
59062+ unsigned int i_num;
59063+ int err;
59064+
59065+ if (already_copied != NULL)
59066+ *already_copied = 0;
59067+
59068+ s_tmp = lookup_subject_map(userp);
59069+
59070+ /* we've already copied this subject into the kernel, just return
59071+ the reference to it, and don't copy it over again
59072+ */
59073+ if (s_tmp) {
59074+ if (already_copied != NULL)
59075+ *already_copied = 1;
59076+ return(s_tmp);
59077+ }
59078+
59079+ if ((s_tmp = (struct acl_subject_label *)
59080+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
59081+ return ERR_PTR(-ENOMEM);
59082+
59083+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
59084+ if (subjmap == NULL)
59085+ return ERR_PTR(-ENOMEM);
59086+
59087+ subjmap->user = userp;
59088+ subjmap->kernel = s_tmp;
59089+ insert_subj_map_entry(subjmap);
59090+
59091+ if (copy_from_user(s_tmp, userp,
59092+ sizeof (struct acl_subject_label)))
59093+ return ERR_PTR(-EFAULT);
59094+
59095+ len = strnlen_user(s_tmp->filename, PATH_MAX);
59096+
59097+ if (!len || len >= PATH_MAX)
59098+ return ERR_PTR(-EINVAL);
59099+
59100+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59101+ return ERR_PTR(-ENOMEM);
59102+
59103+ if (copy_from_user(tmp, s_tmp->filename, len))
59104+ return ERR_PTR(-EFAULT);
59105+ tmp[len-1] = '\0';
59106+ s_tmp->filename = tmp;
59107+
59108+ if (!strcmp(s_tmp->filename, "/"))
59109+ role->root_label = s_tmp;
59110+
59111+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
59112+ return ERR_PTR(-EFAULT);
59113+
59114+ /* copy user and group transition tables */
59115+
59116+ if (s_tmp->user_trans_num) {
59117+ uid_t *uidlist;
59118+
59119+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
59120+ if (uidlist == NULL)
59121+ return ERR_PTR(-ENOMEM);
59122+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
59123+ return ERR_PTR(-EFAULT);
59124+
59125+ s_tmp->user_transitions = uidlist;
59126+ }
59127+
59128+ if (s_tmp->group_trans_num) {
59129+ gid_t *gidlist;
59130+
59131+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
59132+ if (gidlist == NULL)
59133+ return ERR_PTR(-ENOMEM);
59134+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
59135+ return ERR_PTR(-EFAULT);
59136+
59137+ s_tmp->group_transitions = gidlist;
59138+ }
59139+
59140+ /* set up object hash table */
59141+ num_objs = count_user_objs(ghash.first);
59142+
59143+ s_tmp->obj_hash_size = num_objs;
59144+ s_tmp->obj_hash =
59145+ (struct acl_object_label **)
59146+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
59147+
59148+ if (!s_tmp->obj_hash)
59149+ return ERR_PTR(-ENOMEM);
59150+
59151+ memset(s_tmp->obj_hash, 0,
59152+ s_tmp->obj_hash_size *
59153+ sizeof (struct acl_object_label *));
59154+
59155+ /* add in objects */
59156+ err = copy_user_objs(ghash.first, s_tmp, role);
59157+
59158+ if (err)
59159+ return ERR_PTR(err);
59160+
59161+ /* set pointer for parent subject */
59162+ if (s_tmp->parent_subject) {
59163+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
59164+
59165+ if (IS_ERR(s_tmp2))
59166+ return s_tmp2;
59167+
59168+ s_tmp->parent_subject = s_tmp2;
59169+ }
59170+
59171+ /* add in ip acls */
59172+
59173+ if (!s_tmp->ip_num) {
59174+ s_tmp->ips = NULL;
59175+ goto insert;
59176+ }
59177+
59178+ i_tmp =
59179+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
59180+ sizeof (struct acl_ip_label *));
59181+
59182+ if (!i_tmp)
59183+ return ERR_PTR(-ENOMEM);
59184+
59185+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
59186+ *(i_tmp + i_num) =
59187+ (struct acl_ip_label *)
59188+ acl_alloc(sizeof (struct acl_ip_label));
59189+ if (!*(i_tmp + i_num))
59190+ return ERR_PTR(-ENOMEM);
59191+
59192+ if (copy_from_user
59193+ (&i_utmp2, s_tmp->ips + i_num,
59194+ sizeof (struct acl_ip_label *)))
59195+ return ERR_PTR(-EFAULT);
59196+
59197+ if (copy_from_user
59198+ (*(i_tmp + i_num), i_utmp2,
59199+ sizeof (struct acl_ip_label)))
59200+ return ERR_PTR(-EFAULT);
59201+
59202+ if ((*(i_tmp + i_num))->iface == NULL)
59203+ continue;
59204+
59205+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
59206+ if (!len || len >= IFNAMSIZ)
59207+ return ERR_PTR(-EINVAL);
59208+ tmp = acl_alloc(len);
59209+ if (tmp == NULL)
59210+ return ERR_PTR(-ENOMEM);
59211+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
59212+ return ERR_PTR(-EFAULT);
59213+ (*(i_tmp + i_num))->iface = tmp;
59214+ }
59215+
59216+ s_tmp->ips = i_tmp;
59217+
59218+insert:
59219+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
59220+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
59221+ return ERR_PTR(-ENOMEM);
59222+
59223+ return s_tmp;
59224+}
59225+
59226+static int
59227+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
59228+{
59229+ struct acl_subject_label s_pre;
59230+ struct acl_subject_label *ret;
59232+
59233+ while (userp) {
59234+ if (copy_from_user(&s_pre, userp,
59235+ sizeof (struct acl_subject_label)))
59236+ return -EFAULT;
59237+
59238+ ret = do_copy_user_subj(userp, role, NULL);
59239+
59240+ if (IS_ERR(ret))
59241+ return PTR_ERR(ret);
59243+
59244+ insert_acl_subj_label(ret, role);
59245+
59246+ userp = s_pre.prev;
59247+ }
59248+
59249+ return 0;
59250+}
59251+
59252+static int
59253+copy_user_acl(struct gr_arg *arg)
59254+{
59255+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
59256+ struct acl_subject_label *subj_list;
59257+ struct sprole_pw *sptmp;
59258+ struct gr_hash_struct *ghash;
59259+ uid_t *domainlist;
59260+ unsigned int r_num;
59261+ unsigned int len;
59262+ char *tmp;
59263+ int err = 0;
59264+ __u16 i;
59265+ __u32 num_subjs;
59266+
59267+ /* we need a default and kernel role */
59268+ if (arg->role_db.num_roles < 2)
59269+ return -EINVAL;
59270+
59271+ /* copy special role authentication info from userspace */
59272+
59273+ num_sprole_pws = arg->num_sprole_pws;
59274+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
59275+
59276+ if (!acl_special_roles && num_sprole_pws)
59277+ return -ENOMEM;
59278+
59279+ for (i = 0; i < num_sprole_pws; i++) {
59280+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
59281+ if (!sptmp)
59282+ return -ENOMEM;
59283+ if (copy_from_user(sptmp, arg->sprole_pws + i,
59284+ sizeof (struct sprole_pw)))
59285+ return -EFAULT;
59286+
59287+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
59288+
59289+ if (!len || len >= GR_SPROLE_LEN)
59290+ return -EINVAL;
59291+
59292+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59293+ return -ENOMEM;
59294+
59295+ if (copy_from_user(tmp, sptmp->rolename, len))
59296+ return -EFAULT;
59297+
59298+ tmp[len-1] = '\0';
59299+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59300+ printk(KERN_ALERT "Copying special role %s\n", tmp);
59301+#endif
59302+ sptmp->rolename = tmp;
59303+ acl_special_roles[i] = sptmp;
59304+ }
59305+
59306+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
59307+
59308+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
59309+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
59310+
59311+ if (!r_tmp)
59312+ return -ENOMEM;
59313+
59314+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
59315+ sizeof (struct acl_role_label *)))
59316+ return -EFAULT;
59317+
59318+ if (copy_from_user(r_tmp, r_utmp2,
59319+ sizeof (struct acl_role_label)))
59320+ return -EFAULT;
59321+
59322+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
59323+
59324+ if (!len || len >= GR_SPROLE_LEN) /* bound must match the strnlen_user() limit above */
59325+ return -EINVAL;
59326+
59327+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59328+ return -ENOMEM;
59329+
59330+ if (copy_from_user(tmp, r_tmp->rolename, len))
59331+ return -EFAULT;
59332+
59333+ tmp[len-1] = '\0';
59334+ r_tmp->rolename = tmp;
59335+
59336+ if (!strcmp(r_tmp->rolename, "default")
59337+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
59338+ default_role = r_tmp;
59339+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
59340+ kernel_role = r_tmp;
59341+ }
59342+
59343+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
59344+ return -ENOMEM;
59345+
59346+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
59347+ return -EFAULT;
59348+
59349+ r_tmp->hash = ghash;
59350+
59351+ num_subjs = count_user_subjs(r_tmp->hash->first);
59352+
59353+ r_tmp->subj_hash_size = num_subjs;
59354+ r_tmp->subj_hash =
59355+ (struct acl_subject_label **)
59356+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
59357+
59358+ if (!r_tmp->subj_hash)
59359+ return -ENOMEM;
59360+
59361+ err = copy_user_allowedips(r_tmp);
59362+ if (err)
59363+ return err;
59364+
59365+ /* copy domain info */
59366+ if (r_tmp->domain_children != NULL) {
59367+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
59368+ if (domainlist == NULL)
59369+ return -ENOMEM;
59370+
59371+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
59372+ return -EFAULT;
59373+
59374+ r_tmp->domain_children = domainlist;
59375+ }
59376+
59377+ err = copy_user_transitions(r_tmp);
59378+ if (err)
59379+ return err;
59380+
59381+ memset(r_tmp->subj_hash, 0,
59382+ r_tmp->subj_hash_size *
59383+ sizeof (struct acl_subject_label *));
59384+
59385+ /* acquire the list of subjects, then NULL out
59386+ the list prior to parsing the subjects for this role,
59387+ as during this parsing the list is replaced with a list
59388+ of *nested* subjects for the role
59389+ */
59390+ subj_list = r_tmp->hash->first;
59391+
59392+ /* set nested subject list to null */
59393+ r_tmp->hash->first = NULL;
59394+
59395+ err = copy_user_subjs(subj_list, r_tmp);
59396+
59397+ if (err)
59398+ return err;
59399+
59400+ insert_acl_role_label(r_tmp);
59401+ }
59402+
59403+ if (default_role == NULL || kernel_role == NULL)
59404+ return -EINVAL;
59405+
59406+ return err;
59407+}
59408+
59409+static int
59410+gracl_init(struct gr_arg *args)
59411+{
59412+ int error = 0;
59413+
59414+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
59415+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
59416+
59417+ if (init_variables(args)) {
59418+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
59419+ error = -ENOMEM;
59420+ free_variables();
59421+ goto out;
59422+ }
59423+
59424+ error = copy_user_acl(args);
59425+ free_init_variables();
59426+ if (error) {
59427+ free_variables();
59428+ goto out;
59429+ }
59430+
59431+ if ((error = gr_set_acls(0))) {
59432+ free_variables();
59433+ goto out;
59434+ }
59435+
59436+ pax_open_kernel();
59437+ gr_status |= GR_READY;
59438+ pax_close_kernel();
59439+
59440+ out:
59441+ return error;
59442+}
59443+
59444+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
59445+
59446+static int
59447+glob_match(const char *p, const char *n)
59448+{
59449+ char c;
59450+
59451+ while ((c = *p++) != '\0') {
59452+ switch (c) {
59453+ case '?':
59454+ if (*n == '\0')
59455+ return 1;
59456+ else if (*n == '/')
59457+ return 1;
59458+ break;
59459+ case '\\':
59460+ if (*n != c)
59461+ return 1;
59462+ break;
59463+ case '*':
59464+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
59465+ if (*n == '/')
59466+ return 1;
59467+ else if (c == '?') {
59468+ if (*n == '\0')
59469+ return 1;
59470+ else
59471+ ++n;
59472+ }
59473+ }
59474+ if (c == '\0') {
59475+ return 0;
59476+ } else {
59477+ const char *endp;
59478+
59479+ if ((endp = strchr(n, '/')) == NULL)
59480+ endp = n + strlen(n);
59481+
59482+ if (c == '[') {
59483+ for (--p; n < endp; ++n)
59484+ if (!glob_match(p, n))
59485+ return 0;
59486+ } else if (c == '/') {
59487+ while (*n != '\0' && *n != '/')
59488+ ++n;
59489+ if (*n == '/' && !glob_match(p, n + 1))
59490+ return 0;
59491+ } else {
59492+ for (--p; n < endp; ++n)
59493+ if (*n == c && !glob_match(p, n))
59494+ return 0;
59495+ }
59496+
59497+ return 1;
59498+ }
59499+ case '[':
59500+ {
59501+ int not;
59502+ char cold;
59503+
59504+ if (*n == '\0' || *n == '/')
59505+ return 1;
59506+
59507+ not = (*p == '!' || *p == '^');
59508+ if (not)
59509+ ++p;
59510+
59511+ c = *p++;
59512+ for (;;) {
59513+ unsigned char fn = (unsigned char)*n;
59514+
59515+ if (c == '\0')
59516+ return 1;
59517+ else {
59518+ if (c == fn)
59519+ goto matched;
59520+ cold = c;
59521+ c = *p++;
59522+
59523+ if (c == '-' && *p != ']') {
59524+ unsigned char cend = *p++;
59525+
59526+ if (cend == '\0')
59527+ return 1;
59528+
59529+ if (cold <= fn && fn <= cend)
59530+ goto matched;
59531+
59532+ c = *p++;
59533+ }
59534+ }
59535+
59536+ if (c == ']')
59537+ break;
59538+ }
59539+ if (!not)
59540+ return 1;
59541+ break;
59542+ matched:
59543+ while (c != ']') {
59544+ if (c == '\0')
59545+ return 1;
59546+
59547+ c = *p++;
59548+ }
59549+ if (not)
59550+ return 1;
59551+ }
59552+ break;
59553+ default:
59554+ if (c != *n)
59555+ return 1;
59556+ }
59557+
59558+ ++n;
59559+ }
59560+
59561+ if (*n == '\0')
59562+ return 0;
59563+
59564+ if (*n == '/')
59565+ return 0;
59566+
59567+ return 1;
59568+}
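A few cases for glob_match(), traced against the code above. Unlike plain fnmatch(), '*' and '?' here refuse to cross a '/' mid-pattern, and a '*' immediately followed by '/' resumes matching at the next path component. This harness assumes glob_match() above has been pasted verbatim into the same userspace file; the expected results in the comments were worked out by hand from the code.

#include <stdio.h>

static int glob_match(const char *p, const char *n);	/* the function above */

int main(void)
{
	/* 0 = match, 1 = no match */
	printf("%d\n", glob_match("*.txt", "notes.txt"));               /* 0 */
	printf("%d\n", glob_match("*.txt", "dir/notes.txt"));           /* 1: '*' stops at '/' */
	printf("%d\n", glob_match("/home/*/.ssh", "/home/alice/.ssh")); /* 0 */
	printf("%d\n", glob_match("file[0-9]", "file7"));               /* 0 */
	printf("%d\n", glob_match("?ab", "/ab"));                       /* 1: '?' never matches '/' */
	return 0;
}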
59569+
59570+static struct acl_object_label *
59571+chk_glob_label(struct acl_object_label *globbed,
59572+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
59573+{
59574+ struct acl_object_label *tmp;
59575+
59576+ if (*path == NULL)
59577+ *path = gr_to_filename_nolock(dentry, mnt);
59578+
59579+ tmp = globbed;
59580+
59581+ while (tmp) {
59582+ if (!glob_match(tmp->filename, *path))
59583+ return tmp;
59584+ tmp = tmp->next;
59585+ }
59586+
59587+ return NULL;
59588+}
59589+
59590+static struct acl_object_label *
59591+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59592+ const ino_t curr_ino, const dev_t curr_dev,
59593+ const struct acl_subject_label *subj, char **path, const int checkglob)
59594+{
59595+ struct acl_subject_label *tmpsubj;
59596+ struct acl_object_label *retval;
59597+ struct acl_object_label *retval2;
59598+
59599+ tmpsubj = (struct acl_subject_label *) subj;
59600+ read_lock(&gr_inode_lock);
59601+ do {
59602+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
59603+ if (retval) {
59604+ if (checkglob && retval->globbed) {
59605+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
59606+ if (retval2)
59607+ retval = retval2;
59608+ }
59609+ break;
59610+ }
59611+ } while ((tmpsubj = tmpsubj->parent_subject));
59612+ read_unlock(&gr_inode_lock);
59613+
59614+ return retval;
59615+}
59616+
59617+static __inline__ struct acl_object_label *
59618+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59619+ struct dentry *curr_dentry,
59620+ const struct acl_subject_label *subj, char **path, const int checkglob)
59621+{
59622+ int newglob = checkglob;
59623+ ino_t inode;
59624+ dev_t device;
59625+
59626+ /* If we aren't yet checking a subdirectory of the original path, skip glob
59627+ checking: we don't want a "/ *" rule to match instead of the "/" object.
59628+ Create lookups are exempt, since they look up on the parent and therefore
59629+ need globbing checks on all paths.
59630+ */
59631+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
59632+ newglob = GR_NO_GLOB;
59633+
59634+ spin_lock(&curr_dentry->d_lock);
59635+ inode = curr_dentry->d_inode->i_ino;
59636+ device = __get_dev(curr_dentry);
59637+ spin_unlock(&curr_dentry->d_lock);
59638+
59639+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
59640+}
59641+
59642+#ifdef CONFIG_HUGETLBFS
59643+static inline bool
59644+is_hugetlbfs_mnt(const struct vfsmount *mnt)
59645+{
59646+ int i;
59647+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
59648+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
59649+ return true;
59650+ }
59651+
59652+ return false;
59653+}
59654+#endif
59655+
59656+static struct acl_object_label *
59657+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59658+ const struct acl_subject_label *subj, char *path, const int checkglob)
59659+{
59660+ struct dentry *dentry = (struct dentry *) l_dentry;
59661+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59662+ struct mount *real_mnt = real_mount(mnt);
59663+ struct acl_object_label *retval;
59664+ struct dentry *parent;
59665+
59666+ br_read_lock(&vfsmount_lock);
59667+ write_seqlock(&rename_lock);
59668+
59669+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
59670+#ifdef CONFIG_NET
59671+ mnt == sock_mnt ||
59672+#endif
59673+#ifdef CONFIG_HUGETLBFS
59674+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
59675+#endif
59676+ /* ignore Eric Biederman */
59677+ IS_PRIVATE(l_dentry->d_inode))) {
59678+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
59679+ goto out;
59680+ }
59681+
59682+ for (;;) {
59683+ if (dentry == real_root.dentry && mnt == real_root.mnt)
59684+ break;
59685+
59686+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59687+ if (!mnt_has_parent(real_mnt))
59688+ break;
59689+
59690+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59691+ if (retval != NULL)
59692+ goto out;
59693+
59694+ dentry = real_mnt->mnt_mountpoint;
59695+ real_mnt = real_mnt->mnt_parent;
59696+ mnt = &real_mnt->mnt;
59697+ continue;
59698+ }
59699+
59700+ parent = dentry->d_parent;
59701+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59702+ if (retval != NULL)
59703+ goto out;
59704+
59705+ dentry = parent;
59706+ }
59707+
59708+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59709+
59710+ /* real_root is pinned so we don't have to hold a reference */
59711+ if (retval == NULL)
59712+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
59713+out:
59714+ write_sequnlock(&rename_lock);
59715+ br_read_unlock(&vfsmount_lock);
59716+
59717+ BUG_ON(retval == NULL);
59718+
59719+ return retval;
59720+}
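Stripped of its locking, mount traversal, and the shmem/pipe/socket special cases, __chk_obj_label() is a most-specific-path-wins lookup: try the full path, then each ancestor in turn, and finally fall back to the real root (which is why the BUG_ON above can insist on a match). A string-based userspace sketch of that precedence; the policy array and match_label() are invented for the example.

#include <stdio.h>
#include <string.h>

static const char *policy[] = { "/", "/home", "/home/user/secret" };

static const char *match_label(const char *path)
{
	char buf[256];
	size_t i;
	char *slash;

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	for (;;) {
		for (i = 0; i < sizeof(policy) / sizeof(policy[0]); i++)
			if (strcmp(policy[i], buf) == 0)
				return policy[i];

		/* walk one component toward the root, as the dentry loop does */
		slash = strrchr(buf, '/');
		if (slash == NULL)
			return NULL;
		if (slash == buf) {
			if (strcmp(buf, "/") == 0)
				return NULL;	/* even "/" carried no label */
			buf[1] = '\0';		/* reduce to "/" */
		} else {
			*slash = '\0';		/* strip the last component */
		}
	}
}

int main(void)
{
	printf("%s\n", match_label("/home/user/secret/key")); /* /home/user/secret */
	printf("%s\n", match_label("/etc/passwd"));            /* / */
	return 0;
}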
59721+
59722+static __inline__ struct acl_object_label *
59723+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59724+ const struct acl_subject_label *subj)
59725+{
59726+ char *path = NULL;
59727+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
59728+}
59729+
59730+static __inline__ struct acl_object_label *
59731+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59732+ const struct acl_subject_label *subj)
59733+{
59734+ char *path = NULL;
59735+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
59736+}
59737+
59738+static __inline__ struct acl_object_label *
59739+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59740+ const struct acl_subject_label *subj, char *path)
59741+{
59742+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
59743+}
59744+
59745+static struct acl_subject_label *
59746+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59747+ const struct acl_role_label *role)
59748+{
59749+ struct dentry *dentry = (struct dentry *) l_dentry;
59750+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59751+ struct mount *real_mnt = real_mount(mnt);
59752+ struct acl_subject_label *retval;
59753+ struct dentry *parent;
59754+
59755+ br_read_lock(&vfsmount_lock);
59756+ write_seqlock(&rename_lock);
59757+
59758+ for (;;) {
59759+ if (dentry == real_root.dentry && mnt == real_root.mnt)
59760+ break;
59761+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59762+ if (!mnt_has_parent(real_mnt))
59763+ break;
59764+
59765+ spin_lock(&dentry->d_lock);
59766+ read_lock(&gr_inode_lock);
59767+ retval =
59768+ lookup_acl_subj_label(dentry->d_inode->i_ino,
59769+ __get_dev(dentry), role);
59770+ read_unlock(&gr_inode_lock);
59771+ spin_unlock(&dentry->d_lock);
59772+ if (retval != NULL)
59773+ goto out;
59774+
59775+ dentry = real_mnt->mnt_mountpoint;
59776+ real_mnt = real_mnt->mnt_parent;
59777+ mnt = &real_mnt->mnt;
59778+ continue;
59779+ }
59780+
59781+ spin_lock(&dentry->d_lock);
59782+ read_lock(&gr_inode_lock);
59783+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59784+ __get_dev(dentry), role);
59785+ read_unlock(&gr_inode_lock);
59786+ parent = dentry->d_parent;
59787+ spin_unlock(&dentry->d_lock);
59788+
59789+ if (retval != NULL)
59790+ goto out;
59791+
59792+ dentry = parent;
59793+ }
59794+
59795+ spin_lock(&dentry->d_lock);
59796+ read_lock(&gr_inode_lock);
59797+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59798+ __get_dev(dentry), role);
59799+ read_unlock(&gr_inode_lock);
59800+ spin_unlock(&dentry->d_lock);
59801+
59802+ if (unlikely(retval == NULL)) {
59803+ /* real_root is pinned, we don't need to hold a reference */
59804+ read_lock(&gr_inode_lock);
59805+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
59806+ __get_dev(real_root.dentry), role);
59807+ read_unlock(&gr_inode_lock);
59808+ }
59809+out:
59810+ write_sequnlock(&rename_lock);
59811+ br_read_unlock(&vfsmount_lock);
59812+
59813+ BUG_ON(retval == NULL);
59814+
59815+ return retval;
59816+}
59817+
59818+static void
59819+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
59820+{
59821+ struct task_struct *task = current;
59822+ const struct cred *cred = current_cred();
59823+
59824+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
59825+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59826+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59827+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
59828+
59829+ return;
59830+}
59831+
59832+static void
59833+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
59834+{
59835+ struct task_struct *task = current;
59836+ const struct cred *cred = current_cred();
59837+
59838+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59839+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59840+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59841+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
59842+
59843+ return;
59844+}
59845+
59846+static void
59847+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
59848+{
59849+ struct task_struct *task = current;
59850+ const struct cred *cred = current_cred();
59851+
59852+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59853+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59854+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59855+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
59856+
59857+ return;
59858+}
59859+
59860+__u32
59861+gr_search_file(const struct dentry * dentry, const __u32 mode,
59862+ const struct vfsmount * mnt)
59863+{
59864+ __u32 retval = mode;
59865+ struct acl_subject_label *curracl;
59866+ struct acl_object_label *currobj;
59867+
59868+ if (unlikely(!(gr_status & GR_READY)))
59869+ return (mode & ~GR_AUDITS);
59870+
59871+ curracl = current->acl;
59872+
59873+ currobj = chk_obj_label(dentry, mnt, curracl);
59874+ retval = currobj->mode & mode;
59875+
59876+ /* if we're opening a specified transfer file for writing
59877+ (e.g. /dev/initctl), then transfer our role to init
59878+ */
59879+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
59880+ current->role->roletype & GR_ROLE_PERSIST)) {
59881+ struct task_struct *task = init_pid_ns.child_reaper;
59882+
59883+ if (task->role != current->role) {
59884+ task->acl_sp_role = 0;
59885+ task->acl_role_id = current->acl_role_id;
59886+ task->role = current->role;
59887+ rcu_read_lock();
59888+ read_lock(&grsec_exec_file_lock);
59889+ gr_apply_subject_to_task(task);
59890+ read_unlock(&grsec_exec_file_lock);
59891+ rcu_read_unlock();
59892+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
59893+ }
59894+ }
59895+
59896+ if (unlikely
59897+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
59898+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
59899+ __u32 new_mode = mode;
59900+
59901+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59902+
59903+ retval = new_mode;
59904+
59905+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
59906+ new_mode |= GR_INHERIT;
59907+
59908+ if (!(mode & GR_NOLEARN))
59909+ gr_log_learn(dentry, mnt, new_mode);
59910+ }
59911+
59912+ return retval;
59913+}
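The core of gr_search_file() is one AND: the caller requests a mode mask, the best-matching object supplies a granted mask, and the request fully succeeds when every non-audit bit survives. The same test in miniature, with invented flag values and the suppress bit folded into the audit mask. When the test fails for a subject in learning mode, the tail of the function above grants and logs the needed bits instead of denying.

#include <stdio.h>

#define GR_READ   0x01u
#define GR_WRITE  0x02u
#define GR_AUDITS 0xf0u	/* stand-in for the audit/suppress bits */

/* Nonzero when the object grants everything the caller asked for. */
static int allowed(unsigned int requested, unsigned int object_mode)
{
	unsigned int retval = object_mode & requested;

	return retval == (requested & ~GR_AUDITS);
}

int main(void)
{
	printf("%d\n", allowed(GR_READ, GR_READ | GR_WRITE)); /* 1 */
	printf("%d\n", allowed(GR_READ | GR_WRITE, GR_READ)); /* 0: write denied */
	return 0;
}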
59914+
59915+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
59916+ const struct dentry *parent,
59917+ const struct vfsmount *mnt)
59918+{
59919+ struct name_entry *match;
59920+ struct acl_object_label *matchpo;
59921+ struct acl_subject_label *curracl;
59922+ char *path;
59923+
59924+ if (unlikely(!(gr_status & GR_READY)))
59925+ return NULL;
59926+
59927+ preempt_disable();
59928+ path = gr_to_filename_rbac(new_dentry, mnt);
59929+ match = lookup_name_entry_create(path);
59930+
59931+ curracl = current->acl;
59932+
59933+ if (match) {
59934+ read_lock(&gr_inode_lock);
59935+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
59936+ read_unlock(&gr_inode_lock);
59937+
59938+ if (matchpo) {
59939+ preempt_enable();
59940+ return matchpo;
59941+ }
59942+ }
59943+
59944+ // lookup parent
59945+
59946+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
59947+
59948+ preempt_enable();
59949+ return matchpo;
59950+}
59951+
59952+__u32
59953+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
59954+ const struct vfsmount * mnt, const __u32 mode)
59955+{
59956+ struct acl_object_label *matchpo;
59957+ __u32 retval;
59958+
59959+ if (unlikely(!(gr_status & GR_READY)))
59960+ return (mode & ~GR_AUDITS);
59961+
59962+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
59963+
59964+ retval = matchpo->mode & mode;
59965+
59966+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
59967+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59968+ __u32 new_mode = mode;
59969+
59970+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59971+
59972+ gr_log_learn(new_dentry, mnt, new_mode);
59973+ return new_mode;
59974+ }
59975+
59976+ return retval;
59977+}
59978+
59979+__u32
59980+gr_check_link(const struct dentry * new_dentry,
59981+ const struct dentry * parent_dentry,
59982+ const struct vfsmount * parent_mnt,
59983+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
59984+{
59985+ struct acl_object_label *obj;
59986+ __u32 oldmode, newmode;
59987+ __u32 needmode;
59988+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
59989+ GR_DELETE | GR_INHERIT;
59990+
59991+ if (unlikely(!(gr_status & GR_READY)))
59992+ return (GR_CREATE | GR_LINK);
59993+
59994+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
59995+ oldmode = obj->mode;
59996+
59997+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
59998+ newmode = obj->mode;
59999+
60000+ needmode = newmode & checkmodes;
60001+
60002+ // old name for hardlink must have at least the permissions of the new name
60003+ if ((oldmode & needmode) != needmode)
60004+ goto bad;
60005+
60006+ // if old name had restrictions/auditing, make sure the new name does as well
60007+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
60008+
60009+ // don't allow hardlinking of suid/sgid/fcapped files without permission
60010+ if (is_privileged_binary(old_dentry))
60011+ needmode |= GR_SETID;
60012+
60013+ if ((newmode & needmode) != needmode)
60014+ goto bad;
60015+
60016+ // enforce minimum permissions
60017+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
60018+ return newmode;
60019+bad:
60020+ needmode = oldmode;
60021+ if (is_privileged_binary(old_dentry))
60022+ needmode |= GR_SETID;
60023+
60024+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
60025+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
60026+ return (GR_CREATE | GR_LINK);
60027+ } else if (newmode & GR_SUPPRESS)
60028+ return GR_SUPPRESS;
60029+ else
60030+ return 0;
60031+}
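gr_check_link() reads as two subset tests over bitmasks: the old name must already grant everything the new name grants (restricted to the checked modes), and the new name must inherit the old name's restriction/audit bits, plus GR_SETID when the target is a privileged binary. A compact restatement with invented flag values; the GR_NOPTRACE/GR_PTRACERD/GR_INHERIT/GR_AUDITS group is collapsed into a single stand-in bit.

#include <stdio.h>

#define GR_FIND    0x001u
#define GR_WRITE   0x002u
#define GR_SETID   0x004u
#define GR_AUDITS  0x008u	/* stand-in for the restriction/audit bits */
#define GR_CREATE  0x010u
#define GR_LINK    0x020u

#define CHECKMODES (GR_FIND | GR_WRITE | GR_SETID)

/* 1 = link permitted, 0 = denied. */
static int link_ok(unsigned int oldmode, unsigned int newmode, int privileged)
{
	unsigned int need;

	/* old name must carry at least the new name's checked permissions */
	need = newmode & CHECKMODES;
	if ((oldmode & need) != need)
		return 0;

	/* new name must carry the old name's restrictions/auditing ... */
	need = oldmode & GR_AUDITS;
	if (privileged)		/* ... plus SETID for suid/sgid/fcapped targets */
		need |= GR_SETID;
	if ((newmode & need) != need)
		return 0;

	/* and the new name must allow creating the link at all */
	return (newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK);
}

int main(void)
{
	printf("%d\n", link_ok(GR_FIND | GR_WRITE,
			       GR_FIND | GR_CREATE | GR_LINK, 0)); /* 1 */
	printf("%d\n", link_ok(GR_FIND | GR_SETID,
			       GR_FIND | GR_CREATE | GR_LINK, 1)); /* 0: new name lacks GR_SETID */
	return 0;
}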
60032+
60033+int
60034+gr_check_hidden_task(const struct task_struct *task)
60035+{
60036+ if (unlikely(!(gr_status & GR_READY)))
60037+ return 0;
60038+
60039+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
60040+ return 1;
60041+
60042+ return 0;
60043+}
60044+
60045+int
60046+gr_check_protected_task(const struct task_struct *task)
60047+{
60048+ if (unlikely(!(gr_status & GR_READY) || !task))
60049+ return 0;
60050+
60051+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60052+ task->acl != current->acl)
60053+ return 1;
60054+
60055+ return 0;
60056+}
60057+
60058+int
60059+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
60060+{
60061+ struct task_struct *p;
60062+ int ret = 0;
60063+
60064+ if (unlikely(!(gr_status & GR_READY) || !pid))
60065+ return ret;
60066+
60067+ read_lock(&tasklist_lock);
60068+ do_each_pid_task(pid, type, p) {
60069+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60070+ p->acl != current->acl) {
60071+ ret = 1;
60072+ goto out;
60073+ }
60074+ } while_each_pid_task(pid, type, p);
60075+out:
60076+ read_unlock(&tasklist_lock);
60077+
60078+ return ret;
60079+}
60080+
60081+void
60082+gr_copy_label(struct task_struct *tsk)
60083+{
60084+ tsk->signal->used_accept = 0;
60085+ tsk->acl_sp_role = 0;
60086+ tsk->acl_role_id = current->acl_role_id;
60087+ tsk->acl = current->acl;
60088+ tsk->role = current->role;
60089+ tsk->signal->curr_ip = current->signal->curr_ip;
60090+ tsk->signal->saved_ip = current->signal->saved_ip;
60091+ if (current->exec_file)
60092+ get_file(current->exec_file);
60093+ tsk->exec_file = current->exec_file;
60094+ tsk->is_writable = current->is_writable;
60095+ if (unlikely(current->signal->used_accept)) {
60096+ current->signal->curr_ip = 0;
60097+ current->signal->saved_ip = 0;
60098+ }
60099+
60100+ return;
60101+}
60102+
60103+static void
60104+gr_set_proc_res(struct task_struct *task)
60105+{
60106+ struct acl_subject_label *proc;
60107+ unsigned short i;
60108+
60109+ proc = task->acl;
60110+
60111+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
60112+ return;
60113+
60114+ for (i = 0; i < RLIM_NLIMITS; i++) {
60115+ if (!(proc->resmask & (1U << i)))
60116+ continue;
60117+
60118+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
60119+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
60120+
60121+ if (i == RLIMIT_CPU)
60122+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
60123+ }
60124+
60125+ return;
60126+}
60127+
60128+extern int __gr_process_user_ban(struct user_struct *user);
60129+
60130+int
60131+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
60132+{
60133+ unsigned int i;
60134+ __u16 num;
60135+ uid_t *uidlist;
60136+ uid_t curuid;
60137+ int realok = 0;
60138+ int effectiveok = 0;
60139+ int fsok = 0;
60140+ uid_t globalreal, globaleffective, globalfs;
60141+
60142+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60143+ struct user_struct *user;
60144+
60145+ if (!uid_valid(real))
60146+ goto skipit;
60147+
60148+ /* find user based on global namespace */
60149+
60150+ globalreal = GR_GLOBAL_UID(real);
60151+
60152+ user = find_user(make_kuid(&init_user_ns, globalreal));
60153+ if (user == NULL)
60154+ goto skipit;
60155+
60156+ if (__gr_process_user_ban(user)) {
60157+ /* for find_user */
60158+ free_uid(user);
60159+ return 1;
60160+ }
60161+
60162+ /* for find_user */
60163+ free_uid(user);
60164+
60165+skipit:
60166+#endif
60167+
60168+ if (unlikely(!(gr_status & GR_READY)))
60169+ return 0;
60170+
60171+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60172+ gr_log_learn_uid_change(real, effective, fs);
60173+
60174+ num = current->acl->user_trans_num;
60175+ uidlist = current->acl->user_transitions;
60176+
60177+ if (uidlist == NULL)
60178+ return 0;
60179+
60180+ if (!uid_valid(real)) {
60181+ realok = 1;
60182+ globalreal = (uid_t)-1;
60183+ } else {
60184+ globalreal = GR_GLOBAL_UID(real);
60185+ }
60186+ if (!uid_valid(effective)) {
60187+ effectiveok = 1;
60188+ globaleffective = (uid_t)-1;
60189+ } else {
60190+ globaleffective = GR_GLOBAL_UID(effective);
60191+ }
60192+ if (!uid_valid(fs)) {
60193+ fsok = 1;
60194+ globalfs = (uid_t)-1;
60195+ } else {
60196+ globalfs = GR_GLOBAL_UID(fs);
60197+ }
60198+
60199+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
60200+ for (i = 0; i < num; i++) {
60201+ curuid = uidlist[i];
60202+ if (globalreal == curuid)
60203+ realok = 1;
60204+ if (globaleffective == curuid)
60205+ effectiveok = 1;
60206+ if (globalfs == curuid)
60207+ fsok = 1;
60208+ }
60209+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
60210+ for (i = 0; i < num; i++) {
60211+ curuid = uidlist[i];
60212+ if (globalreal == curuid)
60213+ break;
60214+ if (globaleffective == curuid)
60215+ break;
60216+ if (globalfs == curuid)
60217+ break;
60218+ }
60219+ /* not in deny list */
60220+ if (i == num) {
60221+ realok = 1;
60222+ effectiveok = 1;
60223+ fsok = 1;
60224+ }
60225+ }
60226+
60227+ if (realok && effectiveok && fsok)
60228+ return 0;
60229+ else {
60230+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60231+ return 1;
60232+ }
60233+}
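
gr_check_user_change() treats the transition list as either a whitelist (GR_ID_ALLOW) or a blacklist (GR_ID_DENY), and all three ids (real, effective, fs) must pass. A minimal sketch of that list semantics reduced to a single id; names and values are hypothetical, and the real code additionally pre-approves invalid (-1) ids.

#include <stdio.h>

#define ID_ALLOW 1
#define ID_DENY  2

/* return 1 if changing to 'uid' is denied by the transition list */
static int uid_transition_denied(int type, const unsigned int *list,
                                 unsigned short num, unsigned int uid)
{
    unsigned short i;

    for (i = 0; i < num; i++)
        if (list[i] == uid)
            /* listed: allowed under ALLOW, denied under DENY */
            return type == ID_ALLOW ? 0 : 1;

    /* unlisted: denied under ALLOW, allowed under DENY */
    return type == ID_ALLOW ? 1 : 0;
}

int main(void)
{
    const unsigned int allowed[] = { 33, 1000 };

    printf("uid 1000: %s\n",
           uid_transition_denied(ID_ALLOW, allowed, 2, 1000) ? "denied" : "allowed");
    printf("uid 0:    %s\n",
           uid_transition_denied(ID_ALLOW, allowed, 2, 0) ? "denied" : "allowed");
    return 0;
}
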
60234+
60235+int
60236+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
60237+{
60238+ unsigned int i;
60239+ __u16 num;
60240+ gid_t *gidlist;
60241+ gid_t curgid;
60242+ int realok = 0;
60243+ int effectiveok = 0;
60244+ int fsok = 0;
60245+ gid_t globalreal, globaleffective, globalfs;
60246+
60247+ if (unlikely(!(gr_status & GR_READY)))
60248+ return 0;
60249+
60250+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60251+ gr_log_learn_gid_change(real, effective, fs);
60252+
60253+ num = current->acl->group_trans_num;
60254+ gidlist = current->acl->group_transitions;
60255+
60256+ if (gidlist == NULL)
60257+ return 0;
60258+
60259+ if (!gid_valid(real)) {
60260+ realok = 1;
60261+ globalreal = (gid_t)-1;
60262+ } else {
60263+ globalreal = GR_GLOBAL_GID(real);
60264+ }
60265+ if (!gid_valid(effective)) {
60266+ effectiveok = 1;
60267+ globaleffective = (gid_t)-1;
60268+ } else {
60269+ globaleffective = GR_GLOBAL_GID(effective);
60270+ }
60271+ if (!gid_valid(fs)) {
60272+ fsok = 1;
60273+ globalfs = (gid_t)-1;
60274+ } else {
60275+ globalfs = GR_GLOBAL_GID(fs);
60276+ }
60277+
60278+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
60279+ for (i = 0; i < num; i++) {
60280+ curgid = gidlist[i];
60281+ if (globalreal == curgid)
60282+ realok = 1;
60283+ if (globaleffective == curgid)
60284+ effectiveok = 1;
60285+ if (globalfs == curgid)
60286+ fsok = 1;
60287+ }
60288+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
60289+ for (i = 0; i < num; i++) {
60290+ curgid = gidlist[i];
60291+ if (globalreal == curgid)
60292+ break;
60293+ if (globaleffective == curgid)
60294+ break;
60295+ if (globalfs == curgid)
60296+ break;
60297+ }
60298+ /* not in deny list */
60299+ if (i == num) {
60300+ realok = 1;
60301+ effectiveok = 1;
60302+ fsok = 1;
60303+ }
60304+ }
60305+
60306+ if (realok && effectiveok && fsok)
60307+ return 0;
60308+ else {
60309+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60310+ return 1;
60311+ }
60312+}
60313+
60314+extern int gr_acl_is_capable(const int cap);
60315+
60316+void
60317+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
60318+{
60319+ struct acl_role_label *role = task->role;
60320+ struct acl_subject_label *subj = NULL;
60321+ struct acl_object_label *obj;
60322+ struct file *filp;
60323+ uid_t uid;
60324+ gid_t gid;
60325+
60326+ if (unlikely(!(gr_status & GR_READY)))
60327+ return;
60328+
60329+ uid = GR_GLOBAL_UID(kuid);
60330+ gid = GR_GLOBAL_GID(kgid);
60331+
60332+ filp = task->exec_file;
60333+
60334+ /* kernel process, we'll give them the kernel role */
60335+ if (unlikely(!filp)) {
60336+ task->role = kernel_role;
60337+ task->acl = kernel_role->root_label;
60338+ return;
60339+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
60340+ role = lookup_acl_role_label(task, uid, gid);
60341+
60342+ /* don't change the role if we're not a privileged process */
60343+ if (role && task->role != role &&
60344+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
60345+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
60346+ return;
60347+
60348+ /* perform subject lookup in possibly new role
60349+ we can use this result below in the case where role == task->role
60350+ */
60351+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
60352+
60353+ /* if the uid/gid change results in the same role
60354+ and the subject uses inheritance, don't lose the
60355+ inherited subject: if the current subject differs from
60356+ what a normal lookup would return, we arrived at it
60357+ via inheritance, so keep it
60358+ */
60359+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
60360+ (subj == task->acl)))
60361+ task->acl = subj;
60362+
60363+ task->role = role;
60364+
60365+ task->is_writable = 0;
60366+
60367+ /* ignore additional mmap checks for processes that are writable
60368+ by the default ACL */
60369+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60370+ if (unlikely(obj->mode & GR_WRITE))
60371+ task->is_writable = 1;
60372+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60373+ if (unlikely(obj->mode & GR_WRITE))
60374+ task->is_writable = 1;
60375+
60376+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60377+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60378+#endif
60379+
60380+ gr_set_proc_res(task);
60381+
60382+ return;
60383+}
60384+
60385+int
60386+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60387+ const int unsafe_flags)
60388+{
60389+ struct task_struct *task = current;
60390+ struct acl_subject_label *newacl;
60391+ struct acl_object_label *obj;
60392+ __u32 retmode;
60393+
60394+ if (unlikely(!(gr_status & GR_READY)))
60395+ return 0;
60396+
60397+ newacl = chk_subj_label(dentry, mnt, task->role);
60398+
60399+ /* special handling for the case where an admin role did an strace -f -p <pid>
60400+ and the traced pid then did an exec
60401+ */
60402+ rcu_read_lock();
60403+ read_lock(&tasklist_lock);
60404+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
60405+ (task->parent->acl->mode & GR_POVERRIDE))) {
60406+ read_unlock(&tasklist_lock);
60407+ rcu_read_unlock();
60408+ goto skip_check;
60409+ }
60410+ read_unlock(&tasklist_lock);
60411+ rcu_read_unlock();
60412+
60413+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
60414+ !(task->role->roletype & GR_ROLE_GOD) &&
60415+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
60416+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60417+ if (unsafe_flags & LSM_UNSAFE_SHARE)
60418+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
60419+ else
60420+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
60421+ return -EACCES;
60422+ }
60423+
60424+skip_check:
60425+
60426+ obj = chk_obj_label(dentry, mnt, task->acl);
60427+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
60428+
60429+ if (!(task->acl->mode & GR_INHERITLEARN) &&
60430+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
60431+ if (obj->nested)
60432+ task->acl = obj->nested;
60433+ else
60434+ task->acl = newacl;
60435+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
60436+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
60437+
60438+ task->is_writable = 0;
60439+
60440+ /* ignore additional mmap checks for processes that are writable
60441+ by the default ACL */
60442+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
60443+ if (unlikely(obj->mode & GR_WRITE))
60444+ task->is_writable = 1;
60445+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
60446+ if (unlikely(obj->mode & GR_WRITE))
60447+ task->is_writable = 1;
60448+
60449+ gr_set_proc_res(task);
60450+
60451+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60452+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60453+#endif
60454+ return 0;
60455+}
60456+
60457+/* always called with valid inodev ptr */
60458+static void
60459+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
60460+{
60461+ struct acl_object_label *matchpo;
60462+ struct acl_subject_label *matchps;
60463+ struct acl_subject_label *subj;
60464+ struct acl_role_label *role;
60465+ unsigned int x;
60466+
60467+ FOR_EACH_ROLE_START(role)
60468+ FOR_EACH_SUBJECT_START(role, subj, x)
60469+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60470+ matchpo->mode |= GR_DELETED;
60471+ FOR_EACH_SUBJECT_END(subj,x)
60472+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60473+ /* nested subjects aren't in the role's subj_hash table */
60474+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60475+ matchpo->mode |= GR_DELETED;
60476+ FOR_EACH_NESTED_SUBJECT_END(subj)
60477+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
60478+ matchps->mode |= GR_DELETED;
60479+ FOR_EACH_ROLE_END(role)
60480+
60481+ inodev->nentry->deleted = 1;
60482+
60483+ return;
60484+}
60485+
60486+void
60487+gr_handle_delete(const ino_t ino, const dev_t dev)
60488+{
60489+ struct inodev_entry *inodev;
60490+
60491+ if (unlikely(!(gr_status & GR_READY)))
60492+ return;
60493+
60494+ write_lock(&gr_inode_lock);
60495+ inodev = lookup_inodev_entry(ino, dev);
60496+ if (inodev != NULL)
60497+ do_handle_delete(inodev, ino, dev);
60498+ write_unlock(&gr_inode_lock);
60499+
60500+ return;
60501+}
60502+
60503+static void
60504+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
60505+ const ino_t newinode, const dev_t newdevice,
60506+ struct acl_subject_label *subj)
60507+{
60508+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
60509+ struct acl_object_label *match;
60510+
60511+ match = subj->obj_hash[index];
60512+
60513+ while (match && (match->inode != oldinode ||
60514+ match->device != olddevice ||
60515+ !(match->mode & GR_DELETED)))
60516+ match = match->next;
60517+
60518+ if (match && (match->inode == oldinode)
60519+ && (match->device == olddevice)
60520+ && (match->mode & GR_DELETED)) {
60521+ if (match->prev == NULL) {
60522+ subj->obj_hash[index] = match->next;
60523+ if (match->next != NULL)
60524+ match->next->prev = NULL;
60525+ } else {
60526+ match->prev->next = match->next;
60527+ if (match->next != NULL)
60528+ match->next->prev = match->prev;
60529+ }
60530+ match->prev = NULL;
60531+ match->next = NULL;
60532+ match->inode = newinode;
60533+ match->device = newdevice;
60534+ match->mode &= ~GR_DELETED;
60535+
60536+ insert_acl_obj_label(match, subj);
60537+ }
60538+
60539+ return;
60540+}
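
update_acl_obj_label() re-keys a deleted object in place: unlink it from its doubly linked hash chain, swap in the new inode/device, clear GR_DELETED, and re-insert. A standalone sketch of the unlink step on a generic chain; the node type is hypothetical.

#include <stdio.h>

struct node {
    struct node *prev, *next;
    int key;
};

/* detach 'match' from the chain rooted at *bucket, fixing both links */
static void unlink_node(struct node **bucket, struct node *match)
{
    if (match->prev == NULL) {
        *bucket = match->next;             /* match was the chain head */
        if (match->next)
            match->next->prev = NULL;
    } else {
        match->prev->next = match->next;
        if (match->next)
            match->next->prev = match->prev;
    }
    match->prev = match->next = NULL;
}

int main(void)
{
    struct node a = { .key = 1 }, b = { .key = 2 }, c = { .key = 3 };
    struct node *bucket = &a, *p;

    a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
    unlink_node(&bucket, &b);              /* b can now be re-keyed and re-inserted */
    for (p = bucket; p; p = p->next)
        printf("%d ", p->key);             /* prints: 1 3 */
    printf("\n");
    return 0;
}
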
60541+
60542+static void
60543+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
60544+ const ino_t newinode, const dev_t newdevice,
60545+ struct acl_role_label *role)
60546+{
60547+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
60548+ struct acl_subject_label *match;
60549+
60550+ match = role->subj_hash[index];
60551+
60552+ while (match && (match->inode != oldinode ||
60553+ match->device != olddevice ||
60554+ !(match->mode & GR_DELETED)))
60555+ match = match->next;
60556+
60557+ if (match && (match->inode == oldinode)
60558+ && (match->device == olddevice)
60559+ && (match->mode & GR_DELETED)) {
60560+ if (match->prev == NULL) {
60561+ role->subj_hash[index] = match->next;
60562+ if (match->next != NULL)
60563+ match->next->prev = NULL;
60564+ } else {
60565+ match->prev->next = match->next;
60566+ if (match->next != NULL)
60567+ match->next->prev = match->prev;
60568+ }
60569+ match->prev = NULL;
60570+ match->next = NULL;
60571+ match->inode = newinode;
60572+ match->device = newdevice;
60573+ match->mode &= ~GR_DELETED;
60574+
60575+ insert_acl_subj_label(match, role);
60576+ }
60577+
60578+ return;
60579+}
60580+
60581+static void
60582+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
60583+ const ino_t newinode, const dev_t newdevice)
60584+{
60585+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
60586+ struct inodev_entry *match;
60587+
60588+ match = inodev_set.i_hash[index];
60589+
60590+ while (match && (match->nentry->inode != oldinode ||
60591+ match->nentry->device != olddevice || !match->nentry->deleted))
60592+ match = match->next;
60593+
60594+ if (match && (match->nentry->inode == oldinode)
60595+ && (match->nentry->device == olddevice) &&
60596+ match->nentry->deleted) {
60597+ if (match->prev == NULL) {
60598+ inodev_set.i_hash[index] = match->next;
60599+ if (match->next != NULL)
60600+ match->next->prev = NULL;
60601+ } else {
60602+ match->prev->next = match->next;
60603+ if (match->next != NULL)
60604+ match->next->prev = match->prev;
60605+ }
60606+ match->prev = NULL;
60607+ match->next = NULL;
60608+ match->nentry->inode = newinode;
60609+ match->nentry->device = newdevice;
60610+ match->nentry->deleted = 0;
60611+
60612+ insert_inodev_entry(match);
60613+ }
60614+
60615+ return;
60616+}
60617+
60618+static void
60619+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
60620+{
60621+ struct acl_subject_label *subj;
60622+ struct acl_role_label *role;
60623+ unsigned int x;
60624+
60625+ FOR_EACH_ROLE_START(role)
60626+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
60627+
60628+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60629+ if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
60630+ subj->inode = ino;
60631+ subj->device = dev;
60632+ }
60633+ /* nested subjects aren't in the role's subj_hash table */
60634+ update_acl_obj_label(matchn->inode, matchn->device,
60635+ ino, dev, subj);
60636+ FOR_EACH_NESTED_SUBJECT_END(subj)
60637+ FOR_EACH_SUBJECT_START(role, subj, x)
60638+ update_acl_obj_label(matchn->inode, matchn->device,
60639+ ino, dev, subj);
60640+ FOR_EACH_SUBJECT_END(subj,x)
60641+ FOR_EACH_ROLE_END(role)
60642+
60643+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
60644+
60645+ return;
60646+}
60647+
60648+static void
60649+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
60650+ const struct vfsmount *mnt)
60651+{
60652+ ino_t ino = dentry->d_inode->i_ino;
60653+ dev_t dev = __get_dev(dentry);
60654+
60655+ __do_handle_create(matchn, ino, dev);
60656+
60657+ return;
60658+}
60659+
60660+void
60661+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
60662+{
60663+ struct name_entry *matchn;
60664+
60665+ if (unlikely(!(gr_status & GR_READY)))
60666+ return;
60667+
60668+ preempt_disable();
60669+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
60670+
60671+ if (unlikely((unsigned long)matchn)) {
60672+ write_lock(&gr_inode_lock);
60673+ do_handle_create(matchn, dentry, mnt);
60674+ write_unlock(&gr_inode_lock);
60675+ }
60676+ preempt_enable();
60677+
60678+ return;
60679+}
60680+
60681+void
60682+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
60683+{
60684+ struct name_entry *matchn;
60685+
60686+ if (unlikely(!(gr_status & GR_READY)))
60687+ return;
60688+
60689+ preempt_disable();
60690+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
60691+
60692+ if (unlikely((unsigned long)matchn)) {
60693+ write_lock(&gr_inode_lock);
60694+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
60695+ write_unlock(&gr_inode_lock);
60696+ }
60697+ preempt_enable();
60698+
60699+ return;
60700+}
60701+
60702+void
60703+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60704+ struct dentry *old_dentry,
60705+ struct dentry *new_dentry,
60706+ struct vfsmount *mnt, const __u8 replace)
60707+{
60708+ struct name_entry *matchn;
60709+ struct inodev_entry *inodev;
60710+ struct inode *inode = new_dentry->d_inode;
60711+ ino_t old_ino = old_dentry->d_inode->i_ino;
60712+ dev_t old_dev = __get_dev(old_dentry);
60713+
60714+ /* vfs_rename swaps the name and parent link for old_dentry and
60715+ new_dentry.
60716+ at this point, old_dentry has the new name, parent link, and inode
60717+ for the renamed file.
60718+ if a file is being replaced by a rename, new_dentry has the inode
60719+ and name for the replaced file.
60720+ */
60721+
60722+ if (unlikely(!(gr_status & GR_READY)))
60723+ return;
60724+
60725+ preempt_disable();
60726+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
60727+
60728+ /* we wouldn't have to check d_inode if it weren't for
60729+ NFS silly-renaming
60730+ */
60731+
60732+ write_lock(&gr_inode_lock);
60733+ if (unlikely(replace && inode)) {
60734+ ino_t new_ino = inode->i_ino;
60735+ dev_t new_dev = __get_dev(new_dentry);
60736+
60737+ inodev = lookup_inodev_entry(new_ino, new_dev);
60738+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
60739+ do_handle_delete(inodev, new_ino, new_dev);
60740+ }
60741+
60742+ inodev = lookup_inodev_entry(old_ino, old_dev);
60743+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
60744+ do_handle_delete(inodev, old_ino, old_dev);
60745+
60746+ if (unlikely((unsigned long)matchn))
60747+ do_handle_create(matchn, old_dentry, mnt);
60748+
60749+ write_unlock(&gr_inode_lock);
60750+ preempt_enable();
60751+
60752+ return;
60753+}
60754+
60755+static int
60756+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
60757+ unsigned char **sum)
60758+{
60759+ struct acl_role_label *r;
60760+ struct role_allowed_ip *ipp;
60761+ struct role_transition *trans;
60762+ unsigned int i;
60763+ int found = 0;
60764+ u32 curr_ip = current->signal->curr_ip;
60765+
60766+ current->signal->saved_ip = curr_ip;
60767+
60768+ /* check transition table */
60769+
60770+ for (trans = current->role->transitions; trans; trans = trans->next) {
60771+ if (!strcmp(rolename, trans->rolename)) {
60772+ found = 1;
60773+ break;
60774+ }
60775+ }
60776+
60777+ if (!found)
60778+ return 0;
60779+
60780+ /* handle special roles that do not require authentication
60781+ and check ip */
60782+
60783+ FOR_EACH_ROLE_START(r)
60784+ if (!strcmp(rolename, r->rolename) &&
60785+ (r->roletype & GR_ROLE_SPECIAL)) {
60786+ found = 0;
60787+ if (r->allowed_ips != NULL) {
60788+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
60789+ if ((ntohl(curr_ip) & ipp->netmask) ==
60790+ (ntohl(ipp->addr) & ipp->netmask))
60791+ found = 1;
60792+ }
60793+ } else
60794+ found = 2;
60795+ if (!found)
60796+ return 0;
60797+
60798+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
60799+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
60800+ *salt = NULL;
60801+ *sum = NULL;
60802+ return 1;
60803+ }
60804+ }
60805+ FOR_EACH_ROLE_END(r)
60806+
60807+ for (i = 0; i < num_sprole_pws; i++) {
60808+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
60809+ *salt = acl_special_roles[i]->salt;
60810+ *sum = acl_special_roles[i]->sum;
60811+ return 1;
60812+ }
60813+ }
60814+
60815+ return 0;
60816+}
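
A sketch of the allowed_ips comparison above: both the client address and the configured network are masked in host byte order before comparing. inet_aton(3) and the prefix-length-to-mask conversion are illustrative assumptions, not part of the patch.

#include <arpa/inet.h>
#include <stdio.h>

/* return 1 when 'ip' falls inside 'net'/'maskbits' */
static int ip_in_net(const char *ip, const char *net, unsigned int maskbits)
{
    unsigned int mask = maskbits ? ~0U << (32 - maskbits) : 0;
    struct in_addr a, n;

    if (!inet_aton(ip, &a) || !inet_aton(net, &n))
        return 0;
    /* same shape as: (ntohl(curr_ip) & netmask) == (ntohl(addr) & netmask) */
    return (ntohl(a.s_addr) & mask) == (ntohl(n.s_addr) & mask);
}

int main(void)
{
    printf("%d\n", ip_in_net("192.168.1.42", "192.168.1.0", 24)); /* 1 */
    printf("%d\n", ip_in_net("10.0.0.1", "192.168.1.0", 24));     /* 0 */
    return 0;
}
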
60817+
60818+static void
60819+assign_special_role(char *rolename)
60820+{
60821+ struct acl_object_label *obj;
60822+ struct acl_role_label *r;
60823+ struct acl_role_label *assigned = NULL;
60824+ struct task_struct *tsk;
60825+ struct file *filp;
60826+
60827+ FOR_EACH_ROLE_START(r)
60828+ if (!strcmp(rolename, r->rolename) &&
60829+ (r->roletype & GR_ROLE_SPECIAL)) {
60830+ assigned = r;
60831+ break;
60832+ }
60833+ FOR_EACH_ROLE_END(r)
60834+
60835+ if (!assigned)
60836+ return;
60837+
60838+ read_lock(&tasklist_lock);
60839+ read_lock(&grsec_exec_file_lock);
60840+
60841+ tsk = current->real_parent;
60842+ if (tsk == NULL)
60843+ goto out_unlock;
60844+
60845+ filp = tsk->exec_file;
60846+ if (filp == NULL)
60847+ goto out_unlock;
60848+
60849+ tsk->is_writable = 0;
60850+
60851+ tsk->acl_sp_role = 1;
60852+ tsk->acl_role_id = ++acl_sp_role_value;
60853+ tsk->role = assigned;
60854+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
60855+
60856+ /* ignore additional mmap checks for processes that are writable
60857+ by the default ACL */
60858+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60859+ if (unlikely(obj->mode & GR_WRITE))
60860+ tsk->is_writable = 1;
60861+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
60862+ if (unlikely(obj->mode & GR_WRITE))
60863+ tsk->is_writable = 1;
60864+
60865+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60866+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
60867+#endif
60868+
60869+out_unlock:
60870+ read_unlock(&grsec_exec_file_lock);
60871+ read_unlock(&tasklist_lock);
60872+ return;
60873+}
60874+
60875+int gr_check_secure_terminal(struct task_struct *task)
60876+{
60877+ struct task_struct *p, *p2, *p3;
60878+ struct files_struct *files;
60879+ struct fdtable *fdt;
60880+ struct file *our_file = NULL, *file;
60881+ int i;
60882+
60883+ if (task->signal->tty == NULL)
60884+ return 1;
60885+
60886+ files = get_files_struct(task);
60887+ if (files != NULL) {
60888+ rcu_read_lock();
60889+ fdt = files_fdtable(files);
60890+ for (i = 0; i < fdt->max_fds; i++) {
60891+ file = fcheck_files(files, i);
60892+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
60893+ get_file(file);
60894+ our_file = file;
60895+ }
60896+ }
60897+ rcu_read_unlock();
60898+ put_files_struct(files);
60899+ }
60900+
60901+ if (our_file == NULL)
60902+ return 1;
60903+
60904+ read_lock(&tasklist_lock);
60905+ do_each_thread(p2, p) {
60906+ files = get_files_struct(p);
60907+ if (files == NULL ||
60908+ (p->signal && p->signal->tty == task->signal->tty)) {
60909+ if (files != NULL)
60910+ put_files_struct(files);
60911+ continue;
60912+ }
60913+ rcu_read_lock();
60914+ fdt = files_fdtable(files);
60915+ for (i = 0; i < fdt->max_fds; i++) {
60916+ file = fcheck_files(files, i);
60917+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
60918+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
60919+ p3 = task;
60920+ while (task_pid_nr(p3) > 0) {
60921+ if (p3 == p)
60922+ break;
60923+ p3 = p3->real_parent;
60924+ }
60925+ if (p3 == p)
60926+ break;
60927+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
60928+ gr_handle_alertkill(p);
60929+ rcu_read_unlock();
60930+ put_files_struct(files);
60931+ read_unlock(&tasklist_lock);
60932+ fput(our_file);
60933+ return 0;
60934+ }
60935+ }
60936+ rcu_read_unlock();
60937+ put_files_struct(files);
60938+ } while_each_thread(p2, p);
60939+ read_unlock(&tasklist_lock);
60940+
60941+ fput(our_file);
60942+ return 1;
60943+}
60944+
60945+static int gr_rbac_disable(void *unused)
60946+{
60947+ pax_open_kernel();
60948+ gr_status &= ~GR_READY;
60949+ pax_close_kernel();
60950+
60951+ return 0;
60952+}
60953+
60954+ssize_t
60955+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
60956+{
60957+ struct gr_arg_wrapper uwrap;
60958+ unsigned char *sprole_salt = NULL;
60959+ unsigned char *sprole_sum = NULL;
60960+ int error = sizeof (struct gr_arg_wrapper);
60961+ int error2 = 0;
60962+
60963+ mutex_lock(&gr_dev_mutex);
60964+
60965+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
60966+ error = -EPERM;
60967+ goto out;
60968+ }
60969+
60970+ if (count != sizeof (struct gr_arg_wrapper)) {
60971+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
60972+ error = -EINVAL;
60973+ goto out;
60974+ }
60975+
60976+
60977+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
60978+ gr_auth_expires = 0;
60979+ gr_auth_attempts = 0;
60980+ }
60981+
60982+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
60983+ error = -EFAULT;
60984+ goto out;
60985+ }
60986+
60987+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
60988+ error = -EINVAL;
60989+ goto out;
60990+ }
60991+
60992+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
60993+ error = -EFAULT;
60994+ goto out;
60995+ }
60996+
60997+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60998+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60999+ time_after(gr_auth_expires, get_seconds())) {
61000+ error = -EBUSY;
61001+ goto out;
61002+ }
61003+
61004+ /* if a non-root user is trying to do anything other than use a special role,
61005+ do not attempt authentication and do not count the attempt towards
61006+ authentication lockout
61007+ */
61008+
61009+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
61010+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61011+ gr_is_global_nonroot(current_uid())) {
61012+ error = -EPERM;
61013+ goto out;
61014+ }
61015+
61016+ /* ensure pw and special role name are null terminated */
61017+
61018+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
61019+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
61020+
61021+ /* Okay.
61022+ * We have enough of the argument structure (we have yet
61023+ * to copy_from_user the tables themselves). Copy the tables
61024+ * only if we need them, i.e. for loading operations. */
61025+
61026+ switch (gr_usermode->mode) {
61027+ case GR_STATUS:
61028+ if (gr_status & GR_READY) {
61029+ error = 1;
61030+ if (!gr_check_secure_terminal(current))
61031+ error = 3;
61032+ } else
61033+ error = 2;
61034+ goto out;
61035+ case GR_SHUTDOWN:
61036+ if ((gr_status & GR_READY)
61037+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61038+ stop_machine(gr_rbac_disable, NULL, NULL);
61039+ free_variables();
61040+ memset(gr_usermode, 0, sizeof (struct gr_arg));
61041+ memset(gr_system_salt, 0, GR_SALT_LEN);
61042+ memset(gr_system_sum, 0, GR_SHA_LEN);
61043+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
61044+ } else if (gr_status & GR_READY) {
61045+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
61046+ error = -EPERM;
61047+ } else {
61048+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
61049+ error = -EAGAIN;
61050+ }
61051+ break;
61052+ case GR_ENABLE:
61053+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
61054+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
61055+ else {
61056+ if (gr_status & GR_READY)
61057+ error = -EAGAIN;
61058+ else
61059+ error = error2;
61060+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
61061+ }
61062+ break;
61063+ case GR_RELOAD:
61064+ if (!(gr_status & GR_READY)) {
61065+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
61066+ error = -EAGAIN;
61067+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61068+ stop_machine(gr_rbac_disable, NULL, NULL);
61069+ free_variables();
61070+ error2 = gracl_init(gr_usermode);
61071+ if (!error2)
61072+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
61073+ else {
61074+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61075+ error = error2;
61076+ }
61077+ } else {
61078+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61079+ error = -EPERM;
61080+ }
61081+ break;
61082+ case GR_SEGVMOD:
61083+ if (unlikely(!(gr_status & GR_READY))) {
61084+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
61085+ error = -EAGAIN;
61086+ break;
61087+ }
61088+
61089+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61090+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
61091+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
61092+ struct acl_subject_label *segvacl;
61093+ segvacl =
61094+ lookup_acl_subj_label(gr_usermode->segv_inode,
61095+ gr_usermode->segv_device,
61096+ current->role);
61097+ if (segvacl) {
61098+ segvacl->crashes = 0;
61099+ segvacl->expires = 0;
61100+ }
61101+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
61102+ gr_remove_uid(gr_usermode->segv_uid);
61103+ }
61104+ } else {
61105+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
61106+ error = -EPERM;
61107+ }
61108+ break;
61109+ case GR_SPROLE:
61110+ case GR_SPROLEPAM:
61111+ if (unlikely(!(gr_status & GR_READY))) {
61112+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
61113+ error = -EAGAIN;
61114+ break;
61115+ }
61116+
61117+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
61118+ current->role->expires = 0;
61119+ current->role->auth_attempts = 0;
61120+ }
61121+
61122+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61123+ time_after(current->role->expires, get_seconds())) {
61124+ error = -EBUSY;
61125+ goto out;
61126+ }
61127+
61128+ if (lookup_special_role_auth
61129+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
61130+ && ((!sprole_salt && !sprole_sum)
61131+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
61132+ char *p = "";
61133+ assign_special_role(gr_usermode->sp_role);
61134+ read_lock(&tasklist_lock);
61135+ if (current->real_parent)
61136+ p = current->real_parent->role->rolename;
61137+ read_unlock(&tasklist_lock);
61138+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
61139+ p, acl_sp_role_value);
61140+ } else {
61141+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
61142+ error = -EPERM;
61143+ if (!(current->role->auth_attempts++))
61144+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61145+
61146+ goto out;
61147+ }
61148+ break;
61149+ case GR_UNSPROLE:
61150+ if (unlikely(!(gr_status & GR_READY))) {
61151+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
61152+ error = -EAGAIN;
61153+ break;
61154+ }
61155+
61156+ if (current->role->roletype & GR_ROLE_SPECIAL) {
61157+ char *p = "";
61158+ int i = 0;
61159+
61160+ read_lock(&tasklist_lock);
61161+ if (current->real_parent) {
61162+ p = current->real_parent->role->rolename;
61163+ i = current->real_parent->acl_role_id;
61164+ }
61165+ read_unlock(&tasklist_lock);
61166+
61167+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
61168+ gr_set_acls(1);
61169+ } else {
61170+ error = -EPERM;
61171+ goto out;
61172+ }
61173+ break;
61174+ default:
61175+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
61176+ error = -EINVAL;
61177+ break;
61178+ }
61179+
61180+ if (error != -EPERM)
61181+ goto out;
61182+
61183+ if (!(gr_auth_attempts++))
61184+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61185+
61186+ out:
61187+ mutex_unlock(&gr_dev_mutex);
61188+ return error;
61189+}
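
The failure path above arms a lockout window on the first failed attempt (gr_auth_attempts, gr_auth_expires) and refuses further attempts with -EBUSY until the window expires. A compact userspace model of that policy; MAXTRIES and TIMEOUT are stand-ins for the CONFIG_GRKERNSEC_ACL_MAXTRIES and CONFIG_GRKERNSEC_ACL_TIMEOUT values.

#include <stdio.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30 /* seconds */

static unsigned int auth_attempts;
static time_t auth_expires;

static int auth_allowed(void)
{
    time_t now = time(NULL);

    if (auth_expires && now >= auth_expires) {
        auth_expires = 0;              /* window elapsed: reset the counter */
        auth_attempts = 0;
    }
    return !(auth_attempts >= MAXTRIES && now < auth_expires);
}

static void auth_failed(void)
{
    if (!auth_attempts++)              /* only the first failure arms the window */
        auth_expires = time(NULL) + TIMEOUT;
}

int main(void)
{
    int i;

    for (i = 0; i < 5; i++) {
        printf("attempt %d: %s\n", i, auth_allowed() ? "ok" : "locked out");
        auth_failed();
    }
    return 0;
}
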
61190+
61191+/* must be called with
61192+ rcu_read_lock();
61193+ read_lock(&tasklist_lock);
61194+ read_lock(&grsec_exec_file_lock);
61195+*/
61196+int gr_apply_subject_to_task(struct task_struct *task)
61197+{
61198+ struct acl_object_label *obj;
61199+ char *tmpname;
61200+ struct acl_subject_label *tmpsubj;
61201+ struct file *filp;
61202+ struct name_entry *nmatch;
61203+
61204+ filp = task->exec_file;
61205+ if (filp == NULL)
61206+ return 0;
61207+
61208+ /* the following applies the correct subject to
61209+ binaries that were already running when the RBAC
61210+ system was enabled and that have been replaced
61211+ or deleted since their execution
61212+ -----
61213+ when the RBAC system starts, the inode/dev
61214+ from exec_file will be one the RBAC system
61215+ is unaware of. it only knows the inode/dev
61216+ of the file currently present on disk, or
61217+ the absence of it.
61218+ */
61219+ preempt_disable();
61220+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
61221+
61222+ nmatch = lookup_name_entry(tmpname);
61223+ preempt_enable();
61224+ tmpsubj = NULL;
61225+ if (nmatch) {
61226+ if (nmatch->deleted)
61227+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
61228+ else
61229+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
61230+ if (tmpsubj != NULL)
61231+ task->acl = tmpsubj;
61232+ }
61233+ if (tmpsubj == NULL)
61234+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
61235+ task->role);
61236+ if (task->acl) {
61237+ task->is_writable = 0;
61238+ /* ignore additional mmap checks for processes that are writable
61239+ by the default ACL */
61240+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61241+ if (unlikely(obj->mode & GR_WRITE))
61242+ task->is_writable = 1;
61243+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
61244+ if (unlikely(obj->mode & GR_WRITE))
61245+ task->is_writable = 1;
61246+
61247+ gr_set_proc_res(task);
61248+
61249+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61250+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61251+#endif
61252+ } else {
61253+ return 1;
61254+ }
61255+
61256+ return 0;
61257+}
61258+
61259+int
61260+gr_set_acls(const int type)
61261+{
61262+ struct task_struct *task, *task2;
61263+ struct acl_role_label *role = current->role;
61264+ __u16 acl_role_id = current->acl_role_id;
61265+ const struct cred *cred;
61266+ int ret;
61267+
61268+ rcu_read_lock();
61269+ read_lock(&tasklist_lock);
61270+ read_lock(&grsec_exec_file_lock);
61271+ do_each_thread(task2, task) {
61272+ /* check to see if we're called from the exit handler,
61273+ if so, only replace ACLs that have inherited the admin
61274+ ACL */
61275+
61276+ if (type && (task->role != role ||
61277+ task->acl_role_id != acl_role_id))
61278+ continue;
61279+
61280+ task->acl_role_id = 0;
61281+ task->acl_sp_role = 0;
61282+
61283+ if (task->exec_file) {
61284+ cred = __task_cred(task);
61285+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
61286+ ret = gr_apply_subject_to_task(task);
61287+ if (ret) {
61288+ read_unlock(&grsec_exec_file_lock);
61289+ read_unlock(&tasklist_lock);
61290+ rcu_read_unlock();
61291+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
61292+ return ret;
61293+ }
61294+ } else {
61295+ // it's a kernel process
61296+ task->role = kernel_role;
61297+ task->acl = kernel_role->root_label;
61298+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
61299+ task->acl->mode &= ~GR_PROCFIND;
61300+#endif
61301+ }
61302+ } while_each_thread(task2, task);
61303+ read_unlock(&grsec_exec_file_lock);
61304+ read_unlock(&tasklist_lock);
61305+ rcu_read_unlock();
61306+
61307+ return 0;
61308+}
61309+
61310+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
61311+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
61312+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
61313+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
61314+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
61315+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
61316+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
61317+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
61318+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
61319+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
61320+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
61321+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
61322+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
61323+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
61324+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
61325+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
61326+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
61327+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
61328+};
61329+
61330+void
61331+gr_learn_resource(const struct task_struct *task,
61332+ const int res, const unsigned long wanted, const int gt)
61333+{
61334+ struct acl_subject_label *acl;
61335+ const struct cred *cred;
61336+
61337+ if (unlikely((gr_status & GR_READY) &&
61338+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
61339+ goto skip_reslog;
61340+
61341+ gr_log_resource(task, res, wanted, gt);
61342+skip_reslog:
61343+
61344+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
61345+ return;
61346+
61347+ acl = task->acl;
61348+
61349+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
61350+ !(acl->resmask & (1U << (unsigned short) res))))
61351+ return;
61352+
61353+ if (wanted >= acl->res[res].rlim_cur) {
61354+ unsigned long res_add;
61355+
61356+ res_add = wanted + res_learn_bumps[res];
61357+
61358+ acl->res[res].rlim_cur = res_add;
61359+
61360+ if (wanted > acl->res[res].rlim_max)
61361+ acl->res[res].rlim_max = res_add;
61362+
61363+ /* only log the subject filename, since resource logging is supported for
61364+ single-subject learning only */
61365+ rcu_read_lock();
61366+ cred = __task_cred(task);
61367+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61368+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
61369+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
61370+ "", (unsigned long) res, &task->signal->saved_ip);
61371+ rcu_read_unlock();
61372+ }
61373+
61374+ return;
61375+}
61376+EXPORT_SYMBOL(gr_learn_resource);
61377+#endif
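
In learn mode, gr_learn_resource() grows the recorded soft limit to the requested value plus a per-resource bump from res_learn_bumps[], so the generated policy has headroom instead of a razor-thin limit. A reduced sketch of that update rule; the values are hypothetical.

#include <stdio.h>

struct learned_limit {
    unsigned long cur, max;
};

/* raise the learned soft limit to the request plus headroom; the hard
   limit follows only when the request exceeds it, as in the code above */
static void learn_resource(struct learned_limit *lim, unsigned long wanted,
                           unsigned long bump)
{
    if (wanted >= lim->cur) {
        unsigned long res_add = wanted + bump;

        lim->cur = res_add;
        if (wanted > lim->max)
            lim->max = res_add;
    }
}

int main(void)
{
    struct learned_limit nofile = { 0, 0 };

    learn_resource(&nofile, 64, 32);   /* cur=96 max=96 */
    learn_resource(&nofile, 80, 32);   /* below cur: no change */
    printf("cur=%lu max=%lu\n", nofile.cur, nofile.max);
    return 0;
}
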
61378+
61379+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
61380+void
61381+pax_set_initial_flags(struct linux_binprm *bprm)
61382+{
61383+ struct task_struct *task = current;
61384+ struct acl_subject_label *proc;
61385+ unsigned long flags;
61386+
61387+ if (unlikely(!(gr_status & GR_READY)))
61388+ return;
61389+
61390+ flags = pax_get_flags(task);
61391+
61392+ proc = task->acl;
61393+
61394+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
61395+ flags &= ~MF_PAX_PAGEEXEC;
61396+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
61397+ flags &= ~MF_PAX_SEGMEXEC;
61398+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
61399+ flags &= ~MF_PAX_RANDMMAP;
61400+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
61401+ flags &= ~MF_PAX_EMUTRAMP;
61402+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
61403+ flags &= ~MF_PAX_MPROTECT;
61404+
61405+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
61406+ flags |= MF_PAX_PAGEEXEC;
61407+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
61408+ flags |= MF_PAX_SEGMEXEC;
61409+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
61410+ flags |= MF_PAX_RANDMMAP;
61411+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
61412+ flags |= MF_PAX_EMUTRAMP;
61413+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
61414+ flags |= MF_PAX_MPROTECT;
61415+
61416+ pax_set_flags(task, flags);
61417+
61418+ return;
61419+}
61420+#endif
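
pax_set_initial_flags() is a clear-then-set pass: the per-subject GR_PAX_DISABLE_* bits strip features from the inherited flag word before the GR_PAX_ENABLE_* bits add them back, so an explicit enable wins over an inherited default. A two-flag sketch of that pattern; the flag names here are hypothetical.

#include <stdio.h>

#define F_PAGEEXEC (1U << 0)
#define F_MPROTECT (1U << 1)

static unsigned int apply_overrides(unsigned int flags,
                                    unsigned int disable, unsigned int enable)
{
    flags &= ~disable;   /* subject-disabled features are cleared first */
    flags |= enable;     /* then subject-enabled features are set */
    return flags;
}

int main(void)
{
    unsigned int f = apply_overrides(F_PAGEEXEC, F_PAGEEXEC, F_MPROTECT);

    printf("flags=%#x\n", f); /* F_MPROTECT only */
    return 0;
}
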
61421+
61422+int
61423+gr_handle_proc_ptrace(struct task_struct *task)
61424+{
61425+ struct file *filp;
61426+ struct task_struct *tmp = task;
61427+ struct task_struct *curtemp = current;
61428+ __u32 retmode;
61429+
61430+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61431+ if (unlikely(!(gr_status & GR_READY)))
61432+ return 0;
61433+#endif
61434+
61435+ read_lock(&tasklist_lock);
61436+ read_lock(&grsec_exec_file_lock);
61437+ filp = task->exec_file;
61438+
61439+ while (task_pid_nr(tmp) > 0) {
61440+ if (tmp == curtemp)
61441+ break;
61442+ tmp = tmp->real_parent;
61443+ }
61444+
61445+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61446+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
61447+ read_unlock(&grsec_exec_file_lock);
61448+ read_unlock(&tasklist_lock);
61449+ return 1;
61450+ }
61451+
61452+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61453+ if (!(gr_status & GR_READY)) {
61454+ read_unlock(&grsec_exec_file_lock);
61455+ read_unlock(&tasklist_lock);
61456+ return 0;
61457+ }
61458+#endif
61459+
61460+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
61461+ read_unlock(&grsec_exec_file_lock);
61462+ read_unlock(&tasklist_lock);
61463+
61464+ if (retmode & GR_NOPTRACE)
61465+ return 1;
61466+
61467+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
61468+ && (current->acl != task->acl || (current->acl != current->role->root_label
61469+ && task_pid_nr(current) != task_pid_nr(task))))
61470+ return 1;
61471+
61472+ return 0;
61473+}
61474+
61475+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
61476+{
61477+ if (unlikely(!(gr_status & GR_READY)))
61478+ return;
61479+
61480+ if (!(current->role->roletype & GR_ROLE_GOD))
61481+ return;
61482+
61483+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
61484+ p->role->rolename, gr_task_roletype_to_char(p),
61485+ p->acl->filename);
61486+}
61487+
61488+int
61489+gr_handle_ptrace(struct task_struct *task, const long request)
61490+{
61491+ struct task_struct *tmp = task;
61492+ struct task_struct *curtemp = current;
61493+ __u32 retmode;
61494+
61495+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61496+ if (unlikely(!(gr_status & GR_READY)))
61497+ return 0;
61498+#endif
61499+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
61500+ read_lock(&tasklist_lock);
61501+ while (task_pid_nr(tmp) > 0) {
61502+ if (tmp == curtemp)
61503+ break;
61504+ tmp = tmp->real_parent;
61505+ }
61506+
61507+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61508+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
61509+ read_unlock(&tasklist_lock);
61510+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61511+ return 1;
61512+ }
61513+ read_unlock(&tasklist_lock);
61514+ }
61515+
61516+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61517+ if (!(gr_status & GR_READY))
61518+ return 0;
61519+#endif
61520+
61521+ read_lock(&grsec_exec_file_lock);
61522+ if (unlikely(!task->exec_file)) {
61523+ read_unlock(&grsec_exec_file_lock);
61524+ return 0;
61525+ }
61526+
61527+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
61528+ read_unlock(&grsec_exec_file_lock);
61529+
61530+ if (retmode & GR_NOPTRACE) {
61531+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61532+ return 1;
61533+ }
61534+
61535+ if (retmode & GR_PTRACERD) {
61536+ switch (request) {
61537+ case PTRACE_SEIZE:
61538+ case PTRACE_POKETEXT:
61539+ case PTRACE_POKEDATA:
61540+ case PTRACE_POKEUSR:
61541+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
61542+ case PTRACE_SETREGS:
61543+ case PTRACE_SETFPREGS:
61544+#endif
61545+#ifdef CONFIG_X86
61546+ case PTRACE_SETFPXREGS:
61547+#endif
61548+#ifdef CONFIG_ALTIVEC
61549+ case PTRACE_SETVRREGS:
61550+#endif
61551+ return 1;
61552+ default:
61553+ return 0;
61554+ }
61555+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
61556+ !(current->role->roletype & GR_ROLE_GOD) &&
61557+ (current->acl != task->acl)) {
61558+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61559+ return 1;
61560+ }
61561+
61562+ return 0;
61563+}
61564+
61565+static int is_writable_mmap(const struct file *filp)
61566+{
61567+ struct task_struct *task = current;
61568+ struct acl_object_label *obj, *obj2;
61569+
61570+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
61571+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
61572+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61573+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
61574+ task->role->root_label);
61575+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
61576+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
61577+ return 1;
61578+ }
61579+ }
61580+ return 0;
61581+}
61582+
61583+int
61584+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
61585+{
61586+ __u32 mode;
61587+
61588+ if (unlikely(!file || !(prot & PROT_EXEC)))
61589+ return 1;
61590+
61591+ if (is_writable_mmap(file))
61592+ return 0;
61593+
61594+ mode =
61595+ gr_search_file(file->f_path.dentry,
61596+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61597+ file->f_path.mnt);
61598+
61599+ if (!gr_tpe_allow(file))
61600+ return 0;
61601+
61602+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61603+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61604+ return 0;
61605+ } else if (unlikely(!(mode & GR_EXEC))) {
61606+ return 0;
61607+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61608+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61609+ return 1;
61610+ }
61611+
61612+ return 1;
61613+}
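
gr_acl_handle_mmap() above and gr_acl_handle_mprotect() below share one decision table: deny and log unless the suppress bit is set, or allow and log only when the audit bit is set. A sketch of that table with hypothetical flag names.

#include <stdio.h>

#define M_EXEC     (1U << 0)
#define M_AUDIT    (1U << 1)
#define M_SUPPRESS (1U << 2)

/* returns 1 to allow; *log is set when a message should be emitted */
static int exec_allowed(unsigned int mode, int *log)
{
    *log = 0;
    if (!(mode & M_EXEC)) {
        if (!(mode & M_SUPPRESS))
            *log = 1;                  /* denial is logged unless suppressed */
        return 0;
    }
    if (mode & M_AUDIT)
        *log = 1;                      /* allowed, but the subject asks for auditing */
    return 1;
}

int main(void)
{
    int log;

    printf("%d %d\n", exec_allowed(M_EXEC | M_AUDIT, &log), log); /* 1 1 */
    printf("%d %d\n", exec_allowed(M_SUPPRESS, &log), log);       /* 0 0 */
    return 0;
}
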
61614+
61615+int
61616+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61617+{
61618+ __u32 mode;
61619+
61620+ if (unlikely(!file || !(prot & PROT_EXEC)))
61621+ return 1;
61622+
61623+ if (is_writable_mmap(file))
61624+ return 0;
61625+
61626+ mode =
61627+ gr_search_file(file->f_path.dentry,
61628+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61629+ file->f_path.mnt);
61630+
61631+ if (!gr_tpe_allow(file))
61632+ return 0;
61633+
61634+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61635+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61636+ return 0;
61637+ } else if (unlikely(!(mode & GR_EXEC))) {
61638+ return 0;
61639+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61640+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61641+ return 1;
61642+ }
61643+
61644+ return 1;
61645+}
61646+
61647+void
61648+gr_acl_handle_psacct(struct task_struct *task, const long code)
61649+{
61650+ unsigned long runtime;
61651+ unsigned long cputime;
61652+ unsigned int wday, cday;
61653+ __u8 whr, chr;
61654+ __u8 wmin, cmin;
61655+ __u8 wsec, csec;
61656+ struct timespec timeval;
61657+
61658+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
61659+ !(task->acl->mode & GR_PROCACCT)))
61660+ return;
61661+
61662+ do_posix_clock_monotonic_gettime(&timeval);
61663+ runtime = timeval.tv_sec - task->start_time.tv_sec;
61664+ wday = runtime / (3600 * 24);
61665+ runtime -= wday * (3600 * 24);
61666+ whr = runtime / 3600;
61667+ runtime -= whr * 3600;
61668+ wmin = runtime / 60;
61669+ runtime -= wmin * 60;
61670+ wsec = runtime;
61671+
61672+ cputime = (task->utime + task->stime) / HZ;
61673+ cday = cputime / (3600 * 24);
61674+ cputime -= cday * (3600 * 24);
61675+ chr = cputime / 3600;
61676+ cputime -= chr * 3600;
61677+ cmin = cputime / 60;
61678+ cputime -= cmin * 60;
61679+ csec = cputime;
61680+
61681+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
61682+
61683+ return;
61684+}
61685+
61686+void gr_set_kernel_label(struct task_struct *task)
61687+{
61688+ if (gr_status & GR_READY) {
61689+ task->role = kernel_role;
61690+ task->acl = kernel_role->root_label;
61691+ }
61692+ return;
61693+}
61694+
61695+#ifdef CONFIG_TASKSTATS
61696+int gr_is_taskstats_denied(int pid)
61697+{
61698+ struct task_struct *task;
61699+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61700+ const struct cred *cred;
61701+#endif
61702+ int ret = 0;
61703+
61704+ /* restrict taskstats viewing to un-chrooted root users
61705+ who have the 'view' subject flag if the RBAC system is enabled
61706+ */
61707+
61708+ rcu_read_lock();
61709+ read_lock(&tasklist_lock);
61710+ task = find_task_by_vpid(pid);
61711+ if (task) {
61712+#ifdef CONFIG_GRKERNSEC_CHROOT
61713+ if (proc_is_chrooted(task))
61714+ ret = -EACCES;
61715+#endif
61716+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61717+ cred = __task_cred(task);
61718+#ifdef CONFIG_GRKERNSEC_PROC_USER
61719+ if (gr_is_global_nonroot(cred->uid))
61720+ ret = -EACCES;
61721+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61722+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
61723+ ret = -EACCES;
61724+#endif
61725+#endif
61726+ if (gr_status & GR_READY) {
61727+ if (!(task->acl->mode & GR_VIEW))
61728+ ret = -EACCES;
61729+ }
61730+ } else
61731+ ret = -ENOENT;
61732+
61733+ read_unlock(&tasklist_lock);
61734+ rcu_read_unlock();
61735+
61736+ return ret;
61737+}
61738+#endif
61739+
61740+/* AUXV entries are filled via a descendant of search_binary_handler
61741+ after we've already applied the subject for the target
61742+*/
61743+int gr_acl_enable_at_secure(void)
61744+{
61745+ if (unlikely(!(gr_status & GR_READY)))
61746+ return 0;
61747+
61748+ if (current->acl->mode & GR_ATSECURE)
61749+ return 1;
61750+
61751+ return 0;
61752+}
61753+
61754+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
61755+{
61756+ struct task_struct *task = current;
61757+ struct dentry *dentry = file->f_path.dentry;
61758+ struct vfsmount *mnt = file->f_path.mnt;
61759+ struct acl_object_label *obj, *tmp;
61760+ struct acl_subject_label *subj;
61761+ unsigned int bufsize;
61762+ int is_not_root;
61763+ char *path;
61764+ dev_t dev = __get_dev(dentry);
61765+
61766+ if (unlikely(!(gr_status & GR_READY)))
61767+ return 1;
61768+
61769+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
61770+ return 1;
61771+
61772+ /* ignore Eric Biederman */
61773+ if (IS_PRIVATE(dentry->d_inode))
61774+ return 1;
61775+
61776+ subj = task->acl;
61777+ read_lock(&gr_inode_lock);
61778+ do {
61779+ obj = lookup_acl_obj_label(ino, dev, subj);
61780+ if (obj != NULL) {
61781+ read_unlock(&gr_inode_lock);
61782+ return (obj->mode & GR_FIND) ? 1 : 0;
61783+ }
61784+ } while ((subj = subj->parent_subject));
61785+ read_unlock(&gr_inode_lock);
61786+
61787+ /* this is purely an optimization since we're looking for an object
61788+ for the directory we're doing a readdir on
61789+ if it's possible for any globbed object to match the entry we're
61790+ filling into the directory, then the object we find here will be
61791+ an anchor point with attached globbed objects
61792+ */
61793+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
61794+ if (obj->globbed == NULL)
61795+ return (obj->mode & GR_FIND) ? 1 : 0;
61796+
61797+ is_not_root = ((obj->filename[0] == '/') &&
61798+ (obj->filename[1] == '\0')) ? 0 : 1;
61799+ bufsize = PAGE_SIZE - namelen - is_not_root;
61800+
61801+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
61802+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
61803+ return 1;
61804+
61805+ preempt_disable();
61806+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
61807+ bufsize);
61808+
61809+ bufsize = strlen(path);
61810+
61811+ /* if base is "/", don't append an additional slash */
61812+ if (is_not_root)
61813+ *(path + bufsize) = '/';
61814+ memcpy(path + bufsize + is_not_root, name, namelen);
61815+ *(path + bufsize + namelen + is_not_root) = '\0';
61816+
61817+ tmp = obj->globbed;
61818+ while (tmp) {
61819+ if (!glob_match(tmp->filename, path)) {
61820+ preempt_enable();
61821+ return (tmp->mode & GR_FIND) ? 1 : 0;
61822+ }
61823+ tmp = tmp->next;
61824+ }
61825+ preempt_enable();
61826+ return (obj->mode & GR_FIND) ? 1 : 0;
61827+}
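
gr_acl_handle_filldir() builds the candidate path for each directory entry (appending a slash unless the anchor is "/") and tests it against the anchor object's attached glob patterns. A userspace sketch of the shape of that check; fnmatch(3) is an assumption standing in for the kernel's own glob_match(), and the names are hypothetical.

#include <fnmatch.h>
#include <stdio.h>
#include <string.h>

static int entry_visible(const char *base, const char *name,
                         const char *const *globs, int nglobs, int dfl)
{
    char path[4096];
    int is_not_root = strcmp(base, "/") != 0;
    int i;

    /* if base is "/", don't append an additional slash */
    snprintf(path, sizeof(path), "%s%s%s", base, is_not_root ? "/" : "", name);

    for (i = 0; i < nglobs; i++)
        if (!fnmatch(globs[i], path, 0))
            return 1;                  /* a matching pattern decides (here: visible) */
    return dfl;                        /* fall back to the anchor object's mode */
}

int main(void)
{
    const char *globs[] = { "/home/*/.ssh" };

    printf("%d\n", entry_visible("/home/alice", ".ssh", globs, 1, 0)); /* 1 */
    return 0;
}
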
61828+
61829+void gr_put_exec_file(struct task_struct *task)
61830+{
61831+ struct file *filp;
61832+
61833+ write_lock(&grsec_exec_file_lock);
61834+ filp = task->exec_file;
61835+ task->exec_file = NULL;
61836+ write_unlock(&grsec_exec_file_lock);
61837+
61838+ if (filp)
61839+ fput(filp);
61840+
61841+ return;
61842+}
61843+
61844+
61845+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
61846+EXPORT_SYMBOL(gr_acl_is_enabled);
61847+#endif
61848+EXPORT_SYMBOL(gr_set_kernel_label);
61849+#ifdef CONFIG_SECURITY
61850+EXPORT_SYMBOL(gr_check_user_change);
61851+EXPORT_SYMBOL(gr_check_group_change);
61852+#endif
61853+
61854diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
61855new file mode 100644
61856index 0000000..34fefda
61857--- /dev/null
61858+++ b/grsecurity/gracl_alloc.c
61859@@ -0,0 +1,105 @@
61860+#include <linux/kernel.h>
61861+#include <linux/mm.h>
61862+#include <linux/slab.h>
61863+#include <linux/vmalloc.h>
61864+#include <linux/gracl.h>
61865+#include <linux/grsecurity.h>
61866+
61867+static unsigned long alloc_stack_next = 1;
61868+static unsigned long alloc_stack_size = 1;
61869+static void **alloc_stack;
61870+
61871+static __inline__ int
61872+alloc_pop(void)
61873+{
61874+ if (alloc_stack_next == 1)
61875+ return 0;
61876+
61877+ kfree(alloc_stack[alloc_stack_next - 2]);
61878+
61879+ alloc_stack_next--;
61880+
61881+ return 1;
61882+}
61883+
61884+static __inline__ int
61885+alloc_push(void *buf)
61886+{
61887+ if (alloc_stack_next >= alloc_stack_size)
61888+ return 1;
61889+
61890+ alloc_stack[alloc_stack_next - 1] = buf;
61891+
61892+ alloc_stack_next++;
61893+
61894+ return 0;
61895+}
61896+
61897+void *
61898+acl_alloc(unsigned long len)
61899+{
61900+ void *ret = NULL;
61901+
61902+ if (!len || len > PAGE_SIZE)
61903+ goto out;
61904+
61905+ ret = kmalloc(len, GFP_KERNEL);
61906+
61907+ if (ret) {
61908+ if (alloc_push(ret)) {
61909+ kfree(ret);
61910+ ret = NULL;
61911+ }
61912+ }
61913+
61914+out:
61915+ return ret;
61916+}
61917+
61918+void *
61919+acl_alloc_num(unsigned long num, unsigned long len)
61920+{
61921+ if (!len || (num > (PAGE_SIZE / len)))
61922+ return NULL;
61923+
61924+ return acl_alloc(num * len);
61925+}
61926+
61927+void
61928+acl_free_all(void)
61929+{
61930+ if (gr_acl_is_enabled() || !alloc_stack)
61931+ return;
61932+
61933+ while (alloc_pop()) ;
61934+
61935+ if (alloc_stack) {
61936+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
61937+ kfree(alloc_stack);
61938+ else
61939+ vfree(alloc_stack);
61940+ }
61941+
61942+ alloc_stack = NULL;
61943+ alloc_stack_size = 1;
61944+ alloc_stack_next = 1;
61945+
61946+ return;
61947+}
61948+
61949+int
61950+acl_alloc_stack_init(unsigned long size)
61951+{
61952+ if ((size * sizeof (void *)) <= PAGE_SIZE)
61953+ alloc_stack =
61954+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
61955+ else
61956+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
61957+
61958+ alloc_stack_size = size;
61959+
61960+ if (!alloc_stack)
61961+ return 0;
61962+ else
61963+ return 1;
61964+}
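
gracl_alloc.c implements a one-way allocation stack: every successful allocation is pushed so the entire policy can be torn down in a single pass if loading fails or RBAC shuts down. A userspace sketch of the same pattern; malloc/calloc stand in for kmalloc/vmalloc, dropping the original's size-based choice between the two.

#include <stdlib.h>

static void **stack;
static unsigned long next = 1, size = 1;

static int stack_init(unsigned long n)
{
    stack = calloc(n, sizeof(void *));
    size = n;
    return stack != NULL;
}

static void *tracked_alloc(unsigned long len)
{
    void *p;

    if (!len || next >= size)
        return NULL;                   /* stack full: refuse, as alloc_push does */
    p = malloc(len);
    if (p)
        stack[next++ - 1] = p;         /* record it for bulk teardown */
    return p;
}

static void free_all(void)
{
    while (next > 1)
        free(stack[--next - 1]);       /* pop and free in LIFO order */
    free(stack);
    stack = NULL;
    size = next = 1;
}

int main(void)
{
    if (!stack_init(64))
        return 1;
    (void)tracked_alloc(128);
    (void)tracked_alloc(256);
    free_all();                        /* frees both allocations and the stack */
    return 0;
}
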
61965diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
61966new file mode 100644
61967index 0000000..bdd51ea
61968--- /dev/null
61969+++ b/grsecurity/gracl_cap.c
61970@@ -0,0 +1,110 @@
61971+#include <linux/kernel.h>
61972+#include <linux/module.h>
61973+#include <linux/sched.h>
61974+#include <linux/gracl.h>
61975+#include <linux/grsecurity.h>
61976+#include <linux/grinternal.h>
61977+
61978+extern const char *captab_log[];
61979+extern int captab_log_entries;
61980+
61981+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
61982+{
61983+ struct acl_subject_label *curracl;
61984+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61985+ kernel_cap_t cap_audit = __cap_empty_set;
61986+
61987+ if (!gr_acl_is_enabled())
61988+ return 1;
61989+
61990+ curracl = task->acl;
61991+
61992+ cap_drop = curracl->cap_lower;
61993+ cap_mask = curracl->cap_mask;
61994+ cap_audit = curracl->cap_invert_audit;
61995+
61996+ while ((curracl = curracl->parent_subject)) {
61997+ /* if the cap isn't yet in the computed mask but is specified in the
61998+ current level subject, record this level's opinion: add it to the
61999+ computed mask, add it to the dropped set if this level lowers it,
62000+ and add it to the audit set if this level inverts its auditing
62001+ */
62002+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62003+ cap_raise(cap_mask, cap);
62004+ if (cap_raised(curracl->cap_lower, cap))
62005+ cap_raise(cap_drop, cap);
62006+ if (cap_raised(curracl->cap_invert_audit, cap))
62007+ cap_raise(cap_audit, cap);
62008+ }
62009+ }
62010+
62011+ if (!cap_raised(cap_drop, cap)) {
62012+ if (cap_raised(cap_audit, cap))
62013+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
62014+ return 1;
62015+ }
62016+
62017+ curracl = task->acl;
62018+
62019+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
62020+ && cap_raised(cred->cap_effective, cap)) {
62021+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
62022+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
62023+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
62024+ gr_to_filename(task->exec_file->f_path.dentry,
62025+ task->exec_file->f_path.mnt) : curracl->filename,
62026+ curracl->filename, 0UL,
62027+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
62028+ return 1;
62029+ }
62030+
62031+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
62032+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
62033+
62034+ return 0;
62035+}
62036+
62037+int
62038+gr_acl_is_capable(const int cap)
62039+{
62040+ return gr_task_acl_is_capable(current, current_cred(), cap);
62041+}
62042+
62043+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
62044+{
62045+ struct acl_subject_label *curracl;
62046+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62047+
62048+ if (!gr_acl_is_enabled())
62049+ return 1;
62050+
62051+ curracl = task->acl;
62052+
62053+ cap_drop = curracl->cap_lower;
62054+ cap_mask = curracl->cap_mask;
62055+
62056+ while ((curracl = curracl->parent_subject)) {
62057+ /* if the cap isn't yet in the computed mask but is specified in the
62058+ current level subject, record this level's opinion: add it to the
62059+ computed mask, and add it to the dropped set if this level
62060+ lowers it
62061+ */
62062+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62063+ cap_raise(cap_mask, cap);
62064+ if (cap_raised(curracl->cap_lower, cap))
62065+ cap_raise(cap_drop, cap);
62066+ }
62067+ }
62068+
62069+ if (!cap_raised(cap_drop, cap))
62070+ return 1;
62071+
62072+ return 0;
62073+}
62074+
62075+int
62076+gr_acl_is_capable_nolog(const int cap)
62077+{
62078+ return gr_task_acl_is_capable_nolog(current, cap);
62079+}
62080+
62081diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
62082new file mode 100644
62083index 0000000..a340c17
62084--- /dev/null
62085+++ b/grsecurity/gracl_fs.c
62086@@ -0,0 +1,431 @@
62087+#include <linux/kernel.h>
62088+#include <linux/sched.h>
62089+#include <linux/types.h>
62090+#include <linux/fs.h>
62091+#include <linux/file.h>
62092+#include <linux/stat.h>
62093+#include <linux/grsecurity.h>
62094+#include <linux/grinternal.h>
62095+#include <linux/gracl.h>
62096+
62097+umode_t
62098+gr_acl_umask(void)
62099+{
62100+ if (unlikely(!gr_acl_is_enabled()))
62101+ return 0;
62102+
62103+ return current->role->umask;
62104+}
62105+
62106+__u32
62107+gr_acl_handle_hidden_file(const struct dentry * dentry,
62108+ const struct vfsmount * mnt)
62109+{
62110+ __u32 mode;
62111+
62112+ if (unlikely(!dentry->d_inode))
62113+ return GR_FIND;
62114+
62115+ mode =
62116+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
62117+
62118+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
62119+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62120+ return mode;
62121+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
62122+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62123+ return 0;
62124+ } else if (unlikely(!(mode & GR_FIND)))
62125+ return 0;
62126+
62127+ return GR_FIND;
62128+}
62129+
62130+__u32
62131+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62132+ int acc_mode)
62133+{
62134+ __u32 reqmode = GR_FIND;
62135+ __u32 mode;
62136+
62137+ if (unlikely(!dentry->d_inode))
62138+ return reqmode;
62139+
62140+ if (acc_mode & MAY_APPEND)
62141+ reqmode |= GR_APPEND;
62142+ else if (acc_mode & MAY_WRITE)
62143+ reqmode |= GR_WRITE;
62144+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
62145+ reqmode |= GR_READ;
62146+
62147+ mode =
62148+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62149+ mnt);
62150+
62151+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62152+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62153+ reqmode & GR_READ ? " reading" : "",
62154+ reqmode & GR_WRITE ? " writing" : reqmode &
62155+ GR_APPEND ? " appending" : "");
62156+ return reqmode;
62157+ } else
62158+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62159+ {
62160+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62161+ reqmode & GR_READ ? " reading" : "",
62162+ reqmode & GR_WRITE ? " writing" : reqmode &
62163+ GR_APPEND ? " appending" : "");
62164+ return 0;
62165+ } else if (unlikely((mode & reqmode) != reqmode))
62166+ return 0;
62167+
62168+ return reqmode;
62169+}
62170+
62171+__u32
62172+gr_acl_handle_creat(const struct dentry * dentry,
62173+ const struct dentry * p_dentry,
62174+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62175+ const int imode)
62176+{
62177+ __u32 reqmode = GR_WRITE | GR_CREATE;
62178+ __u32 mode;
62179+
62180+ if (acc_mode & MAY_APPEND)
62181+ reqmode |= GR_APPEND;
62182+ // if a directory was required or the directory already exists, then
62183+ // don't count this open as a read
62184+ if ((acc_mode & MAY_READ) &&
62185+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
62186+ reqmode |= GR_READ;
62187+ if ((open_flags & O_CREAT) &&
62188+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62189+ reqmode |= GR_SETID;
62190+
62191+ mode =
62192+ gr_check_create(dentry, p_dentry, p_mnt,
62193+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62194+
62195+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62196+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62197+ reqmode & GR_READ ? " reading" : "",
62198+ reqmode & GR_WRITE ? " writing" : reqmode &
62199+ GR_APPEND ? " appending" : "");
62200+ return reqmode;
62201+ } else
62202+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62203+ {
62204+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62205+ reqmode & GR_READ ? " reading" : "",
62206+ reqmode & GR_WRITE ? " writing" : reqmode &
62207+ GR_APPEND ? " appending" : "");
62208+ return 0;
62209+ } else if (unlikely((mode & reqmode) != reqmode))
62210+ return 0;
62211+
62212+ return reqmode;
62213+}
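
The GR_SETID test above fires when the created file would be setuid, or setgid together with group execute; setgid without group execute denotes mandatory locking rather than a privilege grant, so it is deliberately excluded. A small standalone restatement of that predicate:

#include <stdio.h>
#include <sys/stat.h>

/* mirrors the patch's test: setuid, or setgid on a group-executable file */
static int mode_grants_setid(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", mode_grants_setid(04755)); /* 1: setuid */
	printf("%d\n", mode_grants_setid(02755)); /* 1: setgid + group exec */
	printf("%d\n", mode_grants_setid(02644)); /* 0: mandatory-locking bit */
	return 0;
}
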
62214+
62215+__u32
62216+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
62217+ const int fmode)
62218+{
62219+ __u32 mode, reqmode = GR_FIND;
62220+
62221+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
62222+ reqmode |= GR_EXEC;
62223+ if (fmode & S_IWOTH)
62224+ reqmode |= GR_WRITE;
62225+ if (fmode & S_IROTH)
62226+ reqmode |= GR_READ;
62227+
62228+ mode =
62229+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62230+ mnt);
62231+
62232+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62233+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62234+ reqmode & GR_READ ? " reading" : "",
62235+ reqmode & GR_WRITE ? " writing" : "",
62236+ reqmode & GR_EXEC ? " executing" : "");
62237+ return reqmode;
62238+ } else
62239+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62240+ {
62241+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62242+ reqmode & GR_READ ? " reading" : "",
62243+ reqmode & GR_WRITE ? " writing" : "",
62244+ reqmode & GR_EXEC ? " executing" : "");
62245+ return 0;
62246+ } else if (unlikely((mode & reqmode) != reqmode))
62247+ return 0;
62248+
62249+ return reqmode;
62250+}
62251+
62252+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
62253+{
62254+ __u32 mode;
62255+
62256+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
62257+
62258+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62259+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
62260+ return mode;
62261+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62262+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
62263+ return 0;
62264+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62265+ return 0;
62266+
62267+ return (reqmode);
62268+}
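
generic_fs_handler condenses the allow/deny/audit pattern repeated throughout this file into one helper: grant with an audit log when every requested bit is present and an audit bit fired, deny with a log when a bit is missing and suppression wasn't requested, and otherwise decide silently. A userspace sketch of just that three-way branch, with gr_search_file and the log calls stubbed out and hypothetical flag values:

#include <stdio.h>

#define GR_AUDITS_X   0x100 /* hypothetical: "some audit bit fired" */
#define GR_SUPPRESS_X 0x200 /* hypothetical: "suppress denial logging" */

/* same branch structure as generic_fs_handler, logging stubbed with puts */
static unsigned decide(unsigned mode, unsigned reqmode)
{
	if ((mode & reqmode) == reqmode && (mode & GR_AUDITS_X)) {
		puts("allow + audit log");
		return reqmode;
	}
	if ((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS_X)) {
		puts("deny + log");
		return 0;
	}
	if ((mode & reqmode) != reqmode)
		return 0;       /* deny silently (suppressed) */
	return reqmode;         /* allow silently (no audit bit) */
}

int main(void)
{
	decide(0x3 | GR_AUDITS_X, 0x3);   /* allow + audit log */
	decide(0x1, 0x3);                 /* deny + log */
	decide(0x1 | GR_SUPPRESS_X, 0x3); /* silent deny */
	return 0;
}
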
62269+
62270+__u32
62271+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62272+{
62273+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
62274+}
62275+
62276+__u32
62277+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
62278+{
62279+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
62280+}
62281+
62282+__u32
62283+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
62284+{
62285+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
62286+}
62287+
62288+__u32
62289+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
62290+{
62291+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
62292+}
62293+
62294+__u32
62295+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
62296+ umode_t *modeptr)
62297+{
62298+ umode_t mode;
62299+
62300+ *modeptr &= ~gr_acl_umask();
62301+ mode = *modeptr;
62302+
62303+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
62304+ return 1;
62305+
62306+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
62307+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
62308+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
62309+ GR_CHMOD_ACL_MSG);
62310+ } else {
62311+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
62312+ }
62313+}
62314+
62315+__u32
62316+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
62317+{
62318+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
62319+}
62320+
62321+__u32
62322+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
62323+{
62324+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
62325+}
62326+
62327+__u32
62328+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
62329+{
62330+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
62331+}
62332+
62333+__u32
62334+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
62335+{
62336+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
62337+ GR_UNIXCONNECT_ACL_MSG);
62338+}
62339+
62340+/* hardlinks require at minimum create and link permission;
62341+   any additional privilege required is based on the
62342+   privilege of the file being linked to
62343+*/
62344+__u32
62345+gr_acl_handle_link(const struct dentry * new_dentry,
62346+ const struct dentry * parent_dentry,
62347+ const struct vfsmount * parent_mnt,
62348+ const struct dentry * old_dentry,
62349+ const struct vfsmount * old_mnt, const struct filename *to)
62350+{
62351+ __u32 mode;
62352+ __u32 needmode = GR_CREATE | GR_LINK;
62353+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
62354+
62355+ mode =
62356+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
62357+ old_mnt);
62358+
62359+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
62360+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62361+ return mode;
62362+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62363+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62364+ return 0;
62365+ } else if (unlikely((mode & needmode) != needmode))
62366+ return 0;
62367+
62368+ return 1;
62369+}
62370+
62371+__u32
62372+gr_acl_handle_symlink(const struct dentry * new_dentry,
62373+ const struct dentry * parent_dentry,
62374+ const struct vfsmount * parent_mnt, const struct filename *from)
62375+{
62376+ __u32 needmode = GR_WRITE | GR_CREATE;
62377+ __u32 mode;
62378+
62379+ mode =
62380+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
62381+ GR_CREATE | GR_AUDIT_CREATE |
62382+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
62383+
62384+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
62385+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62386+ return mode;
62387+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62388+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62389+ return 0;
62390+ } else if (unlikely((mode & needmode) != needmode))
62391+ return 0;
62392+
62393+ return (GR_WRITE | GR_CREATE);
62394+}
62395+
62396+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
62397+{
62398+ __u32 mode;
62399+
62400+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62401+
62402+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62403+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
62404+ return mode;
62405+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62406+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
62407+ return 0;
62408+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62409+ return 0;
62410+
62411+ return (reqmode);
62412+}
62413+
62414+__u32
62415+gr_acl_handle_mknod(const struct dentry * new_dentry,
62416+ const struct dentry * parent_dentry,
62417+ const struct vfsmount * parent_mnt,
62418+ const int mode)
62419+{
62420+ __u32 reqmode = GR_WRITE | GR_CREATE;
62421+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62422+ reqmode |= GR_SETID;
62423+
62424+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62425+ reqmode, GR_MKNOD_ACL_MSG);
62426+}
62427+
62428+__u32
62429+gr_acl_handle_mkdir(const struct dentry *new_dentry,
62430+ const struct dentry *parent_dentry,
62431+ const struct vfsmount *parent_mnt)
62432+{
62433+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62434+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
62435+}
62436+
62437+#define RENAME_CHECK_SUCCESS(old, new) \
62438+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
62439+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
62440+
62441+int
62442+gr_acl_handle_rename(struct dentry *new_dentry,
62443+ struct dentry *parent_dentry,
62444+ const struct vfsmount *parent_mnt,
62445+ struct dentry *old_dentry,
62446+ struct inode *old_parent_inode,
62447+ struct vfsmount *old_mnt, const struct filename *newname)
62448+{
62449+ __u32 comp1, comp2;
62450+ int error = 0;
62451+
62452+ if (unlikely(!gr_acl_is_enabled()))
62453+ return 0;
62454+
62455+ if (!new_dentry->d_inode) {
62456+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
62457+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
62458+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
62459+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
62460+ GR_DELETE | GR_AUDIT_DELETE |
62461+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62462+ GR_SUPPRESS, old_mnt);
62463+ } else {
62464+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
62465+ GR_CREATE | GR_DELETE |
62466+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
62467+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62468+ GR_SUPPRESS, parent_mnt);
62469+ comp2 =
62470+ gr_search_file(old_dentry,
62471+ GR_READ | GR_WRITE | GR_AUDIT_READ |
62472+ GR_DELETE | GR_AUDIT_DELETE |
62473+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
62474+ }
62475+
62476+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
62477+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
62478+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62479+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
62480+ && !(comp2 & GR_SUPPRESS)) {
62481+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62482+ error = -EACCES;
62483+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
62484+ error = -EACCES;
62485+
62486+ return error;
62487+}
62488+
62489+void
62490+gr_acl_handle_exit(void)
62491+{
62492+ u16 id;
62493+ char *rolename;
62494+
62495+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
62496+ !(current->role->roletype & GR_ROLE_PERSIST))) {
62497+ id = current->acl_role_id;
62498+ rolename = current->role->rolename;
62499+ gr_set_acls(1);
62500+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
62501+ }
62502+
62503+ gr_put_exec_file(current);
62504+ return;
62505+}
62506+
62507+int
62508+gr_acl_handle_procpidmem(const struct task_struct *task)
62509+{
62510+ if (unlikely(!gr_acl_is_enabled()))
62511+ return 0;
62512+
62513+ if (task != current && task->acl->mode & GR_PROTPROCFD)
62514+ return -EACCES;
62515+
62516+ return 0;
62517+}
62518diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
62519new file mode 100644
62520index 0000000..8132048
62521--- /dev/null
62522+++ b/grsecurity/gracl_ip.c
62523@@ -0,0 +1,387 @@
62524+#include <linux/kernel.h>
62525+#include <asm/uaccess.h>
62526+#include <asm/errno.h>
62527+#include <net/sock.h>
62528+#include <linux/file.h>
62529+#include <linux/fs.h>
62530+#include <linux/net.h>
62531+#include <linux/in.h>
62532+#include <linux/skbuff.h>
62533+#include <linux/ip.h>
62534+#include <linux/udp.h>
62535+#include <linux/types.h>
62536+#include <linux/sched.h>
62537+#include <linux/netdevice.h>
62538+#include <linux/inetdevice.h>
62539+#include <linux/gracl.h>
62540+#include <linux/grsecurity.h>
62541+#include <linux/grinternal.h>
62542+
62543+#define GR_BIND 0x01
62544+#define GR_CONNECT 0x02
62545+#define GR_INVERT 0x04
62546+#define GR_BINDOVERRIDE 0x08
62547+#define GR_CONNECTOVERRIDE 0x10
62548+#define GR_SOCK_FAMILY 0x20
62549+
62550+static const char * gr_protocols[IPPROTO_MAX] = {
62551+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
62552+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
62553+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
62554+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
62555+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
62556+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
62557+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
62558+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
62559+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
62560+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
62561+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
62562+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
62563+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
62564+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
62565+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
62566+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
62567+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
62568+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
62569+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
62570+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
62571+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
62572+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
62573+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
62574+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
62575+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
62576+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
62577+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
62578+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
62579+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
62580+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
62581+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
62582+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
62583+ };
62584+
62585+static const char * gr_socktypes[SOCK_MAX] = {
62586+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
62587+ "unknown:7", "unknown:8", "unknown:9", "packet"
62588+ };
62589+
62590+static const char * gr_sockfamilies[AF_MAX+1] = {
62591+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
62592+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
62593+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
62594+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
62595+ };
62596+
62597+const char *
62598+gr_proto_to_name(unsigned char proto)
62599+{
62600+ return gr_protocols[proto];
62601+}
62602+
62603+const char *
62604+gr_socktype_to_name(unsigned char type)
62605+{
62606+ return gr_socktypes[type];
62607+}
62608+
62609+const char *
62610+gr_sockfamily_to_name(unsigned char family)
62611+{
62612+ return gr_sockfamilies[family];
62613+}
62614+
62615+int
62616+gr_search_socket(const int domain, const int type, const int protocol)
62617+{
62618+ struct acl_subject_label *curr;
62619+ const struct cred *cred = current_cred();
62620+
62621+ if (unlikely(!gr_acl_is_enabled()))
62622+ goto exit;
62623+
62624+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
62625+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
62626+ goto exit; // let the kernel handle it
62627+
62628+ curr = current->acl;
62629+
62630+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
62631+		/* the family is allowed; if this is PF_INET, allow it only if
62632+		   the extra sock type/protocol checks pass */
62633+ if (domain == PF_INET)
62634+ goto inet_check;
62635+ goto exit;
62636+ } else {
62637+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62638+ __u32 fakeip = 0;
62639+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62640+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62641+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62642+ gr_to_filename(current->exec_file->f_path.dentry,
62643+ current->exec_file->f_path.mnt) :
62644+ curr->filename, curr->filename,
62645+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
62646+ &current->signal->saved_ip);
62647+ goto exit;
62648+ }
62649+ goto exit_fail;
62650+ }
62651+
62652+inet_check:
62653+ /* the rest of this checking is for IPv4 only */
62654+ if (!curr->ips)
62655+ goto exit;
62656+
62657+ if ((curr->ip_type & (1U << type)) &&
62658+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
62659+ goto exit;
62660+
62661+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62662+		/* we don't place ACLs on raw sockets, and sometimes
62663+		   dgram/ip sockets are opened for ioctl rather than
62664+		   bind/connect, so we'll fake a bind learn log */
62665+ if (type == SOCK_RAW || type == SOCK_PACKET) {
62666+ __u32 fakeip = 0;
62667+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62668+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62669+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62670+ gr_to_filename(current->exec_file->f_path.dentry,
62671+ current->exec_file->f_path.mnt) :
62672+ curr->filename, curr->filename,
62673+ &fakeip, 0, type,
62674+ protocol, GR_CONNECT, &current->signal->saved_ip);
62675+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
62676+ __u32 fakeip = 0;
62677+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62678+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62679+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62680+ gr_to_filename(current->exec_file->f_path.dentry,
62681+ current->exec_file->f_path.mnt) :
62682+ curr->filename, curr->filename,
62683+ &fakeip, 0, type,
62684+ protocol, GR_BIND, &current->signal->saved_ip);
62685+ }
62686+ /* we'll log when they use connect or bind */
62687+ goto exit;
62688+ }
62689+
62690+exit_fail:
62691+ if (domain == PF_INET)
62692+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
62693+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
62694+ else
62695+#ifndef CONFIG_IPV6
62696+ if (domain != PF_INET6)
62697+#endif
62698+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
62699+ gr_socktype_to_name(type), protocol);
62700+
62701+ return 0;
62702+exit:
62703+ return 1;
62704+}
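
The per-subject family and protocol tables consulted above are packed bitsets addressed a 32-bit word at a time: word n/32, bit n%32, as in curr->sock_families[domain / 32] & (1U << (domain % 32)). A minimal sketch of that addressing:

#include <stdio.h>

/* same word/bit addressing as curr->sock_families / curr->ip_proto */
static int bit_test(const unsigned *set, int n)
{
	return !!(set[n / 32] & (1U << (n % 32)));
}

static void bit_set(unsigned *set, int n)
{
	set[n / 32] |= 1U << (n % 32);
}

int main(void)
{
	unsigned families[(256 + 31) / 32] = {0}; /* round up to whole words */

	bit_set(families, 2);  /* PF_INET == 2 */
	printf("inet allowed:  %d\n", bit_test(families, 2));  /* 1 */
	printf("inet6 allowed: %d\n", bit_test(families, 10)); /* 0 */
	return 0;
}
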
62705+
62706+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
62707+{
62708+ if ((ip->mode & mode) &&
62709+ (ip_port >= ip->low) &&
62710+ (ip_port <= ip->high) &&
62711+ ((ntohl(ip_addr) & our_netmask) ==
62712+ (ntohl(our_addr) & our_netmask))
62713+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
62714+ && (ip->type & (1U << type))) {
62715+ if (ip->mode & GR_INVERT)
62716+ return 2; // specifically denied
62717+ else
62718+ return 1; // allowed
62719+ }
62720+
62721+ return 0; // not specifically allowed, may continue parsing
62722+}
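
The address comparison in check_ip_policy converts both sides to host order and compares them under the rule's netmask, so one rule can cover a whole subnet. A standalone sketch of that match (assuming, as in the rule structure, that the netmask is already in host order):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* same match as check_ip_policy: host-order compare under the netmask */
static int subnet_match(uint32_t addr_be, uint32_t rule_be, uint32_t mask)
{
	return (ntohl(addr_be) & mask) == (ntohl(rule_be) & mask);
}

int main(void)
{
	uint32_t addr = inet_addr("192.168.1.42");
	uint32_t rule = inet_addr("192.168.1.0");

	printf("%d\n", subnet_match(addr, rule, 0xffffff00)); /* 1: same /24 */
	printf("%d\n", subnet_match(addr, rule, 0xffffffff)); /* 0: exact only */
	return 0;
}
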
62723+
62724+static int
62725+gr_search_connectbind(const int full_mode, struct sock *sk,
62726+ struct sockaddr_in *addr, const int type)
62727+{
62728+ char iface[IFNAMSIZ] = {0};
62729+ struct acl_subject_label *curr;
62730+ struct acl_ip_label *ip;
62731+ struct inet_sock *isk;
62732+ struct net_device *dev;
62733+ struct in_device *idev;
62734+ unsigned long i;
62735+ int ret;
62736+ int mode = full_mode & (GR_BIND | GR_CONNECT);
62737+ __u32 ip_addr = 0;
62738+ __u32 our_addr;
62739+ __u32 our_netmask;
62740+ char *p;
62741+ __u16 ip_port = 0;
62742+ const struct cred *cred = current_cred();
62743+
62744+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
62745+ return 0;
62746+
62747+ curr = current->acl;
62748+ isk = inet_sk(sk);
62749+
62750+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
62751+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
62752+ addr->sin_addr.s_addr = curr->inaddr_any_override;
62753+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
62754+ struct sockaddr_in saddr;
62755+ int err;
62756+
62757+ saddr.sin_family = AF_INET;
62758+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
62759+ saddr.sin_port = isk->inet_sport;
62760+
62761+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62762+ if (err)
62763+ return err;
62764+
62765+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62766+ if (err)
62767+ return err;
62768+ }
62769+
62770+ if (!curr->ips)
62771+ return 0;
62772+
62773+ ip_addr = addr->sin_addr.s_addr;
62774+ ip_port = ntohs(addr->sin_port);
62775+
62776+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62777+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62778+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62779+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62780+ gr_to_filename(current->exec_file->f_path.dentry,
62781+ current->exec_file->f_path.mnt) :
62782+ curr->filename, curr->filename,
62783+ &ip_addr, ip_port, type,
62784+ sk->sk_protocol, mode, &current->signal->saved_ip);
62785+ return 0;
62786+ }
62787+
62788+ for (i = 0; i < curr->ip_num; i++) {
62789+ ip = *(curr->ips + i);
62790+ if (ip->iface != NULL) {
62791+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
62792+ p = strchr(iface, ':');
62793+ if (p != NULL)
62794+ *p = '\0';
62795+ dev = dev_get_by_name(sock_net(sk), iface);
62796+ if (dev == NULL)
62797+ continue;
62798+ idev = in_dev_get(dev);
62799+ if (idev == NULL) {
62800+ dev_put(dev);
62801+ continue;
62802+ }
62803+ rcu_read_lock();
62804+ for_ifa(idev) {
62805+ if (!strcmp(ip->iface, ifa->ifa_label)) {
62806+ our_addr = ifa->ifa_address;
62807+ our_netmask = 0xffffffff;
62808+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62809+ if (ret == 1) {
62810+ rcu_read_unlock();
62811+ in_dev_put(idev);
62812+ dev_put(dev);
62813+ return 0;
62814+ } else if (ret == 2) {
62815+ rcu_read_unlock();
62816+ in_dev_put(idev);
62817+ dev_put(dev);
62818+ goto denied;
62819+ }
62820+ }
62821+ } endfor_ifa(idev);
62822+ rcu_read_unlock();
62823+ in_dev_put(idev);
62824+ dev_put(dev);
62825+ } else {
62826+ our_addr = ip->addr;
62827+ our_netmask = ip->netmask;
62828+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62829+ if (ret == 1)
62830+ return 0;
62831+ else if (ret == 2)
62832+ goto denied;
62833+ }
62834+ }
62835+
62836+denied:
62837+ if (mode == GR_BIND)
62838+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62839+ else if (mode == GR_CONNECT)
62840+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62841+
62842+ return -EACCES;
62843+}
62844+
62845+int
62846+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
62847+{
62848+ /* always allow disconnection of dgram sockets with connect */
62849+ if (addr->sin_family == AF_UNSPEC)
62850+ return 0;
62851+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
62852+}
62853+
62854+int
62855+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
62856+{
62857+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
62858+}
62859+
62860+int gr_search_listen(struct socket *sock)
62861+{
62862+ struct sock *sk = sock->sk;
62863+ struct sockaddr_in addr;
62864+
62865+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62866+ addr.sin_port = inet_sk(sk)->inet_sport;
62867+
62868+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62869+}
62870+
62871+int gr_search_accept(struct socket *sock)
62872+{
62873+ struct sock *sk = sock->sk;
62874+ struct sockaddr_in addr;
62875+
62876+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62877+ addr.sin_port = inet_sk(sk)->inet_sport;
62878+
62879+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62880+}
62881+
62882+int
62883+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
62884+{
62885+ if (addr)
62886+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
62887+ else {
62888+ struct sockaddr_in sin;
62889+ const struct inet_sock *inet = inet_sk(sk);
62890+
62891+ sin.sin_addr.s_addr = inet->inet_daddr;
62892+ sin.sin_port = inet->inet_dport;
62893+
62894+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62895+ }
62896+}
62897+
62898+int
62899+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
62900+{
62901+ struct sockaddr_in sin;
62902+
62903+ if (unlikely(skb->len < sizeof (struct udphdr)))
62904+ return 0; // skip this packet
62905+
62906+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
62907+ sin.sin_port = udp_hdr(skb)->source;
62908+
62909+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62910+}
62911diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
62912new file mode 100644
62913index 0000000..25f54ef
62914--- /dev/null
62915+++ b/grsecurity/gracl_learn.c
62916@@ -0,0 +1,207 @@
62917+#include <linux/kernel.h>
62918+#include <linux/mm.h>
62919+#include <linux/sched.h>
62920+#include <linux/poll.h>
62921+#include <linux/string.h>
62922+#include <linux/file.h>
62923+#include <linux/types.h>
62924+#include <linux/vmalloc.h>
62925+#include <linux/grinternal.h>
62926+
62927+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
62928+ size_t count, loff_t *ppos);
62929+extern int gr_acl_is_enabled(void);
62930+
62931+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
62932+static int gr_learn_attached;
62933+
62934+/* use a 512k buffer */
62935+#define LEARN_BUFFER_SIZE (512 * 1024)
62936+
62937+static DEFINE_SPINLOCK(gr_learn_lock);
62938+static DEFINE_MUTEX(gr_learn_user_mutex);
62939+
62940+/* we need to maintain two buffers: the kernel context of grlearn
62941+   serializes the userspace copying with a mutex, while the other kernel
62942+   contexts use a spinlock when copying into the buffer, since they
62943+   cannot sleep */
62944+static char *learn_buffer;
62945+static char *learn_buffer_user;
62946+static int learn_buffer_len;
62947+static int learn_buffer_user_len;
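
This is a classic double-buffer handoff: atomic-context producers append under a spinlock, and the single reader snapshots the buffer under that same spinlock before doing the slow, sleepable copy_to_user on its private copy under the mutex alone. A userspace sketch of the pattern with pthreads (link with -lpthread; all names here are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_spinlock_t lock;
static char buf[4096], snap[4096];
static int len;

static void produce(const char *s)  /* must not sleep while appending */
{
	pthread_spin_lock(&lock);
	len += snprintf(buf + len, sizeof(buf) - len, "%s\n", s);
	pthread_spin_unlock(&lock);
}

static int consume(void)            /* slow work happens off the lock */
{
	int n;

	pthread_spin_lock(&lock);
	n = len;
	memcpy(snap, buf, n);           /* snapshot, then release quickly */
	len = 0;
	pthread_spin_unlock(&lock);
	fwrite(snap, 1, n, stdout);     /* stand-in for copy_to_user() */
	return n;
}

int main(void)
{
	pthread_spin_init(&lock, 0);
	produce("entry one");
	produce("entry two");
	consume();
	return 0;
}
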
62948+
62949+static ssize_t
62950+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
62951+{
62952+ DECLARE_WAITQUEUE(wait, current);
62953+ ssize_t retval = 0;
62954+
62955+ add_wait_queue(&learn_wait, &wait);
62956+ set_current_state(TASK_INTERRUPTIBLE);
62957+ do {
62958+ mutex_lock(&gr_learn_user_mutex);
62959+ spin_lock(&gr_learn_lock);
62960+ if (learn_buffer_len)
62961+ break;
62962+ spin_unlock(&gr_learn_lock);
62963+ mutex_unlock(&gr_learn_user_mutex);
62964+ if (file->f_flags & O_NONBLOCK) {
62965+ retval = -EAGAIN;
62966+ goto out;
62967+ }
62968+ if (signal_pending(current)) {
62969+ retval = -ERESTARTSYS;
62970+ goto out;
62971+ }
62972+
62973+ schedule();
62974+ } while (1);
62975+
62976+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
62977+ learn_buffer_user_len = learn_buffer_len;
62978+ retval = learn_buffer_len;
62979+ learn_buffer_len = 0;
62980+
62981+ spin_unlock(&gr_learn_lock);
62982+
62983+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
62984+ retval = -EFAULT;
62985+
62986+ mutex_unlock(&gr_learn_user_mutex);
62987+out:
62988+ set_current_state(TASK_RUNNING);
62989+ remove_wait_queue(&learn_wait, &wait);
62990+ return retval;
62991+}
62992+
62993+static unsigned int
62994+poll_learn(struct file * file, poll_table * wait)
62995+{
62996+ poll_wait(file, &learn_wait, wait);
62997+
62998+ if (learn_buffer_len)
62999+ return (POLLIN | POLLRDNORM);
63000+
63001+ return 0;
63002+}
63003+
63004+void
63005+gr_clear_learn_entries(void)
63006+{
63007+ char *tmp;
63008+
63009+ mutex_lock(&gr_learn_user_mutex);
63010+ spin_lock(&gr_learn_lock);
63011+ tmp = learn_buffer;
63012+ learn_buffer = NULL;
63013+ spin_unlock(&gr_learn_lock);
63014+ if (tmp)
63015+ vfree(tmp);
63016+ if (learn_buffer_user != NULL) {
63017+ vfree(learn_buffer_user);
63018+ learn_buffer_user = NULL;
63019+ }
63020+ learn_buffer_len = 0;
63021+ mutex_unlock(&gr_learn_user_mutex);
63022+
63023+ return;
63024+}
63025+
63026+void
63027+gr_add_learn_entry(const char *fmt, ...)
63028+{
63029+ va_list args;
63030+ unsigned int len;
63031+
63032+ if (!gr_learn_attached)
63033+ return;
63034+
63035+ spin_lock(&gr_learn_lock);
63036+
63037+ /* leave a gap at the end so we know when it's "full" but don't have to
63038+ compute the exact length of the string we're trying to append
63039+ */
63040+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
63041+ spin_unlock(&gr_learn_lock);
63042+ wake_up_interruptible(&learn_wait);
63043+ return;
63044+ }
63045+ if (learn_buffer == NULL) {
63046+ spin_unlock(&gr_learn_lock);
63047+ return;
63048+ }
63049+
63050+ va_start(args, fmt);
63051+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
63052+ va_end(args);
63053+
63054+ learn_buffer_len += len + 1;
63055+
63056+ spin_unlock(&gr_learn_lock);
63057+ wake_up_interruptible(&learn_wait);
63058+
63059+ return;
63060+}
63061+
63062+static int
63063+open_learn(struct inode *inode, struct file *file)
63064+{
63065+ if (file->f_mode & FMODE_READ && gr_learn_attached)
63066+ return -EBUSY;
63067+ if (file->f_mode & FMODE_READ) {
63068+ int retval = 0;
63069+ mutex_lock(&gr_learn_user_mutex);
63070+ if (learn_buffer == NULL)
63071+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
63072+ if (learn_buffer_user == NULL)
63073+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
63074+ if (learn_buffer == NULL) {
63075+ retval = -ENOMEM;
63076+ goto out_error;
63077+ }
63078+ if (learn_buffer_user == NULL) {
63079+ retval = -ENOMEM;
63080+ goto out_error;
63081+ }
63082+ learn_buffer_len = 0;
63083+ learn_buffer_user_len = 0;
63084+ gr_learn_attached = 1;
63085+out_error:
63086+ mutex_unlock(&gr_learn_user_mutex);
63087+ return retval;
63088+ }
63089+ return 0;
63090+}
63091+
63092+static int
63093+close_learn(struct inode *inode, struct file *file)
63094+{
63095+ if (file->f_mode & FMODE_READ) {
63096+ char *tmp = NULL;
63097+ mutex_lock(&gr_learn_user_mutex);
63098+ spin_lock(&gr_learn_lock);
63099+ tmp = learn_buffer;
63100+ learn_buffer = NULL;
63101+ spin_unlock(&gr_learn_lock);
63102+ if (tmp)
63103+ vfree(tmp);
63104+ if (learn_buffer_user != NULL) {
63105+ vfree(learn_buffer_user);
63106+ learn_buffer_user = NULL;
63107+ }
63108+ learn_buffer_len = 0;
63109+ learn_buffer_user_len = 0;
63110+ gr_learn_attached = 0;
63111+ mutex_unlock(&gr_learn_user_mutex);
63112+ }
63113+
63114+ return 0;
63115+}
63116+
63117+const struct file_operations grsec_fops = {
63118+ .read = read_learn,
63119+ .write = write_grsec_handler,
63120+ .open = open_learn,
63121+ .release = close_learn,
63122+ .poll = poll_learn,
63123+};
63124diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
63125new file mode 100644
63126index 0000000..39645c9
63127--- /dev/null
63128+++ b/grsecurity/gracl_res.c
63129@@ -0,0 +1,68 @@
63130+#include <linux/kernel.h>
63131+#include <linux/sched.h>
63132+#include <linux/gracl.h>
63133+#include <linux/grinternal.h>
63134+
63135+static const char *restab_log[] = {
63136+ [RLIMIT_CPU] = "RLIMIT_CPU",
63137+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
63138+ [RLIMIT_DATA] = "RLIMIT_DATA",
63139+ [RLIMIT_STACK] = "RLIMIT_STACK",
63140+ [RLIMIT_CORE] = "RLIMIT_CORE",
63141+ [RLIMIT_RSS] = "RLIMIT_RSS",
63142+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
63143+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
63144+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
63145+ [RLIMIT_AS] = "RLIMIT_AS",
63146+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
63147+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
63148+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
63149+ [RLIMIT_NICE] = "RLIMIT_NICE",
63150+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
63151+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
63152+ [GR_CRASH_RES] = "RLIMIT_CRASH"
63153+};
63154+
63155+void
63156+gr_log_resource(const struct task_struct *task,
63157+ const int res, const unsigned long wanted, const int gt)
63158+{
63159+ const struct cred *cred;
63160+ unsigned long rlim;
63161+
63162+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
63163+ return;
63164+
63165+	// resource not yet supported
63166+ if (unlikely(!restab_log[res]))
63167+ return;
63168+
63169+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
63170+ rlim = task_rlimit_max(task, res);
63171+ else
63172+ rlim = task_rlimit(task, res);
63173+
63174+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
63175+ return;
63176+
63177+ rcu_read_lock();
63178+ cred = __task_cred(task);
63179+
63180+ if (res == RLIMIT_NPROC &&
63181+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
63182+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
63183+ goto out_rcu_unlock;
63184+ else if (res == RLIMIT_MEMLOCK &&
63185+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
63186+ goto out_rcu_unlock;
63187+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
63188+ goto out_rcu_unlock;
63189+ rcu_read_unlock();
63190+
63191+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
63192+
63193+ return;
63194+out_rcu_unlock:
63195+ rcu_read_unlock();
63196+ return;
63197+}
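
The early return above encodes the caller's comparison direction in gt: with gt set, only a request strictly greater than the limit counts as a violation; with gt clear, reaching the limit already does. A small restatement of that predicate:

#include <stdio.h>

#define INFINITE_RLIM (~0UL) /* stand-in for RLIM_INFINITY */

/* inverse of gr_log_resource's early-return test */
static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	if (rlim == INFINITE_RLIM)
		return 0;
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	printf("%d\n", over_limit(100, 100, 1)); /* 0: equal is fine for > */
	printf("%d\n", over_limit(100, 100, 0)); /* 1: equal trips a >= check */
	printf("%d\n", over_limit(50, INFINITE_RLIM, 0)); /* 0: unlimited */
	return 0;
}
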
63198diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
63199new file mode 100644
63200index 0000000..cb1e5ab
63201--- /dev/null
63202+++ b/grsecurity/gracl_segv.c
63203@@ -0,0 +1,303 @@
63204+#include <linux/kernel.h>
63205+#include <linux/mm.h>
63206+#include <asm/uaccess.h>
63207+#include <asm/errno.h>
63208+#include <asm/mman.h>
63209+#include <net/sock.h>
63210+#include <linux/file.h>
63211+#include <linux/fs.h>
63212+#include <linux/net.h>
63213+#include <linux/in.h>
63214+#include <linux/slab.h>
63215+#include <linux/types.h>
63216+#include <linux/sched.h>
63217+#include <linux/timer.h>
63218+#include <linux/gracl.h>
63219+#include <linux/grsecurity.h>
63220+#include <linux/grinternal.h>
63221+
63222+static struct crash_uid *uid_set;
63223+static unsigned short uid_used;
63224+static DEFINE_SPINLOCK(gr_uid_lock);
63225+extern rwlock_t gr_inode_lock;
63226+extern struct acl_subject_label *
63227+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
63228+ struct acl_role_label *role);
63229+
63230+#ifdef CONFIG_BTRFS_FS
63231+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
63232+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
63233+#endif
63234+
63235+static inline dev_t __get_dev(const struct dentry *dentry)
63236+{
63237+#ifdef CONFIG_BTRFS_FS
63238+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
63239+ return get_btrfs_dev_from_inode(dentry->d_inode);
63240+ else
63241+#endif
63242+ return dentry->d_inode->i_sb->s_dev;
63243+}
63244+
63245+int
63246+gr_init_uidset(void)
63247+{
63248+ uid_set =
63249+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
63250+ uid_used = 0;
63251+
63252+ return uid_set ? 1 : 0;
63253+}
63254+
63255+void
63256+gr_free_uidset(void)
63257+{
63258+ if (uid_set)
63259+ kfree(uid_set);
63260+
63261+ return;
63262+}
63263+
63264+int
63265+gr_find_uid(const uid_t uid)
63266+{
63267+ struct crash_uid *tmp = uid_set;
63268+ uid_t buid;
63269+ int low = 0, high = uid_used - 1, mid;
63270+
63271+ while (high >= low) {
63272+ mid = (low + high) >> 1;
63273+ buid = tmp[mid].uid;
63274+ if (buid == uid)
63275+ return mid;
63276+ if (buid > uid)
63277+ high = mid - 1;
63278+ if (buid < uid)
63279+ low = mid + 1;
63280+ }
63281+
63282+ return -1;
63283+}
63284+
63285+static __inline__ void
63286+gr_insertsort(void)
63287+{
63288+ unsigned short i, j;
63289+ struct crash_uid index;
63290+
63291+ for (i = 1; i < uid_used; i++) {
63292+ index = uid_set[i];
63293+ j = i;
63294+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
63295+ uid_set[j] = uid_set[j - 1];
63296+ j--;
63297+ }
63298+ uid_set[j] = index;
63299+ }
63300+
63301+ return;
63302+}
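
uid_set is kept sorted so gr_find_uid's binary search stays valid: gr_insert_uid appends the new entry and lets the insertion pass above bubble it into place, which is cheap since the rest of the table is already ordered. A compact userspace harness of the same pair of routines:

#include <stdio.h>

static unsigned uids[16];
static int used;

static int find(unsigned uid)   /* mirrors gr_find_uid's binary search */
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (uids[mid] == uid)
			return mid;
		if (uids[mid] > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert(unsigned uid) /* append, then bubble into place */
{
	int j = used++;

	uids[j] = uid;
	while (j > 0 && uids[j - 1] > uids[j]) {
		unsigned tmp = uids[j];

		uids[j] = uids[j - 1];
		uids[j - 1] = tmp;
		j--;
	}
}

int main(void)
{
	insert(1000); insert(42); insert(500);
	printf("42 at %d, 500 at %d, 7 at %d\n",
	       find(42), find(500), find(7)); /* 42 at 0, 500 at 1, 7 at -1 */
	return 0;
}
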
63303+
63304+static __inline__ void
63305+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
63306+{
63307+ int loc;
63308+ uid_t uid = GR_GLOBAL_UID(kuid);
63309+
63310+ if (uid_used == GR_UIDTABLE_MAX)
63311+ return;
63312+
63313+ loc = gr_find_uid(uid);
63314+
63315+ if (loc >= 0) {
63316+ uid_set[loc].expires = expires;
63317+ return;
63318+ }
63319+
63320+ uid_set[uid_used].uid = uid;
63321+ uid_set[uid_used].expires = expires;
63322+ uid_used++;
63323+
63324+ gr_insertsort();
63325+
63326+ return;
63327+}
63328+
63329+void
63330+gr_remove_uid(const unsigned short loc)
63331+{
63332+ unsigned short i;
63333+
63334+ for (i = loc + 1; i < uid_used; i++)
63335+ uid_set[i - 1] = uid_set[i];
63336+
63337+ uid_used--;
63338+
63339+ return;
63340+}
63341+
63342+int
63343+gr_check_crash_uid(const kuid_t kuid)
63344+{
63345+ int loc;
63346+ int ret = 0;
63347+ uid_t uid;
63348+
63349+ if (unlikely(!gr_acl_is_enabled()))
63350+ return 0;
63351+
63352+ uid = GR_GLOBAL_UID(kuid);
63353+
63354+ spin_lock(&gr_uid_lock);
63355+ loc = gr_find_uid(uid);
63356+
63357+ if (loc < 0)
63358+ goto out_unlock;
63359+
63360+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
63361+ gr_remove_uid(loc);
63362+ else
63363+ ret = 1;
63364+
63365+out_unlock:
63366+ spin_unlock(&gr_uid_lock);
63367+ return ret;
63368+}
63369+
63370+static __inline__ int
63371+proc_is_setxid(const struct cred *cred)
63372+{
63373+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
63374+ !uid_eq(cred->uid, cred->fsuid))
63375+ return 1;
63376+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
63377+ !gid_eq(cred->gid, cred->fsgid))
63378+ return 1;
63379+
63380+ return 0;
63381+}
63382+
63383+extern int gr_fake_force_sig(int sig, struct task_struct *t);
63384+
63385+void
63386+gr_handle_crash(struct task_struct *task, const int sig)
63387+{
63388+ struct acl_subject_label *curr;
63389+ struct task_struct *tsk, *tsk2;
63390+ const struct cred *cred;
63391+ const struct cred *cred2;
63392+
63393+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
63394+ return;
63395+
63396+ if (unlikely(!gr_acl_is_enabled()))
63397+ return;
63398+
63399+ curr = task->acl;
63400+
63401+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
63402+ return;
63403+
63404+ if (time_before_eq(curr->expires, get_seconds())) {
63405+ curr->expires = 0;
63406+ curr->crashes = 0;
63407+ }
63408+
63409+ curr->crashes++;
63410+
63411+ if (!curr->expires)
63412+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
63413+
63414+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63415+ time_after(curr->expires, get_seconds())) {
63416+ rcu_read_lock();
63417+ cred = __task_cred(task);
63418+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
63419+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63420+ spin_lock(&gr_uid_lock);
63421+ gr_insert_uid(cred->uid, curr->expires);
63422+ spin_unlock(&gr_uid_lock);
63423+ curr->expires = 0;
63424+ curr->crashes = 0;
63425+ read_lock(&tasklist_lock);
63426+ do_each_thread(tsk2, tsk) {
63427+ cred2 = __task_cred(tsk);
63428+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
63429+ gr_fake_force_sig(SIGKILL, tsk);
63430+ } while_each_thread(tsk2, tsk);
63431+ read_unlock(&tasklist_lock);
63432+ } else {
63433+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63434+ read_lock(&tasklist_lock);
63435+ read_lock(&grsec_exec_file_lock);
63436+ do_each_thread(tsk2, tsk) {
63437+ if (likely(tsk != task)) {
63438+ // if this thread has the same subject as the one that triggered
63439+ // RES_CRASH and it's the same binary, kill it
63440+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
63441+ gr_fake_force_sig(SIGKILL, tsk);
63442+ }
63443+ } while_each_thread(tsk2, tsk);
63444+ read_unlock(&grsec_exec_file_lock);
63445+ read_unlock(&tasklist_lock);
63446+ }
63447+ rcu_read_unlock();
63448+ }
63449+
63450+ return;
63451+}
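
gr_handle_crash implements a crash-rate window: the window opens at the first crash and lasts res[GR_CRASH_RES].rlim_max seconds, and reaching rlim_cur crashes while it is open triggers the response (a temporary uid ban for setxid binaries, or killing same-subject threads otherwise). A userspace sketch of just that bookkeeping:

#include <stdio.h>
#include <time.h>

/* sketch of the RES_CRASH window: `cur` crashes within `max` seconds
   of the first crash trigger the response; a lapsed window resets */
struct crash_res { unsigned cur; unsigned max; };

static unsigned crashes;
static time_t expires;

static int record_crash(const struct crash_res *res)
{
	time_t now = time(NULL);

	if (expires && expires <= now) { /* window lapsed: start over */
		expires = 0;
		crashes = 0;
	}
	crashes++;
	if (!expires)
		expires = now + res->max;
	return crashes >= res->cur && expires > now; /* 1 => take action */
}

int main(void)
{
	struct crash_res res = { .cur = 3, .max = 30 };

	printf("%d\n", record_crash(&res)); /* 0 */
	printf("%d\n", record_crash(&res)); /* 0 */
	printf("%d\n", record_crash(&res)); /* 1: third crash in the window */
	return 0;
}
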
63452+
63453+int
63454+gr_check_crash_exec(const struct file *filp)
63455+{
63456+ struct acl_subject_label *curr;
63457+
63458+ if (unlikely(!gr_acl_is_enabled()))
63459+ return 0;
63460+
63461+ read_lock(&gr_inode_lock);
63462+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
63463+ __get_dev(filp->f_path.dentry),
63464+ current->role);
63465+ read_unlock(&gr_inode_lock);
63466+
63467+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
63468+ (!curr->crashes && !curr->expires))
63469+ return 0;
63470+
63471+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63472+ time_after(curr->expires, get_seconds()))
63473+ return 1;
63474+ else if (time_before_eq(curr->expires, get_seconds())) {
63475+ curr->crashes = 0;
63476+ curr->expires = 0;
63477+ }
63478+
63479+ return 0;
63480+}
63481+
63482+void
63483+gr_handle_alertkill(struct task_struct *task)
63484+{
63485+ struct acl_subject_label *curracl;
63486+ __u32 curr_ip;
63487+ struct task_struct *p, *p2;
63488+
63489+ if (unlikely(!gr_acl_is_enabled()))
63490+ return;
63491+
63492+ curracl = task->acl;
63493+ curr_ip = task->signal->curr_ip;
63494+
63495+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
63496+ read_lock(&tasklist_lock);
63497+ do_each_thread(p2, p) {
63498+ if (p->signal->curr_ip == curr_ip)
63499+ gr_fake_force_sig(SIGKILL, p);
63500+ } while_each_thread(p2, p);
63501+ read_unlock(&tasklist_lock);
63502+ } else if (curracl->mode & GR_KILLPROC)
63503+ gr_fake_force_sig(SIGKILL, task);
63504+
63505+ return;
63506+}
63507diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
63508new file mode 100644
63509index 0000000..98011b0
63510--- /dev/null
63511+++ b/grsecurity/gracl_shm.c
63512@@ -0,0 +1,40 @@
63513+#include <linux/kernel.h>
63514+#include <linux/mm.h>
63515+#include <linux/sched.h>
63516+#include <linux/file.h>
63517+#include <linux/ipc.h>
63518+#include <linux/gracl.h>
63519+#include <linux/grsecurity.h>
63520+#include <linux/grinternal.h>
63521+
63522+int
63523+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63524+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63525+{
63526+ struct task_struct *task;
63527+
63528+ if (!gr_acl_is_enabled())
63529+ return 1;
63530+
63531+ rcu_read_lock();
63532+ read_lock(&tasklist_lock);
63533+
63534+ task = find_task_by_vpid(shm_cprid);
63535+
63536+ if (unlikely(!task))
63537+ task = find_task_by_vpid(shm_lapid);
63538+
63539+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
63540+ (task_pid_nr(task) == shm_lapid)) &&
63541+ (task->acl->mode & GR_PROTSHM) &&
63542+ (task->acl != current->acl))) {
63543+ read_unlock(&tasklist_lock);
63544+ rcu_read_unlock();
63545+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
63546+ return 0;
63547+ }
63548+ read_unlock(&tasklist_lock);
63549+ rcu_read_unlock();
63550+
63551+ return 1;
63552+}
63553diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
63554new file mode 100644
63555index 0000000..bc0be01
63556--- /dev/null
63557+++ b/grsecurity/grsec_chdir.c
63558@@ -0,0 +1,19 @@
63559+#include <linux/kernel.h>
63560+#include <linux/sched.h>
63561+#include <linux/fs.h>
63562+#include <linux/file.h>
63563+#include <linux/grsecurity.h>
63564+#include <linux/grinternal.h>
63565+
63566+void
63567+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
63568+{
63569+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63570+ if ((grsec_enable_chdir && grsec_enable_group &&
63571+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
63572+ !grsec_enable_group)) {
63573+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
63574+ }
63575+#endif
63576+ return;
63577+}
63578diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
63579new file mode 100644
63580index 0000000..6d2de57
63581--- /dev/null
63582+++ b/grsecurity/grsec_chroot.c
63583@@ -0,0 +1,357 @@
63584+#include <linux/kernel.h>
63585+#include <linux/module.h>
63586+#include <linux/sched.h>
63587+#include <linux/file.h>
63588+#include <linux/fs.h>
63589+#include <linux/mount.h>
63590+#include <linux/types.h>
63591+#include "../fs/mount.h"
63592+#include <linux/grsecurity.h>
63593+#include <linux/grinternal.h>
63594+
63595+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
63596+{
63597+#ifdef CONFIG_GRKERNSEC
63598+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
63599+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
63600+ task->gr_is_chrooted = 1;
63601+ else
63602+ task->gr_is_chrooted = 0;
63603+
63604+ task->gr_chroot_dentry = path->dentry;
63605+#endif
63606+ return;
63607+}
63608+
63609+void gr_clear_chroot_entries(struct task_struct *task)
63610+{
63611+#ifdef CONFIG_GRKERNSEC
63612+ task->gr_is_chrooted = 0;
63613+ task->gr_chroot_dentry = NULL;
63614+#endif
63615+ return;
63616+}
63617+
63618+int
63619+gr_handle_chroot_unix(const pid_t pid)
63620+{
63621+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63622+ struct task_struct *p;
63623+
63624+ if (unlikely(!grsec_enable_chroot_unix))
63625+ return 1;
63626+
63627+ if (likely(!proc_is_chrooted(current)))
63628+ return 1;
63629+
63630+ rcu_read_lock();
63631+ read_lock(&tasklist_lock);
63632+ p = find_task_by_vpid_unrestricted(pid);
63633+ if (unlikely(p && !have_same_root(current, p))) {
63634+ read_unlock(&tasklist_lock);
63635+ rcu_read_unlock();
63636+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
63637+ return 0;
63638+ }
63639+ read_unlock(&tasklist_lock);
63640+ rcu_read_unlock();
63641+#endif
63642+ return 1;
63643+}
63644+
63645+int
63646+gr_handle_chroot_nice(void)
63647+{
63648+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63649+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
63650+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
63651+ return -EPERM;
63652+ }
63653+#endif
63654+ return 0;
63655+}
63656+
63657+int
63658+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
63659+{
63660+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63661+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
63662+ && proc_is_chrooted(current)) {
63663+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
63664+ return -EACCES;
63665+ }
63666+#endif
63667+ return 0;
63668+}
63669+
63670+int
63671+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
63672+{
63673+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63674+ struct task_struct *p;
63675+ int ret = 0;
63676+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
63677+ return ret;
63678+
63679+ read_lock(&tasklist_lock);
63680+ do_each_pid_task(pid, type, p) {
63681+ if (!have_same_root(current, p)) {
63682+ ret = 1;
63683+ goto out;
63684+ }
63685+ } while_each_pid_task(pid, type, p);
63686+out:
63687+ read_unlock(&tasklist_lock);
63688+ return ret;
63689+#endif
63690+ return 0;
63691+}
63692+
63693+int
63694+gr_pid_is_chrooted(struct task_struct *p)
63695+{
63696+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63697+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
63698+ return 0;
63699+
63700+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
63701+ !have_same_root(current, p)) {
63702+ return 1;
63703+ }
63704+#endif
63705+ return 0;
63706+}
63707+
63708+EXPORT_SYMBOL(gr_pid_is_chrooted);
63709+
63710+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
63711+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
63712+{
63713+ struct path path, currentroot;
63714+ int ret = 0;
63715+
63716+ path.dentry = (struct dentry *)u_dentry;
63717+ path.mnt = (struct vfsmount *)u_mnt;
63718+ get_fs_root(current->fs, &currentroot);
63719+ if (path_is_under(&path, &currentroot))
63720+ ret = 1;
63721+ path_put(&currentroot);
63722+
63723+ return ret;
63724+}
63725+#endif
63726+
63727+int
63728+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
63729+{
63730+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63731+ if (!grsec_enable_chroot_fchdir)
63732+ return 1;
63733+
63734+ if (!proc_is_chrooted(current))
63735+ return 1;
63736+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
63737+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
63738+ return 0;
63739+ }
63740+#endif
63741+ return 1;
63742+}
63743+
63744+int
63745+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63746+ const time_t shm_createtime)
63747+{
63748+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63749+ struct task_struct *p;
63750+ time_t starttime;
63751+
63752+ if (unlikely(!grsec_enable_chroot_shmat))
63753+ return 1;
63754+
63755+ if (likely(!proc_is_chrooted(current)))
63756+ return 1;
63757+
63758+ rcu_read_lock();
63759+ read_lock(&tasklist_lock);
63760+
63761+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
63762+ starttime = p->start_time.tv_sec;
63763+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
63764+ if (have_same_root(current, p)) {
63765+ goto allow;
63766+ } else {
63767+ read_unlock(&tasklist_lock);
63768+ rcu_read_unlock();
63769+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63770+ return 0;
63771+ }
63772+ }
63773+		/* creator exited or pid was reused; fall through to next check */
63774+ }
63775+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
63776+ if (unlikely(!have_same_root(current, p))) {
63777+ read_unlock(&tasklist_lock);
63778+ rcu_read_unlock();
63779+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63780+ return 0;
63781+ }
63782+ }
63783+
63784+allow:
63785+ read_unlock(&tasklist_lock);
63786+ rcu_read_unlock();
63787+#endif
63788+ return 1;
63789+}
63790+
63791+void
63792+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
63793+{
63794+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63795+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
63796+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
63797+#endif
63798+ return;
63799+}
63800+
63801+int
63802+gr_handle_chroot_mknod(const struct dentry *dentry,
63803+ const struct vfsmount *mnt, const int mode)
63804+{
63805+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63806+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
63807+ proc_is_chrooted(current)) {
63808+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
63809+ return -EPERM;
63810+ }
63811+#endif
63812+ return 0;
63813+}
63814+
63815+int
63816+gr_handle_chroot_mount(const struct dentry *dentry,
63817+ const struct vfsmount *mnt, const char *dev_name)
63818+{
63819+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63820+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
63821+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
63822+ return -EPERM;
63823+ }
63824+#endif
63825+ return 0;
63826+}
63827+
63828+int
63829+gr_handle_chroot_pivot(void)
63830+{
63831+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63832+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
63833+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
63834+ return -EPERM;
63835+ }
63836+#endif
63837+ return 0;
63838+}
63839+
63840+int
63841+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
63842+{
63843+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63844+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
63845+ !gr_is_outside_chroot(dentry, mnt)) {
63846+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
63847+ return -EPERM;
63848+ }
63849+#endif
63850+ return 0;
63851+}
63852+
63853+extern const char *captab_log[];
63854+extern int captab_log_entries;
63855+
63856+int
63857+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
63858+{
63859+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63860+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63861+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63862+ if (cap_raised(chroot_caps, cap)) {
63863+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
63864+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
63865+ }
63866+ return 0;
63867+ }
63868+ }
63869+#endif
63870+ return 1;
63871+}
63872+
63873+int
63874+gr_chroot_is_capable(const int cap)
63875+{
63876+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63877+ return gr_task_chroot_is_capable(current, current_cred(), cap);
63878+#endif
63879+ return 1;
63880+}
63881+
63882+int
63883+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
63884+{
63885+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63886+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63887+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63888+ if (cap_raised(chroot_caps, cap)) {
63889+ return 0;
63890+ }
63891+ }
63892+#endif
63893+ return 1;
63894+}
63895+
63896+int
63897+gr_chroot_is_capable_nolog(const int cap)
63898+{
63899+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63900+ return gr_task_chroot_is_capable_nolog(current, cap);
63901+#endif
63902+ return 1;
63903+}
63904+
63905+int
63906+gr_handle_chroot_sysctl(const int op)
63907+{
63908+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63909+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
63910+ proc_is_chrooted(current))
63911+ return -EACCES;
63912+#endif
63913+ return 0;
63914+}
63915+
63916+void
63917+gr_handle_chroot_chdir(struct path *path)
63918+{
63919+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63920+ if (grsec_enable_chroot_chdir)
63921+ set_fs_pwd(current->fs, path);
63922+#endif
63923+ return;
63924+}
63925+
63926+int
63927+gr_handle_chroot_chmod(const struct dentry *dentry,
63928+ const struct vfsmount *mnt, const int mode)
63929+{
63930+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63931+ /* allow chmod +s on directories, but not files */
63932+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
63933+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
63934+ proc_is_chrooted(current)) {
63935+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
63936+ return -EPERM;
63937+ }
63938+#endif
63939+ return 0;
63940+}
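The mode test above deliberately requires both S_ISGID and S_IXGRP, since setgid without group-execute denotes mandatory locking rather than a setgid executable. A small userspace illustration of the same bit test (hypothetical helper, not from the patch):

#include <stdio.h>
#include <sys/stat.h>

/* Illustrative userspace mirror of the kernel test above. */
static int is_setid_exec(int mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", is_setid_exec(02755)); /* 1: setgid executable, denied */
	printf("%d\n", is_setid_exec(02644)); /* 0: mandatory locking, allowed */
	printf("%d\n", is_setid_exec(04755)); /* 1: setuid binary, denied */
	return 0;
}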
63941diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
63942new file mode 100644
63943index 0000000..207d409
63944--- /dev/null
63945+++ b/grsecurity/grsec_disabled.c
63946@@ -0,0 +1,434 @@
63947+#include <linux/kernel.h>
63948+#include <linux/module.h>
63949+#include <linux/sched.h>
63950+#include <linux/file.h>
63951+#include <linux/fs.h>
63952+#include <linux/kdev_t.h>
63953+#include <linux/net.h>
63954+#include <linux/in.h>
63955+#include <linux/ip.h>
63956+#include <linux/skbuff.h>
63957+#include <linux/sysctl.h>
63958+
63959+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63960+void
63961+pax_set_initial_flags(struct linux_binprm *bprm)
63962+{
63963+ return;
63964+}
63965+#endif
63966+
63967+#ifdef CONFIG_SYSCTL
63968+__u32
63969+gr_handle_sysctl(const struct ctl_table * table, const int op)
63970+{
63971+ return 0;
63972+}
63973+#endif
63974+
63975+#ifdef CONFIG_TASKSTATS
63976+int gr_is_taskstats_denied(int pid)
63977+{
63978+ return 0;
63979+}
63980+#endif
63981+
63982+int
63983+gr_acl_is_enabled(void)
63984+{
63985+ return 0;
63986+}
63987+
63988+void
63989+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
63990+{
63991+ return;
63992+}
63993+
63994+int
63995+gr_handle_rawio(const struct inode *inode)
63996+{
63997+ return 0;
63998+}
63999+
64000+void
64001+gr_acl_handle_psacct(struct task_struct *task, const long code)
64002+{
64003+ return;
64004+}
64005+
64006+int
64007+gr_handle_ptrace(struct task_struct *task, const long request)
64008+{
64009+ return 0;
64010+}
64011+
64012+int
64013+gr_handle_proc_ptrace(struct task_struct *task)
64014+{
64015+ return 0;
64016+}
64017+
64018+int
64019+gr_set_acls(const int type)
64020+{
64021+ return 0;
64022+}
64023+
64024+int
64025+gr_check_hidden_task(const struct task_struct *tsk)
64026+{
64027+ return 0;
64028+}
64029+
64030+int
64031+gr_check_protected_task(const struct task_struct *task)
64032+{
64033+ return 0;
64034+}
64035+
64036+int
64037+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64038+{
64039+ return 0;
64040+}
64041+
64042+void
64043+gr_copy_label(struct task_struct *tsk)
64044+{
64045+ return;
64046+}
64047+
64048+void
64049+gr_set_pax_flags(struct task_struct *task)
64050+{
64051+ return;
64052+}
64053+
64054+int
64055+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64056+ const int unsafe_share)
64057+{
64058+ return 0;
64059+}
64060+
64061+void
64062+gr_handle_delete(const ino_t ino, const dev_t dev)
64063+{
64064+ return;
64065+}
64066+
64067+void
64068+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
64069+{
64070+ return;
64071+}
64072+
64073+void
64074+gr_handle_crash(struct task_struct *task, const int sig)
64075+{
64076+ return;
64077+}
64078+
64079+int
64080+gr_check_crash_exec(const struct file *filp)
64081+{
64082+ return 0;
64083+}
64084+
64085+int
64086+gr_check_crash_uid(const kuid_t uid)
64087+{
64088+ return 0;
64089+}
64090+
64091+void
64092+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64093+ struct dentry *old_dentry,
64094+ struct dentry *new_dentry,
64095+ struct vfsmount *mnt, const __u8 replace)
64096+{
64097+ return;
64098+}
64099+
64100+int
64101+gr_search_socket(const int family, const int type, const int protocol)
64102+{
64103+ return 1;
64104+}
64105+
64106+int
64107+gr_search_connectbind(const int mode, const struct socket *sock,
64108+ const struct sockaddr_in *addr)
64109+{
64110+ return 0;
64111+}
64112+
64113+void
64114+gr_handle_alertkill(struct task_struct *task)
64115+{
64116+ return;
64117+}
64118+
64119+__u32
64120+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
64121+{
64122+ return 1;
64123+}
64124+
64125+__u32
64126+gr_acl_handle_hidden_file(const struct dentry * dentry,
64127+ const struct vfsmount * mnt)
64128+{
64129+ return 1;
64130+}
64131+
64132+__u32
64133+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
64134+ int acc_mode)
64135+{
64136+ return 1;
64137+}
64138+
64139+__u32
64140+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
64141+{
64142+ return 1;
64143+}
64144+
64145+__u32
64146+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
64147+{
64148+ return 1;
64149+}
64150+
64151+int
64152+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
64153+ unsigned int *vm_flags)
64154+{
64155+ return 1;
64156+}
64157+
64158+__u32
64159+gr_acl_handle_truncate(const struct dentry * dentry,
64160+ const struct vfsmount * mnt)
64161+{
64162+ return 1;
64163+}
64164+
64165+__u32
64166+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
64167+{
64168+ return 1;
64169+}
64170+
64171+__u32
64172+gr_acl_handle_access(const struct dentry * dentry,
64173+ const struct vfsmount * mnt, const int fmode)
64174+{
64175+ return 1;
64176+}
64177+
64178+__u32
64179+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
64180+ umode_t *mode)
64181+{
64182+ return 1;
64183+}
64184+
64185+__u32
64186+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
64187+{
64188+ return 1;
64189+}
64190+
64191+__u32
64192+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
64193+{
64194+ return 1;
64195+}
64196+
64197+void
64198+grsecurity_init(void)
64199+{
64200+ return;
64201+}
64202+
64203+umode_t gr_acl_umask(void)
64204+{
64205+ return 0;
64206+}
64207+
64208+__u32
64209+gr_acl_handle_mknod(const struct dentry * new_dentry,
64210+ const struct dentry * parent_dentry,
64211+ const struct vfsmount * parent_mnt,
64212+ const int mode)
64213+{
64214+ return 1;
64215+}
64216+
64217+__u32
64218+gr_acl_handle_mkdir(const struct dentry * new_dentry,
64219+ const struct dentry * parent_dentry,
64220+ const struct vfsmount * parent_mnt)
64221+{
64222+ return 1;
64223+}
64224+
64225+__u32
64226+gr_acl_handle_symlink(const struct dentry * new_dentry,
64227+ const struct dentry * parent_dentry,
64228+ const struct vfsmount * parent_mnt, const struct filename *from)
64229+{
64230+ return 1;
64231+}
64232+
64233+__u32
64234+gr_acl_handle_link(const struct dentry * new_dentry,
64235+ const struct dentry * parent_dentry,
64236+ const struct vfsmount * parent_mnt,
64237+ const struct dentry * old_dentry,
64238+ const struct vfsmount * old_mnt, const struct filename *to)
64239+{
64240+ return 1;
64241+}
64242+
64243+int
64244+gr_acl_handle_rename(const struct dentry *new_dentry,
64245+ const struct dentry *parent_dentry,
64246+ const struct vfsmount *parent_mnt,
64247+ const struct dentry *old_dentry,
64248+ const struct inode *old_parent_inode,
64249+ const struct vfsmount *old_mnt, const struct filename *newname)
64250+{
64251+ return 0;
64252+}
64253+
64254+int
64255+gr_acl_handle_filldir(const struct file *file, const char *name,
64256+ const int namelen, const ino_t ino)
64257+{
64258+ return 1;
64259+}
64260+
64261+int
64262+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64263+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64264+{
64265+ return 1;
64266+}
64267+
64268+int
64269+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
64270+{
64271+ return 0;
64272+}
64273+
64274+int
64275+gr_search_accept(const struct socket *sock)
64276+{
64277+ return 0;
64278+}
64279+
64280+int
64281+gr_search_listen(const struct socket *sock)
64282+{
64283+ return 0;
64284+}
64285+
64286+int
64287+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
64288+{
64289+ return 0;
64290+}
64291+
64292+__u32
64293+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
64294+{
64295+ return 1;
64296+}
64297+
64298+__u32
64299+gr_acl_handle_creat(const struct dentry * dentry,
64300+ const struct dentry * p_dentry,
64301+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
64302+ const int imode)
64303+{
64304+ return 1;
64305+}
64306+
64307+void
64308+gr_acl_handle_exit(void)
64309+{
64310+ return;
64311+}
64312+
64313+int
64314+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64315+{
64316+ return 1;
64317+}
64318+
64319+void
64320+gr_set_role_label(const kuid_t uid, const kgid_t gid)
64321+{
64322+ return;
64323+}
64324+
64325+int
64326+gr_acl_handle_procpidmem(const struct task_struct *task)
64327+{
64328+ return 0;
64329+}
64330+
64331+int
64332+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
64333+{
64334+ return 0;
64335+}
64336+
64337+int
64338+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
64339+{
64340+ return 0;
64341+}
64342+
64343+void
64344+gr_set_kernel_label(struct task_struct *task)
64345+{
64346+ return;
64347+}
64348+
64349+int
64350+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64351+{
64352+ return 0;
64353+}
64354+
64355+int
64356+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64357+{
64358+ return 0;
64359+}
64360+
64361+int gr_acl_enable_at_secure(void)
64362+{
64363+ return 0;
64364+}
64365+
64366+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
64367+{
64368+ return dentry->d_inode->i_sb->s_dev;
64369+}
64370+
64371+void gr_put_exec_file(struct task_struct *task)
64372+{
64373+ return;
64374+}
64375+
64376+EXPORT_SYMBOL(gr_set_kernel_label);
64377+#ifdef CONFIG_SECURITY
64378+EXPORT_SYMBOL(gr_check_user_change);
64379+EXPORT_SYMBOL(gr_check_group_change);
64380+#endif
64381diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
64382new file mode 100644
64383index 0000000..387032b
64384--- /dev/null
64385+++ b/grsecurity/grsec_exec.c
64386@@ -0,0 +1,187 @@
64387+#include <linux/kernel.h>
64388+#include <linux/sched.h>
64389+#include <linux/file.h>
64390+#include <linux/binfmts.h>
64391+#include <linux/fs.h>
64392+#include <linux/types.h>
64393+#include <linux/grdefs.h>
64394+#include <linux/grsecurity.h>
64395+#include <linux/grinternal.h>
64396+#include <linux/capability.h>
64397+#include <linux/module.h>
64398+#include <linux/compat.h>
64399+
64400+#include <asm/uaccess.h>
64401+
64402+#ifdef CONFIG_GRKERNSEC_EXECLOG
64403+static char gr_exec_arg_buf[132];
64404+static DEFINE_MUTEX(gr_exec_arg_mutex);
64405+#endif
64406+
64407+struct user_arg_ptr {
64408+#ifdef CONFIG_COMPAT
64409+ bool is_compat;
64410+#endif
64411+ union {
64412+ const char __user *const __user *native;
64413+#ifdef CONFIG_COMPAT
64414+ const compat_uptr_t __user *compat;
64415+#endif
64416+ } ptr;
64417+};
64418+
64419+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
64420+
64421+void
64422+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
64423+{
64424+#ifdef CONFIG_GRKERNSEC_EXECLOG
64425+ char *grarg = gr_exec_arg_buf;
64426+ unsigned int i, x, execlen = 0;
64427+ char c;
64428+
64429+ if (!((grsec_enable_execlog && grsec_enable_group &&
64430+ in_group_p(grsec_audit_gid))
64431+ || (grsec_enable_execlog && !grsec_enable_group)))
64432+ return;
64433+
64434+ mutex_lock(&gr_exec_arg_mutex);
64435+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
64436+
64437+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
64438+ const char __user *p;
64439+ unsigned int len;
64440+
64441+ p = get_user_arg_ptr(argv, i);
64442+ if (IS_ERR(p))
64443+ goto log;
64444+
64445+ len = strnlen_user(p, 128 - execlen);
64446+ if (len > 128 - execlen)
64447+ len = 128 - execlen;
64448+ else if (len > 0)
64449+ len--;
64450+ if (copy_from_user(grarg + execlen, p, len))
64451+ goto log;
64452+
64453+ /* rewrite unprintable characters */
64454+ for (x = 0; x < len; x++) {
64455+ c = *(grarg + execlen + x);
64456+ if (c < 32 || c > 126)
64457+ *(grarg + execlen + x) = ' ';
64458+ }
64459+
64460+ execlen += len;
64461+ *(grarg + execlen) = ' ';
64462+ *(grarg + execlen + 1) = '\0';
64463+ execlen++;
64464+ }
64465+
64466+ log:
64467+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
64468+ bprm->file->f_path.mnt, grarg);
64469+ mutex_unlock(&gr_exec_arg_mutex);
64470+#endif
64471+ return;
64472+}
64473+
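The gating condition at the top reduces to grsec_enable_execlog && (!grsec_enable_group || in_group_p(grsec_audit_gid)). The copy loop then flattens argv into a 132-byte buffer with a 128-byte payload cap; the four bytes of slack absorb the final separator and NUL. A minimal userspace analogue (illustrative only, simplified relative to the strnlen_user handling above):

#include <stdio.h>
#include <string.h>

/* Illustrative: concatenate arguments space-separated into a fixed
 * buffer, truncating at payload_max bytes of argument data. */
static void flatten_args(char *out, size_t payload_max, int argc, char **argv)
{
	size_t len = 0;
	int i;

	out[0] = '\0';
	for (i = 0; i < argc && len < payload_max; i++) {
		size_t n = strnlen(argv[i], payload_max - len);

		memcpy(out + len, argv[i], n);
		len += n;
		out[len++] = ' ';	/* separator, in the slack region at worst */
		out[len] = '\0';
	}
}

int main(int argc, char **argv)
{
	char buf[132];

	flatten_args(buf, 128, argc, argv);
	printf("%s\n", buf);
	return 0;
}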
64474+#ifdef CONFIG_GRKERNSEC
64475+extern int gr_acl_is_capable(const int cap);
64476+extern int gr_acl_is_capable_nolog(const int cap);
64477+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64478+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
64479+extern int gr_chroot_is_capable(const int cap);
64480+extern int gr_chroot_is_capable_nolog(const int cap);
64481+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64482+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
64483+#endif
64484+
64485+const char *captab_log[] = {
64486+ "CAP_CHOWN",
64487+ "CAP_DAC_OVERRIDE",
64488+ "CAP_DAC_READ_SEARCH",
64489+ "CAP_FOWNER",
64490+ "CAP_FSETID",
64491+ "CAP_KILL",
64492+ "CAP_SETGID",
64493+ "CAP_SETUID",
64494+ "CAP_SETPCAP",
64495+ "CAP_LINUX_IMMUTABLE",
64496+ "CAP_NET_BIND_SERVICE",
64497+ "CAP_NET_BROADCAST",
64498+ "CAP_NET_ADMIN",
64499+ "CAP_NET_RAW",
64500+ "CAP_IPC_LOCK",
64501+ "CAP_IPC_OWNER",
64502+ "CAP_SYS_MODULE",
64503+ "CAP_SYS_RAWIO",
64504+ "CAP_SYS_CHROOT",
64505+ "CAP_SYS_PTRACE",
64506+ "CAP_SYS_PACCT",
64507+ "CAP_SYS_ADMIN",
64508+ "CAP_SYS_BOOT",
64509+ "CAP_SYS_NICE",
64510+ "CAP_SYS_RESOURCE",
64511+ "CAP_SYS_TIME",
64512+ "CAP_SYS_TTY_CONFIG",
64513+ "CAP_MKNOD",
64514+ "CAP_LEASE",
64515+ "CAP_AUDIT_WRITE",
64516+ "CAP_AUDIT_CONTROL",
64517+ "CAP_SETFCAP",
64518+ "CAP_MAC_OVERRIDE",
64519+ "CAP_MAC_ADMIN",
64520+ "CAP_SYSLOG",
64521+ "CAP_WAKE_ALARM"
64522+};
64523+
64524+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
64525+
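captab_log_entries is the usual sizeof-array idiom, and the `cap < captab_log_entries` guard in the chroot capability check earlier keeps capabilities newer than CAP_WAKE_ALARM from indexing past the table; such capabilities are still denied, just logged without a name. The guard as a helper (hypothetical, not from the patch):

/* Illustrative: bounds-checked lookup into the capability name table. */
static const char *cap_name(int cap)
{
	return (cap >= 0 && cap < captab_log_entries) ? captab_log[cap]
						      : "CAP_<unknown>";
}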
64526+int gr_is_capable(const int cap)
64527+{
64528+#ifdef CONFIG_GRKERNSEC
64529+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
64530+ return 1;
64531+ return 0;
64532+#else
64533+ return 1;
64534+#endif
64535+}
64536+
64537+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64538+{
64539+#ifdef CONFIG_GRKERNSEC
64540+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
64541+ return 1;
64542+ return 0;
64543+#else
64544+ return 1;
64545+#endif
64546+}
64547+
64548+int gr_is_capable_nolog(const int cap)
64549+{
64550+#ifdef CONFIG_GRKERNSEC
64551+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
64552+ return 1;
64553+ return 0;
64554+#else
64555+ return 1;
64556+#endif
64557+}
64558+
64559+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
64560+{
64561+#ifdef CONFIG_GRKERNSEC
64562+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
64563+ return 1;
64564+ return 0;
64565+#else
64566+ return 1;
64567+#endif
64568+}
64569+
64570+EXPORT_SYMBOL(gr_is_capable);
64571+EXPORT_SYMBOL(gr_is_capable_nolog);
64572+EXPORT_SYMBOL(gr_task_is_capable);
64573+EXPORT_SYMBOL(gr_task_is_capable_nolog);
64574diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
64575new file mode 100644
64576index 0000000..06cc6ea
64577--- /dev/null
64578+++ b/grsecurity/grsec_fifo.c
64579@@ -0,0 +1,24 @@
64580+#include <linux/kernel.h>
64581+#include <linux/sched.h>
64582+#include <linux/fs.h>
64583+#include <linux/file.h>
64584+#include <linux/grinternal.h>
64585+
64586+int
64587+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
64588+ const struct dentry *dir, const int flag, const int acc_mode)
64589+{
64590+#ifdef CONFIG_GRKERNSEC_FIFO
64591+ const struct cred *cred = current_cred();
64592+
64593+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
64594+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
64595+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
64596+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
64597+ if (!inode_permission(dentry->d_inode, acc_mode))
64598+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
64599+ return -EACCES;
64600+ }
64601+#endif
64602+ return 0;
64603+}
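The denial above has the same shape as the restriction later mainlined as fs.protected_fifos: opening a FIFO in a sticky directory is refused when the FIFO belongs to neither the directory owner nor the opener (O_EXCL implies creating a fresh object, so it is exempt). Note the inner inode_permission() call only decides whether the denial is *logged*, not whether it happens. Restated as a predicate (illustrative, userspace types assumed):

#include <fcntl.h>	/* O_EXCL */
#include <sys/stat.h>	/* S_ISVTX, mode_t */
#include <sys/types.h>	/* uid_t */

/* Illustrative re-statement of the kernel test above; 1 = deny. */
static int fifo_open_denied(mode_t dir_mode, uid_t dir_uid,
			    uid_t fifo_uid, uid_t opener_fsuid, int flags)
{
	return !(flags & O_EXCL) &&	 /* O_EXCL never opens an existing FIFO */
	       (dir_mode & S_ISVTX) &&	 /* sticky directory, e.g. /tmp */
	       fifo_uid != dir_uid &&	 /* FIFO not owned by the dir owner */
	       opener_fsuid != fifo_uid; /* nor by the opener */
}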
64604diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
64605new file mode 100644
64606index 0000000..8ca18bf
64607--- /dev/null
64608+++ b/grsecurity/grsec_fork.c
64609@@ -0,0 +1,23 @@
64610+#include <linux/kernel.h>
64611+#include <linux/sched.h>
64612+#include <linux/grsecurity.h>
64613+#include <linux/grinternal.h>
64614+#include <linux/errno.h>
64615+
64616+void
64617+gr_log_forkfail(const int retval)
64618+{
64619+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64620+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
64621+ switch (retval) {
64622+ case -EAGAIN:
64623+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
64624+ break;
64625+ case -ENOMEM:
64626+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
64627+ break;
64628+ }
64629+ }
64630+#endif
64631+ return;
64632+}
64633diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
64634new file mode 100644
64635index 0000000..a862e9f
64636--- /dev/null
64637+++ b/grsecurity/grsec_init.c
64638@@ -0,0 +1,283 @@
64639+#include <linux/kernel.h>
64640+#include <linux/sched.h>
64641+#include <linux/mm.h>
64642+#include <linux/gracl.h>
64643+#include <linux/slab.h>
64644+#include <linux/vmalloc.h>
64645+#include <linux/percpu.h>
64646+#include <linux/module.h>
64647+
64648+int grsec_enable_ptrace_readexec;
64649+int grsec_enable_setxid;
64650+int grsec_enable_symlinkown;
64651+kgid_t grsec_symlinkown_gid;
64652+int grsec_enable_brute;
64653+int grsec_enable_link;
64654+int grsec_enable_dmesg;
64655+int grsec_enable_harden_ptrace;
64656+int grsec_enable_fifo;
64657+int grsec_enable_execlog;
64658+int grsec_enable_signal;
64659+int grsec_enable_forkfail;
64660+int grsec_enable_audit_ptrace;
64661+int grsec_enable_time;
64662+int grsec_enable_audit_textrel;
64663+int grsec_enable_group;
64664+kgid_t grsec_audit_gid;
64665+int grsec_enable_chdir;
64666+int grsec_enable_mount;
64667+int grsec_enable_rofs;
64668+int grsec_enable_chroot_findtask;
64669+int grsec_enable_chroot_mount;
64670+int grsec_enable_chroot_shmat;
64671+int grsec_enable_chroot_fchdir;
64672+int grsec_enable_chroot_double;
64673+int grsec_enable_chroot_pivot;
64674+int grsec_enable_chroot_chdir;
64675+int grsec_enable_chroot_chmod;
64676+int grsec_enable_chroot_mknod;
64677+int grsec_enable_chroot_nice;
64678+int grsec_enable_chroot_execlog;
64679+int grsec_enable_chroot_caps;
64680+int grsec_enable_chroot_sysctl;
64681+int grsec_enable_chroot_unix;
64682+int grsec_enable_tpe;
64683+kgid_t grsec_tpe_gid;
64684+int grsec_enable_blackhole;
64685+#ifdef CONFIG_IPV6_MODULE
64686+EXPORT_SYMBOL(grsec_enable_blackhole);
64687+#endif
64688+int grsec_lastack_retries;
64689+int grsec_enable_tpe_all;
64690+int grsec_enable_tpe_invert;
64691+int grsec_enable_socket_all;
64692+kgid_t grsec_socket_all_gid;
64693+int grsec_enable_socket_client;
64694+kgid_t grsec_socket_client_gid;
64695+int grsec_enable_socket_server;
64696+kgid_t grsec_socket_server_gid;
64697+int grsec_resource_logging;
64698+int grsec_disable_privio;
64699+int grsec_enable_log_rwxmaps;
64700+int grsec_lock;
64701+
64702+DEFINE_SPINLOCK(grsec_alert_lock);
64703+unsigned long grsec_alert_wtime = 0;
64704+unsigned long grsec_alert_fyet = 0;
64705+
64706+DEFINE_SPINLOCK(grsec_audit_lock);
64707+
64708+DEFINE_RWLOCK(grsec_exec_file_lock);
64709+
64710+char *gr_shared_page[4];
64711+
64712+char *gr_alert_log_fmt;
64713+char *gr_audit_log_fmt;
64714+char *gr_alert_log_buf;
64715+char *gr_audit_log_buf;
64716+
64717+extern struct gr_arg *gr_usermode;
64718+extern unsigned char *gr_system_salt;
64719+extern unsigned char *gr_system_sum;
64720+
64721+void __init
64722+grsecurity_init(void)
64723+{
64724+ int j;
64725+ /* create the per-cpu shared pages */
64726+
64727+#ifdef CONFIG_X86
64728+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
64729+#endif
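/* Illustrative annotation, not part of the patch: the memset above wipes
 * the BIOS Data Area keyboard buffer -- the head/tail pointers at
 * 0x41a/0x41c plus the 32-byte ring at 0x41e -- where firmware can leave
 * boot-time keystrokes such as disk-encryption passphrases. */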
64730+
64731+ for (j = 0; j < 4; j++) {
64732+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
64733+ if (gr_shared_page[j] == NULL) {
64734+ panic("Unable to allocate grsecurity shared page");
64735+ return;
64736+ }
64737+ }
64738+
64739+ /* allocate log buffers */
64740+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
64741+ if (!gr_alert_log_fmt) {
64742+ panic("Unable to allocate grsecurity alert log format buffer");
64743+ return;
64744+ }
64745+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
64746+ if (!gr_audit_log_fmt) {
64747+ panic("Unable to allocate grsecurity audit log format buffer");
64748+ return;
64749+ }
64750+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64751+ if (!gr_alert_log_buf) {
64752+ panic("Unable to allocate grsecurity alert log buffer");
64753+ return;
64754+ }
64755+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64756+ if (!gr_audit_log_buf) {
64757+ panic("Unable to allocate grsecurity audit log buffer");
64758+ return;
64759+ }
64760+
64761+ /* allocate memory for authentication structure */
64762+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
64763+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
64764+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
64765+
64766+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
64767+ panic("Unable to allocate grsecurity authentication structure");
64768+ return;
64769+ }
64770+
64771+
64772+#ifdef CONFIG_GRKERNSEC_IO
64773+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
64774+ grsec_disable_privio = 1;
64775+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64776+ grsec_disable_privio = 1;
64777+#else
64778+ grsec_disable_privio = 0;
64779+#endif
64780+#endif
64781+
64782+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64783+ /* for backward compatibility, tpe_invert always defaults to on if
64784+ enabled in the kernel
64785+ */
64786+ grsec_enable_tpe_invert = 1;
64787+#endif
64788+
64789+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64790+#ifndef CONFIG_GRKERNSEC_SYSCTL
64791+ grsec_lock = 1;
64792+#endif
64793+
64794+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64795+ grsec_enable_audit_textrel = 1;
64796+#endif
64797+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64798+ grsec_enable_log_rwxmaps = 1;
64799+#endif
64800+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64801+ grsec_enable_group = 1;
64802+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
64803+#endif
64804+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64805+ grsec_enable_ptrace_readexec = 1;
64806+#endif
64807+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64808+ grsec_enable_chdir = 1;
64809+#endif
64810+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64811+ grsec_enable_harden_ptrace = 1;
64812+#endif
64813+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64814+ grsec_enable_mount = 1;
64815+#endif
64816+#ifdef CONFIG_GRKERNSEC_LINK
64817+ grsec_enable_link = 1;
64818+#endif
64819+#ifdef CONFIG_GRKERNSEC_BRUTE
64820+ grsec_enable_brute = 1;
64821+#endif
64822+#ifdef CONFIG_GRKERNSEC_DMESG
64823+ grsec_enable_dmesg = 1;
64824+#endif
64825+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64826+ grsec_enable_blackhole = 1;
64827+ grsec_lastack_retries = 4;
64828+#endif
64829+#ifdef CONFIG_GRKERNSEC_FIFO
64830+ grsec_enable_fifo = 1;
64831+#endif
64832+#ifdef CONFIG_GRKERNSEC_EXECLOG
64833+ grsec_enable_execlog = 1;
64834+#endif
64835+#ifdef CONFIG_GRKERNSEC_SETXID
64836+ grsec_enable_setxid = 1;
64837+#endif
64838+#ifdef CONFIG_GRKERNSEC_SIGNAL
64839+ grsec_enable_signal = 1;
64840+#endif
64841+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64842+ grsec_enable_forkfail = 1;
64843+#endif
64844+#ifdef CONFIG_GRKERNSEC_TIME
64845+ grsec_enable_time = 1;
64846+#endif
64847+#ifdef CONFIG_GRKERNSEC_RESLOG
64848+ grsec_resource_logging = 1;
64849+#endif
64850+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64851+ grsec_enable_chroot_findtask = 1;
64852+#endif
64853+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64854+ grsec_enable_chroot_unix = 1;
64855+#endif
64856+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64857+ grsec_enable_chroot_mount = 1;
64858+#endif
64859+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64860+ grsec_enable_chroot_fchdir = 1;
64861+#endif
64862+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64863+ grsec_enable_chroot_shmat = 1;
64864+#endif
64865+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64866+ grsec_enable_audit_ptrace = 1;
64867+#endif
64868+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64869+ grsec_enable_chroot_double = 1;
64870+#endif
64871+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64872+ grsec_enable_chroot_pivot = 1;
64873+#endif
64874+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64875+ grsec_enable_chroot_chdir = 1;
64876+#endif
64877+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64878+ grsec_enable_chroot_chmod = 1;
64879+#endif
64880+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64881+ grsec_enable_chroot_mknod = 1;
64882+#endif
64883+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64884+ grsec_enable_chroot_nice = 1;
64885+#endif
64886+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64887+ grsec_enable_chroot_execlog = 1;
64888+#endif
64889+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64890+ grsec_enable_chroot_caps = 1;
64891+#endif
64892+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64893+ grsec_enable_chroot_sysctl = 1;
64894+#endif
64895+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64896+ grsec_enable_symlinkown = 1;
64897+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
64898+#endif
64899+#ifdef CONFIG_GRKERNSEC_TPE
64900+ grsec_enable_tpe = 1;
64901+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
64902+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64903+ grsec_enable_tpe_all = 1;
64904+#endif
64905+#endif
64906+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64907+ grsec_enable_socket_all = 1;
64908+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
64909+#endif
64910+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64911+ grsec_enable_socket_client = 1;
64912+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
64913+#endif
64914+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64915+ grsec_enable_socket_server = 1;
64916+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
64917+#endif
64918+#endif
64919+
64920+ return;
64921+}
64922diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
64923new file mode 100644
64924index 0000000..5e05e20
64925--- /dev/null
64926+++ b/grsecurity/grsec_link.c
64927@@ -0,0 +1,58 @@
64928+#include <linux/kernel.h>
64929+#include <linux/sched.h>
64930+#include <linux/fs.h>
64931+#include <linux/file.h>
64932+#include <linux/grinternal.h>
64933+
64934+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
64935+{
64936+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64937+ const struct inode *link_inode = link->dentry->d_inode;
64938+
64939+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
64940+ /* ignore root-owned links, e.g. /proc/self */
64941+ gr_is_global_nonroot(link_inode->i_uid) && target &&
64942+ !uid_eq(link_inode->i_uid, target->i_uid)) {
64943+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
64944+ return 1;
64945+ }
64946+#endif
64947+ return 0;
64948+}
64949+
64950+int
64951+gr_handle_follow_link(const struct inode *parent,
64952+ const struct inode *inode,
64953+ const struct dentry *dentry, const struct vfsmount *mnt)
64954+{
64955+#ifdef CONFIG_GRKERNSEC_LINK
64956+ const struct cred *cred = current_cred();
64957+
64958+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
64959+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
64960+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
64961+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
64962+ return -EACCES;
64963+ }
64964+#endif
64965+ return 0;
64966+}
64967+
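This mirrors what mainline later shipped as fs.protected_symlinks: in a sticky, world-writable directory, following a symlink is allowed only when the link is owned by the directory owner or by the follower's fsuid. Restated (illustrative):

#include <sys/stat.h>	/* S_ISVTX, S_IWOTH, mode_t */
#include <sys/types.h>	/* uid_t */

/* Illustrative re-statement of the follow_link test above; 1 = deny. */
static int follow_denied(mode_t parent_mode, uid_t parent_uid,
			 uid_t link_uid, uid_t fsuid)
{
	return (parent_mode & S_ISVTX) && (parent_mode & S_IWOTH) &&
	       parent_uid != link_uid && fsuid != link_uid;
}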
64968+int
64969+gr_handle_hardlink(const struct dentry *dentry,
64970+ const struct vfsmount *mnt,
64971+ struct inode *inode, const int mode, const struct filename *to)
64972+{
64973+#ifdef CONFIG_GRKERNSEC_LINK
64974+ const struct cred *cred = current_cred();
64975+
64976+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
64977+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
64978+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
64979+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
64980+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
64981+ return -EPERM;
64982+ }
64983+#endif
64984+ return 0;
64985+}
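And the hardlink rule (cf. the later fs.protected_hardlinks): linking to a file you do not own is refused unless the file is a plain, non-privileged file you could already open read-write, or you hold CAP_FOWNER, or you are root. As a predicate whose flags stand in for the kernel-side checks above (illustrative):

/* Illustrative; 1 = refuse the link. rw_access_ok corresponds to
 * inode_permission(inode, MAY_READ | MAY_WRITE) returning 0 above. */
static int hardlink_denied(int owns_file, int is_regular, int is_privileged,
			   int rw_access_ok, int has_cap_fowner, int is_root)
{
	return !owns_file &&
	       (!is_regular || is_privileged || !rw_access_ok) &&
	       !has_cap_fowner && !is_root;
}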
64986diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
64987new file mode 100644
64988index 0000000..7c06085
64989--- /dev/null
64990+++ b/grsecurity/grsec_log.c
64991@@ -0,0 +1,326 @@
64992+#include <linux/kernel.h>
64993+#include <linux/sched.h>
64994+#include <linux/file.h>
64995+#include <linux/tty.h>
64996+#include <linux/fs.h>
64997+#include <linux/grinternal.h>
64998+
64999+#ifdef CONFIG_TREE_PREEMPT_RCU
65000+#define DISABLE_PREEMPT() preempt_disable()
65001+#define ENABLE_PREEMPT() preempt_enable()
65002+#else
65003+#define DISABLE_PREEMPT()
65004+#define ENABLE_PREEMPT()
65005+#endif
65006+
65007+#define BEGIN_LOCKS(x) \
65008+ DISABLE_PREEMPT(); \
65009+ rcu_read_lock(); \
65010+ read_lock(&tasklist_lock); \
65011+ read_lock(&grsec_exec_file_lock); \
65012+ if (x != GR_DO_AUDIT) \
65013+ spin_lock(&grsec_alert_lock); \
65014+ else \
65015+ spin_lock(&grsec_audit_lock)
65016+
65017+#define END_LOCKS(x) \
65018+ if (x != GR_DO_AUDIT) \
65019+ spin_unlock(&grsec_alert_lock); \
65020+ else \
65021+ spin_unlock(&grsec_audit_lock); \
65022+ read_unlock(&grsec_exec_file_lock); \
65023+ read_unlock(&tasklist_lock); \
65024+ rcu_read_unlock(); \
65025+ ENABLE_PREEMPT(); \
65026+ if (x == GR_DONT_AUDIT) \
65027+ gr_handle_alertkill(current)
65028+
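The nesting in these macros is strictly ordered -- preemption disabled (only under CONFIG_TREE_PREEMPT_RCU), then rcu_read_lock, tasklist_lock, grsec_exec_file_lock, and finally the alert or audit spinlock -- and END_LOCKS releases in exact reverse; for GR_DONT_AUDIT, gr_handle_alertkill() is invoked only after every lock has been dropped.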
65029+enum {
65030+ FLOODING,
65031+ NO_FLOODING
65032+};
65033+
65034+extern char *gr_alert_log_fmt;
65035+extern char *gr_audit_log_fmt;
65036+extern char *gr_alert_log_buf;
65037+extern char *gr_audit_log_buf;
65038+
65039+static int gr_log_start(int audit)
65040+{
65041+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
65042+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
65043+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65044+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
65045+ unsigned long curr_secs = get_seconds();
65046+
65047+ if (audit == GR_DO_AUDIT)
65048+ goto set_fmt;
65049+
65050+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
65051+ grsec_alert_wtime = curr_secs;
65052+ grsec_alert_fyet = 0;
65053+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
65054+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
65055+ grsec_alert_fyet++;
65056+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
65057+ grsec_alert_wtime = curr_secs;
65058+ grsec_alert_fyet++;
65059+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
65060+ return FLOODING;
65061+ }
65062+ else return FLOODING;
65063+
65064+set_fmt:
65065+#endif
65066+ memset(buf, 0, PAGE_SIZE);
65067+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
65068+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
65069+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65070+ } else if (current->signal->curr_ip) {
65071+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
65072+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
65073+ } else if (gr_acl_is_enabled()) {
65074+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
65075+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65076+ } else {
65077+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
65078+ strcpy(buf, fmt);
65079+ }
65080+
65081+ return NO_FLOODING;
65082+}
65083+
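The window logic above admits a burst of alerts, announces suppression exactly once, and then stays silent until a full FLOODTIME window has elapsed from the announcement; note that the first message after an expired window is not counted against the new budget. A compilable userspace model (illustrative; the 10-second/6-message values are only assumed defaults):

#include <stdio.h>

#define FLOODTIME  10
#define FLOODBURST 6

static unsigned long wtime;	/* window start */
static unsigned long fyet;	/* messages counted in window */

/* Illustrative mirror of gr_log_start()'s control flow; 1 = may log. */
static int may_log(unsigned long now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;		/* window expired: restart, allow */
		fyet = 0;
	} else if (fyet < FLOODBURST) {
		fyet++;			/* still within the burst budget */
	} else if (fyet == FLOODBURST) {
		wtime = now;		/* budget exhausted: announce once */
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n",
		       FLOODTIME);
		return 0;
	} else {
		return 0;		/* suppressed until the window expires */
	}
	return 1;
}

int main(void)
{
	unsigned long t;

	for (t = 1; t <= 25; t++)
		if (may_log(t))
			printf("t=%lu: alert logged\n", t);
	return 0;
}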
65084+static void gr_log_middle(int audit, const char *msg, va_list ap)
65085+ __attribute__ ((format (printf, 2, 0)));
65086+
65087+static void gr_log_middle(int audit, const char *msg, va_list ap)
65088+{
65089+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65090+ unsigned int len = strlen(buf);
65091+
65092+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65093+
65094+ return;
65095+}
65096+
65097+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65098+ __attribute__ ((format (printf, 2, 3)));
65099+
65100+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65101+{
65102+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65103+ unsigned int len = strlen(buf);
65104+ va_list ap;
65105+
65106+ va_start(ap, msg);
65107+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65108+ va_end(ap);
65109+
65110+ return;
65111+}
65112+
65113+static void gr_log_end(int audit, int append_default)
65114+{
65115+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65116+ if (append_default) {
65117+ struct task_struct *task = current;
65118+ struct task_struct *parent = task->real_parent;
65119+ const struct cred *cred = __task_cred(task);
65120+ const struct cred *pcred = __task_cred(parent);
65121+ unsigned int len = strlen(buf);
65122+
65123+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65124+ }
65125+
65126+ printk("%s\n", buf);
65127+
65128+ return;
65129+}
65130+
65131+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
65132+{
65133+ int logtype;
65134+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
65135+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
65136+ void *voidptr = NULL;
65137+ int num1 = 0, num2 = 0;
65138+ unsigned long ulong1 = 0, ulong2 = 0;
65139+ struct dentry *dentry = NULL;
65140+ struct vfsmount *mnt = NULL;
65141+ struct file *file = NULL;
65142+ struct task_struct *task = NULL;
65143+ const struct cred *cred, *pcred;
65144+ va_list ap;
65145+
65146+ BEGIN_LOCKS(audit);
65147+ logtype = gr_log_start(audit);
65148+ if (logtype == FLOODING) {
65149+ END_LOCKS(audit);
65150+ return;
65151+ }
65152+ va_start(ap, argtypes);
65153+ switch (argtypes) {
65154+ case GR_TTYSNIFF:
65155+ task = va_arg(ap, struct task_struct *);
65156+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
65157+ break;
65158+ case GR_SYSCTL_HIDDEN:
65159+ str1 = va_arg(ap, char *);
65160+ gr_log_middle_varargs(audit, msg, result, str1);
65161+ break;
65162+ case GR_RBAC:
65163+ dentry = va_arg(ap, struct dentry *);
65164+ mnt = va_arg(ap, struct vfsmount *);
65165+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
65166+ break;
65167+ case GR_RBAC_STR:
65168+ dentry = va_arg(ap, struct dentry *);
65169+ mnt = va_arg(ap, struct vfsmount *);
65170+ str1 = va_arg(ap, char *);
65171+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
65172+ break;
65173+ case GR_STR_RBAC:
65174+ str1 = va_arg(ap, char *);
65175+ dentry = va_arg(ap, struct dentry *);
65176+ mnt = va_arg(ap, struct vfsmount *);
65177+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
65178+ break;
65179+ case GR_RBAC_MODE2:
65180+ dentry = va_arg(ap, struct dentry *);
65181+ mnt = va_arg(ap, struct vfsmount *);
65182+ str1 = va_arg(ap, char *);
65183+ str2 = va_arg(ap, char *);
65184+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
65185+ break;
65186+ case GR_RBAC_MODE3:
65187+ dentry = va_arg(ap, struct dentry *);
65188+ mnt = va_arg(ap, struct vfsmount *);
65189+ str1 = va_arg(ap, char *);
65190+ str2 = va_arg(ap, char *);
65191+ str3 = va_arg(ap, char *);
65192+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
65193+ break;
65194+ case GR_FILENAME:
65195+ dentry = va_arg(ap, struct dentry *);
65196+ mnt = va_arg(ap, struct vfsmount *);
65197+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
65198+ break;
65199+ case GR_STR_FILENAME:
65200+ str1 = va_arg(ap, char *);
65201+ dentry = va_arg(ap, struct dentry *);
65202+ mnt = va_arg(ap, struct vfsmount *);
65203+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
65204+ break;
65205+ case GR_FILENAME_STR:
65206+ dentry = va_arg(ap, struct dentry *);
65207+ mnt = va_arg(ap, struct vfsmount *);
65208+ str1 = va_arg(ap, char *);
65209+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
65210+ break;
65211+ case GR_FILENAME_TWO_INT:
65212+ dentry = va_arg(ap, struct dentry *);
65213+ mnt = va_arg(ap, struct vfsmount *);
65214+ num1 = va_arg(ap, int);
65215+ num2 = va_arg(ap, int);
65216+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
65217+ break;
65218+ case GR_FILENAME_TWO_INT_STR:
65219+ dentry = va_arg(ap, struct dentry *);
65220+ mnt = va_arg(ap, struct vfsmount *);
65221+ num1 = va_arg(ap, int);
65222+ num2 = va_arg(ap, int);
65223+ str1 = va_arg(ap, char *);
65224+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
65225+ break;
65226+ case GR_TEXTREL:
65227+ file = va_arg(ap, struct file *);
65228+ ulong1 = va_arg(ap, unsigned long);
65229+ ulong2 = va_arg(ap, unsigned long);
65230+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
65231+ break;
65232+ case GR_PTRACE:
65233+ task = va_arg(ap, struct task_struct *);
65234+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
65235+ break;
65236+ case GR_RESOURCE:
65237+ task = va_arg(ap, struct task_struct *);
65238+ cred = __task_cred(task);
65239+ pcred = __task_cred(task->real_parent);
65240+ ulong1 = va_arg(ap, unsigned long);
65241+ str1 = va_arg(ap, char *);
65242+ ulong2 = va_arg(ap, unsigned long);
65243+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65244+ break;
65245+ case GR_CAP:
65246+ task = va_arg(ap, struct task_struct *);
65247+ cred = __task_cred(task);
65248+ pcred = __task_cred(task->real_parent);
65249+ str1 = va_arg(ap, char *);
65250+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65251+ break;
65252+ case GR_SIG:
65253+ str1 = va_arg(ap, char *);
65254+ voidptr = va_arg(ap, void *);
65255+ gr_log_middle_varargs(audit, msg, str1, voidptr);
65256+ break;
65257+ case GR_SIG2:
65258+ task = va_arg(ap, struct task_struct *);
65259+ cred = __task_cred(task);
65260+ pcred = __task_cred(task->real_parent);
65261+ num1 = va_arg(ap, int);
65262+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65263+ break;
65264+ case GR_CRASH1:
65265+ task = va_arg(ap, struct task_struct *);
65266+ cred = __task_cred(task);
65267+ pcred = __task_cred(task->real_parent);
65268+ ulong1 = va_arg(ap, unsigned long);
65269+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
65270+ break;
65271+ case GR_CRASH2:
65272+ task = va_arg(ap, struct task_struct *);
65273+ cred = __task_cred(task);
65274+ pcred = __task_cred(task->real_parent);
65275+ ulong1 = va_arg(ap, unsigned long);
65276+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
65277+ break;
65278+ case GR_RWXMAP:
65279+ file = va_arg(ap, struct file *);
65280+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
65281+ break;
65282+ case GR_PSACCT:
65283+ {
65284+ unsigned int wday, cday;
65285+ __u8 whr, chr;
65286+ __u8 wmin, cmin;
65287+ __u8 wsec, csec;
65288+ char cur_tty[64] = { 0 };
65289+ char parent_tty[64] = { 0 };
65290+
65291+ task = va_arg(ap, struct task_struct *);
65292+ wday = va_arg(ap, unsigned int);
65293+ cday = va_arg(ap, unsigned int);
65294+ whr = va_arg(ap, int);
65295+ chr = va_arg(ap, int);
65296+ wmin = va_arg(ap, int);
65297+ cmin = va_arg(ap, int);
65298+ wsec = va_arg(ap, int);
65299+ csec = va_arg(ap, int);
65300+ ulong1 = va_arg(ap, unsigned long);
65301+ cred = __task_cred(task);
65302+ pcred = __task_cred(task->real_parent);
65303+
65304+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65305+ }
65306+ break;
65307+ default:
65308+ gr_log_middle(audit, msg, ap);
65309+ }
65310+ va_end(ap);
65311+ // these don't need DEFAULTSECARGS printed on the end
65312+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
65313+ gr_log_end(audit, 0);
65314+ else
65315+ gr_log_end(audit, 1);
65316+ END_LOCKS(audit);
65317+}
65318diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
65319new file mode 100644
65320index 0000000..f536303
65321--- /dev/null
65322+++ b/grsecurity/grsec_mem.c
65323@@ -0,0 +1,40 @@
65324+#include <linux/kernel.h>
65325+#include <linux/sched.h>
65326+#include <linux/mm.h>
65327+#include <linux/mman.h>
65328+#include <linux/grinternal.h>
65329+
65330+void
65331+gr_handle_ioperm(void)
65332+{
65333+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
65334+ return;
65335+}
65336+
65337+void
65338+gr_handle_iopl(void)
65339+{
65340+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
65341+ return;
65342+}
65343+
65344+void
65345+gr_handle_mem_readwrite(u64 from, u64 to)
65346+{
65347+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
65348+ return;
65349+}
65350+
65351+void
65352+gr_handle_vm86(void)
65353+{
65354+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
65355+ return;
65356+}
65357+
65358+void
65359+gr_log_badprocpid(const char *entry)
65360+{
65361+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
65362+ return;
65363+}
65364diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
65365new file mode 100644
65366index 0000000..2131422
65367--- /dev/null
65368+++ b/grsecurity/grsec_mount.c
65369@@ -0,0 +1,62 @@
65370+#include <linux/kernel.h>
65371+#include <linux/sched.h>
65372+#include <linux/mount.h>
65373+#include <linux/grsecurity.h>
65374+#include <linux/grinternal.h>
65375+
65376+void
65377+gr_log_remount(const char *devname, const int retval)
65378+{
65379+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65380+ if (grsec_enable_mount && (retval >= 0))
65381+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
65382+#endif
65383+ return;
65384+}
65385+
65386+void
65387+gr_log_unmount(const char *devname, const int retval)
65388+{
65389+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65390+ if (grsec_enable_mount && (retval >= 0))
65391+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
65392+#endif
65393+ return;
65394+}
65395+
65396+void
65397+gr_log_mount(const char *from, const char *to, const int retval)
65398+{
65399+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65400+ if (grsec_enable_mount && (retval >= 0))
65401+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
65402+#endif
65403+ return;
65404+}
65405+
65406+int
65407+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
65408+{
65409+#ifdef CONFIG_GRKERNSEC_ROFS
65410+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
65411+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
65412+ return -EPERM;
65413+ } else
65414+ return 0;
65415+#endif
65416+ return 0;
65417+}
65418+
65419+int
65420+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
65421+{
65422+#ifdef CONFIG_GRKERNSEC_ROFS
65423+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
65424+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
65425+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
65426+ return -EPERM;
65427+ } else
65428+ return 0;
65429+#endif
65430+ return 0;
65431+}
65432diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
65433new file mode 100644
65434index 0000000..a3b12a0
65435--- /dev/null
65436+++ b/grsecurity/grsec_pax.c
65437@@ -0,0 +1,36 @@
65438+#include <linux/kernel.h>
65439+#include <linux/sched.h>
65440+#include <linux/mm.h>
65441+#include <linux/file.h>
65442+#include <linux/grinternal.h>
65443+#include <linux/grsecurity.h>
65444+
65445+void
65446+gr_log_textrel(struct vm_area_struct * vma)
65447+{
65448+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65449+ if (grsec_enable_audit_textrel)
65450+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
65451+#endif
65452+ return;
65453+}
65454+
65455+void
65456+gr_log_rwxmmap(struct file *file)
65457+{
65458+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65459+ if (grsec_enable_log_rwxmaps)
65460+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
65461+#endif
65462+ return;
65463+}
65464+
65465+void
65466+gr_log_rwxmprotect(struct file *file)
65467+{
65468+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65469+ if (grsec_enable_log_rwxmaps)
65470+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
65471+#endif
65472+ return;
65473+}
65474diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
65475new file mode 100644
65476index 0000000..f7f29aa
65477--- /dev/null
65478+++ b/grsecurity/grsec_ptrace.c
65479@@ -0,0 +1,30 @@
65480+#include <linux/kernel.h>
65481+#include <linux/sched.h>
65482+#include <linux/grinternal.h>
65483+#include <linux/security.h>
65484+
65485+void
65486+gr_audit_ptrace(struct task_struct *task)
65487+{
65488+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65489+ if (grsec_enable_audit_ptrace)
65490+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
65491+#endif
65492+ return;
65493+}
65494+
65495+int
65496+gr_ptrace_readexec(struct file *file, int unsafe_flags)
65497+{
65498+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65499+ const struct dentry *dentry = file->f_path.dentry;
65500+ const struct vfsmount *mnt = file->f_path.mnt;
65501+
65502+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
65503+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
65504+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
65505+ return -EACCES;
65506+ }
65507+#endif
65508+ return 0;
65509+}
65510diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
65511new file mode 100644
65512index 0000000..e09715a
65513--- /dev/null
65514+++ b/grsecurity/grsec_sig.c
65515@@ -0,0 +1,222 @@
65516+#include <linux/kernel.h>
65517+#include <linux/sched.h>
65518+#include <linux/delay.h>
65519+#include <linux/grsecurity.h>
65520+#include <linux/grinternal.h>
65521+#include <linux/hardirq.h>
65522+
65523+char *signames[] = {
65524+ [SIGSEGV] = "Segmentation fault",
65525+ [SIGILL] = "Illegal instruction",
65526+ [SIGABRT] = "Abort",
65527+ [SIGBUS] = "Invalid alignment/Bus error"
65528+};
65529+
65530+void
65531+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
65532+{
65533+#ifdef CONFIG_GRKERNSEC_SIGNAL
65534+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
65535+ (sig == SIGABRT) || (sig == SIGBUS))) {
65536+ if (task_pid_nr(t) == task_pid_nr(current)) {
65537+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
65538+ } else {
65539+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
65540+ }
65541+ }
65542+#endif
65543+ return;
65544+}
65545+
65546+int
65547+gr_handle_signal(const struct task_struct *p, const int sig)
65548+{
65549+#ifdef CONFIG_GRKERNSEC
65550+ /* ignore the 0 signal for protected task checks */
65551+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
65552+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
65553+ return -EPERM;
65554+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
65555+ return -EPERM;
65556+ }
65557+#endif
65558+ return 0;
65559+}
65560+
65561+#ifdef CONFIG_GRKERNSEC
65562+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
65563+
65564+int gr_fake_force_sig(int sig, struct task_struct *t)
65565+{
65566+ unsigned long int flags;
65567+ int ret, blocked, ignored;
65568+ struct k_sigaction *action;
65569+
65570+ spin_lock_irqsave(&t->sighand->siglock, flags);
65571+ action = &t->sighand->action[sig-1];
65572+ ignored = action->sa.sa_handler == SIG_IGN;
65573+ blocked = sigismember(&t->blocked, sig);
65574+ if (blocked || ignored) {
65575+ action->sa.sa_handler = SIG_DFL;
65576+ if (blocked) {
65577+ sigdelset(&t->blocked, sig);
65578+ recalc_sigpending_and_wake(t);
65579+ }
65580+ }
65581+ if (action->sa.sa_handler == SIG_DFL)
65582+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
65583+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
65584+
65585+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
65586+
65587+ return ret;
65588+}
65589+#endif
65590+
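A userspace analogue of what gr_fake_force_sig() does kernel-side (illustrative only): restore the default disposition and unblock the signal before raising it, so an ignoring or blocking target cannot deflect delivery.

#include <signal.h>

/* Illustrative sketch; operates on the calling process itself. */
static void force_default_and_raise(int sig)
{
	struct sigaction sa;
	sigset_t set;

	sa.sa_handler = SIG_DFL;	/* undo SIG_IGN */
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(sig, &sa, NULL);

	sigemptyset(&set);		/* undo blocking */
	sigaddset(&set, sig);
	sigprocmask(SIG_UNBLOCK, &set, NULL);

	raise(sig);
}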
65591+#ifdef CONFIG_GRKERNSEC_BRUTE
65592+#define GR_USER_BAN_TIME (15 * 60)
65593+#define GR_DAEMON_BRUTE_TIME (30 * 60)
65594+
65595+static int __get_dumpable(unsigned long mm_flags)
65596+{
65597+ int ret;
65598+
65599+ ret = mm_flags & MMF_DUMPABLE_MASK;
65600+ return (ret >= 2) ? 2 : ret;
65601+}
65602+#endif
65603+
65604+void gr_handle_brute_attach(unsigned long mm_flags)
65605+{
65606+#ifdef CONFIG_GRKERNSEC_BRUTE
65607+ struct task_struct *p = current;
65608+ kuid_t uid = GLOBAL_ROOT_UID;
65609+ int daemon = 0;
65610+
65611+ if (!grsec_enable_brute)
65612+ return;
65613+
65614+ rcu_read_lock();
65615+ read_lock(&tasklist_lock);
65616+ read_lock(&grsec_exec_file_lock);
65617+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
65618+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
65619+ p->real_parent->brute = 1;
65620+ daemon = 1;
65621+ } else {
65622+ const struct cred *cred = __task_cred(p), *cred2;
65623+ struct task_struct *tsk, *tsk2;
65624+
65625+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
65626+ struct user_struct *user;
65627+
65628+ uid = cred->uid;
65629+
65630+ /* this is put upon execution past expiration */
65631+ user = find_user(uid);
65632+ if (user == NULL)
65633+ goto unlock;
65634+ user->banned = 1;
65635+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
65636+ if (user->ban_expires == ~0UL)
65637+ user->ban_expires--;
65638+
65639+ do_each_thread(tsk2, tsk) {
65640+ cred2 = __task_cred(tsk);
65641+ if (tsk != p && uid_eq(cred2->uid, uid))
65642+ gr_fake_force_sig(SIGKILL, tsk);
65643+ } while_each_thread(tsk2, tsk);
65644+ }
65645+ }
65646+unlock:
65647+ read_unlock(&grsec_exec_file_lock);
65648+ read_unlock(&tasklist_lock);
65649+ rcu_read_unlock();
65650+
65651+ if (gr_is_global_nonroot(uid))
65652+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
65653+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
65654+ else if (daemon)
65655+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
65656+
65657+#endif
65658+ return;
65659+}
65660+
65661+void gr_handle_brute_check(void)
65662+{
65663+#ifdef CONFIG_GRKERNSEC_BRUTE
65664+ struct task_struct *p = current;
65665+
65666+ if (unlikely(p->brute)) {
65667+ if (!grsec_enable_brute)
65668+ p->brute = 0;
65669+ else if (time_before(get_seconds(), p->brute_expires))
65670+ msleep(30 * 1000);
65671+ }
65672+#endif
65673+ return;
65674+}
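
Together the two handlers implement the deterrence behind GRKERNSEC_BRUTE: when a non-dumpable process crashes, gr_handle_brute_attach() either bans the offending non-root user for 15 minutes or, when the crashed child shares its exec_file with the parent (a forking daemon), marks the parent as brute for 30 minutes; gr_handle_brute_check() then throttles each re-spawn of a marked daemon with a 30-second sleep. A sketch of the exec-side call, with a hypothetical caller:

    /* Hypothetical exec-path hook: delay re-execution of a daemon whose
     * child recently crashed after a privilege transition. */
    static int example_bprm_check(struct linux_binprm *bprm)
    {
            gr_handle_brute_check();        /* msleep(30s) while flagged */
            /* ... remaining exec-time checks ... */
            return 0;
    }
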
65675+
65676+void gr_handle_kernel_exploit(void)
65677+{
65678+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
65679+ const struct cred *cred;
65680+ struct task_struct *tsk, *tsk2;
65681+ struct user_struct *user;
65682+ kuid_t uid;
65683+
65684+ if (in_irq() || in_serving_softirq() || in_nmi())
65685+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
65686+
65687+ uid = current_uid();
65688+
65689+ if (gr_is_global_root(uid))
65690+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
65691+ else {
65692+ /* kill all the processes of this user, hold a reference
65693+ to their creds struct, and prevent them from creating
65694+ another process until system reset
65695+ */
65696+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
65697+ GR_GLOBAL_UID(uid));
65698+ /* we intentionally leak this ref */
65699+ user = get_uid(current->cred->user);
65700+ if (user) {
65701+ user->banned = 1;
65702+ user->ban_expires = ~0UL;
65703+ }
65704+
65705+ read_lock(&tasklist_lock);
65706+ do_each_thread(tsk2, tsk) {
65707+ cred = __task_cred(tsk);
65708+ if (uid_eq(cred->uid, uid))
65709+ gr_fake_force_sig(SIGKILL, tsk);
65710+ } while_each_thread(tsk2, tsk);
65711+ read_unlock(&tasklist_lock);
65712+ }
65713+#endif
65714+}
65715+
65716+int __gr_process_user_ban(struct user_struct *user)
65717+{
65718+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65719+ if (unlikely(user->banned)) {
65720+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
65721+ user->banned = 0;
65722+ user->ban_expires = 0;
65723+ free_uid(user);
65724+ } else
65725+ return -EPERM;
65726+ }
65727+#endif
65728+ return 0;
65729+}
65730+
65731+int gr_process_user_ban(void)
65732+{
65733+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65734+ return __gr_process_user_ban(current->cred->user);
65735+#endif
65736+ return 0;
65737+}
65738diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
65739new file mode 100644
65740index 0000000..4030d57
65741--- /dev/null
65742+++ b/grsecurity/grsec_sock.c
65743@@ -0,0 +1,244 @@
65744+#include <linux/kernel.h>
65745+#include <linux/module.h>
65746+#include <linux/sched.h>
65747+#include <linux/file.h>
65748+#include <linux/net.h>
65749+#include <linux/in.h>
65750+#include <linux/ip.h>
65751+#include <net/sock.h>
65752+#include <net/inet_sock.h>
65753+#include <linux/grsecurity.h>
65754+#include <linux/grinternal.h>
65755+#include <linux/gracl.h>
65756+
65757+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
65758+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
65759+
65760+EXPORT_SYMBOL(gr_search_udp_recvmsg);
65761+EXPORT_SYMBOL(gr_search_udp_sendmsg);
65762+
65763+#ifdef CONFIG_UNIX_MODULE
65764+EXPORT_SYMBOL(gr_acl_handle_unix);
65765+EXPORT_SYMBOL(gr_acl_handle_mknod);
65766+EXPORT_SYMBOL(gr_handle_chroot_unix);
65767+EXPORT_SYMBOL(gr_handle_create);
65768+#endif
65769+
65770+#ifdef CONFIG_GRKERNSEC
65771+#define gr_conn_table_size 32749
65772+struct conn_table_entry {
65773+ struct conn_table_entry *next;
65774+ struct signal_struct *sig;
65775+};
65776+
65777+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
65778+DEFINE_SPINLOCK(gr_conn_table_lock);
65779+
65780+extern const char * gr_socktype_to_name(unsigned char type);
65781+extern const char * gr_proto_to_name(unsigned char proto);
65782+extern const char * gr_sockfamily_to_name(unsigned char family);
65783+
65784+static __inline__ int
65785+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
65786+{
65787+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
65788+}
65789+
65790+static __inline__ int
65791+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
65792+ __u16 sport, __u16 dport)
65793+{
65794+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
65795+ sig->gr_sport == sport && sig->gr_dport == dport))
65796+ return 1;
65797+ else
65798+ return 0;
65799+}
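
The task/IP table is a simple chained hash over the full connection 4-tuple; gr_conn_table_size is prime (32749), so the final modulo spreads nearby tuples across buckets. A stand-alone sketch of the bucket computation, with illustrative raw 32-bit address values:

    #include <stdio.h>

    #define GR_CONN_TABLE_SIZE 32749u       /* prime, as in the patch */

    static unsigned int conn_hash_demo(unsigned int saddr, unsigned int daddr,
                                       unsigned short sport, unsigned short dport)
    {
            return (daddr + saddr + (sport << 8) + (dport << 16))
                    % GR_CONN_TABLE_SIZE;
    }

    int main(void)
    {
            /* 192.168.1.10:44321 -> 10.0.0.1:80 (values illustrative) */
            printf("bucket %u\n",
                   conn_hash_demo(0xc0a8010au, 0x0a000001u, 44321, 80));
            return 0;
    }
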
65800+
65801+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
65802+{
65803+ struct conn_table_entry **match;
65804+ unsigned int index;
65805+
65806+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65807+ sig->gr_sport, sig->gr_dport,
65808+ gr_conn_table_size);
65809+
65810+ newent->sig = sig;
65811+
65812+ match = &gr_conn_table[index];
65813+ newent->next = *match;
65814+ *match = newent;
65815+
65816+ return;
65817+}
65818+
65819+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
65820+{
65821+ struct conn_table_entry *match, *last = NULL;
65822+ unsigned int index;
65823+
65824+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65825+ sig->gr_sport, sig->gr_dport,
65826+ gr_conn_table_size);
65827+
65828+ match = gr_conn_table[index];
65829+ while (match && !conn_match(match->sig,
65830+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
65831+ sig->gr_dport)) {
65832+ last = match;
65833+ match = match->next;
65834+ }
65835+
65836+ if (match) {
65837+ if (last)
65838+ last->next = match->next;
65839+ else
65840+ gr_conn_table[index] = NULL;
65841+ kfree(match);
65842+ }
65843+
65844+ return;
65845+}
65846+
65847+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
65848+ __u16 sport, __u16 dport)
65849+{
65850+ struct conn_table_entry *match;
65851+ unsigned int index;
65852+
65853+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
65854+
65855+ match = gr_conn_table[index];
65856+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
65857+ match = match->next;
65858+
65859+ if (match)
65860+ return match->sig;
65861+ else
65862+ return NULL;
65863+}
65864+
65865+#endif
65866+
65867+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
65868+{
65869+#ifdef CONFIG_GRKERNSEC
65870+ struct signal_struct *sig = task->signal;
65871+ struct conn_table_entry *newent;
65872+
65873+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
65874+ if (newent == NULL)
65875+ return;
65876+ /* no bh lock needed since we are called with bh disabled */
65877+ spin_lock(&gr_conn_table_lock);
65878+ gr_del_task_from_ip_table_nolock(sig);
65879+ sig->gr_saddr = inet->inet_rcv_saddr;
65880+ sig->gr_daddr = inet->inet_daddr;
65881+ sig->gr_sport = inet->inet_sport;
65882+ sig->gr_dport = inet->inet_dport;
65883+ gr_add_to_task_ip_table_nolock(sig, newent);
65884+ spin_unlock(&gr_conn_table_lock);
65885+#endif
65886+ return;
65887+}
65888+
65889+void gr_del_task_from_ip_table(struct task_struct *task)
65890+{
65891+#ifdef CONFIG_GRKERNSEC
65892+ spin_lock_bh(&gr_conn_table_lock);
65893+ gr_del_task_from_ip_table_nolock(task->signal);
65894+ spin_unlock_bh(&gr_conn_table_lock);
65895+#endif
65896+ return;
65897+}
65898+
65899+void
65900+gr_attach_curr_ip(const struct sock *sk)
65901+{
65902+#ifdef CONFIG_GRKERNSEC
65903+ struct signal_struct *p, *set;
65904+ const struct inet_sock *inet = inet_sk(sk);
65905+
65906+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
65907+ return;
65908+
65909+ set = current->signal;
65910+
65911+ spin_lock_bh(&gr_conn_table_lock);
65912+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
65913+ inet->inet_dport, inet->inet_sport);
65914+ if (unlikely(p != NULL)) {
65915+ set->curr_ip = p->curr_ip;
65916+ set->used_accept = 1;
65917+ gr_del_task_from_ip_table_nolock(p);
65918+ spin_unlock_bh(&gr_conn_table_lock);
65919+ return;
65920+ }
65921+ spin_unlock_bh(&gr_conn_table_lock);
65922+
65923+ set->curr_ip = inet->inet_daddr;
65924+ set->used_accept = 1;
65925+#endif
65926+ return;
65927+}
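
Note the argument order in the lookup above: gr_attach_curr_ip() runs on the accepting side, so the new socket's (inet_daddr, inet_rcv_saddr, inet_dport, inet_sport) is deliberately matched against a peer task's stored (saddr, daddr, sport, dport) -- the mirror image of the same connection. When the peer is a local task (a loopback connection), the accepting task inherits its recorded curr_ip; otherwise curr_ip falls back to the remote address inet_daddr. Schematically:

    /* One connection, two views (illustrative values):
     *   connecting task stores:   saddr=A  daddr=B  sport=p  dport=q
     *   accepting task looks up:  daddr=A  rcv_saddr=B  dport=p  sport=q
     * The reversed field selection makes the two keys identical, so the
     * lookup finds the connecting task's entry on the same host. */
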
65928+
65929+int
65930+gr_handle_sock_all(const int family, const int type, const int protocol)
65931+{
65932+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65933+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
65934+ (family != AF_UNIX)) {
65935+ if (family == AF_INET)
65936+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
65937+ else
65938+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
65939+ return -EACCES;
65940+ }
65941+#endif
65942+ return 0;
65943+}
65944+
65945+int
65946+gr_handle_sock_server(const struct sockaddr *sck)
65947+{
65948+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65949+ if (grsec_enable_socket_server &&
65950+ in_group_p(grsec_socket_server_gid) &&
65951+ sck && (sck->sa_family != AF_UNIX) &&
65952+ (sck->sa_family != AF_LOCAL)) {
65953+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65954+ return -EACCES;
65955+ }
65956+#endif
65957+ return 0;
65958+}
65959+
65960+int
65961+gr_handle_sock_server_other(const struct sock *sck)
65962+{
65963+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65964+ if (grsec_enable_socket_server &&
65965+ in_group_p(grsec_socket_server_gid) &&
65966+ sck && (sck->sk_family != AF_UNIX) &&
65967+ (sck->sk_family != AF_LOCAL)) {
65968+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65969+ return -EACCES;
65970+ }
65971+#endif
65972+ return 0;
65973+}
65974+
65975+int
65976+gr_handle_sock_client(const struct sockaddr *sck)
65977+{
65978+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65979+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
65980+ sck && (sck->sa_family != AF_UNIX) &&
65981+ (sck->sa_family != AF_LOCAL)) {
65982+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
65983+ return -EACCES;
65984+ }
65985+#endif
65986+ return 0;
65987+}
65988diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
65989new file mode 100644
65990index 0000000..f55ef0f
65991--- /dev/null
65992+++ b/grsecurity/grsec_sysctl.c
65993@@ -0,0 +1,469 @@
65994+#include <linux/kernel.h>
65995+#include <linux/sched.h>
65996+#include <linux/sysctl.h>
65997+#include <linux/grsecurity.h>
65998+#include <linux/grinternal.h>
65999+
66000+int
66001+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
66002+{
66003+#ifdef CONFIG_GRKERNSEC_SYSCTL
66004+ if (dirname == NULL || name == NULL)
66005+ return 0;
66006+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
66007+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
66008+ return -EACCES;
66009+ }
66010+#endif
66011+ return 0;
66012+}
66013+
66014+#ifdef CONFIG_GRKERNSEC_ROFS
66015+static int __maybe_unused one = 1;
66016+#endif
66017+
66018+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66019+struct ctl_table grsecurity_table[] = {
66020+#ifdef CONFIG_GRKERNSEC_SYSCTL
66021+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
66022+#ifdef CONFIG_GRKERNSEC_IO
66023+ {
66024+ .procname = "disable_priv_io",
66025+ .data = &grsec_disable_privio,
66026+ .maxlen = sizeof(int),
66027+ .mode = 0600,
66028+ .proc_handler = &proc_dointvec,
66029+ },
66030+#endif
66031+#endif
66032+#ifdef CONFIG_GRKERNSEC_LINK
66033+ {
66034+ .procname = "linking_restrictions",
66035+ .data = &grsec_enable_link,
66036+ .maxlen = sizeof(int),
66037+ .mode = 0600,
66038+ .proc_handler = &proc_dointvec,
66039+ },
66040+#endif
66041+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
66042+ {
66043+ .procname = "enforce_symlinksifowner",
66044+ .data = &grsec_enable_symlinkown,
66045+ .maxlen = sizeof(int),
66046+ .mode = 0600,
66047+ .proc_handler = &proc_dointvec,
66048+ },
66049+ {
66050+ .procname = "symlinkown_gid",
66051+ .data = &grsec_symlinkown_gid,
66052+ .maxlen = sizeof(int),
66053+ .mode = 0600,
66054+ .proc_handler = &proc_dointvec,
66055+ },
66056+#endif
66057+#ifdef CONFIG_GRKERNSEC_BRUTE
66058+ {
66059+ .procname = "deter_bruteforce",
66060+ .data = &grsec_enable_brute,
66061+ .maxlen = sizeof(int),
66062+ .mode = 0600,
66063+ .proc_handler = &proc_dointvec,
66064+ },
66065+#endif
66066+#ifdef CONFIG_GRKERNSEC_FIFO
66067+ {
66068+ .procname = "fifo_restrictions",
66069+ .data = &grsec_enable_fifo,
66070+ .maxlen = sizeof(int),
66071+ .mode = 0600,
66072+ .proc_handler = &proc_dointvec,
66073+ },
66074+#endif
66075+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66076+ {
66077+ .procname = "ptrace_readexec",
66078+ .data = &grsec_enable_ptrace_readexec,
66079+ .maxlen = sizeof(int),
66080+ .mode = 0600,
66081+ .proc_handler = &proc_dointvec,
66082+ },
66083+#endif
66084+#ifdef CONFIG_GRKERNSEC_SETXID
66085+ {
66086+ .procname = "consistent_setxid",
66087+ .data = &grsec_enable_setxid,
66088+ .maxlen = sizeof(int),
66089+ .mode = 0600,
66090+ .proc_handler = &proc_dointvec,
66091+ },
66092+#endif
66093+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66094+ {
66095+ .procname = "ip_blackhole",
66096+ .data = &grsec_enable_blackhole,
66097+ .maxlen = sizeof(int),
66098+ .mode = 0600,
66099+ .proc_handler = &proc_dointvec,
66100+ },
66101+ {
66102+ .procname = "lastack_retries",
66103+ .data = &grsec_lastack_retries,
66104+ .maxlen = sizeof(int),
66105+ .mode = 0600,
66106+ .proc_handler = &proc_dointvec,
66107+ },
66108+#endif
66109+#ifdef CONFIG_GRKERNSEC_EXECLOG
66110+ {
66111+ .procname = "exec_logging",
66112+ .data = &grsec_enable_execlog,
66113+ .maxlen = sizeof(int),
66114+ .mode = 0600,
66115+ .proc_handler = &proc_dointvec,
66116+ },
66117+#endif
66118+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66119+ {
66120+ .procname = "rwxmap_logging",
66121+ .data = &grsec_enable_log_rwxmaps,
66122+ .maxlen = sizeof(int),
66123+ .mode = 0600,
66124+ .proc_handler = &proc_dointvec,
66125+ },
66126+#endif
66127+#ifdef CONFIG_GRKERNSEC_SIGNAL
66128+ {
66129+ .procname = "signal_logging",
66130+ .data = &grsec_enable_signal,
66131+ .maxlen = sizeof(int),
66132+ .mode = 0600,
66133+ .proc_handler = &proc_dointvec,
66134+ },
66135+#endif
66136+#ifdef CONFIG_GRKERNSEC_FORKFAIL
66137+ {
66138+ .procname = "forkfail_logging",
66139+ .data = &grsec_enable_forkfail,
66140+ .maxlen = sizeof(int),
66141+ .mode = 0600,
66142+ .proc_handler = &proc_dointvec,
66143+ },
66144+#endif
66145+#ifdef CONFIG_GRKERNSEC_TIME
66146+ {
66147+ .procname = "timechange_logging",
66148+ .data = &grsec_enable_time,
66149+ .maxlen = sizeof(int),
66150+ .mode = 0600,
66151+ .proc_handler = &proc_dointvec,
66152+ },
66153+#endif
66154+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
66155+ {
66156+ .procname = "chroot_deny_shmat",
66157+ .data = &grsec_enable_chroot_shmat,
66158+ .maxlen = sizeof(int),
66159+ .mode = 0600,
66160+ .proc_handler = &proc_dointvec,
66161+ },
66162+#endif
66163+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66164+ {
66165+ .procname = "chroot_deny_unix",
66166+ .data = &grsec_enable_chroot_unix,
66167+ .maxlen = sizeof(int),
66168+ .mode = 0600,
66169+ .proc_handler = &proc_dointvec,
66170+ },
66171+#endif
66172+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
66173+ {
66174+ .procname = "chroot_deny_mount",
66175+ .data = &grsec_enable_chroot_mount,
66176+ .maxlen = sizeof(int),
66177+ .mode = 0600,
66178+ .proc_handler = &proc_dointvec,
66179+ },
66180+#endif
66181+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
66182+ {
66183+ .procname = "chroot_deny_fchdir",
66184+ .data = &grsec_enable_chroot_fchdir,
66185+ .maxlen = sizeof(int),
66186+ .mode = 0600,
66187+ .proc_handler = &proc_dointvec,
66188+ },
66189+#endif
66190+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
66191+ {
66192+ .procname = "chroot_deny_chroot",
66193+ .data = &grsec_enable_chroot_double,
66194+ .maxlen = sizeof(int),
66195+ .mode = 0600,
66196+ .proc_handler = &proc_dointvec,
66197+ },
66198+#endif
66199+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
66200+ {
66201+ .procname = "chroot_deny_pivot",
66202+ .data = &grsec_enable_chroot_pivot,
66203+ .maxlen = sizeof(int),
66204+ .mode = 0600,
66205+ .proc_handler = &proc_dointvec,
66206+ },
66207+#endif
66208+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
66209+ {
66210+ .procname = "chroot_enforce_chdir",
66211+ .data = &grsec_enable_chroot_chdir,
66212+ .maxlen = sizeof(int),
66213+ .mode = 0600,
66214+ .proc_handler = &proc_dointvec,
66215+ },
66216+#endif
66217+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
66218+ {
66219+ .procname = "chroot_deny_chmod",
66220+ .data = &grsec_enable_chroot_chmod,
66221+ .maxlen = sizeof(int),
66222+ .mode = 0600,
66223+ .proc_handler = &proc_dointvec,
66224+ },
66225+#endif
66226+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
66227+ {
66228+ .procname = "chroot_deny_mknod",
66229+ .data = &grsec_enable_chroot_mknod,
66230+ .maxlen = sizeof(int),
66231+ .mode = 0600,
66232+ .proc_handler = &proc_dointvec,
66233+ },
66234+#endif
66235+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66236+ {
66237+ .procname = "chroot_restrict_nice",
66238+ .data = &grsec_enable_chroot_nice,
66239+ .maxlen = sizeof(int),
66240+ .mode = 0600,
66241+ .proc_handler = &proc_dointvec,
66242+ },
66243+#endif
66244+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
66245+ {
66246+ .procname = "chroot_execlog",
66247+ .data = &grsec_enable_chroot_execlog,
66248+ .maxlen = sizeof(int),
66249+ .mode = 0600,
66250+ .proc_handler = &proc_dointvec,
66251+ },
66252+#endif
66253+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66254+ {
66255+ .procname = "chroot_caps",
66256+ .data = &grsec_enable_chroot_caps,
66257+ .maxlen = sizeof(int),
66258+ .mode = 0600,
66259+ .proc_handler = &proc_dointvec,
66260+ },
66261+#endif
66262+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
66263+ {
66264+ .procname = "chroot_deny_sysctl",
66265+ .data = &grsec_enable_chroot_sysctl,
66266+ .maxlen = sizeof(int),
66267+ .mode = 0600,
66268+ .proc_handler = &proc_dointvec,
66269+ },
66270+#endif
66271+#ifdef CONFIG_GRKERNSEC_TPE
66272+ {
66273+ .procname = "tpe",
66274+ .data = &grsec_enable_tpe,
66275+ .maxlen = sizeof(int),
66276+ .mode = 0600,
66277+ .proc_handler = &proc_dointvec,
66278+ },
66279+ {
66280+ .procname = "tpe_gid",
66281+ .data = &grsec_tpe_gid,
66282+ .maxlen = sizeof(int),
66283+ .mode = 0600,
66284+ .proc_handler = &proc_dointvec,
66285+ },
66286+#endif
66287+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66288+ {
66289+ .procname = "tpe_invert",
66290+ .data = &grsec_enable_tpe_invert,
66291+ .maxlen = sizeof(int),
66292+ .mode = 0600,
66293+ .proc_handler = &proc_dointvec,
66294+ },
66295+#endif
66296+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66297+ {
66298+ .procname = "tpe_restrict_all",
66299+ .data = &grsec_enable_tpe_all,
66300+ .maxlen = sizeof(int),
66301+ .mode = 0600,
66302+ .proc_handler = &proc_dointvec,
66303+ },
66304+#endif
66305+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66306+ {
66307+ .procname = "socket_all",
66308+ .data = &grsec_enable_socket_all,
66309+ .maxlen = sizeof(int),
66310+ .mode = 0600,
66311+ .proc_handler = &proc_dointvec,
66312+ },
66313+ {
66314+ .procname = "socket_all_gid",
66315+ .data = &grsec_socket_all_gid,
66316+ .maxlen = sizeof(int),
66317+ .mode = 0600,
66318+ .proc_handler = &proc_dointvec,
66319+ },
66320+#endif
66321+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66322+ {
66323+ .procname = "socket_client",
66324+ .data = &grsec_enable_socket_client,
66325+ .maxlen = sizeof(int),
66326+ .mode = 0600,
66327+ .proc_handler = &proc_dointvec,
66328+ },
66329+ {
66330+ .procname = "socket_client_gid",
66331+ .data = &grsec_socket_client_gid,
66332+ .maxlen = sizeof(int),
66333+ .mode = 0600,
66334+ .proc_handler = &proc_dointvec,
66335+ },
66336+#endif
66337+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66338+ {
66339+ .procname = "socket_server",
66340+ .data = &grsec_enable_socket_server,
66341+ .maxlen = sizeof(int),
66342+ .mode = 0600,
66343+ .proc_handler = &proc_dointvec,
66344+ },
66345+ {
66346+ .procname = "socket_server_gid",
66347+ .data = &grsec_socket_server_gid,
66348+ .maxlen = sizeof(int),
66349+ .mode = 0600,
66350+ .proc_handler = &proc_dointvec,
66351+ },
66352+#endif
66353+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
66354+ {
66355+ .procname = "audit_group",
66356+ .data = &grsec_enable_group,
66357+ .maxlen = sizeof(int),
66358+ .mode = 0600,
66359+ .proc_handler = &proc_dointvec,
66360+ },
66361+ {
66362+ .procname = "audit_gid",
66363+ .data = &grsec_audit_gid,
66364+ .maxlen = sizeof(int),
66365+ .mode = 0600,
66366+ .proc_handler = &proc_dointvec,
66367+ },
66368+#endif
66369+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66370+ {
66371+ .procname = "audit_chdir",
66372+ .data = &grsec_enable_chdir,
66373+ .maxlen = sizeof(int),
66374+ .mode = 0600,
66375+ .proc_handler = &proc_dointvec,
66376+ },
66377+#endif
66378+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66379+ {
66380+ .procname = "audit_mount",
66381+ .data = &grsec_enable_mount,
66382+ .maxlen = sizeof(int),
66383+ .mode = 0600,
66384+ .proc_handler = &proc_dointvec,
66385+ },
66386+#endif
66387+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66388+ {
66389+ .procname = "audit_textrel",
66390+ .data = &grsec_enable_audit_textrel,
66391+ .maxlen = sizeof(int),
66392+ .mode = 0600,
66393+ .proc_handler = &proc_dointvec,
66394+ },
66395+#endif
66396+#ifdef CONFIG_GRKERNSEC_DMESG
66397+ {
66398+ .procname = "dmesg",
66399+ .data = &grsec_enable_dmesg,
66400+ .maxlen = sizeof(int),
66401+ .mode = 0600,
66402+ .proc_handler = &proc_dointvec,
66403+ },
66404+#endif
66405+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66406+ {
66407+ .procname = "chroot_findtask",
66408+ .data = &grsec_enable_chroot_findtask,
66409+ .maxlen = sizeof(int),
66410+ .mode = 0600,
66411+ .proc_handler = &proc_dointvec,
66412+ },
66413+#endif
66414+#ifdef CONFIG_GRKERNSEC_RESLOG
66415+ {
66416+ .procname = "resource_logging",
66417+ .data = &grsec_resource_logging,
66418+ .maxlen = sizeof(int),
66419+ .mode = 0600,
66420+ .proc_handler = &proc_dointvec,
66421+ },
66422+#endif
66423+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66424+ {
66425+ .procname = "audit_ptrace",
66426+ .data = &grsec_enable_audit_ptrace,
66427+ .maxlen = sizeof(int),
66428+ .mode = 0600,
66429+ .proc_handler = &proc_dointvec,
66430+ },
66431+#endif
66432+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66433+ {
66434+ .procname = "harden_ptrace",
66435+ .data = &grsec_enable_harden_ptrace,
66436+ .maxlen = sizeof(int),
66437+ .mode = 0600,
66438+ .proc_handler = &proc_dointvec,
66439+ },
66440+#endif
66441+ {
66442+ .procname = "grsec_lock",
66443+ .data = &grsec_lock,
66444+ .maxlen = sizeof(int),
66445+ .mode = 0600,
66446+ .proc_handler = &proc_dointvec,
66447+ },
66448+#endif
66449+#ifdef CONFIG_GRKERNSEC_ROFS
66450+ {
66451+ .procname = "romount_protect",
66452+ .data = &grsec_enable_rofs,
66453+ .maxlen = sizeof(int),
66454+ .mode = 0600,
66455+ .proc_handler = &proc_dointvec_minmax,
66456+ .extra1 = &one,
66457+ .extra2 = &one,
66458+ },
66459+#endif
66460+ { }
66461+};
66462+#endif
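
Every knob above is registered mode 0600 under the grsecurity sysctl directory (conventionally visible as /proc/sys/kernel/grsecurity -- the exact mount point is an assumption here) and guarded by gr_handle_sysctl_mod(): once grsec_lock is written to 1, further writes into the directory fail with -EACCES until reboot, so locking is normally the last step of an init script. Note also that romount_protect uses proc_dointvec_minmax with extra1 == extra2 == &one, making 1 the only accepted value -- a one-way switch. A small user-space sketch:

    #include <stdio.h>

    /* Paths assume the tree is registered at the conventional location. */
    static int set_knob(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            set_knob("/proc/sys/kernel/grsecurity/deter_bruteforce", "1");
            set_knob("/proc/sys/kernel/grsecurity/tpe", "1");
            /* last: freeze the whole tree until reboot */
            return set_knob("/proc/sys/kernel/grsecurity/grsec_lock", "1");
    }
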
66463diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
66464new file mode 100644
66465index 0000000..0dc13c3
66466--- /dev/null
66467+++ b/grsecurity/grsec_time.c
66468@@ -0,0 +1,16 @@
66469+#include <linux/kernel.h>
66470+#include <linux/sched.h>
66471+#include <linux/grinternal.h>
66472+#include <linux/module.h>
66473+
66474+void
66475+gr_log_timechange(void)
66476+{
66477+#ifdef CONFIG_GRKERNSEC_TIME
66478+ if (grsec_enable_time)
66479+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
66480+#endif
66481+ return;
66482+}
66483+
66484+EXPORT_SYMBOL(gr_log_timechange);
66485diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
66486new file mode 100644
66487index 0000000..ee57dcf
66488--- /dev/null
66489+++ b/grsecurity/grsec_tpe.c
66490@@ -0,0 +1,73 @@
66491+#include <linux/kernel.h>
66492+#include <linux/sched.h>
66493+#include <linux/file.h>
66494+#include <linux/fs.h>
66495+#include <linux/grinternal.h>
66496+
66497+extern int gr_acl_tpe_check(void);
66498+
66499+int
66500+gr_tpe_allow(const struct file *file)
66501+{
66502+#ifdef CONFIG_GRKERNSEC
66503+	struct inode *inode = file->f_path.dentry->d_parent->d_inode;	/* NB: the containing directory's inode, not the file's */
66504+ const struct cred *cred = current_cred();
66505+ char *msg = NULL;
66506+ char *msg2 = NULL;
66507+
66508+ // never restrict root
66509+ if (gr_is_global_root(cred->uid))
66510+ return 1;
66511+
66512+ if (grsec_enable_tpe) {
66513+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66514+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
66515+ msg = "not being in trusted group";
66516+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
66517+ msg = "being in untrusted group";
66518+#else
66519+ if (in_group_p(grsec_tpe_gid))
66520+ msg = "being in untrusted group";
66521+#endif
66522+ }
66523+ if (!msg && gr_acl_tpe_check())
66524+ msg = "being in untrusted role";
66525+
66526+ // not in any affected group/role
66527+ if (!msg)
66528+ goto next_check;
66529+
66530+ if (gr_is_global_nonroot(inode->i_uid))
66531+ msg2 = "file in non-root-owned directory";
66532+ else if (inode->i_mode & S_IWOTH)
66533+ msg2 = "file in world-writable directory";
66534+ else if (inode->i_mode & S_IWGRP)
66535+ msg2 = "file in group-writable directory";
66536+
66537+ if (msg && msg2) {
66538+ char fullmsg[70] = {0};
66539+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
66540+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
66541+ return 0;
66542+ }
66543+ msg = NULL;
66544+next_check:
66545+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66546+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
66547+ return 1;
66548+
66549+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
66550+ msg = "directory not owned by user";
66551+ else if (inode->i_mode & S_IWOTH)
66552+ msg = "file in world-writable directory";
66553+ else if (inode->i_mode & S_IWGRP)
66554+ msg = "file in group-writable directory";
66555+
66556+ if (msg) {
66557+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
66558+ return 0;
66559+ }
66560+#endif
66561+#endif
66562+ return 1;
66563+}
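
gr_tpe_allow() is a two-stage test on the containing directory. Stage one applies only to untrusted subjects -- by TPE group membership, optionally inverted, or by RBAC role -- and denies execution from directories that are non-root-owned, group-writable, or world-writable, logging both reasons. Stage two (TPE_ALL) extends the directory test to every non-root user and additionally requires that a non-root-owned directory belong to the executing user. A user-space sketch of the directory test using stat(2); /tmp is just an illustrative path:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat dir;

            if (stat("/tmp", &dir))
                    return 1;
            if (dir.st_uid != 0)
                    puts("file in non-root-owned directory");
            else if (dir.st_mode & S_IWOTH)
                    puts("file in world-writable directory");
            else if (dir.st_mode & S_IWGRP)
                    puts("file in group-writable directory");
            else
                    puts("directory passes the TPE test");
            return 0;
    }
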
66564diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
66565new file mode 100644
66566index 0000000..9f7b1ac
66567--- /dev/null
66568+++ b/grsecurity/grsum.c
66569@@ -0,0 +1,61 @@
66570+#include <linux/err.h>
66571+#include <linux/kernel.h>
66572+#include <linux/sched.h>
66573+#include <linux/mm.h>
66574+#include <linux/scatterlist.h>
66575+#include <linux/crypto.h>
66576+#include <linux/gracl.h>
66577+
66578+
66579+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
66580+#error "crypto and sha256 must be built into the kernel"
66581+#endif
66582+
66583+int
66584+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
66585+{
66586+ char *p;
66587+ struct crypto_hash *tfm;
66588+ struct hash_desc desc;
66589+ struct scatterlist sg;
66590+ unsigned char temp_sum[GR_SHA_LEN];
66591+ volatile int retval = 0;
66592+ volatile int dummy = 0;
66593+ unsigned int i;
66594+
66595+ sg_init_table(&sg, 1);
66596+
66597+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
66598+ if (IS_ERR(tfm)) {
66599+ /* should never happen, since sha256 should be built in */
66600+ return 1;
66601+ }
66602+
66603+ desc.tfm = tfm;
66604+ desc.flags = 0;
66605+
66606+ crypto_hash_init(&desc);
66607+
66608+ p = salt;
66609+ sg_set_buf(&sg, p, GR_SALT_LEN);
66610+ crypto_hash_update(&desc, &sg, sg.length);
66611+
66612+ p = entry->pw;
66613+ sg_set_buf(&sg, p, strlen(p));
66614+
66615+ crypto_hash_update(&desc, &sg, sg.length);
66616+
66617+ crypto_hash_final(&desc, temp_sum);
66618+
66619+ memset(entry->pw, 0, GR_PW_LEN);
66620+
66621+ for (i = 0; i < GR_SHA_LEN; i++)
66622+ if (sum[i] != temp_sum[i])
66623+ retval = 1;
66624+ else
66625+			dummy = 1;	// waste a cycle so match and mismatch take the same time
66626+
66627+ crypto_free_hash(tfm);
66628+
66629+ return retval;
66630+}
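
The comparison loop above is deliberately constant-time: it never exits early, and the dummy assignment (kept alive by volatile) gives the match and mismatch branches comparable cost, so an attacker probing the RBAC password cannot infer how many leading bytes were right from response latency. The same idea as a stand-alone helper:

    /* Constant-time comparison: run time depends only on len, not on
     * where (or whether) the buffers differ. */
    static int ct_memcmp(const unsigned char *a, const unsigned char *b,
                         unsigned int len)
    {
            unsigned char diff = 0;
            unsigned int i;

            for (i = 0; i < len; i++)
                    diff |= a[i] ^ b[i];    /* accumulate, never branch */
            return diff != 0;               /* 0 = equal, 1 = different */
    }
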
66631diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
66632index 77ff547..181834f 100644
66633--- a/include/asm-generic/4level-fixup.h
66634+++ b/include/asm-generic/4level-fixup.h
66635@@ -13,8 +13,10 @@
66636 #define pmd_alloc(mm, pud, address) \
66637 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
66638 NULL: pmd_offset(pud, address))
66639+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
66640
66641 #define pud_alloc(mm, pgd, address) (pgd)
66642+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
66643 #define pud_offset(pgd, start) (pgd)
66644 #define pud_none(pud) 0
66645 #define pud_bad(pud) 0
66646diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
66647index b7babf0..04ad282 100644
66648--- a/include/asm-generic/atomic-long.h
66649+++ b/include/asm-generic/atomic-long.h
66650@@ -22,6 +22,12 @@
66651
66652 typedef atomic64_t atomic_long_t;
66653
66654+#ifdef CONFIG_PAX_REFCOUNT
66655+typedef atomic64_unchecked_t atomic_long_unchecked_t;
66656+#else
66657+typedef atomic64_t atomic_long_unchecked_t;
66658+#endif
66659+
66660 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
66661
66662 static inline long atomic_long_read(atomic_long_t *l)
66663@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66664 return (long)atomic64_read(v);
66665 }
66666
66667+#ifdef CONFIG_PAX_REFCOUNT
66668+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66669+{
66670+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66671+
66672+ return (long)atomic64_read_unchecked(v);
66673+}
66674+#endif
66675+
66676 static inline void atomic_long_set(atomic_long_t *l, long i)
66677 {
66678 atomic64_t *v = (atomic64_t *)l;
66679@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66680 atomic64_set(v, i);
66681 }
66682
66683+#ifdef CONFIG_PAX_REFCOUNT
66684+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66685+{
66686+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66687+
66688+ atomic64_set_unchecked(v, i);
66689+}
66690+#endif
66691+
66692 static inline void atomic_long_inc(atomic_long_t *l)
66693 {
66694 atomic64_t *v = (atomic64_t *)l;
66695@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66696 atomic64_inc(v);
66697 }
66698
66699+#ifdef CONFIG_PAX_REFCOUNT
66700+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66701+{
66702+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66703+
66704+ atomic64_inc_unchecked(v);
66705+}
66706+#endif
66707+
66708 static inline void atomic_long_dec(atomic_long_t *l)
66709 {
66710 atomic64_t *v = (atomic64_t *)l;
66711@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66712 atomic64_dec(v);
66713 }
66714
66715+#ifdef CONFIG_PAX_REFCOUNT
66716+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66717+{
66718+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66719+
66720+ atomic64_dec_unchecked(v);
66721+}
66722+#endif
66723+
66724 static inline void atomic_long_add(long i, atomic_long_t *l)
66725 {
66726 atomic64_t *v = (atomic64_t *)l;
66727@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66728 atomic64_add(i, v);
66729 }
66730
66731+#ifdef CONFIG_PAX_REFCOUNT
66732+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66733+{
66734+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66735+
66736+ atomic64_add_unchecked(i, v);
66737+}
66738+#endif
66739+
66740 static inline void atomic_long_sub(long i, atomic_long_t *l)
66741 {
66742 atomic64_t *v = (atomic64_t *)l;
66743@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66744 atomic64_sub(i, v);
66745 }
66746
66747+#ifdef CONFIG_PAX_REFCOUNT
66748+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66749+{
66750+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66751+
66752+ atomic64_sub_unchecked(i, v);
66753+}
66754+#endif
66755+
66756 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66757 {
66758 atomic64_t *v = (atomic64_t *)l;
66759@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66760 return (long)atomic64_add_return(i, v);
66761 }
66762
66763+#ifdef CONFIG_PAX_REFCOUNT
66764+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66765+{
66766+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66767+
66768+ return (long)atomic64_add_return_unchecked(i, v);
66769+}
66770+#endif
66771+
66772 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66773 {
66774 atomic64_t *v = (atomic64_t *)l;
66775@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66776 return (long)atomic64_inc_return(v);
66777 }
66778
66779+#ifdef CONFIG_PAX_REFCOUNT
66780+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66781+{
66782+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66783+
66784+ return (long)atomic64_inc_return_unchecked(v);
66785+}
66786+#endif
66787+
66788 static inline long atomic_long_dec_return(atomic_long_t *l)
66789 {
66790 atomic64_t *v = (atomic64_t *)l;
66791@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66792
66793 typedef atomic_t atomic_long_t;
66794
66795+#ifdef CONFIG_PAX_REFCOUNT
66796+typedef atomic_unchecked_t atomic_long_unchecked_t;
66797+#else
66798+typedef atomic_t atomic_long_unchecked_t;
66799+#endif
66800+
66801 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
66802 static inline long atomic_long_read(atomic_long_t *l)
66803 {
66804@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66805 return (long)atomic_read(v);
66806 }
66807
66808+#ifdef CONFIG_PAX_REFCOUNT
66809+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66810+{
66811+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66812+
66813+ return (long)atomic_read_unchecked(v);
66814+}
66815+#endif
66816+
66817 static inline void atomic_long_set(atomic_long_t *l, long i)
66818 {
66819 atomic_t *v = (atomic_t *)l;
66820@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66821 atomic_set(v, i);
66822 }
66823
66824+#ifdef CONFIG_PAX_REFCOUNT
66825+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66826+{
66827+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66828+
66829+ atomic_set_unchecked(v, i);
66830+}
66831+#endif
66832+
66833 static inline void atomic_long_inc(atomic_long_t *l)
66834 {
66835 atomic_t *v = (atomic_t *)l;
66836@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66837 atomic_inc(v);
66838 }
66839
66840+#ifdef CONFIG_PAX_REFCOUNT
66841+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66842+{
66843+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66844+
66845+ atomic_inc_unchecked(v);
66846+}
66847+#endif
66848+
66849 static inline void atomic_long_dec(atomic_long_t *l)
66850 {
66851 atomic_t *v = (atomic_t *)l;
66852@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66853 atomic_dec(v);
66854 }
66855
66856+#ifdef CONFIG_PAX_REFCOUNT
66857+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66858+{
66859+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66860+
66861+ atomic_dec_unchecked(v);
66862+}
66863+#endif
66864+
66865 static inline void atomic_long_add(long i, atomic_long_t *l)
66866 {
66867 atomic_t *v = (atomic_t *)l;
66868@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66869 atomic_add(i, v);
66870 }
66871
66872+#ifdef CONFIG_PAX_REFCOUNT
66873+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66874+{
66875+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66876+
66877+ atomic_add_unchecked(i, v);
66878+}
66879+#endif
66880+
66881 static inline void atomic_long_sub(long i, atomic_long_t *l)
66882 {
66883 atomic_t *v = (atomic_t *)l;
66884@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66885 atomic_sub(i, v);
66886 }
66887
66888+#ifdef CONFIG_PAX_REFCOUNT
66889+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66890+{
66891+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66892+
66893+ atomic_sub_unchecked(i, v);
66894+}
66895+#endif
66896+
66897 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66898 {
66899 atomic_t *v = (atomic_t *)l;
66900@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66901 return (long)atomic_add_return(i, v);
66902 }
66903
66904+#ifdef CONFIG_PAX_REFCOUNT
66905+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66906+{
66907+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66908+
66909+ return (long)atomic_add_return_unchecked(i, v);
66910+}
66911+
66912+#endif
66913+
66914 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66915 {
66916 atomic_t *v = (atomic_t *)l;
66917@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66918 return (long)atomic_inc_return(v);
66919 }
66920
66921+#ifdef CONFIG_PAX_REFCOUNT
66922+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66923+{
66924+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66925+
66926+ return (long)atomic_inc_return_unchecked(v);
66927+}
66928+#endif
66929+
66930 static inline long atomic_long_dec_return(atomic_long_t *l)
66931 {
66932 atomic_t *v = (atomic_t *)l;
66933@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66934
66935 #endif /* BITS_PER_LONG == 64 */
66936
66937+#ifdef CONFIG_PAX_REFCOUNT
66938+static inline void pax_refcount_needs_these_functions(void)
66939+{
66940+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
66941+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
66942+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
66943+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
66944+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
66945+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
66946+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
66947+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
66948+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
66949+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
66950+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
66951+#ifdef CONFIG_X86
66952+ atomic_clear_mask_unchecked(0, NULL);
66953+ atomic_set_mask_unchecked(0, NULL);
66954+#endif
66955+
66956+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
66957+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
66958+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
66959+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
66960+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
66961+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
66962+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
66963+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
66964+}
66965+#else
66966+#define atomic_read_unchecked(v) atomic_read(v)
66967+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
66968+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
66969+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
66970+#define atomic_inc_unchecked(v) atomic_inc(v)
66971+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
66972+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
66973+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
66974+#define atomic_dec_unchecked(v) atomic_dec(v)
66975+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
66976+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
66977+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
66978+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
66979+
66980+#define atomic_long_read_unchecked(v) atomic_long_read(v)
66981+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
66982+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
66983+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
66984+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
66985+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
66986+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
66987+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
66988+#endif
66989+
66990 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
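
Under PAX_REFCOUNT the plain atomic types gain overflow detection (an overflowing increment traps rather than wrapping into a use-after-free), so every counter that may legitimately wrap has to be migrated to the *_unchecked variants; without the option the unchecked names simply alias the normal operations, as the #else block shows, and pax_refcount_needs_these_functions() exists only so that an architecture that forgets to implement one of the unchecked primitives fails to build. The intended split, sketched in kernel style:

    /* Illustrative: reference counts stay checked, statistics may wrap. */
    struct example_obj {
            atomic_t                refcnt;  /* overflow is a bug: checked */
            atomic_unchecked_t      rx_pkts; /* free-running: may wrap */
    };

    static void example_rx(struct example_obj *obj)
    {
            atomic_inc(&obj->refcnt);               /* traps on overflow */
            atomic_inc_unchecked(&obj->rx_pkts);    /* wraps silently */
    }
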
66991diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
66992index 1ced641..c896ee8 100644
66993--- a/include/asm-generic/atomic.h
66994+++ b/include/asm-generic/atomic.h
66995@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
66996 * Atomically clears the bits set in @mask from @v
66997 */
66998 #ifndef atomic_clear_mask
66999-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
67000+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
67001 {
67002 unsigned long flags;
67003
67004diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
67005index b18ce4f..2ee2843 100644
67006--- a/include/asm-generic/atomic64.h
67007+++ b/include/asm-generic/atomic64.h
67008@@ -16,6 +16,8 @@ typedef struct {
67009 long long counter;
67010 } atomic64_t;
67011
67012+typedef atomic64_t atomic64_unchecked_t;
67013+
67014 #define ATOMIC64_INIT(i) { (i) }
67015
67016 extern long long atomic64_read(const atomic64_t *v);
67017@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
67018 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
67019 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
67020
67021+#define atomic64_read_unchecked(v) atomic64_read(v)
67022+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
67023+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
67024+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
67025+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
67026+#define atomic64_inc_unchecked(v) atomic64_inc(v)
67027+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
67028+#define atomic64_dec_unchecked(v) atomic64_dec(v)
67029+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
67030+
67031 #endif /* _ASM_GENERIC_ATOMIC64_H */
67032diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
67033index 1bfcfe5..e04c5c9 100644
67034--- a/include/asm-generic/cache.h
67035+++ b/include/asm-generic/cache.h
67036@@ -6,7 +6,7 @@
67037 * cache lines need to provide their own cache.h.
67038 */
67039
67040-#define L1_CACHE_SHIFT 5
67041-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
67042+#define L1_CACHE_SHIFT 5UL
67043+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
67044
67045 #endif /* __ASM_GENERIC_CACHE_H */
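
The UL suffix changes the type of L1_CACHE_SHIFT and L1_CACHE_BYTES from int to unsigned long, so alignment masks and size arithmetic built from them stay full-width on 64-bit targets instead of depending on int promotion and sign extension. A two-line demonstration of the type difference:

    #include <stdio.h>

    int main(void)
    {
            printf("int form: %zu bytes wide\n", sizeof(1 << 5));     /* 4 on LP64 */
            printf("UL form:  %zu bytes wide\n", sizeof(1UL << 5UL)); /* 8 on LP64 */
            return 0;
    }
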
67046diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
67047index 0d68a1e..b74a761 100644
67048--- a/include/asm-generic/emergency-restart.h
67049+++ b/include/asm-generic/emergency-restart.h
67050@@ -1,7 +1,7 @@
67051 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
67052 #define _ASM_GENERIC_EMERGENCY_RESTART_H
67053
67054-static inline void machine_emergency_restart(void)
67055+static inline __noreturn void machine_emergency_restart(void)
67056 {
67057 machine_restart(NULL);
67058 }
67059diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
67060index 90f99c7..00ce236 100644
67061--- a/include/asm-generic/kmap_types.h
67062+++ b/include/asm-generic/kmap_types.h
67063@@ -2,9 +2,9 @@
67064 #define _ASM_GENERIC_KMAP_TYPES_H
67065
67066 #ifdef __WITH_KM_FENCE
67067-# define KM_TYPE_NR 41
67068+# define KM_TYPE_NR 42
67069 #else
67070-# define KM_TYPE_NR 20
67071+# define KM_TYPE_NR 21
67072 #endif
67073
67074 #endif
67075diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
67076index 9ceb03b..62b0b8f 100644
67077--- a/include/asm-generic/local.h
67078+++ b/include/asm-generic/local.h
67079@@ -23,24 +23,37 @@ typedef struct
67080 atomic_long_t a;
67081 } local_t;
67082
67083+typedef struct {
67084+ atomic_long_unchecked_t a;
67085+} local_unchecked_t;
67086+
67087 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
67088
67089 #define local_read(l) atomic_long_read(&(l)->a)
67090+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
67091 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
67092+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
67093 #define local_inc(l) atomic_long_inc(&(l)->a)
67094+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
67095 #define local_dec(l) atomic_long_dec(&(l)->a)
67096+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
67097 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
67098+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
67099 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
67100+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
67101
67102 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
67103 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
67104 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
67105 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
67106 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
67107+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
67108 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
67109 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
67110+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
67111
67112 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67113+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))	/* cmpxchg cannot overflow, so the checked helper is reused */
67114 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
67115 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
67116 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
67117diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
67118index 725612b..9cc513a 100644
67119--- a/include/asm-generic/pgtable-nopmd.h
67120+++ b/include/asm-generic/pgtable-nopmd.h
67121@@ -1,14 +1,19 @@
67122 #ifndef _PGTABLE_NOPMD_H
67123 #define _PGTABLE_NOPMD_H
67124
67125-#ifndef __ASSEMBLY__
67126-
67127 #include <asm-generic/pgtable-nopud.h>
67128
67129-struct mm_struct;
67130-
67131 #define __PAGETABLE_PMD_FOLDED
67132
67133+#define PMD_SHIFT PUD_SHIFT
67134+#define PTRS_PER_PMD 1
67135+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
67136+#define PMD_MASK (~(PMD_SIZE-1))
67137+
67138+#ifndef __ASSEMBLY__
67139+
67140+struct mm_struct;
67141+
67142 /*
67143 * Having the pmd type consist of a pud gets the size right, and allows
67144 * us to conceptually access the pud entry that this pmd is folded into
67145@@ -16,11 +21,6 @@ struct mm_struct;
67146 */
67147 typedef struct { pud_t pud; } pmd_t;
67148
67149-#define PMD_SHIFT PUD_SHIFT
67150-#define PTRS_PER_PMD 1
67151-#define PMD_SIZE (1UL << PMD_SHIFT)
67152-#define PMD_MASK (~(PMD_SIZE-1))
67153-
67154 /*
67155 * The "pud_xxx()" functions here are trivial for a folded two-level
67156 * setup: the pmd is never bad, and a pmd always exists (as it's folded
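
The constants are hoisted outside the #ifndef __ASSEMBLY__ guard so assembly sources can use PMD_SHIFT/PMD_SIZE/PMD_MASK as well (the nopud header below gets the same treatment), and 1UL becomes _AC(1,UL) because the assembler does not understand C integer suffixes. _AC lives in include/linux/const.h and pastes the suffix only when compiling C -- paraphrased:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X               /* assembler sees a bare 1 */
    #else
    #define __AC(X, Y)      (X##Y)
    #define _AC(X, Y)       __AC(X, Y)      /* C sees 1UL */
    #endif
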
67157diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
67158index 810431d..0ec4804f 100644
67159--- a/include/asm-generic/pgtable-nopud.h
67160+++ b/include/asm-generic/pgtable-nopud.h
67161@@ -1,10 +1,15 @@
67162 #ifndef _PGTABLE_NOPUD_H
67163 #define _PGTABLE_NOPUD_H
67164
67165-#ifndef __ASSEMBLY__
67166-
67167 #define __PAGETABLE_PUD_FOLDED
67168
67169+#define PUD_SHIFT PGDIR_SHIFT
67170+#define PTRS_PER_PUD 1
67171+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
67172+#define PUD_MASK (~(PUD_SIZE-1))
67173+
67174+#ifndef __ASSEMBLY__
67175+
67176 /*
67177 * Having the pud type consist of a pgd gets the size right, and allows
67178 * us to conceptually access the pgd entry that this pud is folded into
67179@@ -12,11 +17,6 @@
67180 */
67181 typedef struct { pgd_t pgd; } pud_t;
67182
67183-#define PUD_SHIFT PGDIR_SHIFT
67184-#define PTRS_PER_PUD 1
67185-#define PUD_SIZE (1UL << PUD_SHIFT)
67186-#define PUD_MASK (~(PUD_SIZE-1))
67187-
67188 /*
67189 * The "pgd_xxx()" functions here are trivial for a folded two-level
67190 * setup: the pud is never bad, and a pud always exists (as it's folded
67191@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
67192 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
67193
67194 #define pgd_populate(mm, pgd, pud) do { } while (0)
67195+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
67196 /*
67197 * (puds are folded into pgds so this doesn't get actually called,
67198 * but the define is needed for a generic inline function.)
67199diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
67200index 5cf680a..4b74d62 100644
67201--- a/include/asm-generic/pgtable.h
67202+++ b/include/asm-generic/pgtable.h
67203@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
67204 }
67205 #endif /* CONFIG_NUMA_BALANCING */
67206
67207+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
67208+static inline unsigned long pax_open_kernel(void) { return 0; }
67209+#endif
67210+
67211+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
67212+static inline unsigned long pax_close_kernel(void) { return 0; }
67213+#endif
67214+
67215 #endif /* CONFIG_MMU */
67216
67217 #endif /* !__ASSEMBLY__ */
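
pax_open_kernel()/pax_close_kernel() are the write-enable bracket PaX puts around legitimate updates to data it has made read-only (constified ops tables, protected page tables, and so on). Architectures that enforce the protection override the pair -- on x86 this typically means toggling CR0.WP -- while the no-op stubs above keep the call sites free elsewhere. The usage pattern, sketched with an illustrative helper:

    /* Illustrative write to otherwise read-only kernel data. */
    static void example_patch_ro(int *ro_word, int val)
    {
            pax_open_kernel();      /* arch hook: permit the write */
            *ro_word = val;
            pax_close_kernel();     /* restore the protection */
    }
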
67218diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
67219index d1ea7ce..b1ebf2a 100644
67220--- a/include/asm-generic/vmlinux.lds.h
67221+++ b/include/asm-generic/vmlinux.lds.h
67222@@ -218,6 +218,7 @@
67223 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
67224 VMLINUX_SYMBOL(__start_rodata) = .; \
67225 *(.rodata) *(.rodata.*) \
67226+ *(.data..read_only) \
67227 *(__vermagic) /* Kernel version magic */ \
67228 . = ALIGN(8); \
67229 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
67230@@ -725,17 +726,18 @@
67231 * section in the linker script will go there too. @phdr should have
67232 * a leading colon.
67233 *
67234- * Note that this macros defines __per_cpu_load as an absolute symbol.
67235+ * Note that this macro defines per_cpu_load as an absolute symbol.
67236 * If there is no need to put the percpu section at a predetermined
67237 * address, use PERCPU_SECTION.
67238 */
67239 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
67240- VMLINUX_SYMBOL(__per_cpu_load) = .; \
67241- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
67242+ per_cpu_load = .; \
67243+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
67244 - LOAD_OFFSET) { \
67245+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
67246 PERCPU_INPUT(cacheline) \
67247 } phdr \
67248- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
67249+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
67250
67251 /**
67252 * PERCPU_SECTION - define output section for percpu area, simple version
67253diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
67254index 418d270..bfd2794 100644
67255--- a/include/crypto/algapi.h
67256+++ b/include/crypto/algapi.h
67257@@ -34,7 +34,7 @@ struct crypto_type {
67258 unsigned int maskclear;
67259 unsigned int maskset;
67260 unsigned int tfmsize;
67261-};
67262+} __do_const;
67263
67264 struct crypto_instance {
67265 struct crypto_alg alg;
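
This is the first of many constifications in the patch: under the PaX constify gcc plugin, __do_const makes every instance of the annotated type effectively const and lets it live in read-only memory (ops tables full of function pointers are the classic target), while __no_const, applied to other structures later in the patch, opts a type out when it genuinely must stay writable. Without the plugin both annotations expand to nothing; modifying a constified table at runtime then requires the pax_open_kernel() bracket shown above. Roughly how the annotations are defined, paraphrased:

    /* Paraphrased: with the constify plugin the attributes are real,
     * otherwise they vanish. */
    #ifdef CONSTIFY_PLUGIN
    #define __do_const      __attribute__((do_const))
    #define __no_const      __attribute__((no_const))
    #else
    #define __do_const
    #define __no_const
    #endif
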
67266diff --git a/include/drm/drmP.h b/include/drm/drmP.h
67267index fad21c9..ab858bc 100644
67268--- a/include/drm/drmP.h
67269+++ b/include/drm/drmP.h
67270@@ -72,6 +72,7 @@
67271 #include <linux/workqueue.h>
67272 #include <linux/poll.h>
67273 #include <asm/pgalloc.h>
67274+#include <asm/local.h>
67275 #include <drm/drm.h>
67276 #include <drm/drm_sarea.h>
67277
67278@@ -293,10 +294,12 @@ do { \
67279 * \param cmd command.
67280 * \param arg argument.
67281 */
67282-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
67283+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
67284+ struct drm_file *file_priv);
67285+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
67286 struct drm_file *file_priv);
67287
67288-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67289+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
67290 unsigned long arg);
67291
67292 #define DRM_IOCTL_NR(n) _IOC_NR(n)
67293@@ -311,9 +314,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67294 struct drm_ioctl_desc {
67295 unsigned int cmd;
67296 int flags;
67297- drm_ioctl_t *func;
67298+ drm_ioctl_t func;
67299 unsigned int cmd_drv;
67300-};
67301+} __do_const;
67302
67303 /**
67304 * Creates a driver or general drm_ioctl_desc array entry for the given
67305@@ -995,7 +998,7 @@ struct drm_info_list {
67306 int (*show)(struct seq_file*, void*); /** show callback */
67307 u32 driver_features; /**< Required driver features for this entry */
67308 void *data;
67309-};
67310+} __do_const;
67311
67312 /**
67313 * debugfs node structure. This structure represents a debugfs file.
67314@@ -1068,7 +1071,7 @@ struct drm_device {
67315
67316 /** \name Usage Counters */
67317 /*@{ */
67318- int open_count; /**< Outstanding files open */
67319+ local_t open_count; /**< Outstanding files open */
67320 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
67321 atomic_t vma_count; /**< Outstanding vma areas open */
67322 int buf_use; /**< Buffers in use -- cannot alloc */
67323@@ -1079,7 +1082,7 @@ struct drm_device {
67324 /*@{ */
67325 unsigned long counters;
67326 enum drm_stat_type types[15];
67327- atomic_t counts[15];
67328+ atomic_unchecked_t counts[15];
67329 /*@} */
67330
67331 struct list_head filelist;
67332diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
67333index f43d556..94d9343 100644
67334--- a/include/drm/drm_crtc_helper.h
67335+++ b/include/drm/drm_crtc_helper.h
67336@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
67337 struct drm_connector *connector);
67338 /* disable encoder when not in use - more explicit than dpms off */
67339 void (*disable)(struct drm_encoder *encoder);
67340-};
67341+} __no_const;
67342
67343 /**
67344 * drm_connector_helper_funcs - helper operations for connectors
67345diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
67346index 72dcbe8..8db58d7 100644
67347--- a/include/drm/ttm/ttm_memory.h
67348+++ b/include/drm/ttm/ttm_memory.h
67349@@ -48,7 +48,7 @@
67350
67351 struct ttm_mem_shrink {
67352 int (*do_shrink) (struct ttm_mem_shrink *);
67353-};
67354+} __no_const;
67355
67356 /**
67357 * struct ttm_mem_global - Global memory accounting structure.
67358diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
67359index 4b840e8..155d235 100644
67360--- a/include/keys/asymmetric-subtype.h
67361+++ b/include/keys/asymmetric-subtype.h
67362@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
67363 /* Verify the signature on a key of this subtype (optional) */
67364 int (*verify_signature)(const struct key *key,
67365 const struct public_key_signature *sig);
67366-};
67367+} __do_const;
67368
67369 /**
67370 * asymmetric_key_subtype - Get the subtype from an asymmetric key
67371diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
67372index c1da539..1dcec55 100644
67373--- a/include/linux/atmdev.h
67374+++ b/include/linux/atmdev.h
67375@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
67376 #endif
67377
67378 struct k_atm_aal_stats {
67379-#define __HANDLE_ITEM(i) atomic_t i
67380+#define __HANDLE_ITEM(i) atomic_unchecked_t i
67381 __AAL_STAT_ITEMS
67382 #undef __HANDLE_ITEM
67383 };
67384@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
67385 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
67386 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
67387 struct module *owner;
67388-};
67389+} __do_const;
67390
67391 struct atmphy_ops {
67392 int (*start)(struct atm_dev *dev);
67393diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
67394index 0530b98..96a8ac0 100644
67395--- a/include/linux/binfmts.h
67396+++ b/include/linux/binfmts.h
67397@@ -73,8 +73,9 @@ struct linux_binfmt {
67398 int (*load_binary)(struct linux_binprm *);
67399 int (*load_shlib)(struct file *);
67400 int (*core_dump)(struct coredump_params *cprm);
67401+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
67402 unsigned long min_coredump; /* minimal dump size */
67403-};
67404+} __do_const;
67405
67406 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
67407
67408diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
67409index f94bc83..62b9cfe 100644
67410--- a/include/linux/blkdev.h
67411+++ b/include/linux/blkdev.h
67412@@ -1498,7 +1498,7 @@ struct block_device_operations {
67413 /* this callback is with swap_lock and sometimes page table lock held */
67414 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
67415 struct module *owner;
67416-};
67417+} __do_const;
67418
67419 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
67420 unsigned long);
67421diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
67422index 7c2e030..b72475d 100644
67423--- a/include/linux/blktrace_api.h
67424+++ b/include/linux/blktrace_api.h
67425@@ -23,7 +23,7 @@ struct blk_trace {
67426 struct dentry *dir;
67427 struct dentry *dropped_file;
67428 struct dentry *msg_file;
67429- atomic_t dropped;
67430+ atomic_unchecked_t dropped;
67431 };
67432
67433 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
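
The atomic_t to atomic_unchecked_t conversions in this and the surrounding hunks opt statistics counters out of PAX_REFCOUNT's overflow detection: a dropped-event count may legitimately wrap, while a reference count must not. A minimal sketch of the unchecked API, assuming a grsecurity-patched asm/atomic.h where the _unchecked helpers mirror their checked counterparts (the identifiers below are illustrative):

static atomic_unchecked_t trace_dropped = ATOMIC_INIT(0);

static void note_drop(void)
{
        /* wrap-around here is harmless, so no saturation/oops on overflow */
        atomic_inc_unchecked(&trace_dropped);
}

static int drops_so_far(void)
{
        return atomic_read_unchecked(&trace_dropped);
}
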
67434diff --git a/include/linux/cache.h b/include/linux/cache.h
67435index 4c57065..4307975 100644
67436--- a/include/linux/cache.h
67437+++ b/include/linux/cache.h
67438@@ -16,6 +16,10 @@
67439 #define __read_mostly
67440 #endif
67441
67442+#ifndef __read_only
67443+#define __read_only __read_mostly
67444+#endif
67445+
67446 #ifndef ____cacheline_aligned
67447 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
67448 #endif
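
The __read_only fallback above lets hardened and vanilla configurations share one spelling: on a PaX/grsecurity kernel the macro is expected to place the object in a section made read-only after init, and elsewhere it degrades to __read_mostly. A hedged sketch (max_retries and should_retry are illustrative):

static unsigned int max_retries __read_only = 5;

static bool should_retry(unsigned int attempt)
{
        /* readers need nothing special; post-init writers on a PaX kernel
         * would have to go through pax_open_kernel()/pax_close_kernel() */
        return attempt < max_retries;
}
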
67449diff --git a/include/linux/capability.h b/include/linux/capability.h
67450index 98503b7..cc36d18 100644
67451--- a/include/linux/capability.h
67452+++ b/include/linux/capability.h
67453@@ -211,8 +211,13 @@ extern bool capable(int cap);
67454 extern bool ns_capable(struct user_namespace *ns, int cap);
67455 extern bool nsown_capable(int cap);
67456 extern bool inode_capable(const struct inode *inode, int cap);
67457+extern bool capable_nolog(int cap);
67458+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
67459+extern bool inode_capable_nolog(const struct inode *inode, int cap);
67460
67461 /* audit system wants to get cap info from files as well */
67462 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
67463
67464+extern int is_privileged_binary(const struct dentry *dentry);
67465+
67466 #endif /* !_LINUX_CAPABILITY_H */
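
The _nolog capability variants declared here behave like their standard counterparts but skip grsecurity's audit logging, which matters on paths that probe a capability and expect failure to be routine. A minimal sketch (may_override_quota is an illustrative name):

#include <linux/capability.h>

static bool may_override_quota(void)
{
        /* same decision as capable(), without generating an RBAC log entry */
        return capable_nolog(CAP_SYS_RESOURCE);
}
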
67467diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
67468index 8609d57..86e4d79 100644
67469--- a/include/linux/cdrom.h
67470+++ b/include/linux/cdrom.h
67471@@ -87,7 +87,6 @@ struct cdrom_device_ops {
67472
67473 /* driver specifications */
67474 const int capability; /* capability flags */
67475- int n_minors; /* number of active minor devices */
67476 /* handle uniform packets for scsi type devices (scsi,atapi) */
67477 int (*generic_packet) (struct cdrom_device_info *,
67478 struct packet_command *);
67479diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
67480index 42e55de..1cd0e66 100644
67481--- a/include/linux/cleancache.h
67482+++ b/include/linux/cleancache.h
67483@@ -31,7 +31,7 @@ struct cleancache_ops {
67484 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
67485 void (*invalidate_inode)(int, struct cleancache_filekey);
67486 void (*invalidate_fs)(int);
67487-};
67488+} __no_const;
67489
67490 extern struct cleancache_ops
67491 cleancache_register_ops(struct cleancache_ops *ops);
67492diff --git a/include/linux/compat.h b/include/linux/compat.h
67493index dec7e2d..45db13f 100644
67494--- a/include/linux/compat.h
67495+++ b/include/linux/compat.h
67496@@ -311,14 +311,14 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
67497 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
67498 int version, void __user *uptr);
67499 long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
67500- void __user *uptr);
67501+ void __user *uptr) __intentional_overflow(0);
67502 #else
67503 long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
67504 long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
67505 compat_ssize_t msgsz, int msgflg);
67506 long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
67507 compat_ssize_t msgsz, long msgtyp, int msgflg);
67508-long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
67509+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
67510 #endif
67511 long compat_sys_msgctl(int first, int second, void __user *uptr);
67512 long compat_sys_shmctl(int first, int second, void __user *uptr);
67513@@ -414,7 +414,7 @@ extern int compat_ptrace_request(struct task_struct *child,
67514 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
67515 compat_ulong_t addr, compat_ulong_t data);
67516 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67517- compat_long_t addr, compat_long_t data);
67518+ compat_ulong_t addr, compat_ulong_t data);
67519
67520 /*
67521 * epoll (fs/eventpoll.c) compat bits follow ...
67522diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
67523index 662fd1b..e801992 100644
67524--- a/include/linux/compiler-gcc4.h
67525+++ b/include/linux/compiler-gcc4.h
67526@@ -34,6 +34,21 @@
67527 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
67528
67529 #if __GNUC_MINOR__ >= 5
67530+
67531+#ifdef CONSTIFY_PLUGIN
67532+#define __no_const __attribute__((no_const))
67533+#define __do_const __attribute__((do_const))
67534+#endif
67535+
67536+#ifdef SIZE_OVERFLOW_PLUGIN
67537+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
67538+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
67539+#endif
67540+
67541+#ifdef LATENT_ENTROPY_PLUGIN
67542+#define __latent_entropy __attribute__((latent_entropy))
67543+#endif
67544+
67545 /*
67546 * Mark a position in code as unreachable. This can be used to
67547 * suppress control flow warnings after asm blocks that transfer
67548@@ -49,6 +64,11 @@
67549 #define __noclone __attribute__((__noclone__))
67550
67551 #endif
67552+
67553+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
67554+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
67555+#define __bos0(ptr) __bos((ptr), 0)
67556+#define __bos1(ptr) __bos((ptr), 1)
67557 #endif
67558
67559 #if __GNUC_MINOR__ >= 6
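
These attributes are the interface to the constify gcc plugin: structures consisting only of function pointers are constified automatically, __no_const opts a structure out when instances must stay writable, and __do_const forces the treatment onto structures that also carry data members, which is why so many ops structures throughout this patch gain the annotation. A sketch of both annotations on hypothetical structures:

struct frob_ops {
        int (*start)(void);
        void (*stop)(void);
        struct module *owner;
} __do_const;           /* data member present, constify anyway */

struct frob_poll_ops {
        int (*poll)(void);
} __no_const;           /* would be auto-constified, but callers patch .poll */
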
67560diff --git a/include/linux/compiler.h b/include/linux/compiler.h
67561index dd852b7..1ad5fba 100644
67562--- a/include/linux/compiler.h
67563+++ b/include/linux/compiler.h
67564@@ -5,11 +5,14 @@
67565
67566 #ifdef __CHECKER__
67567 # define __user __attribute__((noderef, address_space(1)))
67568+# define __force_user __force __user
67569 # define __kernel __attribute__((address_space(0)))
67570+# define __force_kernel __force __kernel
67571 # define __safe __attribute__((safe))
67572 # define __force __attribute__((force))
67573 # define __nocast __attribute__((nocast))
67574 # define __iomem __attribute__((noderef, address_space(2)))
67575+# define __force_iomem __force __iomem
67576 # define __must_hold(x) __attribute__((context(x,1,1)))
67577 # define __acquires(x) __attribute__((context(x,0,1)))
67578 # define __releases(x) __attribute__((context(x,1,0)))
67579@@ -17,20 +20,37 @@
67580 # define __release(x) __context__(x,-1)
67581 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
67582 # define __percpu __attribute__((noderef, address_space(3)))
67583+# define __force_percpu __force __percpu
67584 #ifdef CONFIG_SPARSE_RCU_POINTER
67585 # define __rcu __attribute__((noderef, address_space(4)))
67586+# define __force_rcu __force __rcu
67587 #else
67588 # define __rcu
67589+# define __force_rcu
67590 #endif
67591 extern void __chk_user_ptr(const volatile void __user *);
67592 extern void __chk_io_ptr(const volatile void __iomem *);
67593 #else
67594-# define __user
67595-# define __kernel
67596+# ifdef CHECKER_PLUGIN
67597+//# define __user
67598+//# define __force_user
67599+//# define __kernel
67600+//# define __force_kernel
67601+# else
67602+# ifdef STRUCTLEAK_PLUGIN
67603+# define __user __attribute__((user))
67604+# else
67605+# define __user
67606+# endif
67607+# define __force_user
67608+# define __kernel
67609+# define __force_kernel
67610+# endif
67611 # define __safe
67612 # define __force
67613 # define __nocast
67614 # define __iomem
67615+# define __force_iomem
67616 # define __chk_user_ptr(x) (void)0
67617 # define __chk_io_ptr(x) (void)0
67618 # define __builtin_warning(x, y...) (1)
67619@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
67620 # define __release(x) (void)0
67621 # define __cond_lock(x,c) (c)
67622 # define __percpu
67623+# define __force_percpu
67624 # define __rcu
67625+# define __force_rcu
67626 #endif
67627
67628 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
67629@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67630 # define __attribute_const__ /* unimplemented */
67631 #endif
67632
67633+#ifndef __no_const
67634+# define __no_const
67635+#endif
67636+
67637+#ifndef __do_const
67638+# define __do_const
67639+#endif
67640+
67641+#ifndef __size_overflow
67642+# define __size_overflow(...)
67643+#endif
67644+
67645+#ifndef __intentional_overflow
67646+# define __intentional_overflow(...)
67647+#endif
67648+
67649+#ifndef __latent_entropy
67650+# define __latent_entropy
67651+#endif
67652+
67653 /*
67654 * Tell gcc if a function is cold. The compiler will assume any path
67655 * directly leading to the call is unlikely.
67656@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67657 #define __cold
67658 #endif
67659
67660+#ifndef __alloc_size
67661+#define __alloc_size(...)
67662+#endif
67663+
67664+#ifndef __bos
67665+#define __bos(ptr, arg)
67666+#endif
67667+
67668+#ifndef __bos0
67669+#define __bos0(ptr)
67670+#endif
67671+
67672+#ifndef __bos1
67673+#define __bos1(ptr)
67674+#endif
67675+
67676 /* Simple shorthand for a section definition */
67677 #ifndef __section
67678 # define __section(S) __attribute__ ((__section__(#S)))
67679@@ -323,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67680 * use is to mediate communication between process-level code and irq/NMI
67681 * handlers, all running on the same CPU.
67682 */
67683-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
67684+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
67685+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
67686
67687 #endif /* __LINUX_COMPILER_H */
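
With ACCESS_ONCE() now going through a const-qualified lvalue, a stray store through it becomes a compile error, and intentional stores must be spelled ACCESS_ONCE_RW() so they stand out at review time. For example (shared_flag is illustrative):

static int shared_flag;

static int read_flag(void)
{
        return ACCESS_ONCE(shared_flag);        /* reads still work */
}

static void set_flag(void)
{
        ACCESS_ONCE_RW(shared_flag) = 1;        /* writes must be explicit */
        /* ACCESS_ONCE(shared_flag) = 1;           would no longer compile */
}
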
67688diff --git a/include/linux/completion.h b/include/linux/completion.h
67689index 51494e6..0fd1b61 100644
67690--- a/include/linux/completion.h
67691+++ b/include/linux/completion.h
67692@@ -78,13 +78,13 @@ static inline void init_completion(struct completion *x)
67693
67694 extern void wait_for_completion(struct completion *);
67695 extern int wait_for_completion_interruptible(struct completion *x);
67696-extern int wait_for_completion_killable(struct completion *x);
67697+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
67698 extern unsigned long wait_for_completion_timeout(struct completion *x,
67699 unsigned long timeout);
67700 extern long wait_for_completion_interruptible_timeout(
67701- struct completion *x, unsigned long timeout);
67702+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67703 extern long wait_for_completion_killable_timeout(
67704- struct completion *x, unsigned long timeout);
67705+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67706 extern bool try_wait_for_completion(struct completion *x);
67707 extern bool completion_done(struct completion *x);
67708
67709diff --git a/include/linux/configfs.h b/include/linux/configfs.h
67710index 34025df..d94bbbc 100644
67711--- a/include/linux/configfs.h
67712+++ b/include/linux/configfs.h
67713@@ -125,7 +125,7 @@ struct configfs_attribute {
67714 const char *ca_name;
67715 struct module *ca_owner;
67716 umode_t ca_mode;
67717-};
67718+} __do_const;
67719
67720 /*
67721 * Users often need to create attribute structures for their configurable
67722diff --git a/include/linux/cpu.h b/include/linux/cpu.h
67723index ce7a074..01ab8ac 100644
67724--- a/include/linux/cpu.h
67725+++ b/include/linux/cpu.h
67726@@ -115,7 +115,7 @@ enum {
67727 /* Need to know about CPUs going up/down? */
67728 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
67729 #define cpu_notifier(fn, pri) { \
67730- static struct notifier_block fn##_nb __cpuinitdata = \
67731+ static struct notifier_block fn##_nb = \
67732 { .notifier_call = fn, .priority = pri }; \
67733 register_cpu_notifier(&fn##_nb); \
67734 }
67735diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
67736index a55b88e..fba90c5 100644
67737--- a/include/linux/cpufreq.h
67738+++ b/include/linux/cpufreq.h
67739@@ -240,7 +240,7 @@ struct cpufreq_driver {
67740 int (*suspend) (struct cpufreq_policy *policy);
67741 int (*resume) (struct cpufreq_policy *policy);
67742 struct freq_attr **attr;
67743-};
67744+} __do_const;
67745
67746 /* flags */
67747
67748@@ -299,6 +299,7 @@ struct global_attr {
67749 ssize_t (*store)(struct kobject *a, struct attribute *b,
67750 const char *c, size_t count);
67751 };
67752+typedef struct global_attr __no_const global_attr_no_const;
67753
67754 #define define_one_global_ro(_name) \
67755 static struct global_attr _name = \
67756diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
67757index 24cd1037..20a63aae 100644
67758--- a/include/linux/cpuidle.h
67759+++ b/include/linux/cpuidle.h
67760@@ -54,7 +54,8 @@ struct cpuidle_state {
67761 int index);
67762
67763 int (*enter_dead) (struct cpuidle_device *dev, int index);
67764-};
67765+} __do_const;
67766+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
67767
67768 /* Idle State Flags */
67769 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
67770@@ -216,7 +217,7 @@ struct cpuidle_governor {
67771 void (*reflect) (struct cpuidle_device *dev, int index);
67772
67773 struct module *owner;
67774-};
67775+} __do_const;
67776
67777 #ifdef CONFIG_CPU_IDLE
67778
67779diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
67780index 0325602..5e9feff 100644
67781--- a/include/linux/cpumask.h
67782+++ b/include/linux/cpumask.h
67783@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67784 }
67785
67786 /* Valid inputs for n are -1 and 0. */
67787-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67788+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67789 {
67790 return n+1;
67791 }
67792
67793-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67794+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67795 {
67796 return n+1;
67797 }
67798
67799-static inline unsigned int cpumask_next_and(int n,
67800+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
67801 const struct cpumask *srcp,
67802 const struct cpumask *andp)
67803 {
67804@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67805 *
67806 * Returns >= nr_cpu_ids if no further cpus set.
67807 */
67808-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67809+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67810 {
67811 /* -1 is a legal arg here. */
67812 if (n != -1)
67813@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67814 *
67815 * Returns >= nr_cpu_ids if no further cpus unset.
67816 */
67817-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67818+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67819 {
67820 /* -1 is a legal arg here. */
67821 if (n != -1)
67822@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67823 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
67824 }
67825
67826-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
67827+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
67828 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
67829
67830 /**
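
The cpumask iterators take an int that may legitimately be -1 (the "before the first CPU" sentinel) and immediately compute n+1, so __intentional_overflow(-1) tells the size_overflow plugin that this wrap on the return path is deliberate. The standard calling pattern the annotation preserves:

static void list_online_cpus(void)
{
        int cpu = -1;   /* -1 is a legal starting argument */

        while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
                pr_info("cpu %d is online\n", cpu);
}
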
67831diff --git a/include/linux/cred.h b/include/linux/cred.h
67832index 04421e8..6bce4ef 100644
67833--- a/include/linux/cred.h
67834+++ b/include/linux/cred.h
67835@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
67836 static inline void validate_process_creds(void)
67837 {
67838 }
67839+static inline void validate_task_creds(struct task_struct *task)
67840+{
67841+}
67842 #endif
67843
67844 /**
67845diff --git a/include/linux/crypto.h b/include/linux/crypto.h
67846index b92eadf..b4ecdc1 100644
67847--- a/include/linux/crypto.h
67848+++ b/include/linux/crypto.h
67849@@ -373,7 +373,7 @@ struct cipher_tfm {
67850 const u8 *key, unsigned int keylen);
67851 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67852 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67853-};
67854+} __no_const;
67855
67856 struct hash_tfm {
67857 int (*init)(struct hash_desc *desc);
67858@@ -394,13 +394,13 @@ struct compress_tfm {
67859 int (*cot_decompress)(struct crypto_tfm *tfm,
67860 const u8 *src, unsigned int slen,
67861 u8 *dst, unsigned int *dlen);
67862-};
67863+} __no_const;
67864
67865 struct rng_tfm {
67866 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
67867 unsigned int dlen);
67868 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
67869-};
67870+} __no_const;
67871
67872 #define crt_ablkcipher crt_u.ablkcipher
67873 #define crt_aead crt_u.aead
67874diff --git a/include/linux/ctype.h b/include/linux/ctype.h
67875index 8acfe31..6ffccd63 100644
67876--- a/include/linux/ctype.h
67877+++ b/include/linux/ctype.h
67878@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
67879 * Fast implementation of tolower() for internal usage. Do not use in your
67880 * code.
67881 */
67882-static inline char _tolower(const char c)
67883+static inline unsigned char _tolower(const unsigned char c)
67884 {
67885 return c | 0x20;
67886 }
67887diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
67888index 7925bf0..d5143d2 100644
67889--- a/include/linux/decompress/mm.h
67890+++ b/include/linux/decompress/mm.h
67891@@ -77,7 +77,7 @@ static void free(void *where)
67892 * warnings when not needed (indeed large_malloc / large_free are not
67893 * needed by inflate */
67894
67895-#define malloc(a) kmalloc(a, GFP_KERNEL)
67896+#define malloc(a) kmalloc((a), GFP_KERNEL)
67897 #define free(a) kfree(a)
67898
67899 #define large_malloc(a) vmalloc(a)
67900diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
67901index e83ef39..33e0eb3 100644
67902--- a/include/linux/devfreq.h
67903+++ b/include/linux/devfreq.h
67904@@ -114,7 +114,7 @@ struct devfreq_governor {
67905 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
67906 int (*event_handler)(struct devfreq *devfreq,
67907 unsigned int event, void *data);
67908-};
67909+} __do_const;
67910
67911 /**
67912 * struct devfreq - Device devfreq structure
67913diff --git a/include/linux/device.h b/include/linux/device.h
67914index 43dcda9..7a1fb65 100644
67915--- a/include/linux/device.h
67916+++ b/include/linux/device.h
67917@@ -294,7 +294,7 @@ struct subsys_interface {
67918 struct list_head node;
67919 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
67920 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
67921-};
67922+} __do_const;
67923
67924 int subsys_interface_register(struct subsys_interface *sif);
67925 void subsys_interface_unregister(struct subsys_interface *sif);
67926@@ -474,7 +474,7 @@ struct device_type {
67927 void (*release)(struct device *dev);
67928
67929 const struct dev_pm_ops *pm;
67930-};
67931+} __do_const;
67932
67933 /* interface for exporting device attributes */
67934 struct device_attribute {
67935@@ -484,11 +484,12 @@ struct device_attribute {
67936 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
67937 const char *buf, size_t count);
67938 };
67939+typedef struct device_attribute __no_const device_attribute_no_const;
67940
67941 struct dev_ext_attribute {
67942 struct device_attribute attr;
67943 void *var;
67944-};
67945+} __do_const;
67946
67947 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
67948 char *buf);
67949diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
67950index 94af418..b1ca7a2 100644
67951--- a/include/linux/dma-mapping.h
67952+++ b/include/linux/dma-mapping.h
67953@@ -54,7 +54,7 @@ struct dma_map_ops {
67954 u64 (*get_required_mask)(struct device *dev);
67955 #endif
67956 int is_phys;
67957-};
67958+} __do_const;
67959
67960 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
67961
67962diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
67963index d3201e4..8281e63 100644
67964--- a/include/linux/dmaengine.h
67965+++ b/include/linux/dmaengine.h
67966@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
67967 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
67968 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
67969
67970-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67971+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67972 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
67973-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67974+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67975 struct dma_pinned_list *pinned_list, struct page *page,
67976 unsigned int offset, size_t len);
67977
67978diff --git a/include/linux/efi.h b/include/linux/efi.h
67979index 7a9498a..155713d 100644
67980--- a/include/linux/efi.h
67981+++ b/include/linux/efi.h
67982@@ -733,6 +733,7 @@ struct efivar_operations {
67983 efi_set_variable_t *set_variable;
67984 efi_query_variable_info_t *query_variable_info;
67985 };
67986+typedef struct efivar_operations __no_const efivar_operations_no_const;
67987
67988 struct efivars {
67989 /*
67990diff --git a/include/linux/elf.h b/include/linux/elf.h
67991index 8c9048e..16a4665 100644
67992--- a/include/linux/elf.h
67993+++ b/include/linux/elf.h
67994@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
67995 #define elf_note elf32_note
67996 #define elf_addr_t Elf32_Off
67997 #define Elf_Half Elf32_Half
67998+#define elf_dyn Elf32_Dyn
67999
68000 #else
68001
68002@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
68003 #define elf_note elf64_note
68004 #define elf_addr_t Elf64_Off
68005 #define Elf_Half Elf64_Half
68006+#define elf_dyn Elf64_Dyn
68007
68008 #endif
68009
68010diff --git a/include/linux/err.h b/include/linux/err.h
68011index f2edce2..cc2082c 100644
68012--- a/include/linux/err.h
68013+++ b/include/linux/err.h
68014@@ -19,12 +19,12 @@
68015
68016 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
68017
68018-static inline void * __must_check ERR_PTR(long error)
68019+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
68020 {
68021 return (void *) error;
68022 }
68023
68024-static inline long __must_check PTR_ERR(const void *ptr)
68025+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
68026 {
68027 return (long) ptr;
68028 }
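
ERR_PTR()/PTR_ERR() deliberately launder a negative errno through a pointer and back, exactly the kind of signed/unsigned round trip the size_overflow plugin would otherwise flag, so __intentional_overflow(-1) whitelists it. The idiom being preserved (open_slot and slots are illustrative):

static int slots[4];

static void *open_slot(int id)
{
        if (id < 0 || id >= 4)
                return ERR_PTR(-EINVAL);        /* errno encoded as pointer */
        return &slots[id];
}

static long demo(void)
{
        void *p = open_slot(-1);

        if (IS_ERR(p))
                return PTR_ERR(p);              /* decodes back to -EINVAL */
        return 0;
}
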
68029diff --git a/include/linux/extcon.h b/include/linux/extcon.h
68030index fcb51c8..bdafcf6 100644
68031--- a/include/linux/extcon.h
68032+++ b/include/linux/extcon.h
68033@@ -134,7 +134,7 @@ struct extcon_dev {
68034 /* /sys/class/extcon/.../mutually_exclusive/... */
68035 struct attribute_group attr_g_muex;
68036 struct attribute **attrs_muex;
68037- struct device_attribute *d_attrs_muex;
68038+ device_attribute_no_const *d_attrs_muex;
68039 };
68040
68041 /**
68042diff --git a/include/linux/fb.h b/include/linux/fb.h
68043index c7a9571..02eeffe 100644
68044--- a/include/linux/fb.h
68045+++ b/include/linux/fb.h
68046@@ -302,7 +302,7 @@ struct fb_ops {
68047 /* called at KDB enter and leave time to prepare the console */
68048 int (*fb_debug_enter)(struct fb_info *info);
68049 int (*fb_debug_leave)(struct fb_info *info);
68050-};
68051+} __do_const;
68052
68053 #ifdef CONFIG_FB_TILEBLITTING
68054 #define FB_TILE_CURSOR_NONE 0
68055diff --git a/include/linux/filter.h b/include/linux/filter.h
68056index c45eabc..baa0be5 100644
68057--- a/include/linux/filter.h
68058+++ b/include/linux/filter.h
68059@@ -20,6 +20,7 @@ struct compat_sock_fprog {
68060
68061 struct sk_buff;
68062 struct sock;
68063+struct bpf_jit_work;
68064
68065 struct sk_filter
68066 {
68067@@ -27,6 +28,9 @@ struct sk_filter
68068 unsigned int len; /* Number of filter blocks */
68069 unsigned int (*bpf_func)(const struct sk_buff *skb,
68070 const struct sock_filter *filter);
68071+#ifdef CONFIG_BPF_JIT
68072+ struct bpf_jit_work *work;
68073+#endif
68074 struct rcu_head rcu;
68075 struct sock_filter insns[0];
68076 };
68077diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
68078index 3044254..9767f41 100644
68079--- a/include/linux/frontswap.h
68080+++ b/include/linux/frontswap.h
68081@@ -11,7 +11,7 @@ struct frontswap_ops {
68082 int (*load)(unsigned, pgoff_t, struct page *);
68083 void (*invalidate_page)(unsigned, pgoff_t);
68084 void (*invalidate_area)(unsigned);
68085-};
68086+} __no_const;
68087
68088 extern bool frontswap_enabled;
68089 extern struct frontswap_ops
68090diff --git a/include/linux/fs.h b/include/linux/fs.h
68091index 7617ee0..b575199 100644
68092--- a/include/linux/fs.h
68093+++ b/include/linux/fs.h
68094@@ -1541,7 +1541,8 @@ struct file_operations {
68095 long (*fallocate)(struct file *file, int mode, loff_t offset,
68096 loff_t len);
68097 int (*show_fdinfo)(struct seq_file *m, struct file *f);
68098-};
68099+} __do_const;
68100+typedef struct file_operations __no_const file_operations_no_const;
68101
68102 struct inode_operations {
68103 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
68104@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
68105 inode->i_flags |= S_NOSEC;
68106 }
68107
68108+static inline bool is_sidechannel_device(const struct inode *inode)
68109+{
68110+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
68111+ umode_t mode = inode->i_mode;
68112+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
68113+#else
68114+ return false;
68115+#endif
68116+}
68117+
68118 #endif /* _LINUX_FS_H */
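
is_sidechannel_device() classifies an inode as a potential side channel when it is a character or block device that other users can read or write; the fsnotify hunks below use it to suppress access/modify events on such devices. The test reduces to a mode check, applied by hand here on illustrative modes:

static bool demo_modes(void)
{
        umode_t pty  = S_IFCHR | 0666;  /* world-rw char device: side channel */
        umode_t disk = S_IFBLK | 0600;  /* root-only block device: not one    */

        return  (S_ISCHR(pty)  && (pty  & (S_IROTH | S_IWOTH))) &&
               !(S_ISBLK(disk) && (disk & (S_IROTH | S_IWOTH)));
}
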
68119diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
68120index 324f931..f292b65 100644
68121--- a/include/linux/fs_struct.h
68122+++ b/include/linux/fs_struct.h
68123@@ -6,7 +6,7 @@
68124 #include <linux/seqlock.h>
68125
68126 struct fs_struct {
68127- int users;
68128+ atomic_t users;
68129 spinlock_t lock;
68130 seqcount_t seq;
68131 int umask;
68132diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
68133index 5dfa0aa..6acf322 100644
68134--- a/include/linux/fscache-cache.h
68135+++ b/include/linux/fscache-cache.h
68136@@ -112,7 +112,7 @@ struct fscache_operation {
68137 fscache_operation_release_t release;
68138 };
68139
68140-extern atomic_t fscache_op_debug_id;
68141+extern atomic_unchecked_t fscache_op_debug_id;
68142 extern void fscache_op_work_func(struct work_struct *work);
68143
68144 extern void fscache_enqueue_operation(struct fscache_operation *);
68145@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
68146 INIT_WORK(&op->work, fscache_op_work_func);
68147 atomic_set(&op->usage, 1);
68148 op->state = FSCACHE_OP_ST_INITIALISED;
68149- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
68150+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
68151 op->processor = processor;
68152 op->release = release;
68153 INIT_LIST_HEAD(&op->pend_link);
68154diff --git a/include/linux/fscache.h b/include/linux/fscache.h
68155index 7a08623..4c07b0f 100644
68156--- a/include/linux/fscache.h
68157+++ b/include/linux/fscache.h
68158@@ -152,7 +152,7 @@ struct fscache_cookie_def {
68159 * - this is mandatory for any object that may have data
68160 */
68161 void (*now_uncached)(void *cookie_netfs_data);
68162-};
68163+} __do_const;
68164
68165 /*
68166 * fscache cached network filesystem type
68167diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
68168index 0fbfb46..508eb0d 100644
68169--- a/include/linux/fsnotify.h
68170+++ b/include/linux/fsnotify.h
68171@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
68172 struct inode *inode = path->dentry->d_inode;
68173 __u32 mask = FS_ACCESS;
68174
68175+ if (is_sidechannel_device(inode))
68176+ return;
68177+
68178 if (S_ISDIR(inode->i_mode))
68179 mask |= FS_ISDIR;
68180
68181@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
68182 struct inode *inode = path->dentry->d_inode;
68183 __u32 mask = FS_MODIFY;
68184
68185+ if (is_sidechannel_device(inode))
68186+ return;
68187+
68188 if (S_ISDIR(inode->i_mode))
68189 mask |= FS_ISDIR;
68190
68191@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
68192 */
68193 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
68194 {
68195- return kstrdup(name, GFP_KERNEL);
68196+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
68197 }
68198
68199 /*
68200diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
68201index a3d4895..ddd2a50 100644
68202--- a/include/linux/ftrace_event.h
68203+++ b/include/linux/ftrace_event.h
68204@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
68205 extern int trace_add_event_call(struct ftrace_event_call *call);
68206 extern void trace_remove_event_call(struct ftrace_event_call *call);
68207
68208-#define is_signed_type(type) (((type)(-1)) < 0)
68209+#define is_signed_type(type) (((type)(-1)) < (type)1)
68210
68211 int trace_set_clr_event(const char *system, const char *event, int set);
68212
68213diff --git a/include/linux/genhd.h b/include/linux/genhd.h
68214index 79b8bba..86b539e 100644
68215--- a/include/linux/genhd.h
68216+++ b/include/linux/genhd.h
68217@@ -194,7 +194,7 @@ struct gendisk {
68218 struct kobject *slave_dir;
68219
68220 struct timer_rand_state *random;
68221- atomic_t sync_io; /* RAID */
68222+ atomic_unchecked_t sync_io; /* RAID */
68223 struct disk_events *ev;
68224 #ifdef CONFIG_BLK_DEV_INTEGRITY
68225 struct blk_integrity *integrity;
68226diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
68227index 023bc34..b02b46a 100644
68228--- a/include/linux/genl_magic_func.h
68229+++ b/include/linux/genl_magic_func.h
68230@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
68231 },
68232
68233 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
68234-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
68235+static struct genl_ops ZZZ_genl_ops[] = {
68236 #include GENL_MAGIC_INCLUDE_FILE
68237 };
68238
68239diff --git a/include/linux/gfp.h b/include/linux/gfp.h
68240index 0f615eb..5c3832f 100644
68241--- a/include/linux/gfp.h
68242+++ b/include/linux/gfp.h
68243@@ -35,6 +35,13 @@ struct vm_area_struct;
68244 #define ___GFP_NO_KSWAPD 0x400000u
68245 #define ___GFP_OTHER_NODE 0x800000u
68246 #define ___GFP_WRITE 0x1000000u
68247+
68248+#ifdef CONFIG_PAX_USERCOPY_SLABS
68249+#define ___GFP_USERCOPY 0x2000000u
68250+#else
68251+#define ___GFP_USERCOPY 0
68252+#endif
68253+
68254 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
68255
68256 /*
68257@@ -92,6 +99,7 @@ struct vm_area_struct;
68258 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
68259 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
68260 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
68261+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
68262
68263 /*
68264 * This may seem redundant, but it's a way of annotating false positives vs.
68265@@ -99,7 +107,7 @@ struct vm_area_struct;
68266 */
68267 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
68268
68269-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
68270+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
68271 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
68272
68273 /* This equals 0, but use constants in case they ever change */
68274@@ -153,6 +161,8 @@ struct vm_area_struct;
68275 /* 4GB DMA on some platforms */
68276 #define GFP_DMA32 __GFP_DMA32
68277
68278+#define GFP_USERCOPY __GFP_USERCOPY
68279+
68280 /* Convert GFP flags to their corresponding migrate type */
68281 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
68282 {
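
___GFP_USERCOPY steers an allocation into dedicated user-copy slab caches when PAX_USERCOPY_SLABS is enabled, segregating and size-checking buffers that cross the user/kernel boundary; on other configurations the flag is defined to 0 and vanishes. A hedged sketch of the intended call pattern (read_blob and fill_blob are illustrative):

static long read_blob(void __user *ubuf, size_t len)
{
        void *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
        long err = 0;

        if (!buf)
                return -ENOMEM;
        fill_blob(buf, len);                    /* hypothetical producer */
        if (copy_to_user(ubuf, buf, len))       /* the copy PAX_USERCOPY bounds */
                err = -EFAULT;
        kfree(buf);
        return err;
}
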
68283diff --git a/include/linux/gracl.h b/include/linux/gracl.h
68284new file mode 100644
68285index 0000000..ebe6d72
68286--- /dev/null
68287+++ b/include/linux/gracl.h
68288@@ -0,0 +1,319 @@
68289+#ifndef GR_ACL_H
68290+#define GR_ACL_H
68291+
68292+#include <linux/grdefs.h>
68293+#include <linux/resource.h>
68294+#include <linux/capability.h>
68295+#include <linux/dcache.h>
68296+#include <asm/resource.h>
68297+
68298+/* Major status information */
68299+
68300+#define GR_VERSION "grsecurity 2.9.1"
68301+#define GRSECURITY_VERSION 0x2901
68302+
68303+enum {
68304+ GR_SHUTDOWN = 0,
68305+ GR_ENABLE = 1,
68306+ GR_SPROLE = 2,
68307+ GR_RELOAD = 3,
68308+ GR_SEGVMOD = 4,
68309+ GR_STATUS = 5,
68310+ GR_UNSPROLE = 6,
68311+ GR_PASSSET = 7,
68312+ GR_SPROLEPAM = 8,
68313+};
68314+
68315+/* Password setup definitions
68316+ * kernel/grhash.c */
68317+enum {
68318+ GR_PW_LEN = 128,
68319+ GR_SALT_LEN = 16,
68320+ GR_SHA_LEN = 32,
68321+};
68322+
68323+enum {
68324+ GR_SPROLE_LEN = 64,
68325+};
68326+
68327+enum {
68328+ GR_NO_GLOB = 0,
68329+ GR_REG_GLOB,
68330+ GR_CREATE_GLOB
68331+};
68332+
68333+#define GR_NLIMITS 32
68334+
68335+/* Begin Data Structures */
68336+
68337+struct sprole_pw {
68338+ unsigned char *rolename;
68339+ unsigned char salt[GR_SALT_LEN];
68340+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
68341+};
68342+
68343+struct name_entry {
68344+ __u32 key;
68345+ ino_t inode;
68346+ dev_t device;
68347+ char *name;
68348+ __u16 len;
68349+ __u8 deleted;
68350+ struct name_entry *prev;
68351+ struct name_entry *next;
68352+};
68353+
68354+struct inodev_entry {
68355+ struct name_entry *nentry;
68356+ struct inodev_entry *prev;
68357+ struct inodev_entry *next;
68358+};
68359+
68360+struct acl_role_db {
68361+ struct acl_role_label **r_hash;
68362+ __u32 r_size;
68363+};
68364+
68365+struct inodev_db {
68366+ struct inodev_entry **i_hash;
68367+ __u32 i_size;
68368+};
68369+
68370+struct name_db {
68371+ struct name_entry **n_hash;
68372+ __u32 n_size;
68373+};
68374+
68375+struct crash_uid {
68376+ uid_t uid;
68377+ unsigned long expires;
68378+};
68379+
68380+struct gr_hash_struct {
68381+ void **table;
68382+ void **nametable;
68383+ void *first;
68384+ __u32 table_size;
68385+ __u32 used_size;
68386+ int type;
68387+};
68388+
68389+/* Userspace Grsecurity ACL data structures */
68390+
68391+struct acl_subject_label {
68392+ char *filename;
68393+ ino_t inode;
68394+ dev_t device;
68395+ __u32 mode;
68396+ kernel_cap_t cap_mask;
68397+ kernel_cap_t cap_lower;
68398+ kernel_cap_t cap_invert_audit;
68399+
68400+ struct rlimit res[GR_NLIMITS];
68401+ __u32 resmask;
68402+
68403+ __u8 user_trans_type;
68404+ __u8 group_trans_type;
68405+ uid_t *user_transitions;
68406+ gid_t *group_transitions;
68407+ __u16 user_trans_num;
68408+ __u16 group_trans_num;
68409+
68410+ __u32 sock_families[2];
68411+ __u32 ip_proto[8];
68412+ __u32 ip_type;
68413+ struct acl_ip_label **ips;
68414+ __u32 ip_num;
68415+ __u32 inaddr_any_override;
68416+
68417+ __u32 crashes;
68418+ unsigned long expires;
68419+
68420+ struct acl_subject_label *parent_subject;
68421+ struct gr_hash_struct *hash;
68422+ struct acl_subject_label *prev;
68423+ struct acl_subject_label *next;
68424+
68425+ struct acl_object_label **obj_hash;
68426+ __u32 obj_hash_size;
68427+ __u16 pax_flags;
68428+};
68429+
68430+struct role_allowed_ip {
68431+ __u32 addr;
68432+ __u32 netmask;
68433+
68434+ struct role_allowed_ip *prev;
68435+ struct role_allowed_ip *next;
68436+};
68437+
68438+struct role_transition {
68439+ char *rolename;
68440+
68441+ struct role_transition *prev;
68442+ struct role_transition *next;
68443+};
68444+
68445+struct acl_role_label {
68446+ char *rolename;
68447+ uid_t uidgid;
68448+ __u16 roletype;
68449+
68450+ __u16 auth_attempts;
68451+ unsigned long expires;
68452+
68453+ struct acl_subject_label *root_label;
68454+ struct gr_hash_struct *hash;
68455+
68456+ struct acl_role_label *prev;
68457+ struct acl_role_label *next;
68458+
68459+ struct role_transition *transitions;
68460+ struct role_allowed_ip *allowed_ips;
68461+ uid_t *domain_children;
68462+ __u16 domain_child_num;
68463+
68464+ umode_t umask;
68465+
68466+ struct acl_subject_label **subj_hash;
68467+ __u32 subj_hash_size;
68468+};
68469+
68470+struct user_acl_role_db {
68471+ struct acl_role_label **r_table;
68472+ __u32 num_pointers; /* Number of allocations to track */
68473+ __u32 num_roles; /* Number of roles */
68474+ __u32 num_domain_children; /* Number of domain children */
68475+ __u32 num_subjects; /* Number of subjects */
68476+ __u32 num_objects; /* Number of objects */
68477+};
68478+
68479+struct acl_object_label {
68480+ char *filename;
68481+ ino_t inode;
68482+ dev_t device;
68483+ __u32 mode;
68484+
68485+ struct acl_subject_label *nested;
68486+ struct acl_object_label *globbed;
68487+
68488+ /* next two structures not used */
68489+
68490+ struct acl_object_label *prev;
68491+ struct acl_object_label *next;
68492+};
68493+
68494+struct acl_ip_label {
68495+ char *iface;
68496+ __u32 addr;
68497+ __u32 netmask;
68498+ __u16 low, high;
68499+ __u8 mode;
68500+ __u32 type;
68501+ __u32 proto[8];
68502+
68503+ /* next two structures not used */
68504+
68505+ struct acl_ip_label *prev;
68506+ struct acl_ip_label *next;
68507+};
68508+
68509+struct gr_arg {
68510+ struct user_acl_role_db role_db;
68511+ unsigned char pw[GR_PW_LEN];
68512+ unsigned char salt[GR_SALT_LEN];
68513+ unsigned char sum[GR_SHA_LEN];
68514+ unsigned char sp_role[GR_SPROLE_LEN];
68515+ struct sprole_pw *sprole_pws;
68516+ dev_t segv_device;
68517+ ino_t segv_inode;
68518+ uid_t segv_uid;
68519+ __u16 num_sprole_pws;
68520+ __u16 mode;
68521+};
68522+
68523+struct gr_arg_wrapper {
68524+ struct gr_arg *arg;
68525+ __u32 version;
68526+ __u32 size;
68527+};
68528+
68529+struct subject_map {
68530+ struct acl_subject_label *user;
68531+ struct acl_subject_label *kernel;
68532+ struct subject_map *prev;
68533+ struct subject_map *next;
68534+};
68535+
68536+struct acl_subj_map_db {
68537+ struct subject_map **s_hash;
68538+ __u32 s_size;
68539+};
68540+
68541+/* End Data Structures Section */
68542+
68543+/* Hash functions generated by empirical testing by Brad Spengler.
68544+   They make good use of the low bits of the inode: typically 0-1 loop
68545+   iterations for a successful match and 0-3 for an unsuccessful one.
68546+   Shift/add algorithm with modulus of table size and an XOR. */
68547+
68548+static __inline__ unsigned int
68549+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
68550+{
68551+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
68552+}
68553+
68554+ static __inline__ unsigned int
68555+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
68556+{
68557+ return ((const unsigned long)userp % sz);
68558+}
68559+
68560+static __inline__ unsigned int
68561+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
68562+{
68563+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
68564+}
68565+
68566+static __inline__ unsigned int
68567+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
68568+{
68569+ return full_name_hash((const unsigned char *)name, len) % sz;
68570+}
68571+
68572+#define FOR_EACH_ROLE_START(role) \
68573+ role = role_list; \
68574+ while (role) {
68575+
68576+#define FOR_EACH_ROLE_END(role) \
68577+ role = role->prev; \
68578+ }
68579+
68580+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
68581+ subj = NULL; \
68582+ iter = 0; \
68583+ while (iter < role->subj_hash_size) { \
68584+ if (subj == NULL) \
68585+ subj = role->subj_hash[iter]; \
68586+ if (subj == NULL) { \
68587+ iter++; \
68588+ continue; \
68589+ }
68590+
68591+#define FOR_EACH_SUBJECT_END(subj,iter) \
68592+ subj = subj->next; \
68593+ if (subj == NULL) \
68594+ iter++; \
68595+ }
68596+
68597+
68598+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
68599+ subj = role->hash->first; \
68600+ while (subj != NULL) {
68601+
68602+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
68603+ subj = subj->next; \
68604+ }
68605+
68606+#endif
68607+
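
The FOR_EACH_* macros open and close unbalanced braces across each pair, so every START must be matched by its END at the same nesting level. A minimal sketch of walking every subject of every role (count_subjects is illustrative; role_list is assumed to be the RBAC core's global role list, with the appropriate lock held by the caller):

static unsigned long count_subjects(void)
{
        struct acl_role_label *role;
        struct acl_subject_label *subj;
        unsigned int iter;
        unsigned long total = 0;

        FOR_EACH_ROLE_START(role)
                FOR_EACH_SUBJECT_START(role, subj, iter)
                        total++;        /* visits each hashed subject once */
                FOR_EACH_SUBJECT_END(subj, iter)
        FOR_EACH_ROLE_END(role)

        return total;
}

Note that a bare continue inside the body would skip the pointer advancement in FOR_EACH_SUBJECT_END and loop forever; the macros expect straight-line bodies.
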
68608diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
68609new file mode 100644
68610index 0000000..323ecf2
68611--- /dev/null
68612+++ b/include/linux/gralloc.h
68613@@ -0,0 +1,9 @@
68614+#ifndef __GRALLOC_H
68615+#define __GRALLOC_H
68616+
68617+void acl_free_all(void);
68618+int acl_alloc_stack_init(unsigned long size);
68619+void *acl_alloc(unsigned long len);
68620+void *acl_alloc_num(unsigned long num, unsigned long len);
68621+
68622+#endif
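
These four functions form a one-shot arena allocator for RBAC policy: the backing stack is sized once per policy load, objects are carved from it with acl_alloc()/acl_alloc_num(), and everything is released together by acl_free_all(). A hedged sketch of the call pattern, assuming the nonzero-on-success convention of acl_alloc_stack_init() (load_policy is illustrative):

static int load_policy(unsigned long num_pointers)
{
        struct acl_role_label *role;

        if (!acl_alloc_stack_init(num_pointers))
                return -ENOMEM;

        role = acl_alloc(sizeof(*role));
        if (role == NULL) {
                acl_free_all();         /* tears down the whole arena */
                return -ENOMEM;
        }
        /* ... populate tables; the arena lives until the next reload ... */
        return 0;
}
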
68623diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
68624new file mode 100644
68625index 0000000..be66033
68626--- /dev/null
68627+++ b/include/linux/grdefs.h
68628@@ -0,0 +1,140 @@
68629+#ifndef GRDEFS_H
68630+#define GRDEFS_H
68631+
68632+/* Begin grsecurity status declarations */
68633+
68634+enum {
68635+ GR_READY = 0x01,
68636+ GR_STATUS_INIT = 0x00 // disabled state
68637+};
68638+
68639+/* Begin ACL declarations */
68640+
68641+/* Role flags */
68642+
68643+enum {
68644+ GR_ROLE_USER = 0x0001,
68645+ GR_ROLE_GROUP = 0x0002,
68646+ GR_ROLE_DEFAULT = 0x0004,
68647+ GR_ROLE_SPECIAL = 0x0008,
68648+ GR_ROLE_AUTH = 0x0010,
68649+ GR_ROLE_NOPW = 0x0020,
68650+ GR_ROLE_GOD = 0x0040,
68651+ GR_ROLE_LEARN = 0x0080,
68652+ GR_ROLE_TPE = 0x0100,
68653+ GR_ROLE_DOMAIN = 0x0200,
68654+ GR_ROLE_PAM = 0x0400,
68655+ GR_ROLE_PERSIST = 0x0800
68656+};
68657+
68658+/* ACL Subject and Object mode flags */
68659+enum {
68660+ GR_DELETED = 0x80000000
68661+};
68662+
68663+/* ACL Object-only mode flags */
68664+enum {
68665+ GR_READ = 0x00000001,
68666+ GR_APPEND = 0x00000002,
68667+ GR_WRITE = 0x00000004,
68668+ GR_EXEC = 0x00000008,
68669+ GR_FIND = 0x00000010,
68670+ GR_INHERIT = 0x00000020,
68671+ GR_SETID = 0x00000040,
68672+ GR_CREATE = 0x00000080,
68673+ GR_DELETE = 0x00000100,
68674+ GR_LINK = 0x00000200,
68675+ GR_AUDIT_READ = 0x00000400,
68676+ GR_AUDIT_APPEND = 0x00000800,
68677+ GR_AUDIT_WRITE = 0x00001000,
68678+ GR_AUDIT_EXEC = 0x00002000,
68679+ GR_AUDIT_FIND = 0x00004000,
68680+ GR_AUDIT_INHERIT= 0x00008000,
68681+ GR_AUDIT_SETID = 0x00010000,
68682+ GR_AUDIT_CREATE = 0x00020000,
68683+ GR_AUDIT_DELETE = 0x00040000,
68684+ GR_AUDIT_LINK = 0x00080000,
68685+ GR_PTRACERD = 0x00100000,
68686+ GR_NOPTRACE = 0x00200000,
68687+ GR_SUPPRESS = 0x00400000,
68688+ GR_NOLEARN = 0x00800000,
68689+ GR_INIT_TRANSFER= 0x01000000
68690+};
68691+
68692+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
68693+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
68694+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
68695+
68696+/* ACL subject-only mode flags */
68697+enum {
68698+ GR_KILL = 0x00000001,
68699+ GR_VIEW = 0x00000002,
68700+ GR_PROTECTED = 0x00000004,
68701+ GR_LEARN = 0x00000008,
68702+ GR_OVERRIDE = 0x00000010,
68703+ /* just a placeholder, this mode is only used in userspace */
68704+ GR_DUMMY = 0x00000020,
68705+ GR_PROTSHM = 0x00000040,
68706+ GR_KILLPROC = 0x00000080,
68707+ GR_KILLIPPROC = 0x00000100,
68708+ /* just a placeholder, this mode is only used in userspace */
68709+ GR_NOTROJAN = 0x00000200,
68710+ GR_PROTPROCFD = 0x00000400,
68711+ GR_PROCACCT = 0x00000800,
68712+ GR_RELAXPTRACE = 0x00001000,
68713+ //GR_NESTED = 0x00002000,
68714+ GR_INHERITLEARN = 0x00004000,
68715+ GR_PROCFIND = 0x00008000,
68716+ GR_POVERRIDE = 0x00010000,
68717+ GR_KERNELAUTH = 0x00020000,
68718+ GR_ATSECURE = 0x00040000,
68719+ GR_SHMEXEC = 0x00080000
68720+};
68721+
68722+enum {
68723+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
68724+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
68725+ GR_PAX_ENABLE_MPROTECT = 0x0004,
68726+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
68727+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
68728+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
68729+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
68730+ GR_PAX_DISABLE_MPROTECT = 0x0400,
68731+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
68732+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
68733+};
68734+
68735+enum {
68736+ GR_ID_USER = 0x01,
68737+ GR_ID_GROUP = 0x02,
68738+};
68739+
68740+enum {
68741+ GR_ID_ALLOW = 0x01,
68742+ GR_ID_DENY = 0x02,
68743+};
68744+
68745+#define GR_CRASH_RES 31
68746+#define GR_UIDTABLE_MAX 500
68747+
68748+/* begin resource learning section */
68749+enum {
68750+ GR_RLIM_CPU_BUMP = 60,
68751+ GR_RLIM_FSIZE_BUMP = 50000,
68752+ GR_RLIM_DATA_BUMP = 10000,
68753+ GR_RLIM_STACK_BUMP = 1000,
68754+ GR_RLIM_CORE_BUMP = 10000,
68755+ GR_RLIM_RSS_BUMP = 500000,
68756+ GR_RLIM_NPROC_BUMP = 1,
68757+ GR_RLIM_NOFILE_BUMP = 5,
68758+ GR_RLIM_MEMLOCK_BUMP = 50000,
68759+ GR_RLIM_AS_BUMP = 500000,
68760+ GR_RLIM_LOCKS_BUMP = 2,
68761+ GR_RLIM_SIGPENDING_BUMP = 5,
68762+ GR_RLIM_MSGQUEUE_BUMP = 10000,
68763+ GR_RLIM_NICE_BUMP = 1,
68764+ GR_RLIM_RTPRIO_BUMP = 1,
68765+ GR_RLIM_RTTIME_BUMP = 1000000
68766+};
68767+
68768+#endif
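
Each GR_AUDIT_* object flag sits exactly ten bits above its access counterpart (GR_READ 0x1 vs GR_AUDIT_READ 0x400, up through GR_LINK 0x200 vs GR_AUDIT_LINK 0x80000), so the audit companion of a requested mode is a shift and a mask. A sketch in the spirit of the RBAC core's to_gr_audit() (to_audit is an illustrative name):

static __u32 to_audit(__u32 reqmode)
{
        /* map GR_READ..GR_LINK onto GR_AUDIT_READ..GR_AUDIT_LINK */
        return (reqmode << 10) & GR_AUDITS;
}
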
68769diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
68770new file mode 100644
68771index 0000000..9bb6662
68772--- /dev/null
68773+++ b/include/linux/grinternal.h
68774@@ -0,0 +1,215 @@
68775+#ifndef __GRINTERNAL_H
68776+#define __GRINTERNAL_H
68777+
68778+#ifdef CONFIG_GRKERNSEC
68779+
68780+#include <linux/fs.h>
68781+#include <linux/mnt_namespace.h>
68782+#include <linux/nsproxy.h>
68783+#include <linux/gracl.h>
68784+#include <linux/grdefs.h>
68785+#include <linux/grmsg.h>
68786+
68787+void gr_add_learn_entry(const char *fmt, ...)
68788+ __attribute__ ((format (printf, 1, 2)));
68789+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
68790+ const struct vfsmount *mnt);
68791+__u32 gr_check_create(const struct dentry *new_dentry,
68792+ const struct dentry *parent,
68793+ const struct vfsmount *mnt, const __u32 mode);
68794+int gr_check_protected_task(const struct task_struct *task);
68795+__u32 to_gr_audit(const __u32 reqmode);
68796+int gr_set_acls(const int type);
68797+int gr_apply_subject_to_task(struct task_struct *task);
68798+int gr_acl_is_enabled(void);
68799+char gr_roletype_to_char(void);
68800+
68801+void gr_handle_alertkill(struct task_struct *task);
68802+char *gr_to_filename(const struct dentry *dentry,
68803+ const struct vfsmount *mnt);
68804+char *gr_to_filename1(const struct dentry *dentry,
68805+ const struct vfsmount *mnt);
68806+char *gr_to_filename2(const struct dentry *dentry,
68807+ const struct vfsmount *mnt);
68808+char *gr_to_filename3(const struct dentry *dentry,
68809+ const struct vfsmount *mnt);
68810+
68811+extern int grsec_enable_ptrace_readexec;
68812+extern int grsec_enable_harden_ptrace;
68813+extern int grsec_enable_link;
68814+extern int grsec_enable_fifo;
68815+extern int grsec_enable_execve;
68816+extern int grsec_enable_shm;
68817+extern int grsec_enable_execlog;
68818+extern int grsec_enable_signal;
68819+extern int grsec_enable_audit_ptrace;
68820+extern int grsec_enable_forkfail;
68821+extern int grsec_enable_time;
68822+extern int grsec_enable_rofs;
68823+extern int grsec_enable_chroot_shmat;
68824+extern int grsec_enable_chroot_mount;
68825+extern int grsec_enable_chroot_double;
68826+extern int grsec_enable_chroot_pivot;
68827+extern int grsec_enable_chroot_chdir;
68828+extern int grsec_enable_chroot_chmod;
68829+extern int grsec_enable_chroot_mknod;
68830+extern int grsec_enable_chroot_fchdir;
68831+extern int grsec_enable_chroot_nice;
68832+extern int grsec_enable_chroot_execlog;
68833+extern int grsec_enable_chroot_caps;
68834+extern int grsec_enable_chroot_sysctl;
68835+extern int grsec_enable_chroot_unix;
68836+extern int grsec_enable_symlinkown;
68837+extern kgid_t grsec_symlinkown_gid;
68838+extern int grsec_enable_tpe;
68839+extern kgid_t grsec_tpe_gid;
68840+extern int grsec_enable_tpe_all;
68841+extern int grsec_enable_tpe_invert;
68842+extern int grsec_enable_socket_all;
68843+extern kgid_t grsec_socket_all_gid;
68844+extern int grsec_enable_socket_client;
68845+extern kgid_t grsec_socket_client_gid;
68846+extern int grsec_enable_socket_server;
68847+extern kgid_t grsec_socket_server_gid;
68848+extern kgid_t grsec_audit_gid;
68849+extern int grsec_enable_group;
68850+extern int grsec_enable_audit_textrel;
68851+extern int grsec_enable_log_rwxmaps;
68852+extern int grsec_enable_mount;
68853+extern int grsec_enable_chdir;
68854+extern int grsec_resource_logging;
68855+extern int grsec_enable_blackhole;
68856+extern int grsec_lastack_retries;
68857+extern int grsec_enable_brute;
68858+extern int grsec_lock;
68859+
68860+extern spinlock_t grsec_alert_lock;
68861+extern unsigned long grsec_alert_wtime;
68862+extern unsigned long grsec_alert_fyet;
68863+
68864+extern spinlock_t grsec_audit_lock;
68865+
68866+extern rwlock_t grsec_exec_file_lock;
68867+
68868+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
68869+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
68870+ (tsk)->exec_file->f_vfsmnt) : "/")
68871+
68872+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
68873+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
68874+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68875+
68876+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
68877+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
68878+ (tsk)->exec_file->f_vfsmnt) : "/")
68879+
68880+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
68881+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
68882+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68883+
68884+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
68885+
68886+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
68887+
68888+#define GR_CHROOT_CAPS {{ \
68889+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
68890+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
68891+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
68892+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
68893+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
68894+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
68895+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
68896+
68897+#define security_learn(normal_msg,args...) \
68898+({ \
68899+ read_lock(&grsec_exec_file_lock); \
68900+ gr_add_learn_entry(normal_msg "\n", ## args); \
68901+ read_unlock(&grsec_exec_file_lock); \
68902+})
68903+
68904+enum {
68905+ GR_DO_AUDIT,
68906+ GR_DONT_AUDIT,
68907+ /* used for non-audit messages that we shouldn't kill the task on */
68908+ GR_DONT_AUDIT_GOOD
68909+};
68910+
68911+enum {
68912+ GR_TTYSNIFF,
68913+ GR_RBAC,
68914+ GR_RBAC_STR,
68915+ GR_STR_RBAC,
68916+ GR_RBAC_MODE2,
68917+ GR_RBAC_MODE3,
68918+ GR_FILENAME,
68919+ GR_SYSCTL_HIDDEN,
68920+ GR_NOARGS,
68921+ GR_ONE_INT,
68922+ GR_ONE_INT_TWO_STR,
68923+ GR_ONE_STR,
68924+ GR_STR_INT,
68925+ GR_TWO_STR_INT,
68926+ GR_TWO_INT,
68927+ GR_TWO_U64,
68928+ GR_THREE_INT,
68929+ GR_FIVE_INT_TWO_STR,
68930+ GR_TWO_STR,
68931+ GR_THREE_STR,
68932+ GR_FOUR_STR,
68933+ GR_STR_FILENAME,
68934+ GR_FILENAME_STR,
68935+ GR_FILENAME_TWO_INT,
68936+ GR_FILENAME_TWO_INT_STR,
68937+ GR_TEXTREL,
68938+ GR_PTRACE,
68939+ GR_RESOURCE,
68940+ GR_CAP,
68941+ GR_SIG,
68942+ GR_SIG2,
68943+ GR_CRASH1,
68944+ GR_CRASH2,
68945+ GR_PSACCT,
68946+ GR_RWXMAP
68947+};
68948+
68949+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
68950+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
68951+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
68952+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
68953+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
68954+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
68955+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
68956+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
68957+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
68958+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
68959+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
68960+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
68961+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
68962+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
68963+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
68964+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
68965+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
68966+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
68967+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
68968+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
68969+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
68970+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
68971+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
68972+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
68973+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
68974+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
68975+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
68976+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
68977+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
68978+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
68979+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
68980+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
68981+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
68982+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
68983+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
68984+
68985+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
68986+
68987+#endif
68988+
68989+#endif
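These one-line wrappers keep call sites terse while a single varargs back end does all formatting; the third argument tags the va_list layout for the decoder. A minimal expansion sketch (the GR_DONT_AUDIT flag is assumed from grinternal.h, not shown in this hunk):

    /* caller side */
    gr_log_noargs(GR_DONT_AUDIT, GR_TIME_MSG);
    /* after preprocessing */
    gr_log_varargs(GR_DONT_AUDIT, GR_TIME_MSG, GR_NOARGS);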
68990diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
68991new file mode 100644
68992index 0000000..2bd4c8d
68993--- /dev/null
68994+++ b/include/linux/grmsg.h
68995@@ -0,0 +1,111 @@
68996+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
68997+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
68998+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
68999+#define GR_STOPMOD_MSG "denied modification of module state by "
69000+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
69001+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
69002+#define GR_IOPERM_MSG "denied use of ioperm() by "
69003+#define GR_IOPL_MSG "denied use of iopl() by "
69004+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
69005+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
69006+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
69007+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
69008+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
69009+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
69010+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
69011+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
69012+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
69013+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
69014+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
69015+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
69016+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
69017+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
69018+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
69019+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
69020+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
69021+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
69022+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
69023+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
69024+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
69025+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
69026+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
69027+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
69028+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
69029+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
69030+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
69031+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
69032+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
69033+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
69034+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
69035+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
69036+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
69037+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
69038+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
69039+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
69040+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
69041+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
69042+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
69043+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
69044+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
69045+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
69046+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
69047+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
69048+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
69049+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
69050+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
69051+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
69052+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
69053+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
69054+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
69055+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
69056+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
69057+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
69058+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
69059+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
69060+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
69061+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
69062+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
69063+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
69064+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
69065+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
69066+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
69067+#define GR_FAILFORK_MSG "failed fork with errno %s by "
69068+#define GR_NICE_CHROOT_MSG "denied priority change by "
69069+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
69070+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
69071+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
69072+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
69073+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
69074+#define GR_TIME_MSG "time set by "
69075+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
69076+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
69077+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
69078+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
69079+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
69080+#define GR_BIND_MSG "denied bind() by "
69081+#define GR_CONNECT_MSG "denied connect() by "
69082+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
69083+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
69084+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
69085+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
69086+#define GR_CAP_ACL_MSG "use of %s denied for "
69087+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
69088+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
69089+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
69090+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
69091+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
69092+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
69093+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
69094+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
69095+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
69096+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
69097+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
69098+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
69099+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
69100+#define GR_VM86_MSG "denied use of vm86 by "
69101+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
69102+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
69103+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
69104+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
69105+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
69106+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
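Nearly every format above deliberately ends in "by " or "for ": the logger appends the acting task's identification, in the DEFAULTSECMSG shape, after the event text. A sketch of one resulting line, with the "grsec:" prefix and the concrete values assumed purely for illustration:

    grsec: denied use of iopl() by /usr/bin/tool[tool:4242]
        uid/euid:1000/1000 gid/egid:1000/1000, parent
        /bin/bash[bash:4100] uid/euid:1000/1000 gid/egid:1000/1000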
69107diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
69108new file mode 100644
69109index 0000000..8da63a4
69110--- /dev/null
69111+++ b/include/linux/grsecurity.h
69112@@ -0,0 +1,242 @@
69113+#ifndef GR_SECURITY_H
69114+#define GR_SECURITY_H
69115+#include <linux/fs.h>
69116+#include <linux/fs_struct.h>
69117+#include <linux/binfmts.h>
69118+#include <linux/gracl.h>
69119+
69120+/* notify of brain-dead configs */
69121+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69122+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
69123+#endif
69124+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
69125+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
69126+#endif
69127+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
69128+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
69129+#endif
69130+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
69131+#error "CONFIG_PAX enabled, but no PaX options are enabled."
69132+#endif
69133+
69134+void gr_handle_brute_attach(unsigned long mm_flags);
69135+void gr_handle_brute_check(void);
69136+void gr_handle_kernel_exploit(void);
69137+int gr_process_user_ban(void);
69138+
69139+char gr_roletype_to_char(void);
69140+
69141+int gr_acl_enable_at_secure(void);
69142+
69143+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
69144+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
69145+
69146+void gr_del_task_from_ip_table(struct task_struct *p);
69147+
69148+int gr_pid_is_chrooted(struct task_struct *p);
69149+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
69150+int gr_handle_chroot_nice(void);
69151+int gr_handle_chroot_sysctl(const int op);
69152+int gr_handle_chroot_setpriority(struct task_struct *p,
69153+ const int niceval);
69154+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
69155+int gr_handle_chroot_chroot(const struct dentry *dentry,
69156+ const struct vfsmount *mnt);
69157+void gr_handle_chroot_chdir(struct path *path);
69158+int gr_handle_chroot_chmod(const struct dentry *dentry,
69159+ const struct vfsmount *mnt, const int mode);
69160+int gr_handle_chroot_mknod(const struct dentry *dentry,
69161+ const struct vfsmount *mnt, const int mode);
69162+int gr_handle_chroot_mount(const struct dentry *dentry,
69163+ const struct vfsmount *mnt,
69164+ const char *dev_name);
69165+int gr_handle_chroot_pivot(void);
69166+int gr_handle_chroot_unix(const pid_t pid);
69167+
69168+int gr_handle_rawio(const struct inode *inode);
69169+
69170+void gr_handle_ioperm(void);
69171+void gr_handle_iopl(void);
69172+
69173+umode_t gr_acl_umask(void);
69174+
69175+int gr_tpe_allow(const struct file *file);
69176+
69177+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
69178+void gr_clear_chroot_entries(struct task_struct *task);
69179+
69180+void gr_log_forkfail(const int retval);
69181+void gr_log_timechange(void);
69182+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
69183+void gr_log_chdir(const struct dentry *dentry,
69184+ const struct vfsmount *mnt);
69185+void gr_log_chroot_exec(const struct dentry *dentry,
69186+ const struct vfsmount *mnt);
69187+void gr_log_remount(const char *devname, const int retval);
69188+void gr_log_unmount(const char *devname, const int retval);
69189+void gr_log_mount(const char *from, const char *to, const int retval);
69190+void gr_log_textrel(struct vm_area_struct *vma);
69191+void gr_log_rwxmmap(struct file *file);
69192+void gr_log_rwxmprotect(struct file *file);
69193+
69194+int gr_handle_follow_link(const struct inode *parent,
69195+ const struct inode *inode,
69196+ const struct dentry *dentry,
69197+ const struct vfsmount *mnt);
69198+int gr_handle_fifo(const struct dentry *dentry,
69199+ const struct vfsmount *mnt,
69200+ const struct dentry *dir, const int flag,
69201+ const int acc_mode);
69202+int gr_handle_hardlink(const struct dentry *dentry,
69203+ const struct vfsmount *mnt,
69204+ struct inode *inode,
69205+ const int mode, const struct filename *to);
69206+
69207+int gr_is_capable(const int cap);
69208+int gr_is_capable_nolog(const int cap);
69209+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69210+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
69211+
69212+void gr_copy_label(struct task_struct *tsk);
69213+void gr_handle_crash(struct task_struct *task, const int sig);
69214+int gr_handle_signal(const struct task_struct *p, const int sig);
69215+int gr_check_crash_uid(const kuid_t uid);
69216+int gr_check_protected_task(const struct task_struct *task);
69217+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
69218+int gr_acl_handle_mmap(const struct file *file,
69219+ const unsigned long prot);
69220+int gr_acl_handle_mprotect(const struct file *file,
69221+ const unsigned long prot);
69222+int gr_check_hidden_task(const struct task_struct *tsk);
69223+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
69224+ const struct vfsmount *mnt);
69225+__u32 gr_acl_handle_utime(const struct dentry *dentry,
69226+ const struct vfsmount *mnt);
69227+__u32 gr_acl_handle_access(const struct dentry *dentry,
69228+ const struct vfsmount *mnt, const int fmode);
69229+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
69230+ const struct vfsmount *mnt, umode_t *mode);
69231+__u32 gr_acl_handle_chown(const struct dentry *dentry,
69232+ const struct vfsmount *mnt);
69233+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
69234+ const struct vfsmount *mnt);
69235+int gr_handle_ptrace(struct task_struct *task, const long request);
69236+int gr_handle_proc_ptrace(struct task_struct *task);
69237+__u32 gr_acl_handle_execve(const struct dentry *dentry,
69238+ const struct vfsmount *mnt);
69239+int gr_check_crash_exec(const struct file *filp);
69240+int gr_acl_is_enabled(void);
69241+void gr_set_kernel_label(struct task_struct *task);
69242+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
69243+ const kgid_t gid);
69244+int gr_set_proc_label(const struct dentry *dentry,
69245+ const struct vfsmount *mnt,
69246+ const int unsafe_flags);
69247+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
69248+ const struct vfsmount *mnt);
69249+__u32 gr_acl_handle_open(const struct dentry *dentry,
69250+ const struct vfsmount *mnt, int acc_mode);
69251+__u32 gr_acl_handle_creat(const struct dentry *dentry,
69252+ const struct dentry *p_dentry,
69253+ const struct vfsmount *p_mnt,
69254+ int open_flags, int acc_mode, const int imode);
69255+void gr_handle_create(const struct dentry *dentry,
69256+ const struct vfsmount *mnt);
69257+void gr_handle_proc_create(const struct dentry *dentry,
69258+ const struct inode *inode);
69259+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
69260+ const struct dentry *parent_dentry,
69261+ const struct vfsmount *parent_mnt,
69262+ const int mode);
69263+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
69264+ const struct dentry *parent_dentry,
69265+ const struct vfsmount *parent_mnt);
69266+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
69267+ const struct vfsmount *mnt);
69268+void gr_handle_delete(const ino_t ino, const dev_t dev);
69269+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
69270+ const struct vfsmount *mnt);
69271+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
69272+ const struct dentry *parent_dentry,
69273+ const struct vfsmount *parent_mnt,
69274+ const struct filename *from);
69275+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
69276+ const struct dentry *parent_dentry,
69277+ const struct vfsmount *parent_mnt,
69278+ const struct dentry *old_dentry,
69279+ const struct vfsmount *old_mnt, const struct filename *to);
69280+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
69281+int gr_acl_handle_rename(struct dentry *new_dentry,
69282+ struct dentry *parent_dentry,
69283+ const struct vfsmount *parent_mnt,
69284+ struct dentry *old_dentry,
69285+ struct inode *old_parent_inode,
69286+ struct vfsmount *old_mnt, const struct filename *newname);
69287+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
69288+ struct dentry *old_dentry,
69289+ struct dentry *new_dentry,
69290+ struct vfsmount *mnt, const __u8 replace);
69291+__u32 gr_check_link(const struct dentry *new_dentry,
69292+ const struct dentry *parent_dentry,
69293+ const struct vfsmount *parent_mnt,
69294+ const struct dentry *old_dentry,
69295+ const struct vfsmount *old_mnt);
69296+int gr_acl_handle_filldir(const struct file *file, const char *name,
69297+ const unsigned int namelen, const ino_t ino);
69298+
69299+__u32 gr_acl_handle_unix(const struct dentry *dentry,
69300+ const struct vfsmount *mnt);
69301+void gr_acl_handle_exit(void);
69302+void gr_acl_handle_psacct(struct task_struct *task, const long code);
69303+int gr_acl_handle_procpidmem(const struct task_struct *task);
69304+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
69305+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
69306+void gr_audit_ptrace(struct task_struct *task);
69307+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
69308+void gr_put_exec_file(struct task_struct *task);
69309+
69310+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
69311+
69312+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
69313+extern void gr_learn_resource(const struct task_struct *task, const int res,
69314+ const unsigned long wanted, const int gt);
69315+#else
69316+static inline void gr_learn_resource(const struct task_struct *task, const int res,
69317+ const unsigned long wanted, const int gt)
69318+{
69319+}
69320+#endif
69321+
69322+#ifdef CONFIG_GRKERNSEC_RESLOG
69323+extern void gr_log_resource(const struct task_struct *task, const int res,
69324+ const unsigned long wanted, const int gt);
69325+#else
69326+static inline void gr_log_resource(const struct task_struct *task, const int res,
69327+ const unsigned long wanted, const int gt)
69328+{
69329+}
69330+#endif
69331+
69332+#ifdef CONFIG_GRKERNSEC
69333+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
69334+void gr_handle_vm86(void);
69335+void gr_handle_mem_readwrite(u64 from, u64 to);
69336+
69337+void gr_log_badprocpid(const char *entry);
69338+
69339+extern int grsec_enable_dmesg;
69340+extern int grsec_disable_privio;
69341+
69342+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69343+extern kgid_t grsec_proc_gid;
69344+#endif
69345+
69346+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69347+extern int grsec_enable_chroot_findtask;
69348+#endif
69349+#ifdef CONFIG_GRKERNSEC_SETXID
69350+extern int grsec_enable_setxid;
69351+#endif
69352+#endif
69353+
69354+#endif
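The resource declarations use the standard config-gated stub pattern: when the option is off, the empty static inline lets call sites stay unconditional and compile to nothing. A sketch of such a call site (the limit-check context and the meaning of the gt flag are assumptions):

    /* hypothetical resource-limit path */
    gr_learn_resource(current, RLIMIT_NOFILE, wanted, 1 /* gt */);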
69355diff --git a/include/linux/grsock.h b/include/linux/grsock.h
69356new file mode 100644
69357index 0000000..e7ffaaf
69358--- /dev/null
69359+++ b/include/linux/grsock.h
69360@@ -0,0 +1,19 @@
69361+#ifndef __GRSOCK_H
69362+#define __GRSOCK_H
69363+
69364+extern void gr_attach_curr_ip(const struct sock *sk);
69365+extern int gr_handle_sock_all(const int family, const int type,
69366+ const int protocol);
69367+extern int gr_handle_sock_server(const struct sockaddr *sck);
69368+extern int gr_handle_sock_server_other(const struct sock *sck);
69369+extern int gr_handle_sock_client(const struct sockaddr *sck);
69370+extern int gr_search_connect(struct socket * sock,
69371+ struct sockaddr_in * addr);
69372+extern int gr_search_bind(struct socket * sock,
69373+ struct sockaddr_in * addr);
69374+extern int gr_search_listen(struct socket * sock);
69375+extern int gr_search_accept(struct socket * sock);
69376+extern int gr_search_socket(const int domain, const int type,
69377+ const int protocol);
69378+
69379+#endif
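These hooks follow the patch's usual allow-by-nonzero convention; a hypothetical call site early in the socket() syscall (the exact placement and errno are assumptions, not shown here):

    if (!gr_search_socket(domain, type, protocol))
        return -EACCES;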
69380diff --git a/include/linux/highmem.h b/include/linux/highmem.h
69381index ef788b5..ac41b7b 100644
69382--- a/include/linux/highmem.h
69383+++ b/include/linux/highmem.h
69384@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
69385 kunmap_atomic(kaddr);
69386 }
69387
69388+static inline void sanitize_highpage(struct page *page)
69389+{
69390+ void *kaddr;
69391+ unsigned long flags;
69392+
69393+ local_irq_save(flags);
69394+ kaddr = kmap_atomic(page);
69395+ clear_page(kaddr);
69396+ kunmap_atomic(kaddr);
69397+ local_irq_restore(flags);
69398+}
69399+
69400 static inline void zero_user_segments(struct page *page,
69401 unsigned start1, unsigned end1,
69402 unsigned start2, unsigned end2)
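sanitize_highpage() mirrors clear_highpage() but brackets the atomic kmap with local_irq_save()/local_irq_restore(), so it stays safe where the caller could otherwise be interrupted mid-scrub. A sketch of the presumed consumer, a PAX_MEMORY_SANITIZE-style page-freeing path (the call site is an assumption):

    #ifdef CONFIG_PAX_MEMORY_SANITIZE
        sanitize_highpage(page);  /* scrub before the page returns to the allocator */
    #endif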
69403diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
69404index 1c7b89a..7f52502 100644
69405--- a/include/linux/hwmon-sysfs.h
69406+++ b/include/linux/hwmon-sysfs.h
69407@@ -25,7 +25,8 @@
69408 struct sensor_device_attribute{
69409 struct device_attribute dev_attr;
69410 int index;
69411-};
69412+} __do_const;
69413+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
69414 #define to_sensor_dev_attr(_dev_attr) \
69415 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
69416
69417@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
69418 struct device_attribute dev_attr;
69419 u8 index;
69420 u8 nr;
69421-};
69422+} __do_const;
69423 #define to_sensor_dev_attr_2(_dev_attr) \
69424 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
69425
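__do_const is consumed by the PaX constify gcc plugin, which turns every instance of the marked structure into read-only data; the *_no_const typedef is the opt-out for the rare instance that must be written at runtime. A sketch of the two sides (names hypothetical):

    /* ordinary instance: constified into .rodata by the plugin */
    static struct sensor_device_attribute temp1 = { /* ... */ };

    /* a driver that must patch fields after probe opts out */
    static sensor_device_attribute_no_const dyn_attr;
    dyn_attr.index = detected_channel;  /* writable thanks to __no_const */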
69426diff --git a/include/linux/i2c.h b/include/linux/i2c.h
69427index d0c4db7..61b3577 100644
69428--- a/include/linux/i2c.h
69429+++ b/include/linux/i2c.h
69430@@ -369,6 +369,7 @@ struct i2c_algorithm {
69431 /* To determine what the adapter supports */
69432 u32 (*functionality) (struct i2c_adapter *);
69433 };
69434+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
69435
69436 /*
69437 * i2c_adapter is the structure used to identify a physical i2c bus along
69438diff --git a/include/linux/i2o.h b/include/linux/i2o.h
69439index d23c3c2..eb63c81 100644
69440--- a/include/linux/i2o.h
69441+++ b/include/linux/i2o.h
69442@@ -565,7 +565,7 @@ struct i2o_controller {
69443 struct i2o_device *exec; /* Executive */
69444 #if BITS_PER_LONG == 64
69445 spinlock_t context_list_lock; /* lock for context_list */
69446- atomic_t context_list_counter; /* needed for unique contexts */
69447+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
69448 struct list_head context_list; /* list of context id's
69449 and pointers */
69450 #endif
69451diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
69452index aff7ad8..3942bbd 100644
69453--- a/include/linux/if_pppox.h
69454+++ b/include/linux/if_pppox.h
69455@@ -76,7 +76,7 @@ struct pppox_proto {
69456 int (*ioctl)(struct socket *sock, unsigned int cmd,
69457 unsigned long arg);
69458 struct module *owner;
69459-};
69460+} __do_const;
69461
69462 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
69463 extern void unregister_pppox_proto(int proto_num);
69464diff --git a/include/linux/init.h b/include/linux/init.h
69465index 10ed4f4..8e8490d 100644
69466--- a/include/linux/init.h
69467+++ b/include/linux/init.h
69468@@ -39,9 +39,36 @@
69469 * Also note, that this data cannot be "const".
69470 */
69471
69472+#ifdef MODULE
69473+#define add_init_latent_entropy
69474+#define add_devinit_latent_entropy
69475+#define add_cpuinit_latent_entropy
69476+#define add_meminit_latent_entropy
69477+#else
69478+#define add_init_latent_entropy __latent_entropy
69479+
69480+#ifdef CONFIG_HOTPLUG
69481+#define add_devinit_latent_entropy
69482+#else
69483+#define add_devinit_latent_entropy __latent_entropy
69484+#endif
69485+
69486+#ifdef CONFIG_HOTPLUG_CPU
69487+#define add_cpuinit_latent_entropy
69488+#else
69489+#define add_cpuinit_latent_entropy __latent_entropy
69490+#endif
69491+
69492+#ifdef CONFIG_MEMORY_HOTPLUG
69493+#define add_meminit_latent_entropy
69494+#else
69495+#define add_meminit_latent_entropy __latent_entropy
69496+#endif
69497+#endif
69498+
69499 /* These are for everybody (although not all archs will actually
69500 discard it in modules) */
69501-#define __init __section(.init.text) __cold notrace
69502+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
69503 #define __initdata __section(.init.data)
69504 #define __initconst __constsection(.init.rodata)
69505 #define __exitdata __section(.exit.data)
69506@@ -94,7 +121,7 @@
69507 #define __exit __section(.exit.text) __exitused __cold notrace
69508
69509 /* Used for HOTPLUG_CPU */
69510-#define __cpuinit __section(.cpuinit.text) __cold notrace
69511+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
69512 #define __cpuinitdata __section(.cpuinit.data)
69513 #define __cpuinitconst __constsection(.cpuinit.rodata)
69514 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
69515@@ -102,7 +129,7 @@
69516 #define __cpuexitconst __constsection(.cpuexit.rodata)
69517
69518 /* Used for MEMORY_HOTPLUG */
69519-#define __meminit __section(.meminit.text) __cold notrace
69520+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
69521 #define __meminitdata __section(.meminit.data)
69522 #define __meminitconst __constsection(.meminit.rodata)
69523 #define __memexit __section(.memexit.text) __exitused __cold notrace
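The add_*_latent_entropy helpers attach __latent_entropy (consumed by the PaX latent-entropy gcc plugin) only to init-phase code that cannot run again later: module builds and hotplug-capable variants stay unannotated, presumably because only the single-threaded boot path can update the plugin's entropy accumulator without races. The net effect, with the function name hypothetical:

    /* built into the kernel, no CPU hotplug: __cpuinit now carries
       __latent_entropy, so the plugin instruments this function */
    static int __cpuinit foo_cpu_prepare(unsigned int cpu) { return 0; }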
69524diff --git a/include/linux/init_task.h b/include/linux/init_task.h
69525index 6d087c5..401cab8 100644
69526--- a/include/linux/init_task.h
69527+++ b/include/linux/init_task.h
69528@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
69529
69530 #define INIT_TASK_COMM "swapper"
69531
69532+#ifdef CONFIG_X86
69533+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
69534+#else
69535+#define INIT_TASK_THREAD_INFO
69536+#endif
69537+
69538 /*
69539 * INIT_TASK is used to set up the first task table, touch at
69540 * your own risk!. Base=0, limit=0x1fffff (=2MB)
69541@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
69542 RCU_POINTER_INITIALIZER(cred, &init_cred), \
69543 .comm = INIT_TASK_COMM, \
69544 .thread = INIT_THREAD, \
69545+ INIT_TASK_THREAD_INFO \
69546 .fs = &init_fs, \
69547 .files = &init_files, \
69548 .signal = &init_signals, \
69549diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
69550index 5fa5afe..ac55b25 100644
69551--- a/include/linux/interrupt.h
69552+++ b/include/linux/interrupt.h
69553@@ -430,7 +430,7 @@ enum
69554 /* map softirq index to softirq name. update 'softirq_to_name' in
69555 * kernel/softirq.c when adding a new softirq.
69556 */
69557-extern char *softirq_to_name[NR_SOFTIRQS];
69558+extern const char * const softirq_to_name[NR_SOFTIRQS];
69559
69560 /* softirq mask and active fields moved to irq_cpustat_t in
69561 * asm/hardirq.h to get better cache usage. KAO
69562@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
69563
69564 struct softirq_action
69565 {
69566- void (*action)(struct softirq_action *);
69567-};
69568+ void (*action)(void);
69569+} __no_const;
69570
69571 asmlinkage void do_softirq(void);
69572 asmlinkage void __do_softirq(void);
69573-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
69574+extern void open_softirq(int nr, void (*action)(void));
69575 extern void softirq_init(void);
69576 extern void __raise_softirq_irqoff(unsigned int nr);
69577
69578diff --git a/include/linux/iommu.h b/include/linux/iommu.h
69579index f3b99e1..9b73cee 100644
69580--- a/include/linux/iommu.h
69581+++ b/include/linux/iommu.h
69582@@ -101,7 +101,7 @@ struct iommu_ops {
69583 int (*domain_set_attr)(struct iommu_domain *domain,
69584 enum iommu_attr attr, void *data);
69585 unsigned long pgsize_bitmap;
69586-};
69587+} __do_const;
69588
69589 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
69590 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
69591diff --git a/include/linux/irq.h b/include/linux/irq.h
69592index fdf2c4a..5332486 100644
69593--- a/include/linux/irq.h
69594+++ b/include/linux/irq.h
69595@@ -328,7 +328,8 @@ struct irq_chip {
69596 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
69597
69598 unsigned long flags;
69599-};
69600+} __do_const;
69601+typedef struct irq_chip __no_const irq_chip_no_const;
69602
69603 /*
69604 * irq_chip specific flags
69605diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
69606index 6883e19..06992b1 100644
69607--- a/include/linux/kallsyms.h
69608+++ b/include/linux/kallsyms.h
69609@@ -15,7 +15,8 @@
69610
69611 struct module;
69612
69613-#ifdef CONFIG_KALLSYMS
69614+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
69615+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
69616 /* Lookup the address for a symbol. Returns 0 if not found. */
69617 unsigned long kallsyms_lookup_name(const char *name);
69618
69619@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
69620 /* Stupid that this does nothing, but I didn't create this mess. */
69621 #define __print_symbol(fmt, addr)
69622 #endif /*CONFIG_KALLSYMS*/
69623+#else /* when included by kallsyms.c, vsnprintf.c, or
69624+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
69625+extern void __print_symbol(const char *fmt, unsigned long address);
69626+extern int sprint_backtrace(char *buffer, unsigned long address);
69627+extern int sprint_symbol(char *buffer, unsigned long address);
69628+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
69629+const char *kallsyms_lookup(unsigned long addr,
69630+ unsigned long *symbolsize,
69631+ unsigned long *offset,
69632+ char **modname, char *namebuf);
69633+#endif
69634
69635 /* This macro allows us to keep printk typechecking */
69636 static __printf(1, 2)
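The doubled #if implements a simple gate: under GRKERNSEC_HIDESYM the lookup/print helpers are hidden from general code, while the few files that implement symbol printing (kallsyms.c, vsnprintf.c, dumpstack.c, per the comment above) opt back in and receive the real prototypes. A sketch of that opt-in (the define's value is an assumption):

    /* at the top of e.g. arch/x86/kernel/dumpstack.c */
    #define __INCLUDED_BY_HIDESYM 1
    #include <linux/kallsyms.h>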
69637diff --git a/include/linux/key-type.h b/include/linux/key-type.h
69638index 518a53a..5e28358 100644
69639--- a/include/linux/key-type.h
69640+++ b/include/linux/key-type.h
69641@@ -125,7 +125,7 @@ struct key_type {
69642 /* internal fields */
69643 struct list_head link; /* link in types list */
69644 struct lock_class_key lock_class; /* key->sem lock class */
69645-};
69646+} __do_const;
69647
69648 extern struct key_type key_type_keyring;
69649
69650diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
69651index 4dff0c6..1ca9b72 100644
69652--- a/include/linux/kgdb.h
69653+++ b/include/linux/kgdb.h
69654@@ -53,7 +53,7 @@ extern int kgdb_connected;
69655 extern int kgdb_io_module_registered;
69656
69657 extern atomic_t kgdb_setting_breakpoint;
69658-extern atomic_t kgdb_cpu_doing_single_step;
69659+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
69660
69661 extern struct task_struct *kgdb_usethread;
69662 extern struct task_struct *kgdb_contthread;
69663@@ -255,7 +255,7 @@ struct kgdb_arch {
69664 void (*correct_hw_break)(void);
69665
69666 void (*enable_nmi)(bool on);
69667-};
69668+} __do_const;
69669
69670 /**
69671 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
69672@@ -280,7 +280,7 @@ struct kgdb_io {
69673 void (*pre_exception) (void);
69674 void (*post_exception) (void);
69675 int is_console;
69676-};
69677+} __do_const;
69678
69679 extern struct kgdb_arch arch_kgdb_ops;
69680
69681diff --git a/include/linux/kmod.h b/include/linux/kmod.h
69682index 5398d58..5883a34 100644
69683--- a/include/linux/kmod.h
69684+++ b/include/linux/kmod.h
69685@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
69686 * usually useless though. */
69687 extern __printf(2, 3)
69688 int __request_module(bool wait, const char *name, ...);
69689+extern __printf(3, 4)
69690+int ___request_module(bool wait, char *param_name, const char *name, ...);
69691 #define request_module(mod...) __request_module(true, mod)
69692 #define request_module_nowait(mod...) __request_module(false, mod)
69693 #define try_then_request_module(x, mod...) \
69694diff --git a/include/linux/kobject.h b/include/linux/kobject.h
69695index 939b112..ed6ed51 100644
69696--- a/include/linux/kobject.h
69697+++ b/include/linux/kobject.h
69698@@ -111,7 +111,7 @@ struct kobj_type {
69699 struct attribute **default_attrs;
69700 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
69701 const void *(*namespace)(struct kobject *kobj);
69702-};
69703+} __do_const;
69704
69705 struct kobj_uevent_env {
69706 char *envp[UEVENT_NUM_ENVP];
69707@@ -134,6 +134,7 @@ struct kobj_attribute {
69708 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
69709 const char *buf, size_t count);
69710 };
69711+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
69712
69713 extern const struct sysfs_ops kobj_sysfs_ops;
69714
69715diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
69716index f66b065..c2c29b4 100644
69717--- a/include/linux/kobject_ns.h
69718+++ b/include/linux/kobject_ns.h
69719@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
69720 const void *(*netlink_ns)(struct sock *sk);
69721 const void *(*initial_ns)(void);
69722 void (*drop_ns)(void *);
69723-};
69724+} __do_const;
69725
69726 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
69727 int kobj_ns_type_registered(enum kobj_ns_type type);
69728diff --git a/include/linux/kref.h b/include/linux/kref.h
69729index 4972e6e..de4d19b 100644
69730--- a/include/linux/kref.h
69731+++ b/include/linux/kref.h
69732@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
69733 static inline int kref_sub(struct kref *kref, unsigned int count,
69734 void (*release)(struct kref *kref))
69735 {
69736- WARN_ON(release == NULL);
69737+ BUG_ON(release == NULL);
69738
69739 if (atomic_sub_and_test((int) count, &kref->refcount)) {
69740 release(kref);
69741diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
69742index 2c497ab..afe32f5 100644
69743--- a/include/linux/kvm_host.h
69744+++ b/include/linux/kvm_host.h
69745@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
69746 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
69747 void vcpu_put(struct kvm_vcpu *vcpu);
69748
69749-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69750+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69751 struct module *module);
69752 void kvm_exit(void);
69753
69754@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
69755 struct kvm_guest_debug *dbg);
69756 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
69757
69758-int kvm_arch_init(void *opaque);
69759+int kvm_arch_init(const void *opaque);
69760 void kvm_arch_exit(void);
69761
69762 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
69763diff --git a/include/linux/libata.h b/include/linux/libata.h
69764index 649e5f8..ead5194 100644
69765--- a/include/linux/libata.h
69766+++ b/include/linux/libata.h
69767@@ -915,7 +915,7 @@ struct ata_port_operations {
69768 * fields must be pointers.
69769 */
69770 const struct ata_port_operations *inherits;
69771-};
69772+} __do_const;
69773
69774 struct ata_port_info {
69775 unsigned long flags;
69776diff --git a/include/linux/list.h b/include/linux/list.h
69777index cc6d2aa..c10ee83 100644
69778--- a/include/linux/list.h
69779+++ b/include/linux/list.h
69780@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
69781 extern void list_del(struct list_head *entry);
69782 #endif
69783
69784+extern void __pax_list_add(struct list_head *new,
69785+ struct list_head *prev,
69786+ struct list_head *next);
69787+static inline void pax_list_add(struct list_head *new, struct list_head *head)
69788+{
69789+ __pax_list_add(new, head, head->next);
69790+}
69791+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
69792+{
69793+ __pax_list_add(new, head->prev, head);
69794+}
69795+extern void pax_list_del(struct list_head *entry);
69796+
69797 /**
69798 * list_replace - replace old entry by new one
69799 * @old : the element to be replaced
69800@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
69801 INIT_LIST_HEAD(entry);
69802 }
69803
69804+extern void pax_list_del_init(struct list_head *entry);
69805+
69806 /**
69807 * list_move - delete from one list and add as another's head
69808 * @list: the entry to move
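The pax_list_* variants exist for list nodes living in KERNEXEC-protected read-only memory, where a plain list_add() would fault on the pointer stores; __pax_list_add() is expected to lift write protection around them. A speculative sketch only, assuming the pax_open_kernel()/pax_close_kernel() primitives used elsewhere in this patch:

    void __pax_list_add(struct list_head *new, struct list_head *prev,
                        struct list_head *next)
    {
        pax_open_kernel();      /* temporarily allow kernel writes */
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
        pax_close_kernel();
    }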
69809diff --git a/include/linux/math64.h b/include/linux/math64.h
69810index b8ba855..0148090 100644
69811--- a/include/linux/math64.h
69812+++ b/include/linux/math64.h
69813@@ -14,7 +14,7 @@
69814 * This is commonly provided by 32bit archs to provide an optimized 64bit
69815 * divide.
69816 */
69817-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69818+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69819 {
69820 *remainder = dividend % divisor;
69821 return dividend / divisor;
69822@@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
69823 #define div64_long(x,y) div_s64((x),(y))
69824
69825 #ifndef div_u64_rem
69826-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69827+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69828 {
69829 *remainder = do_div(dividend, divisor);
69830 return dividend;
69831@@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
69832 * divide.
69833 */
69834 #ifndef div_u64
69835-static inline u64 div_u64(u64 dividend, u32 divisor)
69836+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
69837 {
69838 u32 remainder;
69839 return div_u64_rem(dividend, divisor, &remainder);
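__intentional_overflow(-1) is an annotation for the size_overflow gcc plugin; it exempts the function from overflow instrumentation (the -1 index appears to cover the return value), which these divide helpers need since their narrowing arithmetic is deliberate. A sketch of annotating one's own wraparound helper the same way (function hypothetical):

    static inline u32 __intentional_overflow(-1) seq_advance(u32 seq, u32 n)
    {
        return seq + n;  /* modular arithmetic: wraparound is by design */
    }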
69840diff --git a/include/linux/mm.h b/include/linux/mm.h
69841index 66e2f7c..b916b9a 100644
69842--- a/include/linux/mm.h
69843+++ b/include/linux/mm.h
69844@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
69845 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
69846 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
69847 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
69848+
69849+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69850+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
69851+#endif
69852+
69853 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
69854
69855 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
69856@@ -200,8 +205,8 @@ struct vm_operations_struct {
69857 /* called by access_process_vm when get_user_pages() fails, typically
69858 * for use by special VMAs that can switch between memory and hardware
69859 */
69860- int (*access)(struct vm_area_struct *vma, unsigned long addr,
69861- void *buf, int len, int write);
69862+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
69863+ void *buf, size_t len, int write);
69864 #ifdef CONFIG_NUMA
69865 /*
69866 * set_policy() op must add a reference to any non-NULL @new mempolicy
69867@@ -231,6 +236,7 @@ struct vm_operations_struct {
69868 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
69869 unsigned long size, pgoff_t pgoff);
69870 };
69871+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
69872
69873 struct mmu_gather;
69874 struct inode;
69875@@ -995,8 +1001,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
69876 unsigned long *pfn);
69877 int follow_phys(struct vm_area_struct *vma, unsigned long address,
69878 unsigned int flags, unsigned long *prot, resource_size_t *phys);
69879-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
69880- void *buf, int len, int write);
69881+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
69882+ void *buf, size_t len, int write);
69883
69884 static inline void unmap_shared_mapping_range(struct address_space *mapping,
69885 loff_t const holebegin, loff_t const holelen)
69886@@ -1035,10 +1041,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
69887 }
69888 #endif
69889
69890-extern int make_pages_present(unsigned long addr, unsigned long end);
69891-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
69892-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
69893- void *buf, int len, int write);
69894+extern ssize_t make_pages_present(unsigned long addr, unsigned long end);
69895+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
69896+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
69897+ void *buf, size_t len, int write);
69898
69899 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69900 unsigned long start, int len, unsigned int foll_flags,
69901@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
69902 int set_page_dirty_lock(struct page *page);
69903 int clear_page_dirty_for_io(struct page *page);
69904
69905-/* Is the vma a continuation of the stack vma above it? */
69906-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
69907-{
69908- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
69909-}
69910-
69911-static inline int stack_guard_page_start(struct vm_area_struct *vma,
69912- unsigned long addr)
69913-{
69914- return (vma->vm_flags & VM_GROWSDOWN) &&
69915- (vma->vm_start == addr) &&
69916- !vma_growsdown(vma->vm_prev, addr);
69917-}
69918-
69919-/* Is the vma a continuation of the stack vma below it? */
69920-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
69921-{
69922- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
69923-}
69924-
69925-static inline int stack_guard_page_end(struct vm_area_struct *vma,
69926- unsigned long addr)
69927-{
69928- return (vma->vm_flags & VM_GROWSUP) &&
69929- (vma->vm_end == addr) &&
69930- !vma_growsup(vma->vm_next, addr);
69931-}
69932-
69933 extern pid_t
69934 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
69935
69936@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
69937 }
69938 #endif
69939
69940+#ifdef CONFIG_MMU
69941+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
69942+#else
69943+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
69944+{
69945+ return __pgprot(0);
69946+}
69947+#endif
69948+
69949 int vma_wants_writenotify(struct vm_area_struct *vma);
69950
69951 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
69952@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
69953 {
69954 return 0;
69955 }
69956+
69957+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
69958+ unsigned long address)
69959+{
69960+ return 0;
69961+}
69962 #else
69963 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69964+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69965 #endif
69966
69967 #ifdef __PAGETABLE_PMD_FOLDED
69968@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
69969 {
69970 return 0;
69971 }
69972+
69973+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
69974+ unsigned long address)
69975+{
69976+ return 0;
69977+}
69978 #else
69979 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
69980+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
69981 #endif
69982
69983 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
69984@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
69985 NULL: pud_offset(pgd, address);
69986 }
69987
69988+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
69989+{
69990+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
69991+ NULL: pud_offset(pgd, address);
69992+}
69993+
69994 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
69995 {
69996 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
69997 NULL: pmd_offset(pud, address);
69998 }
69999+
70000+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70001+{
70002+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
70003+ NULL: pmd_offset(pud, address);
70004+}
70005 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
70006
70007 #if USE_SPLIT_PTLOCKS
70008@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
70009 unsigned long, unsigned long,
70010 unsigned long, unsigned long);
70011 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
70012+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
70013
70014 /* These take the mm semaphore themselves */
70015 extern unsigned long vm_brk(unsigned long, unsigned long);
70016@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
70017 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
70018 struct vm_area_struct **pprev);
70019
70020+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
70021+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
70022+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
70023+
70024 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
70025 NULL if none. Assume start_addr < end_addr. */
70026 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
70027@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
70028 return vma;
70029 }
70030
70031-#ifdef CONFIG_MMU
70032-pgprot_t vm_get_page_prot(unsigned long vm_flags);
70033-#else
70034-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
70035-{
70036- return __pgprot(0);
70037-}
70038-#endif
70039-
70040 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
70041 unsigned long change_prot_numa(struct vm_area_struct *vma,
70042 unsigned long start, unsigned long end);
70043@@ -1649,6 +1658,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
70044 static inline void vm_stat_account(struct mm_struct *mm,
70045 unsigned long flags, struct file *file, long pages)
70046 {
70047+
70048+#ifdef CONFIG_PAX_RANDMMAP
70049+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
70050+#endif
70051+
70052 mm->total_vm += pages;
70053 }
70054 #endif /* CONFIG_PROC_FS */
70055@@ -1721,7 +1735,7 @@ extern int unpoison_memory(unsigned long pfn);
70056 extern int sysctl_memory_failure_early_kill;
70057 extern int sysctl_memory_failure_recovery;
70058 extern void shake_page(struct page *p, int access);
70059-extern atomic_long_t mce_bad_pages;
70060+extern atomic_long_unchecked_t mce_bad_pages;
70061 extern int soft_offline_page(struct page *page, int flags);
70062
70063 extern void dump_page(struct page *page);
70064@@ -1752,5 +1766,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
70065 static inline bool page_is_guard(struct page *page) { return false; }
70066 #endif /* CONFIG_DEBUG_PAGEALLOC */
70067
70068+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70069+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
70070+#else
70071+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
70072+#endif
70073+
70074 #endif /* __KERNEL__ */
70075 #endif /* _LINUX_MM_H */
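Widening the access helpers from int to ssize_t/size_t means a large len can neither truncate nor be misread as negative on 64-bit. A caller sketch under the new signatures (task, address, and buffer are illustrative):

    char buf[128];
    ssize_t copied = access_process_vm(tsk, addr, buf, sizeof(buf), 0);
    if (copied < (ssize_t)sizeof(buf)) {
        /* short read: the target vma ended or access was denied */
    }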
70076diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
70077index f8f5162..3aaf20f 100644
70078--- a/include/linux/mm_types.h
70079+++ b/include/linux/mm_types.h
70080@@ -288,6 +288,8 @@ struct vm_area_struct {
70081 #ifdef CONFIG_NUMA
70082 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
70083 #endif
70084+
70085+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
70086 };
70087
70088 struct core_thread {
70089@@ -436,6 +438,24 @@ struct mm_struct {
70090 int first_nid;
70091 #endif
70092 struct uprobes_state uprobes_state;
70093+
70094+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70095+ unsigned long pax_flags;
70096+#endif
70097+
70098+#ifdef CONFIG_PAX_DLRESOLVE
70099+ unsigned long call_dl_resolve;
70100+#endif
70101+
70102+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
70103+ unsigned long call_syscall;
70104+#endif
70105+
70106+#ifdef CONFIG_PAX_ASLR
70107+ unsigned long delta_mmap; /* randomized offset */
70108+ unsigned long delta_stack; /* randomized offset */
70109+#endif
70110+
70111 };
70112
70113 /* first nid will either be a valid NID or one of these values */
70114diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
70115index c5d5278..f0b68c8 100644
70116--- a/include/linux/mmiotrace.h
70117+++ b/include/linux/mmiotrace.h
70118@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
70119 /* Called from ioremap.c */
70120 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
70121 void __iomem *addr);
70122-extern void mmiotrace_iounmap(volatile void __iomem *addr);
70123+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
70124
70125 /* For anyone to insert markers. Remember trailing newline. */
70126 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
70127@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
70128 {
70129 }
70130
70131-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
70132+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
70133 {
70134 }
70135
70136diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
70137index 73b64a3..6562925 100644
70138--- a/include/linux/mmzone.h
70139+++ b/include/linux/mmzone.h
70140@@ -412,7 +412,7 @@ struct zone {
70141 unsigned long flags; /* zone flags, see below */
70142
70143 /* Zone statistics */
70144- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70145+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70146
70147 /*
70148 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
70149diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
70150index fed3def..c933f99 100644
70151--- a/include/linux/mod_devicetable.h
70152+++ b/include/linux/mod_devicetable.h
70153@@ -12,7 +12,7 @@
70154 typedef unsigned long kernel_ulong_t;
70155 #endif
70156
70157-#define PCI_ANY_ID (~0)
70158+#define PCI_ANY_ID ((__u16)~0)
70159
70160 struct pci_device_id {
70161 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
70162@@ -139,7 +139,7 @@ struct usb_device_id {
70163 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
70164 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
70165
70166-#define HID_ANY_ID (~0)
70167+#define HID_ANY_ID (~0U)
70168 #define HID_BUS_ANY 0xffff
70169 #define HID_GROUP_ANY 0x0000
70170
70171@@ -498,7 +498,7 @@ struct dmi_system_id {
70172 const char *ident;
70173 struct dmi_strmatch matches[4];
70174 void *driver_data;
70175-};
70176+} __do_const;
70177 /*
70178 * struct dmi_device_id appears during expansion of
70179 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
70180diff --git a/include/linux/module.h b/include/linux/module.h
70181index 1375ee3..ced8177 100644
70182--- a/include/linux/module.h
70183+++ b/include/linux/module.h
70184@@ -17,9 +17,11 @@
70185 #include <linux/moduleparam.h>
70186 #include <linux/tracepoint.h>
70187 #include <linux/export.h>
70188+#include <linux/fs.h>
70189
70190 #include <linux/percpu.h>
70191 #include <asm/module.h>
70192+#include <asm/pgtable.h>
70193
70194 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
70195 #define MODULE_SIG_STRING "~Module signature appended~\n"
70196@@ -54,12 +56,13 @@ struct module_attribute {
70197 int (*test)(struct module *);
70198 void (*free)(struct module *);
70199 };
70200+typedef struct module_attribute __no_const module_attribute_no_const;
70201
70202 struct module_version_attribute {
70203 struct module_attribute mattr;
70204 const char *module_name;
70205 const char *version;
70206-} __attribute__ ((__aligned__(sizeof(void *))));
70207+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
70208
70209 extern ssize_t __modver_version_show(struct module_attribute *,
70210 struct module_kobject *, char *);
70211@@ -232,7 +235,7 @@ struct module
70212
70213 /* Sysfs stuff. */
70214 struct module_kobject mkobj;
70215- struct module_attribute *modinfo_attrs;
70216+ module_attribute_no_const *modinfo_attrs;
70217 const char *version;
70218 const char *srcversion;
70219 struct kobject *holders_dir;
70220@@ -281,19 +284,16 @@ struct module
70221 int (*init)(void);
70222
70223 /* If this is non-NULL, vfree after init() returns */
70224- void *module_init;
70225+ void *module_init_rx, *module_init_rw;
70226
70227 /* Here is the actual code + data, vfree'd on unload. */
70228- void *module_core;
70229+ void *module_core_rx, *module_core_rw;
70230
70231 /* Here are the sizes of the init and core sections */
70232- unsigned int init_size, core_size;
70233+ unsigned int init_size_rw, core_size_rw;
70234
70235 /* The size of the executable code in each section. */
70236- unsigned int init_text_size, core_text_size;
70237-
70238- /* Size of RO sections of the module (text+rodata) */
70239- unsigned int init_ro_size, core_ro_size;
70240+ unsigned int init_size_rx, core_size_rx;
70241
70242 /* Arch-specific module values */
70243 struct mod_arch_specific arch;
70244@@ -349,6 +349,10 @@ struct module
70245 #ifdef CONFIG_EVENT_TRACING
70246 struct ftrace_event_call **trace_events;
70247 unsigned int num_trace_events;
70248+ struct file_operations trace_id;
70249+ struct file_operations trace_enable;
70250+ struct file_operations trace_format;
70251+ struct file_operations trace_filter;
70252 #endif
70253 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
70254 unsigned int num_ftrace_callsites;
70255@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
70256 bool is_module_percpu_address(unsigned long addr);
70257 bool is_module_text_address(unsigned long addr);
70258
70259+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
70260+{
70261+
70262+#ifdef CONFIG_PAX_KERNEXEC
70263+ if (ktla_ktva(addr) >= (unsigned long)start &&
70264+ ktla_ktva(addr) < (unsigned long)start + size)
70265+ return 1;
70266+#endif
70267+
70268+ return ((void *)addr >= start && (void *)addr < start + size);
70269+}
70270+
70271+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
70272+{
70273+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
70274+}
70275+
70276+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
70277+{
70278+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
70279+}
70280+
70281+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
70282+{
70283+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
70284+}
70285+
70286+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
70287+{
70288+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
70289+}
70290+
70291 static inline int within_module_core(unsigned long addr, struct module *mod)
70292 {
70293- return (unsigned long)mod->module_core <= addr &&
70294- addr < (unsigned long)mod->module_core + mod->core_size;
70295+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
70296 }
70297
70298 static inline int within_module_init(unsigned long addr, struct module *mod)
70299 {
70300- return (unsigned long)mod->module_init <= addr &&
70301- addr < (unsigned long)mod->module_init + mod->init_size;
70302+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
70303 }
70304
70305 /* Search for module by name: must hold module_mutex. */
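
The module.h changes above split each module mapping into separate read-execute (rx) and read-write (rw) regions and rewrite the containment helpers as a union of per-region range checks (the KERNEXEC ktla_ktva remapping case is elided here). A minimal userspace sketch of that decomposition, with illustrative stand-in names rather than kernel API:

    #include <stdint.h>
    #include <stdio.h>

    /* Reduced stand-in for the split module layout in the patch:
     * one read-execute region and one read-write region per module. */
    struct fake_module {
        void *core_rx; unsigned long size_rx;
        void *core_rw; unsigned long size_rw;
    };

    static int within_range(uintptr_t addr, const void *start, unsigned long size)
    {
        uintptr_t s = (uintptr_t)start;
        return addr >= s && addr < s + size;
    }

    static int within_core(uintptr_t addr, const struct fake_module *m)
    {
        /* same shape as the patched within_module_core():
         * a hit in either region counts */
        return within_range(addr, m->core_rx, m->size_rx) ||
               within_range(addr, m->core_rw, m->size_rw);
    }

    int main(void)
    {
        static char rx[64], rw[64];
        struct fake_module m = { rx, sizeof rx, rw, sizeof rw };

        printf("%d %d %d\n",
               within_core((uintptr_t)&rx[10], &m),   /* 1 */
               within_core((uintptr_t)&rw[63], &m),   /* 1 */
               within_core((uintptr_t)&m, &m));       /* 0 */
        return 0;
    }
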
70306diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
70307index 560ca53..ef621ef 100644
70308--- a/include/linux/moduleloader.h
70309+++ b/include/linux/moduleloader.h
70310@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
70311 sections. Returns NULL on failure. */
70312 void *module_alloc(unsigned long size);
70313
70314+#ifdef CONFIG_PAX_KERNEXEC
70315+void *module_alloc_exec(unsigned long size);
70316+#else
70317+#define module_alloc_exec(x) module_alloc(x)
70318+#endif
70319+
70320 /* Free memory returned from module_alloc. */
70321 void module_free(struct module *mod, void *module_region);
70322
70323+#ifdef CONFIG_PAX_KERNEXEC
70324+void module_free_exec(struct module *mod, void *module_region);
70325+#else
70326+#define module_free_exec(x, y) module_free((x), (y))
70327+#endif
70328+
70329 /*
70330 * Apply the given relocation to the (simplified) ELF. Return -error
70331 * or 0.
70332@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
70333 unsigned int relsec,
70334 struct module *me)
70335 {
70336+#ifdef CONFIG_MODULES
70337 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70338+#endif
70339 return -ENOEXEC;
70340 }
70341 #endif
70342@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
70343 unsigned int relsec,
70344 struct module *me)
70345 {
70346+#ifdef CONFIG_MODULES
70347 printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
70348+#endif
70349 return -ENOEXEC;
70350 }
70351 #endif
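
The module_alloc_exec()/module_free_exec() pair added above degrades to the plain allocator when PAX_KERNEXEC is off, via fallback macros. A small sketch of the same idiom under an assumed FEATURE_SPLIT_EXEC switch (all names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* When the feature macro is unset, the _exec variants simply
     * alias the plain allocator, mirroring the hunk above. */
    void *region_alloc(unsigned long size) { return malloc(size); }
    void  region_free(void *p)             { free(p); }

    #ifdef FEATURE_SPLIT_EXEC
    void *region_alloc_exec(unsigned long size); /* separate exec pool */
    void  region_free_exec(void *p);
    #else
    #define region_alloc_exec(x)  region_alloc(x)
    #define region_free_exec(p)   region_free(p)
    #endif

    int main(void)
    {
        const char *path =
    #ifdef FEATURE_SPLIT_EXEC
            "dedicated";
    #else
            "fallback";
    #endif
        void *p = region_alloc_exec(128);
        printf("allocated %p via %s path\n", p, path);
        region_free_exec(p);
        return 0;
    }
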
70352diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
70353index 137b419..fe663ec 100644
70354--- a/include/linux/moduleparam.h
70355+++ b/include/linux/moduleparam.h
70356@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
70357 * @len is usually just sizeof(string).
70358 */
70359 #define module_param_string(name, string, len, perm) \
70360- static const struct kparam_string __param_string_##name \
70361+ static const struct kparam_string __param_string_##name __used \
70362 = { len, string }; \
70363 __module_param_call(MODULE_PARAM_PREFIX, name, \
70364 &param_ops_string, \
70365@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
70366 */
70367 #define module_param_array_named(name, array, type, nump, perm) \
70368 param_check_##type(name, &(array)[0]); \
70369- static const struct kparam_array __param_arr_##name \
70370+ static const struct kparam_array __param_arr_##name __used \
70371 = { .max = ARRAY_SIZE(array), .num = nump, \
70372 .ops = &param_ops_##type, \
70373 .elemsize = sizeof(array[0]), .elem = array }; \
70374diff --git a/include/linux/namei.h b/include/linux/namei.h
70375index 5a5ff57..5ae5070 100644
70376--- a/include/linux/namei.h
70377+++ b/include/linux/namei.h
70378@@ -19,7 +19,7 @@ struct nameidata {
70379 unsigned seq;
70380 int last_type;
70381 unsigned depth;
70382- char *saved_names[MAX_NESTED_LINKS + 1];
70383+ const char *saved_names[MAX_NESTED_LINKS + 1];
70384 };
70385
70386 /*
70387@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
70388
70389 extern void nd_jump_link(struct nameidata *nd, struct path *path);
70390
70391-static inline void nd_set_link(struct nameidata *nd, char *path)
70392+static inline void nd_set_link(struct nameidata *nd, const char *path)
70393 {
70394 nd->saved_names[nd->depth] = path;
70395 }
70396
70397-static inline char *nd_get_link(struct nameidata *nd)
70398+static inline const char *nd_get_link(const struct nameidata *nd)
70399 {
70400 return nd->saved_names[nd->depth];
70401 }
70402diff --git a/include/linux/net.h b/include/linux/net.h
70403index aa16731..514b875 100644
70404--- a/include/linux/net.h
70405+++ b/include/linux/net.h
70406@@ -183,7 +183,7 @@ struct net_proto_family {
70407 int (*create)(struct net *net, struct socket *sock,
70408 int protocol, int kern);
70409 struct module *owner;
70410-};
70411+} __do_const;
70412
70413 struct iovec;
70414 struct kvec;
70415diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
70416index 9ef07d0..130a5d9 100644
70417--- a/include/linux/netdevice.h
70418+++ b/include/linux/netdevice.h
70419@@ -1012,6 +1012,7 @@ struct net_device_ops {
70420 u32 pid, u32 seq,
70421 struct net_device *dev);
70422 };
70423+typedef struct net_device_ops __no_const net_device_ops_no_const;
70424
70425 /*
70426 * The DEVICE structure.
70427@@ -1078,7 +1079,7 @@ struct net_device {
70428 int iflink;
70429
70430 struct net_device_stats stats;
70431- atomic_long_t rx_dropped; /* dropped packets by core network
70432+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
70433 * Do not use this in drivers.
70434 */
70435
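
rx_dropped above is a pure statistics counter, so the patch converts it to atomic_long_unchecked_t, the PaX type that opts out of refcount-overflow trapping; genuine reference counts keep the checked type. A userspace sketch of the distinction using C11 atomics (the checked/unchecked split itself is PaX-specific and only modelled by the comments):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Checked counter: overflow would indicate a refcount bug, so a
     * hardened kernel traps on it. Unchecked counter: wrapping is
     * harmless (statistics), so no trap is wanted. Both are plain
     * C11 atomics here; the checked/unchecked split is the PaX part. */
    static atomic_long rx_dropped;   /* statistics: wrap is fine */
    static atomic_long refcount = 1; /* lifetime: wrap is a bug  */

    int main(void)
    {
        atomic_fetch_add(&rx_dropped, 1);
        atomic_fetch_add(&refcount, 1);
        printf("dropped=%ld refs=%ld\n",
               atomic_load(&rx_dropped), atomic_load(&refcount));
        return 0;
    }
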
70436diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
70437index ee14284..bc65d63 100644
70438--- a/include/linux/netfilter.h
70439+++ b/include/linux/netfilter.h
70440@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
70441 #endif
70442 /* Use the module struct to lock set/get code in place */
70443 struct module *owner;
70444-};
70445+} __do_const;
70446
70447 /* Function to register/unregister hook points. */
70448 int nf_register_hook(struct nf_hook_ops *reg);
70449diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
70450index 7958e84..ed74d7a 100644
70451--- a/include/linux/netfilter/ipset/ip_set.h
70452+++ b/include/linux/netfilter/ipset/ip_set.h
70453@@ -98,7 +98,7 @@ struct ip_set_type_variant {
70454 /* Return true if "b" set is the same as "a"
70455 * according to the create set parameters */
70456 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
70457-};
70458+} __do_const;
70459
70460 /* The core set type structure */
70461 struct ip_set_type {
70462diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
70463index 4966dde..7d8ce06 100644
70464--- a/include/linux/netfilter/nfnetlink.h
70465+++ b/include/linux/netfilter/nfnetlink.h
70466@@ -16,7 +16,7 @@ struct nfnl_callback {
70467 const struct nlattr * const cda[]);
70468 const struct nla_policy *policy; /* netlink attribute policy */
70469 const u_int16_t attr_count; /* number of nlattr's */
70470-};
70471+} __do_const;
70472
70473 struct nfnetlink_subsystem {
70474 const char *name;
70475diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
70476new file mode 100644
70477index 0000000..33f4af8
70478--- /dev/null
70479+++ b/include/linux/netfilter/xt_gradm.h
70480@@ -0,0 +1,9 @@
70481+#ifndef _LINUX_NETFILTER_XT_GRADM_H
70482+#define _LINUX_NETFILTER_XT_GRADM_H 1
70483+
70484+struct xt_gradm_mtinfo {
70485+ __u16 flags;
70486+ __u16 invflags;
70487+};
70488+
70489+#endif
70490diff --git a/include/linux/nls.h b/include/linux/nls.h
70491index 5dc635f..35f5e11 100644
70492--- a/include/linux/nls.h
70493+++ b/include/linux/nls.h
70494@@ -31,7 +31,7 @@ struct nls_table {
70495 const unsigned char *charset2upper;
70496 struct module *owner;
70497 struct nls_table *next;
70498-};
70499+} __do_const;
70500
70501 /* this value hold the maximum octet of charset */
70502 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
70503diff --git a/include/linux/notifier.h b/include/linux/notifier.h
70504index d65746e..62e72c2 100644
70505--- a/include/linux/notifier.h
70506+++ b/include/linux/notifier.h
70507@@ -51,7 +51,8 @@ struct notifier_block {
70508 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
70509 struct notifier_block __rcu *next;
70510 int priority;
70511-};
70512+} __do_const;
70513+typedef struct notifier_block __no_const notifier_block_no_const;
70514
70515 struct atomic_notifier_head {
70516 spinlock_t lock;
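
The __do_const annotation recurring throughout these hunks is consumed by the PaX constify gcc plugin: instances of the marked ops structs are placed in read-only memory, while the companion *_no_const typedef escapes that for the few objects that must be built at runtime. A rough plain-C analogue of the split, using ordinary const:

    #include <stdio.h>

    /* Ops tables are normally set up once and never change, so the
     * patch constifies the struct; the rare runtime-built instance
     * uses the _no_const typedef. Plain-C approximation: make the
     * canonical instance const so it lives in .rodata. */
    struct notifier_ops {
        int (*call)(unsigned long event);
        int priority;
    };

    static int log_event(unsigned long event)
    {
        printf("event %lu\n", event);
        return 0;
    }

    /* const: the common case */
    static const struct notifier_ops static_ops = { log_event, 10 };

    int main(void)
    {
        /* non-const: the rare runtime-constructed case */
        struct notifier_ops dynamic_ops = static_ops;
        dynamic_ops.priority = 99;
        return static_ops.call(1) | dynamic_ops.call(2);
    }
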
70517diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
70518index a4c5624..79d6d88 100644
70519--- a/include/linux/oprofile.h
70520+++ b/include/linux/oprofile.h
70521@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
70522 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
70523 char const * name, ulong * val);
70524
70525-/** Create a file for read-only access to an atomic_t. */
70526+/** Create a file for read-only access to an atomic_unchecked_t. */
70527 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
70528- char const * name, atomic_t * val);
70529+ char const * name, atomic_unchecked_t * val);
70530
70531 /** create a directory */
70532 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
70533diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
70534index 45fc162..01a4068 100644
70535--- a/include/linux/pci_hotplug.h
70536+++ b/include/linux/pci_hotplug.h
70537@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
70538 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
70539 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
70540 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
70541-};
70542+} __do_const;
70543+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
70544
70545 /**
70546 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
70547diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
70548index a280650..2b67b91 100644
70549--- a/include/linux/perf_event.h
70550+++ b/include/linux/perf_event.h
70551@@ -328,8 +328,8 @@ struct perf_event {
70552
70553 enum perf_event_active_state state;
70554 unsigned int attach_state;
70555- local64_t count;
70556- atomic64_t child_count;
70557+ local64_t count; /* PaX: fix it one day */
70558+ atomic64_unchecked_t child_count;
70559
70560 /*
70561 * These are the total time in nanoseconds that the event
70562@@ -380,8 +380,8 @@ struct perf_event {
70563 * These accumulate total time (in nanoseconds) that children
70564 * events have been enabled and running, respectively.
70565 */
70566- atomic64_t child_total_time_enabled;
70567- atomic64_t child_total_time_running;
70568+ atomic64_unchecked_t child_total_time_enabled;
70569+ atomic64_unchecked_t child_total_time_running;
70570
70571 /*
70572 * Protect attach/detach and child_list:
70573@@ -807,7 +807,7 @@ static inline void perf_restore_debug_store(void) { }
70574 */
70575 #define perf_cpu_notifier(fn) \
70576 do { \
70577- static struct notifier_block fn##_nb __cpuinitdata = \
70578+ static struct notifier_block fn##_nb = \
70579 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
70580 unsigned long cpu = smp_processor_id(); \
70581 unsigned long flags; \
70582diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
70583index ad1a427..6419649 100644
70584--- a/include/linux/pipe_fs_i.h
70585+++ b/include/linux/pipe_fs_i.h
70586@@ -45,9 +45,9 @@ struct pipe_buffer {
70587 struct pipe_inode_info {
70588 wait_queue_head_t wait;
70589 unsigned int nrbufs, curbuf, buffers;
70590- unsigned int readers;
70591- unsigned int writers;
70592- unsigned int waiting_writers;
70593+ atomic_t readers;
70594+ atomic_t writers;
70595+ atomic_t waiting_writers;
70596 unsigned int r_counter;
70597 unsigned int w_counter;
70598 struct page *tmp_page;
70599diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
70600index 5f28cae..3d23723 100644
70601--- a/include/linux/platform_data/usb-ehci-s5p.h
70602+++ b/include/linux/platform_data/usb-ehci-s5p.h
70603@@ -14,7 +14,7 @@
70604 struct s5p_ehci_platdata {
70605 int (*phy_init)(struct platform_device *pdev, int type);
70606 int (*phy_exit)(struct platform_device *pdev, int type);
70607-};
70608+} __no_const;
70609
70610 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
70611
70612diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
70613index c256c59..8ea94c7 100644
70614--- a/include/linux/platform_data/usb-exynos.h
70615+++ b/include/linux/platform_data/usb-exynos.h
70616@@ -14,7 +14,7 @@
70617 struct exynos4_ohci_platdata {
70618 int (*phy_init)(struct platform_device *pdev, int type);
70619 int (*phy_exit)(struct platform_device *pdev, int type);
70620-};
70621+} __no_const;
70622
70623 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
70624
70625diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
70626index 7c1d252..c5c773e 100644
70627--- a/include/linux/pm_domain.h
70628+++ b/include/linux/pm_domain.h
70629@@ -48,7 +48,7 @@ struct gpd_dev_ops {
70630
70631 struct gpd_cpu_data {
70632 unsigned int saved_exit_latency;
70633- struct cpuidle_state *idle_state;
70634+ cpuidle_state_no_const *idle_state;
70635 };
70636
70637 struct generic_pm_domain {
70638diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
70639index f271860..6b3bec5 100644
70640--- a/include/linux/pm_runtime.h
70641+++ b/include/linux/pm_runtime.h
70642@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
70643
70644 static inline void pm_runtime_mark_last_busy(struct device *dev)
70645 {
70646- ACCESS_ONCE(dev->power.last_busy) = jiffies;
70647+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
70648 }
70649
70650 #else /* !CONFIG_PM_RUNTIME */
70651diff --git a/include/linux/pnp.h b/include/linux/pnp.h
70652index 195aafc..49a7bc2 100644
70653--- a/include/linux/pnp.h
70654+++ b/include/linux/pnp.h
70655@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
70656 struct pnp_fixup {
70657 char id[7];
70658 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
70659-};
70660+} __do_const;
70661
70662 /* config parameters */
70663 #define PNP_CONFIG_NORMAL 0x0001
70664diff --git a/include/linux/poison.h b/include/linux/poison.h
70665index 2110a81..13a11bb 100644
70666--- a/include/linux/poison.h
70667+++ b/include/linux/poison.h
70668@@ -19,8 +19,8 @@
70669 * under normal circumstances, used to verify that nobody uses
70670 * non-initialized list entries.
70671 */
70672-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
70673-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
70674+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
70675+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
70676
70677 /********** include/linux/timer.h **********/
70678 /*
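
Upstream's LIST_POISON values sit at low addresses that, depending on mmap_min_addr and architecture, an attacker may be able to map; the replacement constants are chosen to be much harder to reach, so a stale ->next/->prev dereference faults instead of landing in attacker-controlled memory. A sketch of poison-on-unlink with the patched values (whether they are mappable remains arch-specific):

    #include <stdio.h>

    /* Poison values as in the hunk above: after unlinking, ->next
     * and ->prev point at addresses that should never be valid, so
     * any later list walk through the stale node faults. */
    #define LIST_POISON1 ((void *)(long)0xFFFFFF01)
    #define LIST_POISON2 ((void *)(long)0xFFFFFF02)

    struct list_head { struct list_head *next, *prev; };

    static void list_del_poison(struct list_head *entry)
    {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        entry->next = LIST_POISON1;  /* any later walk faults here */
        entry->prev = LIST_POISON2;
    }

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct list_head node = { &head, &head };
        head.next = head.prev = &node;

        list_del_poison(&node);
        printf("poisoned next=%p prev=%p\n", node.next, node.prev);
        return 0;
    }
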
70679diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
70680index c0f44c2..1572583 100644
70681--- a/include/linux/power/smartreflex.h
70682+++ b/include/linux/power/smartreflex.h
70683@@ -238,7 +238,7 @@ struct omap_sr_class_data {
70684 int (*notify)(struct omap_sr *sr, u32 status);
70685 u8 notify_flags;
70686 u8 class_type;
70687-};
70688+} __do_const;
70689
70690 /**
70691 * struct omap_sr_nvalue_table - Smartreflex n-target value info
70692diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
70693index 4ea1d37..80f4b33 100644
70694--- a/include/linux/ppp-comp.h
70695+++ b/include/linux/ppp-comp.h
70696@@ -84,7 +84,7 @@ struct compressor {
70697 struct module *owner;
70698 /* Extra skb space needed by the compressor algorithm */
70699 unsigned int comp_extra;
70700-};
70701+} __do_const;
70702
70703 /*
70704 * The return value from decompress routine is the length of the
70705diff --git a/include/linux/printk.h b/include/linux/printk.h
70706index 9afc01e..92c32e8 100644
70707--- a/include/linux/printk.h
70708+++ b/include/linux/printk.h
70709@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
70710 extern int printk_needs_cpu(int cpu);
70711 extern void printk_tick(void);
70712
70713+extern int kptr_restrict;
70714+
70715 #ifdef CONFIG_PRINTK
70716 asmlinkage __printf(5, 0)
70717 int vprintk_emit(int facility, int level,
70718@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
70719
70720 extern int printk_delay_msec;
70721 extern int dmesg_restrict;
70722-extern int kptr_restrict;
70723
70724 void log_buf_kexec_setup(void);
70725 void __init setup_log_buf(int early);
70726diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
70727index 32676b3..e46f2c0 100644
70728--- a/include/linux/proc_fs.h
70729+++ b/include/linux/proc_fs.h
70730@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
70731 return proc_create_data(name, mode, parent, proc_fops, NULL);
70732 }
70733
70734+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
70735+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
70736+{
70737+#ifdef CONFIG_GRKERNSEC_PROC_USER
70738+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
70739+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70740+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
70741+#else
70742+ return proc_create_data(name, mode, parent, proc_fops, NULL);
70743+#endif
70744+}
70745+
70746 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
70747 umode_t mode, struct proc_dir_entry *base,
70748 read_proc_t *read_proc, void * data)
70749@@ -268,7 +280,7 @@ struct proc_ns_operations {
70750 void (*put)(void *ns);
70751 int (*install)(struct nsproxy *nsproxy, void *ns);
70752 unsigned int (*inum)(void *ns);
70753-};
70754+} __do_const;
70755 extern const struct proc_ns_operations netns_operations;
70756 extern const struct proc_ns_operations utsns_operations;
70757 extern const struct proc_ns_operations ipcns_operations;
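
proc_create_grsec() above clamps the caller's mode according to the configured /proc restriction level: owner-only under GRKERNSEC_PROC_USER, owner+group under GRKERNSEC_PROC_USERGROUP, otherwise untouched. The clamp in isolation, with the compile-time config modelled as a runtime enum for demonstration:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Sketch of the mode clamp: the requested mode collapses to
     * owner-only or owner+group read depending on restriction level. */
    enum proc_restrict { PROC_OPEN, PROC_USERGROUP, PROC_USER };

    static mode_t clamp_proc_mode(enum proc_restrict level, mode_t requested)
    {
        switch (level) {
        case PROC_USER:      return S_IRUSR;            /* 0400 */
        case PROC_USERGROUP: return S_IRUSR | S_IRGRP;  /* 0440 */
        default:             return requested;
        }
    }

    int main(void)
    {
        printf("%04o %04o %04o\n",
               (unsigned)clamp_proc_mode(PROC_OPEN, 0644),
               (unsigned)clamp_proc_mode(PROC_USERGROUP, 0644),
               (unsigned)clamp_proc_mode(PROC_USER, 0644));
        return 0;
    }
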
70758diff --git a/include/linux/random.h b/include/linux/random.h
70759index d984608..d6f0042 100644
70760--- a/include/linux/random.h
70761+++ b/include/linux/random.h
70762@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
70763 u32 prandom_u32_state(struct rnd_state *);
70764 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
70765
70766+static inline unsigned long pax_get_random_long(void)
70767+{
70768+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
70769+}
70770+
70771 /*
70772 * Handle minimum values for seeds
70773 */
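
pax_get_random_long() widens two 32-bit prandom_u32() draws into a full random long on 64-bit and uses a single draw on 32-bit; the sizeof(long) > 4 test constant-folds away. A userspace analogue with rand() standing in for prandom_u32() (illustration only, not a usable entropy source):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static unsigned long get_random_long(void)
    {
        unsigned long r = (unsigned int)rand();
        if (sizeof(long) > 4)  /* constant-folds on either width */
            /* << 31 << 1 instead of << 32 avoids an invalid shift
             * count when unsigned long is 32 bits wide */
            r += (unsigned long)(unsigned int)rand() << 31 << 1;
        return r;
    }

    int main(void)
    {
        srand((unsigned)time(NULL));
        printf("%#lx\n", get_random_long());
        return 0;
    }
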
70774diff --git a/include/linux/rculist.h b/include/linux/rculist.h
70775index c92dd28..08f4eab 100644
70776--- a/include/linux/rculist.h
70777+++ b/include/linux/rculist.h
70778@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
70779 struct list_head *prev, struct list_head *next);
70780 #endif
70781
70782+extern void __pax_list_add_rcu(struct list_head *new,
70783+ struct list_head *prev, struct list_head *next);
70784+
70785 /**
70786 * list_add_rcu - add a new entry to rcu-protected list
70787 * @new: new entry to be added
70788@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
70789 __list_add_rcu(new, head, head->next);
70790 }
70791
70792+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
70793+{
70794+ __pax_list_add_rcu(new, head, head->next);
70795+}
70796+
70797 /**
70798 * list_add_tail_rcu - add a new entry to rcu-protected list
70799 * @new: new entry to be added
70800@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
70801 __list_add_rcu(new, head->prev, head);
70802 }
70803
70804+static inline void pax_list_add_tail_rcu(struct list_head *new,
70805+ struct list_head *head)
70806+{
70807+ __pax_list_add_rcu(new, head->prev, head);
70808+}
70809+
70810 /**
70811 * list_del_rcu - deletes entry from list without re-initialization
70812 * @entry: the element to delete from the list.
70813@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
70814 entry->prev = LIST_POISON2;
70815 }
70816
70817+extern void pax_list_del_rcu(struct list_head *entry);
70818+
70819 /**
70820 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
70821 * @n: the element to delete from the hash list.
70822diff --git a/include/linux/reboot.h b/include/linux/reboot.h
70823index 23b3630..e1bc12b 100644
70824--- a/include/linux/reboot.h
70825+++ b/include/linux/reboot.h
70826@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
70827 * Architecture-specific implementations of sys_reboot commands.
70828 */
70829
70830-extern void machine_restart(char *cmd);
70831-extern void machine_halt(void);
70832-extern void machine_power_off(void);
70833+extern void machine_restart(char *cmd) __noreturn;
70834+extern void machine_halt(void) __noreturn;
70835+extern void machine_power_off(void) __noreturn;
70836
70837 extern void machine_shutdown(void);
70838 struct pt_regs;
70839@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
70840 */
70841
70842 extern void kernel_restart_prepare(char *cmd);
70843-extern void kernel_restart(char *cmd);
70844-extern void kernel_halt(void);
70845-extern void kernel_power_off(void);
70846+extern void kernel_restart(char *cmd) __noreturn;
70847+extern void kernel_halt(void) __noreturn;
70848+extern void kernel_power_off(void) __noreturn;
70849
70850 extern int C_A_D; /* for sysctl */
70851 void ctrl_alt_del(void);
70852@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
70853 * Emergency restart, callable from an interrupt handler.
70854 */
70855
70856-extern void emergency_restart(void);
70857+extern void emergency_restart(void) __noreturn;
70858 #include <asm/emergency-restart.h>
70859
70860 #endif /* _LINUX_REBOOT_H */
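
Marking the halt/restart entry points __noreturn lets the compiler drop unreachable code after the call sites and diagnose paths that could fall through. The effect of the attribute in a minimal sketch:

    #include <stdio.h>
    #include <stdlib.h>

    /* As with the annotations in the hunk above: callers need no
     * return path after the call, and the compiler warns if the
     * function could actually fall off the end. */
    __attribute__((noreturn)) static void die(const char *msg)
    {
        fprintf(stderr, "fatal: %s\n", msg);
        exit(1);
    }

    static int must_be_positive(int v)
    {
        if (v <= 0)
            die("bad value");   /* no return needed after this */
        return v;               /* reachable only for v > 0 */
    }

    int main(void)
    {
        printf("%d\n", must_be_positive(7));
        return 0;
    }
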
70861diff --git a/include/linux/regset.h b/include/linux/regset.h
70862index 8e0c9fe..ac4d221 100644
70863--- a/include/linux/regset.h
70864+++ b/include/linux/regset.h
70865@@ -161,7 +161,8 @@ struct user_regset {
70866 unsigned int align;
70867 unsigned int bias;
70868 unsigned int core_note_type;
70869-};
70870+} __do_const;
70871+typedef struct user_regset __no_const user_regset_no_const;
70872
70873 /**
70874 * struct user_regset_view - available regsets
70875diff --git a/include/linux/relay.h b/include/linux/relay.h
70876index 91cacc3..b55ff74 100644
70877--- a/include/linux/relay.h
70878+++ b/include/linux/relay.h
70879@@ -160,7 +160,7 @@ struct rchan_callbacks
70880 * The callback should return 0 if successful, negative if not.
70881 */
70882 int (*remove_buf_file)(struct dentry *dentry);
70883-};
70884+} __no_const;
70885
70886 /*
70887 * CONFIG_RELAY kernel API, kernel/relay.c
70888diff --git a/include/linux/rio.h b/include/linux/rio.h
70889index a3e7842..d973ca6 100644
70890--- a/include/linux/rio.h
70891+++ b/include/linux/rio.h
70892@@ -339,7 +339,7 @@ struct rio_ops {
70893 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
70894 u64 rstart, u32 size, u32 flags);
70895 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
70896-};
70897+} __no_const;
70898
70899 #define RIO_RESOURCE_MEM 0x00000100
70900 #define RIO_RESOURCE_DOORBELL 0x00000200
70901diff --git a/include/linux/rmap.h b/include/linux/rmap.h
70902index c20635c..2f5def4 100644
70903--- a/include/linux/rmap.h
70904+++ b/include/linux/rmap.h
70905@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
70906 void anon_vma_init(void); /* create anon_vma_cachep */
70907 int anon_vma_prepare(struct vm_area_struct *);
70908 void unlink_anon_vmas(struct vm_area_struct *);
70909-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
70910-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
70911+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
70912+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
70913
70914 static inline void anon_vma_merge(struct vm_area_struct *vma,
70915 struct vm_area_struct *next)
70916diff --git a/include/linux/sched.h b/include/linux/sched.h
70917index d211247..eac6c2c 100644
70918--- a/include/linux/sched.h
70919+++ b/include/linux/sched.h
70920@@ -61,6 +61,7 @@ struct bio_list;
70921 struct fs_struct;
70922 struct perf_event_context;
70923 struct blk_plug;
70924+struct linux_binprm;
70925
70926 /*
70927 * List of flags we want to share for kernel threads,
70928@@ -327,7 +328,7 @@ extern char __sched_text_start[], __sched_text_end[];
70929 extern int in_sched_functions(unsigned long addr);
70930
70931 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
70932-extern signed long schedule_timeout(signed long timeout);
70933+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
70934 extern signed long schedule_timeout_interruptible(signed long timeout);
70935 extern signed long schedule_timeout_killable(signed long timeout);
70936 extern signed long schedule_timeout_uninterruptible(signed long timeout);
70937@@ -354,10 +355,23 @@ struct user_namespace;
70938 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
70939
70940 extern int sysctl_max_map_count;
70941+extern unsigned long sysctl_heap_stack_gap;
70942
70943 #include <linux/aio.h>
70944
70945 #ifdef CONFIG_MMU
70946+
70947+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
70948+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
70949+#else
70950+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
70951+{
70952+ return 0;
70953+}
70954+#endif
70955+
70956+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
70957+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
70958 extern void arch_pick_mmap_layout(struct mm_struct *mm);
70959 extern unsigned long
70960 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
70961@@ -639,6 +653,17 @@ struct signal_struct {
70962 #ifdef CONFIG_TASKSTATS
70963 struct taskstats *stats;
70964 #endif
70965+
70966+#ifdef CONFIG_GRKERNSEC
70967+ u32 curr_ip;
70968+ u32 saved_ip;
70969+ u32 gr_saddr;
70970+ u32 gr_daddr;
70971+ u16 gr_sport;
70972+ u16 gr_dport;
70973+ u8 used_accept:1;
70974+#endif
70975+
70976 #ifdef CONFIG_AUDIT
70977 unsigned audit_tty;
70978 struct tty_audit_buf *tty_audit_buf;
70979@@ -717,6 +742,11 @@ struct user_struct {
70980 struct key *session_keyring; /* UID's default session keyring */
70981 #endif
70982
70983+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
70984+ unsigned int banned;
70985+ unsigned long ban_expires;
70986+#endif
70987+
70988 /* Hash table maintenance information */
70989 struct hlist_node uidhash_node;
70990 kuid_t uid;
70991@@ -1116,7 +1146,7 @@ struct sched_class {
70992 #ifdef CONFIG_FAIR_GROUP_SCHED
70993 void (*task_move_group) (struct task_struct *p, int on_rq);
70994 #endif
70995-};
70996+} __do_const;
70997
70998 struct load_weight {
70999 unsigned long weight, inv_weight;
71000@@ -1360,8 +1390,8 @@ struct task_struct {
71001 struct list_head thread_group;
71002
71003 struct completion *vfork_done; /* for vfork() */
71004- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
71005- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71006+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
71007+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71008
71009 cputime_t utime, stime, utimescaled, stimescaled;
71010 cputime_t gtime;
71011@@ -1377,11 +1407,6 @@ struct task_struct {
71012 struct task_cputime cputime_expires;
71013 struct list_head cpu_timers[3];
71014
71015-/* process credentials */
71016- const struct cred __rcu *real_cred; /* objective and real subjective task
71017- * credentials (COW) */
71018- const struct cred __rcu *cred; /* effective (overridable) subjective task
71019- * credentials (COW) */
71020 char comm[TASK_COMM_LEN]; /* executable name excluding path
71021 - access with [gs]et_task_comm (which lock
71022 it with task_lock())
71023@@ -1398,6 +1423,10 @@ struct task_struct {
71024 #endif
71025 /* CPU-specific state of this task */
71026 struct thread_struct thread;
71027+/* thread_info moved to task_struct */
71028+#ifdef CONFIG_X86
71029+ struct thread_info tinfo;
71030+#endif
71031 /* filesystem information */
71032 struct fs_struct *fs;
71033 /* open file information */
71034@@ -1471,6 +1500,10 @@ struct task_struct {
71035 gfp_t lockdep_reclaim_gfp;
71036 #endif
71037
71038+/* process credentials */
71039+ const struct cred __rcu *real_cred; /* objective and real subjective task
71040+ * credentials (COW) */
71041+
71042 /* journalling filesystem info */
71043 void *journal_info;
71044
71045@@ -1509,6 +1542,10 @@ struct task_struct {
71046 /* cg_list protected by css_set_lock and tsk->alloc_lock */
71047 struct list_head cg_list;
71048 #endif
71049+
71050+ const struct cred __rcu *cred; /* effective (overridable) subjective task
71051+ * credentials (COW) */
71052+
71053 #ifdef CONFIG_FUTEX
71054 struct robust_list_head __user *robust_list;
71055 #ifdef CONFIG_COMPAT
71056@@ -1605,8 +1642,74 @@ struct task_struct {
71057 #ifdef CONFIG_UPROBES
71058 struct uprobe_task *utask;
71059 #endif
71060+
71061+#ifdef CONFIG_GRKERNSEC
71062+ /* grsecurity */
71063+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71064+ u64 exec_id;
71065+#endif
71066+#ifdef CONFIG_GRKERNSEC_SETXID
71067+ const struct cred *delayed_cred;
71068+#endif
71069+ struct dentry *gr_chroot_dentry;
71070+ struct acl_subject_label *acl;
71071+ struct acl_role_label *role;
71072+ struct file *exec_file;
71073+ unsigned long brute_expires;
71074+ u16 acl_role_id;
71075+ /* is this the task that authenticated to the special role */
71076+ u8 acl_sp_role;
71077+ u8 is_writable;
71078+ u8 brute;
71079+ u8 gr_is_chrooted;
71080+#endif
71081+
71082 };
71083
71084+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
71085+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
71086+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
71087+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
71088+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
71089+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
71090+
71091+#ifdef CONFIG_PAX_SOFTMODE
71092+extern int pax_softmode;
71093+#endif
71094+
71095+extern int pax_check_flags(unsigned long *);
71096+
71097+/* if tsk != current then task_lock must be held on it */
71098+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71099+static inline unsigned long pax_get_flags(struct task_struct *tsk)
71100+{
71101+ if (likely(tsk->mm))
71102+ return tsk->mm->pax_flags;
71103+ else
71104+ return 0UL;
71105+}
71106+
71107+/* if tsk != current then task_lock must be held on it */
71108+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
71109+{
71110+ if (likely(tsk->mm)) {
71111+ tsk->mm->pax_flags = flags;
71112+ return 0;
71113+ }
71114+ return -EINVAL;
71115+}
71116+#endif
71117+
71118+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
71119+extern void pax_set_initial_flags(struct linux_binprm *bprm);
71120+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
71121+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
71122+#endif
71123+
71124+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
71125+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
71126+extern void pax_report_refcount_overflow(struct pt_regs *regs);
71127+
71128 /* Future-safe accessor for struct task_struct's cpus_allowed. */
71129 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
71130
71131@@ -1696,7 +1799,7 @@ struct pid_namespace;
71132 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
71133 struct pid_namespace *ns);
71134
71135-static inline pid_t task_pid_nr(struct task_struct *tsk)
71136+static inline pid_t task_pid_nr(const struct task_struct *tsk)
71137 {
71138 return tsk->pid;
71139 }
71140@@ -2155,7 +2258,9 @@ void yield(void);
71141 extern struct exec_domain default_exec_domain;
71142
71143 union thread_union {
71144+#ifndef CONFIG_X86
71145 struct thread_info thread_info;
71146+#endif
71147 unsigned long stack[THREAD_SIZE/sizeof(long)];
71148 };
71149
71150@@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
71151 */
71152
71153 extern struct task_struct *find_task_by_vpid(pid_t nr);
71154+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
71155 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
71156 struct pid_namespace *ns);
71157
71158@@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
71159 extern void exit_itimers(struct signal_struct *);
71160 extern void flush_itimer_signals(void);
71161
71162-extern void do_group_exit(int);
71163+extern __noreturn void do_group_exit(int);
71164
71165 extern int allow_signal(int);
71166 extern int disallow_signal(int);
71167@@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
71168
71169 #endif
71170
71171-static inline int object_is_on_stack(void *obj)
71172+static inline int object_starts_on_stack(void *obj)
71173 {
71174- void *stack = task_stack_page(current);
71175+ const void *stack = task_stack_page(current);
71176
71177 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
71178 }
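
The MF_PAX_* constants added above pack the per-mm PaX feature bits into the high byte of mm->pax_flags, which pax_get_flags()/pax_set_flags() read and write under task_lock when tsk != current. Testing and setting those masks in isolation (fake_mm is a stand-in for mm_struct):

    #include <stdio.h>

    /* The MF_PAX_* masks from the hunk above, packed into one
     * flags word per mm. */
    #define MF_PAX_PAGEEXEC  0x01000000UL
    #define MF_PAX_EMUTRAMP  0x02000000UL
    #define MF_PAX_MPROTECT  0x04000000UL
    #define MF_PAX_RANDMMAP  0x08000000UL
    #define MF_PAX_SEGMEXEC  0x20000000UL

    struct fake_mm { unsigned long pax_flags; };

    static int pax_enabled(const struct fake_mm *mm, unsigned long mask)
    {
        return (mm->pax_flags & mask) != 0;
    }

    int main(void)
    {
        struct fake_mm mm = { 0 };

        mm.pax_flags |= MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP;

        printf("pageexec=%d segmexec=%d mprotect=%d\n",
               pax_enabled(&mm, MF_PAX_PAGEEXEC),
               pax_enabled(&mm, MF_PAX_SEGMEXEC),
               pax_enabled(&mm, MF_PAX_MPROTECT));
        return 0;
    }
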
71179diff --git a/include/linux/security.h b/include/linux/security.h
71180index eee7478..290f7ba 100644
71181--- a/include/linux/security.h
71182+++ b/include/linux/security.h
71183@@ -26,6 +26,7 @@
71184 #include <linux/capability.h>
71185 #include <linux/slab.h>
71186 #include <linux/err.h>
71187+#include <linux/grsecurity.h>
71188
71189 struct linux_binprm;
71190 struct cred;
71191diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
71192index 68a04a3..866e6a1 100644
71193--- a/include/linux/seq_file.h
71194+++ b/include/linux/seq_file.h
71195@@ -26,6 +26,9 @@ struct seq_file {
71196 struct mutex lock;
71197 const struct seq_operations *op;
71198 int poll_event;
71199+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71200+ u64 exec_id;
71201+#endif
71202 #ifdef CONFIG_USER_NS
71203 struct user_namespace *user_ns;
71204 #endif
71205@@ -38,6 +41,7 @@ struct seq_operations {
71206 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
71207 int (*show) (struct seq_file *m, void *v);
71208 };
71209+typedef struct seq_operations __no_const seq_operations_no_const;
71210
71211 #define SEQ_SKIP 1
71212
71213diff --git a/include/linux/shm.h b/include/linux/shm.h
71214index 429c199..4d42e38 100644
71215--- a/include/linux/shm.h
71216+++ b/include/linux/shm.h
71217@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
71218
71219 /* The task created the shm object. NULL if the task is dead. */
71220 struct task_struct *shm_creator;
71221+#ifdef CONFIG_GRKERNSEC
71222+ time_t shm_createtime;
71223+ pid_t shm_lapid;
71224+#endif
71225 };
71226
71227 /* shm_mode upper byte flags */
71228diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
71229index 98399e2..7c74c41 100644
71230--- a/include/linux/skbuff.h
71231+++ b/include/linux/skbuff.h
71232@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
71233 extern struct sk_buff *__alloc_skb(unsigned int size,
71234 gfp_t priority, int flags, int node);
71235 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
71236-static inline struct sk_buff *alloc_skb(unsigned int size,
71237+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
71238 gfp_t priority)
71239 {
71240 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
71241@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
71242 */
71243 static inline int skb_queue_empty(const struct sk_buff_head *list)
71244 {
71245- return list->next == (struct sk_buff *)list;
71246+ return list->next == (const struct sk_buff *)list;
71247 }
71248
71249 /**
71250@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
71251 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71252 const struct sk_buff *skb)
71253 {
71254- return skb->next == (struct sk_buff *)list;
71255+ return skb->next == (const struct sk_buff *)list;
71256 }
71257
71258 /**
71259@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71260 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
71261 const struct sk_buff *skb)
71262 {
71263- return skb->prev == (struct sk_buff *)list;
71264+ return skb->prev == (const struct sk_buff *)list;
71265 }
71266
71267 /**
71268@@ -1727,7 +1727,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
71269 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
71270 */
71271 #ifndef NET_SKB_PAD
71272-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
71273+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
71274 #endif
71275
71276 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
71277@@ -2305,7 +2305,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
71278 int noblock, int *err);
71279 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
71280 struct poll_table_struct *wait);
71281-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
71282+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
71283 int offset, struct iovec *to,
71284 int size);
71285 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
71286@@ -2595,6 +2595,9 @@ static inline void nf_reset(struct sk_buff *skb)
71287 nf_bridge_put(skb->nf_bridge);
71288 skb->nf_bridge = NULL;
71289 #endif
71290+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
71291+ skb->nf_trace = 0;
71292+#endif
71293 }
71294
71295 /* Note: This doesn't put any conntrack and bridge info in dst. */
71296diff --git a/include/linux/slab.h b/include/linux/slab.h
71297index 5d168d7..720bff3 100644
71298--- a/include/linux/slab.h
71299+++ b/include/linux/slab.h
71300@@ -12,13 +12,20 @@
71301 #include <linux/gfp.h>
71302 #include <linux/types.h>
71303 #include <linux/workqueue.h>
71304-
71305+#include <linux/err.h>
71306
71307 /*
71308 * Flags to pass to kmem_cache_create().
71309 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
71310 */
71311 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
71312+
71313+#ifdef CONFIG_PAX_USERCOPY_SLABS
71314+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
71315+#else
71316+#define SLAB_USERCOPY 0x00000000UL
71317+#endif
71318+
71319 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
71320 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
71321 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
71322@@ -89,10 +96,13 @@
71323 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
71324 * Both make kfree a no-op.
71325 */
71326-#define ZERO_SIZE_PTR ((void *)16)
71327+#define ZERO_SIZE_PTR \
71328+({ \
71329+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
71330+ (void *)(-MAX_ERRNO-1L); \
71331+})
71332
71333-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
71334- (unsigned long)ZERO_SIZE_PTR)
71335+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
71336
71337 /*
71338 * Common fields provided in kmem_cache by all slab allocators
71339@@ -112,7 +122,7 @@ struct kmem_cache {
71340 unsigned int align; /* Alignment as calculated */
71341 unsigned long flags; /* Active flags on the slab */
71342 const char *name; /* Slab name for sysfs */
71343- int refcount; /* Use counter */
71344+ atomic_t refcount; /* Use counter */
71345 void (*ctor)(void *); /* Called on object slot creation */
71346 struct list_head list; /* List of all slab caches on the system */
71347 };
71348@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
71349 void kfree(const void *);
71350 void kzfree(const void *);
71351 size_t ksize(const void *);
71352+const char *check_heap_object(const void *ptr, unsigned long n);
71353+bool is_usercopy_object(const void *ptr);
71354
71355 /*
71356 * Allocator specific definitions. These are mainly used to establish optimized
71357@@ -311,6 +323,7 @@ size_t ksize(const void *);
71358 * for general use, and so are not documented here. For a full list of
71359 * potential flags, always refer to linux/gfp.h.
71360 */
71361+
71362 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
71363 {
71364 if (size != 0 && n > SIZE_MAX / size)
71365@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
71366 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71367 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71368 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71369-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71370+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
71371 #define kmalloc_track_caller(size, flags) \
71372 __kmalloc_track_caller(size, flags, _RET_IP_)
71373 #else
71374@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71375 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71376 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71377 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71378-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
71379+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
71380 #define kmalloc_node_track_caller(size, flags, node) \
71381 __kmalloc_node_track_caller(size, flags, node, \
71382 _RET_IP_)
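
The slab.h hunk relocates ZERO_SIZE_PTR from (void *)16 to just below the error-pointer range and rewrites ZERO_OR_NULL_PTR() as a single unsigned comparison: subtracting 1 wraps NULL to ULONG_MAX, so one test catches NULL, ZERO_SIZE_PTR, and every IS_ERR-style value above it. The arithmetic, worked in userspace:

    #include <stdio.h>

    /* With ZERO_SIZE_PTR placed at -MAX_ERRNO-1, "x - 1 >=
     * ZERO_SIZE_PTR - 1" matches NULL (0 - 1 wraps to ULONG_MAX),
     * ZERO_SIZE_PTR itself, and all error pointers above it. */
    #define MAX_ERRNO      4095
    #define ZERO_SIZE_PTR  ((void *)(-MAX_ERRNO - 1L))
    #define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

    int main(void)
    {
        int obj;

        printf("NULL          -> %d\n", (int)ZERO_OR_NULL_PTR((void *)0));
        printf("ZERO_SIZE_PTR -> %d\n", (int)ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
        printf("ERR_PTR(-12)  -> %d\n", (int)ZERO_OR_NULL_PTR((void *)-12L));
        printf("&obj          -> %d\n", (int)ZERO_OR_NULL_PTR(&obj));
        return 0;
    }
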
71383diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
71384index 8bb6e0e..8eb0dbe 100644
71385--- a/include/linux/slab_def.h
71386+++ b/include/linux/slab_def.h
71387@@ -52,7 +52,7 @@ struct kmem_cache {
71388 /* 4) cache creation/removal */
71389 const char *name;
71390 struct list_head list;
71391- int refcount;
71392+ atomic_t refcount;
71393 int object_size;
71394 int align;
71395
71396@@ -68,10 +68,10 @@ struct kmem_cache {
71397 unsigned long node_allocs;
71398 unsigned long node_frees;
71399 unsigned long node_overflow;
71400- atomic_t allochit;
71401- atomic_t allocmiss;
71402- atomic_t freehit;
71403- atomic_t freemiss;
71404+ atomic_unchecked_t allochit;
71405+ atomic_unchecked_t allocmiss;
71406+ atomic_unchecked_t freehit;
71407+ atomic_unchecked_t freemiss;
71408
71409 /*
71410 * If debugging is enabled, then the allocator can add additional
71411@@ -111,11 +111,16 @@ struct cache_sizes {
71412 #ifdef CONFIG_ZONE_DMA
71413 struct kmem_cache *cs_dmacachep;
71414 #endif
71415+
71416+#ifdef CONFIG_PAX_USERCOPY_SLABS
71417+ struct kmem_cache *cs_usercopycachep;
71418+#endif
71419+
71420 };
71421 extern struct cache_sizes malloc_sizes[];
71422
71423 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71424-void *__kmalloc(size_t size, gfp_t flags);
71425+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
71426
71427 #ifdef CONFIG_TRACING
71428 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
71429@@ -152,6 +157,13 @@ found:
71430 cachep = malloc_sizes[i].cs_dmacachep;
71431 else
71432 #endif
71433+
71434+#ifdef CONFIG_PAX_USERCOPY_SLABS
71435+ if (flags & GFP_USERCOPY)
71436+ cachep = malloc_sizes[i].cs_usercopycachep;
71437+ else
71438+#endif
71439+
71440 cachep = malloc_sizes[i].cs_cachep;
71441
71442 ret = kmem_cache_alloc_trace(cachep, flags, size);
71443@@ -162,7 +174,7 @@ found:
71444 }
71445
71446 #ifdef CONFIG_NUMA
71447-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
71448+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71449 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71450
71451 #ifdef CONFIG_TRACING
71452@@ -205,6 +217,13 @@ found:
71453 cachep = malloc_sizes[i].cs_dmacachep;
71454 else
71455 #endif
71456+
71457+#ifdef CONFIG_PAX_USERCOPY_SLABS
71458+ if (flags & GFP_USERCOPY)
71459+ cachep = malloc_sizes[i].cs_usercopycachep;
71460+ else
71461+#endif
71462+
71463 cachep = malloc_sizes[i].cs_cachep;
71464
71465 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
71466diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
71467index f28e14a..7831211 100644
71468--- a/include/linux/slob_def.h
71469+++ b/include/linux/slob_def.h
71470@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
71471 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
71472 }
71473
71474-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71475+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71476
71477 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
71478 {
71479@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71480 return __kmalloc_node(size, flags, NUMA_NO_NODE);
71481 }
71482
71483-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
71484+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
71485 {
71486 return kmalloc(size, flags);
71487 }
71488diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
71489index 9db4825..ed42fb5 100644
71490--- a/include/linux/slub_def.h
71491+++ b/include/linux/slub_def.h
71492@@ -91,7 +91,7 @@ struct kmem_cache {
71493 struct kmem_cache_order_objects max;
71494 struct kmem_cache_order_objects min;
71495 gfp_t allocflags; /* gfp flags to use on each alloc */
71496- int refcount; /* Refcount for slab cache destroy */
71497+ atomic_t refcount; /* Refcount for slab cache destroy */
71498 void (*ctor)(void *);
71499 int inuse; /* Offset to metadata */
71500 int align; /* Alignment */
71501@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
71502 * Sorry that the following has to be that ugly but some versions of GCC
71503 * have trouble with constant propagation and loops.
71504 */
71505-static __always_inline int kmalloc_index(size_t size)
71506+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
71507 {
71508 if (!size)
71509 return 0;
71510@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
71511 }
71512
71513 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71514-void *__kmalloc(size_t size, gfp_t flags);
71515+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
71516
71517 static __always_inline void *
71518 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
71519@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
71520 }
71521 #endif
71522
71523-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
71524+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
71525 {
71526 unsigned int order = get_order(size);
71527 return kmalloc_order_trace(size, flags, order);
71528@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71529 }
71530
71531 #ifdef CONFIG_NUMA
71532-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71533+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71534 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71535
71536 #ifdef CONFIG_TRACING
71537diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
71538index e8d702e..0a56eb4 100644
71539--- a/include/linux/sock_diag.h
71540+++ b/include/linux/sock_diag.h
71541@@ -10,7 +10,7 @@ struct sock;
71542 struct sock_diag_handler {
71543 __u8 family;
71544 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
71545-};
71546+} __do_const;
71547
71548 int sock_diag_register(const struct sock_diag_handler *h);
71549 void sock_diag_unregister(const struct sock_diag_handler *h);
71550diff --git a/include/linux/sonet.h b/include/linux/sonet.h
71551index 680f9a3..f13aeb0 100644
71552--- a/include/linux/sonet.h
71553+++ b/include/linux/sonet.h
71554@@ -7,7 +7,7 @@
71555 #include <uapi/linux/sonet.h>
71556
71557 struct k_sonet_stats {
71558-#define __HANDLE_ITEM(i) atomic_t i
71559+#define __HANDLE_ITEM(i) atomic_unchecked_t i
71560 __SONET_ITEMS
71561 #undef __HANDLE_ITEM
71562 };
71563diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
71564index 34206b8..3db7f1c 100644
71565--- a/include/linux/sunrpc/clnt.h
71566+++ b/include/linux/sunrpc/clnt.h
71567@@ -96,7 +96,7 @@ struct rpc_procinfo {
71568 unsigned int p_timer; /* Which RTT timer to use */
71569 u32 p_statidx; /* Which procedure to account */
71570 const char * p_name; /* name of procedure */
71571-};
71572+} __do_const;
71573
71574 #ifdef __KERNEL__
71575
71576@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
71577 {
71578 switch (sap->sa_family) {
71579 case AF_INET:
71580- return ntohs(((struct sockaddr_in *)sap)->sin_port);
71581+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
71582 case AF_INET6:
71583- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
71584+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
71585 }
71586 return 0;
71587 }
71588@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
71589 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
71590 const struct sockaddr *src)
71591 {
71592- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
71593+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
71594 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
71595
71596 dsin->sin_family = ssin->sin_family;
71597@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
71598 if (sa->sa_family != AF_INET6)
71599 return 0;
71600
71601- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
71602+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
71603 }
71604
71605 #endif /* __KERNEL__ */
71606diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
71607index 676ddf5..4c519a1 100644
71608--- a/include/linux/sunrpc/svc.h
71609+++ b/include/linux/sunrpc/svc.h
71610@@ -410,7 +410,7 @@ struct svc_procedure {
71611 unsigned int pc_count; /* call count */
71612 unsigned int pc_cachetype; /* cache info (NFS) */
71613 unsigned int pc_xdrressize; /* maximum size of XDR reply */
71614-};
71615+} __do_const;
71616
71617 /*
71618 * Function prototypes.
71619diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
71620index 0b8e3e6..33e0a01 100644
71621--- a/include/linux/sunrpc/svc_rdma.h
71622+++ b/include/linux/sunrpc/svc_rdma.h
71623@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
71624 extern unsigned int svcrdma_max_requests;
71625 extern unsigned int svcrdma_max_req_size;
71626
71627-extern atomic_t rdma_stat_recv;
71628-extern atomic_t rdma_stat_read;
71629-extern atomic_t rdma_stat_write;
71630-extern atomic_t rdma_stat_sq_starve;
71631-extern atomic_t rdma_stat_rq_starve;
71632-extern atomic_t rdma_stat_rq_poll;
71633-extern atomic_t rdma_stat_rq_prod;
71634-extern atomic_t rdma_stat_sq_poll;
71635-extern atomic_t rdma_stat_sq_prod;
71636+extern atomic_unchecked_t rdma_stat_recv;
71637+extern atomic_unchecked_t rdma_stat_read;
71638+extern atomic_unchecked_t rdma_stat_write;
71639+extern atomic_unchecked_t rdma_stat_sq_starve;
71640+extern atomic_unchecked_t rdma_stat_rq_starve;
71641+extern atomic_unchecked_t rdma_stat_rq_poll;
71642+extern atomic_unchecked_t rdma_stat_rq_prod;
71643+extern atomic_unchecked_t rdma_stat_sq_poll;
71644+extern atomic_unchecked_t rdma_stat_sq_prod;
71645
71646 #define RPCRDMA_VERSION 1
71647
71648diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
71649index dd74084a..7f509d5 100644
71650--- a/include/linux/sunrpc/svcauth.h
71651+++ b/include/linux/sunrpc/svcauth.h
71652@@ -109,7 +109,7 @@ struct auth_ops {
71653 int (*release)(struct svc_rqst *rq);
71654 void (*domain_release)(struct auth_domain *);
71655 int (*set_client)(struct svc_rqst *rq);
71656-};
71657+} __do_const;
71658
71659 #define SVC_GARBAGE 1
71660 #define SVC_SYSERR 2
71661diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
71662index 071d62c..4ccc7ac 100644
71663--- a/include/linux/swiotlb.h
71664+++ b/include/linux/swiotlb.h
71665@@ -59,7 +59,8 @@ extern void
71666
71667 extern void
71668 swiotlb_free_coherent(struct device *hwdev, size_t size,
71669- void *vaddr, dma_addr_t dma_handle);
71670+ void *vaddr, dma_addr_t dma_handle,
71671+ struct dma_attrs *attrs);
71672
71673 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
71674 unsigned long offset, size_t size,
71675diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
71676index 45e2db2..1635156a 100644
71677--- a/include/linux/syscalls.h
71678+++ b/include/linux/syscalls.h
71679@@ -615,7 +615,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
71680 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
71681 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
71682 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
71683- struct sockaddr __user *, int);
71684+ struct sockaddr __user *, int) __intentional_overflow(0);
71685 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
71686 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
71687 unsigned int vlen, unsigned flags);
71688diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
71689index 27b3b0b..e093dd9 100644
71690--- a/include/linux/syscore_ops.h
71691+++ b/include/linux/syscore_ops.h
71692@@ -16,7 +16,7 @@ struct syscore_ops {
71693 int (*suspend)(void);
71694 void (*resume)(void);
71695 void (*shutdown)(void);
71696-};
71697+} __do_const;
71698
71699 extern void register_syscore_ops(struct syscore_ops *ops);
71700 extern void unregister_syscore_ops(struct syscore_ops *ops);
71701diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
71702index 14a8ff2..af52bad 100644
71703--- a/include/linux/sysctl.h
71704+++ b/include/linux/sysctl.h
71705@@ -34,13 +34,13 @@ struct ctl_table_root;
71706 struct ctl_table_header;
71707 struct ctl_dir;
71708
71709-typedef struct ctl_table ctl_table;
71710-
71711 typedef int proc_handler (struct ctl_table *ctl, int write,
71712 void __user *buffer, size_t *lenp, loff_t *ppos);
71713
71714 extern int proc_dostring(struct ctl_table *, int,
71715 void __user *, size_t *, loff_t *);
71716+extern int proc_dostring_modpriv(struct ctl_table *, int,
71717+ void __user *, size_t *, loff_t *);
71718 extern int proc_dointvec(struct ctl_table *, int,
71719 void __user *, size_t *, loff_t *);
71720 extern int proc_dointvec_minmax(struct ctl_table *, int,
71721@@ -115,7 +115,9 @@ struct ctl_table
71722 struct ctl_table_poll *poll;
71723 void *extra1;
71724 void *extra2;
71725-};
71726+} __do_const;
71727+typedef struct ctl_table __no_const ctl_table_no_const;
71728+typedef struct ctl_table ctl_table;
71729
71730 struct ctl_node {
71731 struct rb_node node;
71732diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
71733index 381f06d..dc16cc7 100644
71734--- a/include/linux/sysfs.h
71735+++ b/include/linux/sysfs.h
71736@@ -31,7 +31,8 @@ struct attribute {
71737 struct lock_class_key *key;
71738 struct lock_class_key skey;
71739 #endif
71740-};
71741+} __do_const;
71742+typedef struct attribute __no_const attribute_no_const;
71743
71744 /**
71745 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
71746@@ -59,8 +60,8 @@ struct attribute_group {
71747 umode_t (*is_visible)(struct kobject *,
71748 struct attribute *, int);
71749 struct attribute **attrs;
71750-};
71751-
71752+} __do_const;
71753+typedef struct attribute_group __no_const attribute_group_no_const;
71754
71755
71756 /**
71757@@ -107,7 +108,8 @@ struct bin_attribute {
71758 char *, loff_t, size_t);
71759 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
71760 struct vm_area_struct *vma);
71761-};
71762+} __do_const;
71763+typedef struct bin_attribute __no_const bin_attribute_no_const;
71764
71765 /**
71766 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
71767diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
71768index 7faf933..9b85a0c 100644
71769--- a/include/linux/sysrq.h
71770+++ b/include/linux/sysrq.h
71771@@ -16,6 +16,7 @@
71772
71773 #include <linux/errno.h>
71774 #include <linux/types.h>
71775+#include <linux/compiler.h>
71776
71777 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
71778 #define SYSRQ_DEFAULT_ENABLE 1
71779@@ -36,7 +37,7 @@ struct sysrq_key_op {
71780 char *help_msg;
71781 char *action_msg;
71782 int enable_mask;
71783-};
71784+} __do_const;
71785
71786 #ifdef CONFIG_MAGIC_SYSRQ
71787
71788diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
71789index e7e0473..7989295 100644
71790--- a/include/linux/thread_info.h
71791+++ b/include/linux/thread_info.h
71792@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
71793 #error "no set_restore_sigmask() provided and default one won't work"
71794 #endif
71795
71796+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
71797+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
71798+{
71799+#ifndef CONFIG_PAX_USERCOPY_DEBUG
71800+ if (!__builtin_constant_p(n))
71801+#endif
71802+ __check_object_size(ptr, n, to_user);
71803+}
71804+
71805 #endif /* __KERNEL__ */
71806
71807 #endif /* _LINUX_THREAD_INFO_H */
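
The check_object_size() helper added above is the PAX_USERCOPY fast path: unless CONFIG_PAX_USERCOPY_DEBUG forces every copy through the full check, sizes known at compile time are skipped and only runtime-variable lengths pay for the out-of-line __check_object_size() call. A minimal userspace sketch of that gating — the names mirror the patch, but the bounds walk is stubbed out:

/* Gate an expensive runtime check on non-constant sizes only,
 * mirroring the __builtin_constant_p() test in check_object_size(). */
#include <stdbool.h>
#include <stdio.h>

static void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	/* stand-in for the real slab/stack bounds walk */
	printf("runtime check: %lu byte copy %s user\n",
	       n, to_user ? "to" : "from");
}

#define check_object_size(ptr, n, to_user)		\
do {							\
	if (!__builtin_constant_p(n))			\
		__check_object_size(ptr, n, to_user);	\
} while (0)

int main(int argc, char **argv)
{
	char buf[64];
	unsigned long len = (unsigned long)argc;   /* not a compile-time constant */
	(void)argv;
	check_object_size(buf, sizeof(buf), true); /* constant size: check elided */
	check_object_size(buf, len, true);         /* variable size: checked */
	return 0;
}
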
71808diff --git a/include/linux/tty.h b/include/linux/tty.h
71809index 8db1b56..c16a040 100644
71810--- a/include/linux/tty.h
71811+++ b/include/linux/tty.h
71812@@ -194,7 +194,7 @@ struct tty_port {
71813 const struct tty_port_operations *ops; /* Port operations */
71814 spinlock_t lock; /* Lock protecting tty field */
71815 int blocked_open; /* Waiting to open */
71816- int count; /* Usage count */
71817+ atomic_t count; /* Usage count */
71818 wait_queue_head_t open_wait; /* Open waiters */
71819 wait_queue_head_t close_wait; /* Close waiters */
71820 wait_queue_head_t delta_msr_wait; /* Modem status change */
71821@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
71822 struct tty_struct *tty, struct file *filp);
71823 static inline int tty_port_users(struct tty_port *port)
71824 {
71825- return port->count + port->blocked_open;
71826+ return atomic_read(&port->count) + port->blocked_open;
71827 }
71828
71829 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
71830diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
71831index dd976cf..e272742 100644
71832--- a/include/linux/tty_driver.h
71833+++ b/include/linux/tty_driver.h
71834@@ -284,7 +284,7 @@ struct tty_operations {
71835 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
71836 #endif
71837 const struct file_operations *proc_fops;
71838-};
71839+} __do_const;
71840
71841 struct tty_driver {
71842 int magic; /* magic number for this structure */
71843diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
71844index fb79dd8d..07d4773 100644
71845--- a/include/linux/tty_ldisc.h
71846+++ b/include/linux/tty_ldisc.h
71847@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
71848
71849 struct module *owner;
71850
71851- int refcount;
71852+ atomic_t refcount;
71853 };
71854
71855 struct tty_ldisc {
71856diff --git a/include/linux/types.h b/include/linux/types.h
71857index 4d118ba..c3ee9bf 100644
71858--- a/include/linux/types.h
71859+++ b/include/linux/types.h
71860@@ -176,10 +176,26 @@ typedef struct {
71861 int counter;
71862 } atomic_t;
71863
71864+#ifdef CONFIG_PAX_REFCOUNT
71865+typedef struct {
71866+ int counter;
71867+} atomic_unchecked_t;
71868+#else
71869+typedef atomic_t atomic_unchecked_t;
71870+#endif
71871+
71872 #ifdef CONFIG_64BIT
71873 typedef struct {
71874 long counter;
71875 } atomic64_t;
71876+
71877+#ifdef CONFIG_PAX_REFCOUNT
71878+typedef struct {
71879+ long counter;
71880+} atomic64_unchecked_t;
71881+#else
71882+typedef atomic64_t atomic64_unchecked_t;
71883+#endif
71884 #endif
71885
71886 struct list_head {
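
The atomic_unchecked_t type introduced above is the heart of PAX_REFCOUNT: the regular atomic_t operations gain overflow trapping, so counters that are allowed to wrap (statistics, ID generators) move to a distinct type with non-trapping helpers; with the option off, the typedef collapses back to atomic_t and call sites compile unchanged. A minimal userspace sketch of the split, with a GCC builtin standing in for the per-arch kernel atomics:

/* Sketch of the checked/unchecked counter split. */
#include <stdio.h>

#define CONFIG_PAX_REFCOUNT 1

typedef struct { int counter; } atomic_t;

#ifdef CONFIG_PAX_REFCOUNT
/* Distinct type: the hardened atomic_inc() would trap on overflow,
 * so wrap-tolerant counters opt out via this type instead. */
typedef struct { int counter; } atomic_unchecked_t;
#else
typedef atomic_t atomic_unchecked_t;
#endif

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* no overflow trap */
}

int main(void)
{
	atomic_unchecked_t stat = { 0 };
	atomic_inc_unchecked(&stat);
	printf("%d\n", stat.counter);
	return 0;
}
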
71887diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
71888index 5ca0951..ab496a5 100644
71889--- a/include/linux/uaccess.h
71890+++ b/include/linux/uaccess.h
71891@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
71892 long ret; \
71893 mm_segment_t old_fs = get_fs(); \
71894 \
71895- set_fs(KERNEL_DS); \
71896 pagefault_disable(); \
71897- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
71898- pagefault_enable(); \
71899+ set_fs(KERNEL_DS); \
71900+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
71901 set_fs(old_fs); \
71902+ pagefault_enable(); \
71903 ret; \
71904 })
71905
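
The hunk above does two things: it retags the cast with __force_user for the checker, and it swaps the guard nesting so the KERNEL_DS address-limit override is only ever active inside the pagefault-disabled window, rather than the other way around. A toy sketch of that nesting discipline — every function here is an illustrative stand-in for the kernel primitive of the same flavor:

/* LIFO guard discipline: the address-limit override (inner) is fully
 * contained in the pagefault-disable window (outer). */
#include <stdio.h>

static int pagefaults_disabled;
static int addr_limit_kernel;

static void pagefault_disable(void) { pagefaults_disabled++; }
static void pagefault_enable(void)  { pagefaults_disabled--; }
static void set_fs_kernel(void)     { addr_limit_kernel = 1; }
static void set_fs_restore(void)    { addr_limit_kernel = 0; }

int main(void)
{
	pagefault_disable();   /* outer guard */
	set_fs_kernel();       /* inner guard */
	printf("copy runs with faults off=%d, KERNEL_DS=%d\n",
	       pagefaults_disabled, addr_limit_kernel);
	set_fs_restore();      /* release in reverse order */
	pagefault_enable();
	return 0;
}
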
71906diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
71907index 8e522cbc..aa8572d 100644
71908--- a/include/linux/uidgid.h
71909+++ b/include/linux/uidgid.h
71910@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
71911
71912 #endif /* CONFIG_USER_NS */
71913
71914+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
71915+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
71916+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
71917+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
71918+
71919 #endif /* _LINUX_UIDGID_H */
71920diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
71921index 99c1b4d..562e6f3 100644
71922--- a/include/linux/unaligned/access_ok.h
71923+++ b/include/linux/unaligned/access_ok.h
71924@@ -4,34 +4,34 @@
71925 #include <linux/kernel.h>
71926 #include <asm/byteorder.h>
71927
71928-static inline u16 get_unaligned_le16(const void *p)
71929+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
71930 {
71931- return le16_to_cpup((__le16 *)p);
71932+ return le16_to_cpup((const __le16 *)p);
71933 }
71934
71935-static inline u32 get_unaligned_le32(const void *p)
71936+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
71937 {
71938- return le32_to_cpup((__le32 *)p);
71939+ return le32_to_cpup((const __le32 *)p);
71940 }
71941
71942-static inline u64 get_unaligned_le64(const void *p)
71943+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
71944 {
71945- return le64_to_cpup((__le64 *)p);
71946+ return le64_to_cpup((const __le64 *)p);
71947 }
71948
71949-static inline u16 get_unaligned_be16(const void *p)
71950+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
71951 {
71952- return be16_to_cpup((__be16 *)p);
71953+ return be16_to_cpup((const __be16 *)p);
71954 }
71955
71956-static inline u32 get_unaligned_be32(const void *p)
71957+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
71958 {
71959- return be32_to_cpup((__be32 *)p);
71960+ return be32_to_cpup((const __be32 *)p);
71961 }
71962
71963-static inline u64 get_unaligned_be64(const void *p)
71964+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
71965 {
71966- return be64_to_cpup((__be64 *)p);
71967+ return be64_to_cpup((const __be64 *)p);
71968 }
71969
71970 static inline void put_unaligned_le16(u16 val, void *p)
71971diff --git a/include/linux/usb.h b/include/linux/usb.h
71972index 4d22d0f..8d0e8f8 100644
71973--- a/include/linux/usb.h
71974+++ b/include/linux/usb.h
71975@@ -554,7 +554,7 @@ struct usb_device {
71976 int maxchild;
71977
71978 u32 quirks;
71979- atomic_t urbnum;
71980+ atomic_unchecked_t urbnum;
71981
71982 unsigned long active_duration;
71983
71984@@ -1604,7 +1604,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
71985
71986 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
71987 __u8 request, __u8 requesttype, __u16 value, __u16 index,
71988- void *data, __u16 size, int timeout);
71989+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
71990 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
71991 void *data, int len, int *actual_length, int timeout);
71992 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
71993diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
71994index c5d36c6..108f4f9 100644
71995--- a/include/linux/usb/renesas_usbhs.h
71996+++ b/include/linux/usb/renesas_usbhs.h
71997@@ -39,7 +39,7 @@ enum {
71998 */
71999 struct renesas_usbhs_driver_callback {
72000 int (*notify_hotplug)(struct platform_device *pdev);
72001-};
72002+} __no_const;
72003
72004 /*
72005 * callback functions for platform
72006diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
72007index 5209cfe..b6b215f 100644
72008--- a/include/linux/user_namespace.h
72009+++ b/include/linux/user_namespace.h
72010@@ -21,7 +21,7 @@ struct user_namespace {
72011 struct uid_gid_map uid_map;
72012 struct uid_gid_map gid_map;
72013 struct uid_gid_map projid_map;
72014- struct kref kref;
72015+ atomic_t count;
72016 struct user_namespace *parent;
72017 kuid_t owner;
72018 kgid_t group;
72019@@ -37,18 +37,18 @@ extern struct user_namespace init_user_ns;
72020 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
72021 {
72022 if (ns)
72023- kref_get(&ns->kref);
72024+ atomic_inc(&ns->count);
72025 return ns;
72026 }
72027
72028 extern int create_user_ns(struct cred *new);
72029 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
72030-extern void free_user_ns(struct kref *kref);
72031+extern void free_user_ns(struct user_namespace *ns);
72032
72033 static inline void put_user_ns(struct user_namespace *ns)
72034 {
72035- if (ns)
72036- kref_put(&ns->kref, free_user_ns);
72037+ if (ns && atomic_dec_and_test(&ns->count))
72038+ free_user_ns(ns);
72039 }
72040
72041 struct seq_operations;
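
The kref-to-atomic_t conversion above open-codes the refcount: get is a plain increment, put is decrement-and-test with the free happening exactly once, at zero. A self-contained userspace rendering of the same pattern, with GCC builtins substituting for the kernel's atomic_inc()/atomic_dec_and_test():

/* Open-coded refcount: increment on get, decrement-and-test on put,
 * free exactly when the count reaches zero. */
#include <stdio.h>
#include <stdlib.h>

struct user_ns_demo {
	int count;                 /* atomic_t in the real structure */
	struct user_ns_demo *parent;
};

static struct user_ns_demo *get_ns(struct user_ns_demo *ns)
{
	if (ns)
		__atomic_fetch_add(&ns->count, 1, __ATOMIC_RELAXED);
	return ns;
}

static void put_ns(struct user_ns_demo *ns)
{
	/* free only on the drop of the last reference */
	if (ns && __atomic_sub_fetch(&ns->count, 1, __ATOMIC_ACQ_REL) == 0) {
		printf("freeing ns\n");
		free(ns);
	}
}

int main(void)
{
	struct user_ns_demo *ns = calloc(1, sizeof(*ns));
	if (!ns)
		return 1;
	ns->count = 1;
	get_ns(ns);   /* count: 2 */
	put_ns(ns);   /* count: 1 */
	put_ns(ns);   /* count: 0 -> freed */
	return 0;
}
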
72042diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
72043index 6f8fbcf..8259001 100644
72044--- a/include/linux/vermagic.h
72045+++ b/include/linux/vermagic.h
72046@@ -25,9 +25,35 @@
72047 #define MODULE_ARCH_VERMAGIC ""
72048 #endif
72049
72050+#ifdef CONFIG_PAX_REFCOUNT
72051+#define MODULE_PAX_REFCOUNT "REFCOUNT "
72052+#else
72053+#define MODULE_PAX_REFCOUNT ""
72054+#endif
72055+
72056+#ifdef CONSTIFY_PLUGIN
72057+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
72058+#else
72059+#define MODULE_CONSTIFY_PLUGIN ""
72060+#endif
72061+
72062+#ifdef STACKLEAK_PLUGIN
72063+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
72064+#else
72065+#define MODULE_STACKLEAK_PLUGIN ""
72066+#endif
72067+
72068+#ifdef CONFIG_GRKERNSEC
72069+#define MODULE_GRSEC "GRSEC "
72070+#else
72071+#define MODULE_GRSEC ""
72072+#endif
72073+
72074 #define VERMAGIC_STRING \
72075 UTS_RELEASE " " \
72076 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
72077 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
72078- MODULE_ARCH_VERMAGIC
72079+ MODULE_ARCH_VERMAGIC \
72080+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
72081+ MODULE_GRSEC
72082
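
VERMAGIC_STRING is assembled purely from adjacent string literals, so each option contributes either its tag or the empty string at preprocessing time; a module built with a different REFCOUNT/CONSTIFY/STACKLEAK/GRSEC mix then fails the vermagic comparison at load. The same literal-pasting trick, reduced to a compilable demo (the release string below is a placeholder):

/* Adjacent string literals concatenate at compile time, so optional
 * features splice their tags into one version string. */
#include <stdio.h>

#define CONFIG_PAX_REFCOUNT 1

#ifdef CONFIG_PAX_REFCOUNT
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#else
#define MODULE_PAX_REFCOUNT ""
#endif

#define UTS_RELEASE "3.8.6-grsec"   /* placeholder release string */
#define VERMAGIC_STRING UTS_RELEASE " " MODULE_PAX_REFCOUNT

int main(void)
{
	puts(VERMAGIC_STRING);  /* release string with the REFCOUNT tag spliced in */
	return 0;
}
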
72083diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
72084index 6071e91..ca6a489 100644
72085--- a/include/linux/vmalloc.h
72086+++ b/include/linux/vmalloc.h
72087@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
72088 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
72089 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
72090 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
72091+
72092+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72093+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
72094+#endif
72095+
72096 /* bits [20..32] reserved for arch specific ioremap internals */
72097
72098 /*
72099@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
72100 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
72101 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
72102 unsigned long start, unsigned long end, gfp_t gfp_mask,
72103- pgprot_t prot, int node, const void *caller);
72104+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
72105 extern void vfree(const void *addr);
72106
72107 extern void *vmap(struct page **pages, unsigned int count,
72108@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
72109 extern void free_vm_area(struct vm_struct *area);
72110
72111 /* for /dev/kmem */
72112-extern long vread(char *buf, char *addr, unsigned long count);
72113-extern long vwrite(char *buf, char *addr, unsigned long count);
72114+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
72115+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
72116
72117 /*
72118 * Internals. Dont't use..
72119diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
72120index a13291f..af51fa3 100644
72121--- a/include/linux/vmstat.h
72122+++ b/include/linux/vmstat.h
72123@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
72124 /*
72125 * Zone based page accounting with per cpu differentials.
72126 */
72127-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72128+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72129
72130 static inline void zone_page_state_add(long x, struct zone *zone,
72131 enum zone_stat_item item)
72132 {
72133- atomic_long_add(x, &zone->vm_stat[item]);
72134- atomic_long_add(x, &vm_stat[item]);
72135+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
72136+ atomic_long_add_unchecked(x, &vm_stat[item]);
72137 }
72138
72139 static inline unsigned long global_page_state(enum zone_stat_item item)
72140 {
72141- long x = atomic_long_read(&vm_stat[item]);
72142+ long x = atomic_long_read_unchecked(&vm_stat[item]);
72143 #ifdef CONFIG_SMP
72144 if (x < 0)
72145 x = 0;
72146@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
72147 static inline unsigned long zone_page_state(struct zone *zone,
72148 enum zone_stat_item item)
72149 {
72150- long x = atomic_long_read(&zone->vm_stat[item]);
72151+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72152 #ifdef CONFIG_SMP
72153 if (x < 0)
72154 x = 0;
72155@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
72156 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
72157 enum zone_stat_item item)
72158 {
72159- long x = atomic_long_read(&zone->vm_stat[item]);
72160+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72161
72162 #ifdef CONFIG_SMP
72163 int cpu;
72164@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
72165
72166 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
72167 {
72168- atomic_long_inc(&zone->vm_stat[item]);
72169- atomic_long_inc(&vm_stat[item]);
72170+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
72171+ atomic_long_inc_unchecked(&vm_stat[item]);
72172 }
72173
72174 static inline void __inc_zone_page_state(struct page *page,
72175@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
72176
72177 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
72178 {
72179- atomic_long_dec(&zone->vm_stat[item]);
72180- atomic_long_dec(&vm_stat[item]);
72181+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
72182+ atomic_long_dec_unchecked(&vm_stat[item]);
72183 }
72184
72185 static inline void __dec_zone_page_state(struct page *page,
72186diff --git a/include/linux/xattr.h b/include/linux/xattr.h
72187index fdbafc6..b7ffd47 100644
72188--- a/include/linux/xattr.h
72189+++ b/include/linux/xattr.h
72190@@ -28,7 +28,7 @@ struct xattr_handler {
72191 size_t size, int handler_flags);
72192 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
72193 size_t size, int flags, int handler_flags);
72194-};
72195+} __do_const;
72196
72197 struct xattr {
72198 char *name;
72199diff --git a/include/linux/zlib.h b/include/linux/zlib.h
72200index 9c5a6b4..09c9438 100644
72201--- a/include/linux/zlib.h
72202+++ b/include/linux/zlib.h
72203@@ -31,6 +31,7 @@
72204 #define _ZLIB_H
72205
72206 #include <linux/zconf.h>
72207+#include <linux/compiler.h>
72208
72209 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
72210 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
72211@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
72212
72213 /* basic functions */
72214
72215-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
72216+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
72217 /*
72218 Returns the number of bytes that needs to be allocated for a per-
72219 stream workspace with the specified parameters. A pointer to this
72220diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
72221index 95d1c91..6798cca 100644
72222--- a/include/media/v4l2-dev.h
72223+++ b/include/media/v4l2-dev.h
72224@@ -76,7 +76,7 @@ struct v4l2_file_operations {
72225 int (*mmap) (struct file *, struct vm_area_struct *);
72226 int (*open) (struct file *);
72227 int (*release) (struct file *);
72228-};
72229+} __do_const;
72230
72231 /*
72232 * Newer version of video_device, handled by videodev2.c
72233diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
72234index 4118ad1..cb7e25f 100644
72235--- a/include/media/v4l2-ioctl.h
72236+++ b/include/media/v4l2-ioctl.h
72237@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
72238 bool valid_prio, int cmd, void *arg);
72239 };
72240
72241-
72242 /* v4l debugging and diagnostics */
72243
72244 /* Debug bitmask flags to be used on V4L2 */
72245diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
72246index adcbb20..62c2559 100644
72247--- a/include/net/9p/transport.h
72248+++ b/include/net/9p/transport.h
72249@@ -57,7 +57,7 @@ struct p9_trans_module {
72250 int (*cancel) (struct p9_client *, struct p9_req_t *req);
72251 int (*zc_request)(struct p9_client *, struct p9_req_t *,
72252 char *, char *, int , int, int, int);
72253-};
72254+} __do_const;
72255
72256 void v9fs_register_trans(struct p9_trans_module *m);
72257 void v9fs_unregister_trans(struct p9_trans_module *m);
72258diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
72259index 7588ef4..e62d35f 100644
72260--- a/include/net/bluetooth/l2cap.h
72261+++ b/include/net/bluetooth/l2cap.h
72262@@ -552,7 +552,7 @@ struct l2cap_ops {
72263 void (*defer) (struct l2cap_chan *chan);
72264 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
72265 unsigned long len, int nb);
72266-};
72267+} __do_const;
72268
72269 struct l2cap_conn {
72270 struct hci_conn *hcon;
72271diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
72272index 9e5425b..8136ffc 100644
72273--- a/include/net/caif/cfctrl.h
72274+++ b/include/net/caif/cfctrl.h
72275@@ -52,7 +52,7 @@ struct cfctrl_rsp {
72276 void (*radioset_rsp)(void);
72277 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
72278 struct cflayer *client_layer);
72279-};
72280+} __no_const;
72281
72282 /* Link Setup Parameters for CAIF-Links. */
72283 struct cfctrl_link_param {
72284@@ -101,8 +101,8 @@ struct cfctrl_request_info {
72285 struct cfctrl {
72286 struct cfsrvl serv;
72287 struct cfctrl_rsp res;
72288- atomic_t req_seq_no;
72289- atomic_t rsp_seq_no;
72290+ atomic_unchecked_t req_seq_no;
72291+ atomic_unchecked_t rsp_seq_no;
72292 struct list_head list;
72293 /* Protects from simultaneous access to first_req list */
72294 spinlock_t info_list_lock;
72295diff --git a/include/net/flow.h b/include/net/flow.h
72296index 628e11b..4c475df 100644
72297--- a/include/net/flow.h
72298+++ b/include/net/flow.h
72299@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
72300
72301 extern void flow_cache_flush(void);
72302 extern void flow_cache_flush_deferred(void);
72303-extern atomic_t flow_cache_genid;
72304+extern atomic_unchecked_t flow_cache_genid;
72305
72306 #endif
72307diff --git a/include/net/genetlink.h b/include/net/genetlink.h
72308index bdfbe68..4402ebe 100644
72309--- a/include/net/genetlink.h
72310+++ b/include/net/genetlink.h
72311@@ -118,7 +118,7 @@ struct genl_ops {
72312 struct netlink_callback *cb);
72313 int (*done)(struct netlink_callback *cb);
72314 struct list_head ops_list;
72315-};
72316+} __do_const;
72317
72318 extern int genl_register_family(struct genl_family *family);
72319 extern int genl_register_family_with_ops(struct genl_family *family,
72320diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
72321index e5062c9..48a9a4b 100644
72322--- a/include/net/gro_cells.h
72323+++ b/include/net/gro_cells.h
72324@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
72325 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
72326
72327 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
72328- atomic_long_inc(&dev->rx_dropped);
72329+ atomic_long_inc_unchecked(&dev->rx_dropped);
72330 kfree_skb(skb);
72331 return;
72332 }
72333@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
72334 int i;
72335
72336 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
72337- gcells->cells = kcalloc(sizeof(struct gro_cell),
72338- gcells->gro_cells_mask + 1,
72339+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
72340+ sizeof(struct gro_cell),
72341 GFP_KERNEL);
72342 if (!gcells->cells)
72343 return -ENOMEM;
72344diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
72345index 1832927..ce39aea 100644
72346--- a/include/net/inet_connection_sock.h
72347+++ b/include/net/inet_connection_sock.h
72348@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
72349 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
72350 int (*bind_conflict)(const struct sock *sk,
72351 const struct inet_bind_bucket *tb, bool relax);
72352-};
72353+} __do_const;
72354
72355 /** inet_connection_sock - INET connection oriented sock
72356 *
72357diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
72358index 53f464d..ba76aaa 100644
72359--- a/include/net/inetpeer.h
72360+++ b/include/net/inetpeer.h
72361@@ -47,8 +47,8 @@ struct inet_peer {
72362 */
72363 union {
72364 struct {
72365- atomic_t rid; /* Frag reception counter */
72366- atomic_t ip_id_count; /* IP ID for the next packet */
72367+ atomic_unchecked_t rid; /* Frag reception counter */
72368+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
72369 };
72370 struct rcu_head rcu;
72371 struct inet_peer *gc_next;
72372@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
72373 more++;
72374 inet_peer_refcheck(p);
72375 do {
72376- old = atomic_read(&p->ip_id_count);
72377+ old = atomic_read_unchecked(&p->ip_id_count);
72378 new = old + more;
72379 if (!new)
72380 new = 1;
72381- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
72382+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
72383 return new;
72384 }
72385
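
inet_getid() above is a textbook compare-and-swap retry loop: read the counter, compute the next ID (skipping zero), and retry if another CPU won the race. Wrap-around is the intended behavior here, which is why the counter becomes atomic_unchecked_t. A userspace rendering with the GCC builtin in place of atomic_cmpxchg_unchecked():

/* CAS retry loop that adds 'more + 1' and never returns 0, mirroring
 * inet_getid(); wrap-around is deliberate, hence "unchecked". */
#include <stdio.h>

static int ip_id_count;   /* atomic_unchecked_t in the patch */

static int inet_getid_demo(int more)
{
	int old_id, new_id;

	more++;
	do {
		old_id = __atomic_load_n(&ip_id_count, __ATOMIC_RELAXED);
		new_id = old_id + more;
		if (!new_id)
			new_id = 1;   /* skip 0 on wrap */
	} while (!__atomic_compare_exchange_n(&ip_id_count, &old_id, new_id,
					      0, __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
	return new_id;
}

int main(void)
{
	printf("%d %d\n", inet_getid_demo(0), inet_getid_demo(0));
	return 0;
}
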
72386diff --git a/include/net/ip.h b/include/net/ip.h
72387index a68f838..74518ab 100644
72388--- a/include/net/ip.h
72389+++ b/include/net/ip.h
72390@@ -202,7 +202,7 @@ extern struct local_ports {
72391 } sysctl_local_ports;
72392 extern void inet_get_local_port_range(int *low, int *high);
72393
72394-extern unsigned long *sysctl_local_reserved_ports;
72395+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
72396 static inline int inet_is_reserved_local_port(int port)
72397 {
72398 return test_bit(port, sysctl_local_reserved_ports);
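
The reserved-ports hunk above replaces a pointer with a statically sized bitmap: 65536 ports at one bit apiece is 8 KiB, i.e. 65536 / 8 / sizeof(unsigned long) array slots, so the table needs no runtime allocation and its bounds are visible to the compiler. The same bitmap arithmetic in a standalone program:

/* Fixed-size port bitmap: 65536 ports, one bit each. */
#include <limits.h>
#include <stdio.h>

static unsigned long reserved_ports[65536 / 8 / sizeof(unsigned long)];

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static void set_port_bit(int port)
{
	reserved_ports[port / BITS_PER_LONG] |= 1UL << (port % BITS_PER_LONG);
}

static int test_port_bit(int port)
{
	return !!(reserved_ports[port / BITS_PER_LONG] &
		  (1UL << (port % BITS_PER_LONG)));
}

int main(void)
{
	set_port_bit(8080);
	printf("8080 reserved: %d, 80 reserved: %d\n",
	       test_port_bit(8080), test_port_bit(80));
	printf("array slots: %zu\n",
	       sizeof(reserved_ports) / sizeof(reserved_ports[0]));
	return 0;
}
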
72399diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
72400index e49db91..76a81de 100644
72401--- a/include/net/ip_fib.h
72402+++ b/include/net/ip_fib.h
72403@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
72404
72405 #define FIB_RES_SADDR(net, res) \
72406 ((FIB_RES_NH(res).nh_saddr_genid == \
72407- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
72408+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
72409 FIB_RES_NH(res).nh_saddr : \
72410 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
72411 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
72412diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
72413index 68c69d5..bdab192 100644
72414--- a/include/net/ip_vs.h
72415+++ b/include/net/ip_vs.h
72416@@ -599,7 +599,7 @@ struct ip_vs_conn {
72417 struct ip_vs_conn *control; /* Master control connection */
72418 atomic_t n_control; /* Number of controlled ones */
72419 struct ip_vs_dest *dest; /* real server */
72420- atomic_t in_pkts; /* incoming packet counter */
72421+ atomic_unchecked_t in_pkts; /* incoming packet counter */
72422
72423 /* packet transmitter for different forwarding methods. If it
72424 mangles the packet, it must return NF_DROP or better NF_STOLEN,
72425@@ -737,7 +737,7 @@ struct ip_vs_dest {
72426 __be16 port; /* port number of the server */
72427 union nf_inet_addr addr; /* IP address of the server */
72428 volatile unsigned int flags; /* dest status flags */
72429- atomic_t conn_flags; /* flags to copy to conn */
72430+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
72431 atomic_t weight; /* server weight */
72432
72433 atomic_t refcnt; /* reference counter */
72434@@ -980,11 +980,11 @@ struct netns_ipvs {
72435 /* ip_vs_lblc */
72436 int sysctl_lblc_expiration;
72437 struct ctl_table_header *lblc_ctl_header;
72438- struct ctl_table *lblc_ctl_table;
72439+ ctl_table_no_const *lblc_ctl_table;
72440 /* ip_vs_lblcr */
72441 int sysctl_lblcr_expiration;
72442 struct ctl_table_header *lblcr_ctl_header;
72443- struct ctl_table *lblcr_ctl_table;
72444+ ctl_table_no_const *lblcr_ctl_table;
72445 /* ip_vs_est */
72446 struct list_head est_list; /* estimator list */
72447 spinlock_t est_lock;
72448diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
72449index 80ffde3..968b0f4 100644
72450--- a/include/net/irda/ircomm_tty.h
72451+++ b/include/net/irda/ircomm_tty.h
72452@@ -35,6 +35,7 @@
72453 #include <linux/termios.h>
72454 #include <linux/timer.h>
72455 #include <linux/tty.h> /* struct tty_struct */
72456+#include <asm/local.h>
72457
72458 #include <net/irda/irias_object.h>
72459 #include <net/irda/ircomm_core.h>
72460diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
72461index cc7c197..9f2da2a 100644
72462--- a/include/net/iucv/af_iucv.h
72463+++ b/include/net/iucv/af_iucv.h
72464@@ -141,7 +141,7 @@ struct iucv_sock {
72465 struct iucv_sock_list {
72466 struct hlist_head head;
72467 rwlock_t lock;
72468- atomic_t autobind_name;
72469+ atomic_unchecked_t autobind_name;
72470 };
72471
72472 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
72473diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
72474index df83f69..9b640b8 100644
72475--- a/include/net/llc_c_ac.h
72476+++ b/include/net/llc_c_ac.h
72477@@ -87,7 +87,7 @@
72478 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
72479 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
72480
72481-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72482+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72483
72484 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
72485 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
72486diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
72487index 6ca3113..f8026dd 100644
72488--- a/include/net/llc_c_ev.h
72489+++ b/include/net/llc_c_ev.h
72490@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
72491 return (struct llc_conn_state_ev *)skb->cb;
72492 }
72493
72494-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72495-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72496+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72497+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72498
72499 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
72500 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
72501diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
72502index 0e79cfb..f46db31 100644
72503--- a/include/net/llc_c_st.h
72504+++ b/include/net/llc_c_st.h
72505@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
72506 u8 next_state;
72507 llc_conn_ev_qfyr_t *ev_qualifiers;
72508 llc_conn_action_t *ev_actions;
72509-};
72510+} __do_const;
72511
72512 struct llc_conn_state {
72513 u8 current_state;
72514diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
72515index 37a3bbd..55a4241 100644
72516--- a/include/net/llc_s_ac.h
72517+++ b/include/net/llc_s_ac.h
72518@@ -23,7 +23,7 @@
72519 #define SAP_ACT_TEST_IND 9
72520
72521 /* All action functions must look like this */
72522-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72523+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72524
72525 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
72526 struct sk_buff *skb);
72527diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
72528index 567c681..cd73ac0 100644
72529--- a/include/net/llc_s_st.h
72530+++ b/include/net/llc_s_st.h
72531@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
72532 llc_sap_ev_t ev;
72533 u8 next_state;
72534 llc_sap_action_t *ev_actions;
72535-};
72536+} __do_const;
72537
72538 struct llc_sap_state {
72539 u8 curr_state;
72540diff --git a/include/net/mac80211.h b/include/net/mac80211.h
72541index ee50c5e..1bc3b1a 100644
72542--- a/include/net/mac80211.h
72543+++ b/include/net/mac80211.h
72544@@ -3996,7 +3996,7 @@ struct rate_control_ops {
72545 void (*add_sta_debugfs)(void *priv, void *priv_sta,
72546 struct dentry *dir);
72547 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
72548-};
72549+} __do_const;
72550
72551 static inline int rate_supported(struct ieee80211_sta *sta,
72552 enum ieee80211_band band,
72553diff --git a/include/net/neighbour.h b/include/net/neighbour.h
72554index 0dab173..1b76af0 100644
72555--- a/include/net/neighbour.h
72556+++ b/include/net/neighbour.h
72557@@ -123,7 +123,7 @@ struct neigh_ops {
72558 void (*error_report)(struct neighbour *, struct sk_buff *);
72559 int (*output)(struct neighbour *, struct sk_buff *);
72560 int (*connected_output)(struct neighbour *, struct sk_buff *);
72561-};
72562+} __do_const;
72563
72564 struct pneigh_entry {
72565 struct pneigh_entry *next;
72566diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
72567index de644bc..dfbcc4c 100644
72568--- a/include/net/net_namespace.h
72569+++ b/include/net/net_namespace.h
72570@@ -115,7 +115,7 @@ struct net {
72571 #endif
72572 struct netns_ipvs *ipvs;
72573 struct sock *diag_nlsk;
72574- atomic_t rt_genid;
72575+ atomic_unchecked_t rt_genid;
72576 };
72577
72578 /*
72579@@ -272,7 +272,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
72580 #define __net_init __init
72581 #define __net_exit __exit_refok
72582 #define __net_initdata __initdata
72583+#ifdef CONSTIFY_PLUGIN
72584 #define __net_initconst __initconst
72585+#else
72586+#define __net_initconst __initdata
72587+#endif
72588 #endif
72589
72590 struct pernet_operations {
72591@@ -282,7 +286,7 @@ struct pernet_operations {
72592 void (*exit_batch)(struct list_head *net_exit_list);
72593 int *id;
72594 size_t size;
72595-};
72596+} __do_const;
72597
72598 /*
72599 * Use these carefully. If you implement a network device and it
72600@@ -330,12 +334,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
72601
72602 static inline int rt_genid(struct net *net)
72603 {
72604- return atomic_read(&net->rt_genid);
72605+ return atomic_read_unchecked(&net->rt_genid);
72606 }
72607
72608 static inline void rt_genid_bump(struct net *net)
72609 {
72610- atomic_inc(&net->rt_genid);
72611+ atomic_inc_unchecked(&net->rt_genid);
72612 }
72613
72614 #endif /* __NET_NET_NAMESPACE_H */
72615diff --git a/include/net/netdma.h b/include/net/netdma.h
72616index 8ba8ce2..99b7fff 100644
72617--- a/include/net/netdma.h
72618+++ b/include/net/netdma.h
72619@@ -24,7 +24,7 @@
72620 #include <linux/dmaengine.h>
72621 #include <linux/skbuff.h>
72622
72623-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72624+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72625 struct sk_buff *skb, int offset, struct iovec *to,
72626 size_t len, struct dma_pinned_list *pinned_list);
72627
72628diff --git a/include/net/netlink.h b/include/net/netlink.h
72629index 9690b0f..87aded7 100644
72630--- a/include/net/netlink.h
72631+++ b/include/net/netlink.h
72632@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
72633 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
72634 {
72635 if (mark)
72636- skb_trim(skb, (unsigned char *) mark - skb->data);
72637+ skb_trim(skb, (const unsigned char *) mark - skb->data);
72638 }
72639
72640 /**
72641diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
72642index 923cb20..deae816 100644
72643--- a/include/net/netns/conntrack.h
72644+++ b/include/net/netns/conntrack.h
72645@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
72646 struct nf_proto_net {
72647 #ifdef CONFIG_SYSCTL
72648 struct ctl_table_header *ctl_table_header;
72649- struct ctl_table *ctl_table;
72650+ ctl_table_no_const *ctl_table;
72651 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
72652 struct ctl_table_header *ctl_compat_header;
72653- struct ctl_table *ctl_compat_table;
72654+ ctl_table_no_const *ctl_compat_table;
72655 #endif
72656 #endif
72657 unsigned int users;
72658@@ -58,7 +58,7 @@ struct nf_ip_net {
72659 struct nf_icmp_net icmpv6;
72660 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
72661 struct ctl_table_header *ctl_table_header;
72662- struct ctl_table *ctl_table;
72663+ ctl_table_no_const *ctl_table;
72664 #endif
72665 };
72666
72667diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
72668index 2ae2b83..dbdc85e 100644
72669--- a/include/net/netns/ipv4.h
72670+++ b/include/net/netns/ipv4.h
72671@@ -64,7 +64,7 @@ struct netns_ipv4 {
72672 kgid_t sysctl_ping_group_range[2];
72673 long sysctl_tcp_mem[3];
72674
72675- atomic_t dev_addr_genid;
72676+ atomic_unchecked_t dev_addr_genid;
72677
72678 #ifdef CONFIG_IP_MROUTE
72679 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
72680diff --git a/include/net/protocol.h b/include/net/protocol.h
72681index 047c047..b9dad15 100644
72682--- a/include/net/protocol.h
72683+++ b/include/net/protocol.h
72684@@ -44,7 +44,7 @@ struct net_protocol {
72685 void (*err_handler)(struct sk_buff *skb, u32 info);
72686 unsigned int no_policy:1,
72687 netns_ok:1;
72688-};
72689+} __do_const;
72690
72691 #if IS_ENABLED(CONFIG_IPV6)
72692 struct inet6_protocol {
72693@@ -57,7 +57,7 @@ struct inet6_protocol {
72694 u8 type, u8 code, int offset,
72695 __be32 info);
72696 unsigned int flags; /* INET6_PROTO_xxx */
72697-};
72698+} __do_const;
72699
72700 #define INET6_PROTO_NOPOLICY 0x1
72701 #define INET6_PROTO_FINAL 0x2
72702diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
72703index 5a15fab..d799ea7 100644
72704--- a/include/net/rtnetlink.h
72705+++ b/include/net/rtnetlink.h
72706@@ -81,7 +81,7 @@ struct rtnl_link_ops {
72707 const struct net_device *dev);
72708 unsigned int (*get_num_tx_queues)(void);
72709 unsigned int (*get_num_rx_queues)(void);
72710-};
72711+} __do_const;
72712
72713 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
72714 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
72715diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
72716index 7fdf298..197e9f7 100644
72717--- a/include/net/sctp/sctp.h
72718+++ b/include/net/sctp/sctp.h
72719@@ -330,9 +330,9 @@ do { \
72720
72721 #else /* SCTP_DEBUG */
72722
72723-#define SCTP_DEBUG_PRINTK(whatever...)
72724-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
72725-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
72726+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
72727+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
72728+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
72729 #define SCTP_ENABLE_DEBUG
72730 #define SCTP_DISABLE_DEBUG
72731 #define SCTP_ASSERT(expr, str, func)
72732diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
72733index 2a82d13..62a31c2 100644
72734--- a/include/net/sctp/sm.h
72735+++ b/include/net/sctp/sm.h
72736@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
72737 typedef struct {
72738 sctp_state_fn_t *fn;
72739 const char *name;
72740-} sctp_sm_table_entry_t;
72741+} __do_const sctp_sm_table_entry_t;
72742
72743 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
72744 * currently in use.
72745@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
72746 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
72747
72748 /* Extern declarations for major data structures. */
72749-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72750+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72751
72752
72753 /* Get the size of a DATA chunk payload. */
72754diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
72755index fdeb85a..1329d95 100644
72756--- a/include/net/sctp/structs.h
72757+++ b/include/net/sctp/structs.h
72758@@ -517,7 +517,7 @@ struct sctp_pf {
72759 struct sctp_association *asoc);
72760 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
72761 struct sctp_af *af;
72762-};
72763+} __do_const;
72764
72765
72766 /* Structure to track chunk fragments that have been acked, but peer
72767diff --git a/include/net/sock.h b/include/net/sock.h
72768index 25afaa0..8bb0070 100644
72769--- a/include/net/sock.h
72770+++ b/include/net/sock.h
72771@@ -322,7 +322,7 @@ struct sock {
72772 #ifdef CONFIG_RPS
72773 __u32 sk_rxhash;
72774 #endif
72775- atomic_t sk_drops;
72776+ atomic_unchecked_t sk_drops;
72777 int sk_rcvbuf;
72778
72779 struct sk_filter __rcu *sk_filter;
72780@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
72781 }
72782
72783 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
72784- char __user *from, char *to,
72785+ char __user *from, unsigned char *to,
72786 int copy, int offset)
72787 {
72788 if (skb->ip_summed == CHECKSUM_NONE) {
72789@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
72790 }
72791 }
72792
72793-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72794+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72795
72796 /**
72797 * sk_page_frag - return an appropriate page_frag
72798diff --git a/include/net/tcp.h b/include/net/tcp.h
72799index aed42c7..43890c6 100644
72800--- a/include/net/tcp.h
72801+++ b/include/net/tcp.h
72802@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
72803 extern void tcp_xmit_retransmit_queue(struct sock *);
72804 extern void tcp_simple_retransmit(struct sock *);
72805 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
72806-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72807+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72808
72809 extern void tcp_send_probe0(struct sock *);
72810 extern void tcp_send_partial(struct sock *);
72811@@ -701,8 +701,8 @@ struct tcp_skb_cb {
72812 struct inet6_skb_parm h6;
72813 #endif
72814 } header; /* For incoming frames */
72815- __u32 seq; /* Starting sequence number */
72816- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
72817+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
72818+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
72819 __u32 when; /* used to compute rtt's */
72820 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
72821
72822@@ -716,7 +716,7 @@ struct tcp_skb_cb {
72823
72824 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
72825 /* 1 byte hole */
72826- __u32 ack_seq; /* Sequence number ACK'd */
72827+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
72828 };
72829
72830 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
72831diff --git a/include/net/xfrm.h b/include/net/xfrm.h
72832index 63445ed..d6fc34f 100644
72833--- a/include/net/xfrm.h
72834+++ b/include/net/xfrm.h
72835@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
72836 struct net_device *dev,
72837 const struct flowi *fl);
72838 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
72839-};
72840+} __do_const;
72841
72842 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
72843 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
72844@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
72845 struct sk_buff *skb);
72846 int (*transport_finish)(struct sk_buff *skb,
72847 int async);
72848-};
72849+} __do_const;
72850
72851 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
72852 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
72853@@ -423,7 +423,7 @@ struct xfrm_mode {
72854 struct module *owner;
72855 unsigned int encap;
72856 int flags;
72857-};
72858+} __do_const;
72859
72860 /* Flags for xfrm_mode. */
72861 enum {
72862@@ -514,7 +514,7 @@ struct xfrm_policy {
72863 struct timer_list timer;
72864
72865 struct flow_cache_object flo;
72866- atomic_t genid;
72867+ atomic_unchecked_t genid;
72868 u32 priority;
72869 u32 index;
72870 struct xfrm_mark mark;
72871diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
72872index 1a046b1..ee0bef0 100644
72873--- a/include/rdma/iw_cm.h
72874+++ b/include/rdma/iw_cm.h
72875@@ -122,7 +122,7 @@ struct iw_cm_verbs {
72876 int backlog);
72877
72878 int (*destroy_listen)(struct iw_cm_id *cm_id);
72879-};
72880+} __no_const;
72881
72882 /**
72883 * iw_create_cm_id - Create an IW CM identifier.
72884diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
72885index 399162b..b337f1a 100644
72886--- a/include/scsi/libfc.h
72887+++ b/include/scsi/libfc.h
72888@@ -762,6 +762,7 @@ struct libfc_function_template {
72889 */
72890 void (*disc_stop_final) (struct fc_lport *);
72891 };
72892+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
72893
72894 /**
72895 * struct fc_disc - Discovery context
72896@@ -866,7 +867,7 @@ struct fc_lport {
72897 struct fc_vport *vport;
72898
72899 /* Operational Information */
72900- struct libfc_function_template tt;
72901+ libfc_function_template_no_const tt;
72902 u8 link_up;
72903 u8 qfull;
72904 enum fc_lport_state state;
72905diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
72906index e65c62e..aa2e5a2 100644
72907--- a/include/scsi/scsi_device.h
72908+++ b/include/scsi/scsi_device.h
72909@@ -170,9 +170,9 @@ struct scsi_device {
72910 unsigned int max_device_blocked; /* what device_blocked counts down from */
72911 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
72912
72913- atomic_t iorequest_cnt;
72914- atomic_t iodone_cnt;
72915- atomic_t ioerr_cnt;
72916+ atomic_unchecked_t iorequest_cnt;
72917+ atomic_unchecked_t iodone_cnt;
72918+ atomic_unchecked_t ioerr_cnt;
72919
72920 struct device sdev_gendev,
72921 sdev_dev;
72922diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
72923index b797e8f..8e2c3aa 100644
72924--- a/include/scsi/scsi_transport_fc.h
72925+++ b/include/scsi/scsi_transport_fc.h
72926@@ -751,7 +751,8 @@ struct fc_function_template {
72927 unsigned long show_host_system_hostname:1;
72928
72929 unsigned long disable_target_scan:1;
72930-};
72931+} __do_const;
72932+typedef struct fc_function_template __no_const fc_function_template_no_const;
72933
72934
72935 /**
72936diff --git a/include/sound/soc.h b/include/sound/soc.h
72937index bc56738..a4be132 100644
72938--- a/include/sound/soc.h
72939+++ b/include/sound/soc.h
72940@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
72941 /* probe ordering - for components with runtime dependencies */
72942 int probe_order;
72943 int remove_order;
72944-};
72945+} __do_const;
72946
72947 /* SoC platform interface */
72948 struct snd_soc_platform_driver {
72949@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
72950 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
72951 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
72952 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
72953-};
72954+} __do_const;
72955
72956 struct snd_soc_platform {
72957 const char *name;
72958diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
72959index 663e34a..91b306a 100644
72960--- a/include/target/target_core_base.h
72961+++ b/include/target/target_core_base.h
72962@@ -654,7 +654,7 @@ struct se_device {
72963 spinlock_t stats_lock;
72964 /* Active commands on this virtual SE device */
72965 atomic_t simple_cmds;
72966- atomic_t dev_ordered_id;
72967+ atomic_unchecked_t dev_ordered_id;
72968 atomic_t dev_ordered_sync;
72969 atomic_t dev_qf_count;
72970 int export_count;
72971diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
72972new file mode 100644
72973index 0000000..fb634b7
72974--- /dev/null
72975+++ b/include/trace/events/fs.h
72976@@ -0,0 +1,53 @@
72977+#undef TRACE_SYSTEM
72978+#define TRACE_SYSTEM fs
72979+
72980+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
72981+#define _TRACE_FS_H
72982+
72983+#include <linux/fs.h>
72984+#include <linux/tracepoint.h>
72985+
72986+TRACE_EVENT(do_sys_open,
72987+
72988+ TP_PROTO(const char *filename, int flags, int mode),
72989+
72990+ TP_ARGS(filename, flags, mode),
72991+
72992+ TP_STRUCT__entry(
72993+ __string( filename, filename )
72994+ __field( int, flags )
72995+ __field( int, mode )
72996+ ),
72997+
72998+ TP_fast_assign(
72999+ __assign_str(filename, filename);
73000+ __entry->flags = flags;
73001+ __entry->mode = mode;
73002+ ),
73003+
73004+ TP_printk("\"%s\" %x %o",
73005+ __get_str(filename), __entry->flags, __entry->mode)
73006+);
73007+
73008+TRACE_EVENT(open_exec,
73009+
73010+ TP_PROTO(const char *filename),
73011+
73012+ TP_ARGS(filename),
73013+
73014+ TP_STRUCT__entry(
73015+ __string( filename, filename )
73016+ ),
73017+
73018+ TP_fast_assign(
73019+ __assign_str(filename, filename);
73020+ ),
73021+
73022+ TP_printk("\"%s\"",
73023+ __get_str(filename))
73024+);
73025+
73026+#endif /* _TRACE_FS_H */
73027+
73028+/* This part must be outside protection */
73029+#include <trace/define_trace.h>
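
The new trace/events/fs.h above uses the standard TRACE_EVENT() triple: TP_STRUCT__entry declares the per-event record, TP_fast_assign fills it on the hot path, and TP_printk formats it for the trace buffer; the macro machinery also generates a trace_do_sys_open() hook for the open path to invoke. A userspace caricature of those three phases — the struct and function below are illustrative, not the generated code:

/* Caricature of TRACE_EVENT's three parts: an entry record
 * (TP_STRUCT__entry), a fast-assign step, and a printk format. */
#include <stdio.h>

struct do_sys_open_entry {
	char filename[256];
	int flags;
	int mode;
};

static void trace_do_sys_open(const char *filename, int flags, int mode)
{
	struct do_sys_open_entry e;                          /* __entry */
	snprintf(e.filename, sizeof(e.filename), "%s", filename); /* __assign_str */
	e.flags = flags;                                     /* __entry->flags */
	e.mode = mode;
	printf("\"%s\" %x %o\n", e.filename, e.flags, e.mode);   /* TP_printk */
}

int main(void)
{
	trace_do_sys_open("/etc/passwd", 0, 0644);
	return 0;
}
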
73030diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
73031index 1c09820..7f5ec79 100644
73032--- a/include/trace/events/irq.h
73033+++ b/include/trace/events/irq.h
73034@@ -36,7 +36,7 @@ struct softirq_action;
73035 */
73036 TRACE_EVENT(irq_handler_entry,
73037
73038- TP_PROTO(int irq, struct irqaction *action),
73039+ TP_PROTO(int irq, const struct irqaction *action),
73040
73041 TP_ARGS(irq, action),
73042
73043@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
73044 */
73045 TRACE_EVENT(irq_handler_exit,
73046
73047- TP_PROTO(int irq, struct irqaction *action, int ret),
73048+ TP_PROTO(int irq, const struct irqaction *action, int ret),
73049
73050 TP_ARGS(irq, action, ret),
73051
73052diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
73053index 7caf44c..23c6f27 100644
73054--- a/include/uapi/linux/a.out.h
73055+++ b/include/uapi/linux/a.out.h
73056@@ -39,6 +39,14 @@ enum machine_type {
73057 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
73058 };
73059
73060+/* Constants for the N_FLAGS field */
73061+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73062+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
73063+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
73064+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
73065+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73066+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73067+
73068 #if !defined (N_MAGIC)
73069 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
73070 #endif
73071diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
73072index d876736..ccce5c0 100644
73073--- a/include/uapi/linux/byteorder/little_endian.h
73074+++ b/include/uapi/linux/byteorder/little_endian.h
73075@@ -42,51 +42,51 @@
73076
73077 static inline __le64 __cpu_to_le64p(const __u64 *p)
73078 {
73079- return (__force __le64)*p;
73080+ return (__force const __le64)*p;
73081 }
73082-static inline __u64 __le64_to_cpup(const __le64 *p)
73083+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
73084 {
73085- return (__force __u64)*p;
73086+ return (__force const __u64)*p;
73087 }
73088 static inline __le32 __cpu_to_le32p(const __u32 *p)
73089 {
73090- return (__force __le32)*p;
73091+ return (__force const __le32)*p;
73092 }
73093 static inline __u32 __le32_to_cpup(const __le32 *p)
73094 {
73095- return (__force __u32)*p;
73096+ return (__force const __u32)*p;
73097 }
73098 static inline __le16 __cpu_to_le16p(const __u16 *p)
73099 {
73100- return (__force __le16)*p;
73101+ return (__force const __le16)*p;
73102 }
73103 static inline __u16 __le16_to_cpup(const __le16 *p)
73104 {
73105- return (__force __u16)*p;
73106+ return (__force const __u16)*p;
73107 }
73108 static inline __be64 __cpu_to_be64p(const __u64 *p)
73109 {
73110- return (__force __be64)__swab64p(p);
73111+ return (__force const __be64)__swab64p(p);
73112 }
73113 static inline __u64 __be64_to_cpup(const __be64 *p)
73114 {
73115- return __swab64p((__u64 *)p);
73116+ return __swab64p((const __u64 *)p);
73117 }
73118 static inline __be32 __cpu_to_be32p(const __u32 *p)
73119 {
73120- return (__force __be32)__swab32p(p);
73121+ return (__force const __be32)__swab32p(p);
73122 }
73123-static inline __u32 __be32_to_cpup(const __be32 *p)
73124+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
73125 {
73126- return __swab32p((__u32 *)p);
73127+ return __swab32p((const __u32 *)p);
73128 }
73129 static inline __be16 __cpu_to_be16p(const __u16 *p)
73130 {
73131- return (__force __be16)__swab16p(p);
73132+ return (__force const __be16)__swab16p(p);
73133 }
73134 static inline __u16 __be16_to_cpup(const __be16 *p)
73135 {
73136- return __swab16p((__u16 *)p);
73137+ return __swab16p((const __u16 *)p);
73138 }
73139 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
73140 #define __le64_to_cpus(x) do { (void)(x); } while (0)
73141diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
73142index 126a817..d522bd1 100644
73143--- a/include/uapi/linux/elf.h
73144+++ b/include/uapi/linux/elf.h
73145@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
73146 #define PT_GNU_EH_FRAME 0x6474e550
73147
73148 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
73149+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
73150+
73151+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
73152+
73153+/* Constants for the e_flags field */
73154+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73155+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
73156+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
73157+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
73158+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73159+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73160
73161 /*
73162 * Extended Numbering
73163@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
73164 #define DT_DEBUG 21
73165 #define DT_TEXTREL 22
73166 #define DT_JMPREL 23
73167+#define DT_FLAGS 30
73168+ #define DF_TEXTREL 0x00000004
73169 #define DT_ENCODING 32
73170 #define OLD_DT_LOOS 0x60000000
73171 #define DT_LOOS 0x6000000d
73172@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
73173 #define PF_W 0x2
73174 #define PF_X 0x1
73175
73176+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
73177+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
73178+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
73179+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
73180+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
73181+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
73182+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
73183+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
73184+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
73185+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
73186+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
73187+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
73188+
73189 typedef struct elf32_phdr{
73190 Elf32_Word p_type;
73191 Elf32_Off p_offset;
73192@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
73193 #define EI_OSABI 7
73194 #define EI_PAD 8
73195
73196+#define EI_PAX 14
73197+
73198 #define ELFMAG0 0x7f /* EI_MAG */
73199 #define ELFMAG1 'E'
73200 #define ELFMAG2 'L'
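
The PF_PAX_* program-header bits above come in enable/disable pairs so a marked binary can distinguish "explicitly off" from "unset, inherit the system default". One plausible way a loader could interpret such a pair is sketched below; the decision logic is illustrative, not quoted from the kernel:

/* Paired enable/disable flag bits: explicit off wins, explicit on is
 * next, and neither bit set falls back to the default. */
#include <stdio.h>

#define PF_PAGEEXEC   (1U << 4)
#define PF_NOPAGEEXEC (1U << 5)

static int pageexec_enabled(unsigned int p_flags, int def)
{
	if (p_flags & PF_NOPAGEEXEC)
		return 0;
	if (p_flags & PF_PAGEEXEC)
		return 1;
	return def;
}

int main(void)
{
	printf("%d %d %d\n",
	       pageexec_enabled(PF_PAGEEXEC, 0),   /* 1: explicitly on  */
	       pageexec_enabled(PF_NOPAGEEXEC, 1), /* 0: explicitly off */
	       pageexec_enabled(0, 1));            /* 1: default        */
	return 0;
}
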
73201diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
73202index aa169c4..6a2771d 100644
73203--- a/include/uapi/linux/personality.h
73204+++ b/include/uapi/linux/personality.h
73205@@ -30,6 +30,7 @@ enum {
73206 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
73207 ADDR_NO_RANDOMIZE | \
73208 ADDR_COMPAT_LAYOUT | \
73209+ ADDR_LIMIT_3GB | \
73210 MMAP_PAGE_ZERO)
73211
73212 /*
73213diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
73214index 7530e74..e714828 100644
73215--- a/include/uapi/linux/screen_info.h
73216+++ b/include/uapi/linux/screen_info.h
73217@@ -43,7 +43,8 @@ struct screen_info {
73218 __u16 pages; /* 0x32 */
73219 __u16 vesa_attributes; /* 0x34 */
73220 __u32 capabilities; /* 0x36 */
73221- __u8 _reserved[6]; /* 0x3a */
73222+ __u16 vesapm_size; /* 0x3a */
73223+ __u8 _reserved[4]; /* 0x3c */
73224 } __attribute__((packed));
73225
73226 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
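
The screen_info hunk carves a __u16 vesapm_size out of the six reserved bytes at 0x3a and shrinks _reserved to four, so every later offset and the overall packed size are unchanged, keeping the boot-protocol layout ABI-stable. A quick offset check over a stand-in tail of the struct:

/* The new __u16 consumes 2 of the 6 reserved bytes: offsets and
 * total size of the packed layout stay put. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct screen_info_tail {
	uint32_t capabilities;   /* 0x36 in the real struct */
	uint16_t vesapm_size;    /* was the first 2 reserved bytes */
	uint8_t  _reserved[4];   /* 6 - 2 remaining */
} __attribute__((packed));

int main(void)
{
	printf("vesapm_size at +%zu, tail size %zu\n",
	       offsetof(struct screen_info_tail, vesapm_size),
	       sizeof(struct screen_info_tail));  /* prints 4 and 10 */
	return 0;
}
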
73227diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
73228index 0e011eb..82681b1 100644
73229--- a/include/uapi/linux/swab.h
73230+++ b/include/uapi/linux/swab.h
73231@@ -43,7 +43,7 @@
73232 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
73233 */
73234
73235-static inline __attribute_const__ __u16 __fswab16(__u16 val)
73236+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
73237 {
73238 #ifdef __HAVE_BUILTIN_BSWAP16__
73239 return __builtin_bswap16(val);
73240@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
73241 #endif
73242 }
73243
73244-static inline __attribute_const__ __u32 __fswab32(__u32 val)
73245+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
73246 {
73247 #ifdef __HAVE_BUILTIN_BSWAP32__
73248 return __builtin_bswap32(val);
73249@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
73250 #endif
73251 }
73252
73253-static inline __attribute_const__ __u64 __fswab64(__u64 val)
73254+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
73255 {
73256 #ifdef __HAVE_BUILTIN_BSWAP64__
73257 return __builtin_bswap64(val);
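
__intentional_overflow(-1) on the byte-swap helpers above tells the size_overflow plugin that truncation and wrap-around are the whole point of these functions, so the instrumentation stays out of the way. The underlying operations are ordinary GCC intrinsics:

/* Byte swap via the GCC builtin the hunk's #ifdef branch selects. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t v = 0x1234;
	printf("%#x\n", (unsigned)__builtin_bswap16(v));  /* prints 0x3412 */
	return 0;
}
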
73258diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
73259index 6d67213..8dab561 100644
73260--- a/include/uapi/linux/sysctl.h
73261+++ b/include/uapi/linux/sysctl.h
73262@@ -155,7 +155,11 @@ enum
73263 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
73264 };
73265
73266-
73267+#ifdef CONFIG_PAX_SOFTMODE
73268+enum {
73269+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
73270+};
73271+#endif
73272
73273 /* CTL_VM names: */
73274 enum
73275diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
73276index 26607bd..588b65f 100644
73277--- a/include/uapi/linux/xattr.h
73278+++ b/include/uapi/linux/xattr.h
73279@@ -60,5 +60,9 @@
73280 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
73281 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
73282
73283+/* User namespace */
73284+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
73285+#define XATTR_PAX_FLAGS_SUFFIX "flags"
73286+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
73287
73288 #endif /* _UAPI_LINUX_XATTR_H */
73289diff --git a/include/video/udlfb.h b/include/video/udlfb.h
73290index f9466fa..f4e2b81 100644
73291--- a/include/video/udlfb.h
73292+++ b/include/video/udlfb.h
73293@@ -53,10 +53,10 @@ struct dlfb_data {
73294 u32 pseudo_palette[256];
73295 int blank_mode; /*one of FB_BLANK_ */
73296 /* blit-only rendering path metrics, exposed through sysfs */
73297- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73298- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
73299- atomic_t bytes_sent; /* to usb, after compression including overhead */
73300- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
73301+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73302+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
73303+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
73304+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
73305 };
73306
73307 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
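atomic_unchecked_t is a grsecurity/PaX type: under PAX_REFCOUNT the plain atomic_t traps on overflow to defeat reference-count exploits, so counters that only accumulate statistics, and may legitimately wrap, are switched to the unchecked variant and its parallel *_unchecked API. The usage pattern in isolation, assuming the PaX definitions are in scope:

    /* Statistics counter: wrapping here is harmless, so no overflow trap. */
    static atomic_unchecked_t bytes_rendered = ATOMIC_INIT(0);

    static void account_render(unsigned int len)
    {
            atomic_add_unchecked(len, &bytes_rendered);
    }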
73308diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
73309index 0993a22..32ba2fe 100644
73310--- a/include/video/uvesafb.h
73311+++ b/include/video/uvesafb.h
73312@@ -177,6 +177,7 @@ struct uvesafb_par {
73313 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
73314 u8 pmi_setpal; /* PMI for palette changes */
73315 u16 *pmi_base; /* protected mode interface location */
73316+ u8 *pmi_code; /* protected mode code location */
73317 void *pmi_start;
73318 void *pmi_pal;
73319 u8 *vbe_state_orig; /*
73320diff --git a/init/Kconfig b/init/Kconfig
73321index be8b7f5..1eeca9b 100644
73322--- a/init/Kconfig
73323+++ b/init/Kconfig
73324@@ -990,6 +990,7 @@ endif # CGROUPS
73325
73326 config CHECKPOINT_RESTORE
73327 bool "Checkpoint/restore support" if EXPERT
73328+ depends on !GRKERNSEC
73329 default n
73330 help
73331 Enables additional kernel features for the sake of checkpoint/restore.
73332@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
73333
73334 config COMPAT_BRK
73335 bool "Disable heap randomization"
73336- default y
73337+ default n
73338 help
73339 Randomizing heap placement makes heap exploits harder, but it
73340 also breaks ancient binaries (including anything libc5 based).
73341@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
73342 config STOP_MACHINE
73343 bool
73344 default y
73345- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
73346+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
73347 help
73348 Need stop_machine() primitive.
73349
73350diff --git a/init/Makefile b/init/Makefile
73351index 7bc47ee..6da2dc7 100644
73352--- a/init/Makefile
73353+++ b/init/Makefile
73354@@ -2,6 +2,9 @@
73355 # Makefile for the linux kernel.
73356 #
73357
73358+ccflags-y := $(GCC_PLUGINS_CFLAGS)
73359+asflags-y := $(GCC_PLUGINS_AFLAGS)
73360+
73361 obj-y := main.o version.o mounts.o
73362 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
73363 obj-y += noinitramfs.o
73364diff --git a/init/do_mounts.c b/init/do_mounts.c
73365index 1d1b634..a1c810f 100644
73366--- a/init/do_mounts.c
73367+++ b/init/do_mounts.c
73368@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
73369 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
73370 {
73371 struct super_block *s;
73372- int err = sys_mount(name, "/root", fs, flags, data);
73373+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
73374 if (err)
73375 return err;
73376
73377- sys_chdir("/root");
73378+ sys_chdir((const char __force_user *)"/root");
73379 s = current->fs->pwd.dentry->d_sb;
73380 ROOT_DEV = s->s_dev;
73381 printk(KERN_INFO
73382@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
73383 va_start(args, fmt);
73384 vsprintf(buf, fmt, args);
73385 va_end(args);
73386- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
73387+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
73388 if (fd >= 0) {
73389 sys_ioctl(fd, FDEJECT, 0);
73390 sys_close(fd);
73391 }
73392 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
73393- fd = sys_open("/dev/console", O_RDWR, 0);
73394+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
73395 if (fd >= 0) {
73396 sys_ioctl(fd, TCGETS, (long)&termios);
73397 termios.c_lflag &= ~ICANON;
73398 sys_ioctl(fd, TCSETSF, (long)&termios);
73399- sys_read(fd, &c, 1);
73400+ sys_read(fd, (char __user *)&c, 1);
73401 termios.c_lflag |= ICANON;
73402 sys_ioctl(fd, TCSETSF, (long)&termios);
73403 sys_close(fd);
73404@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
73405 mount_root();
73406 out:
73407 devtmpfs_mount("dev");
73408- sys_mount(".", "/", NULL, MS_MOVE, NULL);
73409- sys_chroot(".");
73410+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
73411+ sys_chroot((const char __force_user *)".");
73412 }
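The __force_user casts throughout init/ serve the split between kernel and user address spaces that sparse (and PaX UDEREF) enforce. Early-boot code calls syscall bodies directly with kernel strings, while the sys_* prototypes declare __user pointers; the cast states explicitly that crossing the annotation boundary is intended here. A sketch of how the annotations are typically defined for sparse, with __force_user assumed to be grsec shorthand for the combined attribute:

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force)) /* silences the cast warning */
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user /* assumed grsec shorthand */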
73413diff --git a/init/do_mounts.h b/init/do_mounts.h
73414index f5b978a..69dbfe8 100644
73415--- a/init/do_mounts.h
73416+++ b/init/do_mounts.h
73417@@ -15,15 +15,15 @@ extern int root_mountflags;
73418
73419 static inline int create_dev(char *name, dev_t dev)
73420 {
73421- sys_unlink(name);
73422- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
73423+ sys_unlink((char __force_user *)name);
73424+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
73425 }
73426
73427 #if BITS_PER_LONG == 32
73428 static inline u32 bstat(char *name)
73429 {
73430 struct stat64 stat;
73431- if (sys_stat64(name, &stat) != 0)
73432+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
73433 return 0;
73434 if (!S_ISBLK(stat.st_mode))
73435 return 0;
73436@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
73437 static inline u32 bstat(char *name)
73438 {
73439 struct stat stat;
73440- if (sys_newstat(name, &stat) != 0)
73441+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
73442 return 0;
73443 if (!S_ISBLK(stat.st_mode))
73444 return 0;
73445diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
73446index f9acf71..1e19144 100644
73447--- a/init/do_mounts_initrd.c
73448+++ b/init/do_mounts_initrd.c
73449@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
73450 create_dev("/dev/root.old", Root_RAM0);
73451 /* mount initrd on rootfs' /root */
73452 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
73453- sys_mkdir("/old", 0700);
73454- sys_chdir("/old");
73455+ sys_mkdir((const char __force_user *)"/old", 0700);
73456+ sys_chdir((const char __force_user *)"/old");
73457
73458 /*
73459 * In case that a resume from disk is carried out by linuxrc or one of
73460@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
73461 current->flags &= ~PF_FREEZER_SKIP;
73462
73463 /* move initrd to rootfs' /old */
73464- sys_mount("..", ".", NULL, MS_MOVE, NULL);
73465+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
73466 /* switch root and cwd back to / of rootfs */
73467- sys_chroot("..");
73468+ sys_chroot((const char __force_user *)"..");
73469
73470 if (new_decode_dev(real_root_dev) == Root_RAM0) {
73471- sys_chdir("/old");
73472+ sys_chdir((const char __force_user *)"/old");
73473 return;
73474 }
73475
73476- sys_chdir("/");
73477+ sys_chdir((const char __force_user *)"/");
73478 ROOT_DEV = new_decode_dev(real_root_dev);
73479 mount_root();
73480
73481 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
73482- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
73483+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
73484 if (!error)
73485 printk("okay\n");
73486 else {
73487- int fd = sys_open("/dev/root.old", O_RDWR, 0);
73488+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
73489 if (error == -ENOENT)
73490 printk("/initrd does not exist. Ignored.\n");
73491 else
73492 printk("failed\n");
73493 printk(KERN_NOTICE "Unmounting old root\n");
73494- sys_umount("/old", MNT_DETACH);
73495+ sys_umount((char __force_user *)"/old", MNT_DETACH);
73496 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
73497 if (fd < 0) {
73498 error = fd;
73499@@ -120,11 +120,11 @@ int __init initrd_load(void)
73500 * mounted in the normal path.
73501 */
73502 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
73503- sys_unlink("/initrd.image");
73504+ sys_unlink((const char __force_user *)"/initrd.image");
73505 handle_initrd();
73506 return 1;
73507 }
73508 }
73509- sys_unlink("/initrd.image");
73510+ sys_unlink((const char __force_user *)"/initrd.image");
73511 return 0;
73512 }
73513diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
73514index 8cb6db5..d729f50 100644
73515--- a/init/do_mounts_md.c
73516+++ b/init/do_mounts_md.c
73517@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
73518 partitioned ? "_d" : "", minor,
73519 md_setup_args[ent].device_names);
73520
73521- fd = sys_open(name, 0, 0);
73522+ fd = sys_open((char __force_user *)name, 0, 0);
73523 if (fd < 0) {
73524 printk(KERN_ERR "md: open failed - cannot start "
73525 "array %s\n", name);
73526@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
73527 * array without it
73528 */
73529 sys_close(fd);
73530- fd = sys_open(name, 0, 0);
73531+ fd = sys_open((char __force_user *)name, 0, 0);
73532 sys_ioctl(fd, BLKRRPART, 0);
73533 }
73534 sys_close(fd);
73535@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
73536
73537 wait_for_device_probe();
73538
73539- fd = sys_open("/dev/md0", 0, 0);
73540+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
73541 if (fd >= 0) {
73542 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
73543 sys_close(fd);
73544diff --git a/init/init_task.c b/init/init_task.c
73545index 8b2f399..f0797c9 100644
73546--- a/init/init_task.c
73547+++ b/init/init_task.c
73548@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
73549 * Initial thread structure. Alignment of this is handled by a special
73550 * linker map entry.
73551 */
73552+#ifdef CONFIG_X86
73553+union thread_union init_thread_union __init_task_data;
73554+#else
73555 union thread_union init_thread_union __init_task_data =
73556 { INIT_THREAD_INFO(init_task) };
73557+#endif
73558diff --git a/init/initramfs.c b/init/initramfs.c
73559index 84c6bf1..8899338 100644
73560--- a/init/initramfs.c
73561+++ b/init/initramfs.c
73562@@ -84,7 +84,7 @@ static void __init free_hash(void)
73563 }
73564 }
73565
73566-static long __init do_utime(char *filename, time_t mtime)
73567+static long __init do_utime(char __force_user *filename, time_t mtime)
73568 {
73569 struct timespec t[2];
73570
73571@@ -119,7 +119,7 @@ static void __init dir_utime(void)
73572 struct dir_entry *de, *tmp;
73573 list_for_each_entry_safe(de, tmp, &dir_list, list) {
73574 list_del(&de->list);
73575- do_utime(de->name, de->mtime);
73576+ do_utime((char __force_user *)de->name, de->mtime);
73577 kfree(de->name);
73578 kfree(de);
73579 }
73580@@ -281,7 +281,7 @@ static int __init maybe_link(void)
73581 if (nlink >= 2) {
73582 char *old = find_link(major, minor, ino, mode, collected);
73583 if (old)
73584- return (sys_link(old, collected) < 0) ? -1 : 1;
73585+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
73586 }
73587 return 0;
73588 }
73589@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
73590 {
73591 struct stat st;
73592
73593- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
73594+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
73595 if (S_ISDIR(st.st_mode))
73596- sys_rmdir(path);
73597+ sys_rmdir((char __force_user *)path);
73598 else
73599- sys_unlink(path);
73600+ sys_unlink((char __force_user *)path);
73601 }
73602 }
73603
73604@@ -315,7 +315,7 @@ static int __init do_name(void)
73605 int openflags = O_WRONLY|O_CREAT;
73606 if (ml != 1)
73607 openflags |= O_TRUNC;
73608- wfd = sys_open(collected, openflags, mode);
73609+ wfd = sys_open((char __force_user *)collected, openflags, mode);
73610
73611 if (wfd >= 0) {
73612 sys_fchown(wfd, uid, gid);
73613@@ -327,17 +327,17 @@ static int __init do_name(void)
73614 }
73615 }
73616 } else if (S_ISDIR(mode)) {
73617- sys_mkdir(collected, mode);
73618- sys_chown(collected, uid, gid);
73619- sys_chmod(collected, mode);
73620+ sys_mkdir((char __force_user *)collected, mode);
73621+ sys_chown((char __force_user *)collected, uid, gid);
73622+ sys_chmod((char __force_user *)collected, mode);
73623 dir_add(collected, mtime);
73624 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
73625 S_ISFIFO(mode) || S_ISSOCK(mode)) {
73626 if (maybe_link() == 0) {
73627- sys_mknod(collected, mode, rdev);
73628- sys_chown(collected, uid, gid);
73629- sys_chmod(collected, mode);
73630- do_utime(collected, mtime);
73631+ sys_mknod((char __force_user *)collected, mode, rdev);
73632+ sys_chown((char __force_user *)collected, uid, gid);
73633+ sys_chmod((char __force_user *)collected, mode);
73634+ do_utime((char __force_user *)collected, mtime);
73635 }
73636 }
73637 return 0;
73638@@ -346,15 +346,15 @@ static int __init do_name(void)
73639 static int __init do_copy(void)
73640 {
73641 if (count >= body_len) {
73642- sys_write(wfd, victim, body_len);
73643+ sys_write(wfd, (char __force_user *)victim, body_len);
73644 sys_close(wfd);
73645- do_utime(vcollected, mtime);
73646+ do_utime((char __force_user *)vcollected, mtime);
73647 kfree(vcollected);
73648 eat(body_len);
73649 state = SkipIt;
73650 return 0;
73651 } else {
73652- sys_write(wfd, victim, count);
73653+ sys_write(wfd, (char __force_user *)victim, count);
73654 body_len -= count;
73655 eat(count);
73656 return 1;
73657@@ -365,9 +365,9 @@ static int __init do_symlink(void)
73658 {
73659 collected[N_ALIGN(name_len) + body_len] = '\0';
73660 clean_path(collected, 0);
73661- sys_symlink(collected + N_ALIGN(name_len), collected);
73662- sys_lchown(collected, uid, gid);
73663- do_utime(collected, mtime);
73664+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
73665+ sys_lchown((char __force_user *)collected, uid, gid);
73666+ do_utime((char __force_user *)collected, mtime);
73667 state = SkipIt;
73668 next_state = Reset;
73669 return 0;
73670diff --git a/init/main.c b/init/main.c
73671index cee4b5c..360e10a 100644
73672--- a/init/main.c
73673+++ b/init/main.c
73674@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
73675 extern void tc_init(void);
73676 #endif
73677
73678+extern void grsecurity_init(void);
73679+
73680 /*
73681 * Debug helper: via this flag we know that we are in 'early bootup code'
73682 * where only the boot processor is running with IRQ disabled. This means
73683@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
73684
73685 __setup("reset_devices", set_reset_devices);
73686
73687+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73688+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
73689+static int __init setup_grsec_proc_gid(char *str)
73690+{
73691+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
73692+ return 1;
73693+}
73694+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
73695+#endif
73696+
73697+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
73698+extern char pax_enter_kernel_user[];
73699+extern char pax_exit_kernel_user[];
73700+extern pgdval_t clone_pgd_mask;
73701+#endif
73702+
73703+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
73704+static int __init setup_pax_nouderef(char *str)
73705+{
73706+#ifdef CONFIG_X86_32
73707+ unsigned int cpu;
73708+ struct desc_struct *gdt;
73709+
73710+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
73711+ gdt = get_cpu_gdt_table(cpu);
73712+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
73713+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
73714+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
73715+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
73716+ }
73717+ loadsegment(ds, __KERNEL_DS);
73718+ loadsegment(es, __KERNEL_DS);
73719+ loadsegment(ss, __KERNEL_DS);
73720+#else
73721+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
73722+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
73723+ clone_pgd_mask = ~(pgdval_t)0UL;
73724+#endif
73725+
73726+ return 0;
73727+}
73728+early_param("pax_nouderef", setup_pax_nouderef);
73729+#endif
73730+
73731+#ifdef CONFIG_PAX_SOFTMODE
73732+int pax_softmode;
73733+
73734+static int __init setup_pax_softmode(char *str)
73735+{
73736+ get_option(&str, &pax_softmode);
73737+ return 1;
73738+}
73739+__setup("pax_softmode=", setup_pax_softmode);
73740+#endif
73741+
73742 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
73743 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
73744 static const char *panic_later, *panic_param;
73745@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
73746 {
73747 int count = preempt_count();
73748 int ret;
73749+ const char *msg1 = "", *msg2 = "";
73750
73751 if (initcall_debug)
73752 ret = do_one_initcall_debug(fn);
73753@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
73754 sprintf(msgbuf, "error code %d ", ret);
73755
73756 if (preempt_count() != count) {
73757- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
73758+ msg1 = " preemption imbalance";
73759 preempt_count() = count;
73760 }
73761 if (irqs_disabled()) {
73762- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
73763+ msg2 = " disabled interrupts";
73764 local_irq_enable();
73765 }
73766- if (msgbuf[0]) {
73767- printk("initcall %pF returned with %s\n", fn, msgbuf);
73768+ if (msgbuf[0] || *msg1 || *msg2) {
73769+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
73770 }
73771
73772 return ret;
73773@@ -755,8 +813,14 @@ static void __init do_initcall_level(int level)
73774 level, level,
73775 &repair_env_string);
73776
73777- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
73778+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
73779 do_one_initcall(*fn);
73780+
73781+#ifdef LATENT_ENTROPY_PLUGIN
73782+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73783+#endif
73784+
73785+ }
73786 }
73787
73788 static void __init do_initcalls(void)
73789@@ -790,8 +854,14 @@ static void __init do_pre_smp_initcalls(void)
73790 {
73791 initcall_t *fn;
73792
73793- for (fn = __initcall_start; fn < __initcall0_start; fn++)
73794+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
73795 do_one_initcall(*fn);
73796+
73797+#ifdef LATENT_ENTROPY_PLUGIN
73798+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73799+#endif
73800+
73801+ }
73802 }
73803
73804 static int run_init_process(const char *init_filename)
73805@@ -877,7 +947,7 @@ static noinline void __init kernel_init_freeable(void)
73806 do_basic_setup();
73807
73808 /* Open the /dev/console on the rootfs, this should never fail */
73809- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
73810+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
73811 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
73812
73813 (void) sys_dup(0);
73814@@ -890,11 +960,13 @@ static noinline void __init kernel_init_freeable(void)
73815 if (!ramdisk_execute_command)
73816 ramdisk_execute_command = "/init";
73817
73818- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
73819+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
73820 ramdisk_execute_command = NULL;
73821 prepare_namespace();
73822 }
73823
73824+ grsecurity_init();
73825+
73826 /*
73827 * Ok, we have completed the initial bootup, and
73828 * we're essentially up and running. Get rid of the
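The init/main.c hunk wires up three boot parameters: grsec_proc_gid= overrides the compile-time GID allowed to read /proc, pax_nouderef disables UDEREF at runtime (on i386 by widening the kernel and user data segment limits back to 4GB, on amd64 by patching the entry/exit thunks to a bare ret opcode, 0xc3, and widening clone_pgd_mask), and pax_softmode= starts PaX in soft mode. It also mixes the latent_entropy accumulated by the GCC plugin into the RNG after every initcall. Example command line, values illustrative:

    linux ... grsec_proc_gid=1001 pax_softmode=1 pax_nouderef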
73829diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
73830index 130dfec..cc88451 100644
73831--- a/ipc/ipc_sysctl.c
73832+++ b/ipc/ipc_sysctl.c
73833@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
73834 static int proc_ipc_dointvec(ctl_table *table, int write,
73835 void __user *buffer, size_t *lenp, loff_t *ppos)
73836 {
73837- struct ctl_table ipc_table;
73838+ ctl_table_no_const ipc_table;
73839
73840 memcpy(&ipc_table, table, sizeof(ipc_table));
73841 ipc_table.data = get_ipc(table);
73842@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
73843 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
73844 void __user *buffer, size_t *lenp, loff_t *ppos)
73845 {
73846- struct ctl_table ipc_table;
73847+ ctl_table_no_const ipc_table;
73848
73849 memcpy(&ipc_table, table, sizeof(ipc_table));
73850 ipc_table.data = get_ipc(table);
73851@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
73852 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73853 void __user *buffer, size_t *lenp, loff_t *ppos)
73854 {
73855- struct ctl_table ipc_table;
73856+ ctl_table_no_const ipc_table;
73857 size_t lenp_bef = *lenp;
73858 int rc;
73859
73860@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73861 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
73862 void __user *buffer, size_t *lenp, loff_t *ppos)
73863 {
73864- struct ctl_table ipc_table;
73865+ ctl_table_no_const ipc_table;
73866 memcpy(&ipc_table, table, sizeof(ipc_table));
73867 ipc_table.data = get_ipc(table);
73868
73869@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
73870 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
73871 void __user *buffer, size_t *lenp, loff_t *ppos)
73872 {
73873- struct ctl_table ipc_table;
73874+ ctl_table_no_const ipc_table;
73875 size_t lenp_bef = *lenp;
73876 int oldval;
73877 int rc;
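ctl_table_no_const exists because the constify GCC plugin makes ctl_table objects read-only at build time; a handler that needs a mutable scratch copy, in order to repoint ->data at the per-namespace value, declares the copy with the _no_const variant, which the plugin leaves writable. The pattern, completed from the first hunk above (proc_dointvec is the stock handler being wrapped):

    static int proc_ipc_dointvec(ctl_table *table, int write,
            void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            ctl_table_no_const ipc_table;

            memcpy(&ipc_table, table, sizeof(ipc_table));
            ipc_table.data = get_ipc(table);
            return proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
    }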
73878diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
73879index 383d638..943fdbb 100644
73880--- a/ipc/mq_sysctl.c
73881+++ b/ipc/mq_sysctl.c
73882@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
73883 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
73884 void __user *buffer, size_t *lenp, loff_t *ppos)
73885 {
73886- struct ctl_table mq_table;
73887+ ctl_table_no_const mq_table;
73888 memcpy(&mq_table, table, sizeof(mq_table));
73889 mq_table.data = get_mq(table);
73890
73891diff --git a/ipc/mqueue.c b/ipc/mqueue.c
73892index f3f40dc..ffe5a3a 100644
73893--- a/ipc/mqueue.c
73894+++ b/ipc/mqueue.c
73895@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
73896 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
73897 info->attr.mq_msgsize);
73898
73899+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
73900 spin_lock(&mq_lock);
73901 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
73902 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
73903diff --git a/ipc/msg.c b/ipc/msg.c
73904index 31cd1bf..9778e0f8 100644
73905--- a/ipc/msg.c
73906+++ b/ipc/msg.c
73907@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
73908 return security_msg_queue_associate(msq, msgflg);
73909 }
73910
73911+static struct ipc_ops msg_ops = {
73912+ .getnew = newque,
73913+ .associate = msg_security,
73914+ .more_checks = NULL
73915+};
73916+
73917 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
73918 {
73919 struct ipc_namespace *ns;
73920- struct ipc_ops msg_ops;
73921 struct ipc_params msg_params;
73922
73923 ns = current->nsproxy->ipc_ns;
73924
73925- msg_ops.getnew = newque;
73926- msg_ops.associate = msg_security;
73927- msg_ops.more_checks = NULL;
73928-
73929 msg_params.key = key;
73930 msg_params.flg = msgflg;
73931
73932@@ -872,6 +873,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
73933 goto out_unlock;
73934 break;
73935 }
73936+ msg = ERR_PTR(-EAGAIN);
73937 } else
73938 break;
73939 msg_counter++;
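Hoisting msg_ops out of the syscall body is again for the constify plugin: an ops structure assembled on the stack at every call can never be write-protected, whereas a single file-scope object can be made read-only. The same refactor is applied to sem_ops and shm_ops in the hunks below. The before/after shape:

    /* before: rebuilt per call, necessarily writable */
    struct ipc_ops msg_ops;
    msg_ops.getnew = newque;

    /* after: one static object a hardening pass can write-protect */
    static struct ipc_ops msg_ops = {
            .getnew    = newque,
            .associate = msg_security,
    };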
73940diff --git a/ipc/sem.c b/ipc/sem.c
73941index 58d31f1..cce7a55 100644
73942--- a/ipc/sem.c
73943+++ b/ipc/sem.c
73944@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
73945 return 0;
73946 }
73947
73948+static struct ipc_ops sem_ops = {
73949+ .getnew = newary,
73950+ .associate = sem_security,
73951+ .more_checks = sem_more_checks
73952+};
73953+
73954 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73955 {
73956 struct ipc_namespace *ns;
73957- struct ipc_ops sem_ops;
73958 struct ipc_params sem_params;
73959
73960 ns = current->nsproxy->ipc_ns;
73961@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73962 if (nsems < 0 || nsems > ns->sc_semmsl)
73963 return -EINVAL;
73964
73965- sem_ops.getnew = newary;
73966- sem_ops.associate = sem_security;
73967- sem_ops.more_checks = sem_more_checks;
73968-
73969 sem_params.key = key;
73970 sem_params.flg = semflg;
73971 sem_params.u.nsems = nsems;
73972diff --git a/ipc/shm.c b/ipc/shm.c
73973index 4fa6d8f..55cff14 100644
73974--- a/ipc/shm.c
73975+++ b/ipc/shm.c
73976@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
73977 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73978 #endif
73979
73980+#ifdef CONFIG_GRKERNSEC
73981+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73982+ const time_t shm_createtime, const kuid_t cuid,
73983+ const int shmid);
73984+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73985+ const time_t shm_createtime);
73986+#endif
73987+
73988 void shm_init_ns(struct ipc_namespace *ns)
73989 {
73990 ns->shm_ctlmax = SHMMAX;
73991@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
73992 shp->shm_lprid = 0;
73993 shp->shm_atim = shp->shm_dtim = 0;
73994 shp->shm_ctim = get_seconds();
73995+#ifdef CONFIG_GRKERNSEC
73996+ {
73997+ struct timespec timeval;
73998+ do_posix_clock_monotonic_gettime(&timeval);
73999+
74000+ shp->shm_createtime = timeval.tv_sec;
74001+ }
74002+#endif
74003 shp->shm_segsz = size;
74004 shp->shm_nattch = 0;
74005 shp->shm_file = file;
74006@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
74007 return 0;
74008 }
74009
74010+static struct ipc_ops shm_ops = {
74011+ .getnew = newseg,
74012+ .associate = shm_security,
74013+ .more_checks = shm_more_checks
74014+};
74015+
74016 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
74017 {
74018 struct ipc_namespace *ns;
74019- struct ipc_ops shm_ops;
74020 struct ipc_params shm_params;
74021
74022 ns = current->nsproxy->ipc_ns;
74023
74024- shm_ops.getnew = newseg;
74025- shm_ops.associate = shm_security;
74026- shm_ops.more_checks = shm_more_checks;
74027-
74028 shm_params.key = key;
74029 shm_params.flg = shmflg;
74030 shm_params.u.size = size;
74031@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74032 f_mode = FMODE_READ | FMODE_WRITE;
74033 }
74034 if (shmflg & SHM_EXEC) {
74035+
74036+#ifdef CONFIG_PAX_MPROTECT
74037+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
74038+ goto out;
74039+#endif
74040+
74041 prot |= PROT_EXEC;
74042 acc_mode |= S_IXUGO;
74043 }
74044@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74045 if (err)
74046 goto out_unlock;
74047
74048+#ifdef CONFIG_GRKERNSEC
74049+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
74050+ shp->shm_perm.cuid, shmid) ||
74051+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
74052+ err = -EACCES;
74053+ goto out_unlock;
74054+ }
74055+#endif
74056+
74057 path = shp->shm_file->f_path;
74058 path_get(&path);
74059 shp->shm_nattch++;
74060+#ifdef CONFIG_GRKERNSEC
74061+ shp->shm_lapid = current->pid;
74062+#endif
74063 size = i_size_read(path.dentry->d_inode);
74064 shm_unlock(shp);
74065
74066diff --git a/kernel/acct.c b/kernel/acct.c
74067index 051e071..15e0920 100644
74068--- a/kernel/acct.c
74069+++ b/kernel/acct.c
74070@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
74071 */
74072 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
74073 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
74074- file->f_op->write(file, (char *)&ac,
74075+ file->f_op->write(file, (char __force_user *)&ac,
74076 sizeof(acct_t), &file->f_pos);
74077 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
74078 set_fs(fs);
74079diff --git a/kernel/audit.c b/kernel/audit.c
74080index d596e53..dbef3c3 100644
74081--- a/kernel/audit.c
74082+++ b/kernel/audit.c
74083@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
74084 3) suppressed due to audit_rate_limit
74085 4) suppressed due to audit_backlog_limit
74086 */
74087-static atomic_t audit_lost = ATOMIC_INIT(0);
74088+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
74089
74090 /* The netlink socket. */
74091 static struct sock *audit_sock;
74092@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
74093 unsigned long now;
74094 int print;
74095
74096- atomic_inc(&audit_lost);
74097+ atomic_inc_unchecked(&audit_lost);
74098
74099 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
74100
74101@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
74102 printk(KERN_WARNING
74103 "audit: audit_lost=%d audit_rate_limit=%d "
74104 "audit_backlog_limit=%d\n",
74105- atomic_read(&audit_lost),
74106+ atomic_read_unchecked(&audit_lost),
74107 audit_rate_limit,
74108 audit_backlog_limit);
74109 audit_panic(message);
74110@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
74111 status_set.pid = audit_pid;
74112 status_set.rate_limit = audit_rate_limit;
74113 status_set.backlog_limit = audit_backlog_limit;
74114- status_set.lost = atomic_read(&audit_lost);
74115+ status_set.lost = atomic_read_unchecked(&audit_lost);
74116 status_set.backlog = skb_queue_len(&audit_skb_queue);
74117 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
74118 &status_set, sizeof(status_set));
74119diff --git a/kernel/auditsc.c b/kernel/auditsc.c
74120index a371f85..da826c1 100644
74121--- a/kernel/auditsc.c
74122+++ b/kernel/auditsc.c
74123@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
74124 }
74125
74126 /* global counter which is incremented every time something logs in */
74127-static atomic_t session_id = ATOMIC_INIT(0);
74128+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
74129
74130 /**
74131 * audit_set_loginuid - set current task's audit_context loginuid
74132@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
74133 return -EPERM;
74134 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
74135
74136- sessionid = atomic_inc_return(&session_id);
74137+ sessionid = atomic_inc_return_unchecked(&session_id);
74138 if (context && context->in_syscall) {
74139 struct audit_buffer *ab;
74140
74141diff --git a/kernel/capability.c b/kernel/capability.c
74142index 493d972..f87dfbd 100644
74143--- a/kernel/capability.c
74144+++ b/kernel/capability.c
74145@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
74146 * before modification is attempted and the application
74147 * fails.
74148 */
74149+ if (tocopy > ARRAY_SIZE(kdata))
74150+ return -EFAULT;
74151+
74152 if (copy_to_user(dataptr, kdata, tocopy
74153 * sizeof(struct __user_cap_data_struct))) {
74154 return -EFAULT;
74155@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
74156 int ret;
74157
74158 rcu_read_lock();
74159- ret = security_capable(__task_cred(t), ns, cap);
74160+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
74161+ gr_task_is_capable(t, __task_cred(t), cap);
74162 rcu_read_unlock();
74163
74164- return (ret == 0);
74165+ return ret;
74166 }
74167
74168 /**
74169@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
74170 int ret;
74171
74172 rcu_read_lock();
74173- ret = security_capable_noaudit(__task_cred(t), ns, cap);
74174+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
74175 rcu_read_unlock();
74176
74177- return (ret == 0);
74178+ return ret;
74179 }
74180
74181 /**
74182@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
74183 BUG();
74184 }
74185
74186- if (security_capable(current_cred(), ns, cap) == 0) {
74187+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
74188 current->flags |= PF_SUPERPRIV;
74189 return true;
74190 }
74191@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
74192 }
74193 EXPORT_SYMBOL(ns_capable);
74194
74195+bool ns_capable_nolog(struct user_namespace *ns, int cap)
74196+{
74197+ if (unlikely(!cap_valid(cap))) {
74198+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
74199+ BUG();
74200+ }
74201+
74202+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
74203+ current->flags |= PF_SUPERPRIV;
74204+ return true;
74205+ }
74206+ return false;
74207+}
74208+EXPORT_SYMBOL(ns_capable_nolog);
74209+
74210 /**
74211 * capable - Determine if the current task has a superior capability in effect
74212 * @cap: The capability to be tested for
74213@@ -408,6 +427,12 @@ bool capable(int cap)
74214 }
74215 EXPORT_SYMBOL(capable);
74216
74217+bool capable_nolog(int cap)
74218+{
74219+ return ns_capable_nolog(&init_user_ns, cap);
74220+}
74221+EXPORT_SYMBOL(capable_nolog);
74222+
74223 /**
74224 * nsown_capable - Check superior capability to one's own user_ns
74225 * @cap: The capability in question
74226@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
74227
74228 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74229 }
74230+
74231+bool inode_capable_nolog(const struct inode *inode, int cap)
74232+{
74233+ struct user_namespace *ns = current_user_ns();
74234+
74235+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74236+}
74237diff --git a/kernel/cgroup.c b/kernel/cgroup.c
74238index 1e23664..570a83d 100644
74239--- a/kernel/cgroup.c
74240+++ b/kernel/cgroup.c
74241@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
74242 struct css_set *cg = link->cg;
74243 struct task_struct *task;
74244 int count = 0;
74245- seq_printf(seq, "css_set %p\n", cg);
74246+ seq_printf(seq, "css_set %pK\n", cg);
74247 list_for_each_entry(task, &cg->tasks, cg_list) {
74248 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
74249 seq_puts(seq, " ...\n");
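%pK is the mainline printk format for sensitive pointers: depending on the kernel.kptr_restrict sysctl it prints the real address, all zeroes, or nothing, so switching this debug file from %p stops it leaking kernel heap addresses to unprivileged readers. In isolation:

    /* With kptr_restrict >= 1 an unprivileged reader sees zeroes. */
    seq_printf(seq, "css_set %pK\n", cg);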
74250diff --git a/kernel/compat.c b/kernel/compat.c
74251index 36700e9..73d770c 100644
74252--- a/kernel/compat.c
74253+++ b/kernel/compat.c
74254@@ -13,6 +13,7 @@
74255
74256 #include <linux/linkage.h>
74257 #include <linux/compat.h>
74258+#include <linux/module.h>
74259 #include <linux/errno.h>
74260 #include <linux/time.h>
74261 #include <linux/signal.h>
74262@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
74263 mm_segment_t oldfs;
74264 long ret;
74265
74266- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
74267+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
74268 oldfs = get_fs();
74269 set_fs(KERNEL_DS);
74270 ret = hrtimer_nanosleep_restart(restart);
74271@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
74272 oldfs = get_fs();
74273 set_fs(KERNEL_DS);
74274 ret = hrtimer_nanosleep(&tu,
74275- rmtp ? (struct timespec __user *)&rmt : NULL,
74276+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
74277 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
74278 set_fs(oldfs);
74279
74280@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
74281 mm_segment_t old_fs = get_fs();
74282
74283 set_fs(KERNEL_DS);
74284- ret = sys_sigpending((old_sigset_t __user *) &s);
74285+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
74286 set_fs(old_fs);
74287 if (ret == 0)
74288 ret = put_user(s, set);
74289@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
74290 mm_segment_t old_fs = get_fs();
74291
74292 set_fs(KERNEL_DS);
74293- ret = sys_old_getrlimit(resource, &r);
74294+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
74295 set_fs(old_fs);
74296
74297 if (!ret) {
74298@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
74299 mm_segment_t old_fs = get_fs();
74300
74301 set_fs(KERNEL_DS);
74302- ret = sys_getrusage(who, (struct rusage __user *) &r);
74303+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
74304 set_fs(old_fs);
74305
74306 if (ret)
74307@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
74308 set_fs (KERNEL_DS);
74309 ret = sys_wait4(pid,
74310 (stat_addr ?
74311- (unsigned int __user *) &status : NULL),
74312- options, (struct rusage __user *) &r);
74313+ (unsigned int __force_user *) &status : NULL),
74314+ options, (struct rusage __force_user *) &r);
74315 set_fs (old_fs);
74316
74317 if (ret > 0) {
74318@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
74319 memset(&info, 0, sizeof(info));
74320
74321 set_fs(KERNEL_DS);
74322- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
74323- uru ? (struct rusage __user *)&ru : NULL);
74324+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
74325+ uru ? (struct rusage __force_user *)&ru : NULL);
74326 set_fs(old_fs);
74327
74328 if ((ret < 0) || (info.si_signo == 0))
74329@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
74330 oldfs = get_fs();
74331 set_fs(KERNEL_DS);
74332 err = sys_timer_settime(timer_id, flags,
74333- (struct itimerspec __user *) &newts,
74334- (struct itimerspec __user *) &oldts);
74335+ (struct itimerspec __force_user *) &newts,
74336+ (struct itimerspec __force_user *) &oldts);
74337 set_fs(oldfs);
74338 if (!err && old && put_compat_itimerspec(old, &oldts))
74339 return -EFAULT;
74340@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
74341 oldfs = get_fs();
74342 set_fs(KERNEL_DS);
74343 err = sys_timer_gettime(timer_id,
74344- (struct itimerspec __user *) &ts);
74345+ (struct itimerspec __force_user *) &ts);
74346 set_fs(oldfs);
74347 if (!err && put_compat_itimerspec(setting, &ts))
74348 return -EFAULT;
74349@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
74350 oldfs = get_fs();
74351 set_fs(KERNEL_DS);
74352 err = sys_clock_settime(which_clock,
74353- (struct timespec __user *) &ts);
74354+ (struct timespec __force_user *) &ts);
74355 set_fs(oldfs);
74356 return err;
74357 }
74358@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
74359 oldfs = get_fs();
74360 set_fs(KERNEL_DS);
74361 err = sys_clock_gettime(which_clock,
74362- (struct timespec __user *) &ts);
74363+ (struct timespec __force_user *) &ts);
74364 set_fs(oldfs);
74365 if (!err && put_compat_timespec(&ts, tp))
74366 return -EFAULT;
74367@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
74368
74369 oldfs = get_fs();
74370 set_fs(KERNEL_DS);
74371- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
74372+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
74373 set_fs(oldfs);
74374
74375 err = compat_put_timex(utp, &txc);
74376@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
74377 oldfs = get_fs();
74378 set_fs(KERNEL_DS);
74379 err = sys_clock_getres(which_clock,
74380- (struct timespec __user *) &ts);
74381+ (struct timespec __force_user *) &ts);
74382 set_fs(oldfs);
74383 if (!err && tp && put_compat_timespec(&ts, tp))
74384 return -EFAULT;
74385@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
74386 long err;
74387 mm_segment_t oldfs;
74388 struct timespec tu;
74389- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
74390+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
74391
74392- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
74393+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
74394 oldfs = get_fs();
74395 set_fs(KERNEL_DS);
74396 err = clock_nanosleep_restart(restart);
74397@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
74398 oldfs = get_fs();
74399 set_fs(KERNEL_DS);
74400 err = sys_clock_nanosleep(which_clock, flags,
74401- (struct timespec __user *) &in,
74402- (struct timespec __user *) &out);
74403+ (struct timespec __force_user *) &in,
74404+ (struct timespec __force_user *) &out);
74405 set_fs(oldfs);
74406
74407 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
74408diff --git a/kernel/configs.c b/kernel/configs.c
74409index 42e8fa0..9e7406b 100644
74410--- a/kernel/configs.c
74411+++ b/kernel/configs.c
74412@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
74413 struct proc_dir_entry *entry;
74414
74415 /* create the current config file */
74416+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
74417+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
74418+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
74419+ &ikconfig_file_ops);
74420+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74421+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
74422+ &ikconfig_file_ops);
74423+#endif
74424+#else
74425 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
74426 &ikconfig_file_ops);
74427+#endif
74428+
74429 if (!entry)
74430 return -ENOMEM;
74431
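The configs.c hunk tightens /proc/config.gz from world-readable to root-only or root-plus-group, depending on which GRKERNSEC_PROC_* option is set. The mode constants map to the usual octal permissions:

    /* S_IRUGO            0444  world-readable (stock kernel)
     * S_IRUSR            0400  owner (root) only
     * S_IRUSR | S_IRGRP  0440  root plus the configured proc group */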
74432diff --git a/kernel/cred.c b/kernel/cred.c
74433index e0573a4..3874e41 100644
74434--- a/kernel/cred.c
74435+++ b/kernel/cred.c
74436@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
74437 validate_creds(cred);
74438 alter_cred_subscribers(cred, -1);
74439 put_cred(cred);
74440+
74441+#ifdef CONFIG_GRKERNSEC_SETXID
74442+ cred = (struct cred *) tsk->delayed_cred;
74443+ if (cred != NULL) {
74444+ tsk->delayed_cred = NULL;
74445+ validate_creds(cred);
74446+ alter_cred_subscribers(cred, -1);
74447+ put_cred(cred);
74448+ }
74449+#endif
74450 }
74451
74452 /**
74453@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
74454 * Always returns 0 thus allowing this function to be tail-called at the end
74455 * of, say, sys_setgid().
74456 */
74457-int commit_creds(struct cred *new)
74458+static int __commit_creds(struct cred *new)
74459 {
74460 struct task_struct *task = current;
74461 const struct cred *old = task->real_cred;
74462@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
74463
74464 get_cred(new); /* we will require a ref for the subj creds too */
74465
74466+ gr_set_role_label(task, new->uid, new->gid);
74467+
74468 /* dumpability changes */
74469 if (!uid_eq(old->euid, new->euid) ||
74470 !gid_eq(old->egid, new->egid) ||
74471@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
74472 put_cred(old);
74473 return 0;
74474 }
74475+#ifdef CONFIG_GRKERNSEC_SETXID
74476+extern int set_user(struct cred *new);
74477+
74478+void gr_delayed_cred_worker(void)
74479+{
74480+ const struct cred *new = current->delayed_cred;
74481+ struct cred *ncred;
74482+
74483+ current->delayed_cred = NULL;
74484+
74485+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
74486+ // drop the reference taken by get_cred when this cred was queued
74487+ put_cred(new);
74488+ return;
74489+ } else if (new == NULL)
74490+ return;
74491+
74492+ ncred = prepare_creds();
74493+ if (!ncred)
74494+ goto die;
74495+ // uids
74496+ ncred->uid = new->uid;
74497+ ncred->euid = new->euid;
74498+ ncred->suid = new->suid;
74499+ ncred->fsuid = new->fsuid;
74500+ // gids
74501+ ncred->gid = new->gid;
74502+ ncred->egid = new->egid;
74503+ ncred->sgid = new->sgid;
74504+ ncred->fsgid = new->fsgid;
74505+ // groups
74506+ if (set_groups(ncred, new->group_info) < 0) {
74507+ abort_creds(ncred);
74508+ goto die;
74509+ }
74510+ // caps
74511+ ncred->securebits = new->securebits;
74512+ ncred->cap_inheritable = new->cap_inheritable;
74513+ ncred->cap_permitted = new->cap_permitted;
74514+ ncred->cap_effective = new->cap_effective;
74515+ ncred->cap_bset = new->cap_bset;
74516+
74517+ if (set_user(ncred)) {
74518+ abort_creds(ncred);
74519+ goto die;
74520+ }
74521+
74522+ // drop the reference taken by get_cred when this cred was queued
74523+ put_cred(new);
74524+
74525+ __commit_creds(ncred);
74526+ return;
74527+die:
74528+ // drop the reference taken by get_cred when this cred was queued
74529+ put_cred(new);
74530+ do_group_exit(SIGKILL);
74531+}
74532+#endif
74533+
74534+int commit_creds(struct cred *new)
74535+{
74536+#ifdef CONFIG_GRKERNSEC_SETXID
74537+ int ret;
74538+ int schedule_it = 0;
74539+ struct task_struct *t;
74540+
74541+ /* we won't get called with tasklist_lock held for writing
74542+ and interrupts disabled as the cred struct in that case is
74543+ init_cred
74544+ */
74545+ if (grsec_enable_setxid && !current_is_single_threaded() &&
74546+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
74547+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
74548+ schedule_it = 1;
74549+ }
74550+ ret = __commit_creds(new);
74551+ if (schedule_it) {
74552+ rcu_read_lock();
74553+ read_lock(&tasklist_lock);
74554+ for (t = next_thread(current); t != current;
74555+ t = next_thread(t)) {
74556+ if (t->delayed_cred == NULL) {
74557+ t->delayed_cred = get_cred(new);
74558+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
74559+ set_tsk_need_resched(t);
74560+ }
74561+ }
74562+ read_unlock(&tasklist_lock);
74563+ rcu_read_unlock();
74564+ }
74565+ return ret;
74566+#else
74567+ return __commit_creds(new);
74568+#endif
74569+}
74570+
74571 EXPORT_SYMBOL(commit_creds);
74572
74573 /**
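The GRKERNSEC_SETXID machinery above addresses a long-standing quirk: commit_creds() is per-thread, and glibc emulates process-wide setuid() by signalling every thread from userspace. Here the calling thread commits immediately while each sibling gets a get_cred'd copy parked in ->delayed_cred plus the TIF_GRSEC_SETXID flag; gr_delayed_cred_worker() later rebuilds and commits the credentials, or kills the thread group if that fails, so no thread can linger with stale root credentials. A sketch of the assumed consumer site, run when a flagged thread next crosses a safe kernel boundary:

    /* assumed call site, e.g. on syscall entry or resume */
    if (test_thread_flag(TIF_GRSEC_SETXID))
            gr_delayed_cred_worker();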
74574diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
74575index 9a61738..c5c8f3a 100644
74576--- a/kernel/debug/debug_core.c
74577+++ b/kernel/debug/debug_core.c
74578@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
74579 */
74580 static atomic_t masters_in_kgdb;
74581 static atomic_t slaves_in_kgdb;
74582-static atomic_t kgdb_break_tasklet_var;
74583+static atomic_unchecked_t kgdb_break_tasklet_var;
74584 atomic_t kgdb_setting_breakpoint;
74585
74586 struct task_struct *kgdb_usethread;
74587@@ -132,7 +132,7 @@ int kgdb_single_step;
74588 static pid_t kgdb_sstep_pid;
74589
74590 /* to keep track of the CPU which is doing the single stepping*/
74591-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74592+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74593
74594 /*
74595 * If you are debugging a problem where roundup (the collection of
74596@@ -540,7 +540,7 @@ return_normal:
74597 * kernel will only try for the value of sstep_tries before
74598 * giving up and continuing on.
74599 */
74600- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
74601+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
74602 (kgdb_info[cpu].task &&
74603 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
74604 atomic_set(&kgdb_active, -1);
74605@@ -634,8 +634,8 @@ cpu_master_loop:
74606 }
74607
74608 kgdb_restore:
74609- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
74610- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
74611+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
74612+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
74613 if (kgdb_info[sstep_cpu].task)
74614 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
74615 else
74616@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
74617 static void kgdb_tasklet_bpt(unsigned long ing)
74618 {
74619 kgdb_breakpoint();
74620- atomic_set(&kgdb_break_tasklet_var, 0);
74621+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
74622 }
74623
74624 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
74625
74626 void kgdb_schedule_breakpoint(void)
74627 {
74628- if (atomic_read(&kgdb_break_tasklet_var) ||
74629+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
74630 atomic_read(&kgdb_active) != -1 ||
74631 atomic_read(&kgdb_setting_breakpoint))
74632 return;
74633- atomic_inc(&kgdb_break_tasklet_var);
74634+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
74635 tasklet_schedule(&kgdb_tasklet_breakpoint);
74636 }
74637 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
74638diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
74639index 8875254..7cf4928 100644
74640--- a/kernel/debug/kdb/kdb_main.c
74641+++ b/kernel/debug/kdb/kdb_main.c
74642@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
74643 continue;
74644
74645 kdb_printf("%-20s%8u 0x%p ", mod->name,
74646- mod->core_size, (void *)mod);
74647+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
74648 #ifdef CONFIG_MODULE_UNLOAD
74649 kdb_printf("%4ld ", module_refcount(mod));
74650 #endif
74651@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
74652 kdb_printf(" (Loading)");
74653 else
74654 kdb_printf(" (Live)");
74655- kdb_printf(" 0x%p", mod->module_core);
74656+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74657
74658 #ifdef CONFIG_MODULE_UNLOAD
74659 {
74660diff --git a/kernel/events/core.c b/kernel/events/core.c
74661index 7b6646a..3cb1135 100644
74662--- a/kernel/events/core.c
74663+++ b/kernel/events/core.c
74664@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
74665 return 0;
74666 }
74667
74668-static atomic64_t perf_event_id;
74669+static atomic64_unchecked_t perf_event_id;
74670
74671 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
74672 enum event_type_t event_type);
74673@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
74674
74675 static inline u64 perf_event_count(struct perf_event *event)
74676 {
74677- return local64_read(&event->count) + atomic64_read(&event->child_count);
74678+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
74679 }
74680
74681 static u64 perf_event_read(struct perf_event *event)
74682@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
74683 mutex_lock(&event->child_mutex);
74684 total += perf_event_read(event);
74685 *enabled += event->total_time_enabled +
74686- atomic64_read(&event->child_total_time_enabled);
74687+ atomic64_read_unchecked(&event->child_total_time_enabled);
74688 *running += event->total_time_running +
74689- atomic64_read(&event->child_total_time_running);
74690+ atomic64_read_unchecked(&event->child_total_time_running);
74691
74692 list_for_each_entry(child, &event->child_list, child_list) {
74693 total += perf_event_read(child);
74694@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
74695 userpg->offset -= local64_read(&event->hw.prev_count);
74696
74697 userpg->time_enabled = enabled +
74698- atomic64_read(&event->child_total_time_enabled);
74699+ atomic64_read_unchecked(&event->child_total_time_enabled);
74700
74701 userpg->time_running = running +
74702- atomic64_read(&event->child_total_time_running);
74703+ atomic64_read_unchecked(&event->child_total_time_running);
74704
74705 arch_perf_update_userpage(userpg, now);
74706
74707@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74708 values[n++] = perf_event_count(event);
74709 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74710 values[n++] = enabled +
74711- atomic64_read(&event->child_total_time_enabled);
74712+ atomic64_read_unchecked(&event->child_total_time_enabled);
74713 }
74714 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74715 values[n++] = running +
74716- atomic64_read(&event->child_total_time_running);
74717+ atomic64_read_unchecked(&event->child_total_time_running);
74718 }
74719 if (read_format & PERF_FORMAT_ID)
74720 values[n++] = primary_event_id(event);
74721@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74722 * need to add enough zero bytes after the string to handle
74723 * the 64bit alignment we do later.
74724 */
74725- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74726+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
74727 if (!buf) {
74728 name = strncpy(tmp, "//enomem", sizeof(tmp));
74729 goto got_name;
74730 }
74731- name = d_path(&file->f_path, buf, PATH_MAX);
74732+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74733 if (IS_ERR(name)) {
74734 name = strncpy(tmp, "//toolong", sizeof(tmp));
74735 goto got_name;
74736@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
74737 event->parent = parent_event;
74738
74739 event->ns = get_pid_ns(task_active_pid_ns(current));
74740- event->id = atomic64_inc_return(&perf_event_id);
74741+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
74742
74743 event->state = PERF_EVENT_STATE_INACTIVE;
74744
74745@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
74746 /*
74747 * Add back the child's count to the parent's count:
74748 */
74749- atomic64_add(child_val, &parent_event->child_count);
74750- atomic64_add(child_event->total_time_enabled,
74751+ atomic64_add_unchecked(child_val, &parent_event->child_count);
74752+ atomic64_add_unchecked(child_event->total_time_enabled,
74753 &parent_event->child_total_time_enabled);
74754- atomic64_add(child_event->total_time_running,
74755+ atomic64_add_unchecked(child_event->total_time_running,
74756 &parent_event->child_total_time_running);
74757
74758 /*
74759diff --git a/kernel/exit.c b/kernel/exit.c
74760index b4df219..f13c02d 100644
74761--- a/kernel/exit.c
74762+++ b/kernel/exit.c
74763@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
74764 struct task_struct *leader;
74765 int zap_leader;
74766 repeat:
74767+#ifdef CONFIG_NET
74768+ gr_del_task_from_ip_table(p);
74769+#endif
74770+
74771 /* don't need to get the RCU readlock here - the process is dead and
74772 * can't be modifying its own credentials. But shut RCU-lockdep up */
74773 rcu_read_lock();
74774@@ -338,7 +342,7 @@ int allow_signal(int sig)
74775 * know it'll be handled, so that they don't get converted to
74776 * SIGKILL or just silently dropped.
74777 */
74778- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
74779+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
74780 recalc_sigpending();
74781 spin_unlock_irq(&current->sighand->siglock);
74782 return 0;
74783@@ -708,6 +712,8 @@ void do_exit(long code)
74784 struct task_struct *tsk = current;
74785 int group_dead;
74786
74787+ set_fs(USER_DS);
74788+
74789 profile_task_exit(tsk);
74790
74791 WARN_ON(blk_needs_flush_plug(tsk));
74792@@ -724,7 +730,6 @@ void do_exit(long code)
74793 * mm_release()->clear_child_tid() from writing to a user-controlled
74794 * kernel address.
74795 */
74796- set_fs(USER_DS);
74797
74798 ptrace_event(PTRACE_EVENT_EXIT, code);
74799
74800@@ -783,6 +788,9 @@ void do_exit(long code)
74801 tsk->exit_code = code;
74802 taskstats_exit(tsk, group_dead);
74803
74804+ gr_acl_handle_psacct(tsk, code);
74805+ gr_acl_handle_exit();
74806+
74807 exit_mm(tsk);
74808
74809 if (group_dead)
74810@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
74811 * Take down every thread in the group. This is called by fatal signals
74812 * as well as by sys_exit_group (below).
74813 */
74814-void
74815+__noreturn void
74816 do_group_exit(int exit_code)
74817 {
74818 struct signal_struct *sig = current->signal;
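Annotating do_group_exit() as __noreturn documents and enforces that it never returns: callers can be optimized accordingly, and the compiler will warn if a return path is ever introduced. The kernel wrapper reduces to the GCC attribute:

    #define __noreturn __attribute__((noreturn))

    __noreturn void do_group_exit(int exit_code);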
74819diff --git a/kernel/fork.c b/kernel/fork.c
74820index 5630e52..0cee608 100644
74821--- a/kernel/fork.c
74822+++ b/kernel/fork.c
74823@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
74824 *stackend = STACK_END_MAGIC; /* for overflow detection */
74825
74826 #ifdef CONFIG_CC_STACKPROTECTOR
74827- tsk->stack_canary = get_random_int();
74828+ tsk->stack_canary = pax_get_random_long();
74829 #endif
74830
74831 /*
74832@@ -344,13 +344,81 @@ free_tsk:
74833 }
74834
74835 #ifdef CONFIG_MMU
74836+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
74837+{
74838+ struct vm_area_struct *tmp;
74839+ unsigned long charge;
74840+ struct mempolicy *pol;
74841+ struct file *file;
74842+
74843+ charge = 0;
74844+ if (mpnt->vm_flags & VM_ACCOUNT) {
74845+ unsigned long len = vma_pages(mpnt);
74846+
74847+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74848+ goto fail_nomem;
74849+ charge = len;
74850+ }
74851+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74852+ if (!tmp)
74853+ goto fail_nomem;
74854+ *tmp = *mpnt;
74855+ tmp->vm_mm = mm;
74856+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
74857+ pol = mpol_dup(vma_policy(mpnt));
74858+ if (IS_ERR(pol))
74859+ goto fail_nomem_policy;
74860+ vma_set_policy(tmp, pol);
74861+ if (anon_vma_fork(tmp, mpnt))
74862+ goto fail_nomem_anon_vma_fork;
74863+ tmp->vm_flags &= ~VM_LOCKED;
74864+ tmp->vm_next = tmp->vm_prev = NULL;
74865+ tmp->vm_mirror = NULL;
74866+ file = tmp->vm_file;
74867+ if (file) {
74868+ struct inode *inode = file->f_path.dentry->d_inode;
74869+ struct address_space *mapping = file->f_mapping;
74870+
74871+ get_file(file);
74872+ if (tmp->vm_flags & VM_DENYWRITE)
74873+ atomic_dec(&inode->i_writecount);
74874+ mutex_lock(&mapping->i_mmap_mutex);
74875+ if (tmp->vm_flags & VM_SHARED)
74876+ mapping->i_mmap_writable++;
74877+ flush_dcache_mmap_lock(mapping);
74878+ /* insert tmp into the share list, just after mpnt */
74879+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74880+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
74881+ else
74882+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
74883+ flush_dcache_mmap_unlock(mapping);
74884+ mutex_unlock(&mapping->i_mmap_mutex);
74885+ }
74886+
74887+ /*
74888+ * Clear hugetlb-related page reserves for children. This only
74889+ * affects MAP_PRIVATE mappings. Faults generated by the child
74890+ * are not guaranteed to succeed, even if read-only
74891+ */
74892+ if (is_vm_hugetlb_page(tmp))
74893+ reset_vma_resv_huge_pages(tmp);
74894+
74895+ return tmp;
74896+
74897+fail_nomem_anon_vma_fork:
74898+ mpol_put(pol);
74899+fail_nomem_policy:
74900+ kmem_cache_free(vm_area_cachep, tmp);
74901+fail_nomem:
74902+ vm_unacct_memory(charge);
74903+ return NULL;
74904+}
74905+
74906 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74907 {
74908 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
74909 struct rb_node **rb_link, *rb_parent;
74910 int retval;
74911- unsigned long charge;
74912- struct mempolicy *pol;
74913
74914 uprobe_start_dup_mmap();
74915 down_write(&oldmm->mmap_sem);
74916@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74917 mm->locked_vm = 0;
74918 mm->mmap = NULL;
74919 mm->mmap_cache = NULL;
74920- mm->free_area_cache = oldmm->mmap_base;
74921- mm->cached_hole_size = ~0UL;
74922+ mm->free_area_cache = oldmm->free_area_cache;
74923+ mm->cached_hole_size = oldmm->cached_hole_size;
74924 mm->map_count = 0;
74925 cpumask_clear(mm_cpumask(mm));
74926 mm->mm_rb = RB_ROOT;
74927@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74928
74929 prev = NULL;
74930 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
74931- struct file *file;
74932-
74933 if (mpnt->vm_flags & VM_DONTCOPY) {
74934 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
74935 -vma_pages(mpnt));
74936 continue;
74937 }
74938- charge = 0;
74939- if (mpnt->vm_flags & VM_ACCOUNT) {
74940- unsigned long len = vma_pages(mpnt);
74941-
74942- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74943- goto fail_nomem;
74944- charge = len;
74945- }
74946- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74947- if (!tmp)
74948- goto fail_nomem;
74949- *tmp = *mpnt;
74950- INIT_LIST_HEAD(&tmp->anon_vma_chain);
74951- pol = mpol_dup(vma_policy(mpnt));
74952- retval = PTR_ERR(pol);
74953- if (IS_ERR(pol))
74954- goto fail_nomem_policy;
74955- vma_set_policy(tmp, pol);
74956- tmp->vm_mm = mm;
74957- if (anon_vma_fork(tmp, mpnt))
74958- goto fail_nomem_anon_vma_fork;
74959- tmp->vm_flags &= ~VM_LOCKED;
74960- tmp->vm_next = tmp->vm_prev = NULL;
74961- file = tmp->vm_file;
74962- if (file) {
74963- struct inode *inode = file->f_path.dentry->d_inode;
74964- struct address_space *mapping = file->f_mapping;
74965-
74966- get_file(file);
74967- if (tmp->vm_flags & VM_DENYWRITE)
74968- atomic_dec(&inode->i_writecount);
74969- mutex_lock(&mapping->i_mmap_mutex);
74970- if (tmp->vm_flags & VM_SHARED)
74971- mapping->i_mmap_writable++;
74972- flush_dcache_mmap_lock(mapping);
74973- /* insert tmp into the share list, just after mpnt */
74974- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74975- vma_nonlinear_insert(tmp,
74976- &mapping->i_mmap_nonlinear);
74977- else
74978- vma_interval_tree_insert_after(tmp, mpnt,
74979- &mapping->i_mmap);
74980- flush_dcache_mmap_unlock(mapping);
74981- mutex_unlock(&mapping->i_mmap_mutex);
74982+ tmp = dup_vma(mm, oldmm, mpnt);
74983+ if (!tmp) {
74984+ retval = -ENOMEM;
74985+ goto out;
74986 }
74987
74988 /*
74989@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74990 if (retval)
74991 goto out;
74992 }
74993+
74994+#ifdef CONFIG_PAX_SEGMEXEC
74995+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
74996+ struct vm_area_struct *mpnt_m;
74997+
74998+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
74999+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
75000+
75001+ if (!mpnt->vm_mirror)
75002+ continue;
75003+
75004+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
75005+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
75006+ mpnt->vm_mirror = mpnt_m;
75007+ } else {
75008+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
75009+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
75010+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
75011+ mpnt->vm_mirror->vm_mirror = mpnt;
75012+ }
75013+ }
75014+ BUG_ON(mpnt_m);
75015+ }
75016+#endif
75017+
75018 /* a new mm has just been created */
75019 arch_dup_mmap(oldmm, mm);
75020 retval = 0;
75021@@ -472,14 +523,6 @@ out:
75022 up_write(&oldmm->mmap_sem);
75023 uprobe_end_dup_mmap();
75024 return retval;
75025-fail_nomem_anon_vma_fork:
75026- mpol_put(pol);
75027-fail_nomem_policy:
75028- kmem_cache_free(vm_area_cachep, tmp);
75029-fail_nomem:
75030- retval = -ENOMEM;
75031- vm_unacct_memory(charge);
75032- goto out;
75033 }
75034
75035 static inline int mm_alloc_pgd(struct mm_struct *mm)
75036@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
75037 return ERR_PTR(err);
75038
75039 mm = get_task_mm(task);
75040- if (mm && mm != current->mm &&
75041- !ptrace_may_access(task, mode)) {
75042+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
75043+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
75044 mmput(mm);
75045 mm = ERR_PTR(-EACCES);
75046 }
75047@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
75048 spin_unlock(&fs->lock);
75049 return -EAGAIN;
75050 }
75051- fs->users++;
75052+ atomic_inc(&fs->users);
75053 spin_unlock(&fs->lock);
75054 return 0;
75055 }
75056 tsk->fs = copy_fs_struct(fs);
75057 if (!tsk->fs)
75058 return -ENOMEM;
75059+	/* Carry through gr_chroot_dentry and is_chrooted instead
75060+	   of recomputing them here; they were already copied when the
75061+	   task struct was duplicated. This allows pivot_root not to
75062+	   be treated as a chroot.
75063+	*/
75064+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
75065+
75066 return 0;
75067 }
75068
75069@@ -1196,6 +1246,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75070 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
75071 #endif
75072 retval = -EAGAIN;
75073+
75074+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
75075+
75076 if (atomic_read(&p->real_cred->user->processes) >=
75077 task_rlimit(p, RLIMIT_NPROC)) {
75078 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
75079@@ -1435,6 +1488,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75080 goto bad_fork_free_pid;
75081 }
75082
75083+ /* synchronizes with gr_set_acls()
75084+ we need to call this past the point of no return for fork()
75085+ */
75086+ gr_copy_label(p);
75087+
75088 if (clone_flags & CLONE_THREAD) {
75089 current->signal->nr_threads++;
75090 atomic_inc(&current->signal->live);
75091@@ -1518,6 +1576,8 @@ bad_fork_cleanup_count:
75092 bad_fork_free:
75093 free_task(p);
75094 fork_out:
75095+ gr_log_forkfail(retval);
75096+
75097 return ERR_PTR(retval);
75098 }
75099
75100@@ -1568,6 +1628,23 @@ long do_fork(unsigned long clone_flags,
75101 return -EINVAL;
75102 }
75103
75104+#ifdef CONFIG_GRKERNSEC
75105+ if (clone_flags & CLONE_NEWUSER) {
75106+ /*
75107+ * This doesn't really inspire confidence:
75108+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
75109+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
75110+ * Increases kernel attack surface in areas developers
75111+ * previously cared little about ("low importance due
75112+	 * to requiring 'root' capability")
75113+ * To be removed when this code receives *proper* review
75114+ */
75115+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
75116+ !capable(CAP_SETGID))
75117+ return -EPERM;
75118+ }
75119+#endif
75120+
75121 /*
75122 * Determine whether and which event to report to ptracer. When
75123 * called from kernel_thread or CLONE_UNTRACED is explicitly
75124@@ -1602,6 +1679,8 @@ long do_fork(unsigned long clone_flags,
75125 if (clone_flags & CLONE_PARENT_SETTID)
75126 put_user(nr, parent_tidptr);
75127
75128+ gr_handle_brute_check();
75129+
75130 if (clone_flags & CLONE_VFORK) {
75131 p->vfork_done = &vfork;
75132 init_completion(&vfork);
75133@@ -1755,7 +1834,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
75134 return 0;
75135
75136 /* don't need lock here; in the worst case we'll do useless copy */
75137- if (fs->users == 1)
75138+ if (atomic_read(&fs->users) == 1)
75139 return 0;
75140
75141 *new_fsp = copy_fs_struct(fs);
75142@@ -1869,7 +1948,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
75143 fs = current->fs;
75144 spin_lock(&fs->lock);
75145 current->fs = new_fs;
75146- if (--fs->users)
75147+ gr_set_chroot_entries(current, &current->fs->root);
75148+ if (atomic_dec_return(&fs->users))
75149 new_fs = NULL;
75150 else
75151 new_fs = fs;
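Linux 3.8 was the first release to allow unprivileged CLONE_NEWUSER; the CONFIG_GRKERNSEC hunk in do_fork() above re-gates it behind CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID. A small probe (real syscalls only) that shows which behavior the running kernel has:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* mainline 3.8+: succeeds for any user; with the hunk above an
	 * unprivileged caller should see EPERM; EINVAL just means user
	 * namespaces are compiled out */
	if (unshare(CLONE_NEWUSER) == -1)
		printf("unshare(CLONE_NEWUSER): %s\n", strerror(errno));
	else
		printf("user namespace created\n");
	return 0;
}
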
75152diff --git a/kernel/futex.c b/kernel/futex.c
75153index 8879430..31696f1 100644
75154--- a/kernel/futex.c
75155+++ b/kernel/futex.c
75156@@ -54,6 +54,7 @@
75157 #include <linux/mount.h>
75158 #include <linux/pagemap.h>
75159 #include <linux/syscalls.h>
75160+#include <linux/ptrace.h>
75161 #include <linux/signal.h>
75162 #include <linux/export.h>
75163 #include <linux/magic.h>
75164@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
75165 struct page *page, *page_head;
75166 int err, ro = 0;
75167
75168+#ifdef CONFIG_PAX_SEGMEXEC
75169+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
75170+ return -EFAULT;
75171+#endif
75172+
75173 /*
75174 * The futex address must be "naturally" aligned.
75175 */
75176@@ -2731,6 +2737,7 @@ static int __init futex_init(void)
75177 {
75178 u32 curval;
75179 int i;
75180+ mm_segment_t oldfs;
75181
75182 /*
75183 * This will fail and we want it. Some arch implementations do
75184@@ -2742,8 +2749,11 @@ static int __init futex_init(void)
75185 * implementation, the non-functional ones will return
75186 * -ENOSYS.
75187 */
75188+ oldfs = get_fs();
75189+ set_fs(USER_DS);
75190 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
75191 futex_cmpxchg_enabled = 1;
75192+ set_fs(oldfs);
75193
75194 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
75195 plist_head_init(&futex_queues[i].chain);
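The futex_init() hunk above brackets the deliberate NULL-pointer probe with set_fs(USER_DS)/set_fs(oldfs) so that, under address-limit hardening, the probe faults cleanly in the user range rather than being treated as a kernel access. The probe leans on futexes failing fast on bad input; a userspace sketch of that same fail-fast contract, using only real syscalls:

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_word = 1;

int main(void)
{
	/* expected value 0 does not match the word's value 1, so the
	 * kernel returns EAGAIN at once instead of sleeping - the same
	 * way futex_init() expects an immediate EFAULT when it probes
	 * a NULL address */
	long r = syscall(SYS_futex, &futex_word, FUTEX_WAIT, 0,
			 NULL, NULL, 0);
	printf("FUTEX_WAIT: %ld (%s)\n", r,
	       r == -1 ? strerror(errno) : "woken");
	return 0;
}
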
75196diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
75197index a9642d5..51eb98c 100644
75198--- a/kernel/futex_compat.c
75199+++ b/kernel/futex_compat.c
75200@@ -31,7 +31,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
75201 return 0;
75202 }
75203
75204-static void __user *futex_uaddr(struct robust_list __user *entry,
75205+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
75206 compat_long_t futex_offset)
75207 {
75208 compat_uptr_t base = ptr_to_compat(entry);
75209diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
75210index 9b22d03..6295b62 100644
75211--- a/kernel/gcov/base.c
75212+++ b/kernel/gcov/base.c
75213@@ -102,11 +102,6 @@ void gcov_enable_events(void)
75214 }
75215
75216 #ifdef CONFIG_MODULES
75217-static inline int within(void *addr, void *start, unsigned long size)
75218-{
75219- return ((addr >= start) && (addr < start + size));
75220-}
75221-
75222 /* Update list and generate events when modules are unloaded. */
75223 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75224 void *data)
75225@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75226 prev = NULL;
75227 /* Remove entries located in module from linked list. */
75228 for (info = gcov_info_head; info; info = info->next) {
75229- if (within(info, mod->module_core, mod->core_size)) {
75230+ if (within_module_core_rw((unsigned long)info, mod)) {
75231 if (prev)
75232 prev->next = info->next;
75233 else
75234diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
75235index cdd5607..c3fc919 100644
75236--- a/kernel/hrtimer.c
75237+++ b/kernel/hrtimer.c
75238@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
75239 local_irq_restore(flags);
75240 }
75241
75242-static void run_hrtimer_softirq(struct softirq_action *h)
75243+static void run_hrtimer_softirq(void)
75244 {
75245 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
75246
75247@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
75248 return NOTIFY_OK;
75249 }
75250
75251-static struct notifier_block __cpuinitdata hrtimers_nb = {
75252+static struct notifier_block hrtimers_nb = {
75253 .notifier_call = hrtimer_cpu_notify,
75254 };
75255
75256diff --git a/kernel/jump_label.c b/kernel/jump_label.c
75257index 60f48fa..7f3a770 100644
75258--- a/kernel/jump_label.c
75259+++ b/kernel/jump_label.c
75260@@ -13,6 +13,7 @@
75261 #include <linux/sort.h>
75262 #include <linux/err.h>
75263 #include <linux/static_key.h>
75264+#include <linux/mm.h>
75265
75266 #ifdef HAVE_JUMP_LABEL
75267
75268@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
75269
75270 size = (((unsigned long)stop - (unsigned long)start)
75271 / sizeof(struct jump_entry));
75272+ pax_open_kernel();
75273 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
75274+ pax_close_kernel();
75275 }
75276
75277 static void jump_label_update(struct static_key *key, int enable);
75278@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
75279 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
75280 struct jump_entry *iter;
75281
75282+ pax_open_kernel();
75283 for (iter = iter_start; iter < iter_stop; iter++) {
75284 if (within_module_init(iter->code, mod))
75285 iter->code = 0;
75286 }
75287+ pax_close_kernel();
75288 }
75289
75290 static int
75291diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
75292index 2169fee..706ccca 100644
75293--- a/kernel/kallsyms.c
75294+++ b/kernel/kallsyms.c
75295@@ -11,6 +11,9 @@
75296 * Changed the compression method from stem compression to "table lookup"
75297 * compression (see scripts/kallsyms.c for a more complete description)
75298 */
75299+#ifdef CONFIG_GRKERNSEC_HIDESYM
75300+#define __INCLUDED_BY_HIDESYM 1
75301+#endif
75302 #include <linux/kallsyms.h>
75303 #include <linux/module.h>
75304 #include <linux/init.h>
75305@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
75306
75307 static inline int is_kernel_inittext(unsigned long addr)
75308 {
75309+ if (system_state != SYSTEM_BOOTING)
75310+ return 0;
75311+
75312 if (addr >= (unsigned long)_sinittext
75313 && addr <= (unsigned long)_einittext)
75314 return 1;
75315 return 0;
75316 }
75317
75318+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75319+#ifdef CONFIG_MODULES
75320+static inline int is_module_text(unsigned long addr)
75321+{
75322+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
75323+ return 1;
75324+
75325+ addr = ktla_ktva(addr);
75326+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
75327+}
75328+#else
75329+static inline int is_module_text(unsigned long addr)
75330+{
75331+ return 0;
75332+}
75333+#endif
75334+#endif
75335+
75336 static inline int is_kernel_text(unsigned long addr)
75337 {
75338 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
75339@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
75340
75341 static inline int is_kernel(unsigned long addr)
75342 {
75343+
75344+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75345+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
75346+ return 1;
75347+
75348+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
75349+#else
75350 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
75351+#endif
75352+
75353 return 1;
75354 return in_gate_area_no_mm(addr);
75355 }
75356
75357 static int is_ksym_addr(unsigned long addr)
75358 {
75359+
75360+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75361+ if (is_module_text(addr))
75362+ return 0;
75363+#endif
75364+
75365 if (all_var)
75366 return is_kernel(addr);
75367
75368@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
75369
75370 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
75371 {
75372- iter->name[0] = '\0';
75373 iter->nameoff = get_symbol_offset(new_pos);
75374 iter->pos = new_pos;
75375 }
75376@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
75377 {
75378 struct kallsym_iter *iter = m->private;
75379
75380+#ifdef CONFIG_GRKERNSEC_HIDESYM
75381+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
75382+ return 0;
75383+#endif
75384+
75385 /* Some debugging symbols have no name. Ignore them. */
75386 if (!iter->name[0])
75387 return 0;
75388@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
75389 */
75390 type = iter->exported ? toupper(iter->type) :
75391 tolower(iter->type);
75392+
75393 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
75394 type, iter->name, iter->module_name);
75395 } else
75396@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
75397 struct kallsym_iter *iter;
75398 int ret;
75399
75400- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
75401+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
75402 if (!iter)
75403 return -ENOMEM;
75404 reset_iter(iter, 0);
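With the HIDESYM hunk in s_show() above, a non-root reader of /proc/kallsyms gets an empty file outright, whereas mainline's kptr_restrict leaves the rows but prints all-zero %pK addresses. A sketch that distinguishes the two from userspace:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[512];
	unsigned long shown = 0, zeroed = 0;

	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* %pK censorship yields all-zero addresses; check both
		 * 64- and 32-bit widths */
		if (!strncmp(line, "0000000000000000 ", 17) ||
		    !strncmp(line, "00000000 ", 9))
			zeroed++;
		else
			shown++;
	}
	fclose(f);
	/* HIDESYM non-root: both counts are 0 (empty file);
	 * kptr_restrict: zeroed dominates; neither: shown dominates */
	printf("%lu visible, %lu censored symbol addresses\n",
	       shown, zeroed);
	return 0;
}
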
75405diff --git a/kernel/kcmp.c b/kernel/kcmp.c
75406index e30ac0f..3528cac 100644
75407--- a/kernel/kcmp.c
75408+++ b/kernel/kcmp.c
75409@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
75410 struct task_struct *task1, *task2;
75411 int ret;
75412
75413+#ifdef CONFIG_GRKERNSEC
75414+ return -ENOSYS;
75415+#endif
75416+
75417 rcu_read_lock();
75418
75419 /*
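Under CONFIG_GRKERNSEC the hunk above makes kcmp(2) return ENOSYS unconditionally, as if the syscall were never implemented. A small probe; it assumes kernel headers recent enough to define SYS_kcmp (kcmp landed in 3.5), and the KCMP_FILE fallback define is ours:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef KCMP_FILE
#define KCMP_FILE 0	/* normally from linux/kcmp.h */
#endif

int main(void)
{
	pid_t self = getpid();
	/* compare fd 0 with itself within our own process: 0 (equal)
	 * on mainline, -1/ENOSYS under the grsecurity hunk */
	long r = syscall(SYS_kcmp, self, self, KCMP_FILE, 0, 0);

	if (r == -1 && errno == ENOSYS)
		printf("kcmp: ENOSYS (disabled, as under CONFIG_GRKERNSEC)\n");
	else
		printf("kcmp returned %ld (%s)\n", r,
		       r == -1 ? strerror(errno) : "ok");
	return 0;
}
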
75420diff --git a/kernel/kexec.c b/kernel/kexec.c
75421index 5e4bd78..00c5b91 100644
75422--- a/kernel/kexec.c
75423+++ b/kernel/kexec.c
75424@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
75425 unsigned long flags)
75426 {
75427 struct compat_kexec_segment in;
75428- struct kexec_segment out, __user *ksegments;
75429+ struct kexec_segment out;
75430+ struct kexec_segment __user *ksegments;
75431 unsigned long i, result;
75432
75433 /* Don't allow clients that don't understand the native
75434diff --git a/kernel/kmod.c b/kernel/kmod.c
75435index 0023a87..9c0c068 100644
75436--- a/kernel/kmod.c
75437+++ b/kernel/kmod.c
75438@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
75439 kfree(info->argv);
75440 }
75441
75442-static int call_modprobe(char *module_name, int wait)
75443+static int call_modprobe(char *module_name, char *module_param, int wait)
75444 {
75445 static char *envp[] = {
75446 "HOME=/",
75447@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
75448 NULL
75449 };
75450
75451- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
75452+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
75453 if (!argv)
75454 goto out;
75455
75456@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
75457 argv[1] = "-q";
75458 argv[2] = "--";
75459 argv[3] = module_name; /* check free_modprobe_argv() */
75460- argv[4] = NULL;
75461+ argv[4] = module_param;
75462+ argv[5] = NULL;
75463
75464 return call_usermodehelper_fns(modprobe_path, argv, envp,
75465 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
75466@@ -120,9 +121,8 @@ out:
75467 * If module auto-loading support is disabled then this function
75468 * becomes a no-operation.
75469 */
75470-int __request_module(bool wait, const char *fmt, ...)
75471+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
75472 {
75473- va_list args;
75474 char module_name[MODULE_NAME_LEN];
75475 unsigned int max_modprobes;
75476 int ret;
75477@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
75478 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
75479 static int kmod_loop_msg;
75480
75481- va_start(args, fmt);
75482- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
75483- va_end(args);
75484+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
75485 if (ret >= MODULE_NAME_LEN)
75486 return -ENAMETOOLONG;
75487
75488@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
75489 if (ret)
75490 return ret;
75491
75492+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75493+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75494+	/* hack to work around consolekit/udisks stupidity */
75495+ read_lock(&tasklist_lock);
75496+ if (!strcmp(current->comm, "mount") &&
75497+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
75498+ read_unlock(&tasklist_lock);
75499+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
75500+ return -EPERM;
75501+ }
75502+ read_unlock(&tasklist_lock);
75503+ }
75504+#endif
75505+
75506 /* If modprobe needs a service that is in a module, we get a recursive
75507 * loop. Limit the number of running kmod threads to max_threads/2 or
75508 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
75509@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
75510
75511 trace_module_request(module_name, wait, _RET_IP_);
75512
75513- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75514+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75515
75516 atomic_dec(&kmod_concurrent);
75517 return ret;
75518 }
75519+
75520+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
75521+{
75522+ va_list args;
75523+ int ret;
75524+
75525+ va_start(args, fmt);
75526+ ret = ____request_module(wait, module_param, fmt, args);
75527+ va_end(args);
75528+
75529+ return ret;
75530+}
75531+
75532+int __request_module(bool wait, const char *fmt, ...)
75533+{
75534+ va_list args;
75535+ int ret;
75536+
75537+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75538+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75539+ char module_param[MODULE_NAME_LEN];
75540+
75541+ memset(module_param, 0, sizeof(module_param));
75542+
75543+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
75544+
75545+ va_start(args, fmt);
75546+ ret = ____request_module(wait, module_param, fmt, args);
75547+ va_end(args);
75548+
75549+ return ret;
75550+ }
75551+#endif
75552+
75553+ va_start(args, fmt);
75554+ ret = ____request_module(wait, NULL, fmt, args);
75555+ va_end(args);
75556+
75557+ return ret;
75558+}
75559+
75560 EXPORT_SYMBOL(__request_module);
75561 #endif /* CONFIG_MODULES */
75562
75563@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
75564 *
75565 * Thus the __user pointer cast is valid here.
75566 */
75567- sys_wait4(pid, (int __user *)&ret, 0, NULL);
75568+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
75569
75570 /*
75571 * If ret is 0, either ____call_usermodehelper failed and the
75572@@ -635,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
75573 static int proc_cap_handler(struct ctl_table *table, int write,
75574 void __user *buffer, size_t *lenp, loff_t *ppos)
75575 {
75576- struct ctl_table t;
75577+ ctl_table_no_const t;
75578 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
75579 kernel_cap_t new_cap;
75580 int err, i;
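The MODHARDEN changes to kmod.c above tag every module auto-load requested by a non-root task: __request_module() appends "grsec_modharden_normal<uid>_" as an extra modprobe argument, and the module loader later refuses loads carrying that tag. A userspace sketch of the tag construction; build_tag() is a hypothetical stand-in for the snprintf in the patched __request_module(), and the 64-byte buffer approximates MODULE_NAME_LEN:

#include <stdio.h>
#include <string.h>

/* mirrors the format string the hunk above hands to modprobe */
static void build_tag(char *buf, size_t len, unsigned int uid)
{
	snprintf(buf, len, "grsec_modharden_normal%u_", uid);
}

int main(void)
{
	char tag[64];

	build_tag(tag, sizeof(tag), 1000);
	/* the kernel-side call then looks like:
	 *   /sbin/modprobe -q -- <module> grsec_modharden_normal1000_ */
	printf("/sbin/modprobe -q -- <module> %s\n", tag);
	return 0;
}
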
75581diff --git a/kernel/kprobes.c b/kernel/kprobes.c
75582index 098f396..fe85ff1 100644
75583--- a/kernel/kprobes.c
75584+++ b/kernel/kprobes.c
75585@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
75586 * kernel image and loaded module images reside. This is required
75587 * so x86_64 can correctly handle the %rip-relative fixups.
75588 */
75589- kip->insns = module_alloc(PAGE_SIZE);
75590+ kip->insns = module_alloc_exec(PAGE_SIZE);
75591 if (!kip->insns) {
75592 kfree(kip);
75593 return NULL;
75594@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
75595 */
75596 if (!list_is_singular(&kip->list)) {
75597 list_del(&kip->list);
75598- module_free(NULL, kip->insns);
75599+ module_free_exec(NULL, kip->insns);
75600 kfree(kip);
75601 }
75602 return 1;
75603@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
75604 {
75605 int i, err = 0;
75606 unsigned long offset = 0, size = 0;
75607- char *modname, namebuf[128];
75608+ char *modname, namebuf[KSYM_NAME_LEN];
75609 const char *symbol_name;
75610 void *addr;
75611 struct kprobe_blackpoint *kb;
75612@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
75613 kprobe_type = "k";
75614
75615 if (sym)
75616- seq_printf(pi, "%p %s %s+0x%x %s ",
75617+ seq_printf(pi, "%pK %s %s+0x%x %s ",
75618 p->addr, kprobe_type, sym, offset,
75619 (modname ? modname : " "));
75620 else
75621- seq_printf(pi, "%p %s %p ",
75622+ seq_printf(pi, "%pK %s %pK ",
75623 p->addr, kprobe_type, p->addr);
75624
75625 if (!pp)
75626@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
75627 const char *sym = NULL;
75628 unsigned int i = *(loff_t *) v;
75629 unsigned long offset = 0;
75630- char *modname, namebuf[128];
75631+ char *modname, namebuf[KSYM_NAME_LEN];
75632
75633 head = &kprobe_table[i];
75634 preempt_disable();
75635diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
75636index 6ada93c..dce7d5d 100644
75637--- a/kernel/ksysfs.c
75638+++ b/kernel/ksysfs.c
75639@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
75640 {
75641 if (count+1 > UEVENT_HELPER_PATH_LEN)
75642 return -ENOENT;
75643+ if (!capable(CAP_SYS_ADMIN))
75644+ return -EPERM;
75645 memcpy(uevent_helper, buf, count);
75646 uevent_helper[count] = '\0';
75647 if (count && uevent_helper[count-1] == '\n')
75648@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
75649 return count;
75650 }
75651
75652-static struct bin_attribute notes_attr = {
75653+static bin_attribute_no_const notes_attr __read_only = {
75654 .attr = {
75655 .name = "notes",
75656 .mode = S_IRUGO,
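The uevent_helper_store() hunk above adds an in-kernel CAP_SYS_ADMIN check. Ordinary users are already stopped by file permissions at open time; the new check matters for writers that pass DAC (say, a root-owned process that dropped CAP_SYS_ADMIN but kept CAP_DAC_OVERRIDE). A sketch that exercises both failure points:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/uevent_helper", "w");

	if (!f) {
		/* unprivileged callers normally stop here (EACCES) */
		printf("open: %s\n", strerror(errno));
		return 1;
	}
	/* with the hunk above, even a writer that passed DAC gets
	 * EPERM from the store handler without CAP_SYS_ADMIN */
	if (fputs("/bin/true\n", f) == EOF || fflush(f) == EOF)
		printf("write: %s\n", strerror(errno));
	else
		printf("uevent helper updated\n");
	fclose(f);
	return 0;
}
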
75657diff --git a/kernel/lockdep.c b/kernel/lockdep.c
75658index 7981e5b..7f2105c 100644
75659--- a/kernel/lockdep.c
75660+++ b/kernel/lockdep.c
75661@@ -590,6 +590,10 @@ static int static_obj(void *obj)
75662 end = (unsigned long) &_end,
75663 addr = (unsigned long) obj;
75664
75665+#ifdef CONFIG_PAX_KERNEXEC
75666+ start = ktla_ktva(start);
75667+#endif
75668+
75669 /*
75670 * static variable?
75671 */
75672@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
75673 if (!static_obj(lock->key)) {
75674 debug_locks_off();
75675 printk("INFO: trying to register non-static key.\n");
75676+ printk("lock:%pS key:%pS.\n", lock, lock->key);
75677 printk("the code is fine but needs lockdep annotation.\n");
75678 printk("turning off the locking correctness validator.\n");
75679 dump_stack();
75680@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
75681 if (!class)
75682 return 0;
75683 }
75684- atomic_inc((atomic_t *)&class->ops);
75685+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
75686 if (very_verbose(class)) {
75687 printk("\nacquire class [%p] %s", class->key, class->name);
75688 if (class->name_version > 1)
75689diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
75690index b2c71c5..7b88d63 100644
75691--- a/kernel/lockdep_proc.c
75692+++ b/kernel/lockdep_proc.c
75693@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
75694 return 0;
75695 }
75696
75697- seq_printf(m, "%p", class->key);
75698+ seq_printf(m, "%pK", class->key);
75699 #ifdef CONFIG_DEBUG_LOCKDEP
75700 seq_printf(m, " OPS:%8ld", class->ops);
75701 #endif
75702@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
75703
75704 list_for_each_entry(entry, &class->locks_after, entry) {
75705 if (entry->distance == 1) {
75706- seq_printf(m, " -> [%p] ", entry->class->key);
75707+ seq_printf(m, " -> [%pK] ", entry->class->key);
75708 print_name(m, entry->class);
75709 seq_puts(m, "\n");
75710 }
75711@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
75712 if (!class->key)
75713 continue;
75714
75715- seq_printf(m, "[%p] ", class->key);
75716+ seq_printf(m, "[%pK] ", class->key);
75717 print_name(m, class);
75718 seq_puts(m, "\n");
75719 }
75720@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75721 if (!i)
75722 seq_line(m, '-', 40-namelen, namelen);
75723
75724- snprintf(ip, sizeof(ip), "[<%p>]",
75725+ snprintf(ip, sizeof(ip), "[<%pK>]",
75726 (void *)class->contention_point[i]);
75727 seq_printf(m, "%40s %14lu %29s %pS\n",
75728 name, stats->contention_point[i],
75729@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75730 if (!i)
75731 seq_line(m, '-', 40-namelen, namelen);
75732
75733- snprintf(ip, sizeof(ip), "[<%p>]",
75734+ snprintf(ip, sizeof(ip), "[<%pK>]",
75735 (void *)class->contending_point[i]);
75736 seq_printf(m, "%40s %14lu %29s %pS\n",
75737 name, stats->contending_point[i],
75738diff --git a/kernel/module.c b/kernel/module.c
75739index eab0827..f488603 100644
75740--- a/kernel/module.c
75741+++ b/kernel/module.c
75742@@ -61,6 +61,7 @@
75743 #include <linux/pfn.h>
75744 #include <linux/bsearch.h>
75745 #include <linux/fips.h>
75746+#include <linux/grsecurity.h>
75747 #include <uapi/linux/module.h>
75748 #include "module-internal.h"
75749
75750@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
75751
75752 /* Bounds of module allocation, for speeding __module_address.
75753 * Protected by module_mutex. */
75754-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
75755+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
75756+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
75757
75758 int register_module_notifier(struct notifier_block * nb)
75759 {
75760@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75761 return true;
75762
75763 list_for_each_entry_rcu(mod, &modules, list) {
75764- struct symsearch arr[] = {
75765+ struct symsearch modarr[] = {
75766 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
75767 NOT_GPL_ONLY, false },
75768 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
75769@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75770 if (mod->state == MODULE_STATE_UNFORMED)
75771 continue;
75772
75773- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
75774+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
75775 return true;
75776 }
75777 return false;
75778@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
75779 static int percpu_modalloc(struct module *mod,
75780 unsigned long size, unsigned long align)
75781 {
75782- if (align > PAGE_SIZE) {
75783+ if (align-1 >= PAGE_SIZE) {
75784 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
75785 mod->name, align, PAGE_SIZE);
75786 align = PAGE_SIZE;
75787@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
75788 static ssize_t show_coresize(struct module_attribute *mattr,
75789 struct module_kobject *mk, char *buffer)
75790 {
75791- return sprintf(buffer, "%u\n", mk->mod->core_size);
75792+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
75793 }
75794
75795 static struct module_attribute modinfo_coresize =
75796@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
75797 static ssize_t show_initsize(struct module_attribute *mattr,
75798 struct module_kobject *mk, char *buffer)
75799 {
75800- return sprintf(buffer, "%u\n", mk->mod->init_size);
75801+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
75802 }
75803
75804 static struct module_attribute modinfo_initsize =
75805@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
75806 */
75807 #ifdef CONFIG_SYSFS
75808
75809-#ifdef CONFIG_KALLSYMS
75810+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
75811 static inline bool sect_empty(const Elf_Shdr *sect)
75812 {
75813 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
75814@@ -1451,7 +1453,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
75815 {
75816 unsigned int notes, loaded, i;
75817 struct module_notes_attrs *notes_attrs;
75818- struct bin_attribute *nattr;
75819+ bin_attribute_no_const *nattr;
75820
75821 /* failed to create section attributes, so can't create notes */
75822 if (!mod->sect_attrs)
75823@@ -1563,7 +1565,7 @@ static void del_usage_links(struct module *mod)
75824 static int module_add_modinfo_attrs(struct module *mod)
75825 {
75826 struct module_attribute *attr;
75827- struct module_attribute *temp_attr;
75828+ module_attribute_no_const *temp_attr;
75829 int error = 0;
75830 int i;
75831
75832@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
75833
75834 static void unset_module_core_ro_nx(struct module *mod)
75835 {
75836- set_page_attributes(mod->module_core + mod->core_text_size,
75837- mod->module_core + mod->core_size,
75838+ set_page_attributes(mod->module_core_rw,
75839+ mod->module_core_rw + mod->core_size_rw,
75840 set_memory_x);
75841- set_page_attributes(mod->module_core,
75842- mod->module_core + mod->core_ro_size,
75843+ set_page_attributes(mod->module_core_rx,
75844+ mod->module_core_rx + mod->core_size_rx,
75845 set_memory_rw);
75846 }
75847
75848 static void unset_module_init_ro_nx(struct module *mod)
75849 {
75850- set_page_attributes(mod->module_init + mod->init_text_size,
75851- mod->module_init + mod->init_size,
75852+ set_page_attributes(mod->module_init_rw,
75853+ mod->module_init_rw + mod->init_size_rw,
75854 set_memory_x);
75855- set_page_attributes(mod->module_init,
75856- mod->module_init + mod->init_ro_size,
75857+ set_page_attributes(mod->module_init_rx,
75858+ mod->module_init_rx + mod->init_size_rx,
75859 set_memory_rw);
75860 }
75861
75862@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
75863 list_for_each_entry_rcu(mod, &modules, list) {
75864 if (mod->state == MODULE_STATE_UNFORMED)
75865 continue;
75866- if ((mod->module_core) && (mod->core_text_size)) {
75867- set_page_attributes(mod->module_core,
75868- mod->module_core + mod->core_text_size,
75869+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
75870+ set_page_attributes(mod->module_core_rx,
75871+ mod->module_core_rx + mod->core_size_rx,
75872 set_memory_rw);
75873 }
75874- if ((mod->module_init) && (mod->init_text_size)) {
75875- set_page_attributes(mod->module_init,
75876- mod->module_init + mod->init_text_size,
75877+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
75878+ set_page_attributes(mod->module_init_rx,
75879+ mod->module_init_rx + mod->init_size_rx,
75880 set_memory_rw);
75881 }
75882 }
75883@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
75884 list_for_each_entry_rcu(mod, &modules, list) {
75885 if (mod->state == MODULE_STATE_UNFORMED)
75886 continue;
75887- if ((mod->module_core) && (mod->core_text_size)) {
75888- set_page_attributes(mod->module_core,
75889- mod->module_core + mod->core_text_size,
75890+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
75891+ set_page_attributes(mod->module_core_rx,
75892+ mod->module_core_rx + mod->core_size_rx,
75893 set_memory_ro);
75894 }
75895- if ((mod->module_init) && (mod->init_text_size)) {
75896- set_page_attributes(mod->module_init,
75897- mod->module_init + mod->init_text_size,
75898+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
75899+ set_page_attributes(mod->module_init_rx,
75900+ mod->module_init_rx + mod->init_size_rx,
75901 set_memory_ro);
75902 }
75903 }
75904@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
75905
75906 /* This may be NULL, but that's OK */
75907 unset_module_init_ro_nx(mod);
75908- module_free(mod, mod->module_init);
75909+ module_free(mod, mod->module_init_rw);
75910+ module_free_exec(mod, mod->module_init_rx);
75911 kfree(mod->args);
75912 percpu_modfree(mod);
75913
75914 /* Free lock-classes: */
75915- lockdep_free_key_range(mod->module_core, mod->core_size);
75916+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
75917+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
75918
75919 /* Finally, free the core (containing the module structure) */
75920 unset_module_core_ro_nx(mod);
75921- module_free(mod, mod->module_core);
75922+ module_free_exec(mod, mod->module_core_rx);
75923+ module_free(mod, mod->module_core_rw);
75924
75925 #ifdef CONFIG_MPU
75926 update_protections(current->mm);
75927@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75928 int ret = 0;
75929 const struct kernel_symbol *ksym;
75930
75931+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75932+ int is_fs_load = 0;
75933+ int register_filesystem_found = 0;
75934+ char *p;
75935+
75936+ p = strstr(mod->args, "grsec_modharden_fs");
75937+ if (p) {
75938+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
75939+ /* copy \0 as well */
75940+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
75941+ is_fs_load = 1;
75942+ }
75943+#endif
75944+
75945 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
75946 const char *name = info->strtab + sym[i].st_name;
75947
75948+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75949+ /* it's a real shame this will never get ripped and copied
75950+ upstream! ;(
75951+ */
75952+ if (is_fs_load && !strcmp(name, "register_filesystem"))
75953+ register_filesystem_found = 1;
75954+#endif
75955+
75956 switch (sym[i].st_shndx) {
75957 case SHN_COMMON:
75958 /* We compiled with -fno-common. These are not
75959@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75960 ksym = resolve_symbol_wait(mod, info, name);
75961 /* Ok if resolved. */
75962 if (ksym && !IS_ERR(ksym)) {
75963+ pax_open_kernel();
75964 sym[i].st_value = ksym->value;
75965+ pax_close_kernel();
75966 break;
75967 }
75968
75969@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75970 secbase = (unsigned long)mod_percpu(mod);
75971 else
75972 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
75973+ pax_open_kernel();
75974 sym[i].st_value += secbase;
75975+ pax_close_kernel();
75976 break;
75977 }
75978 }
75979
75980+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75981+ if (is_fs_load && !register_filesystem_found) {
75982+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
75983+ ret = -EPERM;
75984+ }
75985+#endif
75986+
75987 return ret;
75988 }
75989
75990@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
75991 || s->sh_entsize != ~0UL
75992 || strstarts(sname, ".init"))
75993 continue;
75994- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
75995+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
75996+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
75997+ else
75998+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
75999 pr_debug("\t%s\n", sname);
76000 }
76001- switch (m) {
76002- case 0: /* executable */
76003- mod->core_size = debug_align(mod->core_size);
76004- mod->core_text_size = mod->core_size;
76005- break;
76006- case 1: /* RO: text and ro-data */
76007- mod->core_size = debug_align(mod->core_size);
76008- mod->core_ro_size = mod->core_size;
76009- break;
76010- case 3: /* whole core */
76011- mod->core_size = debug_align(mod->core_size);
76012- break;
76013- }
76014 }
76015
76016 pr_debug("Init section allocation order:\n");
76017@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
76018 || s->sh_entsize != ~0UL
76019 || !strstarts(sname, ".init"))
76020 continue;
76021- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
76022- | INIT_OFFSET_MASK);
76023+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76024+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
76025+ else
76026+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
76027+ s->sh_entsize |= INIT_OFFSET_MASK;
76028 pr_debug("\t%s\n", sname);
76029 }
76030- switch (m) {
76031- case 0: /* executable */
76032- mod->init_size = debug_align(mod->init_size);
76033- mod->init_text_size = mod->init_size;
76034- break;
76035- case 1: /* RO: text and ro-data */
76036- mod->init_size = debug_align(mod->init_size);
76037- mod->init_ro_size = mod->init_size;
76038- break;
76039- case 3: /* whole init */
76040- mod->init_size = debug_align(mod->init_size);
76041- break;
76042- }
76043 }
76044 }
76045
76046@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76047
76048 /* Put symbol section at end of init part of module. */
76049 symsect->sh_flags |= SHF_ALLOC;
76050- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
76051+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
76052 info->index.sym) | INIT_OFFSET_MASK;
76053 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
76054
76055@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76056 }
76057
76058 /* Append room for core symbols at end of core part. */
76059- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
76060- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
76061- mod->core_size += strtab_size;
76062+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
76063+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
76064+ mod->core_size_rx += strtab_size;
76065
76066 /* Put string table section at end of init part of module. */
76067 strsect->sh_flags |= SHF_ALLOC;
76068- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
76069+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
76070 info->index.str) | INIT_OFFSET_MASK;
76071 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
76072 }
76073@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76074 /* Make sure we get permanent strtab: don't use info->strtab. */
76075 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
76076
76077+ pax_open_kernel();
76078+
76079 /* Set types up while we still have access to sections. */
76080 for (i = 0; i < mod->num_symtab; i++)
76081 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
76082
76083- mod->core_symtab = dst = mod->module_core + info->symoffs;
76084- mod->core_strtab = s = mod->module_core + info->stroffs;
76085+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
76086+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
76087 src = mod->symtab;
76088 for (ndst = i = 0; i < mod->num_symtab; i++) {
76089 if (i == 0 ||
76090@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76091 }
76092 }
76093 mod->core_num_syms = ndst;
76094+
76095+ pax_close_kernel();
76096 }
76097 #else
76098 static inline void layout_symtab(struct module *mod, struct load_info *info)
76099@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
76100 return vmalloc_exec(size);
76101 }
76102
76103-static void *module_alloc_update_bounds(unsigned long size)
76104+static void *module_alloc_update_bounds_rw(unsigned long size)
76105 {
76106 void *ret = module_alloc(size);
76107
76108 if (ret) {
76109 mutex_lock(&module_mutex);
76110 /* Update module bounds. */
76111- if ((unsigned long)ret < module_addr_min)
76112- module_addr_min = (unsigned long)ret;
76113- if ((unsigned long)ret + size > module_addr_max)
76114- module_addr_max = (unsigned long)ret + size;
76115+ if ((unsigned long)ret < module_addr_min_rw)
76116+ module_addr_min_rw = (unsigned long)ret;
76117+ if ((unsigned long)ret + size > module_addr_max_rw)
76118+ module_addr_max_rw = (unsigned long)ret + size;
76119+ mutex_unlock(&module_mutex);
76120+ }
76121+ return ret;
76122+}
76123+
76124+static void *module_alloc_update_bounds_rx(unsigned long size)
76125+{
76126+ void *ret = module_alloc_exec(size);
76127+
76128+ if (ret) {
76129+ mutex_lock(&module_mutex);
76130+ /* Update module bounds. */
76131+ if ((unsigned long)ret < module_addr_min_rx)
76132+ module_addr_min_rx = (unsigned long)ret;
76133+ if ((unsigned long)ret + size > module_addr_max_rx)
76134+ module_addr_max_rx = (unsigned long)ret + size;
76135 mutex_unlock(&module_mutex);
76136 }
76137 return ret;
76138@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
76139 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76140 {
76141 const char *modmagic = get_modinfo(info, "vermagic");
76142+ const char *license = get_modinfo(info, "license");
76143 int err;
76144
76145+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
76146+ if (!license || !license_is_gpl_compatible(license))
76147+ return -ENOEXEC;
76148+#endif
76149+
76150 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
76151 modmagic = NULL;
76152
76153@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76154 }
76155
76156 /* Set up license info based on the info section */
76157- set_license(mod, get_modinfo(info, "license"));
76158+ set_license(mod, license);
76159
76160 return 0;
76161 }
76162@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
76163 void *ptr;
76164
76165 /* Do the allocs. */
76166- ptr = module_alloc_update_bounds(mod->core_size);
76167+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
76168 /*
76169 * The pointer to this block is stored in the module structure
76170 * which is inside the block. Just mark it as not being a
76171@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
76172 if (!ptr)
76173 return -ENOMEM;
76174
76175- memset(ptr, 0, mod->core_size);
76176- mod->module_core = ptr;
76177+ memset(ptr, 0, mod->core_size_rw);
76178+ mod->module_core_rw = ptr;
76179
76180- if (mod->init_size) {
76181- ptr = module_alloc_update_bounds(mod->init_size);
76182+ if (mod->init_size_rw) {
76183+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
76184 /*
76185 * The pointer to this block is stored in the module structure
76186 * which is inside the block. This block doesn't need to be
76187@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
76188 */
76189 kmemleak_ignore(ptr);
76190 if (!ptr) {
76191- module_free(mod, mod->module_core);
76192+ module_free(mod, mod->module_core_rw);
76193 return -ENOMEM;
76194 }
76195- memset(ptr, 0, mod->init_size);
76196- mod->module_init = ptr;
76197+ memset(ptr, 0, mod->init_size_rw);
76198+ mod->module_init_rw = ptr;
76199 } else
76200- mod->module_init = NULL;
76201+ mod->module_init_rw = NULL;
76202+
76203+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
76204+ kmemleak_not_leak(ptr);
76205+ if (!ptr) {
76206+ if (mod->module_init_rw)
76207+ module_free(mod, mod->module_init_rw);
76208+ module_free(mod, mod->module_core_rw);
76209+ return -ENOMEM;
76210+ }
76211+
76212+ pax_open_kernel();
76213+ memset(ptr, 0, mod->core_size_rx);
76214+ pax_close_kernel();
76215+ mod->module_core_rx = ptr;
76216+
76217+ if (mod->init_size_rx) {
76218+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
76219+ kmemleak_ignore(ptr);
76220+ if (!ptr && mod->init_size_rx) {
76221+ module_free_exec(mod, mod->module_core_rx);
76222+ if (mod->module_init_rw)
76223+ module_free(mod, mod->module_init_rw);
76224+ module_free(mod, mod->module_core_rw);
76225+ return -ENOMEM;
76226+ }
76227+
76228+ pax_open_kernel();
76229+ memset(ptr, 0, mod->init_size_rx);
76230+ pax_close_kernel();
76231+ mod->module_init_rx = ptr;
76232+ } else
76233+ mod->module_init_rx = NULL;
76234
76235 /* Transfer each section which specifies SHF_ALLOC */
76236 pr_debug("final section addresses:\n");
76237@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
76238 if (!(shdr->sh_flags & SHF_ALLOC))
76239 continue;
76240
76241- if (shdr->sh_entsize & INIT_OFFSET_MASK)
76242- dest = mod->module_init
76243- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76244- else
76245- dest = mod->module_core + shdr->sh_entsize;
76246+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
76247+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76248+ dest = mod->module_init_rw
76249+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76250+ else
76251+ dest = mod->module_init_rx
76252+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76253+ } else {
76254+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76255+ dest = mod->module_core_rw + shdr->sh_entsize;
76256+ else
76257+ dest = mod->module_core_rx + shdr->sh_entsize;
76258+ }
76259+
76260+ if (shdr->sh_type != SHT_NOBITS) {
76261+
76262+#ifdef CONFIG_PAX_KERNEXEC
76263+#ifdef CONFIG_X86_64
76264+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
76265+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
76266+#endif
76267+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
76268+ pax_open_kernel();
76269+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76270+ pax_close_kernel();
76271+ } else
76272+#endif
76273
76274- if (shdr->sh_type != SHT_NOBITS)
76275 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76276+ }
76277 /* Update sh_addr to point to copy in image. */
76278- shdr->sh_addr = (unsigned long)dest;
76279+
76280+#ifdef CONFIG_PAX_KERNEXEC
76281+ if (shdr->sh_flags & SHF_EXECINSTR)
76282+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
76283+ else
76284+#endif
76285+
76286+ shdr->sh_addr = (unsigned long)dest;
76287 pr_debug("\t0x%lx %s\n",
76288 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
76289 }
76290@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
76291 * Do it before processing of module parameters, so the module
76292 * can provide parameter accessor functions of its own.
76293 */
76294- if (mod->module_init)
76295- flush_icache_range((unsigned long)mod->module_init,
76296- (unsigned long)mod->module_init
76297- + mod->init_size);
76298- flush_icache_range((unsigned long)mod->module_core,
76299- (unsigned long)mod->module_core + mod->core_size);
76300+ if (mod->module_init_rx)
76301+ flush_icache_range((unsigned long)mod->module_init_rx,
76302+ (unsigned long)mod->module_init_rx
76303+ + mod->init_size_rx);
76304+ flush_icache_range((unsigned long)mod->module_core_rx,
76305+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
76306
76307 set_fs(old_fs);
76308 }
76309@@ -2983,8 +3088,10 @@ out:
76310 static void module_deallocate(struct module *mod, struct load_info *info)
76311 {
76312 percpu_modfree(mod);
76313- module_free(mod, mod->module_init);
76314- module_free(mod, mod->module_core);
76315+ module_free_exec(mod, mod->module_init_rx);
76316+ module_free_exec(mod, mod->module_core_rx);
76317+ module_free(mod, mod->module_init_rw);
76318+ module_free(mod, mod->module_core_rw);
76319 }
76320
76321 int __weak module_finalize(const Elf_Ehdr *hdr,
76322@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
76323 static int post_relocation(struct module *mod, const struct load_info *info)
76324 {
76325 /* Sort exception table now relocations are done. */
76326+ pax_open_kernel();
76327 sort_extable(mod->extable, mod->extable + mod->num_exentries);
76328+ pax_close_kernel();
76329
76330 /* Copy relocated percpu area over. */
76331 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
76332@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
76333 MODULE_STATE_COMING, mod);
76334
76335 /* Set RO and NX regions for core */
76336- set_section_ro_nx(mod->module_core,
76337- mod->core_text_size,
76338- mod->core_ro_size,
76339- mod->core_size);
76340+ set_section_ro_nx(mod->module_core_rx,
76341+ mod->core_size_rx,
76342+ mod->core_size_rx,
76343+ mod->core_size_rx);
76344
76345 /* Set RO and NX regions for init */
76346- set_section_ro_nx(mod->module_init,
76347- mod->init_text_size,
76348- mod->init_ro_size,
76349- mod->init_size);
76350+ set_section_ro_nx(mod->module_init_rx,
76351+ mod->init_size_rx,
76352+ mod->init_size_rx,
76353+ mod->init_size_rx);
76354
76355 do_mod_ctors(mod);
76356 /* Start the module */
76357@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
76358 mod->strtab = mod->core_strtab;
76359 #endif
76360 unset_module_init_ro_nx(mod);
76361- module_free(mod, mod->module_init);
76362- mod->module_init = NULL;
76363- mod->init_size = 0;
76364- mod->init_ro_size = 0;
76365- mod->init_text_size = 0;
76366+ module_free(mod, mod->module_init_rw);
76367+ module_free_exec(mod, mod->module_init_rx);
76368+ mod->module_init_rw = NULL;
76369+ mod->module_init_rx = NULL;
76370+ mod->init_size_rw = 0;
76371+ mod->init_size_rx = 0;
76372 mutex_unlock(&module_mutex);
76373 wake_up_all(&module_wq);
76374
76375@@ -3209,9 +3319,38 @@ again:
76376 if (err)
76377 goto free_unload;
76378
76379+ /* Now copy in args */
76380+ mod->args = strndup_user(uargs, ~0UL >> 1);
76381+ if (IS_ERR(mod->args)) {
76382+ err = PTR_ERR(mod->args);
76383+ goto free_unload;
76384+ }
76385+
76386 /* Set up MODINFO_ATTR fields */
76387 setup_modinfo(mod, info);
76388
76389+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76390+ {
76391+ char *p, *p2;
76392+
76393+ if (strstr(mod->args, "grsec_modharden_netdev")) {
76394+		printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
76395+ err = -EPERM;
76396+ goto free_modinfo;
76397+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
76398+ p += sizeof("grsec_modharden_normal") - 1;
76399+ p2 = strstr(p, "_");
76400+ if (p2) {
76401+ *p2 = '\0';
76402+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
76403+ *p2 = '_';
76404+ }
76405+ err = -EPERM;
76406+ goto free_modinfo;
76407+ }
76408+ }
76409+#endif
76410+
76411 /* Fix up syms, so that st_value is a pointer to location. */
76412 err = simplify_symbols(mod, info);
76413 if (err < 0)
76414@@ -3227,13 +3366,6 @@ again:
76415
76416 flush_module_icache(mod);
76417
76418- /* Now copy in args */
76419- mod->args = strndup_user(uargs, ~0UL >> 1);
76420- if (IS_ERR(mod->args)) {
76421- err = PTR_ERR(mod->args);
76422- goto free_arch_cleanup;
76423- }
76424-
76425 dynamic_debug_setup(info->debug, info->num_debug);
76426
76427 mutex_lock(&module_mutex);
76428@@ -3278,11 +3410,10 @@ again:
76429 mutex_unlock(&module_mutex);
76430 dynamic_debug_remove(info->debug);
76431 synchronize_sched();
76432- kfree(mod->args);
76433- free_arch_cleanup:
76434 module_arch_cleanup(mod);
76435 free_modinfo:
76436 free_modinfo(mod);
76437+ kfree(mod->args);
76438 free_unload:
76439 module_unload_free(mod);
76440 unlink_mod:
76441@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
76442 unsigned long nextval;
76443
76444 /* At worse, next value is at end of module */
76445- if (within_module_init(addr, mod))
76446- nextval = (unsigned long)mod->module_init+mod->init_text_size;
76447+ if (within_module_init_rx(addr, mod))
76448+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
76449+ else if (within_module_init_rw(addr, mod))
76450+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
76451+ else if (within_module_core_rx(addr, mod))
76452+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
76453+ else if (within_module_core_rw(addr, mod))
76454+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
76455 else
76456- nextval = (unsigned long)mod->module_core+mod->core_text_size;
76457+ return NULL;
76458
76459 /* Scan for closest preceding symbol, and next symbol. (ELF
76460 starts real symbols at 1). */
76461@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
76462 return 0;
76463
76464 seq_printf(m, "%s %u",
76465- mod->name, mod->init_size + mod->core_size);
76466+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
76467 print_unload_info(m, mod);
76468
76469 /* Informative for users. */
76470@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
76471 mod->state == MODULE_STATE_COMING ? "Loading":
76472 "Live");
76473 /* Used by oprofile and other similar tools. */
76474- seq_printf(m, " 0x%pK", mod->module_core);
76475+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
76476
76477 /* Taints info */
76478 if (mod->taints)
76479@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
76480
76481 static int __init proc_modules_init(void)
76482 {
76483+#ifndef CONFIG_GRKERNSEC_HIDESYM
76484+#ifdef CONFIG_GRKERNSEC_PROC_USER
76485+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76486+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76487+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
76488+#else
76489 proc_create("modules", 0, NULL, &proc_modules_operations);
76490+#endif
76491+#else
76492+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76493+#endif
76494 return 0;
76495 }
76496 module_init(proc_modules_init);
76497@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
76498 {
76499 struct module *mod;
76500
76501- if (addr < module_addr_min || addr > module_addr_max)
76502+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
76503+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
76504 return NULL;
76505
76506 list_for_each_entry_rcu(mod, &modules, list) {
76507 if (mod->state == MODULE_STATE_UNFORMED)
76508 continue;
76509- if (within_module_core(addr, mod)
76510- || within_module_init(addr, mod))
76511+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
76512 return mod;
76513 }
76514 return NULL;
76515@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
76516 */
76517 struct module *__module_text_address(unsigned long addr)
76518 {
76519- struct module *mod = __module_address(addr);
76520+ struct module *mod;
76521+
76522+#ifdef CONFIG_X86_32
76523+ addr = ktla_ktva(addr);
76524+#endif
76525+
76526+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
76527+ return NULL;
76528+
76529+ mod = __module_address(addr);
76530+
76531 if (mod) {
76532 /* Make sure it's within the text section. */
76533- if (!within(addr, mod->module_init, mod->init_text_size)
76534- && !within(addr, mod->module_core, mod->core_text_size))
76535+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
76536 mod = NULL;
76537 }
76538 return mod;
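
The kernel/module.c hunks above split each module mapping into separate RW and RX regions, so every address-range helper must now check two (base, size) pairs, and only the RX pair may classify an address as module text. Below is a minimal userspace sketch of that membership test: the field names mirror the patch, while everything around them is an illustrative stand-in, not kernel code.

/* Userspace sketch, not kernel code: membership tests after the RW/RX
 * split.  Field names mirror the patch; the harness is hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct mod_layout {
	uintptr_t module_core_rx, module_core_rw;	/* separate mappings */
	size_t    core_size_rx,   core_size_rw;
};

static bool within(uintptr_t addr, uintptr_t base, size_t size)
{
	return addr >= base && addr - base < size;
}

/* Only the RX region counts as text: a pointer into the RW data
 * mapping must never be treated as executable module code. */
static bool is_module_text(const struct mod_layout *m, uintptr_t addr)
{
	return within(addr, m->module_core_rx, m->core_size_rx);
}

static bool is_module_addr(const struct mod_layout *m, uintptr_t addr)
{
	return within(addr, m->module_core_rx, m->core_size_rx) ||
	       within(addr, m->module_core_rw, m->core_size_rw);
}

int main(void)
{
	struct mod_layout m = {
		.module_core_rx = 0x1000, .core_size_rx = 0x800,
		.module_core_rw = 0x4000, .core_size_rw = 0x400,
	};
	printf("%d %d\n", is_module_text(&m, 0x1100), is_module_addr(&m, 0x1100)); /* 1 1 */
	printf("%d %d\n", is_module_text(&m, 0x4100), is_module_addr(&m, 0x4100)); /* 0 1 */
	return 0;
}

__module_text_address() returning NULL for RW hits follows the same rule: a pointer into writable module data must not be reported as code.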
76539diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
76540index 7e3443f..b2a1e6b 100644
76541--- a/kernel/mutex-debug.c
76542+++ b/kernel/mutex-debug.c
76543@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
76544 }
76545
76546 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76547- struct thread_info *ti)
76548+ struct task_struct *task)
76549 {
76550 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
76551
76552 /* Mark the current thread as blocked on the lock: */
76553- ti->task->blocked_on = waiter;
76554+ task->blocked_on = waiter;
76555 }
76556
76557 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76558- struct thread_info *ti)
76559+ struct task_struct *task)
76560 {
76561 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
76562- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
76563- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
76564- ti->task->blocked_on = NULL;
76565+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
76566+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
76567+ task->blocked_on = NULL;
76568
76569 list_del_init(&waiter->list);
76570 waiter->task = NULL;
76571diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
76572index 0799fd3..d06ae3b 100644
76573--- a/kernel/mutex-debug.h
76574+++ b/kernel/mutex-debug.h
76575@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
76576 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
76577 extern void debug_mutex_add_waiter(struct mutex *lock,
76578 struct mutex_waiter *waiter,
76579- struct thread_info *ti);
76580+ struct task_struct *task);
76581 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76582- struct thread_info *ti);
76583+ struct task_struct *task);
76584 extern void debug_mutex_unlock(struct mutex *lock);
76585 extern void debug_mutex_init(struct mutex *lock, const char *name,
76586 struct lock_class_key *key);
76587diff --git a/kernel/mutex.c b/kernel/mutex.c
76588index a307cc9..27fd2e9 100644
76589--- a/kernel/mutex.c
76590+++ b/kernel/mutex.c
76591@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76592 spin_lock_mutex(&lock->wait_lock, flags);
76593
76594 debug_mutex_lock_common(lock, &waiter);
76595- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
76596+ debug_mutex_add_waiter(lock, &waiter, task);
76597
76598 /* add waiting tasks to the end of the waitqueue (FIFO): */
76599 list_add_tail(&waiter.list, &lock->wait_list);
76600@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76601 * TASK_UNINTERRUPTIBLE case.)
76602 */
76603 if (unlikely(signal_pending_state(state, task))) {
76604- mutex_remove_waiter(lock, &waiter,
76605- task_thread_info(task));
76606+ mutex_remove_waiter(lock, &waiter, task);
76607 mutex_release(&lock->dep_map, 1, ip);
76608 spin_unlock_mutex(&lock->wait_lock, flags);
76609
76610@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76611 done:
76612 lock_acquired(&lock->dep_map, ip);
76613 /* got the lock - rejoice! */
76614- mutex_remove_waiter(lock, &waiter, current_thread_info());
76615+ mutex_remove_waiter(lock, &waiter, task);
76616 mutex_set_owner(lock);
76617
76618 /* set it to 0 if there are no waiters left: */
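
The mutex-debug hunks above are a pure signature refactor: every helper only ever dereferenced ti->task, so passing the task_struct directly removes one pointer hop and the task_thread_info() calls at the call sites. A rough sketch of the shape change, with simplified stand-in types:

/* Simplified stand-ins; only the before/after shape matters here. */
#include <stdio.h>

struct waiter;
struct task { struct waiter *blocked_on; };
struct thread_info { struct task *task; };
struct waiter { struct task *task; };

/* old shape: caller passes thread_info, callee immediately chases ->task */
static void add_waiter_old(struct waiter *w, struct thread_info *ti)
{
	ti->task->blocked_on = w;
}

/* new shape: caller already holds the task pointer, so pass it directly */
static void add_waiter_new(struct waiter *w, struct task *task)
{
	task->blocked_on = w;
}

int main(void)
{
	struct task t = { 0 };
	struct thread_info ti = { &t };
	struct waiter w = { &t };

	add_waiter_old(&w, &ti);
	add_waiter_new(&w, &t);
	printf("%d\n", t.blocked_on == &w);	/* 1 */
	return 0;
}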
76619diff --git a/kernel/notifier.c b/kernel/notifier.c
76620index 2d5cc4c..d9ea600 100644
76621--- a/kernel/notifier.c
76622+++ b/kernel/notifier.c
76623@@ -5,6 +5,7 @@
76624 #include <linux/rcupdate.h>
76625 #include <linux/vmalloc.h>
76626 #include <linux/reboot.h>
76627+#include <linux/mm.h>
76628
76629 /*
76630 * Notifier list for kernel code which wants to be called
76631@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
76632 while ((*nl) != NULL) {
76633 if (n->priority > (*nl)->priority)
76634 break;
76635- nl = &((*nl)->next);
76636+ nl = (struct notifier_block **)&((*nl)->next);
76637 }
76638- n->next = *nl;
76639+ pax_open_kernel();
76640+ *(const void **)&n->next = *nl;
76641 rcu_assign_pointer(*nl, n);
76642+ pax_close_kernel();
76643 return 0;
76644 }
76645
76646@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
76647 return 0;
76648 if (n->priority > (*nl)->priority)
76649 break;
76650- nl = &((*nl)->next);
76651+ nl = (struct notifier_block **)&((*nl)->next);
76652 }
76653- n->next = *nl;
76654+ pax_open_kernel();
76655+ *(const void **)&n->next = *nl;
76656 rcu_assign_pointer(*nl, n);
76657+ pax_close_kernel();
76658 return 0;
76659 }
76660
76661@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
76662 {
76663 while ((*nl) != NULL) {
76664 if ((*nl) == n) {
76665+ pax_open_kernel();
76666 rcu_assign_pointer(*nl, n->next);
76667+ pax_close_kernel();
76668 return 0;
76669 }
76670- nl = &((*nl)->next);
76671+ nl = (struct notifier_block **)&((*nl)->next);
76672 }
76673 return -ENOENT;
76674 }
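
The notifier hunks wrap every write to the chain in pax_open_kernel()/pax_close_kernel(), which under PaX temporarily lifts kernel write protection (on x86, roughly by toggling CR0.WP) so that notifier_block structures can otherwise live in read-only memory; the casts through (const void **) exist for the same reason, presumably because ->next is const in the hardened struct. A rough userspace analogue, using mprotect() in place of those primitives:

/* Userspace analogue of pax_open_kernel()/pax_close_kernel(): data that
 * is read-only in steady state is briefly made writable around one
 * update.  mprotect() stands in for the write-protection toggling the
 * real primitives perform. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	long *slot = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (slot == MAP_FAILED)
		return 1;
	*slot = 1;
	mprotect(slot, 4096, PROT_READ);		/* steady state: read-only */

	mprotect(slot, 4096, PROT_READ | PROT_WRITE);	/* pax_open_kernel()  */
	*slot = 2;					/* the guarded update */
	mprotect(slot, 4096, PROT_READ);		/* pax_close_kernel() */

	printf("%ld\n", *slot);				/* 2 */
	return 0;
}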
76675diff --git a/kernel/panic.c b/kernel/panic.c
76676index e1b2822..5edc1d9 100644
76677--- a/kernel/panic.c
76678+++ b/kernel/panic.c
76679@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
76680 const char *board;
76681
76682 printk(KERN_WARNING "------------[ cut here ]------------\n");
76683- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
76684+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
76685 board = dmi_get_system_info(DMI_PRODUCT_NAME);
76686 if (board)
76687 printk(KERN_WARNING "Hardware name: %s\n", board);
76688@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
76689 */
76690 void __stack_chk_fail(void)
76691 {
76692- panic("stack-protector: Kernel stack is corrupted in: %p\n",
76693+ dump_stack();
76694+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
76695 __builtin_return_address(0));
76696 }
76697 EXPORT_SYMBOL(__stack_chk_fail);
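
__stack_chk_fail() is the landing pad gcc's -fstack-protector emits a call to when a function's stack canary has been clobbered; the hunk adds a dump_stack() before the panic and switches to the %pA specifier. A hand-written userspace illustration of the check the compiler automates (the real canary value and stack layout are under compiler control, so this only models the mechanism):

/* Model of the -fstack-protector check; not the generated code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CANARY 0x4242424242424242UL

static void copy_input(const char *src)
{
	unsigned long canary = CANARY;	/* placed below the saved frame state */
	char buf[16];

	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	if (canary != CANARY) {		/* the epilogue check */
		fprintf(stderr, "stack smashing detected\n");
		abort();		/* the kernel version panics instead */
	}
}

int main(void)
{
	copy_input("hello");
	puts("ok");
	return 0;
}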
76698diff --git a/kernel/pid.c b/kernel/pid.c
76699index f2c6a68..4922d97 100644
76700--- a/kernel/pid.c
76701+++ b/kernel/pid.c
76702@@ -33,6 +33,7 @@
76703 #include <linux/rculist.h>
76704 #include <linux/bootmem.h>
76705 #include <linux/hash.h>
76706+#include <linux/security.h>
76707 #include <linux/pid_namespace.h>
76708 #include <linux/init_task.h>
76709 #include <linux/syscalls.h>
76710@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
76711
76712 int pid_max = PID_MAX_DEFAULT;
76713
76714-#define RESERVED_PIDS 300
76715+#define RESERVED_PIDS 500
76716
76717 int pid_max_min = RESERVED_PIDS + 1;
76718 int pid_max_max = PID_MAX_LIMIT;
76719@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
76720 */
76721 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
76722 {
76723+ struct task_struct *task;
76724+
76725 rcu_lockdep_assert(rcu_read_lock_held(),
76726 "find_task_by_pid_ns() needs rcu_read_lock()"
76727 " protection");
76728- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76729+
76730+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76731+
76732+ if (gr_pid_is_chrooted(task))
76733+ return NULL;
76734+
76735+ return task;
76736 }
76737
76738 struct task_struct *find_task_by_vpid(pid_t vnr)
76739@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
76740 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
76741 }
76742
76743+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
76744+{
76745+ rcu_lockdep_assert(rcu_read_lock_held(),
76746+		"find_task_by_vpid_unrestricted() needs rcu_read_lock()"
76747+ " protection");
76748+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
76749+}
76750+
76751 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
76752 {
76753 struct pid *pid;
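
find_task_by_pid_ns() now hides tasks that gr_pid_is_chrooted() says live outside the caller's chroot, while the new find_task_by_vpid_unrestricted() keeps the unfiltered lookup for internal callers that must see every task; both still require rcu_read_lock(). A sketch of that raw-lookup-plus-policy-filter split, with purely illustrative names and data:

/* Every name here is an illustrative stand-in, not the kernel API. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct task { int pid; bool outside_callers_chroot; };

static struct task table[] = {
	{ 100, false },
	{ 200, true  },
};

static struct task *lookup_unrestricted(int pid)	/* internal callers */
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].pid == pid)
			return &table[i];
	return NULL;
}

static struct task *lookup(int pid)			/* policy-filtered */
{
	struct task *t = lookup_unrestricted(pid);

	if (t && t->outside_callers_chroot)
		return NULL;	/* hidden: behaves as if the pid is free */
	return t;
}

int main(void)
{
	printf("%p %p\n", (void *)lookup(100), (void *)lookup(200));
	printf("%p\n", (void *)lookup_unrestricted(200));
	return 0;
}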
76754diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
76755index bea15bd..789f3d0 100644
76756--- a/kernel/pid_namespace.c
76757+++ b/kernel/pid_namespace.c
76758@@ -249,7 +249,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
76759 void __user *buffer, size_t *lenp, loff_t *ppos)
76760 {
76761 struct pid_namespace *pid_ns = task_active_pid_ns(current);
76762- struct ctl_table tmp = *table;
76763+ ctl_table_no_const tmp = *table;
76764
76765 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
76766 return -EPERM;
76767diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
76768index 942ca27..111e609 100644
76769--- a/kernel/posix-cpu-timers.c
76770+++ b/kernel/posix-cpu-timers.c
76771@@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
76772
76773 static __init int init_posix_cpu_timers(void)
76774 {
76775- struct k_clock process = {
76776+ static struct k_clock process = {
76777 .clock_getres = process_cpu_clock_getres,
76778 .clock_get = process_cpu_clock_get,
76779 .timer_create = process_cpu_timer_create,
76780 .nsleep = process_cpu_nsleep,
76781 .nsleep_restart = process_cpu_nsleep_restart,
76782 };
76783- struct k_clock thread = {
76784+ static struct k_clock thread = {
76785 .clock_getres = thread_cpu_clock_getres,
76786 .clock_get = thread_cpu_clock_get,
76787 .timer_create = thread_cpu_timer_create,
76788diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
76789index e885be1..380fe76 100644
76790--- a/kernel/posix-timers.c
76791+++ b/kernel/posix-timers.c
76792@@ -43,6 +43,7 @@
76793 #include <linux/idr.h>
76794 #include <linux/posix-clock.h>
76795 #include <linux/posix-timers.h>
76796+#include <linux/grsecurity.h>
76797 #include <linux/syscalls.h>
76798 #include <linux/wait.h>
76799 #include <linux/workqueue.h>
76800@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
76801 * which we beg off on and pass to do_sys_settimeofday().
76802 */
76803
76804-static struct k_clock posix_clocks[MAX_CLOCKS];
76805+static struct k_clock *posix_clocks[MAX_CLOCKS];
76806
76807 /*
76808 * These ones are defined below.
76809@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
76810 */
76811 static __init int init_posix_timers(void)
76812 {
76813- struct k_clock clock_realtime = {
76814+ static struct k_clock clock_realtime = {
76815 .clock_getres = hrtimer_get_res,
76816 .clock_get = posix_clock_realtime_get,
76817 .clock_set = posix_clock_realtime_set,
76818@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
76819 .timer_get = common_timer_get,
76820 .timer_del = common_timer_del,
76821 };
76822- struct k_clock clock_monotonic = {
76823+ static struct k_clock clock_monotonic = {
76824 .clock_getres = hrtimer_get_res,
76825 .clock_get = posix_ktime_get_ts,
76826 .nsleep = common_nsleep,
76827@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
76828 .timer_get = common_timer_get,
76829 .timer_del = common_timer_del,
76830 };
76831- struct k_clock clock_monotonic_raw = {
76832+ static struct k_clock clock_monotonic_raw = {
76833 .clock_getres = hrtimer_get_res,
76834 .clock_get = posix_get_monotonic_raw,
76835 };
76836- struct k_clock clock_realtime_coarse = {
76837+ static struct k_clock clock_realtime_coarse = {
76838 .clock_getres = posix_get_coarse_res,
76839 .clock_get = posix_get_realtime_coarse,
76840 };
76841- struct k_clock clock_monotonic_coarse = {
76842+ static struct k_clock clock_monotonic_coarse = {
76843 .clock_getres = posix_get_coarse_res,
76844 .clock_get = posix_get_monotonic_coarse,
76845 };
76846- struct k_clock clock_boottime = {
76847+ static struct k_clock clock_boottime = {
76848 .clock_getres = hrtimer_get_res,
76849 .clock_get = posix_get_boottime,
76850 .nsleep = common_nsleep,
76851@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
76852 return;
76853 }
76854
76855- posix_clocks[clock_id] = *new_clock;
76856+ posix_clocks[clock_id] = new_clock;
76857 }
76858 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
76859
76860@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
76861 return (id & CLOCKFD_MASK) == CLOCKFD ?
76862 &clock_posix_dynamic : &clock_posix_cpu;
76863
76864- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
76865+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
76866 return NULL;
76867- return &posix_clocks[id];
76868+ return posix_clocks[id];
76869 }
76870
76871 static int common_timer_create(struct k_itimer *new_timer)
76872@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
76873 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
76874 return -EFAULT;
76875
76876+	/* Only the CLOCK_REALTIME clock can be set; all other clocks
76877+	   have their clock_set fptr set to a nosettime dummy function.
76878+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
76879+	   call common_clock_set, which calls do_sys_settimeofday, which
76880+	   we hook.
76881+	*/
76882+
76883 return kc->clock_set(which_clock, &new_tp);
76884 }
76885
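
The posix-timers hunks turn posix_clocks[] from an array of struct copies into an array of pointers to statically allocated k_clock structures: registration stores a pointer instead of copying, the lookup must therefore also reject NULL (unregistered) slots, and the pointed-to function tables can be placed in read-only data. A compact sketch of the difference, with illustrative names:

/* Why pointers-to-static beat by-value copies for fptr tables: with
 * copies the array itself must stay writable; with pointers to const
 * statics only the slots are written and each table can be read-only. */
#include <stdio.h>

struct k_ops { int (*getres)(void); };

static int realtime_getres(void) { return 1; }

static const struct k_ops clock_realtime = { .getres = realtime_getres };

static const struct k_ops *clocks[4];		/* slots, not copies */

static const struct k_ops *clockid_to_ops(int id)
{
	if (id < 0 || id >= 4 || !clocks[id] || !clocks[id]->getres)
		return NULL;			/* NULL slot == unregistered */
	return clocks[id];
}

int main(void)
{
	const struct k_ops *ops;

	clocks[0] = &clock_realtime;		/* registration */
	ops = clockid_to_ops(0);
	printf("%d %p\n", ops ? ops->getres() : -1, (void *)clockid_to_ops(1));
	return 0;
}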
76886diff --git a/kernel/power/process.c b/kernel/power/process.c
76887index d5a258b..4271191 100644
76888--- a/kernel/power/process.c
76889+++ b/kernel/power/process.c
76890@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
76891 u64 elapsed_csecs64;
76892 unsigned int elapsed_csecs;
76893 bool wakeup = false;
76894+ bool timedout = false;
76895
76896 do_gettimeofday(&start);
76897
76898@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
76899
76900 while (true) {
76901 todo = 0;
76902+ if (time_after(jiffies, end_time))
76903+ timedout = true;
76904 read_lock(&tasklist_lock);
76905 do_each_thread(g, p) {
76906 if (p == current || !freeze_task(p))
76907 continue;
76908
76909- if (!freezer_should_skip(p))
76910+ if (!freezer_should_skip(p)) {
76911 todo++;
76912+ if (timedout) {
76913+ printk(KERN_ERR "Task refusing to freeze:\n");
76914+ sched_show_task(p);
76915+ }
76916+ }
76917 } while_each_thread(g, p);
76918 read_unlock(&tasklist_lock);
76919
76920@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
76921 todo += wq_busy;
76922 }
76923
76924- if (!todo || time_after(jiffies, end_time))
76925+ if (!todo || timedout)
76926 break;
76927
76928 if (pm_wakeup_pending()) {
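
The freezer change latches the timeout once at the top of each scan, so the pass that gives up also prints every task still refusing to freeze, instead of breaking out of the loop before naming the culprits. A rough stand-alone model of that latch-then-report flow (time(2) is a crude stand-in for jiffies and time_after()):

/* Latched timeout: decide "timed out" once per pass, then report every
 * remaining offender in that same pass before giving up. */
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

int main(void)
{
	int pending[] = { 42, 99 };	/* tasks that refuse to freeze */
	time_t end_time = time(NULL);	/* already expired, for the demo */
	bool timedout = false;

	for (;;) {
		int todo = 0;

		if (time(NULL) >= end_time)
			timedout = true;	/* latched once per pass */
		for (int i = 0; i < 2; i++) {
			todo++;
			if (timedout)
				printf("Task refusing to freeze: %d\n",
				       pending[i]);
		}
		if (!todo || timedout)
			break;
	}
	return 0;
}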
76929diff --git a/kernel/printk.c b/kernel/printk.c
76930index 267ce78..2487112 100644
76931--- a/kernel/printk.c
76932+++ b/kernel/printk.c
76933@@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
76934 return ret;
76935 }
76936
76937+static int check_syslog_permissions(int type, bool from_file);
76938+
76939 static int devkmsg_open(struct inode *inode, struct file *file)
76940 {
76941 struct devkmsg_user *user;
76942 int err;
76943
76944+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
76945+ if (err)
76946+ return err;
76947+
76948 /* write-only does not need any file context */
76949 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
76950 return 0;
76951@@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
76952 if (dmesg_restrict)
76953 return 1;
76954 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
76955- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76956+ return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76957 }
76958
76959 static int check_syslog_permissions(int type, bool from_file)
76960@@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
76961 if (from_file && type != SYSLOG_ACTION_OPEN)
76962 return 0;
76963
76964+#ifdef CONFIG_GRKERNSEC_DMESG
76965+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
76966+ return -EPERM;
76967+#endif
76968+
76969 if (syslog_action_restricted(type)) {
76970 if (capable(CAP_SYSLOG))
76971 return 0;
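
The printk hunks vet /dev/kmsg readers at open() time with the same policy function the syslog(2) path uses, so a descriptor cannot be opened first and read later to dodge the policy; SYSLOG_ACTION_OPEN is also added to the set of actions left unrestricted when dmesg_restrict is off. A rough model of that check-at-open pattern, with simplified stand-ins for the names and the capability test:

/* Simplified model; names and the capability check are stand-ins. */
#include <stdio.h>
#include <stdbool.h>

enum { ACT_OPEN, ACT_READ_ALL, ACT_SIZE_BUFFER, ACT_READ };

static bool dmesg_restrict = true;

static bool action_restricted(int type)
{
	if (dmesg_restrict)
		return true;
	/* unrestricted: OPEN, "read all" and "size" stay open to everybody */
	return type != ACT_OPEN && type != ACT_READ_ALL &&
	       type != ACT_SIZE_BUFFER;
}

static int check_syslog_permissions(int type, bool has_cap_syslog)
{
	if (action_restricted(type) && !has_cap_syslog)
		return -1;			/* -EPERM in the kernel */
	return 0;
}

static int devkmsg_open(bool has_cap_syslog)
{
	return check_syslog_permissions(ACT_OPEN, has_cap_syslog);
}

int main(void)
{
	printf("%d %d\n", devkmsg_open(true), devkmsg_open(false)); /* 0 -1 */
	return 0;
}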
76972diff --git a/kernel/profile.c b/kernel/profile.c
76973index 1f39181..86093471 100644
76974--- a/kernel/profile.c
76975+++ b/kernel/profile.c
76976@@ -40,7 +40,7 @@ struct profile_hit {
76977 /* Oprofile timer tick hook */
76978 static int (*timer_hook)(struct pt_regs *) __read_mostly;
76979
76980-static atomic_t *prof_buffer;
76981+static atomic_unchecked_t *prof_buffer;
76982 static unsigned long prof_len, prof_shift;
76983
76984 int prof_on __read_mostly;
76985@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
76986 hits[i].pc = 0;
76987 continue;
76988 }
76989- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
76990+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
76991 hits[i].hits = hits[i].pc = 0;
76992 }
76993 }
76994@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
76995 * Add the current hit(s) and flush the write-queue out
76996 * to the global buffer:
76997 */
76998- atomic_add(nr_hits, &prof_buffer[pc]);
76999+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
77000 for (i = 0; i < NR_PROFILE_HIT; ++i) {
77001- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77002+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77003 hits[i].pc = hits[i].hits = 0;
77004 }
77005 out:
77006@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77007 {
77008 unsigned long pc;
77009 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
77010- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77011+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77012 }
77013 #endif /* !CONFIG_SMP */
77014
77015@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
77016 return -EFAULT;
77017 buf++; p++; count--; read++;
77018 }
77019- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
77020+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
77021 if (copy_to_user(buf, (void *)pnt, count))
77022 return -EFAULT;
77023 read += count;
77024@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
77025 }
77026 #endif
77027 profile_discard_flip_buffers();
77028- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
77029+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
77030 return count;
77031 }
77032
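
The atomic_t to atomic_unchecked_t conversions here, and in the RCU and scheduler files below, follow the PaX REFCOUNT split: the plain atomic operations are instrumented to detect overflow, turning reference-count bugs into detectable events, while counters where wraparound is harmless, such as these profiling statistics, opt out via the _unchecked variants. A userspace sketch of the idea using C11 atomics; the overflow handling shown is a simplification of what the real plugin does:

/* Checked vs. unchecked counters, modeled with C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef atomic_int atomic_unchecked_t;	/* wraps silently: statistics */

static void atomic_add_checked(atomic_int *v, int n)	/* refcount-style */
{
	int old = atomic_fetch_add(v, n);

	if (old > INT_MAX - n) {	/* the add just wrapped */
		fprintf(stderr, "refcount overflow detected\n");
		abort();		/* the kernel saturates and reports */
	}
}

int main(void)
{
	atomic_unchecked_t hits = INT_MAX;	/* profiler-style counter */
	atomic_int refs = 1;

	atomic_fetch_add(&hits, 1);	/* wraps, and that is fine */
	atomic_add_checked(&refs, 1);	/* guarded increment */
	printf("%d %d\n", (int)hits, (int)refs);
	return 0;
}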
77033diff --git a/kernel/ptrace.c b/kernel/ptrace.c
77034index 6cbeaae..cfe7ff0 100644
77035--- a/kernel/ptrace.c
77036+++ b/kernel/ptrace.c
77037@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
77038 if (seize)
77039 flags |= PT_SEIZED;
77040 rcu_read_lock();
77041- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77042+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77043 flags |= PT_PTRACE_CAP;
77044 rcu_read_unlock();
77045 task->ptrace = flags;
77046@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
77047 break;
77048 return -EIO;
77049 }
77050- if (copy_to_user(dst, buf, retval))
77051+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
77052 return -EFAULT;
77053 copied += retval;
77054 src += retval;
77055@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
77056 bool seized = child->ptrace & PT_SEIZED;
77057 int ret = -EIO;
77058 siginfo_t siginfo, *si;
77059- void __user *datavp = (void __user *) data;
77060+ void __user *datavp = (__force void __user *) data;
77061 unsigned long __user *datalp = datavp;
77062 unsigned long flags;
77063
77064@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
77065 goto out;
77066 }
77067
77068+ if (gr_handle_ptrace(child, request)) {
77069+ ret = -EPERM;
77070+ goto out_put_task_struct;
77071+ }
77072+
77073 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77074 ret = ptrace_attach(child, request, addr, data);
77075 /*
77076 * Some architectures need to do book-keeping after
77077 * a ptrace attach.
77078 */
77079- if (!ret)
77080+ if (!ret) {
77081 arch_ptrace_attach(child);
77082+ gr_audit_ptrace(child);
77083+ }
77084 goto out_put_task_struct;
77085 }
77086
77087@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
77088 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
77089 if (copied != sizeof(tmp))
77090 return -EIO;
77091- return put_user(tmp, (unsigned long __user *)data);
77092+ return put_user(tmp, (__force unsigned long __user *)data);
77093 }
77094
77095 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
77096@@ -1051,7 +1058,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
77097 }
77098
77099 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77100- compat_long_t addr, compat_long_t data)
77101+ compat_ulong_t addr, compat_ulong_t data)
77102 {
77103 struct task_struct *child;
77104 long ret;
77105@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77106 goto out;
77107 }
77108
77109+ if (gr_handle_ptrace(child, request)) {
77110+ ret = -EPERM;
77111+ goto out_put_task_struct;
77112+ }
77113+
77114 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77115 ret = ptrace_attach(child, request, addr, data);
77116 /*
77117 * Some architectures need to do book-keeping after
77118 * a ptrace attach.
77119 */
77120- if (!ret)
77121+ if (!ret) {
77122 arch_ptrace_attach(child);
77123+ gr_audit_ptrace(child);
77124+ }
77125 goto out_put_task_struct;
77126 }
77127
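
The extra retval > sizeof(buf) test in ptrace_readdata() makes the copy_to_user() defensive: even if access_process_vm() misbehaves and reports more bytes than the stack buffer holds, the copy can no longer overrun it. The same belt-and-braces pattern in miniature:

/* Never let a helper's reported length drive a copy past the local
 * buffer, even if the helper "cannot" return more than it was asked. */
#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, const char *src, size_t retval)
{
	char buf[128];

	if (retval > sizeof(buf))	/* the added guard */
		return -1;		/* -EFAULT in the kernel version */
	memcpy(buf, src, retval);
	memcpy(dst, buf, retval);	/* copy_to_user() stand-in */
	return 0;
}

int main(void)
{
	char src[16] = "data", dst[16];

	printf("%d\n", copy_out(dst, src, sizeof(src)));	/* 0: in bounds */
	printf("%d\n", copy_out(dst, src, 4096));		/* -1: rejected */
	return 0;
}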
77128diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
77129index e7dce58..ad0d7b7 100644
77130--- a/kernel/rcutiny.c
77131+++ b/kernel/rcutiny.c
77132@@ -46,7 +46,7 @@
77133 struct rcu_ctrlblk;
77134 static void invoke_rcu_callbacks(void);
77135 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
77136-static void rcu_process_callbacks(struct softirq_action *unused);
77137+static void rcu_process_callbacks(void);
77138 static void __call_rcu(struct rcu_head *head,
77139 void (*func)(struct rcu_head *rcu),
77140 struct rcu_ctrlblk *rcp);
77141@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
77142 rcu_is_callbacks_kthread()));
77143 }
77144
77145-static void rcu_process_callbacks(struct softirq_action *unused)
77146+static void rcu_process_callbacks(void)
77147 {
77148 __rcu_process_callbacks(&rcu_sched_ctrlblk);
77149 __rcu_process_callbacks(&rcu_bh_ctrlblk);
77150diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
77151index f85016a..91cb03b 100644
77152--- a/kernel/rcutiny_plugin.h
77153+++ b/kernel/rcutiny_plugin.h
77154@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
77155 have_rcu_kthread_work = morework;
77156 local_irq_restore(flags);
77157 if (work)
77158- rcu_process_callbacks(NULL);
77159+ rcu_process_callbacks();
77160 schedule_timeout_interruptible(1); /* Leave CPU for others. */
77161 }
77162
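
The rcutiny hunks above (and the matching kernel/rcutree.c change below) drop the never-used struct softirq_action argument from rcu_process_callbacks(), giving every handler the same exact void (*)(void) shape; presumably this lets the hardened kernel keep one precisely typed, read-only dispatch table. A sketch of that normalization:

/* With one exact shared handler type, the table needs no dummy
 * arguments and can be a const array of a single fptr type. */
#include <stdio.h>
#include <stddef.h>

typedef void (*softirq_handler_t)(void);

static void rcu_handler(void)   { puts("rcu callbacks"); }
static void timer_handler(void) { puts("timers"); }

static const softirq_handler_t handlers[] = { rcu_handler, timer_handler };

int main(void)
{
	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		handlers[i]();
	return 0;
}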
77163diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
77164index 31dea01..ad91ffb 100644
77165--- a/kernel/rcutorture.c
77166+++ b/kernel/rcutorture.c
77167@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
77168 { 0 };
77169 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
77170 { 0 };
77171-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77172-static atomic_t n_rcu_torture_alloc;
77173-static atomic_t n_rcu_torture_alloc_fail;
77174-static atomic_t n_rcu_torture_free;
77175-static atomic_t n_rcu_torture_mberror;
77176-static atomic_t n_rcu_torture_error;
77177+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77178+static atomic_unchecked_t n_rcu_torture_alloc;
77179+static atomic_unchecked_t n_rcu_torture_alloc_fail;
77180+static atomic_unchecked_t n_rcu_torture_free;
77181+static atomic_unchecked_t n_rcu_torture_mberror;
77182+static atomic_unchecked_t n_rcu_torture_error;
77183 static long n_rcu_torture_barrier_error;
77184 static long n_rcu_torture_boost_ktrerror;
77185 static long n_rcu_torture_boost_rterror;
77186@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
77187
77188 spin_lock_bh(&rcu_torture_lock);
77189 if (list_empty(&rcu_torture_freelist)) {
77190- atomic_inc(&n_rcu_torture_alloc_fail);
77191+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
77192 spin_unlock_bh(&rcu_torture_lock);
77193 return NULL;
77194 }
77195- atomic_inc(&n_rcu_torture_alloc);
77196+ atomic_inc_unchecked(&n_rcu_torture_alloc);
77197 p = rcu_torture_freelist.next;
77198 list_del_init(p);
77199 spin_unlock_bh(&rcu_torture_lock);
77200@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
77201 static void
77202 rcu_torture_free(struct rcu_torture *p)
77203 {
77204- atomic_inc(&n_rcu_torture_free);
77205+ atomic_inc_unchecked(&n_rcu_torture_free);
77206 spin_lock_bh(&rcu_torture_lock);
77207 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
77208 spin_unlock_bh(&rcu_torture_lock);
77209@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
77210 i = rp->rtort_pipe_count;
77211 if (i > RCU_TORTURE_PIPE_LEN)
77212 i = RCU_TORTURE_PIPE_LEN;
77213- atomic_inc(&rcu_torture_wcount[i]);
77214+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77215 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77216 rp->rtort_mbtest = 0;
77217 rcu_torture_free(rp);
77218@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
77219 i = rp->rtort_pipe_count;
77220 if (i > RCU_TORTURE_PIPE_LEN)
77221 i = RCU_TORTURE_PIPE_LEN;
77222- atomic_inc(&rcu_torture_wcount[i]);
77223+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77224 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77225 rp->rtort_mbtest = 0;
77226 list_del(&rp->rtort_free);
77227@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
77228 i = old_rp->rtort_pipe_count;
77229 if (i > RCU_TORTURE_PIPE_LEN)
77230 i = RCU_TORTURE_PIPE_LEN;
77231- atomic_inc(&rcu_torture_wcount[i]);
77232+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77233 old_rp->rtort_pipe_count++;
77234 cur_ops->deferred_free(old_rp);
77235 }
77236@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
77237 }
77238 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
77239 if (p->rtort_mbtest == 0)
77240- atomic_inc(&n_rcu_torture_mberror);
77241+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77242 spin_lock(&rand_lock);
77243 cur_ops->read_delay(&rand);
77244 n_rcu_torture_timers++;
77245@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
77246 }
77247 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
77248 if (p->rtort_mbtest == 0)
77249- atomic_inc(&n_rcu_torture_mberror);
77250+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77251 cur_ops->read_delay(&rand);
77252 preempt_disable();
77253 pipe_count = p->rtort_pipe_count;
77254@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
77255 rcu_torture_current,
77256 rcu_torture_current_version,
77257 list_empty(&rcu_torture_freelist),
77258- atomic_read(&n_rcu_torture_alloc),
77259- atomic_read(&n_rcu_torture_alloc_fail),
77260- atomic_read(&n_rcu_torture_free));
77261+ atomic_read_unchecked(&n_rcu_torture_alloc),
77262+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
77263+ atomic_read_unchecked(&n_rcu_torture_free));
77264 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
77265- atomic_read(&n_rcu_torture_mberror),
77266+ atomic_read_unchecked(&n_rcu_torture_mberror),
77267 n_rcu_torture_boost_ktrerror,
77268 n_rcu_torture_boost_rterror);
77269 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
77270@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
77271 n_barrier_attempts,
77272 n_rcu_torture_barrier_error);
77273 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
77274- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
77275+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
77276 n_rcu_torture_barrier_error != 0 ||
77277 n_rcu_torture_boost_ktrerror != 0 ||
77278 n_rcu_torture_boost_rterror != 0 ||
77279 n_rcu_torture_boost_failure != 0 ||
77280 i > 1) {
77281 cnt += sprintf(&page[cnt], "!!! ");
77282- atomic_inc(&n_rcu_torture_error);
77283+ atomic_inc_unchecked(&n_rcu_torture_error);
77284 WARN_ON_ONCE(1);
77285 }
77286 cnt += sprintf(&page[cnt], "Reader Pipe: ");
77287@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
77288 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
77289 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77290 cnt += sprintf(&page[cnt], " %d",
77291- atomic_read(&rcu_torture_wcount[i]));
77292+ atomic_read_unchecked(&rcu_torture_wcount[i]));
77293 }
77294 cnt += sprintf(&page[cnt], "\n");
77295 if (cur_ops->stats)
77296@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
77297
77298 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
77299
77300- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77301+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77302 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
77303 else if (n_online_successes != n_online_attempts ||
77304 n_offline_successes != n_offline_attempts)
77305@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
77306
77307 rcu_torture_current = NULL;
77308 rcu_torture_current_version = 0;
77309- atomic_set(&n_rcu_torture_alloc, 0);
77310- atomic_set(&n_rcu_torture_alloc_fail, 0);
77311- atomic_set(&n_rcu_torture_free, 0);
77312- atomic_set(&n_rcu_torture_mberror, 0);
77313- atomic_set(&n_rcu_torture_error, 0);
77314+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
77315+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
77316+ atomic_set_unchecked(&n_rcu_torture_free, 0);
77317+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
77318+ atomic_set_unchecked(&n_rcu_torture_error, 0);
77319 n_rcu_torture_barrier_error = 0;
77320 n_rcu_torture_boost_ktrerror = 0;
77321 n_rcu_torture_boost_rterror = 0;
77322 n_rcu_torture_boost_failure = 0;
77323 n_rcu_torture_boosts = 0;
77324 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
77325- atomic_set(&rcu_torture_wcount[i], 0);
77326+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
77327 for_each_possible_cpu(cpu) {
77328 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77329 per_cpu(rcu_torture_count, cpu)[i] = 0;
77330diff --git a/kernel/rcutree.c b/kernel/rcutree.c
77331index e441b77..dd54f17 100644
77332--- a/kernel/rcutree.c
77333+++ b/kernel/rcutree.c
77334@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
77335 rcu_prepare_for_idle(smp_processor_id());
77336 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
77337 smp_mb__before_atomic_inc(); /* See above. */
77338- atomic_inc(&rdtp->dynticks);
77339+ atomic_inc_unchecked(&rdtp->dynticks);
77340 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
77341- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
77342+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
77343
77344 /*
77345 * It is illegal to enter an extended quiescent state while
77346@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
77347 int user)
77348 {
77349 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
77350- atomic_inc(&rdtp->dynticks);
77351+ atomic_inc_unchecked(&rdtp->dynticks);
77352 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
77353 smp_mb__after_atomic_inc(); /* See above. */
77354- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
77355+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
77356 rcu_cleanup_after_idle(smp_processor_id());
77357 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
77358 if (!user && !is_idle_task(current)) {
77359@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
77360 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
77361
77362 if (rdtp->dynticks_nmi_nesting == 0 &&
77363- (atomic_read(&rdtp->dynticks) & 0x1))
77364+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
77365 return;
77366 rdtp->dynticks_nmi_nesting++;
77367 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
77368- atomic_inc(&rdtp->dynticks);
77369+ atomic_inc_unchecked(&rdtp->dynticks);
77370 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
77371 smp_mb__after_atomic_inc(); /* See above. */
77372- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
77373+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
77374 }
77375
77376 /**
77377@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
77378 return;
77379 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
77380 smp_mb__before_atomic_inc(); /* See above. */
77381- atomic_inc(&rdtp->dynticks);
77382+ atomic_inc_unchecked(&rdtp->dynticks);
77383 smp_mb__after_atomic_inc(); /* Force delay to next write. */
77384- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
77385+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
77386 }
77387
77388 /**
77389@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
77390 int ret;
77391
77392 preempt_disable();
77393- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
77394+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
77395 preempt_enable();
77396 return ret;
77397 }
77398@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
77399 */
77400 static int dyntick_save_progress_counter(struct rcu_data *rdp)
77401 {
77402- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
77403+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
77404 return (rdp->dynticks_snap & 0x1) == 0;
77405 }
77406
77407@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
77408 unsigned int curr;
77409 unsigned int snap;
77410
77411- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
77412+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
77413 snap = (unsigned int)rdp->dynticks_snap;
77414
77415 /*
77416@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
77417 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
77418 */
77419 if (till_stall_check < 3) {
77420- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
77421+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
77422 till_stall_check = 3;
77423 } else if (till_stall_check > 300) {
77424- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
77425+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
77426 till_stall_check = 300;
77427 }
77428 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
77429@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
77430 rsp->qlen += rdp->qlen;
77431 rdp->n_cbs_orphaned += rdp->qlen;
77432 rdp->qlen_lazy = 0;
77433- ACCESS_ONCE(rdp->qlen) = 0;
77434+ ACCESS_ONCE_RW(rdp->qlen) = 0;
77435 }
77436
77437 /*
77438@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
77439 }
77440 smp_mb(); /* List handling before counting for rcu_barrier(). */
77441 rdp->qlen_lazy -= count_lazy;
77442- ACCESS_ONCE(rdp->qlen) -= count;
77443+ ACCESS_ONCE_RW(rdp->qlen) -= count;
77444 rdp->n_cbs_invoked += count;
77445
77446 /* Reinstate batch limit if we have worked down the excess. */
77447@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
77448 /*
77449 * Do RCU core processing for the current CPU.
77450 */
77451-static void rcu_process_callbacks(struct softirq_action *unused)
77452+static void rcu_process_callbacks(void)
77453 {
77454 struct rcu_state *rsp;
77455
77456@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
77457 local_irq_restore(flags);
77458 return;
77459 }
77460- ACCESS_ONCE(rdp->qlen)++;
77461+ ACCESS_ONCE_RW(rdp->qlen)++;
77462 if (lazy)
77463 rdp->qlen_lazy++;
77464 else
77465@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
77466 * counter wrap on a 32-bit system. Quite a few more CPUs would of
77467 * course be required on a 64-bit system.
77468 */
77469- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
77470+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
77471 (ulong)atomic_long_read(&rsp->expedited_done) +
77472 ULONG_MAX / 8)) {
77473 synchronize_sched();
77474- atomic_long_inc(&rsp->expedited_wrap);
77475+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
77476 return;
77477 }
77478
77479@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
77480 * Take a ticket. Note that atomic_inc_return() implies a
77481 * full memory barrier.
77482 */
77483- snap = atomic_long_inc_return(&rsp->expedited_start);
77484+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
77485 firstsnap = snap;
77486 get_online_cpus();
77487 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
77488@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
77489 synchronize_sched_expedited_cpu_stop,
77490 NULL) == -EAGAIN) {
77491 put_online_cpus();
77492- atomic_long_inc(&rsp->expedited_tryfail);
77493+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
77494
77495 /* Check to see if someone else did our work for us. */
77496 s = atomic_long_read(&rsp->expedited_done);
77497 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77498 /* ensure test happens before caller kfree */
77499 smp_mb__before_atomic_inc(); /* ^^^ */
77500- atomic_long_inc(&rsp->expedited_workdone1);
77501+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
77502 return;
77503 }
77504
77505@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
77506 udelay(trycount * num_online_cpus());
77507 } else {
77508 wait_rcu_gp(call_rcu_sched);
77509- atomic_long_inc(&rsp->expedited_normal);
77510+ atomic_long_inc_unchecked(&rsp->expedited_normal);
77511 return;
77512 }
77513
77514@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
77515 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77516 /* ensure test happens before caller kfree */
77517 smp_mb__before_atomic_inc(); /* ^^^ */
77518- atomic_long_inc(&rsp->expedited_workdone2);
77519+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
77520 return;
77521 }
77522
77523@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
77524 * period works for us.
77525 */
77526 get_online_cpus();
77527- snap = atomic_long_read(&rsp->expedited_start);
77528+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
77529 smp_mb(); /* ensure read is before try_stop_cpus(). */
77530 }
77531- atomic_long_inc(&rsp->expedited_stoppedcpus);
77532+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
77533
77534 /*
77535 * Everyone up to our most recent fetch is covered by our grace
77536@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
77537 * than we did already did their update.
77538 */
77539 do {
77540- atomic_long_inc(&rsp->expedited_done_tries);
77541+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
77542 s = atomic_long_read(&rsp->expedited_done);
77543 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
77544 /* ensure test happens before caller kfree */
77545 smp_mb__before_atomic_inc(); /* ^^^ */
77546- atomic_long_inc(&rsp->expedited_done_lost);
77547+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
77548 break;
77549 }
77550 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
77551- atomic_long_inc(&rsp->expedited_done_exit);
77552+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
77553
77554 put_online_cpus();
77555 }
77556@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77557 * ACCESS_ONCE() to prevent the compiler from speculating
77558 * the increment to precede the early-exit check.
77559 */
77560- ACCESS_ONCE(rsp->n_barrier_done)++;
77561+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77562 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
77563 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
77564 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
77565@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77566
77567 /* Increment ->n_barrier_done to prevent duplicate work. */
77568 smp_mb(); /* Keep increment after above mechanism. */
77569- ACCESS_ONCE(rsp->n_barrier_done)++;
77570+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77571 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
77572 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
77573 smp_mb(); /* Keep increment before caller's subsequent code. */
77574@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
77575 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
77576 init_callback_list(rdp);
77577 rdp->qlen_lazy = 0;
77578- ACCESS_ONCE(rdp->qlen) = 0;
77579+ ACCESS_ONCE_RW(rdp->qlen) = 0;
77580 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
77581 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
77582- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
77583+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
77584 #ifdef CONFIG_RCU_USER_QS
77585 WARN_ON_ONCE(rdp->dynticks->in_user);
77586 #endif
77587@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
77588 rdp->blimit = blimit;
77589 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
77590 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
77591- atomic_set(&rdp->dynticks->dynticks,
77592- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
77593+ atomic_set_unchecked(&rdp->dynticks->dynticks,
77594+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
77595 rcu_prepare_for_idle_init(cpu);
77596 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
77597
77598diff --git a/kernel/rcutree.h b/kernel/rcutree.h
77599index 4b69291..704c92e 100644
77600--- a/kernel/rcutree.h
77601+++ b/kernel/rcutree.h
77602@@ -86,7 +86,7 @@ struct rcu_dynticks {
77603 long long dynticks_nesting; /* Track irq/process nesting level. */
77604 /* Process level is worth LLONG_MAX/2. */
77605 int dynticks_nmi_nesting; /* Track NMI nesting level. */
77606- atomic_t dynticks; /* Even value for idle, else odd. */
77607+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
77608 #ifdef CONFIG_RCU_FAST_NO_HZ
77609 int dyntick_drain; /* Prepare-for-idle state variable. */
77610 unsigned long dyntick_holdoff;
77611@@ -423,17 +423,17 @@ struct rcu_state {
77612 /* _rcu_barrier(). */
77613 /* End of fields guarded by barrier_mutex. */
77614
77615- atomic_long_t expedited_start; /* Starting ticket. */
77616- atomic_long_t expedited_done; /* Done ticket. */
77617- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
77618- atomic_long_t expedited_tryfail; /* # acquisition failures. */
77619- atomic_long_t expedited_workdone1; /* # done by others #1. */
77620- atomic_long_t expedited_workdone2; /* # done by others #2. */
77621- atomic_long_t expedited_normal; /* # fallbacks to normal. */
77622- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
77623- atomic_long_t expedited_done_tries; /* # tries to update _done. */
77624- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
77625- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
77626+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
77627+ atomic_long_t expedited_done; /* Done ticket. */
77628+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
77629+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
77630+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
77631+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
77632+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
77633+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
77634+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
77635+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
77636+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
77637
77638 unsigned long jiffies_force_qs; /* Time at which to invoke */
77639 /* force_quiescent_state(). */
77640diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
77641index c1cc7e1..f62e436 100644
77642--- a/kernel/rcutree_plugin.h
77643+++ b/kernel/rcutree_plugin.h
77644@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
77645
77646 /* Clean up and exit. */
77647 smp_mb(); /* ensure expedited GP seen before counter increment. */
77648- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
77649+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
77650 unlock_mb_ret:
77651 mutex_unlock(&sync_rcu_preempt_exp_mutex);
77652 mb_ret:
77653@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
77654 free_cpumask_var(cm);
77655 }
77656
77657-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
77658+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
77659 .store = &rcu_cpu_kthread_task,
77660 .thread_should_run = rcu_cpu_kthread_should_run,
77661 .thread_fn = rcu_cpu_kthread,
77662@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
77663 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
77664 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
77665 cpu, ticks_value, ticks_title,
77666- atomic_read(&rdtp->dynticks) & 0xfff,
77667+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
77668 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
77669 fast_no_hz);
77670 }
77671@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
77672
77673 /* Enqueue the callback on the nocb list and update counts. */
77674 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
77675- ACCESS_ONCE(*old_rhpp) = rhp;
77676+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
77677 atomic_long_add(rhcount, &rdp->nocb_q_count);
77678 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
77679
77680@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
77681 * Extract queued callbacks, update counts, and wait
77682 * for a grace period to elapse.
77683 */
77684- ACCESS_ONCE(rdp->nocb_head) = NULL;
77685+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
77686 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
77687 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
77688 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
77689- ACCESS_ONCE(rdp->nocb_p_count) += c;
77690- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
77691+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
77692+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
77693 wait_rcu_gp(rdp->rsp->call_remote);
77694
77695 /* Each pass through the following loop invokes a callback. */
77696@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
77697 list = next;
77698 }
77699 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
77700- ACCESS_ONCE(rdp->nocb_p_count) -= c;
77701- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
77702+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
77703+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
77704 rdp->n_nocbs_invoked += c;
77705 }
77706 return 0;
77707@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
77708 rdp = per_cpu_ptr(rsp->rda, cpu);
77709 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
77710 BUG_ON(IS_ERR(t));
77711- ACCESS_ONCE(rdp->nocb_kthread) = t;
77712+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
77713 }
77714 }
77715
77716diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
77717index 0d095dc..1985b19 100644
77718--- a/kernel/rcutree_trace.c
77719+++ b/kernel/rcutree_trace.c
77720@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
77721 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
77722 rdp->passed_quiesce, rdp->qs_pending);
77723 seq_printf(m, " dt=%d/%llx/%d df=%lu",
77724- atomic_read(&rdp->dynticks->dynticks),
77725+ atomic_read_unchecked(&rdp->dynticks->dynticks),
77726 rdp->dynticks->dynticks_nesting,
77727 rdp->dynticks->dynticks_nmi_nesting,
77728 rdp->dynticks_fqs);
77729@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
77730 struct rcu_state *rsp = (struct rcu_state *)m->private;
77731
77732 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
77733- atomic_long_read(&rsp->expedited_start),
77734+ atomic_long_read_unchecked(&rsp->expedited_start),
77735 atomic_long_read(&rsp->expedited_done),
77736- atomic_long_read(&rsp->expedited_wrap),
77737- atomic_long_read(&rsp->expedited_tryfail),
77738- atomic_long_read(&rsp->expedited_workdone1),
77739- atomic_long_read(&rsp->expedited_workdone2),
77740- atomic_long_read(&rsp->expedited_normal),
77741- atomic_long_read(&rsp->expedited_stoppedcpus),
77742- atomic_long_read(&rsp->expedited_done_tries),
77743- atomic_long_read(&rsp->expedited_done_lost),
77744- atomic_long_read(&rsp->expedited_done_exit));
77745+ atomic_long_read_unchecked(&rsp->expedited_wrap),
77746+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
77747+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
77748+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
77749+ atomic_long_read_unchecked(&rsp->expedited_normal),
77750+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
77751+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
77752+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
77753+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
77754 return 0;
77755 }
77756
77757diff --git a/kernel/resource.c b/kernel/resource.c
77758index 73f35d4..4684fc4 100644
77759--- a/kernel/resource.c
77760+++ b/kernel/resource.c
77761@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
77762
77763 static int __init ioresources_init(void)
77764 {
77765+#ifdef CONFIG_GRKERNSEC_PROC_ADD
77766+#ifdef CONFIG_GRKERNSEC_PROC_USER
77767+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
77768+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
77769+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77770+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
77771+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
77772+#endif
77773+#else
77774 proc_create("ioports", 0, NULL, &proc_ioports_operations);
77775 proc_create("iomem", 0, NULL, &proc_iomem_operations);
77776+#endif
77777 return 0;
77778 }
77779 __initcall(ioresources_init);
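
ioresources_init() gets the same three-way permission ladder applied to /proc/modules earlier in this patch: owner-only under CONFIG_GRKERNSEC_PROC_USER, owner-plus-group under CONFIG_GRKERNSEC_PROC_USERGROUP, and the stock world-readable default otherwise (mode 0 in proc_create() falls back to 0444). A tiny sketch of the ladder; proc_mode() is an illustrative helper, only the permission values come from the hunks:

/* The three modes from the hunks, behind an illustrative helper. */
#include <stdio.h>
#include <sys/stat.h>

static unsigned proc_mode(int proc_user, int proc_usergroup)
{
	if (proc_user)			/* CONFIG_GRKERNSEC_PROC_USER */
		return S_IRUSR;		/* 0400: root only */
	if (proc_usergroup)		/* CONFIG_GRKERNSEC_PROC_USERGROUP */
		return S_IRUSR | S_IRGRP; /* 0440: root + configured group */
	return 0444;			/* stock default (mode 0 -> 0444) */
}

int main(void)
{
	printf("%04o %04o %04o\n",
	       proc_mode(1, 0), proc_mode(0, 1), proc_mode(0, 0));
	return 0;
}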
77780diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
77781index 98ec494..4241d6d 100644
77782--- a/kernel/rtmutex-tester.c
77783+++ b/kernel/rtmutex-tester.c
77784@@ -20,7 +20,7 @@
77785 #define MAX_RT_TEST_MUTEXES 8
77786
77787 static spinlock_t rttest_lock;
77788-static atomic_t rttest_event;
77789+static atomic_unchecked_t rttest_event;
77790
77791 struct test_thread_data {
77792 int opcode;
77793@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77794
77795 case RTTEST_LOCKCONT:
77796 td->mutexes[td->opdata] = 1;
77797- td->event = atomic_add_return(1, &rttest_event);
77798+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77799 return 0;
77800
77801 case RTTEST_RESET:
77802@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77803 return 0;
77804
77805 case RTTEST_RESETEVENT:
77806- atomic_set(&rttest_event, 0);
77807+ atomic_set_unchecked(&rttest_event, 0);
77808 return 0;
77809
77810 default:
77811@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77812 return ret;
77813
77814 td->mutexes[id] = 1;
77815- td->event = atomic_add_return(1, &rttest_event);
77816+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77817 rt_mutex_lock(&mutexes[id]);
77818- td->event = atomic_add_return(1, &rttest_event);
77819+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77820 td->mutexes[id] = 4;
77821 return 0;
77822
77823@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77824 return ret;
77825
77826 td->mutexes[id] = 1;
77827- td->event = atomic_add_return(1, &rttest_event);
77828+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77829 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
77830- td->event = atomic_add_return(1, &rttest_event);
77831+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77832 td->mutexes[id] = ret ? 0 : 4;
77833 return ret ? -EINTR : 0;
77834
77835@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77836 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
77837 return ret;
77838
77839- td->event = atomic_add_return(1, &rttest_event);
77840+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77841 rt_mutex_unlock(&mutexes[id]);
77842- td->event = atomic_add_return(1, &rttest_event);
77843+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77844 td->mutexes[id] = 0;
77845 return 0;
77846
77847@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77848 break;
77849
77850 td->mutexes[dat] = 2;
77851- td->event = atomic_add_return(1, &rttest_event);
77852+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77853 break;
77854
77855 default:
77856@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77857 return;
77858
77859 td->mutexes[dat] = 3;
77860- td->event = atomic_add_return(1, &rttest_event);
77861+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77862 break;
77863
77864 case RTTEST_LOCKNOWAIT:
77865@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77866 return;
77867
77868 td->mutexes[dat] = 1;
77869- td->event = atomic_add_return(1, &rttest_event);
77870+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77871 return;
77872
77873 default:
77874diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
77875index 0984a21..939f183 100644
77876--- a/kernel/sched/auto_group.c
77877+++ b/kernel/sched/auto_group.c
77878@@ -11,7 +11,7 @@
77879
77880 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
77881 static struct autogroup autogroup_default;
77882-static atomic_t autogroup_seq_nr;
77883+static atomic_unchecked_t autogroup_seq_nr;
77884
77885 void __init autogroup_init(struct task_struct *init_task)
77886 {
77887@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
77888
77889 kref_init(&ag->kref);
77890 init_rwsem(&ag->lock);
77891- ag->id = atomic_inc_return(&autogroup_seq_nr);
77892+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
77893 ag->tg = tg;
77894 #ifdef CONFIG_RT_GROUP_SCHED
77895 /*
77896diff --git a/kernel/sched/core.c b/kernel/sched/core.c
77897index 26058d0..e315889 100644
77898--- a/kernel/sched/core.c
77899+++ b/kernel/sched/core.c
77900@@ -3367,7 +3367,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
77901 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77902 * positive (at least 1, or number of jiffies left till timeout) if completed.
77903 */
77904-long __sched
77905+long __sched __intentional_overflow(-1)
77906 wait_for_completion_interruptible_timeout(struct completion *x,
77907 unsigned long timeout)
77908 {
77909@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
77910 *
77911 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
77912 */
77913-int __sched wait_for_completion_killable(struct completion *x)
77914+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
77915 {
77916 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
77917 if (t == -ERESTARTSYS)
77918@@ -3405,7 +3405,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
77919 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77920 * positive (at least 1, or number of jiffies left till timeout) if completed.
77921 */
77922-long __sched
77923+long __sched __intentional_overflow(-1)
77924 wait_for_completion_killable_timeout(struct completion *x,
77925 unsigned long timeout)
77926 {
77927@@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
77928 /* convert nice value [19,-20] to rlimit style value [1,40] */
77929 int nice_rlim = 20 - nice;
77930
77931+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
77932+
77933 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
77934 capable(CAP_SYS_NICE));
77935 }
77936@@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
77937 if (nice > 19)
77938 nice = 19;
77939
77940- if (increment < 0 && !can_nice(current, nice))
77941+ if (increment < 0 && (!can_nice(current, nice) ||
77942+ gr_handle_chroot_nice()))
77943 return -EPERM;
77944
77945 retval = security_task_setnice(current, nice);
77946@@ -3818,6 +3821,7 @@ recheck:
77947 unsigned long rlim_rtprio =
77948 task_rlimit(p, RLIMIT_RTPRIO);
77949
77950+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
77951 /* can't set/change the rt policy */
77952 if (policy != p->policy && !rlim_rtprio)
77953 return -EPERM;
77954@@ -4901,7 +4905,7 @@ static void migrate_tasks(unsigned int dead_cpu)
77955
77956 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
77957
77958-static struct ctl_table sd_ctl_dir[] = {
77959+static ctl_table_no_const sd_ctl_dir[] __read_only = {
77960 {
77961 .procname = "sched_domain",
77962 .mode = 0555,
77963@@ -4918,17 +4922,17 @@ static struct ctl_table sd_ctl_root[] = {
77964 {}
77965 };
77966
77967-static struct ctl_table *sd_alloc_ctl_entry(int n)
77968+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
77969 {
77970- struct ctl_table *entry =
77971+ ctl_table_no_const *entry =
77972 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
77973
77974 return entry;
77975 }
77976
77977-static void sd_free_ctl_entry(struct ctl_table **tablep)
77978+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
77979 {
77980- struct ctl_table *entry;
77981+ ctl_table_no_const *entry;
77982
77983 /*
77984 * In the intermediate directories, both the child directory and
77985@@ -4936,22 +4940,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
77986 * will always be set. In the lowest directory the names are
77987 * static strings and all have proc handlers.
77988 */
77989- for (entry = *tablep; entry->mode; entry++) {
77990- if (entry->child)
77991- sd_free_ctl_entry(&entry->child);
77992+ for (entry = tablep; entry->mode; entry++) {
77993+ if (entry->child) {
77994+ sd_free_ctl_entry(entry->child);
77995+ pax_open_kernel();
77996+ entry->child = NULL;
77997+ pax_close_kernel();
77998+ }
77999 if (entry->proc_handler == NULL)
78000 kfree(entry->procname);
78001 }
78002
78003- kfree(*tablep);
78004- *tablep = NULL;
78005+ kfree(tablep);
78006 }
78007
78008 static int min_load_idx = 0;
78009 static int max_load_idx = CPU_LOAD_IDX_MAX;
78010
78011 static void
78012-set_table_entry(struct ctl_table *entry,
78013+set_table_entry(ctl_table_no_const *entry,
78014 const char *procname, void *data, int maxlen,
78015 umode_t mode, proc_handler *proc_handler,
78016 bool load_idx)
78017@@ -4971,7 +4978,7 @@ set_table_entry(struct ctl_table *entry,
78018 static struct ctl_table *
78019 sd_alloc_ctl_domain_table(struct sched_domain *sd)
78020 {
78021- struct ctl_table *table = sd_alloc_ctl_entry(13);
78022+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
78023
78024 if (table == NULL)
78025 return NULL;
78026@@ -5006,9 +5013,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
78027 return table;
78028 }
78029
78030-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
78031+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
78032 {
78033- struct ctl_table *entry, *table;
78034+ ctl_table_no_const *entry, *table;
78035 struct sched_domain *sd;
78036 int domain_num = 0, i;
78037 char buf[32];
78038@@ -5035,11 +5042,13 @@ static struct ctl_table_header *sd_sysctl_header;
78039 static void register_sched_domain_sysctl(void)
78040 {
78041 int i, cpu_num = num_possible_cpus();
78042- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
78043+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
78044 char buf[32];
78045
78046 WARN_ON(sd_ctl_dir[0].child);
78047+ pax_open_kernel();
78048 sd_ctl_dir[0].child = entry;
78049+ pax_close_kernel();
78050
78051 if (entry == NULL)
78052 return;
78053@@ -5062,8 +5071,12 @@ static void unregister_sched_domain_sysctl(void)
78054 if (sd_sysctl_header)
78055 unregister_sysctl_table(sd_sysctl_header);
78056 sd_sysctl_header = NULL;
78057- if (sd_ctl_dir[0].child)
78058- sd_free_ctl_entry(&sd_ctl_dir[0].child);
78059+ if (sd_ctl_dir[0].child) {
78060+ sd_free_ctl_entry(sd_ctl_dir[0].child);
78061+ pax_open_kernel();
78062+ sd_ctl_dir[0].child = NULL;
78063+ pax_close_kernel();
78064+ }
78065 }
78066 #else
78067 static void register_sched_domain_sysctl(void)
78068@@ -5162,7 +5175,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
78069 * happens before everything else. This has to be lower priority than
78070 * the notifier in the perf_event subsystem, though.
78071 */
78072-static struct notifier_block __cpuinitdata migration_notifier = {
78073+static struct notifier_block migration_notifier = {
78074 .notifier_call = migration_call,
78075 .priority = CPU_PRI_MIGRATION,
78076 };
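The sched/core.c hunks make the sd_ctl_dir sysctl skeleton __read_only and route the two remaining writes to sd_ctl_dir[0].child through pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86 by toggling CR0.WP). A rough Linux userspace analogue of that write window, using mprotect on a page-aligned object (the open_kernel/close_kernel names here are illustrative only):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* stands in for an __read_only kernel object */
static char table[4096] __attribute__((aligned(4096)));

static void open_kernel(void)  { mprotect(table, sizeof(table), PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(table, sizeof(table), PROT_READ); }

int main(void)
{
	strcpy(table, "initial");
	close_kernel();			/* table is now read-only, like __read_only data */

	open_kernel();			/* pax_open_kernel() analogue */
	strcpy(table, "patched");	/* the one sanctioned write */
	close_kernel();			/* pax_close_kernel() analogue */

	printf("%s\n", table);
	return 0;
}

This is also why sd_free_ctl_entry() changes signature: with the table read-only, it can no longer clear *tablep through a plain pointer-to-pointer and instead NULLs the child link inside an explicit write window.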
78077diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
78078index 81fa536..6ccf96a 100644
78079--- a/kernel/sched/fair.c
78080+++ b/kernel/sched/fair.c
78081@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
78082
78083 static void reset_ptenuma_scan(struct task_struct *p)
78084 {
78085- ACCESS_ONCE(p->mm->numa_scan_seq)++;
78086+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
78087 p->mm->numa_scan_offset = 0;
78088 }
78089
78090@@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
78091 */
78092 static int select_idle_sibling(struct task_struct *p, int target)
78093 {
78094- int cpu = smp_processor_id();
78095- int prev_cpu = task_cpu(p);
78096 struct sched_domain *sd;
78097 struct sched_group *sg;
78098- int i;
78099+ int i = task_cpu(p);
78100
78101- /*
78102- * If the task is going to be woken-up on this cpu and if it is
78103- * already idle, then it is the right target.
78104- */
78105- if (target == cpu && idle_cpu(cpu))
78106- return cpu;
78107+ if (idle_cpu(target))
78108+ return target;
78109
78110 /*
78111- * If the task is going to be woken-up on the cpu where it previously
78112- * ran and if it is currently idle, then it the right target.
78113+	 * If the previous cpu is cache affine and idle, don't be stupid.
78114 */
78115- if (target == prev_cpu && idle_cpu(prev_cpu))
78116- return prev_cpu;
78117+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
78118+ return i;
78119
78120 /*
78121 * Otherwise, iterate the domains and find an elegible idle cpu.
78122@@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
78123 goto next;
78124
78125 for_each_cpu(i, sched_group_cpus(sg)) {
78126- if (!idle_cpu(i))
78127+ if (i == target || !idle_cpu(i))
78128 goto next;
78129 }
78130
78131@@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
78132 * run_rebalance_domains is triggered when needed from the scheduler tick.
78133 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
78134 */
78135-static void run_rebalance_domains(struct softirq_action *h)
78136+static void run_rebalance_domains(void)
78137 {
78138 int this_cpu = smp_processor_id();
78139 struct rq *this_rq = cpu_rq(this_cpu);
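The select_idle_sibling() hunk folds the two special cases (wakeup target already idle, previous CPU already idle) into one pass: take the target if it is idle, otherwise fall back to the task's previous CPU only when it shares a last-level cache with the target and is idle. A condensed sketch of the resulting control flow, with idle_cpu() and cpus_share_cache() stubbed out:

#include <stdbool.h>
#include <stdio.h>

static bool idle_cpu(int cpu)              { return cpu == 2; }        /* stub */
static bool cpus_share_cache(int a, int b) { return a / 4 == b / 4; }  /* stub: 4 CPUs per LLC */

static int select_idle_sibling(int prev_cpu, int target)
{
	if (idle_cpu(target))
		return target;
	/* if the previous cpu is cache affine and idle, prefer it */
	if (prev_cpu != target && cpus_share_cache(prev_cpu, target) && idle_cpu(prev_cpu))
		return prev_cpu;
	return target;	/* the real code scans the sched domains here */
}

int main(void)
{
	printf("picked cpu %d\n", select_idle_sibling(2, 1));
	return 0;
}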
78140diff --git a/kernel/signal.c b/kernel/signal.c
78141index dec9c30..d1da15b 100644
78142--- a/kernel/signal.c
78143+++ b/kernel/signal.c
78144@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
78145
78146 int print_fatal_signals __read_mostly;
78147
78148-static void __user *sig_handler(struct task_struct *t, int sig)
78149+static __sighandler_t sig_handler(struct task_struct *t, int sig)
78150 {
78151 return t->sighand->action[sig - 1].sa.sa_handler;
78152 }
78153
78154-static int sig_handler_ignored(void __user *handler, int sig)
78155+static int sig_handler_ignored(__sighandler_t handler, int sig)
78156 {
78157 /* Is it explicitly or implicitly ignored? */
78158 return handler == SIG_IGN ||
78159@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
78160
78161 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
78162 {
78163- void __user *handler;
78164+ __sighandler_t handler;
78165
78166 handler = sig_handler(t, sig);
78167
78168@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
78169 atomic_inc(&user->sigpending);
78170 rcu_read_unlock();
78171
78172+ if (!override_rlimit)
78173+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
78174+
78175 if (override_rlimit ||
78176 atomic_read(&user->sigpending) <=
78177 task_rlimit(t, RLIMIT_SIGPENDING)) {
78178@@ -495,7 +498,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
78179
78180 int unhandled_signal(struct task_struct *tsk, int sig)
78181 {
78182- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
78183+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
78184 if (is_global_init(tsk))
78185 return 1;
78186 if (handler != SIG_IGN && handler != SIG_DFL)
78187@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
78188 }
78189 }
78190
78191+ /* allow glibc communication via tgkill to other threads in our
78192+ thread group */
78193+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
78194+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
78195+ && gr_handle_signal(t, sig))
78196+ return -EPERM;
78197+
78198 return security_task_kill(t, info, sig, 0);
78199 }
78200
78201@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78202 return send_signal(sig, info, p, 1);
78203 }
78204
78205-static int
78206+int
78207 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78208 {
78209 return send_signal(sig, info, t, 0);
78210@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78211 unsigned long int flags;
78212 int ret, blocked, ignored;
78213 struct k_sigaction *action;
78214+ int is_unhandled = 0;
78215
78216 spin_lock_irqsave(&t->sighand->siglock, flags);
78217 action = &t->sighand->action[sig-1];
78218@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78219 }
78220 if (action->sa.sa_handler == SIG_DFL)
78221 t->signal->flags &= ~SIGNAL_UNKILLABLE;
78222+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
78223+ is_unhandled = 1;
78224 ret = specific_send_sig_info(sig, info, t);
78225 spin_unlock_irqrestore(&t->sighand->siglock, flags);
78226
78227+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
78228+	   normal operation */
78229+ if (is_unhandled) {
78230+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
78231+ gr_handle_crash(t, sig);
78232+ }
78233+
78234 return ret;
78235 }
78236
78237@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78238 ret = check_kill_permission(sig, info, p);
78239 rcu_read_unlock();
78240
78241- if (!ret && sig)
78242+ if (!ret && sig) {
78243 ret = do_send_sig_info(sig, info, p, true);
78244+ if (!ret)
78245+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
78246+ }
78247
78248 return ret;
78249 }
78250@@ -2855,7 +2878,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
78251 int error = -ESRCH;
78252
78253 rcu_read_lock();
78254- p = find_task_by_vpid(pid);
78255+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78256+ /* allow glibc communication via tgkill to other threads in our
78257+ thread group */
78258+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
78259+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
78260+ p = find_task_by_vpid_unrestricted(pid);
78261+ else
78262+#endif
78263+ p = find_task_by_vpid(pid);
78264 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
78265 error = check_kill_permission(sig, info, p);
78266 /*
78267@@ -3138,8 +3169,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
78268 }
78269 seg = get_fs();
78270 set_fs(KERNEL_DS);
78271- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
78272- (stack_t __force __user *) &uoss,
78273+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
78274+ (stack_t __force_user *) &uoss,
78275 compat_user_stack_pointer());
78276 set_fs(seg);
78277 if (ret >= 0 && uoss_ptr) {
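Besides retyping signal handlers from void __user * to __sighandler_t, the signal.c hunks carve out one exemption from the gr_handle_signal()/chroot-findtask policy: glibc's threading support sends SIGRTMIN+1 via tgkill to threads of its own thread group (e.g. for pthread cancellation), and exactly that pattern is whitelisted in both check_kill_permission() and do_send_specific(). A small sketch of the predicate, with siginfo reduced to the two fields the check uses (the struct and helper names are stand-ins):

#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

#ifndef SI_TKILL
#define SI_TKILL (-6)
#endif

struct siginfo_lite { int si_code; int si_pid; };

/* true when the signal matches glibc's intra-thread-group tgkill pattern,
 * which the patch exempts from the extra policy checks */
static bool is_glibc_tgkill(const struct siginfo_lite *info, int sig, int target_tgid)
{
	return info && info->si_code == SI_TKILL &&
	       sig == SIGRTMIN + 1 &&
	       info->si_pid == target_tgid;
}

int main(void)
{
	struct siginfo_lite info = { SI_TKILL, 1234 };

	printf("%s\n", is_glibc_tgkill(&info, SIGRTMIN + 1, 1234)
		       ? "exempt" : "policy-checked");
	return 0;
}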
78278diff --git a/kernel/smp.c b/kernel/smp.c
78279index 69f38bd..77bbf12 100644
78280--- a/kernel/smp.c
78281+++ b/kernel/smp.c
78282@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
78283 return NOTIFY_OK;
78284 }
78285
78286-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
78287+static struct notifier_block hotplug_cfd_notifier = {
78288 .notifier_call = hotplug_cfd,
78289 };
78290
78291diff --git a/kernel/smpboot.c b/kernel/smpboot.c
78292index d6c5fc0..530560c 100644
78293--- a/kernel/smpboot.c
78294+++ b/kernel/smpboot.c
78295@@ -275,7 +275,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
78296 }
78297 smpboot_unpark_thread(plug_thread, cpu);
78298 }
78299- list_add(&plug_thread->list, &hotplug_threads);
78300+ pax_list_add(&plug_thread->list, &hotplug_threads);
78301 out:
78302 mutex_unlock(&smpboot_threads_lock);
78303 return ret;
78304@@ -292,7 +292,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
78305 {
78306 get_online_cpus();
78307 mutex_lock(&smpboot_threads_lock);
78308- list_del(&plug_thread->list);
78309+ pax_list_del(&plug_thread->list);
78310 smpboot_destroy_threads(plug_thread);
78311 mutex_unlock(&smpboot_threads_lock);
78312 put_online_cpus();
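The smpboot.c hunks swap list_add/list_del for pax_list_add/pax_list_del, hardened variants that sanity-check the neighbouring links before splicing (and perform the writes through a kernel write window). A sketch of the link-validation half of that idea; the corruption check shown is a simplification of the real one:

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void pax_list_add(struct list_head *new, struct list_head *head)
{
	struct list_head *next = head->next;

	if (next->prev != head) {	/* neighbour's back-link must point at us */
		fprintf(stderr, "list corruption detected\n");
		abort();
	}
	next->prev = new;
	new->next = next;
	new->prev = head;
	head->next = new;
}

int main(void)
{
	struct list_head head = { &head, &head }, node;

	pax_list_add(&node, &head);
	printf("linked ok: %d\n", head.next == &node && node.next == &head);
	return 0;
}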
78313diff --git a/kernel/softirq.c b/kernel/softirq.c
78314index ed567ba..e71dabf 100644
78315--- a/kernel/softirq.c
78316+++ b/kernel/softirq.c
78317@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
78318 EXPORT_SYMBOL(irq_stat);
78319 #endif
78320
78321-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
78322+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
78323
78324 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
78325
78326-char *softirq_to_name[NR_SOFTIRQS] = {
78327+const char * const softirq_to_name[NR_SOFTIRQS] = {
78328 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
78329 "TASKLET", "SCHED", "HRTIMER", "RCU"
78330 };
78331@@ -244,7 +244,7 @@ restart:
78332 kstat_incr_softirqs_this_cpu(vec_nr);
78333
78334 trace_softirq_entry(vec_nr);
78335- h->action(h);
78336+ h->action();
78337 trace_softirq_exit(vec_nr);
78338 if (unlikely(prev_count != preempt_count())) {
78339 printk(KERN_ERR "huh, entered softirq %u %s %p"
78340@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
78341 or_softirq_pending(1UL << nr);
78342 }
78343
78344-void open_softirq(int nr, void (*action)(struct softirq_action *))
78345+void __init open_softirq(int nr, void (*action)(void))
78346 {
78347 softirq_vec[nr].action = action;
78348 }
78349@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
78350
78351 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
78352
78353-static void tasklet_action(struct softirq_action *a)
78354+static void tasklet_action(void)
78355 {
78356 struct tasklet_struct *list;
78357
78358@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
78359 }
78360 }
78361
78362-static void tasklet_hi_action(struct softirq_action *a)
78363+static void tasklet_hi_action(void)
78364 {
78365 struct tasklet_struct *list;
78366
78367@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
78368 return NOTIFY_OK;
78369 }
78370
78371-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
78372+static struct notifier_block remote_softirq_cpu_notifier = {
78373 .notifier_call = remote_softirq_cpu_notify,
78374 };
78375
78376@@ -835,11 +835,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
78377 return NOTIFY_OK;
78378 }
78379
78380-static struct notifier_block __cpuinitdata cpu_nfb = {
78381+static struct notifier_block cpu_nfb = {
78382 .notifier_call = cpu_callback
78383 };
78384
78385-static struct smp_hotplug_thread softirq_threads = {
78386+static struct smp_hotplug_thread softirq_threads __read_only = {
78387 .store = &ksoftirqd,
78388 .thread_should_run = ksoftirqd_should_run,
78389 .thread_fn = run_ksoftirqd,
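The softirq.c hunks drop the unused struct softirq_action * argument from every handler and move softirq_vec into read-only, page-aligned storage, so the function-pointer table can no longer be overwritten at runtime; open_softirq() accordingly becomes __init-only. A userspace sketch of such a fixed dispatch table, with const standing in for __read_only:

#include <stdio.h>

#define NR_SOFTIRQS 2

struct softirq_action { void (*action)(void); };	/* handler takes no argument now */

static void timer_action(void)   { puts("TIMER softirq"); }
static void tasklet_action(void) { puts("TASKLET softirq"); }

/* const plays the role of __read_only: fixed after initialization */
static const struct softirq_action softirq_vec[NR_SOFTIRQS] = {
	{ timer_action },
	{ tasklet_action },
};

int main(void)
{
	for (int nr = 0; nr < NR_SOFTIRQS; nr++)
		softirq_vec[nr].action();	/* h->action() with no argument */
	return 0;
}

The matching handler-signature changes show up again in kernel/timer.c (run_timer_softirq) and kernel/sched/fair.c (run_rebalance_domains) elsewhere in this patch.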
78390diff --git a/kernel/srcu.c b/kernel/srcu.c
78391index 2b85982..d52ab26 100644
78392--- a/kernel/srcu.c
78393+++ b/kernel/srcu.c
78394@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
78395 preempt_disable();
78396 idx = rcu_dereference_index_check(sp->completed,
78397 rcu_read_lock_sched_held()) & 0x1;
78398- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78399+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78400 smp_mb(); /* B */ /* Avoid leaking the critical section. */
78401- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
78402+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
78403 preempt_enable();
78404 return idx;
78405 }
78406@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
78407 {
78408 preempt_disable();
78409 smp_mb(); /* C */ /* Avoid leaking the critical section. */
78410- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
78411+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
78412 preempt_enable();
78413 }
78414 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
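The srcu.c hunks switch the per-CPU counter updates from ACCESS_ONCE() to ACCESS_ONCE_RW(). Under PaX constification ACCESS_ONCE() yields a const-qualified lvalue, so write sites need an explicitly writable variant. The two macros below model that split; they are stand-ins, not the kernel definitions:

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int c = 0;

	ACCESS_ONCE_RW(c) += 1;			/* write: needs the RW variant */
	printf("c = %d\n", ACCESS_ONCE(c));	/* read: const variant suffices */
	return 0;
}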
78415diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
78416index 2f194e9..2c05ea9 100644
78417--- a/kernel/stop_machine.c
78418+++ b/kernel/stop_machine.c
78419@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
78420 * cpu notifiers. It currently shares the same priority as sched
78421 * migration_notifier.
78422 */
78423-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
78424+static struct notifier_block cpu_stop_cpu_notifier = {
78425 .notifier_call = cpu_stop_cpu_callback,
78426 .priority = 10,
78427 };
78428diff --git a/kernel/sys.c b/kernel/sys.c
78429index 265b376..4e42ef5 100644
78430--- a/kernel/sys.c
78431+++ b/kernel/sys.c
78432@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
78433 error = -EACCES;
78434 goto out;
78435 }
78436+
78437+ if (gr_handle_chroot_setpriority(p, niceval)) {
78438+ error = -EACCES;
78439+ goto out;
78440+ }
78441+
78442 no_nice = security_task_setnice(p, niceval);
78443 if (no_nice) {
78444 error = no_nice;
78445@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
78446 goto error;
78447 }
78448
78449+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
78450+ goto error;
78451+
78452 if (rgid != (gid_t) -1 ||
78453 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
78454 new->sgid = new->egid;
78455@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
78456 old = current_cred();
78457
78458 retval = -EPERM;
78459+
78460+ if (gr_check_group_change(kgid, kgid, kgid))
78461+ goto error;
78462+
78463 if (nsown_capable(CAP_SETGID))
78464 new->gid = new->egid = new->sgid = new->fsgid = kgid;
78465 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
78466@@ -647,7 +660,7 @@ error:
78467 /*
78468 * change the user struct in a credentials set to match the new UID
78469 */
78470-static int set_user(struct cred *new)
78471+int set_user(struct cred *new)
78472 {
78473 struct user_struct *new_user;
78474
78475@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
78476 goto error;
78477 }
78478
78479+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
78480+ goto error;
78481+
78482 if (!uid_eq(new->uid, old->uid)) {
78483 retval = set_user(new);
78484 if (retval < 0)
78485@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
78486 old = current_cred();
78487
78488 retval = -EPERM;
78489+
78490+ if (gr_check_crash_uid(kuid))
78491+ goto error;
78492+ if (gr_check_user_change(kuid, kuid, kuid))
78493+ goto error;
78494+
78495 if (nsown_capable(CAP_SETUID)) {
78496 new->suid = new->uid = kuid;
78497 if (!uid_eq(kuid, old->uid)) {
78498@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
78499 goto error;
78500 }
78501
78502+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
78503+ goto error;
78504+
78505 if (ruid != (uid_t) -1) {
78506 new->uid = kruid;
78507 if (!uid_eq(kruid, old->uid)) {
78508@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
78509 goto error;
78510 }
78511
78512+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
78513+ goto error;
78514+
78515 if (rgid != (gid_t) -1)
78516 new->gid = krgid;
78517 if (egid != (gid_t) -1)
78518@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78519 if (!uid_valid(kuid))
78520 return old_fsuid;
78521
78522+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
78523+ goto error;
78524+
78525 new = prepare_creds();
78526 if (!new)
78527 return old_fsuid;
78528@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78529 }
78530 }
78531
78532+error:
78533 abort_creds(new);
78534 return old_fsuid;
78535
78536@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
78537 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
78538 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
78539 nsown_capable(CAP_SETGID)) {
78540+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
78541+ goto error;
78542+
78543 if (!gid_eq(kgid, old->fsgid)) {
78544 new->fsgid = kgid;
78545 goto change_okay;
78546 }
78547 }
78548
78549+error:
78550 abort_creds(new);
78551 return old_fsgid;
78552
78553@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
78554 return -EFAULT;
78555
78556 down_read(&uts_sem);
78557- error = __copy_to_user(&name->sysname, &utsname()->sysname,
78558+ error = __copy_to_user(name->sysname, &utsname()->sysname,
78559 __OLD_UTS_LEN);
78560 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
78561- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
78562+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
78563 __OLD_UTS_LEN);
78564 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
78565- error |= __copy_to_user(&name->release, &utsname()->release,
78566+ error |= __copy_to_user(name->release, &utsname()->release,
78567 __OLD_UTS_LEN);
78568 error |= __put_user(0, name->release + __OLD_UTS_LEN);
78569- error |= __copy_to_user(&name->version, &utsname()->version,
78570+ error |= __copy_to_user(name->version, &utsname()->version,
78571 __OLD_UTS_LEN);
78572 error |= __put_user(0, name->version + __OLD_UTS_LEN);
78573- error |= __copy_to_user(&name->machine, &utsname()->machine,
78574+ error |= __copy_to_user(name->machine, &utsname()->machine,
78575 __OLD_UTS_LEN);
78576 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
78577 up_read(&uts_sem);
78578@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
78579 error = get_dumpable(me->mm);
78580 break;
78581 case PR_SET_DUMPABLE:
78582- if (arg2 < 0 || arg2 > 1) {
78583+ if (arg2 > 1) {
78584 error = -EINVAL;
78585 break;
78586 }
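Besides the gr_check_user_change()/gr_check_group_change() hooks placed ahead of every credential switch, the sys.c hunks fix a dead comparison in PR_SET_DUMPABLE: arg2 is an unsigned long, so "arg2 < 0" can never be true and the patch keeps only the upper-bound test. A two-line illustration:

#include <stdio.h>

/* equivalent to the patched "if (arg2 > 1) error" range check */
static int dumpable_arg_ok(unsigned long arg2)
{
	return arg2 <= 1;
}

int main(void)
{
	printf("%d %d %d\n", dumpable_arg_ok(0), dumpable_arg_ok(1), dumpable_arg_ok(2));
	return 0;
}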
78587diff --git a/kernel/sysctl.c b/kernel/sysctl.c
78588index c88878d..e4fa5d1 100644
78589--- a/kernel/sysctl.c
78590+++ b/kernel/sysctl.c
78591@@ -92,7 +92,6 @@
78592
78593
78594 #if defined(CONFIG_SYSCTL)
78595-
78596 /* External variables not in a header file. */
78597 extern int sysctl_overcommit_memory;
78598 extern int sysctl_overcommit_ratio;
78599@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
78600 void __user *buffer, size_t *lenp, loff_t *ppos);
78601 #endif
78602
78603-#ifdef CONFIG_PRINTK
78604 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78605 void __user *buffer, size_t *lenp, loff_t *ppos);
78606-#endif
78607
78608 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
78609 void __user *buffer, size_t *lenp, loff_t *ppos);
78610@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
78611
78612 #endif
78613
78614+extern struct ctl_table grsecurity_table[];
78615+
78616 static struct ctl_table kern_table[];
78617 static struct ctl_table vm_table[];
78618 static struct ctl_table fs_table[];
78619@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
78620 int sysctl_legacy_va_layout;
78621 #endif
78622
78623+#ifdef CONFIG_PAX_SOFTMODE
78624+static ctl_table pax_table[] = {
78625+ {
78626+ .procname = "softmode",
78627+ .data = &pax_softmode,
78628+ .maxlen = sizeof(unsigned int),
78629+ .mode = 0600,
78630+ .proc_handler = &proc_dointvec,
78631+ },
78632+
78633+ { }
78634+};
78635+#endif
78636+
78637 /* The default sysctl tables: */
78638
78639 static struct ctl_table sysctl_base_table[] = {
78640@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
78641 #endif
78642
78643 static struct ctl_table kern_table[] = {
78644+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
78645+ {
78646+ .procname = "grsecurity",
78647+ .mode = 0500,
78648+ .child = grsecurity_table,
78649+ },
78650+#endif
78651+
78652+#ifdef CONFIG_PAX_SOFTMODE
78653+ {
78654+ .procname = "pax",
78655+ .mode = 0500,
78656+ .child = pax_table,
78657+ },
78658+#endif
78659+
78660 {
78661 .procname = "sched_child_runs_first",
78662 .data = &sysctl_sched_child_runs_first,
78663@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
78664 .data = &modprobe_path,
78665 .maxlen = KMOD_PATH_LEN,
78666 .mode = 0644,
78667- .proc_handler = proc_dostring,
78668+ .proc_handler = proc_dostring_modpriv,
78669 },
78670 {
78671 .procname = "modules_disabled",
78672@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
78673 .extra1 = &zero,
78674 .extra2 = &one,
78675 },
78676+#endif
78677 {
78678 .procname = "kptr_restrict",
78679 .data = &kptr_restrict,
78680 .maxlen = sizeof(int),
78681 .mode = 0644,
78682 .proc_handler = proc_dointvec_minmax_sysadmin,
78683+#ifdef CONFIG_GRKERNSEC_HIDESYM
78684+ .extra1 = &two,
78685+#else
78686 .extra1 = &zero,
78687+#endif
78688 .extra2 = &two,
78689 },
78690-#endif
78691 {
78692 .procname = "ngroups_max",
78693 .data = &ngroups_max,
78694@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
78695 .proc_handler = proc_dointvec_minmax,
78696 .extra1 = &zero,
78697 },
78698+ {
78699+ .procname = "heap_stack_gap",
78700+ .data = &sysctl_heap_stack_gap,
78701+ .maxlen = sizeof(sysctl_heap_stack_gap),
78702+ .mode = 0644,
78703+ .proc_handler = proc_doulongvec_minmax,
78704+ },
78705 #else
78706 {
78707 .procname = "nr_trim_pages",
78708@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
78709 buffer, lenp, ppos);
78710 }
78711
78712+int proc_dostring_modpriv(struct ctl_table *table, int write,
78713+ void __user *buffer, size_t *lenp, loff_t *ppos)
78714+{
78715+ if (write && !capable(CAP_SYS_MODULE))
78716+ return -EPERM;
78717+
78718+ return _proc_do_string(table->data, table->maxlen, write,
78719+ buffer, lenp, ppos);
78720+}
78721+
78722 static size_t proc_skip_spaces(char **buf)
78723 {
78724 size_t ret;
78725@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
78726 len = strlen(tmp);
78727 if (len > *size)
78728 len = *size;
78729+ if (len > sizeof(tmp))
78730+ len = sizeof(tmp);
78731 if (copy_to_user(*buf, tmp, len))
78732 return -EFAULT;
78733 *size -= len;
78734@@ -1985,7 +2037,7 @@ int proc_dointvec(struct ctl_table *table, int write,
78735 static int proc_taint(struct ctl_table *table, int write,
78736 void __user *buffer, size_t *lenp, loff_t *ppos)
78737 {
78738- struct ctl_table t;
78739+ ctl_table_no_const t;
78740 unsigned long tmptaint = get_taint();
78741 int err;
78742
78743@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
78744 return err;
78745 }
78746
78747-#ifdef CONFIG_PRINTK
78748 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78749 void __user *buffer, size_t *lenp, loff_t *ppos)
78750 {
78751@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78752
78753 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
78754 }
78755-#endif
78756
78757 struct do_proc_dointvec_minmax_conv_param {
78758 int *min;
78759@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
78760 *i = val;
78761 } else {
78762 val = convdiv * (*i) / convmul;
78763- if (!first)
78764+ if (!first) {
78765 err = proc_put_char(&buffer, &left, '\t');
78766+ if (err)
78767+ break;
78768+ }
78769 err = proc_put_long(&buffer, &left, val, false);
78770 if (err)
78771 break;
78772@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
78773 return -ENOSYS;
78774 }
78775
78776+int proc_dostring_modpriv(struct ctl_table *table, int write,
78777+ void __user *buffer, size_t *lenp, loff_t *ppos)
78778+{
78779+ return -ENOSYS;
78780+}
78781+
78782 int proc_dointvec(struct ctl_table *table, int write,
78783 void __user *buffer, size_t *lenp, loff_t *ppos)
78784 {
78785@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
78786 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
78787 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
78788 EXPORT_SYMBOL(proc_dostring);
78789+EXPORT_SYMBOL(proc_dostring_modpriv);
78790 EXPORT_SYMBOL(proc_doulongvec_minmax);
78791 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
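The sysctl.c hunks hang new "grsecurity" and "pax" directories off kern_table and add proc_dostring_modpriv(), a string handler that lets anyone read but requires CAP_SYS_MODULE to write; it guards modprobe_path, which could otherwise be used to redirect module autoloading. A userspace model of that read-open/write-privileged handler, where capable_sys_module and the -1 error code stand in for capable(CAP_SYS_MODULE) and -EPERM:

#include <stdbool.h>
#include <stdio.h>

static bool capable_sys_module = false;
static char modprobe_path[64] = "/sbin/modprobe";

static int dostring_modpriv(bool write, const char *in, char *out, size_t outlen)
{
	if (write) {
		if (!capable_sys_module)
			return -1;	/* -EPERM in the kernel */
		snprintf(modprobe_path, sizeof(modprobe_path), "%s", in);
		return 0;
	}
	snprintf(out, outlen, "%s", modprobe_path);
	return 0;
}

int main(void)
{
	char buf[64];

	printf("write denied: %d\n", dostring_modpriv(true, "/tmp/evil", NULL, 0));
	dostring_modpriv(false, NULL, buf, sizeof(buf));
	printf("path still: %s\n", buf);
	return 0;
}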
78792diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
78793index 0ddf3a0..a199f50 100644
78794--- a/kernel/sysctl_binary.c
78795+++ b/kernel/sysctl_binary.c
78796@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
78797 int i;
78798
78799 set_fs(KERNEL_DS);
78800- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
78801+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
78802 set_fs(old_fs);
78803 if (result < 0)
78804 goto out_kfree;
78805@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
78806 }
78807
78808 set_fs(KERNEL_DS);
78809- result = vfs_write(file, buffer, str - buffer, &pos);
78810+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
78811 set_fs(old_fs);
78812 if (result < 0)
78813 goto out_kfree;
78814@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
78815 int i;
78816
78817 set_fs(KERNEL_DS);
78818- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
78819+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
78820 set_fs(old_fs);
78821 if (result < 0)
78822 goto out_kfree;
78823@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
78824 }
78825
78826 set_fs(KERNEL_DS);
78827- result = vfs_write(file, buffer, str - buffer, &pos);
78828+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
78829 set_fs(old_fs);
78830 if (result < 0)
78831 goto out_kfree;
78832@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
78833 int i;
78834
78835 set_fs(KERNEL_DS);
78836- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
78837+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
78838 set_fs(old_fs);
78839 if (result < 0)
78840 goto out;
78841@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
78842 __le16 dnaddr;
78843
78844 set_fs(KERNEL_DS);
78845- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
78846+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
78847 set_fs(old_fs);
78848 if (result < 0)
78849 goto out;
78850@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
78851 le16_to_cpu(dnaddr) & 0x3ff);
78852
78853 set_fs(KERNEL_DS);
78854- result = vfs_write(file, buf, len, &pos);
78855+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
78856 set_fs(old_fs);
78857 if (result < 0)
78858 goto out;
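The sysctl_binary.c hunks only add __force_user casts: these vfs_read()/vfs_write() calls pass kernel buffers while set_fs(KERNEL_DS) is in effect, and the cast tells the PaX user/kernel pointer checker that the mismatch is intentional. A toy model of the idea, with a flag standing in for the thread's address limit:

#include <stdio.h>

static int fs_is_kernel_ds;	/* models set_fs(KERNEL_DS) vs USER_DS */

/* models vfs_read(): normally expects a "user" buffer */
static long vfs_read_model(char *buf, long len)
{
	if (!fs_is_kernel_ds) {
		fprintf(stderr, "kernel buffer without KERNEL_DS\n");
		return -14;	/* -EFAULT */
	}
	for (long i = 0; i < len; i++)
		buf[i] = 'x';	/* pretend to copy file data */
	return len;
}

int main(void)
{
	char buf[8];
	long n;

	fs_is_kernel_ds = 1;		/* set_fs(KERNEL_DS) */
	n = vfs_read_model(buf, (long)sizeof(buf) - 1);
	fs_is_kernel_ds = 0;		/* set_fs(old_fs) */

	printf("read %ld bytes\n", n);
	return 0;
}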
78859diff --git a/kernel/taskstats.c b/kernel/taskstats.c
78860index 145bb4d..b2aa969 100644
78861--- a/kernel/taskstats.c
78862+++ b/kernel/taskstats.c
78863@@ -28,9 +28,12 @@
78864 #include <linux/fs.h>
78865 #include <linux/file.h>
78866 #include <linux/pid_namespace.h>
78867+#include <linux/grsecurity.h>
78868 #include <net/genetlink.h>
78869 #include <linux/atomic.h>
78870
78871+extern int gr_is_taskstats_denied(int pid);
78872+
78873 /*
78874 * Maximum length of a cpumask that can be specified in
78875 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
78876@@ -570,6 +573,9 @@ err:
78877
78878 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
78879 {
78880+ if (gr_is_taskstats_denied(current->pid))
78881+ return -EACCES;
78882+
78883 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
78884 return cmd_attr_register_cpumask(info);
78885 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
78886diff --git a/kernel/time.c b/kernel/time.c
78887index d226c6a..2f0d217 100644
78888--- a/kernel/time.c
78889+++ b/kernel/time.c
78890@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
78891 return error;
78892
78893 if (tz) {
78894+	/* we log in do_settimeofday(), called below, so don't log twice
78895+ */
78896+ if (!tv)
78897+ gr_log_timechange();
78898+
78899 sys_tz = *tz;
78900 update_vsyscall_tz();
78901 if (firsttime) {
78902@@ -493,7 +498,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
78903 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
78904 * value to a scaled second value.
78905 */
78906-unsigned long
78907+unsigned long __intentional_overflow(-1)
78908 timespec_to_jiffies(const struct timespec *value)
78909 {
78910 unsigned long sec = value->tv_sec;
78911diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
78912index f11d83b..d016d91 100644
78913--- a/kernel/time/alarmtimer.c
78914+++ b/kernel/time/alarmtimer.c
78915@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
78916 struct platform_device *pdev;
78917 int error = 0;
78918 int i;
78919- struct k_clock alarm_clock = {
78920+ static struct k_clock alarm_clock = {
78921 .clock_getres = alarm_clock_getres,
78922 .clock_get = alarm_clock_get,
78923 .timer_create = alarm_timer_create,
78924diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
78925index a13987a..36cd791 100644
78926--- a/kernel/time/tick-broadcast.c
78927+++ b/kernel/time/tick-broadcast.c
78928@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
78929 * then clear the broadcast bit.
78930 */
78931 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
78932- int cpu = smp_processor_id();
78933+ cpu = smp_processor_id();
78934
78935 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
78936 tick_broadcast_clear_oneshot(cpu);
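The tick-broadcast.c hunk is a plain shadowing fix: the inner block redeclared "int cpu", creating a fresh variable that shadowed the function's cpu parameter, so the patch drops the "int" and assigns to the existing one. A minimal illustration of the bug class:

#include <stdio.h>

static int shadowed(void)
{
	int cpu = -1;
	{
		int cpu = 3;	/* bug: a fresh variable shadows the outer one */
		(void)cpu;
	}
	return cpu;		/* still -1 */
}

static int fixed(void)
{
	int cpu = -1;
	{
		cpu = 3;	/* patched form: assign to the existing variable */
	}
	return cpu;		/* 3 */
}

int main(void)
{
	printf("shadowed=%d fixed=%d\n", shadowed(), fixed());
	return 0;
}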
78937diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
78938index cbc6acb..3a77191 100644
78939--- a/kernel/time/timekeeping.c
78940+++ b/kernel/time/timekeeping.c
78941@@ -15,6 +15,7 @@
78942 #include <linux/init.h>
78943 #include <linux/mm.h>
78944 #include <linux/sched.h>
78945+#include <linux/grsecurity.h>
78946 #include <linux/syscore_ops.h>
78947 #include <linux/clocksource.h>
78948 #include <linux/jiffies.h>
78949@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
78950 if (!timespec_valid_strict(tv))
78951 return -EINVAL;
78952
78953+ gr_log_timechange();
78954+
78955 write_seqlock_irqsave(&tk->lock, flags);
78956
78957 timekeeping_forward_now(tk);
78958diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
78959index af5a7e9..715611a 100644
78960--- a/kernel/time/timer_list.c
78961+++ b/kernel/time/timer_list.c
78962@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
78963
78964 static void print_name_offset(struct seq_file *m, void *sym)
78965 {
78966+#ifdef CONFIG_GRKERNSEC_HIDESYM
78967+ SEQ_printf(m, "<%p>", NULL);
78968+#else
78969 char symname[KSYM_NAME_LEN];
78970
78971 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
78972 SEQ_printf(m, "<%pK>", sym);
78973 else
78974 SEQ_printf(m, "%s", symname);
78975+#endif
78976 }
78977
78978 static void
78979@@ -112,7 +116,11 @@ next_one:
78980 static void
78981 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
78982 {
78983+#ifdef CONFIG_GRKERNSEC_HIDESYM
78984+ SEQ_printf(m, " .base: %p\n", NULL);
78985+#else
78986 SEQ_printf(m, " .base: %pK\n", base);
78987+#endif
78988 SEQ_printf(m, " .index: %d\n",
78989 base->index);
78990 SEQ_printf(m, " .resolution: %Lu nsecs\n",
78991@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
78992 {
78993 struct proc_dir_entry *pe;
78994
78995+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78996+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
78997+#else
78998 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
78999+#endif
79000 if (!pe)
79001 return -ENOMEM;
79002 return 0;
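Under CONFIG_GRKERNSEC_HIDESYM the timer_list.c (and, below, timer_stats.c) output paths stop resolving or printing kernel addresses, emitting a NULL placeholder instead, and the /proc entries are created 0400/0600 rather than world-readable so only root can read them. A compile-time sketch of the suppression pattern:

#include <stdio.h>

#define CONFIG_GRKERNSEC_HIDESYM 1	/* flip to 0 for the permissive path */

static void print_name_offset(void *sym)
{
#if CONFIG_GRKERNSEC_HIDESYM
	printf("<%p>", (void *)NULL);	/* leak nothing */
#else
	printf("<%p>", sym);		/* the real code tries a symbol lookup first */
#endif
}

int main(void)
{
	int obj;

	print_name_offset(&obj);
	putchar('\n');
	return 0;
}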
79003diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
79004index 0b537f2..40d6c20 100644
79005--- a/kernel/time/timer_stats.c
79006+++ b/kernel/time/timer_stats.c
79007@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
79008 static unsigned long nr_entries;
79009 static struct entry entries[MAX_ENTRIES];
79010
79011-static atomic_t overflow_count;
79012+static atomic_unchecked_t overflow_count;
79013
79014 /*
79015 * The entries are in a hash-table, for fast lookup:
79016@@ -140,7 +140,7 @@ static void reset_entries(void)
79017 nr_entries = 0;
79018 memset(entries, 0, sizeof(entries));
79019 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
79020- atomic_set(&overflow_count, 0);
79021+ atomic_set_unchecked(&overflow_count, 0);
79022 }
79023
79024 static struct entry *alloc_entry(void)
79025@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79026 if (likely(entry))
79027 entry->count++;
79028 else
79029- atomic_inc(&overflow_count);
79030+ atomic_inc_unchecked(&overflow_count);
79031
79032 out_unlock:
79033 raw_spin_unlock_irqrestore(lock, flags);
79034@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79035
79036 static void print_name_offset(struct seq_file *m, unsigned long addr)
79037 {
79038+#ifdef CONFIG_GRKERNSEC_HIDESYM
79039+ seq_printf(m, "<%p>", NULL);
79040+#else
79041 char symname[KSYM_NAME_LEN];
79042
79043 if (lookup_symbol_name(addr, symname) < 0)
79044- seq_printf(m, "<%p>", (void *)addr);
79045+ seq_printf(m, "<%pK>", (void *)addr);
79046 else
79047 seq_printf(m, "%s", symname);
79048+#endif
79049 }
79050
79051 static int tstats_show(struct seq_file *m, void *v)
79052@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
79053
79054 seq_puts(m, "Timer Stats Version: v0.2\n");
79055 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
79056- if (atomic_read(&overflow_count))
79057+ if (atomic_read_unchecked(&overflow_count))
79058 seq_printf(m, "Overflow: %d entries\n",
79059- atomic_read(&overflow_count));
79060+ atomic_read_unchecked(&overflow_count));
79061
79062 for (i = 0; i < nr_entries; i++) {
79063 entry = entries + i;
79064@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
79065 {
79066 struct proc_dir_entry *pe;
79067
79068+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79069+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
79070+#else
79071 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
79072+#endif
79073 if (!pe)
79074 return -ENOMEM;
79075 return 0;
79076diff --git a/kernel/timer.c b/kernel/timer.c
79077index 367d008..5dee98f 100644
79078--- a/kernel/timer.c
79079+++ b/kernel/timer.c
79080@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
79081 /*
79082 * This function runs timers and the timer-tq in bottom half context.
79083 */
79084-static void run_timer_softirq(struct softirq_action *h)
79085+static void run_timer_softirq(void)
79086 {
79087 struct tvec_base *base = __this_cpu_read(tvec_bases);
79088
79089@@ -1481,7 +1481,7 @@ static void process_timeout(unsigned long __data)
79090 *
79091 * In all cases the return value is guaranteed to be non-negative.
79092 */
79093-signed long __sched schedule_timeout(signed long timeout)
79094+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
79095 {
79096 struct timer_list timer;
79097 unsigned long expire;
79098@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
79099 return NOTIFY_OK;
79100 }
79101
79102-static struct notifier_block __cpuinitdata timers_nb = {
79103+static struct notifier_block timers_nb = {
79104 .notifier_call = timer_cpu_notify,
79105 };
79106
79107diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
79108index c0bd030..62a1927 100644
79109--- a/kernel/trace/blktrace.c
79110+++ b/kernel/trace/blktrace.c
79111@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
79112 struct blk_trace *bt = filp->private_data;
79113 char buf[16];
79114
79115- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
79116+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
79117
79118 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
79119 }
79120@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
79121 return 1;
79122
79123 bt = buf->chan->private_data;
79124- atomic_inc(&bt->dropped);
79125+ atomic_inc_unchecked(&bt->dropped);
79126 return 0;
79127 }
79128
79129@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
79130
79131 bt->dir = dir;
79132 bt->dev = dev;
79133- atomic_set(&bt->dropped, 0);
79134+ atomic_set_unchecked(&bt->dropped, 0);
79135
79136 ret = -EIO;
79137 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
79138diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
79139index b27052c..0e1af95 100644
79140--- a/kernel/trace/ftrace.c
79141+++ b/kernel/trace/ftrace.c
79142@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
79143 if (unlikely(ftrace_disabled))
79144 return 0;
79145
79146+ ret = ftrace_arch_code_modify_prepare();
79147+ FTRACE_WARN_ON(ret);
79148+ if (ret)
79149+ return 0;
79150+
79151 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
79152+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
79153 if (ret) {
79154 ftrace_bug(ret, ip);
79155- return 0;
79156 }
79157- return 1;
79158+ return ret ? 0 : 1;
79159 }
79160
79161 /*
79162@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
79163
79164 int
79165 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
79166- void *data)
79167+ void *data)
79168 {
79169 struct ftrace_func_probe *entry;
79170 struct ftrace_page *pg;
79171@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
79172 if (!count)
79173 return 0;
79174
79175+ pax_open_kernel();
79176 sort(start, count, sizeof(*start),
79177 ftrace_cmp_ips, ftrace_swap_ips);
79178+ pax_close_kernel();
79179
79180 start_pg = ftrace_allocate_pages(count);
79181 if (!start_pg)
79182@@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
79183 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
79184
79185 static int ftrace_graph_active;
79186-static struct notifier_block ftrace_suspend_notifier;
79187-
79188 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
79189 {
79190 return 0;
79191@@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
79192 return NOTIFY_DONE;
79193 }
79194
79195+static struct notifier_block ftrace_suspend_notifier = {
79196+ .notifier_call = ftrace_suspend_notifier_call
79197+};
79198+
79199 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79200 trace_func_graph_ent_t entryfunc)
79201 {
79202@@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79203 goto out;
79204 }
79205
79206- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
79207 register_pm_notifier(&ftrace_suspend_notifier);
79208
79209 ftrace_graph_active++;
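The ftrace.c hunk at ftrace_code_disable() brackets ftrace_make_nop() with ftrace_arch_code_modify_prepare()/ftrace_arch_code_modify_post_process(), so the architecture hook can unprotect kernel text before the nop is written and reprotect it afterwards, and it converts the result to the function's 0-on-failure/1-on-success convention. A toy prepare/act/post pairing:

#include <stdio.h>

static int text_writable;

static int modify_prepare(void)      { text_writable = 1; return 0; }
static int modify_post_process(void) { text_writable = 0; return 0; }

static int make_nop(void)
{
	if (!text_writable)
		return -1;	/* would fault on read-only text */
	puts("call site patched to nop");
	return 0;
}

static int code_disable(void)
{
	int ret = modify_prepare();

	if (ret)
		return 0;
	ret = make_nop();
	modify_post_process();
	return ret ? 0 : 1;	/* same convention as the patched function */
}

int main(void)
{
	printf("code_disable() = %d\n", code_disable());
	return 0;
}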
79210diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
79211index ce8514f..8233573 100644
79212--- a/kernel/trace/ring_buffer.c
79213+++ b/kernel/trace/ring_buffer.c
79214@@ -346,9 +346,9 @@ struct buffer_data_page {
79215 */
79216 struct buffer_page {
79217 struct list_head list; /* list of buffer pages */
79218- local_t write; /* index for next write */
79219+ local_unchecked_t write; /* index for next write */
79220 unsigned read; /* index for next read */
79221- local_t entries; /* entries on this page */
79222+ local_unchecked_t entries; /* entries on this page */
79223 unsigned long real_end; /* real end of data */
79224 struct buffer_data_page *page; /* Actual data page */
79225 };
79226@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
79227 unsigned long last_overrun;
79228 local_t entries_bytes;
79229 local_t entries;
79230- local_t overrun;
79231- local_t commit_overrun;
79232+ local_unchecked_t overrun;
79233+ local_unchecked_t commit_overrun;
79234 local_t dropped_events;
79235 local_t committing;
79236 local_t commits;
79237@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79238 *
79239 * We add a counter to the write field to denote this.
79240 */
79241- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
79242- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
79243+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
79244+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
79245
79246 /*
79247 * Just make sure we have seen our old_write and synchronize
79248@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79249 * cmpxchg to only update if an interrupt did not already
79250 * do it for us. If the cmpxchg fails, we don't care.
79251 */
79252- (void)local_cmpxchg(&next_page->write, old_write, val);
79253- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
79254+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
79255+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
79256
79257 /*
79258 * No need to worry about races with clearing out the commit.
79259@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
79260
79261 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
79262 {
79263- return local_read(&bpage->entries) & RB_WRITE_MASK;
79264+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
79265 }
79266
79267 static inline unsigned long rb_page_write(struct buffer_page *bpage)
79268 {
79269- return local_read(&bpage->write) & RB_WRITE_MASK;
79270+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
79271 }
79272
79273 static int
79274@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
79275 * bytes consumed in ring buffer from here.
79276 * Increment overrun to account for the lost events.
79277 */
79278- local_add(page_entries, &cpu_buffer->overrun);
79279+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
79280 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79281 }
79282
79283@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
79284 * it is our responsibility to update
79285 * the counters.
79286 */
79287- local_add(entries, &cpu_buffer->overrun);
79288+ local_add_unchecked(entries, &cpu_buffer->overrun);
79289 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79290
79291 /*
79292@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79293 if (tail == BUF_PAGE_SIZE)
79294 tail_page->real_end = 0;
79295
79296- local_sub(length, &tail_page->write);
79297+ local_sub_unchecked(length, &tail_page->write);
79298 return;
79299 }
79300
79301@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79302 rb_event_set_padding(event);
79303
79304 /* Set the write back to the previous setting */
79305- local_sub(length, &tail_page->write);
79306+ local_sub_unchecked(length, &tail_page->write);
79307 return;
79308 }
79309
79310@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79311
79312 /* Set write to end of buffer */
79313 length = (tail + length) - BUF_PAGE_SIZE;
79314- local_sub(length, &tail_page->write);
79315+ local_sub_unchecked(length, &tail_page->write);
79316 }
79317
79318 /*
79319@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79320 * about it.
79321 */
79322 if (unlikely(next_page == commit_page)) {
79323- local_inc(&cpu_buffer->commit_overrun);
79324+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79325 goto out_reset;
79326 }
79327
79328@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79329 cpu_buffer->tail_page) &&
79330 (cpu_buffer->commit_page ==
79331 cpu_buffer->reader_page))) {
79332- local_inc(&cpu_buffer->commit_overrun);
79333+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79334 goto out_reset;
79335 }
79336 }
79337@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79338 length += RB_LEN_TIME_EXTEND;
79339
79340 tail_page = cpu_buffer->tail_page;
79341- write = local_add_return(length, &tail_page->write);
79342+ write = local_add_return_unchecked(length, &tail_page->write);
79343
79344 /* set write to only the index of the write */
79345 write &= RB_WRITE_MASK;
79346@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79347 kmemcheck_annotate_bitfield(event, bitfield);
79348 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
79349
79350- local_inc(&tail_page->entries);
79351+ local_inc_unchecked(&tail_page->entries);
79352
79353 /*
79354 * If this is the first commit on the page, then update
79355@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79356
79357 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
79358 unsigned long write_mask =
79359- local_read(&bpage->write) & ~RB_WRITE_MASK;
79360+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
79361 unsigned long event_length = rb_event_length(event);
79362 /*
79363 * This is on the tail page. It is possible that
79364@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79365 */
79366 old_index += write_mask;
79367 new_index += write_mask;
79368- index = local_cmpxchg(&bpage->write, old_index, new_index);
79369+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
79370 if (index == old_index) {
79371 /* update counters */
79372 local_sub(event_length, &cpu_buffer->entries_bytes);
79373@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79374
79375 /* Do the likely case first */
79376 if (likely(bpage->page == (void *)addr)) {
79377- local_dec(&bpage->entries);
79378+ local_dec_unchecked(&bpage->entries);
79379 return;
79380 }
79381
79382@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79383 start = bpage;
79384 do {
79385 if (bpage->page == (void *)addr) {
79386- local_dec(&bpage->entries);
79387+ local_dec_unchecked(&bpage->entries);
79388 return;
79389 }
79390 rb_inc_page(cpu_buffer, &bpage);
79391@@ -2926,7 +2926,7 @@ static inline unsigned long
79392 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
79393 {
79394 return local_read(&cpu_buffer->entries) -
79395- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
79396+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
79397 }
79398
79399 /**
79400@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
79401 return 0;
79402
79403 cpu_buffer = buffer->buffers[cpu];
79404- ret = local_read(&cpu_buffer->overrun);
79405+ ret = local_read_unchecked(&cpu_buffer->overrun);
79406
79407 return ret;
79408 }
79409@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
79410 return 0;
79411
79412 cpu_buffer = buffer->buffers[cpu];
79413- ret = local_read(&cpu_buffer->commit_overrun);
79414+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
79415
79416 return ret;
79417 }
79418@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
79419 /* if you care about this being correct, lock the buffer */
79420 for_each_buffer_cpu(buffer, cpu) {
79421 cpu_buffer = buffer->buffers[cpu];
79422- overruns += local_read(&cpu_buffer->overrun);
79423+ overruns += local_read_unchecked(&cpu_buffer->overrun);
79424 }
79425
79426 return overruns;
79427@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
79428 /*
79429 * Reset the reader page to size zero.
79430 */
79431- local_set(&cpu_buffer->reader_page->write, 0);
79432- local_set(&cpu_buffer->reader_page->entries, 0);
79433+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79434+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79435 local_set(&cpu_buffer->reader_page->page->commit, 0);
79436 cpu_buffer->reader_page->real_end = 0;
79437
79438@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
79439 * want to compare with the last_overrun.
79440 */
79441 smp_mb();
79442- overwrite = local_read(&(cpu_buffer->overrun));
79443+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
79444
79445 /*
79446 * Here's the tricky part.
79447@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79448
79449 cpu_buffer->head_page
79450 = list_entry(cpu_buffer->pages, struct buffer_page, list);
79451- local_set(&cpu_buffer->head_page->write, 0);
79452- local_set(&cpu_buffer->head_page->entries, 0);
79453+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
79454+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
79455 local_set(&cpu_buffer->head_page->page->commit, 0);
79456
79457 cpu_buffer->head_page->read = 0;
79458@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79459
79460 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
79461 INIT_LIST_HEAD(&cpu_buffer->new_pages);
79462- local_set(&cpu_buffer->reader_page->write, 0);
79463- local_set(&cpu_buffer->reader_page->entries, 0);
79464+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79465+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79466 local_set(&cpu_buffer->reader_page->page->commit, 0);
79467 cpu_buffer->reader_page->read = 0;
79468
79469 local_set(&cpu_buffer->entries_bytes, 0);
79470- local_set(&cpu_buffer->overrun, 0);
79471- local_set(&cpu_buffer->commit_overrun, 0);
79472+ local_set_unchecked(&cpu_buffer->overrun, 0);
79473+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
79474 local_set(&cpu_buffer->dropped_events, 0);
79475 local_set(&cpu_buffer->entries, 0);
79476 local_set(&cpu_buffer->committing, 0);
79477@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
79478 rb_init_page(bpage);
79479 bpage = reader->page;
79480 reader->page = *data_page;
79481- local_set(&reader->write, 0);
79482- local_set(&reader->entries, 0);
79483+ local_set_unchecked(&reader->write, 0);
79484+ local_set_unchecked(&reader->entries, 0);
79485 reader->read = 0;
79486 *data_page = bpage;
79487
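[annotation] The ring-buffer hunks above convert pure statistics fields (write, entries, overrun, commit_overrun) from the checked local_t operations to the *_unchecked variants. Under the PaX REFCOUNT instrumentation the checked atomics trap on signed overflow; counters that are expected to wrap are opted out via the unchecked API. A minimal userspace sketch of that split follows — the type names and the saturating policy are illustrative, not the kernel's implementation, and it should be compiled with -fwrapv so the wrap in the unchecked path is well defined:

/* Userspace sketch of the checked vs. unchecked counter split used by
 * PaX REFCOUNT.  The kernel types and names differ; everything below
 * is illustrative.  Compile with -fwrapv. */
#include <stdio.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_checked_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* Checked increment: detect signed overflow and saturate, roughly what
 * the REFCOUNT instrumentation does (in the kernel it raises a trap). */
static void atomic_checked_inc(atomic_checked_t *v)
{
	int old = v->counter, new;
	if (__builtin_add_overflow(old, 1, &new)) {
		fprintf(stderr, "refcount overflow detected, saturating\n");
		new = INT_MAX;
	}
	v->counter = new;
}

/* Unchecked increment: plain wrapping arithmetic, appropriate for
 * statistics such as the ring buffer's overrun counters. */
static void atomic_unchecked_inc(atomic_unchecked_t *v)
{
	v->counter++;	/* may wrap from INT_MAX to INT_MIN, by design */
}

int main(void)
{
	atomic_checked_t ref = { INT_MAX };
	atomic_unchecked_t stat = { INT_MAX };

	atomic_checked_inc(&ref);	/* caught and saturated */
	atomic_unchecked_inc(&stat);	/* silently wraps */
	printf("ref=%d stat=%d\n", ref.counter, stat.counter);
	return 0;
}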
79488diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
79489index fe1d581..43a0f38 100644
79490--- a/kernel/trace/trace.c
79491+++ b/kernel/trace/trace.c
79492@@ -4494,10 +4494,9 @@ static const struct file_operations tracing_dyn_info_fops = {
79493 };
79494 #endif
79495
79496-static struct dentry *d_tracer;
79497-
79498 struct dentry *tracing_init_dentry(void)
79499 {
79500+ static struct dentry *d_tracer;
79501 static int once;
79502
79503 if (d_tracer)
79504@@ -4517,10 +4516,9 @@ struct dentry *tracing_init_dentry(void)
79505 return d_tracer;
79506 }
79507
79508-static struct dentry *d_percpu;
79509-
79510 struct dentry *tracing_dentry_percpu(void)
79511 {
79512+ static struct dentry *d_percpu;
79513 static int once;
79514 struct dentry *d_tracer;
79515
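[annotation] The trace.c hunk narrows d_tracer and d_percpu from file scope to function-local statics, so only the guarded initializer can reach them. A small sketch of the same lazily initialized singleton pattern (an illustrative stand-in, not kernel code):

/* Sketch of the scope narrowing above: the cached pointer moves from
 * file scope into the only function that uses it.  Names illustrative. */
#include <stdio.h>
#include <stdlib.h>

static char *tracing_init_dentry(void)
{
	static char *d_tracer;	/* visible only inside this function */
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = malloc(16);
	if (!d_tracer) {
		if (!once) {
			once = 1;
			fprintf(stderr, "could not create debugfs directory\n");
		}
		return NULL;
	}
	return d_tracer;
}

int main(void)
{
	printf("%p\n", (void *)tracing_init_dentry());
	printf("%p\n", (void *)tracing_init_dentry());	/* same pointer */
	return 0;
}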
79516diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
79517index 880073d..42db7c3 100644
79518--- a/kernel/trace/trace_events.c
79519+++ b/kernel/trace/trace_events.c
79520@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
79521 struct ftrace_module_file_ops {
79522 struct list_head list;
79523 struct module *mod;
79524- struct file_operations id;
79525- struct file_operations enable;
79526- struct file_operations format;
79527- struct file_operations filter;
79528 };
79529
79530 static struct ftrace_module_file_ops *
79531@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
79532
79533 file_ops->mod = mod;
79534
79535- file_ops->id = ftrace_event_id_fops;
79536- file_ops->id.owner = mod;
79537-
79538- file_ops->enable = ftrace_enable_fops;
79539- file_ops->enable.owner = mod;
79540-
79541- file_ops->filter = ftrace_event_filter_fops;
79542- file_ops->filter.owner = mod;
79543-
79544- file_ops->format = ftrace_event_format_fops;
79545- file_ops->format.owner = mod;
79546+ pax_open_kernel();
79547+ mod->trace_id.owner = mod;
79548+ mod->trace_enable.owner = mod;
79549+ mod->trace_filter.owner = mod;
79550+ mod->trace_format.owner = mod;
79551+ pax_close_kernel();
79552
79553 list_add(&file_ops->list, &ftrace_module_file_list);
79554
79555@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
79556
79557 for_each_event(call, start, end) {
79558 __trace_add_event_call(*call, mod,
79559- &file_ops->id, &file_ops->enable,
79560- &file_ops->filter, &file_ops->format);
79561+ &mod->trace_id, &mod->trace_enable,
79562+ &mod->trace_filter, &mod->trace_format);
79563 }
79564 }
79565
79566diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
79567index fd3c8aa..5f324a6 100644
79568--- a/kernel/trace/trace_mmiotrace.c
79569+++ b/kernel/trace/trace_mmiotrace.c
79570@@ -24,7 +24,7 @@ struct header_iter {
79571 static struct trace_array *mmio_trace_array;
79572 static bool overrun_detected;
79573 static unsigned long prev_overruns;
79574-static atomic_t dropped_count;
79575+static atomic_unchecked_t dropped_count;
79576
79577 static void mmio_reset_data(struct trace_array *tr)
79578 {
79579@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
79580
79581 static unsigned long count_overruns(struct trace_iterator *iter)
79582 {
79583- unsigned long cnt = atomic_xchg(&dropped_count, 0);
79584+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
79585 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
79586
79587 if (over > prev_overruns)
79588@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
79589 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
79590 sizeof(*entry), 0, pc);
79591 if (!event) {
79592- atomic_inc(&dropped_count);
79593+ atomic_inc_unchecked(&dropped_count);
79594 return;
79595 }
79596 entry = ring_buffer_event_data(event);
79597@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
79598 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
79599 sizeof(*entry), 0, pc);
79600 if (!event) {
79601- atomic_inc(&dropped_count);
79602+ atomic_inc_unchecked(&dropped_count);
79603 return;
79604 }
79605 entry = ring_buffer_event_data(event);
79606diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
79607index 194d796..76edb8f 100644
79608--- a/kernel/trace/trace_output.c
79609+++ b/kernel/trace/trace_output.c
79610@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
79611
79612 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
79613 if (!IS_ERR(p)) {
79614- p = mangle_path(s->buffer + s->len, p, "\n");
79615+ p = mangle_path(s->buffer + s->len, p, "\n\\");
79616 if (p) {
79617 s->len = p - s->buffer;
79618 return 1;
79619@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
79620 goto out;
79621 }
79622
79623+ pax_open_kernel();
79624 if (event->funcs->trace == NULL)
79625- event->funcs->trace = trace_nop_print;
79626+ *(void **)&event->funcs->trace = trace_nop_print;
79627 if (event->funcs->raw == NULL)
79628- event->funcs->raw = trace_nop_print;
79629+ *(void **)&event->funcs->raw = trace_nop_print;
79630 if (event->funcs->hex == NULL)
79631- event->funcs->hex = trace_nop_print;
79632+ *(void **)&event->funcs->hex = trace_nop_print;
79633 if (event->funcs->binary == NULL)
79634- event->funcs->binary = trace_nop_print;
79635+ *(void **)&event->funcs->binary = trace_nop_print;
79636+ pax_close_kernel();
79637
79638 key = event->type & (EVENT_HASHSIZE - 1);
79639
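[annotation] The trace_output.c hunk writes default handlers into a function-pointer table that the constify plugin has made read-only, so the stores go through a pax_open_kernel()/pax_close_kernel() window, with a *(void **)& cast stripping the const. A userspace model of the write window using mprotect() — in the kernel the window is implemented differently (e.g. by toggling CR0.WP on x86), and all names here are illustrative:

/* Userspace model of the pax_open_kernel()/pax_close_kernel() write
 * window: an ops table on a page we flip between read-only and
 * writable.  Illustrative only. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct trace_funcs { void (*trace)(void); };

static void trace_nop_print(void) { puts("nop print"); }

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	struct trace_funcs *funcs = mmap(NULL, psz, PROT_READ | PROT_WRITE,
					 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (funcs == MAP_FAILED)
		return 1;

	memset(funcs, 0, sizeof(*funcs));
	mprotect(funcs, psz, PROT_READ);		/* table is now read-only */

	mprotect(funcs, psz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	if (funcs->trace == NULL)
		funcs->trace = trace_nop_print;		/* patch in the default */
	mprotect(funcs, psz, PROT_READ);		/* pax_close_kernel() */

	funcs->trace();					/* "nop print" */
	return 0;
}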
79640diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
79641index 42ca822..cdcacc6 100644
79642--- a/kernel/trace/trace_stack.c
79643+++ b/kernel/trace/trace_stack.c
79644@@ -52,7 +52,7 @@ static inline void check_stack(void)
79645 return;
79646
79647 /* we do not handle interrupt stacks yet */
79648- if (!object_is_on_stack(&this_size))
79649+ if (!object_starts_on_stack(&this_size))
79650 return;
79651
79652 local_irq_save(flags);
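[annotation] object_starts_on_stack(), substituted above and in the debugobjects/dma-debug hunks below, only asks whether the object's first byte lies on the current stack. A rough userspace analogue — glibc-specific pthread_getattr_np(), build with -pthread; this is a sketch, not the kernel helper:

/* Rough userspace analogue of object_starts_on_stack(): test whether
 * the *start* of an object lies within the current thread's stack. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int object_starts_on_stack(const void *obj)
{
	pthread_attr_t attr;
	void *stack;
	size_t size;

	pthread_getattr_np(pthread_self(), &attr);
	pthread_attr_getstack(&attr, &stack, &size);
	pthread_attr_destroy(&attr);

	return (const char *)obj >= (char *)stack &&
	       (const char *)obj <  (char *)stack + size;
}

int main(void)
{
	int on_stack;
	int *heap = malloc(sizeof(*heap));

	printf("local: %d\n", object_starts_on_stack(&on_stack));	/* 1 */
	printf("heap:  %d\n", object_starts_on_stack(heap));		/* 0 */
	free(heap);
	return 0;
}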
79653diff --git a/kernel/user.c b/kernel/user.c
79654index 7f6ff2b..1ac8f18 100644
79655--- a/kernel/user.c
79656+++ b/kernel/user.c
79657@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
79658 .count = 4294967295U,
79659 },
79660 },
79661- .kref = {
79662- .refcount = ATOMIC_INIT(3),
79663- },
79664+ .count = ATOMIC_INIT(3),
79665 .owner = GLOBAL_ROOT_UID,
79666 .group = GLOBAL_ROOT_GID,
79667 .proc_inum = PROC_USER_INIT_INO,
79668diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
79669index f45e128..a5a5fb6 100644
79670--- a/kernel/user_namespace.c
79671+++ b/kernel/user_namespace.c
79672@@ -88,7 +88,7 @@ int create_user_ns(struct cred *new)
79673 return ret;
79674 }
79675
79676- kref_init(&ns->kref);
79677+ atomic_set(&ns->count, 1);
79678 /* Leave the new->user_ns reference with the new user namespace. */
79679 ns->parent = parent_ns;
79680 ns->owner = owner;
79681@@ -116,15 +116,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
79682 return create_user_ns(cred);
79683 }
79684
79685-void free_user_ns(struct kref *kref)
79686+void free_user_ns(struct user_namespace *ns)
79687 {
79688- struct user_namespace *parent, *ns =
79689- container_of(kref, struct user_namespace, kref);
79690+ struct user_namespace *parent;
79691
79692- parent = ns->parent;
79693- proc_free_inum(ns->proc_inum);
79694- kmem_cache_free(user_ns_cachep, ns);
79695- put_user_ns(parent);
79696+ do {
79697+ parent = ns->parent;
79698+ proc_free_inum(ns->proc_inum);
79699+ kmem_cache_free(user_ns_cachep, ns);
79700+ ns = parent;
79701+ } while (atomic_dec_and_test(&parent->count));
79702 }
79703 EXPORT_SYMBOL(free_user_ns);
79704
79705@@ -815,7 +816,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
79706 if (atomic_read(&current->mm->mm_users) > 1)
79707 return -EINVAL;
79708
79709- if (current->fs->users != 1)
79710+ if (atomic_read(&current->fs->users) != 1)
79711 return -EINVAL;
79712
79713 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
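[annotation] The user_namespace.c hunks replace the kref with a bare atomic count and make free_user_ns() walk the parent chain iteratively, so tearing down a deep chain of namespaces drops each parent reference in a loop rather than recursing through put_user_ns(). A userspace sketch of that release loop — plain ints stand in for atomics, and the NULL guard is an adaptation for this demo, since the kernel's init namespace is never freed:

/* Sketch of the iterative parent-chain release above. */
#include <stdio.h>
#include <stdlib.h>

struct ns {
	int count;		/* stands in for atomic_t */
	struct ns *parent;
};

static void free_ns(struct ns *ns)
{
	struct ns *parent;

	do {
		parent = ns->parent;
		printf("freeing %p\n", (void *)ns);
		free(ns);
		ns = parent;
	} while (ns && --ns->count == 0);	/* atomic_dec_and_test() */
}

static void put_ns(struct ns *ns)
{
	if (--ns->count == 0)
		free_ns(ns);
}

int main(void)
{
	struct ns *root  = calloc(1, sizeof(*root));
	struct ns *child = calloc(1, sizeof(*child));

	root->count = 1;	/* held only by child */
	child->count = 1;	/* held only by us */
	child->parent = root;

	put_ns(child);		/* frees child, then root */
	return 0;
}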
79714diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
79715index 63da38c..639904e 100644
79716--- a/kernel/utsname_sysctl.c
79717+++ b/kernel/utsname_sysctl.c
79718@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
79719 static int proc_do_uts_string(ctl_table *table, int write,
79720 void __user *buffer, size_t *lenp, loff_t *ppos)
79721 {
79722- struct ctl_table uts_table;
79723+ ctl_table_no_const uts_table;
79724 int r;
79725 memcpy(&uts_table, table, sizeof(uts_table));
79726 uts_table.data = get_uts(table, write);
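[annotation] ctl_table_no_const, used above (and in the hugetlb hunks below), is the escape hatch for the constify plugin: the table type itself is forced read-only, so handlers that need a scratch copy declare a non-const local and memcpy into it. A sketch of the pattern — the typedef here is a plain alias, whereas the real one differs under the plugin:

/* Sketch of the ctl_table_no_const scratch-copy pattern.  Illustrative. */
#include <stdio.h>
#include <string.h>

struct ctl_table { const char *procname; void *data; };
typedef struct ctl_table ctl_table_no_const;	/* plugin's version differs */

static const struct ctl_table uts_entry = { "hostname", NULL };

static void proc_do_uts_string(const struct ctl_table *table, char *buf)
{
	ctl_table_no_const uts_table;		/* mutable stack copy */

	memcpy(&uts_table, table, sizeof(uts_table));
	uts_table.data = buf;			/* point at scratch data */
	printf("%s -> %s\n", uts_table.procname, (char *)uts_table.data);
}

int main(void)
{
	char buf[] = "example";
	proc_do_uts_string(&uts_entry, buf);
	return 0;
}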
79727diff --git a/kernel/watchdog.c b/kernel/watchdog.c
79728index 75a2ab3..5961da7 100644
79729--- a/kernel/watchdog.c
79730+++ b/kernel/watchdog.c
79731@@ -527,7 +527,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
79732 }
79733 #endif /* CONFIG_SYSCTL */
79734
79735-static struct smp_hotplug_thread watchdog_threads = {
79736+static struct smp_hotplug_thread watchdog_threads __read_only = {
79737 .store = &softlockup_watchdog,
79738 .thread_should_run = watchdog_should_run,
79739 .thread_fn = watchdog,
79740diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
79741index 67604e5..fe94fb1 100644
79742--- a/lib/Kconfig.debug
79743+++ b/lib/Kconfig.debug
79744@@ -550,7 +550,7 @@ config DEBUG_MUTEXES
79745
79746 config DEBUG_LOCK_ALLOC
79747 bool "Lock debugging: detect incorrect freeing of live locks"
79748- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79749+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79750 select DEBUG_SPINLOCK
79751 select DEBUG_MUTEXES
79752 select LOCKDEP
79753@@ -564,7 +564,7 @@ config DEBUG_LOCK_ALLOC
79754
79755 config PROVE_LOCKING
79756 bool "Lock debugging: prove locking correctness"
79757- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79758+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79759 select LOCKDEP
79760 select DEBUG_SPINLOCK
79761 select DEBUG_MUTEXES
79762@@ -670,7 +670,7 @@ config LOCKDEP
79763
79764 config LOCK_STAT
79765 bool "Lock usage statistics"
79766- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79767+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79768 select LOCKDEP
79769 select DEBUG_SPINLOCK
79770 select DEBUG_MUTEXES
79771@@ -1278,6 +1278,7 @@ config LATENCYTOP
79772 depends on DEBUG_KERNEL
79773 depends on STACKTRACE_SUPPORT
79774 depends on PROC_FS
79775+ depends on !GRKERNSEC_HIDESYM
79776 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
79777 select KALLSYMS
79778 select KALLSYMS_ALL
79779@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
79780
79781 config PROVIDE_OHCI1394_DMA_INIT
79782 bool "Remote debugging over FireWire early on boot"
79783- depends on PCI && X86
79784+ depends on PCI && X86 && !GRKERNSEC
79785 help
79786 If you want to debug problems which hang or crash the kernel early
79787 on boot and the crashing machine has a FireWire port, you can use
79788@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
79789
79790 config FIREWIRE_OHCI_REMOTE_DMA
79791 bool "Remote debugging over FireWire with firewire-ohci"
79792- depends on FIREWIRE_OHCI
79793+ depends on FIREWIRE_OHCI && !GRKERNSEC
79794 help
79795 This option lets you use the FireWire bus for remote debugging
79796 with help of the firewire-ohci driver. It enables unfiltered
79797diff --git a/lib/Makefile b/lib/Makefile
79798index 02ed6c0..bd243da 100644
79799--- a/lib/Makefile
79800+++ b/lib/Makefile
79801@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
79802
79803 obj-$(CONFIG_BTREE) += btree.o
79804 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
79805-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
79806+obj-y += list_debug.o
79807 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
79808
79809 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
79810diff --git a/lib/bitmap.c b/lib/bitmap.c
79811index 06f7e4f..f3cf2b0 100644
79812--- a/lib/bitmap.c
79813+++ b/lib/bitmap.c
79814@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
79815 {
79816 int c, old_c, totaldigits, ndigits, nchunks, nbits;
79817 u32 chunk;
79818- const char __user __force *ubuf = (const char __user __force *)buf;
79819+ const char __user *ubuf = (const char __force_user *)buf;
79820
79821 bitmap_zero(maskp, nmaskbits);
79822
79823@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
79824 {
79825 if (!access_ok(VERIFY_READ, ubuf, ulen))
79826 return -EFAULT;
79827- return __bitmap_parse((const char __force *)ubuf,
79828+ return __bitmap_parse((const char __force_kernel *)ubuf,
79829 ulen, 1, maskp, nmaskbits);
79830
79831 }
79832@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
79833 {
79834 unsigned a, b;
79835 int c, old_c, totaldigits;
79836- const char __user __force *ubuf = (const char __user __force *)buf;
79837+ const char __user *ubuf = (const char __force_user *)buf;
79838 int exp_digit, in_range;
79839
79840 totaldigits = c = 0;
79841@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
79842 {
79843 if (!access_ok(VERIFY_READ, ubuf, ulen))
79844 return -EFAULT;
79845- return __bitmap_parselist((const char __force *)ubuf,
79846+ return __bitmap_parselist((const char __force_kernel *)ubuf,
79847 ulen, 1, maskp, nmaskbits);
79848 }
79849 EXPORT_SYMBOL(bitmap_parselist_user);
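[annotation] The __force_user/__force_kernel casts above are sparse address-space annotations: the buffer really is a kernel pointer, but it is deliberately fed through the __user-typed parser under set_fs(KERNEL_DS), and the forced cast tells the checker the mismatch is intended. A sketch of how such annotations behave — active under sparse (__CHECKER__), no-ops for the compiler; the macro definitions are a plausible reconstruction, not copied from the patch:

/* Sparse address-space annotations: real to the checker, empty to gcc. */
#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

/* Stand-in for copy_from_user(): takes a __user pointer. */
static long fake_copy_from_user(void *dst, const void __user *src, size_t n)
{
	memcpy(dst, (const void __force *)src, n);	/* sketch only */
	return 0;
}

int main(void)
{
	/* A kernel-space buffer deliberately passed where a user pointer
	 * is expected -- the __force_user cast marks this as intended. */
	const char kbuf[] = "0xff";
	char dst[8];

	fake_copy_from_user(dst, (const char __force_user *)kbuf, sizeof(kbuf));
	printf("%s\n", dst);
	return 0;
}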
79850diff --git a/lib/bug.c b/lib/bug.c
79851index d0cdf14..4d07bd2 100644
79852--- a/lib/bug.c
79853+++ b/lib/bug.c
79854@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
79855 return BUG_TRAP_TYPE_NONE;
79856
79857 bug = find_bug(bugaddr);
79858+ if (!bug)
79859+ return BUG_TRAP_TYPE_NONE;
79860
79861 file = NULL;
79862 line = 0;
79863diff --git a/lib/debugobjects.c b/lib/debugobjects.c
79864index d11808c..dc2d6f8 100644
79865--- a/lib/debugobjects.c
79866+++ b/lib/debugobjects.c
79867@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
79868 if (limit > 4)
79869 return;
79870
79871- is_on_stack = object_is_on_stack(addr);
79872+ is_on_stack = object_starts_on_stack(addr);
79873 if (is_on_stack == onstack)
79874 return;
79875
79876diff --git a/lib/devres.c b/lib/devres.c
79877index 80b9c76..9e32279 100644
79878--- a/lib/devres.c
79879+++ b/lib/devres.c
79880@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
79881 void devm_iounmap(struct device *dev, void __iomem *addr)
79882 {
79883 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
79884- (void *)addr));
79885+ (void __force *)addr));
79886 iounmap(addr);
79887 }
79888 EXPORT_SYMBOL(devm_iounmap);
79889@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
79890 {
79891 ioport_unmap(addr);
79892 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
79893- devm_ioport_map_match, (void *)addr));
79894+ devm_ioport_map_match, (void __force *)addr));
79895 }
79896 EXPORT_SYMBOL(devm_ioport_unmap);
79897
79898diff --git a/lib/div64.c b/lib/div64.c
79899index a163b6c..9618fa5 100644
79900--- a/lib/div64.c
79901+++ b/lib/div64.c
79902@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
79903 EXPORT_SYMBOL(__div64_32);
79904
79905 #ifndef div_s64_rem
79906-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79907+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79908 {
79909 u64 quotient;
79910
79911@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
79912 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
79913 */
79914 #ifndef div64_u64
79915-u64 div64_u64(u64 dividend, u64 divisor)
79916+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
79917 {
79918 u32 high = divisor >> 32;
79919 u64 quot;
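[annotation] __intentional_overflow(-1), added above, marks a function as exempt from the size_overflow gcc plugin's arithmetic checks. A sketch of such a marker with a no-op fallback so the code still builds without the plugin — the fallback shown is an assumption, not the patch's own definition:

/* Marker attribute consumed by the size_overflow plugin; empty otherwise. */
#include <stdio.h>
#include <stdint.h>

#if defined(SIZE_OVERFLOW_PLUGIN)
# define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif

/* 64/64 division truncates on purpose; mark it exempt. */
static uint64_t __intentional_overflow(-1) div64_u64(uint64_t dividend,
						     uint64_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)div64_u64(10, 3));
	return 0;
}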
79920diff --git a/lib/dma-debug.c b/lib/dma-debug.c
79921index 5e396ac..58d5de1 100644
79922--- a/lib/dma-debug.c
79923+++ b/lib/dma-debug.c
79924@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
79925
79926 void dma_debug_add_bus(struct bus_type *bus)
79927 {
79928- struct notifier_block *nb;
79929+ notifier_block_no_const *nb;
79930
79931 if (global_disable)
79932 return;
79933@@ -942,7 +942,7 @@ out:
79934
79935 static void check_for_stack(struct device *dev, void *addr)
79936 {
79937- if (object_is_on_stack(addr))
79938+ if (object_starts_on_stack(addr))
79939 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
79940 "stack [addr=%p]\n", addr);
79941 }
79942diff --git a/lib/inflate.c b/lib/inflate.c
79943index 013a761..c28f3fc 100644
79944--- a/lib/inflate.c
79945+++ b/lib/inflate.c
79946@@ -269,7 +269,7 @@ static void free(void *where)
79947 malloc_ptr = free_mem_ptr;
79948 }
79949 #else
79950-#define malloc(a) kmalloc(a, GFP_KERNEL)
79951+#define malloc(a) kmalloc((a), GFP_KERNEL)
79952 #define free(a) kfree(a)
79953 #endif
79954
79955diff --git a/lib/ioremap.c b/lib/ioremap.c
79956index 0c9216c..863bd89 100644
79957--- a/lib/ioremap.c
79958+++ b/lib/ioremap.c
79959@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
79960 unsigned long next;
79961
79962 phys_addr -= addr;
79963- pmd = pmd_alloc(&init_mm, pud, addr);
79964+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
79965 if (!pmd)
79966 return -ENOMEM;
79967 do {
79968@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
79969 unsigned long next;
79970
79971 phys_addr -= addr;
79972- pud = pud_alloc(&init_mm, pgd, addr);
79973+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
79974 if (!pud)
79975 return -ENOMEM;
79976 do {
79977diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
79978index bd2bea9..6b3c95e 100644
79979--- a/lib/is_single_threaded.c
79980+++ b/lib/is_single_threaded.c
79981@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
79982 struct task_struct *p, *t;
79983 bool ret;
79984
79985+ if (!mm)
79986+ return true;
79987+
79988 if (atomic_read(&task->signal->live) != 1)
79989 return false;
79990
79991diff --git a/lib/kobject.c b/lib/kobject.c
79992index e07ee1f..998489d 100644
79993--- a/lib/kobject.c
79994+++ b/lib/kobject.c
79995@@ -852,9 +852,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
79996
79997
79998 static DEFINE_SPINLOCK(kobj_ns_type_lock);
79999-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
80000+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
80001
80002-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80003+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80004 {
80005 enum kobj_ns_type type = ops->type;
80006 int error;
80007diff --git a/lib/list_debug.c b/lib/list_debug.c
80008index c24c2f7..06e070b 100644
80009--- a/lib/list_debug.c
80010+++ b/lib/list_debug.c
80011@@ -11,7 +11,9 @@
80012 #include <linux/bug.h>
80013 #include <linux/kernel.h>
80014 #include <linux/rculist.h>
80015+#include <linux/mm.h>
80016
80017+#ifdef CONFIG_DEBUG_LIST
80018 /*
80019 * Insert a new entry between two known consecutive entries.
80020 *
80021@@ -19,21 +21,32 @@
80022 * the prev/next entries already!
80023 */
80024
80025-void __list_add(struct list_head *new,
80026- struct list_head *prev,
80027- struct list_head *next)
80028+static bool __list_add_debug(struct list_head *new,
80029+ struct list_head *prev,
80030+ struct list_head *next)
80031 {
80032- WARN(next->prev != prev,
80033+ if (WARN(next->prev != prev,
80034 "list_add corruption. next->prev should be "
80035 "prev (%p), but was %p. (next=%p).\n",
80036- prev, next->prev, next);
80037- WARN(prev->next != next,
80038+ prev, next->prev, next) ||
80039+ WARN(prev->next != next,
80040 "list_add corruption. prev->next should be "
80041 "next (%p), but was %p. (prev=%p).\n",
80042- next, prev->next, prev);
80043- WARN(new == prev || new == next,
80044- "list_add double add: new=%p, prev=%p, next=%p.\n",
80045- new, prev, next);
80046+ next, prev->next, prev) ||
80047+ WARN(new == prev || new == next,
80048+ "list_add double add: new=%p, prev=%p, next=%p.\n",
80049+ new, prev, next))
80050+ return false;
80051+ return true;
80052+}
80053+
80054+void __list_add(struct list_head *new,
80055+ struct list_head *prev,
80056+ struct list_head *next)
80057+{
80058+ if (!__list_add_debug(new, prev, next))
80059+ return;
80060+
80061 next->prev = new;
80062 new->next = next;
80063 new->prev = prev;
80064@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
80065 }
80066 EXPORT_SYMBOL(__list_add);
80067
80068-void __list_del_entry(struct list_head *entry)
80069+static bool __list_del_entry_debug(struct list_head *entry)
80070 {
80071 struct list_head *prev, *next;
80072
80073@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
80074 WARN(next->prev != entry,
80075 "list_del corruption. next->prev should be %p, "
80076 "but was %p\n", entry, next->prev))
80077+ return false;
80078+ return true;
80079+}
80080+
80081+void __list_del_entry(struct list_head *entry)
80082+{
80083+ if (!__list_del_entry_debug(entry))
80084 return;
80085
80086- __list_del(prev, next);
80087+ __list_del(entry->prev, entry->next);
80088 }
80089 EXPORT_SYMBOL(__list_del_entry);
80090
80091@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
80092 void __list_add_rcu(struct list_head *new,
80093 struct list_head *prev, struct list_head *next)
80094 {
80095- WARN(next->prev != prev,
80096- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
80097- prev, next->prev, next);
80098- WARN(prev->next != next,
80099- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
80100- next, prev->next, prev);
80101+ if (!__list_add_debug(new, prev, next))
80102+ return;
80103+
80104 new->next = next;
80105 new->prev = prev;
80106 rcu_assign_pointer(list_next_rcu(prev), new);
80107 next->prev = new;
80108 }
80109 EXPORT_SYMBOL(__list_add_rcu);
80110+#endif
80111+
80112+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
80113+{
80114+#ifdef CONFIG_DEBUG_LIST
80115+ if (!__list_add_debug(new, prev, next))
80116+ return;
80117+#endif
80118+
80119+ pax_open_kernel();
80120+ next->prev = new;
80121+ new->next = next;
80122+ new->prev = prev;
80123+ prev->next = new;
80124+ pax_close_kernel();
80125+}
80126+EXPORT_SYMBOL(__pax_list_add);
80127+
80128+void pax_list_del(struct list_head *entry)
80129+{
80130+#ifdef CONFIG_DEBUG_LIST
80131+ if (!__list_del_entry_debug(entry))
80132+ return;
80133+#endif
80134+
80135+ pax_open_kernel();
80136+ __list_del(entry->prev, entry->next);
80137+ entry->next = LIST_POISON1;
80138+ entry->prev = LIST_POISON2;
80139+ pax_close_kernel();
80140+}
80141+EXPORT_SYMBOL(pax_list_del);
80142+
80143+void pax_list_del_init(struct list_head *entry)
80144+{
80145+ pax_open_kernel();
80146+ __list_del(entry->prev, entry->next);
80147+ INIT_LIST_HEAD(entry);
80148+ pax_close_kernel();
80149+}
80150+EXPORT_SYMBOL(pax_list_del_init);
80151+
80152+void __pax_list_add_rcu(struct list_head *new,
80153+ struct list_head *prev, struct list_head *next)
80154+{
80155+#ifdef CONFIG_DEBUG_LIST
80156+ if (!__list_add_debug(new, prev, next))
80157+ return;
80158+#endif
80159+
80160+ pax_open_kernel();
80161+ new->next = next;
80162+ new->prev = prev;
80163+ rcu_assign_pointer(list_next_rcu(prev), new);
80164+ next->prev = new;
80165+ pax_close_kernel();
80166+}
80167+EXPORT_SYMBOL(__pax_list_add_rcu);
80168+
80169+void pax_list_del_rcu(struct list_head *entry)
80170+{
80171+#ifdef CONFIG_DEBUG_LIST
80172+ if (!__list_del_entry_debug(entry))
80173+ return;
80174+#endif
80175+
80176+ pax_open_kernel();
80177+ __list_del(entry->prev, entry->next);
80178+ entry->next = LIST_POISON1;
80179+ entry->prev = LIST_POISON2;
80180+ pax_close_kernel();
80181+}
80182+EXPORT_SYMBOL(pax_list_del_rcu);
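[annotation] The list_debug.c rewrite above splits each corruption check into a boolean predicate consulted before any pointer is written, which the plain entry points and the new pax_list_* variants share; a corrupted list is reported and left untouched. A self-contained userspace sketch of that check-then-link pattern:

/* Minimal doubly-linked list with the check-before-write discipline. */
#include <stdio.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

static bool __list_add_debug(struct list_head *new,
			     struct list_head *prev,
			     struct list_head *next)
{
	if (next->prev != prev) {
		fprintf(stderr, "list_add corruption: next->prev != prev\n");
		return false;
	}
	if (prev->next != next) {
		fprintf(stderr, "list_add corruption: prev->next != next\n");
		return false;
	}
	if (new == prev || new == next) {
		fprintf(stderr, "list_add double add\n");
		return false;
	}
	return true;
}

static void __list_add(struct list_head *new,
		       struct list_head *prev,
		       struct list_head *next)
{
	if (!__list_add_debug(new, prev, next))
		return;			/* refuse to touch corrupt lists */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head a, b;

	__list_add(&a, &head, head.next);	/* ok */
	head.next = &b;				/* simulate corruption */
	__list_add(&b, &head, &a);		/* detected, list untouched */
	return 0;
}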
80183diff --git a/lib/radix-tree.c b/lib/radix-tree.c
80184index e796429..6e38f9f 100644
80185--- a/lib/radix-tree.c
80186+++ b/lib/radix-tree.c
80187@@ -92,7 +92,7 @@ struct radix_tree_preload {
80188 int nr;
80189 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
80190 };
80191-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
80192+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
80193
80194 static inline void *ptr_to_indirect(void *ptr)
80195 {
80196diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
80197index bb2b201..46abaf9 100644
80198--- a/lib/strncpy_from_user.c
80199+++ b/lib/strncpy_from_user.c
80200@@ -21,7 +21,7 @@
80201 */
80202 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
80203 {
80204- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80205+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80206 long res = 0;
80207
80208 /*
80209diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
80210index a28df52..3d55877 100644
80211--- a/lib/strnlen_user.c
80212+++ b/lib/strnlen_user.c
80213@@ -26,7 +26,7 @@
80214 */
80215 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
80216 {
80217- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80218+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80219 long align, res = 0;
80220 unsigned long c;
80221
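[annotation] In both word-at-a-time hunks above, the constants table goes from an automatic const object — rebuilt on the stack at every call — to static const, emitted once into .rodata, where it also cannot be modified at run time. A sketch of the difference, with a stand-in struct and a 64-bit unsigned long assumed:

/* Automatic const aggregate vs. static const in .rodata. */
#include <stdio.h>

struct word_at_a_time { unsigned long one_bits, high_bits; };
#define WORD_AT_A_TIME_CONSTANTS { 0x0101010101010101UL, 0x8080808080808080UL }

static unsigned long with_stack_copy(void)
{
	const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;	/* per call */
	return c.one_bits;
}

static unsigned long with_rodata(void)
{
	static const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS; /* once */
	return c.one_bits;
}

int main(void)
{
	printf("%lx %lx\n", with_stack_copy(), with_rodata());
	return 0;
}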
80222diff --git a/lib/swiotlb.c b/lib/swiotlb.c
80223index 196b069..358f342 100644
80224--- a/lib/swiotlb.c
80225+++ b/lib/swiotlb.c
80226@@ -642,7 +642,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
80227
80228 void
80229 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
80230- dma_addr_t dev_addr)
80231+ dma_addr_t dev_addr, struct dma_attrs *attrs)
80232 {
80233 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
80234
80235diff --git a/lib/vsprintf.c b/lib/vsprintf.c
80236index fab33a9..3b5fe68 100644
80237--- a/lib/vsprintf.c
80238+++ b/lib/vsprintf.c
80239@@ -16,6 +16,9 @@
80240 * - scnprintf and vscnprintf
80241 */
80242
80243+#ifdef CONFIG_GRKERNSEC_HIDESYM
80244+#define __INCLUDED_BY_HIDESYM 1
80245+#endif
80246 #include <stdarg.h>
80247 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
80248 #include <linux/types.h>
80249@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
80250 char sym[KSYM_SYMBOL_LEN];
80251 if (ext == 'B')
80252 sprint_backtrace(sym, value);
80253- else if (ext != 'f' && ext != 's')
80254+ else if (ext != 'f' && ext != 's' && ext != 'a')
80255 sprint_symbol(sym, value);
80256 else
80257 sprint_symbol_no_offset(sym, value);
80258@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
80259 return number(buf, end, *(const netdev_features_t *)addr, spec);
80260 }
80261
80262+#ifdef CONFIG_GRKERNSEC_HIDESYM
80263+int kptr_restrict __read_mostly = 2;
80264+#else
80265 int kptr_restrict __read_mostly;
80266+#endif
80267
80268 /*
80269 * Show a '%p' thing. A kernel extension is that the '%p' is followed
80270@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
80271 * - 'S' For symbolic direct pointers with offset
80272 * - 's' For symbolic direct pointers without offset
80273 * - 'B' For backtraced symbolic direct pointers with offset
80274+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
80275+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
80276 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
80277 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
80278 * - 'M' For a 6-byte MAC address, it prints the address in the
80279@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80280
80281 if (!ptr && *fmt != 'K') {
80282 /*
80283- * Print (null) with the same width as a pointer so it makes
80284+ * Print (nil) with the same width as a pointer so it makes
80285 * tabular output look nice.
80286 */
80287 if (spec.field_width == -1)
80288 spec.field_width = default_width;
80289- return string(buf, end, "(null)", spec);
80290+ return string(buf, end, "(nil)", spec);
80291 }
80292
80293 switch (*fmt) {
80294@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80295 /* Fallthrough */
80296 case 'S':
80297 case 's':
80298+#ifdef CONFIG_GRKERNSEC_HIDESYM
80299+ break;
80300+#else
80301+ return symbol_string(buf, end, ptr, spec, *fmt);
80302+#endif
80303+ case 'A':
80304+ case 'a':
80305 case 'B':
80306 return symbol_string(buf, end, ptr, spec, *fmt);
80307 case 'R':
80308@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80309 va_end(va);
80310 return buf;
80311 }
80312+ case 'P':
80313+ break;
80314 case 'K':
80315 /*
80316 * %pK cannot be used in IRQ context because its test
80317@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80318 }
80319 break;
80320 }
80321+
80322+#ifdef CONFIG_GRKERNSEC_HIDESYM
80323+ /* 'P' = approved pointers to copy to userland,
80324+ as in the /proc/kallsyms case, as we make it display nothing
80325+ for non-root users, and the real contents for root users
80326+ Also ignore 'K' pointers, since we force their NULLing for non-root users
80327+ above
80328+ */
80329+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
80330+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
80331+ dump_stack();
80332+ ptr = NULL;
80333+ }
80334+#endif
80335+
80336 spec.flags |= SMALL;
80337 if (spec.field_width == -1) {
80338 spec.field_width = default_width;
80339@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80340 typeof(type) value; \
80341 if (sizeof(type) == 8) { \
80342 args = PTR_ALIGN(args, sizeof(u32)); \
80343- *(u32 *)&value = *(u32 *)args; \
80344- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
80345+ *(u32 *)&value = *(const u32 *)args; \
80346+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
80347 } else { \
80348 args = PTR_ALIGN(args, sizeof(type)); \
80349- value = *(typeof(type) *)args; \
80350+ value = *(const typeof(type) *)args; \
80351 } \
80352 args += sizeof(type); \
80353 value; \
80354@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80355 case FORMAT_TYPE_STR: {
80356 const char *str_arg = args;
80357 args += strlen(str_arg) + 1;
80358- str = string(str, end, (char *)str_arg, spec);
80359+ str = string(str, end, str_arg, spec);
80360 break;
80361 }
80362
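[annotation] The vsprintf.c hunks default kptr_restrict to 2 under GRKERNSEC_HIDESYM, add the "approved" %pP/%pA/%pa specifiers, and NULL out any other kernel pointer that would be formatted into a buffer destined for userland. A userspace sketch of the censoring policy only — the helper and its arguments are illustrative:

/* kptr_restrict-style pointer censoring: 0 = show, 1 = privileged
 * callers only, 2 = always censor (the GRKERNSEC_HIDESYM default). */
#include <stdio.h>
#include <stdbool.h>

static int kptr_restrict = 2;

static void print_pointer(const void *ptr, bool privileged)
{
	if (kptr_restrict == 2 || (kptr_restrict == 1 && !privileged))
		printf("%p\n", (void *)0);	/* censored, like %pK */
	else
		printf("%p\n", ptr);
}

int main(void)
{
	int obj;
	print_pointer(&obj, false);	/* censored */
	print_pointer(&obj, true);	/* still censored at level 2 */
	kptr_restrict = 0;
	print_pointer(&obj, false);	/* real address */
	return 0;
}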
80363diff --git a/localversion-grsec b/localversion-grsec
80364new file mode 100644
80365index 0000000..7cd6065
80366--- /dev/null
80367+++ b/localversion-grsec
80368@@ -0,0 +1 @@
80369+-grsec
80370diff --git a/mm/Kconfig b/mm/Kconfig
80371index 278e3ab..87c384d 100644
80372--- a/mm/Kconfig
80373+++ b/mm/Kconfig
80374@@ -286,10 +286,10 @@ config KSM
80375 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
80376
80377 config DEFAULT_MMAP_MIN_ADDR
80378- int "Low address space to protect from user allocation"
80379+ int "Low address space to protect from user allocation"
80380 depends on MMU
80381- default 4096
80382- help
80383+ default 65536
80384+ help
80385 This is the portion of low virtual memory which should be protected
80386 from userspace allocation. Keeping a user from writing to low pages
80387 can help reduce the impact of kernel NULL pointer bugs.
80388@@ -320,7 +320,7 @@ config MEMORY_FAILURE
80389
80390 config HWPOISON_INJECT
80391 tristate "HWPoison pages injector"
80392- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
80393+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
80394 select PROC_PAGE_MONITOR
80395
80396 config NOMMU_INITIAL_TRIM_EXCESS
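[annotation] Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 above widens the unmappable region at the bottom of the address space, so a kernel NULL-or-small-pointer dereference cannot land on attacker-controlled data. A small Linux demo of the floor in action — the outcome and errno depend on the running kernel's vm.mmap_min_addr:

/* Try to place a fixed mapping below a 64KiB mmap_min_addr floor. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* 0x1000 is below a 65536 floor but above a 4096 one. */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("mmap at 0x1000 refused: %s\n", strerror(errno));
	else
		printf("mapped at %p (floor is <= 0x1000)\n", p);
	return 0;
}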
80397diff --git a/mm/filemap.c b/mm/filemap.c
80398index 83efee7..3f99381 100644
80399--- a/mm/filemap.c
80400+++ b/mm/filemap.c
80401@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
80402 struct address_space *mapping = file->f_mapping;
80403
80404 if (!mapping->a_ops->readpage)
80405- return -ENOEXEC;
80406+ return -ENODEV;
80407 file_accessed(file);
80408 vma->vm_ops = &generic_file_vm_ops;
80409 return 0;
80410@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
80411 *pos = i_size_read(inode);
80412
80413 if (limit != RLIM_INFINITY) {
80414+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
80415 if (*pos >= limit) {
80416 send_sig(SIGXFSZ, current, 0);
80417 return -EFBIG;
80418diff --git a/mm/fremap.c b/mm/fremap.c
80419index a0aaf0e..20325c3 100644
80420--- a/mm/fremap.c
80421+++ b/mm/fremap.c
80422@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
80423 retry:
80424 vma = find_vma(mm, start);
80425
80426+#ifdef CONFIG_PAX_SEGMEXEC
80427+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
80428+ goto out;
80429+#endif
80430+
80431 /*
80432 * Make sure the vma is shared, that it supports prefaulting,
80433 * and that the remapped range is valid and fully within
80434diff --git a/mm/highmem.c b/mm/highmem.c
80435index b32b70c..e512eb0 100644
80436--- a/mm/highmem.c
80437+++ b/mm/highmem.c
80438@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
80439 * So no dangers, even with speculative execution.
80440 */
80441 page = pte_page(pkmap_page_table[i]);
80442+ pax_open_kernel();
80443 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
80444-
80445+ pax_close_kernel();
80446 set_page_address(page, NULL);
80447 need_flush = 1;
80448 }
80449@@ -198,9 +199,11 @@ start:
80450 }
80451 }
80452 vaddr = PKMAP_ADDR(last_pkmap_nr);
80453+
80454+ pax_open_kernel();
80455 set_pte_at(&init_mm, vaddr,
80456 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
80457-
80458+ pax_close_kernel();
80459 pkmap_count[last_pkmap_nr] = 1;
80460 set_page_address(page, (void *)vaddr);
80461
80462diff --git a/mm/hugetlb.c b/mm/hugetlb.c
80463index d7cec92..b05cc33 100644
80464--- a/mm/hugetlb.c
80465+++ b/mm/hugetlb.c
80466@@ -2008,15 +2008,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
80467 struct hstate *h = &default_hstate;
80468 unsigned long tmp;
80469 int ret;
80470+ ctl_table_no_const hugetlb_table;
80471
80472 tmp = h->max_huge_pages;
80473
80474 if (write && h->order >= MAX_ORDER)
80475 return -EINVAL;
80476
80477- table->data = &tmp;
80478- table->maxlen = sizeof(unsigned long);
80479- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80480+ hugetlb_table = *table;
80481+ hugetlb_table.data = &tmp;
80482+ hugetlb_table.maxlen = sizeof(unsigned long);
80483+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80484 if (ret)
80485 goto out;
80486
80487@@ -2073,15 +2075,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
80488 struct hstate *h = &default_hstate;
80489 unsigned long tmp;
80490 int ret;
80491+ ctl_table_no_const hugetlb_table;
80492
80493 tmp = h->nr_overcommit_huge_pages;
80494
80495 if (write && h->order >= MAX_ORDER)
80496 return -EINVAL;
80497
80498- table->data = &tmp;
80499- table->maxlen = sizeof(unsigned long);
80500- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80501+ hugetlb_table = *table;
80502+ hugetlb_table.data = &tmp;
80503+ hugetlb_table.maxlen = sizeof(unsigned long);
80504+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80505 if (ret)
80506 goto out;
80507
80508@@ -2515,6 +2519,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
80509 return 1;
80510 }
80511
80512+#ifdef CONFIG_PAX_SEGMEXEC
80513+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
80514+{
80515+ struct mm_struct *mm = vma->vm_mm;
80516+ struct vm_area_struct *vma_m;
80517+ unsigned long address_m;
80518+ pte_t *ptep_m;
80519+
80520+ vma_m = pax_find_mirror_vma(vma);
80521+ if (!vma_m)
80522+ return;
80523+
80524+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80525+ address_m = address + SEGMEXEC_TASK_SIZE;
80526+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
80527+ get_page(page_m);
80528+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
80529+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
80530+}
80531+#endif
80532+
80533 /*
80534 * Hugetlb_cow() should be called with page lock of the original hugepage held.
80535 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
80536@@ -2633,6 +2658,11 @@ retry_avoidcopy:
80537 make_huge_pte(vma, new_page, 1));
80538 page_remove_rmap(old_page);
80539 hugepage_add_new_anon_rmap(new_page, vma, address);
80540+
80541+#ifdef CONFIG_PAX_SEGMEXEC
80542+ pax_mirror_huge_pte(vma, address, new_page);
80543+#endif
80544+
80545 /* Make the old page be freed below */
80546 new_page = old_page;
80547 }
80548@@ -2792,6 +2822,10 @@ retry:
80549 && (vma->vm_flags & VM_SHARED)));
80550 set_huge_pte_at(mm, address, ptep, new_pte);
80551
80552+#ifdef CONFIG_PAX_SEGMEXEC
80553+ pax_mirror_huge_pte(vma, address, page);
80554+#endif
80555+
80556 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
80557 /* Optimization, do the COW without a second fault */
80558 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
80559@@ -2821,6 +2855,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80560 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
80561 struct hstate *h = hstate_vma(vma);
80562
80563+#ifdef CONFIG_PAX_SEGMEXEC
80564+ struct vm_area_struct *vma_m;
80565+#endif
80566+
80567 address &= huge_page_mask(h);
80568
80569 ptep = huge_pte_offset(mm, address);
80570@@ -2834,6 +2872,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80571 VM_FAULT_SET_HINDEX(hstate_index(h));
80572 }
80573
80574+#ifdef CONFIG_PAX_SEGMEXEC
80575+ vma_m = pax_find_mirror_vma(vma);
80576+ if (vma_m) {
80577+ unsigned long address_m;
80578+
80579+ if (vma->vm_start > vma_m->vm_start) {
80580+ address_m = address;
80581+ address -= SEGMEXEC_TASK_SIZE;
80582+ vma = vma_m;
80583+ h = hstate_vma(vma);
80584+ } else
80585+ address_m = address + SEGMEXEC_TASK_SIZE;
80586+
80587+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
80588+ return VM_FAULT_OOM;
80589+ address_m &= HPAGE_MASK;
80590+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
80591+ }
80592+#endif
80593+
80594 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
80595 if (!ptep)
80596 return VM_FAULT_OOM;
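[annotation] The pax_mirror_huge_pte() helper and the fault-path hunks above maintain SEGMEXEC's address-space split: mappings in the lower half of user space get an executable mirror at a fixed offset, and faults keep the two PTEs in sync (the same scheme the mm/memory.c pax_mirror_* helpers below implement for ordinary pages). A sketch of just the mirror arithmetic — the constants follow the usual PaX i386 layout and this is an illustration, not kernel code:

/* SEGMEXEC mirror-address arithmetic. */
#include <assert.h>
#include <stdio.h>

#define TASK_SIZE		0xC0000000UL		/* i386 user space */
#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)		/* 1.5GB lower half */

static unsigned long mirror_address(unsigned long address)
{
	assert(address < SEGMEXEC_TASK_SIZE);		/* BUG_ON in the patch */
	return address + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	unsigned long addr = 0x08048000UL;		/* classic i386 text */
	printf("data   at %#lx\n", addr);
	printf("mirror at %#lx\n", mirror_address(addr));
	return 0;
}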
80597diff --git a/mm/internal.h b/mm/internal.h
80598index 9ba2110..eaf0674 100644
80599--- a/mm/internal.h
80600+++ b/mm/internal.h
80601@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
80602 * in mm/page_alloc.c
80603 */
80604 extern void __free_pages_bootmem(struct page *page, unsigned int order);
80605+extern void free_compound_page(struct page *page);
80606 extern void prep_compound_page(struct page *page, unsigned long order);
80607 #ifdef CONFIG_MEMORY_FAILURE
80608 extern bool is_free_buddy_page(struct page *page);
80609diff --git a/mm/kmemleak.c b/mm/kmemleak.c
80610index 752a705..6c3102e 100644
80611--- a/mm/kmemleak.c
80612+++ b/mm/kmemleak.c
80613@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
80614
80615 for (i = 0; i < object->trace_len; i++) {
80616 void *ptr = (void *)object->trace[i];
80617- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
80618+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
80619 }
80620 }
80621
80622@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
80623 return -ENOMEM;
80624 }
80625
80626- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
80627+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
80628 &kmemleak_fops);
80629 if (!dentry)
80630 pr_warning("Failed to create the debugfs kmemleak file\n");
80631diff --git a/mm/maccess.c b/mm/maccess.c
80632index d53adf9..03a24bf 100644
80633--- a/mm/maccess.c
80634+++ b/mm/maccess.c
80635@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
80636 set_fs(KERNEL_DS);
80637 pagefault_disable();
80638 ret = __copy_from_user_inatomic(dst,
80639- (__force const void __user *)src, size);
80640+ (const void __force_user *)src, size);
80641 pagefault_enable();
80642 set_fs(old_fs);
80643
80644@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
80645
80646 set_fs(KERNEL_DS);
80647 pagefault_disable();
80648- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
80649+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
80650 pagefault_enable();
80651 set_fs(old_fs);
80652
80653diff --git a/mm/madvise.c b/mm/madvise.c
80654index 03dfa5c..b032917 100644
80655--- a/mm/madvise.c
80656+++ b/mm/madvise.c
80657@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
80658 pgoff_t pgoff;
80659 unsigned long new_flags = vma->vm_flags;
80660
80661+#ifdef CONFIG_PAX_SEGMEXEC
80662+ struct vm_area_struct *vma_m;
80663+#endif
80664+
80665 switch (behavior) {
80666 case MADV_NORMAL:
80667 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
80668@@ -123,6 +127,13 @@ success:
80669 /*
80670 * vm_flags is protected by the mmap_sem held in write mode.
80671 */
80672+
80673+#ifdef CONFIG_PAX_SEGMEXEC
80674+ vma_m = pax_find_mirror_vma(vma);
80675+ if (vma_m)
80676+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
80677+#endif
80678+
80679 vma->vm_flags = new_flags;
80680
80681 out:
80682@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80683 struct vm_area_struct ** prev,
80684 unsigned long start, unsigned long end)
80685 {
80686+
80687+#ifdef CONFIG_PAX_SEGMEXEC
80688+ struct vm_area_struct *vma_m;
80689+#endif
80690+
80691 *prev = vma;
80692 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
80693 return -EINVAL;
80694@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80695 zap_page_range(vma, start, end - start, &details);
80696 } else
80697 zap_page_range(vma, start, end - start, NULL);
80698+
80699+#ifdef CONFIG_PAX_SEGMEXEC
80700+ vma_m = pax_find_mirror_vma(vma);
80701+ if (vma_m) {
80702+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
80703+ struct zap_details details = {
80704+ .nonlinear_vma = vma_m,
80705+ .last_index = ULONG_MAX,
80706+ };
80707+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
80708+ } else
80709+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
80710+ }
80711+#endif
80712+
80713 return 0;
80714 }
80715
80716@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
80717 if (end < start)
80718 goto out;
80719
80720+#ifdef CONFIG_PAX_SEGMEXEC
80721+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
80722+ if (end > SEGMEXEC_TASK_SIZE)
80723+ goto out;
80724+ } else
80725+#endif
80726+
80727+ if (end > TASK_SIZE)
80728+ goto out;
80729+
80730 error = 0;
80731 if (end == start)
80732 goto out;
80733diff --git a/mm/memory-failure.c b/mm/memory-failure.c
80734index c6e4dd3..1f41988 100644
80735--- a/mm/memory-failure.c
80736+++ b/mm/memory-failure.c
80737@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
80738
80739 int sysctl_memory_failure_recovery __read_mostly = 1;
80740
80741-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80742+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80743
80744 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
80745
80746@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
80747 pfn, t->comm, t->pid);
80748 si.si_signo = SIGBUS;
80749 si.si_errno = 0;
80750- si.si_addr = (void *)addr;
80751+ si.si_addr = (void __user *)addr;
80752 #ifdef __ARCH_SI_TRAPNO
80753 si.si_trapno = trapno;
80754 #endif
80755@@ -760,7 +760,7 @@ static struct page_state {
80756 unsigned long res;
80757 char *msg;
80758 int (*action)(struct page *p, unsigned long pfn);
80759-} error_states[] = {
80760+} __do_const error_states[] = {
80761 { reserved, reserved, "reserved kernel", me_kernel },
80762 /*
80763 * free pages are specially detected outside this table:
80764@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80765 }
80766
80767 nr_pages = 1 << compound_trans_order(hpage);
80768- atomic_long_add(nr_pages, &mce_bad_pages);
80769+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
80770
80771 /*
80772 * We need/can do nothing about count=0 pages.
80773@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80774 if (!PageHWPoison(hpage)
80775 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
80776 || (p != hpage && TestSetPageHWPoison(hpage))) {
80777- atomic_long_sub(nr_pages, &mce_bad_pages);
80778+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80779 return 0;
80780 }
80781 set_page_hwpoison_huge_page(hpage);
80782@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80783 }
80784 if (hwpoison_filter(p)) {
80785 if (TestClearPageHWPoison(p))
80786- atomic_long_sub(nr_pages, &mce_bad_pages);
80787+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80788 unlock_page(hpage);
80789 put_page(hpage);
80790 return 0;
80791@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
80792 return 0;
80793 }
80794 if (TestClearPageHWPoison(p))
80795- atomic_long_sub(nr_pages, &mce_bad_pages);
80796+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80797 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
80798 return 0;
80799 }
80800@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
80801 */
80802 if (TestClearPageHWPoison(page)) {
80803 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
80804- atomic_long_sub(nr_pages, &mce_bad_pages);
80805+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80806 freeit = 1;
80807 if (PageHuge(page))
80808 clear_page_hwpoison_huge_page(page);
80809@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
80810 }
80811 done:
80812 if (!PageHWPoison(hpage))
80813- atomic_long_add(1 << compound_trans_order(hpage),
80814+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
80815 &mce_bad_pages);
80816 set_page_hwpoison_huge_page(hpage);
80817 dequeue_hwpoisoned_huge_page(hpage);
80818@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
80819 return ret;
80820
80821 done:
80822- atomic_long_add(1, &mce_bad_pages);
80823+ atomic_long_add_unchecked(1, &mce_bad_pages);
80824 SetPageHWPoison(page);
80825 /* keep elevated page count for bad page */
80826 return ret;
80827diff --git a/mm/memory.c b/mm/memory.c
80828index bb1369f..b9631d2 100644
80829--- a/mm/memory.c
80830+++ b/mm/memory.c
80831@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80832 free_pte_range(tlb, pmd, addr);
80833 } while (pmd++, addr = next, addr != end);
80834
80835+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
80836 start &= PUD_MASK;
80837 if (start < floor)
80838 return;
80839@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80840 pmd = pmd_offset(pud, start);
80841 pud_clear(pud);
80842 pmd_free_tlb(tlb, pmd, start);
80843+#endif
80844+
80845 }
80846
80847 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80848@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80849 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
80850 } while (pud++, addr = next, addr != end);
80851
80852+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
80853 start &= PGDIR_MASK;
80854 if (start < floor)
80855 return;
80856@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80857 pud = pud_offset(pgd, start);
80858 pgd_clear(pgd);
80859 pud_free_tlb(tlb, pud, start);
80860+#endif
80861+
80862 }
80863
80864 /*
80865@@ -1618,12 +1624,6 @@ no_page_table:
80866 return page;
80867 }
80868
80869-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
80870-{
80871- return stack_guard_page_start(vma, addr) ||
80872- stack_guard_page_end(vma, addr+PAGE_SIZE);
80873-}
80874-
80875 /**
80876 * __get_user_pages() - pin user pages in memory
80877 * @tsk: task_struct of target task
80878@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80879
80880 i = 0;
80881
80882- do {
80883+ while (nr_pages) {
80884 struct vm_area_struct *vma;
80885
80886- vma = find_extend_vma(mm, start);
80887+ vma = find_vma(mm, start);
80888 if (!vma && in_gate_area(mm, start)) {
80889 unsigned long pg = start & PAGE_MASK;
80890 pgd_t *pgd;
80891@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80892 goto next_page;
80893 }
80894
80895- if (!vma ||
80896+ if (!vma || start < vma->vm_start ||
80897 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
80898 !(vm_flags & vma->vm_flags))
80899 return i ? : -EFAULT;
80900@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80901 int ret;
80902 unsigned int fault_flags = 0;
80903
80904- /* For mlock, just skip the stack guard page. */
80905- if (foll_flags & FOLL_MLOCK) {
80906- if (stack_guard_page(vma, start))
80907- goto next_page;
80908- }
80909 if (foll_flags & FOLL_WRITE)
80910 fault_flags |= FAULT_FLAG_WRITE;
80911 if (nonblocking)
80912@@ -1865,7 +1860,7 @@ next_page:
80913 start += PAGE_SIZE;
80914 nr_pages--;
80915 } while (nr_pages && start < vma->vm_end);
80916- } while (nr_pages);
80917+ }
80918 return i;
80919 }
80920 EXPORT_SYMBOL(__get_user_pages);
80921@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
80922 page_add_file_rmap(page);
80923 set_pte_at(mm, addr, pte, mk_pte(page, prot));
80924
80925+#ifdef CONFIG_PAX_SEGMEXEC
80926+ pax_mirror_file_pte(vma, addr, page, ptl);
80927+#endif
80928+
80929 retval = 0;
80930 pte_unmap_unlock(pte, ptl);
80931 return retval;
80932@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
80933 if (!page_count(page))
80934 return -EINVAL;
80935 if (!(vma->vm_flags & VM_MIXEDMAP)) {
80936+
80937+#ifdef CONFIG_PAX_SEGMEXEC
80938+ struct vm_area_struct *vma_m;
80939+#endif
80940+
80941 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
80942 BUG_ON(vma->vm_flags & VM_PFNMAP);
80943 vma->vm_flags |= VM_MIXEDMAP;
80944+
80945+#ifdef CONFIG_PAX_SEGMEXEC
80946+ vma_m = pax_find_mirror_vma(vma);
80947+ if (vma_m)
80948+ vma_m->vm_flags |= VM_MIXEDMAP;
80949+#endif
80950+
80951 }
80952 return insert_page(vma, addr, page, vma->vm_page_prot);
80953 }
80954@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
80955 unsigned long pfn)
80956 {
80957 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
80958+ BUG_ON(vma->vm_mirror);
80959
80960 if (addr < vma->vm_start || addr >= vma->vm_end)
80961 return -EFAULT;
80962@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
80963
80964 BUG_ON(pud_huge(*pud));
80965
80966- pmd = pmd_alloc(mm, pud, addr);
80967+ pmd = (mm == &init_mm) ?
80968+ pmd_alloc_kernel(mm, pud, addr) :
80969+ pmd_alloc(mm, pud, addr);
80970 if (!pmd)
80971 return -ENOMEM;
80972 do {
80973@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
80974 unsigned long next;
80975 int err;
80976
80977- pud = pud_alloc(mm, pgd, addr);
80978+ pud = (mm == &init_mm) ?
80979+ pud_alloc_kernel(mm, pgd, addr) :
80980+ pud_alloc(mm, pgd, addr);
80981 if (!pud)
80982 return -ENOMEM;
80983 do {
80984@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
80985 copy_user_highpage(dst, src, va, vma);
80986 }
80987
80988+#ifdef CONFIG_PAX_SEGMEXEC
80989+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
80990+{
80991+ struct mm_struct *mm = vma->vm_mm;
80992+ spinlock_t *ptl;
80993+ pte_t *pte, entry;
80994+
80995+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
80996+ entry = *pte;
80997+ if (!pte_present(entry)) {
80998+ if (!pte_none(entry)) {
80999+ BUG_ON(pte_file(entry));
81000+ free_swap_and_cache(pte_to_swp_entry(entry));
81001+ pte_clear_not_present_full(mm, address, pte, 0);
81002+ }
81003+ } else {
81004+ struct page *page;
81005+
81006+ flush_cache_page(vma, address, pte_pfn(entry));
81007+ entry = ptep_clear_flush(vma, address, pte);
81008+ BUG_ON(pte_dirty(entry));
81009+ page = vm_normal_page(vma, address, entry);
81010+ if (page) {
81011+ update_hiwater_rss(mm);
81012+ if (PageAnon(page))
81013+ dec_mm_counter_fast(mm, MM_ANONPAGES);
81014+ else
81015+ dec_mm_counter_fast(mm, MM_FILEPAGES);
81016+ page_remove_rmap(page);
81017+ page_cache_release(page);
81018+ }
81019+ }
81020+ pte_unmap_unlock(pte, ptl);
81021+}
81022+
81023+/* PaX: if vma is mirrored, synchronize the mirror's PTE
81024+ *
81025+ * the ptl of the lower mapped page is held on entry and is not released on exit
81026+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
81027+ */
81028+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81029+{
81030+ struct mm_struct *mm = vma->vm_mm;
81031+ unsigned long address_m;
81032+ spinlock_t *ptl_m;
81033+ struct vm_area_struct *vma_m;
81034+ pmd_t *pmd_m;
81035+ pte_t *pte_m, entry_m;
81036+
81037+ BUG_ON(!page_m || !PageAnon(page_m));
81038+
81039+ vma_m = pax_find_mirror_vma(vma);
81040+ if (!vma_m)
81041+ return;
81042+
81043+ BUG_ON(!PageLocked(page_m));
81044+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81045+ address_m = address + SEGMEXEC_TASK_SIZE;
81046+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81047+ pte_m = pte_offset_map(pmd_m, address_m);
81048+ ptl_m = pte_lockptr(mm, pmd_m);
81049+ if (ptl != ptl_m) {
81050+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81051+ if (!pte_none(*pte_m))
81052+ goto out;
81053+ }
81054+
81055+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81056+ page_cache_get(page_m);
81057+ page_add_anon_rmap(page_m, vma_m, address_m);
81058+ inc_mm_counter_fast(mm, MM_ANONPAGES);
81059+ set_pte_at(mm, address_m, pte_m, entry_m);
81060+ update_mmu_cache(vma_m, address_m, entry_m);
81061+out:
81062+ if (ptl != ptl_m)
81063+ spin_unlock(ptl_m);
81064+ pte_unmap(pte_m);
81065+ unlock_page(page_m);
81066+}
81067+
81068+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81069+{
81070+ struct mm_struct *mm = vma->vm_mm;
81071+ unsigned long address_m;
81072+ spinlock_t *ptl_m;
81073+ struct vm_area_struct *vma_m;
81074+ pmd_t *pmd_m;
81075+ pte_t *pte_m, entry_m;
81076+
81077+ BUG_ON(!page_m || PageAnon(page_m));
81078+
81079+ vma_m = pax_find_mirror_vma(vma);
81080+ if (!vma_m)
81081+ return;
81082+
81083+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81084+ address_m = address + SEGMEXEC_TASK_SIZE;
81085+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81086+ pte_m = pte_offset_map(pmd_m, address_m);
81087+ ptl_m = pte_lockptr(mm, pmd_m);
81088+ if (ptl != ptl_m) {
81089+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81090+ if (!pte_none(*pte_m))
81091+ goto out;
81092+ }
81093+
81094+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81095+ page_cache_get(page_m);
81096+ page_add_file_rmap(page_m);
81097+ inc_mm_counter_fast(mm, MM_FILEPAGES);
81098+ set_pte_at(mm, address_m, pte_m, entry_m);
81099+ update_mmu_cache(vma_m, address_m, entry_m);
81100+out:
81101+ if (ptl != ptl_m)
81102+ spin_unlock(ptl_m);
81103+ pte_unmap(pte_m);
81104+}
81105+
81106+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
81107+{
81108+ struct mm_struct *mm = vma->vm_mm;
81109+ unsigned long address_m;
81110+ spinlock_t *ptl_m;
81111+ struct vm_area_struct *vma_m;
81112+ pmd_t *pmd_m;
81113+ pte_t *pte_m, entry_m;
81114+
81115+ vma_m = pax_find_mirror_vma(vma);
81116+ if (!vma_m)
81117+ return;
81118+
81119+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81120+ address_m = address + SEGMEXEC_TASK_SIZE;
81121+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81122+ pte_m = pte_offset_map(pmd_m, address_m);
81123+ ptl_m = pte_lockptr(mm, pmd_m);
81124+ if (ptl != ptl_m) {
81125+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81126+ if (!pte_none(*pte_m))
81127+ goto out;
81128+ }
81129+
81130+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
81131+ set_pte_at(mm, address_m, pte_m, entry_m);
81132+out:
81133+ if (ptl != ptl_m)
81134+ spin_unlock(ptl_m);
81135+ pte_unmap(pte_m);
81136+}
81137+
81138+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
81139+{
81140+ struct page *page_m;
81141+ pte_t entry;
81142+
81143+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
81144+ goto out;
81145+
81146+ entry = *pte;
81147+ page_m = vm_normal_page(vma, address, entry);
81148+ if (!page_m)
81149+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
81150+ else if (PageAnon(page_m)) {
81151+ if (pax_find_mirror_vma(vma)) {
81152+ pte_unmap_unlock(pte, ptl);
81153+ lock_page(page_m);
81154+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
81155+ if (pte_same(entry, *pte))
81156+ pax_mirror_anon_pte(vma, address, page_m, ptl);
81157+ else
81158+ unlock_page(page_m);
81159+ }
81160+ } else
81161+ pax_mirror_file_pte(vma, address, page_m, ptl);
81162+
81163+out:
81164+ pte_unmap_unlock(pte, ptl);
81165+}
81166+#endif
81167+
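The anon branch of pax_mirror_pte() cannot call lock_page() (which may sleep) while holding the PTE spinlock, so it drops the lock, locks the page, re-takes the lock, and re-validates with pte_same() before mirroring. A hedged, generic C sketch of that drop/relock/revalidate pattern, with pthread mutexes standing in for the kernel locks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;       /* stands in for the pte lock */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for lock_page() */
static int pte = 42;                                          /* stands in for *pte */

/* called with ptl held; returns with ptl held, as in the kernel flow */
static void mirror(int snapshot)
{
	pthread_mutex_unlock(&ptl);       /* pte_unmap_unlock() */
	pthread_mutex_lock(&page_lock);   /* lock_page(), may block */
	pthread_mutex_lock(&ptl);         /* pte_offset_map_lock() */
	if (pte == snapshot)              /* pte_same(entry, *pte) */
		printf("PTE unchanged, safe to mirror\n");
	else
		printf("lost the race, back off\n");
	pthread_mutex_unlock(&page_lock); /* unlock_page() */
}

int main(void)
{
	pthread_mutex_lock(&ptl);
	mirror(pte);
	pthread_mutex_unlock(&ptl);
	return 0;
}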
81168 /*
81169 * This routine handles present pages, when users try to write
81170 * to a shared page. It is done by copying the page to a new address
81171@@ -2725,6 +2921,12 @@ gotten:
81172 */
81173 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81174 if (likely(pte_same(*page_table, orig_pte))) {
81175+
81176+#ifdef CONFIG_PAX_SEGMEXEC
81177+ if (pax_find_mirror_vma(vma))
81178+ BUG_ON(!trylock_page(new_page));
81179+#endif
81180+
81181 if (old_page) {
81182 if (!PageAnon(old_page)) {
81183 dec_mm_counter_fast(mm, MM_FILEPAGES);
81184@@ -2776,6 +2978,10 @@ gotten:
81185 page_remove_rmap(old_page);
81186 }
81187
81188+#ifdef CONFIG_PAX_SEGMEXEC
81189+ pax_mirror_anon_pte(vma, address, new_page, ptl);
81190+#endif
81191+
81192 /* Free the old page.. */
81193 new_page = old_page;
81194 ret |= VM_FAULT_WRITE;
81195@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81196 swap_free(entry);
81197 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
81198 try_to_free_swap(page);
81199+
81200+#ifdef CONFIG_PAX_SEGMEXEC
81201+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
81202+#endif
81203+
81204 unlock_page(page);
81205 if (swapcache) {
81206 /*
81207@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81208
81209 /* No need to invalidate - it was non-present before */
81210 update_mmu_cache(vma, address, page_table);
81211+
81212+#ifdef CONFIG_PAX_SEGMEXEC
81213+ pax_mirror_anon_pte(vma, address, page, ptl);
81214+#endif
81215+
81216 unlock:
81217 pte_unmap_unlock(page_table, ptl);
81218 out:
81219@@ -3093,40 +3309,6 @@ out_release:
81220 }
81221
81222 /*
81223- * This is like a special single-page "expand_{down|up}wards()",
81224- * except we must first make sure that 'address{-|+}PAGE_SIZE'
81225- * doesn't hit another vma.
81226- */
81227-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
81228-{
81229- address &= PAGE_MASK;
81230- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
81231- struct vm_area_struct *prev = vma->vm_prev;
81232-
81233- /*
81234- * Is there a mapping abutting this one below?
81235- *
81236- * That's only ok if it's the same stack mapping
81237- * that has gotten split..
81238- */
81239- if (prev && prev->vm_end == address)
81240- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
81241-
81242- expand_downwards(vma, address - PAGE_SIZE);
81243- }
81244- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
81245- struct vm_area_struct *next = vma->vm_next;
81246-
81247- /* As VM_GROWSDOWN but s/below/above/ */
81248- if (next && next->vm_start == address + PAGE_SIZE)
81249- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
81250-
81251- expand_upwards(vma, address + PAGE_SIZE);
81252- }
81253- return 0;
81254-}
81255-
81256-/*
81257 * We enter with non-exclusive mmap_sem (to exclude vma changes,
81258 * but allow concurrent faults), and pte mapped but not yet locked.
81259 * We return with mmap_sem still held, but pte unmapped and unlocked.
81260@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81261 unsigned long address, pte_t *page_table, pmd_t *pmd,
81262 unsigned int flags)
81263 {
81264- struct page *page;
81265+ struct page *page = NULL;
81266 spinlock_t *ptl;
81267 pte_t entry;
81268
81269- pte_unmap(page_table);
81270-
81271- /* Check if we need to add a guard page to the stack */
81272- if (check_stack_guard_page(vma, address) < 0)
81273- return VM_FAULT_SIGBUS;
81274-
81275- /* Use the zero-page for reads */
81276 if (!(flags & FAULT_FLAG_WRITE)) {
81277 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
81278 vma->vm_page_prot));
81279- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81280+ ptl = pte_lockptr(mm, pmd);
81281+ spin_lock(ptl);
81282 if (!pte_none(*page_table))
81283 goto unlock;
81284 goto setpte;
81285 }
81286
81287 /* Allocate our own private page. */
81288+ pte_unmap(page_table);
81289+
81290 if (unlikely(anon_vma_prepare(vma)))
81291 goto oom;
81292 page = alloc_zeroed_user_highpage_movable(vma, address);
81293@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81294 if (!pte_none(*page_table))
81295 goto release;
81296
81297+#ifdef CONFIG_PAX_SEGMEXEC
81298+ if (pax_find_mirror_vma(vma))
81299+ BUG_ON(!trylock_page(page));
81300+#endif
81301+
81302 inc_mm_counter_fast(mm, MM_ANONPAGES);
81303 page_add_new_anon_rmap(page, vma, address);
81304 setpte:
81305@@ -3181,6 +3364,12 @@ setpte:
81306
81307 /* No need to invalidate - it was non-present before */
81308 update_mmu_cache(vma, address, page_table);
81309+
81310+#ifdef CONFIG_PAX_SEGMEXEC
81311+ if (page)
81312+ pax_mirror_anon_pte(vma, address, page, ptl);
81313+#endif
81314+
81315 unlock:
81316 pte_unmap_unlock(page_table, ptl);
81317 return 0;
81318@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81319 */
81320 /* Only go through if we didn't race with anybody else... */
81321 if (likely(pte_same(*page_table, orig_pte))) {
81322+
81323+#ifdef CONFIG_PAX_SEGMEXEC
81324+ if (anon && pax_find_mirror_vma(vma))
81325+ BUG_ON(!trylock_page(page));
81326+#endif
81327+
81328 flush_icache_page(vma, page);
81329 entry = mk_pte(page, vma->vm_page_prot);
81330 if (flags & FAULT_FLAG_WRITE)
81331@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81332
81333 /* no need to invalidate: a not-present page won't be cached */
81334 update_mmu_cache(vma, address, page_table);
81335+
81336+#ifdef CONFIG_PAX_SEGMEXEC
81337+ if (anon)
81338+ pax_mirror_anon_pte(vma, address, page, ptl);
81339+ else
81340+ pax_mirror_file_pte(vma, address, page, ptl);
81341+#endif
81342+
81343 } else {
81344 if (cow_page)
81345 mem_cgroup_uncharge_page(cow_page);
81346@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
81347 if (flags & FAULT_FLAG_WRITE)
81348 flush_tlb_fix_spurious_fault(vma, address);
81349 }
81350+
81351+#ifdef CONFIG_PAX_SEGMEXEC
81352+ pax_mirror_pte(vma, address, pte, pmd, ptl);
81353+ return 0;
81354+#endif
81355+
81356 unlock:
81357 pte_unmap_unlock(pte, ptl);
81358 return 0;
81359@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81360 pmd_t *pmd;
81361 pte_t *pte;
81362
81363+#ifdef CONFIG_PAX_SEGMEXEC
81364+ struct vm_area_struct *vma_m;
81365+#endif
81366+
81367 __set_current_state(TASK_RUNNING);
81368
81369 count_vm_event(PGFAULT);
81370@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81371 if (unlikely(is_vm_hugetlb_page(vma)))
81372 return hugetlb_fault(mm, vma, address, flags);
81373
81374+#ifdef CONFIG_PAX_SEGMEXEC
81375+ vma_m = pax_find_mirror_vma(vma);
81376+ if (vma_m) {
81377+ unsigned long address_m;
81378+ pgd_t *pgd_m;
81379+ pud_t *pud_m;
81380+ pmd_t *pmd_m;
81381+
81382+ if (vma->vm_start > vma_m->vm_start) {
81383+ address_m = address;
81384+ address -= SEGMEXEC_TASK_SIZE;
81385+ vma = vma_m;
81386+ } else
81387+ address_m = address + SEGMEXEC_TASK_SIZE;
81388+
81389+ pgd_m = pgd_offset(mm, address_m);
81390+ pud_m = pud_alloc(mm, pgd_m, address_m);
81391+ if (!pud_m)
81392+ return VM_FAULT_OOM;
81393+ pmd_m = pmd_alloc(mm, pud_m, address_m);
81394+ if (!pmd_m)
81395+ return VM_FAULT_OOM;
81396+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
81397+ return VM_FAULT_OOM;
81398+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
81399+ }
81400+#endif
81401+
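Before servicing the fault, the hunk above canonicalizes a fault that landed in the mirror half back onto the lower vma, and pre-allocates the mirror's pud/pmd/pte so the later pax_mirror_pte() call cannot fail on allocation. A simplified sketch of the address fix-up, again assuming the 1.5GB SEGMEXEC split; the real code decides via the vma start addresses rather than a raw address compare:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL /* assumed split */

static unsigned long canonical_fault_address(unsigned long address)
{
	/* a fault in the upper (mirror) half is serviced on the lower vma */
	return address >= SEGMEXEC_TASK_SIZE ? address - SEGMEXEC_TASK_SIZE
					     : address;
}

int main(void)
{
	printf("%#lx\n", canonical_fault_address(0x68048000UL)); /* -> 0x8048000 */
	return 0;
}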
81402 retry:
81403 pgd = pgd_offset(mm, address);
81404 pud = pud_alloc(mm, pgd, address);
81405@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81406 spin_unlock(&mm->page_table_lock);
81407 return 0;
81408 }
81409+
81410+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81411+{
81412+ pud_t *new = pud_alloc_one(mm, address);
81413+ if (!new)
81414+ return -ENOMEM;
81415+
81416+ smp_wmb(); /* See comment in __pte_alloc */
81417+
81418+ spin_lock(&mm->page_table_lock);
81419+ if (pgd_present(*pgd)) /* Another has populated it */
81420+ pud_free(mm, new);
81421+ else
81422+ pgd_populate_kernel(mm, pgd, new);
81423+ spin_unlock(&mm->page_table_lock);
81424+ return 0;
81425+}
81426 #endif /* __PAGETABLE_PUD_FOLDED */
81427
81428 #ifndef __PAGETABLE_PMD_FOLDED
81429@@ -3819,11 +4077,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
81430 spin_unlock(&mm->page_table_lock);
81431 return 0;
81432 }
81433+
81434+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
81435+{
81436+ pmd_t *new = pmd_alloc_one(mm, address);
81437+ if (!new)
81438+ return -ENOMEM;
81439+
81440+ smp_wmb(); /* See comment in __pte_alloc */
81441+
81442+ spin_lock(&mm->page_table_lock);
81443+#ifndef __ARCH_HAS_4LEVEL_HACK
81444+ if (pud_present(*pud)) /* Another has populated it */
81445+ pmd_free(mm, new);
81446+ else
81447+ pud_populate_kernel(mm, pud, new);
81448+#else
81449+ if (pgd_present(*pud)) /* Another has populated it */
81450+ pmd_free(mm, new);
81451+ else
81452+ pgd_populate_kernel(mm, pud, new);
81453+#endif /* __ARCH_HAS_4LEVEL_HACK */
81454+ spin_unlock(&mm->page_table_lock);
81455+ return 0;
81456+}
81457 #endif /* __PAGETABLE_PMD_FOLDED */
81458
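__pud_alloc_kernel() and __pmd_alloc_kernel() above repeat the pattern of their non-kernel twins: allocate outside the lock, then check-and-populate under mm->page_table_lock, freeing the allocation if another thread won the race. A user-space sketch of that double-checked populate (names invented for illustration, not kernel API):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* mm->page_table_lock */
static void *entry;                                            /* the pgd/pud slot */

static int alloc_entry(void)
{
	void *new = malloc(4096); /* pud_alloc_one(), taken before the lock */
	if (!new)
		return -1;        /* -ENOMEM */

	pthread_mutex_lock(&table_lock);
	if (entry)                /* another thread populated it first */
		free(new);
	else
		entry = new;      /* pgd_populate_kernel() */
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	return alloc_entry();
}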
81459-int make_pages_present(unsigned long addr, unsigned long end)
81460+ssize_t make_pages_present(unsigned long addr, unsigned long end)
81461 {
81462- int ret, len, write;
81463+ ssize_t ret, len, write;
81464 struct vm_area_struct * vma;
81465
81466 vma = find_vma(current->mm, addr);
81467@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
81468 gate_vma.vm_start = FIXADDR_USER_START;
81469 gate_vma.vm_end = FIXADDR_USER_END;
81470 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
81471- gate_vma.vm_page_prot = __P101;
81472+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
81473
81474 return 0;
81475 }
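__P101 indexes protection_map directly and so bypasses vm_get_page_prot(), which this patch teaches to strip executability on non-NX i386 PAGEEXEC setups; routing the gate vma through vm_get_page_prot() keeps those fix-ups applied. A sketch of the underlying 16-entry table lookup, with mode strings standing in for real pgprot values (the index layout follows the comment in mm/mmap.c):

#include <stdio.h>

#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

static const char *protection_map[16] = {
	"---p", "r--p", "-w-p", "rw-p", "--xp", "r-xp", "-wxp", "rwxp",
	"---s", "r--s", "-w-s", "rw-s", "--xs", "r-xs", "-wxs", "rwxs",
};

static const char *get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	/* __P101 is the private read+exec entry */
	printf("%s\n", get_page_prot(VM_READ | VM_EXEC)); /* r-xp */
	return 0;
}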
81476@@ -3990,8 +4272,8 @@ out:
81477 return ret;
81478 }
81479
81480-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81481- void *buf, int len, int write)
81482+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81483+ void *buf, size_t len, int write)
81484 {
81485 resource_size_t phys_addr;
81486 unsigned long prot = 0;
81487@@ -4016,8 +4298,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81488 * Access another process' address space as given in mm. If non-NULL, use the
81489 * given task for page fault accounting.
81490 */
81491-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81492- unsigned long addr, void *buf, int len, int write)
81493+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81494+ unsigned long addr, void *buf, size_t len, int write)
81495 {
81496 struct vm_area_struct *vma;
81497 void *old_buf = buf;
81498@@ -4025,7 +4307,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81499 down_read(&mm->mmap_sem);
81500 /* ignore errors, just check how much was successfully transferred */
81501 while (len) {
81502- int bytes, ret, offset;
81503+ ssize_t bytes, ret, offset;
81504 void *maddr;
81505 struct page *page = NULL;
81506
81507@@ -4084,8 +4366,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81508 *
81509 * The caller must hold a reference on @mm.
81510 */
81511-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
81512- void *buf, int len, int write)
81513+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
81514+ void *buf, size_t len, int write)
81515 {
81516 return __access_remote_vm(NULL, mm, addr, buf, len, write);
81517 }
81518@@ -4095,11 +4377,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
81519 * Source/target buffer must be kernel space,
81520 * Do not walk the page table directly, use get_user_pages
81521 */
81522-int access_process_vm(struct task_struct *tsk, unsigned long addr,
81523- void *buf, int len, int write)
81524+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
81525+ void *buf, size_t len, int write)
81526 {
81527 struct mm_struct *mm;
81528- int ret;
81529+ ssize_t ret;
81530
81531 mm = get_task_mm(tsk);
81532 if (!mm)
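The int-to-ssize_t/size_t conversions in generic_access_phys(), __access_remote_vm(), access_remote_vm() and access_process_vm() above close off length truncation: a length above INT_MAX silently goes negative (or to zero) when squeezed through an int parameter. A small demonstration, assuming an LP64 target:

#include <limits.h>
#include <stdio.h>
#include <sys/types.h>

static void take_int(int len)       { printf("int len:     %d\n", len); }
static void take_ssize(ssize_t len) { printf("ssize_t len: %zd\n", len); }

int main(void)
{
	size_t len = (size_t)INT_MAX + 2; /* a >2GB request on a 64-bit box */
	take_int((int)len);               /* typically wraps negative */
	take_ssize((ssize_t)len);         /* preserved on LP64 */
	return 0;
}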
81533diff --git a/mm/mempolicy.c b/mm/mempolicy.c
81534index 3df6d12..a11056a 100644
81535--- a/mm/mempolicy.c
81536+++ b/mm/mempolicy.c
81537@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81538 unsigned long vmstart;
81539 unsigned long vmend;
81540
81541+#ifdef CONFIG_PAX_SEGMEXEC
81542+ struct vm_area_struct *vma_m;
81543+#endif
81544+
81545 vma = find_vma(mm, start);
81546 if (!vma || vma->vm_start > start)
81547 return -EFAULT;
81548@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81549 if (err)
81550 goto out;
81551 }
81552+
81553 err = vma_replace_policy(vma, new_pol);
81554 if (err)
81555 goto out;
81556+
81557+#ifdef CONFIG_PAX_SEGMEXEC
81558+ vma_m = pax_find_mirror_vma(vma);
81559+ if (vma_m) {
81560+ err = vma_replace_policy(vma_m, new_pol);
81561+ if (err)
81562+ goto out;
81563+ }
81564+#endif
81565+
81566 }
81567
81568 out:
81569@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
81570
81571 if (end < start)
81572 return -EINVAL;
81573+
81574+#ifdef CONFIG_PAX_SEGMEXEC
81575+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
81576+ if (end > SEGMEXEC_TASK_SIZE)
81577+ return -EINVAL;
81578+ } else
81579+#endif
81580+
81581+ if (end > TASK_SIZE)
81582+ return -EINVAL;
81583+
81584 if (end == start)
81585 return 0;
81586
81587@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81588 */
81589 tcred = __task_cred(task);
81590 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81591- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81592- !capable(CAP_SYS_NICE)) {
81593+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81594 rcu_read_unlock();
81595 err = -EPERM;
81596 goto out_put;
81597@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81598 goto out;
81599 }
81600
81601+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81602+ if (mm != current->mm &&
81603+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
81604+ mmput(mm);
81605+ err = -EPERM;
81606+ goto out;
81607+ }
81608+#endif
81609+
81610 err = do_migrate_pages(mm, old, new,
81611 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
81612
81613diff --git a/mm/migrate.c b/mm/migrate.c
81614index 2fd8b4a..d70358f 100644
81615--- a/mm/migrate.c
81616+++ b/mm/migrate.c
81617@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
81618 */
81619 tcred = __task_cred(task);
81620 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81621- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81622- !capable(CAP_SYS_NICE)) {
81623+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81624 rcu_read_unlock();
81625 err = -EPERM;
81626 goto out;
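Both migrate_pages(2) above and move_pages(2) here lose the "same real uid" escape from their permission check: after the patch, only a euid/uid match against the target's uid/suid, or CAP_SYS_NICE, grants access. A sketch of the resulting predicate, with plain ints standing in for kuid_t:

#include <stdbool.h>
#include <stdio.h>

struct cred { int uid, euid, suid; };

static bool may_act_on(const struct cred *c, const struct cred *t, bool cap_sys_nice)
{
	/* pre-patch, the disjunction also granted on c->uid == t->uid */
	return c->euid == t->suid || c->euid == t->uid ||
	       c->uid  == t->suid || cap_sys_nice;
}

int main(void)
{
	struct cred me     = { .uid = 1000, .euid = 2000, .suid = 2000 };
	struct cred target = { .uid = 1000, .euid = 0,    .suid = 0    };
	/* same real uid alone no longer suffices: prints 0 */
	printf("allowed: %d\n", may_act_on(&me, &target, false));
	return 0;
}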
81627diff --git a/mm/mlock.c b/mm/mlock.c
81628index c9bd528..da8d069 100644
81629--- a/mm/mlock.c
81630+++ b/mm/mlock.c
81631@@ -13,6 +13,7 @@
81632 #include <linux/pagemap.h>
81633 #include <linux/mempolicy.h>
81634 #include <linux/syscalls.h>
81635+#include <linux/security.h>
81636 #include <linux/sched.h>
81637 #include <linux/export.h>
81638 #include <linux/rmap.h>
81639@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
81640 {
81641 unsigned long nstart, end, tmp;
81642 struct vm_area_struct * vma, * prev;
81643- int error;
81644+ int error = 0;
81645
81646 VM_BUG_ON(start & ~PAGE_MASK);
81647 VM_BUG_ON(len != PAGE_ALIGN(len));
81648@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
81649 return -EINVAL;
81650 if (end == start)
81651 return 0;
81652+ if (end > TASK_SIZE)
81653+ return -EINVAL;
81654+
81655 vma = find_vma(current->mm, start);
81656 if (!vma || vma->vm_start > start)
81657 return -ENOMEM;
81658@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
81659 for (nstart = start ; ; ) {
81660 vm_flags_t newflags;
81661
81662+#ifdef CONFIG_PAX_SEGMEXEC
81663+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81664+ break;
81665+#endif
81666+
81667 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
81668
81669 newflags = vma->vm_flags | VM_LOCKED;
81670@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
81671 lock_limit >>= PAGE_SHIFT;
81672
81673 /* check against resource limits */
81674+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
81675 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
81676 error = do_mlock(start, len, 1);
81677 up_write(&current->mm->mmap_sem);
81678@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
81679 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
81680 vm_flags_t newflags;
81681
81682+#ifdef CONFIG_PAX_SEGMEXEC
81683+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81684+ break;
81685+#endif
81686+
81687+ BUG_ON(vma->vm_end > TASK_SIZE);
81688 newflags = vma->vm_flags | VM_LOCKED;
81689 if (!(flags & MCL_CURRENT))
81690 newflags &= ~VM_LOCKED;
81691@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
81692 lock_limit >>= PAGE_SHIFT;
81693
81694 ret = -ENOMEM;
81695+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
81696 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
81697 capable(CAP_IPC_LOCK))
81698 ret = do_mlockall(flags);
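do_mlock() additionally rejects ranges reaching beyond TASK_SIZE, and under SEGMEXEC the vma walk stops at the mirror boundary, since the upper half only shadows lower pages. A hedged sketch of the added bounds test (the TASK_SIZE value is assumed, matching a 3GB i386 split):

#include <stdio.h>

#define TASK_SIZE 0xC0000000UL /* assumed: 3GB i386 userland */

static int validate_range(unsigned long start, unsigned long len)
{
	unsigned long end = start + len;

	if (end < start)     /* wrapped around the address space */
		return -1;
	if (end > TASK_SIZE) /* reaches outside userland: -EINVAL */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_range(0xB0000000UL, 0x20000000UL)); /* rejected */
	printf("%d\n", validate_range(0x10000000UL, 0x00001000UL)); /* ok */
	return 0;
}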
81699diff --git a/mm/mmap.c b/mm/mmap.c
81700index 8832b87..04240d1 100644
81701--- a/mm/mmap.c
81702+++ b/mm/mmap.c
81703@@ -32,6 +32,7 @@
81704 #include <linux/khugepaged.h>
81705 #include <linux/uprobes.h>
81706 #include <linux/rbtree_augmented.h>
81707+#include <linux/random.h>
81708
81709 #include <asm/uaccess.h>
81710 #include <asm/cacheflush.h>
81711@@ -48,6 +49,16 @@
81712 #define arch_rebalance_pgtables(addr, len) (addr)
81713 #endif
81714
81715+static inline void verify_mm_writelocked(struct mm_struct *mm)
81716+{
81717+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
81718+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
81719+ up_read(&mm->mmap_sem);
81720+ BUG();
81721+ }
81722+#endif
81723+}
81724+
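verify_mm_writelocked() moves up here (the old CONFIG_DEBUG_VM-only copy is deleted further down) and is now also compiled in under CONFIG_PAX; it infers write ownership from the failure of a read trylock. The same trick with a POSIX rwlock:

#include <assert.h>
#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void verify_writelocked(void)
{
	/* if a read lock can be taken, no writer holds the lock: bug */
	if (pthread_rwlock_tryrdlock(&mmap_sem) == 0) {
		pthread_rwlock_unlock(&mmap_sem);
		assert(0 && "caller does not hold the write lock");
	}
}

int main(void)
{
	pthread_rwlock_wrlock(&mmap_sem);
	verify_writelocked(); /* passes */
	pthread_rwlock_unlock(&mmap_sem);
	return 0;
}

As with the kernel version, a failed trylock only proves that some writer holds the lock, which is why this is a debug assertion rather than a hard guarantee.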
81725 static void unmap_region(struct mm_struct *mm,
81726 struct vm_area_struct *vma, struct vm_area_struct *prev,
81727 unsigned long start, unsigned long end);
81728@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
81729 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
81730 *
81731 */
81732-pgprot_t protection_map[16] = {
81733+pgprot_t protection_map[16] __read_only = {
81734 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
81735 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
81736 };
81737
81738-pgprot_t vm_get_page_prot(unsigned long vm_flags)
81739+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
81740 {
81741- return __pgprot(pgprot_val(protection_map[vm_flags &
81742+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
81743 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
81744 pgprot_val(arch_vm_get_page_prot(vm_flags)));
81745+
81746+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81747+ if (!(__supported_pte_mask & _PAGE_NX) &&
81748+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
81749+ (vm_flags & (VM_READ | VM_WRITE)))
81750+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
81751+#endif
81752+
81753+ return prot;
81754 }
81755 EXPORT_SYMBOL(vm_get_page_prot);
81756
81757 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
81758 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
81759 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
81760+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
81761 /*
81762 * Make sure vm_committed_as in one cacheline and not cacheline shared with
81763 * other variables. It can be updated by several CPUs frequently.
81764@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
81765 struct vm_area_struct *next = vma->vm_next;
81766
81767 might_sleep();
81768+ BUG_ON(vma->vm_mirror);
81769 if (vma->vm_ops && vma->vm_ops->close)
81770 vma->vm_ops->close(vma);
81771 if (vma->vm_file)
81772@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
81773 * not page aligned -Ram Gupta
81774 */
81775 rlim = rlimit(RLIMIT_DATA);
81776+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
81777 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
81778 (mm->end_data - mm->start_data) > rlim)
81779 goto out;
81780@@ -888,6 +911,12 @@ static int
81781 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
81782 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81783 {
81784+
81785+#ifdef CONFIG_PAX_SEGMEXEC
81786+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
81787+ return 0;
81788+#endif
81789+
81790 if (is_mergeable_vma(vma, file, vm_flags) &&
81791 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81792 if (vma->vm_pgoff == vm_pgoff)
81793@@ -907,6 +936,12 @@ static int
81794 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81795 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81796 {
81797+
81798+#ifdef CONFIG_PAX_SEGMEXEC
81799+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
81800+ return 0;
81801+#endif
81802+
81803 if (is_mergeable_vma(vma, file, vm_flags) &&
81804 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81805 pgoff_t vm_pglen;
81806@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81807 struct vm_area_struct *vma_merge(struct mm_struct *mm,
81808 struct vm_area_struct *prev, unsigned long addr,
81809 unsigned long end, unsigned long vm_flags,
81810- struct anon_vma *anon_vma, struct file *file,
81811+ struct anon_vma *anon_vma, struct file *file,
81812 pgoff_t pgoff, struct mempolicy *policy)
81813 {
81814 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
81815 struct vm_area_struct *area, *next;
81816 int err;
81817
81818+#ifdef CONFIG_PAX_SEGMEXEC
81819+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
81820+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
81821+
81822+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
81823+#endif
81824+
81825 /*
81826 * We later require that vma->vm_flags == vm_flags,
81827 * so this tests vma->vm_flags & VM_SPECIAL, too.
81828@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81829 if (next && next->vm_end == end) /* cases 6, 7, 8 */
81830 next = next->vm_next;
81831
81832+#ifdef CONFIG_PAX_SEGMEXEC
81833+ if (prev)
81834+ prev_m = pax_find_mirror_vma(prev);
81835+ if (area)
81836+ area_m = pax_find_mirror_vma(area);
81837+ if (next)
81838+ next_m = pax_find_mirror_vma(next);
81839+#endif
81840+
81841 /*
81842 * Can it merge with the predecessor?
81843 */
81844@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81845 /* cases 1, 6 */
81846 err = vma_adjust(prev, prev->vm_start,
81847 next->vm_end, prev->vm_pgoff, NULL);
81848- } else /* cases 2, 5, 7 */
81849+
81850+#ifdef CONFIG_PAX_SEGMEXEC
81851+ if (!err && prev_m)
81852+ err = vma_adjust(prev_m, prev_m->vm_start,
81853+ next_m->vm_end, prev_m->vm_pgoff, NULL);
81854+#endif
81855+
81856+ } else { /* cases 2, 5, 7 */
81857 err = vma_adjust(prev, prev->vm_start,
81858 end, prev->vm_pgoff, NULL);
81859+
81860+#ifdef CONFIG_PAX_SEGMEXEC
81861+ if (!err && prev_m)
81862+ err = vma_adjust(prev_m, prev_m->vm_start,
81863+ end_m, prev_m->vm_pgoff, NULL);
81864+#endif
81865+
81866+ }
81867 if (err)
81868 return NULL;
81869 khugepaged_enter_vma_merge(prev);
81870@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81871 mpol_equal(policy, vma_policy(next)) &&
81872 can_vma_merge_before(next, vm_flags,
81873 anon_vma, file, pgoff+pglen)) {
81874- if (prev && addr < prev->vm_end) /* case 4 */
81875+ if (prev && addr < prev->vm_end) { /* case 4 */
81876 err = vma_adjust(prev, prev->vm_start,
81877 addr, prev->vm_pgoff, NULL);
81878- else /* cases 3, 8 */
81879+
81880+#ifdef CONFIG_PAX_SEGMEXEC
81881+ if (!err && prev_m)
81882+ err = vma_adjust(prev_m, prev_m->vm_start,
81883+ addr_m, prev_m->vm_pgoff, NULL);
81884+#endif
81885+
81886+ } else { /* cases 3, 8 */
81887 err = vma_adjust(area, addr, next->vm_end,
81888 next->vm_pgoff - pglen, NULL);
81889+
81890+#ifdef CONFIG_PAX_SEGMEXEC
81891+ if (!err && area_m)
81892+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
81893+ next_m->vm_pgoff - pglen, NULL);
81894+#endif
81895+
81896+ }
81897 if (err)
81898 return NULL;
81899 khugepaged_enter_vma_merge(area);
81900@@ -1120,8 +1201,10 @@ none:
81901 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81902 struct file *file, long pages)
81903 {
81904- const unsigned long stack_flags
81905- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
81906+
81907+#ifdef CONFIG_PAX_RANDMMAP
81908+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
81909+#endif
81910
81911 mm->total_vm += pages;
81912
81913@@ -1129,7 +1212,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81914 mm->shared_vm += pages;
81915 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
81916 mm->exec_vm += pages;
81917- } else if (flags & stack_flags)
81918+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
81919 mm->stack_vm += pages;
81920 }
81921 #endif /* CONFIG_PROC_FS */
81922@@ -1165,7 +1248,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81923 * (the exception is when the underlying filesystem is noexec
81924 * mounted, in which case we dont add PROT_EXEC.)
81925 */
81926- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
81927+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
81928 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
81929 prot |= PROT_EXEC;
81930
81931@@ -1191,7 +1274,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81932 /* Obtain the address to map to. we verify (or select) it and ensure
81933 * that it represents a valid section of the address space.
81934 */
81935- addr = get_unmapped_area(file, addr, len, pgoff, flags);
81936+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
81937 if (addr & ~PAGE_MASK)
81938 return addr;
81939
81940@@ -1202,6 +1285,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81941 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
81942 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
81943
81944+#ifdef CONFIG_PAX_MPROTECT
81945+ if (mm->pax_flags & MF_PAX_MPROTECT) {
81946+#ifndef CONFIG_PAX_MPROTECT_COMPAT
81947+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
81948+ gr_log_rwxmmap(file);
81949+
81950+#ifdef CONFIG_PAX_EMUPLT
81951+ vm_flags &= ~VM_EXEC;
81952+#else
81953+ return -EPERM;
81954+#endif
81955+
81956+ }
81957+
81958+ if (!(vm_flags & VM_EXEC))
81959+ vm_flags &= ~VM_MAYEXEC;
81960+#else
81961+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
81962+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
81963+#endif
81964+ else
81965+ vm_flags &= ~VM_MAYWRITE;
81966+ }
81967+#endif
81968+
81969+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81970+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
81971+ vm_flags &= ~VM_PAGEEXEC;
81972+#endif
81973+
81974 if (flags & MAP_LOCKED)
81975 if (!can_do_mlock())
81976 return -EPERM;
81977@@ -1213,6 +1326,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81978 locked += mm->locked_vm;
81979 lock_limit = rlimit(RLIMIT_MEMLOCK);
81980 lock_limit >>= PAGE_SHIFT;
81981+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
81982 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
81983 return -EAGAIN;
81984 }
81985@@ -1279,6 +1393,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81986 }
81987 }
81988
81989+ if (!gr_acl_handle_mmap(file, prot))
81990+ return -EACCES;
81991+
81992 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
81993 }
81994
81995@@ -1356,7 +1473,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
81996 vm_flags_t vm_flags = vma->vm_flags;
81997
81998 /* If it was private or non-writable, the write bit is already clear */
81999- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
82000+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
82001 return 0;
82002
82003 /* The backer wishes to know when pages are first written to? */
82004@@ -1405,16 +1522,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82005 unsigned long charged = 0;
82006 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
82007
82008+#ifdef CONFIG_PAX_SEGMEXEC
82009+ struct vm_area_struct *vma_m = NULL;
82010+#endif
82011+
82012+ /*
82013+ * mm->mmap_sem is required to protect against another thread
82014+ * changing the mappings in case we sleep.
82015+ */
82016+ verify_mm_writelocked(mm);
82017+
82018 /* Clear old maps */
82019 error = -ENOMEM;
82020-munmap_back:
82021 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82022 if (do_munmap(mm, addr, len))
82023 return -ENOMEM;
82024- goto munmap_back;
82025+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82026 }
82027
82028 /* Check against address space limit. */
82029+
82030+#ifdef CONFIG_PAX_RANDMMAP
82031+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82032+#endif
82033+
82034 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82035 return -ENOMEM;
82036
82037@@ -1460,6 +1591,16 @@ munmap_back:
82038 goto unacct_error;
82039 }
82040
82041+#ifdef CONFIG_PAX_SEGMEXEC
82042+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
82043+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82044+ if (!vma_m) {
82045+ error = -ENOMEM;
82046+ goto free_vma;
82047+ }
82048+ }
82049+#endif
82050+
82051 vma->vm_mm = mm;
82052 vma->vm_start = addr;
82053 vma->vm_end = addr + len;
82054@@ -1484,6 +1625,13 @@ munmap_back:
82055 if (error)
82056 goto unmap_and_free_vma;
82057
82058+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82059+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
82060+ vma->vm_flags |= VM_PAGEEXEC;
82061+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82062+ }
82063+#endif
82064+
82065 /* Can addr have changed??
82066 *
82067 * Answer: Yes, several device drivers can do it in their
82068@@ -1522,6 +1670,11 @@ munmap_back:
82069 vma_link(mm, vma, prev, rb_link, rb_parent);
82070 file = vma->vm_file;
82071
82072+#ifdef CONFIG_PAX_SEGMEXEC
82073+ if (vma_m)
82074+ BUG_ON(pax_mirror_vma(vma_m, vma));
82075+#endif
82076+
82077 /* Once vma denies write, undo our temporary denial count */
82078 if (correct_wcount)
82079 atomic_inc(&inode->i_writecount);
82080@@ -1529,6 +1682,7 @@ out:
82081 perf_event_mmap(vma);
82082
82083 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
82084+ track_exec_limit(mm, addr, addr + len, vm_flags);
82085 if (vm_flags & VM_LOCKED) {
82086 if (!mlock_vma_pages_range(vma, addr, addr + len))
82087 mm->locked_vm += (len >> PAGE_SHIFT);
82088@@ -1550,6 +1704,12 @@ unmap_and_free_vma:
82089 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
82090 charged = 0;
82091 free_vma:
82092+
82093+#ifdef CONFIG_PAX_SEGMEXEC
82094+ if (vma_m)
82095+ kmem_cache_free(vm_area_cachep, vma_m);
82096+#endif
82097+
82098 kmem_cache_free(vm_area_cachep, vma);
82099 unacct_error:
82100 if (charged)
82101@@ -1557,6 +1717,62 @@ unacct_error:
82102 return error;
82103 }
82104
82105+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
82106+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
82107+{
82108+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
82109+ return (random32() & 0xFF) << PAGE_SHIFT;
82110+
82111+ return 0;
82112+}
82113+#endif
82114+
82115+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
82116+{
82117+ if (!vma) {
82118+#ifdef CONFIG_STACK_GROWSUP
82119+ if (addr > sysctl_heap_stack_gap)
82120+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
82121+ else
82122+ vma = find_vma(current->mm, 0);
82123+ if (vma && (vma->vm_flags & VM_GROWSUP))
82124+ return false;
82125+#endif
82126+ return true;
82127+ }
82128+
82129+ if (addr + len > vma->vm_start)
82130+ return false;
82131+
82132+ if (vma->vm_flags & VM_GROWSDOWN)
82133+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
82134+#ifdef CONFIG_STACK_GROWSUP
82135+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
82136+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
82137+#endif
82138+ else if (offset)
82139+ return offset <= vma->vm_start - addr - len;
82140+
82141+ return true;
82142+}
82143+
82144+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
82145+{
82146+ if (vma->vm_start < len)
82147+ return -ENOMEM;
82148+
82149+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
82150+ if (offset <= vma->vm_start - len)
82151+ return vma->vm_start - len - offset;
82152+ else
82153+ return -ENOMEM;
82154+ }
82155+
82156+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
82157+ return vma->vm_start - len - sysctl_heap_stack_gap;
82158+ return -ENOMEM;
82159+}
82160+
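check_heap_stack_gap() and skip_heap_stack_gap() enforce the sysctl_heap_stack_gap policy introduced above: a new mapping may not end within the configured gap below a VM_GROWSDOWN region, with symmetric handling for VM_GROWSUP stacks. A simplified user-space sketch of the downward-growing case, with illustrative addresses:

#include <stdbool.h>
#include <stdio.h>

#define HEAP_STACK_GAP (64UL * 1024) /* default from the patch: 64KB */

struct vma { unsigned long start, end; bool grows_down; };

static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	if (!next)
		return true;                  /* nothing above the candidate */
	if (addr + len > next->start)
		return false;                 /* overlaps outright */
	if (next->grows_down)
		return next->start - (addr + len) >= HEAP_STACK_GAP;
	return true;
}

int main(void)
{
	struct vma stack = { 0xbf000000UL, 0xc0000000UL, true };

	printf("%d\n", gap_ok(&stack, 0xbeffe000UL, 0x1000)); /* 4KB gap: too close, 0 */
	printf("%d\n", gap_ok(&stack, 0xbe000000UL, 0x1000)); /* ~16MB gap: fine, 1 */
	return 0;
}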
82161 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
82162 {
82163 /*
82164@@ -1776,6 +1992,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82165 struct mm_struct *mm = current->mm;
82166 struct vm_area_struct *vma;
82167 struct vm_unmapped_area_info info;
82168+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82169
82170 if (len > TASK_SIZE)
82171 return -ENOMEM;
82172@@ -1783,17 +2000,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82173 if (flags & MAP_FIXED)
82174 return addr;
82175
82176+#ifdef CONFIG_PAX_RANDMMAP
82177+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82178+#endif
82179+
82180 if (addr) {
82181 addr = PAGE_ALIGN(addr);
82182 vma = find_vma(mm, addr);
82183- if (TASK_SIZE - len >= addr &&
82184- (!vma || addr + len <= vma->vm_start))
82185+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82186 return addr;
82187 }
82188
82189 info.flags = 0;
82190 info.length = len;
82191 info.low_limit = TASK_UNMAPPED_BASE;
82192+
82193+#ifdef CONFIG_PAX_RANDMMAP
82194+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82195+ info.low_limit += mm->delta_mmap;
82196+#endif
82197+
82198 info.high_limit = TASK_SIZE;
82199 info.align_mask = 0;
82200 return vm_unmapped_area(&info);
82201@@ -1802,10 +2028,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82202
82203 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
82204 {
82205+
82206+#ifdef CONFIG_PAX_SEGMEXEC
82207+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82208+ return;
82209+#endif
82210+
82211 /*
82212 * Is this a new hole at the lowest possible address?
82213 */
82214- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
82215+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
82216 mm->free_area_cache = addr;
82217 }
82218
82219@@ -1823,6 +2055,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82220 struct mm_struct *mm = current->mm;
82221 unsigned long addr = addr0;
82222 struct vm_unmapped_area_info info;
82223+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82224
82225 /* requested length too big for entire address space */
82226 if (len > TASK_SIZE)
82227@@ -1831,12 +2064,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82228 if (flags & MAP_FIXED)
82229 return addr;
82230
82231+#ifdef CONFIG_PAX_RANDMMAP
82232+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82233+#endif
82234+
82235 /* requesting a specific address */
82236 if (addr) {
82237 addr = PAGE_ALIGN(addr);
82238 vma = find_vma(mm, addr);
82239- if (TASK_SIZE - len >= addr &&
82240- (!vma || addr + len <= vma->vm_start))
82241+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82242 return addr;
82243 }
82244
82245@@ -1857,6 +2093,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82246 VM_BUG_ON(addr != -ENOMEM);
82247 info.flags = 0;
82248 info.low_limit = TASK_UNMAPPED_BASE;
82249+
82250+#ifdef CONFIG_PAX_RANDMMAP
82251+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82252+ info.low_limit += mm->delta_mmap;
82253+#endif
82254+
82255 info.high_limit = TASK_SIZE;
82256 addr = vm_unmapped_area(&info);
82257 }
82258@@ -1867,6 +2109,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82259
82260 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82261 {
82262+
82263+#ifdef CONFIG_PAX_SEGMEXEC
82264+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82265+ return;
82266+#endif
82267+
82268 /*
82269 * Is this a new hole at the highest possible address?
82270 */
82271@@ -1874,8 +2122,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82272 mm->free_area_cache = addr;
82273
82274 /* dont allow allocations above current base */
82275- if (mm->free_area_cache > mm->mmap_base)
82276+ if (mm->free_area_cache > mm->mmap_base) {
82277 mm->free_area_cache = mm->mmap_base;
82278+ mm->cached_hole_size = ~0UL;
82279+ }
82280 }
82281
82282 unsigned long
82283@@ -1922,7 +2172,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
82284
82285 /* Check the cache first. */
82286 /* (Cache hit rate is typically around 35%.) */
82287- vma = mm->mmap_cache;
82288+ vma = ACCESS_ONCE(mm->mmap_cache);
82289 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
82290 struct rb_node *rb_node;
82291
82292@@ -1974,6 +2224,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
82293 return vma;
82294 }
82295
82296+#ifdef CONFIG_PAX_SEGMEXEC
82297+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
82298+{
82299+ struct vm_area_struct *vma_m;
82300+
82301+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
82302+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
82303+ BUG_ON(vma->vm_mirror);
82304+ return NULL;
82305+ }
82306+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
82307+ vma_m = vma->vm_mirror;
82308+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
82309+ BUG_ON(vma->vm_file != vma_m->vm_file);
82310+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
82311+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
82312+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
82313+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
82314+ return vma_m;
82315+}
82316+#endif
82317+
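pax_find_mirror_vma() leans on a symmetric invariant: a vma and its mirror point at each other and agree on file, size and pgoff, which the BUG_ON()s assert. A minimal sketch of the symmetry check with a stand-in struct:

#include <assert.h>
#include <stddef.h>

struct vma { struct vma *mirror; };

static struct vma *find_mirror(struct vma *v)
{
	struct vma *m = v->mirror;

	if (m)
		assert(m->mirror == v); /* links must point at each other */
	return m;
}

int main(void)
{
	struct vma a = { NULL }, b = { NULL };

	a.mirror = &b;
	b.mirror = &a;
	assert(find_mirror(&a) == &b);
	return 0;
}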
82318 /*
82319 * Verify that the stack growth is acceptable and
82320 * update accounting. This is shared with both the
82321@@ -1990,6 +2262,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82322 return -ENOMEM;
82323
82324 /* Stack limit test */
82325+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
82326 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
82327 return -ENOMEM;
82328
82329@@ -2000,6 +2273,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82330 locked = mm->locked_vm + grow;
82331 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
82332 limit >>= PAGE_SHIFT;
82333+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82334 if (locked > limit && !capable(CAP_IPC_LOCK))
82335 return -ENOMEM;
82336 }
82337@@ -2029,37 +2303,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82338 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
82339 * vma is the last one with address > vma->vm_end. Have to extend vma.
82340 */
82341+#ifndef CONFIG_IA64
82342+static
82343+#endif
82344 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82345 {
82346 int error;
82347+ bool locknext;
82348
82349 if (!(vma->vm_flags & VM_GROWSUP))
82350 return -EFAULT;
82351
82352+ /* Also guard against wrapping around to address 0. */
82353+ if (address < PAGE_ALIGN(address+1))
82354+ address = PAGE_ALIGN(address+1);
82355+ else
82356+ return -ENOMEM;
82357+
82358 /*
82359 * We must make sure the anon_vma is allocated
82360 * so that the anon_vma locking is not a noop.
82361 */
82362 if (unlikely(anon_vma_prepare(vma)))
82363 return -ENOMEM;
82364+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
82365+ if (locknext && anon_vma_prepare(vma->vm_next))
82366+ return -ENOMEM;
82367 vma_lock_anon_vma(vma);
82368+ if (locknext)
82369+ vma_lock_anon_vma(vma->vm_next);
82370
82371 /*
82372 * vma->vm_start/vm_end cannot change under us because the caller
82373 * is required to hold the mmap_sem in read mode. We need the
82374- * anon_vma lock to serialize against concurrent expand_stacks.
82375- * Also guard against wrapping around to address 0.
82376+ * anon_vma locks to serialize against concurrent expand_stacks
82377+ * and expand_upwards.
82378 */
82379- if (address < PAGE_ALIGN(address+4))
82380- address = PAGE_ALIGN(address+4);
82381- else {
82382- vma_unlock_anon_vma(vma);
82383- return -ENOMEM;
82384- }
82385 error = 0;
82386
82387 /* Somebody else might have raced and expanded it already */
82388- if (address > vma->vm_end) {
82389+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
82390+ error = -ENOMEM;
82391+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
82392 unsigned long size, grow;
82393
82394 size = address - vma->vm_start;
82395@@ -2094,6 +2379,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82396 }
82397 }
82398 }
82399+ if (locknext)
82400+ vma_unlock_anon_vma(vma->vm_next);
82401 vma_unlock_anon_vma(vma);
82402 khugepaged_enter_vma_merge(vma);
82403 validate_mm(vma->vm_mm);
82404@@ -2108,6 +2395,8 @@ int expand_downwards(struct vm_area_struct *vma,
82405 unsigned long address)
82406 {
82407 int error;
82408+ bool lockprev = false;
82409+ struct vm_area_struct *prev;
82410
82411 /*
82412 * We must make sure the anon_vma is allocated
82413@@ -2121,6 +2410,15 @@ int expand_downwards(struct vm_area_struct *vma,
82414 if (error)
82415 return error;
82416
82417+ prev = vma->vm_prev;
82418+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
82419+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
82420+#endif
82421+ if (lockprev && anon_vma_prepare(prev))
82422+ return -ENOMEM;
82423+ if (lockprev)
82424+ vma_lock_anon_vma(prev);
82425+
82426 vma_lock_anon_vma(vma);
82427
82428 /*
82429@@ -2130,9 +2428,17 @@ int expand_downwards(struct vm_area_struct *vma,
82430 */
82431
82432 /* Somebody else might have raced and expanded it already */
82433- if (address < vma->vm_start) {
82434+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
82435+ error = -ENOMEM;
82436+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
82437 unsigned long size, grow;
82438
82439+#ifdef CONFIG_PAX_SEGMEXEC
82440+ struct vm_area_struct *vma_m;
82441+
82442+ vma_m = pax_find_mirror_vma(vma);
82443+#endif
82444+
82445 size = vma->vm_end - address;
82446 grow = (vma->vm_start - address) >> PAGE_SHIFT;
82447
82448@@ -2157,6 +2463,18 @@ int expand_downwards(struct vm_area_struct *vma,
82449 vma->vm_pgoff -= grow;
82450 anon_vma_interval_tree_post_update_vma(vma);
82451 vma_gap_update(vma);
82452+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
82453+
82454+#ifdef CONFIG_PAX_SEGMEXEC
82455+ if (vma_m) {
82456+ anon_vma_interval_tree_pre_update_vma(vma_m);
82457+ vma_m->vm_start -= grow << PAGE_SHIFT;
82458+ vma_m->vm_pgoff -= grow;
82459+ anon_vma_interval_tree_post_update_vma(vma_m);
82460+ vma_gap_update(vma_m);
82461+ }
82462+#endif
82463+
82464 spin_unlock(&vma->vm_mm->page_table_lock);
82465
82466 perf_event_mmap(vma);
82467@@ -2263,6 +2581,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
82468 do {
82469 long nrpages = vma_pages(vma);
82470
82471+#ifdef CONFIG_PAX_SEGMEXEC
82472+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
82473+ vma = remove_vma(vma);
82474+ continue;
82475+ }
82476+#endif
82477+
82478 if (vma->vm_flags & VM_ACCOUNT)
82479 nr_accounted += nrpages;
82480 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
82481@@ -2308,6 +2633,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
82482 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
82483 vma->vm_prev = NULL;
82484 do {
82485+
82486+#ifdef CONFIG_PAX_SEGMEXEC
82487+ if (vma->vm_mirror) {
82488+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
82489+ vma->vm_mirror->vm_mirror = NULL;
82490+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
82491+ vma->vm_mirror = NULL;
82492+ }
82493+#endif
82494+
82495 vma_rb_erase(vma, &mm->mm_rb);
82496 mm->map_count--;
82497 tail_vma = vma;
82498@@ -2339,14 +2674,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82499 struct vm_area_struct *new;
82500 int err = -ENOMEM;
82501
82502+#ifdef CONFIG_PAX_SEGMEXEC
82503+ struct vm_area_struct *vma_m, *new_m = NULL;
82504+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
82505+#endif
82506+
82507 if (is_vm_hugetlb_page(vma) && (addr &
82508 ~(huge_page_mask(hstate_vma(vma)))))
82509 return -EINVAL;
82510
82511+#ifdef CONFIG_PAX_SEGMEXEC
82512+ vma_m = pax_find_mirror_vma(vma);
82513+#endif
82514+
82515 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82516 if (!new)
82517 goto out_err;
82518
82519+#ifdef CONFIG_PAX_SEGMEXEC
82520+ if (vma_m) {
82521+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82522+ if (!new_m) {
82523+ kmem_cache_free(vm_area_cachep, new);
82524+ goto out_err;
82525+ }
82526+ }
82527+#endif
82528+
82529 /* most fields are the same, copy all, and then fixup */
82530 *new = *vma;
82531
82532@@ -2359,6 +2713,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82533 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
82534 }
82535
82536+#ifdef CONFIG_PAX_SEGMEXEC
82537+ if (vma_m) {
82538+ *new_m = *vma_m;
82539+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
82540+ new_m->vm_mirror = new;
82541+ new->vm_mirror = new_m;
82542+
82543+ if (new_below)
82544+ new_m->vm_end = addr_m;
82545+ else {
82546+ new_m->vm_start = addr_m;
82547+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
82548+ }
82549+ }
82550+#endif
82551+
82552 pol = mpol_dup(vma_policy(vma));
82553 if (IS_ERR(pol)) {
82554 err = PTR_ERR(pol);
82555@@ -2381,6 +2751,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82556 else
82557 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
82558
82559+#ifdef CONFIG_PAX_SEGMEXEC
82560+ if (!err && vma_m) {
82561+ if (anon_vma_clone(new_m, vma_m))
82562+ goto out_free_mpol;
82563+
82564+ mpol_get(pol);
82565+ vma_set_policy(new_m, pol);
82566+
82567+ if (new_m->vm_file)
82568+ get_file(new_m->vm_file);
82569+
82570+ if (new_m->vm_ops && new_m->vm_ops->open)
82571+ new_m->vm_ops->open(new_m);
82572+
82573+ if (new_below)
82574+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
82575+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
82576+ else
82577+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
82578+
82579+ if (err) {
82580+ if (new_m->vm_ops && new_m->vm_ops->close)
82581+ new_m->vm_ops->close(new_m);
82582+ if (new_m->vm_file)
82583+ fput(new_m->vm_file);
82584+ mpol_put(pol);
82585+ }
82586+ }
82587+#endif
82588+
82589 /* Success. */
82590 if (!err)
82591 return 0;
82592@@ -2390,10 +2790,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82593 new->vm_ops->close(new);
82594 if (new->vm_file)
82595 fput(new->vm_file);
82596- unlink_anon_vmas(new);
82597 out_free_mpol:
82598 mpol_put(pol);
82599 out_free_vma:
82600+
82601+#ifdef CONFIG_PAX_SEGMEXEC
82602+ if (new_m) {
82603+ unlink_anon_vmas(new_m);
82604+ kmem_cache_free(vm_area_cachep, new_m);
82605+ }
82606+#endif
82607+
82608+ unlink_anon_vmas(new);
82609 kmem_cache_free(vm_area_cachep, new);
82610 out_err:
82611 return err;
82612@@ -2406,6 +2814,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82613 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82614 unsigned long addr, int new_below)
82615 {
82616+
82617+#ifdef CONFIG_PAX_SEGMEXEC
82618+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82619+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
82620+ if (mm->map_count >= sysctl_max_map_count-1)
82621+ return -ENOMEM;
82622+ } else
82623+#endif
82624+
82625 if (mm->map_count >= sysctl_max_map_count)
82626 return -ENOMEM;
82627
82628@@ -2417,11 +2834,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82629 * work. This now handles partial unmappings.
82630 * Jeremy Fitzhardinge <jeremy@goop.org>
82631 */
82632+#ifdef CONFIG_PAX_SEGMEXEC
82633 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82634 {
82635+ int ret = __do_munmap(mm, start, len);
82636+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
82637+ return ret;
82638+
82639+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
82640+}
82641+
82642+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82643+#else
82644+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82645+#endif
82646+{
82647 unsigned long end;
82648 struct vm_area_struct *vma, *prev, *last;
82649
82650+ /*
82651+ * mm->mmap_sem is required to protect against another thread
82652+ * changing the mappings in case we sleep.
82653+ */
82654+ verify_mm_writelocked(mm);
82655+
82656 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
82657 return -EINVAL;
82658
82659@@ -2496,6 +2932,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82660 /* Fix up all other VM information */
82661 remove_vma_list(mm, vma);
82662
82663+ track_exec_limit(mm, start, end, 0UL);
82664+
82665 return 0;
82666 }
82667
82668@@ -2504,6 +2942,13 @@ int vm_munmap(unsigned long start, size_t len)
82669 int ret;
82670 struct mm_struct *mm = current->mm;
82671
82672+
82673+#ifdef CONFIG_PAX_SEGMEXEC
82674+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
82675+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
82676+ return -EINVAL;
82677+#endif
82678+
82679 down_write(&mm->mmap_sem);
82680 ret = do_munmap(mm, start, len);
82681 up_write(&mm->mmap_sem);
82682@@ -2517,16 +2962,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
82683 return vm_munmap(addr, len);
82684 }
82685
82686-static inline void verify_mm_writelocked(struct mm_struct *mm)
82687-{
82688-#ifdef CONFIG_DEBUG_VM
82689- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82690- WARN_ON(1);
82691- up_read(&mm->mmap_sem);
82692- }
82693-#endif
82694-}
82695-
82696 /*
82697 * this is really a simplified "do_mmap". it only handles
82698 * anonymous maps. eventually we may be able to do some
82699@@ -2540,6 +2975,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82700 struct rb_node ** rb_link, * rb_parent;
82701 pgoff_t pgoff = addr >> PAGE_SHIFT;
82702 int error;
82703+ unsigned long charged;
82704
82705 len = PAGE_ALIGN(len);
82706 if (!len)
82707@@ -2547,16 +2983,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82708
82709 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
82710
82711+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
82712+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
82713+ flags &= ~VM_EXEC;
82714+
82715+#ifdef CONFIG_PAX_MPROTECT
82716+ if (mm->pax_flags & MF_PAX_MPROTECT)
82717+ flags &= ~VM_MAYEXEC;
82718+#endif
82719+
82720+ }
82721+#endif
82722+
82723 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
82724 if (error & ~PAGE_MASK)
82725 return error;
82726
82727+ charged = len >> PAGE_SHIFT;
82728+
82729 /*
82730 * mlock MCL_FUTURE?
82731 */
82732 if (mm->def_flags & VM_LOCKED) {
82733 unsigned long locked, lock_limit;
82734- locked = len >> PAGE_SHIFT;
82735+ locked = charged;
82736 locked += mm->locked_vm;
82737 lock_limit = rlimit(RLIMIT_MEMLOCK);
82738 lock_limit >>= PAGE_SHIFT;
82739@@ -2573,21 +3023,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82740 /*
82741 * Clear old maps. this also does some error checking for us
82742 */
82743- munmap_back:
82744 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82745 if (do_munmap(mm, addr, len))
82746 return -ENOMEM;
82747- goto munmap_back;
82748+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82749 }
82750
82751 /* Check against address space limits *after* clearing old maps... */
82752- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82753+ if (!may_expand_vm(mm, charged))
82754 return -ENOMEM;
82755
82756 if (mm->map_count > sysctl_max_map_count)
82757 return -ENOMEM;
82758
82759- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
82760+ if (security_vm_enough_memory_mm(mm, charged))
82761 return -ENOMEM;
82762
82763 /* Can we just expand an old private anonymous mapping? */
82764@@ -2601,7 +3050,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82765 */
82766 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82767 if (!vma) {
82768- vm_unacct_memory(len >> PAGE_SHIFT);
82769+ vm_unacct_memory(charged);
82770 return -ENOMEM;
82771 }
82772
82773@@ -2615,11 +3064,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82774 vma_link(mm, vma, prev, rb_link, rb_parent);
82775 out:
82776 perf_event_mmap(vma);
82777- mm->total_vm += len >> PAGE_SHIFT;
82778+ mm->total_vm += charged;
82779 if (flags & VM_LOCKED) {
82780 if (!mlock_vma_pages_range(vma, addr, addr + len))
82781- mm->locked_vm += (len >> PAGE_SHIFT);
82782+ mm->locked_vm += charged;
82783 }
82784+ track_exec_limit(mm, addr, addr + len, flags);
82785 return addr;
82786 }
82787
82788@@ -2677,6 +3127,7 @@ void exit_mmap(struct mm_struct *mm)
82789 while (vma) {
82790 if (vma->vm_flags & VM_ACCOUNT)
82791 nr_accounted += vma_pages(vma);
82792+ vma->vm_mirror = NULL;
82793 vma = remove_vma(vma);
82794 }
82795 vm_unacct_memory(nr_accounted);
82796@@ -2693,6 +3144,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82797 struct vm_area_struct *prev;
82798 struct rb_node **rb_link, *rb_parent;
82799
82800+#ifdef CONFIG_PAX_SEGMEXEC
82801+ struct vm_area_struct *vma_m = NULL;
82802+#endif
82803+
82804+ if (security_mmap_addr(vma->vm_start))
82805+ return -EPERM;
82806+
82807 /*
82808 * The vm_pgoff of a purely anonymous vma should be irrelevant
82809 * until its first write fault, when page's anon_vma and index
82810@@ -2716,7 +3174,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82811 security_vm_enough_memory_mm(mm, vma_pages(vma)))
82812 return -ENOMEM;
82813
82814+#ifdef CONFIG_PAX_SEGMEXEC
82815+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
82816+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82817+ if (!vma_m)
82818+ return -ENOMEM;
82819+ }
82820+#endif
82821+
82822 vma_link(mm, vma, prev, rb_link, rb_parent);
82823+
82824+#ifdef CONFIG_PAX_SEGMEXEC
82825+ if (vma_m)
82826+ BUG_ON(pax_mirror_vma(vma_m, vma));
82827+#endif
82828+
82829 return 0;
82830 }
82831
82832@@ -2736,6 +3208,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82833 struct mempolicy *pol;
82834 bool faulted_in_anon_vma = true;
82835
82836+ BUG_ON(vma->vm_mirror);
82837+
82838 /*
82839 * If anonymous vma has not yet been faulted, update new pgoff
82840 * to match new location, to increase its chance of merging.
82841@@ -2802,6 +3276,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82842 return NULL;
82843 }
82844
82845+#ifdef CONFIG_PAX_SEGMEXEC
82846+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
82847+{
82848+ struct vm_area_struct *prev_m;
82849+ struct rb_node **rb_link_m, *rb_parent_m;
82850+ struct mempolicy *pol_m;
82851+
82852+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
82853+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
82854+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
82855+ *vma_m = *vma;
82856+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
82857+ if (anon_vma_clone(vma_m, vma))
82858+ return -ENOMEM;
82859+ pol_m = vma_policy(vma_m);
82860+ mpol_get(pol_m);
82861+ vma_set_policy(vma_m, pol_m);
82862+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
82863+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
82864+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
82865+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
82866+ if (vma_m->vm_file)
82867+ get_file(vma_m->vm_file);
82868+ if (vma_m->vm_ops && vma_m->vm_ops->open)
82869+ vma_m->vm_ops->open(vma_m);
82870+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
82871+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
82872+ vma_m->vm_mirror = vma;
82873+ vma->vm_mirror = vma_m;
82874+ return 0;
82875+}
82876+#endif
82877+
82878 /*
82879 * Return true if the calling process may expand its vm space by the passed
82880 * number of pages
82881@@ -2813,6 +3320,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
82882
82883 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
82884
82885+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
82886 if (cur + npages > lim)
82887 return 0;
82888 return 1;
82889@@ -2883,6 +3391,22 @@ int install_special_mapping(struct mm_struct *mm,
82890 vma->vm_start = addr;
82891 vma->vm_end = addr + len;
82892
82893+#ifdef CONFIG_PAX_MPROTECT
82894+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82895+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82896+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
82897+ return -EPERM;
82898+ if (!(vm_flags & VM_EXEC))
82899+ vm_flags &= ~VM_MAYEXEC;
82900+#else
82901+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82902+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82903+#endif
82904+ else
82905+ vm_flags &= ~VM_MAYWRITE;
82906+ }
82907+#endif
82908+
82909 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
82910 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82911
82912diff --git a/mm/mprotect.c b/mm/mprotect.c
82913index 94722a4..9837984 100644
82914--- a/mm/mprotect.c
82915+++ b/mm/mprotect.c
82916@@ -23,10 +23,17 @@
82917 #include <linux/mmu_notifier.h>
82918 #include <linux/migrate.h>
82919 #include <linux/perf_event.h>
82920+
82921+#ifdef CONFIG_PAX_MPROTECT
82922+#include <linux/elf.h>
82923+#include <linux/binfmts.h>
82924+#endif
82925+
82926 #include <asm/uaccess.h>
82927 #include <asm/pgtable.h>
82928 #include <asm/cacheflush.h>
82929 #include <asm/tlbflush.h>
82930+#include <asm/mmu_context.h>
82931
82932 #ifndef pgprot_modify
82933 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
82934@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
82935 return pages;
82936 }
82937
82938+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
82939+/* called while holding the mmap semaphore for writing, except for stack expansion */
82940+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
82941+{
82942+ unsigned long oldlimit, newlimit = 0UL;
82943+
82944+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
82945+ return;
82946+
82947+ spin_lock(&mm->page_table_lock);
82948+ oldlimit = mm->context.user_cs_limit;
82949+ if ((prot & VM_EXEC) && oldlimit < end)
82950+ /* USER_CS limit moved up */
82951+ newlimit = end;
82952+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
82953+ /* USER_CS limit moved down */
82954+ newlimit = start;
82955+
82956+ if (newlimit) {
82957+ mm->context.user_cs_limit = newlimit;
82958+
82959+#ifdef CONFIG_SMP
82960+ wmb();
82961+ cpus_clear(mm->context.cpu_user_cs_mask);
82962+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
82963+#endif
82964+
82965+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
82966+ }
82967+ spin_unlock(&mm->page_table_lock);
82968+ if (newlimit == end) {
82969+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
82970+
82971+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
82972+ if (is_vm_hugetlb_page(vma))
82973+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
82974+ else
82975+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
82976+ }
82977+}
82978+#endif
82979+
82980 int
82981 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82982 unsigned long start, unsigned long end, unsigned long newflags)
82983@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82984 int error;
82985 int dirty_accountable = 0;
82986
82987+#ifdef CONFIG_PAX_SEGMEXEC
82988+ struct vm_area_struct *vma_m = NULL;
82989+ unsigned long start_m, end_m;
82990+
82991+ start_m = start + SEGMEXEC_TASK_SIZE;
82992+ end_m = end + SEGMEXEC_TASK_SIZE;
82993+#endif
82994+
82995 if (newflags == oldflags) {
82996 *pprev = vma;
82997 return 0;
82998 }
82999
83000+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
83001+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
83002+
83003+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
83004+ return -ENOMEM;
83005+
83006+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
83007+ return -ENOMEM;
83008+ }
83009+
83010 /*
83011 * If we make a private mapping writable we increase our commit;
83012 * but (without finer accounting) cannot reduce our commit if we
83013@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83014 }
83015 }
83016
83017+#ifdef CONFIG_PAX_SEGMEXEC
83018+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
83019+ if (start != vma->vm_start) {
83020+ error = split_vma(mm, vma, start, 1);
83021+ if (error)
83022+ goto fail;
83023+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
83024+ *pprev = (*pprev)->vm_next;
83025+ }
83026+
83027+ if (end != vma->vm_end) {
83028+ error = split_vma(mm, vma, end, 0);
83029+ if (error)
83030+ goto fail;
83031+ }
83032+
83033+ if (pax_find_mirror_vma(vma)) {
83034+ error = __do_munmap(mm, start_m, end_m - start_m);
83035+ if (error)
83036+ goto fail;
83037+ } else {
83038+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83039+ if (!vma_m) {
83040+ error = -ENOMEM;
83041+ goto fail;
83042+ }
83043+ vma->vm_flags = newflags;
83044+ error = pax_mirror_vma(vma_m, vma);
83045+ if (error) {
83046+ vma->vm_flags = oldflags;
83047+ goto fail;
83048+ }
83049+ }
83050+ }
83051+#endif
83052+
83053 /*
83054 * First try to merge with previous and/or next vma.
83055 */
83056@@ -296,9 +399,21 @@ success:
83057 * vm_flags and vm_page_prot are protected by the mmap_sem
83058 * held in write mode.
83059 */
83060+
83061+#ifdef CONFIG_PAX_SEGMEXEC
83062+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
83063+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
83064+#endif
83065+
83066 vma->vm_flags = newflags;
83067+
83068+#ifdef CONFIG_PAX_MPROTECT
83069+ if (mm->binfmt && mm->binfmt->handle_mprotect)
83070+ mm->binfmt->handle_mprotect(vma, newflags);
83071+#endif
83072+
83073 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
83074- vm_get_page_prot(newflags));
83075+ vm_get_page_prot(vma->vm_flags));
83076
83077 if (vma_wants_writenotify(vma)) {
83078 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
83079@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83080 end = start + len;
83081 if (end <= start)
83082 return -ENOMEM;
83083+
83084+#ifdef CONFIG_PAX_SEGMEXEC
83085+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
83086+ if (end > SEGMEXEC_TASK_SIZE)
83087+ return -EINVAL;
83088+ } else
83089+#endif
83090+
83091+ if (end > TASK_SIZE)
83092+ return -EINVAL;
83093+
83094 if (!arch_validate_prot(prot))
83095 return -EINVAL;
83096
83097@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83098 /*
83099 * Does the application expect PROT_READ to imply PROT_EXEC:
83100 */
83101- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
83102+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
83103 prot |= PROT_EXEC;
83104
83105 vm_flags = calc_vm_prot_bits(prot);
83106@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83107 if (start > vma->vm_start)
83108 prev = vma;
83109
83110+#ifdef CONFIG_PAX_MPROTECT
83111+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
83112+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
83113+#endif
83114+
83115 for (nstart = start ; ; ) {
83116 unsigned long newflags;
83117
83118@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83119
83120 /* newflags >> 4 shift VM_MAY% in place of VM_% */
83121 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
83122+ if (prot & (PROT_WRITE | PROT_EXEC))
83123+ gr_log_rwxmprotect(vma->vm_file);
83124+
83125+ error = -EACCES;
83126+ goto out;
83127+ }
83128+
83129+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
83130 error = -EACCES;
83131 goto out;
83132 }
83133@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83134 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
83135 if (error)
83136 goto out;
83137+
83138+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
83139+
83140 nstart = tmp;
83141
83142 if (nstart < prev->vm_end)
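
track_exec_limit() above emulates non-executable pages on i386 CPUs without hardware NX by keeping mm->context.user_cs_limit at the top of the highest executable mapping: gaining VM_EXEC past the current limit raises it to end, while losing VM_EXEC on the region straddling the limit lowers it to start. A pure-function sketch of just that decision, with hypothetical names:

#include <stdio.h>

#define VM_EXEC 0x04UL

/* Decide the new USER_CS limit after changing [start, end) to `prot`,
 * mirroring the two cases in track_exec_limit(); returns 0 when the
 * limit is unchanged. */
static unsigned long new_cs_limit(unsigned long oldlimit,
                                  unsigned long start, unsigned long end,
                                  unsigned long prot)
{
    if ((prot & VM_EXEC) && oldlimit < end)
        return end;                 /* limit moves up */
    if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
        return start;               /* limit moves down */
    return 0;
}

int main(void)
{
    /* mapping code above the old limit raises it ... */
    printf("%#lx\n", new_cs_limit(0x08050000UL, 0x40000000UL, 0x40010000UL, VM_EXEC));
    /* ... and dropping exec on the topmost region lowers it */
    printf("%#lx\n", new_cs_limit(0x40010000UL, 0x40000000UL, 0x40010000UL, 0));
    return 0;
}
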
83143diff --git a/mm/mremap.c b/mm/mremap.c
83144index e1031e1..1f2a0a1 100644
83145--- a/mm/mremap.c
83146+++ b/mm/mremap.c
83147@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
83148 continue;
83149 pte = ptep_get_and_clear(mm, old_addr, old_pte);
83150 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
83151+
83152+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83153+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
83154+ pte = pte_exprotect(pte);
83155+#endif
83156+
83157 set_pte_at(mm, new_addr, new_pte, pte);
83158 }
83159
83160@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
83161 if (is_vm_hugetlb_page(vma))
83162 goto Einval;
83163
83164+#ifdef CONFIG_PAX_SEGMEXEC
83165+ if (pax_find_mirror_vma(vma))
83166+ goto Einval;
83167+#endif
83168+
83169 /* We can't remap across vm area boundaries */
83170 if (old_len > vma->vm_end - addr)
83171 goto Efault;
83172@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
83173 unsigned long ret = -EINVAL;
83174 unsigned long charged = 0;
83175 unsigned long map_flags;
83176+ unsigned long pax_task_size = TASK_SIZE;
83177
83178 if (new_addr & ~PAGE_MASK)
83179 goto out;
83180
83181- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
83182+#ifdef CONFIG_PAX_SEGMEXEC
83183+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83184+ pax_task_size = SEGMEXEC_TASK_SIZE;
83185+#endif
83186+
83187+ pax_task_size -= PAGE_SIZE;
83188+
83189+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
83190 goto out;
83191
83192 /* Check if the location we're moving into overlaps the
83193 * old location at all, and fail if it does.
83194 */
83195- if ((new_addr <= addr) && (new_addr+new_len) > addr)
83196- goto out;
83197-
83198- if ((addr <= new_addr) && (addr+old_len) > new_addr)
83199+ if (addr + old_len > new_addr && new_addr + new_len > addr)
83200 goto out;
83201
83202 ret = do_munmap(mm, new_addr, new_len);
83203@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83204 struct vm_area_struct *vma;
83205 unsigned long ret = -EINVAL;
83206 unsigned long charged = 0;
83207+ unsigned long pax_task_size = TASK_SIZE;
83208
83209 down_write(&current->mm->mmap_sem);
83210
83211@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83212 if (!new_len)
83213 goto out;
83214
83215+#ifdef CONFIG_PAX_SEGMEXEC
83216+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83217+ pax_task_size = SEGMEXEC_TASK_SIZE;
83218+#endif
83219+
83220+ pax_task_size -= PAGE_SIZE;
83221+
83222+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
83223+ old_len > pax_task_size || addr > pax_task_size-old_len)
83224+ goto out;
83225+
83226 if (flags & MREMAP_FIXED) {
83227 if (flags & MREMAP_MAYMOVE)
83228 ret = mremap_to(addr, old_len, new_addr, new_len);
83229@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83230 addr + new_len);
83231 }
83232 ret = addr;
83233+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
83234 goto out;
83235 }
83236 }
83237@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83238 goto out;
83239 }
83240
83241+ map_flags = vma->vm_flags;
83242 ret = move_vma(vma, addr, old_len, new_len, new_addr);
83243+ if (!(ret & ~PAGE_MASK)) {
83244+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
83245+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
83246+ }
83247 }
83248 out:
83249 if (ret & ~PAGE_MASK)
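
The mremap_to() hunk above collapses two one-sided overlap tests into the canonical half-open-interval intersection predicate addr + old_len > new_addr && new_addr + new_len > addr. A brute-force sketch checking that the two forms agree for all small non-empty ranges (empty lengths have already been rejected on this path):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned a, al, n, nl;

    /* exhaust small interval pairs; lengths kept nonzero, as in mremap */
    for (a = 0; a < 16; a++)
        for (al = 1; al < 16; al++)
            for (n = 0; n < 16; n++)
                for (nl = 1; nl < 16; nl++) {
                    int old_form = (n <= a && n + nl > a) ||
                                   (a <= n && a + al > n);
                    int new_form = (a + al > n) && (n + nl > a);
                    assert(old_form == new_form);
                }
    puts("overlap predicates agree");
    return 0;
}
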
83250diff --git a/mm/nommu.c b/mm/nommu.c
83251index 79c3cac..b2601ea 100644
83252--- a/mm/nommu.c
83253+++ b/mm/nommu.c
83254@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
83255 int sysctl_overcommit_ratio = 50; /* default is 50% */
83256 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
83257 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
83258-int heap_stack_gap = 0;
83259
83260 atomic_long_t mmap_pages_allocated;
83261
83262@@ -819,7 +818,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
83263 struct vm_area_struct *vma;
83264
83265 /* check the cache first */
83266- vma = mm->mmap_cache;
83267+ vma = ACCESS_ONCE(mm->mmap_cache);
83268 if (vma && vma->vm_start <= addr && vma->vm_end > addr)
83269 return vma;
83270
83271@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
83272 EXPORT_SYMBOL(find_vma);
83273
83274 /*
83275- * find a VMA
83276- * - we don't extend stack VMAs under NOMMU conditions
83277- */
83278-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
83279-{
83280- return find_vma(mm, addr);
83281-}
83282-
83283-/*
83284 * expand a stack to a given address
83285 * - not supported under NOMMU conditions
83286 */
83287@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83288
83289 /* most fields are the same, copy all, and then fixup */
83290 *new = *vma;
83291+ INIT_LIST_HEAD(&new->anon_vma_chain);
83292 *region = *vma->vm_region;
83293 new->vm_region = region;
83294
83295@@ -1975,8 +1966,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
83296 }
83297 EXPORT_SYMBOL(generic_file_remap_pages);
83298
83299-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83300- unsigned long addr, void *buf, int len, int write)
83301+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83302+ unsigned long addr, void *buf, size_t len, int write)
83303 {
83304 struct vm_area_struct *vma;
83305
83306@@ -2017,8 +2008,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83307 *
83308 * The caller must hold a reference on @mm.
83309 */
83310-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83311- void *buf, int len, int write)
83312+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83313+ void *buf, size_t len, int write)
83314 {
83315 return __access_remote_vm(NULL, mm, addr, buf, len, write);
83316 }
83317@@ -2027,7 +2018,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83318 * Access another process' address space.
83319 * - source/target buffer must be kernel space
83320 */
83321-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
83322+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
83323 {
83324 struct mm_struct *mm;
83325
83326diff --git a/mm/page-writeback.c b/mm/page-writeback.c
83327index 0713bfb..b95bb87 100644
83328--- a/mm/page-writeback.c
83329+++ b/mm/page-writeback.c
83330@@ -655,7 +655,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
83331 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
83332 * - the bdi dirty thresh drops quickly due to change of JBOD workload
83333 */
83334-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
83335+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
83336 unsigned long thresh,
83337 unsigned long bg_thresh,
83338 unsigned long dirty,
83339@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
83340 }
83341 }
83342
83343-static struct notifier_block __cpuinitdata ratelimit_nb = {
83344+static struct notifier_block ratelimit_nb = {
83345 .notifier_call = ratelimit_handler,
83346 .next = NULL,
83347 };
83348diff --git a/mm/page_alloc.c b/mm/page_alloc.c
83349index 6a83cd3..3ab04ef 100644
83350--- a/mm/page_alloc.c
83351+++ b/mm/page_alloc.c
83352@@ -58,6 +58,7 @@
83353 #include <linux/prefetch.h>
83354 #include <linux/migrate.h>
83355 #include <linux/page-debug-flags.h>
83356+#include <linux/random.h>
83357
83358 #include <asm/tlbflush.h>
83359 #include <asm/div64.h>
83360@@ -338,7 +339,7 @@ out:
83361 * This usage means that zero-order pages may not be compound.
83362 */
83363
83364-static void free_compound_page(struct page *page)
83365+void free_compound_page(struct page *page)
83366 {
83367 __free_pages_ok(page, compound_order(page));
83368 }
83369@@ -693,6 +694,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83370 int i;
83371 int bad = 0;
83372
83373+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83374+ unsigned long index = 1UL << order;
83375+#endif
83376+
83377 trace_mm_page_free(page, order);
83378 kmemcheck_free_shadow(page, order);
83379
83380@@ -708,6 +713,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83381 debug_check_no_obj_freed(page_address(page),
83382 PAGE_SIZE << order);
83383 }
83384+
83385+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83386+ for (; index; --index)
83387+ sanitize_highpage(page + index - 1);
83388+#endif
83389+
83390 arch_free_page(page, order);
83391 kernel_map_pages(page, 1 << order, 0);
83392
83393@@ -730,6 +741,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
83394 local_irq_restore(flags);
83395 }
83396
83397+#ifdef CONFIG_PAX_LATENT_ENTROPY
83398+bool __meminitdata extra_latent_entropy;
83399+
83400+static int __init setup_pax_extra_latent_entropy(char *str)
83401+{
83402+ extra_latent_entropy = true;
83403+ return 0;
83404+}
83405+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
83406+
83407+volatile u64 latent_entropy;
83408+#endif
83409+
83410 /*
83411 * Read access to zone->managed_pages is safe because it's unsigned long,
83412 * but we still need to serialize writers. Currently all callers of
83413@@ -752,6 +776,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
83414 set_page_count(p, 0);
83415 }
83416
83417+#ifdef CONFIG_PAX_LATENT_ENTROPY
83418+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
83419+ u64 hash = 0;
83420+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
83421+ const u64 *data = lowmem_page_address(page);
83422+
83423+ for (index = 0; index < end; index++)
83424+ hash ^= hash + data[index];
83425+ latent_entropy ^= hash;
83426+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
83427+ }
83428+#endif
83429+
83430 page_zone(page)->managed_pages += 1 << order;
83431 set_page_refcounted(page);
83432 __free_pages(page, order);
83433@@ -861,8 +898,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
83434 arch_alloc_page(page, order);
83435 kernel_map_pages(page, 1 << order, 1);
83436
83437+#ifndef CONFIG_PAX_MEMORY_SANITIZE
83438 if (gfp_flags & __GFP_ZERO)
83439 prep_zero_page(page, order, gfp_flags);
83440+#endif
83441
83442 if (order && (gfp_flags & __GFP_COMP))
83443 prep_compound_page(page, order);
83444@@ -3752,7 +3791,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
83445 unsigned long pfn;
83446
83447 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
83448+#ifdef CONFIG_X86_32
82449+	/* there have been boot failures in VMware 8 on 32-bit
82450+	   vanilla kernels since this change */
83451+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
83452+#else
83453 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
83454+#endif
83455 return 1;
83456 }
83457 return 0;
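
With pax_extra_latent_entropy, the __free_pages_bootmem() hunk above hashes every boot-time page below 4 GB word by word with hash ^= hash + data[i], XORs the result into latent_entropy, and feeds that word to add_device_randomness(). A user-space sketch of the mixing step (mix_page() and PAGE_SIZE_SKETCH are illustrative names):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_SKETCH 4096

/* Fold one page's worth of memory into the running entropy word using the
 * same update rule as the patch: hash ^= hash + data[i]. */
static uint64_t mix_page(uint64_t entropy, const void *page)
{
    const uint64_t *data = page;
    size_t i, end = PAGE_SIZE_SKETCH / sizeof(*data);
    uint64_t hash = 0;

    for (i = 0; i < end; i++)
        hash ^= hash + data[i];
    return entropy ^ hash;
}

int main(void)
{
    static unsigned char page[PAGE_SIZE_SKETCH];
    uint64_t latent = 0;

    memset(page, 0x5a, sizeof(page));
    latent = mix_page(latent, page);
    printf("latent entropy word: %#llx\n", (unsigned long long)latent);
    return 0;
}
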
83458diff --git a/mm/percpu.c b/mm/percpu.c
83459index 8c8e08f..73a5cda 100644
83460--- a/mm/percpu.c
83461+++ b/mm/percpu.c
83462@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
83463 static unsigned int pcpu_high_unit_cpu __read_mostly;
83464
83465 /* the address of the first chunk which starts with the kernel static area */
83466-void *pcpu_base_addr __read_mostly;
83467+void *pcpu_base_addr __read_only;
83468 EXPORT_SYMBOL_GPL(pcpu_base_addr);
83469
83470 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
83471diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
83472index fd26d04..0cea1b0 100644
83473--- a/mm/process_vm_access.c
83474+++ b/mm/process_vm_access.c
83475@@ -13,6 +13,7 @@
83476 #include <linux/uio.h>
83477 #include <linux/sched.h>
83478 #include <linux/highmem.h>
83479+#include <linux/security.h>
83480 #include <linux/ptrace.h>
83481 #include <linux/slab.h>
83482 #include <linux/syscalls.h>
83483@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
83484 size_t iov_l_curr_offset = 0;
83485 ssize_t iov_len;
83486
83487+	return -ENOSYS; /* PaX: until properly audited */
83488+
83489 /*
83490 * Work out how many pages of struct pages we're going to need
83491 * when eventually calling get_user_pages
83492 */
83493 for (i = 0; i < riovcnt; i++) {
83494 iov_len = rvec[i].iov_len;
83495- if (iov_len > 0) {
83496- nr_pages_iov = ((unsigned long)rvec[i].iov_base
83497- + iov_len)
83498- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
83499- / PAGE_SIZE + 1;
83500- nr_pages = max(nr_pages, nr_pages_iov);
83501- }
83502+ if (iov_len <= 0)
83503+ continue;
83504+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
83505+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
83506+ nr_pages = max(nr_pages, nr_pages_iov);
83507 }
83508
83509 if (nr_pages == 0)
83510@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
83511 goto free_proc_pages;
83512 }
83513
83514+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
83515+ rc = -EPERM;
83516+ goto put_task_struct;
83517+ }
83518+
83519 mm = mm_access(task, PTRACE_MODE_ATTACH);
83520 if (!mm || IS_ERR(mm)) {
83521 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
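
The rewritten loop in process_vm_rw_core() above sizes the struct page array from each iovec as last-page index minus first-page index plus one, a deliberately conservative upper bound. A sketch of the arithmetic, with hypothetical names:

#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096UL

/* Upper bound on pages spanned by [base, base + len): the page index of
 * byte base + len, minus the index of the first byte, plus one. When
 * base + len is exactly page aligned this counts one page too many, which
 * only over-sizes the pinning array. */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
    return (base + len) / PAGE_SIZE_SKETCH - base / PAGE_SIZE_SKETCH + 1;
}

int main(void)
{
    printf("%lu\n", pages_spanned(4095, 2));             /* 2: straddles a boundary */
    printf("%lu\n", pages_spanned(0, PAGE_SIZE_SKETCH)); /* 2: bound, actual is 1 */
    printf("%lu\n", pages_spanned(8192, 100));           /* 1 */
    return 0;
}
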
83522diff --git a/mm/rmap.c b/mm/rmap.c
83523index 2c78f8c..9e9c624 100644
83524--- a/mm/rmap.c
83525+++ b/mm/rmap.c
83526@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83527 struct anon_vma *anon_vma = vma->anon_vma;
83528 struct anon_vma_chain *avc;
83529
83530+#ifdef CONFIG_PAX_SEGMEXEC
83531+ struct anon_vma_chain *avc_m = NULL;
83532+#endif
83533+
83534 might_sleep();
83535 if (unlikely(!anon_vma)) {
83536 struct mm_struct *mm = vma->vm_mm;
83537@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83538 if (!avc)
83539 goto out_enomem;
83540
83541+#ifdef CONFIG_PAX_SEGMEXEC
83542+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
83543+ if (!avc_m)
83544+ goto out_enomem_free_avc;
83545+#endif
83546+
83547 anon_vma = find_mergeable_anon_vma(vma);
83548 allocated = NULL;
83549 if (!anon_vma) {
83550@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83551 /* page_table_lock to protect against threads */
83552 spin_lock(&mm->page_table_lock);
83553 if (likely(!vma->anon_vma)) {
83554+
83555+#ifdef CONFIG_PAX_SEGMEXEC
83556+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
83557+
83558+ if (vma_m) {
83559+ BUG_ON(vma_m->anon_vma);
83560+ vma_m->anon_vma = anon_vma;
83561+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
83562+ avc_m = NULL;
83563+ }
83564+#endif
83565+
83566 vma->anon_vma = anon_vma;
83567 anon_vma_chain_link(vma, avc, anon_vma);
83568 allocated = NULL;
83569@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83570
83571 if (unlikely(allocated))
83572 put_anon_vma(allocated);
83573+
83574+#ifdef CONFIG_PAX_SEGMEXEC
83575+ if (unlikely(avc_m))
83576+ anon_vma_chain_free(avc_m);
83577+#endif
83578+
83579 if (unlikely(avc))
83580 anon_vma_chain_free(avc);
83581 }
83582 return 0;
83583
83584 out_enomem_free_avc:
83585+
83586+#ifdef CONFIG_PAX_SEGMEXEC
83587+ if (avc_m)
83588+ anon_vma_chain_free(avc_m);
83589+#endif
83590+
83591 anon_vma_chain_free(avc);
83592 out_enomem:
83593 return -ENOMEM;
83594@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
83595 * Attach the anon_vmas from src to dst.
83596 * Returns 0 on success, -ENOMEM on failure.
83597 */
83598-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83599+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
83600 {
83601 struct anon_vma_chain *avc, *pavc;
83602 struct anon_vma *root = NULL;
83603@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83604 * the corresponding VMA in the parent process is attached to.
83605 * Returns 0 on success, non-zero on failure.
83606 */
83607-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
83608+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
83609 {
83610 struct anon_vma_chain *avc;
83611 struct anon_vma *anon_vma;
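
The anon_vma_prepare() hunks above pre-allocate the mirror's anon_vma_chain (avc_m) with GFP_KERNEL before taking page_table_lock, because a sleeping allocation is illegal under a spinlock; if the mirror slot turns out not to be needed, the spare is freed once the lock is dropped. The same allocate-outside-the-lock shape in user space (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_slot;

static int install_slot(void)
{
    void *spare = malloc(64);        /* may "sleep": done outside the lock */

    if (!spare)
        return -1;
    pthread_mutex_lock(&lock);
    if (!shared_slot) {
        shared_slot = spare;
        spare = NULL;                /* consumed under the lock */
    }
    pthread_mutex_unlock(&lock);
    free(spare);                     /* no-op if we won the race */
    return 0;
}

int main(void)
{
    install_slot();
    printf("slot %s\n", shared_slot ? "installed" : "empty");
    return 0;
}
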
83612diff --git a/mm/shmem.c b/mm/shmem.c
83613index efd0b3a..994b702 100644
83614--- a/mm/shmem.c
83615+++ b/mm/shmem.c
83616@@ -31,7 +31,7 @@
83617 #include <linux/export.h>
83618 #include <linux/swap.h>
83619
83620-static struct vfsmount *shm_mnt;
83621+struct vfsmount *shm_mnt;
83622
83623 #ifdef CONFIG_SHMEM
83624 /*
83625@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
83626 #define BOGO_DIRENT_SIZE 20
83627
83628 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
83629-#define SHORT_SYMLINK_LEN 128
83630+#define SHORT_SYMLINK_LEN 64
83631
83632 /*
83633 * shmem_fallocate and shmem_writepage communicate via inode->i_private
83634@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
83635 static int shmem_xattr_validate(const char *name)
83636 {
83637 struct { const char *prefix; size_t len; } arr[] = {
83638+
83639+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83640+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
83641+#endif
83642+
83643 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
83644 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
83645 };
83646@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
83647 if (err)
83648 return err;
83649
83650+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83651+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
83652+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
83653+ return -EOPNOTSUPP;
83654+ if (size > 8)
83655+ return -EINVAL;
83656+ }
83657+#endif
83658+
83659 return simple_xattr_set(&info->xattrs, name, value, size, flags);
83660 }
83661
83662@@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
83663 int err = -ENOMEM;
83664
83665 /* Round up to L1_CACHE_BYTES to resist false sharing */
83666- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
83667- L1_CACHE_BYTES), GFP_KERNEL);
83668+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
83669 if (!sbinfo)
83670 return -ENOMEM;
83671
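
The shmem xattr hunks above open the user.* namespace on tmpfs only wide enough for PaX: any user.* name other than user.pax.flags is rejected with -EOPNOTSUPP, and the flags value is capped at 8 bytes. A sketch of that validation; the macro values are assumptions matching the patch's intent rather than copied kernel headers:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX    "user."
#define XATTR_NAME_PAX_FLAGS "user.pax.flags"

/* Accept only user.pax.flags in the user.* namespace, value <= 8 bytes,
 * mirroring the shmem_setxattr() hunk; returns 0 or a negative errno. */
static int validate_pax_xattr(const char *name, size_t size)
{
    if (strncmp(name, XATTR_USER_PREFIX, strlen(XATTR_USER_PREFIX)) == 0) {
        if (strcmp(name, XATTR_NAME_PAX_FLAGS) != 0)
            return -EOPNOTSUPP;
        if (size > 8)
            return -EINVAL;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", validate_pax_xattr("user.pax.flags", 4));  /* 0 */
    printf("%d\n", validate_pax_xattr("user.other", 4));      /* -EOPNOTSUPP */
    printf("%d\n", validate_pax_xattr("user.pax.flags", 16)); /* -EINVAL */
    return 0;
}
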
83672diff --git a/mm/slab.c b/mm/slab.c
83673index e7667a3..a48e73b 100644
83674--- a/mm/slab.c
83675+++ b/mm/slab.c
83676@@ -306,7 +306,7 @@ struct kmem_list3 {
83677 * Need this for bootstrapping a per node allocator.
83678 */
83679 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
83680-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
83681+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
83682 #define CACHE_CACHE 0
83683 #define SIZE_AC MAX_NUMNODES
83684 #define SIZE_L3 (2 * MAX_NUMNODES)
83685@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
83686 if ((x)->max_freeable < i) \
83687 (x)->max_freeable = i; \
83688 } while (0)
83689-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
83690-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
83691-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
83692-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
83693+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
83694+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
83695+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
83696+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
83697 #else
83698 #define STATS_INC_ACTIVE(x) do { } while (0)
83699 #define STATS_DEC_ACTIVE(x) do { } while (0)
83700@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
83701 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
83702 */
83703 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
83704- const struct slab *slab, void *obj)
83705+ const struct slab *slab, const void *obj)
83706 {
83707 u32 offset = (obj - slab->s_mem);
83708 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
83709@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
83710 struct cache_names {
83711 char *name;
83712 char *name_dma;
83713+ char *name_usercopy;
83714 };
83715
83716 static struct cache_names __initdata cache_names[] = {
83717-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
83718+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
83719 #include <linux/kmalloc_sizes.h>
83720- {NULL,}
83721+ {NULL}
83722 #undef CACHE
83723 };
83724
83725@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
83726 if (unlikely(gfpflags & GFP_DMA))
83727 return csizep->cs_dmacachep;
83728 #endif
83729+
83730+#ifdef CONFIG_PAX_USERCOPY_SLABS
83731+ if (unlikely(gfpflags & GFP_USERCOPY))
83732+ return csizep->cs_usercopycachep;
83733+#endif
83734+
83735 return csizep->cs_cachep;
83736 }
83737
83738@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
83739 return notifier_from_errno(err);
83740 }
83741
83742-static struct notifier_block __cpuinitdata cpucache_notifier = {
83743+static struct notifier_block cpucache_notifier = {
83744 &cpuup_callback, NULL, 0
83745 };
83746
83747@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
83748 */
83749
83750 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
83751- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
83752+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83753
83754 if (INDEX_AC != INDEX_L3)
83755 sizes[INDEX_L3].cs_cachep =
83756 create_kmalloc_cache(names[INDEX_L3].name,
83757- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
83758+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83759
83760 slab_early_init = 0;
83761
83762@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
83763 */
83764 if (!sizes->cs_cachep)
83765 sizes->cs_cachep = create_kmalloc_cache(names->name,
83766- sizes->cs_size, ARCH_KMALLOC_FLAGS);
83767+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83768
83769 #ifdef CONFIG_ZONE_DMA
83770 sizes->cs_dmacachep = create_kmalloc_cache(
83771 names->name_dma, sizes->cs_size,
83772 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
83773 #endif
83774+
83775+#ifdef CONFIG_PAX_USERCOPY_SLABS
83776+ sizes->cs_usercopycachep = create_kmalloc_cache(
83777+ names->name_usercopy, sizes->cs_size,
83778+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83779+#endif
83780+
83781 sizes++;
83782 names++;
83783 }
83784@@ -3924,6 +3938,7 @@ void kfree(const void *objp)
83785
83786 if (unlikely(ZERO_OR_NULL_PTR(objp)))
83787 return;
83788+ VM_BUG_ON(!virt_addr_valid(objp));
83789 local_irq_save(flags);
83790 kfree_debugcheck(objp);
83791 c = virt_to_cache(objp);
83792@@ -4365,10 +4380,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
83793 }
83794 /* cpu stats */
83795 {
83796- unsigned long allochit = atomic_read(&cachep->allochit);
83797- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
83798- unsigned long freehit = atomic_read(&cachep->freehit);
83799- unsigned long freemiss = atomic_read(&cachep->freemiss);
83800+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
83801+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
83802+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
83803+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
83804
83805 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
83806 allochit, allocmiss, freehit, freemiss);
83807@@ -4600,13 +4615,71 @@ static const struct file_operations proc_slabstats_operations = {
83808 static int __init slab_proc_init(void)
83809 {
83810 #ifdef CONFIG_DEBUG_SLAB_LEAK
83811- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
83812+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
83813 #endif
83814 return 0;
83815 }
83816 module_init(slab_proc_init);
83817 #endif
83818
83819+bool is_usercopy_object(const void *ptr)
83820+{
83821+ struct page *page;
83822+ struct kmem_cache *cachep;
83823+
83824+ if (ZERO_OR_NULL_PTR(ptr))
83825+ return false;
83826+
83827+ if (!slab_is_available())
83828+ return false;
83829+
83830+ if (!virt_addr_valid(ptr))
83831+ return false;
83832+
83833+ page = virt_to_head_page(ptr);
83834+
83835+ if (!PageSlab(page))
83836+ return false;
83837+
83838+ cachep = page->slab_cache;
83839+ return cachep->flags & SLAB_USERCOPY;
83840+}
83841+
83842+#ifdef CONFIG_PAX_USERCOPY
83843+const char *check_heap_object(const void *ptr, unsigned long n)
83844+{
83845+ struct page *page;
83846+ struct kmem_cache *cachep;
83847+ struct slab *slabp;
83848+ unsigned int objnr;
83849+ unsigned long offset;
83850+
83851+ if (ZERO_OR_NULL_PTR(ptr))
83852+ return "<null>";
83853+
83854+ if (!virt_addr_valid(ptr))
83855+ return NULL;
83856+
83857+ page = virt_to_head_page(ptr);
83858+
83859+ if (!PageSlab(page))
83860+ return NULL;
83861+
83862+ cachep = page->slab_cache;
83863+ if (!(cachep->flags & SLAB_USERCOPY))
83864+ return cachep->name;
83865+
83866+ slabp = page->slab_page;
83867+ objnr = obj_to_index(cachep, slabp, ptr);
83868+ BUG_ON(objnr >= cachep->num);
83869+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
83870+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
83871+ return NULL;
83872+
83873+ return cachep->name;
83874+}
83875+#endif
83876+
83877 /**
83878 * ksize - get the actual amount of memory allocated for a given object
83879 * @objp: Pointer to the object
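
check_heap_object() for SLAB above locates the object a pointer belongs to (via obj_to_index() and index_to_obj()) and then allows the usercopy only if the n-byte span stays inside one object's usable size. The bounds test is written so it cannot overflow: both comparisons are against object_size. A sketch of just that test (usercopy_fits() is a hypothetical name):

#include <stdbool.h>
#include <stdio.h>

/* Does a copy of n bytes starting `offset` bytes into an object of
 * `object_size` bytes stay inside that object? Same overflow-safe shape
 * as the patch. */
static bool usercopy_fits(unsigned long offset, unsigned long n,
                          unsigned long object_size)
{
    return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
    printf("%d\n", usercopy_fits(8, 24, 32)); /* 1: reaches exactly the end */
    printf("%d\n", usercopy_fits(8, 25, 32)); /* 0: one byte past the object */
    printf("%d\n", usercopy_fits(40, 1, 32)); /* 0: starts beyond the object */
    return 0;
}
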
83880diff --git a/mm/slab.h b/mm/slab.h
83881index 34a98d6..73633d1 100644
83882--- a/mm/slab.h
83883+++ b/mm/slab.h
83884@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83885
83886 /* Legal flag mask for kmem_cache_create(), for various configurations */
83887 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
83888- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
83889+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
83890
83891 #if defined(CONFIG_DEBUG_SLAB)
83892 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
83893@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
83894 return s;
83895
83896 page = virt_to_head_page(x);
83897+
83898+ BUG_ON(!PageSlab(page));
83899+
83900 cachep = page->slab_cache;
83901 if (slab_equal_or_root(cachep, s))
83902 return cachep;
83903diff --git a/mm/slab_common.c b/mm/slab_common.c
83904index 3f3cd97..93b0236 100644
83905--- a/mm/slab_common.c
83906+++ b/mm/slab_common.c
83907@@ -22,7 +22,7 @@
83908
83909 #include "slab.h"
83910
83911-enum slab_state slab_state;
83912+enum slab_state slab_state __read_only;
83913 LIST_HEAD(slab_caches);
83914 DEFINE_MUTEX(slab_mutex);
83915 struct kmem_cache *kmem_cache;
83916@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
83917
83918 err = __kmem_cache_create(s, flags);
83919 if (!err) {
83920- s->refcount = 1;
83921+ atomic_set(&s->refcount, 1);
83922 list_add(&s->list, &slab_caches);
83923 memcg_cache_list_add(memcg, s);
83924 } else {
83925@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
83926
83927 get_online_cpus();
83928 mutex_lock(&slab_mutex);
83929- s->refcount--;
83930- if (!s->refcount) {
83931+ if (atomic_dec_and_test(&s->refcount)) {
83932 list_del(&s->list);
83933
83934 if (!__kmem_cache_shutdown(s)) {
83935@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
83936 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
83937 name, size, err);
83938
83939- s->refcount = -1; /* Exempt from merging for now */
83940+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
83941 }
83942
83943 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83944@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83945
83946 create_boot_cache(s, name, size, flags);
83947 list_add(&s->list, &slab_caches);
83948- s->refcount = 1;
83949+ atomic_set(&s->refcount, 1);
83950 return s;
83951 }
83952
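
Turning kmem_cache.refcount into an atomic_t, as above, makes the release path the classic dec-and-test pattern: exactly one caller observes the transition to zero and performs the teardown. A C11 sketch of the pattern (struct cache_sketch and cache_put() are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_sketch {
    atomic_int refcount;
};

/* atomic_dec_and_test() equivalent: true only for the caller whose
 * decrement takes the count from 1 to 0. */
static bool cache_put(struct cache_sketch *s)
{
    return atomic_fetch_sub(&s->refcount, 1) == 1;
}

int main(void)
{
    struct cache_sketch s;

    atomic_init(&s.refcount, 2);
    printf("first put frees: %d\n", cache_put(&s));  /* 0 */
    printf("second put frees: %d\n", cache_put(&s)); /* 1 */
    return 0;
}
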
83953diff --git a/mm/slob.c b/mm/slob.c
83954index a99fdf7..6ee34ec 100644
83955--- a/mm/slob.c
83956+++ b/mm/slob.c
83957@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
83958 /*
83959 * Return the size of a slob block.
83960 */
83961-static slobidx_t slob_units(slob_t *s)
83962+static slobidx_t slob_units(const slob_t *s)
83963 {
83964 if (s->units > 0)
83965 return s->units;
83966@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
83967 /*
83968 * Return the next free slob block pointer after this one.
83969 */
83970-static slob_t *slob_next(slob_t *s)
83971+static slob_t *slob_next(const slob_t *s)
83972 {
83973 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
83974 slobidx_t next;
83975@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
83976 /*
83977 * Returns true if s is the last free block in its page.
83978 */
83979-static int slob_last(slob_t *s)
83980+static int slob_last(const slob_t *s)
83981 {
83982 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
83983 }
83984
83985-static void *slob_new_pages(gfp_t gfp, int order, int node)
83986+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
83987 {
83988- void *page;
83989+ struct page *page;
83990
83991 #ifdef CONFIG_NUMA
83992 if (node != NUMA_NO_NODE)
83993@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
83994 if (!page)
83995 return NULL;
83996
83997- return page_address(page);
83998+ __SetPageSlab(page);
83999+ return page;
84000 }
84001
84002-static void slob_free_pages(void *b, int order)
84003+static void slob_free_pages(struct page *sp, int order)
84004 {
84005 if (current->reclaim_state)
84006 current->reclaim_state->reclaimed_slab += 1 << order;
84007- free_pages((unsigned long)b, order);
84008+ __ClearPageSlab(sp);
84009+ reset_page_mapcount(sp);
84010+ sp->private = 0;
84011+ __free_pages(sp, order);
84012 }
84013
84014 /*
84015@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
84016
84017 /* Not enough space: must allocate a new page */
84018 if (!b) {
84019- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84020- if (!b)
84021+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84022+ if (!sp)
84023 return NULL;
84024- sp = virt_to_page(b);
84025- __SetPageSlab(sp);
84026+ b = page_address(sp);
84027
84028 spin_lock_irqsave(&slob_lock, flags);
84029 sp->units = SLOB_UNITS(PAGE_SIZE);
84030 sp->freelist = b;
84031+ sp->private = 0;
84032 INIT_LIST_HEAD(&sp->list);
84033 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
84034 set_slob_page_free(sp, slob_list);
84035@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
84036 if (slob_page_free(sp))
84037 clear_slob_page_free(sp);
84038 spin_unlock_irqrestore(&slob_lock, flags);
84039- __ClearPageSlab(sp);
84040- reset_page_mapcount(sp);
84041- slob_free_pages(b, 0);
84042+ slob_free_pages(sp, 0);
84043 return;
84044 }
84045
84046@@ -424,11 +426,10 @@ out:
84047 */
84048
84049 static __always_inline void *
84050-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84051+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
84052 {
84053- unsigned int *m;
84054- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84055- void *ret;
84056+ slob_t *m;
84057+ void *ret = NULL;
84058
84059 gfp &= gfp_allowed_mask;
84060
84061@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84062
84063 if (!m)
84064 return NULL;
84065- *m = size;
84066+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
84067+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
84068+ m[0].units = size;
84069+ m[1].units = align;
84070 ret = (void *)m + align;
84071
84072 trace_kmalloc_node(caller, ret,
84073 size, size + align, gfp, node);
84074 } else {
84075 unsigned int order = get_order(size);
84076+ struct page *page;
84077
84078 if (likely(order))
84079 gfp |= __GFP_COMP;
84080- ret = slob_new_pages(gfp, order, node);
84081+ page = slob_new_pages(gfp, order, node);
84082+ if (page) {
84083+ ret = page_address(page);
84084+ page->private = size;
84085+ }
84086
84087 trace_kmalloc_node(caller, ret,
84088 size, PAGE_SIZE << order, gfp, node);
84089 }
84090
84091- kmemleak_alloc(ret, size, 1, gfp);
84092+ return ret;
84093+}
84094+
84095+static __always_inline void *
84096+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84097+{
84098+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84099+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
84100+
84101+ if (!ZERO_OR_NULL_PTR(ret))
84102+ kmemleak_alloc(ret, size, 1, gfp);
84103 return ret;
84104 }
84105
84106@@ -493,34 +512,112 @@ void kfree(const void *block)
84107 return;
84108 kmemleak_free(block);
84109
84110+ VM_BUG_ON(!virt_addr_valid(block));
84111 sp = virt_to_page(block);
84112- if (PageSlab(sp)) {
84113+ VM_BUG_ON(!PageSlab(sp));
84114+ if (!sp->private) {
84115 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84116- unsigned int *m = (unsigned int *)(block - align);
84117- slob_free(m, *m + align);
84118- } else
84119+ slob_t *m = (slob_t *)(block - align);
84120+ slob_free(m, m[0].units + align);
84121+ } else {
84122+ __ClearPageSlab(sp);
84123+ reset_page_mapcount(sp);
84124+ sp->private = 0;
84125 __free_pages(sp, compound_order(sp));
84126+ }
84127 }
84128 EXPORT_SYMBOL(kfree);
84129
84130+bool is_usercopy_object(const void *ptr)
84131+{
84132+ if (!slab_is_available())
84133+ return false;
84134+
84135+	/* PAX: TODO */
84136+
84137+ return false;
84138+}
84139+
84140+#ifdef CONFIG_PAX_USERCOPY
84141+const char *check_heap_object(const void *ptr, unsigned long n)
84142+{
84143+ struct page *page;
84144+ const slob_t *free;
84145+ const void *base;
84146+ unsigned long flags;
84147+
84148+ if (ZERO_OR_NULL_PTR(ptr))
84149+ return "<null>";
84150+
84151+ if (!virt_addr_valid(ptr))
84152+ return NULL;
84153+
84154+ page = virt_to_head_page(ptr);
84155+ if (!PageSlab(page))
84156+ return NULL;
84157+
84158+ if (page->private) {
84159+ base = page;
84160+ if (base <= ptr && n <= page->private - (ptr - base))
84161+ return NULL;
84162+ return "<slob>";
84163+ }
84164+
84165+ /* some tricky double walking to find the chunk */
84166+ spin_lock_irqsave(&slob_lock, flags);
84167+ base = (void *)((unsigned long)ptr & PAGE_MASK);
84168+ free = page->freelist;
84169+
84170+ while (!slob_last(free) && (void *)free <= ptr) {
84171+ base = free + slob_units(free);
84172+ free = slob_next(free);
84173+ }
84174+
84175+ while (base < (void *)free) {
84176+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
84177+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
84178+ int offset;
84179+
84180+ if (ptr < base + align)
84181+ break;
84182+
84183+ offset = ptr - base - align;
84184+ if (offset >= m) {
84185+ base += size;
84186+ continue;
84187+ }
84188+
84189+ if (n > m - offset)
84190+ break;
84191+
84192+ spin_unlock_irqrestore(&slob_lock, flags);
84193+ return NULL;
84194+ }
84195+
84196+ spin_unlock_irqrestore(&slob_lock, flags);
84197+ return "<slob>";
84198+}
84199+#endif
84200+
84201 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
84202 size_t ksize(const void *block)
84203 {
84204 struct page *sp;
84205 int align;
84206- unsigned int *m;
84207+ slob_t *m;
84208
84209 BUG_ON(!block);
84210 if (unlikely(block == ZERO_SIZE_PTR))
84211 return 0;
84212
84213 sp = virt_to_page(block);
84214- if (unlikely(!PageSlab(sp)))
84215- return PAGE_SIZE << compound_order(sp);
84216+ VM_BUG_ON(!PageSlab(sp));
84217+ if (sp->private)
84218+ return sp->private;
84219
84220 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84221- m = (unsigned int *)(block - align);
84222- return SLOB_UNITS(*m) * SLOB_UNIT;
84223+ m = (slob_t *)(block - align);
84224+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
84225 }
84226 EXPORT_SYMBOL(ksize);
84227
84228@@ -536,23 +633,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
84229
84230 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
84231 {
84232- void *b;
84233+ void *b = NULL;
84234
84235 flags &= gfp_allowed_mask;
84236
84237 lockdep_trace_alloc(flags);
84238
84239+#ifdef CONFIG_PAX_USERCOPY_SLABS
84240+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
84241+#else
84242 if (c->size < PAGE_SIZE) {
84243 b = slob_alloc(c->size, flags, c->align, node);
84244 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84245 SLOB_UNITS(c->size) * SLOB_UNIT,
84246 flags, node);
84247 } else {
84248- b = slob_new_pages(flags, get_order(c->size), node);
84249+ struct page *sp;
84250+
84251+ sp = slob_new_pages(flags, get_order(c->size), node);
84252+ if (sp) {
84253+ b = page_address(sp);
84254+ sp->private = c->size;
84255+ }
84256 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84257 PAGE_SIZE << get_order(c->size),
84258 flags, node);
84259 }
84260+#endif
84261
84262 if (c->ctor)
84263 c->ctor(b);
84264@@ -564,10 +671,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
84265
84266 static void __kmem_cache_free(void *b, int size)
84267 {
84268- if (size < PAGE_SIZE)
84269+ struct page *sp;
84270+
84271+ sp = virt_to_page(b);
84272+ BUG_ON(!PageSlab(sp));
84273+ if (!sp->private)
84274 slob_free(b, size);
84275 else
84276- slob_free_pages(b, get_order(size));
84277+ slob_free_pages(sp, get_order(size));
84278 }
84279
84280 static void kmem_rcu_free(struct rcu_head *head)
84281@@ -580,17 +691,31 @@ static void kmem_rcu_free(struct rcu_head *head)
84282
84283 void kmem_cache_free(struct kmem_cache *c, void *b)
84284 {
84285+ int size = c->size;
84286+
84287+#ifdef CONFIG_PAX_USERCOPY_SLABS
84288+ if (size + c->align < PAGE_SIZE) {
84289+ size += c->align;
84290+ b -= c->align;
84291+ }
84292+#endif
84293+
84294 kmemleak_free_recursive(b, c->flags);
84295 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
84296 struct slob_rcu *slob_rcu;
84297- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
84298- slob_rcu->size = c->size;
84299+ slob_rcu = b + (size - sizeof(struct slob_rcu));
84300+ slob_rcu->size = size;
84301 call_rcu(&slob_rcu->head, kmem_rcu_free);
84302 } else {
84303- __kmem_cache_free(b, c->size);
84304+ __kmem_cache_free(b, size);
84305 }
84306
84307+#ifdef CONFIG_PAX_USERCOPY_SLABS
84308+ trace_kfree(_RET_IP_, b);
84309+#else
84310 trace_kmem_cache_free(_RET_IP_, b);
84311+#endif
84312+
84313 }
84314 EXPORT_SYMBOL(kmem_cache_free);
84315
84316diff --git a/mm/slub.c b/mm/slub.c
84317index ba2ca53..991c4f7 100644
84318--- a/mm/slub.c
84319+++ b/mm/slub.c
84320@@ -197,7 +197,7 @@ struct track {
84321
84322 enum track_item { TRACK_ALLOC, TRACK_FREE };
84323
84324-#ifdef CONFIG_SYSFS
84325+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84326 static int sysfs_slab_add(struct kmem_cache *);
84327 static int sysfs_slab_alias(struct kmem_cache *, const char *);
84328 static void sysfs_slab_remove(struct kmem_cache *);
84329@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
84330 if (!t->addr)
84331 return;
84332
84333- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
84334+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
84335 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
84336 #ifdef CONFIG_STACKTRACE
84337 {
84338@@ -2653,7 +2653,7 @@ static int slub_min_objects;
84339 * Merge control. If this is set then no merging of slab caches will occur.
84340 * (Could be removed. This was introduced to pacify the merge skeptics.)
84341 */
84342-static int slub_nomerge;
84343+static int slub_nomerge = 1;
84344
84345 /*
84346 * Calculate the order of allocation given an slab object size.
84347@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
84348 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
84349 #endif
84350
84351+#ifdef CONFIG_PAX_USERCOPY_SLABS
84352+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
84353+#endif
84354+
84355 static int __init setup_slub_min_order(char *str)
84356 {
84357 get_option(&str, &slub_min_order);
84358@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
84359 return kmalloc_dma_caches[index];
84360
84361 #endif
84362+
84363+#ifdef CONFIG_PAX_USERCOPY_SLABS
84364+ if (flags & SLAB_USERCOPY)
84365+ return kmalloc_usercopy_caches[index];
84366+
84367+#endif
84368+
84369 return kmalloc_caches[index];
84370 }
84371
84372@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
84373 EXPORT_SYMBOL(__kmalloc_node);
84374 #endif
84375
84376+bool is_usercopy_object(const void *ptr)
84377+{
84378+ struct page *page;
84379+ struct kmem_cache *s;
84380+
84381+ if (ZERO_OR_NULL_PTR(ptr))
84382+ return false;
84383+
84384+ if (!slab_is_available())
84385+ return false;
84386+
84387+ if (!virt_addr_valid(ptr))
84388+ return false;
84389+
84390+ page = virt_to_head_page(ptr);
84391+
84392+ if (!PageSlab(page))
84393+ return false;
84394+
84395+ s = page->slab_cache;
84396+ return s->flags & SLAB_USERCOPY;
84397+}
84398+
84399+#ifdef CONFIG_PAX_USERCOPY
84400+const char *check_heap_object(const void *ptr, unsigned long n)
84401+{
84402+ struct page *page;
84403+ struct kmem_cache *s;
84404+ unsigned long offset;
84405+
84406+ if (ZERO_OR_NULL_PTR(ptr))
84407+ return "<null>";
84408+
84409+ if (!virt_addr_valid(ptr))
84410+ return NULL;
84411+
84412+ page = virt_to_head_page(ptr);
84413+
84414+ if (!PageSlab(page))
84415+ return NULL;
84416+
84417+ s = page->slab_cache;
84418+ if (!(s->flags & SLAB_USERCOPY))
84419+ return s->name;
84420+
84421+ offset = (ptr - page_address(page)) % s->size;
84422+ if (offset <= s->object_size && n <= s->object_size - offset)
84423+ return NULL;
84424+
84425+ return s->name;
84426+}
84427+#endif
84428+
84429 size_t ksize(const void *object)
84430 {
84431 struct page *page;
84432@@ -3404,6 +3468,7 @@ void kfree(const void *x)
84433 if (unlikely(ZERO_OR_NULL_PTR(x)))
84434 return;
84435
84436+ VM_BUG_ON(!virt_addr_valid(x));
84437 page = virt_to_head_page(x);
84438 if (unlikely(!PageSlab(page))) {
84439 BUG_ON(!PageCompound(page));
84440@@ -3712,17 +3777,17 @@ void __init kmem_cache_init(void)
84441
84442 /* Caches that are not of the two-to-the-power-of size */
84443 if (KMALLOC_MIN_SIZE <= 32) {
84444- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
84445+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
84446 caches++;
84447 }
84448
84449 if (KMALLOC_MIN_SIZE <= 64) {
84450- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
84451+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
84452 caches++;
84453 }
84454
84455 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
84456- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
84457+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
84458 caches++;
84459 }
84460
84461@@ -3764,6 +3829,22 @@ void __init kmem_cache_init(void)
84462 }
84463 }
84464 #endif
84465+
84466+#ifdef CONFIG_PAX_USERCOPY_SLABS
84467+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
84468+ struct kmem_cache *s = kmalloc_caches[i];
84469+
84470+ if (s && s->size) {
84471+ char *name = kasprintf(GFP_NOWAIT,
84472+ "usercopy-kmalloc-%d", s->object_size);
84473+
84474+ BUG_ON(!name);
84475+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
84476+ s->object_size, SLAB_USERCOPY);
84477+ }
84478+ }
84479+#endif
84480+
84481 printk(KERN_INFO
84482 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
84483 " CPUs=%d, Nodes=%d\n",
84484@@ -3790,7 +3871,7 @@ static int slab_unmergeable(struct kmem_cache *s)
84485 /*
84486 * We may have set a slab to be unmergeable during bootstrap.
84487 */
84488- if (s->refcount < 0)
84489+ if (atomic_read(&s->refcount) < 0)
84490 return 1;
84491
84492 return 0;
84493@@ -3848,7 +3929,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84494
84495 s = find_mergeable(memcg, size, align, flags, name, ctor);
84496 if (s) {
84497- s->refcount++;
84498+ atomic_inc(&s->refcount);
84499 /*
84500 * Adjust the object sizes so that we clear
84501 * the complete object on kzalloc.
84502@@ -3857,7 +3938,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84503 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
84504
84505 if (sysfs_slab_alias(s, name)) {
84506- s->refcount--;
84507+ atomic_dec(&s->refcount);
84508 s = NULL;
84509 }
84510 }
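
kmem_cache.refcount is switched from a plain int to an atomic_t here, so the alias lookup and the sysfs failure path adjust it with atomic operations that, under PAX_REFCOUNT, trap on overflow. A rough user-space analogue of a checked increment, using a GCC/Clang builtin in place of the kernel's trap-on-overflow atomics:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Rough analogue of a checked refcount increment: the kernel traps in
 * the atomic op itself; here a compiler builtin detects the wrap. */
static int checked_inc(int *counter)
{
	int next;

	if (__builtin_add_overflow(*counter, 1, &next)) {
		fprintf(stderr, "refcount overflow - refusing to wrap\n");
		abort();	/* the kernel reacts in its own way, not shown */
	}
	*counter = next;
	return next;
}

int main(void)
{
	int refs = INT_MAX - 1;

	printf("%d\n", checked_inc(&refs));	/* INT_MAX */
	checked_inc(&refs);			/* aborts rather than wrapping */
	return 0;
}
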
84511@@ -3919,7 +4000,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
84512 return NOTIFY_OK;
84513 }
84514
84515-static struct notifier_block __cpuinitdata slab_notifier = {
84516+static struct notifier_block slab_notifier = {
84517 .notifier_call = slab_cpuup_callback
84518 };
84519
84520@@ -3977,7 +4058,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
84521 }
84522 #endif
84523
84524-#ifdef CONFIG_SYSFS
84525+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84526 static int count_inuse(struct page *page)
84527 {
84528 return page->inuse;
84529@@ -4364,12 +4445,12 @@ static void resiliency_test(void)
84530 validate_slab_cache(kmalloc_caches[9]);
84531 }
84532 #else
84533-#ifdef CONFIG_SYSFS
84534+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84535 static void resiliency_test(void) {};
84536 #endif
84537 #endif
84538
84539-#ifdef CONFIG_SYSFS
84540+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84541 enum slab_stat_type {
84542 SL_ALL, /* All slabs */
84543 SL_PARTIAL, /* Only partially allocated slabs */
84544@@ -4613,7 +4694,7 @@ SLAB_ATTR_RO(ctor);
84545
84546 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
84547 {
84548- return sprintf(buf, "%d\n", s->refcount - 1);
84549+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
84550 }
84551 SLAB_ATTR_RO(aliases);
84552
84553@@ -5266,6 +5347,7 @@ static char *create_unique_id(struct kmem_cache *s)
84554 return name;
84555 }
84556
84557+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84558 static int sysfs_slab_add(struct kmem_cache *s)
84559 {
84560 int err;
84561@@ -5323,6 +5405,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
84562 kobject_del(&s->kobj);
84563 kobject_put(&s->kobj);
84564 }
84565+#endif
84566
84567 /*
84568 * Need to buffer aliases during bootup until sysfs becomes
84569@@ -5336,6 +5419,7 @@ struct saved_alias {
84570
84571 static struct saved_alias *alias_list;
84572
84573+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84574 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
84575 {
84576 struct saved_alias *al;
84577@@ -5358,6 +5442,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
84578 alias_list = al;
84579 return 0;
84580 }
84581+#endif
84582
84583 static int __init slab_sysfs_init(void)
84584 {
84585diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
84586index 1b7e22a..3fcd4f3 100644
84587--- a/mm/sparse-vmemmap.c
84588+++ b/mm/sparse-vmemmap.c
84589@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
84590 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84591 if (!p)
84592 return NULL;
84593- pud_populate(&init_mm, pud, p);
84594+ pud_populate_kernel(&init_mm, pud, p);
84595 }
84596 return pud;
84597 }
84598@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
84599 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84600 if (!p)
84601 return NULL;
84602- pgd_populate(&init_mm, pgd, p);
84603+ pgd_populate_kernel(&init_mm, pgd, p);
84604 }
84605 return pgd;
84606 }
84607diff --git a/mm/sparse.c b/mm/sparse.c
84608index 6b5fb76..db0c190 100644
84609--- a/mm/sparse.c
84610+++ b/mm/sparse.c
84611@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
84612
84613 for (i = 0; i < PAGES_PER_SECTION; i++) {
84614 if (PageHWPoison(&memmap[i])) {
84615- atomic_long_sub(1, &mce_bad_pages);
84616+ atomic_long_sub_unchecked(1, &mce_bad_pages);
84617 ClearPageHWPoison(&memmap[i]);
84618 }
84619 }
84620diff --git a/mm/swap.c b/mm/swap.c
84621index 6310dc2..3662b3f 100644
84622--- a/mm/swap.c
84623+++ b/mm/swap.c
84624@@ -30,6 +30,7 @@
84625 #include <linux/backing-dev.h>
84626 #include <linux/memcontrol.h>
84627 #include <linux/gfp.h>
84628+#include <linux/hugetlb.h>
84629
84630 #include "internal.h"
84631
84632@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
84633
84634 __page_cache_release(page);
84635 dtor = get_compound_page_dtor(page);
84636+ if (!PageHuge(page))
84637+ BUG_ON(dtor != free_compound_page);
84638 (*dtor)(page);
84639 }
84640
84641diff --git a/mm/swapfile.c b/mm/swapfile.c
84642index e97a0e5..b50e796 100644
84643--- a/mm/swapfile.c
84644+++ b/mm/swapfile.c
84645@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
84646
84647 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
84648 /* Activity counter to indicate that a swapon or swapoff has occurred */
84649-static atomic_t proc_poll_event = ATOMIC_INIT(0);
84650+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
84651
84652 static inline unsigned char swap_count(unsigned char ent)
84653 {
84654@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
84655 }
84656 filp_close(swap_file, NULL);
84657 err = 0;
84658- atomic_inc(&proc_poll_event);
84659+ atomic_inc_unchecked(&proc_poll_event);
84660 wake_up_interruptible(&proc_poll_wait);
84661
84662 out_dput:
84663@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
84664
84665 poll_wait(file, &proc_poll_wait, wait);
84666
84667- if (seq->poll_event != atomic_read(&proc_poll_event)) {
84668- seq->poll_event = atomic_read(&proc_poll_event);
84669+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
84670+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84671 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
84672 }
84673
84674@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
84675 return ret;
84676
84677 seq = file->private_data;
84678- seq->poll_event = atomic_read(&proc_poll_event);
84679+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84680 return 0;
84681 }
84682
84683@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
84684 (frontswap_map) ? "FS" : "");
84685
84686 mutex_unlock(&swapon_mutex);
84687- atomic_inc(&proc_poll_event);
84688+ atomic_inc_unchecked(&proc_poll_event);
84689 wake_up_interruptible(&proc_poll_wait);
84690
84691 if (S_ISREG(inode->i_mode))
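
proc_poll_event only signals "something changed" to pollers of /proc/swaps, so wraparound is harmless; converting it to atomic_unchecked_t opts it out of PAX_REFCOUNT's overflow trap, which stays reserved for true reference counts. The consumer pattern it supports, modelled in user space:

#include <stdio.h>

/* An "unchecked" event counter: wraparound is expected and benign,
 * because consumers (like swaps_poll() above) only compare for change. */
int main(void)
{
	unsigned int event = 0xffffffffu;	/* about to wrap */
	unsigned int seen = event;

	event++;				/* wraps to 0 - no trap wanted here */
	if (seen != event)
		printf("poll: swapon/swapoff activity detected\n");
	return 0;
}
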
84692diff --git a/mm/util.c b/mm/util.c
84693index c55e26b..3f913a9 100644
84694--- a/mm/util.c
84695+++ b/mm/util.c
84696@@ -292,6 +292,12 @@ done:
84697 void arch_pick_mmap_layout(struct mm_struct *mm)
84698 {
84699 mm->mmap_base = TASK_UNMAPPED_BASE;
84700+
84701+#ifdef CONFIG_PAX_RANDMMAP
84702+ if (mm->pax_flags & MF_PAX_RANDMMAP)
84703+ mm->mmap_base += mm->delta_mmap;
84704+#endif
84705+
84706 mm->get_unmapped_area = arch_get_unmapped_area;
84707 mm->unmap_area = arch_unmap_area;
84708 }
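
Under PAX_RANDMMAP the per-process delta_mmap is added to TASK_UNMAPPED_BASE, so bottom-up mmap allocations start from an unpredictable, page-aligned base. A sketch of the effect; the constants and entropy width are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE 0x40000000UL	/* illustrative, not the kernel's */
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long delta_mmap;

	srand((unsigned)time(NULL));
	/* e.g. 16 bits of page-aligned entropy, like a delta_mmap draw */
	delta_mmap = ((unsigned long)(rand() & 0xffff)) << PAGE_SHIFT;
	printf("mmap_base = %#lx\n", TASK_UNMAPPED_BASE + delta_mmap);
	return 0;
}
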
84709diff --git a/mm/vmalloc.c b/mm/vmalloc.c
84710index 5123a16..f234a48 100644
84711--- a/mm/vmalloc.c
84712+++ b/mm/vmalloc.c
84713@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
84714
84715 pte = pte_offset_kernel(pmd, addr);
84716 do {
84717- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84718- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84719+
84720+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84721+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
84722+ BUG_ON(!pte_exec(*pte));
84723+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
84724+ continue;
84725+ }
84726+#endif
84727+
84728+ {
84729+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84730+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84731+ }
84732 } while (pte++, addr += PAGE_SIZE, addr != end);
84733 }
84734
84735@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
84736 pte = pte_alloc_kernel(pmd, addr);
84737 if (!pte)
84738 return -ENOMEM;
84739+
84740+ pax_open_kernel();
84741 do {
84742 struct page *page = pages[*nr];
84743
84744- if (WARN_ON(!pte_none(*pte)))
84745+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84746+ if (pgprot_val(prot) & _PAGE_NX)
84747+#endif
84748+
84749+ if (!pte_none(*pte)) {
84750+ pax_close_kernel();
84751+ WARN_ON(1);
84752 return -EBUSY;
84753- if (WARN_ON(!page))
84754+ }
84755+ if (!page) {
84756+ pax_close_kernel();
84757+ WARN_ON(1);
84758 return -ENOMEM;
84759+ }
84760 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
84761 (*nr)++;
84762 } while (pte++, addr += PAGE_SIZE, addr != end);
84763+ pax_close_kernel();
84764 return 0;
84765 }
84766
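
Because KERNEXEC keeps kernel page tables read-only, vmap_pte_range() must bracket PTE installation with pax_open_kernel()/pax_close_kernel(); that is why the error paths above are restructured so every early return closes what was opened. The pairing discipline, reduced to a user-space model with stand-in names:

#include <stdio.h>

/* Stand-ins for pax_open_kernel()/pax_close_kernel(): the real pair
 * briefly lifts write protection on kernel page tables. The invariant
 * the restructured error paths preserve: every open is closed. */
static int depth;
static void open_kernel(void)  { depth++; }
static void close_kernel(void) { depth--; }

static int install(int fail_at, int n)
{
	int i;

	open_kernel();
	for (i = 0; i < n; i++) {
		if (i == fail_at) {
			close_kernel();	/* restore protection before bailing */
			return -1;
		}
	}
	close_kernel();
	return 0;
}

int main(void)
{
	install(2, 8);				/* error path */
	install(-1, 8);				/* success path */
	printf("balance = %d\n", depth);	/* 0: opens and closes paired */
	return 0;
}
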
84767@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
84768 pmd_t *pmd;
84769 unsigned long next;
84770
84771- pmd = pmd_alloc(&init_mm, pud, addr);
84772+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
84773 if (!pmd)
84774 return -ENOMEM;
84775 do {
84776@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
84777 pud_t *pud;
84778 unsigned long next;
84779
84780- pud = pud_alloc(&init_mm, pgd, addr);
84781+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
84782 if (!pud)
84783 return -ENOMEM;
84784 do {
84785@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
84786 * and fall back on vmalloc() if that fails. Others
84787 * just put it in the vmalloc space.
84788 */
84789-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
84790+#ifdef CONFIG_MODULES
84791+#ifdef MODULES_VADDR
84792 unsigned long addr = (unsigned long)x;
84793 if (addr >= MODULES_VADDR && addr < MODULES_END)
84794 return 1;
84795 #endif
84796+
84797+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84798+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
84799+ return 1;
84800+#endif
84801+
84802+#endif
84803+
84804 return is_vmalloc_addr(x);
84805 }
84806
84807@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
84808
84809 if (!pgd_none(*pgd)) {
84810 pud_t *pud = pud_offset(pgd, addr);
84811+#ifdef CONFIG_X86
84812+ if (!pud_large(*pud))
84813+#endif
84814 if (!pud_none(*pud)) {
84815 pmd_t *pmd = pmd_offset(pud, addr);
84816+#ifdef CONFIG_X86
84817+ if (!pmd_large(*pmd))
84818+#endif
84819 if (!pmd_none(*pmd)) {
84820 pte_t *ptep, pte;
84821
84822@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
84823 * Allocate a region of KVA of the specified size and alignment, within the
84824 * vstart and vend.
84825 */
84826-static struct vmap_area *alloc_vmap_area(unsigned long size,
84827+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
84828 unsigned long align,
84829 unsigned long vstart, unsigned long vend,
84830 int node, gfp_t gfp_mask)
84831@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
84832 struct vm_struct *area;
84833
84834 BUG_ON(in_interrupt());
84835+
84836+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84837+ if (flags & VM_KERNEXEC) {
84838+ if (start != VMALLOC_START || end != VMALLOC_END)
84839+ return NULL;
84840+ start = (unsigned long)MODULES_EXEC_VADDR;
84841+ end = (unsigned long)MODULES_EXEC_END;
84842+ }
84843+#endif
84844+
84845 if (flags & VM_IOREMAP) {
84846 int bit = fls(size);
84847
84848@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
84849 if (count > totalram_pages)
84850 return NULL;
84851
84852+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84853+ if (!(pgprot_val(prot) & _PAGE_NX))
84854+ flags |= VM_KERNEXEC;
84855+#endif
84856+
84857 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
84858 __builtin_return_address(0));
84859 if (!area)
84860@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
84861 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
84862 goto fail;
84863
84864+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84865+ if (!(pgprot_val(prot) & _PAGE_NX))
84866+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
84867+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
84868+ else
84869+#endif
84870+
84871 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
84872 start, end, node, gfp_mask, caller);
84873 if (!area)
84874@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
84875 * For tight control over page level allocator and protection flags
84876 * use __vmalloc() instead.
84877 */
84878-
84879 void *vmalloc_exec(unsigned long size)
84880 {
84881- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
84882+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
84883 -1, __builtin_return_address(0));
84884 }
84885
84886@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
84887 unsigned long uaddr = vma->vm_start;
84888 unsigned long usize = vma->vm_end - vma->vm_start;
84889
84890+ BUG_ON(vma->vm_mirror);
84891+
84892 if ((PAGE_SIZE-1) & (unsigned long)addr)
84893 return -EINVAL;
84894
84895@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
84896 v->addr, v->addr + v->size, v->size);
84897
84898 if (v->caller)
84899+#ifdef CONFIG_GRKERNSEC_HIDESYM
84900+ seq_printf(m, " %pK", v->caller);
84901+#else
84902 seq_printf(m, " %pS", v->caller);
84903+#endif
84904
84905 if (v->nr_pages)
84906 seq_printf(m, " pages=%d", v->nr_pages);
84907diff --git a/mm/vmstat.c b/mm/vmstat.c
84908index 9800306..76b4b27 100644
84909--- a/mm/vmstat.c
84910+++ b/mm/vmstat.c
84911@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
84912 *
84913 * vm_stat contains the global counters
84914 */
84915-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84916+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84917 EXPORT_SYMBOL(vm_stat);
84918
84919 #ifdef CONFIG_SMP
84920@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
84921 v = p->vm_stat_diff[i];
84922 p->vm_stat_diff[i] = 0;
84923 local_irq_restore(flags);
84924- atomic_long_add(v, &zone->vm_stat[i]);
84925+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84926 global_diff[i] += v;
84927 #ifdef CONFIG_NUMA
84928 /* 3 seconds idle till flush */
84929@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
84930
84931 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
84932 if (global_diff[i])
84933- atomic_long_add(global_diff[i], &vm_stat[i]);
84934+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
84935 }
84936
84937 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84938@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84939 if (pset->vm_stat_diff[i]) {
84940 int v = pset->vm_stat_diff[i];
84941 pset->vm_stat_diff[i] = 0;
84942- atomic_long_add(v, &zone->vm_stat[i]);
84943- atomic_long_add(v, &vm_stat[i]);
84944+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84945+ atomic_long_add_unchecked(v, &vm_stat[i]);
84946 }
84947 }
84948 #endif
84949@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
84950 return NOTIFY_OK;
84951 }
84952
84953-static struct notifier_block __cpuinitdata vmstat_notifier =
84954+static struct notifier_block vmstat_notifier =
84955 { &vmstat_cpuup_callback, NULL, 0 };
84956 #endif
84957
84958@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
84959 start_cpu_timer(cpu);
84960 #endif
84961 #ifdef CONFIG_PROC_FS
84962- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
84963- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
84964- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
84965- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
84966+ {
84967+ mode_t gr_mode = S_IRUGO;
84968+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84969+ gr_mode = S_IRUSR;
84970+#endif
84971+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
84972+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
84973+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84974+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
84975+#else
84976+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
84977+#endif
84978+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
84979+ }
84980 #endif
84981 return 0;
84982 }
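
With GRKERNSEC_PROC_ADD the memory-statistics files become root-only (0400) instead of world-readable, and GRKERNSEC_PROC_USERGROUP additionally grants group read on /proc/vmstat. The mode selection above, with the #ifdefs turned into booleans for a runnable model:

#include <stdio.h>
#include <sys/stat.h>

/* The mode selection above with the #ifdefs as booleans. */
static mode_t vmstat_mode(int proc_add, int proc_usergroup)
{
	mode_t gr_mode = proc_add ? S_IRUSR : (S_IRUSR | S_IRGRP | S_IROTH);

	return proc_usergroup ? (gr_mode | S_IRGRP) : gr_mode;
}

int main(void)
{
	printf("%o\n", (unsigned)vmstat_mode(0, 0));	/* 444: stock */
	printf("%o\n", (unsigned)vmstat_mode(1, 0));	/* 400: PROC_ADD */
	printf("%o\n", (unsigned)vmstat_mode(1, 1));	/* 440: + USERGROUP */
	return 0;
}
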
84983diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
84984index acc74ad..be02639 100644
84985--- a/net/8021q/vlan.c
84986+++ b/net/8021q/vlan.c
84987@@ -108,6 +108,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
84988- if (vlan_id)
84989- vlan_vid_del(real_dev, vlan_id);
84990+ /* Take it out of our own structures, but be sure to interlock with
84991+ * HW accelerating devices or SW vlan input packet processing if
84992+ * VLAN is not 0 (leave it there for 802.1p).
84993+ */
84994+ if (vlan_id)
84995+ vlan_vid_del(real_dev, vlan_id);
84996 
84998 /* Get rid of the vlan's reference to real_dev */
84999 dev_put(real_dev);
85000 }
85001@@ -485,7 +492,7 @@ out:
85002 return NOTIFY_DONE;
85003 }
85004
85005-static struct notifier_block vlan_notifier_block __read_mostly = {
85006+static struct notifier_block vlan_notifier_block = {
85007 .notifier_call = vlan_device_event,
85008 };
85009
85010@@ -560,8 +567,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
85011 err = -EPERM;
85012 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
85013 break;
85014- if ((args.u.name_type >= 0) &&
85015- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
85016+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
85017 struct vlan_net *vn;
85018
85019 vn = net_generic(net, vlan_net_id);
85020diff --git a/net/9p/mod.c b/net/9p/mod.c
85021index 6ab36ae..6f1841b 100644
85022--- a/net/9p/mod.c
85023+++ b/net/9p/mod.c
85024@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
85025 void v9fs_register_trans(struct p9_trans_module *m)
85026 {
85027 spin_lock(&v9fs_trans_lock);
85028- list_add_tail(&m->list, &v9fs_trans_list);
85029+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
85030 spin_unlock(&v9fs_trans_lock);
85031 }
85032 EXPORT_SYMBOL(v9fs_register_trans);
85033@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
85034 void v9fs_unregister_trans(struct p9_trans_module *m)
85035 {
85036 spin_lock(&v9fs_trans_lock);
85037- list_del_init(&m->list);
85038+ pax_list_del_init((struct list_head *)&m->list);
85039 spin_unlock(&v9fs_trans_lock);
85040 }
85041 EXPORT_SYMBOL(v9fs_unregister_trans);
85042diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
85043index 02efb25..41541a9 100644
85044--- a/net/9p/trans_fd.c
85045+++ b/net/9p/trans_fd.c
85046@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
85047 oldfs = get_fs();
85048 set_fs(get_ds());
85049 /* The cast to a user pointer is valid due to the set_fs() */
85050- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
85051+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
85052 set_fs(oldfs);
85053
85054 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
85055diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
85056index 876fbe8..8bbea9f 100644
85057--- a/net/atm/atm_misc.c
85058+++ b/net/atm/atm_misc.c
85059@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
85060 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
85061 return 1;
85062 atm_return(vcc, truesize);
85063- atomic_inc(&vcc->stats->rx_drop);
85064+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85065 return 0;
85066 }
85067 EXPORT_SYMBOL(atm_charge);
85068@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
85069 }
85070 }
85071 atm_return(vcc, guess);
85072- atomic_inc(&vcc->stats->rx_drop);
85073+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85074 return NULL;
85075 }
85076 EXPORT_SYMBOL(atm_alloc_charge);
85077@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
85078
85079 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85080 {
85081-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85082+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85083 __SONET_ITEMS
85084 #undef __HANDLE_ITEM
85085 }
85086@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
85087
85088 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85089 {
85090-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85091+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
85092 __SONET_ITEMS
85093 #undef __HANDLE_ITEM
85094 }
85095diff --git a/net/atm/lec.h b/net/atm/lec.h
85096index a86aff9..3a0d6f6 100644
85097--- a/net/atm/lec.h
85098+++ b/net/atm/lec.h
85099@@ -48,7 +48,7 @@ struct lane2_ops {
85100 const u8 *tlvs, u32 sizeoftlvs);
85101 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
85102 const u8 *tlvs, u32 sizeoftlvs);
85103-};
85104+} __no_const;
85105
85106 /*
85107 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
85108diff --git a/net/atm/proc.c b/net/atm/proc.c
85109index 0d020de..011c7bb 100644
85110--- a/net/atm/proc.c
85111+++ b/net/atm/proc.c
85112@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
85113 const struct k_atm_aal_stats *stats)
85114 {
85115 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
85116- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
85117- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
85118- atomic_read(&stats->rx_drop));
85119+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
85120+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
85121+ atomic_read_unchecked(&stats->rx_drop));
85122 }
85123
85124 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
85125diff --git a/net/atm/resources.c b/net/atm/resources.c
85126index 0447d5d..3cf4728 100644
85127--- a/net/atm/resources.c
85128+++ b/net/atm/resources.c
85129@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
85130 static void copy_aal_stats(struct k_atm_aal_stats *from,
85131 struct atm_aal_stats *to)
85132 {
85133-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85134+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85135 __AAL_STAT_ITEMS
85136 #undef __HANDLE_ITEM
85137 }
85138@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
85139 static void subtract_aal_stats(struct k_atm_aal_stats *from,
85140 struct atm_aal_stats *to)
85141 {
85142-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85143+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
85144 __AAL_STAT_ITEMS
85145 #undef __HANDLE_ITEM
85146 }
85147diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
85148index d5744b7..506bae3 100644
85149--- a/net/ax25/sysctl_net_ax25.c
85150+++ b/net/ax25/sysctl_net_ax25.c
85151@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
85152 {
85153 char path[sizeof("net/ax25/") + IFNAMSIZ];
85154 int k;
85155- struct ctl_table *table;
85156+ ctl_table_no_const *table;
85157
85158 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
85159 if (!table)
85160diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
85161index 1ee94d0..14beea2 100644
85162--- a/net/batman-adv/bat_iv_ogm.c
85163+++ b/net/batman-adv/bat_iv_ogm.c
85164@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
85165
85166 /* randomize initial seqno to avoid collision */
85167 get_random_bytes(&random_seqno, sizeof(random_seqno));
85168- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85169+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85170
85171 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
85172 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
85173@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
85174 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
85175
85176 /* change sequence number to network order */
85177- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
85178+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
85179 batadv_ogm_packet->seqno = htonl(seqno);
85180- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
85181+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
85182
85183 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
85184 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
85185@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
85186 return;
85187
85188 /* could be changed by schedule_own_packet() */
85189- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
85190+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
85191
85192 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
85193 has_directlink_flag = 1;
85194diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
85195index f1d37cd..4190879 100644
85196--- a/net/batman-adv/hard-interface.c
85197+++ b/net/batman-adv/hard-interface.c
85198@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
85199 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
85200 dev_add_pack(&hard_iface->batman_adv_ptype);
85201
85202- atomic_set(&hard_iface->frag_seqno, 1);
85203+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
85204 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
85205 hard_iface->net_dev->name);
85206
85207@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
85208 /* This can't be called via a bat_priv callback because
85209 * we have no bat_priv yet.
85210 */
85211- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
85212+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
85213 hard_iface->bat_iv.ogm_buff = NULL;
85214
85215 return hard_iface;
85216diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
85217index 6b548fd..fc32c8d 100644
85218--- a/net/batman-adv/soft-interface.c
85219+++ b/net/batman-adv/soft-interface.c
85220@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
85221 primary_if->net_dev->dev_addr, ETH_ALEN);
85222
85223 /* set broadcast sequence number */
85224- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
85225+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
85226 bcast_packet->seqno = htonl(seqno);
85227
85228 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
85229@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
85230 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
85231
85232 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
85233- atomic_set(&bat_priv->bcast_seqno, 1);
85234+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
85235 atomic_set(&bat_priv->tt.vn, 0);
85236 atomic_set(&bat_priv->tt.local_changes, 0);
85237 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
85238diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
85239index ae9ac9a..11e0fe7 100644
85240--- a/net/batman-adv/types.h
85241+++ b/net/batman-adv/types.h
85242@@ -48,7 +48,7 @@
85243 struct batadv_hard_iface_bat_iv {
85244 unsigned char *ogm_buff;
85245 int ogm_buff_len;
85246- atomic_t ogm_seqno;
85247+ atomic_unchecked_t ogm_seqno;
85248 };
85249
85250 struct batadv_hard_iface {
85251@@ -56,7 +56,7 @@ struct batadv_hard_iface {
85252 int16_t if_num;
85253 char if_status;
85254 struct net_device *net_dev;
85255- atomic_t frag_seqno;
85256+ atomic_unchecked_t frag_seqno;
85257 struct kobject *hardif_obj;
85258 atomic_t refcount;
85259 struct packet_type batman_adv_ptype;
85260@@ -284,7 +284,7 @@ struct batadv_priv {
85261 atomic_t orig_interval; /* uint */
85262 atomic_t hop_penalty; /* uint */
85263 atomic_t log_level; /* uint */
85264- atomic_t bcast_seqno;
85265+ atomic_unchecked_t bcast_seqno;
85266 atomic_t bcast_queue_left;
85267 atomic_t batman_queue_left;
85268 char num_ifaces;
85269diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
85270index 10aff49..ea8e021 100644
85271--- a/net/batman-adv/unicast.c
85272+++ b/net/batman-adv/unicast.c
85273@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
85274 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
85275 frag2->flags = large_tail;
85276
85277- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
85278+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
85279 frag1->seqno = htons(seqno - 1);
85280 frag2->seqno = htons(seqno);
85281
85282diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
85283index 07f0739..3c42e34 100644
85284--- a/net/bluetooth/hci_sock.c
85285+++ b/net/bluetooth/hci_sock.c
85286@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
85287 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
85288 }
85289
85290- len = min_t(unsigned int, len, sizeof(uf));
85291+ len = min((size_t)len, sizeof(uf));
85292 if (copy_from_user(&uf, optval, len)) {
85293 err = -EFAULT;
85294 break;
85295diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
85296index 22e6583..426e2f3 100644
85297--- a/net/bluetooth/l2cap_core.c
85298+++ b/net/bluetooth/l2cap_core.c
85299@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
85300 break;
85301
85302 case L2CAP_CONF_RFC:
85303- if (olen == sizeof(rfc))
85304- memcpy(&rfc, (void *)val, olen);
85305+ if (olen != sizeof(rfc))
85306+ break;
85307+
85308+ memcpy(&rfc, (void *)val, olen);
85309
85310 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
85311 rfc.mode != chan->mode)
85312diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
85313index 1bcfb84..dad9f98 100644
85314--- a/net/bluetooth/l2cap_sock.c
85315+++ b/net/bluetooth/l2cap_sock.c
85316@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85317 struct sock *sk = sock->sk;
85318 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
85319 struct l2cap_options opts;
85320- int len, err = 0;
85321+ int err = 0;
85322+ size_t len = optlen;
85323 u32 opt;
85324
85325 BT_DBG("sk %p", sk);
85326@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85327 opts.max_tx = chan->max_tx;
85328 opts.txwin_size = chan->tx_win;
85329
85330- len = min_t(unsigned int, sizeof(opts), optlen);
85331+ len = min(sizeof(opts), len);
85332 if (copy_from_user((char *) &opts, optval, len)) {
85333 err = -EFAULT;
85334 break;
85335@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85336 struct bt_security sec;
85337 struct bt_power pwr;
85338 struct l2cap_conn *conn;
85339- int len, err = 0;
85340+ int err = 0;
85341+ size_t len = optlen;
85342 u32 opt;
85343
85344 BT_DBG("sk %p", sk);
85345@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85346
85347 sec.level = BT_SECURITY_LOW;
85348
85349- len = min_t(unsigned int, sizeof(sec), optlen);
85350+ len = min(sizeof(sec), len);
85351 if (copy_from_user((char *) &sec, optval, len)) {
85352 err = -EFAULT;
85353 break;
85354@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85355
85356 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
85357
85358- len = min_t(unsigned int, sizeof(pwr), optlen);
85359+ len = min(sizeof(pwr), len);
85360 if (copy_from_user((char *) &pwr, optval, len)) {
85361 err = -EFAULT;
85362 break;
85363diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
85364index ce3f665..2c7d08f 100644
85365--- a/net/bluetooth/rfcomm/sock.c
85366+++ b/net/bluetooth/rfcomm/sock.c
85367@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85368 struct sock *sk = sock->sk;
85369 struct bt_security sec;
85370 int err = 0;
85371- size_t len;
85372+ size_t len = optlen;
85373 u32 opt;
85374
85375 BT_DBG("sk %p", sk);
85376@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85377
85378 sec.level = BT_SECURITY_LOW;
85379
85380- len = min_t(unsigned int, sizeof(sec), optlen);
85381+ len = min(sizeof(sec), len);
85382 if (copy_from_user((char *) &sec, optval, len)) {
85383 err = -EFAULT;
85384 break;
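
The l2cap and rfcomm setsockopt paths previously clamped a signed int length with min_t(unsigned int, ...); carrying optlen in a size_t and clamping with plain min() keeps the whole computation in one unsigned domain. A small demonstration of why mixed signed/unsigned length handling is treacherous:

#include <stdio.h>
#include <stddef.h>

/* Why the patch carries option lengths in size_t: comparing a signed
 * int against sizeof() silently promotes the int to unsigned, so a
 * negative length turns into a huge value. */
int main(void)
{
	int optlen = -1;
	struct { int a, b; } opts;
	size_t len;

	if (optlen < (int)sizeof(opts))
		printf("signed compare: %d looks small\n", optlen);
	if ((size_t)optlen > sizeof(opts))
		printf("after promotion: %d became %zu\n", optlen, (size_t)optlen);

	len = (size_t)optlen;				/* the patched pattern */
	len = len < sizeof(opts) ? len : sizeof(opts);	/* min(sizeof(opts), len) */
	printf("clamped copy length: %zu\n", len);	/* never exceeds opts */
	return 0;
}
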
85385diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
85386index bd6fd0f..6492cba 100644
85387--- a/net/bluetooth/rfcomm/tty.c
85388+++ b/net/bluetooth/rfcomm/tty.c
85389@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
85390 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
85391
85392 spin_lock_irqsave(&dev->port.lock, flags);
85393- if (dev->port.count > 0) {
85394+ if (atomic_read(&dev->port.count) > 0) {
85395 spin_unlock_irqrestore(&dev->port.lock, flags);
85396 return;
85397 }
85398@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
85399 return -ENODEV;
85400
85401 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
85402- dev->channel, dev->port.count);
85403+ dev->channel, atomic_read(&dev->port.count));
85404
85405 spin_lock_irqsave(&dev->port.lock, flags);
85406- if (++dev->port.count > 1) {
85407+ if (atomic_inc_return(&dev->port.count) > 1) {
85408 spin_unlock_irqrestore(&dev->port.lock, flags);
85409 return 0;
85410 }
85411@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
85412 return;
85413
85414 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
85415- dev->port.count);
85416+ atomic_read(&dev->port.count));
85417
85418 spin_lock_irqsave(&dev->port.lock, flags);
85419- if (!--dev->port.count) {
85420+ if (!atomic_dec_return(&dev->port.count)) {
85421 spin_unlock_irqrestore(&dev->port.lock, flags);
85422 if (dev->tty_dev->parent)
85423 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
85424diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
85425index d9576e6..85f4f4e 100644
85426--- a/net/bridge/br_fdb.c
85427+++ b/net/bridge/br_fdb.c
85428@@ -386,7 +386,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
85429 return 0;
85430 br_warn(br, "adding interface %s with same address "
85431 "as a received packet\n",
85432- source->dev->name);
85433+ source ? source->dev->name : br->dev->name);
85434 fdb_delete(br, fdb);
85435 }
85436
85437diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
85438index 5fe2ff3..121d696 100644
85439--- a/net/bridge/netfilter/ebtables.c
85440+++ b/net/bridge/netfilter/ebtables.c
85441@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85442 tmp.valid_hooks = t->table->valid_hooks;
85443 }
85444 mutex_unlock(&ebt_mutex);
85445- if (copy_to_user(user, &tmp, *len) != 0){
85446+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
85447 BUGPRINT("c2u Didn't work\n");
85448 ret = -EFAULT;
85449 break;
85450@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
85451 goto out;
85452 tmp.valid_hooks = t->valid_hooks;
85453
85454- if (copy_to_user(user, &tmp, *len) != 0) {
85455+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
85456 ret = -EFAULT;
85457 break;
85458 }
85459@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
85460 tmp.entries_size = t->table->entries_size;
85461 tmp.valid_hooks = t->table->valid_hooks;
85462
85463- if (copy_to_user(user, &tmp, *len) != 0) {
85464+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
85465 ret = -EFAULT;
85466 break;
85467 }
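
do_ebt_get_ctl() and its compat variant copy *len bytes out of a fixed-size on-stack struct; before the added bound, a userland-supplied *len larger than sizeof(tmp) read adjacent kernel stack into the reply. A user-space model of the fixed pattern, with stand-in struct fields:

#include <stdio.h>
#include <string.h>

struct repl {			/* stand-in for the on-stack reply struct */
	char name[32];
	unsigned int valid_hooks;
};

/* The fixed pattern: never copy out more than the source struct holds. */
static int copy_out(void *user, size_t user_len, const struct repl *tmp)
{
	if (user_len > sizeof(*tmp))
		return -1;		/* previously: leaked stack past tmp */
	memcpy(user, tmp, user_len);	/* copy_to_user() in the kernel */
	return 0;
}

int main(void)
{
	struct repl tmp = { "filter", 0x7 };
	char buf[128];

	printf("%d\n", copy_out(buf, sizeof(tmp), &tmp));	/*  0 */
	printf("%d\n", copy_out(buf, sizeof(buf), &tmp));	/* -1: rejected */
	return 0;
}
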
85468diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
85469index a376ec1..1fbd6be 100644
85470--- a/net/caif/cfctrl.c
85471+++ b/net/caif/cfctrl.c
85472@@ -10,6 +10,7 @@
85473 #include <linux/spinlock.h>
85474 #include <linux/slab.h>
85475 #include <linux/pkt_sched.h>
85476+#include <linux/sched.h>
85477 #include <net/caif/caif_layer.h>
85478 #include <net/caif/cfpkt.h>
85479 #include <net/caif/cfctrl.h>
85480@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
85481 memset(&dev_info, 0, sizeof(dev_info));
85482 dev_info.id = 0xff;
85483 cfsrvl_init(&this->serv, 0, &dev_info, false);
85484- atomic_set(&this->req_seq_no, 1);
85485- atomic_set(&this->rsp_seq_no, 1);
85486+ atomic_set_unchecked(&this->req_seq_no, 1);
85487+ atomic_set_unchecked(&this->rsp_seq_no, 1);
85488 this->serv.layer.receive = cfctrl_recv;
85489 sprintf(this->serv.layer.name, "ctrl");
85490 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
85491@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
85492 struct cfctrl_request_info *req)
85493 {
85494 spin_lock_bh(&ctrl->info_list_lock);
85495- atomic_inc(&ctrl->req_seq_no);
85496- req->sequence_no = atomic_read(&ctrl->req_seq_no);
85497+ atomic_inc_unchecked(&ctrl->req_seq_no);
85498+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
85499 list_add_tail(&req->list, &ctrl->list);
85500 spin_unlock_bh(&ctrl->info_list_lock);
85501 }
85502@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
85503 if (p != first)
85504 pr_warn("Requests are not received in order\n");
85505
85506- atomic_set(&ctrl->rsp_seq_no,
85507+ atomic_set_unchecked(&ctrl->rsp_seq_no,
85508 p->sequence_no);
85509 list_del(&p->list);
85510 goto out;
85511diff --git a/net/can/af_can.c b/net/can/af_can.c
85512index ddac1ee..3ee0a78 100644
85513--- a/net/can/af_can.c
85514+++ b/net/can/af_can.c
85515@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
85516 };
85517
85518 /* notifier block for netdevice event */
85519-static struct notifier_block can_netdev_notifier __read_mostly = {
85520+static struct notifier_block can_netdev_notifier = {
85521 .notifier_call = can_notifier,
85522 };
85523
85524diff --git a/net/can/gw.c b/net/can/gw.c
85525index 574dda78e..3d2b3da 100644
85526--- a/net/can/gw.c
85527+++ b/net/can/gw.c
85528@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
85529 MODULE_ALIAS("can-gw");
85530
85531 static HLIST_HEAD(cgw_list);
85532-static struct notifier_block notifier;
85533
85534 static struct kmem_cache *cgw_cache __read_mostly;
85535
85536@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
85537 return err;
85538 }
85539
85540+static struct notifier_block notifier = {
85541+ .notifier_call = cgw_notifier
85542+};
85543+
85544 static __init int cgw_module_init(void)
85545 {
85546 printk(banner);
85547@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
85548 return -ENOMEM;
85549
85550 /* set notifier */
85551- notifier.notifier_call = cgw_notifier;
85552 register_netdevice_notifier(&notifier);
85553
85554 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
85555diff --git a/net/compat.c b/net/compat.c
85556index 79ae884..17c5c09 100644
85557--- a/net/compat.c
85558+++ b/net/compat.c
85559@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
85560 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
85561 __get_user(kmsg->msg_flags, &umsg->msg_flags))
85562 return -EFAULT;
85563- kmsg->msg_name = compat_ptr(tmp1);
85564- kmsg->msg_iov = compat_ptr(tmp2);
85565- kmsg->msg_control = compat_ptr(tmp3);
85566+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
85567+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
85568+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
85569 return 0;
85570 }
85571
85572@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85573
85574 if (kern_msg->msg_namelen) {
85575 if (mode == VERIFY_READ) {
85576- int err = move_addr_to_kernel(kern_msg->msg_name,
85577+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
85578 kern_msg->msg_namelen,
85579 kern_address);
85580 if (err < 0)
85581@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85582 kern_msg->msg_name = NULL;
85583
85584 tot_len = iov_from_user_compat_to_kern(kern_iov,
85585- (struct compat_iovec __user *)kern_msg->msg_iov,
85586+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
85587 kern_msg->msg_iovlen);
85588 if (tot_len >= 0)
85589 kern_msg->msg_iov = kern_iov;
85590@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85591
85592 #define CMSG_COMPAT_FIRSTHDR(msg) \
85593 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
85594- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
85595+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
85596 (struct compat_cmsghdr __user *)NULL)
85597
85598 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
85599 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
85600 (ucmlen) <= (unsigned long) \
85601 ((mhdr)->msg_controllen - \
85602- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
85603+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
85604
85605 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
85606 struct compat_cmsghdr __user *cmsg, int cmsg_len)
85607 {
85608 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
85609- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
85610+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
85611 msg->msg_controllen)
85612 return NULL;
85613 return (struct compat_cmsghdr __user *)ptr;
85614@@ -219,7 +219,7 @@ Efault:
85615
85616 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
85617 {
85618- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85619+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85620 struct compat_cmsghdr cmhdr;
85621 struct compat_timeval ctv;
85622 struct compat_timespec cts[3];
85623@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
85624
85625 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
85626 {
85627- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85628+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85629 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
85630 int fdnum = scm->fp->count;
85631 struct file **fp = scm->fp->fp;
85632@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
85633 return -EFAULT;
85634 old_fs = get_fs();
85635 set_fs(KERNEL_DS);
85636- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
85637+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
85638 set_fs(old_fs);
85639
85640 return err;
85641@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
85642 len = sizeof(ktime);
85643 old_fs = get_fs();
85644 set_fs(KERNEL_DS);
85645- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
85646+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
85647 set_fs(old_fs);
85648
85649 if (!err) {
85650@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85651 case MCAST_JOIN_GROUP:
85652 case MCAST_LEAVE_GROUP:
85653 {
85654- struct compat_group_req __user *gr32 = (void *)optval;
85655+ struct compat_group_req __user *gr32 = (void __user *)optval;
85656 struct group_req __user *kgr =
85657 compat_alloc_user_space(sizeof(struct group_req));
85658 u32 interface;
85659@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85660 case MCAST_BLOCK_SOURCE:
85661 case MCAST_UNBLOCK_SOURCE:
85662 {
85663- struct compat_group_source_req __user *gsr32 = (void *)optval;
85664+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
85665 struct group_source_req __user *kgsr = compat_alloc_user_space(
85666 sizeof(struct group_source_req));
85667 u32 interface;
85668@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85669 }
85670 case MCAST_MSFILTER:
85671 {
85672- struct compat_group_filter __user *gf32 = (void *)optval;
85673+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85674 struct group_filter __user *kgf;
85675 u32 interface, fmode, numsrc;
85676
85677@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
85678 char __user *optval, int __user *optlen,
85679 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
85680 {
85681- struct compat_group_filter __user *gf32 = (void *)optval;
85682+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85683 struct group_filter __user *kgf;
85684 int __user *koptlen;
85685 u32 interface, fmode, numsrc;
85686@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
85687
85688 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
85689 return -EINVAL;
85690- if (copy_from_user(a, args, nas[call]))
85691+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
85692 return -EFAULT;
85693 a0 = a[0];
85694 a1 = a[1];
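
compat_sys_socketcall() copies nas[call] bytes of arguments into a fixed array a[6]; the added nas[call] > sizeof a test guarantees that no table entry, present or future, can overflow the stack array. Modelled in user space with a shortened table:

#include <stdio.h>
#include <string.h>

/* Bytes of argument data per call number, as in the kernel's nas[]. */
static const unsigned char nas[3] = { 3 * 4, 6 * 4, 4 * 4 };

static int socketcall(int call, const unsigned int *args)
{
	unsigned int a[6];

	if (call < 0 || call >= 3)
		return -1;		/* -EINVAL in the kernel */
	if (nas[call] > sizeof a)	/* the added belt-and-braces bound */
		return -2;
	memcpy(a, args, nas[call]);	/* copy_from_user() in the kernel */
	return (int)a[0];
}

int main(void)
{
	unsigned int args[6] = { 7, 0, 0, 0, 0, 0 };

	printf("%d\n", socketcall(1, args));	/* 7 */
	return 0;
}
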
85695diff --git a/net/core/datagram.c b/net/core/datagram.c
85696index 368f9c3..f82d4a3 100644
85697--- a/net/core/datagram.c
85698+++ b/net/core/datagram.c
85699@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
85700 }
85701
85702 kfree_skb(skb);
85703- atomic_inc(&sk->sk_drops);
85704+ atomic_inc_unchecked(&sk->sk_drops);
85705 sk_mem_reclaim_partial(sk);
85706
85707 return err;
85708diff --git a/net/core/dev.c b/net/core/dev.c
85709index 5d9c43d..b471558 100644
85710--- a/net/core/dev.c
85711+++ b/net/core/dev.c
85712@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
85713 if (no_module && capable(CAP_NET_ADMIN))
85714 no_module = request_module("netdev-%s", name);
85715 if (no_module && capable(CAP_SYS_MODULE)) {
85716+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85717+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
85718+#else
85719 if (!request_module("%s", name))
85720 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
85721 name);
85722+#endif
85723 }
85724 }
85725 EXPORT_SYMBOL(dev_load);
85726@@ -1714,7 +1718,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85727 {
85728 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
85729 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
85730- atomic_long_inc(&dev->rx_dropped);
85731+ atomic_long_inc_unchecked(&dev->rx_dropped);
85732 kfree_skb(skb);
85733 return NET_RX_DROP;
85734 }
85735@@ -1724,7 +1728,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85736 nf_reset(skb);
85737
85738 if (unlikely(!is_skb_forwardable(dev, skb))) {
85739- atomic_long_inc(&dev->rx_dropped);
85740+ atomic_long_inc_unchecked(&dev->rx_dropped);
85741 kfree_skb(skb);
85742 return NET_RX_DROP;
85743 }
85744@@ -2179,7 +2183,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
85745
85746 struct dev_gso_cb {
85747 void (*destructor)(struct sk_buff *skb);
85748-};
85749+} __no_const;
85750
85751 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
85752
85753@@ -3052,7 +3056,7 @@ enqueue:
85754
85755 local_irq_restore(flags);
85756
85757- atomic_long_inc(&skb->dev->rx_dropped);
85758+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85759 kfree_skb(skb);
85760 return NET_RX_DROP;
85761 }
85762@@ -3124,7 +3128,7 @@ int netif_rx_ni(struct sk_buff *skb)
85763 }
85764 EXPORT_SYMBOL(netif_rx_ni);
85765
85766-static void net_tx_action(struct softirq_action *h)
85767+static void net_tx_action(void)
85768 {
85769 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85770
85771@@ -3462,7 +3466,7 @@ ncls:
85772 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
85773 } else {
85774 drop:
85775- atomic_long_inc(&skb->dev->rx_dropped);
85776+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85777 kfree_skb(skb);
85778 /* Jamal, now you will not able to escape explaining
85779 * me how you were going to use this. :-)
85780@@ -4045,7 +4049,7 @@ void netif_napi_del(struct napi_struct *napi)
85781 }
85782 EXPORT_SYMBOL(netif_napi_del);
85783
85784-static void net_rx_action(struct softirq_action *h)
85785+static void net_rx_action(void)
85786 {
85787 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85788 unsigned long time_limit = jiffies + 2;
85789@@ -4529,8 +4533,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
85790 else
85791 seq_printf(seq, "%04x", ntohs(pt->type));
85792
85793+#ifdef CONFIG_GRKERNSEC_HIDESYM
85794+ seq_printf(seq, " %-8s %p\n",
85795+ pt->dev ? pt->dev->name : "", NULL);
85796+#else
85797 seq_printf(seq, " %-8s %pF\n",
85798 pt->dev ? pt->dev->name : "", pt->func);
85799+#endif
85800 }
85801
85802 return 0;
85803@@ -6102,7 +6111,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
85804 } else {
85805 netdev_stats_to_stats64(storage, &dev->stats);
85806 }
85807- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
85808+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
85809 return storage;
85810 }
85811 EXPORT_SYMBOL(dev_get_stats);
85812diff --git a/net/core/flow.c b/net/core/flow.c
85813index 3bad824..2071a55 100644
85814--- a/net/core/flow.c
85815+++ b/net/core/flow.c
85816@@ -61,7 +61,7 @@ struct flow_cache {
85817 struct timer_list rnd_timer;
85818 };
85819
85820-atomic_t flow_cache_genid = ATOMIC_INIT(0);
85821+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
85822 EXPORT_SYMBOL(flow_cache_genid);
85823 static struct flow_cache flow_cache_global;
85824 static struct kmem_cache *flow_cachep __read_mostly;
85825@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
85826
85827 static int flow_entry_valid(struct flow_cache_entry *fle)
85828 {
85829- if (atomic_read(&flow_cache_genid) != fle->genid)
85830+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
85831 return 0;
85832 if (fle->object && !fle->object->ops->check(fle->object))
85833 return 0;
85834@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
85835 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
85836 fcp->hash_count++;
85837 }
85838- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
85839+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
85840 flo = fle->object;
85841 if (!flo)
85842 goto ret_object;
85843@@ -280,7 +280,7 @@ nocache:
85844 }
85845 flo = resolver(net, key, family, dir, flo, ctx);
85846 if (fle) {
85847- fle->genid = atomic_read(&flow_cache_genid);
85848+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
85849 if (!IS_ERR(flo))
85850 fle->object = flo;
85851 else
85852diff --git a/net/core/iovec.c b/net/core/iovec.c
85853index 7e7aeb0..2a998cb 100644
85854--- a/net/core/iovec.c
85855+++ b/net/core/iovec.c
85856@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85857 if (m->msg_namelen) {
85858 if (mode == VERIFY_READ) {
85859 void __user *namep;
85860- namep = (void __user __force *) m->msg_name;
85861+ namep = (void __force_user *) m->msg_name;
85862 err = move_addr_to_kernel(namep, m->msg_namelen,
85863 address);
85864 if (err < 0)
85865@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85866 }
85867
85868 size = m->msg_iovlen * sizeof(struct iovec);
85869- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
85870+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
85871 return -EFAULT;
85872
85873 m->msg_iov = iov;
85874diff --git a/net/core/neighbour.c b/net/core/neighbour.c
85875index c815f28..e6403f2 100644
85876--- a/net/core/neighbour.c
85877+++ b/net/core/neighbour.c
85878@@ -2776,7 +2776,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
85879 size_t *lenp, loff_t *ppos)
85880 {
85881 int size, ret;
85882- ctl_table tmp = *ctl;
85883+ ctl_table_no_const tmp = *ctl;
85884
85885 tmp.extra1 = &zero;
85886 tmp.extra2 = &unres_qlen_max;
85887diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
85888index 28c5f5a..7edf2e2 100644
85889--- a/net/core/net-sysfs.c
85890+++ b/net/core/net-sysfs.c
85891@@ -1455,7 +1455,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
85892 }
85893 EXPORT_SYMBOL(netdev_class_remove_file);
85894
85895-int netdev_kobject_init(void)
85896+int __init netdev_kobject_init(void)
85897 {
85898 kobj_ns_type_register(&net_ns_type_operations);
85899 return class_register(&net_class);
85900diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
85901index 8acce01..2e306bb 100644
85902--- a/net/core/net_namespace.c
85903+++ b/net/core/net_namespace.c
85904@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
85905 int error;
85906 LIST_HEAD(net_exit_list);
85907
85908- list_add_tail(&ops->list, list);
85909+ pax_list_add_tail((struct list_head *)&ops->list, list);
85910 if (ops->init || (ops->id && ops->size)) {
85911 for_each_net(net) {
85912 error = ops_init(ops, net);
85913@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
85914
85915 out_undo:
85916 /* If I have an error cleanup all namespaces I initialized */
85917- list_del(&ops->list);
85918+ pax_list_del((struct list_head *)&ops->list);
85919 ops_exit_list(ops, &net_exit_list);
85920 ops_free_list(ops, &net_exit_list);
85921 return error;
85922@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
85923 struct net *net;
85924 LIST_HEAD(net_exit_list);
85925
85926- list_del(&ops->list);
85927+ pax_list_del((struct list_head *)&ops->list);
85928 for_each_net(net)
85929 list_add_tail(&net->exit_list, &net_exit_list);
85930 ops_exit_list(ops, &net_exit_list);
85931@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
85932 mutex_lock(&net_mutex);
85933 error = register_pernet_operations(&pernet_list, ops);
85934 if (!error && (first_device == &pernet_list))
85935- first_device = &ops->list;
85936+ first_device = (struct list_head *)&ops->list;
85937 mutex_unlock(&net_mutex);
85938 return error;
85939 }
85940diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
85941index 6212ec9..dd4ad3b 100644
85942--- a/net/core/rtnetlink.c
85943+++ b/net/core/rtnetlink.c
85944@@ -58,7 +58,7 @@ struct rtnl_link {
85945 rtnl_doit_func doit;
85946 rtnl_dumpit_func dumpit;
85947 rtnl_calcit_func calcit;
85948-};
85949+} __no_const;
85950
85951 static DEFINE_MUTEX(rtnl_mutex);
85952
85953@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
85954 if (rtnl_link_ops_get(ops->kind))
85955 return -EEXIST;
85956
85957- if (!ops->dellink)
85958- ops->dellink = unregister_netdevice_queue;
85959+ if (!ops->dellink) {
85960+ pax_open_kernel();
85961+ *(void **)&ops->dellink = unregister_netdevice_queue;
85962+ pax_close_kernel();
85963+ }
85964
85965- list_add_tail(&ops->list, &link_ops);
85966+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
85967 return 0;
85968 }
85969 EXPORT_SYMBOL_GPL(__rtnl_link_register);
85970@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
85971 for_each_net(net) {
85972 __rtnl_kill_links(net, ops);
85973 }
85974- list_del(&ops->list);
85975+ pax_list_del((struct list_head *)&ops->list);
85976 }
85977 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
85978
85979diff --git a/net/core/scm.c b/net/core/scm.c
85980index 2dc6cda..2159524 100644
85981--- a/net/core/scm.c
85982+++ b/net/core/scm.c
85983@@ -226,7 +226,7 @@ EXPORT_SYMBOL(__scm_send);
85984 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85985 {
85986 struct cmsghdr __user *cm
85987- = (__force struct cmsghdr __user *)msg->msg_control;
85988+ = (struct cmsghdr __force_user *)msg->msg_control;
85989 struct cmsghdr cmhdr;
85990 int cmlen = CMSG_LEN(len);
85991 int err;
85992@@ -249,7 +249,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85993 err = -EFAULT;
85994 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
85995 goto out;
85996- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
85997+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
85998 goto out;
85999 cmlen = CMSG_SPACE(len);
86000 if (msg->msg_controllen < cmlen)
86001@@ -265,7 +265,7 @@ EXPORT_SYMBOL(put_cmsg);
86002 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86003 {
86004 struct cmsghdr __user *cm
86005- = (__force struct cmsghdr __user*)msg->msg_control;
86006+ = (struct cmsghdr __force_user *)msg->msg_control;
86007
86008 int fdmax = 0;
86009 int fdnum = scm->fp->count;
86010@@ -285,7 +285,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86011 if (fdnum < fdmax)
86012 fdmax = fdnum;
86013
86014- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
86015+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
86016 i++, cmfptr++)
86017 {
86018 struct socket *sock;
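
The scm.c hunks only change sparse annotations: __user marks a pointer into the user address space, copy_to_user() is annotated to accept only such pointers, and __force acknowledges a deliberate address-space change. msg_control is declared as a plain pointer but actually refers to user memory on this path, hence the re-tagging; __force_user appears to be grsecurity's shorthand for __force __user. A sketch of how such annotations are conventionally defined — they expand to nothing outside a sparse run:

	/* Sketch of sparse address-space annotations; without __CHECKER__ they
	 * compile away, so they are purely a static-analysis aid. */
	#ifdef __CHECKER__
	# define __user  __attribute__((noderef, address_space(1)))
	# define __force __attribute__((force))
	#else
	# define __user
	# define __force
	#endif

	struct msg {
		void *msg_control;	/* may point at user or kernel memory */
	};

	/* Accepts only user pointers when checked with sparse. */
	long copy_to_user_stub(void __user *dst, const void *src, unsigned long n);

	static long put_ctl(struct msg *m, const void *data, unsigned long n)
	{
		/* The cast documents that msg_control really is a user pointer
		 * here; __force suppresses the address-space warning. */
		return copy_to_user_stub((__force void __user *)m->msg_control,
					 data, n);
	}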
86019diff --git a/net/core/sock.c b/net/core/sock.c
86020index bc131d4..029e378 100644
86021--- a/net/core/sock.c
86022+++ b/net/core/sock.c
86023@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86024 struct sk_buff_head *list = &sk->sk_receive_queue;
86025
86026 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
86027- atomic_inc(&sk->sk_drops);
86028+ atomic_inc_unchecked(&sk->sk_drops);
86029 trace_sock_rcvqueue_full(sk, skb);
86030 return -ENOMEM;
86031 }
86032@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86033 return err;
86034
86035 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
86036- atomic_inc(&sk->sk_drops);
86037+ atomic_inc_unchecked(&sk->sk_drops);
86038 return -ENOBUFS;
86039 }
86040
86041@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86042 skb_dst_force(skb);
86043
86044 spin_lock_irqsave(&list->lock, flags);
86045- skb->dropcount = atomic_read(&sk->sk_drops);
86046+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
86047 __skb_queue_tail(list, skb);
86048 spin_unlock_irqrestore(&list->lock, flags);
86049
86050@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86051 skb->dev = NULL;
86052
86053 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
86054- atomic_inc(&sk->sk_drops);
86055+ atomic_inc_unchecked(&sk->sk_drops);
86056 goto discard_and_relse;
86057 }
86058 if (nested)
86059@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86060 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
86061 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
86062 bh_unlock_sock(sk);
86063- atomic_inc(&sk->sk_drops);
86064+ atomic_inc_unchecked(&sk->sk_drops);
86065 goto discard_and_relse;
86066 }
86067
86068@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86069 struct timeval tm;
86070 } v;
86071
86072- int lv = sizeof(int);
86073- int len;
86074+ unsigned int lv = sizeof(int);
86075+ unsigned int len;
86076
86077 if (get_user(len, optlen))
86078 return -EFAULT;
86079- if (len < 0)
86080+ if (len > INT_MAX)
86081 return -EINVAL;
86082
86083 memset(&v, 0, sizeof(v));
86084@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86085
86086 case SO_PEERNAME:
86087 {
86088- char address[128];
86089+ char address[_K_SS_MAXSIZE];
86090
86091 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
86092 return -ENOTCONN;
86093- if (lv < len)
86094+ if (lv < len || sizeof address < len)
86095 return -EINVAL;
86096 if (copy_to_user(optval, address, len))
86097 return -EFAULT;
86098@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86099
86100 if (len > lv)
86101 len = lv;
86102- if (copy_to_user(optval, &v, len))
86103+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
86104 return -EFAULT;
86105 lenout:
86106 if (put_user(len, optlen))
86107@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
86108 */
86109 smp_wmb();
86110 atomic_set(&sk->sk_refcnt, 1);
86111- atomic_set(&sk->sk_drops, 0);
86112+ atomic_set_unchecked(&sk->sk_drops, 0);
86113 }
86114 EXPORT_SYMBOL(sock_init_data);
86115
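
In sock_getsockopt(), len and lv become unsigned, so the old len < 0 test turns into len > INT_MAX: a negative optlen from userspace wraps to a huge unsigned value and is rejected by the same comparison. The added sizeof address < len and len > sizeof(v) guards then pin every copy_to_user() to the kernel buffer even if an option handler miscomputes lv. A runnable demonstration of the signedness half:

	/* Why "len > INT_MAX" on an unsigned value catches negative input. */
	#include <limits.h>
	#include <stdio.h>

	static int validate(unsigned int len)
	{
		if (len > INT_MAX)	/* any negative int lands here */
			return -1;	/* -EINVAL in the kernel */
		return 0;
	}

	int main(void)
	{
		int from_user = -4;	/* hostile getsockopt optlen */

		printf("-4 as unsigned: %u -> %s\n", (unsigned int)from_user,
		       validate((unsigned int)from_user) ? "rejected" : "accepted");
		printf("1024 -> %s\n", validate(1024) ? "rejected" : "accepted");
		return 0;
	}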
86116diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
86117index 750f44f..922399c 100644
86118--- a/net/core/sock_diag.c
86119+++ b/net/core/sock_diag.c
86120@@ -9,26 +9,33 @@
86121 #include <linux/inet_diag.h>
86122 #include <linux/sock_diag.h>
86123
86124-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
86125+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
86126 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
86127 static DEFINE_MUTEX(sock_diag_table_mutex);
86128
86129 int sock_diag_check_cookie(void *sk, __u32 *cookie)
86130 {
86131+#ifndef CONFIG_GRKERNSEC_HIDESYM
86132 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
86133 cookie[1] != INET_DIAG_NOCOOKIE) &&
86134 ((u32)(unsigned long)sk != cookie[0] ||
86135 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
86136 return -ESTALE;
86137 else
86138+#endif
86139 return 0;
86140 }
86141 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
86142
86143 void sock_diag_save_cookie(void *sk, __u32 *cookie)
86144 {
86145+#ifdef CONFIG_GRKERNSEC_HIDESYM
86146+ cookie[0] = 0;
86147+ cookie[1] = 0;
86148+#else
86149 cookie[0] = (u32)(unsigned long)sk;
86150 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
86151+#endif
86152 }
86153 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
86154
86155@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
86156 mutex_lock(&sock_diag_table_mutex);
86157 if (sock_diag_handlers[hndl->family])
86158 err = -EBUSY;
86159- else
86160+ else {
86161+ pax_open_kernel();
86162 sock_diag_handlers[hndl->family] = hndl;
86163+ pax_close_kernel();
86164+ }
86165 mutex_unlock(&sock_diag_table_mutex);
86166
86167 return err;
86168@@ -92,26 +102,13 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
86169
86170 mutex_lock(&sock_diag_table_mutex);
86171 BUG_ON(sock_diag_handlers[family] != hnld);
86172+ pax_open_kernel();
86173 sock_diag_handlers[family] = NULL;
86174+ pax_close_kernel();
86175 mutex_unlock(&sock_diag_table_mutex);
86176 }
86177 EXPORT_SYMBOL_GPL(sock_diag_unregister);
86178
86179-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
86180-{
86181- if (sock_diag_handlers[family] == NULL)
86182- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
86183- NETLINK_SOCK_DIAG, family);
86184-
86185- mutex_lock(&sock_diag_table_mutex);
86186- return sock_diag_handlers[family];
86187-}
86188-
86189-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
86190-{
86191- mutex_unlock(&sock_diag_table_mutex);
86192-}
86193-
86194 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
86195 {
86196 int err;
86197@@ -124,12 +121,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
86198 if (req->sdiag_family >= AF_MAX)
86199 return -EINVAL;
86200
86201- hndl = sock_diag_lock_handler(req->sdiag_family);
86202+ if (sock_diag_handlers[req->sdiag_family] == NULL)
86203+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
86204+ NETLINK_SOCK_DIAG, req->sdiag_family);
86205+
86206+ mutex_lock(&sock_diag_table_mutex);
86207+ hndl = sock_diag_handlers[req->sdiag_family];
86208 if (hndl == NULL)
86209 err = -ENOENT;
86210 else
86211 err = hndl->dump(skb, nlh);
86212- sock_diag_unlock_handler(hndl);
86213+ mutex_unlock(&sock_diag_table_mutex);
86214
86215 return err;
86216 }
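
sock_diag cookies are the socket's kernel address split into two 32-bit halves, so under GRKERNSEC_HIDESYM the patch zeroes them on save and skips the comparison on check, keeping the address out of userspace. The odd (((unsigned long)sk) >> 31) >> 1 is a portable shift by 32: a single >> 32 would be undefined behaviour when unsigned long is 32 bits wide, while two shifts are well defined and simply yield zero there. Runnable illustration of the split:

	/* Splitting a pointer-sized value into two 32-bit cookie halves; the
	 * ">> 31 >> 1" stays defined even when unsigned long has 32 bits. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int obj;
		unsigned long sk = (unsigned long)&obj;	/* stand-in for a socket address */
		uint32_t c0 = (uint32_t)sk;
		uint32_t c1 = (uint32_t)((sk >> 31) >> 1);	/* 0 on 32-bit platforms */

		printf("cookie = %08x:%08x\n", c1, c0);
		return 0;
	}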
86217diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
86218index d1b0804..98cf5f7 100644
86219--- a/net/core/sysctl_net_core.c
86220+++ b/net/core/sysctl_net_core.c
86221@@ -26,7 +26,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
86222 {
86223 unsigned int orig_size, size;
86224 int ret, i;
86225- ctl_table tmp = {
86226+ ctl_table_no_const tmp = {
86227 .data = &size,
86228 .maxlen = sizeof(size),
86229 .mode = table->mode
86230@@ -205,13 +205,12 @@ static struct ctl_table netns_core_table[] = {
86231
86232 static __net_init int sysctl_core_net_init(struct net *net)
86233 {
86234- struct ctl_table *tbl;
86235+ ctl_table_no_const *tbl = NULL;
86236
86237 net->core.sysctl_somaxconn = SOMAXCONN;
86238
86239- tbl = netns_core_table;
86240 if (!net_eq(net, &init_net)) {
86241- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
86242+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
86243 if (tbl == NULL)
86244 goto err_dup;
86245
86246@@ -221,17 +220,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
86247 if (net->user_ns != &init_user_ns) {
86248 tbl[0].procname = NULL;
86249 }
86250- }
86251-
86252- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86253+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86254+ } else
86255+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
86256 if (net->core.sysctl_hdr == NULL)
86257 goto err_reg;
86258
86259 return 0;
86260
86261 err_reg:
86262- if (tbl != netns_core_table)
86263- kfree(tbl);
86264+ kfree(tbl);
86265 err_dup:
86266 return -ENOMEM;
86267 }
86268@@ -246,7 +244,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
86269 kfree(tbl);
86270 }
86271
86272-static __net_initdata struct pernet_operations sysctl_core_ops = {
86273+static __net_initconst struct pernet_operations sysctl_core_ops = {
86274 .init = sysctl_core_net_init,
86275 .exit = sysctl_core_net_exit,
86276 };
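
sysctl_core_net_init() is reshaped into the pattern this patch applies to every per-netns sysctl registration: the static template table stays const (so the constify plugin can protect it), init_net registers it directly, and only other namespaces receive a writable kmemdup() copy typed ctl_table_no_const. Because tbl starts as NULL and is assigned on the non-init path alone, the unwind code may kfree(tbl) unconditionally. A condensed sketch of the pattern — template_table, net->hdr and net->example_value are placeholder names, and this is not a buildable kernel excerpt:

	/* Condensed form of the per-netns sysctl pattern used throughout
	 * this patch (names abbreviated). */
	static __net_init int example_net_init(struct net *net)
	{
		ctl_table_no_const *tbl = NULL;		/* writable copy, if any */

		if (!net_eq(net, &init_net)) {
			tbl = kmemdup(template_table, sizeof(template_table), GFP_KERNEL);
			if (!tbl)
				return -ENOMEM;
			tbl[0].data = &net->example_value;	/* per-netns fixups */
			net->hdr = register_net_sysctl(net, "net/example", tbl);
		} else {
			/* init_net uses the read-only template as-is */
			net->hdr = register_net_sysctl(net, "net/example", template_table);
		}
		if (!net->hdr) {
			kfree(tbl);	/* NULL for init_net, so always safe */
			return -ENOMEM;
		}
		return 0;
	}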
86277diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
86278index 307c322..78a4c6f 100644
86279--- a/net/decnet/af_decnet.c
86280+++ b/net/decnet/af_decnet.c
86281@@ -468,6 +468,7 @@ static struct proto dn_proto = {
86282 .sysctl_rmem = sysctl_decnet_rmem,
86283 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
86284 .obj_size = sizeof(struct dn_sock),
86285+ .slab_flags = SLAB_USERCOPY,
86286 };
86287
86288 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
86289diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
86290index a55eecc..dd8428c 100644
86291--- a/net/decnet/sysctl_net_decnet.c
86292+++ b/net/decnet/sysctl_net_decnet.c
86293@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
86294
86295 if (len > *lenp) len = *lenp;
86296
86297- if (copy_to_user(buffer, addr, len))
86298+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
86299 return -EFAULT;
86300
86301 *lenp = len;
86302@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
86303
86304 if (len > *lenp) len = *lenp;
86305
86306- if (copy_to_user(buffer, devname, len))
86307+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
86308 return -EFAULT;
86309
86310 *lenp = len;
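
Both decnet handlers gain the same clamp: never copy more than sizeof the kernel source buffer, even though len was already bounded by *lenp. This is defense in depth — if any earlier computation of len were wrong, copy_to_user() still could not read past addr or devname (the unparenthesized sizeof addr form works only because both are true arrays, not pointers). A userspace sketch of the bounded export, with memcpy standing in for copy_to_user():

	/* Bounded export of a fixed kernel-side buffer; memcpy stands in for
	 * copy_to_user(). */
	#include <stdio.h>
	#include <string.h>

	static int export_name(char *dst, size_t len)
	{
		char devname[17] = "dummy0";

		if (len > sizeof(devname))	/* valid because devname is an array */
			return -1;		/* -EFAULT in the kernel */
		memcpy(dst, devname, len);
		return 0;
	}

	int main(void)
	{
		char out[64];

		printf("len=8:   %s\n", export_name(out, 8)   ? "rejected" : "copied");
		printf("len=400: %s\n", export_name(out, 400) ? "rejected" : "copied");
		return 0;
	}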
86311diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
86312index fcf104e..95552d4 100644
86313--- a/net/ipv4/af_inet.c
86314+++ b/net/ipv4/af_inet.c
86315@@ -1717,13 +1717,9 @@ static int __init inet_init(void)
86316
86317 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
86318
86319- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
86320- if (!sysctl_local_reserved_ports)
86321- goto out;
86322-
86323 rc = proto_register(&tcp_prot, 1);
86324 if (rc)
86325- goto out_free_reserved_ports;
86326+ goto out;
86327
86328 rc = proto_register(&udp_prot, 1);
86329 if (rc)
86330@@ -1832,8 +1828,6 @@ out_unregister_udp_proto:
86331 proto_unregister(&udp_prot);
86332 out_unregister_tcp_proto:
86333 proto_unregister(&tcp_prot);
86334-out_free_reserved_ports:
86335- kfree(sysctl_local_reserved_ports);
86336 goto out;
86337 }
86338
86339diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
86340index a69b4e4..dbccba5 100644
86341--- a/net/ipv4/ah4.c
86342+++ b/net/ipv4/ah4.c
86343@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
86344 return;
86345
86346 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86347- atomic_inc(&flow_cache_genid);
86348+ atomic_inc_unchecked(&flow_cache_genid);
86349 rt_genid_bump(net);
86350
86351 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
86352diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
86353index a8e4f26..25e5f40 100644
86354--- a/net/ipv4/devinet.c
86355+++ b/net/ipv4/devinet.c
86356@@ -1763,7 +1763,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
86357 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
86358 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
86359
86360-static struct devinet_sysctl_table {
86361+static const struct devinet_sysctl_table {
86362 struct ctl_table_header *sysctl_header;
86363 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
86364 } devinet_sysctl = {
86365@@ -1881,7 +1881,7 @@ static __net_init int devinet_init_net(struct net *net)
86366 int err;
86367 struct ipv4_devconf *all, *dflt;
86368 #ifdef CONFIG_SYSCTL
86369- struct ctl_table *tbl = ctl_forward_entry;
86370+ ctl_table_no_const *tbl = NULL;
86371 struct ctl_table_header *forw_hdr;
86372 #endif
86373
86374@@ -1899,7 +1899,7 @@ static __net_init int devinet_init_net(struct net *net)
86375 goto err_alloc_dflt;
86376
86377 #ifdef CONFIG_SYSCTL
86378- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
86379+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
86380 if (tbl == NULL)
86381 goto err_alloc_ctl;
86382
86383@@ -1919,7 +1919,10 @@ static __net_init int devinet_init_net(struct net *net)
86384 goto err_reg_dflt;
86385
86386 err = -ENOMEM;
86387- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
86388+ if (!net_eq(net, &init_net))
86389+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
86390+ else
86391+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
86392 if (forw_hdr == NULL)
86393 goto err_reg_ctl;
86394 net->ipv4.forw_hdr = forw_hdr;
86395@@ -1935,8 +1938,7 @@ err_reg_ctl:
86396 err_reg_dflt:
86397 __devinet_sysctl_unregister(all);
86398 err_reg_all:
86399- if (tbl != ctl_forward_entry)
86400- kfree(tbl);
86401+ kfree(tbl);
86402 err_alloc_ctl:
86403 #endif
86404 if (dflt != &ipv4_devconf_dflt)
86405diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
86406index 3b4f0cd..8cb864c 100644
86407--- a/net/ipv4/esp4.c
86408+++ b/net/ipv4/esp4.c
86409@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
86410 return;
86411
86412 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86413- atomic_inc(&flow_cache_genid);
86414+ atomic_inc_unchecked(&flow_cache_genid);
86415 rt_genid_bump(net);
86416
86417 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
86418diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
86419index 5cd75e2..f57ef39 100644
86420--- a/net/ipv4/fib_frontend.c
86421+++ b/net/ipv4/fib_frontend.c
86422@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
86423 #ifdef CONFIG_IP_ROUTE_MULTIPATH
86424 fib_sync_up(dev);
86425 #endif
86426- atomic_inc(&net->ipv4.dev_addr_genid);
86427+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86428 rt_cache_flush(dev_net(dev));
86429 break;
86430 case NETDEV_DOWN:
86431 fib_del_ifaddr(ifa, NULL);
86432- atomic_inc(&net->ipv4.dev_addr_genid);
86433+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86434 if (ifa->ifa_dev->ifa_list == NULL) {
86435 /* Last address was deleted from this interface.
86436 * Disable IP.
86437@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
86438 #ifdef CONFIG_IP_ROUTE_MULTIPATH
86439 fib_sync_up(dev);
86440 #endif
86441- atomic_inc(&net->ipv4.dev_addr_genid);
86442+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86443 rt_cache_flush(net);
86444 break;
86445 case NETDEV_DOWN:
86446diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
86447index 4797a80..2bd54e9 100644
86448--- a/net/ipv4/fib_semantics.c
86449+++ b/net/ipv4/fib_semantics.c
86450@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
86451 nh->nh_saddr = inet_select_addr(nh->nh_dev,
86452 nh->nh_gw,
86453 nh->nh_parent->fib_scope);
86454- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
86455+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
86456
86457 return nh->nh_saddr;
86458 }
86459diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
86460index d0670f0..744ac80 100644
86461--- a/net/ipv4/inet_connection_sock.c
86462+++ b/net/ipv4/inet_connection_sock.c
86463@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
86464 .range = { 32768, 61000 },
86465 };
86466
86467-unsigned long *sysctl_local_reserved_ports;
86468+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
86469 EXPORT_SYMBOL(sysctl_local_reserved_ports);
86470
86471 void inet_get_local_port_range(int *low, int *high)
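
Turning sysctl_local_reserved_ports into a static array removes a boot-time allocation: the af_inet.c kzalloc() and its error unwinding were dropped above, and the sysctl_net_ipv4.c hunk later in this patch can point the table's .data at the symbol directly. The divisor chain sizes the array at one bit per possible port — 65536 bits, i.e. 8 KiB. Runnable check of the arithmetic:

	/* One bit per TCP/UDP port: 65536 bits, carried in an array of longs. */
	#include <stdio.h>

	#define PORTS 65536
	static unsigned long reserved[PORTS / 8 / sizeof(unsigned long)];

	static void reserve(unsigned int p)
	{
		reserved[p / (8 * sizeof(unsigned long))] |=
			1UL << (p % (8 * sizeof(unsigned long)));
	}

	static int is_reserved(unsigned int p)
	{
		return !!(reserved[p / (8 * sizeof(unsigned long))] &
			  (1UL << (p % (8 * sizeof(unsigned long)))));
	}

	int main(void)
	{
		reserve(8080);
		printf("bitmap: %zu bytes, %zu longs\n", sizeof(reserved),
		       sizeof(reserved) / sizeof(reserved[0]));
		printf("8080 reserved: %d, 8081 reserved: %d\n",
		       is_reserved(8080), is_reserved(8081));
		return 0;
	}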
86472diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
86473index fa3ae81..0dbe6b8 100644
86474--- a/net/ipv4/inet_hashtables.c
86475+++ b/net/ipv4/inet_hashtables.c
86476@@ -18,12 +18,15 @@
86477 #include <linux/sched.h>
86478 #include <linux/slab.h>
86479 #include <linux/wait.h>
86480+#include <linux/security.h>
86481
86482 #include <net/inet_connection_sock.h>
86483 #include <net/inet_hashtables.h>
86484 #include <net/secure_seq.h>
86485 #include <net/ip.h>
86486
86487+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
86488+
86489 /*
86490 * Allocate and initialize a new local port bind bucket.
86491 * The bindhash mutex for snum's hash chain must be held here.
86492@@ -540,6 +543,8 @@ ok:
86493 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
86494 spin_unlock(&head->lock);
86495
86496+ gr_update_task_in_ip_table(current, inet_sk(sk));
86497+
86498 if (tw) {
86499 inet_twsk_deschedule(tw, death_row);
86500 while (twrefcnt) {
86501diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
86502index 000e3d2..5472da3 100644
86503--- a/net/ipv4/inetpeer.c
86504+++ b/net/ipv4/inetpeer.c
86505@@ -503,8 +503,8 @@ relookup:
86506 if (p) {
86507 p->daddr = *daddr;
86508 atomic_set(&p->refcnt, 1);
86509- atomic_set(&p->rid, 0);
86510- atomic_set(&p->ip_id_count,
86511+ atomic_set_unchecked(&p->rid, 0);
86512+ atomic_set_unchecked(&p->ip_id_count,
86513 (daddr->family == AF_INET) ?
86514 secure_ip_id(daddr->addr.a4) :
86515 secure_ipv6_id(daddr->addr.a6));
86516diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
86517index a8fc332..4ca4ca65 100644
86518--- a/net/ipv4/ip_fragment.c
86519+++ b/net/ipv4/ip_fragment.c
86520@@ -319,7 +319,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
86521 return 0;
86522
86523 start = qp->rid;
86524- end = atomic_inc_return(&peer->rid);
86525+ end = atomic_inc_return_unchecked(&peer->rid);
86526 qp->rid = end;
86527
86528 rc = qp->q.fragments && (end - start) > max;
86529@@ -786,12 +786,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
86530
86531 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86532 {
86533- struct ctl_table *table;
86534+ ctl_table_no_const *table = NULL;
86535 struct ctl_table_header *hdr;
86536
86537- table = ip4_frags_ns_ctl_table;
86538 if (!net_eq(net, &init_net)) {
86539- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
86540+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
86541 if (table == NULL)
86542 goto err_alloc;
86543
86544@@ -802,9 +801,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86545 /* Don't export sysctls to unprivileged users */
86546 if (net->user_ns != &init_user_ns)
86547 table[0].procname = NULL;
86548- }
86549+ hdr = register_net_sysctl(net, "net/ipv4", table);
86550+ } else
86551+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
86552
86553- hdr = register_net_sysctl(net, "net/ipv4", table);
86554 if (hdr == NULL)
86555 goto err_reg;
86556
86557@@ -812,8 +812,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86558 return 0;
86559
86560 err_reg:
86561- if (!net_eq(net, &init_net))
86562- kfree(table);
86563+ kfree(table);
86564 err_alloc:
86565 return -ENOMEM;
86566 }
86567diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
86568index a85062b..2958a9b 100644
86569--- a/net/ipv4/ip_gre.c
86570+++ b/net/ipv4/ip_gre.c
86571@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
86572 module_param(log_ecn_error, bool, 0644);
86573 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
86574
86575-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
86576+static struct rtnl_link_ops ipgre_link_ops;
86577 static int ipgre_tunnel_init(struct net_device *dev);
86578 static void ipgre_tunnel_setup(struct net_device *dev);
86579 static int ipgre_tunnel_bind_dev(struct net_device *dev);
86580@@ -1753,7 +1753,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
86581 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
86582 };
86583
86584-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
86585+static struct rtnl_link_ops ipgre_link_ops = {
86586 .kind = "gre",
86587 .maxtype = IFLA_GRE_MAX,
86588 .policy = ipgre_policy,
86589@@ -1766,7 +1766,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
86590 .fill_info = ipgre_fill_info,
86591 };
86592
86593-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
86594+static struct rtnl_link_ops ipgre_tap_ops = {
86595 .kind = "gretap",
86596 .maxtype = IFLA_GRE_MAX,
86597 .policy = ipgre_policy,
86598diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
86599index d9c4f11..02b82dbc 100644
86600--- a/net/ipv4/ip_sockglue.c
86601+++ b/net/ipv4/ip_sockglue.c
86602@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
86603 len = min_t(unsigned int, len, opt->optlen);
86604 if (put_user(len, optlen))
86605 return -EFAULT;
86606- if (copy_to_user(optval, opt->__data, len))
86607+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
86608+ copy_to_user(optval, opt->__data, len))
86609 return -EFAULT;
86610 return 0;
86611 }
86612@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
86613 if (sk->sk_type != SOCK_STREAM)
86614 return -ENOPROTOOPT;
86615
86616- msg.msg_control = optval;
86617+ msg.msg_control = (void __force_kernel *)optval;
86618 msg.msg_controllen = len;
86619 msg.msg_flags = flags;
86620
86621diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
86622index c3a4233..1412161 100644
86623--- a/net/ipv4/ip_vti.c
86624+++ b/net/ipv4/ip_vti.c
86625@@ -47,7 +47,7 @@
86626 #define HASH_SIZE 16
86627 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
86628
86629-static struct rtnl_link_ops vti_link_ops __read_mostly;
86630+static struct rtnl_link_ops vti_link_ops;
86631
86632 static int vti_net_id __read_mostly;
86633 struct vti_net {
86634@@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
86635 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
86636 };
86637
86638-static struct rtnl_link_ops vti_link_ops __read_mostly = {
86639+static struct rtnl_link_ops vti_link_ops = {
86640 .kind = "vti",
86641 .maxtype = IFLA_VTI_MAX,
86642 .policy = vti_policy,
86643diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
86644index 9a46dae..5f793a0 100644
86645--- a/net/ipv4/ipcomp.c
86646+++ b/net/ipv4/ipcomp.c
86647@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
86648 return;
86649
86650 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86651- atomic_inc(&flow_cache_genid);
86652+ atomic_inc_unchecked(&flow_cache_genid);
86653 rt_genid_bump(net);
86654
86655 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
86656diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
86657index a2e50ae..e152b7c 100644
86658--- a/net/ipv4/ipconfig.c
86659+++ b/net/ipv4/ipconfig.c
86660@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
86661
86662 mm_segment_t oldfs = get_fs();
86663 set_fs(get_ds());
86664- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86665+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86666 set_fs(oldfs);
86667 return res;
86668 }
86669@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
86670
86671 mm_segment_t oldfs = get_fs();
86672 set_fs(get_ds());
86673- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86674+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86675 set_fs(oldfs);
86676 return res;
86677 }
86678@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
86679
86680 mm_segment_t oldfs = get_fs();
86681 set_fs(get_ds());
86682- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
86683+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
86684 set_fs(oldfs);
86685 return res;
86686 }
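
The ipconfig wrappers call ioctl handlers with kernel buffers, so they temporarily lift the user/kernel address limit: after set_fs(get_ds()), copy_from_user() inside the handler accepts a kernel pointer, and the __force_user cast re-tags that pointer to satisfy the handler's __user prototype under sparse. Schematically — set_fs(), get_ds(), mm_segment_t and devinet_ioctl() are kernel APIs, and kernel_ifr names a hypothetical kernel-resident ifreq:

	mm_segment_t oldfs = get_fs();

	set_fs(get_ds());	/* "user" accesses may now touch kernel memory */
	res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *)&kernel_ifr);
	set_fs(oldfs);		/* always restore the previous limit */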
86687diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
86688index 191fc24..1b3b804 100644
86689--- a/net/ipv4/ipip.c
86690+++ b/net/ipv4/ipip.c
86691@@ -138,7 +138,7 @@ struct ipip_net {
86692 static int ipip_tunnel_init(struct net_device *dev);
86693 static void ipip_tunnel_setup(struct net_device *dev);
86694 static void ipip_dev_free(struct net_device *dev);
86695-static struct rtnl_link_ops ipip_link_ops __read_mostly;
86696+static struct rtnl_link_ops ipip_link_ops;
86697
86698 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
86699 struct rtnl_link_stats64 *tot)
86700@@ -972,7 +972,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
86701 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
86702 };
86703
86704-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
86705+static struct rtnl_link_ops ipip_link_ops = {
86706 .kind = "ipip",
86707 .maxtype = IFLA_IPTUN_MAX,
86708 .policy = ipip_policy,
86709diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
86710index 3ea4127..849297b 100644
86711--- a/net/ipv4/netfilter/arp_tables.c
86712+++ b/net/ipv4/netfilter/arp_tables.c
86713@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
86714 #endif
86715
86716 static int get_info(struct net *net, void __user *user,
86717- const int *len, int compat)
86718+ int len, int compat)
86719 {
86720 char name[XT_TABLE_MAXNAMELEN];
86721 struct xt_table *t;
86722 int ret;
86723
86724- if (*len != sizeof(struct arpt_getinfo)) {
86725- duprintf("length %u != %Zu\n", *len,
86726+ if (len != sizeof(struct arpt_getinfo)) {
86727+ duprintf("length %u != %Zu\n", len,
86728 sizeof(struct arpt_getinfo));
86729 return -EINVAL;
86730 }
86731@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
86732 info.size = private->size;
86733 strcpy(info.name, name);
86734
86735- if (copy_to_user(user, &info, *len) != 0)
86736+ if (copy_to_user(user, &info, len) != 0)
86737 ret = -EFAULT;
86738 else
86739 ret = 0;
86740@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
86741
86742 switch (cmd) {
86743 case ARPT_SO_GET_INFO:
86744- ret = get_info(sock_net(sk), user, len, 1);
86745+ ret = get_info(sock_net(sk), user, *len, 1);
86746 break;
86747 case ARPT_SO_GET_ENTRIES:
86748 ret = compat_get_entries(sock_net(sk), user, len);
86749@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
86750
86751 switch (cmd) {
86752 case ARPT_SO_GET_INFO:
86753- ret = get_info(sock_net(sk), user, len, 0);
86754+ ret = get_info(sock_net(sk), user, *len, 0);
86755 break;
86756
86757 case ARPT_SO_GET_ENTRIES:
86758diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
86759index 17c5e06..1b91206 100644
86760--- a/net/ipv4/netfilter/ip_tables.c
86761+++ b/net/ipv4/netfilter/ip_tables.c
86762@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
86763 #endif
86764
86765 static int get_info(struct net *net, void __user *user,
86766- const int *len, int compat)
86767+ int len, int compat)
86768 {
86769 char name[XT_TABLE_MAXNAMELEN];
86770 struct xt_table *t;
86771 int ret;
86772
86773- if (*len != sizeof(struct ipt_getinfo)) {
86774- duprintf("length %u != %zu\n", *len,
86775+ if (len != sizeof(struct ipt_getinfo)) {
86776+ duprintf("length %u != %zu\n", len,
86777 sizeof(struct ipt_getinfo));
86778 return -EINVAL;
86779 }
86780@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
86781 info.size = private->size;
86782 strcpy(info.name, name);
86783
86784- if (copy_to_user(user, &info, *len) != 0)
86785+ if (copy_to_user(user, &info, len) != 0)
86786 ret = -EFAULT;
86787 else
86788 ret = 0;
86789@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86790
86791 switch (cmd) {
86792 case IPT_SO_GET_INFO:
86793- ret = get_info(sock_net(sk), user, len, 1);
86794+ ret = get_info(sock_net(sk), user, *len, 1);
86795 break;
86796 case IPT_SO_GET_ENTRIES:
86797 ret = compat_get_entries(sock_net(sk), user, len);
86798@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86799
86800 switch (cmd) {
86801 case IPT_SO_GET_INFO:
86802- ret = get_info(sock_net(sk), user, len, 0);
86803+ ret = get_info(sock_net(sk), user, *len, 0);
86804 break;
86805
86806 case IPT_SO_GET_ENTRIES:
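
All of the *_tables get handlers (arp_tables.c and ip_tables.c here, ip6_tables.c later in the patch) receive the same treatment: the caller dereferences the length once and get_info() takes it by value, so the sizeof check, the duprintf() and the final copy_to_user() are guaranteed to see a single snapshot rather than re-reading through the pointer. Since *len lives in kernel memory here, this reads as robustness hardening rather than a userspace double-fetch fix, but the fetch-once discipline is the same. Minimal form of the pattern:

	#include <string.h>

	#define INFO_LEN 64	/* stand-in for sizeof(struct ipt_getinfo) */

	/* Fetch-once: the caller read the length a single time and hands it
	 * over by value, so validation and use cannot diverge. */
	static int get_info(char *dst, const char *src, unsigned int len)
	{
		if (len != INFO_LEN)
			return -1;	/* -EINVAL in the kernel */
		memcpy(dst, src, len);
		return 0;
	}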
86807diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
86808index dc454cc..5bb917f 100644
86809--- a/net/ipv4/ping.c
86810+++ b/net/ipv4/ping.c
86811@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
86812 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
86813 0, sock_i_ino(sp),
86814 atomic_read(&sp->sk_refcnt), sp,
86815- atomic_read(&sp->sk_drops), len);
86816+ atomic_read_unchecked(&sp->sk_drops), len);
86817 }
86818
86819 static int ping_seq_show(struct seq_file *seq, void *v)
86820diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
86821index 6f08991..55867ad 100644
86822--- a/net/ipv4/raw.c
86823+++ b/net/ipv4/raw.c
86824@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
86825 int raw_rcv(struct sock *sk, struct sk_buff *skb)
86826 {
86827 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
86828- atomic_inc(&sk->sk_drops);
86829+ atomic_inc_unchecked(&sk->sk_drops);
86830 kfree_skb(skb);
86831 return NET_RX_DROP;
86832 }
86833@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
86834
86835 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
86836 {
86837+ struct icmp_filter filter;
86838+
86839 if (optlen > sizeof(struct icmp_filter))
86840 optlen = sizeof(struct icmp_filter);
86841- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
86842+ if (copy_from_user(&filter, optval, optlen))
86843 return -EFAULT;
86844+ raw_sk(sk)->filter = filter;
86845 return 0;
86846 }
86847
86848 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
86849 {
86850 int len, ret = -EFAULT;
86851+ struct icmp_filter filter;
86852
86853 if (get_user(len, optlen))
86854 goto out;
86855@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
86856 if (len > sizeof(struct icmp_filter))
86857 len = sizeof(struct icmp_filter);
86858 ret = -EFAULT;
86859- if (put_user(len, optlen) ||
86860- copy_to_user(optval, &raw_sk(sk)->filter, len))
86861+ filter = raw_sk(sk)->filter;
86862+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
86863 goto out;
86864 ret = 0;
86865 out: return ret;
86866@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
86867 0, 0L, 0,
86868 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
86869 0, sock_i_ino(sp),
86870- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
86871+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
86872 }
86873
86874 static int raw_seq_show(struct seq_file *seq, void *v)
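
The ICMP filter paths now stage the filter on the stack. On set, a faulting copy_from_user() can no longer leave raw_sk(sk)->filter half-updated — the socket is only touched by the structure assignment after the full copy succeeds — and on get, the len > sizeof filter test pins the outbound copy to the bounce buffer. Distilled shape of the all-or-nothing update (schematic; copy_from_user(), raw_sk() and struct icmp_filter are kernel definitions):

	static int set_filter(struct sock *sk, char __user *optval, int optlen)
	{
		struct icmp_filter filter;		/* stack bounce buffer */

		if (optlen > sizeof(filter))
			optlen = sizeof(filter);
		if (copy_from_user(&filter, optval, optlen))
			return -EFAULT;			/* socket state untouched */
		raw_sk(sk)->filter = filter;		/* committed in one assignment */
		return 0;
	}

The rawv6_seticmpfilter()/rawv6_geticmpfilter() hunks later apply the identical bounce-buffer treatment to struct icmp6_filter.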
86875diff --git a/net/ipv4/route.c b/net/ipv4/route.c
86876index a0fcc47..32e2c89 100644
86877--- a/net/ipv4/route.c
86878+++ b/net/ipv4/route.c
86879@@ -2552,34 +2552,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
86880 .maxlen = sizeof(int),
86881 .mode = 0200,
86882 .proc_handler = ipv4_sysctl_rtcache_flush,
86883+ .extra1 = &init_net,
86884 },
86885 { },
86886 };
86887
86888 static __net_init int sysctl_route_net_init(struct net *net)
86889 {
86890- struct ctl_table *tbl;
86891+ ctl_table_no_const *tbl = NULL;
86892
86893- tbl = ipv4_route_flush_table;
86894 if (!net_eq(net, &init_net)) {
86895- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86896+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86897 if (tbl == NULL)
86898 goto err_dup;
86899
86900 /* Don't export sysctls to unprivileged users */
86901 if (net->user_ns != &init_user_ns)
86902 tbl[0].procname = NULL;
86903- }
86904- tbl[0].extra1 = net;
86905+ tbl[0].extra1 = net;
86906+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86907+ } else
86908+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
86909
86910- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86911 if (net->ipv4.route_hdr == NULL)
86912 goto err_reg;
86913 return 0;
86914
86915 err_reg:
86916- if (tbl != ipv4_route_flush_table)
86917- kfree(tbl);
86918+ kfree(tbl);
86919 err_dup:
86920 return -ENOMEM;
86921 }
86922@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
86923
86924 static __net_init int rt_genid_init(struct net *net)
86925 {
86926- atomic_set(&net->rt_genid, 0);
86927+ atomic_set_unchecked(&net->rt_genid, 0);
86928 get_random_bytes(&net->ipv4.dev_addr_genid,
86929 sizeof(net->ipv4.dev_addr_genid));
86930 return 0;
86931diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
86932index d84400b..62e066e 100644
86933--- a/net/ipv4/sysctl_net_ipv4.c
86934+++ b/net/ipv4/sysctl_net_ipv4.c
86935@@ -54,7 +54,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
86936 {
86937 int ret;
86938 int range[2];
86939- ctl_table tmp = {
86940+ ctl_table_no_const tmp = {
86941 .data = &range,
86942 .maxlen = sizeof(range),
86943 .mode = table->mode,
86944@@ -107,7 +107,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
86945 int ret;
86946 gid_t urange[2];
86947 kgid_t low, high;
86948- ctl_table tmp = {
86949+ ctl_table_no_const tmp = {
86950 .data = &urange,
86951 .maxlen = sizeof(urange),
86952 .mode = table->mode,
86953@@ -138,7 +138,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
86954 void __user *buffer, size_t *lenp, loff_t *ppos)
86955 {
86956 char val[TCP_CA_NAME_MAX];
86957- ctl_table tbl = {
86958+ ctl_table_no_const tbl = {
86959 .data = val,
86960 .maxlen = TCP_CA_NAME_MAX,
86961 };
86962@@ -157,7 +157,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
86963 void __user *buffer, size_t *lenp,
86964 loff_t *ppos)
86965 {
86966- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
86967+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
86968 int ret;
86969
86970 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86971@@ -174,7 +174,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
86972 void __user *buffer, size_t *lenp,
86973 loff_t *ppos)
86974 {
86975- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
86976+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
86977 int ret;
86978
86979 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86980@@ -200,15 +200,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
86981 struct mem_cgroup *memcg;
86982 #endif
86983
86984- ctl_table tmp = {
86985+ ctl_table_no_const tmp = {
86986 .data = &vec,
86987 .maxlen = sizeof(vec),
86988 .mode = ctl->mode,
86989 };
86990
86991 if (!write) {
86992- ctl->data = &net->ipv4.sysctl_tcp_mem;
86993- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
86994+ ctl_table_no_const tcp_mem = *ctl;
86995+
86996+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
86997+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
86998 }
86999
87000 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
87001@@ -235,7 +237,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
87002 int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
87003 size_t *lenp, loff_t *ppos)
87004 {
87005- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87006+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87007 struct tcp_fastopen_context *ctxt;
87008 int ret;
87009 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
87010@@ -476,7 +478,7 @@ static struct ctl_table ipv4_table[] = {
87011 },
87012 {
87013 .procname = "ip_local_reserved_ports",
87014- .data = NULL, /* initialized in sysctl_ipv4_init */
87015+ .data = sysctl_local_reserved_ports,
87016 .maxlen = 65536,
87017 .mode = 0644,
87018 .proc_handler = proc_do_large_bitmap,
87019@@ -860,11 +862,10 @@ static struct ctl_table ipv4_net_table[] = {
87020
87021 static __net_init int ipv4_sysctl_init_net(struct net *net)
87022 {
87023- struct ctl_table *table;
87024+ ctl_table_no_const *table = NULL;
87025
87026- table = ipv4_net_table;
87027 if (!net_eq(net, &init_net)) {
87028- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
87029+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
87030 if (table == NULL)
87031 goto err_alloc;
87032
87033@@ -897,15 +898,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
87034
87035 tcp_init_mem(net);
87036
87037- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87038+ if (!net_eq(net, &init_net))
87039+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87040+ else
87041+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
87042 if (net->ipv4.ipv4_hdr == NULL)
87043 goto err_reg;
87044
87045 return 0;
87046
87047 err_reg:
87048- if (!net_eq(net, &init_net))
87049- kfree(table);
87050+ kfree(table);
87051 err_alloc:
87052 return -ENOMEM;
87053 }
87054@@ -927,16 +930,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
87055 static __init int sysctl_ipv4_init(void)
87056 {
87057 struct ctl_table_header *hdr;
87058- struct ctl_table *i;
87059-
87060- for (i = ipv4_table; i->procname; i++) {
87061- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
87062- i->data = sysctl_local_reserved_ports;
87063- break;
87064- }
87065- }
87066- if (!i->procname)
87067- return -EINVAL;
87068
87069 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
87070 if (hdr == NULL)
87071diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
87072index 9841a71..ef60409 100644
87073--- a/net/ipv4/tcp_input.c
87074+++ b/net/ipv4/tcp_input.c
87075@@ -4730,7 +4730,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
87076 * simplifies code)
87077 */
87078 static void
87079-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87080+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87081 struct sk_buff *head, struct sk_buff *tail,
87082 u32 start, u32 end)
87083 {
87084@@ -5847,6 +5847,7 @@ discard:
87085 tcp_paws_reject(&tp->rx_opt, 0))
87086 goto discard_and_undo;
87087
87088+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
87089 if (th->syn) {
87090 /* We see SYN without ACK. It is attempt of
87091 * simultaneous connect with crossed SYNs.
87092@@ -5897,6 +5898,7 @@ discard:
87093 goto discard;
87094 #endif
87095 }
87096+#endif
87097 /* "fifth, if neither of the SYN or RST bits is set then
87098 * drop the segment and return."
87099 */
87100@@ -5941,7 +5943,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
87101 goto discard;
87102
87103 if (th->syn) {
87104- if (th->fin)
87105+ if (th->fin || th->urg || th->psh)
87106 goto discard;
87107 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
87108 return 1;
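
The tcp_rcv_state_process() change widens an existing sanity check: SYNs carrying FIN were already discarded, and now SYNs carrying URG or PSH are too. Legitimate connection attempts do not set those bits on the initial SYN, so silently dropping such segments plausibly denies crafted probes (Xmas-style scans, for one) any response. Runnable classification of the flag combinations:

	/* Classifying TCP flag bytes the way the hardened check does. */
	#include <stdio.h>

	enum { FIN = 0x01, SYN = 0x02, PSH = 0x08, URG = 0x20 };

	static const char *verdict(unsigned int flags)
	{
		if ((flags & SYN) && (flags & (FIN | URG | PSH)))
			return "discard";	/* abnormal SYN combination */
		return "process";
	}

	int main(void)
	{
		printf("SYN         -> %s\n", verdict(SYN));
		printf("SYN|FIN     -> %s\n", verdict(SYN | FIN));
		printf("SYN|PSH|URG -> %s\n", verdict(SYN | PSH | URG));
		return 0;
	}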
87109diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
87110index d9130a9..00328ff 100644
87111--- a/net/ipv4/tcp_ipv4.c
87112+++ b/net/ipv4/tcp_ipv4.c
87113@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
87114 EXPORT_SYMBOL(sysctl_tcp_low_latency);
87115
87116
87117+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87118+extern int grsec_enable_blackhole;
87119+#endif
87120+
87121 #ifdef CONFIG_TCP_MD5SIG
87122 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
87123 __be32 daddr, __be32 saddr, const struct tcphdr *th);
87124@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
87125 return 0;
87126
87127 reset:
87128+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87129+ if (!grsec_enable_blackhole)
87130+#endif
87131 tcp_v4_send_reset(rsk, skb);
87132 discard:
87133 kfree_skb(skb);
87134@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
87135 TCP_SKB_CB(skb)->sacked = 0;
87136
87137 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87138- if (!sk)
87139+ if (!sk) {
87140+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87141+ ret = 1;
87142+#endif
87143 goto no_tcp_socket;
87144-
87145+ }
87146 process:
87147- if (sk->sk_state == TCP_TIME_WAIT)
87148+ if (sk->sk_state == TCP_TIME_WAIT) {
87149+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87150+ ret = 2;
87151+#endif
87152 goto do_time_wait;
87153+ }
87154
87155 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
87156 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87157@@ -2050,6 +2064,10 @@ no_tcp_socket:
87158 bad_packet:
87159 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87160 } else {
87161+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87162+ if (!grsec_enable_blackhole || (ret == 1 &&
87163+ (skb->dev->flags & IFF_LOOPBACK)))
87164+#endif
87165 tcp_v4_send_reset(NULL, skb);
87166 }
87167
87168diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
87169index f35f2df..ccb5ca6 100644
87170--- a/net/ipv4/tcp_minisocks.c
87171+++ b/net/ipv4/tcp_minisocks.c
87172@@ -27,6 +27,10 @@
87173 #include <net/inet_common.h>
87174 #include <net/xfrm.h>
87175
87176+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87177+extern int grsec_enable_blackhole;
87178+#endif
87179+
87180 int sysctl_tcp_syncookies __read_mostly = 1;
87181 EXPORT_SYMBOL(sysctl_tcp_syncookies);
87182
87183@@ -742,7 +746,10 @@ embryonic_reset:
87184 * avoid becoming vulnerable to outside attack aiming at
87185 * resetting legit local connections.
87186 */
87187- req->rsk_ops->send_reset(sk, skb);
87188+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87189+ if (!grsec_enable_blackhole)
87190+#endif
87191+ req->rsk_ops->send_reset(sk, skb);
87192 } else if (fastopen) { /* received a valid RST pkt */
87193 reqsk_fastopen_remove(sk, req, true);
87194 tcp_reset(sk);
87195diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
87196index 4526fe6..1a34e43 100644
87197--- a/net/ipv4/tcp_probe.c
87198+++ b/net/ipv4/tcp_probe.c
87199@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
87200 if (cnt + width >= len)
87201 break;
87202
87203- if (copy_to_user(buf + cnt, tbuf, width))
87204+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
87205 return -EFAULT;
87206 cnt += width;
87207 }
87208diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
87209index b78aac3..e18230b 100644
87210--- a/net/ipv4/tcp_timer.c
87211+++ b/net/ipv4/tcp_timer.c
87212@@ -22,6 +22,10 @@
87213 #include <linux/gfp.h>
87214 #include <net/tcp.h>
87215
87216+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87217+extern int grsec_lastack_retries;
87218+#endif
87219+
87220 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
87221 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
87222 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
87223@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
87224 }
87225 }
87226
87227+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87228+ if ((sk->sk_state == TCP_LAST_ACK) &&
87229+ (grsec_lastack_retries > 0) &&
87230+ (grsec_lastack_retries < retry_until))
87231+ retry_until = grsec_lastack_retries;
87232+#endif
87233+
87234 if (retransmits_timed_out(sk, retry_until,
87235 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
87236 /* Has it gone just too far? */
87237diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
87238index 1f4d405..3524677 100644
87239--- a/net/ipv4/udp.c
87240+++ b/net/ipv4/udp.c
87241@@ -87,6 +87,7 @@
87242 #include <linux/types.h>
87243 #include <linux/fcntl.h>
87244 #include <linux/module.h>
87245+#include <linux/security.h>
87246 #include <linux/socket.h>
87247 #include <linux/sockios.h>
87248 #include <linux/igmp.h>
87249@@ -111,6 +112,10 @@
87250 #include <trace/events/skb.h>
87251 #include "udp_impl.h"
87252
87253+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87254+extern int grsec_enable_blackhole;
87255+#endif
87256+
87257 struct udp_table udp_table __read_mostly;
87258 EXPORT_SYMBOL(udp_table);
87259
87260@@ -569,6 +574,9 @@ found:
87261 return s;
87262 }
87263
87264+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
87265+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
87266+
87267 /*
87268 * This routine is called by the ICMP module when it gets some
87269 * sort of error condition. If err < 0 then the socket should
87270@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
87271 dport = usin->sin_port;
87272 if (dport == 0)
87273 return -EINVAL;
87274+
87275+ err = gr_search_udp_sendmsg(sk, usin);
87276+ if (err)
87277+ return err;
87278 } else {
87279 if (sk->sk_state != TCP_ESTABLISHED)
87280 return -EDESTADDRREQ;
87281+
87282+ err = gr_search_udp_sendmsg(sk, NULL);
87283+ if (err)
87284+ return err;
87285+
87286 daddr = inet->inet_daddr;
87287 dport = inet->inet_dport;
87288 /* Open fast path for connected socket.
87289@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
87290 udp_lib_checksum_complete(skb)) {
87291 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87292 IS_UDPLITE(sk));
87293- atomic_inc(&sk->sk_drops);
87294+ atomic_inc_unchecked(&sk->sk_drops);
87295 __skb_unlink(skb, rcvq);
87296 __skb_queue_tail(&list_kill, skb);
87297 }
87298@@ -1194,6 +1211,10 @@ try_again:
87299 if (!skb)
87300 goto out;
87301
87302+ err = gr_search_udp_recvmsg(sk, skb);
87303+ if (err)
87304+ goto out_free;
87305+
87306 ulen = skb->len - sizeof(struct udphdr);
87307 copied = len;
87308 if (copied > ulen)
87309@@ -1227,7 +1248,7 @@ try_again:
87310 if (unlikely(err)) {
87311 trace_kfree_skb(skb, udp_recvmsg);
87312 if (!peeked) {
87313- atomic_inc(&sk->sk_drops);
87314+ atomic_inc_unchecked(&sk->sk_drops);
87315 UDP_INC_STATS_USER(sock_net(sk),
87316 UDP_MIB_INERRORS, is_udplite);
87317 }
87318@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87319
87320 drop:
87321 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87322- atomic_inc(&sk->sk_drops);
87323+ atomic_inc_unchecked(&sk->sk_drops);
87324 kfree_skb(skb);
87325 return -1;
87326 }
87327@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87328 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87329
87330 if (!skb1) {
87331- atomic_inc(&sk->sk_drops);
87332+ atomic_inc_unchecked(&sk->sk_drops);
87333 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87334 IS_UDPLITE(sk));
87335 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87336@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87337 goto csum_error;
87338
87339 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87340+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87341+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87342+#endif
87343 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
87344
87345 /*
87346@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
87347 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87348 0, sock_i_ino(sp),
87349 atomic_read(&sp->sk_refcnt), sp,
87350- atomic_read(&sp->sk_drops), len);
87351+ atomic_read_unchecked(&sp->sk_drops), len);
87352 }
87353
87354 int udp4_seq_show(struct seq_file *seq, void *v)
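
The extern gr_search_udp_sendmsg()/gr_search_udp_recvmsg() declarations wire grsecurity's socket ACLs into the UDP paths: judging by how err is propagated, each returns 0 to allow the operation or a negative errno to veto it before any data moves, and the connected-send path passes NULL because the destination was vetted at connect time. An illustrative stub with the hooked signature — the real policy logic lives in grsecurity's ACL code:

	/* Illustrative stub only; struct sock and struct sockaddr_in are the
	 * kernel's types. */
	int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
	{
		/* NULL means "connected socket, destination already vetted". */
		if (addr == NULL)
			return 0;
		/* A real implementation consults the task's socket ACLs here. */
		return 0;	/* or -EPERM to veto the send */
	}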
87355diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
87356index a36d17e..96d099f 100644
87357--- a/net/ipv6/addrconf.c
87358+++ b/net/ipv6/addrconf.c
87359@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
87360 p.iph.ihl = 5;
87361 p.iph.protocol = IPPROTO_IPV6;
87362 p.iph.ttl = 64;
87363- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
87364+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
87365
87366 if (ops->ndo_do_ioctl) {
87367 mm_segment_t oldfs = get_fs();
87368@@ -4388,7 +4388,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
87369 int *valp = ctl->data;
87370 int val = *valp;
87371 loff_t pos = *ppos;
87372- ctl_table lctl;
87373+ ctl_table_no_const lctl;
87374 int ret;
87375
87376 /*
87377@@ -4470,7 +4470,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
87378 int *valp = ctl->data;
87379 int val = *valp;
87380 loff_t pos = *ppos;
87381- ctl_table lctl;
87382+ ctl_table_no_const lctl;
87383 int ret;
87384
87385 /*
87386diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
87387index fff5bdd..15194fb 100644
87388--- a/net/ipv6/icmp.c
87389+++ b/net/ipv6/icmp.c
87390@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
87391
87392 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
87393 {
87394- struct ctl_table *table;
87395+ ctl_table_no_const *table;
87396
87397 table = kmemdup(ipv6_icmp_table_template,
87398 sizeof(ipv6_icmp_table_template),
87399diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
87400index 131dd09..f7ed64f 100644
87401--- a/net/ipv6/ip6_gre.c
87402+++ b/net/ipv6/ip6_gre.c
87403@@ -73,7 +73,7 @@ struct ip6gre_net {
87404 struct net_device *fb_tunnel_dev;
87405 };
87406
87407-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
87408+static struct rtnl_link_ops ip6gre_link_ops;
87409 static int ip6gre_tunnel_init(struct net_device *dev);
87410 static void ip6gre_tunnel_setup(struct net_device *dev);
87411 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
87412@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
87413 }
87414
87415
87416-static struct inet6_protocol ip6gre_protocol __read_mostly = {
87417+static struct inet6_protocol ip6gre_protocol = {
87418 .handler = ip6gre_rcv,
87419 .err_handler = ip6gre_err,
87420 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
87421@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
87422 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
87423 };
87424
87425-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
87426+static struct rtnl_link_ops ip6gre_link_ops = {
87427 .kind = "ip6gre",
87428 .maxtype = IFLA_GRE_MAX,
87429 .policy = ip6gre_policy,
87430@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
87431 .fill_info = ip6gre_fill_info,
87432 };
87433
87434-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
87435+static struct rtnl_link_ops ip6gre_tap_ops = {
87436 .kind = "ip6gretap",
87437 .maxtype = IFLA_GRE_MAX,
87438 .policy = ip6gre_policy,
87439diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
87440index a14f28b..b4b8956 100644
87441--- a/net/ipv6/ip6_tunnel.c
87442+++ b/net/ipv6/ip6_tunnel.c
87443@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
87444
87445 static int ip6_tnl_dev_init(struct net_device *dev);
87446 static void ip6_tnl_dev_setup(struct net_device *dev);
87447-static struct rtnl_link_ops ip6_link_ops __read_mostly;
87448+static struct rtnl_link_ops ip6_link_ops;
87449
87450 static int ip6_tnl_net_id __read_mostly;
87451 struct ip6_tnl_net {
87452@@ -1686,7 +1686,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
87453 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
87454 };
87455
87456-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
87457+static struct rtnl_link_ops ip6_link_ops = {
87458 .kind = "ip6tnl",
87459 .maxtype = IFLA_IPTUN_MAX,
87460 .policy = ip6_tnl_policy,
87461diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
87462index d1e2e8e..51c19ae 100644
87463--- a/net/ipv6/ipv6_sockglue.c
87464+++ b/net/ipv6/ipv6_sockglue.c
87465@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
87466 if (sk->sk_type != SOCK_STREAM)
87467 return -ENOPROTOOPT;
87468
87469- msg.msg_control = optval;
87470+ msg.msg_control = (void __force_kernel *)optval;
87471 msg.msg_controllen = len;
87472 msg.msg_flags = flags;
87473
87474diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
87475index 125a90d..2a11f36 100644
87476--- a/net/ipv6/netfilter/ip6_tables.c
87477+++ b/net/ipv6/netfilter/ip6_tables.c
87478@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
87479 #endif
87480
87481 static int get_info(struct net *net, void __user *user,
87482- const int *len, int compat)
87483+ int len, int compat)
87484 {
87485 char name[XT_TABLE_MAXNAMELEN];
87486 struct xt_table *t;
87487 int ret;
87488
87489- if (*len != sizeof(struct ip6t_getinfo)) {
87490- duprintf("length %u != %zu\n", *len,
87491+ if (len != sizeof(struct ip6t_getinfo)) {
87492+ duprintf("length %u != %zu\n", len,
87493 sizeof(struct ip6t_getinfo));
87494 return -EINVAL;
87495 }
87496@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
87497 info.size = private->size;
87498 strcpy(info.name, name);
87499
87500- if (copy_to_user(user, &info, *len) != 0)
87501+ if (copy_to_user(user, &info, len) != 0)
87502 ret = -EFAULT;
87503 else
87504 ret = 0;
87505@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87506
87507 switch (cmd) {
87508 case IP6T_SO_GET_INFO:
87509- ret = get_info(sock_net(sk), user, len, 1);
87510+ ret = get_info(sock_net(sk), user, *len, 1);
87511 break;
87512 case IP6T_SO_GET_ENTRIES:
87513 ret = compat_get_entries(sock_net(sk), user, len);
87514@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87515
87516 switch (cmd) {
87517 case IP6T_SO_GET_INFO:
87518- ret = get_info(sock_net(sk), user, len, 0);
87519+ ret = get_info(sock_net(sk), user, *len, 0);
87520 break;
87521
87522 case IP6T_SO_GET_ENTRIES:
87523diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
87524index 83acc14..0ea43c7 100644
87525--- a/net/ipv6/netfilter/ip6t_NPT.c
87526+++ b/net/ipv6/netfilter/ip6t_NPT.c
87527@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
87528 if (pfx_len - i >= 32)
87529 mask = 0;
87530 else
87531- mask = htonl(~((1 << (pfx_len - i)) - 1));
87532+ mask = htonl((1 << (i - pfx_len + 32)) - 1);
87533
87534 idx = i / 32;
87535 addr->s6_addr32[idx] &= mask;
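
The ip6t_NPT change fixes the per-word mask in ip6t_npt_map_pfx(). For the 32-bit word starting at bit i of the address, the first pfx_len - i bits belong to the prefix being rewritten and the low-order 32 - (pfx_len - i) bits are host bits that must survive the &= mask. The old ~((1 << (pfx_len - i)) - 1) set bits at the wrong end of the word, preserving high-order prefix bits while wiping the host bits; the replacement (1 << (i - pfx_len + 32)) - 1 — equivalently (1 << (32 - (pfx_len - i))) - 1 — sets exactly the host bits. Worked example (pfx_len = 56 on the second word, so i = 32: 24 prefix bits replaced, 8 host bits kept):

	/* Worked example for the NPT mask fix: /56 prefix, second 32-bit word. */
	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int pfx_len = 56, i = 32;	/* pfx_len - i = 24 */

		uint32_t old_mask = htonl(~((1u << (pfx_len - i)) - 1));
		uint32_t new_mask = htonl((1u << (i - pfx_len + 32)) - 1);

		/* printed in host byte order for readability */
		printf("old: 0x%08x (keeps high-order bits, wipes host bits)\n",
		       (unsigned int)ntohl(old_mask));
		printf("new: 0x%08x (keeps exactly the 8 host bits)\n",
		       (unsigned int)ntohl(new_mask));
		return 0;
	}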
87536diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
87537index 2f3a018..8bca195 100644
87538--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
87539+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
87540@@ -89,12 +89,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
87541
87542 static int nf_ct_frag6_sysctl_register(struct net *net)
87543 {
87544- struct ctl_table *table;
87545+ ctl_table_no_const *table = NULL;
87546 struct ctl_table_header *hdr;
87547
87548- table = nf_ct_frag6_sysctl_table;
87549 if (!net_eq(net, &init_net)) {
87550- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
87551+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
87552 GFP_KERNEL);
87553 if (table == NULL)
87554 goto err_alloc;
87555@@ -102,9 +101,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
87556 table[0].data = &net->ipv6.frags.high_thresh;
87557 table[1].data = &net->ipv6.frags.low_thresh;
87558 table[2].data = &net->ipv6.frags.timeout;
87559- }
87560-
87561- hdr = register_net_sysctl(net, "net/netfilter", table);
87562+ hdr = register_net_sysctl(net, "net/netfilter", table);
87563+ } else
87564+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
87565 if (hdr == NULL)
87566 goto err_reg;
87567
87568@@ -112,8 +111,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
87569 return 0;
87570
87571 err_reg:
87572- if (!net_eq(net, &init_net))
87573- kfree(table);
87574+ kfree(table);
87575 err_alloc:
87576 return -ENOMEM;
87577 }
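
The recurring ctl_table_no_const conversions in this patch pair with grsecurity's constify plugin: the shared template table becomes read-only, so per-netns code must work on a kmemdup()'d writable copy while init_net registers the template as-is. Starting table at NULL is what makes the simplified error path safe, since kfree(NULL) is a no-op and only the non-init_net case ever allocates. The typedef itself is added elsewhere in the patch; assumed shape:

/* assumed definition: identical layout to struct ctl_table, with
 * __no_const telling the constify gcc plugin to leave instances of
 * this type writable */
typedef struct ctl_table __no_const ctl_table_no_const;
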
87578diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
87579index 70fa814..d70c28c 100644
87580--- a/net/ipv6/raw.c
87581+++ b/net/ipv6/raw.c
87582@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
87583 {
87584 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
87585 skb_checksum_complete(skb)) {
87586- atomic_inc(&sk->sk_drops);
87587+ atomic_inc_unchecked(&sk->sk_drops);
87588 kfree_skb(skb);
87589 return NET_RX_DROP;
87590 }
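
The atomic_*_unchecked() calls scattered through the networking changes are the PAX_REFCOUNT escape hatch: ordinary atomic_t operations gain overflow trapping to defeat refcount-overflow exploits, and counters like sk_drops that are mere statistics and may legitimately wrap are switched to the unchecked variant. Assumed shape of the type and one helper (mirroring atomic_t; the asm is the x86 flavour):

typedef struct {
	int counter;
} atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* same lock-prefixed increment as atomic_inc(), minus the
	 * overflow-detection sequence PAX_REFCOUNT adds to atomic_t */
	asm volatile("lock incl %0" : "+m" (v->counter));
}
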
87591@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
87592 struct raw6_sock *rp = raw6_sk(sk);
87593
87594 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
87595- atomic_inc(&sk->sk_drops);
87596+ atomic_inc_unchecked(&sk->sk_drops);
87597 kfree_skb(skb);
87598 return NET_RX_DROP;
87599 }
87600@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
87601
87602 if (inet->hdrincl) {
87603 if (skb_checksum_complete(skb)) {
87604- atomic_inc(&sk->sk_drops);
87605+ atomic_inc_unchecked(&sk->sk_drops);
87606 kfree_skb(skb);
87607 return NET_RX_DROP;
87608 }
87609@@ -604,7 +604,7 @@ out:
87610 return err;
87611 }
87612
87613-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
87614+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
87615 struct flowi6 *fl6, struct dst_entry **dstp,
87616 unsigned int flags)
87617 {
87618@@ -916,12 +916,15 @@ do_confirm:
87619 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
87620 char __user *optval, int optlen)
87621 {
87622+ struct icmp6_filter filter;
87623+
87624 switch (optname) {
87625 case ICMPV6_FILTER:
87626 if (optlen > sizeof(struct icmp6_filter))
87627 optlen = sizeof(struct icmp6_filter);
87628- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
87629+ if (copy_from_user(&filter, optval, optlen))
87630 return -EFAULT;
87631+ raw6_sk(sk)->filter = filter;
87632 return 0;
87633 default:
87634 return -ENOPROTOOPT;
87635@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87636 char __user *optval, int __user *optlen)
87637 {
87638 int len;
87639+ struct icmp6_filter filter;
87640
87641 switch (optname) {
87642 case ICMPV6_FILTER:
87643@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87644 len = sizeof(struct icmp6_filter);
87645 if (put_user(len, optlen))
87646 return -EFAULT;
87647- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
87648+ filter = raw6_sk(sk)->filter;
87649+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
87650 return -EFAULT;
87651 return 0;
87652 default:
87653@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87654 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87655 0,
87656 sock_i_ino(sp),
87657- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87658+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87659 }
87660
87661 static int raw6_seq_show(struct seq_file *seq, void *v)
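
Both ICMP filter paths now bounce the data through a fixed-size stack object instead of copying user data directly into, or kernel data directly out of, the raw6_sock field; with PaX's usercopy instrumentation the transfer is then checked against a compile-time object size, and the get path also refuses a len larger than the object. The same idiom in plain userspace C (memcpy() standing in for copy_from_user(); the memset is a hygiene addition of this sketch, not in the kernel code):

#include <string.h>

struct icmp6_filter { unsigned int data[8]; };

static int set_filter(struct icmp6_filter *dst, const void *optval,
		      size_t optlen)
{
	struct icmp6_filter tmp;

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);		/* clamp to the bounce buffer */
	memset(&tmp, 0, sizeof(tmp));
	memcpy(&tmp, optval, optlen);		/* copy_from_user() in-kernel */
	*dst = tmp;				/* commit after the bounded copy */
	return 0;
}
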
87662diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
87663index d9ba8a2..f3f9e14 100644
87664--- a/net/ipv6/reassembly.c
87665+++ b/net/ipv6/reassembly.c
87666@@ -608,12 +608,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
87667
87668 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87669 {
87670- struct ctl_table *table;
87671+ ctl_table_no_const *table = NULL;
87672 struct ctl_table_header *hdr;
87673
87674- table = ip6_frags_ns_ctl_table;
87675 if (!net_eq(net, &init_net)) {
87676- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87677+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87678 if (table == NULL)
87679 goto err_alloc;
87680
87681@@ -624,9 +623,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87682 /* Don't export sysctls to unprivileged users */
87683 if (net->user_ns != &init_user_ns)
87684 table[0].procname = NULL;
87685- }
87686+ hdr = register_net_sysctl(net, "net/ipv6", table);
87687+ } else
87688+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
87689
87690- hdr = register_net_sysctl(net, "net/ipv6", table);
87691 if (hdr == NULL)
87692 goto err_reg;
87693
87694@@ -634,8 +634,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87695 return 0;
87696
87697 err_reg:
87698- if (!net_eq(net, &init_net))
87699- kfree(table);
87700+ kfree(table);
87701 err_alloc:
87702 return -ENOMEM;
87703 }
87704diff --git a/net/ipv6/route.c b/net/ipv6/route.c
87705index 5845613..3af8fc7 100644
87706--- a/net/ipv6/route.c
87707+++ b/net/ipv6/route.c
87708@@ -2966,7 +2966,7 @@ ctl_table ipv6_route_table_template[] = {
87709
87710 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
87711 {
87712- struct ctl_table *table;
87713+ ctl_table_no_const *table;
87714
87715 table = kmemdup(ipv6_route_table_template,
87716 sizeof(ipv6_route_table_template),
87717diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
87718index cfba99b..20ca511 100644
87719--- a/net/ipv6/sit.c
87720+++ b/net/ipv6/sit.c
87721@@ -72,7 +72,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87722 static int ipip6_tunnel_init(struct net_device *dev);
87723 static void ipip6_tunnel_setup(struct net_device *dev);
87724 static void ipip6_dev_free(struct net_device *dev);
87725-static struct rtnl_link_ops sit_link_ops __read_mostly;
87726+static struct rtnl_link_ops sit_link_ops;
87727
87728 static int sit_net_id __read_mostly;
87729 struct sit_net {
87730@@ -1463,7 +1463,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
87731 #endif
87732 };
87733
87734-static struct rtnl_link_ops sit_link_ops __read_mostly = {
87735+static struct rtnl_link_ops sit_link_ops = {
87736 .kind = "sit",
87737 .maxtype = IFLA_IPTUN_MAX,
87738 .policy = ipip6_policy,
87739diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
87740index e85c48b..b8268d3 100644
87741--- a/net/ipv6/sysctl_net_ipv6.c
87742+++ b/net/ipv6/sysctl_net_ipv6.c
87743@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
87744
87745 static int __net_init ipv6_sysctl_net_init(struct net *net)
87746 {
87747- struct ctl_table *ipv6_table;
87748+ ctl_table_no_const *ipv6_table;
87749 struct ctl_table *ipv6_route_table;
87750 struct ctl_table *ipv6_icmp_table;
87751 int err;
87752diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
87753index 8d19346..e47216f 100644
87754--- a/net/ipv6/tcp_ipv6.c
87755+++ b/net/ipv6/tcp_ipv6.c
87756@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
87757 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
87758 }
87759
87760+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87761+extern int grsec_enable_blackhole;
87762+#endif
87763+
87764 static void tcp_v6_hash(struct sock *sk)
87765 {
87766 if (sk->sk_state != TCP_CLOSE) {
87767@@ -1440,6 +1444,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
87768 return 0;
87769
87770 reset:
87771+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87772+ if (!grsec_enable_blackhole)
87773+#endif
87774 tcp_v6_send_reset(sk, skb);
87775 discard:
87776 if (opt_skb)
87777@@ -1521,12 +1528,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
87778 TCP_SKB_CB(skb)->sacked = 0;
87779
87780 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87781- if (!sk)
87782+ if (!sk) {
87783+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87784+ ret = 1;
87785+#endif
87786 goto no_tcp_socket;
87787+ }
87788
87789 process:
87790- if (sk->sk_state == TCP_TIME_WAIT)
87791+ if (sk->sk_state == TCP_TIME_WAIT) {
87792+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87793+ ret = 2;
87794+#endif
87795 goto do_time_wait;
87796+ }
87797
87798 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
87799 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87800@@ -1575,6 +1590,10 @@ no_tcp_socket:
87801 bad_packet:
87802 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87803 } else {
87804+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87805+ if (!grsec_enable_blackhole || (ret == 1 &&
87806+ (skb->dev->flags & IFF_LOOPBACK)))
87807+#endif
87808 tcp_v6_send_reset(NULL, skb);
87809 }
87810
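
GRKERNSEC_BLACKHOLE makes unmatched TCP segments disappear silently instead of drawing a RST, so port scans see timeouts rather than closed ports; ret encodes why the lookup fell through (1: no socket matched, 2: time-wait) and the loopback exemption keeps local diagnostics working for the no-socket case. The guard in front of tcp_v6_send_reset(), distilled into a predicate:

#include <stdbool.h>

/* blackhole is the grsec_enable_blackhole sysctl declared above */
static bool may_send_reset(int blackhole, int ret, bool on_loopback)
{
	if (!blackhole)
		return true;			/* feature off: answer normally */
	return ret == 1 && on_loopback;		/* unmatched packet on lo only */
}
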
87811diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
87812index fb08329..2d6919e 100644
87813--- a/net/ipv6/udp.c
87814+++ b/net/ipv6/udp.c
87815@@ -51,6 +51,10 @@
87816 #include <trace/events/skb.h>
87817 #include "udp_impl.h"
87818
87819+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87820+extern int grsec_enable_blackhole;
87821+#endif
87822+
87823 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
87824 {
87825 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
87826@@ -395,7 +399,7 @@ try_again:
87827 if (unlikely(err)) {
87828 trace_kfree_skb(skb, udpv6_recvmsg);
87829 if (!peeked) {
87830- atomic_inc(&sk->sk_drops);
87831+ atomic_inc_unchecked(&sk->sk_drops);
87832 if (is_udp4)
87833 UDP_INC_STATS_USER(sock_net(sk),
87834 UDP_MIB_INERRORS,
87835@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87836 return rc;
87837 drop:
87838 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87839- atomic_inc(&sk->sk_drops);
87840+ atomic_inc_unchecked(&sk->sk_drops);
87841 kfree_skb(skb);
87842 return -1;
87843 }
87844@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87845 if (likely(skb1 == NULL))
87846 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87847 if (!skb1) {
87848- atomic_inc(&sk->sk_drops);
87849+ atomic_inc_unchecked(&sk->sk_drops);
87850 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87851 IS_UDPLITE(sk));
87852 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87853@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87854 goto discard;
87855
87856 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87857+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87858+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87859+#endif
87860 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
87861
87862 kfree_skb(skb);
87863@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
87864 0,
87865 sock_i_ino(sp),
87866 atomic_read(&sp->sk_refcnt), sp,
87867- atomic_read(&sp->sk_drops));
87868+ atomic_read_unchecked(&sp->sk_drops));
87869 }
87870
87871 int udp6_seq_show(struct seq_file *seq, void *v)
87872diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
87873index a68c88c..d55b0c5 100644
87874--- a/net/irda/ircomm/ircomm_tty.c
87875+++ b/net/irda/ircomm/ircomm_tty.c
87876@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87877 add_wait_queue(&port->open_wait, &wait);
87878
87879 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
87880- __FILE__, __LINE__, tty->driver->name, port->count);
87881+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87882
87883 spin_lock_irqsave(&port->lock, flags);
87884 if (!tty_hung_up_p(filp)) {
87885 extra_count = 1;
87886- port->count--;
87887+ atomic_dec(&port->count);
87888 }
87889 spin_unlock_irqrestore(&port->lock, flags);
87890 port->blocked_open++;
87891@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87892 }
87893
87894 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
87895- __FILE__, __LINE__, tty->driver->name, port->count);
87896+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87897
87898 schedule();
87899 }
87900@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87901 if (extra_count) {
87902 /* ++ is not atomic, so this should be protected - Jean II */
87903 spin_lock_irqsave(&port->lock, flags);
87904- port->count++;
87905+ atomic_inc(&port->count);
87906 spin_unlock_irqrestore(&port->lock, flags);
87907 }
87908 port->blocked_open--;
87909
87910 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
87911- __FILE__, __LINE__, tty->driver->name, port->count);
87912+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87913
87914 if (!retval)
87915 port->flags |= ASYNC_NORMAL_ACTIVE;
87916@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
87917
87918 /* ++ is not atomic, so this should be protected - Jean II */
87919 spin_lock_irqsave(&self->port.lock, flags);
87920- self->port.count++;
87921+ atomic_inc(&self->port.count);
87922 spin_unlock_irqrestore(&self->port.lock, flags);
87923 tty_port_tty_set(&self->port, tty);
87924
87925 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
87926- self->line, self->port.count);
87927+ self->line, atomic_read(&self->port.count));
87928
87929 /* Not really used by us, but lets do it anyway */
87930 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
87931@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
87932 tty_kref_put(port->tty);
87933 }
87934 port->tty = NULL;
87935- port->count = 0;
87936+ atomic_set(&port->count, 0);
87937 spin_unlock_irqrestore(&port->lock, flags);
87938
87939 wake_up_interruptible(&port->open_wait);
87940@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
87941 seq_putc(m, '\n');
87942
87943 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
87944- seq_printf(m, "Open count: %d\n", self->port.count);
87945+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
87946 seq_printf(m, "Max data size: %d\n", self->max_data_size);
87947 seq_printf(m, "Max header size: %d\n", self->max_header_size);
87948
87949diff --git a/net/irda/iriap.c b/net/irda/iriap.c
87950index e71e85b..29340a9 100644
87951--- a/net/irda/iriap.c
87952+++ b/net/irda/iriap.c
87953@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
87954 /* case CS_ISO_8859_9: */
87955 /* case CS_UNICODE: */
87956 default:
87957- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
87958- __func__, ias_charset_types[charset]);
87959+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
87960+ __func__, charset,
87961+ charset < ARRAY_SIZE(ias_charset_types) ?
87962+ ias_charset_types[charset] :
87963+ "(unknown)");
87964
87965 /* Aborting, close connection! */
87966 iriap_disconnect_request(self);
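
charset comes straight off the wire, and the debug print indexed ias_charset_types[] with it unchecked, an out-of-bounds read for any value past the table; the fix gates the lookup on ARRAY_SIZE(). Standalone illustration (the table here is abridged to three entries):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const ias_charset_types[] = {
	"CS_ASCII", "CS_ISO_8859_1", "CS_ISO_8859_2",	/* abridged */
};

static const char *charset_name(unsigned int charset)
{
	return charset < ARRAY_SIZE(ias_charset_types)
		? ias_charset_types[charset] : "(unknown)";
}

int main(void)
{
	printf("%s / %s\n", charset_name(1), charset_name(200));
	return 0;	/* prints "CS_ISO_8859_1 / (unknown)" */
}
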
87967diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
87968index cd6f7a9..e63fe89 100644
87969--- a/net/iucv/af_iucv.c
87970+++ b/net/iucv/af_iucv.c
87971@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
87972
87973 write_lock_bh(&iucv_sk_list.lock);
87974
87975- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
87976+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87977 while (__iucv_get_sock_by_name(name)) {
87978 sprintf(name, "%08x",
87979- atomic_inc_return(&iucv_sk_list.autobind_name));
87980+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87981 }
87982
87983 write_unlock_bh(&iucv_sk_list.lock);
87984diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
87985index df08250..02021fe 100644
87986--- a/net/iucv/iucv.c
87987+++ b/net/iucv/iucv.c
87988@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
87989 return NOTIFY_OK;
87990 }
87991
87992-static struct notifier_block __refdata iucv_cpu_notifier = {
87993+static struct notifier_block iucv_cpu_notifier = {
87994 .notifier_call = iucv_cpu_notify,
87995 };
87996
87997diff --git a/net/key/af_key.c b/net/key/af_key.c
87998index 5b426a6..970032b 100644
87999--- a/net/key/af_key.c
88000+++ b/net/key/af_key.c
88001@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
88002 static u32 get_acqseq(void)
88003 {
88004 u32 res;
88005- static atomic_t acqseq;
88006+ static atomic_unchecked_t acqseq;
88007
88008 do {
88009- res = atomic_inc_return(&acqseq);
88010+ res = atomic_inc_return_unchecked(&acqseq);
88011 } while (!res);
88012 return res;
88013 }
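
get_acqseq() must hand out a non-zero sequence number, so the loop simply retries across the 32-bit wrap; this is a counter that is supposed to overflow, hence the switch to the non-trapping unchecked atomic under PAX_REFCOUNT. A C11 userspace rendering of the same logic:

#include <stdatomic.h>
#include <stdint.h>

static uint32_t get_acqseq(void)
{
	static _Atomic uint32_t acqseq;
	uint32_t res;

	do {
		/* fetch_add returns the old value, so +1 matches
		 * atomic_inc_return_unchecked() */
		res = atomic_fetch_add(&acqseq, 1) + 1;
	} while (!res);				/* 0 is reserved: retry */
	return res;
}
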
88014diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
88015index 0479c64..9e72ff4 100644
88016--- a/net/mac80211/cfg.c
88017+++ b/net/mac80211/cfg.c
88018@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
88019 ret = ieee80211_vif_use_channel(sdata, chandef,
88020 IEEE80211_CHANCTX_EXCLUSIVE);
88021 }
88022- } else if (local->open_count == local->monitors) {
88023+ } else if (local_read(&local->open_count) == local->monitors) {
88024 local->_oper_channel = chandef->chan;
88025 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
88026 ieee80211_hw_config(local, 0);
88027@@ -2499,7 +2499,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88028 list_del(&dep->list);
88029 mutex_unlock(&local->mtx);
88030
88031- ieee80211_roc_notify_destroy(dep);
88032+ ieee80211_roc_notify_destroy(dep, true);
88033 return 0;
88034 }
88035
88036@@ -2539,7 +2539,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88037 ieee80211_start_next_roc(local);
88038 mutex_unlock(&local->mtx);
88039
88040- ieee80211_roc_notify_destroy(found);
88041+ ieee80211_roc_notify_destroy(found, true);
88042 } else {
88043 /* work may be pending so use it all the time */
88044 found->abort = true;
88045@@ -2549,6 +2549,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88046
88047 /* work will clean up etc */
88048 flush_delayed_work(&found->work);
88049+ WARN_ON(!found->to_be_freed);
88050+ kfree(found);
88051 }
88052
88053 return 0;
88054@@ -2716,7 +2718,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
88055 else
88056 local->probe_req_reg--;
88057
88058- if (!local->open_count)
88059+ if (!local_read(&local->open_count))
88060 break;
88061
88062 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
88063diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
88064index 2ed065c..bec0c2b 100644
88065--- a/net/mac80211/ieee80211_i.h
88066+++ b/net/mac80211/ieee80211_i.h
88067@@ -28,6 +28,7 @@
88068 #include <net/ieee80211_radiotap.h>
88069 #include <net/cfg80211.h>
88070 #include <net/mac80211.h>
88071+#include <asm/local.h>
88072 #include "key.h"
88073 #include "sta_info.h"
88074 #include "debug.h"
88075@@ -346,6 +347,7 @@ struct ieee80211_roc_work {
88076 struct ieee80211_channel *chan;
88077
88078 bool started, abort, hw_begun, notified;
88079+ bool to_be_freed;
88080
88081 unsigned long hw_start_time;
88082
88083@@ -909,7 +911,7 @@ struct ieee80211_local {
88084 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
88085 spinlock_t queue_stop_reason_lock;
88086
88087- int open_count;
88088+ local_t open_count;
88089 int monitors, cooked_mntrs;
88090 /* number of interfaces with corresponding FIF_ flags */
88091 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
88092@@ -1363,7 +1365,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);
88093 void ieee80211_roc_setup(struct ieee80211_local *local);
88094 void ieee80211_start_next_roc(struct ieee80211_local *local);
88095 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
88096-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
88097+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
88098 void ieee80211_sw_roc_work(struct work_struct *work);
88099 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
88100
88101diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
88102index 8be854e..ad72a69 100644
88103--- a/net/mac80211/iface.c
88104+++ b/net/mac80211/iface.c
88105@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88106 break;
88107 }
88108
88109- if (local->open_count == 0) {
88110+ if (local_read(&local->open_count) == 0) {
88111 res = drv_start(local);
88112 if (res)
88113 goto err_del_bss;
88114@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88115 break;
88116 }
88117
88118- if (local->monitors == 0 && local->open_count == 0) {
88119+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
88120 res = ieee80211_add_virtual_monitor(local);
88121 if (res)
88122 goto err_stop;
88123@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88124 mutex_unlock(&local->mtx);
88125
88126 if (coming_up)
88127- local->open_count++;
88128+ local_inc(&local->open_count);
88129
88130 if (hw_reconf_flags)
88131 ieee80211_hw_config(local, hw_reconf_flags);
88132@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88133 err_del_interface:
88134 drv_remove_interface(local, sdata);
88135 err_stop:
88136- if (!local->open_count)
88137+ if (!local_read(&local->open_count))
88138 drv_stop(local);
88139 err_del_bss:
88140 sdata->bss = NULL;
88141@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88142 }
88143
88144 if (going_down)
88145- local->open_count--;
88146+ local_dec(&local->open_count);
88147
88148 switch (sdata->vif.type) {
88149 case NL80211_IFTYPE_AP_VLAN:
88150@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88151
88152 ieee80211_recalc_ps(local, -1);
88153
88154- if (local->open_count == 0) {
88155+ if (local_read(&local->open_count) == 0) {
88156 if (local->ops->napi_poll)
88157 napi_disable(&local->napi);
88158 ieee80211_clear_tx_pending(local);
88159@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88160 }
88161 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
88162
88163- if (local->monitors == local->open_count && local->monitors > 0)
88164+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
88165 ieee80211_add_virtual_monitor(local);
88166 }
88167
88168diff --git a/net/mac80211/main.c b/net/mac80211/main.c
88169index 1b087ff..bf600e9 100644
88170--- a/net/mac80211/main.c
88171+++ b/net/mac80211/main.c
88172@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
88173 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
88174 IEEE80211_CONF_CHANGE_POWER);
88175
88176- if (changed && local->open_count) {
88177+ if (changed && local_read(&local->open_count)) {
88178 ret = drv_config(local, changed);
88179 /*
88180 * Goal:
88181diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
88182index a3ad4c3..7acbdaa 100644
88183--- a/net/mac80211/offchannel.c
88184+++ b/net/mac80211/offchannel.c
88185@@ -299,10 +299,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
88186 }
88187 }
88188
88189-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
88190+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
88191 {
88192 struct ieee80211_roc_work *dep, *tmp;
88193
88194+ if (WARN_ON(roc->to_be_freed))
88195+ return;
88196+
88197 /* was never transmitted */
88198 if (roc->frame) {
88199 cfg80211_mgmt_tx_status(&roc->sdata->wdev,
88200@@ -318,9 +321,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
88201 GFP_KERNEL);
88202
88203 list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
88204- ieee80211_roc_notify_destroy(dep);
88205+ ieee80211_roc_notify_destroy(dep, true);
88206
88207- kfree(roc);
88208+ if (free)
88209+ kfree(roc);
88210+ else
88211+ roc->to_be_freed = true;
88212 }
88213
88214 void ieee80211_sw_roc_work(struct work_struct *work)
88215@@ -333,6 +339,9 @@ void ieee80211_sw_roc_work(struct work_struct *work)
88216
88217 mutex_lock(&local->mtx);
88218
88219+ if (roc->to_be_freed)
88220+ goto out_unlock;
88221+
88222 if (roc->abort)
88223 goto finish;
88224
88225@@ -372,7 +381,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
88226 finish:
88227 list_del(&roc->list);
88228 started = roc->started;
88229- ieee80211_roc_notify_destroy(roc);
88230+ ieee80211_roc_notify_destroy(roc, !roc->abort);
88231
88232 if (started) {
88233 drv_flush(local, false);
88234@@ -412,7 +421,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
88235
88236 list_del(&roc->list);
88237
88238- ieee80211_roc_notify_destroy(roc);
88239+ ieee80211_roc_notify_destroy(roc, true);
88240
88241 /* if there's another roc, start it now */
88242 ieee80211_start_next_roc(local);
88243@@ -462,12 +471,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
88244 list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
88245 if (local->ops->remain_on_channel) {
88246 list_del(&roc->list);
88247- ieee80211_roc_notify_destroy(roc);
88248+ ieee80211_roc_notify_destroy(roc, true);
88249 } else {
88250 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
88251
88252 /* work will clean up etc */
88253 flush_delayed_work(&roc->work);
88254+ WARN_ON(!roc->to_be_freed);
88255+ kfree(roc);
88256 }
88257 }
88258
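
The mac80211 hunks above fix a use-after-free rather than apply hardening: a canceller that still has to flush_delayed_work() must not have the work handler kfree() the roc item out from under it. ieee80211_roc_notify_destroy() therefore takes a free flag; when the caller retains ownership it only sets to_be_freed, and the canceller flushes, asserts the mark, and frees exactly once. The handshake in miniature (a userspace model, not the kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

struct roc_work { bool to_be_freed; /* ... */ };

static void roc_notify_destroy(struct roc_work *roc, bool free_it)
{
	/* completion notifications would be sent here */
	if (free_it)
		free(roc);
	else
		roc->to_be_freed = true;	/* caller keeps ownership */
}

static void roc_cancel(struct roc_work *roc)
{
	/* flush_delayed_work(&roc->work) runs the handler, which calls
	 * roc_notify_destroy(roc, false) and leaves the memory alive */
	assert(roc->to_be_freed);
	free(roc);				/* single, well-defined free */
}
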
88259diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
88260index 79a48f3..5e185c9 100644
88261--- a/net/mac80211/pm.c
88262+++ b/net/mac80211/pm.c
88263@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88264 struct sta_info *sta;
88265 struct ieee80211_chanctx *ctx;
88266
88267- if (!local->open_count)
88268+ if (!local_read(&local->open_count))
88269 goto suspend;
88270
88271 ieee80211_scan_cancel(local);
88272@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88273 cancel_work_sync(&local->dynamic_ps_enable_work);
88274 del_timer_sync(&local->dynamic_ps_timer);
88275
88276- local->wowlan = wowlan && local->open_count;
88277+ local->wowlan = wowlan && local_read(&local->open_count);
88278 if (local->wowlan) {
88279 int err = drv_suspend(local, wowlan);
88280 if (err < 0) {
88281@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88282 mutex_unlock(&local->chanctx_mtx);
88283
88284 /* stop hardware - this must stop RX */
88285- if (local->open_count)
88286+ if (local_read(&local->open_count))
88287 ieee80211_stop_device(local);
88288
88289 suspend:
88290diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
88291index dd88381..eef4dd6 100644
88292--- a/net/mac80211/rate.c
88293+++ b/net/mac80211/rate.c
88294@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
88295
88296 ASSERT_RTNL();
88297
88298- if (local->open_count)
88299+ if (local_read(&local->open_count))
88300 return -EBUSY;
88301
88302 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
88303diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
88304index c97a065..ff61928 100644
88305--- a/net/mac80211/rc80211_pid_debugfs.c
88306+++ b/net/mac80211/rc80211_pid_debugfs.c
88307@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
88308
88309 spin_unlock_irqrestore(&events->lock, status);
88310
88311- if (copy_to_user(buf, pb, p))
88312+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
88313 return -EFAULT;
88314
88315 return p;
88316diff --git a/net/mac80211/util.c b/net/mac80211/util.c
88317index f11e8c5..08d0013 100644
88318--- a/net/mac80211/util.c
88319+++ b/net/mac80211/util.c
88320@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
88321 }
88322 #endif
88323 /* everything else happens only if HW was up & running */
88324- if (!local->open_count)
88325+ if (!local_read(&local->open_count))
88326 goto wake_up;
88327
88328 /*
88329diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
88330index 49e96df..63a51c3 100644
88331--- a/net/netfilter/Kconfig
88332+++ b/net/netfilter/Kconfig
88333@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
88334
88335 To compile it as a module, choose M here. If unsure, say N.
88336
88337+config NETFILTER_XT_MATCH_GRADM
88338+ tristate '"gradm" match support'
88339+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
88340+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
88341+ ---help---
88342+ The gradm match allows to match on grsecurity RBAC being enabled.
88343+ It is useful when iptables rules are applied early on bootup to
88344+ prevent connections to the machine (except from a trusted host)
88345+ while the RBAC system is disabled.
88346+
88347 config NETFILTER_XT_MATCH_HASHLIMIT
88348 tristate '"hashlimit" match support'
88349 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
88350diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
88351index 3259697..54d5393 100644
88352--- a/net/netfilter/Makefile
88353+++ b/net/netfilter/Makefile
88354@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
88355 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
88356 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
88357 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
88358+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
88359 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
88360 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
88361 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
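
The Kconfig and Makefile hooks wire in the new xt_gradm.c, whose body begins further down in this patch. For orientation, this is the generic shape of an Xtables match module on 3.8-era kernels; every name below is illustrative and none of it is the actual xt_gradm code:

#include <linux/module.h>
#include <linux/netfilter/x_tables.h>

static bool example_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	return true;	/* the match decision goes here */
}

static struct xt_match example_mt_reg __read_mostly = {
	.name     = "example",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,
	.match    = example_mt,
	.me       = THIS_MODULE,
};

static int __init example_mt_init(void)
{
	return xt_register_match(&example_mt_reg);
}

static void __exit example_mt_exit(void)
{
	xt_unregister_match(&example_mt_reg);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
MODULE_LICENSE("GPL");
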
88362diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
88363index 6d6d8f2..a676749 100644
88364--- a/net/netfilter/ipset/ip_set_core.c
88365+++ b/net/netfilter/ipset/ip_set_core.c
88366@@ -1800,7 +1800,7 @@ done:
88367 return ret;
88368 }
88369
88370-static struct nf_sockopt_ops so_set __read_mostly = {
88371+static struct nf_sockopt_ops so_set = {
88372 .pf = PF_INET,
88373 .get_optmin = SO_IP_SET,
88374 .get_optmax = SO_IP_SET + 1,
88375diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
88376index 30e764a..c3b6a9d 100644
88377--- a/net/netfilter/ipvs/ip_vs_conn.c
88378+++ b/net/netfilter/ipvs/ip_vs_conn.c
88379@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
88380 /* Increase the refcnt counter of the dest */
88381 atomic_inc(&dest->refcnt);
88382
88383- conn_flags = atomic_read(&dest->conn_flags);
88384+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
88385 if (cp->protocol != IPPROTO_UDP)
88386 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
88387 flags = cp->flags;
88388@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
88389 atomic_set(&cp->refcnt, 1);
88390
88391 atomic_set(&cp->n_control, 0);
88392- atomic_set(&cp->in_pkts, 0);
88393+ atomic_set_unchecked(&cp->in_pkts, 0);
88394
88395 atomic_inc(&ipvs->conn_count);
88396 if (flags & IP_VS_CONN_F_NO_CPORT)
88397@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
88398
88399 /* Don't drop the entry if its number of incoming packets is not
88400 located in [0, 8] */
88401- i = atomic_read(&cp->in_pkts);
88402+ i = atomic_read_unchecked(&cp->in_pkts);
88403 if (i > 8 || i < 0) return 0;
88404
88405 if (!todrop_rate[i]) return 0;
88406diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
88407index 47edf5a..235b07d 100644
88408--- a/net/netfilter/ipvs/ip_vs_core.c
88409+++ b/net/netfilter/ipvs/ip_vs_core.c
88410@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
88411 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
88412 /* do not touch skb anymore */
88413
88414- atomic_inc(&cp->in_pkts);
88415+ atomic_inc_unchecked(&cp->in_pkts);
88416 ip_vs_conn_put(cp);
88417 return ret;
88418 }
88419@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
88420 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
88421 pkts = sysctl_sync_threshold(ipvs);
88422 else
88423- pkts = atomic_add_return(1, &cp->in_pkts);
88424+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88425
88426 if (ipvs->sync_state & IP_VS_STATE_MASTER)
88427 ip_vs_sync_conn(net, cp, pkts);
88428diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
88429index ec664cb..7f34a77 100644
88430--- a/net/netfilter/ipvs/ip_vs_ctl.c
88431+++ b/net/netfilter/ipvs/ip_vs_ctl.c
88432@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
88433 ip_vs_rs_hash(ipvs, dest);
88434 write_unlock_bh(&ipvs->rs_lock);
88435 }
88436- atomic_set(&dest->conn_flags, conn_flags);
88437+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
88438
88439 /* bind the service */
88440 if (!dest->svc) {
88441@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
88442 * align with netns init in ip_vs_control_net_init()
88443 */
88444
88445-static struct ctl_table vs_vars[] = {
88446+static ctl_table_no_const vs_vars[] __read_only = {
88447 {
88448 .procname = "amemthresh",
88449 .maxlen = sizeof(int),
88450@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
88451 " %-7s %-6d %-10d %-10d\n",
88452 &dest->addr.in6,
88453 ntohs(dest->port),
88454- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
88455+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
88456 atomic_read(&dest->weight),
88457 atomic_read(&dest->activeconns),
88458 atomic_read(&dest->inactconns));
88459@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
88460 "%-7s %-6d %-10d %-10d\n",
88461 ntohl(dest->addr.ip),
88462 ntohs(dest->port),
88463- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
88464+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
88465 atomic_read(&dest->weight),
88466 atomic_read(&dest->activeconns),
88467 atomic_read(&dest->inactconns));
88468@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
88469
88470 entry.addr = dest->addr.ip;
88471 entry.port = dest->port;
88472- entry.conn_flags = atomic_read(&dest->conn_flags);
88473+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
88474 entry.weight = atomic_read(&dest->weight);
88475 entry.u_threshold = dest->u_threshold;
88476 entry.l_threshold = dest->l_threshold;
88477@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
88478 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
88479 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
88480 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
88481- (atomic_read(&dest->conn_flags) &
88482+ (atomic_read_unchecked(&dest->conn_flags) &
88483 IP_VS_CONN_F_FWD_MASK)) ||
88484 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
88485 atomic_read(&dest->weight)) ||
88486@@ -3688,7 +3688,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
88487 {
88488 int idx;
88489 struct netns_ipvs *ipvs = net_ipvs(net);
88490- struct ctl_table *tbl;
88491+ ctl_table_no_const *tbl;
88492
88493 atomic_set(&ipvs->dropentry, 0);
88494 spin_lock_init(&ipvs->dropentry_lock);
88495diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
88496index fdd89b9..bd96aa9 100644
88497--- a/net/netfilter/ipvs/ip_vs_lblc.c
88498+++ b/net/netfilter/ipvs/ip_vs_lblc.c
88499@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
88500 * IPVS LBLC sysctl table
88501 */
88502 #ifdef CONFIG_SYSCTL
88503-static ctl_table vs_vars_table[] = {
88504+static ctl_table_no_const vs_vars_table[] __read_only = {
88505 {
88506 .procname = "lblc_expiration",
88507 .data = NULL,
88508diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
88509index c03b6a3..8ce3681 100644
88510--- a/net/netfilter/ipvs/ip_vs_lblcr.c
88511+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
88512@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
88513 * IPVS LBLCR sysctl table
88514 */
88515
88516-static ctl_table vs_vars_table[] = {
88517+static ctl_table_no_const vs_vars_table[] __read_only = {
88518 {
88519 .procname = "lblcr_expiration",
88520 .data = NULL,
88521diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
88522index 44fd10c..2a163b3 100644
88523--- a/net/netfilter/ipvs/ip_vs_sync.c
88524+++ b/net/netfilter/ipvs/ip_vs_sync.c
88525@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
88526 cp = cp->control;
88527 if (cp) {
88528 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
88529- pkts = atomic_add_return(1, &cp->in_pkts);
88530+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88531 else
88532 pkts = sysctl_sync_threshold(ipvs);
88533 ip_vs_sync_conn(net, cp->control, pkts);
88534@@ -758,7 +758,7 @@ control:
88535 if (!cp)
88536 return;
88537 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
88538- pkts = atomic_add_return(1, &cp->in_pkts);
88539+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88540 else
88541 pkts = sysctl_sync_threshold(ipvs);
88542 goto sloop;
88543@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
88544
88545 if (opt)
88546 memcpy(&cp->in_seq, opt, sizeof(*opt));
88547- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
88548+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
88549 cp->state = state;
88550 cp->old_state = cp->state;
88551 /*
88552diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
88553index ee6b7a9..f9a89f6 100644
88554--- a/net/netfilter/ipvs/ip_vs_xmit.c
88555+++ b/net/netfilter/ipvs/ip_vs_xmit.c
88556@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
88557 else
88558 rc = NF_ACCEPT;
88559 /* do not touch skb anymore */
88560- atomic_inc(&cp->in_pkts);
88561+ atomic_inc_unchecked(&cp->in_pkts);
88562 goto out;
88563 }
88564
88565@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
88566 else
88567 rc = NF_ACCEPT;
88568 /* do not touch skb anymore */
88569- atomic_inc(&cp->in_pkts);
88570+ atomic_inc_unchecked(&cp->in_pkts);
88571 goto out;
88572 }
88573
88574diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
88575index 7df424e..a527b02 100644
88576--- a/net/netfilter/nf_conntrack_acct.c
88577+++ b/net/netfilter/nf_conntrack_acct.c
88578@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
88579 #ifdef CONFIG_SYSCTL
88580 static int nf_conntrack_acct_init_sysctl(struct net *net)
88581 {
88582- struct ctl_table *table;
88583+ ctl_table_no_const *table;
88584
88585 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
88586 GFP_KERNEL);
88587diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
88588index e4a0c4f..c263f28 100644
88589--- a/net/netfilter/nf_conntrack_core.c
88590+++ b/net/netfilter/nf_conntrack_core.c
88591@@ -1529,6 +1529,10 @@ err_extend:
88592 #define DYING_NULLS_VAL ((1<<30)+1)
88593 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
88594
88595+#ifdef CONFIG_GRKERNSEC_HIDESYM
88596+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
88597+#endif
88598+
88599 static int nf_conntrack_init_net(struct net *net)
88600 {
88601 int ret;
88602@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
88603 goto err_stat;
88604 }
88605
88606+#ifdef CONFIG_GRKERNSEC_HIDESYM
88607+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
88608+#else
88609 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
88610+#endif
88611 if (!net->ct.slabname) {
88612 ret = -ENOMEM;
88613 goto err_slabname;
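
Under HIDESYM the per-netns conntrack cache can no longer be named after the struct net pointer, because that string surfaces in /proc/slabinfo and hands out a live kernel address; a plain counter gives each namespace a unique but meaningless name instead. The visible difference, in userspace terms:

#include <stdio.h>

int main(void)
{
	unsigned long id = 1;
	void *net = &id;	/* stand-in for the struct net pointer */

	printf("nf_conntrack_%p\n", net);	/* old: address leak */
	printf("nf_conntrack_%08lx\n", id);	/* new: opaque counter */
	return 0;
}
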
88614diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
88615index faa978f..1afb18f 100644
88616--- a/net/netfilter/nf_conntrack_ecache.c
88617+++ b/net/netfilter/nf_conntrack_ecache.c
88618@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
88619 #ifdef CONFIG_SYSCTL
88620 static int nf_conntrack_event_init_sysctl(struct net *net)
88621 {
88622- struct ctl_table *table;
88623+ ctl_table_no_const *table;
88624
88625 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
88626 GFP_KERNEL);
88627diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
88628index 884f2b3..d53b33a 100644
88629--- a/net/netfilter/nf_conntrack_helper.c
88630+++ b/net/netfilter/nf_conntrack_helper.c
88631@@ -55,7 +55,7 @@ static struct ctl_table helper_sysctl_table[] = {
88632
88633 static int nf_conntrack_helper_init_sysctl(struct net *net)
88634 {
88635- struct ctl_table *table;
88636+ ctl_table_no_const *table;
88637
88638 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
88639 GFP_KERNEL);
88640diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
88641index 51e928d..72a413a 100644
88642--- a/net/netfilter/nf_conntrack_proto.c
88643+++ b/net/netfilter/nf_conntrack_proto.c
88644@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
88645
88646 static void
88647 nf_ct_unregister_sysctl(struct ctl_table_header **header,
88648- struct ctl_table **table,
88649+ ctl_table_no_const **table,
88650 unsigned int users)
88651 {
88652 if (users > 0)
88653diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
88654index e7185c6..4ad6c9c 100644
88655--- a/net/netfilter/nf_conntrack_standalone.c
88656+++ b/net/netfilter/nf_conntrack_standalone.c
88657@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
88658
88659 static int nf_conntrack_standalone_init_sysctl(struct net *net)
88660 {
88661- struct ctl_table *table;
88662+ ctl_table_no_const *table;
88663
88664 if (net_eq(net, &init_net)) {
88665 nf_ct_netfilter_header =
88666diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
88667index 7ea8026..bc9512d 100644
88668--- a/net/netfilter/nf_conntrack_timestamp.c
88669+++ b/net/netfilter/nf_conntrack_timestamp.c
88670@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
88671 #ifdef CONFIG_SYSCTL
88672 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
88673 {
88674- struct ctl_table *table;
88675+ ctl_table_no_const *table;
88676
88677 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
88678 GFP_KERNEL);
88679diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
88680index 9e31269..bc4c1b7 100644
88681--- a/net/netfilter/nf_log.c
88682+++ b/net/netfilter/nf_log.c
88683@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
88684
88685 #ifdef CONFIG_SYSCTL
88686 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
88687-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
88688+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
88689 static struct ctl_table_header *nf_log_dir_header;
88690
88691 static int nf_log_proc_dostring(ctl_table *table, int write,
88692@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
88693 rcu_assign_pointer(nf_loggers[tindex], logger);
88694 mutex_unlock(&nf_log_mutex);
88695 } else {
88696+ ctl_table_no_const nf_log_table = *table;
88697+
88698 mutex_lock(&nf_log_mutex);
88699 logger = rcu_dereference_protected(nf_loggers[tindex],
88700 lockdep_is_held(&nf_log_mutex));
88701 if (!logger)
88702- table->data = "NONE";
88703+ nf_log_table.data = "NONE";
88704 else
88705- table->data = logger->name;
88706- r = proc_dostring(table, write, buffer, lenp, ppos);
88707+ nf_log_table.data = logger->name;
88708+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
88709 mutex_unlock(&nf_log_mutex);
88710 }
88711
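
Because nf_log_sysctl_table is now read-only, the read side cannot repoint table->data at the current logger name in place; it fills a throwaway stack copy of the entry and hands that to proc_dostring(), leaving the shared table untouched. The copy-then-mutate shape, reduced to plain structs:

/* entry stands in for struct ctl_table, render() for proc_dostring() */
struct entry { const char *data; };

static void read_logger_name(const struct entry *shared, const char *logger,
			     void (*render)(const struct entry *))
{
	struct entry tmp = *shared;	/* stack copy is freely writable */

	tmp.data = logger ? logger : "NONE";
	render(&tmp);			/* *shared is never written */
}
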
88712diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
88713index f042ae5..30ea486 100644
88714--- a/net/netfilter/nf_sockopt.c
88715+++ b/net/netfilter/nf_sockopt.c
88716@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
88717 }
88718 }
88719
88720- list_add(&reg->list, &nf_sockopts);
88721+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
88722 out:
88723 mutex_unlock(&nf_sockopt_mutex);
88724 return ret;
88725@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
88726 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
88727 {
88728 mutex_lock(&nf_sockopt_mutex);
88729- list_del(&reg->list);
88730+ pax_list_del((struct list_head *)&reg->list);
88731 mutex_unlock(&nf_sockopt_mutex);
88732 }
88733 EXPORT_SYMBOL(nf_unregister_sockopt);
88734diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
88735index 589d686..dc3fd5d 100644
88736--- a/net/netfilter/nfnetlink_acct.c
88737+++ b/net/netfilter/nfnetlink_acct.c
88738@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
88739 return -EINVAL;
88740
88741 acct_name = nla_data(tb[NFACCT_NAME]);
88742+ if (strlen(acct_name) == 0)
88743+ return -EINVAL;
88744
88745 list_for_each_entry(nfacct, &nfnl_acct_list, head) {
88746 if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
88747diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
88748index 92fd8ec..3f6ea4b 100644
88749--- a/net/netfilter/nfnetlink_log.c
88750+++ b/net/netfilter/nfnetlink_log.c
88751@@ -72,7 +72,7 @@ struct nfulnl_instance {
88752 };
88753
88754 static DEFINE_SPINLOCK(instances_lock);
88755-static atomic_t global_seq;
88756+static atomic_unchecked_t global_seq;
88757
88758 #define INSTANCE_BUCKETS 16
88759 static struct hlist_head instance_table[INSTANCE_BUCKETS];
88760@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
88761 /* global sequence number */
88762 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
88763 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
88764- htonl(atomic_inc_return(&global_seq))))
88765+ htonl(atomic_inc_return_unchecked(&global_seq))))
88766 goto nla_put_failure;
88767
88768 if (data_len) {
88769diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
88770index 3158d87..39006c9 100644
88771--- a/net/netfilter/nfnetlink_queue_core.c
88772+++ b/net/netfilter/nfnetlink_queue_core.c
88773@@ -1064,8 +1064,10 @@ static int __init nfnetlink_queue_init(void)
88774
88775 #ifdef CONFIG_PROC_FS
88776 if (!proc_create("nfnetlink_queue", 0440,
88777- proc_net_netfilter, &nfqnl_file_ops))
88778+ proc_net_netfilter, &nfqnl_file_ops)) {
88779+ status = -ENOMEM;
88780 goto cleanup_subsys;
88781+ }
88782 #endif
88783
88784 register_netdevice_notifier(&nfqnl_dev_notifier);
88785diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
88786new file mode 100644
88787index 0000000..c566332
88788--- /dev/null
88789+++ b/net/netfilter/xt_gradm.c
88790@@ -0,0 +1,51 @@
88791+/*
88792+ * gradm match for netfilter
88793