diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739..b47493f 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,6 +75,7 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
@@ -80,6 +87,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,19 +100,24 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
@@ -115,9 +128,11 @@ devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +163,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +180,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +204,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +215,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +225,10 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +238,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +253,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +265,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +283,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 986614d..e8bfedc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2121,6 +2125,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
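
Editor's note: pax_softmode= above is a plain 0/1 boot flag. As a hedged sketch of how such a flag is typically consumed early in boot (get_option() and early_param() are real kernel APIs; the variable and handler names here are illustrative, not lifted from this patch):

/* Sketch only: wiring a 0/1 boot flag like pax_softmode= */
static int pax_softmode __read_mostly;

static int __init setup_pax_softmode(char *str)
{
	get_option(&str, &pax_softmode);	/* parse "0" or "1" from the cmdline */
	return 1;
}
early_param("pax_softmode", setup_pax_softmode);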
diff --git a/Makefile b/Makefile
index e2b10b9..f916aa5 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks

 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -575,6 +576,65 @@ else
 KBUILD_CFLAGS += -O2
 endif

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile

 ifdef CONFIG_READABLE_ASM
@@ -731,7 +791,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -778,6 +838,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -787,7 +849,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 # Store (new) KERNELRELASE string in include/config/kernel.release
@@ -831,6 +893,7 @@ prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -938,6 +1001,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -953,7 +1018,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1019,7 +1084,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer tools/gcc/size_overflow_hash.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1059,6 +1124,7 @@ distclean: mrproper
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
 -o -name '.*.rej' \
+ -o -name '.*.rej' -o -name '*.so' \
 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 -type f -print | xargs rm -f

@@ -1219,6 +1285,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1355,17 +1423,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1375,11 +1447,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4f..f7264b4 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
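
Editor's note: alpha gets no PAX_REFCOUNT instrumentation, so the *_unchecked operations above are plain aliases of the normal atomics. A minimal sketch of the general pattern (illustrative; it mirrors the ARM atomic.h hunks later in this patch):

/* When PAX_REFCOUNT instruments the normal atomics, *_unchecked needs
 * a distinct type whose ops skip the overflow trap; without it, the
 * unchecked variants simply alias the normal ones, as on alpha. */
#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	long counter;
} atomic64_unchecked_t;		/* ops defined elsewhere, without the trap */
#else
typedef atomic64_t atomic64_unchecked_t;
#define atomic64_inc_unchecked(v) atomic64_inc(v)
#endif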
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
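
Editor's note: deriving L1_CACHE_BYTES from the shift keeps the two constants from drifting apart. _AC() from <linux/const.h> expands to a bare literal in assembly and to a suffixed one in C, so one definition serves both. A worked example using the EV6 value from the hunk above:

#include <linux/const.h>

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* == 64UL in C, 1 << 6 in asm */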
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index bc2a0da..8ad11ee 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 81a4342..348b927 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 14db93e..47bed62 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_area_struct *vma = find_vma(current->mm, addr);
-
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 while (1) {
 /* At this point: (!vma || addr < vma->vm_end). */
 if (limit - len < addr)
 return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
+ if (check_heap_stack_gap(vma, addr, len, offset))
 return addr;
 addr = vma->vm_end;
 vma = vma->vm_next;
@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
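
Editor's note: the hunk above swaps the open-coded "fits below the next vma" test for grsecurity's check_heap_stack_gap() and threads a per-thread random offset through. The real helper is defined elsewhere in the full patch; what follows is only a conceptual sketch of what such a check adds beyond the plain overlap test:

/* Conceptual sketch, not the grsecurity implementation: besides
 * checking that [addr, addr+len) fits below the next vma, also keep
 * a (randomized) guard gap in front of stack-like mappings. */
static int check_heap_stack_gap_sketch(struct vm_area_struct *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long offset)
{
	if (!vma)
		return 1;				/* nothing above: fits */
	if (addr + len > vma->vm_start)
		return 0;				/* overlaps next vma */
	if ((vma->vm_flags & VM_GROWSDOWN) &&
	    vma->vm_start - (addr + len) < offset)
		return 0;				/* too close to a stack */
	return 1;
}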
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 0c4132d..88f0d53 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
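
Editor's note: the ldah/lda decoding above widens each 16-bit displacement by first forcing the upper bits to ones and then applying the (v ^ bit) + bit trick; the combined effect is a sign extension. The same arithmetic in a portable, stand-alone form:

/* Portable equivalent of the sign-extension dance used above. */
static inline long sign_extend16(unsigned int insn)
{
	unsigned long d = insn & 0xFFFFUL;		/* 16-bit displacement field */

	return (long)((d ^ 0x8000UL) - 0x8000UL);	/* yields -32768..32767 */
}
/* e.g. sign_extend16(0xFFFF) == -1 and sign_extend16(0x0001) == 1, so the
 * patched-PLT target is regs->r27 + sign_extend16(ldah)*65536 + sign_extend16(ldq). */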
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b8..9aa2d62 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1427,6 +1427,16 @@ config ARM_ERRATA_775420
 to deadlock. This workaround puts DSB before executing ISB if
 an abort may occur on cache maintenance.

+config ARM_ERRATA_798181
+ bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+ depends on CPU_V7 && SMP
+ help
+ On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+ adequately shooting down all use of the old entries. This
+ option enables the Linux kernel workaround for this erratum
+ which sends an IPI to the CPUs that are running the same ASID
+ as the one being invalidated.
+
 endmenu

 source "arch/arm/common/Kconfig"
@@ -1813,7 +1823,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 87dfa902..3a523fc 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -81,7 +81,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
-struct irq_chip gic_arch_extn = {
+irq_chip_no_const gic_arch_extn __read_only = {
 .irq_eoi = NULL,
 .irq_mask = NULL,
 .irq_unmask = NULL,
@@ -329,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 chained_irq_exit(chip, desc);
 }

-static struct irq_chip gic_chip = {
+static irq_chip_no_const gic_chip __read_only = {
 .name = "GIC",
 .irq_mask = gic_mask_irq,
 .irq_unmask = gic_unmask_irq,
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61f..9ac0642 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -17,17 +17,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long tmp, tmp2;
@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long flags;
@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -241,6 +428,14 @@ typedef struct {
 u64 __aligned(8) counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ u64 __aligned(8) counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 static inline u64 atomic64_read(const atomic64_t *v)
@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, u64 i)
 {
 u64 tmp;
@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 : "cc");
 }

+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
 u64 result;
@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
+" adcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)

 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_add_return\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" adds %0, %1, %4\n"
+" adcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %0, %0, %4\n"
+" sbcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %0, %0, %4\n"
 " sbc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)

 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_sub_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, %4\n"
+" sbcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (i)
 : "cc");
@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 return oldval;
 }

+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
+{
+ u64 oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 {
 u64 result;
@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)

 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, #1\n"
+" sbcs %H0, %H1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " teq %H0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adcs %H0, %H0, %H6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
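
Editor's note: throughout the hunk above, PAX_REFCOUNT turns add/adc into the flag-setting adds/adcs and inserts a bvc-over-bkpt so that a signed overflow traps before strexd can publish the wrapped value; the __ex_table entry then resumes execution past the store. Ignoring the ldrex/strex retry loop, a C-level analogue of those semantics (sketch only; __builtin_add_overflow is a GCC/Clang builtin used purely for illustration):

static inline void refcount_add_sketch(int i, int *counter)
{
	int newval;

	if (__builtin_add_overflow(*counter, i, &newval)) {
		__builtin_trap();	/* the asm hits "bkpt 0xf103" here */
		return;			/* counter is left un-wrapped */
	}
	*counter = newval;		/* only a non-overflowed value lands */
}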
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c5..d418304 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc164..b14d917 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 7eb18c1..e38b6d2 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
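
Editor's note: domain_val(dom, type) packs the 2-bit access-type field for domain number dom into its slot of the ARM domain access control register (DACR), so a full register value is just the OR of one such field per domain. A worked expansion of the macro from the hunk above:

#define domain_val(dom,type)	((type) << (2*(dom)))

/* e.g. manager access (3) for domain number 1: 3 << 2 == 0xC;
 * client access (1) for domain number 0:       1 << 0 == 0x1;
 * combined DACR value:                         0xC | 0x1 == 0xD */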
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1..9d90e8b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #endif
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf59..7b94b8f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 smp_mb();
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;

 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();

 switch (op) {
 case FUTEX_OP_SET:
@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }

+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */

 if (!ret) {
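
Editor's note: each futex hunk above brackets its user access with pax_open_userland()/pax_close_userland(): under PAX_MEMORY_UDEREF userspace is unreachable from kernel mode by default, so it is opened for exactly the duration of the access. A sketch of the idiom (the pax_*_userland helpers are real and defined elsewhere in the full patch; the wrapper function itself is illustrative):

static inline int read_user_word_sketch(u32 *val, const u32 __user *uaddr)
{
	int ret;

	pax_open_userland();		/* temporarily allow userland access */
	ret = __get_user(*val, uaddr);	/* the guarded access itself */
	pax_close_userland();		/* close the window again */

	return ret;
}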
1722diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
1723index 4b1ce6c..bea3f73 100644
1724--- a/arch/arm/include/asm/hardware/gic.h
1725+++ b/arch/arm/include/asm/hardware/gic.h
1726@@ -34,9 +34,10 @@
1727
1728 #ifndef __ASSEMBLY__
1729 #include <linux/irqdomain.h>
1730+#include <linux/irq.h>
1731 struct device_node;
1732
1733-extern struct irq_chip gic_arch_extn;
1734+extern irq_chip_no_const gic_arch_extn;
1735
1736 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
1737 u32 offset, struct device_node *);
1738diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
1739index 8c5e828..91b99ab 100644
1740--- a/arch/arm/include/asm/highmem.h
1741+++ b/arch/arm/include/asm/highmem.h
1742@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
1743 #endif
1744 #endif
1745
1746+/*
1747+ * Needed to be able to broadcast the TLB invalidation for kmap.
1748+ */
1749+#ifdef CONFIG_ARM_ERRATA_798181
1750+#undef ARCH_NEEDS_KMAP_HIGH_GET
1751+#endif
1752+
1753 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
1754 extern void *kmap_high_get(struct page *page);
1755 #else
1756diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1757index 83eb2f7..ed77159 100644
1758--- a/arch/arm/include/asm/kmap_types.h
1759+++ b/arch/arm/include/asm/kmap_types.h
1760@@ -4,6 +4,6 @@
1761 /*
1762 * This is the "bare minimum". AIO seems to require this.
1763 */
1764-#define KM_TYPE_NR 16
1765+#define KM_TYPE_NR 17
1766
1767 #endif
1768diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1769index 9e614a1..3302cca 100644
1770--- a/arch/arm/include/asm/mach/dma.h
1771+++ b/arch/arm/include/asm/mach/dma.h
1772@@ -22,7 +22,7 @@ struct dma_ops {
1773 int (*residue)(unsigned int, dma_t *); /* optional */
1774 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1775 const char *type;
1776-};
1777+} __do_const;
1778
1779 struct dma_struct {
1780 void *addr; /* single DMA address */
1781diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1782index 2fe141f..192dc01 100644
1783--- a/arch/arm/include/asm/mach/map.h
1784+++ b/arch/arm/include/asm/mach/map.h
1785@@ -27,13 +27,16 @@ struct map_desc {
1786 #define MT_MINICLEAN 6
1787 #define MT_LOW_VECTORS 7
1788 #define MT_HIGH_VECTORS 8
1789-#define MT_MEMORY 9
1790+#define MT_MEMORY_RWX 9
1791 #define MT_ROM 10
1792-#define MT_MEMORY_NONCACHED 11
1793+#define MT_MEMORY_NONCACHED_RX 11
1794 #define MT_MEMORY_DTCM 12
1795 #define MT_MEMORY_ITCM 13
1796 #define MT_MEMORY_SO 14
1797 #define MT_MEMORY_DMA_READY 15
1798+#define MT_MEMORY_RW 16
1799+#define MT_MEMORY_RX 17
1800+#define MT_MEMORY_NONCACHED_RW 18
1801
1802 #ifdef CONFIG_MMU
1803 extern void iotable_init(struct map_desc *, int);
1804diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
1805index 863a661..a7b85e0 100644
1806--- a/arch/arm/include/asm/mmu_context.h
1807+++ b/arch/arm/include/asm/mmu_context.h
1808@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
1809 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
1810 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
1811
1812+DECLARE_PER_CPU(atomic64_t, active_asids);
1813+
1814 #else /* !CONFIG_CPU_HAS_ASID */
1815
1816 #ifdef CONFIG_MMU
1817diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1818index 53426c6..c7baff3 100644
1819--- a/arch/arm/include/asm/outercache.h
1820+++ b/arch/arm/include/asm/outercache.h
1821@@ -35,7 +35,7 @@ struct outer_cache_fns {
1822 #endif
1823 void (*set_debug)(unsigned long);
1824 void (*resume)(void);
1825-};
1826+} __no_const;
1827
1828 #ifdef CONFIG_OUTER_CACHE
1829
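
The __do_const and __no_const annotations seen in the dma.h and outercache.h hunks are consumed by grsecurity's constify gcc plugin, which moves structures consisting of function pointers into read-only memory: __do_const forces constification of a type the plugin would otherwise skip, and __no_const exempts a type whose instances must stay writable. A minimal sketch of how the annotations are defined when the plugin is active (the real definitions live in the patched include/linux/compiler-gcc*.h headers):

	#ifdef CONSTIFY_PLUGIN
	#define __do_const	__attribute__((do_const))
	#define __no_const	__attribute__((no_const))
	#else
	#define __do_const
	#define __no_const
	#endif

For a single variable of an otherwise-constified type, the patch instead provides non-const typedef variants, which is what the irq_chip_no_const in the gic.h hunk above is.
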
1830diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1831index 812a494..71fc0b6 100644
1832--- a/arch/arm/include/asm/page.h
1833+++ b/arch/arm/include/asm/page.h
1834@@ -114,7 +114,7 @@ struct cpu_user_fns {
1835 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1836 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1837 unsigned long vaddr, struct vm_area_struct *vma);
1838-};
1839+} __no_const;
1840
1841 #ifdef MULTI_USER
1842 extern struct cpu_user_fns cpu_user;
1843diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1844index 943504f..c37a730 100644
1845--- a/arch/arm/include/asm/pgalloc.h
1846+++ b/arch/arm/include/asm/pgalloc.h
1847@@ -17,6 +17,7 @@
1848 #include <asm/processor.h>
1849 #include <asm/cacheflush.h>
1850 #include <asm/tlbflush.h>
1851+#include <asm/system_info.h>
1852
1853 #define check_pgt_cache() do { } while (0)
1854
1855@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1856 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1857 }
1858
1859+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1860+{
1861+ pud_populate(mm, pud, pmd);
1862+}
1863+
1864 #else /* !CONFIG_ARM_LPAE */
1865
1866 /*
1867@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1868 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1869 #define pmd_free(mm, pmd) do { } while (0)
1870 #define pud_populate(mm,pmd,pte) BUG()
1871+#define pud_populate_kernel(mm,pmd,pte) BUG()
1872
1873 #endif /* CONFIG_ARM_LPAE */
1874
1875@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1876 __free_page(pte);
1877 }
1878
1879+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1880+{
1881+#ifdef CONFIG_ARM_LPAE
1882+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1883+#else
1884+ if (addr & SECTION_SIZE)
1885+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1886+ else
1887+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1888+#endif
1889+ flush_pmd_entry(pmdp);
1890+}
1891+
1892 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1893 pmdval_t prot)
1894 {
1895@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1896 static inline void
1897 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1898 {
1899- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1900+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1901 }
1902 #define pmd_pgtable(pmd) pmd_page(pmd)
1903
1904diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1905index 5cfba15..f415e1a 100644
1906--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1907+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1908@@ -20,12 +20,15 @@
1909 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1910 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1911 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1912+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1913 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1914 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1915 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1916+
1917 /*
1918 * - section
1919 */
1920+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1921 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1922 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1923 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1924@@ -37,6 +40,7 @@
1925 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1926 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1927 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1928+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1929
1930 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1931 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1932@@ -66,6 +70,7 @@
1933 * - extended small page/tiny page
1934 */
1935 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1936+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1937 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1938 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1939 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1940diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1941index f97ee02..07f1be5 100644
1942--- a/arch/arm/include/asm/pgtable-2level.h
1943+++ b/arch/arm/include/asm/pgtable-2level.h
1944@@ -125,6 +125,7 @@
1945 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1946 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1947 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1948+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7 */
1949
1950 /*
1951 * These are the memory types, defined to be compatible with
1952diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1953index d795282..a43ea90 100644
1954--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1955+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1956@@ -32,15 +32,18 @@
1957 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1958 #define PMD_BIT4 (_AT(pmdval_t, 0))
1959 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1960+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1961
1962 /*
1963 * - section
1964 */
1965 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1966 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1967+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1968 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1969 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1970 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1971+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1972 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1973 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1974 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1975@@ -66,6 +69,7 @@
1976 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1977 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1978 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1979+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1980 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1981
1982 /*
1983diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1984index a3f3792..7b932a6 100644
1985--- a/arch/arm/include/asm/pgtable-3level.h
1986+++ b/arch/arm/include/asm/pgtable-3level.h
1987@@ -74,6 +74,7 @@
1988 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1989 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1990 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1991+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1992 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1993 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1994 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1995@@ -82,6 +83,7 @@
1996 /*
1997 * To be used in assembly code with the upper page attributes.
1998 */
1999+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2000 #define L_PTE_XN_HIGH (1 << (54 - 32))
2001 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2002
2003diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2004index c094749..a6ff605 100644
2005--- a/arch/arm/include/asm/pgtable.h
2006+++ b/arch/arm/include/asm/pgtable.h
2007@@ -30,6 +30,9 @@
2008 #include <asm/pgtable-2level.h>
2009 #endif
2010
2011+#define ktla_ktva(addr) (addr)
2012+#define ktva_ktla(addr) (addr)
2013+
2014 /*
2015 * Just any arbitrary offset to the start of the vmalloc VM area: the
2016 * current 8MB value just means that there will be a 8MB "hole" after the
2017@@ -45,6 +48,9 @@
2018 #define LIBRARY_TEXT_START 0x0c000000
2019
2020 #ifndef __ASSEMBLY__
2021+extern pteval_t __supported_pte_mask;
2022+extern pmdval_t __supported_pmd_mask;
2023+
2024 extern void __pte_error(const char *file, int line, pte_t);
2025 extern void __pmd_error(const char *file, int line, pmd_t);
2026 extern void __pgd_error(const char *file, int line, pgd_t);
2027@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2028 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2029 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2030
2031+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2032+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2033+
2034+#ifdef CONFIG_PAX_KERNEXEC
2035+#include <asm/domain.h>
2036+#include <linux/thread_info.h>
2037+#include <linux/preempt.h>
2038+#endif
2039+
2040+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2041+static inline int test_domain(int domain, int domaintype)
2042+{
2043+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2044+}
2045+#endif
2046+
2047+#ifdef CONFIG_PAX_KERNEXEC
2048+static inline unsigned long pax_open_kernel(void) {
2049+#ifdef CONFIG_ARM_LPAE
2050+ /* TODO */
2051+#else
2052+ preempt_disable();
2053+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2054+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2055+#endif
2056+ return 0;
2057+}
2058+
2059+static inline unsigned long pax_close_kernel(void) {
2060+#ifdef CONFIG_ARM_LPAE
2061+ /* TODO */
2062+#else
2063+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2064+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2065+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2066+ preempt_enable_no_resched();
2067+#endif
2068+ return 0;
2069+}
2070+#else
2071+static inline unsigned long pax_open_kernel(void) { return 0; }
2072+static inline unsigned long pax_close_kernel(void) { return 0; }
2073+#endif
2074+
2075 /*
2076 * This is the lowest virtual address we can permit any user space
2077 * mapping to be mapped at. This is particularly important for
2078@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2079 /*
2080 * The pgprot_* and protection_map entries will be fixed up in runtime
2081 * to include the cachable and bufferable bits based on memory policy,
2082- * as well as any architecture dependent bits like global/ASID and SMP
2083- * shared mapping bits.
2084+ * as well as any architecture dependent bits like global/ASID, PXN,
2085+ * and SMP shared mapping bits.
2086 */
2087 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2088
2089@@ -241,7 +291,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2090 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2091 {
2092 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2093- L_PTE_NONE | L_PTE_VALID;
2094+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2095 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2096 return pte;
2097 }
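
pax_open_kernel()/pax_close_kernel() are the KERNEXEC write-enable bracket: kernel text and read-only data live in DOMAIN_KERNEL, and temporarily flipping that domain to the DOMAIN_KERNEXEC (manager) setting bypasses the page-level permission checks so one intentional write can go through; preemption is disabled across the window so the DACR change cannot leak to another context. The fiq.c and traps.c hunks later in this patch use the pair around single writes into the vectors page. A sketch of the idiom (hypothetical helper, not from the patch):

	static void set_ro_word(unsigned long *p, unsigned long val)
	{
		pax_open_kernel();	/* lift write protection */
		*p = val;		/* the one intended store */
		pax_close_kernel();	/* re-arm protection */
	}
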
2098diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2099index f3628fb..a0672dd 100644
2100--- a/arch/arm/include/asm/proc-fns.h
2101+++ b/arch/arm/include/asm/proc-fns.h
2102@@ -75,7 +75,7 @@ extern struct processor {
2103 unsigned int suspend_size;
2104 void (*do_suspend)(void *);
2105 void (*do_resume)(void *);
2106-} processor;
2107+} __do_const processor;
2108
2109 #ifndef MULTI_CPU
2110 extern void cpu_proc_init(void);
2111diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2112index 06e7d50..8a8e251 100644
2113--- a/arch/arm/include/asm/processor.h
2114+++ b/arch/arm/include/asm/processor.h
2115@@ -65,9 +65,8 @@ struct thread_struct {
2116 regs->ARM_cpsr |= PSR_ENDSTATE; \
2117 regs->ARM_pc = pc & ~1; /* pc */ \
2118 regs->ARM_sp = sp; /* sp */ \
2119- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2120- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2121- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2122+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2123+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2124 nommu_start_thread(regs); \
2125 })
2126
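
The start_thread() change replaces three direct loads from the user stack with one copy_from_user() of r0-r2: under UDEREF the user domain is normally inaccessible to the kernel, so user memory has to go through the accessor functions, which open and close userland around the access. The single 3-word copy works because ARM_r0..ARM_r2 are consecutive words inside pt_regs; a compile-time assertion documenting that assumption could look like this (a sketch, not part of the patch):

	BUILD_BUG_ON(offsetof(struct pt_regs, ARM_r2) !=
		     offsetof(struct pt_regs, ARM_r0) + 2 * sizeof(unsigned long));
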
2127diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2128index d3a22be..3a69ad5 100644
2129--- a/arch/arm/include/asm/smp.h
2130+++ b/arch/arm/include/asm/smp.h
2131@@ -107,7 +107,7 @@ struct smp_operations {
2132 int (*cpu_disable)(unsigned int cpu);
2133 #endif
2134 #endif
2135-};
2136+} __no_const;
2137
2138 /*
2139 * set platform specific SMP operations
2140diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2141index cddda1f..ff357f7 100644
2142--- a/arch/arm/include/asm/thread_info.h
2143+++ b/arch/arm/include/asm/thread_info.h
2144@@ -77,9 +77,9 @@ struct thread_info {
2145 .flags = 0, \
2146 .preempt_count = INIT_PREEMPT_COUNT, \
2147 .addr_limit = KERNEL_DS, \
2148- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2149- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2150- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2151+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2152+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2153+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2154 .restart_block = { \
2155 .fn = do_no_restart_syscall, \
2156 }, \
2157@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2158 #define TIF_SYSCALL_AUDIT 9
2159 #define TIF_SYSCALL_TRACEPOINT 10
2160 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2161+
2162+/* within 8 bits of TIF_SYSCALL_TRACE
2163+ * to meet flexible second operand requirements
2164+ */
2165+#define TIF_GRSEC_SETXID 12
2166+
2167 #define TIF_USING_IWMMXT 17
2168 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2169 #define TIF_RESTORE_SIGMASK 20
2170@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2171 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2172 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2173 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2174+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2175
2176 /* Checks for any syscall work in entry-common.S */
2177 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2178- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2179+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2180
2181 /*
2182 * Change these and you break ASM code in entry-common.S
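
The placement comment for TIF_GRSEC_SETXID is about ARM's flexible second operand: a data-processing immediate must be an 8-bit pattern rotated right by an even amount, and _TIF_SYSCALL_WORK is tested with a single instruction. With TIF_SYSCALL_TRACE at bit 8, the extended mask is (1<<8)|(1<<9)|(1<<10)|(1<<11)|(1<<12) = 0x1f00, i.e. the 8-bit pattern 0x1f shifted up by 8, which still encodes as one immediate; a flag much further out (say bit 17) could not be folded into the same tst.
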
2183diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
2184index 6e924d3..a9f3ddf 100644
2185--- a/arch/arm/include/asm/tlbflush.h
2186+++ b/arch/arm/include/asm/tlbflush.h
2187@@ -430,6 +430,21 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
2188 }
2189 }
2190
2191+#ifdef CONFIG_ARM_ERRATA_798181
2192+static inline void dummy_flush_tlb_a15_erratum(void)
2193+{
2194+ /*
2195+ * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
2196+ */
2197+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
2198+ dsb();
2199+}
2200+#else
2201+static inline void dummy_flush_tlb_a15_erratum(void)
2202+{
2203+}
2204+#endif
2205+
2206 /*
2207 * flush_pmd_entry
2208 *
2209diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2210index 7e1f760..752fcb7 100644
2211--- a/arch/arm/include/asm/uaccess.h
2212+++ b/arch/arm/include/asm/uaccess.h
2213@@ -18,6 +18,7 @@
2214 #include <asm/domain.h>
2215 #include <asm/unified.h>
2216 #include <asm/compiler.h>
2217+#include <asm/pgtable.h>
2218
2219 #define VERIFY_READ 0
2220 #define VERIFY_WRITE 1
2221@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2222 #define USER_DS TASK_SIZE
2223 #define get_fs() (current_thread_info()->addr_limit)
2224
2225+static inline void pax_open_userland(void)
2226+{
2227+
2228+#ifdef CONFIG_PAX_MEMORY_UDEREF
2229+ if (get_fs() == USER_DS) {
2230+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2231+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2232+ }
2233+#endif
2234+
2235+}
2236+
2237+static inline void pax_close_userland(void)
2238+{
2239+
2240+#ifdef CONFIG_PAX_MEMORY_UDEREF
2241+ if (get_fs() == USER_DS) {
2242+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2243+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2244+ }
2245+#endif
2246+
2247+}
2248+
2249 static inline void set_fs(mm_segment_t fs)
2250 {
2251 current_thread_info()->addr_limit = fs;
2252- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2253+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2254 }
2255
2256 #define segment_eq(a,b) ((a) == (b))
2257@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2258
2259 #define get_user(x,p) \
2260 ({ \
2261+ int __e; \
2262 might_fault(); \
2263- __get_user_check(x,p); \
2264+ pax_open_userland(); \
2265+ __e = __get_user_check(x,p); \
2266+ pax_close_userland(); \
2267+ __e; \
2268 })
2269
2270 extern int __put_user_1(void *, unsigned int);
2271@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2272
2273 #define put_user(x,p) \
2274 ({ \
2275+ int __e; \
2276 might_fault(); \
2277- __put_user_check(x,p); \
2278+ pax_open_userland(); \
2279+ __e = __put_user_check(x,p); \
2280+ pax_close_userland(); \
2281+ __e; \
2282 })
2283
2284 #else /* CONFIG_MMU */
2285@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2286 #define __get_user(x,ptr) \
2287 ({ \
2288 long __gu_err = 0; \
2289+ pax_open_userland(); \
2290 __get_user_err((x),(ptr),__gu_err); \
2291+ pax_close_userland(); \
2292 __gu_err; \
2293 })
2294
2295 #define __get_user_error(x,ptr,err) \
2296 ({ \
2297+ pax_open_userland(); \
2298 __get_user_err((x),(ptr),err); \
2299+ pax_close_userland(); \
2300 (void) 0; \
2301 })
2302
2303@@ -312,13 +349,17 @@ do { \
2304 #define __put_user(x,ptr) \
2305 ({ \
2306 long __pu_err = 0; \
2307+ pax_open_userland(); \
2308 __put_user_err((x),(ptr),__pu_err); \
2309+ pax_close_userland(); \
2310 __pu_err; \
2311 })
2312
2313 #define __put_user_error(x,ptr,err) \
2314 ({ \
2315+ pax_open_userland(); \
2316 __put_user_err((x),(ptr),err); \
2317+ pax_close_userland(); \
2318 (void) 0; \
2319 })
2320
2321@@ -418,11 +459,44 @@ do { \
2322
2323
2324 #ifdef CONFIG_MMU
2325-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2326-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2327+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2328+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2329+
2330+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2331+{
2332+ unsigned long ret;
2333+
2334+ check_object_size(to, n, false);
2335+ pax_open_userland();
2336+ ret = ___copy_from_user(to, from, n);
2337+ pax_close_userland();
2338+ return ret;
2339+}
2340+
2341+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2342+{
2343+ unsigned long ret;
2344+
2345+ check_object_size(from, n, true);
2346+ pax_open_userland();
2347+ ret = ___copy_to_user(to, from, n);
2348+ pax_close_userland();
2349+ return ret;
2350+}
2351+
2352 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2353-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2354+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2355 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2356+
2357+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2358+{
2359+ unsigned long ret;
2360+ pax_open_userland();
2361+ ret = ___clear_user(addr, n);
2362+ pax_close_userland();
2363+ return ret;
2364+}
2365+
2366 #else
2367 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2368 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2369@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2370
2371 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2372 {
2373+ if ((long)n < 0)
2374+ return n;
2375+
2376 if (access_ok(VERIFY_READ, from, n))
2377 n = __copy_from_user(to, from, n);
2378 else /* security hole - plug it */
2379@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2380
2381 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2382 {
2383+ if ((long)n < 0)
2384+ return n;
2385+
2386 if (access_ok(VERIFY_WRITE, to, n))
2387 n = __copy_to_user(to, from, n);
2388 return n;
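
The (long)n < 0 guards added to copy_from_user() and copy_to_user() reject length arguments whose sign bit is set, i.e. the typical result of an unsigned subtraction that underflowed in the caller; returning n (meaning "n bytes not copied") makes such calls fail outright instead of attempting a near-4GB copy. A hypothetical caller bug the check defuses:

	static long send_reply(void __user *ubuf, const char *kbuf,
			       size_t total, size_t hdr)
	{
		/* if total < hdr, total - hdr wraps to a huge size_t;
		 * with the guard, copy_to_user() now fails immediately */
		return copy_to_user(ubuf, kbuf, total - hdr);
	}
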
2389diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2390index 96ee092..37f1844 100644
2391--- a/arch/arm/include/uapi/asm/ptrace.h
2392+++ b/arch/arm/include/uapi/asm/ptrace.h
2393@@ -73,7 +73,7 @@
2394 * ARMv7 groups of PSR bits
2395 */
2396 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2397-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2398+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2399 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2400 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2401
2402diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2403index 60d3b73..d27ee09 100644
2404--- a/arch/arm/kernel/armksyms.c
2405+++ b/arch/arm/kernel/armksyms.c
2406@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2407 #ifdef CONFIG_MMU
2408 EXPORT_SYMBOL(copy_page);
2409
2410-EXPORT_SYMBOL(__copy_from_user);
2411-EXPORT_SYMBOL(__copy_to_user);
2412-EXPORT_SYMBOL(__clear_user);
2413+EXPORT_SYMBOL(___copy_from_user);
2414+EXPORT_SYMBOL(___copy_to_user);
2415+EXPORT_SYMBOL(___clear_user);
2416
2417 EXPORT_SYMBOL(__get_user_1);
2418 EXPORT_SYMBOL(__get_user_2);
2419diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2420index 0f82098..3dbd3ee 100644
2421--- a/arch/arm/kernel/entry-armv.S
2422+++ b/arch/arm/kernel/entry-armv.S
2423@@ -47,6 +47,87 @@
2424 9997:
2425 .endm
2426
2427+ .macro pax_enter_kernel
2428+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2429+ @ make aligned space for saved DACR
2430+ sub sp, sp, #8
2431+ @ save regs
2432+ stmdb sp!, {r1, r2}
2433+ @ read DACR from cpu_domain into r1
2434+ mov r2, sp
2435+ @ assume 8K pages, since we have to split the immediate in two
2436+ bic r2, r2, #(0x1fc0)
2437+ bic r2, r2, #(0x3f)
2438+ ldr r1, [r2, #TI_CPU_DOMAIN]
2439+ @ store old DACR on stack
2440+ str r1, [sp, #8]
2441+#ifdef CONFIG_PAX_KERNEXEC
2442+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2443+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2444+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2445+#endif
2446+#ifdef CONFIG_PAX_MEMORY_UDEREF
2447+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2448+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2449+#endif
2450+ @ write r1 to current_thread_info()->cpu_domain
2451+ str r1, [r2, #TI_CPU_DOMAIN]
2452+ @ write r1 to DACR
2453+ mcr p15, 0, r1, c3, c0, 0
2454+ @ instruction sync
2455+ instr_sync
2456+ @ restore regs
2457+ ldmia sp!, {r1, r2}
2458+#endif
2459+ .endm
2460+
2461+ .macro pax_open_userland
2462+#ifdef CONFIG_PAX_MEMORY_UDEREF
2463+ @ save regs
2464+ stmdb sp!, {r0, r1}
2465+ @ read DACR from cpu_domain into r1
2466+ mov r0, sp
2467+ @ assume 8K pages, since we have to split the immediate in two
2468+ bic r0, r0, #(0x1fc0)
2469+ bic r0, r0, #(0x3f)
2470+ ldr r1, [r0, #TI_CPU_DOMAIN]
2471+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2472+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2473+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2474+ @ write r1 to current_thread_info()->cpu_domain
2475+ str r1, [r0, #TI_CPU_DOMAIN]
2476+ @ write r1 to DACR
2477+ mcr p15, 0, r1, c3, c0, 0
2478+ @ instruction sync
2479+ instr_sync
2480+ @ restore regs
2481+ ldmia sp!, {r0, r1}
2482+#endif
2483+ .endm
2484+
2485+ .macro pax_close_userland
2486+#ifdef CONFIG_PAX_MEMORY_UDEREF
2487+ @ save regs
2488+ stmdb sp!, {r0, r1}
2489+ @ read DACR from cpu_domain into r1
2490+ mov r0, sp
2491+ @ assume 8K pages, since we have to split the immediate in two
2492+ bic r0, r0, #(0x1fc0)
2493+ bic r0, r0, #(0x3f)
2494+ ldr r1, [r0, #TI_CPU_DOMAIN]
2495+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2496+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2497+ @ write r1 to current_thread_info()->cpu_domain
2498+ str r1, [r0, #TI_CPU_DOMAIN]
2499+ @ write r1 to DACR
2500+ mcr p15, 0, r1, c3, c0, 0
2501+ @ instruction sync
2502+ instr_sync
2503+ @ restore regs
2504+ ldmia sp!, {r0, r1}
2505+#endif
2506+ .endm
2507+
2508 .macro pabt_helper
2509 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2510 #ifdef MULTI_PABORT
2511@@ -89,11 +170,15 @@
2512 * Invalid mode handlers
2513 */
2514 .macro inv_entry, reason
2515+
2516+ pax_enter_kernel
2517+
2518 sub sp, sp, #S_FRAME_SIZE
2519 ARM( stmib sp, {r1 - lr} )
2520 THUMB( stmia sp, {r0 - r12} )
2521 THUMB( str sp, [sp, #S_SP] )
2522 THUMB( str lr, [sp, #S_LR] )
2523+
2524 mov r1, #\reason
2525 .endm
2526
2527@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2528 .macro svc_entry, stack_hole=0
2529 UNWIND(.fnstart )
2530 UNWIND(.save {r0 - pc} )
2531+
2532+ pax_enter_kernel
2533+
2534 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+
2536 #ifdef CONFIG_THUMB2_KERNEL
2537 SPFIX( str r0, [sp] ) @ temporarily saved
2538 SPFIX( mov r0, sp )
2539@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2540 ldmia r0, {r3 - r5}
2541 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2542 mov r6, #-1 @ "" "" "" ""
2543+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2544+ @ offset sp by 8 as done in pax_enter_kernel
2545+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2546+#else
2547 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2548+#endif
2549 SPFIX( addeq r2, r2, #4 )
2550 str r3, [sp, #-4]! @ save the "real" r0 copied
2551 @ from the exception stack
2552@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2553 .macro usr_entry
2554 UNWIND(.fnstart )
2555 UNWIND(.cantunwind ) @ don't unwind the user space
2556+
2557+ pax_enter_kernel_user
2558+
2559 sub sp, sp, #S_FRAME_SIZE
2560 ARM( stmib sp, {r1 - r12} )
2561 THUMB( stmia sp, {r0 - r12} )
2562@@ -456,7 +553,9 @@ __und_usr:
2563 tst r3, #PSR_T_BIT @ Thumb mode?
2564 bne __und_usr_thumb
2565 sub r4, r2, #4 @ ARM instr at LR - 4
2566+ pax_open_userland
2567 1: ldrt r0, [r4]
2568+ pax_close_userland
2569 #ifdef CONFIG_CPU_ENDIAN_BE8
2570 rev r0, r0 @ little endian instruction
2571 #endif
2572@@ -491,10 +590,14 @@ __und_usr_thumb:
2573 */
2574 .arch armv6t2
2575 #endif
2576+ pax_open_userland
2577 2: ldrht r5, [r4]
2578+ pax_close_userland
2579 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2580 blo __und_usr_fault_16 @ 16bit undefined instruction
2581+ pax_open_userland
2582 3: ldrht r0, [r2]
2583+ pax_close_userland
2584 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2585 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2586 orr r0, r0, r5, lsl #16
2587@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2588 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2589 THUMB( str sp, [ip], #4 )
2590 THUMB( str lr, [ip], #4 )
2591-#ifdef CONFIG_CPU_USE_DOMAINS
2592+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2593 ldr r6, [r2, #TI_CPU_DOMAIN]
2594 #endif
2595 set_tls r3, r4, r5
2596@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2597 ldr r8, =__stack_chk_guard
2598 ldr r7, [r7, #TSK_STACK_CANARY]
2599 #endif
2600-#ifdef CONFIG_CPU_USE_DOMAINS
2601+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2602 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2603 #endif
2604 mov r5, r0
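
All of the DACR macros above locate thread_info from the stack pointer by clearing its low 13 bits, the assembly form of current_thread_info() with the 8K stacks the "assume 8K pages" comments refer to. Because an ARM immediate is an 8-bit pattern rotated by an even amount, the 13-bit mask cannot be encoded in one instruction, hence the two bic steps (0x1fc0 | 0x3f == 0x1fff). In C terms:

	static inline struct thread_info *ti_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~0x1fffUL);	/* THREAD_SIZE == 8K */
	}
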
2605diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2606index a6c301e..908821b 100644
2607--- a/arch/arm/kernel/entry-common.S
2608+++ b/arch/arm/kernel/entry-common.S
2609@@ -10,18 +10,46 @@
2610
2611 #include <asm/unistd.h>
2612 #include <asm/ftrace.h>
2613+#include <asm/domain.h>
2614 #include <asm/unwind.h>
2615
2616+#include "entry-header.S"
2617+
2618 #ifdef CONFIG_NEED_RET_TO_USER
2619 #include <mach/entry-macro.S>
2620 #else
2621 .macro arch_ret_to_user, tmp1, tmp2
2622+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2623+ @ save regs
2624+ stmdb sp!, {r1, r2}
2625+ @ read DACR from cpu_domain into r1
2626+ mov r2, sp
2627+ @ assume 8K pages, since we have to split the immediate in two
2628+ bic r2, r2, #(0x1fc0)
2629+ bic r2, r2, #(0x3f)
2630+ ldr r1, [r2, #TI_CPU_DOMAIN]
2631+#ifdef CONFIG_PAX_KERNEXEC
2632+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2633+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2635+#endif
2636+#ifdef CONFIG_PAX_MEMORY_UDEREF
2637+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2638+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2639+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2640+#endif
2641+ @ write r1 to current_thread_info()->cpu_domain
2642+ str r1, [r2, #TI_CPU_DOMAIN]
2643+ @ write r1 to DACR
2644+ mcr p15, 0, r1, c3, c0, 0
2645+ @ instruction sync
2646+ instr_sync
2647+ @ restore regs
2648+ ldmia sp!, {r1, r2}
2649+#endif
2650 .endm
2651 #endif
2652
2653-#include "entry-header.S"
2654-
2655-
2656 .align 5
2657 /*
2658 * This is the fast syscall return path. We do as little as
2659@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2660
2661 .align 5
2662 ENTRY(vector_swi)
2663+
2664 sub sp, sp, #S_FRAME_SIZE
2665 stmia sp, {r0 - r12} @ Calling r0 - r12
2666 ARM( add r8, sp, #S_PC )
2667@@ -388,6 +417,12 @@ ENTRY(vector_swi)
2668 ldr scno, [lr, #-4] @ get SWI instruction
2669 #endif
2670
2671+ /*
 2672+	 * do this here to avoid the performance hit of wrapping the code above,
 2673+	 * which directly dereferences userland to parse the SWI instruction
2674+ */
2675+ pax_enter_kernel_user
2676+
2677 #ifdef CONFIG_ALIGNMENT_TRAP
2678 ldr ip, __cr_alignment
2679 ldr ip, [ip]
2680diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2681index 9a8531e..812e287 100644
2682--- a/arch/arm/kernel/entry-header.S
2683+++ b/arch/arm/kernel/entry-header.S
2684@@ -73,9 +73,66 @@
2685 msr cpsr_c, \rtemp @ switch back to the SVC mode
2686 .endm
2687
2688+ .macro pax_enter_kernel_user
2689+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2690+ @ save regs
2691+ stmdb sp!, {r0, r1}
2692+ @ read DACR from cpu_domain into r1
2693+ mov r0, sp
2694+ @ assume 8K pages, since we have to split the immediate in two
2695+ bic r0, r0, #(0x1fc0)
2696+ bic r0, r0, #(0x3f)
2697+ ldr r1, [r0, #TI_CPU_DOMAIN]
2698+#ifdef CONFIG_PAX_MEMORY_UDEREF
2699+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2700+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2701+#endif
2702+#ifdef CONFIG_PAX_KERNEXEC
2703+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2704+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2705+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2706+#endif
2707+ @ write r1 to current_thread_info()->cpu_domain
2708+ str r1, [r0, #TI_CPU_DOMAIN]
2709+ @ write r1 to DACR
2710+ mcr p15, 0, r1, c3, c0, 0
2711+ @ instruction sync
2712+ instr_sync
2713+ @ restore regs
2714+ ldmia sp!, {r0, r1}
2715+#endif
2716+ .endm
2717+
2718+ .macro pax_exit_kernel
2719+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2720+ @ save regs
2721+ stmdb sp!, {r0, r1}
2722+ @ read old DACR from stack into r1
2723+ ldr r1, [sp, #(8 + S_SP)]
2724+ sub r1, r1, #8
2725+ ldr r1, [r1]
2726+
2727+ @ write r1 to current_thread_info()->cpu_domain
2728+ mov r0, sp
2729+ @ assume 8K pages, since we have to split the immediate in two
2730+ bic r0, r0, #(0x1fc0)
2731+ bic r0, r0, #(0x3f)
2732+ str r1, [r0, #TI_CPU_DOMAIN]
2733+ @ write r1 to DACR
2734+ mcr p15, 0, r1, c3, c0, 0
2735+ @ instruction sync
2736+ instr_sync
2737+ @ restore regs
2738+ ldmia sp!, {r0, r1}
2739+#endif
2740+ .endm
2741+
2742 #ifndef CONFIG_THUMB2_KERNEL
2743 .macro svc_exit, rpsr
2744 msr spsr_cxsf, \rpsr
2745+
2746+ pax_exit_kernel
2747+
2748 #if defined(CONFIG_CPU_V6)
2749 ldr r0, [sp]
2750 strex r1, r2, [sp] @ clear the exclusive monitor
2751@@ -121,6 +178,9 @@
2752 .endm
2753 #else /* CONFIG_THUMB2_KERNEL */
2754 .macro svc_exit, rpsr
2755+
2756+ pax_exit_kernel
2757+
2758 ldr lr, [sp, #S_SP] @ top of the stack
2759 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2760 clrex @ clear the exclusive monitor
2761diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2762index 2adda11..7fbe958 100644
2763--- a/arch/arm/kernel/fiq.c
2764+++ b/arch/arm/kernel/fiq.c
2765@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2766 #if defined(CONFIG_CPU_USE_DOMAINS)
2767 memcpy((void *)0xffff001c, start, length);
2768 #else
2769+ pax_open_kernel();
2770 memcpy(vectors_page + 0x1c, start, length);
2771+ pax_close_kernel();
2772 #endif
2773 flush_icache_range(0xffff001c, 0xffff001c + length);
2774 if (!vectors_high())
2775diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2776index e0eb9a1..caee108 100644
2777--- a/arch/arm/kernel/head.S
2778+++ b/arch/arm/kernel/head.S
2779@@ -52,7 +52,9 @@
2780 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2781
2782 .macro pgtbl, rd, phys
2783- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2784+ mov \rd, #TEXT_OFFSET
2785+ sub \rd, #PG_DIR_SIZE
2786+ add \rd, \rd, \phys
2787 .endm
2788
2789 /*
2790@@ -267,7 +269,7 @@ __create_page_tables:
2791 addne r6, r6, #1 << SECTION_SHIFT
2792 strne r6, [r3]
2793
2794-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2795+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2796 sub r4, r4, #4 @ Fixup page table pointer
2797 @ for 64-bit descriptors
2798 #endif
2799@@ -434,7 +436,7 @@ __enable_mmu:
2800 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2801 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2802 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2803- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2804+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2805 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2806 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2807 #endif
2808diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2809index 5ff2e77..556d030 100644
2810--- a/arch/arm/kernel/hw_breakpoint.c
2811+++ b/arch/arm/kernel/hw_breakpoint.c
2812@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2813 return NOTIFY_OK;
2814 }
2815
2816-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2817+static struct notifier_block dbg_reset_nb = {
2818 .notifier_call = dbg_reset_notify,
2819 };
2820
2821diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2822index 1e9be5d..03edbc2 100644
2823--- a/arch/arm/kernel/module.c
2824+++ b/arch/arm/kernel/module.c
2825@@ -37,12 +37,37 @@
2826 #endif
2827
2828 #ifdef CONFIG_MMU
2829-void *module_alloc(unsigned long size)
2830+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2831 {
2832+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2833+ return NULL;
2834 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2835- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2836+ GFP_KERNEL, prot, -1,
2837 __builtin_return_address(0));
2838 }
2839+
2840+void *module_alloc(unsigned long size)
2841+{
2842+
2843+#ifdef CONFIG_PAX_KERNEXEC
2844+ return __module_alloc(size, PAGE_KERNEL);
2845+#else
2846+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2847+#endif
2848+
2849+}
2850+
2851+#ifdef CONFIG_PAX_KERNEXEC
2852+void module_free_exec(struct module *mod, void *module_region)
2853+{
2854+ module_free(mod, module_region);
2855+}
2856+
2857+void *module_alloc_exec(unsigned long size)
2858+{
2859+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2860+}
2861+#endif
2862 #endif
2863
2864 int
2865diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2866index 07314af..c46655c 100644
2867--- a/arch/arm/kernel/patch.c
2868+++ b/arch/arm/kernel/patch.c
2869@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2870 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2871 int size;
2872
2873+ pax_open_kernel();
2874 if (thumb2 && __opcode_is_thumb16(insn)) {
2875 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2876 size = sizeof(u16);
2877@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2878 *(u32 *)addr = insn;
2879 size = sizeof(u32);
2880 }
2881+ pax_close_kernel();
2882
2883 flush_icache_range((uintptr_t)(addr),
2884 (uintptr_t)(addr) + size);
2885diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2886index 5f66206..dce492f 100644
2887--- a/arch/arm/kernel/perf_event_cpu.c
2888+++ b/arch/arm/kernel/perf_event_cpu.c
2889@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2890 return NOTIFY_OK;
2891 }
2892
2893-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2894+static struct notifier_block cpu_pmu_hotplug_notifier = {
2895 .notifier_call = cpu_pmu_notify,
2896 };
2897
2898diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2899index c6dec5f..e0fddd1 100644
2900--- a/arch/arm/kernel/process.c
2901+++ b/arch/arm/kernel/process.c
2902@@ -28,7 +28,6 @@
2903 #include <linux/tick.h>
2904 #include <linux/utsname.h>
2905 #include <linux/uaccess.h>
2906-#include <linux/random.h>
2907 #include <linux/hw_breakpoint.h>
2908 #include <linux/cpuidle.h>
2909 #include <linux/leds.h>
2910@@ -256,9 +255,10 @@ void machine_power_off(void)
2911 machine_shutdown();
2912 if (pm_power_off)
2913 pm_power_off();
2914+ BUG();
2915 }
2916
2917-void machine_restart(char *cmd)
2918+__noreturn void machine_restart(char *cmd)
2919 {
2920 machine_shutdown();
2921
2922@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2923 init_utsname()->release,
2924 (int)strcspn(init_utsname()->version, " "),
2925 init_utsname()->version);
2926- print_symbol("PC is at %s\n", instruction_pointer(regs));
2927- print_symbol("LR is at %s\n", regs->ARM_lr);
2928+ printk("PC is at %pA\n", instruction_pointer(regs));
2929+ printk("LR is at %pA\n", regs->ARM_lr);
2930 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2931 "sp : %08lx ip : %08lx fp : %08lx\n",
2932 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2933@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2934 return 0;
2935 }
2936
2937-unsigned long arch_randomize_brk(struct mm_struct *mm)
2938-{
2939- unsigned long range_end = mm->brk + 0x02000000;
2940- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2941-}
2942-
2943 #ifdef CONFIG_MMU
2944 /*
2945 * The vectors page is always readable from user space for the
2946@@ -470,9 +464,8 @@ static int __init gate_vma_init(void)
2947 {
2948 gate_vma.vm_start = 0xffff0000;
2949 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
2950- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2951- gate_vma.vm_flags = VM_READ | VM_EXEC |
2952- VM_MAYREAD | VM_MAYEXEC;
2953+ gate_vma.vm_flags = VM_NONE;
2954+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2955 return 0;
2956 }
2957 arch_initcall(gate_vma_init);
2958diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2959index 03deeff..741ce88 100644
2960--- a/arch/arm/kernel/ptrace.c
2961+++ b/arch/arm/kernel/ptrace.c
2962@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2963 return current_thread_info()->syscall;
2964 }
2965
2966+#ifdef CONFIG_GRKERNSEC_SETXID
2967+extern void gr_delayed_cred_worker(void);
2968+#endif
2969+
2970 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2971 {
2972 current_thread_info()->syscall = scno;
2973
2974+#ifdef CONFIG_GRKERNSEC_SETXID
2975+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2976+ gr_delayed_cred_worker();
2977+#endif
2978+
2979 /* Do the secure computing check first; failures should be fast. */
2980 if (secure_computing(scno) == -1)
2981 return -1;
2982diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2983index 3f6cbb2..39305c7 100644
2984--- a/arch/arm/kernel/setup.c
2985+++ b/arch/arm/kernel/setup.c
2986@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2987 unsigned int elf_hwcap __read_mostly;
2988 EXPORT_SYMBOL(elf_hwcap);
2989
2990+pteval_t __supported_pte_mask __read_only;
2991+pmdval_t __supported_pmd_mask __read_only;
2992
2993 #ifdef MULTI_CPU
2994-struct processor processor __read_mostly;
2995+struct processor processor;
2996 #endif
2997 #ifdef MULTI_TLB
2998-struct cpu_tlb_fns cpu_tlb __read_mostly;
2999+struct cpu_tlb_fns cpu_tlb __read_only;
3000 #endif
3001 #ifdef MULTI_USER
3002-struct cpu_user_fns cpu_user __read_mostly;
3003+struct cpu_user_fns cpu_user __read_only;
3004 #endif
3005 #ifdef MULTI_CACHE
3006-struct cpu_cache_fns cpu_cache __read_mostly;
3007+struct cpu_cache_fns cpu_cache __read_only;
3008 #endif
3009 #ifdef CONFIG_OUTER_CACHE
3010-struct outer_cache_fns outer_cache __read_mostly;
3011+struct outer_cache_fns outer_cache __read_only;
3012 EXPORT_SYMBOL(outer_cache);
3013 #endif
3014
3015@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3016 asm("mrc p15, 0, %0, c0, c1, 4"
3017 : "=r" (mmfr0));
3018 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3019- (mmfr0 & 0x000000f0) >= 0x00000030)
3020+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3021 cpu_arch = CPU_ARCH_ARMv7;
3022- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3023+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3024+ __supported_pte_mask |= L_PTE_PXN;
3025+ __supported_pmd_mask |= PMD_PXNTABLE;
3026+ }
3027+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3028 (mmfr0 & 0x000000f0) == 0x00000020)
3029 cpu_arch = CPU_ARCH_ARMv6;
3030 else
3031@@ -462,7 +468,7 @@ static void __init setup_processor(void)
3032 __cpu_architecture = __get_cpu_architecture();
3033
3034 #ifdef MULTI_CPU
3035- processor = *list->proc;
3036+ memcpy((void *)&processor, list->proc, sizeof processor);
3037 #endif
3038 #ifdef MULTI_TLB
3039 cpu_tlb = *list->tlb;
3040@@ -524,7 +530,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
3041 size -= start & ~PAGE_MASK;
3042 bank->start = PAGE_ALIGN(start);
3043
3044-#ifndef CONFIG_LPAE
3045+#ifndef CONFIG_ARM_LPAE
3046 if (bank->start + size < bank->start) {
3047 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
3048 "32-bit physical address space\n", (long long)start);
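
The setup.c hunk keys the new PXN bits off ID_MMFR0: bits [3:0] report the VMSA level, and the patch enables L_PTE_PXN/PMD_PXNTABLE only for the values 4 (VMSAv7 with the PXN extension) or 5 (VMSAv7 with the long-descriptor format, which includes PXN). Reduced to a predicate:

	static int cpu_supports_pxn(unsigned int mmfr0)
	{
		unsigned int vmsa = mmfr0 & 0x0000000f;

		return vmsa == 4 || vmsa == 5;	/* VMSAv7 + PXN */
	}
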
3049diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3050index 56f72d2..6924200 100644
3051--- a/arch/arm/kernel/signal.c
3052+++ b/arch/arm/kernel/signal.c
3053@@ -433,22 +433,14 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
3054 __put_user(sigreturn_codes[idx+1], rc+1))
3055 return 1;
3056
3057- if (cpsr & MODE32_BIT) {
3058- /*
3059- * 32-bit code can use the new high-page
3060- * signal return code support.
3061- */
3062- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
3063- } else {
3064- /*
3065- * Ensure that the instruction cache sees
3066- * the return code written onto the stack.
3067- */
3068- flush_icache_range((unsigned long)rc,
3069- (unsigned long)(rc + 2));
3070+ /*
3071+ * Ensure that the instruction cache sees
3072+ * the return code written onto the stack.
3073+ */
3074+ flush_icache_range((unsigned long)rc,
3075+ (unsigned long)(rc + 2));
3076
3077- retcode = ((unsigned long)rc) + thumb;
3078- }
3079+ retcode = ((unsigned long)rc) + thumb;
3080 }
3081
3082 regs->ARM_r0 = usig;
3083diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3084index 58af91c..343ce99 100644
3085--- a/arch/arm/kernel/smp.c
3086+++ b/arch/arm/kernel/smp.c
3087@@ -70,7 +70,7 @@ enum ipi_msg_type {
3088
3089 static DECLARE_COMPLETION(cpu_running);
3090
3091-static struct smp_operations smp_ops;
3092+static struct smp_operations smp_ops __read_only;
3093
3094 void __init smp_set_ops(struct smp_operations *ops)
3095 {
3096diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
3097index 02c5d2c..e5695ad 100644
3098--- a/arch/arm/kernel/smp_tlb.c
3099+++ b/arch/arm/kernel/smp_tlb.c
3100@@ -12,6 +12,7 @@
3101
3102 #include <asm/smp_plat.h>
3103 #include <asm/tlbflush.h>
3104+#include <asm/mmu_context.h>
3105
3106 /**********************************************************************/
3107
3108@@ -64,12 +65,72 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
3109 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
3110 }
3111
3112+#ifdef CONFIG_ARM_ERRATA_798181
3113+static int erratum_a15_798181(void)
3114+{
3115+ unsigned int midr = read_cpuid_id();
3116+
3117+ /* Cortex-A15 r0p0..r3p2 affected */
3118+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
3119+ return 0;
3120+ return 1;
3121+}
3122+#else
3123+static int erratum_a15_798181(void)
3124+{
3125+ return 0;
3126+}
3127+#endif
3128+
3129+static void ipi_flush_tlb_a15_erratum(void *arg)
3130+{
3131+ dmb();
3132+}
3133+
3134+static void broadcast_tlb_a15_erratum(void)
3135+{
3136+ if (!erratum_a15_798181())
3137+ return;
3138+
3139+ dummy_flush_tlb_a15_erratum();
3140+ smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
3141+ NULL, 1);
3142+}
3143+
3144+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
3145+{
3146+ int cpu;
3147+ cpumask_t mask = { CPU_BITS_NONE };
3148+
3149+ if (!erratum_a15_798181())
3150+ return;
3151+
3152+ dummy_flush_tlb_a15_erratum();
3153+ for_each_online_cpu(cpu) {
3154+ if (cpu == smp_processor_id())
3155+ continue;
3156+ /*
3157+ * We only need to send an IPI if the other CPUs are running
3158+ * the same ASID as the one being invalidated. There is no
3159+ * need for locking around the active_asids check since the
3160+ * switch_mm() function has at least one dmb() (as required by
3161+ * this workaround) in case a context switch happens on
3162+ * another CPU after the condition below.
3163+ */
3164+ if (atomic64_read(&mm->context.id) ==
3165+ atomic64_read(&per_cpu(active_asids, cpu)))
3166+ cpumask_set_cpu(cpu, &mask);
3167+ }
3168+ smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
3169+}
3170+
3171 void flush_tlb_all(void)
3172 {
3173 if (tlb_ops_need_broadcast())
3174 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
3175 else
3176 local_flush_tlb_all();
3177+ broadcast_tlb_a15_erratum();
3178 }
3179
3180 void flush_tlb_mm(struct mm_struct *mm)
3181@@ -78,6 +139,7 @@ void flush_tlb_mm(struct mm_struct *mm)
3182 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
3183 else
3184 local_flush_tlb_mm(mm);
3185+ broadcast_tlb_mm_a15_erratum(mm);
3186 }
3187
3188 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3189@@ -90,6 +152,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3190 &ta, 1);
3191 } else
3192 local_flush_tlb_page(vma, uaddr);
3193+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3194 }
3195
3196 void flush_tlb_kernel_page(unsigned long kaddr)
3197@@ -100,6 +163,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
3198 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
3199 } else
3200 local_flush_tlb_kernel_page(kaddr);
3201+ broadcast_tlb_a15_erratum();
3202 }
3203
3204 void flush_tlb_range(struct vm_area_struct *vma,
3205@@ -114,6 +178,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
3206 &ta, 1);
3207 } else
3208 local_flush_tlb_range(vma, start, end);
3209+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3210 }
3211
3212 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3213@@ -125,5 +190,6 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3214 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
3215 } else
3216 local_flush_tlb_kernel_range(start, end);
3217+ broadcast_tlb_a15_erratum();
3218 }
3219
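
The MIDR test in erratum_a15_798181() selects exactly Cortex-A15 r0p0..r3p2: masking with 0xff0ffff0 keeps the implementer ([31:24] = 0x41, ARM), architecture ([19:16]) and part number ([15:4] = 0xc0f, Cortex-A15) fields while discarding variant ([23:20], the "rN") and revision ([3:0], the "pN"); the comparison against 0x413fc0f2 then caps those two at r3p2. Equivalently:

	static int is_a15_r0p0_to_r3p2(unsigned int midr)
	{
		if ((midr & 0xff0ffff0) != 0x410fc0f0)	/* not a Cortex-A15 */
			return 0;
		return midr <= 0x413fc0f2;		/* at most variant 3, rev 2 */
	}
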
3220diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3221index b0179b8..829510e 100644
3222--- a/arch/arm/kernel/traps.c
3223+++ b/arch/arm/kernel/traps.c
3224@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3225 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3226 {
3227 #ifdef CONFIG_KALLSYMS
3228- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3229+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3230 #else
3231 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3232 #endif
3233@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3234 static int die_owner = -1;
3235 static unsigned int die_nest_count;
3236
3237+extern void gr_handle_kernel_exploit(void);
3238+
3239 static unsigned long oops_begin(void)
3240 {
3241 int cpu;
3242@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3243 panic("Fatal exception in interrupt");
3244 if (panic_on_oops)
3245 panic("Fatal exception");
3246+
3247+ gr_handle_kernel_exploit();
3248+
3249 if (signr)
3250 do_exit(signr);
3251 }
3252@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3253 * The user helper at 0xffff0fe0 must be used instead.
3254 * (see entry-armv.S for details)
3255 */
3256+ pax_open_kernel();
3257 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3258+ pax_close_kernel();
3259 }
3260 return 0;
3261
3262@@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base)
3263 */
3264 kuser_get_tls_init(vectors);
3265
3266- /*
3267- * Copy signal return handlers into the vector page, and
3268- * set sigreturn to be a pointer to these.
3269- */
3270- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3271- sigreturn_codes, sizeof(sigreturn_codes));
3272-
3273 flush_icache_range(vectors, vectors + PAGE_SIZE);
3274- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3275+
3276+#ifndef CONFIG_PAX_MEMORY_UDEREF
3277+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3278+#endif
3279+
3280 }
3281diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3282index 11c1785..1b209f4 100644
3283--- a/arch/arm/kernel/vmlinux.lds.S
3284+++ b/arch/arm/kernel/vmlinux.lds.S
3285@@ -8,7 +8,11 @@
3286 #include <asm/thread_info.h>
3287 #include <asm/memory.h>
3288 #include <asm/page.h>
3289-
3290+
3291+#ifdef CONFIG_PAX_KERNEXEC
3292+#include <asm/pgtable.h>
3293+#endif
3294+
3295 #define PROC_INFO \
3296 . = ALIGN(4); \
3297 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3298@@ -90,6 +94,11 @@ SECTIONS
3299 _text = .;
3300 HEAD_TEXT
3301 }
3302+
3303+#ifdef CONFIG_PAX_KERNEXEC
3304+ . = ALIGN(1<<SECTION_SHIFT);
3305+#endif
3306+
3307 .text : { /* Real text segment */
3308 _stext = .; /* Text and read-only data */
3309 __exception_text_start = .;
3310@@ -112,6 +121,8 @@ SECTIONS
3311 ARM_CPU_KEEP(PROC_INFO)
3312 }
3313
3314+ _etext = .; /* End of text section */
3315+
3316 RO_DATA(PAGE_SIZE)
3317
3318 . = ALIGN(4);
3319@@ -142,7 +153,9 @@ SECTIONS
3320
3321 NOTES
3322
3323- _etext = .; /* End of text and rodata section */
3324+#ifdef CONFIG_PAX_KERNEXEC
3325+ . = ALIGN(1<<SECTION_SHIFT);
3326+#endif
3327
3328 #ifndef CONFIG_XIP_KERNEL
3329 . = ALIGN(PAGE_SIZE);
3330@@ -203,6 +216,11 @@ SECTIONS
3331 . = PAGE_OFFSET + TEXT_OFFSET;
3332 #else
3333 __init_end = .;
3334+
3335+#ifdef CONFIG_PAX_KERNEXEC
3336+ . = ALIGN(1<<SECTION_SHIFT);
3337+#endif
3338+
3339 . = ALIGN(THREAD_SIZE);
3340 __data_loc = .;
3341 #endif
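
The ALIGN(1<<SECTION_SHIFT) pads exist because KERNEXEC enforces its RX/RW split at section granularity (1MB sections with the classic 2-level page tables, 2MB with LPAE): the region from _text to _etext can only be mapped MT_MEMORY_RX, and everything after it MT_MEMORY_RW, if each region begins on a section boundary, so up to one section of padding per boundary is traded for the ability to drop the write (or execute) permission on whole sections. Note the hunk also moves _etext to the end of .text proper, ahead of RO_DATA, so that read-only data no longer has to remain executable.
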
3342diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3343index 14a0d98..7771a7d 100644
3344--- a/arch/arm/lib/clear_user.S
3345+++ b/arch/arm/lib/clear_user.S
3346@@ -12,14 +12,14 @@
3347
3348 .text
3349
3350-/* Prototype: int __clear_user(void *addr, size_t sz)
3351+/* Prototype: int ___clear_user(void *addr, size_t sz)
3352 * Purpose : clear some user memory
3353 * Params : addr - user memory address to clear
3354 * : sz - number of bytes to clear
3355 * Returns : number of bytes NOT cleared
3356 */
3357 ENTRY(__clear_user_std)
3358-WEAK(__clear_user)
3359+WEAK(___clear_user)
3360 stmfd sp!, {r1, lr}
3361 mov r2, #0
3362 cmp r1, #4
3363@@ -44,7 +44,7 @@ WEAK(__clear_user)
3364 USER( strnebt r2, [r0])
3365 mov r0, #0
3366 ldmfd sp!, {r1, pc}
3367-ENDPROC(__clear_user)
3368+ENDPROC(___clear_user)
3369 ENDPROC(__clear_user_std)
3370
3371 .pushsection .fixup,"ax"
3372diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3373index 66a477a..bee61d3 100644
3374--- a/arch/arm/lib/copy_from_user.S
3375+++ b/arch/arm/lib/copy_from_user.S
3376@@ -16,7 +16,7 @@
3377 /*
3378 * Prototype:
3379 *
3380- * size_t __copy_from_user(void *to, const void *from, size_t n)
3381+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3382 *
3383 * Purpose:
3384 *
3385@@ -84,11 +84,11 @@
3386
3387 .text
3388
3389-ENTRY(__copy_from_user)
3390+ENTRY(___copy_from_user)
3391
3392 #include "copy_template.S"
3393
3394-ENDPROC(__copy_from_user)
3395+ENDPROC(___copy_from_user)
3396
3397 .pushsection .fixup,"ax"
3398 .align 0
3399diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3400index 6ee2f67..d1cce76 100644
3401--- a/arch/arm/lib/copy_page.S
3402+++ b/arch/arm/lib/copy_page.S
3403@@ -10,6 +10,7 @@
3404 * ASM optimised string functions
3405 */
3406 #include <linux/linkage.h>
3407+#include <linux/const.h>
3408 #include <asm/assembler.h>
3409 #include <asm/asm-offsets.h>
3410 #include <asm/cache.h>
3411diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3412index d066df6..df28194 100644
3413--- a/arch/arm/lib/copy_to_user.S
3414+++ b/arch/arm/lib/copy_to_user.S
3415@@ -16,7 +16,7 @@
3416 /*
3417 * Prototype:
3418 *
3419- * size_t __copy_to_user(void *to, const void *from, size_t n)
3420+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3421 *
3422 * Purpose:
3423 *
3424@@ -88,11 +88,11 @@
3425 .text
3426
3427 ENTRY(__copy_to_user_std)
3428-WEAK(__copy_to_user)
3429+WEAK(___copy_to_user)
3430
3431 #include "copy_template.S"
3432
3433-ENDPROC(__copy_to_user)
3434+ENDPROC(___copy_to_user)
3435 ENDPROC(__copy_to_user_std)
3436
3437 .pushsection .fixup,"ax"
3438diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3439index 7d08b43..f7ca7ea 100644
3440--- a/arch/arm/lib/csumpartialcopyuser.S
3441+++ b/arch/arm/lib/csumpartialcopyuser.S
3442@@ -57,8 +57,8 @@
3443 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3444 */
3445
3446-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3447-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3448+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3449+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3450
3451 #include "csumpartialcopygeneric.S"
3452
3453diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3454index 6b93f6a..4aa5e85 100644
3455--- a/arch/arm/lib/delay.c
3456+++ b/arch/arm/lib/delay.c
3457@@ -28,7 +28,7 @@
3458 /*
3459 * Default to the loop-based delay implementation.
3460 */
3461-struct arm_delay_ops arm_delay_ops = {
3462+struct arm_delay_ops arm_delay_ops __read_only = {
3463 .delay = __loop_delay,
3464 .const_udelay = __loop_const_udelay,
3465 .udelay = __loop_udelay,
3466diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3467index 025f742..8432b08 100644
3468--- a/arch/arm/lib/uaccess_with_memcpy.c
3469+++ b/arch/arm/lib/uaccess_with_memcpy.c
3470@@ -104,7 +104,7 @@ out:
3471 }
3472
3473 unsigned long
3474-__copy_to_user(void __user *to, const void *from, unsigned long n)
3475+___copy_to_user(void __user *to, const void *from, unsigned long n)
3476 {
3477 /*
3478 * This test is stubbed out of the main function above to keep
3479diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3480index bac21a5..b67ef8e 100644
3481--- a/arch/arm/mach-kirkwood/common.c
3482+++ b/arch/arm/mach-kirkwood/common.c
3483@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3484 clk_gate_ops.disable(hw);
3485 }
3486
3487-static struct clk_ops clk_gate_fn_ops;
3488+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3489+{
3490+ return clk_gate_ops.is_enabled(hw);
3491+}
3492+
3493+static struct clk_ops clk_gate_fn_ops = {
3494+ .enable = clk_gate_fn_enable,
3495+ .disable = clk_gate_fn_disable,
3496+ .is_enabled = clk_gate_fn_is_enabled,
3497+};
3498
3499 static struct clk __init *clk_register_gate_fn(struct device *dev,
3500 const char *name,
3501@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3502 gate_fn->fn_en = fn_en;
3503 gate_fn->fn_dis = fn_dis;
3504
3505- /* ops is the gate ops, but with our enable/disable functions */
3506- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3507- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3508- clk_gate_fn_ops = clk_gate_ops;
3509- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3510- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3511- }
3512-
3513 clk = clk_register(dev, &gate_fn->gate.hw);
3514
3515 if (IS_ERR(clk))
3516diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3517index 0abb30f..54064da 100644
3518--- a/arch/arm/mach-omap2/board-n8x0.c
3519+++ b/arch/arm/mach-omap2/board-n8x0.c
3520@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3521 }
3522 #endif
3523
3524-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3525+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3526 .late_init = n8x0_menelaus_late_init,
3527 };
3528
3529diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3530index 8033cb7..2f7cb62 100644
3531--- a/arch/arm/mach-omap2/gpmc.c
3532+++ b/arch/arm/mach-omap2/gpmc.c
3533@@ -139,7 +139,6 @@ struct omap3_gpmc_regs {
3534 };
3535
3536 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3537-static struct irq_chip gpmc_irq_chip;
3538 static unsigned gpmc_irq_start;
3539
3540 static struct resource gpmc_mem_root;
3541@@ -700,6 +699,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3542
3543 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3544
3545+static struct irq_chip gpmc_irq_chip = {
3546+ .name = "gpmc",
3547+ .irq_startup = gpmc_irq_noop_ret,
3548+ .irq_enable = gpmc_irq_enable,
3549+ .irq_disable = gpmc_irq_disable,
3550+ .irq_shutdown = gpmc_irq_noop,
3551+ .irq_ack = gpmc_irq_noop,
3552+ .irq_mask = gpmc_irq_noop,
3553+ .irq_unmask = gpmc_irq_noop,
3554+
3555+};
3556+
3557 static int gpmc_setup_irq(void)
3558 {
3559 int i;
3560@@ -714,15 +725,6 @@ static int gpmc_setup_irq(void)
3561 return gpmc_irq_start;
3562 }
3563
3564- gpmc_irq_chip.name = "gpmc";
3565- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3566- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3567- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3568- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3569- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3570- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3571- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3572-
3573 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3574 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3575
3576diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3577index 5d3b4f4..ddba3c0 100644
3578--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3579+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3580@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3581 return NOTIFY_OK;
3582 }
3583
3584-static struct notifier_block __refdata irq_hotplug_notifier = {
3585+static struct notifier_block irq_hotplug_notifier = {
3586 .notifier_call = irq_cpu_hotplug_notify,
3587 };
3588
3589diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3590index e065daa..7b1ad9b 100644
3591--- a/arch/arm/mach-omap2/omap_device.c
3592+++ b/arch/arm/mach-omap2/omap_device.c
3593@@ -686,7 +686,7 @@ void omap_device_delete(struct omap_device *od)
3594 * passes along the return value of omap_device_build_ss().
3595 */
3596 struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id,
3597- struct omap_hwmod *oh, void *pdata,
3598+ struct omap_hwmod *oh, const void *pdata,
3599 int pdata_len,
3600 struct omap_device_pm_latency *pm_lats,
3601 int pm_lats_cnt, int is_early_device)
3602@@ -720,7 +720,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev
3603 */
3604 struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id,
3605 struct omap_hwmod **ohs, int oh_cnt,
3606- void *pdata, int pdata_len,
3607+ const void *pdata, int pdata_len,
3608 struct omap_device_pm_latency *pm_lats,
3609 int pm_lats_cnt, int is_early_device)
3610 {
3611diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3612index 0933c59..42b8e2d 100644
3613--- a/arch/arm/mach-omap2/omap_device.h
3614+++ b/arch/arm/mach-omap2/omap_device.h
3615@@ -91,14 +91,14 @@ int omap_device_shutdown(struct platform_device *pdev);
3616 /* Core code interface */
3617
3618 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3619- struct omap_hwmod *oh, void *pdata,
3620+ struct omap_hwmod *oh, const void *pdata,
3621 int pdata_len,
3622 struct omap_device_pm_latency *pm_lats,
3623 int pm_lats_cnt, int is_early_device);
3624
3625 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3626 struct omap_hwmod **oh, int oh_cnt,
3627- void *pdata, int pdata_len,
3628+ const void *pdata, int pdata_len,
3629 struct omap_device_pm_latency *pm_lats,
3630 int pm_lats_cnt, int is_early_device);
3631
3632diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3633index 4653efb..8c60bf7 100644
3634--- a/arch/arm/mach-omap2/omap_hwmod.c
3635+++ b/arch/arm/mach-omap2/omap_hwmod.c
3636@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3637 int (*init_clkdm)(struct omap_hwmod *oh);
3638 void (*update_context_lost)(struct omap_hwmod *oh);
3639 int (*get_context_lost)(struct omap_hwmod *oh);
3640-};
3641+} __no_const;
3642
3643 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3644-static struct omap_hwmod_soc_ops soc_ops;
3645+static struct omap_hwmod_soc_ops soc_ops __read_only;
3646
3647 /* omap_hwmod_list contains all registered struct omap_hwmods */
3648 static LIST_HEAD(omap_hwmod_list);
3649diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3650index 7c2b4ed..b2ea51f 100644
3651--- a/arch/arm/mach-omap2/wd_timer.c
3652+++ b/arch/arm/mach-omap2/wd_timer.c
3653@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3654 struct omap_hwmod *oh;
3655 char *oh_name = "wd_timer2";
3656 char *dev_name = "omap_wdt";
3657- struct omap_wd_timer_platform_data pdata;
3658+ static struct omap_wd_timer_platform_data pdata = {
3659+ .read_reset_sources = prm_read_reset_sources
3660+ };
3661
3662 if (!cpu_class_is_omap2() || of_have_populated_dt())
3663 return 0;
3664@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3665 return -EINVAL;
3666 }
3667
3668- pdata.read_reset_sources = prm_read_reset_sources;
3669-
3670 pdev = omap_device_build(dev_name, id, oh, &pdata,
3671 sizeof(struct omap_wd_timer_platform_data),
3672 NULL, 0, 0);
3673diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3674index 6be4c4d..32ac32a 100644
3675--- a/arch/arm/mach-ux500/include/mach/setup.h
3676+++ b/arch/arm/mach-ux500/include/mach/setup.h
3677@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3678 .type = MT_DEVICE, \
3679 }
3680
3681-#define __MEM_DEV_DESC(x, sz) { \
3682- .virtual = IO_ADDRESS(x), \
3683- .pfn = __phys_to_pfn(x), \
3684- .length = sz, \
3685- .type = MT_MEMORY, \
3686-}
3687-
3688 extern struct smp_operations ux500_smp_ops;
3689 extern void ux500_cpu_die(unsigned int cpu);
3690
3691diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3692index 3fd629d..8b1aca9 100644
3693--- a/arch/arm/mm/Kconfig
3694+++ b/arch/arm/mm/Kconfig
3695@@ -425,7 +425,7 @@ config CPU_32v5
3696
3697 config CPU_32v6
3698 bool
3699- select CPU_USE_DOMAINS if CPU_V6 && MMU
3700+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3701 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3702
3703 config CPU_32v6K
3704@@ -577,6 +577,7 @@ config CPU_CP15_MPU
3705
3706 config CPU_USE_DOMAINS
3707 bool
3708+ depends on !ARM_LPAE && !PAX_KERNEXEC
3709 help
3710 This option enables or disables the use of domain switching
3711 via the set_fs() function.
3712diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3713index db26e2e..ee44569 100644
3714--- a/arch/arm/mm/alignment.c
3715+++ b/arch/arm/mm/alignment.c
3716@@ -211,10 +211,12 @@ union offset_union {
3717 #define __get16_unaligned_check(ins,val,addr) \
3718 do { \
3719 unsigned int err = 0, v, a = addr; \
3720+ pax_open_userland(); \
3721 __get8_unaligned_check(ins,v,a,err); \
3722 val = v << ((BE) ? 8 : 0); \
3723 __get8_unaligned_check(ins,v,a,err); \
3724 val |= v << ((BE) ? 0 : 8); \
3725+ pax_close_userland(); \
3726 if (err) \
3727 goto fault; \
3728 } while (0)
3729@@ -228,6 +230,7 @@ union offset_union {
3730 #define __get32_unaligned_check(ins,val,addr) \
3731 do { \
3732 unsigned int err = 0, v, a = addr; \
3733+ pax_open_userland(); \
3734 __get8_unaligned_check(ins,v,a,err); \
3735 val = v << ((BE) ? 24 : 0); \
3736 __get8_unaligned_check(ins,v,a,err); \
3737@@ -236,6 +239,7 @@ union offset_union {
3738 val |= v << ((BE) ? 8 : 16); \
3739 __get8_unaligned_check(ins,v,a,err); \
3740 val |= v << ((BE) ? 0 : 24); \
3741+ pax_close_userland(); \
3742 if (err) \
3743 goto fault; \
3744 } while (0)
3745@@ -249,6 +253,7 @@ union offset_union {
3746 #define __put16_unaligned_check(ins,val,addr) \
3747 do { \
3748 unsigned int err = 0, v = val, a = addr; \
3749+ pax_open_userland(); \
3750 __asm__( FIRST_BYTE_16 \
3751 ARM( "1: "ins" %1, [%2], #1\n" ) \
3752 THUMB( "1: "ins" %1, [%2]\n" ) \
3753@@ -268,6 +273,7 @@ union offset_union {
3754 " .popsection\n" \
3755 : "=r" (err), "=&r" (v), "=&r" (a) \
3756 : "0" (err), "1" (v), "2" (a)); \
3757+ pax_close_userland(); \
3758 if (err) \
3759 goto fault; \
3760 } while (0)
3761@@ -281,6 +287,7 @@ union offset_union {
3762 #define __put32_unaligned_check(ins,val,addr) \
3763 do { \
3764 unsigned int err = 0, v = val, a = addr; \
3765+ pax_open_userland(); \
3766 __asm__( FIRST_BYTE_32 \
3767 ARM( "1: "ins" %1, [%2], #1\n" ) \
3768 THUMB( "1: "ins" %1, [%2]\n" ) \
3769@@ -310,6 +317,7 @@ union offset_union {
3770 " .popsection\n" \
3771 : "=r" (err), "=&r" (v), "=&r" (a) \
3772 : "0" (err), "1" (v), "2" (a)); \
3773+ pax_close_userland(); \
3774 if (err) \
3775 goto fault; \
3776 } while (0)
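
Every unaligned-access macro in alignment.c now brackets its userspace loads and stores with pax_open_userland()/pax_close_userland(), which under PAX_MEMORY_UDEREF flip the ARM domain permissions so userland is reachable from kernel context only for the few instructions that need it. The shape of that bracketing, with the domain switches stubbed out:

#include <stdio.h>
#include <stdint.h>

static void open_userland(void)  { /* would grant the userland domain */ }
static void close_userland(void) { /* would revoke it again */ }

/* assemble a 16-bit value from two byte loads; the access window is
 * held open only for the duration of the loads */
static uint16_t get16_unaligned(const uint8_t *addr)
{
        uint16_t val;

        open_userland();
        val  = addr[0];
        val |= (uint16_t)addr[1] << 8;
        close_userland();
        return val;
}

int main(void)
{
        uint8_t buf[3] = { 0x00, 0x34, 0x12 };

        printf("0x%04x\n", get16_unaligned(buf + 1));   /* 0x1234 */
        return 0;
}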
3777diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3778index d07df17..59d5493 100644
3779--- a/arch/arm/mm/context.c
3780+++ b/arch/arm/mm/context.c
3781@@ -45,7 +45,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3782 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3783 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3784
3785-static DEFINE_PER_CPU(atomic64_t, active_asids);
3786+DEFINE_PER_CPU(atomic64_t, active_asids);
3787 static DEFINE_PER_CPU(u64, reserved_asids);
3788 static cpumask_t tlb_flush_pending;
3789
3790@@ -209,8 +209,10 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3791 atomic64_set(&mm->context.id, asid);
3792 }
3793
3794- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
3795+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
3796 local_flush_tlb_all();
3797+ dummy_flush_tlb_a15_erratum();
3798+ }
3799
3800 atomic64_set(&per_cpu(active_asids, cpu), asid);
3801 cpumask_set_cpu(cpu, mm_cpumask(mm));
3802diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3803index 5dbf13f..1a60561 100644
3804--- a/arch/arm/mm/fault.c
3805+++ b/arch/arm/mm/fault.c
3806@@ -25,6 +25,7 @@
3807 #include <asm/system_misc.h>
3808 #include <asm/system_info.h>
3809 #include <asm/tlbflush.h>
3810+#include <asm/sections.h>
3811
3812 #include "fault.h"
3813
3814@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3815 if (fixup_exception(regs))
3816 return;
3817
3818+#ifdef CONFIG_PAX_KERNEXEC
3819+ if ((fsr & FSR_WRITE) &&
3820+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3821+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3822+ {
3823+ if (current->signal->curr_ip)
3824+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3825+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3826+ else
3827+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3828+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3829+ }
3830+#endif
3831+
3832 /*
3833 * No handler, we'll have to terminate things with extreme prejudice.
3834 */
3835@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3836 }
3837 #endif
3838
3839+#ifdef CONFIG_PAX_PAGEEXEC
3840+ if (fsr & FSR_LNX_PF) {
3841+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3842+ do_group_exit(SIGKILL);
3843+ }
3844+#endif
3845+
3846 tsk->thread.address = addr;
3847 tsk->thread.error_code = fsr;
3848 tsk->thread.trap_no = 14;
3849@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3850 }
3851 #endif /* CONFIG_MMU */
3852
3853+#ifdef CONFIG_PAX_PAGEEXEC
3854+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3855+{
3856+ long i;
3857+
3858+ printk(KERN_ERR "PAX: bytes at PC: ");
3859+ for (i = 0; i < 20; i++) {
3860+ unsigned char c;
3861+ if (get_user(c, (__force unsigned char __user *)pc+i))
3862+ printk(KERN_CONT "?? ");
3863+ else
3864+ printk(KERN_CONT "%02x ", c);
3865+ }
3866+ printk("\n");
3867+
3868+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3869+ for (i = -1; i < 20; i++) {
3870+ unsigned long c;
3871+ if (get_user(c, (__force unsigned long __user *)sp+i))
3872+ printk(KERN_CONT "???????? ");
3873+ else
3874+ printk(KERN_CONT "%08lx ", c);
3875+ }
3876+ printk("\n");
3877+}
3878+#endif
3879+
3880 /*
3881 * First Level Translation Fault Handler
3882 *
3883@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3884 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3885 struct siginfo info;
3886
3887+#ifdef CONFIG_PAX_MEMORY_UDEREF
3888+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3889+ if (current->signal->curr_ip)
3890+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3891+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3892+ else
3893+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3894+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3895+ goto die;
3896+ }
3897+#endif
3898+
3899 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3900 return;
3901
3902+die:
3903 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3904 inf->name, fsr, addr);
3905
3906@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3907 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3908 struct siginfo info;
3909
3910+ if (user_mode(regs)) {
3911+ if (addr == 0xffff0fe0UL) {
3912+ /*
3913+ * PaX: __kuser_get_tls emulation
3914+ */
3915+ regs->ARM_r0 = current_thread_info()->tp_value;
3916+ regs->ARM_pc = regs->ARM_lr;
3917+ return;
3918+ }
3919+ }
3920+
3921+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3922+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3923+ if (current->signal->curr_ip)
3924+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3925+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3926+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3927+ else
3928+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3929+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3930+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3931+ goto die;
3932+ }
3933+#endif
3934+
3935+#ifdef CONFIG_PAX_REFCOUNT
3936+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3937+ unsigned int bkpt;
3938+
3939+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3940+ current->thread.error_code = ifsr;
3941+ current->thread.trap_no = 0;
3942+ pax_report_refcount_overflow(regs);
3943+ fixup_exception(regs);
3944+ return;
3945+ }
3946+ }
3947+#endif
3948+
3949 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3950 return;
3951
3952+die:
3953 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3954 inf->name, ifsr, addr);
3955
3956diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3957index cf08bdf..772656c 100644
3958--- a/arch/arm/mm/fault.h
3959+++ b/arch/arm/mm/fault.h
3960@@ -3,6 +3,7 @@
3961
3962 /*
3963 * Fault status register encodings. We steal bit 31 for our own purposes.
3964+ * Set when the FSR value is from an instruction fault.
3965 */
3966 #define FSR_LNX_PF (1 << 31)
3967 #define FSR_WRITE (1 << 11)
3968@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3969 }
3970 #endif
3971
3972+/* valid for LPAE and !LPAE */
3973+static inline int is_xn_fault(unsigned int fsr)
3974+{
3975+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3976+}
3977+
3978+static inline int is_domain_fault(unsigned int fsr)
3979+{
3980+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3981+}
3982+
3983 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3984 unsigned long search_exception_table(unsigned long addr);
3985
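
The two new predicates are plain mask tests on the fault-status field returned by fsr_fs(): (fs & 0xD) == 0x9 matches 0b01001 and 0b01011, the section and page domain faults, while (fs & 0x3c) == 0xc matches fs values 12-15, which are the level-0..3 permission faults under LPAE and include the section (13) and page (15) permission faults in the short-descriptor format, hence the "valid for LPAE and !LPAE" comment. Enumerating the matches makes the masks easy to audit:

#include <stdio.h>

static int fs_is_xn(unsigned int fs)     { return (fs & 0x3c) == 0xc; }
static int fs_is_domain(unsigned int fs) { return (fs & 0xD) == 0x9; }

int main(void)
{
        unsigned int fs;

        for (fs = 0; fs < 32; fs++)
                if (fs_is_xn(fs) || fs_is_domain(fs))
                        printf("fs=0x%02x xn=%d domain=%d\n",
                               fs, fs_is_xn(fs), fs_is_domain(fs));
        return 0;       /* prints 0x09, 0x0b (domain) and 0x0c-0x0f (xn) */
}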
3986diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3987index ad722f1..763fdd3 100644
3988--- a/arch/arm/mm/init.c
3989+++ b/arch/arm/mm/init.c
3990@@ -30,6 +30,8 @@
3991 #include <asm/setup.h>
3992 #include <asm/tlb.h>
3993 #include <asm/fixmap.h>
3994+#include <asm/system_info.h>
3995+#include <asm/cp15.h>
3996
3997 #include <asm/mach/arch.h>
3998 #include <asm/mach/map.h>
3999@@ -736,7 +738,46 @@ void free_initmem(void)
4000 {
4001 #ifdef CONFIG_HAVE_TCM
4002 extern char __tcm_start, __tcm_end;
4003+#endif
4004
4005+#ifdef CONFIG_PAX_KERNEXEC
4006+ unsigned long addr;
4007+ pgd_t *pgd;
4008+ pud_t *pud;
4009+ pmd_t *pmd;
4010+ int cpu_arch = cpu_architecture();
4011+ unsigned int cr = get_cr();
4012+
4013+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4014+ /* make page tables, etc. before .text NX */
4015+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4016+ pgd = pgd_offset_k(addr);
4017+ pud = pud_offset(pgd, addr);
4018+ pmd = pmd_offset(pud, addr);
4019+ __section_update(pmd, addr, PMD_SECT_XN);
4020+ }
4021+ /* make init NX */
4022+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4023+ pgd = pgd_offset_k(addr);
4024+ pud = pud_offset(pgd, addr);
4025+ pmd = pmd_offset(pud, addr);
4026+ __section_update(pmd, addr, PMD_SECT_XN);
4027+ }
4028+ /* make kernel code/rodata RX */
4029+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4030+ pgd = pgd_offset_k(addr);
4031+ pud = pud_offset(pgd, addr);
4032+ pmd = pmd_offset(pud, addr);
4033+#ifdef CONFIG_ARM_LPAE
4034+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4035+#else
4036+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4037+#endif
4038+ }
4039+ }
4040+#endif
4041+
4042+#ifdef CONFIG_HAVE_TCM
4043 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4044 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
4045 __phys_to_pfn(__pa(&__tcm_end)),
4046diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4047index 88fd86c..7a224ce 100644
4048--- a/arch/arm/mm/ioremap.c
4049+++ b/arch/arm/mm/ioremap.c
4050@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4051 unsigned int mtype;
4052
4053 if (cached)
4054- mtype = MT_MEMORY;
4055+ mtype = MT_MEMORY_RX;
4056 else
4057- mtype = MT_MEMORY_NONCACHED;
4058+ mtype = MT_MEMORY_NONCACHED_RX;
4059
4060 return __arm_ioremap_caller(phys_addr, size, mtype,
4061 __builtin_return_address(0));
4062diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4063index 10062ce..aa96dd7 100644
4064--- a/arch/arm/mm/mmap.c
4065+++ b/arch/arm/mm/mmap.c
4066@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4067 struct vm_area_struct *vma;
4068 int do_align = 0;
4069 int aliasing = cache_is_vipt_aliasing();
4070+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4071 struct vm_unmapped_area_info info;
4072
4073 /*
4074@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4075 if (len > TASK_SIZE)
4076 return -ENOMEM;
4077
4078+#ifdef CONFIG_PAX_RANDMMAP
4079+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4080+#endif
4081+
4082 if (addr) {
4083 if (do_align)
4084 addr = COLOUR_ALIGN(addr, pgoff);
4085@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4086 addr = PAGE_ALIGN(addr);
4087
4088 vma = find_vma(mm, addr);
4089- if (TASK_SIZE - len >= addr &&
4090- (!vma || addr + len <= vma->vm_start))
4091+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4092 return addr;
4093 }
4094
4095@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4096 unsigned long addr = addr0;
4097 int do_align = 0;
4098 int aliasing = cache_is_vipt_aliasing();
4099+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4100 struct vm_unmapped_area_info info;
4101
4102 /*
4103@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4104 return addr;
4105 }
4106
4107+#ifdef CONFIG_PAX_RANDMMAP
4108+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4109+#endif
4110+
4111 /* requesting a specific address */
4112 if (addr) {
4113 if (do_align)
4114@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4115 else
4116 addr = PAGE_ALIGN(addr);
4117 vma = find_vma(mm, addr);
4118- if (TASK_SIZE - len >= addr &&
4119- (!vma || addr + len <= vma->vm_start))
4120+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4121 return addr;
4122 }
4123
4124@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4125 VM_BUG_ON(addr != -ENOMEM);
4126 info.flags = 0;
4127 info.low_limit = mm->mmap_base;
4128+
4129+#ifdef CONFIG_PAX_RANDMMAP
4130+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4131+ info.low_limit += mm->delta_mmap;
4132+#endif
4133+
4134 info.high_limit = TASK_SIZE;
4135 addr = vm_unmapped_area(&info);
4136 }
4137@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4138 {
4139 unsigned long random_factor = 0UL;
4140
4141+#ifdef CONFIG_PAX_RANDMMAP
4142+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4143+#endif
4144+
4145 /* 8 bits of randomness in 20 address space bits */
4146 if ((current->flags & PF_RANDOMIZE) &&
4147 !(current->personality & ADDR_NO_RANDOMIZE))
4148@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4149
4150 if (mmap_is_legacy()) {
4151 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4152+
4153+#ifdef CONFIG_PAX_RANDMMAP
4154+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4155+ mm->mmap_base += mm->delta_mmap;
4156+#endif
4157+
4158 mm->get_unmapped_area = arch_get_unmapped_area;
4159 mm->unmap_area = arch_unmap_area;
4160 } else {
4161 mm->mmap_base = mmap_base(random_factor);
4162+
4163+#ifdef CONFIG_PAX_RANDMMAP
4164+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4165+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4166+#endif
4167+
4168 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4169 mm->unmap_area = arch_unmap_area_topdown;
4170 }
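
The RANDMMAP additions stack PaX's delta_mmap on top of the stock ARM randomization, whose "8 bits of randomness in 20 address space bits" comment works out as an 8-bit random value shifted left by PAGE_SHIFT (12): the mmap base moves within a 1 MiB window in 4 KiB steps. A quick check of that arithmetic, with a fixed stand-in for get_random_int():

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned int rnd = 0xA7;        /* pretend get_random_int() & 0xff */
        unsigned long random_factor = (unsigned long)rnd << PAGE_SHIFT;

        printf("random_factor = 0x%lx (max 0x%lx = 1 MiB - 4 KiB)\n",
               random_factor, 0xffUL << PAGE_SHIFT);
        return 0;
}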
4171diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4172index ce328c7..35b88dc 100644
4173--- a/arch/arm/mm/mmu.c
4174+++ b/arch/arm/mm/mmu.c
4175@@ -35,6 +35,23 @@
4176
4177 #include "mm.h"
4178
4179+
4180+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4181+void modify_domain(unsigned int dom, unsigned int type)
4182+{
4183+ struct thread_info *thread = current_thread_info();
4184+ unsigned int domain = thread->cpu_domain;
4185+ /*
4186+ * DOMAIN_MANAGER might be defined to some other value,
4187+ * use the arch-defined constant
4188+ */
4189+ domain &= ~domain_val(dom, 3);
4190+ thread->cpu_domain = domain | domain_val(dom, type);
4191+ set_domain(thread->cpu_domain);
4192+}
4193+EXPORT_SYMBOL(modify_domain);
4194+#endif
4195+
4196 /*
4197 * empty_zero_page is a special page that is used for
4198 * zero-initialized data and COW.
4199@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
4200 }
4201 #endif
4202
4203-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4204+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4205 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4206
4207-static struct mem_type mem_types[] = {
4208+#ifdef CONFIG_PAX_KERNEXEC
4209+#define L_PTE_KERNEXEC L_PTE_RDONLY
4210+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4211+#else
4212+#define L_PTE_KERNEXEC L_PTE_DIRTY
4213+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4214+#endif
4215+
4216+static struct mem_type mem_types[] __read_only = {
4217 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4218 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4219 L_PTE_SHARED,
4220@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
4221 [MT_UNCACHED] = {
4222 .prot_pte = PROT_PTE_DEVICE,
4223 .prot_l1 = PMD_TYPE_TABLE,
4224- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4225+ .prot_sect = PROT_SECT_DEVICE,
4226 .domain = DOMAIN_IO,
4227 },
4228 [MT_CACHECLEAN] = {
4229- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4230+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4231 .domain = DOMAIN_KERNEL,
4232 },
4233 #ifndef CONFIG_ARM_LPAE
4234 [MT_MINICLEAN] = {
4235- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4236+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4237 .domain = DOMAIN_KERNEL,
4238 },
4239 #endif
4240@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
4241 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4242 L_PTE_RDONLY,
4243 .prot_l1 = PMD_TYPE_TABLE,
4244- .domain = DOMAIN_USER,
4245+ .domain = DOMAIN_VECTORS,
4246 },
4247 [MT_HIGH_VECTORS] = {
4248 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4249- L_PTE_USER | L_PTE_RDONLY,
4250+ L_PTE_RDONLY,
4251 .prot_l1 = PMD_TYPE_TABLE,
4252- .domain = DOMAIN_USER,
4253+ .domain = DOMAIN_VECTORS,
4254 },
4255- [MT_MEMORY] = {
4256+ [MT_MEMORY_RWX] = {
4257 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4258 .prot_l1 = PMD_TYPE_TABLE,
4259 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4260 .domain = DOMAIN_KERNEL,
4261 },
4262+ [MT_MEMORY_RW] = {
4263+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4264+ .prot_l1 = PMD_TYPE_TABLE,
4265+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4266+ .domain = DOMAIN_KERNEL,
4267+ },
4268+ [MT_MEMORY_RX] = {
4269+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4270+ .prot_l1 = PMD_TYPE_TABLE,
4271+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4272+ .domain = DOMAIN_KERNEL,
4273+ },
4274 [MT_ROM] = {
4275- .prot_sect = PMD_TYPE_SECT,
4276+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4277 .domain = DOMAIN_KERNEL,
4278 },
4279- [MT_MEMORY_NONCACHED] = {
4280+ [MT_MEMORY_NONCACHED_RW] = {
4281 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4282 L_PTE_MT_BUFFERABLE,
4283 .prot_l1 = PMD_TYPE_TABLE,
4284 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4285 .domain = DOMAIN_KERNEL,
4286 },
4287+ [MT_MEMORY_NONCACHED_RX] = {
4288+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4289+ L_PTE_MT_BUFFERABLE,
4290+ .prot_l1 = PMD_TYPE_TABLE,
4291+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4292+ .domain = DOMAIN_KERNEL,
4293+ },
4294 [MT_MEMORY_DTCM] = {
4295- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4296- L_PTE_XN,
4297+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4298 .prot_l1 = PMD_TYPE_TABLE,
4299- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4300+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4301 .domain = DOMAIN_KERNEL,
4302 },
4303 [MT_MEMORY_ITCM] = {
4304@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
4305 },
4306 [MT_MEMORY_SO] = {
4307 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4308- L_PTE_MT_UNCACHED | L_PTE_XN,
4309+ L_PTE_MT_UNCACHED,
4310 .prot_l1 = PMD_TYPE_TABLE,
4311 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4312- PMD_SECT_UNCACHED | PMD_SECT_XN,
4313+ PMD_SECT_UNCACHED,
4314 .domain = DOMAIN_KERNEL,
4315 },
4316 [MT_MEMORY_DMA_READY] = {
4317@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
4318 * to prevent speculative instruction fetches.
4319 */
4320 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4321+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4322 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4323+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4324 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4325+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4326 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4327+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4328+
4329+ /* Mark other regions on ARMv6+ as execute-never */
4330+
4331+#ifdef CONFIG_PAX_KERNEXEC
4332+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4333+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4334+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4335+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4336+#ifndef CONFIG_ARM_LPAE
4337+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4338+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4339+#endif
4340+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4341+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4342+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4343+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4344+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4345+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4346+#endif
4347+
4348+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4349+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4350 }
4351 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4352 /*
4353@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
4354 * from SVC mode and no access from userspace.
4355 */
4356 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4357+#ifdef CONFIG_PAX_KERNEXEC
4358+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4359+#endif
4360 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4361 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4362 #endif
4363@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
4364 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4365 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4366 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4367- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4368- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4369+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4370+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4371+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4372+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4373+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4374+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4375 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4376- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4377- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4378+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4379+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4380+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4381+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4382 }
4383 }
4384
4385@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
4386 if (cpu_arch >= CPU_ARCH_ARMv6) {
4387 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4388 /* Non-cacheable Normal is XCB = 001 */
4389- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4390+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4391+ PMD_SECT_BUFFERED;
4392+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4393 PMD_SECT_BUFFERED;
4394 } else {
4395 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4396- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4397+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4398+ PMD_SECT_TEX(1);
4399+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4400 PMD_SECT_TEX(1);
4401 }
4402 } else {
4403- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4404+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4405+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4406 }
4407
4408 #ifdef CONFIG_ARM_LPAE
4409@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
4410 vecs_pgprot |= PTE_EXT_AF;
4411 #endif
4412
4413+ user_pgprot |= __supported_pte_mask;
4414+
4415 for (i = 0; i < 16; i++) {
4416 pteval_t v = pgprot_val(protection_map[i]);
4417 protection_map[i] = __pgprot(v | user_pgprot);
4418@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
4419
4420 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4421 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4422- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4423- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4424+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4425+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4426+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4427+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4428+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4429+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4430 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4431- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4432+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4433+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4434 mem_types[MT_ROM].prot_sect |= cp->pmd;
4435
4436 switch (cp->pmd) {
4437@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
4438 * called function. This means you can't use any function or debugging
4439 * method which may touch any device, otherwise the kernel _will_ crash.
4440 */
4441+
4442+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4443+
4444 static void __init devicemaps_init(struct machine_desc *mdesc)
4445 {
4446 struct map_desc map;
4447 unsigned long addr;
4448- void *vectors;
4449
4450- /*
4451- * Allocate the vector page early.
4452- */
4453- vectors = early_alloc(PAGE_SIZE);
4454-
4455- early_trap_init(vectors);
4456+ early_trap_init(&vectors);
4457
4458 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4459 pmd_clear(pmd_off_k(addr));
4460@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4461 * location (0xffff0000). If we aren't using high-vectors, also
4462 * create a mapping at the low-vectors virtual address.
4463 */
4464- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4465+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4466 map.virtual = 0xffff0000;
4467 map.length = PAGE_SIZE;
4468 map.type = MT_HIGH_VECTORS;
4469@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
4470 map.pfn = __phys_to_pfn(start);
4471 map.virtual = __phys_to_virt(start);
4472 map.length = end - start;
4473- map.type = MT_MEMORY;
4474
4475+#ifdef CONFIG_PAX_KERNEXEC
4476+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4477+ struct map_desc kernel;
4478+ struct map_desc initmap;
4479+
4480+ /* when freeing initmem we will make this RW */
4481+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4482+ initmap.virtual = (unsigned long)__init_begin;
4483+ initmap.length = _sdata - __init_begin;
4484+ initmap.type = MT_MEMORY_RWX;
4485+ create_mapping(&initmap);
4486+
4487+ /* when freeing initmem we will make this RX */
4488+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4489+ kernel.virtual = (unsigned long)_stext;
4490+ kernel.length = __init_begin - _stext;
4491+ kernel.type = MT_MEMORY_RWX;
4492+ create_mapping(&kernel);
4493+
4494+ if (map.virtual < (unsigned long)_stext) {
4495+ map.length = (unsigned long)_stext - map.virtual;
4496+ map.type = MT_MEMORY_RWX;
4497+ create_mapping(&map);
4498+ }
4499+
4500+ map.pfn = __phys_to_pfn(__pa(_sdata));
4501+ map.virtual = (unsigned long)_sdata;
4502+ map.length = end - __pa(_sdata);
4503+ }
4504+#endif
4505+
4506+ map.type = MT_MEMORY_RW;
4507 create_mapping(&map);
4508 }
4509 }
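
Under KERNEXEC, map_lowmem() above no longer maps lowmem as one flat RWX block: it carves the range around the kernel image so that, once the __section_update() loops added to free_initmem() in init.c have run, text and rodata end up RX, the init area is reclaimed, and everything else is RW and non-executable. A sketch of the resulting carve-up, with purely illustrative addresses:

#include <stdio.h>

struct range { unsigned long start, end; const char *mapping; };

int main(void)
{
        unsigned long mem_start  = 0xc0000000UL;
        unsigned long stext      = 0xc0008000UL;   /* _stext */
        unsigned long init_begin = 0xc0600000UL;   /* __init_begin */
        unsigned long sdata      = 0xc0700000UL;   /* _sdata */
        unsigned long mem_end    = 0xc8000000UL;

        const struct range map[] = {
                { mem_start,  stext,      "RWX, made NX in free_initmem()" },
                { stext,      init_begin, "RWX, made RX in free_initmem()" },
                { init_begin, sdata,      "RWX, made RW+NX when init is freed" },
                { sdata,      mem_end,    "RW (MT_MEMORY_RW)" },
        };
        unsigned int i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                printf("%08lx-%08lx %s\n", map[i].start, map[i].end,
                       map[i].mapping);
        return 0;
}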
4510diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4511index 6d98c13..3cfb174 100644
4512--- a/arch/arm/mm/proc-v7-2level.S
4513+++ b/arch/arm/mm/proc-v7-2level.S
4514@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4515 tst r1, #L_PTE_XN
4516 orrne r3, r3, #PTE_EXT_XN
4517
4518+ tst r1, #L_PTE_PXN
4519+ orrne r3, r3, #PTE_EXT_PXN
4520+
4521 tst r1, #L_PTE_YOUNG
4522 tstne r1, #L_PTE_VALID
4523 #ifndef CONFIG_CPU_USE_DOMAINS
4524diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4525index a5bc92d..0bb4730 100644
4526--- a/arch/arm/plat-omap/sram.c
4527+++ b/arch/arm/plat-omap/sram.c
4528@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4529 * Looks like we need to preserve some bootloader code at the
4530 * beginning of SRAM for jumping to flash for reboot to work...
4531 */
4532+ pax_open_kernel();
4533 memset_io(omap_sram_base + omap_sram_skip, 0,
4534 omap_sram_size - omap_sram_skip);
4535+ pax_close_kernel();
4536 }
4537diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4538index f5144cd..71f6d1f 100644
4539--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4540+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4541@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4542 int (*started)(unsigned ch);
4543 int (*flush)(unsigned ch);
4544 int (*stop)(unsigned ch);
4545-};
4546+} __no_const;
4547
4548 extern void *samsung_dmadev_get_ops(void);
4549 extern void *s3c_dma_get_ops(void);
4550diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4551index 0c3ba9f..95722b3 100644
4552--- a/arch/arm64/kernel/debug-monitors.c
4553+++ b/arch/arm64/kernel/debug-monitors.c
4554@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4555 return NOTIFY_OK;
4556 }
4557
4558-static struct notifier_block __cpuinitdata os_lock_nb = {
4559+static struct notifier_block os_lock_nb = {
4560 .notifier_call = os_lock_notify,
4561 };
4562
4563diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4564index 5ab825c..96aaec8 100644
4565--- a/arch/arm64/kernel/hw_breakpoint.c
4566+++ b/arch/arm64/kernel/hw_breakpoint.c
4567@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4568 return NOTIFY_OK;
4569 }
4570
4571-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4572+static struct notifier_block hw_breakpoint_reset_nb = {
4573 .notifier_call = hw_breakpoint_reset_notify,
4574 };
4575
4576diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4577index c3a58a1..78fbf54 100644
4578--- a/arch/avr32/include/asm/cache.h
4579+++ b/arch/avr32/include/asm/cache.h
4580@@ -1,8 +1,10 @@
4581 #ifndef __ASM_AVR32_CACHE_H
4582 #define __ASM_AVR32_CACHE_H
4583
4584+#include <linux/const.h>
4585+
4586 #define L1_CACHE_SHIFT 5
4587-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4588+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4589
4590 /*
4591 * Memory returned by kmalloc() may be used for DMA, so we must make
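
This cache.h change repeats below for blackfin, cris, frv, hexagon and ia64: L1_CACHE_BYTES becomes (_AC(1,UL) << L1_CACHE_SHIFT) so the constant is an unsigned long in C code (keeping alignment arithmetic out of signed-int territory) while still expanding to a bare number when the header is pulled into assembly. A self-contained stand-in for the <linux/const.h> machinery:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)       X
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)
#endif

#define L1_CACHE_SHIFT  5
#define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
        unsigned long len = 100;
        /* round up to a cache-line multiple, all in unsigned long */
        unsigned long aligned = (len + L1_CACHE_BYTES - 1) &
                                ~(L1_CACHE_BYTES - 1);

        printf("%lu -> %lu\n", len, aligned);   /* 100 -> 128 */
        return 0;
}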
4592diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4593index e2c3287..6c4f98c 100644
4594--- a/arch/avr32/include/asm/elf.h
4595+++ b/arch/avr32/include/asm/elf.h
4596@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4597 the loader. We need to make sure that it is out of the way of the program
4598 that it will "exec", and that there is sufficient room for the brk. */
4599
4600-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4601+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4602
4603+#ifdef CONFIG_PAX_ASLR
4604+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4605+
4606+#define PAX_DELTA_MMAP_LEN 15
4607+#define PAX_DELTA_STACK_LEN 15
4608+#endif
4609
4610 /* This yields a mask that user programs can use to figure out what
4611 instruction set this CPU supports. This could be done in user space,
4612diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4613index 479330b..53717a8 100644
4614--- a/arch/avr32/include/asm/kmap_types.h
4615+++ b/arch/avr32/include/asm/kmap_types.h
4616@@ -2,9 +2,9 @@
4617 #define __ASM_AVR32_KMAP_TYPES_H
4618
4619 #ifdef CONFIG_DEBUG_HIGHMEM
4620-# define KM_TYPE_NR 29
4621+# define KM_TYPE_NR 30
4622 #else
4623-# define KM_TYPE_NR 14
4624+# define KM_TYPE_NR 15
4625 #endif
4626
4627 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4628diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4629index b2f2d2d..d1c85cb 100644
4630--- a/arch/avr32/mm/fault.c
4631+++ b/arch/avr32/mm/fault.c
4632@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4633
4634 int exception_trace = 1;
4635
4636+#ifdef CONFIG_PAX_PAGEEXEC
4637+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4638+{
4639+ unsigned long i;
4640+
4641+ printk(KERN_ERR "PAX: bytes at PC: ");
4642+ for (i = 0; i < 20; i++) {
4643+ unsigned char c;
4644+ if (get_user(c, (unsigned char *)pc+i))
4645+ printk(KERN_CONT "???????? ");
4646+ else
4647+ printk(KERN_CONT "%02x ", c);
4648+ }
4649+ printk("\n");
4650+}
4651+#endif
4652+
4653 /*
4654 * This routine handles page faults. It determines the address and the
4655 * problem, and then passes it off to one of the appropriate routines.
4656@@ -174,6 +191,16 @@ bad_area:
4657 up_read(&mm->mmap_sem);
4658
4659 if (user_mode(regs)) {
4660+
4661+#ifdef CONFIG_PAX_PAGEEXEC
4662+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4663+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4664+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4665+ do_group_exit(SIGKILL);
4666+ }
4667+ }
4668+#endif
4669+
4670 if (exception_trace && printk_ratelimit())
4671 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4672 "sp %08lx ecr %lu\n",
4673diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4674index 568885a..f8008df 100644
4675--- a/arch/blackfin/include/asm/cache.h
4676+++ b/arch/blackfin/include/asm/cache.h
4677@@ -7,6 +7,7 @@
4678 #ifndef __ARCH_BLACKFIN_CACHE_H
4679 #define __ARCH_BLACKFIN_CACHE_H
4680
4681+#include <linux/const.h>
4682 #include <linux/linkage.h> /* for asmlinkage */
4683
4684 /*
4685@@ -14,7 +15,7 @@
4686 * Blackfin loads 32 bytes for cache
4687 */
4688 #define L1_CACHE_SHIFT 5
4689-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4690+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4691 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4692
4693 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4694diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4695index aea2718..3639a60 100644
4696--- a/arch/cris/include/arch-v10/arch/cache.h
4697+++ b/arch/cris/include/arch-v10/arch/cache.h
4698@@ -1,8 +1,9 @@
4699 #ifndef _ASM_ARCH_CACHE_H
4700 #define _ASM_ARCH_CACHE_H
4701
4702+#include <linux/const.h>
4703 /* Etrax 100LX have 32-byte cache-lines. */
4704-#define L1_CACHE_BYTES 32
4705 #define L1_CACHE_SHIFT 5
4706+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4707
4708 #endif /* _ASM_ARCH_CACHE_H */
4709diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4710index 7caf25d..ee65ac5 100644
4711--- a/arch/cris/include/arch-v32/arch/cache.h
4712+++ b/arch/cris/include/arch-v32/arch/cache.h
4713@@ -1,11 +1,12 @@
4714 #ifndef _ASM_CRIS_ARCH_CACHE_H
4715 #define _ASM_CRIS_ARCH_CACHE_H
4716
4717+#include <linux/const.h>
4718 #include <arch/hwregs/dma.h>
4719
4720 /* A cache-line is 32 bytes. */
4721-#define L1_CACHE_BYTES 32
4722 #define L1_CACHE_SHIFT 5
4723+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4724
4725 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4726
4727diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4728index b86329d..6709906 100644
4729--- a/arch/frv/include/asm/atomic.h
4730+++ b/arch/frv/include/asm/atomic.h
4731@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4732 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4733 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4734
4735+#define atomic64_read_unchecked(v) atomic64_read(v)
4736+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4737+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4738+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4739+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4740+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4741+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4742+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4743+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4744+
4745 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4746 {
4747 int c, old;
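
frv here (and ia64 below) gains *_unchecked aliases because, under PAX_REFCOUNT on architectures that implement it, the plain atomic ops detect signed overflow and kill the offender; call sites whose counters legitimately wrap (statistics, sequence numbers) opt out via the unchecked flavor. On frv and ia64 there is no detection, so the aliases are identities. A non-atomic userspace sketch of the policy split (plain ints keep it short; the real versions are atomic):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static void refcount_overflow(void)
{
        fprintf(stderr, "refcount overflow detected\n");
        abort();        /* the kernel reports and kills instead */
}

static void inc_checked(int *v)
{
        int new;

        if (__builtin_add_overflow(*v, 1, &new))
                refcount_overflow();
        *v = new;
}

static void inc_unchecked(unsigned int *v)
{
        (*v)++;         /* wraparound is deliberate and harmless here */
}

int main(void)
{
        unsigned int stats = UINT_MAX;  /* a wrapping statistics counter */
        int refs = 1;

        inc_unchecked(&stats);
        printf("stats wrapped to %u\n", stats);
        inc_checked(&refs);
        printf("refs = %d\n", refs);
        return 0;
}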
4748diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4749index 2797163..c2a401d 100644
4750--- a/arch/frv/include/asm/cache.h
4751+++ b/arch/frv/include/asm/cache.h
4752@@ -12,10 +12,11 @@
4753 #ifndef __ASM_CACHE_H
4754 #define __ASM_CACHE_H
4755
4756+#include <linux/const.h>
4757
4758 /* bytes per L1 cache line */
4759 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4761+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4762
4763 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4764 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4765diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4766index 43901f2..0d8b865 100644
4767--- a/arch/frv/include/asm/kmap_types.h
4768+++ b/arch/frv/include/asm/kmap_types.h
4769@@ -2,6 +2,6 @@
4770 #ifndef _ASM_KMAP_TYPES_H
4771 #define _ASM_KMAP_TYPES_H
4772
4773-#define KM_TYPE_NR 17
4774+#define KM_TYPE_NR 18
4775
4776 #endif
4777diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4778index 385fd30..3aaf4fe 100644
4779--- a/arch/frv/mm/elf-fdpic.c
4780+++ b/arch/frv/mm/elf-fdpic.c
4781@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4782 {
4783 struct vm_area_struct *vma;
4784 unsigned long limit;
4785+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4786
4787 if (len > TASK_SIZE)
4788 return -ENOMEM;
4789@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4790 if (addr) {
4791 addr = PAGE_ALIGN(addr);
4792 vma = find_vma(current->mm, addr);
4793- if (TASK_SIZE - len >= addr &&
4794- (!vma || addr + len <= vma->vm_start))
4795+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4796 goto success;
4797 }
4798
4799@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4800 for (; vma; vma = vma->vm_next) {
4801 if (addr > limit)
4802 break;
4803- if (addr + len <= vma->vm_start)
4804+ if (check_heap_stack_gap(vma, addr, len, offset))
4805 goto success;
4806 addr = vma->vm_end;
4807 }
4808@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4809 for (; vma; vma = vma->vm_next) {
4810 if (addr > limit)
4811 break;
4812- if (addr + len <= vma->vm_start)
4813+ if (check_heap_stack_gap(vma, addr, len, offset))
4814 goto success;
4815 addr = vma->vm_end;
4816 }
4817diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4818index f4ca594..adc72fd6 100644
4819--- a/arch/hexagon/include/asm/cache.h
4820+++ b/arch/hexagon/include/asm/cache.h
4821@@ -21,9 +21,11 @@
4822 #ifndef __ASM_CACHE_H
4823 #define __ASM_CACHE_H
4824
4825+#include <linux/const.h>
4826+
4827 /* Bytes per L1 cache line */
4828-#define L1_CACHE_SHIFT (5)
4829-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4830+#define L1_CACHE_SHIFT 5
4831+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4832
4833 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4834 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4835diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4836index 6e6fe18..a6ae668 100644
4837--- a/arch/ia64/include/asm/atomic.h
4838+++ b/arch/ia64/include/asm/atomic.h
4839@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4840 #define atomic64_inc(v) atomic64_add(1, (v))
4841 #define atomic64_dec(v) atomic64_sub(1, (v))
4842
4843+#define atomic64_read_unchecked(v) atomic64_read(v)
4844+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4845+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4846+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4847+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4848+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4849+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4850+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4851+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4852+
4853 /* Atomic operations are already serializing */
4854 #define smp_mb__before_atomic_dec() barrier()
4855 #define smp_mb__after_atomic_dec() barrier()
4856diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4857index 988254a..e1ee885 100644
4858--- a/arch/ia64/include/asm/cache.h
4859+++ b/arch/ia64/include/asm/cache.h
4860@@ -1,6 +1,7 @@
4861 #ifndef _ASM_IA64_CACHE_H
4862 #define _ASM_IA64_CACHE_H
4863
4864+#include <linux/const.h>
4865
4866 /*
4867 * Copyright (C) 1998-2000 Hewlett-Packard Co
4868@@ -9,7 +10,7 @@
4869
4870 /* Bytes per L1 (data) cache line. */
4871 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4872-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4873+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4874
4875 #ifdef CONFIG_SMP
4876 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4877diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4878index b5298eb..67c6e62 100644
4879--- a/arch/ia64/include/asm/elf.h
4880+++ b/arch/ia64/include/asm/elf.h
4881@@ -42,6 +42,13 @@
4882 */
4883 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4884
4885+#ifdef CONFIG_PAX_ASLR
4886+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4887+
4888+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4889+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4890+#endif
4891+
4892 #define PT_IA_64_UNWIND 0x70000001
4893
4894 /* IA-64 relocations: */
4895diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4896index 96a8d92..617a1cf 100644
4897--- a/arch/ia64/include/asm/pgalloc.h
4898+++ b/arch/ia64/include/asm/pgalloc.h
4899@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4900 pgd_val(*pgd_entry) = __pa(pud);
4901 }
4902
4903+static inline void
4904+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4905+{
4906+ pgd_populate(mm, pgd_entry, pud);
4907+}
4908+
4909 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4910 {
4911 return quicklist_alloc(0, GFP_KERNEL, NULL);
4912@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4913 pud_val(*pud_entry) = __pa(pmd);
4914 }
4915
4916+static inline void
4917+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4918+{
4919+ pud_populate(mm, pud_entry, pmd);
4920+}
4921+
4922 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4923 {
4924 return quicklist_alloc(0, GFP_KERNEL, NULL);
4925diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4926index 815810c..d60bd4c 100644
4927--- a/arch/ia64/include/asm/pgtable.h
4928+++ b/arch/ia64/include/asm/pgtable.h
4929@@ -12,7 +12,7 @@
4930 * David Mosberger-Tang <davidm@hpl.hp.com>
4931 */
4932
4933-
4934+#include <linux/const.h>
4935 #include <asm/mman.h>
4936 #include <asm/page.h>
4937 #include <asm/processor.h>
4938@@ -142,6 +142,17 @@
4939 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4940 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4941 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4942+
4943+#ifdef CONFIG_PAX_PAGEEXEC
4944+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4945+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4946+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4947+#else
4948+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4949+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4950+# define PAGE_COPY_NOEXEC PAGE_COPY
4951+#endif
4952+
4953 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4954 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4955 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4956diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4957index 54ff557..70c88b7 100644
4958--- a/arch/ia64/include/asm/spinlock.h
4959+++ b/arch/ia64/include/asm/spinlock.h
4960@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4961 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4962
4963 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4964- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4965+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4966 }
4967
4968 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4969diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4970index 449c8c0..18965fb 100644
4971--- a/arch/ia64/include/asm/uaccess.h
4972+++ b/arch/ia64/include/asm/uaccess.h
4973@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4974 static inline unsigned long
4975 __copy_to_user (void __user *to, const void *from, unsigned long count)
4976 {
4977+ if (count > INT_MAX)
4978+ return count;
4979+
4980+ if (!__builtin_constant_p(count))
4981+ check_object_size(from, count, true);
4982+
4983 return __copy_user(to, (__force void __user *) from, count);
4984 }
4985
4986 static inline unsigned long
4987 __copy_from_user (void *to, const void __user *from, unsigned long count)
4988 {
4989+ if (count > INT_MAX)
4990+ return count;
4991+
4992+ if (!__builtin_constant_p(count))
4993+ check_object_size(to, count, false);
4994+
4995 return __copy_user((__force void __user *) to, from, count);
4996 }
4997
4998@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4999 ({ \
5000 void __user *__cu_to = (to); \
5001 const void *__cu_from = (from); \
5002- long __cu_len = (n); \
5003+ unsigned long __cu_len = (n); \
5004 \
5005- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5006+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5007+ if (!__builtin_constant_p(n)) \
5008+ check_object_size(__cu_from, __cu_len, true); \
5009 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5010+ } \
5011 __cu_len; \
5012 })
5013
5014@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5015 ({ \
5016 void *__cu_to = (to); \
5017 const void __user *__cu_from = (from); \
5018- long __cu_len = (n); \
5019+ unsigned long __cu_len = (n); \
5020 \
5021 __chk_user_ptr(__cu_from); \
5022- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5023+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5024+ if (!__builtin_constant_p(n)) \
5025+ check_object_size(__cu_to, __cu_len, false); \
5026 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5027+ } \
5028 __cu_len; \
5029 })
5030
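
The ia64 uaccess hunk shows the shape of the hardening applied to __copy_{to,from}_user throughout this patch: absurd lengths are refused up front (count > INT_MAX means a negative size leaked into an unsigned parameter) and, for sizes the compiler cannot prove constant, check_object_size() vets the kernel-side buffer against slab and stack bounds. A userspace sketch with stand-in names:

#include <stdio.h>
#include <string.h>
#include <limits.h>

static void check_object_size(const void *p, size_t n, int to_user)
{
        /* the kernel walks slab/stack metadata here; stubbed out */
        (void)p; (void)n; (void)to_user;
}

static size_t copy_to_user_sketch(void *to, const void *from, size_t n)
{
        if (n > INT_MAX)
                return n;               /* report everything uncopied */

        if (!__builtin_constant_p(n))   /* only non-constant sizes checked */
                check_object_size(from, n, 1);

        memcpy(to, from, n);            /* stand-in for __copy_user() */
        return 0;
}

int main(void)
{
        char dst[8];

        printf("left=%zu dst=%s\n",
               copy_to_user_sketch(dst, "hi", 3), dst);
        printf("bogus len rejected, left=%zu\n",
               copy_to_user_sketch(dst, "hi", (size_t)-1));
        return 0;
}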
5031diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
5032index 2d67317..07d8bfa 100644
5033--- a/arch/ia64/kernel/err_inject.c
5034+++ b/arch/ia64/kernel/err_inject.c
5035@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
5036 return NOTIFY_OK;
5037 }
5038
5039-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
5040+static struct notifier_block err_inject_cpu_notifier =
5041 {
5042 .notifier_call = err_inject_cpu_callback,
5043 };
5044diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
5045index 65bf9cd..794f06b 100644
5046--- a/arch/ia64/kernel/mca.c
5047+++ b/arch/ia64/kernel/mca.c
5048@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
5049 return NOTIFY_OK;
5050 }
5051
5052-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
5053+static struct notifier_block mca_cpu_notifier = {
5054 .notifier_call = mca_cpu_callback
5055 };
5056
5057diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5058index 24603be..948052d 100644
5059--- a/arch/ia64/kernel/module.c
5060+++ b/arch/ia64/kernel/module.c
5061@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5062 void
5063 module_free (struct module *mod, void *module_region)
5064 {
5065- if (mod && mod->arch.init_unw_table &&
5066- module_region == mod->module_init) {
5067+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5068 unw_remove_unwind_table(mod->arch.init_unw_table);
5069 mod->arch.init_unw_table = NULL;
5070 }
5071@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5072 }
5073
5074 static inline int
5075+in_init_rx (const struct module *mod, uint64_t addr)
5076+{
5077+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5078+}
5079+
5080+static inline int
5081+in_init_rw (const struct module *mod, uint64_t addr)
5082+{
5083+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5084+}
5085+
5086+static inline int
5087 in_init (const struct module *mod, uint64_t addr)
5088 {
5089- return addr - (uint64_t) mod->module_init < mod->init_size;
5090+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5091+}
5092+
5093+static inline int
5094+in_core_rx (const struct module *mod, uint64_t addr)
5095+{
5096+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5097+}
5098+
5099+static inline int
5100+in_core_rw (const struct module *mod, uint64_t addr)
5101+{
5102+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5103 }
5104
5105 static inline int
5106 in_core (const struct module *mod, uint64_t addr)
5107 {
5108- return addr - (uint64_t) mod->module_core < mod->core_size;
5109+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5110 }
5111
5112 static inline int
5113@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5114 break;
5115
5116 case RV_BDREL:
5117- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5118+ if (in_init_rx(mod, val))
5119+ val -= (uint64_t) mod->module_init_rx;
5120+ else if (in_init_rw(mod, val))
5121+ val -= (uint64_t) mod->module_init_rw;
5122+ else if (in_core_rx(mod, val))
5123+ val -= (uint64_t) mod->module_core_rx;
5124+ else if (in_core_rw(mod, val))
5125+ val -= (uint64_t) mod->module_core_rw;
5126 break;
5127
5128 case RV_LTV:
5129@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5130 * addresses have been selected...
5131 */
5132 uint64_t gp;
5133- if (mod->core_size > MAX_LTOFF)
5134+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5135 /*
5136 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5137 * at the end of the module.
5138 */
5139- gp = mod->core_size - MAX_LTOFF / 2;
5140+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5141 else
5142- gp = mod->core_size / 2;
5143- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5144+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5145+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5146 mod->arch.gp = gp;
5147 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5148 }
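
The in_init_rx/in_init_rw/in_core_rx/in_core_rw helpers introduced above all use the same unsigned-wraparound idiom: with addr and base both uint64_t, the single comparison "addr - base < size" covers both bounds, because an addr below base wraps around to a huge value and fails the "< size" test. Standalone illustration:

    #include <stdint.h>

    /* equivalent to (base <= addr && addr < base + size), in one compare */
    static int in_range(uint64_t addr, uint64_t base, uint64_t size)
    {
        return addr - base < size;
    }
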
5149diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5150index 77597e5..189dd62f 100644
5151--- a/arch/ia64/kernel/palinfo.c
5152+++ b/arch/ia64/kernel/palinfo.c
5153@@ -977,7 +977,7 @@ create_palinfo_proc_entries(unsigned int cpu)
5154 struct proc_dir_entry **pdir;
5155 struct proc_dir_entry *cpu_dir;
5156 int j;
5157- char cpustr[sizeof(CPUSTR)];
5158+ char cpustr[3+4+1];
5159
5160
5161 /*
5162@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5163 return NOTIFY_OK;
5164 }
5165
5166-static struct notifier_block __refdata palinfo_cpu_notifier =
5167+static struct notifier_block palinfo_cpu_notifier =
5168 {
5169 .notifier_call = palinfo_cpu_callback,
5170 .priority = 0,
5171diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5172index 79802e5..1a89ec5 100644
5173--- a/arch/ia64/kernel/salinfo.c
5174+++ b/arch/ia64/kernel/salinfo.c
5175@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5176 return NOTIFY_OK;
5177 }
5178
5179-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5180+static struct notifier_block salinfo_cpu_notifier =
5181 {
5182 .notifier_call = salinfo_cpu_callback,
5183 .priority = 0,
5184diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5185index d9439ef..d0cac6b 100644
5186--- a/arch/ia64/kernel/sys_ia64.c
5187+++ b/arch/ia64/kernel/sys_ia64.c
5188@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5189 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
5190 struct mm_struct *mm = current->mm;
5191 struct vm_area_struct *vma;
5192+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5193
5194 if (len > RGN_MAP_LIMIT)
5195 return -ENOMEM;
5196@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5197 if (REGION_NUMBER(addr) == RGN_HPAGE)
5198 addr = 0;
5199 #endif
5200+
5201+#ifdef CONFIG_PAX_RANDMMAP
5202+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5203+ addr = mm->free_area_cache;
5204+ else
5205+#endif
5206+
5207 if (!addr)
5208 addr = mm->free_area_cache;
5209
5210@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5211 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
5212 /* At this point: (!vma || addr < vma->vm_end). */
5213 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
5214- if (start_addr != TASK_UNMAPPED_BASE) {
5215+ if (start_addr != mm->mmap_base) {
5216 /* Start a new search --- just in case we missed some holes. */
5217- addr = TASK_UNMAPPED_BASE;
5218+ addr = mm->mmap_base;
5219 goto full_search;
5220 }
5221 return -ENOMEM;
5222 }
5223- if (!vma || addr + len <= vma->vm_start) {
5224+ if (check_heap_stack_gap(vma, addr, len, offset)) {
5225 /* Remember the address where we stopped this search: */
5226 mm->free_area_cache = addr + len;
5227 return addr;
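
check_heap_stack_gap() above subsumes the old "!vma || addr + len <= vma->vm_start" test and additionally enforces a guard gap, enlarged by the random per-thread offset from gr_rand_threadstack_offset(), when the next mapping is a growing stack. A simplified model (the gap sizing is a stand-in for the grsecurity policy, and wraparound is ignored):

    struct vma_model { unsigned long vm_start; unsigned long vm_flags; };
    #define VM_GROWSDOWN_MODEL 0x0100UL

    static int gap_ok(const struct vma_model *vma, unsigned long addr,
                      unsigned long len, unsigned long rand_offset)
    {
        unsigned long gap = 0;

        if (!vma)
            return 1;                         /* no mapping above the range */
        if (vma->vm_flags & VM_GROWSDOWN_MODEL)
            gap = (1UL << 20) + rand_offset;  /* stack guard + random slack */
        return addr + len + gap <= vma->vm_start;
    }
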
5228diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5229index dc00b2c..cce53c2 100644
5230--- a/arch/ia64/kernel/topology.c
5231+++ b/arch/ia64/kernel/topology.c
5232@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5233 return NOTIFY_OK;
5234 }
5235
5236-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5237+static struct notifier_block cache_cpu_notifier =
5238 {
5239 .notifier_call = cache_cpu_callback
5240 };
5241diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5242index 0ccb28f..8992469 100644
5243--- a/arch/ia64/kernel/vmlinux.lds.S
5244+++ b/arch/ia64/kernel/vmlinux.lds.S
5245@@ -198,7 +198,7 @@ SECTIONS {
5246 /* Per-cpu data: */
5247 . = ALIGN(PERCPU_PAGE_SIZE);
5248 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5249- __phys_per_cpu_start = __per_cpu_load;
5250+ __phys_per_cpu_start = per_cpu_load;
5251 /*
5252 * ensure percpu data fits
5253 * into percpu page size
5254diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5255index 6cf0341..d352594 100644
5256--- a/arch/ia64/mm/fault.c
5257+++ b/arch/ia64/mm/fault.c
5258@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5259 return pte_present(pte);
5260 }
5261
5262+#ifdef CONFIG_PAX_PAGEEXEC
5263+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5264+{
5265+ unsigned long i;
5266+
5267+ printk(KERN_ERR "PAX: bytes at PC: ");
5268+ for (i = 0; i < 8; i++) {
5269+ unsigned int c;
5270+ if (get_user(c, (unsigned int *)pc+i))
5271+ printk(KERN_CONT "???????? ");
5272+ else
5273+ printk(KERN_CONT "%08x ", c);
5274+ }
5275+ printk("\n");
5276+}
5277+#endif
5278+
5279 # define VM_READ_BIT 0
5280 # define VM_WRITE_BIT 1
5281 # define VM_EXEC_BIT 2
5282@@ -149,8 +166,21 @@ retry:
5283 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5284 goto bad_area;
5285
5286- if ((vma->vm_flags & mask) != mask)
5287+ if ((vma->vm_flags & mask) != mask) {
5288+
5289+#ifdef CONFIG_PAX_PAGEEXEC
5290+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5291+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5292+ goto bad_area;
5293+
5294+ up_read(&mm->mmap_sem);
5295+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5296+ do_group_exit(SIGKILL);
5297+ }
5298+#endif
5299+
5300 goto bad_area;
5301+ }
5302
5303 /*
5304 * If for any reason at all we couldn't handle the fault, make
5305diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5306index 5ca674b..127c3cb 100644
5307--- a/arch/ia64/mm/hugetlbpage.c
5308+++ b/arch/ia64/mm/hugetlbpage.c
5309@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5310 unsigned long pgoff, unsigned long flags)
5311 {
5312 struct vm_area_struct *vmm;
5313+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5314
5315 if (len > RGN_MAP_LIMIT)
5316 return -ENOMEM;
5317@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5318 /* At this point: (!vmm || addr < vmm->vm_end). */
5319 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
5320 return -ENOMEM;
5321- if (!vmm || (addr + len) <= vmm->vm_start)
5322+ if (check_heap_stack_gap(vmm, addr, len, offset))
5323 return addr;
5324 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
5325 }
5326diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5327index b755ea9..b9a969e 100644
5328--- a/arch/ia64/mm/init.c
5329+++ b/arch/ia64/mm/init.c
5330@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5331 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5332 vma->vm_end = vma->vm_start + PAGE_SIZE;
5333 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5334+
5335+#ifdef CONFIG_PAX_PAGEEXEC
5336+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5337+ vma->vm_flags &= ~VM_EXEC;
5338+
5339+#ifdef CONFIG_PAX_MPROTECT
5340+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5341+ vma->vm_flags &= ~VM_MAYEXEC;
5342+#endif
5343+
5344+ }
5345+#endif
5346+
5347 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5348 down_write(&current->mm->mmap_sem);
5349 if (insert_vm_struct(current->mm, vma)) {
5350diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5351index 40b3ee9..8c2c112 100644
5352--- a/arch/m32r/include/asm/cache.h
5353+++ b/arch/m32r/include/asm/cache.h
5354@@ -1,8 +1,10 @@
5355 #ifndef _ASM_M32R_CACHE_H
5356 #define _ASM_M32R_CACHE_H
5357
5358+#include <linux/const.h>
5359+
5360 /* L1 cache line size */
5361 #define L1_CACHE_SHIFT 4
5362-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5363+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5364
5365 #endif /* _ASM_M32R_CACHE_H */
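
The recurring cache.h change, here and in the other arch cache.h hunks below, swaps (1 << L1_CACHE_SHIFT) for (_AC(1,UL) << L1_CACHE_SHIFT) so that L1_CACHE_BYTES is an unsigned long in C while the same #define stays usable from assembly. _AC() from include/uapi/linux/const.h pastes the suffix only when not assembling:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)  X
    #else
    #define __AC(X,Y) (X##Y)
    #define _AC(X,Y)  __AC(X,Y)
    #endif

    #define L1_CACHE_SHIFT 4
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)  /* 16UL in C, 16 in asm */
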
5366diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5367index 82abd15..d95ae5d 100644
5368--- a/arch/m32r/lib/usercopy.c
5369+++ b/arch/m32r/lib/usercopy.c
5370@@ -14,6 +14,9 @@
5371 unsigned long
5372 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5373 {
5374+ if ((long)n < 0)
5375+ return n;
5376+
5377 prefetch(from);
5378 if (access_ok(VERIFY_WRITE, to, n))
5379 __copy_user(to,from,n);
5380@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5381 unsigned long
5382 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5383 {
5384+ if ((long)n < 0)
5385+ return n;
5386+
5387 prefetchw(to);
5388 if (access_ok(VERIFY_READ, from, n))
5389 __copy_user_zeroing(to,from,n);
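
The "(long)n < 0" guard added to the m32r copy routines catches lengths that originate from a negative signed value being converted to unsigned long, which would otherwise sail past access_ok() as an enormous count. For example:

    #include <stdio.h>

    int main(void)
    {
        int bad = -1;
        unsigned long n = bad;          /* 0xffffffffffffffff on 64-bit */

        if ((long)n < 0)
            printf("rejected bogus length %lu\n", n);
        return 0;
    }
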
5390diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5391index 0395c51..5f26031 100644
5392--- a/arch/m68k/include/asm/cache.h
5393+++ b/arch/m68k/include/asm/cache.h
5394@@ -4,9 +4,11 @@
5395 #ifndef __ARCH_M68K_CACHE_H
5396 #define __ARCH_M68K_CACHE_H
5397
5398+#include <linux/const.h>
5399+
5400 /* bytes per L1 cache line */
5401 #define L1_CACHE_SHIFT 4
5402-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5403+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5404
5405 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5406
5407diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5408index 4efe96a..60e8699 100644
5409--- a/arch/microblaze/include/asm/cache.h
5410+++ b/arch/microblaze/include/asm/cache.h
5411@@ -13,11 +13,12 @@
5412 #ifndef _ASM_MICROBLAZE_CACHE_H
5413 #define _ASM_MICROBLAZE_CACHE_H
5414
5415+#include <linux/const.h>
5416 #include <asm/registers.h>
5417
5418 #define L1_CACHE_SHIFT 5
5419 /* word-granular cache in microblaze */
5420-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5421+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5422
5423 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5424
5425diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5426index 01cc6ba..bcb7a5d 100644
5427--- a/arch/mips/include/asm/atomic.h
5428+++ b/arch/mips/include/asm/atomic.h
5429@@ -21,6 +21,10 @@
5430 #include <asm/cmpxchg.h>
5431 #include <asm/war.h>
5432
5433+#ifdef CONFIG_GENERIC_ATOMIC64
5434+#include <asm-generic/atomic64.h>
5435+#endif
5436+
5437 #define ATOMIC_INIT(i) { (i) }
5438
5439 /*
5440@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5441 */
5442 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5443
5444+#define atomic64_read_unchecked(v) atomic64_read(v)
5445+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5446+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5447+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5448+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5449+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5450+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5451+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5452+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5453+
5454 #endif /* CONFIG_64BIT */
5455
5456 /*
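
The *_unchecked aliases above exist because PaX's REFCOUNT feature turns the regular atomic ops into overflow-trapping ones on architectures that implement it; callers that intentionally wrap (plain statistics counters, sequence numbers) use the _unchecked names. MIPS has no trapping implementation, so both names resolve to the same operation, which is what the #define block expresses. A sketch of the split:

    typedef struct { volatile long counter; } atomic64_model_t;

    /* would trap on signed overflow under a REFCOUNT-style implementation */
    static void atomic64_add_model(long i, atomic64_model_t *v)
    {
        v->counter += i;
    }

    /* deliberately wrapping variant: never instrumented */
    static void atomic64_add_unchecked_model(long i, atomic64_model_t *v)
    {
        v->counter += i;
    }
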
5457diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5458index b4db69f..8f3b093 100644
5459--- a/arch/mips/include/asm/cache.h
5460+++ b/arch/mips/include/asm/cache.h
5461@@ -9,10 +9,11 @@
5462 #ifndef _ASM_CACHE_H
5463 #define _ASM_CACHE_H
5464
5465+#include <linux/const.h>
5466 #include <kmalloc.h>
5467
5468 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5469-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5470+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5471
5472 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5473 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5474diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5475index 455c0ac..ad65fbe 100644
5476--- a/arch/mips/include/asm/elf.h
5477+++ b/arch/mips/include/asm/elf.h
5478@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5479 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5480 #endif
5481
5482+#ifdef CONFIG_PAX_ASLR
5483+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5484+
5485+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5486+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5487+#endif
5488+
5489 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5490 struct linux_binprm;
5491 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5492 int uses_interp);
5493
5494-struct mm_struct;
5495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5496-#define arch_randomize_brk arch_randomize_brk
5497-
5498 #endif /* _ASM_ELF_H */
5499diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5500index c1f6afa..38cc6e9 100644
5501--- a/arch/mips/include/asm/exec.h
5502+++ b/arch/mips/include/asm/exec.h
5503@@ -12,6 +12,6 @@
5504 #ifndef _ASM_EXEC_H
5505 #define _ASM_EXEC_H
5506
5507-extern unsigned long arch_align_stack(unsigned long sp);
5508+#define arch_align_stack(x) ((x) & ~0xfUL)
5509
5510 #endif /* _ASM_EXEC_H */
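
Replacing the arch_align_stack() function with ((x) & ~0xfUL) drops the random stack-pointer jitter (the function body removed from arch/mips/kernel/process.c below) and keeps only the 16-byte alignment, presumably because PaX randomizes the stack base through its own deltas instead. The mask simply clears the low nibble:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sp = 0x7fffdeadbeefUL;

        printf("%#lx\n", sp & ~0xfUL);  /* prints 0x7fffdeadbee0 */
        return 0;
    }
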
5511diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5512index 21bff32..9f0c3b8 100644
5513--- a/arch/mips/include/asm/page.h
5514+++ b/arch/mips/include/asm/page.h
5515@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5516 #ifdef CONFIG_CPU_MIPS32
5517 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5518 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5519- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5520+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5521 #else
5522 typedef struct { unsigned long long pte; } pte_t;
5523 #define pte_val(x) ((x).pte)
5524diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5525index 881d18b..cea38bc 100644
5526--- a/arch/mips/include/asm/pgalloc.h
5527+++ b/arch/mips/include/asm/pgalloc.h
5528@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5529 {
5530 set_pud(pud, __pud((unsigned long)pmd));
5531 }
5532+
5533+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5534+{
5535+ pud_populate(mm, pud, pmd);
5536+}
5537 #endif
5538
5539 /*
5540diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5541index b2050b9..d71bb1b 100644
5542--- a/arch/mips/include/asm/thread_info.h
5543+++ b/arch/mips/include/asm/thread_info.h
5544@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5545 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5546 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5547 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5548+/* li takes a 32bit immediate */
5549+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5550 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5551
5552 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5553@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5554 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5555 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5556 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5557+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5558+
5559+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5560
5561 /* work to do in syscall_trace_leave() */
5562-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5563+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5564
5565 /* work to do on interrupt/exception return */
5566 #define _TIF_WORK_MASK \
5567 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5568 /* work to do on any return to u-space */
5569-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5570+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5571
5572 #endif /* __KERNEL__ */
5573
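
TIF_GRSEC_SETXID is deliberately parked at bit 29 ("li takes a 32bit immediate"): the syscall entry stubs patched below load _TIF_SYSCALL_WORK with the MIPS li pseudo-instruction, so the combined mask must fit in a 32-bit immediate. Checking the arithmetic (TIF_SYSCALL_AUDIT's value is assumed here for illustration):

    #include <stdio.h>

    #define TIF_SYSCALL_TRACE 31
    #define TIF_SYSCALL_AUDIT 3             /* assumed value, for illustration */
    #define TIF_GRSEC_SETXID  29

    #define _TIF_SYSCALL_WORK ((1U << TIF_SYSCALL_TRACE) | \
                               (1U << TIF_SYSCALL_AUDIT) | \
                               (1U << TIF_GRSEC_SETXID))

    int main(void)
    {
        printf("%#x\n", _TIF_SYSCALL_WORK); /* 0xa0000008: fits in 32 bits */
        return 0;
    }
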
5574diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5575index 9fdd8bc..4bd7f1a 100644
5576--- a/arch/mips/kernel/binfmt_elfn32.c
5577+++ b/arch/mips/kernel/binfmt_elfn32.c
5578@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5579 #undef ELF_ET_DYN_BASE
5580 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5581
5582+#ifdef CONFIG_PAX_ASLR
5583+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5584+
5585+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5586+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5587+#endif
5588+
5589 #include <asm/processor.h>
5590 #include <linux/module.h>
5591 #include <linux/elfcore.h>
5592diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5593index ff44823..97f8906 100644
5594--- a/arch/mips/kernel/binfmt_elfo32.c
5595+++ b/arch/mips/kernel/binfmt_elfo32.c
5596@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5597 #undef ELF_ET_DYN_BASE
5598 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5599
5600+#ifdef CONFIG_PAX_ASLR
5601+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5602+
5603+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5604+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5605+#endif
5606+
5607 #include <asm/processor.h>
5608
5609 /*
5610diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5611index a11c6f9..be5e164 100644
5612--- a/arch/mips/kernel/process.c
5613+++ b/arch/mips/kernel/process.c
5614@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5615 out:
5616 return pc;
5617 }
5618-
5619-/*
5620- * Don't forget that the stack pointer must be aligned on a 8 bytes
5621- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5622- */
5623-unsigned long arch_align_stack(unsigned long sp)
5624-{
5625- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5626- sp -= get_random_int() & ~PAGE_MASK;
5627-
5628- return sp & ALMASK;
5629-}
5630diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5631index 4812c6d..2069554 100644
5632--- a/arch/mips/kernel/ptrace.c
5633+++ b/arch/mips/kernel/ptrace.c
5634@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5635 return arch;
5636 }
5637
5638+#ifdef CONFIG_GRKERNSEC_SETXID
5639+extern void gr_delayed_cred_worker(void);
5640+#endif
5641+
5642 /*
5643 * Notification of system call entry/exit
5644 * - triggered by current->work.syscall_trace
5645@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5646 /* do the secure computing check first */
5647 secure_computing_strict(regs->regs[2]);
5648
5649+#ifdef CONFIG_GRKERNSEC_SETXID
5650+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5651+ gr_delayed_cred_worker();
5652+#endif
5653+
5654 if (!(current->ptrace & PT_PTRACED))
5655 goto out;
5656
5657diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5658index d20a4bc..7096ae5 100644
5659--- a/arch/mips/kernel/scall32-o32.S
5660+++ b/arch/mips/kernel/scall32-o32.S
5661@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5662
5663 stack_done:
5664 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5665- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5666+ li t1, _TIF_SYSCALL_WORK
5667 and t0, t1
5668 bnez t0, syscall_trace_entry # -> yes
5669
5670diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5671index b64f642..0fe6eab 100644
5672--- a/arch/mips/kernel/scall64-64.S
5673+++ b/arch/mips/kernel/scall64-64.S
5674@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5675
5676 sd a3, PT_R26(sp) # save a3 for syscall restarting
5677
5678- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5679+ li t1, _TIF_SYSCALL_WORK
5680 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5681 and t0, t1, t0
5682 bnez t0, syscall_trace_entry
5683diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5684index c29ac19..c592d05 100644
5685--- a/arch/mips/kernel/scall64-n32.S
5686+++ b/arch/mips/kernel/scall64-n32.S
5687@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5688
5689 sd a3, PT_R26(sp) # save a3 for syscall restarting
5690
5691- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5692+ li t1, _TIF_SYSCALL_WORK
5693 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5694 and t0, t1, t0
5695 bnez t0, n32_syscall_trace_entry
5696diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5697index cf3e75e..72e93fe 100644
5698--- a/arch/mips/kernel/scall64-o32.S
5699+++ b/arch/mips/kernel/scall64-o32.S
5700@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5701 PTR 4b, bad_stack
5702 .previous
5703
5704- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5705+ li t1, _TIF_SYSCALL_WORK
5706 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5707 and t0, t1, t0
5708 bnez t0, trace_a_syscall
5709diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5710index ddcec1e..c7f983e 100644
5711--- a/arch/mips/mm/fault.c
5712+++ b/arch/mips/mm/fault.c
5713@@ -27,6 +27,23 @@
5714 #include <asm/highmem.h> /* For VMALLOC_END */
5715 #include <linux/kdebug.h>
5716
5717+#ifdef CONFIG_PAX_PAGEEXEC
5718+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5719+{
5720+ unsigned long i;
5721+
5722+ printk(KERN_ERR "PAX: bytes at PC: ");
5723+ for (i = 0; i < 5; i++) {
5724+ unsigned int c;
5725+ if (get_user(c, (unsigned int *)pc+i))
5726+ printk(KERN_CONT "???????? ");
5727+ else
5728+ printk(KERN_CONT "%08x ", c);
5729+ }
5730+ printk("\n");
5731+}
5732+#endif
5733+
5734 /*
5735 * This routine handles page faults. It determines the address,
5736 * and the problem, and then passes it off to one of the appropriate
5737diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5738index 7e5fe27..479a219 100644
5739--- a/arch/mips/mm/mmap.c
5740+++ b/arch/mips/mm/mmap.c
5741@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5742 struct vm_area_struct *vma;
5743 unsigned long addr = addr0;
5744 int do_color_align;
5745+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5746 struct vm_unmapped_area_info info;
5747
5748 if (unlikely(len > TASK_SIZE))
5749@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5750 do_color_align = 1;
5751
5752 /* requesting a specific address */
5753+
5754+#ifdef CONFIG_PAX_RANDMMAP
5755+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5756+#endif
5757+
5758 if (addr) {
5759 if (do_color_align)
5760 addr = COLOUR_ALIGN(addr, pgoff);
5761@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5762 addr = PAGE_ALIGN(addr);
5763
5764 vma = find_vma(mm, addr);
5765- if (TASK_SIZE - len >= addr &&
5766- (!vma || addr + len <= vma->vm_start))
5767+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5768 return addr;
5769 }
5770
5771@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5772 {
5773 unsigned long random_factor = 0UL;
5774
5775+#ifdef CONFIG_PAX_RANDMMAP
5776+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5777+#endif
5778+
5779 if (current->flags & PF_RANDOMIZE) {
5780 random_factor = get_random_int();
5781 random_factor = random_factor << PAGE_SHIFT;
5782@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5783
5784 if (mmap_is_legacy()) {
5785 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5786+
5787+#ifdef CONFIG_PAX_RANDMMAP
5788+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5789+ mm->mmap_base += mm->delta_mmap;
5790+#endif
5791+
5792 mm->get_unmapped_area = arch_get_unmapped_area;
5793 mm->unmap_area = arch_unmap_area;
5794 } else {
5795 mm->mmap_base = mmap_base(random_factor);
5796+
5797+#ifdef CONFIG_PAX_RANDMMAP
5798+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5799+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5800+#endif
5801+
5802 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5803 mm->unmap_area = arch_unmap_area_topdown;
5804 }
5805 }
5806
5807-static inline unsigned long brk_rnd(void)
5808-{
5809- unsigned long rnd = get_random_int();
5810-
5811- rnd = rnd << PAGE_SHIFT;
5812- /* 8MB for 32bit, 256MB for 64bit */
5813- if (TASK_IS_32BIT_ADDR)
5814- rnd = rnd & 0x7ffffful;
5815- else
5816- rnd = rnd & 0xffffffful;
5817-
5818- return rnd;
5819-}
5820-
5821-unsigned long arch_randomize_brk(struct mm_struct *mm)
5822-{
5823- unsigned long base = mm->brk;
5824- unsigned long ret;
5825-
5826- ret = PAGE_ALIGN(base + brk_rnd());
5827-
5828- if (ret < mm->brk)
5829- return mm->brk;
5830-
5831- return ret;
5832-}
5833-
5834 int __virt_addr_valid(const volatile void *kaddr)
5835 {
5836 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
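
The arch_pick_mmap_layout() changes show the two RANDMMAP adjustments side by side: the bottom-up (legacy) layout pushes mmap_base up by delta_mmap, while the top-down layout pulls it down by delta_mmap + delta_stack so randomized mappings cannot collide with the randomized stack. Illustrative arithmetic (the base and delta values are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;
        unsigned long unmapped_base = 0x08000000UL;        /* legacy base */
        unsigned long top = 0x7fff8000UL;                  /* top-down base */
        unsigned long delta_mmap  = 0x123UL << page_shift;
        unsigned long delta_stack = 0x045UL << page_shift;

        printf("legacy : %#lx\n", unmapped_base + delta_mmap);
        printf("topdown: %#lx\n", top - (delta_mmap + delta_stack));
        return 0;
    }
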
5837diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5838index 967d144..db12197 100644
5839--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5840+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5841@@ -11,12 +11,14 @@
5842 #ifndef _ASM_PROC_CACHE_H
5843 #define _ASM_PROC_CACHE_H
5844
5845+#include <linux/const.h>
5846+
5847 /* L1 cache */
5848
5849 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5850 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5851-#define L1_CACHE_BYTES 16 /* bytes per entry */
5852 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5853+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5854 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5855
5856 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5857diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5858index bcb5df2..84fabd2 100644
5859--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5860+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5861@@ -16,13 +16,15 @@
5862 #ifndef _ASM_PROC_CACHE_H
5863 #define _ASM_PROC_CACHE_H
5864
5865+#include <linux/const.h>
5866+
5867 /*
5868 * L1 cache
5869 */
5870 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5871 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5872-#define L1_CACHE_BYTES 32 /* bytes per entry */
5873 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5874+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5875 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5876
5877 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5878diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5879index 4ce7a01..449202a 100644
5880--- a/arch/openrisc/include/asm/cache.h
5881+++ b/arch/openrisc/include/asm/cache.h
5882@@ -19,11 +19,13 @@
5883 #ifndef __ASM_OPENRISC_CACHE_H
5884 #define __ASM_OPENRISC_CACHE_H
5885
5886+#include <linux/const.h>
5887+
5888 /* FIXME: How can we replace these with values from the CPU...
5889 * they shouldn't be hard-coded!
5890 */
5891
5892-#define L1_CACHE_BYTES 16
5893 #define L1_CACHE_SHIFT 4
5894+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5895
5896 #endif /* __ASM_OPENRISC_CACHE_H */
5897diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5898index af9cf30..2aae9b2 100644
5899--- a/arch/parisc/include/asm/atomic.h
5900+++ b/arch/parisc/include/asm/atomic.h
5901@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5902
5903 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5904
5905+#define atomic64_read_unchecked(v) atomic64_read(v)
5906+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5907+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5908+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5909+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5910+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5911+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5912+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5913+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5914+
5915 #endif /* !CONFIG_64BIT */
5916
5917
5918diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5919index 47f11c7..3420df2 100644
5920--- a/arch/parisc/include/asm/cache.h
5921+++ b/arch/parisc/include/asm/cache.h
5922@@ -5,6 +5,7 @@
5923 #ifndef __ARCH_PARISC_CACHE_H
5924 #define __ARCH_PARISC_CACHE_H
5925
5926+#include <linux/const.h>
5927
5928 /*
5929 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5930@@ -15,13 +16,13 @@
5931 * just ruin performance.
5932 */
5933 #ifdef CONFIG_PA20
5934-#define L1_CACHE_BYTES 64
5935 #define L1_CACHE_SHIFT 6
5936 #else
5937-#define L1_CACHE_BYTES 32
5938 #define L1_CACHE_SHIFT 5
5939 #endif
5940
5941+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5942+
5943 #ifndef __ASSEMBLY__
5944
5945 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5946diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5947index 19f6cb1..6c78cf2 100644
5948--- a/arch/parisc/include/asm/elf.h
5949+++ b/arch/parisc/include/asm/elf.h
5950@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5951
5952 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5953
5954+#ifdef CONFIG_PAX_ASLR
5955+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5956+
5957+#define PAX_DELTA_MMAP_LEN 16
5958+#define PAX_DELTA_STACK_LEN 16
5959+#endif
5960+
5961 /* This yields a mask that user programs can use to figure out what
5962 instruction set this CPU supports. This could be done in user space,
5963 but it's not easy, and we've already done it here. */
5964diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5965index fc987a1..6e068ef 100644
5966--- a/arch/parisc/include/asm/pgalloc.h
5967+++ b/arch/parisc/include/asm/pgalloc.h
5968@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5969 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5970 }
5971
5972+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5973+{
5974+ pgd_populate(mm, pgd, pmd);
5975+}
5976+
5977 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5978 {
5979 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5980@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5981 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5982 #define pmd_free(mm, x) do { } while (0)
5983 #define pgd_populate(mm, pmd, pte) BUG()
5984+#define pgd_populate_kernel(mm, pmd, pte) BUG()
5985
5986 #endif
5987
5988diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5989index 7df49fa..a3eb445 100644
5990--- a/arch/parisc/include/asm/pgtable.h
5991+++ b/arch/parisc/include/asm/pgtable.h
5992@@ -16,6 +16,8 @@
5993 #include <asm/processor.h>
5994 #include <asm/cache.h>
5995
5996+extern spinlock_t pa_dbit_lock;
5997+
5998 /*
5999 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
6000 * memory. For the return value to be meaningful, ADDR must be >=
6001@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6002
6003 #define set_pte_at(mm, addr, ptep, pteval) \
6004 do { \
6005+ unsigned long flags; \
6006+ spin_lock_irqsave(&pa_dbit_lock, flags); \
6007 set_pte(ptep, pteval); \
6008 purge_tlb_entries(mm, addr); \
6009+ spin_unlock_irqrestore(&pa_dbit_lock, flags); \
6010 } while (0)
6011
6012 #endif /* !__ASSEMBLY__ */
6013@@ -218,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6014 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6015 #define PAGE_COPY PAGE_EXECREAD
6016 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6017+
6018+#ifdef CONFIG_PAX_PAGEEXEC
6019+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6020+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6021+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6022+#else
6023+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6024+# define PAGE_COPY_NOEXEC PAGE_COPY
6025+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6026+#endif
6027+
6028 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6029 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6030 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6031@@ -435,48 +451,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
6032
6033 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
6034 {
6035-#ifdef CONFIG_SMP
6036+ pte_t pte;
6037+ unsigned long flags;
6038+
6039 if (!pte_young(*ptep))
6040 return 0;
6041- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
6042-#else
6043- pte_t pte = *ptep;
6044- if (!pte_young(pte))
6045+
6046+ spin_lock_irqsave(&pa_dbit_lock, flags);
6047+ pte = *ptep;
6048+ if (!pte_young(pte)) {
6049+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
6050 return 0;
6051- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
6052+ }
6053+ set_pte(ptep, pte_mkold(pte));
6054+ purge_tlb_entries(vma->vm_mm, addr);
6055+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
6056 return 1;
6057-#endif
6058 }
6059
6060-extern spinlock_t pa_dbit_lock;
6061-
6062 struct mm_struct;
6063 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
6064 {
6065 pte_t old_pte;
6066+ unsigned long flags;
6067
6068- spin_lock(&pa_dbit_lock);
6069+ spin_lock_irqsave(&pa_dbit_lock, flags);
6070 old_pte = *ptep;
6071 pte_clear(mm,addr,ptep);
6072- spin_unlock(&pa_dbit_lock);
6073+ purge_tlb_entries(mm, addr);
6074+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
6075
6076 return old_pte;
6077 }
6078
6079 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
6080 {
6081-#ifdef CONFIG_SMP
6082- unsigned long new, old;
6083-
6084- do {
6085- old = pte_val(*ptep);
6086- new = pte_val(pte_wrprotect(__pte (old)));
6087- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
6088+ unsigned long flags;
6089+ spin_lock_irqsave(&pa_dbit_lock, flags);
6090+ set_pte(ptep, pte_wrprotect(*ptep));
6091 purge_tlb_entries(mm, addr);
6092-#else
6093- pte_t old_pte = *ptep;
6094- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
6095-#endif
6096+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
6097 }
6098
6099 #define pte_same(A,B) (pte_val(A) == pte_val(B))
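
Every parisc PTE helper rewritten above follows one discipline: the whole read-modify-write of the PTE plus the TLB purge happens inside pa_dbit_lock with interrupts disabled, replacing the old mix of cmpxchg loops (SMP) and unlocked set_pte_at() (UP). A userland model of the critical section, with the TLB purge stubbed:

    #include <pthread.h>

    static pthread_mutex_t dbit_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long pte;                   /* bit 0 models _PAGE_ACCESSED */

    static void purge_tlb_stub(void) { }

    static int test_and_clear_young_model(void)
    {
        int was_young;

        pthread_mutex_lock(&dbit_lock);         /* spin_lock_irqsave analogue */
        was_young = pte & 1;
        pte &= ~1UL;
        purge_tlb_stub();                       /* purge before dropping the lock */
        pthread_mutex_unlock(&dbit_lock);
        return was_young;
    }
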
6100diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6101index 4ba2c93..f5e3974 100644
6102--- a/arch/parisc/include/asm/uaccess.h
6103+++ b/arch/parisc/include/asm/uaccess.h
6104@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6105 const void __user *from,
6106 unsigned long n)
6107 {
6108- int sz = __compiletime_object_size(to);
6109+ size_t sz = __compiletime_object_size(to);
6110 int ret = -EFAULT;
6111
6112- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6113+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6114 ret = __copy_from_user(to, from, n);
6115 else
6116 copy_from_user_overflow();
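
The int-to-size_t change above matters because __compiletime_object_size() wraps __builtin_object_size(), which returns a size_t with (size_t)-1 meaning "unknown"; keeping the result in a signed int truncates large sizes and mixes signedness into the sz >= n comparison. The equivalent check in plain GCC C:

    #include <stddef.h>

    static int copy_allowed(const void *to, size_t n)
    {
        size_t sz = __builtin_object_size(to, 0);   /* (size_t)-1 if unknown */

        return sz == (size_t)-1 || sz >= n;
    }
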
6117diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
6118index b89a85a..a9891fa 100644
6119--- a/arch/parisc/kernel/cache.c
6120+++ b/arch/parisc/kernel/cache.c
6121@@ -426,14 +426,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
6122 /* Note: purge_tlb_entries can be called at startup with
6123 no context. */
6124
6125- /* Disable preemption while we play with %sr1. */
6126- preempt_disable();
6127+ purge_tlb_start(flags);
6128 mtsp(mm->context, 1);
6129- purge_tlb_start(flags);
6130 pdtlb(addr);
6131 pitlb(addr);
6132 purge_tlb_end(flags);
6133- preempt_enable();
6134 }
6135 EXPORT_SYMBOL(purge_tlb_entries);
6136
6137diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6138index 2a625fb..9908930 100644
6139--- a/arch/parisc/kernel/module.c
6140+++ b/arch/parisc/kernel/module.c
6141@@ -98,16 +98,38 @@
6142
6143 /* three functions to determine where in the module core
6144 * or init pieces the location is */
6145+static inline int in_init_rx(struct module *me, void *loc)
6146+{
6147+ return (loc >= me->module_init_rx &&
6148+ loc < (me->module_init_rx + me->init_size_rx));
6149+}
6150+
6151+static inline int in_init_rw(struct module *me, void *loc)
6152+{
6153+ return (loc >= me->module_init_rw &&
6154+ loc < (me->module_init_rw + me->init_size_rw));
6155+}
6156+
6157 static inline int in_init(struct module *me, void *loc)
6158 {
6159- return (loc >= me->module_init &&
6160- loc <= (me->module_init + me->init_size));
6161+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6162+}
6163+
6164+static inline int in_core_rx(struct module *me, void *loc)
6165+{
6166+ return (loc >= me->module_core_rx &&
6167+ loc < (me->module_core_rx + me->core_size_rx));
6168+}
6169+
6170+static inline int in_core_rw(struct module *me, void *loc)
6171+{
6172+ return (loc >= me->module_core_rw &&
6173+ loc < (me->module_core_rw + me->core_size_rw));
6174 }
6175
6176 static inline int in_core(struct module *me, void *loc)
6177 {
6178- return (loc >= me->module_core &&
6179- loc <= (me->module_core + me->core_size));
6180+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6181 }
6182
6183 static inline int in_local(struct module *me, void *loc)
6184@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6185 }
6186
6187 /* align things a bit */
6188- me->core_size = ALIGN(me->core_size, 16);
6189- me->arch.got_offset = me->core_size;
6190- me->core_size += gots * sizeof(struct got_entry);
6191+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6192+ me->arch.got_offset = me->core_size_rw;
6193+ me->core_size_rw += gots * sizeof(struct got_entry);
6194
6195- me->core_size = ALIGN(me->core_size, 16);
6196- me->arch.fdesc_offset = me->core_size;
6197- me->core_size += fdescs * sizeof(Elf_Fdesc);
6198+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6199+ me->arch.fdesc_offset = me->core_size_rw;
6200+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
6201
6202 me->arch.got_max = gots;
6203 me->arch.fdesc_max = fdescs;
6204@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6205
6206 BUG_ON(value == 0);
6207
6208- got = me->module_core + me->arch.got_offset;
6209+ got = me->module_core_rw + me->arch.got_offset;
6210 for (i = 0; got[i].addr; i++)
6211 if (got[i].addr == value)
6212 goto out;
6213@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6214 #ifdef CONFIG_64BIT
6215 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6216 {
6217- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
6218+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
6219
6220 if (!value) {
6221 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
6222@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6223
6224 /* Create new one */
6225 fdesc->addr = value;
6226- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6227+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6228 return (Elf_Addr)fdesc;
6229 }
6230 #endif /* CONFIG_64BIT */
6231@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
6232
6233 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
6234 end = table + sechdrs[me->arch.unwind_section].sh_size;
6235- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6236+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6237
6238 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
6239 me->arch.unwind_section, table, end, gp);
6240diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
6241index f76c108..92bad82 100644
6242--- a/arch/parisc/kernel/sys_parisc.c
6243+++ b/arch/parisc/kernel/sys_parisc.c
6244@@ -33,9 +33,11 @@
6245 #include <linux/utsname.h>
6246 #include <linux/personality.h>
6247
6248-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6249+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
6250+ unsigned long flags)
6251 {
6252 struct vm_area_struct *vma;
6253+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6254
6255 addr = PAGE_ALIGN(addr);
6256
6257@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6258 /* At this point: (!vma || addr < vma->vm_end). */
6259 if (TASK_SIZE - len < addr)
6260 return -ENOMEM;
6261- if (!vma || addr + len <= vma->vm_start)
6262+ if (check_heap_stack_gap(vma, addr, len, offset))
6263 return addr;
6264 addr = vma->vm_end;
6265 }
6266@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
6267 return offset & 0x3FF000;
6268 }
6269
6270-static unsigned long get_shared_area(struct address_space *mapping,
6271- unsigned long addr, unsigned long len, unsigned long pgoff)
6272+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
6273+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6274 {
6275 struct vm_area_struct *vma;
6276 int offset = mapping ? get_offset(mapping) : 0;
6277+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6278
6279 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
6280
6281@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
6282 /* At this point: (!vma || addr < vma->vm_end). */
6283 if (TASK_SIZE - len < addr)
6284 return -ENOMEM;
6285- if (!vma || addr + len <= vma->vm_start)
6286+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
6287 return addr;
6288 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
6289 if (addr < vma->vm_end) /* handle wraparound */
6290@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6291 if (flags & MAP_FIXED)
6292 return addr;
6293 if (!addr)
6294- addr = TASK_UNMAPPED_BASE;
6295+ addr = current->mm->mmap_base;
6296
6297 if (filp) {
6298- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6299+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6300 } else if(flags & MAP_SHARED) {
6301- addr = get_shared_area(NULL, addr, len, pgoff);
6302+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6303 } else {
6304- addr = get_unshared_area(addr, len);
6305+ addr = get_unshared_area(filp, addr, len, flags);
6306 }
6307 return addr;
6308 }
6309diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6310index 45ba99f..8e22c33 100644
6311--- a/arch/parisc/kernel/traps.c
6312+++ b/arch/parisc/kernel/traps.c
6313@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6314
6315 down_read(&current->mm->mmap_sem);
6316 vma = find_vma(current->mm,regs->iaoq[0]);
6317- if (vma && (regs->iaoq[0] >= vma->vm_start)
6318- && (vma->vm_flags & VM_EXEC)) {
6319-
6320+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6321 fault_address = regs->iaoq[0];
6322 fault_space = regs->iasq[0];
6323
6324diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6325index 18162ce..94de376 100644
6326--- a/arch/parisc/mm/fault.c
6327+++ b/arch/parisc/mm/fault.c
6328@@ -15,6 +15,7 @@
6329 #include <linux/sched.h>
6330 #include <linux/interrupt.h>
6331 #include <linux/module.h>
6332+#include <linux/unistd.h>
6333
6334 #include <asm/uaccess.h>
6335 #include <asm/traps.h>
6336@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6337 static unsigned long
6338 parisc_acctyp(unsigned long code, unsigned int inst)
6339 {
6340- if (code == 6 || code == 16)
6341+ if (code == 6 || code == 7 || code == 16)
6342 return VM_EXEC;
6343
6344 switch (inst & 0xf0000000) {
6345@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6346 }
6347 #endif
6348
6349+#ifdef CONFIG_PAX_PAGEEXEC
6350+/*
6351+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6352+ *
6353+ * returns 1 when task should be killed
6354+ * 2 when rt_sigreturn trampoline was detected
6355+ * 3 when unpatched PLT trampoline was detected
6356+ */
6357+static int pax_handle_fetch_fault(struct pt_regs *regs)
6358+{
6359+
6360+#ifdef CONFIG_PAX_EMUPLT
6361+ int err;
6362+
6363+ do { /* PaX: unpatched PLT emulation */
6364+ unsigned int bl, depwi;
6365+
6366+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6367+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6368+
6369+ if (err)
6370+ break;
6371+
6372+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6373+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6374+
6375+ err = get_user(ldw, (unsigned int *)addr);
6376+ err |= get_user(bv, (unsigned int *)(addr+4));
6377+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6378+
6379+ if (err)
6380+ break;
6381+
6382+ if (ldw == 0x0E801096U &&
6383+ bv == 0xEAC0C000U &&
6384+ ldw2 == 0x0E881095U)
6385+ {
6386+ unsigned int resolver, map;
6387+
6388+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6389+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6390+ if (err)
6391+ break;
6392+
6393+ regs->gr[20] = instruction_pointer(regs)+8;
6394+ regs->gr[21] = map;
6395+ regs->gr[22] = resolver;
6396+ regs->iaoq[0] = resolver | 3UL;
6397+ regs->iaoq[1] = regs->iaoq[0] + 4;
6398+ return 3;
6399+ }
6400+ }
6401+ } while (0);
6402+#endif
6403+
6404+#ifdef CONFIG_PAX_EMUTRAMP
6405+
6406+#ifndef CONFIG_PAX_EMUSIGRT
6407+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6408+ return 1;
6409+#endif
6410+
6411+ do { /* PaX: rt_sigreturn emulation */
6412+ unsigned int ldi1, ldi2, bel, nop;
6413+
6414+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6415+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6416+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6417+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6418+
6419+ if (err)
6420+ break;
6421+
6422+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6423+ ldi2 == 0x3414015AU &&
6424+ bel == 0xE4008200U &&
6425+ nop == 0x08000240U)
6426+ {
6427+ regs->gr[25] = (ldi1 & 2) >> 1;
6428+ regs->gr[20] = __NR_rt_sigreturn;
6429+ regs->gr[31] = regs->iaoq[1] + 16;
6430+ regs->sr[0] = regs->iasq[1];
6431+ regs->iaoq[0] = 0x100UL;
6432+ regs->iaoq[1] = regs->iaoq[0] + 4;
6433+ regs->iasq[0] = regs->sr[2];
6434+ regs->iasq[1] = regs->sr[2];
6435+ return 2;
6436+ }
6437+ } while (0);
6438+#endif
6439+
6440+ return 1;
6441+}
6442+
6443+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6444+{
6445+ unsigned long i;
6446+
6447+ printk(KERN_ERR "PAX: bytes at PC: ");
6448+ for (i = 0; i < 5; i++) {
6449+ unsigned int c;
6450+ if (get_user(c, (unsigned int *)pc+i))
6451+ printk(KERN_CONT "???????? ");
6452+ else
6453+ printk(KERN_CONT "%08x ", c);
6454+ }
6455+ printk("\n");
6456+}
6457+#endif
6458+
6459 int fixup_exception(struct pt_regs *regs)
6460 {
6461 const struct exception_table_entry *fix;
6462@@ -192,8 +303,33 @@ good_area:
6463
6464 acc_type = parisc_acctyp(code,regs->iir);
6465
6466- if ((vma->vm_flags & acc_type) != acc_type)
6467+ if ((vma->vm_flags & acc_type) != acc_type) {
6468+
6469+#ifdef CONFIG_PAX_PAGEEXEC
6470+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6471+ (address & ~3UL) == instruction_pointer(regs))
6472+ {
6473+ up_read(&mm->mmap_sem);
6474+ switch (pax_handle_fetch_fault(regs)) {
6475+
6476+#ifdef CONFIG_PAX_EMUPLT
6477+ case 3:
6478+ return;
6479+#endif
6480+
6481+#ifdef CONFIG_PAX_EMUTRAMP
6482+ case 2:
6483+ return;
6484+#endif
6485+
6486+ }
6487+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6488+ do_group_exit(SIGKILL);
6489+ }
6490+#endif
6491+
6492 goto bad_area;
6493+ }
6494
6495 /*
6496 * If for any reason at all we couldn't handle the fault, make
6497diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6498index e3b1d41..8e81edf 100644
6499--- a/arch/powerpc/include/asm/atomic.h
6500+++ b/arch/powerpc/include/asm/atomic.h
6501@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6502 return t1;
6503 }
6504
6505+#define atomic64_read_unchecked(v) atomic64_read(v)
6506+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6507+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6508+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6509+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6510+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6511+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6512+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6513+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6514+
6515 #endif /* __powerpc64__ */
6516
6517 #endif /* __KERNEL__ */
6518diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6519index 9e495c9..b6878e5 100644
6520--- a/arch/powerpc/include/asm/cache.h
6521+++ b/arch/powerpc/include/asm/cache.h
6522@@ -3,6 +3,7 @@
6523
6524 #ifdef __KERNEL__
6525
6526+#include <linux/const.h>
6527
6528 /* bytes per L1 cache line */
6529 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6530@@ -22,7 +23,7 @@
6531 #define L1_CACHE_SHIFT 7
6532 #endif
6533
6534-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6535+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6536
6537 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6538
6539diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6540index 6abf0a1..459d0f1 100644
6541--- a/arch/powerpc/include/asm/elf.h
6542+++ b/arch/powerpc/include/asm/elf.h
6543@@ -28,8 +28,19 @@
6544 the loader. We need to make sure that it is out of the way of the program
6545 that it will "exec", and that there is sufficient room for the brk. */
6546
6547-extern unsigned long randomize_et_dyn(unsigned long base);
6548-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6549+#define ELF_ET_DYN_BASE (0x20000000)
6550+
6551+#ifdef CONFIG_PAX_ASLR
6552+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6553+
6554+#ifdef __powerpc64__
6555+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6556+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6557+#else
6558+#define PAX_DELTA_MMAP_LEN 15
6559+#define PAX_DELTA_STACK_LEN 15
6560+#endif
6561+#endif
6562
6563 /*
6564 * Our registers are always unsigned longs, whether we're a 32 bit
6565@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6566 (0x7ff >> (PAGE_SHIFT - 12)) : \
6567 (0x3ffff >> (PAGE_SHIFT - 12)))
6568
6569-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6570-#define arch_randomize_brk arch_randomize_brk
6571-
6572-
6573 #ifdef CONFIG_SPU_BASE
6574 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6575 #define NT_SPU 1
6576diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6577index 8196e9c..d83a9f3 100644
6578--- a/arch/powerpc/include/asm/exec.h
6579+++ b/arch/powerpc/include/asm/exec.h
6580@@ -4,6 +4,6 @@
6581 #ifndef _ASM_POWERPC_EXEC_H
6582 #define _ASM_POWERPC_EXEC_H
6583
6584-extern unsigned long arch_align_stack(unsigned long sp);
6585+#define arch_align_stack(x) ((x) & ~0xfUL)
6586
6587 #endif /* _ASM_POWERPC_EXEC_H */
6588diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6589index 5acabbd..7ea14fa 100644
6590--- a/arch/powerpc/include/asm/kmap_types.h
6591+++ b/arch/powerpc/include/asm/kmap_types.h
6592@@ -10,7 +10,7 @@
6593 * 2 of the License, or (at your option) any later version.
6594 */
6595
6596-#define KM_TYPE_NR 16
6597+#define KM_TYPE_NR 17
6598
6599 #endif /* __KERNEL__ */
6600 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6601diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6602index 8565c25..2865190 100644
6603--- a/arch/powerpc/include/asm/mman.h
6604+++ b/arch/powerpc/include/asm/mman.h
6605@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6606 }
6607 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6608
6609-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6610+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6611 {
6612 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6613 }
6614diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6615index f072e97..b436dee 100644
6616--- a/arch/powerpc/include/asm/page.h
6617+++ b/arch/powerpc/include/asm/page.h
6618@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6619 * and needs to be executable. This means the whole heap ends
6620 * up being executable.
6621 */
6622-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6623- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6624+#define VM_DATA_DEFAULT_FLAGS32 \
6625+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6626+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6627
6628 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6629 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6630@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6631 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6632 #endif
6633
6634+#define ktla_ktva(addr) (addr)
6635+#define ktva_ktla(addr) (addr)
6636+
6637 /*
6638 * Use the top bit of the higher-level page table entries to indicate whether
6639 * the entries we point to contain hugepages. This works because we know that
6640diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6641index cd915d6..c10cee8 100644
6642--- a/arch/powerpc/include/asm/page_64.h
6643+++ b/arch/powerpc/include/asm/page_64.h
6644@@ -154,15 +154,18 @@ do { \
6645 * stack by default, so in the absence of a PT_GNU_STACK program header
6646 * we turn execute permission off.
6647 */
6648-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6649- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6650+#define VM_STACK_DEFAULT_FLAGS32 \
6651+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6652+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6653
6654 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6655 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6656
6657+#ifndef CONFIG_PAX_PAGEEXEC
6658 #define VM_STACK_DEFAULT_FLAGS \
6659 (is_32bit_task() ? \
6660 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6661+#endif
6662
6663 #include <asm-generic/getorder.h>
6664
6665diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6666index 292725c..f87ae14 100644
6667--- a/arch/powerpc/include/asm/pgalloc-64.h
6668+++ b/arch/powerpc/include/asm/pgalloc-64.h
6669@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6670 #ifndef CONFIG_PPC_64K_PAGES
6671
6672 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6673+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6674
6675 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6676 {
6677@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6678 pud_set(pud, (unsigned long)pmd);
6679 }
6680
6681+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6682+{
6683+ pud_populate(mm, pud, pmd);
6684+}
6685+
6686 #define pmd_populate(mm, pmd, pte_page) \
6687 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6688 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6689@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6690 #else /* CONFIG_PPC_64K_PAGES */
6691
6692 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6693+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6694
6695 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6696 pte_t *pte)
6697diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6698index a9cbd3b..3b67efa 100644
6699--- a/arch/powerpc/include/asm/pgtable.h
6700+++ b/arch/powerpc/include/asm/pgtable.h
6701@@ -2,6 +2,7 @@
6702 #define _ASM_POWERPC_PGTABLE_H
6703 #ifdef __KERNEL__
6704
6705+#include <linux/const.h>
6706 #ifndef __ASSEMBLY__
6707 #include <asm/processor.h> /* For TASK_SIZE */
6708 #include <asm/mmu.h>
6709diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6710index 4aad413..85d86bf 100644
6711--- a/arch/powerpc/include/asm/pte-hash32.h
6712+++ b/arch/powerpc/include/asm/pte-hash32.h
6713@@ -21,6 +21,7 @@
6714 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6715 #define _PAGE_USER 0x004 /* usermode access allowed */
6716 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6717+#define _PAGE_EXEC _PAGE_GUARDED
6718 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6719 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6720 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6721diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6722index 3d5c9dc..62f8414 100644
6723--- a/arch/powerpc/include/asm/reg.h
6724+++ b/arch/powerpc/include/asm/reg.h
6725@@ -215,6 +215,7 @@
6726 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6727 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6728 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6729+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6730 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6731 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6732 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6733diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
6734index 195ce2a..ab5c614 100644
6735--- a/arch/powerpc/include/asm/smp.h
6736+++ b/arch/powerpc/include/asm/smp.h
6737@@ -50,7 +50,7 @@ struct smp_ops_t {
6738 int (*cpu_disable)(void);
6739 void (*cpu_die)(unsigned int nr);
6740 int (*cpu_bootable)(unsigned int nr);
6741-};
6742+} __no_const;
6743
6744 extern void smp_send_debugger_break(void);
6745 extern void start_secondary_resume(void);
6746diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6747index 406b7b9..af63426 100644
6748--- a/arch/powerpc/include/asm/thread_info.h
6749+++ b/arch/powerpc/include/asm/thread_info.h
6750@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6751 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6752 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6753 #define TIF_SINGLESTEP 8 /* singlestepping active */
6754-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6755 #define TIF_SECCOMP 10 /* secure computing */
6756 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6757 #define TIF_NOERROR 12 /* Force successful syscall return */
6758@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6759 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6760 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6761 for stack store? */
6762+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6763+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
6764+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6765
6766 /* as above, but as bit values */
6767 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6768@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6769 #define _TIF_UPROBE (1<<TIF_UPROBE)
6770 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6771 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6772+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6773 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6774- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6775+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6776+ _TIF_GRSEC_SETXID)
6777
6778 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6779 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6780diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6781index 4db4959..aba5c41 100644
6782--- a/arch/powerpc/include/asm/uaccess.h
6783+++ b/arch/powerpc/include/asm/uaccess.h
6784@@ -318,52 +318,6 @@ do { \
6785 extern unsigned long __copy_tofrom_user(void __user *to,
6786 const void __user *from, unsigned long size);
6787
6788-#ifndef __powerpc64__
6789-
6790-static inline unsigned long copy_from_user(void *to,
6791- const void __user *from, unsigned long n)
6792-{
6793- unsigned long over;
6794-
6795- if (access_ok(VERIFY_READ, from, n))
6796- return __copy_tofrom_user((__force void __user *)to, from, n);
6797- if ((unsigned long)from < TASK_SIZE) {
6798- over = (unsigned long)from + n - TASK_SIZE;
6799- return __copy_tofrom_user((__force void __user *)to, from,
6800- n - over) + over;
6801- }
6802- return n;
6803-}
6804-
6805-static inline unsigned long copy_to_user(void __user *to,
6806- const void *from, unsigned long n)
6807-{
6808- unsigned long over;
6809-
6810- if (access_ok(VERIFY_WRITE, to, n))
6811- return __copy_tofrom_user(to, (__force void __user *)from, n);
6812- if ((unsigned long)to < TASK_SIZE) {
6813- over = (unsigned long)to + n - TASK_SIZE;
6814- return __copy_tofrom_user(to, (__force void __user *)from,
6815- n - over) + over;
6816- }
6817- return n;
6818-}
6819-
6820-#else /* __powerpc64__ */
6821-
6822-#define __copy_in_user(to, from, size) \
6823- __copy_tofrom_user((to), (from), (size))
6824-
6825-extern unsigned long copy_from_user(void *to, const void __user *from,
6826- unsigned long n);
6827-extern unsigned long copy_to_user(void __user *to, const void *from,
6828- unsigned long n);
6829-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6830- unsigned long n);
6831-
6832-#endif /* __powerpc64__ */
6833-
6834 static inline unsigned long __copy_from_user_inatomic(void *to,
6835 const void __user *from, unsigned long n)
6836 {
6837@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6838 if (ret == 0)
6839 return 0;
6840 }
6841+
6842+ if (!__builtin_constant_p(n))
6843+ check_object_size(to, n, false);
6844+
6845 return __copy_tofrom_user((__force void __user *)to, from, n);
6846 }
6847
6848@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6849 if (ret == 0)
6850 return 0;
6851 }
6852+
6853+ if (!__builtin_constant_p(n))
6854+ check_object_size(from, n, true);
6855+
6856 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6857 }
6858
6859@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6860 return __copy_to_user_inatomic(to, from, size);
6861 }
6862
6863+#ifndef __powerpc64__
6864+
6865+static inline unsigned long __must_check copy_from_user(void *to,
6866+ const void __user *from, unsigned long n)
6867+{
6868+ unsigned long over;
6869+
6870+ if ((long)n < 0)
6871+ return n;
6872+
6873+ if (access_ok(VERIFY_READ, from, n)) {
6874+ if (!__builtin_constant_p(n))
6875+ check_object_size(to, n, false);
6876+ return __copy_tofrom_user((__force void __user *)to, from, n);
6877+ }
6878+ if ((unsigned long)from < TASK_SIZE) {
6879+ over = (unsigned long)from + n - TASK_SIZE;
6880+ if (!__builtin_constant_p(n - over))
6881+ check_object_size(to, n - over, false);
6882+ return __copy_tofrom_user((__force void __user *)to, from,
6883+ n - over) + over;
6884+ }
6885+ return n;
6886+}
6887+
6888+static inline unsigned long __must_check copy_to_user(void __user *to,
6889+ const void *from, unsigned long n)
6890+{
6891+ unsigned long over;
6892+
6893+ if ((long)n < 0)
6894+ return n;
6895+
6896+ if (access_ok(VERIFY_WRITE, to, n)) {
6897+ if (!__builtin_constant_p(n))
6898+ check_object_size(from, n, true);
6899+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6900+ }
6901+ if ((unsigned long)to < TASK_SIZE) {
6902+ over = (unsigned long)to + n - TASK_SIZE;
6903+	if (!__builtin_constant_p(n - over))
6904+ check_object_size(from, n - over, true);
6905+ return __copy_tofrom_user(to, (__force void __user *)from,
6906+ n - over) + over;
6907+ }
6908+ return n;
6909+}
6910+
6911+#else /* __powerpc64__ */
6912+
6913+#define __copy_in_user(to, from, size) \
6914+ __copy_tofrom_user((to), (from), (size))
6915+
6916+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6917+{
6918+ if ((long)n < 0 || n > INT_MAX)
6919+ return n;
6920+
6921+ if (!__builtin_constant_p(n))
6922+ check_object_size(to, n, false);
6923+
6924+ if (likely(access_ok(VERIFY_READ, from, n)))
6925+ n = __copy_from_user(to, from, n);
6926+ else
6927+ memset(to, 0, n);
6928+ return n;
6929+}
6930+
6931+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6932+{
6933+ if ((long)n < 0 || n > INT_MAX)
6934+ return n;
6935+
6936+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6937+ if (!__builtin_constant_p(n))
6938+ check_object_size(from, n, true);
6939+ n = __copy_to_user(to, from, n);
6940+ }
6941+ return n;
6942+}
6943+
6944+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6945+ unsigned long n);
6946+
6947+#endif /* __powerpc64__ */
6948+
6949 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6950
6951 static inline unsigned long clear_user(void __user *addr, unsigned long size)
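The uaccess.h rework above folds copy_{from,to}_user into inline functions and adds two defences before any copy: a sanity check that rejects sign-wrapped sizes, and a check_object_size() pass over the kernel buffer (the PAX_USERCOPY hook). A compilable sketch of that shape, with the kernel primitives stubbed out:

/* Sketch of the hardened copy pattern; check_object_size() and memcpy()
 * stand in for the kernel's PAX_USERCOPY hook and __copy_tofrom_user(). */
#include <limits.h>
#include <stddef.h>
#include <string.h>

static void check_object_size(const void *ptr, size_t n, int to_user)
{
	/* the real hook verifies ptr..ptr+n lies within one slab object
	 * or the current stack frame, and reports a violation otherwise */
	(void)ptr; (void)n; (void)to_user;
}

static size_t copy_from_user_sketch(void *to, const void *from, size_t n)
{
	if ((long)n < 0 || n > INT_MAX)	/* catches (size_t)-1 style wraps */
		return n;		/* report every byte as uncopied */
	check_object_size(to, n, 0);
	memcpy(to, from, n);
	return 0;			/* 0 bytes left uncopied */
}

Returning n on a bad size mirrors the kernel convention that the return value is the number of bytes not copied, so a wrapped size looks like a total failure rather than a huge copy.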
6952diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6953index 4684e33..acc4d19e 100644
6954--- a/arch/powerpc/kernel/exceptions-64e.S
6955+++ b/arch/powerpc/kernel/exceptions-64e.S
6956@@ -715,6 +715,7 @@ storage_fault_common:
6957 std r14,_DAR(r1)
6958 std r15,_DSISR(r1)
6959 addi r3,r1,STACK_FRAME_OVERHEAD
6960+ bl .save_nvgprs
6961 mr r4,r14
6962 mr r5,r15
6963 ld r14,PACA_EXGEN+EX_R14(r13)
6964@@ -723,8 +724,7 @@ storage_fault_common:
6965 cmpdi r3,0
6966 bne- 1f
6967 b .ret_from_except_lite
6968-1: bl .save_nvgprs
6969- mr r5,r3
6970+1: mr r5,r3
6971 addi r3,r1,STACK_FRAME_OVERHEAD
6972 ld r4,_DAR(r1)
6973 bl .bad_page_fault
6974diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6975index 3684cbd..bc89eab 100644
6976--- a/arch/powerpc/kernel/exceptions-64s.S
6977+++ b/arch/powerpc/kernel/exceptions-64s.S
6978@@ -1206,10 +1206,10 @@ handle_page_fault:
6979 11: ld r4,_DAR(r1)
6980 ld r5,_DSISR(r1)
6981 addi r3,r1,STACK_FRAME_OVERHEAD
6982+ bl .save_nvgprs
6983 bl .do_page_fault
6984 cmpdi r3,0
6985 beq+ 12f
6986- bl .save_nvgprs
6987 mr r5,r3
6988 addi r3,r1,STACK_FRAME_OVERHEAD
6989 lwz r4,_DAR(r1)
6990diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6991index 2e3200c..72095ce 100644
6992--- a/arch/powerpc/kernel/module_32.c
6993+++ b/arch/powerpc/kernel/module_32.c
6994@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6995 me->arch.core_plt_section = i;
6996 }
6997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6998- printk("Module doesn't contain .plt or .init.plt sections.\n");
6999+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
7000 return -ENOEXEC;
7001 }
7002
7003@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
7004
7005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
7006 /* Init, or core PLT? */
7007- if (location >= mod->module_core
7008- && location < mod->module_core + mod->core_size)
7009+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
7010+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
7011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
7012- else
7013+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
7014+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
7015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
7016+ else {
7017+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
7018+ return ~0UL;
7019+ }
7020
7021 /* Find this entry, or if that fails, the next avail. entry */
7022 while (entry->jump[0]) {
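do_plt_call() above now classifies a relocation site against the split RX (code) and RW (data) module regions introduced elsewhere in this patch, instead of the single module_core range. The range test reduces to this sketch, where the layout struct is a stand-in for struct module's new fields:

#include <stdbool.h>

struct mod_layout {
	const char *core_rx; unsigned long size_rx;	/* code, read-execute */
	const char *core_rw; unsigned long size_rw;	/* data, read-write   */
};

static bool in_core(const struct mod_layout *m, const char *loc)
{
	return (loc >= m->core_rx && loc < m->core_rx + m->size_rx) ||
	       (loc >= m->core_rw && loc < m->core_rw + m->size_rw);
}

Falling through both the core and init tests is now an error path, hence the new "invalid R_PPC_REL24 entry" message above.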
7023diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
7024index 8143067..21ae55b 100644
7025--- a/arch/powerpc/kernel/process.c
7026+++ b/arch/powerpc/kernel/process.c
7027@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
7028	 * Lookup NIP late so we have the best chance of getting the
7029 * above info out without failing
7030 */
7031- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
7032- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
7033+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
7034+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
7035 #endif
7036 show_stack(current, (unsigned long *) regs->gpr[1]);
7037 if (!user_mode(regs))
7038@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7039 newsp = stack[0];
7040 ip = stack[STACK_FRAME_LR_SAVE];
7041 if (!firstframe || ip != lr) {
7042- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
7043+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
7044 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7045 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
7046- printk(" (%pS)",
7047+ printk(" (%pA)",
7048 (void *)current->ret_stack[curr_frame].ret);
7049 curr_frame--;
7050 }
7051@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7052 struct pt_regs *regs = (struct pt_regs *)
7053 (sp + STACK_FRAME_OVERHEAD);
7054 lr = regs->link;
7055- printk("--- Exception: %lx at %pS\n LR = %pS\n",
7056+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
7057 regs->trap, (void *)regs->nip, (void *)lr);
7058 firstframe = 1;
7059 }
7060@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
7061 mtspr(SPRN_CTRLT, ctrl);
7062 }
7063 #endif /* CONFIG_PPC64 */
7064-
7065-unsigned long arch_align_stack(unsigned long sp)
7066-{
7067- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7068- sp -= get_random_int() & ~PAGE_MASK;
7069- return sp & ~0xf;
7070-}
7071-
7072-static inline unsigned long brk_rnd(void)
7073-{
7074- unsigned long rnd = 0;
7075-
7076- /* 8MB for 32bit, 1GB for 64bit */
7077- if (is_32bit_task())
7078- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
7079- else
7080- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7081-
7082- return rnd << PAGE_SHIFT;
7083-}
7084-
7085-unsigned long arch_randomize_brk(struct mm_struct *mm)
7086-{
7087- unsigned long base = mm->brk;
7088- unsigned long ret;
7089-
7090-#ifdef CONFIG_PPC_STD_MMU_64
7091- /*
7092- * If we are using 1TB segments and we are allowed to randomise
7093- * the heap, we can put it above 1TB so it is backed by a 1TB
7094- * segment. Otherwise the heap will be in the bottom 1TB
7095- * which always uses 256MB segments and this may result in a
7096- * performance penalty.
7097- */
7098- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7099- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7100-#endif
7101-
7102- ret = PAGE_ALIGN(base + brk_rnd());
7103-
7104- if (ret < mm->brk)
7105- return mm->brk;
7106-
7107- return ret;
7108-}
7109-
7110-unsigned long randomize_et_dyn(unsigned long base)
7111-{
7112- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7113-
7114- if (ret < base)
7115- return base;
7116-
7117- return ret;
7118-}
7119diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7120index c497000..8fde506 100644
7121--- a/arch/powerpc/kernel/ptrace.c
7122+++ b/arch/powerpc/kernel/ptrace.c
7123@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
7124 return ret;
7125 }
7126
7127+#ifdef CONFIG_GRKERNSEC_SETXID
7128+extern void gr_delayed_cred_worker(void);
7129+#endif
7130+
7131 /*
7132 * We must return the syscall number to actually look up in the table.
7133 * This can be -1L to skip running any syscall at all.
7134@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7135
7136 secure_computing_strict(regs->gpr[0]);
7137
7138+#ifdef CONFIG_GRKERNSEC_SETXID
7139+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7140+ gr_delayed_cred_worker();
7141+#endif
7142+
7143 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7144 tracehook_report_syscall_entry(regs))
7145 /*
7146@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7147 {
7148 int step;
7149
7150+#ifdef CONFIG_GRKERNSEC_SETXID
7151+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7152+ gr_delayed_cred_worker();
7153+#endif
7154+
7155 audit_syscall_exit(regs);
7156
7157 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
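Both ptrace hooks above use the same deferred-work idiom: another context sets TIF_GRSEC_SETXID, and the syscall entry/exit path atomically tests-and-clears it so gr_delayed_cred_worker() runs exactly once per setting. A user-space sketch of that idiom, with C11 atomics standing in for the kernel's thread-flag helpers:

#include <stdatomic.h>
#include <stdio.h>

#define TIF_GRSEC_SETXID 9u	/* matches the thread_info.h hunk above */

static atomic_uint thread_flags;

static void gr_delayed_cred_worker(void)
{
	puts("applying delayed credential change");
}

static void syscall_edge(void)
{
	unsigned int old = atomic_fetch_and(&thread_flags,
					    ~(1u << TIF_GRSEC_SETXID));
	if (old & (1u << TIF_GRSEC_SETXID))
		gr_delayed_cred_worker();
}

int main(void)
{
	atomic_fetch_or(&thread_flags, 1u << TIF_GRSEC_SETXID);
	syscall_edge();		/* runs the worker */
	syscall_edge();		/* flag already clear: no-op */
	return 0;
}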
7158diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7159index 804e323..79181c1 100644
7160--- a/arch/powerpc/kernel/signal_32.c
7161+++ b/arch/powerpc/kernel/signal_32.c
7162@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7163 /* Save user registers on the stack */
7164 frame = &rt_sf->uc.uc_mcontext;
7165 addr = frame;
7166- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7167+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7168 if (save_user_regs(regs, frame, 0, 1))
7169 goto badframe;
7170 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7171diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7172index 1ca045d..139c3f7 100644
7173--- a/arch/powerpc/kernel/signal_64.c
7174+++ b/arch/powerpc/kernel/signal_64.c
7175@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7176 current->thread.fpscr.val = 0;
7177
7178 /* Set up to return from userspace. */
7179- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7180+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7182 } else {
7183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
7184diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
7185index 3ce1f86..c30e629 100644
7186--- a/arch/powerpc/kernel/sysfs.c
7187+++ b/arch/powerpc/kernel/sysfs.c
7188@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
7189 return NOTIFY_OK;
7190 }
7191
7192-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
7193+static struct notifier_block sysfs_cpu_nb = {
7194 .notifier_call = sysfs_cpu_notify,
7195 };
7196
7197diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
7198index 3251840..3f7c77a 100644
7199--- a/arch/powerpc/kernel/traps.c
7200+++ b/arch/powerpc/kernel/traps.c
7201@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
7202 return flags;
7203 }
7204
7205+extern void gr_handle_kernel_exploit(void);
7206+
7207 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7208 int signr)
7209 {
7210@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7211 panic("Fatal exception in interrupt");
7212 if (panic_on_oops)
7213 panic("Fatal exception");
7214+
7215+ gr_handle_kernel_exploit();
7216+
7217 do_exit(signr);
7218 }
7219
7220diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
7221index 1b2076f..835e4be 100644
7222--- a/arch/powerpc/kernel/vdso.c
7223+++ b/arch/powerpc/kernel/vdso.c
7224@@ -34,6 +34,7 @@
7225 #include <asm/firmware.h>
7226 #include <asm/vdso.h>
7227 #include <asm/vdso_datapage.h>
7228+#include <asm/mman.h>
7229
7230 #include "setup.h"
7231
7232@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7233 vdso_base = VDSO32_MBASE;
7234 #endif
7235
7236- current->mm->context.vdso_base = 0;
7237+ current->mm->context.vdso_base = ~0UL;
7238
7239 /* vDSO has a problem and was disabled, just don't "enable" it for the
7240 * process
7241@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7242 vdso_base = get_unmapped_area(NULL, vdso_base,
7243 (vdso_pages << PAGE_SHIFT) +
7244 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
7245- 0, 0);
7246+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
7247 if (IS_ERR_VALUE(vdso_base)) {
7248 rc = vdso_base;
7249 goto fail_mmapsem;
7250diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
7251index 5eea6f3..5d10396 100644
7252--- a/arch/powerpc/lib/usercopy_64.c
7253+++ b/arch/powerpc/lib/usercopy_64.c
7254@@ -9,22 +9,6 @@
7255 #include <linux/module.h>
7256 #include <asm/uaccess.h>
7257
7258-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7259-{
7260- if (likely(access_ok(VERIFY_READ, from, n)))
7261- n = __copy_from_user(to, from, n);
7262- else
7263- memset(to, 0, n);
7264- return n;
7265-}
7266-
7267-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7268-{
7269- if (likely(access_ok(VERIFY_WRITE, to, n)))
7270- n = __copy_to_user(to, from, n);
7271- return n;
7272-}
7273-
7274 unsigned long copy_in_user(void __user *to, const void __user *from,
7275 unsigned long n)
7276 {
7277@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
7278 return n;
7279 }
7280
7281-EXPORT_SYMBOL(copy_from_user);
7282-EXPORT_SYMBOL(copy_to_user);
7283 EXPORT_SYMBOL(copy_in_user);
7284
7285diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
7286index 3a8489a..6a63b3b 100644
7287--- a/arch/powerpc/mm/fault.c
7288+++ b/arch/powerpc/mm/fault.c
7289@@ -32,6 +32,10 @@
7290 #include <linux/perf_event.h>
7291 #include <linux/magic.h>
7292 #include <linux/ratelimit.h>
7293+#include <linux/slab.h>
7294+#include <linux/pagemap.h>
7295+#include <linux/compiler.h>
7296+#include <linux/unistd.h>
7297
7298 #include <asm/firmware.h>
7299 #include <asm/page.h>
7300@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7301 }
7302 #endif
7303
7304+#ifdef CONFIG_PAX_PAGEEXEC
7305+/*
7306+ * PaX: decide what to do with offenders (regs->nip = fault address)
7307+ *
7308+ * returns 1 when task should be killed
7309+ */
7310+static int pax_handle_fetch_fault(struct pt_regs *regs)
7311+{
7312+ return 1;
7313+}
7314+
7315+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7316+{
7317+ unsigned long i;
7318+
7319+ printk(KERN_ERR "PAX: bytes at PC: ");
7320+ for (i = 0; i < 5; i++) {
7321+ unsigned int c;
7322+ if (get_user(c, (unsigned int __user *)pc+i))
7323+ printk(KERN_CONT "???????? ");
7324+ else
7325+ printk(KERN_CONT "%08x ", c);
7326+ }
7327+ printk("\n");
7328+}
7329+#endif
7330+
7331 /*
7332 * Check whether the instruction at regs->nip is a store using
7333 * an update addressing form which will update r1.
7334@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7335 * indicate errors in DSISR but can validly be set in SRR1.
7336 */
7337 if (trap == 0x400)
7338- error_code &= 0x48200000;
7339+ error_code &= 0x58200000;
7340 else
7341 is_write = error_code & DSISR_ISSTORE;
7342 #else
7343@@ -364,7 +395,7 @@ good_area:
7344 * "undefined". Of those that can be set, this is the only
7345 * one which seems bad.
7346 */
7347- if (error_code & 0x10000000)
7348+ if (error_code & DSISR_GUARDED)
7349 /* Guarded storage error. */
7350 goto bad_area;
7351 #endif /* CONFIG_8xx */
7352@@ -379,7 +410,7 @@ good_area:
7353 * processors use the same I/D cache coherency mechanism
7354 * as embedded.
7355 */
7356- if (error_code & DSISR_PROTFAULT)
7357+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7358 goto bad_area;
7359 #endif /* CONFIG_PPC_STD_MMU */
7360
7361@@ -462,6 +493,23 @@ bad_area:
7362 bad_area_nosemaphore:
7363 /* User mode accesses cause a SIGSEGV */
7364 if (user_mode(regs)) {
7365+
7366+#ifdef CONFIG_PAX_PAGEEXEC
7367+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7368+#ifdef CONFIG_PPC_STD_MMU
7369+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7370+#else
7371+ if (is_exec && regs->nip == address) {
7372+#endif
7373+ switch (pax_handle_fetch_fault(regs)) {
7374+ }
7375+
7376+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7377+ do_group_exit(SIGKILL);
7378+ }
7379+ }
7380+#endif
7381+
7382 _exception(SIGSEGV, regs, code, address);
7383 return 0;
7384 }
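The fault.c hunk wires PAGEEXEC enforcement into the user-mode SIGSEGV path: an instruction fetch that raises DSISR_PROTFAULT or the newly named DSISR_GUARDED on a PAGEEXEC task gets its context dumped and the task group killed. A condensed, compilable sketch of that control flow; the predicates and hooks are stand-ins for the kernel calls:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fault {
	bool pageexec_task;	/* mm->pax_flags & MF_PAX_PAGEEXEC */
	bool exec_fetch;	/* is_exec */
	bool guarded_or_prot;	/* error_code & (DSISR_PROTFAULT | DSISR_GUARDED) */
};

static void handle_user_fault(const struct fault *f)
{
	if (f->pageexec_task && f->exec_fetch && f->guarded_or_prot) {
		puts("PAX: terminating task");	/* pax_report_fault() */
		exit(EXIT_FAILURE);		/* do_group_exit(SIGKILL) */
	}
	puts("ordinary SIGSEGV delivery");	/* _exception() */
}

int main(void)
{
	struct fault f = { false, true, true };
	handle_user_fault(&f);
	return 0;
}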
7385diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7386index 67a42ed..cd463e0 100644
7387--- a/arch/powerpc/mm/mmap_64.c
7388+++ b/arch/powerpc/mm/mmap_64.c
7389@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7390 {
7391 unsigned long rnd = 0;
7392
7393+#ifdef CONFIG_PAX_RANDMMAP
7394+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7395+#endif
7396+
7397 if (current->flags & PF_RANDOMIZE) {
7398 /* 8MB for 32bit, 1GB for 64bit */
7399 if (is_32bit_task())
7400@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7401 */
7402 if (mmap_is_legacy()) {
7403 mm->mmap_base = TASK_UNMAPPED_BASE;
7404+
7405+#ifdef CONFIG_PAX_RANDMMAP
7406+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7407+ mm->mmap_base += mm->delta_mmap;
7408+#endif
7409+
7410 mm->get_unmapped_area = arch_get_unmapped_area;
7411 mm->unmap_area = arch_unmap_area;
7412 } else {
7413 mm->mmap_base = mmap_base();
7414+
7415+#ifdef CONFIG_PAX_RANDMMAP
7416+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7417+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7418+#endif
7419+
7420 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7421 mm->unmap_area = arch_unmap_area_topdown;
7422 }
7423diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7424index e779642..e5bb889 100644
7425--- a/arch/powerpc/mm/mmu_context_nohash.c
7426+++ b/arch/powerpc/mm/mmu_context_nohash.c
7427@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7428 return NOTIFY_OK;
7429 }
7430
7431-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7432+static struct notifier_block mmu_context_cpu_nb = {
7433 .notifier_call = mmu_context_cpu_notify,
7434 };
7435
7436diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7437index bba87ca..c346a33 100644
7438--- a/arch/powerpc/mm/numa.c
7439+++ b/arch/powerpc/mm/numa.c
7440@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7441 return ret;
7442 }
7443
7444-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7445+static struct notifier_block ppc64_numa_nb = {
7446 .notifier_call = cpu_numa_callback,
7447 .priority = 1 /* Must run before sched domains notifier. */
7448 };
7449diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7450index cf9dada..241529f 100644
7451--- a/arch/powerpc/mm/slice.c
7452+++ b/arch/powerpc/mm/slice.c
7453@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7454 if ((mm->task_size - len) < addr)
7455 return 0;
7456 vma = find_vma(mm, addr);
7457- return (!vma || (addr + len) <= vma->vm_start);
7458+ return check_heap_stack_gap(vma, addr, len, 0);
7459 }
7460
7461 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7462@@ -272,7 +272,7 @@ full_search:
7463 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
7464 continue;
7465 }
7466- if (!vma || addr + len <= vma->vm_start) {
7467+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7468 /*
7469 * Remember the place where we stopped the search:
7470 */
7471@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7472 }
7473 }
7474
7475- addr = mm->mmap_base;
7476- while (addr > len) {
7477+ if (mm->mmap_base < len)
7478+ addr = -ENOMEM;
7479+ else
7480+ addr = mm->mmap_base - len;
7481+
7482+ while (!IS_ERR_VALUE(addr)) {
7483 /* Go down by chunk size */
7484- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
7485+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
7486
7487 /* Check for hit with different page size */
7488 mask = slice_range_to_mask(addr, len);
7489@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7490 * return with success:
7491 */
7492 vma = find_vma(mm, addr);
7493- if (!vma || (addr + len) <= vma->vm_start) {
7494+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7495 /* remember the address as a hint for next time */
7496 if (use_cache)
7497 mm->free_area_cache = addr;
7498@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7499 mm->cached_hole_size = vma->vm_start - addr;
7500
7501 /* try just below the current vma->vm_start */
7502- addr = vma->vm_start;
7503+ addr = skip_heap_stack_gap(vma, len, 0);
7504 }
7505
7506 /*
7507@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7508 if (fixed && addr > (mm->task_size - len))
7509 return -EINVAL;
7510
7511+#ifdef CONFIG_PAX_RANDMMAP
7512+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7513+ addr = 0;
7514+#endif
7515+
7516 /* If hint, make sure it matches our alignment restrictions */
7517 if (!fixed && addr) {
7518 addr = _ALIGN_UP(addr, 1ul << pshift);
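slice.c above replaces every bare `addr + len <= vma->vm_start` test with check_heap_stack_gap(), which additionally keeps allocations clear of a guard gap under growing stacks. The helper itself is defined elsewhere in this patch; the sketch below is an assumed, simplified version of its contract, with the VM_GROWSDOWN value and the gap size as illustrative stand-ins:

#include <stdbool.h>

struct vma { unsigned long vm_start; unsigned long vm_flags; };

#define VM_GROWSDOWN	0x0100UL	/* illustrative flag value */
#define GUARD_GAP	(64UL << 10)	/* illustrative gap, not the kernel's */

static bool check_heap_stack_gap(const struct vma *vma, unsigned long addr,
				 unsigned long len, unsigned long offset)
{
	if (!vma)
		return true;			/* nothing above: range is free */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a gap below stacks */
		return addr + len + GUARD_GAP + offset <= vma->vm_start;
	return addr + len <= vma->vm_start;
}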
7519diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7520index 0cfece4..2f1a0e5 100644
7521--- a/arch/powerpc/platforms/cell/spufs/file.c
7522+++ b/arch/powerpc/platforms/cell/spufs/file.c
7523@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7524 return VM_FAULT_NOPAGE;
7525 }
7526
7527-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7528+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7529 unsigned long address,
7530- void *buf, int len, int write)
7531+ void *buf, size_t len, int write)
7532 {
7533 struct spu_context *ctx = vma->vm_file->private_data;
7534 unsigned long offset = address - vma->vm_start;
7535diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7536index bdb738a..49c9f95 100644
7537--- a/arch/powerpc/platforms/powermac/smp.c
7538+++ b/arch/powerpc/platforms/powermac/smp.c
7539@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7540 return NOTIFY_OK;
7541 }
7542
7543-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7544+static struct notifier_block smp_core99_cpu_nb = {
7545 .notifier_call = smp_core99_cpu_notify,
7546 };
7547 #endif /* CONFIG_HOTPLUG_CPU */
7548diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7549index c797832..ce575c8 100644
7550--- a/arch/s390/include/asm/atomic.h
7551+++ b/arch/s390/include/asm/atomic.h
7552@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7553 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7554 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7555
7556+#define atomic64_read_unchecked(v) atomic64_read(v)
7557+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7558+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7559+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7560+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7561+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7562+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7563+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7564+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7565+
7566 #define smp_mb__before_atomic_dec() smp_mb()
7567 #define smp_mb__after_atomic_dec() smp_mb()
7568 #define smp_mb__before_atomic_inc() smp_mb()
7569diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7570index 4d7ccac..d03d0ad 100644
7571--- a/arch/s390/include/asm/cache.h
7572+++ b/arch/s390/include/asm/cache.h
7573@@ -9,8 +9,10 @@
7574 #ifndef __ARCH_S390_CACHE_H
7575 #define __ARCH_S390_CACHE_H
7576
7577-#define L1_CACHE_BYTES 256
7578+#include <linux/const.h>
7579+
7580 #define L1_CACHE_SHIFT 8
7581+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7582 #define NET_SKB_PAD 32
7583
7584 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7585diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7586index 178ff96..8c93bd1 100644
7587--- a/arch/s390/include/asm/elf.h
7588+++ b/arch/s390/include/asm/elf.h
7589@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7590 the loader. We need to make sure that it is out of the way of the program
7591 that it will "exec", and that there is sufficient room for the brk. */
7592
7593-extern unsigned long randomize_et_dyn(unsigned long base);
7594-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7595+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7596+
7597+#ifdef CONFIG_PAX_ASLR
7598+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7599+
7600+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7601+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7602+#endif
7603
7604 /* This yields a mask that user programs can use to figure out what
7605 instruction set this CPU supports. */
7606@@ -210,9 +216,6 @@ struct linux_binprm;
7607 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7608 int arch_setup_additional_pages(struct linux_binprm *, int);
7609
7610-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7611-#define arch_randomize_brk arch_randomize_brk
7612-
7613 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7614
7615 #endif
7616diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7617index c4a93d6..4d2a9b4 100644
7618--- a/arch/s390/include/asm/exec.h
7619+++ b/arch/s390/include/asm/exec.h
7620@@ -7,6 +7,6 @@
7621 #ifndef __ASM_EXEC_H
7622 #define __ASM_EXEC_H
7623
7624-extern unsigned long arch_align_stack(unsigned long sp);
7625+#define arch_align_stack(x) ((x) & ~0xfUL)
7626
7627 #endif /* __ASM_EXEC_H */
7628diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7629index 34268df..ea97318 100644
7630--- a/arch/s390/include/asm/uaccess.h
7631+++ b/arch/s390/include/asm/uaccess.h
7632@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7633 copy_to_user(void __user *to, const void *from, unsigned long n)
7634 {
7635 might_fault();
7636+
7637+ if ((long)n < 0)
7638+ return n;
7639+
7640 if (access_ok(VERIFY_WRITE, to, n))
7641 n = __copy_to_user(to, from, n);
7642 return n;
7643@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7644 static inline unsigned long __must_check
7645 __copy_from_user(void *to, const void __user *from, unsigned long n)
7646 {
7647+ if ((long)n < 0)
7648+ return n;
7649+
7650 if (__builtin_constant_p(n) && (n <= 256))
7651 return uaccess.copy_from_user_small(n, from, to);
7652 else
7653@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7654 static inline unsigned long __must_check
7655 copy_from_user(void *to, const void __user *from, unsigned long n)
7656 {
7657- unsigned int sz = __compiletime_object_size(to);
7658+ size_t sz = __compiletime_object_size(to);
7659
7660 might_fault();
7661- if (unlikely(sz != -1 && sz < n)) {
7662+
7663+ if ((long)n < 0)
7664+ return n;
7665+
7666+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7667 copy_from_user_overflow();
7668 return n;
7669 }
7670diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7671index 4610dea..cf0af21 100644
7672--- a/arch/s390/kernel/module.c
7673+++ b/arch/s390/kernel/module.c
7674@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7675
7676 /* Increase core size by size of got & plt and set start
7677 offsets for got and plt. */
7678- me->core_size = ALIGN(me->core_size, 4);
7679- me->arch.got_offset = me->core_size;
7680- me->core_size += me->arch.got_size;
7681- me->arch.plt_offset = me->core_size;
7682- me->core_size += me->arch.plt_size;
7683+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7684+ me->arch.got_offset = me->core_size_rw;
7685+ me->core_size_rw += me->arch.got_size;
7686+ me->arch.plt_offset = me->core_size_rx;
7687+ me->core_size_rx += me->arch.plt_size;
7688 return 0;
7689 }
7690
7691@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7692 if (info->got_initialized == 0) {
7693 Elf_Addr *gotent;
7694
7695- gotent = me->module_core + me->arch.got_offset +
7696+ gotent = me->module_core_rw + me->arch.got_offset +
7697 info->got_offset;
7698 *gotent = val;
7699 info->got_initialized = 1;
7700@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7701 else if (r_type == R_390_GOTENT ||
7702 r_type == R_390_GOTPLTENT)
7703 *(unsigned int *) loc =
7704- (val + (Elf_Addr) me->module_core - loc) >> 1;
7705+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7706 else if (r_type == R_390_GOT64 ||
7707 r_type == R_390_GOTPLT64)
7708 *(unsigned long *) loc = val;
7709@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7710 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7711 if (info->plt_initialized == 0) {
7712 unsigned int *ip;
7713- ip = me->module_core + me->arch.plt_offset +
7714+ ip = me->module_core_rx + me->arch.plt_offset +
7715 info->plt_offset;
7716 #ifndef CONFIG_64BIT
7717 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7718@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7719 val - loc + 0xffffUL < 0x1ffffeUL) ||
7720 (r_type == R_390_PLT32DBL &&
7721 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7722- val = (Elf_Addr) me->module_core +
7723+ val = (Elf_Addr) me->module_core_rx +
7724 me->arch.plt_offset +
7725 info->plt_offset;
7726 val += rela->r_addend - loc;
7727@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7728 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7729 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7730 val = val + rela->r_addend -
7731- ((Elf_Addr) me->module_core + me->arch.got_offset);
7732+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7733 if (r_type == R_390_GOTOFF16)
7734 *(unsigned short *) loc = val;
7735 else if (r_type == R_390_GOTOFF32)
7736@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7737 break;
7738 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7739 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7740- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7741+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7742 rela->r_addend - loc;
7743 if (r_type == R_390_GOTPC)
7744 *(unsigned int *) loc = val;
7745diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7746index 536d645..4a5bd9e 100644
7747--- a/arch/s390/kernel/process.c
7748+++ b/arch/s390/kernel/process.c
7749@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7750 }
7751 return 0;
7752 }
7753-
7754-unsigned long arch_align_stack(unsigned long sp)
7755-{
7756- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7757- sp -= get_random_int() & ~PAGE_MASK;
7758- return sp & ~0xf;
7759-}
7760-
7761-static inline unsigned long brk_rnd(void)
7762-{
7763- /* 8MB for 32bit, 1GB for 64bit */
7764- if (is_32bit_task())
7765- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7766- else
7767- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7768-}
7769-
7770-unsigned long arch_randomize_brk(struct mm_struct *mm)
7771-{
7772- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7773-
7774- if (ret < mm->brk)
7775- return mm->brk;
7776- return ret;
7777-}
7778-
7779-unsigned long randomize_et_dyn(unsigned long base)
7780-{
7781- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7782-
7783- if (!(current->flags & PF_RANDOMIZE))
7784- return base;
7785- if (ret < base)
7786- return base;
7787- return ret;
7788-}
7789diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7790index c59a5ef..3fae59c 100644
7791--- a/arch/s390/mm/mmap.c
7792+++ b/arch/s390/mm/mmap.c
7793@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7794 */
7795 if (mmap_is_legacy()) {
7796 mm->mmap_base = TASK_UNMAPPED_BASE;
7797+
7798+#ifdef CONFIG_PAX_RANDMMAP
7799+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7800+ mm->mmap_base += mm->delta_mmap;
7801+#endif
7802+
7803 mm->get_unmapped_area = arch_get_unmapped_area;
7804 mm->unmap_area = arch_unmap_area;
7805 } else {
7806 mm->mmap_base = mmap_base();
7807+
7808+#ifdef CONFIG_PAX_RANDMMAP
7809+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7810+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7811+#endif
7812+
7813 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7814 mm->unmap_area = arch_unmap_area_topdown;
7815 }
7816@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7817 */
7818 if (mmap_is_legacy()) {
7819 mm->mmap_base = TASK_UNMAPPED_BASE;
7820+
7821+#ifdef CONFIG_PAX_RANDMMAP
7822+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7823+ mm->mmap_base += mm->delta_mmap;
7824+#endif
7825+
7826 mm->get_unmapped_area = s390_get_unmapped_area;
7827 mm->unmap_area = arch_unmap_area;
7828 } else {
7829 mm->mmap_base = mmap_base();
7830+
7831+#ifdef CONFIG_PAX_RANDMMAP
7832+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7833+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7834+#endif
7835+
7836 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7837 mm->unmap_area = arch_unmap_area_topdown;
7838 }
7839diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7840index ae3d59f..f65f075 100644
7841--- a/arch/score/include/asm/cache.h
7842+++ b/arch/score/include/asm/cache.h
7843@@ -1,7 +1,9 @@
7844 #ifndef _ASM_SCORE_CACHE_H
7845 #define _ASM_SCORE_CACHE_H
7846
7847+#include <linux/const.h>
7848+
7849 #define L1_CACHE_SHIFT 4
7850-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7851+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7852
7853 #endif /* _ASM_SCORE_CACHE_H */
7854diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7855index f9f3cd5..58ff438 100644
7856--- a/arch/score/include/asm/exec.h
7857+++ b/arch/score/include/asm/exec.h
7858@@ -1,6 +1,6 @@
7859 #ifndef _ASM_SCORE_EXEC_H
7860 #define _ASM_SCORE_EXEC_H
7861
7862-extern unsigned long arch_align_stack(unsigned long sp);
7863+#define arch_align_stack(x) (x)
7864
7865 #endif /* _ASM_SCORE_EXEC_H */
7866diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7867index 7956846..5f37677 100644
7868--- a/arch/score/kernel/process.c
7869+++ b/arch/score/kernel/process.c
7870@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7871
7872 return task_pt_regs(task)->cp0_epc;
7873 }
7874-
7875-unsigned long arch_align_stack(unsigned long sp)
7876-{
7877- return sp;
7878-}
7879diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7880index ef9e555..331bd29 100644
7881--- a/arch/sh/include/asm/cache.h
7882+++ b/arch/sh/include/asm/cache.h
7883@@ -9,10 +9,11 @@
7884 #define __ASM_SH_CACHE_H
7885 #ifdef __KERNEL__
7886
7887+#include <linux/const.h>
7888 #include <linux/init.h>
7889 #include <cpu/cache.h>
7890
7891-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7892+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7893
7894 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7895
7896diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7897index 03f2b55..b027032 100644
7898--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7899+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7900@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7901 return NOTIFY_OK;
7902 }
7903
7904-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7905+static struct notifier_block shx3_cpu_notifier = {
7906 .notifier_call = shx3_cpu_callback,
7907 };
7908
7909diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7910index 6777177..cb5e44f 100644
7911--- a/arch/sh/mm/mmap.c
7912+++ b/arch/sh/mm/mmap.c
7913@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7914 struct mm_struct *mm = current->mm;
7915 struct vm_area_struct *vma;
7916 int do_colour_align;
7917+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7918 struct vm_unmapped_area_info info;
7919
7920 if (flags & MAP_FIXED) {
7921@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7922 if (filp || (flags & MAP_SHARED))
7923 do_colour_align = 1;
7924
7925+#ifdef CONFIG_PAX_RANDMMAP
7926+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7927+#endif
7928+
7929 if (addr) {
7930 if (do_colour_align)
7931 addr = COLOUR_ALIGN(addr, pgoff);
7932@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7933 addr = PAGE_ALIGN(addr);
7934
7935 vma = find_vma(mm, addr);
7936- if (TASK_SIZE - len >= addr &&
7937- (!vma || addr + len <= vma->vm_start))
7938+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7939 return addr;
7940 }
7941
7942 info.flags = 0;
7943 info.length = len;
7944- info.low_limit = TASK_UNMAPPED_BASE;
7945+ info.low_limit = mm->mmap_base;
7946 info.high_limit = TASK_SIZE;
7947 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7948 info.align_offset = pgoff << PAGE_SHIFT;
7949@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7950 struct mm_struct *mm = current->mm;
7951 unsigned long addr = addr0;
7952 int do_colour_align;
7953+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7954 struct vm_unmapped_area_info info;
7955
7956 if (flags & MAP_FIXED) {
7957@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7958 if (filp || (flags & MAP_SHARED))
7959 do_colour_align = 1;
7960
7961+#ifdef CONFIG_PAX_RANDMMAP
7962+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7963+#endif
7964+
7965 /* requesting a specific address */
7966 if (addr) {
7967 if (do_colour_align)
7968@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7969 addr = PAGE_ALIGN(addr);
7970
7971 vma = find_vma(mm, addr);
7972- if (TASK_SIZE - len >= addr &&
7973- (!vma || addr + len <= vma->vm_start))
7974+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7975 return addr;
7976 }
7977
7978@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7979 VM_BUG_ON(addr != -ENOMEM);
7980 info.flags = 0;
7981 info.low_limit = TASK_UNMAPPED_BASE;
7982+
7983+#ifdef CONFIG_PAX_RANDMMAP
7984+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7985+ info.low_limit += mm->delta_mmap;
7986+#endif
7987+
7988 info.high_limit = TASK_SIZE;
7989 addr = vm_unmapped_area(&info);
7990 }
7991diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7992index be56a24..443328f 100644
7993--- a/arch/sparc/include/asm/atomic_64.h
7994+++ b/arch/sparc/include/asm/atomic_64.h
7995@@ -14,18 +14,40 @@
7996 #define ATOMIC64_INIT(i) { (i) }
7997
7998 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7999+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8000+{
8001+ return v->counter;
8002+}
8003 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8004+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8005+{
8006+ return v->counter;
8007+}
8008
8009 #define atomic_set(v, i) (((v)->counter) = i)
8010+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8011+{
8012+ v->counter = i;
8013+}
8014 #define atomic64_set(v, i) (((v)->counter) = i)
8015+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8016+{
8017+ v->counter = i;
8018+}
8019
8020 extern void atomic_add(int, atomic_t *);
8021+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8022 extern void atomic64_add(long, atomic64_t *);
8023+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8024 extern void atomic_sub(int, atomic_t *);
8025+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8026 extern void atomic64_sub(long, atomic64_t *);
8027+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8028
8029 extern int atomic_add_ret(int, atomic_t *);
8030+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8031 extern long atomic64_add_ret(long, atomic64_t *);
8032+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8033 extern int atomic_sub_ret(int, atomic_t *);
8034 extern long atomic64_sub_ret(long, atomic64_t *);
8035
8036@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8037 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8038
8039 #define atomic_inc_return(v) atomic_add_ret(1, v)
8040+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8041+{
8042+ return atomic_add_ret_unchecked(1, v);
8043+}
8044 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8045+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8046+{
8047+ return atomic64_add_ret_unchecked(1, v);
8048+}
8049
8050 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8051 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8052
8053 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8054+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8055+{
8056+ return atomic_add_ret_unchecked(i, v);
8057+}
8058 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8059+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8060+{
8061+ return atomic64_add_ret_unchecked(i, v);
8062+}
8063
8064 /*
8065 * atomic_inc_and_test - increment and test
8066@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8067 * other cases.
8068 */
8069 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8070+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8071+{
8072+ return atomic_inc_return_unchecked(v) == 0;
8073+}
8074 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8075
8076 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
8077@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8078 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
8079
8080 #define atomic_inc(v) atomic_add(1, v)
8081+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8082+{
8083+ atomic_add_unchecked(1, v);
8084+}
8085 #define atomic64_inc(v) atomic64_add(1, v)
8086+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8087+{
8088+ atomic64_add_unchecked(1, v);
8089+}
8090
8091 #define atomic_dec(v) atomic_sub(1, v)
8092+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8093+{
8094+ atomic_sub_unchecked(1, v);
8095+}
8096 #define atomic64_dec(v) atomic64_sub(1, v)
8097+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8098+{
8099+ atomic64_sub_unchecked(1, v);
8100+}
8101
8102 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8103 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8104
8105 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8106+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8107+{
8108+ return cmpxchg(&v->counter, old, new);
8109+}
8110 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8111+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8112+{
8113+ return xchg(&v->counter, new);
8114+}
8115
8116 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8117 {
8118- int c, old;
8119+ int c, old, new;
8120 c = atomic_read(v);
8121 for (;;) {
8122- if (unlikely(c == (u)))
8123+ if (unlikely(c == u))
8124 break;
8125- old = atomic_cmpxchg((v), c, c + (a));
8126+
8127+ asm volatile("addcc %2, %0, %0\n"
8128+
8129+#ifdef CONFIG_PAX_REFCOUNT
8130+ "tvs %%icc, 6\n"
8131+#endif
8132+
8133+ : "=r" (new)
8134+ : "0" (c), "ir" (a)
8135+ : "cc");
8136+
8137+ old = atomic_cmpxchg(v, c, new);
8138 if (likely(old == c))
8139 break;
8140 c = old;
8141@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8142 #define atomic64_cmpxchg(v, o, n) \
8143 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8144 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8145+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8146+{
8147+ return xchg(&v->counter, new);
8148+}
8149
8150 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8151 {
8152- long c, old;
8153+ long c, old, new;
8154 c = atomic64_read(v);
8155 for (;;) {
8156- if (unlikely(c == (u)))
8157+ if (unlikely(c == u))
8158 break;
8159- old = atomic64_cmpxchg((v), c, c + (a));
8160+
8161+ asm volatile("addcc %2, %0, %0\n"
8162+
8163+#ifdef CONFIG_PAX_REFCOUNT
8164+ "tvs %%xcc, 6\n"
8165+#endif
8166+
8167+ : "=r" (new)
8168+ : "0" (c), "ir" (a)
8169+ : "cc");
8170+
8171+ old = atomic64_cmpxchg(v, c, new);
8172 if (likely(old == c))
8173 break;
8174 c = old;
8175 }
8176- return c != (u);
8177+ return c != u;
8178 }
8179
8180 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
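The sparc64 hunk above is the PAX_REFCOUNT pattern: the plain `c + a` is replaced by an `addcc` whose condition codes feed a `tvs` (trap on overflow), so a wrapped reference count traps instead of silently going negative. The same check expressed portably, assuming GCC/Clang's __builtin_add_overflow, with abort() standing in for the trap handler:

#include <stdio.h>
#include <stdlib.h>

static int add_trap_overflow(int c, int a)
{
	int sum;
	if (__builtin_add_overflow(c, a, &sum)) {	/* addcc + tvs %icc, 6 */
		fputs("refcount overflow\n", stderr);
		abort();
	}
	return sum;
}

int main(void)
{
	printf("%d\n", add_trap_overflow(1, 1));	/* fine: prints 2 */
	return 0;
}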
8181diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8182index 5bb6991..5c2132e 100644
8183--- a/arch/sparc/include/asm/cache.h
8184+++ b/arch/sparc/include/asm/cache.h
8185@@ -7,10 +7,12 @@
8186 #ifndef _SPARC_CACHE_H
8187 #define _SPARC_CACHE_H
8188
8189+#include <linux/const.h>
8190+
8191 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8192
8193 #define L1_CACHE_SHIFT 5
8194-#define L1_CACHE_BYTES 32
8195+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8196
8197 #ifdef CONFIG_SPARC32
8198 #define SMP_CACHE_BYTES_SHIFT 5
8199diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8200index ac74a2c..a9e58af 100644
8201--- a/arch/sparc/include/asm/elf_32.h
8202+++ b/arch/sparc/include/asm/elf_32.h
8203@@ -114,6 +114,13 @@ typedef struct {
8204
8205 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8206
8207+#ifdef CONFIG_PAX_ASLR
8208+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8209+
8210+#define PAX_DELTA_MMAP_LEN 16
8211+#define PAX_DELTA_STACK_LEN 16
8212+#endif
8213+
8214 /* This yields a mask that user programs can use to figure out what
8215 instruction set this cpu supports. This can NOT be done in userspace
8216 on Sparc. */
8217diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
8218index 370ca1e..d4f4a98 100644
8219--- a/arch/sparc/include/asm/elf_64.h
8220+++ b/arch/sparc/include/asm/elf_64.h
8221@@ -189,6 +189,13 @@ typedef struct {
8222 #define ELF_ET_DYN_BASE 0x0000010000000000UL
8223 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
8224
8225+#ifdef CONFIG_PAX_ASLR
8226+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
8227+
8228+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
8229+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
8230+#endif
8231+
8232 extern unsigned long sparc64_elf_hwcap;
8233 #define ELF_HWCAP sparc64_elf_hwcap
8234
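Editor's note: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are bit counts, not byte offsets. Under PAX_RANDMMAP they state how many random bits get folded into the mmap and stack bases (28/29 bits for 64-bit tasks, 14/15 for compat ones). A hedged sketch of how such a bit count becomes a page-aligned delta follows; the PRNG and the exact mixing are assumptions for illustration, not PaX's code:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define PAGE_SHIFT          13  /* sparc64 8K pages */
    #define PAX_DELTA_MMAP_LEN  28  /* 64-bit task, per the hunk above */

    int main(void)
    {
        uint64_t rnd, delta_mmap;

        srand((unsigned)time(NULL));
        rnd = ((uint64_t)rand() << 32) | (unsigned)rand();  /* stand-in PRNG */

        /* keep PAX_DELTA_MMAP_LEN bits, then page-align the delta */
        delta_mmap = (rnd & ((1ULL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
        printf("mmap base displaced by 0x%llx\n",
               (unsigned long long)delta_mmap);
        return 0;
    }

With 28 bits over 8K pages that is a 2^41-byte randomisation window, which is why the 64-bit constants are so much larger than the 32-bit ones.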
8235diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
8236index 9b1c36d..209298b 100644
8237--- a/arch/sparc/include/asm/pgalloc_32.h
8238+++ b/arch/sparc/include/asm/pgalloc_32.h
8239@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
8240 }
8241
8242 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
8243+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
8244
8245 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
8246 unsigned long address)
8247diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
8248index bcfe063..b333142 100644
8249--- a/arch/sparc/include/asm/pgalloc_64.h
8250+++ b/arch/sparc/include/asm/pgalloc_64.h
8251@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8252 }
8253
8254 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
8255+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
8256
8257 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
8258 {
8259diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
8260index 6fc1348..390c50a 100644
8261--- a/arch/sparc/include/asm/pgtable_32.h
8262+++ b/arch/sparc/include/asm/pgtable_32.h
8263@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
8264 #define PAGE_SHARED SRMMU_PAGE_SHARED
8265 #define PAGE_COPY SRMMU_PAGE_COPY
8266 #define PAGE_READONLY SRMMU_PAGE_RDONLY
8267+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
8268+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
8269+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
8270 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
8271
8272 /* Top-level page directory - dummy used by init-mm.
8273@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
8274
8275 /* xwr */
8276 #define __P000 PAGE_NONE
8277-#define __P001 PAGE_READONLY
8278-#define __P010 PAGE_COPY
8279-#define __P011 PAGE_COPY
8280+#define __P001 PAGE_READONLY_NOEXEC
8281+#define __P010 PAGE_COPY_NOEXEC
8282+#define __P011 PAGE_COPY_NOEXEC
8283 #define __P100 PAGE_READONLY
8284 #define __P101 PAGE_READONLY
8285 #define __P110 PAGE_COPY
8286 #define __P111 PAGE_COPY
8287
8288 #define __S000 PAGE_NONE
8289-#define __S001 PAGE_READONLY
8290-#define __S010 PAGE_SHARED
8291-#define __S011 PAGE_SHARED
8292+#define __S001 PAGE_READONLY_NOEXEC
8293+#define __S010 PAGE_SHARED_NOEXEC
8294+#define __S011 PAGE_SHARED_NOEXEC
8295 #define __S100 PAGE_READONLY
8296 #define __S101 PAGE_READONLY
8297 #define __S110 PAGE_SHARED
8298diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
8299index 08fcce9..7619f2f 100644
8300--- a/arch/sparc/include/asm/pgtable_64.h
8301+++ b/arch/sparc/include/asm/pgtable_64.h
8302@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
8303 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
8304 }
8305
8306+#include <asm/tlbflush.h>
8307 #include <asm-generic/pgtable.h>
8308
8309 /* We provide our own get_unmapped_area to cope with VA holes and
8310diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
8311index 79da178..c2eede8 100644
8312--- a/arch/sparc/include/asm/pgtsrmmu.h
8313+++ b/arch/sparc/include/asm/pgtsrmmu.h
8314@@ -115,6 +115,11 @@
8315 SRMMU_EXEC | SRMMU_REF)
8316 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
8317 SRMMU_EXEC | SRMMU_REF)
8318+
8319+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
8320+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8321+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8322+
8323 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
8324 SRMMU_DIRTY | SRMMU_REF)
8325
8326diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
8327index 9689176..63c18ea 100644
8328--- a/arch/sparc/include/asm/spinlock_64.h
8329+++ b/arch/sparc/include/asm/spinlock_64.h
8330@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8331
8332 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
8333
8334-static void inline arch_read_lock(arch_rwlock_t *lock)
8335+static inline void arch_read_lock(arch_rwlock_t *lock)
8336 {
8337 unsigned long tmp1, tmp2;
8338
8339 __asm__ __volatile__ (
8340 "1: ldsw [%2], %0\n"
8341 " brlz,pn %0, 2f\n"
8342-"4: add %0, 1, %1\n"
8343+"4: addcc %0, 1, %1\n"
8344+
8345+#ifdef CONFIG_PAX_REFCOUNT
8346+" tvs %%icc, 6\n"
8347+#endif
8348+
8349 " cas [%2], %0, %1\n"
8350 " cmp %0, %1\n"
8351 " bne,pn %%icc, 1b\n"
8352@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8353 " .previous"
8354 : "=&r" (tmp1), "=&r" (tmp2)
8355 : "r" (lock)
8356- : "memory");
8357+ : "memory", "cc");
8358 }
8359
8360-static int inline arch_read_trylock(arch_rwlock_t *lock)
8361+static inline int arch_read_trylock(arch_rwlock_t *lock)
8362 {
8363 int tmp1, tmp2;
8364
8365@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8366 "1: ldsw [%2], %0\n"
8367 " brlz,a,pn %0, 2f\n"
8368 " mov 0, %0\n"
8369-" add %0, 1, %1\n"
8370+" addcc %0, 1, %1\n"
8371+
8372+#ifdef CONFIG_PAX_REFCOUNT
8373+" tvs %%icc, 6\n"
8374+#endif
8375+
8376 " cas [%2], %0, %1\n"
8377 " cmp %0, %1\n"
8378 " bne,pn %%icc, 1b\n"
8379@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8380 return tmp1;
8381 }
8382
8383-static void inline arch_read_unlock(arch_rwlock_t *lock)
8384+static inline void arch_read_unlock(arch_rwlock_t *lock)
8385 {
8386 unsigned long tmp1, tmp2;
8387
8388 __asm__ __volatile__(
8389 "1: lduw [%2], %0\n"
8390-" sub %0, 1, %1\n"
8391+" subcc %0, 1, %1\n"
8392+
8393+#ifdef CONFIG_PAX_REFCOUNT
8394+" tvs %%icc, 6\n"
8395+#endif
8396+
8397 " cas [%2], %0, %1\n"
8398 " cmp %0, %1\n"
8399 " bne,pn %%xcc, 1b\n"
8400@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8401 : "memory");
8402 }
8403
8404-static void inline arch_write_lock(arch_rwlock_t *lock)
8405+static inline void arch_write_lock(arch_rwlock_t *lock)
8406 {
8407 unsigned long mask, tmp1, tmp2;
8408
8409@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8410 : "memory");
8411 }
8412
8413-static void inline arch_write_unlock(arch_rwlock_t *lock)
8414+static inline void arch_write_unlock(arch_rwlock_t *lock)
8415 {
8416 __asm__ __volatile__(
8417 " stw %%g0, [%0]"
8418@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8419 : "memory");
8420 }
8421
8422-static int inline arch_write_trylock(arch_rwlock_t *lock)
8423+static inline int arch_write_trylock(arch_rwlock_t *lock)
8424 {
8425 unsigned long mask, tmp1, tmp2, result;
8426
8427diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
8428index cad36f5..c7de332 100644
8429--- a/arch/sparc/include/asm/switch_to_64.h
8430+++ b/arch/sparc/include/asm/switch_to_64.h
8431@@ -18,8 +18,7 @@ do { \
8432 * and 2 stores in this critical code path. -DaveM
8433 */
8434 #define switch_to(prev, next, last) \
8435-do { flush_tlb_pending(); \
8436- save_and_clear_fpu(); \
8437+do { save_and_clear_fpu(); \
8438 /* If you are tempted to conditionalize the following */ \
8439 /* so that ASI is only written if it changes, think again. */ \
8440 __asm__ __volatile__("wr %%g0, %0, %%asi" \
8441diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8442index 25849ae..924c54b 100644
8443--- a/arch/sparc/include/asm/thread_info_32.h
8444+++ b/arch/sparc/include/asm/thread_info_32.h
8445@@ -49,6 +49,8 @@ struct thread_info {
8446 unsigned long w_saved;
8447
8448 struct restart_block restart_block;
8449+
8450+ unsigned long lowest_stack;
8451 };
8452
8453 /*
8454diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8455index 269bd92..e46a9b8 100644
8456--- a/arch/sparc/include/asm/thread_info_64.h
8457+++ b/arch/sparc/include/asm/thread_info_64.h
8458@@ -63,6 +63,8 @@ struct thread_info {
8459 struct pt_regs *kern_una_regs;
8460 unsigned int kern_una_insn;
8461
8462+ unsigned long lowest_stack;
8463+
8464 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8465 };
8466
8467@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8468 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8469 /* flag bit 6 is available */
8470 #define TIF_32BIT 7 /* 32-bit binary */
8471-/* flag bit 8 is available */
8472+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8473 #define TIF_SECCOMP 9 /* secure computing */
8474 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8475 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8476+
8477 /* NOTE: Thread flags >= 12 should be ones we have no interest
8478 * in using in assembly, else we can't use the mask as
8479 * an immediate value in instructions such as andcc.
8480@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8481 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8482 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8483 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8484+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8485
8486 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8487 _TIF_DO_NOTIFY_RESUME_MASK | \
8488 _TIF_NEED_RESCHED)
8489 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8490
8491+#define _TIF_WORK_SYSCALL \
8492+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8493+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8494+
8495+
8496 /*
8497 * Thread-synchronous status.
8498 *
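Editor's note: the consolidated _TIF_WORK_SYSCALL mask only works because every flag in it sits below bit 12. Sparc arithmetic/logical instructions encode immediates as a 13-bit sign-extended field (simm13), so "andcc %l0, _TIF_WORK_SYSCALL, %g0" in the syscall path needs the whole mask to fit in 0..4095, which is exactly the constraint the comment in the hunk warns about. A compile-time restatement of that constraint; the assertion itself is illustrative and not in the kernel, while the flag values follow the 3.8 sparc64 header:

    #define TIF_SYSCALL_TRACE       0
    #define TIF_GRSEC_SETXID        8
    #define TIF_SECCOMP             9
    #define TIF_SYSCALL_AUDIT       10
    #define TIF_SYSCALL_TRACEPOINT  11

    #define _TIF_WORK_SYSCALL \
        ((1 << TIF_SYSCALL_TRACE) | (1 << TIF_SECCOMP) | \
         (1 << TIF_SYSCALL_AUDIT) | (1 << TIF_SYSCALL_TRACEPOINT) | \
         (1 << TIF_GRSEC_SETXID))

    /* positive simm13 immediates span 0..4095 */
    _Static_assert(_TIF_WORK_SYSCALL < (1 << 12),
                   "mask must fit a positive simm13 immediate for andcc");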
8499diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
8500index 2ef4634..f0d6a97 100644
8501--- a/arch/sparc/include/asm/tlbflush_64.h
8502+++ b/arch/sparc/include/asm/tlbflush_64.h
8503@@ -11,24 +11,40 @@
8504 struct tlb_batch {
8505 struct mm_struct *mm;
8506 unsigned long tlb_nr;
8507+ unsigned long active;
8508 unsigned long vaddrs[TLB_BATCH_NR];
8509 };
8510
8511 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
8512 extern void flush_tsb_user(struct tlb_batch *tb);
8513+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
8514
8515 /* TLB flush operations. */
8516
8517+static inline void flush_tlb_mm(struct mm_struct *mm)
8518+{
8519+}
8520+
8521+static inline void flush_tlb_page(struct vm_area_struct *vma,
8522+ unsigned long vmaddr)
8523+{
8524+}
8525+
8526+static inline void flush_tlb_range(struct vm_area_struct *vma,
8527+ unsigned long start, unsigned long end)
8528+{
8529+}
8530+
8531+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
8532+
8533 extern void flush_tlb_pending(void);
8534-
8535-#define flush_tlb_range(vma,start,end) \
8536- do { (void)(start); flush_tlb_pending(); } while (0)
8537-#define flush_tlb_page(vma,addr) flush_tlb_pending()
8538-#define flush_tlb_mm(mm) flush_tlb_pending()
8539+extern void arch_enter_lazy_mmu_mode(void);
8540+extern void arch_leave_lazy_mmu_mode(void);
8541+#define arch_flush_lazy_mmu_mode() do {} while (0)
8542
8543 /* Local cpu only. */
8544 extern void __flush_tlb_all(void);
8545-
8546+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
8547 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
8548
8549 #ifndef CONFIG_SMP
8550@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
8551 __flush_tlb_kernel_range(start,end); \
8552 } while (0)
8553
8554+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
8555+{
8556+ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
8557+}
8558+
8559 #else /* CONFIG_SMP */
8560
8561 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
8562+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
8563
8564 #define flush_tlb_kernel_range(start, end) \
8565 do { flush_tsb_kernel_range(start,end); \
8566 smp_flush_tlb_kernel_range(start, end); \
8567 } while (0)
8568
8569+#define global_flush_tlb_page(mm, vaddr) \
8570+ smp_flush_tlb_page(mm, vaddr)
8571+
8572 #endif /* ! CONFIG_SMP */
8573
8574 #endif /* _SPARC64_TLBFLUSH_H */
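Editor's note: the header rework above (the "active" flag in struct tlb_batch, the now-empty flush_tlb_mm/page/range stubs, and __HAVE_ARCH_ENTER_LAZY_MMU_MODE) matches the upstream sparc64 TLB-batch race fix of the same era: per-page flushes are deferred into the batch only while lazy MMU mode is open, and happen immediately otherwise. A runnable user-space model of that discipline, with made-up names throughout; it shows the bracketing semantics, not the kernel's per-cpu plumbing:

    #include <stdio.h>

    #define TLB_BATCH_NR 8

    struct tlb_batch_model {
        unsigned long nr;
        int active;
        unsigned long vaddrs[TLB_BATCH_NR];
    };

    static struct tlb_batch_model tb;

    static void flush_pending(void)
    {
        for (unsigned long i = 0; i < tb.nr; i++)
            printf("batched flush 0x%lx\n", tb.vaddrs[i]);
        tb.nr = 0;
    }

    static void enter_lazy_mmu(void) { tb.active = 1; }

    static void leave_lazy_mmu(void)
    {
        if (tb.nr)                   /* drain whatever was deferred */
            flush_pending();
        tb.active = 0;
    }

    static void flush_page(unsigned long vaddr)
    {
        if (!tb.active) {            /* no batch open: flush right away */
            printf("immediate flush 0x%lx\n", vaddr);
            return;
        }
        tb.vaddrs[tb.nr++] = vaddr;  /* defer into the batch */
        if (tb.nr == TLB_BATCH_NR)   /* batch full: drain early */
            flush_pending();
    }

    int main(void)
    {
        flush_page(0x1000);          /* outside the bracket: immediate */
        enter_lazy_mmu();
        flush_page(0x2000);          /* deferred */
        flush_page(0x3000);          /* deferred */
        leave_lazy_mmu();            /* drains the batch */
        return 0;
    }

This is also why the switch_to hunk earlier can drop flush_tlb_pending(): flushes are no longer left pending across arbitrary code, only across an explicit lazy-MMU bracket.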
8575diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8576index 0167d26..767bb0c 100644
8577--- a/arch/sparc/include/asm/uaccess.h
8578+++ b/arch/sparc/include/asm/uaccess.h
8579@@ -1,5 +1,6 @@
8580 #ifndef ___ASM_SPARC_UACCESS_H
8581 #define ___ASM_SPARC_UACCESS_H
8582+
8583 #if defined(__sparc__) && defined(__arch64__)
8584 #include <asm/uaccess_64.h>
8585 #else
8586diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8587index 53a28dd..50c38c3 100644
8588--- a/arch/sparc/include/asm/uaccess_32.h
8589+++ b/arch/sparc/include/asm/uaccess_32.h
8590@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8591
8592 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8593 {
8594- if (n && __access_ok((unsigned long) to, n))
8595+ if ((long)n < 0)
8596+ return n;
8597+
8598+ if (n && __access_ok((unsigned long) to, n)) {
8599+ if (!__builtin_constant_p(n))
8600+ check_object_size(from, n, true);
8601 return __copy_user(to, (__force void __user *) from, n);
8602- else
8603+ } else
8604 return n;
8605 }
8606
8607 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8608 {
8609+ if ((long)n < 0)
8610+ return n;
8611+
8612+ if (!__builtin_constant_p(n))
8613+ check_object_size(from, n, true);
8614+
8615 return __copy_user(to, (__force void __user *) from, n);
8616 }
8617
8618 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8619 {
8620- if (n && __access_ok((unsigned long) from, n))
8621+ if ((long)n < 0)
8622+ return n;
8623+
8624+ if (n && __access_ok((unsigned long) from, n)) {
8625+ if (!__builtin_constant_p(n))
8626+ check_object_size(to, n, false);
8627 return __copy_user((__force void __user *) to, from, n);
8628- else
8629+ } else
8630 return n;
8631 }
8632
8633 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8634 {
8635+ if ((long)n < 0)
8636+ return n;
8637+
8638 return __copy_user((__force void __user *) to, from, n);
8639 }
8640
8641diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8642index e562d3c..191f176 100644
8643--- a/arch/sparc/include/asm/uaccess_64.h
8644+++ b/arch/sparc/include/asm/uaccess_64.h
8645@@ -10,6 +10,7 @@
8646 #include <linux/compiler.h>
8647 #include <linux/string.h>
8648 #include <linux/thread_info.h>
8649+#include <linux/kernel.h>
8650 #include <asm/asi.h>
8651 #include <asm/spitfire.h>
8652 #include <asm-generic/uaccess-unaligned.h>
8653@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8654 static inline unsigned long __must_check
8655 copy_from_user(void *to, const void __user *from, unsigned long size)
8656 {
8657- unsigned long ret = ___copy_from_user(to, from, size);
8658+ unsigned long ret;
8659
8660+ if ((long)size < 0 || size > INT_MAX)
8661+ return size;
8662+
8663+ if (!__builtin_constant_p(size))
8664+ check_object_size(to, size, false);
8665+
8666+ ret = ___copy_from_user(to, from, size);
8667 if (unlikely(ret))
8668 ret = copy_from_user_fixup(to, from, size);
8669
8670@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8671 static inline unsigned long __must_check
8672 copy_to_user(void __user *to, const void *from, unsigned long size)
8673 {
8674- unsigned long ret = ___copy_to_user(to, from, size);
8675+ unsigned long ret;
8676
8677+ if ((long)size < 0 || size > INT_MAX)
8678+ return size;
8679+
8680+ if (!__builtin_constant_p(size))
8681+ check_object_size(from, size, true);
8682+
8683+ ret = ___copy_to_user(to, from, size);
8684 if (unlikely(ret))
8685 ret = copy_to_user_fixup(to, from, size);
8686 return ret;
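Editor's note: both uaccess variants now refuse any length whose sign bit is set before the copy routines run (the 64-bit side also caps at INT_MAX), and feed non-constant sizes to check_object_size(), the PAX_USERCOPY slab-object bounds check. The "(long)n < 0" cast is the interesting part: a negative int length produced by a signedness bug becomes a huge unsigned long, and casting it back to long exposes the sign bit. A small stand-alone demonstration; guarded_copy is hypothetical:

    #include <stdio.h>

    static unsigned long guarded_copy(unsigned long n)
    {
        if ((long)n < 0)    /* sign bit set: refuse, report all n bytes uncopied */
            return n;
        /* ... __copy_user() would run here ... */
        return 0;
    }

    int main(void)
    {
        int broken_len = -1;                      /* typical signedness bug */
        unsigned long n = (unsigned long)broken_len;

        printf("n = %#lx, uncopied = %#lx\n", n, guarded_copy(n));
        return 0;
    }

Returning n unchanged mimics copy_{to,from}_user semantics, where the return value is the number of bytes not copied.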
8687diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8688index 6cf591b..b49e65a 100644
8689--- a/arch/sparc/kernel/Makefile
8690+++ b/arch/sparc/kernel/Makefile
8691@@ -3,7 +3,7 @@
8692 #
8693
8694 asflags-y := -ansi
8695-ccflags-y := -Werror
8696+#ccflags-y := -Werror
8697
8698 extra-y := head_$(BITS).o
8699
8700diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8701index be8e862..5b50b12 100644
8702--- a/arch/sparc/kernel/process_32.c
8703+++ b/arch/sparc/kernel/process_32.c
8704@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8705
8706 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8707 r->psr, r->pc, r->npc, r->y, print_tainted());
8708- printk("PC: <%pS>\n", (void *) r->pc);
8709+ printk("PC: <%pA>\n", (void *) r->pc);
8710 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8711 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8712 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8713 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8714 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8715 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8716- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8717+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8718
8719 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8720 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8721@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8722 rw = (struct reg_window32 *) fp;
8723 pc = rw->ins[7];
8724 printk("[%08lx : ", pc);
8725- printk("%pS ] ", (void *) pc);
8726+ printk("%pA ] ", (void *) pc);
8727 fp = rw->ins[6];
8728 } while (++count < 16);
8729 printk("\n");
8730diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8731index cdb80b2..5ca141d 100644
8732--- a/arch/sparc/kernel/process_64.c
8733+++ b/arch/sparc/kernel/process_64.c
8734@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8735 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8736 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8737 if (regs->tstate & TSTATE_PRIV)
8738- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8739+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8740 }
8741
8742 void show_regs(struct pt_regs *regs)
8743 {
8744 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8745 regs->tpc, regs->tnpc, regs->y, print_tainted());
8746- printk("TPC: <%pS>\n", (void *) regs->tpc);
8747+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8748 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8749 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8750 regs->u_regs[3]);
8751@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8752 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8753 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8754 regs->u_regs[15]);
8755- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8756+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8757 show_regwindow(regs);
8758 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8759 }
8760@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8761 ((tp && tp->task) ? tp->task->pid : -1));
8762
8763 if (gp->tstate & TSTATE_PRIV) {
8764- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8765+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8766 (void *) gp->tpc,
8767 (void *) gp->o7,
8768 (void *) gp->i7,
8769diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8770index 1303021..c2a6321 100644
8771--- a/arch/sparc/kernel/prom_common.c
8772+++ b/arch/sparc/kernel/prom_common.c
8773@@ -143,7 +143,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8774
8775 unsigned int prom_early_allocated __initdata;
8776
8777-static struct of_pdt_ops prom_sparc_ops __initdata = {
8778+static struct of_pdt_ops prom_sparc_ops __initconst = {
8779 .nextprop = prom_common_nextprop,
8780 .getproplen = prom_getproplen,
8781 .getproperty = prom_getproperty,
8782diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8783index 7ff45e4..a58f271 100644
8784--- a/arch/sparc/kernel/ptrace_64.c
8785+++ b/arch/sparc/kernel/ptrace_64.c
8786@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8787 return ret;
8788 }
8789
8790+#ifdef CONFIG_GRKERNSEC_SETXID
8791+extern void gr_delayed_cred_worker(void);
8792+#endif
8793+
8794 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8795 {
8796 int ret = 0;
8797@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8798 /* do the secure computing check first */
8799 secure_computing_strict(regs->u_regs[UREG_G1]);
8800
8801+#ifdef CONFIG_GRKERNSEC_SETXID
8802+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8803+ gr_delayed_cred_worker();
8804+#endif
8805+
8806 if (test_thread_flag(TIF_SYSCALL_TRACE))
8807 ret = tracehook_report_syscall_entry(regs);
8808
8809@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8810
8811 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8812 {
8813+#ifdef CONFIG_GRKERNSEC_SETXID
8814+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8815+ gr_delayed_cred_worker();
8816+#endif
8817+
8818 audit_syscall_exit(regs);
8819
8820 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8821diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
8822index 537eb66..ca64d2a 100644
8823--- a/arch/sparc/kernel/smp_64.c
8824+++ b/arch/sparc/kernel/smp_64.c
8825@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
8826 }
8827
8828 extern unsigned long xcall_flush_tlb_mm;
8829-extern unsigned long xcall_flush_tlb_pending;
8830+extern unsigned long xcall_flush_tlb_page;
8831 extern unsigned long xcall_flush_tlb_kernel_range;
8832 extern unsigned long xcall_fetch_glob_regs;
8833 extern unsigned long xcall_fetch_glob_pmu;
8834@@ -1074,23 +1074,56 @@ local_flush_and_out:
8835 put_cpu();
8836 }
8837
8838+struct tlb_pending_info {
8839+ unsigned long ctx;
8840+ unsigned long nr;
8841+ unsigned long *vaddrs;
8842+};
8843+
8844+static void tlb_pending_func(void *info)
8845+{
8846+ struct tlb_pending_info *t = info;
8847+
8848+ __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
8849+}
8850+
8851 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
8852 {
8853 u32 ctx = CTX_HWBITS(mm->context);
8854+ struct tlb_pending_info info;
8855 int cpu = get_cpu();
8856
8857+ info.ctx = ctx;
8858+ info.nr = nr;
8859+ info.vaddrs = vaddrs;
8860+
8861 if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
8862 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
8863 else
8864- smp_cross_call_masked(&xcall_flush_tlb_pending,
8865- ctx, nr, (unsigned long) vaddrs,
8866- mm_cpumask(mm));
8867+ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
8868+ &info, 1);
8869
8870 __flush_tlb_pending(ctx, nr, vaddrs);
8871
8872 put_cpu();
8873 }
8874
8875+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
8876+{
8877+ unsigned long context = CTX_HWBITS(mm->context);
8878+ int cpu = get_cpu();
8879+
8880+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
8881+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
8882+ else
8883+ smp_cross_call_masked(&xcall_flush_tlb_page,
8884+ context, vaddr, 0,
8885+ mm_cpumask(mm));
8886+ __flush_tlb_page(context, vaddr);
8887+
8888+ put_cpu();
8889+}
8890+
8891 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
8892 {
8893 start &= PAGE_MASK;
8894diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8895index 2da0bdc..79128d2 100644
8896--- a/arch/sparc/kernel/sys_sparc_32.c
8897+++ b/arch/sparc/kernel/sys_sparc_32.c
8898@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8899 if (len > TASK_SIZE - PAGE_SIZE)
8900 return -ENOMEM;
8901 if (!addr)
8902- addr = TASK_UNMAPPED_BASE;
8903+ addr = current->mm->mmap_base;
8904
8905 info.flags = 0;
8906 info.length = len;
8907diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8908index 708bc29..f0129cb 100644
8909--- a/arch/sparc/kernel/sys_sparc_64.c
8910+++ b/arch/sparc/kernel/sys_sparc_64.c
8911@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8912 struct vm_area_struct * vma;
8913 unsigned long task_size = TASK_SIZE;
8914 int do_color_align;
8915+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8916 struct vm_unmapped_area_info info;
8917
8918 if (flags & MAP_FIXED) {
8919 /* We do not accept a shared mapping if it would violate
8920 * cache aliasing constraints.
8921 */
8922- if ((flags & MAP_SHARED) &&
8923+ if ((filp || (flags & MAP_SHARED)) &&
8924 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8925 return -EINVAL;
8926 return addr;
8927@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8928 if (filp || (flags & MAP_SHARED))
8929 do_color_align = 1;
8930
8931+#ifdef CONFIG_PAX_RANDMMAP
8932+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8933+#endif
8934+
8935 if (addr) {
8936 if (do_color_align)
8937 addr = COLOR_ALIGN(addr, pgoff);
8938@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8939 addr = PAGE_ALIGN(addr);
8940
8941 vma = find_vma(mm, addr);
8942- if (task_size - len >= addr &&
8943- (!vma || addr + len <= vma->vm_start))
8944+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8945 return addr;
8946 }
8947
8948 info.flags = 0;
8949 info.length = len;
8950- info.low_limit = TASK_UNMAPPED_BASE;
8951+ info.low_limit = mm->mmap_base;
8952 info.high_limit = min(task_size, VA_EXCLUDE_START);
8953 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8954 info.align_offset = pgoff << PAGE_SHIFT;
8955@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8956 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8957 VM_BUG_ON(addr != -ENOMEM);
8958 info.low_limit = VA_EXCLUDE_END;
8959+
8960+#ifdef CONFIG_PAX_RANDMMAP
8961+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8962+ info.low_limit += mm->delta_mmap;
8963+#endif
8964+
8965 info.high_limit = task_size;
8966 addr = vm_unmapped_area(&info);
8967 }
8968@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8969 unsigned long task_size = STACK_TOP32;
8970 unsigned long addr = addr0;
8971 int do_color_align;
8972+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8973 struct vm_unmapped_area_info info;
8974
8975 /* This should only ever run for 32-bit processes. */
8976@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8977 /* We do not accept a shared mapping if it would violate
8978 * cache aliasing constraints.
8979 */
8980- if ((flags & MAP_SHARED) &&
8981+ if ((filp || (flags & MAP_SHARED)) &&
8982 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8983 return -EINVAL;
8984 return addr;
8985@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8986 if (filp || (flags & MAP_SHARED))
8987 do_color_align = 1;
8988
8989+#ifdef CONFIG_PAX_RANDMMAP
8990+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8991+#endif
8992+
8993 /* requesting a specific address */
8994 if (addr) {
8995 if (do_color_align)
8996@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8997 addr = PAGE_ALIGN(addr);
8998
8999 vma = find_vma(mm, addr);
9000- if (task_size - len >= addr &&
9001- (!vma || addr + len <= vma->vm_start))
9002+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9003 return addr;
9004 }
9005
9006@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9007 VM_BUG_ON(addr != -ENOMEM);
9008 info.flags = 0;
9009 info.low_limit = TASK_UNMAPPED_BASE;
9010+
9011+#ifdef CONFIG_PAX_RANDMMAP
9012+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9013+ info.low_limit += mm->delta_mmap;
9014+#endif
9015+
9016 info.high_limit = STACK_TOP32;
9017 addr = vm_unmapped_area(&info);
9018 }
9019@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
9020 {
9021 unsigned long rnd = 0UL;
9022
9023+#ifdef CONFIG_PAX_RANDMMAP
9024+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
9025+#endif
9026+
9027 if (current->flags & PF_RANDOMIZE) {
9028 unsigned long val = get_random_int();
9029 if (test_thread_flag(TIF_32BIT))
9030@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9031 gap == RLIM_INFINITY ||
9032 sysctl_legacy_va_layout) {
9033 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9034+
9035+#ifdef CONFIG_PAX_RANDMMAP
9036+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9037+ mm->mmap_base += mm->delta_mmap;
9038+#endif
9039+
9040 mm->get_unmapped_area = arch_get_unmapped_area;
9041 mm->unmap_area = arch_unmap_area;
9042 } else {
9043@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9044 gap = (task_size / 6 * 5);
9045
9046 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9047+
9048+#ifdef CONFIG_PAX_RANDMMAP
9049+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9050+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9051+#endif
9052+
9053 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9054 mm->unmap_area = arch_unmap_area_topdown;
9055 }
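Editor's note: in the get_unmapped_area paths above, the bare "addr + len <= vma->vm_start" test becomes check_heap_stack_gap(vma, addr, len, offset), with offset supplied by gr_rand_threadstack_offset(). The intent is that a candidate mapping must not merely avoid the next VMA but also keep a guard gap below stack-style mappings, with the randomized offset making the gap's exact size unpredictable. A hedged sketch of the shape of such a predicate; field names follow the kernel, but the gap policy shown is an illustration of the idea, not grsecurity's exact logic:

    #include <stdbool.h>

    #define VM_GROWSDOWN 0x0100UL

    struct vma_model {
        unsigned long vm_start;
        unsigned long vm_flags;
    };

    static unsigned long heap_stack_gap = 64UL << 10;  /* sysctl-style knob */

    static bool check_gap(const struct vma_model *vma, unsigned long addr,
                          unsigned long len, unsigned long rand_offset)
    {
        if (!vma)
            return true;                /* nothing above the candidate range */
        if (addr + len > vma->vm_start)
            return false;               /* plain overlap */
        if (vma->vm_flags & VM_GROWSDOWN)   /* stack-style: demand a gap */
            return vma->vm_start - (addr + len) >=
                   heap_stack_gap + rand_offset;
        return true;
    }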
9056diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9057index e0fed77..604a7e5 100644
9058--- a/arch/sparc/kernel/syscalls.S
9059+++ b/arch/sparc/kernel/syscalls.S
9060@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
9061 #endif
9062 .align 32
9063 1: ldx [%g6 + TI_FLAGS], %l5
9064- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9065+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9066 be,pt %icc, rtrap
9067 nop
9068 call syscall_trace_leave
9069@@ -190,7 +190,7 @@ linux_sparc_syscall32:
9070
9071 srl %i5, 0, %o5 ! IEU1
9072 srl %i2, 0, %o2 ! IEU0 Group
9073- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9074+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9075 bne,pn %icc, linux_syscall_trace32 ! CTI
9076 mov %i0, %l5 ! IEU1
9077 call %l7 ! CTI Group brk forced
9078@@ -213,7 +213,7 @@ linux_sparc_syscall:
9079
9080 mov %i3, %o3 ! IEU1
9081 mov %i4, %o4 ! IEU0 Group
9082- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9083+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9084 bne,pn %icc, linux_syscall_trace ! CTI Group
9085 mov %i0, %l5 ! IEU0
9086 2: call %l7 ! CTI Group brk forced
9087@@ -229,7 +229,7 @@ ret_sys_call:
9088
9089 cmp %o0, -ERESTART_RESTARTBLOCK
9090 bgeu,pn %xcc, 1f
9091- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9092+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9093 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9094
9095 2:
9096diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
9097index 654e8aa..45f431b 100644
9098--- a/arch/sparc/kernel/sysfs.c
9099+++ b/arch/sparc/kernel/sysfs.c
9100@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
9101 return NOTIFY_OK;
9102 }
9103
9104-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
9105+static struct notifier_block sysfs_cpu_nb = {
9106 .notifier_call = sysfs_cpu_notify,
9107 };
9108
9109diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9110index a5785ea..405c5f7 100644
9111--- a/arch/sparc/kernel/traps_32.c
9112+++ b/arch/sparc/kernel/traps_32.c
9113@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9114 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9115 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9116
9117+extern void gr_handle_kernel_exploit(void);
9118+
9119 void die_if_kernel(char *str, struct pt_regs *regs)
9120 {
9121 static int die_counter;
9122@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9123 count++ < 30 &&
9124 (((unsigned long) rw) >= PAGE_OFFSET) &&
9125 !(((unsigned long) rw) & 0x7)) {
9126- printk("Caller[%08lx]: %pS\n", rw->ins[7],
9127+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
9128 (void *) rw->ins[7]);
9129 rw = (struct reg_window32 *)rw->ins[6];
9130 }
9131 }
9132 printk("Instruction DUMP:");
9133 instruction_dump ((unsigned long *) regs->pc);
9134- if(regs->psr & PSR_PS)
9135+ if(regs->psr & PSR_PS) {
9136+ gr_handle_kernel_exploit();
9137 do_exit(SIGKILL);
9138+ }
9139 do_exit(SIGSEGV);
9140 }
9141
9142diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9143index e7ecf15..6520e65 100644
9144--- a/arch/sparc/kernel/traps_64.c
9145+++ b/arch/sparc/kernel/traps_64.c
9146@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9147 i + 1,
9148 p->trapstack[i].tstate, p->trapstack[i].tpc,
9149 p->trapstack[i].tnpc, p->trapstack[i].tt);
9150- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9151+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9152 }
9153 }
9154
9155@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9156
9157 lvl -= 0x100;
9158 if (regs->tstate & TSTATE_PRIV) {
9159+
9160+#ifdef CONFIG_PAX_REFCOUNT
9161+ if (lvl == 6)
9162+ pax_report_refcount_overflow(regs);
9163+#endif
9164+
9165 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9166 die_if_kernel(buffer, regs);
9167 }
9168@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9169 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9170 {
9171 char buffer[32];
9172-
9173+
9174 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9175 0, lvl, SIGTRAP) == NOTIFY_STOP)
9176 return;
9177
9178+#ifdef CONFIG_PAX_REFCOUNT
9179+ if (lvl == 6)
9180+ pax_report_refcount_overflow(regs);
9181+#endif
9182+
9183 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
9184
9185 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
9186@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
9187 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
9188 printk("%s" "ERROR(%d): ",
9189 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
9190- printk("TPC<%pS>\n", (void *) regs->tpc);
9191+ printk("TPC<%pA>\n", (void *) regs->tpc);
9192 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
9193 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
9194 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
9195@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9196 smp_processor_id(),
9197 (type & 0x1) ? 'I' : 'D',
9198 regs->tpc);
9199- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
9200+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
9201 panic("Irrecoverable Cheetah+ parity error.");
9202 }
9203
9204@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9205 smp_processor_id(),
9206 (type & 0x1) ? 'I' : 'D',
9207 regs->tpc);
9208- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
9209+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
9210 }
9211
9212 struct sun4v_error_entry {
9213@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
9214
9215 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
9216 regs->tpc, tl);
9217- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
9218+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
9219 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9220- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
9221+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
9222 (void *) regs->u_regs[UREG_I7]);
9223 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
9224 "pte[%lx] error[%lx]\n",
9225@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
9226
9227 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
9228 regs->tpc, tl);
9229- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
9230+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
9231 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9232- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
9233+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
9234 (void *) regs->u_regs[UREG_I7]);
9235 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
9236 "pte[%lx] error[%lx]\n",
9237@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9238 fp = (unsigned long)sf->fp + STACK_BIAS;
9239 }
9240
9241- printk(" [%016lx] %pS\n", pc, (void *) pc);
9242+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9244 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
9245 int index = tsk->curr_ret_stack;
9246 if (tsk->ret_stack && index >= graph) {
9247 pc = tsk->ret_stack[index - graph].ret;
9248- printk(" [%016lx] %pS\n", pc, (void *) pc);
9249+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9250 graph++;
9251 }
9252 }
9253@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
9254 return (struct reg_window *) (fp + STACK_BIAS);
9255 }
9256
9257+extern void gr_handle_kernel_exploit(void);
9258+
9259 void die_if_kernel(char *str, struct pt_regs *regs)
9260 {
9261 static int die_counter;
9262@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9263 while (rw &&
9264 count++ < 30 &&
9265 kstack_valid(tp, (unsigned long) rw)) {
9266- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9267+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9268 (void *) rw->ins[7]);
9269
9270 rw = kernel_stack_up(rw);
9271@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9272 }
9273 user_instruction_dump ((unsigned int __user *) regs->tpc);
9274 }
9275- if (regs->tstate & TSTATE_PRIV)
9276+ if (regs->tstate & TSTATE_PRIV) {
9277+ gr_handle_kernel_exploit();
9278 do_exit(SIGKILL);
9279+ }
9280 do_exit(SIGSEGV);
9281 }
9282 EXPORT_SYMBOL(die_if_kernel);
9283diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9284index 8201c25e..072a2a7 100644
9285--- a/arch/sparc/kernel/unaligned_64.c
9286+++ b/arch/sparc/kernel/unaligned_64.c
9287@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9288 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9289
9290 if (__ratelimit(&ratelimit)) {
9291- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9292+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9293 regs->tpc, (void *) regs->tpc);
9294 }
9295 }
9296diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
9297index eb1624b..f9f4ddb 100644
9298--- a/arch/sparc/kernel/us3_cpufreq.c
9299+++ b/arch/sparc/kernel/us3_cpufreq.c
9300@@ -18,14 +18,12 @@
9301 #include <asm/head.h>
9302 #include <asm/timer.h>
9303
9304-static struct cpufreq_driver *cpufreq_us3_driver;
9305-
9306 struct us3_freq_percpu_info {
9307 struct cpufreq_frequency_table table[4];
9308 };
9309
9310 /* Indexed by cpu number. */
9311-static struct us3_freq_percpu_info *us3_freq_table;
9312+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
9313
9314 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
9315 * in the Safari config register.
9316@@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
9317
9318 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
9319 {
9320- if (cpufreq_us3_driver)
9321- us3_set_cpu_divider_index(policy->cpu, 0);
9322+ us3_set_cpu_divider_index(policy->cpu, 0);
9323
9324 return 0;
9325 }
9326
9327+static int __init us3_freq_init(void);
9328+static void __exit us3_freq_exit(void);
9329+
9330+static struct cpufreq_driver cpufreq_us3_driver = {
9331+ .init = us3_freq_cpu_init,
9332+ .verify = us3_freq_verify,
9333+ .target = us3_freq_target,
9334+ .get = us3_freq_get,
9335+ .exit = us3_freq_cpu_exit,
9336+ .owner = THIS_MODULE,
9337+ .name = "UltraSPARC-III",
9338+
9339+};
9340+
9341 static int __init us3_freq_init(void)
9342 {
9343 unsigned long manuf, impl, ver;
9344@@ -213,57 +224,15 @@ static int __init us3_freq_init(void)
9345 (impl == CHEETAH_IMPL ||
9346 impl == CHEETAH_PLUS_IMPL ||
9347 impl == JAGUAR_IMPL ||
9348- impl == PANTHER_IMPL)) {
9349- struct cpufreq_driver *driver;
9350-
9351- ret = -ENOMEM;
9352- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
9353- if (!driver)
9354- goto err_out;
9355-
9356- us3_freq_table = kzalloc(
9357- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
9358- GFP_KERNEL);
9359- if (!us3_freq_table)
9360- goto err_out;
9361-
9362- driver->init = us3_freq_cpu_init;
9363- driver->verify = us3_freq_verify;
9364- driver->target = us3_freq_target;
9365- driver->get = us3_freq_get;
9366- driver->exit = us3_freq_cpu_exit;
9367- driver->owner = THIS_MODULE,
9368- strcpy(driver->name, "UltraSPARC-III");
9369-
9370- cpufreq_us3_driver = driver;
9371- ret = cpufreq_register_driver(driver);
9372- if (ret)
9373- goto err_out;
9374-
9375- return 0;
9376-
9377-err_out:
9378- if (driver) {
9379- kfree(driver);
9380- cpufreq_us3_driver = NULL;
9381- }
9382- kfree(us3_freq_table);
9383- us3_freq_table = NULL;
9384- return ret;
9385- }
9386+ impl == PANTHER_IMPL))
9387+ return cpufreq_register_driver(&cpufreq_us3_driver);
9388
9389 return -ENODEV;
9390 }
9391
9392 static void __exit us3_freq_exit(void)
9393 {
9394- if (cpufreq_us3_driver) {
9395- cpufreq_unregister_driver(cpufreq_us3_driver);
9396- kfree(cpufreq_us3_driver);
9397- cpufreq_us3_driver = NULL;
9398- kfree(us3_freq_table);
9399- us3_freq_table = NULL;
9400- }
9401+ cpufreq_unregister_driver(&cpufreq_us3_driver);
9402 }
9403
9404 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
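Editor's note: the us3_cpufreq rewrite replaces the kzalloc'd driver descriptor and per-cpu table with static storage, so init needs no unwind path and exit no conditional teardown; cpufreq_register_driver()/cpufreq_unregister_driver() take a pointer, hence the &cpufreq_us3_driver in the calls above. A minimal sketch of the resulting lifecycle, with all names as stand-ins rather than the cpufreq API:

    struct driver_model { const char *name; };

    static struct driver_model us3_model = { .name = "UltraSPARC-III" };

    static int register_model(struct driver_model *d)    { return d ? 0 : -1; }
    static void unregister_model(struct driver_model *d) { (void)d; }

    static int model_init(void)
    {
        /* no allocation, so a registration failure needs no cleanup */
        return register_model(&us3_model);
    }

    static void model_exit(void)
    {
        unregister_model(&us3_model);
    }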
9405diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9406index 8410065f2..4fd4ca22 100644
9407--- a/arch/sparc/lib/Makefile
9408+++ b/arch/sparc/lib/Makefile
9409@@ -2,7 +2,7 @@
9410 #
9411
9412 asflags-y := -ansi -DST_DIV0=0x02
9413-ccflags-y := -Werror
9414+#ccflags-y := -Werror
9415
9416 lib-$(CONFIG_SPARC32) += ashrdi3.o
9417 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9418diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9419index 85c233d..68500e0 100644
9420--- a/arch/sparc/lib/atomic_64.S
9421+++ b/arch/sparc/lib/atomic_64.S
9422@@ -17,7 +17,12 @@
9423 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9424 BACKOFF_SETUP(%o2)
9425 1: lduw [%o1], %g1
9426- add %g1, %o0, %g7
9427+ addcc %g1, %o0, %g7
9428+
9429+#ifdef CONFIG_PAX_REFCOUNT
9430+ tvs %icc, 6
9431+#endif
9432+
9433 cas [%o1], %g1, %g7
9434 cmp %g1, %g7
9435 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9436@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9437 2: BACKOFF_SPIN(%o2, %o3, 1b)
9438 ENDPROC(atomic_add)
9439
9440+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9441+ BACKOFF_SETUP(%o2)
9442+1: lduw [%o1], %g1
9443+ add %g1, %o0, %g7
9444+ cas [%o1], %g1, %g7
9445+ cmp %g1, %g7
9446+ bne,pn %icc, 2f
9447+ nop
9448+ retl
9449+ nop
9450+2: BACKOFF_SPIN(%o2, %o3, 1b)
9451+ENDPROC(atomic_add_unchecked)
9452+
9453 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9454 BACKOFF_SETUP(%o2)
9455 1: lduw [%o1], %g1
9456- sub %g1, %o0, %g7
9457+ subcc %g1, %o0, %g7
9458+
9459+#ifdef CONFIG_PAX_REFCOUNT
9460+ tvs %icc, 6
9461+#endif
9462+
9463 cas [%o1], %g1, %g7
9464 cmp %g1, %g7
9465 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9466@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9467 2: BACKOFF_SPIN(%o2, %o3, 1b)
9468 ENDPROC(atomic_sub)
9469
9470+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9471+ BACKOFF_SETUP(%o2)
9472+1: lduw [%o1], %g1
9473+ sub %g1, %o0, %g7
9474+ cas [%o1], %g1, %g7
9475+ cmp %g1, %g7
9476+ bne,pn %icc, 2f
9477+ nop
9478+ retl
9479+ nop
9480+2: BACKOFF_SPIN(%o2, %o3, 1b)
9481+ENDPROC(atomic_sub_unchecked)
9482+
9483 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9484 BACKOFF_SETUP(%o2)
9485 1: lduw [%o1], %g1
9486- add %g1, %o0, %g7
9487+ addcc %g1, %o0, %g7
9488+
9489+#ifdef CONFIG_PAX_REFCOUNT
9490+ tvs %icc, 6
9491+#endif
9492+
9493 cas [%o1], %g1, %g7
9494 cmp %g1, %g7
9495 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9496@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9497 2: BACKOFF_SPIN(%o2, %o3, 1b)
9498 ENDPROC(atomic_add_ret)
9499
9500+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9501+ BACKOFF_SETUP(%o2)
9502+1: lduw [%o1], %g1
9503+ addcc %g1, %o0, %g7
9504+ cas [%o1], %g1, %g7
9505+ cmp %g1, %g7
9506+ bne,pn %icc, 2f
9507+ add %g7, %o0, %g7
9508+ sra %g7, 0, %o0
9509+ retl
9510+ nop
9511+2: BACKOFF_SPIN(%o2, %o3, 1b)
9512+ENDPROC(atomic_add_ret_unchecked)
9513+
9514 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9515 BACKOFF_SETUP(%o2)
9516 1: lduw [%o1], %g1
9517- sub %g1, %o0, %g7
9518+ subcc %g1, %o0, %g7
9519+
9520+#ifdef CONFIG_PAX_REFCOUNT
9521+ tvs %icc, 6
9522+#endif
9523+
9524 cas [%o1], %g1, %g7
9525 cmp %g1, %g7
9526 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9527@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
9528 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9529 BACKOFF_SETUP(%o2)
9530 1: ldx [%o1], %g1
9531- add %g1, %o0, %g7
9532+ addcc %g1, %o0, %g7
9533+
9534+#ifdef CONFIG_PAX_REFCOUNT
9535+ tvs %xcc, 6
9536+#endif
9537+
9538 casx [%o1], %g1, %g7
9539 cmp %g1, %g7
9540 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9541@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9542 2: BACKOFF_SPIN(%o2, %o3, 1b)
9543 ENDPROC(atomic64_add)
9544
9545+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9546+ BACKOFF_SETUP(%o2)
9547+1: ldx [%o1], %g1
9548+ addcc %g1, %o0, %g7
9549+ casx [%o1], %g1, %g7
9550+ cmp %g1, %g7
9551+ bne,pn %xcc, 2f
9552+ nop
9553+ retl
9554+ nop
9555+2: BACKOFF_SPIN(%o2, %o3, 1b)
9556+ENDPROC(atomic64_add_unchecked)
9557+
9558 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9559 BACKOFF_SETUP(%o2)
9560 1: ldx [%o1], %g1
9561- sub %g1, %o0, %g7
9562+ subcc %g1, %o0, %g7
9563+
9564+#ifdef CONFIG_PAX_REFCOUNT
9565+ tvs %xcc, 6
9566+#endif
9567+
9568 casx [%o1], %g1, %g7
9569 cmp %g1, %g7
9570 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9571@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9572 2: BACKOFF_SPIN(%o2, %o3, 1b)
9573 ENDPROC(atomic64_sub)
9574
9575+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9576+ BACKOFF_SETUP(%o2)
9577+1: ldx [%o1], %g1
9578+ subcc %g1, %o0, %g7
9579+ casx [%o1], %g1, %g7
9580+ cmp %g1, %g7
9581+ bne,pn %xcc, 2f
9582+ nop
9583+ retl
9584+ nop
9585+2: BACKOFF_SPIN(%o2, %o3, 1b)
9586+ENDPROC(atomic64_sub_unchecked)
9587+
9588 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9589 BACKOFF_SETUP(%o2)
9590 1: ldx [%o1], %g1
9591- add %g1, %o0, %g7
9592+ addcc %g1, %o0, %g7
9593+
9594+#ifdef CONFIG_PAX_REFCOUNT
9595+ tvs %xcc, 6
9596+#endif
9597+
9598 casx [%o1], %g1, %g7
9599 cmp %g1, %g7
9600 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9601@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9602 2: BACKOFF_SPIN(%o2, %o3, 1b)
9603 ENDPROC(atomic64_add_ret)
9604
9605+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9606+ BACKOFF_SETUP(%o2)
9607+1: ldx [%o1], %g1
9608+ addcc %g1, %o0, %g7
9609+ casx [%o1], %g1, %g7
9610+ cmp %g1, %g7
9611+ bne,pn %xcc, 2f
9612+ add %g7, %o0, %g7
9613+ mov %g7, %o0
9614+ retl
9615+ nop
9616+2: BACKOFF_SPIN(%o2, %o3, 1b)
9617+ENDPROC(atomic64_add_ret_unchecked)
9618+
9619 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9620 BACKOFF_SETUP(%o2)
9621 1: ldx [%o1], %g1
9622- sub %g1, %o0, %g7
9623+ subcc %g1, %o0, %g7
9624+
9625+#ifdef CONFIG_PAX_REFCOUNT
9626+ tvs %xcc, 6
9627+#endif
9628+
9629 casx [%o1], %g1, %g7
9630 cmp %g1, %g7
9631 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9632diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9633index 0c4e35e..745d3e4 100644
9634--- a/arch/sparc/lib/ksyms.c
9635+++ b/arch/sparc/lib/ksyms.c
9636@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9637
9638 /* Atomic counter implementation. */
9639 EXPORT_SYMBOL(atomic_add);
9640+EXPORT_SYMBOL(atomic_add_unchecked);
9641 EXPORT_SYMBOL(atomic_add_ret);
9642+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9643 EXPORT_SYMBOL(atomic_sub);
9644+EXPORT_SYMBOL(atomic_sub_unchecked);
9645 EXPORT_SYMBOL(atomic_sub_ret);
9646 EXPORT_SYMBOL(atomic64_add);
9647+EXPORT_SYMBOL(atomic64_add_unchecked);
9648 EXPORT_SYMBOL(atomic64_add_ret);
9649+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9650 EXPORT_SYMBOL(atomic64_sub);
9651+EXPORT_SYMBOL(atomic64_sub_unchecked);
9652 EXPORT_SYMBOL(atomic64_sub_ret);
9653 EXPORT_SYMBOL(atomic64_dec_if_positive);
9654
9655diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9656index 30c3ecc..736f015 100644
9657--- a/arch/sparc/mm/Makefile
9658+++ b/arch/sparc/mm/Makefile
9659@@ -2,7 +2,7 @@
9660 #
9661
9662 asflags-y := -ansi
9663-ccflags-y := -Werror
9664+#ccflags-y := -Werror
9665
9666 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9667 obj-y += fault_$(BITS).o
9668diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9669index e98bfda..ea8d221 100644
9670--- a/arch/sparc/mm/fault_32.c
9671+++ b/arch/sparc/mm/fault_32.c
9672@@ -21,6 +21,9 @@
9673 #include <linux/perf_event.h>
9674 #include <linux/interrupt.h>
9675 #include <linux/kdebug.h>
9676+#include <linux/slab.h>
9677+#include <linux/pagemap.h>
9678+#include <linux/compiler.h>
9679
9680 #include <asm/page.h>
9681 #include <asm/pgtable.h>
9682@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9683 return safe_compute_effective_address(regs, insn);
9684 }
9685
9686+#ifdef CONFIG_PAX_PAGEEXEC
9687+#ifdef CONFIG_PAX_DLRESOLVE
9688+static void pax_emuplt_close(struct vm_area_struct *vma)
9689+{
9690+ vma->vm_mm->call_dl_resolve = 0UL;
9691+}
9692+
9693+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9694+{
9695+ unsigned int *kaddr;
9696+
9697+ vmf->page = alloc_page(GFP_HIGHUSER);
9698+ if (!vmf->page)
9699+ return VM_FAULT_OOM;
9700+
9701+ kaddr = kmap(vmf->page);
9702+ memset(kaddr, 0, PAGE_SIZE);
9703+ kaddr[0] = 0x9DE3BFA8U; /* save */
9704+ flush_dcache_page(vmf->page);
9705+ kunmap(vmf->page);
9706+ return VM_FAULT_MAJOR;
9707+}
9708+
9709+static const struct vm_operations_struct pax_vm_ops = {
9710+ .close = pax_emuplt_close,
9711+ .fault = pax_emuplt_fault
9712+};
9713+
9714+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9715+{
9716+ int ret;
9717+
9718+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9719+ vma->vm_mm = current->mm;
9720+ vma->vm_start = addr;
9721+ vma->vm_end = addr + PAGE_SIZE;
9722+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9723+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9724+ vma->vm_ops = &pax_vm_ops;
9725+
9726+ ret = insert_vm_struct(current->mm, vma);
9727+ if (ret)
9728+ return ret;
9729+
9730+ ++current->mm->total_vm;
9731+ return 0;
9732+}
9733+#endif
9734+
9735+/*
9736+ * PaX: decide what to do with offenders (regs->pc = fault address)
9737+ *
9738+ * returns 1 when task should be killed
9739+ * 2 when patched PLT trampoline was detected
9740+ * 3 when unpatched PLT trampoline was detected
9741+ */
9742+static int pax_handle_fetch_fault(struct pt_regs *regs)
9743+{
9744+
9745+#ifdef CONFIG_PAX_EMUPLT
9746+ int err;
9747+
9748+ do { /* PaX: patched PLT emulation #1 */
9749+ unsigned int sethi1, sethi2, jmpl;
9750+
9751+ err = get_user(sethi1, (unsigned int *)regs->pc);
9752+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9753+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9754+
9755+ if (err)
9756+ break;
9757+
9758+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9759+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9760+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9761+ {
9762+ unsigned int addr;
9763+
9764+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9765+ addr = regs->u_regs[UREG_G1];
9766+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9767+ regs->pc = addr;
9768+ regs->npc = addr+4;
9769+ return 2;
9770+ }
9771+ } while (0);
9772+
9773+ do { /* PaX: patched PLT emulation #2 */
9774+ unsigned int ba;
9775+
9776+ err = get_user(ba, (unsigned int *)regs->pc);
9777+
9778+ if (err)
9779+ break;
9780+
9781+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9782+ unsigned int addr;
9783+
9784+ if ((ba & 0xFFC00000U) == 0x30800000U)
9785+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9786+ else
9787+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9788+ regs->pc = addr;
9789+ regs->npc = addr+4;
9790+ return 2;
9791+ }
9792+ } while (0);
9793+
9794+ do { /* PaX: patched PLT emulation #3 */
9795+ unsigned int sethi, bajmpl, nop;
9796+
9797+ err = get_user(sethi, (unsigned int *)regs->pc);
9798+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9799+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9800+
9801+ if (err)
9802+ break;
9803+
9804+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9805+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9806+ nop == 0x01000000U)
9807+ {
9808+ unsigned int addr;
9809+
9810+ addr = (sethi & 0x003FFFFFU) << 10;
9811+ regs->u_regs[UREG_G1] = addr;
9812+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9813+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9814+ else
9815+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9816+ regs->pc = addr;
9817+ regs->npc = addr+4;
9818+ return 2;
9819+ }
9820+ } while (0);
9821+
9822+ do { /* PaX: unpatched PLT emulation step 1 */
9823+ unsigned int sethi, ba, nop;
9824+
9825+ err = get_user(sethi, (unsigned int *)regs->pc);
9826+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9827+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9828+
9829+ if (err)
9830+ break;
9831+
9832+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9833+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9834+ nop == 0x01000000U)
9835+ {
9836+ unsigned int addr, save, call;
9837+
9838+ if ((ba & 0xFFC00000U) == 0x30800000U)
9839+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9840+ else
9841+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9842+
9843+ err = get_user(save, (unsigned int *)addr);
9844+ err |= get_user(call, (unsigned int *)(addr+4));
9845+ err |= get_user(nop, (unsigned int *)(addr+8));
9846+ if (err)
9847+ break;
9848+
9849+#ifdef CONFIG_PAX_DLRESOLVE
9850+ if (save == 0x9DE3BFA8U &&
9851+ (call & 0xC0000000U) == 0x40000000U &&
9852+ nop == 0x01000000U)
9853+ {
9854+ struct vm_area_struct *vma;
9855+ unsigned long call_dl_resolve;
9856+
9857+ down_read(&current->mm->mmap_sem);
9858+ call_dl_resolve = current->mm->call_dl_resolve;
9859+ up_read(&current->mm->mmap_sem);
9860+ if (likely(call_dl_resolve))
9861+ goto emulate;
9862+
9863+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9864+
9865+ down_write(&current->mm->mmap_sem);
9866+ if (current->mm->call_dl_resolve) {
9867+ call_dl_resolve = current->mm->call_dl_resolve;
9868+ up_write(&current->mm->mmap_sem);
9869+ if (vma)
9870+ kmem_cache_free(vm_area_cachep, vma);
9871+ goto emulate;
9872+ }
9873+
9874+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9875+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9876+ up_write(&current->mm->mmap_sem);
9877+ if (vma)
9878+ kmem_cache_free(vm_area_cachep, vma);
9879+ return 1;
9880+ }
9881+
9882+ if (pax_insert_vma(vma, call_dl_resolve)) {
9883+ up_write(&current->mm->mmap_sem);
9884+ kmem_cache_free(vm_area_cachep, vma);
9885+ return 1;
9886+ }
9887+
9888+ current->mm->call_dl_resolve = call_dl_resolve;
9889+ up_write(&current->mm->mmap_sem);
9890+
9891+emulate:
9892+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9893+ regs->pc = call_dl_resolve;
9894+ regs->npc = addr+4;
9895+ return 3;
9896+ }
9897+#endif
9898+
9899+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9900+ if ((save & 0xFFC00000U) == 0x05000000U &&
9901+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9902+ nop == 0x01000000U)
9903+ {
9904+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9905+ regs->u_regs[UREG_G2] = addr + 4;
9906+ addr = (save & 0x003FFFFFU) << 10;
9907+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9908+ regs->pc = addr;
9909+ regs->npc = addr+4;
9910+ return 3;
9911+ }
9912+ }
9913+ } while (0);
9914+
9915+ do { /* PaX: unpatched PLT emulation step 2 */
9916+ unsigned int save, call, nop;
9917+
9918+ err = get_user(save, (unsigned int *)(regs->pc-4));
9919+ err |= get_user(call, (unsigned int *)regs->pc);
9920+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9921+ if (err)
9922+ break;
9923+
9924+ if (save == 0x9DE3BFA8U &&
9925+ (call & 0xC0000000U) == 0x40000000U &&
9926+ nop == 0x01000000U)
9927+ {
9928+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9929+
9930+ regs->u_regs[UREG_RETPC] = regs->pc;
9931+ regs->pc = dl_resolve;
9932+ regs->npc = dl_resolve+4;
9933+ return 3;
9934+ }
9935+ } while (0);
9936+#endif
9937+
9938+ return 1;
9939+}
9940+
9941+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9942+{
9943+ unsigned long i;
9944+
9945+ printk(KERN_ERR "PAX: bytes at PC: ");
9946+ for (i = 0; i < 8; i++) {
9947+ unsigned int c;
9948+ if (get_user(c, (unsigned int *)pc+i))
9949+ printk(KERN_CONT "???????? ");
9950+ else
9951+ printk(KERN_CONT "%08x ", c);
9952+ }
9953+ printk("\n");
9954+}
9955+#endif
9956+
9957 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9958 int text_fault)
9959 {
9960@@ -230,6 +504,24 @@ good_area:
9961 if (!(vma->vm_flags & VM_WRITE))
9962 goto bad_area;
9963 } else {
9964+
9965+#ifdef CONFIG_PAX_PAGEEXEC
9966+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9967+ up_read(&mm->mmap_sem);
9968+ switch (pax_handle_fetch_fault(regs)) {
9969+
9970+#ifdef CONFIG_PAX_EMUPLT
9971+ case 2:
9972+ case 3:
9973+ return;
9974+#endif
9975+
9976+ }
9977+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9978+ do_group_exit(SIGKILL);
9979+ }
9980+#endif
9981+
9982 /* Allow reads even for write-only mappings */
9983 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9984 goto bad_area;
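
The PLT emulation above leans on one bit-twiddling idiom worth spelling out: an expression such as (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U) sign-extends the 13-bit simm13 field of a jmpl, and the ba cases apply the same trick to their 22- and 19-bit branch displacements, shifting by 2 afterwards to convert a word offset into bytes. A minimal standalone sketch of the idiom (illustrative, not part of the patch):

#include <stdio.h>

/* Sign-extend the low 13 bits (simm13) of a SPARC instruction word,
 * using the same ((x | ~mask) ^ sign) + sign idiom as the code above. */
static unsigned int sign_extend_simm13(unsigned int insn)
{
	return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
}

/* 22-bit branch displacement of a ba; counted in words, hence << 2. */
static unsigned int ba_displacement(unsigned int insn)
{
	return (((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2;
}

int main(void)
{
	/* simm13 == 0x1FFF is -1: expect ffffffff */
	printf("%08x\n", sign_extend_simm13(0x81C06000U | 0x1FFFU));
	/* displacement of +1 word: expect 00000004 */
	printf("%08x\n", ba_displacement(0x30800000U | 1U));
	return 0;
}

The fault_64.c hunk that follows uses the same idiom with 64-bit masks and, for TIF_32BIT compat tasks, truncates the result with addr &= 0xFFFFFFFFUL.
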
9985diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9986index 5062ff3..e0b75f3 100644
9987--- a/arch/sparc/mm/fault_64.c
9988+++ b/arch/sparc/mm/fault_64.c
9989@@ -21,6 +21,9 @@
9990 #include <linux/kprobes.h>
9991 #include <linux/kdebug.h>
9992 #include <linux/percpu.h>
9993+#include <linux/slab.h>
9994+#include <linux/pagemap.h>
9995+#include <linux/compiler.h>
9996
9997 #include <asm/page.h>
9998 #include <asm/pgtable.h>
9999@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10000 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10001 regs->tpc);
10002 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10003- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10004+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10005 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10006 dump_stack();
10007 unhandled_fault(regs->tpc, current, regs);
10008@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10009 show_regs(regs);
10010 }
10011
10012+#ifdef CONFIG_PAX_PAGEEXEC
10013+#ifdef CONFIG_PAX_DLRESOLVE
10014+static void pax_emuplt_close(struct vm_area_struct *vma)
10015+{
10016+ vma->vm_mm->call_dl_resolve = 0UL;
10017+}
10018+
10019+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10020+{
10021+ unsigned int *kaddr;
10022+
10023+ vmf->page = alloc_page(GFP_HIGHUSER);
10024+ if (!vmf->page)
10025+ return VM_FAULT_OOM;
10026+
10027+ kaddr = kmap(vmf->page);
10028+ memset(kaddr, 0, PAGE_SIZE);
10029+ kaddr[0] = 0x9DE3BFA8U; /* save */
10030+ flush_dcache_page(vmf->page);
10031+ kunmap(vmf->page);
10032+ return VM_FAULT_MAJOR;
10033+}
10034+
10035+static const struct vm_operations_struct pax_vm_ops = {
10036+ .close = pax_emuplt_close,
10037+ .fault = pax_emuplt_fault
10038+};
10039+
10040+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10041+{
10042+ int ret;
10043+
10044+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10045+ vma->vm_mm = current->mm;
10046+ vma->vm_start = addr;
10047+ vma->vm_end = addr + PAGE_SIZE;
10048+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10049+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10050+ vma->vm_ops = &pax_vm_ops;
10051+
10052+ ret = insert_vm_struct(current->mm, vma);
10053+ if (ret)
10054+ return ret;
10055+
10056+ ++current->mm->total_vm;
10057+ return 0;
10058+}
10059+#endif
10060+
10061+/*
10062+ * PaX: decide what to do with offenders (regs->tpc = fault address)
10063+ *
10064+ * returns 1 when task should be killed
10065+ * 2 when patched PLT trampoline was detected
10066+ * 3 when unpatched PLT trampoline was detected
10067+ */
10068+static int pax_handle_fetch_fault(struct pt_regs *regs)
10069+{
10070+
10071+#ifdef CONFIG_PAX_EMUPLT
10072+ int err;
10073+
10074+ do { /* PaX: patched PLT emulation #1 */
10075+ unsigned int sethi1, sethi2, jmpl;
10076+
10077+ err = get_user(sethi1, (unsigned int *)regs->tpc);
10078+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10079+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10080+
10081+ if (err)
10082+ break;
10083+
10084+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10085+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10086+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10087+ {
10088+ unsigned long addr;
10089+
10090+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10091+ addr = regs->u_regs[UREG_G1];
10092+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10093+
10094+ if (test_thread_flag(TIF_32BIT))
10095+ addr &= 0xFFFFFFFFUL;
10096+
10097+ regs->tpc = addr;
10098+ regs->tnpc = addr+4;
10099+ return 2;
10100+ }
10101+ } while (0);
10102+
10103+ do { /* PaX: patched PLT emulation #2 */
10104+ unsigned int ba;
10105+
10106+ err = get_user(ba, (unsigned int *)regs->tpc);
10107+
10108+ if (err)
10109+ break;
10110+
10111+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10112+ unsigned long addr;
10113+
10114+ if ((ba & 0xFFC00000U) == 0x30800000U)
10115+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10116+ else
10117+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10118+
10119+ if (test_thread_flag(TIF_32BIT))
10120+ addr &= 0xFFFFFFFFUL;
10121+
10122+ regs->tpc = addr;
10123+ regs->tnpc = addr+4;
10124+ return 2;
10125+ }
10126+ } while (0);
10127+
10128+ do { /* PaX: patched PLT emulation #3 */
10129+ unsigned int sethi, bajmpl, nop;
10130+
10131+ err = get_user(sethi, (unsigned int *)regs->tpc);
10132+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10133+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10134+
10135+ if (err)
10136+ break;
10137+
10138+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10139+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10140+ nop == 0x01000000U)
10141+ {
10142+ unsigned long addr;
10143+
10144+ addr = (sethi & 0x003FFFFFU) << 10;
10145+ regs->u_regs[UREG_G1] = addr;
10146+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10147+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10148+ else
10149+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10150+
10151+ if (test_thread_flag(TIF_32BIT))
10152+ addr &= 0xFFFFFFFFUL;
10153+
10154+ regs->tpc = addr;
10155+ regs->tnpc = addr+4;
10156+ return 2;
10157+ }
10158+ } while (0);
10159+
10160+ do { /* PaX: patched PLT emulation #4 */
10161+ unsigned int sethi, mov1, call, mov2;
10162+
10163+ err = get_user(sethi, (unsigned int *)regs->tpc);
10164+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10165+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
10166+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10167+
10168+ if (err)
10169+ break;
10170+
10171+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10172+ mov1 == 0x8210000FU &&
10173+ (call & 0xC0000000U) == 0x40000000U &&
10174+ mov2 == 0x9E100001U)
10175+ {
10176+ unsigned long addr;
10177+
10178+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10179+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10180+
10181+ if (test_thread_flag(TIF_32BIT))
10182+ addr &= 0xFFFFFFFFUL;
10183+
10184+ regs->tpc = addr;
10185+ regs->tnpc = addr+4;
10186+ return 2;
10187+ }
10188+ } while (0);
10189+
10190+ do { /* PaX: patched PLT emulation #5 */
10191+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10192+
10193+ err = get_user(sethi, (unsigned int *)regs->tpc);
10194+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10195+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10196+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10197+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10198+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10199+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10200+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10201+
10202+ if (err)
10203+ break;
10204+
10205+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10206+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10207+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10208+ (or1 & 0xFFFFE000U) == 0x82106000U &&
10209+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10210+ sllx == 0x83287020U &&
10211+ jmpl == 0x81C04005U &&
10212+ nop == 0x01000000U)
10213+ {
10214+ unsigned long addr;
10215+
10216+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10217+ regs->u_regs[UREG_G1] <<= 32;
10218+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10219+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10220+ regs->tpc = addr;
10221+ regs->tnpc = addr+4;
10222+ return 2;
10223+ }
10224+ } while (0);
10225+
10226+ do { /* PaX: patched PLT emulation #6 */
10227+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10228+
10229+ err = get_user(sethi, (unsigned int *)regs->tpc);
10230+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10231+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10232+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10233+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
10234+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10235+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10236+
10237+ if (err)
10238+ break;
10239+
10240+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10241+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10242+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10243+ sllx == 0x83287020U &&
10244+ (or & 0xFFFFE000U) == 0x8A116000U &&
10245+ jmpl == 0x81C04005U &&
10246+ nop == 0x01000000U)
10247+ {
10248+ unsigned long addr;
10249+
10250+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
10251+ regs->u_regs[UREG_G1] <<= 32;
10252+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
10253+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10254+ regs->tpc = addr;
10255+ regs->tnpc = addr+4;
10256+ return 2;
10257+ }
10258+ } while (0);
10259+
10260+ do { /* PaX: unpatched PLT emulation step 1 */
10261+ unsigned int sethi, ba, nop;
10262+
10263+ err = get_user(sethi, (unsigned int *)regs->tpc);
10264+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10265+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10266+
10267+ if (err)
10268+ break;
10269+
10270+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10271+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10272+ nop == 0x01000000U)
10273+ {
10274+ unsigned long addr;
10275+ unsigned int save, call;
10276+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10277+
10278+ if ((ba & 0xFFC00000U) == 0x30800000U)
10279+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10280+ else
10281+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10282+
10283+ if (test_thread_flag(TIF_32BIT))
10284+ addr &= 0xFFFFFFFFUL;
10285+
10286+ err = get_user(save, (unsigned int *)addr);
10287+ err |= get_user(call, (unsigned int *)(addr+4));
10288+ err |= get_user(nop, (unsigned int *)(addr+8));
10289+ if (err)
10290+ break;
10291+
10292+#ifdef CONFIG_PAX_DLRESOLVE
10293+ if (save == 0x9DE3BFA8U &&
10294+ (call & 0xC0000000U) == 0x40000000U &&
10295+ nop == 0x01000000U)
10296+ {
10297+ struct vm_area_struct *vma;
10298+ unsigned long call_dl_resolve;
10299+
10300+ down_read(&current->mm->mmap_sem);
10301+ call_dl_resolve = current->mm->call_dl_resolve;
10302+ up_read(&current->mm->mmap_sem);
10303+ if (likely(call_dl_resolve))
10304+ goto emulate;
10305+
10306+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10307+
10308+ down_write(&current->mm->mmap_sem);
10309+ if (current->mm->call_dl_resolve) {
10310+ call_dl_resolve = current->mm->call_dl_resolve;
10311+ up_write(&current->mm->mmap_sem);
10312+ if (vma)
10313+ kmem_cache_free(vm_area_cachep, vma);
10314+ goto emulate;
10315+ }
10316+
10317+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10318+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10319+ up_write(&current->mm->mmap_sem);
10320+ if (vma)
10321+ kmem_cache_free(vm_area_cachep, vma);
10322+ return 1;
10323+ }
10324+
10325+ if (pax_insert_vma(vma, call_dl_resolve)) {
10326+ up_write(&current->mm->mmap_sem);
10327+ kmem_cache_free(vm_area_cachep, vma);
10328+ return 1;
10329+ }
10330+
10331+ current->mm->call_dl_resolve = call_dl_resolve;
10332+ up_write(&current->mm->mmap_sem);
10333+
10334+emulate:
10335+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10336+ regs->tpc = call_dl_resolve;
10337+ regs->tnpc = addr+4;
10338+ return 3;
10339+ }
10340+#endif
10341+
10342+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10343+ if ((save & 0xFFC00000U) == 0x05000000U &&
10344+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10345+ nop == 0x01000000U)
10346+ {
10347+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10348+ regs->u_regs[UREG_G2] = addr + 4;
10349+ addr = (save & 0x003FFFFFU) << 10;
10350+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10351+
10352+ if (test_thread_flag(TIF_32BIT))
10353+ addr &= 0xFFFFFFFFUL;
10354+
10355+ regs->tpc = addr;
10356+ regs->tnpc = addr+4;
10357+ return 3;
10358+ }
10359+
10360+ /* PaX: 64-bit PLT stub */
10361+ err = get_user(sethi1, (unsigned int *)addr);
10362+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10363+ err |= get_user(or1, (unsigned int *)(addr+8));
10364+ err |= get_user(or2, (unsigned int *)(addr+12));
10365+ err |= get_user(sllx, (unsigned int *)(addr+16));
10366+ err |= get_user(add, (unsigned int *)(addr+20));
10367+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10368+ err |= get_user(nop, (unsigned int *)(addr+28));
10369+ if (err)
10370+ break;
10371+
10372+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10373+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10374+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10375+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10376+ sllx == 0x89293020U &&
10377+ add == 0x8A010005U &&
10378+ jmpl == 0x89C14000U &&
10379+ nop == 0x01000000U)
10380+ {
10381+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10382+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10383+ regs->u_regs[UREG_G4] <<= 32;
10384+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10385+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10386+ regs->u_regs[UREG_G4] = addr + 24;
10387+ addr = regs->u_regs[UREG_G5];
10388+ regs->tpc = addr;
10389+ regs->tnpc = addr+4;
10390+ return 3;
10391+ }
10392+ }
10393+ } while (0);
10394+
10395+#ifdef CONFIG_PAX_DLRESOLVE
10396+ do { /* PaX: unpatched PLT emulation step 2 */
10397+ unsigned int save, call, nop;
10398+
10399+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10400+ err |= get_user(call, (unsigned int *)regs->tpc);
10401+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10402+ if (err)
10403+ break;
10404+
10405+ if (save == 0x9DE3BFA8U &&
10406+ (call & 0xC0000000U) == 0x40000000U &&
10407+ nop == 0x01000000U)
10408+ {
10409+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10410+
10411+ if (test_thread_flag(TIF_32BIT))
10412+ dl_resolve &= 0xFFFFFFFFUL;
10413+
10414+ regs->u_regs[UREG_RETPC] = regs->tpc;
10415+ regs->tpc = dl_resolve;
10416+ regs->tnpc = dl_resolve+4;
10417+ return 3;
10418+ }
10419+ } while (0);
10420+#endif
10421+
10422+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10423+ unsigned int sethi, ba, nop;
10424+
10425+ err = get_user(sethi, (unsigned int *)regs->tpc);
10426+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10427+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10428+
10429+ if (err)
10430+ break;
10431+
10432+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10433+ (ba & 0xFFF00000U) == 0x30600000U &&
10434+ nop == 0x01000000U)
10435+ {
10436+ unsigned long addr;
10437+
10438+ addr = (sethi & 0x003FFFFFU) << 10;
10439+ regs->u_regs[UREG_G1] = addr;
10440+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10441+
10442+ if (test_thread_flag(TIF_32BIT))
10443+ addr &= 0xFFFFFFFFUL;
10444+
10445+ regs->tpc = addr;
10446+ regs->tnpc = addr+4;
10447+ return 2;
10448+ }
10449+ } while (0);
10450+
10451+#endif
10452+
10453+ return 1;
10454+}
10455+
10456+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10457+{
10458+ unsigned long i;
10459+
10460+ printk(KERN_ERR "PAX: bytes at PC: ");
10461+ for (i = 0; i < 8; i++) {
10462+ unsigned int c;
10463+ if (get_user(c, (unsigned int *)pc+i))
10464+ printk(KERN_CONT "???????? ");
10465+ else
10466+ printk(KERN_CONT "%08x ", c);
10467+ }
10468+ printk("\n");
10469+}
10470+#endif
10471+
10472 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
10473 {
10474 struct mm_struct *mm = current->mm;
10475@@ -341,6 +804,29 @@ retry:
10476 if (!vma)
10477 goto bad_area;
10478
10479+#ifdef CONFIG_PAX_PAGEEXEC
10480+ /* PaX: detect ITLB misses on non-exec pages */
10481+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
10482+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
10483+ {
10484+ if (address != regs->tpc)
10485+ goto good_area;
10486+
10487+ up_read(&mm->mmap_sem);
10488+ switch (pax_handle_fetch_fault(regs)) {
10489+
10490+#ifdef CONFIG_PAX_EMUPLT
10491+ case 2:
10492+ case 3:
10493+ return;
10494+#endif
10495+
10496+ }
10497+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
10498+ do_group_exit(SIGKILL);
10499+ }
10500+#endif
10501+
10502 /* Pure DTLB misses do not tell us whether the fault causing
10503 * load/store/atomic was a write or not, it only says that there
10504 * was no match. So in such a case we (carefully) read the
10505diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
10506index d2b5944..bd813f2 100644
10507--- a/arch/sparc/mm/hugetlbpage.c
10508+++ b/arch/sparc/mm/hugetlbpage.c
10509@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10510
10511 info.flags = 0;
10512 info.length = len;
10513- info.low_limit = TASK_UNMAPPED_BASE;
10514+ info.low_limit = mm->mmap_base;
10515 info.high_limit = min(task_size, VA_EXCLUDE_START);
10516 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10517 info.align_offset = 0;
10518@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10519 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10520 VM_BUG_ON(addr != -ENOMEM);
10521 info.low_limit = VA_EXCLUDE_END;
10522+
10523+#ifdef CONFIG_PAX_RANDMMAP
10524+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10525+ info.low_limit += mm->delta_mmap;
10526+#endif
10527+
10528 info.high_limit = task_size;
10529 addr = vm_unmapped_area(&info);
10530 }
10531@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10532 VM_BUG_ON(addr != -ENOMEM);
10533 info.flags = 0;
10534 info.low_limit = TASK_UNMAPPED_BASE;
10535+
10536+#ifdef CONFIG_PAX_RANDMMAP
10537+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10538+ info.low_limit += mm->delta_mmap;
10539+#endif
10540+
10541 info.high_limit = STACK_TOP32;
10542 addr = vm_unmapped_area(&info);
10543 }
10544@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10545 struct mm_struct *mm = current->mm;
10546 struct vm_area_struct *vma;
10547 unsigned long task_size = TASK_SIZE;
10548+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10549
10550 if (test_thread_flag(TIF_32BIT))
10551 task_size = STACK_TOP32;
10552@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10553 return addr;
10554 }
10555
10556+#ifdef CONFIG_PAX_RANDMMAP
10557+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10558+#endif
10559+
10560 if (addr) {
10561 addr = ALIGN(addr, HPAGE_SIZE);
10562 vma = find_vma(mm, addr);
10563- if (task_size - len >= addr &&
10564- (!vma || addr + len <= vma->vm_start))
10565+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10566 return addr;
10567 }
10568 if (mm->get_unmapped_area == arch_get_unmapped_area)
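
check_heap_stack_gap() and gr_rand_threadstack_offset() are grsecurity helpers defined elsewhere in the patch; the substituted test keeps not just addr + len below the next vma but an additional gap as well (including the randomized thread-stack offset computed above). A much-simplified sketch of the idea, with hypothetical names:

/* Much-simplified sketch; the real helper also handles VM_GROWSDOWN
 * (stack) vmas and the heap-stack gap sysctl. */
static int toy_gap_ok(const struct vm_area_struct *vma, unsigned long addr,
		      unsigned long len, unsigned long offset)
{
	if (!vma)
		return 1;
	return addr + len + offset <= vma->vm_start;
}
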
10569diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
10570index ba6ae7f..83d89bc 100644
10571--- a/arch/sparc/mm/tlb.c
10572+++ b/arch/sparc/mm/tlb.c
10573@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
10574 void flush_tlb_pending(void)
10575 {
10576 struct tlb_batch *tb = &get_cpu_var(tlb_batch);
10577+ struct mm_struct *mm = tb->mm;
10578
10579- if (tb->tlb_nr) {
10580- flush_tsb_user(tb);
10581+ if (!tb->tlb_nr)
10582+ goto out;
10583
10584- if (CTX_VALID(tb->mm->context)) {
10585+ flush_tsb_user(tb);
10586+
10587+ if (CTX_VALID(mm->context)) {
10588+ if (tb->tlb_nr == 1) {
10589+ global_flush_tlb_page(mm, tb->vaddrs[0]);
10590+ } else {
10591 #ifdef CONFIG_SMP
10592 smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
10593 &tb->vaddrs[0]);
10594@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
10595 tb->tlb_nr, &tb->vaddrs[0]);
10596 #endif
10597 }
10598- tb->tlb_nr = 0;
10599 }
10600
10601+ tb->tlb_nr = 0;
10602+
10603+out:
10604 put_cpu_var(tlb_batch);
10605 }
10606
10607+void arch_enter_lazy_mmu_mode(void)
10608+{
10609+ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
10610+
10611+ tb->active = 1;
10612+}
10613+
10614+void arch_leave_lazy_mmu_mode(void)
10615+{
10616+ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
10617+
10618+ if (tb->tlb_nr)
10619+ flush_tlb_pending();
10620+ tb->active = 0;
10621+}
10622+
10623 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
10624 bool exec)
10625 {
10626@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
10627 nr = 0;
10628 }
10629
10630+ if (!tb->active) {
10631+ global_flush_tlb_page(mm, vaddr);
10632+ flush_tsb_user_page(mm, vaddr);
10633+ goto out;
10634+ }
10635+
10636 if (nr == 0)
10637 tb->mm = mm;
10638
10639@@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
10640 if (nr >= TLB_BATCH_NR)
10641 flush_tlb_pending();
10642
10643+out:
10644 put_cpu_var(tlb_batch);
10645 }
10646
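
The tlb.c hunks gate batching on the new tb->active flag: between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() flushes are queued and drained at TLB_BATCH_NR (or on leave), while outside a lazy section each page is flushed and its TSB entry cleared immediately. The policy in miniature, as a toy model rather than kernel code:

#define TLB_BATCH_NR 64

struct toy_batch {
	int active;			/* inside a lazy-MMU section? */
	unsigned int nr;
	unsigned long vaddrs[TLB_BATCH_NR];
};

static void toy_flush_one(unsigned long vaddr)
{
	/* stand-in for global_flush_tlb_page() + flush_tsb_user_page() */
}

static void toy_flush_pending(struct toy_batch *tb)
{
	while (tb->nr)
		toy_flush_one(tb->vaddrs[--tb->nr]);
}

static void toy_add(struct toy_batch *tb, unsigned long vaddr)
{
	if (!tb->active) {		/* not lazy: flush immediately */
		toy_flush_one(vaddr);
		return;
	}
	tb->vaddrs[tb->nr++] = vaddr;
	if (tb->nr >= TLB_BATCH_NR)	/* batch full: drain it */
		toy_flush_pending(tb);
}
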
10647diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
10648index 428982b..2cc3bce 100644
10649--- a/arch/sparc/mm/tsb.c
10650+++ b/arch/sparc/mm/tsb.c
10651@@ -7,11 +7,10 @@
10652 #include <linux/preempt.h>
10653 #include <linux/slab.h>
10654 #include <asm/page.h>
10655-#include <asm/tlbflush.h>
10656-#include <asm/tlb.h>
10657-#include <asm/mmu_context.h>
10658 #include <asm/pgtable.h>
10659+#include <asm/mmu_context.h>
10660 #include <asm/tsb.h>
10661+#include <asm/tlb.h>
10662 #include <asm/oplib.h>
10663
10664 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
10665@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
10666 }
10667 }
10668
10669+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
10670+ unsigned long hash_shift,
10671+ unsigned long nentries)
10672+{
10673+ unsigned long tag, ent, hash;
10674+
10675+ v &= ~0x1UL;
10676+ hash = tsb_hash(v, hash_shift, nentries);
10677+ ent = tsb + (hash * sizeof(struct tsb));
10678+ tag = (v >> 22UL);
10679+
10680+ tsb_flush(ent, tag);
10681+}
10682+
10683 static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
10684 unsigned long tsb, unsigned long nentries)
10685 {
10686 unsigned long i;
10687
10688- for (i = 0; i < tb->tlb_nr; i++) {
10689- unsigned long v = tb->vaddrs[i];
10690- unsigned long tag, ent, hash;
10691-
10692- v &= ~0x1UL;
10693-
10694- hash = tsb_hash(v, hash_shift, nentries);
10695- ent = tsb + (hash * sizeof(struct tsb));
10696- tag = (v >> 22UL);
10697-
10698- tsb_flush(ent, tag);
10699- }
10700+ for (i = 0; i < tb->tlb_nr; i++)
10701+ __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
10702 }
10703
10704 void flush_tsb_user(struct tlb_batch *tb)
10705@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
10706 spin_unlock_irqrestore(&mm->context.lock, flags);
10707 }
10708
10709+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
10710+{
10711+ unsigned long nentries, base, flags;
10712+
10713+ spin_lock_irqsave(&mm->context.lock, flags);
10714+
10715+ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
10716+ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
10717+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
10718+ base = __pa(base);
10719+ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
10720+
10721+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
10722+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
10723+ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
10724+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
10725+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
10726+ base = __pa(base);
10727+ __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
10728+ }
10729+#endif
10730+ spin_unlock_irqrestore(&mm->context.lock, flags);
10731+}
10732+
10733 #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
10734 #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
10735
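
__flush_tsb_one_entry() above indexes the TSB via tsb_hash(), which (as defined earlier in this file) is a plain shift-and-mask into a power-of-two table; that is what makes the new flush_tsb_user_page() cheap: one hash per TSB (base pages plus, when configured, huge pages) instead of a loop over a whole batch. Assumed shape of the hash:

/* assumed shape of tsb_hash(), per arch/sparc/mm/tsb.c */
static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
			      unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);	/* nentries is a power of two */
}
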
10736diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
10737index f8e13d4..432aa0c 100644
10738--- a/arch/sparc/mm/ultra.S
10739+++ b/arch/sparc/mm/ultra.S
10740@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
10741 nop
10742
10743 .align 32
10744+ .globl __flush_tlb_page
10745+__flush_tlb_page: /* 22 insns */
10746+ /* %o0 = context, %o1 = vaddr */
10747+ rdpr %pstate, %g7
10748+ andn %g7, PSTATE_IE, %g2
10749+ wrpr %g2, %pstate
10750+ mov SECONDARY_CONTEXT, %o4
10751+ ldxa [%o4] ASI_DMMU, %g2
10752+ stxa %o0, [%o4] ASI_DMMU
10753+ andcc %o1, 1, %g0
10754+ andn %o1, 1, %o3
10755+ be,pn %icc, 1f
10756+ or %o3, 0x10, %o3
10757+ stxa %g0, [%o3] ASI_IMMU_DEMAP
10758+1: stxa %g0, [%o3] ASI_DMMU_DEMAP
10759+ membar #Sync
10760+ stxa %g2, [%o4] ASI_DMMU
10761+ sethi %hi(KERNBASE), %o4
10762+ flush %o4
10763+ retl
10764+ wrpr %g7, 0x0, %pstate
10765+ nop
10766+ nop
10767+ nop
10768+ nop
10769+
10770+ .align 32
10771 .globl __flush_tlb_pending
10772 __flush_tlb_pending: /* 26 insns */
10773 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
10774@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
10775 retl
10776 wrpr %g7, 0x0, %pstate
10777
10778+__cheetah_flush_tlb_page: /* 22 insns */
10779+ /* %o0 = context, %o1 = vaddr */
10780+ rdpr %pstate, %g7
10781+ andn %g7, PSTATE_IE, %g2
10782+ wrpr %g2, 0x0, %pstate
10783+ wrpr %g0, 1, %tl
10784+ mov PRIMARY_CONTEXT, %o4
10785+ ldxa [%o4] ASI_DMMU, %g2
10786+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
10787+ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
10788+ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
10789+ stxa %o0, [%o4] ASI_DMMU
10790+ andcc %o1, 1, %g0
10791+ be,pn %icc, 1f
10792+ andn %o1, 1, %o3
10793+ stxa %g0, [%o3] ASI_IMMU_DEMAP
10794+1: stxa %g0, [%o3] ASI_DMMU_DEMAP
10795+ membar #Sync
10796+ stxa %g2, [%o4] ASI_DMMU
10797+ sethi %hi(KERNBASE), %o4
10798+ flush %o4
10799+ wrpr %g0, 0, %tl
10800+ retl
10801+ wrpr %g7, 0x0, %pstate
10802+
10803 __cheetah_flush_tlb_pending: /* 27 insns */
10804 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
10805 rdpr %pstate, %g7
10806@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
10807 retl
10808 nop
10809
10810+__hypervisor_flush_tlb_page: /* 11 insns */
10811+ /* %o0 = context, %o1 = vaddr */
10812+ mov %o0, %g2
10813+ mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
10814+ mov %g2, %o1 /* ARG1: mmu context */
10815+ mov HV_MMU_ALL, %o2 /* ARG2: flags */
10816+ srlx %o0, PAGE_SHIFT, %o0
10817+ sllx %o0, PAGE_SHIFT, %o0
10818+ ta HV_MMU_UNMAP_ADDR_TRAP
10819+ brnz,pn %o0, __hypervisor_tlb_tl0_error
10820+ mov HV_MMU_UNMAP_ADDR_TRAP, %o1
10821+ retl
10822+ nop
10823+
10824 __hypervisor_flush_tlb_pending: /* 16 insns */
10825 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
10826 sllx %o1, 3, %g1
10827@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
10828 call tlb_patch_one
10829 mov 19, %o2
10830
10831+ sethi %hi(__flush_tlb_page), %o0
10832+ or %o0, %lo(__flush_tlb_page), %o0
10833+ sethi %hi(__cheetah_flush_tlb_page), %o1
10834+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
10835+ call tlb_patch_one
10836+ mov 22, %o2
10837+
10838 sethi %hi(__flush_tlb_pending), %o0
10839 or %o0, %lo(__flush_tlb_pending), %o0
10840 sethi %hi(__cheetah_flush_tlb_pending), %o1
10841@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
10842 nop
10843 nop
10844
10845- .globl xcall_flush_tlb_pending
10846-xcall_flush_tlb_pending: /* 21 insns */
10847- /* %g5=context, %g1=nr, %g7=vaddrs[] */
10848- sllx %g1, 3, %g1
10849+ .globl xcall_flush_tlb_page
10850+xcall_flush_tlb_page: /* 17 insns */
10851+ /* %g5=context, %g1=vaddr */
10852 mov PRIMARY_CONTEXT, %g4
10853 ldxa [%g4] ASI_DMMU, %g2
10854 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
10855@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
10856 or %g5, %g4, %g5
10857 mov PRIMARY_CONTEXT, %g4
10858 stxa %g5, [%g4] ASI_DMMU
10859-1: sub %g1, (1 << 3), %g1
10860- ldx [%g7 + %g1], %g5
10861- andcc %g5, 0x1, %g0
10862+ andcc %g1, 0x1, %g0
10863 be,pn %icc, 2f
10864-
10865- andn %g5, 0x1, %g5
10866+ andn %g1, 0x1, %g5
10867 stxa %g0, [%g5] ASI_IMMU_DEMAP
10868 2: stxa %g0, [%g5] ASI_DMMU_DEMAP
10869 membar #Sync
10870- brnz,pt %g1, 1b
10871- nop
10872 stxa %g2, [%g4] ASI_DMMU
10873 retry
10874 nop
10875+ nop
10876
10877 .globl xcall_flush_tlb_kernel_range
10878 xcall_flush_tlb_kernel_range: /* 25 insns */
10879@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
10880 membar #Sync
10881 retry
10882
10883- .globl __hypervisor_xcall_flush_tlb_pending
10884-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
10885- /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
10886- sllx %g1, 3, %g1
10887+ .globl __hypervisor_xcall_flush_tlb_page
10888+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
10889+ /* %g5=ctx, %g1=vaddr */
10890 mov %o0, %g2
10891 mov %o1, %g3
10892 mov %o2, %g4
10893-1: sub %g1, (1 << 3), %g1
10894- ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
10895+ mov %g1, %o0 /* ARG0: virtual address */
10896 mov %g5, %o1 /* ARG1: mmu context */
10897 mov HV_MMU_ALL, %o2 /* ARG2: flags */
10898 srlx %o0, PAGE_SHIFT, %o0
10899@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
10900 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
10901 brnz,a,pn %o0, __hypervisor_tlb_xcall_error
10902 mov %o0, %g5
10903- brnz,pt %g1, 1b
10904- nop
10905 mov %g2, %o0
10906 mov %g3, %o1
10907 mov %g4, %o2
10908@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
10909 call tlb_patch_one
10910 mov 10, %o2
10911
10912+ sethi %hi(__flush_tlb_page), %o0
10913+ or %o0, %lo(__flush_tlb_page), %o0
10914+ sethi %hi(__hypervisor_flush_tlb_page), %o1
10915+ or %o1, %lo(__hypervisor_flush_tlb_page), %o1
10916+ call tlb_patch_one
10917+ mov 11, %o2
10918+
10919 sethi %hi(__flush_tlb_pending), %o0
10920 or %o0, %lo(__flush_tlb_pending), %o0
10921 sethi %hi(__hypervisor_flush_tlb_pending), %o1
10922@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
10923 call tlb_patch_one
10924 mov 21, %o2
10925
10926- sethi %hi(xcall_flush_tlb_pending), %o0
10927- or %o0, %lo(xcall_flush_tlb_pending), %o0
10928- sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
10929- or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
10930+ sethi %hi(xcall_flush_tlb_page), %o0
10931+ or %o0, %lo(xcall_flush_tlb_page), %o0
10932+ sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
10933+ or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
10934 call tlb_patch_one
10935- mov 21, %o2
10936+ mov 17, %o2
10937
10938 sethi %hi(xcall_flush_tlb_kernel_range), %o0
10939 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
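
The exact "/* NN insns */" counts and the matching "mov NN, %o2" arguments in the patching stanzas are load-bearing: tlb_patch_one(), defined elsewhere in arch/sparc/mm, copies exactly that many instruction words from the CPU-specific flavour over the generic routine and flushes the instruction cache behind itself. Conceptually (a hedged sketch, not the kernel's function):

static void patch_insns(unsigned int *dst, const unsigned int *src,
			unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		dst[i] = src[i];
		/* each patched word must leave the I-cache, e.g. on sparc64:
		 * __asm__ __volatile__("flush %0" : : "r" (dst + i));
		 */
	}
}
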
10940diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10941index f4500c6..889656c 100644
10942--- a/arch/tile/include/asm/atomic_64.h
10943+++ b/arch/tile/include/asm/atomic_64.h
10944@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10945
10946 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10947
10948+#define atomic64_read_unchecked(v) atomic64_read(v)
10949+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10950+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10951+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10952+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10953+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10954+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10955+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10956+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10957+
10958 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10959 #define smp_mb__before_atomic_dec() smp_mb()
10960 #define smp_mb__after_atomic_dec() smp_mb()
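
The *_unchecked aliases exist for PAX_REFCOUNT: on architectures implementing it, the plain atomic ops trap on overflow, so counters that may legitimately wrap are declared with the unchecked type and operations; tile implements no such checking, so the aliases map straight to the plain ops. Hypothetical usage, assuming the atomic64_unchecked_t type the patch introduces elsewhere:

/* a statistics counter where wrapping is harmless by design */
static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(unsigned int len)
{
	atomic64_add_unchecked(len, &rx_bytes);
}
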
10961diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10962index a9a5299..0fce79e 100644
10963--- a/arch/tile/include/asm/cache.h
10964+++ b/arch/tile/include/asm/cache.h
10965@@ -15,11 +15,12 @@
10966 #ifndef _ASM_TILE_CACHE_H
10967 #define _ASM_TILE_CACHE_H
10968
10969+#include <linux/const.h>
10970 #include <arch/chip.h>
10971
10972 /* bytes per L1 data cache line */
10973 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10974-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10975+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10976
10977 /* bytes per L2 cache line */
10978 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
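
This cache.h change (repeated below for um and unicore32) swaps a bare 1 for _AC(1,UL), so L1_CACHE_BYTES is an unsigned long expression in C yet still usable from assembly. For reference, _AC in <linux/const.h> is roughly:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* assembler: no C suffixes */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* C: paste the suffix on */
#endif
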
10979diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10980index 9ab078a..d6635c2 100644
10981--- a/arch/tile/include/asm/uaccess.h
10982+++ b/arch/tile/include/asm/uaccess.h
10983@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10984 const void __user *from,
10985 unsigned long n)
10986 {
10987- int sz = __compiletime_object_size(to);
10988+ size_t sz = __compiletime_object_size(to);
10989
10990- if (likely(sz == -1 || sz >= n))
10991+ if (likely(sz == (size_t)-1 || sz >= n))
10992 n = _copy_from_user(to, from, n);
10993 else
10994 copy_from_user_overflow();
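
__compiletime_object_size() yields (size_t)-1 when the object's size cannot be determined at compile time; keeping the result in a signed int and testing sz == -1 leaned on implicit conversions, so the patch makes both the variable and the sentinel explicitly size_t. The pattern in isolation (illustrative only):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t sz = (size_t)-1;	/* "size unknown" sentinel */
	size_t n = 32;

	/* mirrors the patched check: unknown size, or object large enough */
	if (sz == (size_t)-1 || sz >= n)
		puts("copy proceeds");
	return 0;
}
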
10995diff --git a/arch/um/Makefile b/arch/um/Makefile
10996index 133f7de..1d6f2f1 100644
10997--- a/arch/um/Makefile
10998+++ b/arch/um/Makefile
10999@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11000 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11001 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11002
11003+ifdef CONSTIFY_PLUGIN
11004+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11005+endif
11006+
11007 #This will adjust *FLAGS accordingly to the platform.
11008 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11009
11010diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11011index 19e1bdd..3665b77 100644
11012--- a/arch/um/include/asm/cache.h
11013+++ b/arch/um/include/asm/cache.h
11014@@ -1,6 +1,7 @@
11015 #ifndef __UM_CACHE_H
11016 #define __UM_CACHE_H
11017
11018+#include <linux/const.h>
11019
11020 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11021 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11022@@ -12,6 +13,6 @@
11023 # define L1_CACHE_SHIFT 5
11024 #endif
11025
11026-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11027+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11028
11029 #endif
11030diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11031index 2e0a6b1..a64d0f5 100644
11032--- a/arch/um/include/asm/kmap_types.h
11033+++ b/arch/um/include/asm/kmap_types.h
11034@@ -8,6 +8,6 @@
11035
11036 /* No more #include "asm/arch/kmap_types.h" ! */
11037
11038-#define KM_TYPE_NR 14
11039+#define KM_TYPE_NR 15
11040
11041 #endif
11042diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11043index 5ff53d9..5850cdf 100644
11044--- a/arch/um/include/asm/page.h
11045+++ b/arch/um/include/asm/page.h
11046@@ -14,6 +14,9 @@
11047 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11048 #define PAGE_MASK (~(PAGE_SIZE-1))
11049
11050+#define ktla_ktva(addr) (addr)
11051+#define ktva_ktla(addr) (addr)
11052+
11053 #ifndef __ASSEMBLY__
11054
11055 struct page;
11056diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11057index 0032f92..cd151e0 100644
11058--- a/arch/um/include/asm/pgtable-3level.h
11059+++ b/arch/um/include/asm/pgtable-3level.h
11060@@ -58,6 +58,7 @@
11061 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11062 #define pud_populate(mm, pud, pmd) \
11063 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11064+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11065
11066 #ifdef CONFIG_64BIT
11067 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11068diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11069index b462b13..e7a19aa 100644
11070--- a/arch/um/kernel/process.c
11071+++ b/arch/um/kernel/process.c
11072@@ -386,22 +386,6 @@ int singlestepping(void * t)
11073 return 2;
11074 }
11075
11076-/*
11077- * Only x86 and x86_64 have an arch_align_stack().
11078- * All other arches have "#define arch_align_stack(x) (x)"
11079- * in their asm/system.h
11080- * As this is included in UML from asm-um/system-generic.h,
11081- * we can use it to behave as the subarch does.
11082- */
11083-#ifndef arch_align_stack
11084-unsigned long arch_align_stack(unsigned long sp)
11085-{
11086- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11087- sp -= get_random_int() % 8192;
11088- return sp & ~0xf;
11089-}
11090-#endif
11091-
11092 unsigned long get_wchan(struct task_struct *p)
11093 {
11094 unsigned long stack_page, sp, ip;
11095diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11096index ad8f795..2c7eec6 100644
11097--- a/arch/unicore32/include/asm/cache.h
11098+++ b/arch/unicore32/include/asm/cache.h
11099@@ -12,8 +12,10 @@
11100 #ifndef __UNICORE_CACHE_H__
11101 #define __UNICORE_CACHE_H__
11102
11103-#define L1_CACHE_SHIFT (5)
11104-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11105+#include <linux/const.h>
11106+
11107+#define L1_CACHE_SHIFT 5
11108+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11109
11110 /*
11111 * Memory returned by kmalloc() may be used for DMA, so we must make
11112diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11113index 0694d09..58ea1a1 100644
11114--- a/arch/x86/Kconfig
11115+++ b/arch/x86/Kconfig
11116@@ -238,7 +238,7 @@ config X86_HT
11117
11118 config X86_32_LAZY_GS
11119 def_bool y
11120- depends on X86_32 && !CC_STACKPROTECTOR
11121+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11122
11123 config ARCH_HWEIGHT_CFLAGS
11124 string
11125@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
11126
11127 config X86_MSR
11128 tristate "/dev/cpu/*/msr - Model-specific register support"
11129+ depends on !GRKERNSEC_KMEM
11130 ---help---
11131 This device gives privileged processes access to the x86
11132 Model-Specific Registers (MSRs). It is a character device with
11133@@ -1054,7 +1055,7 @@ choice
11134
11135 config NOHIGHMEM
11136 bool "off"
11137- depends on !X86_NUMAQ
11138+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11139 ---help---
11140 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11141 However, the address space of 32-bit x86 processors is only 4
11142@@ -1091,7 +1092,7 @@ config NOHIGHMEM
11143
11144 config HIGHMEM4G
11145 bool "4GB"
11146- depends on !X86_NUMAQ
11147+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11148 ---help---
11149 Select this if you have a 32-bit processor and between 1 and 4
11150 gigabytes of physical RAM.
11151@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
11152 hex
11153 default 0xB0000000 if VMSPLIT_3G_OPT
11154 default 0x80000000 if VMSPLIT_2G
11155- default 0x78000000 if VMSPLIT_2G_OPT
11156+ default 0x70000000 if VMSPLIT_2G_OPT
11157 default 0x40000000 if VMSPLIT_1G
11158 default 0xC0000000
11159 depends on X86_32
11160@@ -1542,6 +1543,7 @@ config SECCOMP
11161
11162 config CC_STACKPROTECTOR
11163 bool "Enable -fstack-protector buffer overflow detection"
11164+ depends on X86_64 || !PAX_MEMORY_UDEREF
11165 ---help---
11166 This option turns on the -fstack-protector GCC feature. This
11167 feature puts, at the beginning of functions, a canary value on
11168@@ -1662,6 +1664,8 @@ config X86_NEED_RELOCS
11169 config PHYSICAL_ALIGN
11170 hex "Alignment value to which kernel should be aligned" if X86_32
11171 default "0x1000000"
11172+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11173+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11174 range 0x2000 0x1000000
11175 ---help---
11176 This value puts the alignment restrictions on physical address
11177@@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
11178 If unsure, say N.
11179
11180 config COMPAT_VDSO
11181- def_bool y
11182+ def_bool n
11183 prompt "Compat VDSO support"
11184 depends on X86_32 || IA32_EMULATION
11185+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11186 ---help---
11187 Map the 32-bit VDSO to the predictable old-style address too.
11188
11189diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11190index c026cca..14657ae 100644
11191--- a/arch/x86/Kconfig.cpu
11192+++ b/arch/x86/Kconfig.cpu
11193@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11194
11195 config X86_F00F_BUG
11196 def_bool y
11197- depends on M586MMX || M586TSC || M586 || M486
11198+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11199
11200 config X86_INVD_BUG
11201 def_bool y
11202@@ -327,7 +327,7 @@ config X86_INVD_BUG
11203
11204 config X86_ALIGNMENT_16
11205 def_bool y
11206- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11207+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11208
11209 config X86_INTEL_USERCOPY
11210 def_bool y
11211@@ -373,7 +373,7 @@ config X86_CMPXCHG64
11212 # generates cmov.
11213 config X86_CMOV
11214 def_bool y
11215- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11216+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11217
11218 config X86_MINIMUM_CPU_FAMILY
11219 int
11220diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11221index b322f12..652d0d9 100644
11222--- a/arch/x86/Kconfig.debug
11223+++ b/arch/x86/Kconfig.debug
11224@@ -84,7 +84,7 @@ config X86_PTDUMP
11225 config DEBUG_RODATA
11226 bool "Write protect kernel read-only data structures"
11227 default y
11228- depends on DEBUG_KERNEL
11229+ depends on DEBUG_KERNEL && BROKEN
11230 ---help---
11231 Mark the kernel read-only data as write-protected in the pagetables,
11232 in order to catch accidental (and incorrect) writes to such const
11233@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
11234
11235 config DEBUG_SET_MODULE_RONX
11236 bool "Set loadable kernel module data as NX and text as RO"
11237- depends on MODULES
11238+ depends on MODULES && BROKEN
11239 ---help---
11240 This option helps catch unintended modifications to loadable
11241 kernel module's text and read-only data. It also prevents execution
11242@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
11243
11244 config DEBUG_STRICT_USER_COPY_CHECKS
11245 bool "Strict copy size checks"
11246- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
11247+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
11248 ---help---
11249 Enabling this option turns a certain set of sanity checks for user
11250 copy operations into compile time failures.
11251diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11252index e71fc42..7829607 100644
11253--- a/arch/x86/Makefile
11254+++ b/arch/x86/Makefile
11255@@ -50,6 +50,7 @@ else
11256 UTS_MACHINE := x86_64
11257 CHECKFLAGS += -D__x86_64__ -m64
11258
11259+ biarch := $(call cc-option,-m64)
11260 KBUILD_AFLAGS += -m64
11261 KBUILD_CFLAGS += -m64
11262
11263@@ -230,3 +231,12 @@ define archhelp
11264 echo ' FDARGS="..." arguments for the booted kernel'
11265 echo ' FDINITRD=file initrd for the booted kernel'
11266 endef
11267+
11268+define OLD_LD
11269+
11270+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11271+*** Please upgrade your binutils to 2.18 or newer
11272+endef
11273+
11274+archprepare:
11275+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11276diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11277index 379814b..add62ce 100644
11278--- a/arch/x86/boot/Makefile
11279+++ b/arch/x86/boot/Makefile
11280@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
11281 $(call cc-option, -fno-stack-protector) \
11282 $(call cc-option, -mpreferred-stack-boundary=2)
11283 KBUILD_CFLAGS += $(call cc-option, -m32)
11284+ifdef CONSTIFY_PLUGIN
11285+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11286+endif
11287 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11288 GCOV_PROFILE := n
11289
11290diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11291index 878e4b9..20537ab 100644
11292--- a/arch/x86/boot/bitops.h
11293+++ b/arch/x86/boot/bitops.h
11294@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11295 u8 v;
11296 const u32 *p = (const u32 *)addr;
11297
11298- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11299+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11300 return v;
11301 }
11302
11303@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11304
11305 static inline void set_bit(int nr, void *addr)
11306 {
11307- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11308+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11309 }
11310
11311 #endif /* BOOT_BITOPS_H */
11312diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11313index 18997e5..83d9c67 100644
11314--- a/arch/x86/boot/boot.h
11315+++ b/arch/x86/boot/boot.h
11316@@ -85,7 +85,7 @@ static inline void io_delay(void)
11317 static inline u16 ds(void)
11318 {
11319 u16 seg;
11320- asm("movw %%ds,%0" : "=rm" (seg));
11321+ asm volatile("movw %%ds,%0" : "=rm" (seg));
11322 return seg;
11323 }
11324
11325@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11326 static inline int memcmp(const void *s1, const void *s2, size_t len)
11327 {
11328 u8 diff;
11329- asm("repe; cmpsb; setnz %0"
11330+ asm volatile("repe; cmpsb; setnz %0"
11331 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11332 return diff;
11333 }
11334diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11335index 5ef205c..342191d 100644
11336--- a/arch/x86/boot/compressed/Makefile
11337+++ b/arch/x86/boot/compressed/Makefile
11338@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
11339 KBUILD_CFLAGS += $(cflags-y)
11340 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11341 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11342+ifdef CONSTIFY_PLUGIN
11343+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11344+endif
11345
11346 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11347 GCOV_PROFILE := n
11348diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
11349index c205035..5853587 100644
11350--- a/arch/x86/boot/compressed/eboot.c
11351+++ b/arch/x86/boot/compressed/eboot.c
11352@@ -150,7 +150,6 @@ again:
11353 *addr = max_addr;
11354 }
11355
11356-free_pool:
11357 efi_call_phys1(sys_table->boottime->free_pool, map);
11358
11359 fail:
11360@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
11361 if (i == map_size / desc_size)
11362 status = EFI_NOT_FOUND;
11363
11364-free_pool:
11365 efi_call_phys1(sys_table->boottime->free_pool, map);
11366 fail:
11367 return status;
11368diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11369index 1e3184f..0d11e2e 100644
11370--- a/arch/x86/boot/compressed/head_32.S
11371+++ b/arch/x86/boot/compressed/head_32.S
11372@@ -118,7 +118,7 @@ preferred_addr:
11373 notl %eax
11374 andl %eax, %ebx
11375 #else
11376- movl $LOAD_PHYSICAL_ADDR, %ebx
11377+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11378 #endif
11379
11380 /* Target address to relocate to for decompression */
11381@@ -204,7 +204,7 @@ relocated:
11382 * and where it was actually loaded.
11383 */
11384 movl %ebp, %ebx
11385- subl $LOAD_PHYSICAL_ADDR, %ebx
11386+ subl $____LOAD_PHYSICAL_ADDR, %ebx
11387 jz 2f /* Nothing to be done if loaded at compiled addr. */
11388 /*
11389 * Process relocations.
11390@@ -212,8 +212,7 @@ relocated:
11391
11392 1: subl $4, %edi
11393 movl (%edi), %ecx
11394- testl %ecx, %ecx
11395- jz 2f
11396+ jecxz 2f
11397 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
11398 jmp 1b
11399 2:
11400diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11401index f5d1aaa..cce11dc 100644
11402--- a/arch/x86/boot/compressed/head_64.S
11403+++ b/arch/x86/boot/compressed/head_64.S
11404@@ -91,7 +91,7 @@ ENTRY(startup_32)
11405 notl %eax
11406 andl %eax, %ebx
11407 #else
11408- movl $LOAD_PHYSICAL_ADDR, %ebx
11409+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11410 #endif
11411
11412 /* Target address to relocate to for decompression */
11413@@ -273,7 +273,7 @@ preferred_addr:
11414 notq %rax
11415 andq %rax, %rbp
11416 #else
11417- movq $LOAD_PHYSICAL_ADDR, %rbp
11418+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11419 #endif
11420
11421 /* Target address to relocate to for decompression */
11422diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11423index 88f7ff6..ed695dd 100644
11424--- a/arch/x86/boot/compressed/misc.c
11425+++ b/arch/x86/boot/compressed/misc.c
11426@@ -303,7 +303,7 @@ static void parse_elf(void *output)
11427 case PT_LOAD:
11428 #ifdef CONFIG_RELOCATABLE
11429 dest = output;
11430- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11431+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11432 #else
11433 dest = (void *)(phdr->p_paddr);
11434 #endif
11435@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
11436 error("Destination address too large");
11437 #endif
11438 #ifndef CONFIG_RELOCATABLE
11439- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
11440+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
11441 error("Wrong destination address");
11442 #endif
11443
11444diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
11445index 4d3ff03..e4972ff 100644
11446--- a/arch/x86/boot/cpucheck.c
11447+++ b/arch/x86/boot/cpucheck.c
11448@@ -74,7 +74,7 @@ static int has_fpu(void)
11449 u16 fcw = -1, fsw = -1;
11450 u32 cr0;
11451
11452- asm("movl %%cr0,%0" : "=r" (cr0));
11453+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
11454 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
11455 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
11456 asm volatile("movl %0,%%cr0" : : "r" (cr0));
11457@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
11458 {
11459 u32 f0, f1;
11460
11461- asm("pushfl ; "
11462+ asm volatile("pushfl ; "
11463 "pushfl ; "
11464 "popl %0 ; "
11465 "movl %0,%1 ; "
11466@@ -115,7 +115,7 @@ static void get_flags(void)
11467 set_bit(X86_FEATURE_FPU, cpu.flags);
11468
11469 if (has_eflag(X86_EFLAGS_ID)) {
11470- asm("cpuid"
11471+ asm volatile("cpuid"
11472 : "=a" (max_intel_level),
11473 "=b" (cpu_vendor[0]),
11474 "=d" (cpu_vendor[1]),
11475@@ -124,7 +124,7 @@ static void get_flags(void)
11476
11477 if (max_intel_level >= 0x00000001 &&
11478 max_intel_level <= 0x0000ffff) {
11479- asm("cpuid"
11480+ asm volatile("cpuid"
11481 : "=a" (tfms),
11482 "=c" (cpu.flags[4]),
11483 "=d" (cpu.flags[0])
11484@@ -136,7 +136,7 @@ static void get_flags(void)
11485 cpu.model += ((tfms >> 16) & 0xf) << 4;
11486 }
11487
11488- asm("cpuid"
11489+ asm volatile("cpuid"
11490 : "=a" (max_amd_level)
11491 : "a" (0x80000000)
11492 : "ebx", "ecx", "edx");
11493@@ -144,7 +144,7 @@ static void get_flags(void)
11494 if (max_amd_level >= 0x80000001 &&
11495 max_amd_level <= 0x8000ffff) {
11496 u32 eax = 0x80000001;
11497- asm("cpuid"
11498+ asm volatile("cpuid"
11499 : "+a" (eax),
11500 "=c" (cpu.flags[6]),
11501 "=d" (cpu.flags[1])
11502@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11503 u32 ecx = MSR_K7_HWCR;
11504 u32 eax, edx;
11505
11506- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11507+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11508 eax &= ~(1 << 15);
11509- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11510+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11511
11512 get_flags(); /* Make sure it really did something */
11513 err = check_flags();
11514@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11515 u32 ecx = MSR_VIA_FCR;
11516 u32 eax, edx;
11517
11518- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11519+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11520 eax |= (1<<1)|(1<<7);
11521- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11522+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11523
11524 set_bit(X86_FEATURE_CX8, cpu.flags);
11525 err = check_flags();
11526@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11527 u32 eax, edx;
11528 u32 level = 1;
11529
11530- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11531- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11532- asm("cpuid"
11533+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11534+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11535+ asm volatile("cpuid"
11536 : "+a" (level), "=d" (cpu.flags[0])
11537 : : "ecx", "ebx");
11538- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11539+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11540
11541 err = check_flags();
11542 }
11543diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
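
The asm to asm volatile conversions in bitops.h, boot.h and cpucheck.c above all close the same hole: without volatile, GCC treats an asm with outputs as a pure function of its inputs and may merge, hoist or delete it, which is wrong for instructions that read mutable machine state (pushfl, rdmsr, or cpuid re-run after a wrmsr). A compact illustration, not taken from the patch:

static inline unsigned long long read_tsc(void)
{
	unsigned int lo, hi;

	/* without volatile, two read_tsc() calls could be merged into
	 * one, since the asm looks like a pure function of no inputs */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}
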
11544index 944ce59..87ee37a 100644
11545--- a/arch/x86/boot/header.S
11546+++ b/arch/x86/boot/header.S
11547@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
11548 # single linked list of
11549 # struct setup_data
11550
11551-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
11552+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
11553
11554 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
11555+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11556+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
11557+#else
11558 #define VO_INIT_SIZE (VO__end - VO__text)
11559+#endif
11560 #if ZO_INIT_SIZE > VO_INIT_SIZE
11561 #define INIT_SIZE ZO_INIT_SIZE
11562 #else
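
The header.S hunk leaves the existing ZO/VO max() selection intact and only redefines VO_INIT_SIZE for the 32-bit KERNEXEC case, where the kernel's link layout changes and the plain end-minus-start difference apparently no longer yields the load footprint. A rendering of the selection logic itself, with made-up sizes:

#include <stdio.h>

#define ZO_INIT_SIZE (5u << 20)         /* made up: decompressor workspace */
#define VO_INIT_SIZE (12u << 20)        /* made up: unpacked kernel image */

#if ZO_INIT_SIZE > VO_INIT_SIZE
#define INIT_SIZE ZO_INIT_SIZE
#else
#define INIT_SIZE VO_INIT_SIZE
#endif

int main(void)
{
        printf("boot loader must reserve %u bytes\n", INIT_SIZE);
        return 0;
}
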
11563diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
11564index db75d07..8e6d0af 100644
11565--- a/arch/x86/boot/memory.c
11566+++ b/arch/x86/boot/memory.c
11567@@ -19,7 +19,7 @@
11568
11569 static int detect_memory_e820(void)
11570 {
11571- int count = 0;
11572+ unsigned int count = 0;
11573 struct biosregs ireg, oreg;
11574 struct e820entry *desc = boot_params.e820_map;
11575 static struct e820entry buf; /* static so it is zeroed */
11576diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
11577index 11e8c6e..fdbb1ed 100644
11578--- a/arch/x86/boot/video-vesa.c
11579+++ b/arch/x86/boot/video-vesa.c
11580@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
11581
11582 boot_params.screen_info.vesapm_seg = oreg.es;
11583 boot_params.screen_info.vesapm_off = oreg.di;
11584+ boot_params.screen_info.vesapm_size = oreg.cx;
11585 }
11586
11587 /*
11588diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
11589index 43eda28..5ab5fdb 100644
11590--- a/arch/x86/boot/video.c
11591+++ b/arch/x86/boot/video.c
11592@@ -96,7 +96,7 @@ static void store_mode_params(void)
11593 static unsigned int get_entry(void)
11594 {
11595 char entry_buf[4];
11596- int i, len = 0;
11597+ unsigned int i, len = 0;
11598 int key;
11599 unsigned int v;
11600
11601diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
11602index 5b577d5..3c1fed4 100644
11603--- a/arch/x86/crypto/aes-x86_64-asm_64.S
11604+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
11605@@ -8,6 +8,8 @@
11606 * including this sentence is retained in full.
11607 */
11608
11609+#include <asm/alternative-asm.h>
11610+
11611 .extern crypto_ft_tab
11612 .extern crypto_it_tab
11613 .extern crypto_fl_tab
11614@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
11615 je B192; \
11616 leaq 32(r9),r9;
11617
11618+#define ret pax_force_retaddr 0, 1; ret
11619+
11620 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11621 movq r1,r2; \
11622 movq r3,r4; \
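
The "#define ret ..." line is a preprocessor trick: this .S file is run through cpp, so every later "ret" in the AES macros expands to "pax_force_retaddr 0, 1; ret", tagging the saved return address just before it is used. Under the KERNEXEC BTS method the tag is bit 63, so a return address that has been overwritten with a user-space pointer becomes non-canonical and faults instead of transferring control. An illustrative C rendering of the tag; the forged address below is made up:

#include <stdint.h>
#include <stdio.h>

/* what "btsq $63,(%rsp)" does to the saved return address */
static uint64_t pax_tag(uint64_t retaddr)
{
        return retaddr | (1ULL << 63);
}

int main(void)
{
        uint64_t forged = 0x00007f3212345678ULL;        /* user-space address */

        /* once tagged, the pointer no longer lands in user space: it is
         * non-canonical on x86-64 and faults rather than executing
         * attacker-mapped code */
        printf("%#llx -> %#llx\n", (unsigned long long)forged,
               (unsigned long long)pax_tag(forged));
        return 0;
}
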
11623diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11624index 3470624..201259d 100644
11625--- a/arch/x86/crypto/aesni-intel_asm.S
11626+++ b/arch/x86/crypto/aesni-intel_asm.S
11627@@ -31,6 +31,7 @@
11628
11629 #include <linux/linkage.h>
11630 #include <asm/inst.h>
11631+#include <asm/alternative-asm.h>
11632
11633 #ifdef __x86_64__
11634 .data
11635@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
11636 pop %r14
11637 pop %r13
11638 pop %r12
11639+ pax_force_retaddr 0, 1
11640 ret
11641+ENDPROC(aesni_gcm_dec)
11642
11643
11644 /*****************************************************************************
11645@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
11646 pop %r14
11647 pop %r13
11648 pop %r12
11649+ pax_force_retaddr 0, 1
11650 ret
11651+ENDPROC(aesni_gcm_enc)
11652
11653 #endif
11654
11655@@ -1714,6 +1719,7 @@ _key_expansion_256a:
11656 pxor %xmm1, %xmm0
11657 movaps %xmm0, (TKEYP)
11658 add $0x10, TKEYP
11659+ pax_force_retaddr_bts
11660 ret
11661
11662 .align 4
11663@@ -1738,6 +1744,7 @@ _key_expansion_192a:
11664 shufps $0b01001110, %xmm2, %xmm1
11665 movaps %xmm1, 0x10(TKEYP)
11666 add $0x20, TKEYP
11667+ pax_force_retaddr_bts
11668 ret
11669
11670 .align 4
11671@@ -1757,6 +1764,7 @@ _key_expansion_192b:
11672
11673 movaps %xmm0, (TKEYP)
11674 add $0x10, TKEYP
11675+ pax_force_retaddr_bts
11676 ret
11677
11678 .align 4
11679@@ -1769,6 +1777,7 @@ _key_expansion_256b:
11680 pxor %xmm1, %xmm2
11681 movaps %xmm2, (TKEYP)
11682 add $0x10, TKEYP
11683+ pax_force_retaddr_bts
11684 ret
11685
11686 /*
11687@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
11688 #ifndef __x86_64__
11689 popl KEYP
11690 #endif
11691+ pax_force_retaddr 0, 1
11692 ret
11693+ENDPROC(aesni_set_key)
11694
11695 /*
11696 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
11697@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
11698 popl KLEN
11699 popl KEYP
11700 #endif
11701+ pax_force_retaddr 0, 1
11702 ret
11703+ENDPROC(aesni_enc)
11704
11705 /*
11706 * _aesni_enc1: internal ABI
11707@@ -1959,6 +1972,7 @@ _aesni_enc1:
11708 AESENC KEY STATE
11709 movaps 0x70(TKEYP), KEY
11710 AESENCLAST KEY STATE
11711+ pax_force_retaddr_bts
11712 ret
11713
11714 /*
11715@@ -2067,6 +2081,7 @@ _aesni_enc4:
11716 AESENCLAST KEY STATE2
11717 AESENCLAST KEY STATE3
11718 AESENCLAST KEY STATE4
11719+ pax_force_retaddr_bts
11720 ret
11721
11722 /*
11723@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
11724 popl KLEN
11725 popl KEYP
11726 #endif
11727+ pax_force_retaddr 0, 1
11728 ret
11729+ENDPROC(aesni_dec)
11730
11731 /*
11732 * _aesni_dec1: internal ABI
11733@@ -2146,6 +2163,7 @@ _aesni_dec1:
11734 AESDEC KEY STATE
11735 movaps 0x70(TKEYP), KEY
11736 AESDECLAST KEY STATE
11737+ pax_force_retaddr_bts
11738 ret
11739
11740 /*
11741@@ -2254,6 +2272,7 @@ _aesni_dec4:
11742 AESDECLAST KEY STATE2
11743 AESDECLAST KEY STATE3
11744 AESDECLAST KEY STATE4
11745+ pax_force_retaddr_bts
11746 ret
11747
11748 /*
11749@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
11750 popl KEYP
11751 popl LEN
11752 #endif
11753+ pax_force_retaddr 0, 1
11754 ret
11755+ENDPROC(aesni_ecb_enc)
11756
11757 /*
11758 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11759@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
11760 popl KEYP
11761 popl LEN
11762 #endif
11763+ pax_force_retaddr 0, 1
11764 ret
11765+ENDPROC(aesni_ecb_dec)
11766
11767 /*
11768 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11769@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
11770 popl LEN
11771 popl IVP
11772 #endif
11773+ pax_force_retaddr 0, 1
11774 ret
11775+ENDPROC(aesni_cbc_enc)
11776
11777 /*
11778 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11779@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
11780 popl LEN
11781 popl IVP
11782 #endif
11783+ pax_force_retaddr 0, 1
11784 ret
11785+ENDPROC(aesni_cbc_dec)
11786
11787 #ifdef __x86_64__
11788 .align 16
11789@@ -2526,6 +2553,7 @@ _aesni_inc_init:
11790 mov $1, TCTR_LOW
11791 MOVQ_R64_XMM TCTR_LOW INC
11792 MOVQ_R64_XMM CTR TCTR_LOW
11793+ pax_force_retaddr_bts
11794 ret
11795
11796 /*
11797@@ -2554,6 +2582,7 @@ _aesni_inc:
11798 .Linc_low:
11799 movaps CTR, IV
11800 PSHUFB_XMM BSWAP_MASK IV
11801+ pax_force_retaddr_bts
11802 ret
11803
11804 /*
11805@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
11806 .Lctr_enc_ret:
11807 movups IV, (IVP)
11808 .Lctr_enc_just_ret:
11809+ pax_force_retaddr 0, 1
11810 ret
11811+ENDPROC(aesni_ctr_enc)
11812 #endif
11813diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11814index 391d245..67f35c2 100644
11815--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
11816+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11817@@ -20,6 +20,8 @@
11818 *
11819 */
11820
11821+#include <asm/alternative-asm.h>
11822+
11823 .file "blowfish-x86_64-asm.S"
11824 .text
11825
11826@@ -151,9 +153,11 @@ __blowfish_enc_blk:
11827 jnz __enc_xor;
11828
11829 write_block();
11830+ pax_force_retaddr 0, 1
11831 ret;
11832 __enc_xor:
11833 xor_block();
11834+ pax_force_retaddr 0, 1
11835 ret;
11836
11837 .align 8
11838@@ -188,6 +192,7 @@ blowfish_dec_blk:
11839
11840 movq %r11, %rbp;
11841
11842+ pax_force_retaddr 0, 1
11843 ret;
11844
11845 /**********************************************************************
11846@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
11847
11848 popq %rbx;
11849 popq %rbp;
11850+ pax_force_retaddr 0, 1
11851 ret;
11852
11853 __enc_xor4:
11854@@ -349,6 +355,7 @@ __enc_xor4:
11855
11856 popq %rbx;
11857 popq %rbp;
11858+ pax_force_retaddr 0, 1
11859 ret;
11860
11861 .align 8
11862@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
11863 popq %rbx;
11864 popq %rbp;
11865
11866+ pax_force_retaddr 0, 1
11867 ret;
11868
11869diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11870index 0b33743..7a56206 100644
11871--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11872+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11873@@ -20,6 +20,8 @@
11874 *
11875 */
11876
11877+#include <asm/alternative-asm.h>
11878+
11879 .file "camellia-x86_64-asm_64.S"
11880 .text
11881
11882@@ -229,12 +231,14 @@ __enc_done:
11883 enc_outunpack(mov, RT1);
11884
11885 movq RRBP, %rbp;
11886+ pax_force_retaddr 0, 1
11887 ret;
11888
11889 __enc_xor:
11890 enc_outunpack(xor, RT1);
11891
11892 movq RRBP, %rbp;
11893+ pax_force_retaddr 0, 1
11894 ret;
11895
11896 .global camellia_dec_blk;
11897@@ -275,6 +279,7 @@ __dec_rounds16:
11898 dec_outunpack();
11899
11900 movq RRBP, %rbp;
11901+ pax_force_retaddr 0, 1
11902 ret;
11903
11904 /**********************************************************************
11905@@ -468,6 +473,7 @@ __enc2_done:
11906
11907 movq RRBP, %rbp;
11908 popq %rbx;
11909+ pax_force_retaddr 0, 1
11910 ret;
11911
11912 __enc2_xor:
11913@@ -475,6 +481,7 @@ __enc2_xor:
11914
11915 movq RRBP, %rbp;
11916 popq %rbx;
11917+ pax_force_retaddr 0, 1
11918 ret;
11919
11920 .global camellia_dec_blk_2way;
11921@@ -517,4 +524,5 @@ __dec2_rounds16:
11922
11923 movq RRBP, %rbp;
11924 movq RXOR, %rbx;
11925+ pax_force_retaddr 0, 1
11926 ret;
11927diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11928index 15b00ac..2071784 100644
11929--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11930+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11931@@ -23,6 +23,8 @@
11932 *
11933 */
11934
11935+#include <asm/alternative-asm.h>
11936+
11937 .file "cast5-avx-x86_64-asm_64.S"
11938
11939 .extern cast_s1
11940@@ -281,6 +283,7 @@ __skip_enc:
11941 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11942 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11943
11944+ pax_force_retaddr 0, 1
11945 ret;
11946
11947 .align 16
11948@@ -353,6 +356,7 @@ __dec_tail:
11949 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11950 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11951
11952+ pax_force_retaddr 0, 1
11953 ret;
11954
11955 __skip_dec:
11956@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
11957 vmovdqu RR4, (6*4*4)(%r11);
11958 vmovdqu RL4, (7*4*4)(%r11);
11959
11960+ pax_force_retaddr
11961 ret;
11962
11963 .align 16
11964@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
11965 vmovdqu RR4, (6*4*4)(%r11);
11966 vmovdqu RL4, (7*4*4)(%r11);
11967
11968+ pax_force_retaddr
11969 ret;
11970
11971 .align 16
11972@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
11973
11974 popq %r12;
11975
11976+ pax_force_retaddr
11977 ret;
11978
11979 .align 16
11980@@ -555,4 +562,5 @@ cast5_ctr_16way:
11981
11982 popq %r12;
11983
11984+ pax_force_retaddr
11985 ret;
11986diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11987index 2569d0d..637c289 100644
11988--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11989+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11990@@ -23,6 +23,8 @@
11991 *
11992 */
11993
11994+#include <asm/alternative-asm.h>
11995+
11996 #include "glue_helper-asm-avx.S"
11997
11998 .file "cast6-avx-x86_64-asm_64.S"
11999@@ -294,6 +296,7 @@ __cast6_enc_blk8:
12000 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12001 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12002
12003+ pax_force_retaddr 0, 1
12004 ret;
12005
12006 .align 8
12007@@ -340,6 +343,7 @@ __cast6_dec_blk8:
12008 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12009 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12010
12011+ pax_force_retaddr 0, 1
12012 ret;
12013
12014 .align 8
12015@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
12016
12017 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12018
12019+ pax_force_retaddr
12020 ret;
12021
12022 .align 8
12023@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
12024
12025 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12026
12027+ pax_force_retaddr
12028 ret;
12029
12030 .align 8
12031@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
12032
12033 popq %r12;
12034
12035+ pax_force_retaddr
12036 ret;
12037
12038 .align 8
12039@@ -436,4 +443,5 @@ cast6_ctr_8way:
12040
12041 popq %r12;
12042
12043+ pax_force_retaddr
12044 ret;
12045diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12046index 6214a9b..1f4fc9a 100644
12047--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
12048+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12049@@ -1,3 +1,5 @@
12050+#include <asm/alternative-asm.h>
12051+
12052 # enter ECRYPT_encrypt_bytes
12053 .text
12054 .p2align 5
12055@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
12056 add %r11,%rsp
12057 mov %rdi,%rax
12058 mov %rsi,%rdx
12059+ pax_force_retaddr 0, 1
12060 ret
12061 # bytesatleast65:
12062 ._bytesatleast65:
12063@@ -891,6 +894,7 @@ ECRYPT_keysetup:
12064 add %r11,%rsp
12065 mov %rdi,%rax
12066 mov %rsi,%rdx
12067+ pax_force_retaddr
12068 ret
12069 # enter ECRYPT_ivsetup
12070 .text
12071@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
12072 add %r11,%rsp
12073 mov %rdi,%rax
12074 mov %rsi,%rdx
12075+ pax_force_retaddr
12076 ret
12077diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12078index 02b0e9f..cf4cf5c 100644
12079--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12080+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12081@@ -24,6 +24,8 @@
12082 *
12083 */
12084
12085+#include <asm/alternative-asm.h>
12086+
12087 #include "glue_helper-asm-avx.S"
12088
12089 .file "serpent-avx-x86_64-asm_64.S"
12090@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
12091 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12092 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12093
12094+ pax_force_retaddr
12095 ret;
12096
12097 .align 8
12098@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
12099 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12100 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12101
12102+ pax_force_retaddr
12103 ret;
12104
12105 .align 8
12106@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
12107
12108 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12109
12110+ pax_force_retaddr
12111 ret;
12112
12113 .align 8
12114@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
12115
12116 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12117
12118+ pax_force_retaddr
12119 ret;
12120
12121 .align 8
12122@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
12123
12124 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12125
12126+ pax_force_retaddr
12127 ret;
12128
12129 .align 8
12130@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
12131
12132 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12133
12134+ pax_force_retaddr
12135 ret;
12136diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12137index 3ee1ff0..cbc568b 100644
12138--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12139+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12140@@ -24,6 +24,8 @@
12141 *
12142 */
12143
12144+#include <asm/alternative-asm.h>
12145+
12146 .file "serpent-sse2-x86_64-asm_64.S"
12147 .text
12148
12149@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
12150 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12151 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12152
12153+ pax_force_retaddr
12154 ret;
12155
12156 __enc_xor8:
12157 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12158 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12159
12160+ pax_force_retaddr
12161 ret;
12162
12163 .align 8
12164@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
12165 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12166 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12167
12168+ pax_force_retaddr
12169 ret;
12170diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
12171index 49d6987..df66bd4 100644
12172--- a/arch/x86/crypto/sha1_ssse3_asm.S
12173+++ b/arch/x86/crypto/sha1_ssse3_asm.S
12174@@ -28,6 +28,8 @@
12175 * (at your option) any later version.
12176 */
12177
12178+#include <asm/alternative-asm.h>
12179+
12180 #define CTX %rdi // arg1
12181 #define BUF %rsi // arg2
12182 #define CNT %rdx // arg3
12183@@ -104,6 +106,7 @@
12184 pop %r12
12185 pop %rbp
12186 pop %rbx
12187+ pax_force_retaddr 0, 1
12188 ret
12189
12190 .size \name, .-\name
12191diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12192index ebac16b..8092eb9 100644
12193--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12194+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12195@@ -23,6 +23,8 @@
12196 *
12197 */
12198
12199+#include <asm/alternative-asm.h>
12200+
12201 #include "glue_helper-asm-avx.S"
12202
12203 .file "twofish-avx-x86_64-asm_64.S"
12204@@ -283,6 +285,7 @@ __twofish_enc_blk8:
12205 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
12206 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
12207
12208+ pax_force_retaddr 0, 1
12209 ret;
12210
12211 .align 8
12212@@ -324,6 +327,7 @@ __twofish_dec_blk8:
12213 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
12214 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
12215
12216+ pax_force_retaddr 0, 1
12217 ret;
12218
12219 .align 8
12220@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
12221
12222 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
12223
12224+ pax_force_retaddr 0, 1
12225 ret;
12226
12227 .align 8
12228@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
12229
12230 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12231
12232+ pax_force_retaddr 0, 1
12233 ret;
12234
12235 .align 8
12236@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
12237
12238 popq %r12;
12239
12240+ pax_force_retaddr 0, 1
12241 ret;
12242
12243 .align 8
12244@@ -420,4 +427,5 @@ twofish_ctr_8way:
12245
12246 popq %r12;
12247
12248+ pax_force_retaddr 0, 1
12249 ret;
12250diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12251index 5b012a2..36d5364 100644
12252--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12253+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12254@@ -20,6 +20,8 @@
12255 *
12256 */
12257
12258+#include <asm/alternative-asm.h>
12259+
12260 .file "twofish-x86_64-asm-3way.S"
12261 .text
12262
12263@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
12264 popq %r13;
12265 popq %r14;
12266 popq %r15;
12267+ pax_force_retaddr 0, 1
12268 ret;
12269
12270 __enc_xor3:
12271@@ -271,6 +274,7 @@ __enc_xor3:
12272 popq %r13;
12273 popq %r14;
12274 popq %r15;
12275+ pax_force_retaddr 0, 1
12276 ret;
12277
12278 .global twofish_dec_blk_3way
12279@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
12280 popq %r13;
12281 popq %r14;
12282 popq %r15;
12283+ pax_force_retaddr 0, 1
12284 ret;
12285
12286diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
12287index 7bcf3fc..f53832f 100644
12288--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
12289+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
12290@@ -21,6 +21,7 @@
12291 .text
12292
12293 #include <asm/asm-offsets.h>
12294+#include <asm/alternative-asm.h>
12295
12296 #define a_offset 0
12297 #define b_offset 4
12298@@ -268,6 +269,7 @@ twofish_enc_blk:
12299
12300 popq R1
12301 movq $1,%rax
12302+ pax_force_retaddr 0, 1
12303 ret
12304
12305 twofish_dec_blk:
12306@@ -319,4 +321,5 @@ twofish_dec_blk:
12307
12308 popq R1
12309 movq $1,%rax
12310+ pax_force_retaddr 0, 1
12311 ret
12312diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
12313index a703af1..f5b9c36 100644
12314--- a/arch/x86/ia32/ia32_aout.c
12315+++ b/arch/x86/ia32/ia32_aout.c
12316@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
12317 unsigned long dump_start, dump_size;
12318 struct user32 dump;
12319
12320+ memset(&dump, 0, sizeof(dump));
12321+
12322 fs = get_fs();
12323 set_fs(KERNEL_DS);
12324 has_dumped = 1;
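
Zeroing dump before it is filled closes a stack infoleak: struct user32 contains alignment holes the compiler never writes, and dumping the whole struct to the core file would otherwise carry stale kernel stack bytes out in the padding. The shape of the bug, on a made-up struct:

#include <stdio.h>
#include <string.h>

struct report {                 /* hypothetical; stands in for user32 */
        char flag;              /* 7 padding bytes follow on x86-64 */
        long value;
};

static void fill_report(struct report *r)
{
        memset(r, 0, sizeof(*r));       /* the patch's fix: clear the holes */
        r->flag = 1;
        r->value = 42;
        /* the whole struct can now be written out without leaking the
         * stale stack bytes that lived in the padding */
}

int main(void)
{
        struct report r;

        fill_report(&r);
        printf("%d %ld\n", r.flag, r.value);
        return 0;
}
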
12325diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
12326index a1daf4a..f8c4537 100644
12327--- a/arch/x86/ia32/ia32_signal.c
12328+++ b/arch/x86/ia32/ia32_signal.c
12329@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
12330 sp -= frame_size;
12331 /* Align the stack pointer according to the i386 ABI,
12332 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
12333- sp = ((sp + 4) & -16ul) - 4;
12334+ sp = ((sp - 12) & -16ul) - 4;
12335 return (void __user *) sp;
12336 }
12337
12338@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
12339 * These are actually not used anymore, but left because some
12340 * gdb versions depend on them as a marker.
12341 */
12342- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
12343+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
12344 } put_user_catch(err);
12345
12346 if (err)
12347@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
12348 0xb8,
12349 __NR_ia32_rt_sigreturn,
12350 0x80cd,
12351- 0,
12352+ 0
12353 };
12354
12355 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
12356@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
12357
12358 if (ka->sa.sa_flags & SA_RESTORER)
12359 restorer = ka->sa.sa_restorer;
12360+ else if (current->mm->context.vdso)
12361+ /* Return stub is in 32bit vsyscall page */
12362+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
12363 else
12364- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
12365- rt_sigreturn);
12366+ restorer = &frame->retcode;
12367 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
12368
12369 /*
12370 * Not actually used anymore, but left because some gdb
12371 * versions need it.
12372 */
12373- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
12374+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
12375 } put_user_catch(err);
12376
12377 err |= copy_siginfo_to_user32(&frame->info, info);
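
Both sigframe formulas satisfy the invariant in the comment, ((sp + 4) & 15) == 0 at handler entry, but the old one can hand back the incoming pointer unchanged (whenever sp is 12 mod 16), while the patched one always lands at least 16 bytes below it. A quick check across one alignment window:

#include <stdio.h>

int main(void)
{
        unsigned long sp;

        for (sp = 0xffffd000UL; sp > 0xffffcff0UL; sp--) {
                unsigned long old = ((sp + 4) & -16UL) - 4;
                unsigned long new = ((sp - 12) & -16UL) - 4;

                printf("sp=%#lx old=%#lx new=%#lx%s\n", sp, old, new,
                       old == sp ? "  (old == sp: no headroom)" : "");
        }
        return 0;
}
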
12378diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
12379index 142c4ce..19b683f 100644
12380--- a/arch/x86/ia32/ia32entry.S
12381+++ b/arch/x86/ia32/ia32entry.S
12382@@ -15,8 +15,10 @@
12383 #include <asm/irqflags.h>
12384 #include <asm/asm.h>
12385 #include <asm/smap.h>
12386+#include <asm/pgtable.h>
12387 #include <linux/linkage.h>
12388 #include <linux/err.h>
12389+#include <asm/alternative-asm.h>
12390
12391 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12392 #include <linux/elf-em.h>
12393@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
12394 ENDPROC(native_irq_enable_sysexit)
12395 #endif
12396
12397+ .macro pax_enter_kernel_user
12398+ pax_set_fptr_mask
12399+#ifdef CONFIG_PAX_MEMORY_UDEREF
12400+ call pax_enter_kernel_user
12401+#endif
12402+ .endm
12403+
12404+ .macro pax_exit_kernel_user
12405+#ifdef CONFIG_PAX_MEMORY_UDEREF
12406+ call pax_exit_kernel_user
12407+#endif
12408+#ifdef CONFIG_PAX_RANDKSTACK
12409+ pushq %rax
12410+ pushq %r11
12411+ call pax_randomize_kstack
12412+ popq %r11
12413+ popq %rax
12414+#endif
12415+ .endm
12416+
12417+.macro pax_erase_kstack
12418+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12419+ call pax_erase_kstack
12420+#endif
12421+.endm
12422+
12423 /*
12424 * 32bit SYSENTER instruction entry.
12425 *
12426@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
12427 CFI_REGISTER rsp,rbp
12428 SWAPGS_UNSAFE_STACK
12429 movq PER_CPU_VAR(kernel_stack), %rsp
12430- addq $(KERNEL_STACK_OFFSET),%rsp
12431- /*
12432- * No need to follow this irqs on/off section: the syscall
12433- * disabled irqs, here we enable it straight after entry:
12434- */
12435- ENABLE_INTERRUPTS(CLBR_NONE)
12436 movl %ebp,%ebp /* zero extension */
12437 pushq_cfi $__USER32_DS
12438 /*CFI_REL_OFFSET ss,0*/
12439@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
12440 CFI_REL_OFFSET rsp,0
12441 pushfq_cfi
12442 /*CFI_REL_OFFSET rflags,0*/
12443- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
12444- CFI_REGISTER rip,r10
12445+ orl $X86_EFLAGS_IF,(%rsp)
12446+ GET_THREAD_INFO(%r11)
12447+ movl TI_sysenter_return(%r11), %r11d
12448+ CFI_REGISTER rip,r11
12449 pushq_cfi $__USER32_CS
12450 /*CFI_REL_OFFSET cs,0*/
12451 movl %eax, %eax
12452- pushq_cfi %r10
12453+ pushq_cfi %r11
12454 CFI_REL_OFFSET rip,0
12455 pushq_cfi %rax
12456 cld
12457 SAVE_ARGS 0,1,0
12458+ pax_enter_kernel_user
12459+
12460+#ifdef CONFIG_PAX_RANDKSTACK
12461+ pax_erase_kstack
12462+#endif
12463+
12464+ /*
12465+ * No need to follow this irqs on/off section: the syscall
12466+ * disabled irqs, here we enable it straight after entry:
12467+ */
12468+ ENABLE_INTERRUPTS(CLBR_NONE)
12469 /* no need to do an access_ok check here because rbp has been
12470 32bit zero extended */
12471+
12472+#ifdef CONFIG_PAX_MEMORY_UDEREF
12473+ mov $PAX_USER_SHADOW_BASE,%r11
12474+ add %r11,%rbp
12475+#endif
12476+
12477 ASM_STAC
12478 1: movl (%rbp),%ebp
12479 _ASM_EXTABLE(1b,ia32_badarg)
12480 ASM_CLAC
12481- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12482- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12483+ GET_THREAD_INFO(%r11)
12484+ orl $TS_COMPAT,TI_status(%r11)
12485+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12486 CFI_REMEMBER_STATE
12487 jnz sysenter_tracesys
12488 cmpq $(IA32_NR_syscalls-1),%rax
12489@@ -162,12 +204,15 @@ sysenter_do_call:
12490 sysenter_dispatch:
12491 call *ia32_sys_call_table(,%rax,8)
12492 movq %rax,RAX-ARGOFFSET(%rsp)
12493+ GET_THREAD_INFO(%r11)
12494 DISABLE_INTERRUPTS(CLBR_NONE)
12495 TRACE_IRQS_OFF
12496- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12497+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
12498 jnz sysexit_audit
12499 sysexit_from_sys_call:
12500- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12501+ pax_exit_kernel_user
12502+ pax_erase_kstack
12503+ andl $~TS_COMPAT,TI_status(%r11)
12504 /* clear IF, that popfq doesn't enable interrupts early */
12505 andl $~0x200,EFLAGS-R11(%rsp)
12506 movl RIP-R11(%rsp),%edx /* User %eip */
12507@@ -193,6 +238,9 @@ sysexit_from_sys_call:
12508 movl %eax,%esi /* 2nd arg: syscall number */
12509 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
12510 call __audit_syscall_entry
12511+
12512+ pax_erase_kstack
12513+
12514 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
12515 cmpq $(IA32_NR_syscalls-1),%rax
12516 ja ia32_badsys
12517@@ -204,7 +252,7 @@ sysexit_from_sys_call:
12518 .endm
12519
12520 .macro auditsys_exit exit
12521- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12522+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12523 jnz ia32_ret_from_sys_call
12524 TRACE_IRQS_ON
12525 ENABLE_INTERRUPTS(CLBR_NONE)
12526@@ -215,11 +263,12 @@ sysexit_from_sys_call:
12527 1: setbe %al /* 1 if error, 0 if not */
12528 movzbl %al,%edi /* zero-extend that into %edi */
12529 call __audit_syscall_exit
12530+ GET_THREAD_INFO(%r11)
12531 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
12532 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
12533 DISABLE_INTERRUPTS(CLBR_NONE)
12534 TRACE_IRQS_OFF
12535- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12536+ testl %edi,TI_flags(%r11)
12537 jz \exit
12538 CLEAR_RREGS -ARGOFFSET
12539 jmp int_with_check
12540@@ -237,7 +286,7 @@ sysexit_audit:
12541
12542 sysenter_tracesys:
12543 #ifdef CONFIG_AUDITSYSCALL
12544- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12545+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12546 jz sysenter_auditsys
12547 #endif
12548 SAVE_REST
12549@@ -249,6 +298,9 @@ sysenter_tracesys:
12550 RESTORE_REST
12551 cmpq $(IA32_NR_syscalls-1),%rax
12552 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
12553+
12554+ pax_erase_kstack
12555+
12556 jmp sysenter_do_call
12557 CFI_ENDPROC
12558 ENDPROC(ia32_sysenter_target)
12559@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
12560 ENTRY(ia32_cstar_target)
12561 CFI_STARTPROC32 simple
12562 CFI_SIGNAL_FRAME
12563- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12564+ CFI_DEF_CFA rsp,0
12565 CFI_REGISTER rip,rcx
12566 /*CFI_REGISTER rflags,r11*/
12567 SWAPGS_UNSAFE_STACK
12568 movl %esp,%r8d
12569 CFI_REGISTER rsp,r8
12570 movq PER_CPU_VAR(kernel_stack),%rsp
12571+ SAVE_ARGS 8*6,0,0
12572+ pax_enter_kernel_user
12573+
12574+#ifdef CONFIG_PAX_RANDKSTACK
12575+ pax_erase_kstack
12576+#endif
12577+
12578 /*
12579 * No need to follow this irqs on/off section: the syscall
12580 * disabled irqs and here we enable it straight after entry:
12581 */
12582 ENABLE_INTERRUPTS(CLBR_NONE)
12583- SAVE_ARGS 8,0,0
12584 movl %eax,%eax /* zero extension */
12585 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12586 movq %rcx,RIP-ARGOFFSET(%rsp)
12587@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
12588 /* no need to do an access_ok check here because r8 has been
12589 32bit zero extended */
12590 /* hardware stack frame is complete now */
12591+
12592+#ifdef CONFIG_PAX_MEMORY_UDEREF
12593+ mov $PAX_USER_SHADOW_BASE,%r11
12594+ add %r11,%r8
12595+#endif
12596+
12597 ASM_STAC
12598 1: movl (%r8),%r9d
12599 _ASM_EXTABLE(1b,ia32_badarg)
12600 ASM_CLAC
12601- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12602- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12603+ GET_THREAD_INFO(%r11)
12604+ orl $TS_COMPAT,TI_status(%r11)
12605+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12606 CFI_REMEMBER_STATE
12607 jnz cstar_tracesys
12608 cmpq $IA32_NR_syscalls-1,%rax
12609@@ -319,12 +384,15 @@ cstar_do_call:
12610 cstar_dispatch:
12611 call *ia32_sys_call_table(,%rax,8)
12612 movq %rax,RAX-ARGOFFSET(%rsp)
12613+ GET_THREAD_INFO(%r11)
12614 DISABLE_INTERRUPTS(CLBR_NONE)
12615 TRACE_IRQS_OFF
12616- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12617+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
12618 jnz sysretl_audit
12619 sysretl_from_sys_call:
12620- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12621+ pax_exit_kernel_user
12622+ pax_erase_kstack
12623+ andl $~TS_COMPAT,TI_status(%r11)
12624 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
12625 movl RIP-ARGOFFSET(%rsp),%ecx
12626 CFI_REGISTER rip,rcx
12627@@ -352,7 +420,7 @@ sysretl_audit:
12628
12629 cstar_tracesys:
12630 #ifdef CONFIG_AUDITSYSCALL
12631- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12632+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12633 jz cstar_auditsys
12634 #endif
12635 xchgl %r9d,%ebp
12636@@ -366,6 +434,9 @@ cstar_tracesys:
12637 xchgl %ebp,%r9d
12638 cmpq $(IA32_NR_syscalls-1),%rax
12639 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
12640+
12641+ pax_erase_kstack
12642+
12643 jmp cstar_do_call
12644 END(ia32_cstar_target)
12645
12646@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
12647 CFI_REL_OFFSET rip,RIP-RIP
12648 PARAVIRT_ADJUST_EXCEPTION_FRAME
12649 SWAPGS
12650- /*
12651- * No need to follow this irqs on/off section: the syscall
12652- * disabled irqs and here we enable it straight after entry:
12653- */
12654- ENABLE_INTERRUPTS(CLBR_NONE)
12655 movl %eax,%eax
12656 pushq_cfi %rax
12657 cld
12658 /* note the registers are not zero extended to the sf.
12659 this could be a problem. */
12660 SAVE_ARGS 0,1,0
12661- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12662- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12663+ pax_enter_kernel_user
12664+
12665+#ifdef CONFIG_PAX_RANDKSTACK
12666+ pax_erase_kstack
12667+#endif
12668+
12669+ /*
12670+ * No need to follow this irqs on/off section: the syscall
12671+ * disabled irqs and here we enable it straight after entry:
12672+ */
12673+ ENABLE_INTERRUPTS(CLBR_NONE)
12674+ GET_THREAD_INFO(%r11)
12675+ orl $TS_COMPAT,TI_status(%r11)
12676+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12677 jnz ia32_tracesys
12678 cmpq $(IA32_NR_syscalls-1),%rax
12679 ja ia32_badsys
12680@@ -442,6 +520,9 @@ ia32_tracesys:
12681 RESTORE_REST
12682 cmpq $(IA32_NR_syscalls-1),%rax
12683 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
12684+
12685+ pax_erase_kstack
12686+
12687 jmp ia32_do_call
12688 END(ia32_syscall)
12689
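
In the entry-path hunks above, the user pointers (%rbp, %r8) have PAX_USER_SHADOW_BASE added before the fixup-protected load: under UDEREF the kernel reaches user memory only through this shadow mapping, so a raw user pointer dereferenced by mistake elsewhere in the kernel faults instead of quietly reading user-controlled data. A sketch of the translation; the base value below is made up:

#include <stdint.h>
#include <stdio.h>

/* illustrative only; the real base comes from the UDEREF config */
#define PAX_USER_SHADOW_BASE 0xffff880000000000ULL

static uint64_t user_shadow(uint64_t uaddr)
{
        /* mirrors: mov $PAX_USER_SHADOW_BASE,%r11; add %r11,%rbp */
        return uaddr + PAX_USER_SHADOW_BASE;
}

int main(void)
{
        uint64_t ubp = 0x00007ffc1234ULL;       /* made-up user address */

        printf("user %#llx -> shadow %#llx\n",
               (unsigned long long)ubp,
               (unsigned long long)user_shadow(ubp));
        return 0;
}
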
12690diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
12691index d0b689b..6811ddc 100644
12692--- a/arch/x86/ia32/sys_ia32.c
12693+++ b/arch/x86/ia32/sys_ia32.c
12694@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
12695 */
12696 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
12697 {
12698- typeof(ubuf->st_uid) uid = 0;
12699- typeof(ubuf->st_gid) gid = 0;
12700+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
12701+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
12702 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
12703 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
12704 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
12705@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12706 mm_segment_t old_fs = get_fs();
12707
12708 set_fs(KERNEL_DS);
12709- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
12710+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
12711 set_fs(old_fs);
12712 if (put_compat_timespec(&t, interval))
12713 return -EFAULT;
12714@@ -313,13 +313,13 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12715 asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
12716 compat_size_t sigsetsize)
12717 {
12718- sigset_t s;
12719+ sigset_t s = { };
12720 compat_sigset_t s32;
12721 int ret;
12722 mm_segment_t old_fs = get_fs();
12723
12724 set_fs(KERNEL_DS);
12725- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
12726+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
12727 set_fs(old_fs);
12728 if (!ret) {
12729 switch (_NSIG_WORDS) {
12730@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
12731 if (copy_siginfo_from_user32(&info, uinfo))
12732 return -EFAULT;
12733 set_fs(KERNEL_DS);
12734- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
12735+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
12736 set_fs(old_fs);
12737 return ret;
12738 }
12739@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
12740 return -EFAULT;
12741
12742 set_fs(KERNEL_DS);
12743- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
12744+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
12745 count);
12746 set_fs(old_fs);
12747
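
In sys32_rt_sigpending the on-stack sigset is now zero-initialized, so any bytes the inner call does not write are definite zeros rather than stack garbage by the time the buffer is converted and copied back out to user space. The pattern, reduced to a sketch with a hypothetical struct:

#include <stdio.h>
#include <string.h>

struct sigset_sketch { unsigned long sig[2]; };

static int maybe_fill(struct sigset_sketch *s, int ok)
{
        if (!ok)
                return -1;      /* buffer left untouched on failure */
        memset(s, 0xff, sizeof(*s));
        return 0;
}

int main(void)
{
        struct sigset_sketch s = { };   /* the patch's fix */

        maybe_fill(&s, 0);
        printf("%lx %lx\n", s.sig[0], s.sig[1]);  /* zeros, not stack junk */
        return 0;
}
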
12748diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
12749index 372231c..a5aa1a1 100644
12750--- a/arch/x86/include/asm/alternative-asm.h
12751+++ b/arch/x86/include/asm/alternative-asm.h
12752@@ -18,6 +18,45 @@
12753 .endm
12754 #endif
12755
12756+#ifdef KERNEXEC_PLUGIN
12757+ .macro pax_force_retaddr_bts rip=0
12758+ btsq $63,\rip(%rsp)
12759+ .endm
12760+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12761+ .macro pax_force_retaddr rip=0, reload=0
12762+ btsq $63,\rip(%rsp)
12763+ .endm
12764+ .macro pax_force_fptr ptr
12765+ btsq $63,\ptr
12766+ .endm
12767+ .macro pax_set_fptr_mask
12768+ .endm
12769+#endif
12770+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
12771+ .macro pax_force_retaddr rip=0, reload=0
12772+ .if \reload
12773+ pax_set_fptr_mask
12774+ .endif
12775+ orq %r10,\rip(%rsp)
12776+ .endm
12777+ .macro pax_force_fptr ptr
12778+ orq %r10,\ptr
12779+ .endm
12780+ .macro pax_set_fptr_mask
12781+ movabs $0x8000000000000000,%r10
12782+ .endm
12783+#endif
12784+#else
12785+ .macro pax_force_retaddr rip=0, reload=0
12786+ .endm
12787+ .macro pax_force_fptr ptr
12788+ .endm
12789+ .macro pax_force_retaddr_bts rip=0
12790+ .endm
12791+ .macro pax_set_fptr_mask
12792+ .endm
12793+#endif
12794+
12795 .macro altinstruction_entry orig alt feature orig_len alt_len
12796 .long \orig - .
12797 .long \alt - .
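
This header defines the same return-address tag two ways: the BTS method sets bit 63 in place at each site (btsq $63,\rip(%rsp)), while the OR method preloads the mask into %r10 once via pax_set_fptr_mask and ORs it in, trading a reserved register for a cheaper per-return instruction; with the plugin disabled, all the macros expand to nothing and unmodified kernels pay no cost. A C rendering of the two variants, which produce identical results:

#include <stdint.h>

#define KERNEXEC_MASK (1ULL << 63)

/* BTS method: the tag is computed in place at every return site */
static inline uint64_t force_retaddr_bts(uint64_t ra)
{
        return ra | KERNEXEC_MASK;
}

/* OR method: the mask is loaded once (the %r10 of pax_set_fptr_mask)
 * and reused at each site */
static inline uint64_t force_retaddr_or(uint64_t ra, uint64_t mask)
{
        return ra | mask;
}

int main(void)
{
        uint64_t ra = 0x00007f0000001000ULL;    /* made-up address */

        /* both methods yield the same tagged return address */
        return force_retaddr_bts(ra) != force_retaddr_or(ra, KERNEXEC_MASK);
}
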
12798diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
12799index 58ed6d9..f1cbe58 100644
12800--- a/arch/x86/include/asm/alternative.h
12801+++ b/arch/x86/include/asm/alternative.h
12802@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12803 ".pushsection .discard,\"aw\",@progbits\n" \
12804 DISCARD_ENTRY(1) \
12805 ".popsection\n" \
12806- ".pushsection .altinstr_replacement, \"ax\"\n" \
12807+ ".pushsection .altinstr_replacement, \"a\"\n" \
12808 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
12809 ".popsection"
12810
12811@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12812 DISCARD_ENTRY(1) \
12813 DISCARD_ENTRY(2) \
12814 ".popsection\n" \
12815- ".pushsection .altinstr_replacement, \"ax\"\n" \
12816+ ".pushsection .altinstr_replacement, \"a\"\n" \
12817 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
12818 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
12819 ".popsection"
12820diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
12821index 3388034..050f0b9 100644
12822--- a/arch/x86/include/asm/apic.h
12823+++ b/arch/x86/include/asm/apic.h
12824@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
12825
12826 #ifdef CONFIG_X86_LOCAL_APIC
12827
12828-extern unsigned int apic_verbosity;
12829+extern int apic_verbosity;
12830 extern int local_apic_timer_c2_ok;
12831
12832 extern int disable_apic;
12833diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
12834index 20370c6..a2eb9b0 100644
12835--- a/arch/x86/include/asm/apm.h
12836+++ b/arch/x86/include/asm/apm.h
12837@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
12838 __asm__ __volatile__(APM_DO_ZERO_SEGS
12839 "pushl %%edi\n\t"
12840 "pushl %%ebp\n\t"
12841- "lcall *%%cs:apm_bios_entry\n\t"
12842+ "lcall *%%ss:apm_bios_entry\n\t"
12843 "setc %%al\n\t"
12844 "popl %%ebp\n\t"
12845 "popl %%edi\n\t"
12846@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
12847 __asm__ __volatile__(APM_DO_ZERO_SEGS
12848 "pushl %%edi\n\t"
12849 "pushl %%ebp\n\t"
12850- "lcall *%%cs:apm_bios_entry\n\t"
12851+ "lcall *%%ss:apm_bios_entry\n\t"
12852 "setc %%bl\n\t"
12853 "popl %%ebp\n\t"
12854 "popl %%edi\n\t"
12855diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
12856index 722aa3b..3a0bb27 100644
12857--- a/arch/x86/include/asm/atomic.h
12858+++ b/arch/x86/include/asm/atomic.h
12859@@ -22,7 +22,18 @@
12860 */
12861 static inline int atomic_read(const atomic_t *v)
12862 {
12863- return (*(volatile int *)&(v)->counter);
12864+ return (*(volatile const int *)&(v)->counter);
12865+}
12866+
12867+/**
12868+ * atomic_read_unchecked - read atomic variable
12869+ * @v: pointer of type atomic_unchecked_t
12870+ *
12871+ * Atomically reads the value of @v.
12872+ */
12873+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
12874+{
12875+ return (*(volatile const int *)&(v)->counter);
12876 }
12877
12878 /**
12879@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
12880 }
12881
12882 /**
12883+ * atomic_set_unchecked - set atomic variable
12884+ * @v: pointer of type atomic_unchecked_t
12885+ * @i: required value
12886+ *
12887+ * Atomically sets the value of @v to @i.
12888+ */
12889+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
12890+{
12891+ v->counter = i;
12892+}
12893+
12894+/**
12895 * atomic_add - add integer to atomic variable
12896 * @i: integer value to add
12897 * @v: pointer of type atomic_t
12898@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
12899 */
12900 static inline void atomic_add(int i, atomic_t *v)
12901 {
12902- asm volatile(LOCK_PREFIX "addl %1,%0"
12903+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12904+
12905+#ifdef CONFIG_PAX_REFCOUNT
12906+ "jno 0f\n"
12907+ LOCK_PREFIX "subl %1,%0\n"
12908+ "int $4\n0:\n"
12909+ _ASM_EXTABLE(0b, 0b)
12910+#endif
12911+
12912+ : "+m" (v->counter)
12913+ : "ir" (i));
12914+}
12915+
12916+/**
12917+ * atomic_add_unchecked - add integer to atomic variable
12918+ * @i: integer value to add
12919+ * @v: pointer of type atomic_unchecked_t
12920+ *
12921+ * Atomically adds @i to @v.
12922+ */
12923+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12924+{
12925+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12926 : "+m" (v->counter)
12927 : "ir" (i));
12928 }
12929@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12930 */
12931 static inline void atomic_sub(int i, atomic_t *v)
12932 {
12933- asm volatile(LOCK_PREFIX "subl %1,%0"
12934+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12935+
12936+#ifdef CONFIG_PAX_REFCOUNT
12937+ "jno 0f\n"
12938+ LOCK_PREFIX "addl %1,%0\n"
12939+ "int $4\n0:\n"
12940+ _ASM_EXTABLE(0b, 0b)
12941+#endif
12942+
12943+ : "+m" (v->counter)
12944+ : "ir" (i));
12945+}
12946+
12947+/**
12948+ * atomic_sub_unchecked - subtract integer from atomic variable
12949+ * @i: integer value to subtract
12950+ * @v: pointer of type atomic_unchecked_t
12951+ *
12952+ * Atomically subtracts @i from @v.
12953+ */
12954+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12955+{
12956+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12957 : "+m" (v->counter)
12958 : "ir" (i));
12959 }
12960@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12961 {
12962 unsigned char c;
12963
12964- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12965+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
12966+
12967+#ifdef CONFIG_PAX_REFCOUNT
12968+ "jno 0f\n"
12969+ LOCK_PREFIX "addl %2,%0\n"
12970+ "int $4\n0:\n"
12971+ _ASM_EXTABLE(0b, 0b)
12972+#endif
12973+
12974+ "sete %1\n"
12975 : "+m" (v->counter), "=qm" (c)
12976 : "ir" (i) : "memory");
12977 return c;
12978@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12979 */
12980 static inline void atomic_inc(atomic_t *v)
12981 {
12982- asm volatile(LOCK_PREFIX "incl %0"
12983+ asm volatile(LOCK_PREFIX "incl %0\n"
12984+
12985+#ifdef CONFIG_PAX_REFCOUNT
12986+ "jno 0f\n"
12987+ LOCK_PREFIX "decl %0\n"
12988+ "int $4\n0:\n"
12989+ _ASM_EXTABLE(0b, 0b)
12990+#endif
12991+
12992+ : "+m" (v->counter));
12993+}
12994+
12995+/**
12996+ * atomic_inc_unchecked - increment atomic variable
12997+ * @v: pointer of type atomic_unchecked_t
12998+ *
12999+ * Atomically increments @v by 1.
13000+ */
13001+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
13002+{
13003+ asm volatile(LOCK_PREFIX "incl %0\n"
13004 : "+m" (v->counter));
13005 }
13006
13007@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
13008 */
13009 static inline void atomic_dec(atomic_t *v)
13010 {
13011- asm volatile(LOCK_PREFIX "decl %0"
13012+ asm volatile(LOCK_PREFIX "decl %0\n"
13013+
13014+#ifdef CONFIG_PAX_REFCOUNT
13015+ "jno 0f\n"
13016+ LOCK_PREFIX "incl %0\n"
13017+ "int $4\n0:\n"
13018+ _ASM_EXTABLE(0b, 0b)
13019+#endif
13020+
13021+ : "+m" (v->counter));
13022+}
13023+
13024+/**
13025+ * atomic_dec_unchecked - decrement atomic variable
13026+ * @v: pointer of type atomic_unchecked_t
13027+ *
13028+ * Atomically decrements @v by 1.
13029+ */
13030+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
13031+{
13032+ asm volatile(LOCK_PREFIX "decl %0\n"
13033 : "+m" (v->counter));
13034 }
13035
13036@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
13037 {
13038 unsigned char c;
13039
13040- asm volatile(LOCK_PREFIX "decl %0; sete %1"
13041+ asm volatile(LOCK_PREFIX "decl %0\n"
13042+
13043+#ifdef CONFIG_PAX_REFCOUNT
13044+ "jno 0f\n"
13045+ LOCK_PREFIX "incl %0\n"
13046+ "int $4\n0:\n"
13047+ _ASM_EXTABLE(0b, 0b)
13048+#endif
13049+
13050+ "sete %1\n"
13051 : "+m" (v->counter), "=qm" (c)
13052 : : "memory");
13053 return c != 0;
13054@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
13055 {
13056 unsigned char c;
13057
13058- asm volatile(LOCK_PREFIX "incl %0; sete %1"
13059+ asm volatile(LOCK_PREFIX "incl %0\n"
13060+
13061+#ifdef CONFIG_PAX_REFCOUNT
13062+ "jno 0f\n"
13063+ LOCK_PREFIX "decl %0\n"
13064+ "int $4\n0:\n"
13065+ _ASM_EXTABLE(0b, 0b)
13066+#endif
13067+
13068+ "sete %1\n"
13069+ : "+m" (v->counter), "=qm" (c)
13070+ : : "memory");
13071+ return c != 0;
13072+}
13073+
13074+/**
13075+ * atomic_inc_and_test_unchecked - increment and test
13076+ * @v: pointer of type atomic_unchecked_t
13077+ *
13078+ * Atomically increments @v by 1
13079+ * and returns true if the result is zero, or false for all
13080+ * other cases.
13081+ */
13082+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
13083+{
13084+ unsigned char c;
13085+
13086+ asm volatile(LOCK_PREFIX "incl %0\n"
13087+ "sete %1\n"
13088 : "+m" (v->counter), "=qm" (c)
13089 : : "memory");
13090 return c != 0;
13091@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13092 {
13093 unsigned char c;
13094
13095- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
13096+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
13097+
13098+#ifdef CONFIG_PAX_REFCOUNT
13099+ "jno 0f\n"
13100+ LOCK_PREFIX "subl %2,%0\n"
13101+ "int $4\n0:\n"
13102+ _ASM_EXTABLE(0b, 0b)
13103+#endif
13104+
13105+ "sets %1\n"
13106 : "+m" (v->counter), "=qm" (c)
13107 : "ir" (i) : "memory");
13108 return c;
13109@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13110 */
13111 static inline int atomic_add_return(int i, atomic_t *v)
13112 {
13113+ return i + xadd_check_overflow(&v->counter, i);
13114+}
13115+
13116+/**
13117+ * atomic_add_return_unchecked - add integer and return
13118+ * @i: integer value to add
13119+ * @v: pointer of type atomic_unchecked_t
13120+ *
13121+ * Atomically adds @i to @v and returns @i + @v
13122+ */
13123+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
13124+{
13125 return i + xadd(&v->counter, i);
13126 }
13127
13128@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
13129 }
13130
13131 #define atomic_inc_return(v) (atomic_add_return(1, v))
13132+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
13133+{
13134+ return atomic_add_return_unchecked(1, v);
13135+}
13136 #define atomic_dec_return(v) (atomic_sub_return(1, v))
13137
13138 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13139@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13140 return cmpxchg(&v->counter, old, new);
13141 }
13142
13143+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
13144+{
13145+ return cmpxchg(&v->counter, old, new);
13146+}
13147+
13148 static inline int atomic_xchg(atomic_t *v, int new)
13149 {
13150 return xchg(&v->counter, new);
13151 }
13152
13153+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
13154+{
13155+ return xchg(&v->counter, new);
13156+}
13157+
13158 /**
13159 * __atomic_add_unless - add unless the number is already a given value
13160 * @v: pointer of type atomic_t
13161@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
13162 */
13163 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
13164 {
13165- int c, old;
13166+ int c, old, new;
13167 c = atomic_read(v);
13168 for (;;) {
13169- if (unlikely(c == (u)))
13170+ if (unlikely(c == u))
13171 break;
13172- old = atomic_cmpxchg((v), c, c + (a));
13173+
13174+ asm volatile("addl %2,%0\n"
13175+
13176+#ifdef CONFIG_PAX_REFCOUNT
13177+ "jno 0f\n"
13178+ "subl %2,%0\n"
13179+ "int $4\n0:\n"
13180+ _ASM_EXTABLE(0b, 0b)
13181+#endif
13182+
13183+ : "=r" (new)
13184+ : "0" (c), "ir" (a));
13185+
13186+ old = atomic_cmpxchg(v, c, new);
13187 if (likely(old == c))
13188 break;
13189 c = old;
13190@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
13191 }
13192
13193 /**
13194+ * atomic_inc_not_zero_hint - increment if not null
13195+ * @v: pointer of type atomic_t
13196+ * @hint: probable value of the atomic before the increment
13197+ *
13198+ * This version of atomic_inc_not_zero() gives a hint of probable
13199+ * value of the atomic. This helps processor to not read the memory
13200+ * before doing the atomic read/modify/write cycle, lowering
13201+ * number of bus transactions on some arches.
13202+ *
13203+ * Returns: 0 if increment was not done, 1 otherwise.
13204+ */
13205+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
13206+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
13207+{
13208+ int val, c = hint, new;
13209+
13210+ /* sanity test, should be removed by compiler if hint is a constant */
13211+ if (!hint)
13212+ return __atomic_add_unless(v, 1, 0);
13213+
13214+ do {
13215+ asm volatile("incl %0\n"
13216+
13217+#ifdef CONFIG_PAX_REFCOUNT
13218+ "jno 0f\n"
13219+ "decl %0\n"
13220+ "int $4\n0:\n"
13221+ _ASM_EXTABLE(0b, 0b)
13222+#endif
13223+
13224+ : "=r" (new)
13225+ : "0" (c));
13226+
13227+ val = atomic_cmpxchg(v, c, new);
13228+ if (val == c)
13229+ return 1;
13230+ c = val;
13231+ } while (c);
13232+
13233+ return 0;
13234+}
13235+
13236+/**
13237 * atomic_inc_short - increment of a short integer
13238 * @v: pointer to type int
13239 *
13240@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
13241 #endif
13242
13243 /* These are x86-specific, used by some header files */
13244-#define atomic_clear_mask(mask, addr) \
13245- asm volatile(LOCK_PREFIX "andl %0,%1" \
13246- : : "r" (~(mask)), "m" (*(addr)) : "memory")
13247+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
13248+{
13249+ asm volatile(LOCK_PREFIX "andl %1,%0"
13250+ : "+m" (v->counter)
13251+ : "r" (~(mask))
13252+ : "memory");
13253+}
13254
13255-#define atomic_set_mask(mask, addr) \
13256- asm volatile(LOCK_PREFIX "orl %0,%1" \
13257- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
13258- : "memory")
13259+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
13260+{
13261+ asm volatile(LOCK_PREFIX "andl %1,%0"
13262+ : "+m" (v->counter)
13263+ : "r" (~(mask))
13264+ : "memory");
13265+}
13266+
13267+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
13268+{
13269+ asm volatile(LOCK_PREFIX "orl %1,%0"
13270+ : "+m" (v->counter)
13271+ : "r" (mask)
13272+ : "memory");
13273+}
13274+
13275+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
13276+{
13277+ asm volatile(LOCK_PREFIX "orl %1,%0"
13278+ : "+m" (v->counter)
13279+ : "r" (mask)
13280+ : "memory");
13281+}
13282
13283 /* Atomic operations are already serializing on x86 */
13284 #define smp_mb__before_atomic_dec() barrier()
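
The pattern repeated throughout this header is the PAX_REFCOUNT check: perform the locked operation, jno past the fixup when the overflow flag is clear, otherwise undo the operation (pinning the counter at its pre-overflow value) and raise int $4, the overflow exception. The *_unchecked variants on atomic_unchecked_t keep the raw semantics for counters that may legitimately wrap, such as statistics. A portable, non-atomic sketch of the checked increment (the LOCK prefix and atomicity are omitted for clarity):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static int checked_inc(int *counter)
{
        int new;

        if (__builtin_add_overflow(*counter, 1, &new)) {
                /* the asm undoes the add before trapping, so the counter
                 * stays pinned at INT_MAX; here it is simply never written */
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        *counter = new;
        return new;
}

int main(void)
{
        int refs = INT_MAX - 1;

        printf("%d\n", checked_inc(&refs));     /* prints INT_MAX */
        checked_inc(&refs);                     /* detected, aborts */
        return 0;
}
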
13285diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
13286index b154de7..aadebd8 100644
13287--- a/arch/x86/include/asm/atomic64_32.h
13288+++ b/arch/x86/include/asm/atomic64_32.h
13289@@ -12,6 +12,14 @@ typedef struct {
13290 u64 __aligned(8) counter;
13291 } atomic64_t;
13292
13293+#ifdef CONFIG_PAX_REFCOUNT
13294+typedef struct {
13295+ u64 __aligned(8) counter;
13296+} atomic64_unchecked_t;
13297+#else
13298+typedef atomic64_t atomic64_unchecked_t;
13299+#endif
13300+
13301 #define ATOMIC64_INIT(val) { (val) }
13302
13303 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
13304@@ -37,21 +45,31 @@ typedef struct {
13305 ATOMIC64_DECL_ONE(sym##_386)
13306
13307 ATOMIC64_DECL_ONE(add_386);
13308+ATOMIC64_DECL_ONE(add_unchecked_386);
13309 ATOMIC64_DECL_ONE(sub_386);
13310+ATOMIC64_DECL_ONE(sub_unchecked_386);
13311 ATOMIC64_DECL_ONE(inc_386);
13312+ATOMIC64_DECL_ONE(inc_unchecked_386);
13313 ATOMIC64_DECL_ONE(dec_386);
13314+ATOMIC64_DECL_ONE(dec_unchecked_386);
13315 #endif
13316
13317 #define alternative_atomic64(f, out, in...) \
13318 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
13319
13320 ATOMIC64_DECL(read);
13321+ATOMIC64_DECL(read_unchecked);
13322 ATOMIC64_DECL(set);
13323+ATOMIC64_DECL(set_unchecked);
13324 ATOMIC64_DECL(xchg);
13325 ATOMIC64_DECL(add_return);
13326+ATOMIC64_DECL(add_return_unchecked);
13327 ATOMIC64_DECL(sub_return);
13328+ATOMIC64_DECL(sub_return_unchecked);
13329 ATOMIC64_DECL(inc_return);
13330+ATOMIC64_DECL(inc_return_unchecked);
13331 ATOMIC64_DECL(dec_return);
13332+ATOMIC64_DECL(dec_return_unchecked);
13333 ATOMIC64_DECL(dec_if_positive);
13334 ATOMIC64_DECL(inc_not_zero);
13335 ATOMIC64_DECL(add_unless);
13336@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
13337 }
13338
13339 /**
13340+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
13341+ * @p: pointer to type atomic64_unchecked_t
13342+ * @o: expected value
13343+ * @n: new value
13344+ *
13345+ * Atomically sets @v to @n if it was equal to @o and returns
13346+ * the old value.
13347+ */
13348+
13349+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
13350+{
13351+ return cmpxchg64(&v->counter, o, n);
13352+}
13353+
13354+/**
13355 * atomic64_xchg - xchg atomic64 variable
13356 * @v: pointer to type atomic64_t
13357 * @n: value to assign
13358@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
13359 }
13360
13361 /**
13362+ * atomic64_set_unchecked - set atomic64 variable
13363+ * @v: pointer to type atomic64_unchecked_t
13364+ * @n: value to assign
13365+ *
13366+ * Atomically sets the value of @v to @n.
13367+ */
13368+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
13369+{
13370+ unsigned high = (unsigned)(i >> 32);
13371+ unsigned low = (unsigned)i;
13372+ alternative_atomic64(set, /* no output */,
13373+ "S" (v), "b" (low), "c" (high)
13374+ : "eax", "edx", "memory");
13375+}
13376+
13377+/**
13378 * atomic64_read - read atomic64 variable
13379 * @v: pointer to type atomic64_t
13380 *
13381@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
13382 }
13383
13384 /**
13385+ * atomic64_read_unchecked - read atomic64 variable
13386+ * @v: pointer to type atomic64_unchecked_t
13387+ *
13388+ * Atomically reads the value of @v and returns it.
13389+ */
13390+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
13391+{
13392+ long long r;
13393+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
13394+ return r;
13395+ }
13396+
13397+/**
13398 * atomic64_add_return - add and return
13399 * @i: integer value to add
13400 * @v: pointer to type atomic64_t
13401@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
13402 return i;
13403 }
13404
13405+/**
13406+ * atomic64_add_return_unchecked - add and return
13407+ * @i: integer value to add
13408+ * @v: pointer to type atomic64_unchecked_t
13409+ *
13410+ * Atomically adds @i to @v and returns @i + *@v
13411+ */
13412+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
13413+{
13414+ alternative_atomic64(add_return_unchecked,
13415+ ASM_OUTPUT2("+A" (i), "+c" (v)),
13416+ ASM_NO_INPUT_CLOBBER("memory"));
13417+ return i;
13418+}
13419+
13420 /*
13421 * Other variants with different arithmetic operators:
13422 */
13423@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
13424 return a;
13425 }
13426
13427+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13428+{
13429+ long long a;
13430+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
13431+ "S" (v) : "memory", "ecx");
13432+ return a;
13433+}
13434+
13435 static inline long long atomic64_dec_return(atomic64_t *v)
13436 {
13437 long long a;
13438@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
13439 }
13440
13441 /**
13442+ * atomic64_add_unchecked - add integer to atomic64 variable
13443+ * @i: integer value to add
13444+ * @v: pointer to type atomic64_unchecked_t
13445+ *
13446+ * Atomically adds @i to @v.
13447+ */
13448+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
13449+{
13450+ __alternative_atomic64(add_unchecked, add_return_unchecked,
13451+ ASM_OUTPUT2("+A" (i), "+c" (v)),
13452+ ASM_NO_INPUT_CLOBBER("memory"));
13453+ return i;
13454+}
13455+
13456+/**
13457 * atomic64_sub - subtract the atomic64 variable
13458 * @i: integer value to subtract
13459 * @v: pointer to type atomic64_t
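
On 32-bit x86 there is no 8-byte locked add, so these atomic64 operations are out-of-line helpers built around cmpxchg8b and dispatched through the alternative mechanism, with plain-386 fallbacks; the hunk declares matching *_unchecked entry points alongside the checked ones. A portable sketch of the compare-exchange retry loop such helpers implement, using GCC's __atomic builtins in place of raw cmpxchg8b:

#include <stdio.h>

static long long atomic64_add_return_sketch(long long i, long long *v)
{
        long long old, new;

        do {
                old = __atomic_load_n(v, __ATOMIC_RELAXED);
                new = old + i;
        } while (!__atomic_compare_exchange_n(v, &old, new, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
        return new;
}

int main(void)
{
        long long v = 1;

        printf("%lld\n", atomic64_add_return_sketch(41, &v));  /* 42 */
        return 0;
}
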
13460diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
13461index 0e1cbfc..5623683 100644
13462--- a/arch/x86/include/asm/atomic64_64.h
13463+++ b/arch/x86/include/asm/atomic64_64.h
13464@@ -18,7 +18,19 @@
13465 */
13466 static inline long atomic64_read(const atomic64_t *v)
13467 {
13468- return (*(volatile long *)&(v)->counter);
13469+ return (*(volatile const long *)&(v)->counter);
13470+}
13471+
13472+/**
13473+ * atomic64_read_unchecked - read atomic64 variable
13474+ * @v: pointer of type atomic64_unchecked_t
13475+ *
13476+ * Atomically reads the value of @v.
13477+ * Doesn't imply a read memory barrier.
13478+ */
13479+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
13480+{
13481+ return (*(volatile const long *)&(v)->counter);
13482 }
13483
13484 /**
13485@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
13486 }
13487
13488 /**
13489+ * atomic64_set_unchecked - set atomic64 variable
13490+ * @v: pointer to type atomic64_unchecked_t
13491+ * @i: required value
13492+ *
13493+ * Atomically sets the value of @v to @i.
13494+ */
13495+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
13496+{
13497+ v->counter = i;
13498+}
13499+
13500+/**
13501 * atomic64_add - add integer to atomic64 variable
13502 * @i: integer value to add
13503 * @v: pointer to type atomic64_t
13504@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
13505 */
13506 static inline void atomic64_add(long i, atomic64_t *v)
13507 {
13508+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
13509+
13510+#ifdef CONFIG_PAX_REFCOUNT
13511+ "jno 0f\n"
13512+ LOCK_PREFIX "subq %1,%0\n"
13513+ "int $4\n0:\n"
13514+ _ASM_EXTABLE(0b, 0b)
13515+#endif
13516+
13517+ : "=m" (v->counter)
13518+ : "er" (i), "m" (v->counter));
13519+}
13520+
13521+/**
13522+ * atomic64_add_unchecked - add integer to atomic64 variable
13523+ * @i: integer value to add
13524+ * @v: pointer to type atomic64_unchecked_t
13525+ *
13526+ * Atomically adds @i to @v.
13527+ */
13528+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
13529+{
13530 asm volatile(LOCK_PREFIX "addq %1,%0"
13531 : "=m" (v->counter)
13532 : "er" (i), "m" (v->counter));
13533@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
13534 */
13535 static inline void atomic64_sub(long i, atomic64_t *v)
13536 {
13537- asm volatile(LOCK_PREFIX "subq %1,%0"
13538+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
13539+
13540+#ifdef CONFIG_PAX_REFCOUNT
13541+ "jno 0f\n"
13542+ LOCK_PREFIX "addq %1,%0\n"
13543+ "int $4\n0:\n"
13544+ _ASM_EXTABLE(0b, 0b)
13545+#endif
13546+
13547+ : "=m" (v->counter)
13548+ : "er" (i), "m" (v->counter));
13549+}
13550+
13551+/**
13552+ * atomic64_sub_unchecked - subtract the atomic64 variable
13553+ * @i: integer value to subtract
13554+ * @v: pointer to type atomic64_unchecked_t
13555+ *
13556+ * Atomically subtracts @i from @v.
13557+ */
13558+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
13559+{
13560+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
13561 : "=m" (v->counter)
13562 : "er" (i), "m" (v->counter));
13563 }
13564@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
13565 {
13566 unsigned char c;
13567
13568- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
13569+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
13570+
13571+#ifdef CONFIG_PAX_REFCOUNT
13572+ "jno 0f\n"
13573+ LOCK_PREFIX "addq %2,%0\n"
13574+ "int $4\n0:\n"
13575+ _ASM_EXTABLE(0b, 0b)
13576+#endif
13577+
13578+ "sete %1\n"
13579 : "=m" (v->counter), "=qm" (c)
13580 : "er" (i), "m" (v->counter) : "memory");
13581 return c;
13582@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
13583 */
13584 static inline void atomic64_inc(atomic64_t *v)
13585 {
13586+ asm volatile(LOCK_PREFIX "incq %0\n"
13587+
13588+#ifdef CONFIG_PAX_REFCOUNT
13589+ "jno 0f\n"
13590+ LOCK_PREFIX "decq %0\n"
13591+ "int $4\n0:\n"
13592+ _ASM_EXTABLE(0b, 0b)
13593+#endif
13594+
13595+ : "=m" (v->counter)
13596+ : "m" (v->counter));
13597+}
13598+
13599+/**
13600+ * atomic64_inc_unchecked - increment atomic64 variable
13601+ * @v: pointer to type atomic64_unchecked_t
13602+ *
13603+ * Atomically increments @v by 1.
13604+ */
13605+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
13606+{
13607 asm volatile(LOCK_PREFIX "incq %0"
13608 : "=m" (v->counter)
13609 : "m" (v->counter));
13610@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
13611 */
13612 static inline void atomic64_dec(atomic64_t *v)
13613 {
13614- asm volatile(LOCK_PREFIX "decq %0"
13615+ asm volatile(LOCK_PREFIX "decq %0\n"
13616+
13617+#ifdef CONFIG_PAX_REFCOUNT
13618+ "jno 0f\n"
13619+ LOCK_PREFIX "incq %0\n"
13620+ "int $4\n0:\n"
13621+ _ASM_EXTABLE(0b, 0b)
13622+#endif
13623+
13624+ : "=m" (v->counter)
13625+ : "m" (v->counter));
13626+}
13627+
13628+/**
13629+ * atomic64_dec_unchecked - decrement atomic64 variable
13630+ * @v: pointer to type atomic64_unchecked_t
13631+ *
13632+ * Atomically decrements @v by 1.
13633+ */
13634+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
13635+{
13636+ asm volatile(LOCK_PREFIX "decq %0\n"
13637 : "=m" (v->counter)
13638 : "m" (v->counter));
13639 }
13640@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
13641 {
13642 unsigned char c;
13643
13644- asm volatile(LOCK_PREFIX "decq %0; sete %1"
13645+ asm volatile(LOCK_PREFIX "decq %0\n"
13646+
13647+#ifdef CONFIG_PAX_REFCOUNT
13648+ "jno 0f\n"
13649+ LOCK_PREFIX "incq %0\n"
13650+ "int $4\n0:\n"
13651+ _ASM_EXTABLE(0b, 0b)
13652+#endif
13653+
13654+ "sete %1\n"
13655 : "=m" (v->counter), "=qm" (c)
13656 : "m" (v->counter) : "memory");
13657 return c != 0;
13658@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
13659 {
13660 unsigned char c;
13661
13662- asm volatile(LOCK_PREFIX "incq %0; sete %1"
13663+ asm volatile(LOCK_PREFIX "incq %0\n"
13664+
13665+#ifdef CONFIG_PAX_REFCOUNT
13666+ "jno 0f\n"
13667+ LOCK_PREFIX "decq %0\n"
13668+ "int $4\n0:\n"
13669+ _ASM_EXTABLE(0b, 0b)
13670+#endif
13671+
13672+ "sete %1\n"
13673 : "=m" (v->counter), "=qm" (c)
13674 : "m" (v->counter) : "memory");
13675 return c != 0;
13676@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13677 {
13678 unsigned char c;
13679
13680- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
13681+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
13682+
13683+#ifdef CONFIG_PAX_REFCOUNT
13684+ "jno 0f\n"
13685+ LOCK_PREFIX "subq %2,%0\n"
13686+ "int $4\n0:\n"
13687+ _ASM_EXTABLE(0b, 0b)
13688+#endif
13689+
13690+ "sets %1\n"
13691 : "=m" (v->counter), "=qm" (c)
13692 : "er" (i), "m" (v->counter) : "memory");
13693 return c;
13694@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13695 */
13696 static inline long atomic64_add_return(long i, atomic64_t *v)
13697 {
13698+ return i + xadd_check_overflow(&v->counter, i);
13699+}
13700+
13701+/**
13702+ * atomic64_add_return_unchecked - add and return
13703+ * @i: integer value to add
13704+ * @v: pointer to type atomic64_unchecked_t
13705+ *
13706+ * Atomically adds @i to @v and returns @i + @v
13707+ */
13708+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
13709+{
13710 return i + xadd(&v->counter, i);
13711 }
13712
13713@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
13714 }
13715
13716 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
13717+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13718+{
13719+ return atomic64_add_return_unchecked(1, v);
13720+}
13721 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
13722
13723 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13724@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13725 return cmpxchg(&v->counter, old, new);
13726 }
13727
13728+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
13729+{
13730+ return cmpxchg(&v->counter, old, new);
13731+}
13732+
13733 static inline long atomic64_xchg(atomic64_t *v, long new)
13734 {
13735 return xchg(&v->counter, new);
13736@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
13737 */
13738 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
13739 {
13740- long c, old;
13741+ long c, old, new;
13742 c = atomic64_read(v);
13743 for (;;) {
13744- if (unlikely(c == (u)))
13745+ if (unlikely(c == u))
13746 break;
13747- old = atomic64_cmpxchg((v), c, c + (a));
13748+
13749+ asm volatile("add %2,%0\n"
13750+
13751+#ifdef CONFIG_PAX_REFCOUNT
13752+ "jno 0f\n"
13753+ "sub %2,%0\n"
13754+ "int $4\n0:\n"
13755+ _ASM_EXTABLE(0b, 0b)
13756+#endif
13757+
13758+ : "=r" (new)
13759+ : "0" (c), "ir" (a));
13760+
13761+ old = atomic64_cmpxchg(v, c, new);
13762 if (likely(old == c))
13763 break;
13764 c = old;
13765 }
13766- return c != (u);
13767+ return c != u;
13768 }
13769
13770 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
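The reworked atomic64_add_unless() above keeps the standard cmpxchg retry loop and only replaces the `c + a` computation with an overflow-checked add. Stripped of the asm, the loop shape is (a sketch; __sync_val_compare_and_swap stands in for the kernel's cmpxchg()):

/* Retry until either the "unless" value u is observed (return 0) or
 * our cmpxchg installs c + a atomically (return 1).
 */
static int add_unless(long *v, long a, long u)
{
	long c = *v;                 /* kernel: atomic64_read(v) */
	for (;;) {
		long old;
		if (c == u)
			return 0;    /* hit the excluded value   */
		old = __sync_val_compare_and_swap(v, c, c + a);
		if (old == c)
			return 1;    /* our add won the race     */
		c = old;             /* *v changed underneath us; retry */
	}
}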
13771diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
13772index 6dfd019..28e188d 100644
13773--- a/arch/x86/include/asm/bitops.h
13774+++ b/arch/x86/include/asm/bitops.h
13775@@ -40,7 +40,7 @@
13776 * a mask operation on a byte.
13777 */
13778 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
13779-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
13780+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
13781 #define CONST_MASK(nr) (1 << ((nr) & 7))
13782
13783 /**
13784@@ -486,7 +486,7 @@ static inline int fls(int x)
13785 * at position 64.
13786 */
13787 #ifdef CONFIG_X86_64
13788-static __always_inline int fls64(__u64 x)
13789+static __always_inline long fls64(__u64 x)
13790 {
13791 int bitpos = -1;
13792 /*
13793diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
13794index 4fa687a..60f2d39 100644
13795--- a/arch/x86/include/asm/boot.h
13796+++ b/arch/x86/include/asm/boot.h
13797@@ -6,10 +6,15 @@
13798 #include <uapi/asm/boot.h>
13799
13800 /* Physical address where kernel should be loaded. */
13801-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13802+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13803 + (CONFIG_PHYSICAL_ALIGN - 1)) \
13804 & ~(CONFIG_PHYSICAL_ALIGN - 1))
13805
13806+#ifndef __ASSEMBLY__
13807+extern unsigned char __LOAD_PHYSICAL_ADDR[];
13808+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
13809+#endif
13810+
13811 /* Minimum kernel alignment, as a power of two */
13812 #ifdef CONFIG_X86_64
13813 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
13814diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
13815index 48f99f1..d78ebf9 100644
13816--- a/arch/x86/include/asm/cache.h
13817+++ b/arch/x86/include/asm/cache.h
13818@@ -5,12 +5,13 @@
13819
13820 /* L1 cache line size */
13821 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13822-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13823+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13824
13825 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
13826+#define __read_only __attribute__((__section__(".data..read_only")))
13827
13828 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
13829-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
13830+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
13831
13832 #ifdef CONFIG_X86_VSMP
13833 #ifdef CONFIG_SMP
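The cache.h change swaps the plain int constant 1 for _AC(1,UL): in C the suffix is pasted on so the shift and any later mask arithmetic happen in unsigned long, while in assembly (where a UL suffix is not valid syntax) the suffix is dropped. Paraphrasing include/uapi/linux/const.h:

/* One constant definition usable from both C and assembly. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X             /* expands to plain 1            */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)    /* expands to (1UL) in C         */
#endif

/* The patched L1_CACHE_BYTES therefore expands to (1UL) << shift,
 * so expressions like addr & ~(L1_CACHE_BYTES - 1) stay in
 * unsigned long instead of being computed in int first. */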
13834diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
13835index 9863ee3..4a1f8e1 100644
13836--- a/arch/x86/include/asm/cacheflush.h
13837+++ b/arch/x86/include/asm/cacheflush.h
13838@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
13839 unsigned long pg_flags = pg->flags & _PGMT_MASK;
13840
13841 if (pg_flags == _PGMT_DEFAULT)
13842- return -1;
13843+ return ~0UL;
13844 else if (pg_flags == _PGMT_WC)
13845 return _PAGE_CACHE_WC;
13846 else if (pg_flags == _PGMT_UC_MINUS)
13847diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
13848index 46fc474..b02b0f9 100644
13849--- a/arch/x86/include/asm/checksum_32.h
13850+++ b/arch/x86/include/asm/checksum_32.h
13851@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
13852 int len, __wsum sum,
13853 int *src_err_ptr, int *dst_err_ptr);
13854
13855+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
13856+ int len, __wsum sum,
13857+ int *src_err_ptr, int *dst_err_ptr);
13858+
13859+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
13860+ int len, __wsum sum,
13861+ int *src_err_ptr, int *dst_err_ptr);
13862+
13863 /*
13864 * Note: when you get a NULL pointer exception here this means someone
13865 * passed in an incorrect kernel address to one of these functions.
13866@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
13867 int *err_ptr)
13868 {
13869 might_sleep();
13870- return csum_partial_copy_generic((__force void *)src, dst,
13871+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
13872 len, sum, err_ptr, NULL);
13873 }
13874
13875@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
13876 {
13877 might_sleep();
13878 if (access_ok(VERIFY_WRITE, dst, len))
13879- return csum_partial_copy_generic(src, (__force void *)dst,
13880+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
13881 len, sum, NULL, err_ptr);
13882
13883 if (len)
13884diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
13885index 8d871ea..c1a0dc9 100644
13886--- a/arch/x86/include/asm/cmpxchg.h
13887+++ b/arch/x86/include/asm/cmpxchg.h
13888@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
13889 __compiletime_error("Bad argument size for cmpxchg");
13890 extern void __xadd_wrong_size(void)
13891 __compiletime_error("Bad argument size for xadd");
13892+extern void __xadd_check_overflow_wrong_size(void)
13893+ __compiletime_error("Bad argument size for xadd_check_overflow");
13894 extern void __add_wrong_size(void)
13895 __compiletime_error("Bad argument size for add");
13896+extern void __add_check_overflow_wrong_size(void)
13897+ __compiletime_error("Bad argument size for add_check_overflow");
13898
13899 /*
13900 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
13901@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
13902 __ret; \
13903 })
13904
13905+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
13906+ ({ \
13907+ __typeof__ (*(ptr)) __ret = (arg); \
13908+ switch (sizeof(*(ptr))) { \
13909+ case __X86_CASE_L: \
13910+ asm volatile (lock #op "l %0, %1\n" \
13911+ "jno 0f\n" \
13912+ "mov %0,%1\n" \
13913+ "int $4\n0:\n" \
13914+ _ASM_EXTABLE(0b, 0b) \
13915+ : "+r" (__ret), "+m" (*(ptr)) \
13916+ : : "memory", "cc"); \
13917+ break; \
13918+ case __X86_CASE_Q: \
13919+ asm volatile (lock #op "q %q0, %1\n" \
13920+ "jno 0f\n" \
13921+ "mov %0,%1\n" \
13922+ "int $4\n0:\n" \
13923+ _ASM_EXTABLE(0b, 0b) \
13924+ : "+r" (__ret), "+m" (*(ptr)) \
13925+ : : "memory", "cc"); \
13926+ break; \
13927+ default: \
13928+ __ ## op ## _check_overflow_wrong_size(); \
13929+ } \
13930+ __ret; \
13931+ })
13932+
13933 /*
13934 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13935 * Since this is generally used to protect other memory information, we
13936@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13937 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13938 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13939
13940+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13941+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13942+
13943 #define __add(ptr, inc, lock) \
13944 ({ \
13945 __typeof__ (*(ptr)) __ret = (inc); \
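Note that the fixup in __xchg_op_check_overflow differs from the add/sub sites earlier in this section: after a lock xadd, the register operand already holds the old value, so undoing an overflowing add is just storing that value back rather than subtracting. The same sequence as a user-space sketch (illustrative; the trap again stands in for the exception-table fixup):

static long xadd_checked(long *ptr, long inc)
{
	long old = inc;
	asm volatile("lock xaddq %0, %1\n"
		     "jno 0f\n"        /* OF clear: result stands     */
		     "movq %0, %1\n"   /* OF set: restore old value   */
		     "int $4\n"
		     "0:\n"
		     : "+r" (old), "+m" (*ptr)
		     : : "memory", "cc");
	return old;                    /* value before the add */
}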
13946diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13947index 59c6c40..5e0b22c 100644
13948--- a/arch/x86/include/asm/compat.h
13949+++ b/arch/x86/include/asm/compat.h
13950@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13951 typedef u32 compat_uint_t;
13952 typedef u32 compat_ulong_t;
13953 typedef u64 __attribute__((aligned(4))) compat_u64;
13954-typedef u32 compat_uptr_t;
13955+typedef u32 __user compat_uptr_t;
13956
13957 struct compat_timespec {
13958 compat_time_t tv_sec;
13959diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13960index 2d9075e..b75a844 100644
13961--- a/arch/x86/include/asm/cpufeature.h
13962+++ b/arch/x86/include/asm/cpufeature.h
13963@@ -206,7 +206,7 @@
13964 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13965 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13966 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13967-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13968+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13969 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13970 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13971 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13972@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13973 ".section .discard,\"aw\",@progbits\n"
13974 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13975 ".previous\n"
13976- ".section .altinstr_replacement,\"ax\"\n"
13977+ ".section .altinstr_replacement,\"a\"\n"
13978 "3: movb $1,%0\n"
13979 "4:\n"
13980 ".previous\n"
13981diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13982index 8bf1c06..b6ae785 100644
13983--- a/arch/x86/include/asm/desc.h
13984+++ b/arch/x86/include/asm/desc.h
13985@@ -4,6 +4,7 @@
13986 #include <asm/desc_defs.h>
13987 #include <asm/ldt.h>
13988 #include <asm/mmu.h>
13989+#include <asm/pgtable.h>
13990
13991 #include <linux/smp.h>
13992 #include <linux/percpu.h>
13993@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13994
13995 desc->type = (info->read_exec_only ^ 1) << 1;
13996 desc->type |= info->contents << 2;
13997+ desc->type |= info->seg_not_present ^ 1;
13998
13999 desc->s = 1;
14000 desc->dpl = 0x3;
14001@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14002 }
14003
14004 extern struct desc_ptr idt_descr;
14005-extern gate_desc idt_table[];
14006 extern struct desc_ptr nmi_idt_descr;
14007-extern gate_desc nmi_idt_table[];
14008-
14009-struct gdt_page {
14010- struct desc_struct gdt[GDT_ENTRIES];
14011-} __attribute__((aligned(PAGE_SIZE)));
14012-
14013-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
14014+extern gate_desc idt_table[256];
14015+extern gate_desc nmi_idt_table[256];
14016
14017+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
14018 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
14019 {
14020- return per_cpu(gdt_page, cpu).gdt;
14021+ return cpu_gdt_table[cpu];
14022 }
14023
14024 #ifdef CONFIG_X86_64
14025@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
14026 unsigned long base, unsigned dpl, unsigned flags,
14027 unsigned short seg)
14028 {
14029- gate->a = (seg << 16) | (base & 0xffff);
14030- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
14031+ gate->gate.offset_low = base;
14032+ gate->gate.seg = seg;
14033+ gate->gate.reserved = 0;
14034+ gate->gate.type = type;
14035+ gate->gate.s = 0;
14036+ gate->gate.dpl = dpl;
14037+ gate->gate.p = 1;
14038+ gate->gate.offset_high = base >> 16;
14039 }
14040
14041 #endif
14042@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
14043
14044 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
14045 {
14046+ pax_open_kernel();
14047 memcpy(&idt[entry], gate, sizeof(*gate));
14048+ pax_close_kernel();
14049 }
14050
14051 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
14052 {
14053+ pax_open_kernel();
14054 memcpy(&ldt[entry], desc, 8);
14055+ pax_close_kernel();
14056 }
14057
14058 static inline void
14059@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
14060 default: size = sizeof(*gdt); break;
14061 }
14062
14063+ pax_open_kernel();
14064 memcpy(&gdt[entry], desc, size);
14065+ pax_close_kernel();
14066 }
14067
14068 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
14069@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
14070
14071 static inline void native_load_tr_desc(void)
14072 {
14073+ pax_open_kernel();
14074 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
14075+ pax_close_kernel();
14076 }
14077
14078 static inline void native_load_gdt(const struct desc_ptr *dtr)
14079@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
14080 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
14081 unsigned int i;
14082
14083+ pax_open_kernel();
14084 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
14085 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
14086+ pax_close_kernel();
14087 }
14088
14089 #define _LDT_empty(info) \
14090@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
14091 preempt_enable();
14092 }
14093
14094-static inline unsigned long get_desc_base(const struct desc_struct *desc)
14095+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
14096 {
14097 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
14098 }
14099@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
14100 }
14101
14102 #ifdef CONFIG_X86_64
14103-static inline void set_nmi_gate(int gate, void *addr)
14104+static inline void set_nmi_gate(int gate, const void *addr)
14105 {
14106 gate_desc s;
14107
14108@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
14109 }
14110 #endif
14111
14112-static inline void _set_gate(int gate, unsigned type, void *addr,
14113+static inline void _set_gate(int gate, unsigned type, const void *addr,
14114 unsigned dpl, unsigned ist, unsigned seg)
14115 {
14116 gate_desc s;
14117@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
14118 * Pentium F0 0F bugfix can have resulted in the mapped
14119 * IDT being write-protected.
14120 */
14121-static inline void set_intr_gate(unsigned int n, void *addr)
14122+static inline void set_intr_gate(unsigned int n, const void *addr)
14123 {
14124 BUG_ON((unsigned)n > 0xFF);
14125 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
14126@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
14127 /*
14128 * This routine sets up an interrupt gate at directory privilege level 3.
14129 */
14130-static inline void set_system_intr_gate(unsigned int n, void *addr)
14131+static inline void set_system_intr_gate(unsigned int n, const void *addr)
14132 {
14133 BUG_ON((unsigned)n > 0xFF);
14134 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
14135 }
14136
14137-static inline void set_system_trap_gate(unsigned int n, void *addr)
14138+static inline void set_system_trap_gate(unsigned int n, const void *addr)
14139 {
14140 BUG_ON((unsigned)n > 0xFF);
14141 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
14142 }
14143
14144-static inline void set_trap_gate(unsigned int n, void *addr)
14145+static inline void set_trap_gate(unsigned int n, const void *addr)
14146 {
14147 BUG_ON((unsigned)n > 0xFF);
14148 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
14149@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
14150 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
14151 {
14152 BUG_ON((unsigned)n > 0xFF);
14153- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
14154+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
14155 }
14156
14157-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
14158+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
14159 {
14160 BUG_ON((unsigned)n > 0xFF);
14161 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
14162 }
14163
14164-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
14165+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
14166 {
14167 BUG_ON((unsigned)n > 0xFF);
14168 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
14169 }
14170
14171+#ifdef CONFIG_X86_32
14172+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
14173+{
14174+ struct desc_struct d;
14175+
14176+ if (likely(limit))
14177+ limit = (limit - 1UL) >> PAGE_SHIFT;
14178+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
14179+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
14180+}
14181+#endif
14182+
14183 #endif /* _ASM_X86_DESC_H */
14184diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
14185index 278441f..b95a174 100644
14186--- a/arch/x86/include/asm/desc_defs.h
14187+++ b/arch/x86/include/asm/desc_defs.h
14188@@ -31,6 +31,12 @@ struct desc_struct {
14189 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
14190 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
14191 };
14192+ struct {
14193+ u16 offset_low;
14194+ u16 seg;
14195+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
14196+ unsigned offset_high: 16;
14197+ } gate;
14198 };
14199 } __attribute__((packed));
14200
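The new anonymous `gate` struct gives pack_gate() (rewritten in desc.h above) named bitfields in place of the old shift-and-mask encoding. The two encodings produce identical descriptor words; a small check, assuming GCC's little-endian bit-field layout on x86 and illustrative values:

#include <assert.h>
#include <stdint.h>

struct gate {                        /* mirrors the anonymous struct above */
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
	unsigned offset_high: 16;
} __attribute__((packed));

int main(void)
{
	uint32_t base = 0xc0123456, seg = 0x10, type = 0xE, dpl = 0;

	/* old encoding, as removed from pack_gate() */
	uint32_t a = (seg << 16) | (base & 0xffff);
	uint32_t b = (base & 0xffff0000) |
		     (((0x80 | type | (dpl << 5)) & 0xff) << 8);

	/* new encoding via named bitfields */
	union { struct gate g; uint32_t w[2]; } u = { .g = {
		.offset_low = base, .seg = seg, .reserved = 0,
		.type = type, .s = 0, .dpl = dpl, .p = 1,
		.offset_high = base >> 16 } };

	assert(u.w[0] == a && u.w[1] == b);
	return 0;
}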
14201diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
14202index ced283a..ffe04cc 100644
14203--- a/arch/x86/include/asm/div64.h
14204+++ b/arch/x86/include/asm/div64.h
14205@@ -39,7 +39,7 @@
14206 __mod; \
14207 })
14208
14209-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
14210+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
14211 {
14212 union {
14213 u64 v64;
14214diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
14215index 9c999c1..3860cb8 100644
14216--- a/arch/x86/include/asm/elf.h
14217+++ b/arch/x86/include/asm/elf.h
14218@@ -243,7 +243,25 @@ extern int force_personality32;
14219 the loader. We need to make sure that it is out of the way of the program
14220 that it will "exec", and that there is sufficient room for the brk. */
14221
14222+#ifdef CONFIG_PAX_SEGMEXEC
14223+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
14224+#else
14225 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
14226+#endif
14227+
14228+#ifdef CONFIG_PAX_ASLR
14229+#ifdef CONFIG_X86_32
14230+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
14231+
14232+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
14233+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
14234+#else
14235+#define PAX_ELF_ET_DYN_BASE 0x400000UL
14236+
14237+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
14238+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
14239+#endif
14240+#endif
14241
14242 /* This yields a mask that user programs can use to figure out what
14243 instruction set this CPU supports. This could be done in user space,
14244@@ -296,16 +314,12 @@ do { \
14245
14246 #define ARCH_DLINFO \
14247 do { \
14248- if (vdso_enabled) \
14249- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
14250- (unsigned long)current->mm->context.vdso); \
14251+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
14252 } while (0)
14253
14254 #define ARCH_DLINFO_X32 \
14255 do { \
14256- if (vdso_enabled) \
14257- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
14258- (unsigned long)current->mm->context.vdso); \
14259+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
14260 } while (0)
14261
14262 #define AT_SYSINFO 32
14263@@ -320,7 +334,7 @@ else \
14264
14265 #endif /* !CONFIG_X86_32 */
14266
14267-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
14268+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
14269
14270 #define VDSO_ENTRY \
14271 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
14272@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
14273 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
14274 #define compat_arch_setup_additional_pages syscall32_setup_pages
14275
14276-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
14277-#define arch_randomize_brk arch_randomize_brk
14278-
14279 /*
14280 * True on X86_32 or when emulating IA32 on X86_64
14281 */
14282diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
14283index 75ce3f4..882e801 100644
14284--- a/arch/x86/include/asm/emergency-restart.h
14285+++ b/arch/x86/include/asm/emergency-restart.h
14286@@ -13,6 +13,6 @@ enum reboot_type {
14287
14288 extern enum reboot_type reboot_type;
14289
14290-extern void machine_emergency_restart(void);
14291+extern void machine_emergency_restart(void) __noreturn;
14292
14293 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
14294diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
14295index 41ab26e..a88c9e6 100644
14296--- a/arch/x86/include/asm/fpu-internal.h
14297+++ b/arch/x86/include/asm/fpu-internal.h
14298@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
14299 ({ \
14300 int err; \
14301 asm volatile(ASM_STAC "\n" \
14302- "1:" #insn "\n\t" \
14303+ "1:" \
14304+ __copyuser_seg \
14305+ #insn "\n\t" \
14306 "2: " ASM_CLAC "\n" \
14307 ".section .fixup,\"ax\"\n" \
14308 "3: movl $-1,%[err]\n" \
14309@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
14310 "emms\n\t" /* clear stack tags */
14311 "fildl %P[addr]", /* set F?P to defined value */
14312 X86_FEATURE_FXSAVE_LEAK,
14313- [addr] "m" (tsk->thread.fpu.has_fpu));
14314+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
14315
14316 return fpu_restore_checking(&tsk->thread.fpu);
14317 }
14318diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
14319index be27ba1..8f13ff9 100644
14320--- a/arch/x86/include/asm/futex.h
14321+++ b/arch/x86/include/asm/futex.h
14322@@ -12,6 +12,7 @@
14323 #include <asm/smap.h>
14324
14325 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
14326+ typecheck(u32 __user *, uaddr); \
14327 asm volatile("\t" ASM_STAC "\n" \
14328 "1:\t" insn "\n" \
14329 "2:\t" ASM_CLAC "\n" \
14330@@ -20,15 +21,16 @@
14331 "\tjmp\t2b\n" \
14332 "\t.previous\n" \
14333 _ASM_EXTABLE(1b, 3b) \
14334- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
14335+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
14336 : "i" (-EFAULT), "0" (oparg), "1" (0))
14337
14338 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
14339+ typecheck(u32 __user *, uaddr); \
14340 asm volatile("\t" ASM_STAC "\n" \
14341 "1:\tmovl %2, %0\n" \
14342 "\tmovl\t%0, %3\n" \
14343 "\t" insn "\n" \
14344- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
14345+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
14346 "\tjnz\t1b\n" \
14347 "3:\t" ASM_CLAC "\n" \
14348 "\t.section .fixup,\"ax\"\n" \
14349@@ -38,7 +40,7 @@
14350 _ASM_EXTABLE(1b, 4b) \
14351 _ASM_EXTABLE(2b, 4b) \
14352 : "=&a" (oldval), "=&r" (ret), \
14353- "+m" (*uaddr), "=&r" (tem) \
14354+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
14355 : "r" (oparg), "i" (-EFAULT), "1" (0))
14356
14357 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14358@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14359
14360 switch (op) {
14361 case FUTEX_OP_SET:
14362- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
14363+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
14364 break;
14365 case FUTEX_OP_ADD:
14366- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
14367+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
14368 uaddr, oparg);
14369 break;
14370 case FUTEX_OP_OR:
14371@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
14372 return -EFAULT;
14373
14374 asm volatile("\t" ASM_STAC "\n"
14375- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
14376+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
14377 "2:\t" ASM_CLAC "\n"
14378 "\t.section .fixup, \"ax\"\n"
14379 "3:\tmov %3, %0\n"
14380 "\tjmp 2b\n"
14381 "\t.previous\n"
14382 _ASM_EXTABLE(1b, 3b)
14383- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
14384+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
14385 : "i" (-EFAULT), "r" (newval), "1" (oldval)
14386 : "memory"
14387 );
14388diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
14389index eb92a6e..b98b2f4 100644
14390--- a/arch/x86/include/asm/hw_irq.h
14391+++ b/arch/x86/include/asm/hw_irq.h
14392@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
14393 extern void enable_IO_APIC(void);
14394
14395 /* Statistics */
14396-extern atomic_t irq_err_count;
14397-extern atomic_t irq_mis_count;
14398+extern atomic_unchecked_t irq_err_count;
14399+extern atomic_unchecked_t irq_mis_count;
14400
14401 /* EISA */
14402 extern void eisa_set_level_irq(unsigned int irq);
14403diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
14404index a203659..9889f1c 100644
14405--- a/arch/x86/include/asm/i8259.h
14406+++ b/arch/x86/include/asm/i8259.h
14407@@ -62,7 +62,7 @@ struct legacy_pic {
14408 void (*init)(int auto_eoi);
14409 int (*irq_pending)(unsigned int irq);
14410 void (*make_irq)(unsigned int irq);
14411-};
14412+} __do_const;
14413
14414 extern struct legacy_pic *legacy_pic;
14415 extern struct legacy_pic null_legacy_pic;
14416diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
14417index d8e8eef..1765f78 100644
14418--- a/arch/x86/include/asm/io.h
14419+++ b/arch/x86/include/asm/io.h
14420@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
14421 "m" (*(volatile type __force *)addr) barrier); }
14422
14423 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
14424-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
14425-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
14426+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
14427+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
14428
14429 build_mmio_read(__readb, "b", unsigned char, "=q", )
14430-build_mmio_read(__readw, "w", unsigned short, "=r", )
14431-build_mmio_read(__readl, "l", unsigned int, "=r", )
14432+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
14433+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
14434
14435 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
14436 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
14437@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
14438 return ioremap_nocache(offset, size);
14439 }
14440
14441-extern void iounmap(volatile void __iomem *addr);
14442+extern void iounmap(const volatile void __iomem *addr);
14443
14444 extern void set_iounmap_nonlazy(void);
14445
14446@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
14447
14448 #include <linux/vmalloc.h>
14449
14450+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
14451+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
14452+{
14453+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
14454+}
14455+
14456+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
14457+{
14458+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
14459+}
14460+
14461 /*
14462 * Convert a virtual cached pointer to an uncached pointer
14463 */
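The valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added above bound /dev/mem-style accesses by the CPU's physical address width: the last page frame of the request must fall below 1 << (x86_phys_bits - PAGE_SHIFT). A worked example with illustrative values:

#include <stdio.h>

int main(void)
{
	/* e.g. x86_phys_bits == 36 (classic PAE), 4 KiB pages */
	unsigned phys_bits = 36, page_shift = 12;
	unsigned long long max_pfn = 1ULL << (phys_bits - page_shift);

	/* 1 << 24 frames -> addresses below 2^36 bytes = 64 GiB */
	printf("pfn limit: %#llx (%llu GiB addressable)\n",
	       max_pfn, (max_pfn << page_shift) >> 30);
	return 0;
}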
14464diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
14465index bba3cf8..06bc8da 100644
14466--- a/arch/x86/include/asm/irqflags.h
14467+++ b/arch/x86/include/asm/irqflags.h
14468@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
14469 sti; \
14470 sysexit
14471
14472+#define GET_CR0_INTO_RDI mov %cr0, %rdi
14473+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
14474+#define GET_CR3_INTO_RDI mov %cr3, %rdi
14475+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
14476+
14477 #else
14478 #define INTERRUPT_RETURN iret
14479 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
14480diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
14481index d3ddd17..c9fb0cc 100644
14482--- a/arch/x86/include/asm/kprobes.h
14483+++ b/arch/x86/include/asm/kprobes.h
14484@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
14485 #define RELATIVEJUMP_SIZE 5
14486 #define RELATIVECALL_OPCODE 0xe8
14487 #define RELATIVE_ADDR_SIZE 4
14488-#define MAX_STACK_SIZE 64
14489-#define MIN_STACK_SIZE(ADDR) \
14490- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
14491- THREAD_SIZE - (unsigned long)(ADDR))) \
14492- ? (MAX_STACK_SIZE) \
14493- : (((unsigned long)current_thread_info()) + \
14494- THREAD_SIZE - (unsigned long)(ADDR)))
14495+#define MAX_STACK_SIZE 64UL
14496+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
14497
14498 #define flush_insn_slot(p) do { } while (0)
14499
14500diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
14501index 2d89e39..baee879 100644
14502--- a/arch/x86/include/asm/local.h
14503+++ b/arch/x86/include/asm/local.h
14504@@ -10,33 +10,97 @@ typedef struct {
14505 atomic_long_t a;
14506 } local_t;
14507
14508+typedef struct {
14509+ atomic_long_unchecked_t a;
14510+} local_unchecked_t;
14511+
14512 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
14513
14514 #define local_read(l) atomic_long_read(&(l)->a)
14515+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
14516 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
14517+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
14518
14519 static inline void local_inc(local_t *l)
14520 {
14521- asm volatile(_ASM_INC "%0"
14522+ asm volatile(_ASM_INC "%0\n"
14523+
14524+#ifdef CONFIG_PAX_REFCOUNT
14525+ "jno 0f\n"
14526+ _ASM_DEC "%0\n"
14527+ "int $4\n0:\n"
14528+ _ASM_EXTABLE(0b, 0b)
14529+#endif
14530+
14531+ : "+m" (l->a.counter));
14532+}
14533+
14534+static inline void local_inc_unchecked(local_unchecked_t *l)
14535+{
14536+ asm volatile(_ASM_INC "%0\n"
14537 : "+m" (l->a.counter));
14538 }
14539
14540 static inline void local_dec(local_t *l)
14541 {
14542- asm volatile(_ASM_DEC "%0"
14543+ asm volatile(_ASM_DEC "%0\n"
14544+
14545+#ifdef CONFIG_PAX_REFCOUNT
14546+ "jno 0f\n"
14547+ _ASM_INC "%0\n"
14548+ "int $4\n0:\n"
14549+ _ASM_EXTABLE(0b, 0b)
14550+#endif
14551+
14552+ : "+m" (l->a.counter));
14553+}
14554+
14555+static inline void local_dec_unchecked(local_unchecked_t *l)
14556+{
14557+ asm volatile(_ASM_DEC "%0\n"
14558 : "+m" (l->a.counter));
14559 }
14560
14561 static inline void local_add(long i, local_t *l)
14562 {
14563- asm volatile(_ASM_ADD "%1,%0"
14564+ asm volatile(_ASM_ADD "%1,%0\n"
14565+
14566+#ifdef CONFIG_PAX_REFCOUNT
14567+ "jno 0f\n"
14568+ _ASM_SUB "%1,%0\n"
14569+ "int $4\n0:\n"
14570+ _ASM_EXTABLE(0b, 0b)
14571+#endif
14572+
14573+ : "+m" (l->a.counter)
14574+ : "ir" (i));
14575+}
14576+
14577+static inline void local_add_unchecked(long i, local_unchecked_t *l)
14578+{
14579+ asm volatile(_ASM_ADD "%1,%0\n"
14580 : "+m" (l->a.counter)
14581 : "ir" (i));
14582 }
14583
14584 static inline void local_sub(long i, local_t *l)
14585 {
14586- asm volatile(_ASM_SUB "%1,%0"
14587+ asm volatile(_ASM_SUB "%1,%0\n"
14588+
14589+#ifdef CONFIG_PAX_REFCOUNT
14590+ "jno 0f\n"
14591+ _ASM_ADD "%1,%0\n"
14592+ "int $4\n0:\n"
14593+ _ASM_EXTABLE(0b, 0b)
14594+#endif
14595+
14596+ : "+m" (l->a.counter)
14597+ : "ir" (i));
14598+}
14599+
14600+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
14601+{
14602+ asm volatile(_ASM_SUB "%1,%0\n"
14603 : "+m" (l->a.counter)
14604 : "ir" (i));
14605 }
14606@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
14607 {
14608 unsigned char c;
14609
14610- asm volatile(_ASM_SUB "%2,%0; sete %1"
14611+ asm volatile(_ASM_SUB "%2,%0\n"
14612+
14613+#ifdef CONFIG_PAX_REFCOUNT
14614+ "jno 0f\n"
14615+ _ASM_ADD "%2,%0\n"
14616+ "int $4\n0:\n"
14617+ _ASM_EXTABLE(0b, 0b)
14618+#endif
14619+
14620+ "sete %1\n"
14621 : "+m" (l->a.counter), "=qm" (c)
14622 : "ir" (i) : "memory");
14623 return c;
14624@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
14625 {
14626 unsigned char c;
14627
14628- asm volatile(_ASM_DEC "%0; sete %1"
14629+ asm volatile(_ASM_DEC "%0\n"
14630+
14631+#ifdef CONFIG_PAX_REFCOUNT
14632+ "jno 0f\n"
14633+ _ASM_INC "%0\n"
14634+ "int $4\n0:\n"
14635+ _ASM_EXTABLE(0b, 0b)
14636+#endif
14637+
14638+ "sete %1\n"
14639 : "+m" (l->a.counter), "=qm" (c)
14640 : : "memory");
14641 return c != 0;
14642@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
14643 {
14644 unsigned char c;
14645
14646- asm volatile(_ASM_INC "%0; sete %1"
14647+ asm volatile(_ASM_INC "%0\n"
14648+
14649+#ifdef CONFIG_PAX_REFCOUNT
14650+ "jno 0f\n"
14651+ _ASM_DEC "%0\n"
14652+ "int $4\n0:\n"
14653+ _ASM_EXTABLE(0b, 0b)
14654+#endif
14655+
14656+ "sete %1\n"
14657 : "+m" (l->a.counter), "=qm" (c)
14658 : : "memory");
14659 return c != 0;
14660@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
14661 {
14662 unsigned char c;
14663
14664- asm volatile(_ASM_ADD "%2,%0; sets %1"
14665+ asm volatile(_ASM_ADD "%2,%0\n"
14666+
14667+#ifdef CONFIG_PAX_REFCOUNT
14668+ "jno 0f\n"
14669+ _ASM_SUB "%2,%0\n"
14670+ "int $4\n0:\n"
14671+ _ASM_EXTABLE(0b, 0b)
14672+#endif
14673+
14674+ "sets %1\n"
14675 : "+m" (l->a.counter), "=qm" (c)
14676 : "ir" (i) : "memory");
14677 return c;
14678@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
14679 static inline long local_add_return(long i, local_t *l)
14680 {
14681 long __i = i;
14682+ asm volatile(_ASM_XADD "%0, %1\n"
14683+
14684+#ifdef CONFIG_PAX_REFCOUNT
14685+ "jno 0f\n"
14686+ _ASM_MOV "%0,%1\n"
14687+ "int $4\n0:\n"
14688+ _ASM_EXTABLE(0b, 0b)
14689+#endif
14690+
14691+ : "+r" (i), "+m" (l->a.counter)
14692+ : : "memory");
14693+ return i + __i;
14694+}
14695+
14696+/**
14697+ * local_add_return_unchecked - add and return
14698+ * @i: integer value to add
14699+ * @l: pointer to type local_unchecked_t
14700+ *
14701+ * Atomically adds @i to @l and returns @i + @l
14702+ */
14703+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
14704+{
14705+ long __i = i;
14706 asm volatile(_ASM_XADD "%0, %1;"
14707 : "+r" (i), "+m" (l->a.counter)
14708 : : "memory");
14709@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
14710
14711 #define local_cmpxchg(l, o, n) \
14712 (cmpxchg_local(&((l)->a.counter), (o), (n)))
14713+#define local_cmpxchg_unchecked(l, o, n) \
14714+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
14715 /* Always has a lock prefix */
14716 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
14717
14718diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
14719new file mode 100644
14720index 0000000..2bfd3ba
14721--- /dev/null
14722+++ b/arch/x86/include/asm/mman.h
14723@@ -0,0 +1,15 @@
14724+#ifndef _X86_MMAN_H
14725+#define _X86_MMAN_H
14726+
14727+#include <uapi/asm/mman.h>
14728+
14729+#ifdef __KERNEL__
14730+#ifndef __ASSEMBLY__
14731+#ifdef CONFIG_X86_32
14732+#define arch_mmap_check i386_mmap_check
14733+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
14734+#endif
14735+#endif
14736+#endif
14737+
14738+#endif /* X86_MMAN_H */
14739diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
14740index 5f55e69..e20bfb1 100644
14741--- a/arch/x86/include/asm/mmu.h
14742+++ b/arch/x86/include/asm/mmu.h
14743@@ -9,7 +9,7 @@
14744 * we put the segment information here.
14745 */
14746 typedef struct {
14747- void *ldt;
14748+ struct desc_struct *ldt;
14749 int size;
14750
14751 #ifdef CONFIG_X86_64
14752@@ -18,7 +18,19 @@ typedef struct {
14753 #endif
14754
14755 struct mutex lock;
14756- void *vdso;
14757+ unsigned long vdso;
14758+
14759+#ifdef CONFIG_X86_32
14760+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14761+ unsigned long user_cs_base;
14762+ unsigned long user_cs_limit;
14763+
14764+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14765+ cpumask_t cpu_user_cs_mask;
14766+#endif
14767+
14768+#endif
14769+#endif
14770 } mm_context_t;
14771
14772 #ifdef CONFIG_SMP
14773diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
14774index cdbf367..adb37ac 100644
14775--- a/arch/x86/include/asm/mmu_context.h
14776+++ b/arch/x86/include/asm/mmu_context.h
14777@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
14778
14779 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14780 {
14781+
14782+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14783+ unsigned int i;
14784+ pgd_t *pgd;
14785+
14786+ pax_open_kernel();
14787+ pgd = get_cpu_pgd(smp_processor_id());
14788+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
14789+ set_pgd_batched(pgd+i, native_make_pgd(0));
14790+ pax_close_kernel();
14791+#endif
14792+
14793 #ifdef CONFIG_SMP
14794 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
14795 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
14796@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14797 struct task_struct *tsk)
14798 {
14799 unsigned cpu = smp_processor_id();
14800+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14801+ int tlbstate = TLBSTATE_OK;
14802+#endif
14803
14804 if (likely(prev != next)) {
14805 #ifdef CONFIG_SMP
14806+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14807+ tlbstate = this_cpu_read(cpu_tlbstate.state);
14808+#endif
14809 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14810 this_cpu_write(cpu_tlbstate.active_mm, next);
14811 #endif
14812 cpumask_set_cpu(cpu, mm_cpumask(next));
14813
14814 /* Re-load page tables */
14815+#ifdef CONFIG_PAX_PER_CPU_PGD
14816+ pax_open_kernel();
14817+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14818+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14819+ pax_close_kernel();
14820+ load_cr3(get_cpu_pgd(cpu));
14821+#else
14822 load_cr3(next->pgd);
14823+#endif
14824
14825 /* stop flush ipis for the previous mm */
14826 cpumask_clear_cpu(cpu, mm_cpumask(prev));
14827@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14828 */
14829 if (unlikely(prev->context.ldt != next->context.ldt))
14830 load_LDT_nolock(&next->context);
14831- }
14832+
14833+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14834+ if (!(__supported_pte_mask & _PAGE_NX)) {
14835+ smp_mb__before_clear_bit();
14836+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
14837+ smp_mb__after_clear_bit();
14838+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14839+ }
14840+#endif
14841+
14842+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14843+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
14844+ prev->context.user_cs_limit != next->context.user_cs_limit))
14845+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14846 #ifdef CONFIG_SMP
14847+ else if (unlikely(tlbstate != TLBSTATE_OK))
14848+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14849+#endif
14850+#endif
14851+
14852+ }
14853 else {
14854+
14855+#ifdef CONFIG_PAX_PER_CPU_PGD
14856+ pax_open_kernel();
14857+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14858+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14859+ pax_close_kernel();
14860+ load_cr3(get_cpu_pgd(cpu));
14861+#endif
14862+
14863+#ifdef CONFIG_SMP
14864 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14865 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
14866
14867@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14868 * tlb flush IPI delivery. We must reload CR3
14869 * to make sure to use no freed page tables.
14870 */
14871+
14872+#ifndef CONFIG_PAX_PER_CPU_PGD
14873 load_cr3(next->pgd);
14874+#endif
14875+
14876 load_LDT_nolock(&next->context);
14877+
14878+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14879+ if (!(__supported_pte_mask & _PAGE_NX))
14880+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14881+#endif
14882+
14883+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14884+#ifdef CONFIG_PAX_PAGEEXEC
14885+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
14886+#endif
14887+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14888+#endif
14889+
14890 }
14891+#endif
14892 }
14893-#endif
14894 }
14895
14896 #define activate_mm(prev, next) \
14897diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
14898index e3b7819..b257c64 100644
14899--- a/arch/x86/include/asm/module.h
14900+++ b/arch/x86/include/asm/module.h
14901@@ -5,6 +5,7 @@
14902
14903 #ifdef CONFIG_X86_64
14904 /* X86_64 does not define MODULE_PROC_FAMILY */
14905+#define MODULE_PROC_FAMILY ""
14906 #elif defined CONFIG_M486
14907 #define MODULE_PROC_FAMILY "486 "
14908 #elif defined CONFIG_M586
14909@@ -57,8 +58,20 @@
14910 #error unknown processor family
14911 #endif
14912
14913-#ifdef CONFIG_X86_32
14914-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14915+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14916+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14917+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14918+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14919+#else
14920+#define MODULE_PAX_KERNEXEC ""
14921 #endif
14922
14923+#ifdef CONFIG_PAX_MEMORY_UDEREF
14924+#define MODULE_PAX_UDEREF "UDEREF "
14925+#else
14926+#define MODULE_PAX_UDEREF ""
14927+#endif
14928+
14929+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14930+
14931 #endif /* _ASM_X86_MODULE_H */
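MODULE_ARCH_VERMAGIC is now assembled by string-literal concatenation of three macros, so a module built with mismatching KERNEXEC/UDEREF settings fails the vermagic check at load time. A sketch of the expansion, with one plausible configuration picked for illustration:

#include <stdio.h>

#define MODULE_PROC_FAMILY   ""              /* x86_64 case above     */
#define MODULE_PAX_KERNEXEC  "KERNEXEC_BTS " /* illustrative config   */
#define MODULE_PAX_UDEREF    "UDEREF "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	/* adjacent literals merge: "KERNEXEC_BTS UDEREF " */
	printf("vermagic arch part: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}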
14932diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14933index c0fa356..07a498a 100644
14934--- a/arch/x86/include/asm/nmi.h
14935+++ b/arch/x86/include/asm/nmi.h
14936@@ -42,11 +42,11 @@ struct nmiaction {
14937 nmi_handler_t handler;
14938 unsigned long flags;
14939 const char *name;
14940-};
14941+} __do_const;
14942
14943 #define register_nmi_handler(t, fn, fg, n, init...) \
14944 ({ \
14945- static struct nmiaction init fn##_na = { \
14946+ static const struct nmiaction init fn##_na = { \
14947 .handler = (fn), \
14948 .name = (n), \
14949 .flags = (fg), \
14950@@ -54,7 +54,7 @@ struct nmiaction {
14951 __register_nmi_handler((t), &fn##_na); \
14952 })
14953
14954-int __register_nmi_handler(unsigned int, struct nmiaction *);
14955+int __register_nmi_handler(unsigned int, const struct nmiaction *);
14956
14957 void unregister_nmi_handler(unsigned int, const char *);
14958
14959diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
14960index 320f7bb..e89f8f8 100644
14961--- a/arch/x86/include/asm/page_64_types.h
14962+++ b/arch/x86/include/asm/page_64_types.h
14963@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
14964
14965 /* duplicated to the one in bootmem.h */
14966 extern unsigned long max_pfn;
14967-extern unsigned long phys_base;
14968+extern const unsigned long phys_base;
14969
14970 extern unsigned long __phys_addr(unsigned long);
14971 #define __phys_reloc_hide(x) (x)
14972diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14973index 7361e47..16dc226 100644
14974--- a/arch/x86/include/asm/paravirt.h
14975+++ b/arch/x86/include/asm/paravirt.h
14976@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
14977 return (pmd_t) { ret };
14978 }
14979
14980-static inline pmdval_t pmd_val(pmd_t pmd)
14981+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14982 {
14983 pmdval_t ret;
14984
14985@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14986 val);
14987 }
14988
14989+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14990+{
14991+ pgdval_t val = native_pgd_val(pgd);
14992+
14993+ if (sizeof(pgdval_t) > sizeof(long))
14994+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14995+ val, (u64)val >> 32);
14996+ else
14997+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14998+ val);
14999+}
15000+
15001 static inline void pgd_clear(pgd_t *pgdp)
15002 {
15003 set_pgd(pgdp, __pgd(0));
15004@@ -714,6 +726,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
15005 pv_mmu_ops.set_fixmap(idx, phys, flags);
15006 }
15007
15008+#ifdef CONFIG_PAX_KERNEXEC
15009+static inline unsigned long pax_open_kernel(void)
15010+{
15011+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
15012+}
15013+
15014+static inline unsigned long pax_close_kernel(void)
15015+{
15016+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
15017+}
15018+#else
15019+static inline unsigned long pax_open_kernel(void) { return 0; }
15020+static inline unsigned long pax_close_kernel(void) { return 0; }
15021+#endif
15022+
15023 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
15024
15025 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
15026@@ -930,7 +957,7 @@ extern void default_banner(void);
15027
15028 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
15029 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
15030-#define PARA_INDIRECT(addr) *%cs:addr
15031+#define PARA_INDIRECT(addr) *%ss:addr
15032 #endif
15033
15034 #define INTERRUPT_RETURN \
15035@@ -1005,6 +1032,21 @@ extern void default_banner(void);
15036 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
15037 CLBR_NONE, \
15038 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
15039+
15040+#define GET_CR0_INTO_RDI \
15041+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
15042+ mov %rax,%rdi
15043+
15044+#define SET_RDI_INTO_CR0 \
15045+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15046+
15047+#define GET_CR3_INTO_RDI \
15048+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
15049+ mov %rax,%rdi
15050+
15051+#define SET_RDI_INTO_CR3 \
15052+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
15053+
15054 #endif /* CONFIG_X86_32 */
15055
15056 #endif /* __ASSEMBLY__ */
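The pax_open_kernel()/pax_close_kernel() hooks added above route through pv_mmu_ops under KERNEXEC; the desc.h call sites earlier in this section bracket IDT/GDT writes with them, which implies the native variant must briefly lift write protection. A sketch of what that native pair has to do, assuming it flips CR0.WP as those call sites suggest (kernel-context code, not standalone; the real implementation lives elsewhere in this patch):

static inline unsigned long native_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0() ^ X86_CR0_WP;  /* flip WP: must end up clear */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;        /* previous value, for symmetry */
}

static inline unsigned long native_pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;  /* flip WP back: must end up set */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	preempt_enable();
	return cr0 ^ X86_CR0_WP;
}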
15057diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
15058index b3b0ec1..b1cd3eb 100644
15059--- a/arch/x86/include/asm/paravirt_types.h
15060+++ b/arch/x86/include/asm/paravirt_types.h
15061@@ -84,7 +84,7 @@ struct pv_init_ops {
15062 */
15063 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
15064 unsigned long addr, unsigned len);
15065-};
15066+} __no_const;
15067
15068
15069 struct pv_lazy_ops {
15070@@ -98,7 +98,7 @@ struct pv_time_ops {
15071 unsigned long long (*sched_clock)(void);
15072 unsigned long long (*steal_clock)(int cpu);
15073 unsigned long (*get_tsc_khz)(void);
15074-};
15075+} __no_const;
15076
15077 struct pv_cpu_ops {
15078 /* hooks for various privileged instructions */
15079@@ -192,7 +192,7 @@ struct pv_cpu_ops {
15080
15081 void (*start_context_switch)(struct task_struct *prev);
15082 void (*end_context_switch)(struct task_struct *next);
15083-};
15084+} __no_const;
15085
15086 struct pv_irq_ops {
15087 /*
15088@@ -223,7 +223,7 @@ struct pv_apic_ops {
15089 unsigned long start_eip,
15090 unsigned long start_esp);
15091 #endif
15092-};
15093+} __no_const;
15094
15095 struct pv_mmu_ops {
15096 unsigned long (*read_cr2)(void);
15097@@ -313,6 +313,7 @@ struct pv_mmu_ops {
15098 struct paravirt_callee_save make_pud;
15099
15100 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
15101+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
15102 #endif /* PAGETABLE_LEVELS == 4 */
15103 #endif /* PAGETABLE_LEVELS >= 3 */
15104
15105@@ -324,6 +325,12 @@ struct pv_mmu_ops {
15106 an mfn. We can tell which is which from the index. */
15107 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
15108 phys_addr_t phys, pgprot_t flags);
15109+
15110+#ifdef CONFIG_PAX_KERNEXEC
15111+ unsigned long (*pax_open_kernel)(void);
15112+ unsigned long (*pax_close_kernel)(void);
15113+#endif
15114+
15115 };
15116
15117 struct arch_spinlock;
15118@@ -334,7 +341,7 @@ struct pv_lock_ops {
15119 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
15120 int (*spin_trylock)(struct arch_spinlock *lock);
15121 void (*spin_unlock)(struct arch_spinlock *lock);
15122-};
15123+} __no_const;
15124
15125 /* This contains all the paravirt structures: we get a convenient
15126 * number for each function using the offset which we use to indicate
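
[editor's note] __no_const pairs with grsecurity's "constify" gcc plugin, which by default makes structures consisting only of function pointers const so they end up in read-only memory; ops structures that must be rewritten at runtime (paravirt patching, lock ops) opt out with this attribute. Plugin details aside, the underlying idea can be shown in plain C:

/* Sketch of the constify idea: a const pointer table lands in .rodata
 * and cannot be retargeted at runtime, which is exactly why the
 * boot-time-patched paravirt ops above need the __no_const opt-out. */
#include <stdio.h>

struct ops {
    void (*run)(void);
};

static void safe(void) { puts("safe"); }

static const struct ops locked_ops = { safe };

int main(void)
{
    locked_ops.run();
    /* locked_ops.run = other_fn;  -- rejected by the compiler:
     * assignment of member 'run' in read-only object */
    return 0;
}
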
15127diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
15128index b4389a4..7024269 100644
15129--- a/arch/x86/include/asm/pgalloc.h
15130+++ b/arch/x86/include/asm/pgalloc.h
15131@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
15132 pmd_t *pmd, pte_t *pte)
15133 {
15134 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
15135+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
15136+}
15137+
15138+static inline void pmd_populate_user(struct mm_struct *mm,
15139+ pmd_t *pmd, pte_t *pte)
15140+{
15141+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
15142 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
15143 }
15144
15145@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
15146
15147 #ifdef CONFIG_X86_PAE
15148 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
15149+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
15150+{
15151+ pud_populate(mm, pudp, pmd);
15152+}
15153 #else /* !CONFIG_X86_PAE */
15154 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
15155 {
15156 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
15157 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
15158 }
15159+
15160+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
15161+{
15162+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
15163+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
15164+}
15165 #endif /* CONFIG_X86_PAE */
15166
15167 #if PAGETABLE_LEVELS > 3
15168@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
15169 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
15170 }
15171
15172+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
15173+{
15174+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
15175+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
15176+}
15177+
15178 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
15179 {
15180 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
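
[editor's note] _PAGE_TABLE differs from _KERNPG_TABLE only in the _PAGE_USER bit, which is the whole point of the new pmd/pud/pgd_populate_kernel() variants above: page tables that only ever map kernel memory never become reachable from user mode. A sketch of the flag arithmetic, using the usual x86 bit values for illustration:

/* Flag values behind pmd_populate_kernel() vs pmd_populate_user(). */
#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
    printf("_KERNPG_TABLE = %#lx (user bit clear)\n", _KERNPG_TABLE);
    printf("_PAGE_TABLE   = %#lx (user bit set)\n",   _PAGE_TABLE);
    return 0;
}
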
15181diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
15182index f2b489c..4f7e2e5 100644
15183--- a/arch/x86/include/asm/pgtable-2level.h
15184+++ b/arch/x86/include/asm/pgtable-2level.h
15185@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
15186
15187 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15188 {
15189+ pax_open_kernel();
15190 *pmdp = pmd;
15191+ pax_close_kernel();
15192 }
15193
15194 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15195diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
15196index 4cc9f2b..5fd9226 100644
15197--- a/arch/x86/include/asm/pgtable-3level.h
15198+++ b/arch/x86/include/asm/pgtable-3level.h
15199@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15200
15201 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15202 {
15203+ pax_open_kernel();
15204 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
15205+ pax_close_kernel();
15206 }
15207
15208 static inline void native_set_pud(pud_t *pudp, pud_t pud)
15209 {
15210+ pax_open_kernel();
15211 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
15212+ pax_close_kernel();
15213 }
15214
15215 /*
15216diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
15217index 1c1a955..50f828c 100644
15218--- a/arch/x86/include/asm/pgtable.h
15219+++ b/arch/x86/include/asm/pgtable.h
15220@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15221
15222 #ifndef __PAGETABLE_PUD_FOLDED
15223 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
15224+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
15225 #define pgd_clear(pgd) native_pgd_clear(pgd)
15226 #endif
15227
15228@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15229
15230 #define arch_end_context_switch(prev) do {} while(0)
15231
15232+#define pax_open_kernel() native_pax_open_kernel()
15233+#define pax_close_kernel() native_pax_close_kernel()
15234 #endif /* CONFIG_PARAVIRT */
15235
15236+#define __HAVE_ARCH_PAX_OPEN_KERNEL
15237+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
15238+
15239+#ifdef CONFIG_PAX_KERNEXEC
15240+static inline unsigned long native_pax_open_kernel(void)
15241+{
15242+ unsigned long cr0;
15243+
15244+ preempt_disable();
15245+ barrier();
15246+ cr0 = read_cr0() ^ X86_CR0_WP;
15247+ BUG_ON(cr0 & X86_CR0_WP);
15248+ write_cr0(cr0);
15249+ return cr0 ^ X86_CR0_WP;
15250+}
15251+
15252+static inline unsigned long native_pax_close_kernel(void)
15253+{
15254+ unsigned long cr0;
15255+
15256+ cr0 = read_cr0() ^ X86_CR0_WP;
15257+ BUG_ON(!(cr0 & X86_CR0_WP));
15258+ write_cr0(cr0);
15259+ barrier();
15260+ preempt_enable_no_resched();
15261+ return cr0 ^ X86_CR0_WP;
15262+}
15263+#else
15264+static inline unsigned long native_pax_open_kernel(void) { return 0; }
15265+static inline unsigned long native_pax_close_kernel(void) { return 0; }
15266+#endif
15267+
15268 /*
15269 * The following only work if pte_present() is true.
15270 * Undefined behaviour if not..
15271 */
15272+static inline int pte_user(pte_t pte)
15273+{
15274+ return pte_val(pte) & _PAGE_USER;
15275+}
15276+
15277 static inline int pte_dirty(pte_t pte)
15278 {
15279 return pte_flags(pte) & _PAGE_DIRTY;
15280@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
15281 return pte_clear_flags(pte, _PAGE_RW);
15282 }
15283
15284+static inline pte_t pte_mkread(pte_t pte)
15285+{
15286+ return __pte(pte_val(pte) | _PAGE_USER);
15287+}
15288+
15289 static inline pte_t pte_mkexec(pte_t pte)
15290 {
15291- return pte_clear_flags(pte, _PAGE_NX);
15292+#ifdef CONFIG_X86_PAE
15293+ if (__supported_pte_mask & _PAGE_NX)
15294+ return pte_clear_flags(pte, _PAGE_NX);
15295+ else
15296+#endif
15297+ return pte_set_flags(pte, _PAGE_USER);
15298+}
15299+
15300+static inline pte_t pte_exprotect(pte_t pte)
15301+{
15302+#ifdef CONFIG_X86_PAE
15303+ if (__supported_pte_mask & _PAGE_NX)
15304+ return pte_set_flags(pte, _PAGE_NX);
15305+ else
15306+#endif
15307+ return pte_clear_flags(pte, _PAGE_USER);
15308 }
15309
15310 static inline pte_t pte_mkdirty(pte_t pte)
15311@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
15312 #endif
15313
15314 #ifndef __ASSEMBLY__
15315+
15316+#ifdef CONFIG_PAX_PER_CPU_PGD
15317+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
15318+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
15319+{
15320+ return cpu_pgd[cpu];
15321+}
15322+#endif
15323+
15324 #include <linux/mm_types.h>
15325
15326 static inline int pte_none(pte_t pte)
15327@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
15328
15329 static inline int pgd_bad(pgd_t pgd)
15330 {
15331- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
15332+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
15333 }
15334
15335 static inline int pgd_none(pgd_t pgd)
15336@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
15337 * pgd_offset() returns a (pgd_t *)
15338 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
15339 */
15340-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
15341+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
15342+
15343+#ifdef CONFIG_PAX_PER_CPU_PGD
15344+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
15345+#endif
15346+
15347 /*
15348 * a shortcut which implies the use of the kernel's pgd, instead
15349 * of a process's
15350@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
15351 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
15352 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
15353
15354+#ifdef CONFIG_X86_32
15355+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
15356+#else
15357+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
15358+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
15359+
15360+#ifdef CONFIG_PAX_MEMORY_UDEREF
15361+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
15362+#else
15363+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
15364+#endif
15365+
15366+#endif
15367+
15368 #ifndef __ASSEMBLY__
15369
15370 extern int direct_gbpages;
15371@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
15372 * dst and src can be on the same page, but the range must not overlap,
15373 * and must not cross a page boundary.
15374 */
15375-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
15376+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
15377 {
15378- memcpy(dst, src, count * sizeof(pgd_t));
15379+ pax_open_kernel();
15380+ while (count--)
15381+ *dst++ = *src++;
15382+ pax_close_kernel();
15383 }
15384
15385+#ifdef CONFIG_PAX_PER_CPU_PGD
15386+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
15387+#endif
15388+
15389+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15390+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
15391+#else
15392+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
15393+#endif
15394
15395 #include <asm-generic/pgtable.h>
15396 #endif /* __ASSEMBLY__ */
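
[editor's note] native_pax_open_kernel() above flips CR0.WP (bit 16) so ring-0 writes temporarily ignore page-level write protection; the XOR trick both toggles the bit and lets the BUG_ON catch unbalanced open/close pairs, and preemption is held off so the window stays pinned to one CPU. CR0 is not accessible from user mode, so this sketch models the toggle-and-invariant logic with a plain variable:

/* Userspace model of the CR0.WP toggling in native_pax_open_kernel(). */
#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long fake_cr0 = X86_CR0_WP;   /* WP is normally set */

static unsigned long open_kernel(void)
{
    unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;
    assert(!(cr0 & X86_CR0_WP));   /* BUG_ON: WP must have been set */
    fake_cr0 = cr0;
    return cr0 ^ X86_CR0_WP;       /* previous value, as in the patch */
}

static unsigned long close_kernel(void)
{
    unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;
    assert(cr0 & X86_CR0_WP);      /* BUG_ON: WP must have been clear */
    fake_cr0 = cr0;
    return cr0 ^ X86_CR0_WP;
}

int main(void)
{
    open_kernel();
    puts("write window open");
    close_kernel();
    return 0;
}
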
15397diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
15398index 8faa215..a8a17ea 100644
15399--- a/arch/x86/include/asm/pgtable_32.h
15400+++ b/arch/x86/include/asm/pgtable_32.h
15401@@ -25,9 +25,6 @@
15402 struct mm_struct;
15403 struct vm_area_struct;
15404
15405-extern pgd_t swapper_pg_dir[1024];
15406-extern pgd_t initial_page_table[1024];
15407-
15408 static inline void pgtable_cache_init(void) { }
15409 static inline void check_pgt_cache(void) { }
15410 void paging_init(void);
15411@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
15412 # include <asm/pgtable-2level.h>
15413 #endif
15414
15415+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
15416+extern pgd_t initial_page_table[PTRS_PER_PGD];
15417+#ifdef CONFIG_X86_PAE
15418+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
15419+#endif
15420+
15421 #if defined(CONFIG_HIGHPTE)
15422 #define pte_offset_map(dir, address) \
15423 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
15424@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
15425 /* Clear a kernel PTE and flush it from the TLB */
15426 #define kpte_clear_flush(ptep, vaddr) \
15427 do { \
15428+ pax_open_kernel(); \
15429 pte_clear(&init_mm, (vaddr), (ptep)); \
15430+ pax_close_kernel(); \
15431 __flush_tlb_one((vaddr)); \
15432 } while (0)
15433
15434@@ -75,6 +80,9 @@ do { \
15435
15436 #endif /* !__ASSEMBLY__ */
15437
15438+#define HAVE_ARCH_UNMAPPED_AREA
15439+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
15440+
15441 /*
15442 * kern_addr_valid() is (1) for FLATMEM and (0) for
15443 * SPARSEMEM and DISCONTIGMEM
15444diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
15445index ed5903b..c7fe163 100644
15446--- a/arch/x86/include/asm/pgtable_32_types.h
15447+++ b/arch/x86/include/asm/pgtable_32_types.h
15448@@ -8,7 +8,7 @@
15449 */
15450 #ifdef CONFIG_X86_PAE
15451 # include <asm/pgtable-3level_types.h>
15452-# define PMD_SIZE (1UL << PMD_SHIFT)
15453+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
15454 # define PMD_MASK (~(PMD_SIZE - 1))
15455 #else
15456 # include <asm/pgtable-2level_types.h>
15457@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
15458 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
15459 #endif
15460
15461+#ifdef CONFIG_PAX_KERNEXEC
15462+#ifndef __ASSEMBLY__
15463+extern unsigned char MODULES_EXEC_VADDR[];
15464+extern unsigned char MODULES_EXEC_END[];
15465+#endif
15466+#include <asm/boot.h>
15467+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
15468+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
15469+#else
15470+#define ktla_ktva(addr) (addr)
15471+#define ktva_ktla(addr) (addr)
15472+#endif
15473+
15474 #define MODULES_VADDR VMALLOC_START
15475 #define MODULES_END VMALLOC_END
15476 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
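
[editor's note] Under 32-bit KERNEXEC the kernel text ends up visible at two addresses, and ktla_ktva()/ktva_ktla() above convert between the kernel text linear address and its virtual alias by a fixed offset of LOAD_PHYSICAL_ADDR + PAGE_OFFSET. A round-trip check of that translation, using typical 32-bit constants (16 MiB load address, 3 GiB PAGE_OFFSET) as assumed example values:

/* Round trip of the fixed-offset ktla_ktva()/ktva_ktla() pair. */
#include <assert.h>
#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL
#define PAGE_OFFSET        0xc0000000UL

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long ktla = 0x00100000UL;   /* some text address */
    unsigned long ktva = ktla_ktva(ktla);
    assert(ktva_ktla(ktva) == ktla);     /* the macros are inverses */
    printf("ktla %#lx <-> ktva %#lx\n", ktla, ktva);
    return 0;
}
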
15477diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
15478index 47356f9..deb94a2 100644
15479--- a/arch/x86/include/asm/pgtable_64.h
15480+++ b/arch/x86/include/asm/pgtable_64.h
15481@@ -16,10 +16,14 @@
15482
15483 extern pud_t level3_kernel_pgt[512];
15484 extern pud_t level3_ident_pgt[512];
15485+extern pud_t level3_vmalloc_start_pgt[512];
15486+extern pud_t level3_vmalloc_end_pgt[512];
15487+extern pud_t level3_vmemmap_pgt[512];
15488+extern pud_t level2_vmemmap_pgt[512];
15489 extern pmd_t level2_kernel_pgt[512];
15490 extern pmd_t level2_fixmap_pgt[512];
15491-extern pmd_t level2_ident_pgt[512];
15492-extern pgd_t init_level4_pgt[];
15493+extern pmd_t level2_ident_pgt[512*2];
15494+extern pgd_t init_level4_pgt[512];
15495
15496 #define swapper_pg_dir init_level4_pgt
15497
15498@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15499
15500 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15501 {
15502+ pax_open_kernel();
15503 *pmdp = pmd;
15504+ pax_close_kernel();
15505 }
15506
15507 static inline void native_pmd_clear(pmd_t *pmd)
15508@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
15509
15510 static inline void native_set_pud(pud_t *pudp, pud_t pud)
15511 {
15512+ pax_open_kernel();
15513 *pudp = pud;
15514+ pax_close_kernel();
15515 }
15516
15517 static inline void native_pud_clear(pud_t *pud)
15518@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
15519
15520 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
15521 {
15522+ pax_open_kernel();
15523+ *pgdp = pgd;
15524+ pax_close_kernel();
15525+}
15526+
15527+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15528+{
15529 *pgdp = pgd;
15530 }
15531
15532diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
15533index 766ea16..5b96cb3 100644
15534--- a/arch/x86/include/asm/pgtable_64_types.h
15535+++ b/arch/x86/include/asm/pgtable_64_types.h
15536@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
15537 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
15538 #define MODULES_END _AC(0xffffffffff000000, UL)
15539 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
15540+#define MODULES_EXEC_VADDR MODULES_VADDR
15541+#define MODULES_EXEC_END MODULES_END
15542+
15543+#define ktla_ktva(addr) (addr)
15544+#define ktva_ktla(addr) (addr)
15545
15546 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
15547diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
15548index 3c32db8..1ddccf5 100644
15549--- a/arch/x86/include/asm/pgtable_types.h
15550+++ b/arch/x86/include/asm/pgtable_types.h
15551@@ -16,13 +16,12 @@
15552 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
15553 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
15554 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
15555-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
15556+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
15557 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
15558 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
15559 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
15560-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
15561-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
15562-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
15563+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
15564+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
15565 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
15566
15567 /* If _PAGE_BIT_PRESENT is clear, we use these: */
15568@@ -40,7 +39,6 @@
15569 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
15570 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
15571 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
15572-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
15573 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
15574 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
15575 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
15576@@ -57,8 +55,10 @@
15577
15578 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
15579 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
15580-#else
15581+#elif defined(CONFIG_KMEMCHECK)
15582 #define _PAGE_NX (_AT(pteval_t, 0))
15583+#else
15584+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
15585 #endif
15586
15587 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
15588@@ -116,6 +116,9 @@
15589 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
15590 _PAGE_ACCESSED)
15591
15592+#define PAGE_READONLY_NOEXEC PAGE_READONLY
15593+#define PAGE_SHARED_NOEXEC PAGE_SHARED
15594+
15595 #define __PAGE_KERNEL_EXEC \
15596 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
15597 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
15598@@ -126,7 +129,7 @@
15599 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
15600 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
15601 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
15602-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
15603+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
15604 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
15605 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
15606 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
15607@@ -188,8 +191,8 @@
15608 * bits are combined, this will allow the user to access the high address mapped

15609 * VDSO in the presence of CONFIG_COMPAT_VDSO
15610 */
15611-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
15612-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
15613+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15614+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15615 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
15616 #endif
15617
15618@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
15619 {
15620 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
15621 }
15622+#endif
15623
15624+#if PAGETABLE_LEVELS == 3
15625+#include <asm-generic/pgtable-nopud.h>
15626+#endif
15627+
15628+#if PAGETABLE_LEVELS == 2
15629+#include <asm-generic/pgtable-nopmd.h>
15630+#endif
15631+
15632+#ifndef __ASSEMBLY__
15633 #if PAGETABLE_LEVELS > 3
15634 typedef struct { pudval_t pud; } pud_t;
15635
15636@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
15637 return pud.pud;
15638 }
15639 #else
15640-#include <asm-generic/pgtable-nopud.h>
15641-
15642 static inline pudval_t native_pud_val(pud_t pud)
15643 {
15644 return native_pgd_val(pud.pgd);
15645@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
15646 return pmd.pmd;
15647 }
15648 #else
15649-#include <asm-generic/pgtable-nopmd.h>
15650-
15651 static inline pmdval_t native_pmd_val(pmd_t pmd)
15652 {
15653 return native_pgd_val(pmd.pud.pgd);
15654@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
15655
15656 extern pteval_t __supported_pte_mask;
15657 extern void set_nx(void);
15658-extern int nx_enabled;
15659
15660 #define pgprot_writecombine pgprot_writecombine
15661 extern pgprot_t pgprot_writecombine(pgprot_t prot);
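
[editor's note] The reworked _PAGE_NX above gives three cases: the real hardware NX bit (bit 63) with PAE/64-bit, zero under KMEMCHECK, and otherwise a software-only marker in bit 11 (_PAGE_BIT_HIDDEN) that the MMU ignores but PaX code can still test. A sketch of the three-way selection, reduced to a compile-time switch (build with -DHAVE_HW_NX to pick the hardware bit):

/* Hardware NX vs the soft-NX fallback carved out above. */
#include <stdio.h>

typedef unsigned long long pteval_t;

#define _PAGE_BIT_HIDDEN 11
#define _PAGE_BIT_NX     63

#ifdef HAVE_HW_NX
# define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_NX)      /* enforced by MMU */
#else
# define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_HIDDEN)  /* soft marker only */
#endif

int main(void)
{
    pteval_t pte = 0x063ULL | _PAGE_NX;  /* kernel page, marked non-exec */
    printf("pte = %#llx, NX %s\n", pte,
           (pte & _PAGE_NX) ? "set" : "clear");
    return 0;
}
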
15662diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
15663index 888184b..a07ac89 100644
15664--- a/arch/x86/include/asm/processor.h
15665+++ b/arch/x86/include/asm/processor.h
15666@@ -287,7 +287,7 @@ struct tss_struct {
15667
15668 } ____cacheline_aligned;
15669
15670-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
15671+extern struct tss_struct init_tss[NR_CPUS];
15672
15673 /*
15674 * Save the original ist values for checking stack pointers during debugging
15675@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
15676 */
15677 #define TASK_SIZE PAGE_OFFSET
15678 #define TASK_SIZE_MAX TASK_SIZE
15679+
15680+#ifdef CONFIG_PAX_SEGMEXEC
15681+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
15682+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
15683+#else
15684 #define STACK_TOP TASK_SIZE
15685-#define STACK_TOP_MAX STACK_TOP
15686+#endif
15687+
15688+#define STACK_TOP_MAX TASK_SIZE
15689
15690 #define INIT_THREAD { \
15691- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15692+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15693 .vm86_info = NULL, \
15694 .sysenter_cs = __KERNEL_CS, \
15695 .io_bitmap_ptr = NULL, \
15696@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
15697 */
15698 #define INIT_TSS { \
15699 .x86_tss = { \
15700- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15701+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15702 .ss0 = __KERNEL_DS, \
15703 .ss1 = __KERNEL_CS, \
15704 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
15705@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
15706 extern unsigned long thread_saved_pc(struct task_struct *tsk);
15707
15708 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
15709-#define KSTK_TOP(info) \
15710-({ \
15711- unsigned long *__ptr = (unsigned long *)(info); \
15712- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
15713-})
15714+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
15715
15716 /*
15717 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
15718@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15719 #define task_pt_regs(task) \
15720 ({ \
15721 struct pt_regs *__regs__; \
15722- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
15723+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
15724 __regs__ - 1; \
15725 })
15726
15727@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15728 /*
15729 * User space process size. 47bits minus one guard page.
15730 */
15731-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
15732+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
15733
15734 /* This decides where the kernel will search for a free chunk of vm
15735 * space during mmap's.
15736 */
15737 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
15738- 0xc0000000 : 0xFFFFe000)
15739+ 0xc0000000 : 0xFFFFf000)
15740
15741 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
15742 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
15743@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15744 #define STACK_TOP_MAX TASK_SIZE_MAX
15745
15746 #define INIT_THREAD { \
15747- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15748+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15749 }
15750
15751 #define INIT_TSS { \
15752- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15753+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15754 }
15755
15756 /*
15757@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
15758 */
15759 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
15760
15761+#ifdef CONFIG_PAX_SEGMEXEC
15762+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
15763+#endif
15764+
15765 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
15766
15767 /* Get/set a process' ability to use the timestamp counter instruction */
15768@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
15769 #define cpu_has_amd_erratum(x) (false)
15770 #endif /* CONFIG_CPU_SUP_AMD */
15771
15772-extern unsigned long arch_align_stack(unsigned long sp);
15773+#define arch_align_stack(x) ((x) & ~0xfUL)
15774 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
15775
15776 void default_idle(void);
15777 bool set_pm_idle_to_default(void);
15778
15779-void stop_this_cpu(void *dummy);
15780+void stop_this_cpu(void *dummy) __noreturn;
15781
15782 #endif /* _ASM_X86_PROCESSOR_H */
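
[editor's note] Among the processor.h changes, arch_align_stack() shrinks from an extern (randomizing) function to a plain 16-byte alignment mask, presumably because PaX performs its own userland stack randomization (RANDUSTACK) elsewhere. The macro's behavior is simple enough to check directly:

/* arch_align_stack() as redefined above: align down to 16 bytes. */
#include <assert.h>
#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    unsigned long sp      = 0x7fffffffe123UL;
    unsigned long aligned = arch_align_stack(sp);
    assert((aligned & 0xf) == 0 && aligned <= sp);
    printf("%#lx -> %#lx\n", sp, aligned);
    return 0;
}
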
15783diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
15784index 942a086..6c26446 100644
15785--- a/arch/x86/include/asm/ptrace.h
15786+++ b/arch/x86/include/asm/ptrace.h
15787@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
15788 }
15789
15790 /*
15791- * user_mode_vm(regs) determines whether a register set came from user mode.
15792+ * user_mode(regs) determines whether a register set came from user mode.
15793 * This is true if V8086 mode was enabled OR if the register set was from
15794 * protected mode with RPL-3 CS value. This tricky test checks that with
15795 * one comparison. Many places in the kernel can bypass this full check
15796- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
15797+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
15798+ * be used.
15799 */
15800-static inline int user_mode(struct pt_regs *regs)
15801+static inline int user_mode_novm(struct pt_regs *regs)
15802 {
15803 #ifdef CONFIG_X86_32
15804 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
15805 #else
15806- return !!(regs->cs & 3);
15807+ return !!(regs->cs & SEGMENT_RPL_MASK);
15808 #endif
15809 }
15810
15811-static inline int user_mode_vm(struct pt_regs *regs)
15812+static inline int user_mode(struct pt_regs *regs)
15813 {
15814 #ifdef CONFIG_X86_32
15815 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
15816 USER_RPL;
15817 #else
15818- return user_mode(regs);
15819+ return user_mode_novm(regs);
15820 #endif
15821 }
15822
15823@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
15824 #ifdef CONFIG_X86_64
15825 static inline bool user_64bit_mode(struct pt_regs *regs)
15826 {
15827+ unsigned long cs = regs->cs & 0xffff;
15828 #ifndef CONFIG_PARAVIRT
15829 /*
15830 * On non-paravirt systems, this is the only long mode CPL 3
15831 * selector. We do not allow long mode selectors in the LDT.
15832 */
15833- return regs->cs == __USER_CS;
15834+ return cs == __USER_CS;
15835 #else
15836 /* Headers are too twisted for this to go in paravirt.h. */
15837- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
15838+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
15839 #endif
15840 }
15841
15842@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
15843 * Traps from the kernel do not save sp and ss.
15844 * Use the helper function to retrieve sp.
15845 */
15846- if (offset == offsetof(struct pt_regs, sp) &&
15847- regs->cs == __KERNEL_CS)
15848- return kernel_stack_pointer(regs);
15849+ if (offset == offsetof(struct pt_regs, sp)) {
15850+ unsigned long cs = regs->cs & 0xffff;
15851+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
15852+ return kernel_stack_pointer(regs);
15853+ }
15854 #endif
15855 return *(unsigned long *)((unsigned long)regs + offset);
15856 }
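
[editor's note] The ptrace.h hunks mask cs with 0xffff before comparing: the pt_regs cs slot is a full machine word, but only the low 16 bits hold the selector and the high bits are not guaranteed zero, so an unmasked equality test can spuriously fail. A minimal illustration (the __KERNEL_CS value is an assumed example):

/* Why the cs comparisons above mask with 0xffff first. */
#include <assert.h>

#define __KERNEL_CS 0x10   /* example selector value, assumed */

static int is_kernel_cs(unsigned long raw_cs)
{
    return (raw_cs & 0xffff) == __KERNEL_CS;
}

int main(void)
{
    assert(is_kernel_cs(0x10));
    assert(is_kernel_cs(0xdead0010UL));   /* stale high bits ignored */
    assert(!is_kernel_cs(0x33));
    return 0;
}
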
15857diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
15858index fe1ec5b..dc5c3fe 100644
15859--- a/arch/x86/include/asm/realmode.h
15860+++ b/arch/x86/include/asm/realmode.h
15861@@ -22,16 +22,14 @@ struct real_mode_header {
15862 #endif
15863 /* APM/BIOS reboot */
15864 u32 machine_real_restart_asm;
15865-#ifdef CONFIG_X86_64
15866 u32 machine_real_restart_seg;
15867-#endif
15868 };
15869
15870 /* This must match data at trampoline_32/64.S */
15871 struct trampoline_header {
15872 #ifdef CONFIG_X86_32
15873 u32 start;
15874- u16 gdt_pad;
15875+ u16 boot_cs;
15876 u16 gdt_limit;
15877 u32 gdt_base;
15878 #else
15879diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15880index a82c4f1..ac45053 100644
15881--- a/arch/x86/include/asm/reboot.h
15882+++ b/arch/x86/include/asm/reboot.h
15883@@ -6,13 +6,13 @@
15884 struct pt_regs;
15885
15886 struct machine_ops {
15887- void (*restart)(char *cmd);
15888- void (*halt)(void);
15889- void (*power_off)(void);
15890+ void (* __noreturn restart)(char *cmd);
15891+ void (* __noreturn halt)(void);
15892+ void (* __noreturn power_off)(void);
15893 void (*shutdown)(void);
15894 void (*crash_shutdown)(struct pt_regs *);
15895- void (*emergency_restart)(void);
15896-};
15897+ void (* __noreturn emergency_restart)(void);
15898+} __no_const;
15899
15900 extern struct machine_ops machine_ops;
15901
15902diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15903index 2dbe4a7..ce1db00 100644
15904--- a/arch/x86/include/asm/rwsem.h
15905+++ b/arch/x86/include/asm/rwsem.h
15906@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15907 {
15908 asm volatile("# beginning down_read\n\t"
15909 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15910+
15911+#ifdef CONFIG_PAX_REFCOUNT
15912+ "jno 0f\n"
15913+ LOCK_PREFIX _ASM_DEC "(%1)\n"
15914+ "int $4\n0:\n"
15915+ _ASM_EXTABLE(0b, 0b)
15916+#endif
15917+
15918 /* adds 0x00000001 */
15919 " jns 1f\n"
15920 " call call_rwsem_down_read_failed\n"
15921@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15922 "1:\n\t"
15923 " mov %1,%2\n\t"
15924 " add %3,%2\n\t"
15925+
15926+#ifdef CONFIG_PAX_REFCOUNT
15927+ "jno 0f\n"
15928+ "sub %3,%2\n"
15929+ "int $4\n0:\n"
15930+ _ASM_EXTABLE(0b, 0b)
15931+#endif
15932+
15933 " jle 2f\n\t"
15934 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15935 " jnz 1b\n\t"
15936@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15937 long tmp;
15938 asm volatile("# beginning down_write\n\t"
15939 LOCK_PREFIX " xadd %1,(%2)\n\t"
15940+
15941+#ifdef CONFIG_PAX_REFCOUNT
15942+ "jno 0f\n"
15943+ "mov %1,(%2)\n"
15944+ "int $4\n0:\n"
15945+ _ASM_EXTABLE(0b, 0b)
15946+#endif
15947+
15948 /* adds 0xffff0001, returns the old value */
15949 " test %1,%1\n\t"
15950 /* was the count 0 before? */
15951@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15952 long tmp;
15953 asm volatile("# beginning __up_read\n\t"
15954 LOCK_PREFIX " xadd %1,(%2)\n\t"
15955+
15956+#ifdef CONFIG_PAX_REFCOUNT
15957+ "jno 0f\n"
15958+ "mov %1,(%2)\n"
15959+ "int $4\n0:\n"
15960+ _ASM_EXTABLE(0b, 0b)
15961+#endif
15962+
15963 /* subtracts 1, returns the old value */
15964 " jns 1f\n\t"
15965 " call call_rwsem_wake\n" /* expects old value in %edx */
15966@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15967 long tmp;
15968 asm volatile("# beginning __up_write\n\t"
15969 LOCK_PREFIX " xadd %1,(%2)\n\t"
15970+
15971+#ifdef CONFIG_PAX_REFCOUNT
15972+ "jno 0f\n"
15973+ "mov %1,(%2)\n"
15974+ "int $4\n0:\n"
15975+ _ASM_EXTABLE(0b, 0b)
15976+#endif
15977+
15978 /* subtracts 0xffff0001, returns the old value */
15979 " jns 1f\n\t"
15980 " call call_rwsem_wake\n" /* expects old value in %edx */
15981@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15982 {
15983 asm volatile("# beginning __downgrade_write\n\t"
15984 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15985+
15986+#ifdef CONFIG_PAX_REFCOUNT
15987+ "jno 0f\n"
15988+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15989+ "int $4\n0:\n"
15990+ _ASM_EXTABLE(0b, 0b)
15991+#endif
15992+
15993 /*
15994 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15995 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15996@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15997 */
15998 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15999 {
16000- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
16001+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
16002+
16003+#ifdef CONFIG_PAX_REFCOUNT
16004+ "jno 0f\n"
16005+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
16006+ "int $4\n0:\n"
16007+ _ASM_EXTABLE(0b, 0b)
16008+#endif
16009+
16010 : "+m" (sem->count)
16011 : "er" (delta));
16012 }
16013@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
16014 */
16015 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
16016 {
16017- return delta + xadd(&sem->count, delta);
16018+ return delta + xadd_check_overflow(&sem->count, delta);
16019 }
16020
16021 #endif /* __KERNEL__ */
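
[editor's note] Every rwsem hunk above adds the same PAX_REFCOUNT shape: perform the atomic update, "jno 0f" skips ahead if the signed result did not overflow, otherwise undo the update and raise "int $4" (the x86 overflow exception), with the _ASM_EXTABLE entry resuming execution after the trap is reported. The trap itself cannot be usefully demonstrated in userspace, so this portable sketch models only the detect-and-refuse semantics with a compiler intrinsic:

/* Portable model of the overflow-checked update pattern above. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static long count = LONG_MAX;   /* one increment away from wrapping */

static void checked_add(long delta)
{
    long newval;
    if (__builtin_add_overflow(count, delta, &newval)) {
        /* kernel version: undo the xadd, then int $4 so the handler
         * can log the event and keep the counter saturated */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    count = newval;
}

int main(void)
{
    checked_add(1);   /* aborts instead of wrapping to LONG_MIN */
    return 0;
}
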
16022diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
16023index c48a950..c6d7468 100644
16024--- a/arch/x86/include/asm/segment.h
16025+++ b/arch/x86/include/asm/segment.h
16026@@ -64,10 +64,15 @@
16027 * 26 - ESPFIX small SS
16028 * 27 - per-cpu [ offset to per-cpu data area ]
16029 * 28 - stack_canary-20 [ for stack protector ]
16030- * 29 - unused
16031- * 30 - unused
16032+ * 29 - PCI BIOS CS
16033+ * 30 - PCI BIOS DS
16034 * 31 - TSS for double fault handler
16035 */
16036+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
16037+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
16038+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
16039+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
16040+
16041 #define GDT_ENTRY_TLS_MIN 6
16042 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
16043
16044@@ -79,6 +84,8 @@
16045
16046 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
16047
16048+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
16049+
16050 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
16051
16052 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
16053@@ -104,6 +111,12 @@
16054 #define __KERNEL_STACK_CANARY 0
16055 #endif
16056
16057+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
16058+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
16059+
16060+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
16061+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
16062+
16063 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
16064
16065 /*
16066@@ -141,7 +154,7 @@
16067 */
16068
16069 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
16070-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
16071+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
16072
16073
16074 #else
16075@@ -165,6 +178,8 @@
16076 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
16077 #define __USER32_DS __USER_DS
16078
16079+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
16080+
16081 #define GDT_ENTRY_TSS 8 /* needs two entries */
16082 #define GDT_ENTRY_LDT 10 /* needs two entries */
16083 #define GDT_ENTRY_TLS_MIN 12
16084@@ -185,6 +200,7 @@
16085 #endif
16086
16087 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
16088+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
16089 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
16090 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
16091 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
16092@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
16093 {
16094 unsigned long __limit;
16095 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
16096- return __limit + 1;
16097+ return __limit;
16098 }
16099
16100 #endif /* !__ASSEMBLY__ */
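
[editor's note] The get_limit() change above drops the "+ 1": lsll loads the raw segment limit, and adding 1 (to turn a limit into a size) wraps a 32-bit value to 0 for a flat 4 GiB segment, which matters once UDEREF/SEGMEXEC introduce non-flat limits that callers compare against directly. The wrap itself:

/* Why "__limit + 1" was unsafe for a flat segment. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t lsll_result = 0xffffffffu;   /* flat 4 GiB segment limit */
    uint32_t old_api     = lsll_result + 1;  /* wraps to 0 */
    printf("limit=%#x, limit+1=%#x\n", lsll_result, old_api);
    return 0;
}
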
16101diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
16102index b073aae..39f9bdd 100644
16103--- a/arch/x86/include/asm/smp.h
16104+++ b/arch/x86/include/asm/smp.h
16105@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
16106 /* cpus sharing the last level cache: */
16107 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
16108 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
16109-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
16110+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
16111
16112 static inline struct cpumask *cpu_sibling_mask(int cpu)
16113 {
16114@@ -79,7 +79,7 @@ struct smp_ops {
16115
16116 void (*send_call_func_ipi)(const struct cpumask *mask);
16117 void (*send_call_func_single_ipi)(int cpu);
16118-};
16119+} __no_const;
16120
16121 /* Globals due to paravirt */
16122 extern void set_cpu_sibling_map(int cpu);
16123@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
16124 extern int safe_smp_processor_id(void);
16125
16126 #elif defined(CONFIG_X86_64_SMP)
16127-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
16128-
16129-#define stack_smp_processor_id() \
16130-({ \
16131- struct thread_info *ti; \
16132- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
16133- ti->cpu; \
16134-})
16135+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
16136+#define stack_smp_processor_id() raw_smp_processor_id()
16137 #define safe_smp_processor_id() smp_processor_id()
16138
16139 #endif
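
[editor's note] The deleted stack_smp_processor_id() depended on thread_info living at the base of the kernel stack, found by masking %rsp with the stack-size mask; once thread_info moves off the stack (see the thread_info.h diff below), only the per-cpu variable remains and the macro collapses into raw_smp_processor_id(). A model of the old mask trick, with an assumed typical THREAD_SIZE:

/* The retired "andq %rsp, mask" stack-base derivation. */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 16384UL   /* typical x86-64 kernel stack, assumed */

int main(void)
{
    uintptr_t rsp  = 0xffff880012345a78UL;      /* some stack address */
    uintptr_t base = rsp & ~(THREAD_SIZE - 1);  /* CURRENT_MASK in the old code */
    printf("rsp %#lx -> stack base %#lx\n",
           (unsigned long)rsp, (unsigned long)base);
    return 0;
}
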
16140diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
16141index 33692ea..350a534 100644
16142--- a/arch/x86/include/asm/spinlock.h
16143+++ b/arch/x86/include/asm/spinlock.h
16144@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
16145 static inline void arch_read_lock(arch_rwlock_t *rw)
16146 {
16147 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
16148+
16149+#ifdef CONFIG_PAX_REFCOUNT
16150+ "jno 0f\n"
16151+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
16152+ "int $4\n0:\n"
16153+ _ASM_EXTABLE(0b, 0b)
16154+#endif
16155+
16156 "jns 1f\n"
16157 "call __read_lock_failed\n\t"
16158 "1:\n"
16159@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
16160 static inline void arch_write_lock(arch_rwlock_t *rw)
16161 {
16162 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
16163+
16164+#ifdef CONFIG_PAX_REFCOUNT
16165+ "jno 0f\n"
16166+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
16167+ "int $4\n0:\n"
16168+ _ASM_EXTABLE(0b, 0b)
16169+#endif
16170+
16171 "jz 1f\n"
16172 "call __write_lock_failed\n\t"
16173 "1:\n"
16174@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
16175
16176 static inline void arch_read_unlock(arch_rwlock_t *rw)
16177 {
16178- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
16179+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
16180+
16181+#ifdef CONFIG_PAX_REFCOUNT
16182+ "jno 0f\n"
16183+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
16184+ "int $4\n0:\n"
16185+ _ASM_EXTABLE(0b, 0b)
16186+#endif
16187+
16188 :"+m" (rw->lock) : : "memory");
16189 }
16190
16191 static inline void arch_write_unlock(arch_rwlock_t *rw)
16192 {
16193- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
16194+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
16195+
16196+#ifdef CONFIG_PAX_REFCOUNT
16197+ "jno 0f\n"
16198+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
16199+ "int $4\n0:\n"
16200+ _ASM_EXTABLE(0b, 0b)
16201+#endif
16202+
16203 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
16204 }
16205
16206diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
16207index 6a99859..03cb807 100644
16208--- a/arch/x86/include/asm/stackprotector.h
16209+++ b/arch/x86/include/asm/stackprotector.h
16210@@ -47,7 +47,7 @@
16211 * head_32 for boot CPU and setup_per_cpu_areas() for others.
16212 */
16213 #define GDT_STACK_CANARY_INIT \
16214- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
16215+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
16216
16217 /*
16218 * Initialize the stackprotector canary value.
16219@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
16220
16221 static inline void load_stack_canary_segment(void)
16222 {
16223-#ifdef CONFIG_X86_32
16224+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16225 asm volatile ("mov %0, %%gs" : : "r" (0));
16226 #endif
16227 }
16228diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
16229index 70bbe39..4ae2bd4 100644
16230--- a/arch/x86/include/asm/stacktrace.h
16231+++ b/arch/x86/include/asm/stacktrace.h
16232@@ -11,28 +11,20 @@
16233
16234 extern int kstack_depth_to_print;
16235
16236-struct thread_info;
16237+struct task_struct;
16238 struct stacktrace_ops;
16239
16240-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
16241- unsigned long *stack,
16242- unsigned long bp,
16243- const struct stacktrace_ops *ops,
16244- void *data,
16245- unsigned long *end,
16246- int *graph);
16247+typedef unsigned long walk_stack_t(struct task_struct *task,
16248+ void *stack_start,
16249+ unsigned long *stack,
16250+ unsigned long bp,
16251+ const struct stacktrace_ops *ops,
16252+ void *data,
16253+ unsigned long *end,
16254+ int *graph);
16255
16256-extern unsigned long
16257-print_context_stack(struct thread_info *tinfo,
16258- unsigned long *stack, unsigned long bp,
16259- const struct stacktrace_ops *ops, void *data,
16260- unsigned long *end, int *graph);
16261-
16262-extern unsigned long
16263-print_context_stack_bp(struct thread_info *tinfo,
16264- unsigned long *stack, unsigned long bp,
16265- const struct stacktrace_ops *ops, void *data,
16266- unsigned long *end, int *graph);
16267+extern walk_stack_t print_context_stack;
16268+extern walk_stack_t print_context_stack_bp;
16269
16270 /* Generic stack tracer with callbacks */
16271
16272@@ -40,7 +32,7 @@ struct stacktrace_ops {
16273 void (*address)(void *data, unsigned long address, int reliable);
16274 /* On negative return stop dumping */
16275 int (*stack)(void *data, char *name);
16276- walk_stack_t walk_stack;
16277+ walk_stack_t *walk_stack;
16278 };
16279
16280 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
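
[editor's note] The stacktrace.h hunk switches walk_stack_t from a pointer typedef to a function typedef, so the two walkers can be declared tersely as "extern walk_stack_t print_context_stack;" and the ops member becomes an explicit "walk_stack_t *". A minimal illustration of that C idiom, with stand-in names:

/* Function typedef vs pointer-to-function typedef. */
#include <stdio.h>

typedef int handler_t(int);   /* the function type itself, not a pointer */

int double_it(int x) { return 2 * x; }

extern handler_t double_it;   /* declaration through the typedef, as in the patch */

struct ops {
    handler_t *handler;       /* pointer to a function of that type */
};

int main(void)
{
    struct ops o = { double_it };
    printf("%d\n", o.handler(21));
    return 0;
}
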
16281diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
16282index 4ec45b3..a4f0a8a 100644
16283--- a/arch/x86/include/asm/switch_to.h
16284+++ b/arch/x86/include/asm/switch_to.h
16285@@ -108,7 +108,7 @@ do { \
16286 "call __switch_to\n\t" \
16287 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
16288 __switch_canary \
16289- "movq %P[thread_info](%%rsi),%%r8\n\t" \
16290+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
16291 "movq %%rax,%%rdi\n\t" \
16292 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
16293 "jnz ret_from_fork\n\t" \
16294@@ -119,7 +119,7 @@ do { \
16295 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
16296 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
16297 [_tif_fork] "i" (_TIF_FORK), \
16298- [thread_info] "i" (offsetof(struct task_struct, stack)), \
16299+ [thread_info] "m" (current_tinfo), \
16300 [current_task] "m" (current_task) \
16301 __switch_canary_iparam \
16302 : "memory", "cc" __EXTRA_CLOBBER)
16303diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
16304index 2d946e6..e453ec4 100644
16305--- a/arch/x86/include/asm/thread_info.h
16306+++ b/arch/x86/include/asm/thread_info.h
16307@@ -10,6 +10,7 @@
16308 #include <linux/compiler.h>
16309 #include <asm/page.h>
16310 #include <asm/types.h>
16311+#include <asm/percpu.h>
16312
16313 /*
16314 * low level task data that entry.S needs immediate access to
16315@@ -24,7 +25,6 @@ struct exec_domain;
16316 #include <linux/atomic.h>
16317
16318 struct thread_info {
16319- struct task_struct *task; /* main task structure */
16320 struct exec_domain *exec_domain; /* execution domain */
16321 __u32 flags; /* low level flags */
16322 __u32 status; /* thread synchronous flags */
16323@@ -34,19 +34,13 @@ struct thread_info {
16324 mm_segment_t addr_limit;
16325 struct restart_block restart_block;
16326 void __user *sysenter_return;
16327-#ifdef CONFIG_X86_32
16328- unsigned long previous_esp; /* ESP of the previous stack in
16329- case of nested (IRQ) stacks
16330- */
16331- __u8 supervisor_stack[0];
16332-#endif
16333+ unsigned long lowest_stack;
16334 unsigned int sig_on_uaccess_error:1;
16335 unsigned int uaccess_err:1; /* uaccess failed */
16336 };
16337
16338-#define INIT_THREAD_INFO(tsk) \
16339+#define INIT_THREAD_INFO \
16340 { \
16341- .task = &tsk, \
16342 .exec_domain = &default_exec_domain, \
16343 .flags = 0, \
16344 .cpu = 0, \
16345@@ -57,7 +51,7 @@ struct thread_info {
16346 }, \
16347 }
16348
16349-#define init_thread_info (init_thread_union.thread_info)
16350+#define init_thread_info (init_thread_union.stack)
16351 #define init_stack (init_thread_union.stack)
16352
16353 #else /* !__ASSEMBLY__ */
16354@@ -98,6 +92,7 @@ struct thread_info {
16355 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
16356 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
16357 #define TIF_X32 30 /* 32-bit native x86-64 binary */
16358+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
16359
16360 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
16361 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
16362@@ -122,17 +117,18 @@ struct thread_info {
16363 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
16364 #define _TIF_ADDR32 (1 << TIF_ADDR32)
16365 #define _TIF_X32 (1 << TIF_X32)
16366+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
16367
16368 /* work to do in syscall_trace_enter() */
16369 #define _TIF_WORK_SYSCALL_ENTRY \
16370 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
16371 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
16372- _TIF_NOHZ)
16373+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
16374
16375 /* work to do in syscall_trace_leave() */
16376 #define _TIF_WORK_SYSCALL_EXIT \
16377 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
16378- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
16379+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
16380
16381 /* work to do on interrupt/exception return */
16382 #define _TIF_WORK_MASK \
16383@@ -143,7 +139,7 @@ struct thread_info {
16384 /* work to do on any return to user space */
16385 #define _TIF_ALLWORK_MASK \
16386 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
16387- _TIF_NOHZ)
16388+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
16389
16390 /* Only used for 64 bit */
16391 #define _TIF_DO_NOTIFY_MASK \
16392@@ -159,45 +155,40 @@ struct thread_info {
16393
16394 #define PREEMPT_ACTIVE 0x10000000
16395
16396-#ifdef CONFIG_X86_32
16397-
16398-#define STACK_WARN (THREAD_SIZE/8)
16399-/*
16400- * macros/functions for gaining access to the thread information structure
16401- *
16402- * preempt_count needs to be 1 initially, until the scheduler is functional.
16403- */
16404-#ifndef __ASSEMBLY__
16405-
16406-
16407-/* how to get the current stack pointer from C */
16408-register unsigned long current_stack_pointer asm("esp") __used;
16409-
16410-/* how to get the thread information struct from C */
16411-static inline struct thread_info *current_thread_info(void)
16412-{
16413- return (struct thread_info *)
16414- (current_stack_pointer & ~(THREAD_SIZE - 1));
16415-}
16416-
16417-#else /* !__ASSEMBLY__ */
16418-
16419+#ifdef __ASSEMBLY__
16420 /* how to get the thread information struct from ASM */
16421 #define GET_THREAD_INFO(reg) \
16422- movl $-THREAD_SIZE, reg; \
16423- andl %esp, reg
16424+ mov PER_CPU_VAR(current_tinfo), reg
16425
16426 /* use this one if reg already contains %esp */
16427-#define GET_THREAD_INFO_WITH_ESP(reg) \
16428- andl $-THREAD_SIZE, reg
16429+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
16430+#else
16431+/* how to get the thread information struct from C */
16432+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
16433+
16434+static __always_inline struct thread_info *current_thread_info(void)
16435+{
16436+ return this_cpu_read_stable(current_tinfo);
16437+}
16438+#endif
16439+
16440+#ifdef CONFIG_X86_32
16441+
16442+#define STACK_WARN (THREAD_SIZE/8)
16443+/*
16444+ * macros/functions for gaining access to the thread information structure
16445+ *
16446+ * preempt_count needs to be 1 initially, until the scheduler is functional.
16447+ */
16448+#ifndef __ASSEMBLY__
16449+
16450+/* how to get the current stack pointer from C */
16451+register unsigned long current_stack_pointer asm("esp") __used;
16452
16453 #endif
16454
16455 #else /* X86_32 */
16456
16457-#include <asm/percpu.h>
16458-#define KERNEL_STACK_OFFSET (5*8)
16459-
16460 /*
16461 * macros/functions for gaining access to the thread information structure
16462 * preempt_count needs to be 1 initially, until the scheduler is functional.
16463@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
16464 #ifndef __ASSEMBLY__
16465 DECLARE_PER_CPU(unsigned long, kernel_stack);
16466
16467-static inline struct thread_info *current_thread_info(void)
16468-{
16469- struct thread_info *ti;
16470- ti = (void *)(this_cpu_read_stable(kernel_stack) +
16471- KERNEL_STACK_OFFSET - THREAD_SIZE);
16472- return ti;
16473-}
16474-
16475-#else /* !__ASSEMBLY__ */
16476-
16477-/* how to get the thread information struct from ASM */
16478-#define GET_THREAD_INFO(reg) \
16479- movq PER_CPU_VAR(kernel_stack),reg ; \
16480- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
16481-
16482-/*
16483- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
16484- * a certain register (to be used in assembler memory operands).
16485- */
16486-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
16487-
16488+/* how to get the current stack pointer from C */
16489+register unsigned long current_stack_pointer asm("rsp") __used;
16490 #endif
16491
16492 #endif /* !X86_32 */
16493@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
16494 extern void arch_task_cache_init(void);
16495 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
16496 extern void arch_release_task_struct(struct task_struct *tsk);
16497+
16498+#define __HAVE_THREAD_FUNCTIONS
16499+#define task_thread_info(task) (&(task)->tinfo)
16500+#define task_stack_page(task) ((task)->stack)
16501+#define setup_thread_stack(p, org) do {} while (0)
16502+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
16503+
16504 #endif
16505 #endif /* _ASM_X86_THREAD_INFO_H */
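
[editor's note] The thread_info.h rework above moves thread_info off the kernel stack entirely: current_thread_info() no longer derives the struct from the stack pointer but reads a per-cpu pointer (current_tinfo), which removes a classic stack-overflow corruption target. A userspace model of the new lookup, using __thread as a stand-in for DECLARE_PER_CPU and hypothetical field names:

/* Off-stack thread_info looked up through a per-cpu pointer. */
#include <stdio.h>

struct thread_info_model {
    unsigned flags;
    unsigned long lowest_stack;
};

static __thread struct thread_info_model *current_tinfo;

static struct thread_info_model *current_thread_info(void)
{
    return current_tinfo;   /* this_cpu_read_stable() in the kernel */
}

int main(void)
{
    struct thread_info_model ti = { .flags = 0x1 };
    current_tinfo = &ti;
    printf("flags = %#x\n", current_thread_info()->flags);
    return 0;
}
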
16506diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
16507index 1709801..0a60f2f 100644
16508--- a/arch/x86/include/asm/uaccess.h
16509+++ b/arch/x86/include/asm/uaccess.h
16510@@ -7,6 +7,7 @@
16511 #include <linux/compiler.h>
16512 #include <linux/thread_info.h>
16513 #include <linux/string.h>
16514+#include <linux/sched.h>
16515 #include <asm/asm.h>
16516 #include <asm/page.h>
16517 #include <asm/smap.h>
16518@@ -29,7 +30,12 @@
16519
16520 #define get_ds() (KERNEL_DS)
16521 #define get_fs() (current_thread_info()->addr_limit)
16522+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16523+void __set_fs(mm_segment_t x);
16524+void set_fs(mm_segment_t x);
16525+#else
16526 #define set_fs(x) (current_thread_info()->addr_limit = (x))
16527+#endif
16528
16529 #define segment_eq(a, b) ((a).seg == (b).seg)
16530
16531@@ -77,8 +83,33 @@
16532 * checks that the pointer is in the user space range - after calling
16533 * this function, memory access functions may still return -EFAULT.
16534 */
16535-#define access_ok(type, addr, size) \
16536- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
16537+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
16538+#define access_ok(type, addr, size) \
16539+({ \
16540+ long __size = size; \
16541+ unsigned long __addr = (unsigned long)addr; \
16542+ unsigned long __addr_ao = __addr & PAGE_MASK; \
16543+ unsigned long __end_ao = __addr + __size - 1; \
16544+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
16545+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
16546+ while(__addr_ao <= __end_ao) { \
16547+ char __c_ao; \
16548+ __addr_ao += PAGE_SIZE; \
16549+ if (__size > PAGE_SIZE) \
16550+ cond_resched(); \
16551+ if (__get_user(__c_ao, (char __user *)__addr)) \
16552+ break; \
16553+ if (type != VERIFY_WRITE) { \
16554+ __addr = __addr_ao; \
16555+ continue; \
16556+ } \
16557+ if (__put_user(__c_ao, (char __user *)__addr)) \
16558+ break; \
16559+ __addr = __addr_ao; \
16560+ } \
16561+ } \
16562+ __ret_ao; \
16563+})
16564
16565 /*
16566 * The exception table consists of pairs of addresses relative to the
16567@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
16568 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
16569 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
16570
16571-
16572+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16573+#define __copyuser_seg "gs;"
16574+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
16575+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
16576+#else
16577+#define __copyuser_seg
16578+#define __COPYUSER_SET_ES
16579+#define __COPYUSER_RESTORE_ES
16580+#endif
16581
16582 #ifdef CONFIG_X86_32
16583 #define __put_user_asm_u64(x, addr, err, errret) \
16584 asm volatile(ASM_STAC "\n" \
16585- "1: movl %%eax,0(%2)\n" \
16586- "2: movl %%edx,4(%2)\n" \
16587+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
16588+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
16589 "3: " ASM_CLAC "\n" \
16590 ".section .fixup,\"ax\"\n" \
16591 "4: movl %3,%0\n" \
16592@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
16593
16594 #define __put_user_asm_ex_u64(x, addr) \
16595 asm volatile(ASM_STAC "\n" \
16596- "1: movl %%eax,0(%1)\n" \
16597- "2: movl %%edx,4(%1)\n" \
16598+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
16599+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
16600 "3: " ASM_CLAC "\n" \
16601 _ASM_EXTABLE_EX(1b, 2b) \
16602 _ASM_EXTABLE_EX(2b, 3b) \
16603@@ -259,7 +298,7 @@ extern void __put_user_8(void);
16604 __typeof__(*(ptr)) __pu_val; \
16605 __chk_user_ptr(ptr); \
16606 might_fault(); \
16607- __pu_val = x; \
16608+ __pu_val = (x); \
16609 switch (sizeof(*(ptr))) { \
16610 case 1: \
16611 __put_user_x(1, __pu_val, ptr, __ret_pu); \
16612@@ -358,7 +397,7 @@ do { \
16613
16614 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
16615 asm volatile(ASM_STAC "\n" \
16616- "1: mov"itype" %2,%"rtype"1\n" \
16617+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
16618 "2: " ASM_CLAC "\n" \
16619 ".section .fixup,\"ax\"\n" \
16620 "3: mov %3,%0\n" \
16621@@ -366,7 +405,7 @@ do { \
16622 " jmp 2b\n" \
16623 ".previous\n" \
16624 _ASM_EXTABLE(1b, 3b) \
16625- : "=r" (err), ltype(x) \
16626+ : "=r" (err), ltype (x) \
16627 : "m" (__m(addr)), "i" (errret), "0" (err))
16628
16629 #define __get_user_size_ex(x, ptr, size) \
16630@@ -391,7 +430,7 @@ do { \
16631 } while (0)
16632
16633 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
16634- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
16635+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
16636 "2:\n" \
16637 _ASM_EXTABLE_EX(1b, 2b) \
16638 : ltype(x) : "m" (__m(addr)))
16639@@ -408,13 +447,24 @@ do { \
16640 int __gu_err; \
16641 unsigned long __gu_val; \
16642 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
16643- (x) = (__force __typeof__(*(ptr)))__gu_val; \
16644+ (x) = (__typeof__(*(ptr)))__gu_val; \
16645 __gu_err; \
16646 })
16647
16648 /* FIXME: this hack is definitely wrong -AK */
16649 struct __large_struct { unsigned long buf[100]; };
16650-#define __m(x) (*(struct __large_struct __user *)(x))
16651+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16652+#define ____m(x) \
16653+({ \
16654+ unsigned long ____x = (unsigned long)(x); \
16655+ if (____x < PAX_USER_SHADOW_BASE) \
16656+ ____x += PAX_USER_SHADOW_BASE; \
16657+ (void __user *)____x; \
16658+})
16659+#else
16660+#define ____m(x) (x)
16661+#endif
16662+#define __m(x) (*(struct __large_struct __user *)____m(x))
16663
16664 /*
16665 * Tell gcc we read from memory instead of writing: this is because
16666@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
16667 */
16668 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
16669 asm volatile(ASM_STAC "\n" \
16670- "1: mov"itype" %"rtype"1,%2\n" \
16671+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
16672 "2: " ASM_CLAC "\n" \
16673 ".section .fixup,\"ax\"\n" \
16674 "3: mov %3,%0\n" \
16675@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
16676 ".previous\n" \
16677 _ASM_EXTABLE(1b, 3b) \
16678 : "=r"(err) \
16679- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
16680+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
16681
16682 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
16683- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
16684+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
16685 "2:\n" \
16686 _ASM_EXTABLE_EX(1b, 2b) \
16687 : : ltype(x), "m" (__m(addr)))
16688@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
16689 * On error, the variable @x is set to zero.
16690 */
16691
16692+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16693+#define __get_user(x, ptr) get_user((x), (ptr))
16694+#else
16695 #define __get_user(x, ptr) \
16696 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
16697+#endif
16698
16699 /**
16700 * __put_user: - Write a simple value into user space, with less checking.
16701@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
16702 * Returns zero on success, or -EFAULT on error.
16703 */
16704
16705+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16706+#define __put_user(x, ptr) put_user((x), (ptr))
16707+#else
16708 #define __put_user(x, ptr) \
16709 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
16710+#endif
16711
16712 #define __get_user_unaligned __get_user
16713 #define __put_user_unaligned __put_user
16714@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
16715 #define get_user_ex(x, ptr) do { \
16716 unsigned long __gue_val; \
16717 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
16718- (x) = (__force __typeof__(*(ptr)))__gue_val; \
16719+ (x) = (__typeof__(*(ptr)))__gue_val; \
16720 } while (0)
16721
16722 #define put_user_try uaccess_try
16723@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
16724 extern __must_check long strlen_user(const char __user *str);
16725 extern __must_check long strnlen_user(const char __user *str, long n);
16726
16727-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
16728-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
16729+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16730+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16731
16732 /*
16733 * movsl can be slow when source and dest are not both 8-byte aligned
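
The __m()/____m() change above is the core of UDEREF's 64-bit pointer handling: a user pointer below PAX_USER_SHADOW_BASE is rebased into a shadow mapping before the kernel dereferences it, and a pointer at or above the base passes through unchanged, which keeps double application harmless. A minimal userspace sketch of that conditional rebase, with SHADOW_BASE as a placeholder (the real constant is an architecture-specific value defined elsewhere in this patch):

/* Model of the ____m() rebase; assumes an LP64 target. */
#include <stdio.h>
#include <stdint.h>

#define SHADOW_BASE 0x1000000000000UL  /* placeholder, not the kernel's value */

static uintptr_t shadow_rebase(uintptr_t p)
{
        /* below the base: raw user pointer, push it into the shadow;
         * at or above: already rebased, leave it alone */
        return p < SHADOW_BASE ? p + SHADOW_BASE : p;
}

int main(void)
{
        uintptr_t user_ptr = 0x7fffdeadb000UL;
        printf("%#lx -> %#lx\n", (unsigned long)user_ptr,
               (unsigned long)shadow_rebase(user_ptr));
        printf("idempotent: %#lx\n",
               (unsigned long)shadow_rebase(shadow_rebase(user_ptr)));
        return 0;
}
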
16734diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
16735index 7f760a9..04b1c65 100644
16736--- a/arch/x86/include/asm/uaccess_32.h
16737+++ b/arch/x86/include/asm/uaccess_32.h
16738@@ -11,15 +11,15 @@
16739 #include <asm/page.h>
16740
16741 unsigned long __must_check __copy_to_user_ll
16742- (void __user *to, const void *from, unsigned long n);
16743+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
16744 unsigned long __must_check __copy_from_user_ll
16745- (void *to, const void __user *from, unsigned long n);
16746+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16747 unsigned long __must_check __copy_from_user_ll_nozero
16748- (void *to, const void __user *from, unsigned long n);
16749+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16750 unsigned long __must_check __copy_from_user_ll_nocache
16751- (void *to, const void __user *from, unsigned long n);
16752+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16753 unsigned long __must_check __copy_from_user_ll_nocache_nozero
16754- (void *to, const void __user *from, unsigned long n);
16755+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16756
16757 /**
16758 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
16759@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
16760 static __always_inline unsigned long __must_check
16761 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
16762 {
16763+ if ((long)n < 0)
16764+ return n;
16765+
16766+ check_object_size(from, n, true);
16767+
16768 if (__builtin_constant_p(n)) {
16769 unsigned long ret;
16770
16771@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
16772 __copy_to_user(void __user *to, const void *from, unsigned long n)
16773 {
16774 might_fault();
16775+
16776 return __copy_to_user_inatomic(to, from, n);
16777 }
16778
16779 static __always_inline unsigned long
16780 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
16781 {
16782+ if ((long)n < 0)
16783+ return n;
16784+
16785 /* Avoid zeroing the tail if the copy fails..
16786 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
16787 * but as the zeroing behaviour is only significant when n is not
16788@@ -137,6 +146,12 @@ static __always_inline unsigned long
16789 __copy_from_user(void *to, const void __user *from, unsigned long n)
16790 {
16791 might_fault();
16792+
16793+ if ((long)n < 0)
16794+ return n;
16795+
16796+ check_object_size(to, n, false);
16797+
16798 if (__builtin_constant_p(n)) {
16799 unsigned long ret;
16800
16801@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
16802 const void __user *from, unsigned long n)
16803 {
16804 might_fault();
16805+
16806+ if ((long)n < 0)
16807+ return n;
16808+
16809 if (__builtin_constant_p(n)) {
16810 unsigned long ret;
16811
16812@@ -181,15 +200,19 @@ static __always_inline unsigned long
16813 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
16814 unsigned long n)
16815 {
16816- return __copy_from_user_ll_nocache_nozero(to, from, n);
16817+ if ((long)n < 0)
16818+ return n;
16819+
16820+ return __copy_from_user_ll_nocache_nozero(to, from, n);
16821 }
16822
16823-unsigned long __must_check copy_to_user(void __user *to,
16824- const void *from, unsigned long n);
16825-unsigned long __must_check _copy_from_user(void *to,
16826- const void __user *from,
16827- unsigned long n);
16828-
16829+extern void copy_to_user_overflow(void)
16830+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16831+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16832+#else
16833+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16834+#endif
16835+;
16836
16837 extern void copy_from_user_overflow(void)
16838 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16839@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
16840 #endif
16841 ;
16842
16843-static inline unsigned long __must_check copy_from_user(void *to,
16844- const void __user *from,
16845- unsigned long n)
16846+/**
16847+ * copy_to_user: - Copy a block of data into user space.
16848+ * @to: Destination address, in user space.
16849+ * @from: Source address, in kernel space.
16850+ * @n: Number of bytes to copy.
16851+ *
16852+ * Context: User context only. This function may sleep.
16853+ *
16854+ * Copy data from kernel space to user space.
16855+ *
16856+ * Returns number of bytes that could not be copied.
16857+ * On success, this will be zero.
16858+ */
16859+static inline unsigned long __must_check
16860+copy_to_user(void __user *to, const void *from, unsigned long n)
16861 {
16862- int sz = __compiletime_object_size(to);
16863+ size_t sz = __compiletime_object_size(from);
16864
16865- if (likely(sz == -1 || sz >= n))
16866- n = _copy_from_user(to, from, n);
16867- else
16868+ if (unlikely(sz != (size_t)-1 && sz < n))
16869+ copy_to_user_overflow();
16870+ else if (access_ok(VERIFY_WRITE, to, n))
16871+ n = __copy_to_user(to, from, n);
16872+ return n;
16873+}
16874+
16875+/**
16876+ * copy_from_user: - Copy a block of data from user space.
16877+ * @to: Destination address, in kernel space.
16878+ * @from: Source address, in user space.
16879+ * @n: Number of bytes to copy.
16880+ *
16881+ * Context: User context only. This function may sleep.
16882+ *
16883+ * Copy data from user space to kernel space.
16884+ *
16885+ * Returns number of bytes that could not be copied.
16886+ * On success, this will be zero.
16887+ *
16888+ * If some data could not be copied, this function will pad the copied
16889+ * data to the requested size using zero bytes.
16890+ */
16891+static inline unsigned long __must_check
16892+copy_from_user(void *to, const void __user *from, unsigned long n)
16893+{
16894+ size_t sz = __compiletime_object_size(to);
16895+
16896+ check_object_size(to, n, false);
16897+
16898+ if (unlikely(sz != (size_t)-1 && sz < n))
16899 copy_from_user_overflow();
16900-
16901+ else if (access_ok(VERIFY_READ, from, n))
16902+ n = __copy_from_user(to, from, n);
16903+ else if ((long)n > 0)
16904+ memset(to, 0, n);
16905 return n;
16906 }
16907
16908diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16909index 142810c..1f2a0a7 100644
16910--- a/arch/x86/include/asm/uaccess_64.h
16911+++ b/arch/x86/include/asm/uaccess_64.h
16912@@ -10,6 +10,9 @@
16913 #include <asm/alternative.h>
16914 #include <asm/cpufeature.h>
16915 #include <asm/page.h>
16916+#include <asm/pgtable.h>
16917+
16918+#define set_fs(x) (current_thread_info()->addr_limit = (x))
16919
16920 /*
16921 * Copy To/From Userspace
16922@@ -17,13 +20,13 @@
16923
16924 /* Handles exceptions in both to and from, but doesn't do access_ok */
16925 __must_check unsigned long
16926-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16927+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16928 __must_check unsigned long
16929-copy_user_generic_string(void *to, const void *from, unsigned len);
16930+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16931 __must_check unsigned long
16932-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16933+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16934
16935-static __always_inline __must_check unsigned long
16936+static __always_inline __must_check __size_overflow(3) unsigned long
16937 copy_user_generic(void *to, const void *from, unsigned len)
16938 {
16939 unsigned ret;
16940@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16941 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16942 "=d" (len)),
16943 "1" (to), "2" (from), "3" (len)
16944- : "memory", "rcx", "r8", "r9", "r10", "r11");
16945+ : "memory", "rcx", "r8", "r9", "r11");
16946 return ret;
16947 }
16948
16949+static __always_inline __must_check unsigned long
16950+__copy_to_user(void __user *to, const void *from, unsigned long len);
16951+static __always_inline __must_check unsigned long
16952+__copy_from_user(void *to, const void __user *from, unsigned long len);
16953 __must_check unsigned long
16954-_copy_to_user(void __user *to, const void *from, unsigned len);
16955-__must_check unsigned long
16956-_copy_from_user(void *to, const void __user *from, unsigned len);
16957-__must_check unsigned long
16958-copy_in_user(void __user *to, const void __user *from, unsigned len);
16959+copy_in_user(void __user *to, const void __user *from, unsigned long len);
16960+
16961+extern void copy_to_user_overflow(void)
16962+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16963+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16964+#else
16965+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16966+#endif
16967+;
16968+
16969+extern void copy_from_user_overflow(void)
16970+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16971+ __compiletime_error("copy_from_user() buffer size is not provably correct")
16972+#else
16973+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
16974+#endif
16975+;
16976
16977 static inline unsigned long __must_check copy_from_user(void *to,
16978 const void __user *from,
16979 unsigned long n)
16980 {
16981- int sz = __compiletime_object_size(to);
16982-
16983 might_fault();
16984- if (likely(sz == -1 || sz >= n))
16985- n = _copy_from_user(to, from, n);
16986-#ifdef CONFIG_DEBUG_VM
16987- else
16988- WARN(1, "Buffer overflow detected!\n");
16989-#endif
16990+
16991+ check_object_size(to, n, false);
16992+
16993+ if (access_ok(VERIFY_READ, from, n))
16994+ n = __copy_from_user(to, from, n);
16995+ else if (n < INT_MAX)
16996+ memset(to, 0, n);
16997 return n;
16998 }
16999
17000 static __always_inline __must_check
17001-int copy_to_user(void __user *dst, const void *src, unsigned size)
17002+int copy_to_user(void __user *dst, const void *src, unsigned long size)
17003 {
17004 might_fault();
17005
17006- return _copy_to_user(dst, src, size);
17007+ if (access_ok(VERIFY_WRITE, dst, size))
17008+ size = __copy_to_user(dst, src, size);
17009+ return size;
17010 }
17011
17012 static __always_inline __must_check
17013-int __copy_from_user(void *dst, const void __user *src, unsigned size)
17014+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
17015 {
17016- int ret = 0;
17017+ size_t sz = __compiletime_object_size(dst);
17018+ unsigned ret = 0;
17019
17020 might_fault();
17021+
17022+ if (size > INT_MAX)
17023+ return size;
17024+
17025+ check_object_size(dst, size, false);
17026+
17027+#ifdef CONFIG_PAX_MEMORY_UDEREF
17028+ if (!__access_ok(VERIFY_READ, src, size))
17029+ return size;
17030+#endif
17031+
17032+ if (unlikely(sz != (size_t)-1 && sz < size)) {
17033+ copy_from_user_overflow();
17034+ return size;
17035+ }
17036+
17037 if (!__builtin_constant_p(size))
17038- return copy_user_generic(dst, (__force void *)src, size);
17039+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17040 switch (size) {
17041- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
17042+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
17043 ret, "b", "b", "=q", 1);
17044 return ret;
17045- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
17046+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
17047 ret, "w", "w", "=r", 2);
17048 return ret;
17049- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
17050+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
17051 ret, "l", "k", "=r", 4);
17052 return ret;
17053- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
17054+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17055 ret, "q", "", "=r", 8);
17056 return ret;
17057 case 10:
17058- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
17059+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17060 ret, "q", "", "=r", 10);
17061 if (unlikely(ret))
17062 return ret;
17063 __get_user_asm(*(u16 *)(8 + (char *)dst),
17064- (u16 __user *)(8 + (char __user *)src),
17065+ (const u16 __user *)(8 + (const char __user *)src),
17066 ret, "w", "w", "=r", 2);
17067 return ret;
17068 case 16:
17069- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
17070+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17071 ret, "q", "", "=r", 16);
17072 if (unlikely(ret))
17073 return ret;
17074 __get_user_asm(*(u64 *)(8 + (char *)dst),
17075- (u64 __user *)(8 + (char __user *)src),
17076+ (const u64 __user *)(8 + (const char __user *)src),
17077 ret, "q", "", "=r", 8);
17078 return ret;
17079 default:
17080- return copy_user_generic(dst, (__force void *)src, size);
17081+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17082 }
17083 }
17084
17085 static __always_inline __must_check
17086-int __copy_to_user(void __user *dst, const void *src, unsigned size)
17087+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
17088 {
17089- int ret = 0;
17090+ size_t sz = __compiletime_object_size(src);
17091+ unsigned ret = 0;
17092
17093 might_fault();
17094+
17095+ if (size > INT_MAX)
17096+ return size;
17097+
17098+ check_object_size(src, size, true);
17099+
17100+#ifdef CONFIG_PAX_MEMORY_UDEREF
17101+ if (!__access_ok(VERIFY_WRITE, dst, size))
17102+ return size;
17103+#endif
17104+
17105+ if (unlikely(sz != (size_t)-1 && sz < size)) {
17106+ copy_to_user_overflow();
17107+ return size;
17108+ }
17109+
17110 if (!__builtin_constant_p(size))
17111- return copy_user_generic((__force void *)dst, src, size);
17112+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17113 switch (size) {
17114- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
17115+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
17116 ret, "b", "b", "iq", 1);
17117 return ret;
17118- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
17119+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
17120 ret, "w", "w", "ir", 2);
17121 return ret;
17122- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
17123+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
17124 ret, "l", "k", "ir", 4);
17125 return ret;
17126- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
17127+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17128 ret, "q", "", "er", 8);
17129 return ret;
17130 case 10:
17131- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
17132+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17133 ret, "q", "", "er", 10);
17134 if (unlikely(ret))
17135 return ret;
17136 asm("":::"memory");
17137- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
17138+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
17139 ret, "w", "w", "ir", 2);
17140 return ret;
17141 case 16:
17142- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
17143+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17144 ret, "q", "", "er", 16);
17145 if (unlikely(ret))
17146 return ret;
17147 asm("":::"memory");
17148- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
17149+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
17150 ret, "q", "", "er", 8);
17151 return ret;
17152 default:
17153- return copy_user_generic((__force void *)dst, src, size);
17154+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17155 }
17156 }
17157
17158 static __always_inline __must_check
17159-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17160+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
17161 {
17162- int ret = 0;
17163+ unsigned ret = 0;
17164
17165 might_fault();
17166+
17167+ if (size > INT_MAX)
17168+ return size;
17169+
17170+#ifdef CONFIG_PAX_MEMORY_UDEREF
17171+ if (!__access_ok(VERIFY_READ, src, size))
17172+ return size;
17173+ if (!__access_ok(VERIFY_WRITE, dst, size))
17174+ return size;
17175+#endif
17176+
17177 if (!__builtin_constant_p(size))
17178- return copy_user_generic((__force void *)dst,
17179- (__force void *)src, size);
17180+ return copy_user_generic((__force_kernel void *)____m(dst),
17181+ (__force_kernel const void *)____m(src), size);
17182 switch (size) {
17183 case 1: {
17184 u8 tmp;
17185- __get_user_asm(tmp, (u8 __user *)src,
17186+ __get_user_asm(tmp, (const u8 __user *)src,
17187 ret, "b", "b", "=q", 1);
17188 if (likely(!ret))
17189 __put_user_asm(tmp, (u8 __user *)dst,
17190@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17191 }
17192 case 2: {
17193 u16 tmp;
17194- __get_user_asm(tmp, (u16 __user *)src,
17195+ __get_user_asm(tmp, (const u16 __user *)src,
17196 ret, "w", "w", "=r", 2);
17197 if (likely(!ret))
17198 __put_user_asm(tmp, (u16 __user *)dst,
17199@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17200
17201 case 4: {
17202 u32 tmp;
17203- __get_user_asm(tmp, (u32 __user *)src,
17204+ __get_user_asm(tmp, (const u32 __user *)src,
17205 ret, "l", "k", "=r", 4);
17206 if (likely(!ret))
17207 __put_user_asm(tmp, (u32 __user *)dst,
17208@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17209 }
17210 case 8: {
17211 u64 tmp;
17212- __get_user_asm(tmp, (u64 __user *)src,
17213+ __get_user_asm(tmp, (const u64 __user *)src,
17214 ret, "q", "", "=r", 8);
17215 if (likely(!ret))
17216 __put_user_asm(tmp, (u64 __user *)dst,
17217@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17218 return ret;
17219 }
17220 default:
17221- return copy_user_generic((__force void *)dst,
17222- (__force void *)src, size);
17223+ return copy_user_generic((__force_kernel void *)____m(dst),
17224+ (__force_kernel const void *)____m(src), size);
17225 }
17226 }
17227
17228 static __must_check __always_inline int
17229-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
17230+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
17231 {
17232- return copy_user_generic(dst, (__force const void *)src, size);
17233+ if (size > INT_MAX)
17234+ return size;
17235+
17236+#ifdef CONFIG_PAX_MEMORY_UDEREF
17237+ if (!__access_ok(VERIFY_READ, src, size))
17238+ return size;
17239+#endif
17240+
17241+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17242 }
17243
17244-static __must_check __always_inline int
17245-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
17246+static __must_check __always_inline unsigned long
17247+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
17248 {
17249- return copy_user_generic((__force void *)dst, src, size);
17250+ if (size > INT_MAX)
17251+ return size;
17252+
17253+#ifdef CONFIG_PAX_MEMORY_UDEREF
17254+ if (!__access_ok(VERIFY_WRITE, dst, size))
17255+ return size;
17256+#endif
17257+
17258+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17259 }
17260
17261-extern long __copy_user_nocache(void *dst, const void __user *src,
17262- unsigned size, int zerorest);
17263+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
17264+ unsigned long size, int zerorest) __size_overflow(3);
17265
17266-static inline int
17267-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
17268+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
17269 {
17270 might_sleep();
17271+
17272+ if (size > INT_MAX)
17273+ return size;
17274+
17275+#ifdef CONFIG_PAX_MEMORY_UDEREF
17276+ if (!__access_ok(VERIFY_READ, src, size))
17277+ return size;
17278+#endif
17279+
17280 return __copy_user_nocache(dst, src, size, 1);
17281 }
17282
17283-static inline int
17284-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
17285- unsigned size)
17286+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
17287+ unsigned long size)
17288 {
17289+ if (size > INT_MAX)
17290+ return size;
17291+
17292+#ifdef CONFIG_PAX_MEMORY_UDEREF
17293+ if (!__access_ok(VERIFY_READ, src, size))
17294+ return size;
17295+#endif
17296+
17297 return __copy_user_nocache(dst, src, size, 0);
17298 }
17299
17300-unsigned long
17301-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
17302+extern unsigned long
17303+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
17304
17305 #endif /* _ASM_X86_UACCESS_64_H */
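
A recurring guard in the 64-bit helpers above is `if (size > INT_MAX) return size;` after the length parameter is widened from unsigned to unsigned long. The point is to reject lengths that older int-typed interfaces would have reinterpreted as negative. A small illustration of the failure mode being closed off (the conversion result is implementation-defined, negative on the usual two's-complement targets):

#include <stdio.h>
#include <limits.h>

static int fits_in_buffer(int len) { return len <= 4096; }

int main(void)
{
        unsigned long huge = 0x80000000UL;  /* 2 GiB, > INT_MAX */
        int as_int = (int)huge;             /* typically wraps negative */
        printf("as int: %d, old-style check passes: %d\n",
               as_int, fits_in_buffer(as_int));
        printf("guarded: %s\n",
               huge > (unsigned long)INT_MAX ? "rejected" : "allowed");
        return 0;
}
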
17306diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
17307index 5b238981..77fdd78 100644
17308--- a/arch/x86/include/asm/word-at-a-time.h
17309+++ b/arch/x86/include/asm/word-at-a-time.h
17310@@ -11,7 +11,7 @@
17311 * and shift, for example.
17312 */
17313 struct word_at_a_time {
17314- const unsigned long one_bits, high_bits;
17315+ unsigned long one_bits, high_bits;
17316 };
17317
17318 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
17319diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
17320index 5769349..a3d3e2a 100644
17321--- a/arch/x86/include/asm/x86_init.h
17322+++ b/arch/x86/include/asm/x86_init.h
17323@@ -141,7 +141,7 @@ struct x86_init_ops {
17324 struct x86_init_timers timers;
17325 struct x86_init_iommu iommu;
17326 struct x86_init_pci pci;
17327-};
17328+} __no_const;
17329
17330 /**
17331 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
17332@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
17333 void (*setup_percpu_clockev)(void);
17334 void (*early_percpu_clock_init)(void);
17335 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
17336-};
17337+} __no_const;
17338
17339 /**
17340 * struct x86_platform_ops - platform specific runtime functions
17341@@ -178,7 +178,7 @@ struct x86_platform_ops {
17342 void (*save_sched_clock_state)(void);
17343 void (*restore_sched_clock_state)(void);
17344 void (*apic_post_init)(void);
17345-};
17346+} __no_const;
17347
17348 struct pci_dev;
17349
17350@@ -187,14 +187,14 @@ struct x86_msi_ops {
17351 void (*teardown_msi_irq)(unsigned int irq);
17352 void (*teardown_msi_irqs)(struct pci_dev *dev);
17353 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
17354-};
17355+} __no_const;
17356
17357 struct x86_io_apic_ops {
17358 void (*init) (void);
17359 unsigned int (*read) (unsigned int apic, unsigned int reg);
17360 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
17361 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
17362-};
17363+} __no_const;
17364
17365 extern struct x86_init_ops x86_init;
17366 extern struct x86_cpuinit_ops x86_cpuinit;
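
The __no_const annotations above are the flip side of PaX's constify plugin, which makes structures consisting of function pointers implicitly const and places them in read-only data; these x86 ops tables are filled in at runtime by platform code, so they must opt out. The payoff of constification itself is ordinary C: a const ops table cannot have its function pointers retargeted. A minimal sketch:

#include <stdio.h>

struct io_ops {
        void (*write)(const char *);
};

static void real_write(const char *s) { fputs(s, stdout); }

static const struct io_ops ops = { .write = real_write };  /* sealed */

int main(void)
{
        ops.write("hello\n");
        /* ops.write = evil_write;  -- rejected: object is const */
        return 0;
}

Structures that genuinely need runtime assignment either carry __no_const, as here, or are written through pax_open_kernel(), as in the io_apic.c hunk further down.
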
17367diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
17368index 0415cda..b43d877 100644
17369--- a/arch/x86/include/asm/xsave.h
17370+++ b/arch/x86/include/asm/xsave.h
17371@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
17372 return -EFAULT;
17373
17374 __asm__ __volatile__(ASM_STAC "\n"
17375- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
17376+ "1:"
17377+ __copyuser_seg
17378+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
17379 "2: " ASM_CLAC "\n"
17380 ".section .fixup,\"ax\"\n"
17381 "3: movl $-1,%[err]\n"
17382@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
17383 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
17384 {
17385 int err;
17386- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
17387+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
17388 u32 lmask = mask;
17389 u32 hmask = mask >> 32;
17390
17391 __asm__ __volatile__(ASM_STAC "\n"
17392- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
17393+ "1:"
17394+ __copyuser_seg
17395+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
17396 "2: " ASM_CLAC "\n"
17397 ".section .fixup,\"ax\"\n"
17398 "3: movl $-1,%[err]\n"
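
The xsave/xrstor hunks show how __copyuser_seg gets spliced into instruction templates: adjacent C string literals concatenate, so the macro (empty on configurations without UDEREF, a segment-override prefix otherwise) lands between the label and the opcode bytes. A sketch of the preprocessor mechanics, with the "gs; " expansion assumed purely for illustration:

#include <stdio.h>

#define SEG_PREFIX "gs; "   /* assumed expansion; the patch defines it per-config */

int main(void)
{
        const char *tmpl = "1: " SEG_PREFIX "movl %%eax,0(%2)\n";
        fputs(tmpl, stdout);    /* inside asm(), %% would collapse to % */
        return 0;
}
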
17399diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
17400index bbae024..e1528f9 100644
17401--- a/arch/x86/include/uapi/asm/e820.h
17402+++ b/arch/x86/include/uapi/asm/e820.h
17403@@ -63,7 +63,7 @@ struct e820map {
17404 #define ISA_START_ADDRESS 0xa0000
17405 #define ISA_END_ADDRESS 0x100000
17406
17407-#define BIOS_BEGIN 0x000a0000
17408+#define BIOS_BEGIN 0x000c0000
17409 #define BIOS_END 0x00100000
17410
17411 #define BIOS_ROM_BASE 0xffe00000
17412diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
17413index 34e923a..0c6bb6e 100644
17414--- a/arch/x86/kernel/Makefile
17415+++ b/arch/x86/kernel/Makefile
17416@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
17417 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
17418 obj-$(CONFIG_IRQ_WORK) += irq_work.o
17419 obj-y += probe_roms.o
17420-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
17421+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
17422 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
17423 obj-y += syscall_$(BITS).o
17424 obj-$(CONFIG_X86_64) += vsyscall_64.o
17425diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
17426index bacf4b0..4ede72e 100644
17427--- a/arch/x86/kernel/acpi/boot.c
17428+++ b/arch/x86/kernel/acpi/boot.c
17429@@ -1358,7 +1358,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
17430 * If your system is blacklisted here, but you find that acpi=force
17431 * works for you, please contact linux-acpi@vger.kernel.org
17432 */
17433-static struct dmi_system_id __initdata acpi_dmi_table[] = {
17434+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
17435 /*
17436 * Boxes that need ACPI disabled
17437 */
17438@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
17439 };
17440
17441 /* second table for DMI checks that should run after early-quirks */
17442-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
17443+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
17444 /*
17445 * HP laptops which use a DSDT reporting as HP/SB400/10000,
17446 * which includes some code which overrides all temperature
17447diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
17448index d5e0d71..6533e08 100644
17449--- a/arch/x86/kernel/acpi/sleep.c
17450+++ b/arch/x86/kernel/acpi/sleep.c
17451@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
17452 #else /* CONFIG_64BIT */
17453 #ifdef CONFIG_SMP
17454 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
17455+
17456+ pax_open_kernel();
17457 early_gdt_descr.address =
17458 (unsigned long)get_cpu_gdt_table(smp_processor_id());
17459+ pax_close_kernel();
17460+
17461 initial_gs = per_cpu_offset(smp_processor_id());
17462 #endif
17463 initial_code = (unsigned long)wakeup_long64;
17464diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
17465index 13ab720..95d5442 100644
17466--- a/arch/x86/kernel/acpi/wakeup_32.S
17467+++ b/arch/x86/kernel/acpi/wakeup_32.S
17468@@ -30,13 +30,11 @@ wakeup_pmode_return:
17469 # and restore the stack ... but you need gdt for this to work
17470 movl saved_context_esp, %esp
17471
17472- movl %cs:saved_magic, %eax
17473- cmpl $0x12345678, %eax
17474+ cmpl $0x12345678, saved_magic
17475 jne bogus_magic
17476
17477 # jump to place where we left off
17478- movl saved_eip, %eax
17479- jmp *%eax
17480+ jmp *(saved_eip)
17481
17482 bogus_magic:
17483 jmp bogus_magic
17484diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
17485index ef5ccca..bd83949 100644
17486--- a/arch/x86/kernel/alternative.c
17487+++ b/arch/x86/kernel/alternative.c
17488@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
17489 */
17490 for (a = start; a < end; a++) {
17491 instr = (u8 *)&a->instr_offset + a->instr_offset;
17492+
17493+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17494+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
17495+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
17496+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
17497+#endif
17498+
17499 replacement = (u8 *)&a->repl_offset + a->repl_offset;
17500 BUG_ON(a->replacementlen > a->instrlen);
17501 BUG_ON(a->instrlen > sizeof(insnbuf));
17502@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
17503 for (poff = start; poff < end; poff++) {
17504 u8 *ptr = (u8 *)poff + *poff;
17505
17506+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17507+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
17508+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
17509+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
17510+#endif
17511+
17512 if (!*poff || ptr < text || ptr >= text_end)
17513 continue;
17514 /* turn DS segment override prefix into lock prefix */
17515- if (*ptr == 0x3e)
17516+ if (*ktla_ktva(ptr) == 0x3e)
17517 text_poke(ptr, ((unsigned char []){0xf0}), 1);
17518 }
17519 mutex_unlock(&text_mutex);
17520@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
17521 for (poff = start; poff < end; poff++) {
17522 u8 *ptr = (u8 *)poff + *poff;
17523
17524+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17525+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
17526+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
17527+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
17528+#endif
17529+
17530 if (!*poff || ptr < text || ptr >= text_end)
17531 continue;
17532 /* turn lock prefix into DS segment override prefix */
17533- if (*ptr == 0xf0)
17534+ if (*ktla_ktva(ptr) == 0xf0)
17535 text_poke(ptr, ((unsigned char []){0x3E}), 1);
17536 }
17537 mutex_unlock(&text_mutex);
17538@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
17539
17540 BUG_ON(p->len > MAX_PATCH_LEN);
17541 /* prep the buffer with the original instructions */
17542- memcpy(insnbuf, p->instr, p->len);
17543+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
17544 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
17545 (unsigned long)p->instr, p->len);
17546
17547@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
17548 if (!uniproc_patched || num_possible_cpus() == 1)
17549 free_init_pages("SMP alternatives",
17550 (unsigned long)__smp_locks,
17551- (unsigned long)__smp_locks_end);
17552+ PAGE_ALIGN((unsigned long)__smp_locks_end));
17553 #endif
17554
17555 apply_paravirt(__parainstructions, __parainstructions_end);
17556@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
17557 * instructions. And on the local CPU you need to be protected against NMI or MCE
17558 * handlers seeing an inconsistent instruction while you patch.
17559 */
17560-void *__init_or_module text_poke_early(void *addr, const void *opcode,
17561+void *__kprobes text_poke_early(void *addr, const void *opcode,
17562 size_t len)
17563 {
17564 unsigned long flags;
17565 local_irq_save(flags);
17566- memcpy(addr, opcode, len);
17567+
17568+ pax_open_kernel();
17569+ memcpy(ktla_ktva(addr), opcode, len);
17570 sync_core();
17571+ pax_close_kernel();
17572+
17573 local_irq_restore(flags);
17574 /* Could also do a CLFLUSH here to speed up CPU recovery; but
17575 that causes hangs on some VIA CPUs. */
17576@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
17577 */
17578 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
17579 {
17580- unsigned long flags;
17581- char *vaddr;
17582+ unsigned char *vaddr = ktla_ktva(addr);
17583 struct page *pages[2];
17584- int i;
17585+ size_t i;
17586
17587 if (!core_kernel_text((unsigned long)addr)) {
17588- pages[0] = vmalloc_to_page(addr);
17589- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
17590+ pages[0] = vmalloc_to_page(vaddr);
17591+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
17592 } else {
17593- pages[0] = virt_to_page(addr);
17594+ pages[0] = virt_to_page(vaddr);
17595 WARN_ON(!PageReserved(pages[0]));
17596- pages[1] = virt_to_page(addr + PAGE_SIZE);
17597+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
17598 }
17599 BUG_ON(!pages[0]);
17600- local_irq_save(flags);
17601- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
17602- if (pages[1])
17603- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
17604- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
17605- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
17606- clear_fixmap(FIX_TEXT_POKE0);
17607- if (pages[1])
17608- clear_fixmap(FIX_TEXT_POKE1);
17609- local_flush_tlb();
17610- sync_core();
17611- /* Could also do a CLFLUSH here to speed up CPU recovery; but
17612- that causes hangs on some VIA CPUs. */
17613+ text_poke_early(addr, opcode, len);
17614 for (i = 0; i < len; i++)
17615- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
17616- local_irq_restore(flags);
17617+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
17618 return addr;
17619 }
17620
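
The text_poke()/text_poke_early() rework above replaces fixmap-based patching with a direct write through ktla_ktva(addr), bracketed by pax_open_kernel()/pax_close_kernel(), since under KERNEXEC the kernel's text is not writable in place. A userspace analogue of that open/patch/close discipline using mprotect() (the kernel-side mechanism differs, but the bracketing is the point):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        unsigned char *buf = mmap(NULL, pg, PROT_READ,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        mprotect(buf, pg, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()" */
        memcpy(buf, "\x90\x90", 2);                 /* patch in two NOPs */
        mprotect(buf, pg, PROT_READ);               /* "pax_close_kernel()" */

        printf("%02x %02x\n", buf[0], buf[1]);
        return 0;
}
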
17621diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
17622index cbf5121..812b537 100644
17623--- a/arch/x86/kernel/apic/apic.c
17624+++ b/arch/x86/kernel/apic/apic.c
17625@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
17626 /*
17627 * Debug level, exported for io_apic.c
17628 */
17629-unsigned int apic_verbosity;
17630+int apic_verbosity;
17631
17632 int pic_mode;
17633
17634@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
17635 apic_write(APIC_ESR, 0);
17636 v1 = apic_read(APIC_ESR);
17637 ack_APIC_irq();
17638- atomic_inc(&irq_err_count);
17639+ atomic_inc_unchecked(&irq_err_count);
17640
17641 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
17642 smp_processor_id(), v0 , v1);
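
atomic_inc_unchecked() above is part of PaX's REFCOUNT split: atomic_t increments become overflow-checked, and counters that are pure statistics and may legitimately wrap, like irq_err_count here, move to atomic_unchecked_t. A C11 sketch of the two behaviours (the kernel detects the overflow inside the increment itself; checking the fetched value afterwards, as below, is a simplification):

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>
#include <stdlib.h>

static void inc_checked(atomic_int *v)
{
        int old = atomic_fetch_add(v, 1);
        if (old == INT_MAX) {           /* the add wrapped: treat as fatal */
                fprintf(stderr, "refcount overflow\n");
                abort();
        }
}

static void inc_unchecked(atomic_int *v)
{
        atomic_fetch_add(v, 1);         /* wrapping is fine for statistics */
}

int main(void)
{
        atomic_int refs = 0, errs = INT_MAX;
        inc_checked(&refs);
        inc_unchecked(&errs);           /* wraps to INT_MIN, by design */
        printf("refs=%d errs=%d\n", atomic_load(&refs), atomic_load(&errs));
        return 0;
}
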
17643diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
17644index 00c77cf..2dc6a2d 100644
17645--- a/arch/x86/kernel/apic/apic_flat_64.c
17646+++ b/arch/x86/kernel/apic/apic_flat_64.c
17647@@ -157,7 +157,7 @@ static int flat_probe(void)
17648 return 1;
17649 }
17650
17651-static struct apic apic_flat = {
17652+static struct apic apic_flat __read_only = {
17653 .name = "flat",
17654 .probe = flat_probe,
17655 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
17656@@ -271,7 +271,7 @@ static int physflat_probe(void)
17657 return 0;
17658 }
17659
17660-static struct apic apic_physflat = {
17661+static struct apic apic_physflat __read_only = {
17662
17663 .name = "physical flat",
17664 .probe = physflat_probe,
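
Each `static struct apic ... __read_only` change in these files pins an APIC driver table into memory that is write-protected after init. The marker itself is ordinarily just a section attribute that the linker script routes onto pages later made read-only; a sketch of the attribute mechanics (the section name is illustrative, and userspace will not actually protect it):

#include <stdio.h>

#define __my_read_only __attribute__((section(".data.read_only")))

static int tuning_knob __my_read_only = 42;

int main(void)
{
        /* in the kernel this section is write-protected after boot;
         * here it is an ordinary data section, so this only shows
         * where the variable lands, not the protection itself */
        printf("%d\n", tuning_knob);
        return 0;
}
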
17665diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
17666index e145f28..2752888 100644
17667--- a/arch/x86/kernel/apic/apic_noop.c
17668+++ b/arch/x86/kernel/apic/apic_noop.c
17669@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
17670 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
17671 }
17672
17673-struct apic apic_noop = {
17674+struct apic apic_noop __read_only = {
17675 .name = "noop",
17676 .probe = noop_probe,
17677 .acpi_madt_oem_check = NULL,
17678diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
17679index d50e364..543bee3 100644
17680--- a/arch/x86/kernel/apic/bigsmp_32.c
17681+++ b/arch/x86/kernel/apic/bigsmp_32.c
17682@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
17683 return dmi_bigsmp;
17684 }
17685
17686-static struct apic apic_bigsmp = {
17687+static struct apic apic_bigsmp __read_only = {
17688
17689 .name = "bigsmp",
17690 .probe = probe_bigsmp,
17691diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
17692index 0874799..a7a7892 100644
17693--- a/arch/x86/kernel/apic/es7000_32.c
17694+++ b/arch/x86/kernel/apic/es7000_32.c
17695@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
17696 return ret && es7000_apic_is_cluster();
17697 }
17698
17699-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
17700-static struct apic __refdata apic_es7000_cluster = {
17701+static struct apic apic_es7000_cluster __read_only = {
17702
17703 .name = "es7000",
17704 .probe = probe_es7000,
17705@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
17706 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
17707 };
17708
17709-static struct apic __refdata apic_es7000 = {
17710+static struct apic apic_es7000 __read_only = {
17711
17712 .name = "es7000",
17713 .probe = probe_es7000,
17714diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
17715index b739d39..aebc14c 100644
17716--- a/arch/x86/kernel/apic/io_apic.c
17717+++ b/arch/x86/kernel/apic/io_apic.c
17718@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
17719 }
17720 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
17721
17722-void lock_vector_lock(void)
17723+void lock_vector_lock(void) __acquires(vector_lock)
17724 {
17725 /* Used to ensure the online set of cpus does not change
17726 * during assign_irq_vector.
17727@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
17728 raw_spin_lock(&vector_lock);
17729 }
17730
17731-void unlock_vector_lock(void)
17732+void unlock_vector_lock(void) __releases(vector_lock)
17733 {
17734 raw_spin_unlock(&vector_lock);
17735 }
17736@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
17737 ack_APIC_irq();
17738 }
17739
17740-atomic_t irq_mis_count;
17741+atomic_unchecked_t irq_mis_count;
17742
17743 #ifdef CONFIG_GENERIC_PENDING_IRQ
17744 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
17745@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
17746 * at the cpu.
17747 */
17748 if (!(v & (1 << (i & 0x1f)))) {
17749- atomic_inc(&irq_mis_count);
17750+ atomic_inc_unchecked(&irq_mis_count);
17751
17752 eoi_ioapic_irq(irq, cfg);
17753 }
17754@@ -2567,11 +2567,13 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
17755
17756 static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
17757 {
17758- chip->irq_print_chip = ir_print_prefix;
17759- chip->irq_ack = ir_ack_apic_edge;
17760- chip->irq_eoi = ir_ack_apic_level;
17761+ pax_open_kernel();
17762+ *(void **)&chip->irq_print_chip = ir_print_prefix;
17763+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
17764+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
17765
17766- chip->irq_set_affinity = set_remapped_irq_affinity;
17767+ *(void **)&chip->irq_set_affinity = set_remapped_irq_affinity;
17768+ pax_close_kernel();
17769 }
17770 #endif /* CONFIG_IRQ_REMAP */
17771
17772diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
17773index d661ee9..791fd33 100644
17774--- a/arch/x86/kernel/apic/numaq_32.c
17775+++ b/arch/x86/kernel/apic/numaq_32.c
17776@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
17777 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
17778 }
17779
17780-/* Use __refdata to keep false positive warning calm. */
17781-static struct apic __refdata apic_numaq = {
17782+static struct apic apic_numaq __read_only = {
17783
17784 .name = "NUMAQ",
17785 .probe = probe_numaq,
17786diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
17787index eb35ef9..f184a21 100644
17788--- a/arch/x86/kernel/apic/probe_32.c
17789+++ b/arch/x86/kernel/apic/probe_32.c
17790@@ -72,7 +72,7 @@ static int probe_default(void)
17791 return 1;
17792 }
17793
17794-static struct apic apic_default = {
17795+static struct apic apic_default __read_only = {
17796
17797 .name = "default",
17798 .probe = probe_default,
17799diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
17800index 77c95c0..434f8a4 100644
17801--- a/arch/x86/kernel/apic/summit_32.c
17802+++ b/arch/x86/kernel/apic/summit_32.c
17803@@ -486,7 +486,7 @@ void setup_summit(void)
17804 }
17805 #endif
17806
17807-static struct apic apic_summit = {
17808+static struct apic apic_summit __read_only = {
17809
17810 .name = "summit",
17811 .probe = probe_summit,
17812diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
17813index c88baa4..757aee1 100644
17814--- a/arch/x86/kernel/apic/x2apic_cluster.c
17815+++ b/arch/x86/kernel/apic/x2apic_cluster.c
17816@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
17817 return notifier_from_errno(err);
17818 }
17819
17820-static struct notifier_block __refdata x2apic_cpu_notifier = {
17821+static struct notifier_block x2apic_cpu_notifier = {
17822 .notifier_call = update_clusterinfo,
17823 };
17824
17825@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
17826 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
17827 }
17828
17829-static struct apic apic_x2apic_cluster = {
17830+static struct apic apic_x2apic_cluster __read_only = {
17831
17832 .name = "cluster x2apic",
17833 .probe = x2apic_cluster_probe,
17834diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
17835index 562a76d..a003c0f 100644
17836--- a/arch/x86/kernel/apic/x2apic_phys.c
17837+++ b/arch/x86/kernel/apic/x2apic_phys.c
17838@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
17839 return apic == &apic_x2apic_phys;
17840 }
17841
17842-static struct apic apic_x2apic_phys = {
17843+static struct apic apic_x2apic_phys __read_only = {
17844
17845 .name = "physical x2apic",
17846 .probe = x2apic_phys_probe,
17847diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
17848index 8cfade9..b9d04fc 100644
17849--- a/arch/x86/kernel/apic/x2apic_uv_x.c
17850+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
17851@@ -333,7 +333,7 @@ static int uv_probe(void)
17852 return apic == &apic_x2apic_uv_x;
17853 }
17854
17855-static struct apic __refdata apic_x2apic_uv_x = {
17856+static struct apic apic_x2apic_uv_x __read_only = {
17857
17858 .name = "UV large system",
17859 .probe = uv_probe,
17860diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
17861index d65464e..1035d31 100644
17862--- a/arch/x86/kernel/apm_32.c
17863+++ b/arch/x86/kernel/apm_32.c
17864@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
17865 * This is for buggy BIOS's that refer to (real mode) segment 0x40
17866 * even though they are called in protected mode.
17867 */
17868-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
17869+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
17870 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
17871
17872 static const char driver_version[] = "1.16ac"; /* no spaces */
17873@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
17874 BUG_ON(cpu != 0);
17875 gdt = get_cpu_gdt_table(cpu);
17876 save_desc_40 = gdt[0x40 / 8];
17877+
17878+ pax_open_kernel();
17879 gdt[0x40 / 8] = bad_bios_desc;
17880+ pax_close_kernel();
17881
17882 apm_irq_save(flags);
17883 APM_DO_SAVE_SEGS;
17884@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
17885 &call->esi);
17886 APM_DO_RESTORE_SEGS;
17887 apm_irq_restore(flags);
17888+
17889+ pax_open_kernel();
17890 gdt[0x40 / 8] = save_desc_40;
17891+ pax_close_kernel();
17892+
17893 put_cpu();
17894
17895 return call->eax & 0xff;
17896@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
17897 BUG_ON(cpu != 0);
17898 gdt = get_cpu_gdt_table(cpu);
17899 save_desc_40 = gdt[0x40 / 8];
17900+
17901+ pax_open_kernel();
17902 gdt[0x40 / 8] = bad_bios_desc;
17903+ pax_close_kernel();
17904
17905 apm_irq_save(flags);
17906 APM_DO_SAVE_SEGS;
17907@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
17908 &call->eax);
17909 APM_DO_RESTORE_SEGS;
17910 apm_irq_restore(flags);
17911+
17912+ pax_open_kernel();
17913 gdt[0x40 / 8] = save_desc_40;
17914+ pax_close_kernel();
17915+
17916 put_cpu();
17917 return error;
17918 }
17919@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
17920 * code to that CPU.
17921 */
17922 gdt = get_cpu_gdt_table(0);
17923+
17924+ pax_open_kernel();
17925 set_desc_base(&gdt[APM_CS >> 3],
17926 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17927 set_desc_base(&gdt[APM_CS_16 >> 3],
17928 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17929 set_desc_base(&gdt[APM_DS >> 3],
17930 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17931+ pax_close_kernel();
17932
17933 proc_create("apm", 0, NULL, &apm_file_ops);
17934
17935diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17936index 2861082..6d4718e 100644
17937--- a/arch/x86/kernel/asm-offsets.c
17938+++ b/arch/x86/kernel/asm-offsets.c
17939@@ -33,6 +33,8 @@ void common(void) {
17940 OFFSET(TI_status, thread_info, status);
17941 OFFSET(TI_addr_limit, thread_info, addr_limit);
17942 OFFSET(TI_preempt_count, thread_info, preempt_count);
17943+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17944+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17945
17946 BLANK();
17947 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17948@@ -53,8 +55,26 @@ void common(void) {
17949 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17950 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17951 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17952+
17953+#ifdef CONFIG_PAX_KERNEXEC
17954+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17955 #endif
17956
17957+#ifdef CONFIG_PAX_MEMORY_UDEREF
17958+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17959+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17960+#ifdef CONFIG_X86_64
17961+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17962+#endif
17963+#endif
17964+
17965+#endif
17966+
17967+ BLANK();
17968+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17969+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17970+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17971+
17972 #ifdef CONFIG_XEN
17973 BLANK();
17974 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
17975diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17976index 1b4754f..fbb4227 100644
17977--- a/arch/x86/kernel/asm-offsets_64.c
17978+++ b/arch/x86/kernel/asm-offsets_64.c
17979@@ -76,6 +76,7 @@ int main(void)
17980 BLANK();
17981 #undef ENTRY
17982
17983+ DEFINE(TSS_size, sizeof(struct tss_struct));
17984 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17985 BLANK();
17986
17987diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17988index a0e067d..9c7db16 100644
17989--- a/arch/x86/kernel/cpu/Makefile
17990+++ b/arch/x86/kernel/cpu/Makefile
17991@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17992 CFLAGS_REMOVE_perf_event.o = -pg
17993 endif
17994
17995-# Make sure load_percpu_segment has no stackprotector
17996-nostackp := $(call cc-option, -fno-stack-protector)
17997-CFLAGS_common.o := $(nostackp)
17998-
17999 obj-y := intel_cacheinfo.o scattered.o topology.o
18000 obj-y += proc.o capflags.o powerflags.o common.o
18001 obj-y += vmware.o hypervisor.o mshyperv.o
18002diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
18003index 15239ff..e23e04e 100644
18004--- a/arch/x86/kernel/cpu/amd.c
18005+++ b/arch/x86/kernel/cpu/amd.c
18006@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
18007 unsigned int size)
18008 {
18009 /* AMD errata T13 (order #21922) */
18010- if ((c->x86 == 6)) {
18011+ if (c->x86 == 6) {
18012 /* Duron Rev A0 */
18013 if (c->x86_model == 3 && c->x86_mask == 0)
18014 size = 64;
18015diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
18016index 9c3ab43..51e6366 100644
18017--- a/arch/x86/kernel/cpu/common.c
18018+++ b/arch/x86/kernel/cpu/common.c
18019@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
18020
18021 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
18022
18023-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
18024-#ifdef CONFIG_X86_64
18025- /*
18026- * We need valid kernel segments for data and code in long mode too
18027- * IRET will check the segment types kkeil 2000/10/28
18028- * Also sysret mandates a special GDT layout
18029- *
18030- * TLS descriptors are currently at a different place compared to i386.
18031- * Hopefully nobody expects them at a fixed place (Wine?)
18032- */
18033- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
18034- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
18035- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
18036- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
18037- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
18038- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
18039-#else
18040- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
18041- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18042- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
18043- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
18044- /*
18045- * Segments used for calling PnP BIOS have byte granularity.
18046- * They code segments and data segments have fixed 64k limits,
18047- * the transfer segment sizes are set at run time.
18048- */
18049- /* 32-bit code */
18050- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
18051- /* 16-bit code */
18052- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
18053- /* 16-bit data */
18054- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
18055- /* 16-bit data */
18056- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
18057- /* 16-bit data */
18058- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
18059- /*
18060- * The APM segments have byte granularity and their bases
18061- * are set at run time. All have 64k limits.
18062- */
18063- /* 32-bit code */
18064- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
18065- /* 16-bit code */
18066- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
18067- /* data */
18068- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
18069-
18070- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18071- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18072- GDT_STACK_CANARY_INIT
18073-#endif
18074-} };
18075-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
18076-
18077 static int __init x86_xsave_setup(char *s)
18078 {
18079 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
18080@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
18081 {
18082 struct desc_ptr gdt_descr;
18083
18084- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
18085+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18086 gdt_descr.size = GDT_SIZE - 1;
18087 load_gdt(&gdt_descr);
18088 /* Reload the per-cpu base */
18089@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
18090 /* Filter out anything that depends on CPUID levels we don't have */
18091 filter_cpuid_features(c, true);
18092
18093+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18094+ setup_clear_cpu_cap(X86_FEATURE_SEP);
18095+#endif
18096+
18097 /* If the model name is still unset, do table lookup. */
18098 if (!c->x86_model_id[0]) {
18099 const char *p;
18100@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
18101 }
18102 __setup("clearcpuid=", setup_disablecpuid);
18103
18104+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
18105+EXPORT_PER_CPU_SYMBOL(current_tinfo);
18106+
18107 #ifdef CONFIG_X86_64
18108 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
18109-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
18110- (unsigned long) nmi_idt_table };
18111+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
18112
18113 DEFINE_PER_CPU_FIRST(union irq_stack_union,
18114 irq_stack_union) __aligned(PAGE_SIZE);
18115@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
18116 EXPORT_PER_CPU_SYMBOL(current_task);
18117
18118 DEFINE_PER_CPU(unsigned long, kernel_stack) =
18119- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
18120+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
18121 EXPORT_PER_CPU_SYMBOL(kernel_stack);
18122
18123 DEFINE_PER_CPU(char *, irq_stack_ptr) =
18124@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
18125 int i;
18126
18127 cpu = stack_smp_processor_id();
18128- t = &per_cpu(init_tss, cpu);
18129+ t = init_tss + cpu;
18130 oist = &per_cpu(orig_ist, cpu);
18131
18132 #ifdef CONFIG_NUMA
18133@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
18134 switch_to_new_gdt(cpu);
18135 loadsegment(fs, 0);
18136
18137- load_idt((const struct desc_ptr *)&idt_descr);
18138+ load_idt(&idt_descr);
18139
18140 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
18141 syscall_init();
18142@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
18143 wrmsrl(MSR_KERNEL_GS_BASE, 0);
18144 barrier();
18145
18146- x86_configure_nx();
18147 enable_x2apic();
18148
18149 /*
18150@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
18151 {
18152 int cpu = smp_processor_id();
18153 struct task_struct *curr = current;
18154- struct tss_struct *t = &per_cpu(init_tss, cpu);
18155+ struct tss_struct *t = init_tss + cpu;
18156 struct thread_struct *thread = &curr->thread;
18157
18158 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
18159diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
18160index fcaabd0..7b55a26 100644
18161--- a/arch/x86/kernel/cpu/intel.c
18162+++ b/arch/x86/kernel/cpu/intel.c
18163@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
18164 * Update the IDT descriptor and reload the IDT so that
18165 * it uses the read-only mapped virtual address.
18166 */
18167- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
18168+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
18169 load_idt(&idt_descr);
18170 }
18171 #endif
18172diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
18173index 84c1309..39b7224 100644
18174--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
18175+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
18176@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
18177 };
18178
18179 #ifdef CONFIG_AMD_NB
18180+static struct attribute *default_attrs_amd_nb[] = {
18181+ &type.attr,
18182+ &level.attr,
18183+ &coherency_line_size.attr,
18184+ &physical_line_partition.attr,
18185+ &ways_of_associativity.attr,
18186+ &number_of_sets.attr,
18187+ &size.attr,
18188+ &shared_cpu_map.attr,
18189+ &shared_cpu_list.attr,
18190+ NULL,
18191+ NULL,
18192+ NULL,
18193+ NULL
18194+};
18195+
18196 static struct attribute ** __cpuinit amd_l3_attrs(void)
18197 {
18198 static struct attribute **attrs;
18199@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
18200
18201 n = ARRAY_SIZE(default_attrs);
18202
18203- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
18204- n += 2;
18205-
18206- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
18207- n += 1;
18208-
18209- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
18210- if (attrs == NULL)
18211- return attrs = default_attrs;
18212-
18213- for (n = 0; default_attrs[n]; n++)
18214- attrs[n] = default_attrs[n];
18215+ attrs = default_attrs_amd_nb;
18216
18217 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
18218 attrs[n++] = &cache_disable_0.attr;
18219@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
18220 .default_attrs = default_attrs,
18221 };
18222
18223+#ifdef CONFIG_AMD_NB
18224+static struct kobj_type ktype_cache_amd_nb = {
18225+ .sysfs_ops = &sysfs_ops,
18226+ .default_attrs = default_attrs_amd_nb,
18227+};
18228+#endif
18229+
18230 static struct kobj_type ktype_percpu_entry = {
18231 .sysfs_ops = &sysfs_ops,
18232 };
18233@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
18234 return retval;
18235 }
18236
18237+#ifdef CONFIG_AMD_NB
18238+ amd_l3_attrs();
18239+#endif
18240+
18241 for (i = 0; i < num_cache_leaves; i++) {
18242+ struct kobj_type *ktype;
18243+
18244 this_object = INDEX_KOBJECT_PTR(cpu, i);
18245 this_object->cpu = cpu;
18246 this_object->index = i;
18247
18248 this_leaf = CPUID4_INFO_IDX(cpu, i);
18249
18250- ktype_cache.default_attrs = default_attrs;
18251+ ktype = &ktype_cache;
18252 #ifdef CONFIG_AMD_NB
18253 if (this_leaf->base.nb)
18254- ktype_cache.default_attrs = amd_l3_attrs();
18255+ ktype = &ktype_cache_amd_nb;
18256 #endif
18257 retval = kobject_init_and_add(&(this_object->kobj),
18258- &ktype_cache,
18259+ ktype,
18260 per_cpu(ici_cache_kobject, cpu),
18261 "index%1lu", i);
18262 if (unlikely(retval)) {
18263@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
18264 return NOTIFY_OK;
18265 }
18266
18267-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
18268+static struct notifier_block cacheinfo_cpu_notifier = {
18269 .notifier_call = cacheinfo_cpu_callback,
18270 };
18271
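
The intel_cacheinfo.c hunks replace a kzalloc()'d copy of the attribute array with a static default_attrs_amd_nb[] carrying spare NULL slots, so the optional AMD L3 attributes can be appended without a runtime allocation and the kobj_type can stay per-variant instead of being mutated globally. A simplified sketch of the spare-slot pattern (stand-in types, fewer entries than the real array):

#include <stddef.h>
#include <stdio.h>

struct attribute { const char *name; };

static struct attribute type_attr       = { "type" };
static struct attribute size_attr       = { "size" };
static struct attribute cache_disable_0 = { "cache_disable_0" };
static struct attribute cache_disable_1 = { "cache_disable_1" };

/* two fixed entries, then spare NULL slots plus the NULL terminator */
static struct attribute *attrs_amd_nb[] = {
    &type_attr, &size_attr, NULL, NULL, NULL
};

int main(void)
{
    size_t n = 2;                       /* index of the first spare slot */
    int has_index_disable = 1;          /* stands in for amd_nb_has_feature() */

    if (has_index_disable) {            /* fill spares only when supported */
        attrs_amd_nb[n++] = &cache_disable_0;
        attrs_amd_nb[n++] = &cache_disable_1;
    }
    for (size_t i = 0; attrs_amd_nb[i]; i++)
        printf("%s\n", attrs_amd_nb[i]->name);
    return 0;
}
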
18272diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
18273index 80dbda8..be16652 100644
18274--- a/arch/x86/kernel/cpu/mcheck/mce.c
18275+++ b/arch/x86/kernel/cpu/mcheck/mce.c
18276@@ -45,6 +45,7 @@
18277 #include <asm/processor.h>
18278 #include <asm/mce.h>
18279 #include <asm/msr.h>
18280+#include <asm/local.h>
18281
18282 #include "mce-internal.h"
18283
18284@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
18285 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
18286 m->cs, m->ip);
18287
18288- if (m->cs == __KERNEL_CS)
18289+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
18290 print_symbol("{%s}", m->ip);
18291 pr_cont("\n");
18292 }
18293@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
18294
18295 #define PANIC_TIMEOUT 5 /* 5 seconds */
18296
18297-static atomic_t mce_paniced;
18298+static atomic_unchecked_t mce_paniced;
18299
18300 static int fake_panic;
18301-static atomic_t mce_fake_paniced;
18302+static atomic_unchecked_t mce_fake_paniced;
18303
18304 /* Panic in progress. Enable interrupts and wait for final IPI */
18305 static void wait_for_panic(void)
18306@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18307 /*
18308 * Make sure only one CPU runs in machine check panic
18309 */
18310- if (atomic_inc_return(&mce_paniced) > 1)
18311+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
18312 wait_for_panic();
18313 barrier();
18314
18315@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18316 console_verbose();
18317 } else {
18318 /* Don't log too much for fake panic */
18319- if (atomic_inc_return(&mce_fake_paniced) > 1)
18320+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
18321 return;
18322 }
18323 /* First print corrected ones that are still unlogged */
18324@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
18325 * might have been modified by someone else.
18326 */
18327 rmb();
18328- if (atomic_read(&mce_paniced))
18329+ if (atomic_read_unchecked(&mce_paniced))
18330 wait_for_panic();
18331 if (!mca_cfg.monarch_timeout)
18332 goto out;
18333@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
18334 }
18335
18336 /* Call the installed machine check handler for this CPU setup. */
18337-void (*machine_check_vector)(struct pt_regs *, long error_code) =
18338+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
18339 unexpected_machine_check;
18340
18341 /*
18342@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
18343 return;
18344 }
18345
18346+ pax_open_kernel();
18347 machine_check_vector = do_machine_check;
18348+ pax_close_kernel();
18349
18350 __mcheck_cpu_init_generic();
18351 __mcheck_cpu_init_vendor(c);
18352@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
18353 */
18354
18355 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
18356-static int mce_chrdev_open_count; /* #times opened */
18357+static local_t mce_chrdev_open_count; /* #times opened */
18358 static int mce_chrdev_open_exclu; /* already open exclusive? */
18359
18360 static int mce_chrdev_open(struct inode *inode, struct file *file)
18361@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
18362 spin_lock(&mce_chrdev_state_lock);
18363
18364 if (mce_chrdev_open_exclu ||
18365- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
18366+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
18367 spin_unlock(&mce_chrdev_state_lock);
18368
18369 return -EBUSY;
18370@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
18371
18372 if (file->f_flags & O_EXCL)
18373 mce_chrdev_open_exclu = 1;
18374- mce_chrdev_open_count++;
18375+ local_inc(&mce_chrdev_open_count);
18376
18377 spin_unlock(&mce_chrdev_state_lock);
18378
18379@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
18380 {
18381 spin_lock(&mce_chrdev_state_lock);
18382
18383- mce_chrdev_open_count--;
18384+ local_dec(&mce_chrdev_open_count);
18385 mce_chrdev_open_exclu = 0;
18386
18387 spin_unlock(&mce_chrdev_state_lock);
18388@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
18389 return NOTIFY_OK;
18390 }
18391
18392-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
18393+static struct notifier_block mce_cpu_notifier = {
18394 .notifier_call = mce_cpu_callback,
18395 };
18396
18397@@ -2382,7 +2385,7 @@ static __init void mce_init_banks(void)
18398
18399 for (i = 0; i < mca_cfg.banks; i++) {
18400 struct mce_bank *b = &mce_banks[i];
18401- struct device_attribute *a = &b->attr;
18402+ device_attribute_no_const *a = &b->attr;
18403
18404 sysfs_attr_init(&a->attr);
18405 a->attr.name = b->attrname;
18406@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
18407 static void mce_reset(void)
18408 {
18409 cpu_missing = 0;
18410- atomic_set(&mce_fake_paniced, 0);
18411+ atomic_set_unchecked(&mce_fake_paniced, 0);
18412 atomic_set(&mce_executing, 0);
18413 atomic_set(&mce_callin, 0);
18414 atomic_set(&global_nwo, 0);
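
The mce.c hunks switch the panic counters to atomic_unchecked_t (PaX's wrap-permitted atomic, used where overflow is harmless), route the write to machine_check_vector through pax_open_kernel()/pax_close_kernel() since the pointer is now __read_only, and track the chardev open count in a local_t. A user-space sketch of the exclusive-open accounting those last hunks preserve, with C11 atomics standing in for local_t and the spinlock elided:

#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

static atomic_long open_count;          /* stands in for the local_t counter */
static int open_exclu;                  /* already open exclusive? */

static int dev_open(int want_excl)
{
    /* in the kernel this whole test runs under mce_chrdev_state_lock */
    if (open_exclu || (atomic_load(&open_count) && want_excl))
        return -EBUSY;
    if (want_excl)
        open_exclu = 1;
    atomic_fetch_add(&open_count, 1);
    return 0;
}

static void dev_release(void)
{
    atomic_fetch_sub(&open_count, 1);
    open_exclu = 0;
}

int main(void)
{
    printf("first open: %d\n", dev_open(0));   /* 0 */
    printf("excl open : %d\n", dev_open(1));   /* -EBUSY: already open */
    dev_release();
    printf("excl open : %d\n", dev_open(1));   /* 0: now exclusive */
    return 0;
}
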
18415diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
18416index 2d5454c..51987eb 100644
18417--- a/arch/x86/kernel/cpu/mcheck/p5.c
18418+++ b/arch/x86/kernel/cpu/mcheck/p5.c
18419@@ -11,6 +11,7 @@
18420 #include <asm/processor.h>
18421 #include <asm/mce.h>
18422 #include <asm/msr.h>
18423+#include <asm/pgtable.h>
18424
18425 /* By default disabled */
18426 int mce_p5_enabled __read_mostly;
18427@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
18428 if (!cpu_has(c, X86_FEATURE_MCE))
18429 return;
18430
18431+ pax_open_kernel();
18432 machine_check_vector = pentium_machine_check;
18433+ pax_close_kernel();
18434 /* Make sure the vector pointer is visible before we enable MCEs: */
18435 wmb();
18436
18437diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
18438index 47a1870..8c019a7 100644
18439--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
18440+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
18441@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
18442 return notifier_from_errno(err);
18443 }
18444
18445-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
18446+static struct notifier_block thermal_throttle_cpu_notifier =
18447 {
18448 .notifier_call = thermal_throttle_cpu_callback,
18449 };
18450diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
18451index 2d7998f..17c9de1 100644
18452--- a/arch/x86/kernel/cpu/mcheck/winchip.c
18453+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
18454@@ -10,6 +10,7 @@
18455 #include <asm/processor.h>
18456 #include <asm/mce.h>
18457 #include <asm/msr.h>
18458+#include <asm/pgtable.h>
18459
18460 /* Machine check handler for WinChip C6: */
18461 static void winchip_machine_check(struct pt_regs *regs, long error_code)
18462@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
18463 {
18464 u32 lo, hi;
18465
18466+ pax_open_kernel();
18467 machine_check_vector = winchip_machine_check;
18468+ pax_close_kernel();
18469 /* Make sure the vector pointer is visible before we enable MCEs: */
18470 wmb();
18471
18472diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
18473index 726bf96..81f0526 100644
18474--- a/arch/x86/kernel/cpu/mtrr/main.c
18475+++ b/arch/x86/kernel/cpu/mtrr/main.c
18476@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
18477 u64 size_or_mask, size_and_mask;
18478 static bool mtrr_aps_delayed_init;
18479
18480-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
18481+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
18482
18483 const struct mtrr_ops *mtrr_if;
18484
18485diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
18486index df5e41f..816c719 100644
18487--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
18488+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
18489@@ -25,7 +25,7 @@ struct mtrr_ops {
18490 int (*validate_add_page)(unsigned long base, unsigned long size,
18491 unsigned int type);
18492 int (*have_wrcomb)(void);
18493-};
18494+} __do_const;
18495
18496 extern int generic_get_free_region(unsigned long base, unsigned long size,
18497 int replace_reg);
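
Marking struct mtrr_ops __do_const (and the mtrr_ops[] table __read_only) moves these function-pointer tables out of writable memory, so an attacker with an arbitrary-write primitive cannot retarget them. A plain-C sketch of the same idea using ordinary const; __do_const itself is a PaX compiler-plugin annotation with no user-space equivalent:

#include <stdio.h>

struct mtrr_ops_like {
    int (*have_wrcomb)(void);           /* one representative hook */
};

static int generic_have_wrcomb(void) { return 1; }

/* const instance: the pointers end up in read-only data */
static const struct mtrr_ops_like generic_ops = {
    .have_wrcomb = generic_have_wrcomb,
};

/* callers go through a pointer-to-const, so they cannot rewrite the table */
static const struct mtrr_ops_like *mtrr_if = &generic_ops;

int main(void)
{
    printf("wrcomb supported: %d\n", mtrr_if->have_wrcomb());
    return 0;
}
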
18498diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
18499index 6774c17..72c1b22 100644
18500--- a/arch/x86/kernel/cpu/perf_event.c
18501+++ b/arch/x86/kernel/cpu/perf_event.c
18502@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
18503 pr_info("no hardware sampling interrupt available.\n");
18504 }
18505
18506-static struct attribute_group x86_pmu_format_group = {
18507+static attribute_group_no_const x86_pmu_format_group = {
18508 .name = "format",
18509 .attrs = NULL,
18510 };
18511@@ -1313,7 +1313,7 @@ static struct attribute_group x86_pmu_format_group = {
18512 struct perf_pmu_events_attr {
18513 struct device_attribute attr;
18514 u64 id;
18515-};
18516+} __do_const;
18517
18518 /*
18519 * Remove all undefined events (x86_pmu.event_map(id) == 0)
18520@@ -1381,7 +1381,7 @@ static struct attribute *events_attr[] = {
18521 NULL,
18522 };
18523
18524-static struct attribute_group x86_pmu_events_group = {
18525+static attribute_group_no_const x86_pmu_events_group = {
18526 .name = "events",
18527 .attrs = events_attr,
18528 };
18529@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
18530 if (idx > GDT_ENTRIES)
18531 return 0;
18532
18533- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
18534+ desc = get_cpu_gdt_table(smp_processor_id());
18535 }
18536
18537 return get_desc_base(desc + idx);
18538@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
18539 break;
18540
18541 perf_callchain_store(entry, frame.return_address);
18542- fp = frame.next_frame;
18543+ fp = (const void __force_user *)frame.next_frame;
18544 }
18545 }
18546
18547diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
18548index 70602f8..9d9edb7 100644
18549--- a/arch/x86/kernel/cpu/perf_event_intel.c
18550+++ b/arch/x86/kernel/cpu/perf_event_intel.c
18551@@ -1964,10 +1964,10 @@ __init int intel_pmu_init(void)
18552 * v2 and above have a perf capabilities MSR
18553 */
18554 if (version > 1) {
18555- u64 capabilities;
18556+ u64 capabilities = x86_pmu.intel_cap.capabilities;
18557
18558- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
18559- x86_pmu.intel_cap.capabilities = capabilities;
18560+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
18561+ x86_pmu.intel_cap.capabilities = capabilities;
18562 }
18563
18564 intel_ds_init();
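
The perf_event_intel.c hunk replaces an unconditional rdmsrl() with rdmsrl_safe(), keeping the previously initialized capabilities value when the MSR read faults (e.g. under a hypervisor that does not expose IA32_PERF_CAPABILITIES). A sketch of that read-with-fallback shape; read_msr_safe() below is an invented stand-in that fails for unknown register numbers:

#include <stdio.h>
#include <stdint.h>

static int read_msr_safe(uint32_t msr, uint64_t *val)
{
    if (msr != 0x345)                   /* pretend only IA32_PERF_CAPABILITIES exists */
        return -1;                      /* nonzero models the #GP fixup path */
    *val = 0x31c3;
    return 0;
}

int main(void)
{
    uint64_t capabilities = 0;          /* pre-initialized default */
    uint64_t saved = capabilities;      /* what the hunk captures up front */

    if (read_msr_safe(0x345, &capabilities))
        capabilities = saved;           /* restore the default, as the hunk does */
    printf("capabilities: %#llx\n", (unsigned long long)capabilities);
    return 0;
}
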
18565diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
18566index b43200d..d235b3e 100644
18567--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
18568+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
18569@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
18570 static int __init uncore_type_init(struct intel_uncore_type *type)
18571 {
18572 struct intel_uncore_pmu *pmus;
18573- struct attribute_group *events_group;
18574+ attribute_group_no_const *attr_group;
18575 struct attribute **attrs;
18576 int i, j;
18577
18578@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
18579 while (type->event_descs[i].attr.attr.name)
18580 i++;
18581
18582- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
18583- sizeof(*events_group), GFP_KERNEL);
18584- if (!events_group)
18585+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
18586+ sizeof(*attr_group), GFP_KERNEL);
18587+ if (!attr_group)
18588 goto fail;
18589
18590- attrs = (struct attribute **)(events_group + 1);
18591- events_group->name = "events";
18592- events_group->attrs = attrs;
18593+ attrs = (struct attribute **)(attr_group + 1);
18594+ attr_group->name = "events";
18595+ attr_group->attrs = attrs;
18596
18597 for (j = 0; j < i; j++)
18598 attrs[j] = &type->event_descs[j].attr.attr;
18599
18600- type->events_group = events_group;
18601+ type->events_group = attr_group;
18602 }
18603
18604 type->pmu_group = &uncore_pmu_attr_group;
18605@@ -2826,7 +2826,7 @@ static int
18606 return NOTIFY_OK;
18607 }
18608
18609-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
18610+static struct notifier_block uncore_cpu_nb = {
18611 .notifier_call = uncore_cpu_notifier,
18612 /*
18613 * to migrate uncore events, our notifier should be executed
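
The uncore hunk renames events_group to a non-const attribute_group but keeps the underlying trick: a single allocation holds the group struct immediately followed by its NULL-terminated attribute-pointer array, so both share one lifetime. A self-contained sketch of that single-allocation layout (simplified stand-in types):

#include <stdio.h>
#include <stdlib.h>

struct attribute       { const char *name; };
struct attribute_group { const char *name; struct attribute **attrs; };

int main(void)
{
    struct attribute ev0 = { "event0" }, ev1 = { "event1" };
    size_t i = 2;                       /* number of event descriptors */

    /* one calloc: struct, then (i + 1) pointer slots for attrs + terminator */
    struct attribute_group *grp =
        calloc(1, sizeof(*grp) + sizeof(struct attribute *) * (i + 1));
    if (!grp)
        return 1;

    grp->name  = "events";
    grp->attrs = (struct attribute **)(grp + 1);  /* array lives right after */
    grp->attrs[0] = &ev0;
    grp->attrs[1] = &ev1;                         /* attrs[2] stays NULL */

    for (struct attribute **a = grp->attrs; *a; a++)
        printf("%s\n", (*a)->name);
    free(grp);                                    /* frees both at once */
    return 0;
}
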
18614diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18615index e68a455..975a932 100644
18616--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18617+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18618@@ -428,7 +428,7 @@ struct intel_uncore_box {
18619 struct uncore_event_desc {
18620 struct kobj_attribute attr;
18621 const char *config;
18622-};
18623+} __do_const;
18624
18625 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
18626 { \
18627diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
18628index 60c7891..9e911d3 100644
18629--- a/arch/x86/kernel/cpuid.c
18630+++ b/arch/x86/kernel/cpuid.c
18631@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
18632 return notifier_from_errno(err);
18633 }
18634
18635-static struct notifier_block __refdata cpuid_class_cpu_notifier =
18636+static struct notifier_block cpuid_class_cpu_notifier =
18637 {
18638 .notifier_call = cpuid_class_cpu_callback,
18639 };
18640diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
18641index 74467fe..18793d5 100644
18642--- a/arch/x86/kernel/crash.c
18643+++ b/arch/x86/kernel/crash.c
18644@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
18645 {
18646 #ifdef CONFIG_X86_32
18647 struct pt_regs fixed_regs;
18648-#endif
18649
18650-#ifdef CONFIG_X86_32
18651- if (!user_mode_vm(regs)) {
18652+ if (!user_mode(regs)) {
18653 crash_fixup_ss_esp(&fixed_regs, regs);
18654 regs = &fixed_regs;
18655 }
18656diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
18657index 37250fe..bf2ec74 100644
18658--- a/arch/x86/kernel/doublefault_32.c
18659+++ b/arch/x86/kernel/doublefault_32.c
18660@@ -11,7 +11,7 @@
18661
18662 #define DOUBLEFAULT_STACKSIZE (1024)
18663 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
18664-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
18665+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
18666
18667 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
18668
18669@@ -21,7 +21,7 @@ static void doublefault_fn(void)
18670 unsigned long gdt, tss;
18671
18672 store_gdt(&gdt_desc);
18673- gdt = gdt_desc.address;
18674+ gdt = (unsigned long)gdt_desc.address;
18675
18676 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
18677
18678@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
18679 /* 0x2 bit is always set */
18680 .flags = X86_EFLAGS_SF | 0x2,
18681 .sp = STACK_START,
18682- .es = __USER_DS,
18683+ .es = __KERNEL_DS,
18684 .cs = __KERNEL_CS,
18685 .ss = __KERNEL_DS,
18686- .ds = __USER_DS,
18687+ .ds = __KERNEL_DS,
18688 .fs = __KERNEL_PERCPU,
18689
18690 .__cr3 = __pa_nodebug(swapper_pg_dir),
18691diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
18692index ae42418b..787c16b 100644
18693--- a/arch/x86/kernel/dumpstack.c
18694+++ b/arch/x86/kernel/dumpstack.c
18695@@ -2,6 +2,9 @@
18696 * Copyright (C) 1991, 1992 Linus Torvalds
18697 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
18698 */
18699+#ifdef CONFIG_GRKERNSEC_HIDESYM
18700+#define __INCLUDED_BY_HIDESYM 1
18701+#endif
18702 #include <linux/kallsyms.h>
18703 #include <linux/kprobes.h>
18704 #include <linux/uaccess.h>
18705@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
18706 static void
18707 print_ftrace_graph_addr(unsigned long addr, void *data,
18708 const struct stacktrace_ops *ops,
18709- struct thread_info *tinfo, int *graph)
18710+ struct task_struct *task, int *graph)
18711 {
18712- struct task_struct *task;
18713 unsigned long ret_addr;
18714 int index;
18715
18716 if (addr != (unsigned long)return_to_handler)
18717 return;
18718
18719- task = tinfo->task;
18720 index = task->curr_ret_stack;
18721
18722 if (!task->ret_stack || index < *graph)
18723@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18724 static inline void
18725 print_ftrace_graph_addr(unsigned long addr, void *data,
18726 const struct stacktrace_ops *ops,
18727- struct thread_info *tinfo, int *graph)
18728+ struct task_struct *task, int *graph)
18729 { }
18730 #endif
18731
18732@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18733 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
18734 */
18735
18736-static inline int valid_stack_ptr(struct thread_info *tinfo,
18737- void *p, unsigned int size, void *end)
18738+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
18739 {
18740- void *t = tinfo;
18741 if (end) {
18742 if (p < end && p >= (end-THREAD_SIZE))
18743 return 1;
18744@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
18745 }
18746
18747 unsigned long
18748-print_context_stack(struct thread_info *tinfo,
18749+print_context_stack(struct task_struct *task, void *stack_start,
18750 unsigned long *stack, unsigned long bp,
18751 const struct stacktrace_ops *ops, void *data,
18752 unsigned long *end, int *graph)
18753 {
18754 struct stack_frame *frame = (struct stack_frame *)bp;
18755
18756- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
18757+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
18758 unsigned long addr;
18759
18760 addr = *stack;
18761@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
18762 } else {
18763 ops->address(data, addr, 0);
18764 }
18765- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18766+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18767 }
18768 stack++;
18769 }
18770@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
18771 EXPORT_SYMBOL_GPL(print_context_stack);
18772
18773 unsigned long
18774-print_context_stack_bp(struct thread_info *tinfo,
18775+print_context_stack_bp(struct task_struct *task, void *stack_start,
18776 unsigned long *stack, unsigned long bp,
18777 const struct stacktrace_ops *ops, void *data,
18778 unsigned long *end, int *graph)
18779@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18780 struct stack_frame *frame = (struct stack_frame *)bp;
18781 unsigned long *ret_addr = &frame->return_address;
18782
18783- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
18784+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
18785 unsigned long addr = *ret_addr;
18786
18787 if (!__kernel_text_address(addr))
18788@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18789 ops->address(data, addr, 1);
18790 frame = frame->next_frame;
18791 ret_addr = &frame->return_address;
18792- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18793+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18794 }
18795
18796 return (unsigned long)frame;
18797@@ -189,7 +188,7 @@ void dump_stack(void)
18798
18799 bp = stack_frame(current, NULL);
18800 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
18801- current->pid, current->comm, print_tainted(),
18802+ task_pid_nr(current), current->comm, print_tainted(),
18803 init_utsname()->release,
18804 (int)strcspn(init_utsname()->version, " "),
18805 init_utsname()->version);
18806@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
18807 }
18808 EXPORT_SYMBOL_GPL(oops_begin);
18809
18810+extern void gr_handle_kernel_exploit(void);
18811+
18812 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18813 {
18814 if (regs && kexec_should_crash(current))
18815@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18816 panic("Fatal exception in interrupt");
18817 if (panic_on_oops)
18818 panic("Fatal exception");
18819- do_exit(signr);
18820+
18821+ gr_handle_kernel_exploit();
18822+
18823+ do_group_exit(signr);
18824 }
18825
18826 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18827@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18828 print_modules();
18829 show_regs(regs);
18830 #ifdef CONFIG_X86_32
18831- if (user_mode_vm(regs)) {
18832+ if (user_mode(regs)) {
18833 sp = regs->sp;
18834 ss = regs->ss & 0xffff;
18835 } else {
18836@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
18837 unsigned long flags = oops_begin();
18838 int sig = SIGSEGV;
18839
18840- if (!user_mode_vm(regs))
18841+ if (!user_mode(regs))
18842 report_bug(regs->ip, regs);
18843
18844 if (__die(str, regs, err))
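
The dumpstack.c hunks drop struct thread_info from the stack walkers in favor of passing the task plus the raw stack base, reducing valid_stack_ptr() to a pure range check. A user-space sketch of that check under an assumed THREAD_SIZE; the kernel version additionally accepts an explicit end pointer for IRQ/exception stacks:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL              /* assumed stack size for the sketch */

/* accept p only if [p, p + size) lies entirely inside the stack region */
static int valid_stack_ptr(void *stack_start, void *p, unsigned int size)
{
    uintptr_t lo = (uintptr_t)stack_start;
    uintptr_t q  = (uintptr_t)p;

    return q >= lo && q + size <= lo + THREAD_SIZE && q + size >= q;
}

int main(void)
{
    static unsigned long stack[THREAD_SIZE / sizeof(long)];
    unsigned long *past_end = (unsigned long *)((char *)stack + THREAD_SIZE);

    printf("%d\n", valid_stack_ptr(stack, &stack[10], sizeof(long)));  /* 1 */
    printf("%d\n", valid_stack_ptr(stack, past_end, sizeof(long)));    /* 0 */
    return 0;
}
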
18845diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
18846index 1038a41..db2c12b 100644
18847--- a/arch/x86/kernel/dumpstack_32.c
18848+++ b/arch/x86/kernel/dumpstack_32.c
18849@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18850 bp = stack_frame(task, regs);
18851
18852 for (;;) {
18853- struct thread_info *context;
18854+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18855
18856- context = (struct thread_info *)
18857- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
18858- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
18859+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18860
18861- stack = (unsigned long *)context->previous_esp;
18862- if (!stack)
18863+ if (stack_start == task_stack_page(task))
18864 break;
18865+ stack = *(unsigned long **)stack_start;
18866 if (ops->stack(data, "IRQ") < 0)
18867 break;
18868 touch_nmi_watchdog();
18869@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
18870 {
18871 int i;
18872
18873- __show_regs(regs, !user_mode_vm(regs));
18874+ __show_regs(regs, !user_mode(regs));
18875
18876 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
18877 TASK_COMM_LEN, current->comm, task_pid_nr(current),
18878@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
18879 * When in-kernel, we also print out the stack and code at the
18880 * time of the fault..
18881 */
18882- if (!user_mode_vm(regs)) {
18883+ if (!user_mode(regs)) {
18884 unsigned int code_prologue = code_bytes * 43 / 64;
18885 unsigned int code_len = code_bytes;
18886 unsigned char c;
18887 u8 *ip;
18888+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
18889
18890 pr_emerg("Stack:\n");
18891 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
18892
18893 pr_emerg("Code:");
18894
18895- ip = (u8 *)regs->ip - code_prologue;
18896+ ip = (u8 *)regs->ip - code_prologue + cs_base;
18897 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
18898 /* try starting at IP */
18899- ip = (u8 *)regs->ip;
18900+ ip = (u8 *)regs->ip + cs_base;
18901 code_len = code_len - code_prologue + 1;
18902 }
18903 for (i = 0; i < code_len; i++, ip++) {
18904@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
18905 pr_cont(" Bad EIP value.");
18906 break;
18907 }
18908- if (ip == (u8 *)regs->ip)
18909+ if (ip == (u8 *)regs->ip + cs_base)
18910 pr_cont(" <%02x>", c);
18911 else
18912 pr_cont(" %02x", c);
18913@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
18914 {
18915 unsigned short ud2;
18916
18917+ ip = ktla_ktva(ip);
18918 if (ip < PAGE_OFFSET)
18919 return 0;
18920 if (probe_kernel_address((unsigned short *)ip, ud2))
18921@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
18922
18923 return ud2 == 0x0b0f;
18924 }
18925+
18926+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18927+void pax_check_alloca(unsigned long size)
18928+{
18929+ unsigned long sp = (unsigned long)&sp, stack_left;
18930+
18931+ /* all kernel stacks are of the same size */
18932+ stack_left = sp & (THREAD_SIZE - 1);
18933+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18934+}
18935+EXPORT_SYMBOL(pax_check_alloca);
18936+#endif
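
The pax_check_alloca() added above relies on kernel stacks being THREAD_SIZE-aligned: sp & (THREAD_SIZE - 1) is exactly the space left before the stack bottom, and any alloca that would eat into a 256-byte guard trips BUG_ON(). A runnable sketch of the same arithmetic, with abort() standing in for BUG_ON() and invented addresses:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define THREAD_SIZE 8192UL              /* assumed, must be a power of two */

static void check_alloca(uintptr_t sp, unsigned long size)
{
    /* offset within the aligned stack == bytes left when growing down */
    unsigned long stack_left = sp & (THREAD_SIZE - 1);

    if (stack_left < 256 || size >= stack_left - 256)
        abort();                        /* BUG_ON() in the kernel */
    printf("ok: %lu bytes left, %lu requested\n", stack_left, size);
}

int main(void)
{
    uintptr_t base = 0x100000;          /* pretend THREAD_SIZE-aligned stack */

    check_alloca(base + 4096, 1024);    /* plenty of headroom */
    check_alloca(base + 300, 100);      /* inside the guard -> aborts */
    return 0;
}
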
18937diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18938index b653675..51cc8c0 100644
18939--- a/arch/x86/kernel/dumpstack_64.c
18940+++ b/arch/x86/kernel/dumpstack_64.c
18941@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18942 unsigned long *irq_stack_end =
18943 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18944 unsigned used = 0;
18945- struct thread_info *tinfo;
18946 int graph = 0;
18947 unsigned long dummy;
18948+ void *stack_start;
18949
18950 if (!task)
18951 task = current;
18952@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18953 * current stack address. If the stacks consist of nested
18954 * exceptions
18955 */
18956- tinfo = task_thread_info(task);
18957 for (;;) {
18958 char *id;
18959 unsigned long *estack_end;
18960+
18961 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18962 &used, &id);
18963
18964@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18965 if (ops->stack(data, id) < 0)
18966 break;
18967
18968- bp = ops->walk_stack(tinfo, stack, bp, ops,
18969+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18970 data, estack_end, &graph);
18971 ops->stack(data, "<EOE>");
18972 /*
18973@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18974 * second-to-last pointer (index -2 to end) in the
18975 * exception stack:
18976 */
18977+ if ((u16)estack_end[-1] != __KERNEL_DS)
18978+ goto out;
18979 stack = (unsigned long *) estack_end[-2];
18980 continue;
18981 }
18982@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18983 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18984 if (ops->stack(data, "IRQ") < 0)
18985 break;
18986- bp = ops->walk_stack(tinfo, stack, bp,
18987+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18988 ops, data, irq_stack_end, &graph);
18989 /*
18990 * We link to the next stack (which would be
18991@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18992 /*
18993 * This handles the process stack:
18994 */
18995- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18996+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18997+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18998+out:
18999 put_cpu();
19000 }
19001 EXPORT_SYMBOL(dump_trace);
19002@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
19003 {
19004 int i;
19005 unsigned long sp;
19006- const int cpu = smp_processor_id();
19007+ const int cpu = raw_smp_processor_id();
19008 struct task_struct *cur = current;
19009
19010 sp = regs->sp;
19011@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
19012
19013 return ud2 == 0x0b0f;
19014 }
19015+
19016+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19017+void pax_check_alloca(unsigned long size)
19018+{
19019+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
19020+ unsigned cpu, used;
19021+ char *id;
19022+
19023+ /* check the process stack first */
19024+ stack_start = (unsigned long)task_stack_page(current);
19025+ stack_end = stack_start + THREAD_SIZE;
19026+ if (likely(stack_start <= sp && sp < stack_end)) {
19027+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
19028+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19029+ return;
19030+ }
19031+
19032+ cpu = get_cpu();
19033+
19034+ /* check the irq stacks */
19035+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
19036+ stack_start = stack_end - IRQ_STACK_SIZE;
19037+ if (stack_start <= sp && sp < stack_end) {
19038+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
19039+ put_cpu();
19040+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19041+ return;
19042+ }
19043+
19044+ /* check the exception stacks */
19045+ used = 0;
19046+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
19047+ stack_start = stack_end - EXCEPTION_STKSZ;
19048+ if (stack_end && stack_start <= sp && sp < stack_end) {
19049+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
19050+ put_cpu();
19051+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19052+ return;
19053+ }
19054+
19055+ put_cpu();
19056+
19057+ /* unknown stack */
19058+ BUG();
19059+}
19060+EXPORT_SYMBOL(pax_check_alloca);
19061+#endif
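
The 64-bit pax_check_alloca() above must try three stack types in turn before it can apply the guard test. A sketch of that classify-then-check structure as a plain range lookup; the addresses are invented, and the real code derives them from per-CPU irq_stack_ptr and in_exception_stack():

#include <stdio.h>
#include <stdint.h>

struct stack_range { const char *id; uintptr_t start, end; };

static const struct stack_range ranges[] = {
    { "process",   0x100000, 0x100000 + 8192  },
    { "irq",       0x200000, 0x200000 + 16384 },
    { "exception", 0x300000, 0x300000 + 4096  },
};

static const char *classify(uintptr_t sp)
{
    for (unsigned i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
        if (ranges[i].start <= sp && sp < ranges[i].end)
            return ranges[i].id;
    return NULL;                        /* unknown stack -> BUG() above */
}

int main(void)
{
    printf("%s\n", classify(0x100800));                     /* process */
    printf("%s\n", classify(0x203000));                     /* irq */
    printf("%s\n", classify(0x999999) ? "known" : "unknown");
    return 0;
}
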
19062diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
19063index 9b9f18b..9fcaa04 100644
19064--- a/arch/x86/kernel/early_printk.c
19065+++ b/arch/x86/kernel/early_printk.c
19066@@ -7,6 +7,7 @@
19067 #include <linux/pci_regs.h>
19068 #include <linux/pci_ids.h>
19069 #include <linux/errno.h>
19070+#include <linux/sched.h>
19071 #include <asm/io.h>
19072 #include <asm/processor.h>
19073 #include <asm/fcntl.h>
19074diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
19075index 6ed91d9..6cc365b 100644
19076--- a/arch/x86/kernel/entry_32.S
19077+++ b/arch/x86/kernel/entry_32.S
19078@@ -177,13 +177,153 @@
19079 /*CFI_REL_OFFSET gs, PT_GS*/
19080 .endm
19081 .macro SET_KERNEL_GS reg
19082+
19083+#ifdef CONFIG_CC_STACKPROTECTOR
19084 movl $(__KERNEL_STACK_CANARY), \reg
19085+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
19086+ movl $(__USER_DS), \reg
19087+#else
19088+ xorl \reg, \reg
19089+#endif
19090+
19091 movl \reg, %gs
19092 .endm
19093
19094 #endif /* CONFIG_X86_32_LAZY_GS */
19095
19096-.macro SAVE_ALL
19097+.macro pax_enter_kernel
19098+#ifdef CONFIG_PAX_KERNEXEC
19099+ call pax_enter_kernel
19100+#endif
19101+.endm
19102+
19103+.macro pax_exit_kernel
19104+#ifdef CONFIG_PAX_KERNEXEC
19105+ call pax_exit_kernel
19106+#endif
19107+.endm
19108+
19109+#ifdef CONFIG_PAX_KERNEXEC
19110+ENTRY(pax_enter_kernel)
19111+#ifdef CONFIG_PARAVIRT
19112+ pushl %eax
19113+ pushl %ecx
19114+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
19115+ mov %eax, %esi
19116+#else
19117+ mov %cr0, %esi
19118+#endif
19119+ bts $16, %esi
19120+ jnc 1f
19121+ mov %cs, %esi
19122+ cmp $__KERNEL_CS, %esi
19123+ jz 3f
19124+ ljmp $__KERNEL_CS, $3f
19125+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
19126+2:
19127+#ifdef CONFIG_PARAVIRT
19128+ mov %esi, %eax
19129+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
19130+#else
19131+ mov %esi, %cr0
19132+#endif
19133+3:
19134+#ifdef CONFIG_PARAVIRT
19135+ popl %ecx
19136+ popl %eax
19137+#endif
19138+ ret
19139+ENDPROC(pax_enter_kernel)
19140+
19141+ENTRY(pax_exit_kernel)
19142+#ifdef CONFIG_PARAVIRT
19143+ pushl %eax
19144+ pushl %ecx
19145+#endif
19146+ mov %cs, %esi
19147+ cmp $__KERNEXEC_KERNEL_CS, %esi
19148+ jnz 2f
19149+#ifdef CONFIG_PARAVIRT
19150+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
19151+ mov %eax, %esi
19152+#else
19153+ mov %cr0, %esi
19154+#endif
19155+ btr $16, %esi
19156+ ljmp $__KERNEL_CS, $1f
19157+1:
19158+#ifdef CONFIG_PARAVIRT
19159+ mov %esi, %eax
19160+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
19161+#else
19162+ mov %esi, %cr0
19163+#endif
19164+2:
19165+#ifdef CONFIG_PARAVIRT
19166+ popl %ecx
19167+ popl %eax
19168+#endif
19169+ ret
19170+ENDPROC(pax_exit_kernel)
19171+#endif
19172+
19173+.macro pax_erase_kstack
19174+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19175+ call pax_erase_kstack
19176+#endif
19177+.endm
19178+
19179+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19180+/*
19181+ * ebp: thread_info
19182+ */
19183+ENTRY(pax_erase_kstack)
19184+ pushl %edi
19185+ pushl %ecx
19186+ pushl %eax
19187+
19188+ mov TI_lowest_stack(%ebp), %edi
19189+ mov $-0xBEEF, %eax
19190+ std
19191+
19192+1: mov %edi, %ecx
19193+ and $THREAD_SIZE_asm - 1, %ecx
19194+ shr $2, %ecx
19195+ repne scasl
19196+ jecxz 2f
19197+
19198+ cmp $2*16, %ecx
19199+ jc 2f
19200+
19201+ mov $2*16, %ecx
19202+ repe scasl
19203+ jecxz 2f
19204+ jne 1b
19205+
19206+2: cld
19207+ mov %esp, %ecx
19208+ sub %edi, %ecx
19209+
19210+ cmp $THREAD_SIZE_asm, %ecx
19211+ jb 3f
19212+ ud2
19213+3:
19214+
19215+ shr $2, %ecx
19216+ rep stosl
19217+
19218+ mov TI_task_thread_sp0(%ebp), %edi
19219+ sub $128, %edi
19220+ mov %edi, TI_lowest_stack(%ebp)
19221+
19222+ popl %eax
19223+ popl %ecx
19224+ popl %edi
19225+ ret
19226+ENDPROC(pax_erase_kstack)
19227+#endif
19228+
19229+.macro __SAVE_ALL _DS
19230 cld
19231 PUSH_GS
19232 pushl_cfi %fs
19233@@ -206,7 +346,7 @@
19234 CFI_REL_OFFSET ecx, 0
19235 pushl_cfi %ebx
19236 CFI_REL_OFFSET ebx, 0
19237- movl $(__USER_DS), %edx
19238+ movl $\_DS, %edx
19239 movl %edx, %ds
19240 movl %edx, %es
19241 movl $(__KERNEL_PERCPU), %edx
19242@@ -214,6 +354,15 @@
19243 SET_KERNEL_GS %edx
19244 .endm
19245
19246+.macro SAVE_ALL
19247+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
19248+ __SAVE_ALL __KERNEL_DS
19249+ pax_enter_kernel
19250+#else
19251+ __SAVE_ALL __USER_DS
19252+#endif
19253+.endm
19254+
19255 .macro RESTORE_INT_REGS
19256 popl_cfi %ebx
19257 CFI_RESTORE ebx
19258@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
19259 popfl_cfi
19260 jmp syscall_exit
19261 CFI_ENDPROC
19262-END(ret_from_fork)
19263+ENDPROC(ret_from_fork)
19264
19265 ENTRY(ret_from_kernel_thread)
19266 CFI_STARTPROC
19267@@ -344,7 +493,15 @@ ret_from_intr:
19268 andl $SEGMENT_RPL_MASK, %eax
19269 #endif
19270 cmpl $USER_RPL, %eax
19271+
19272+#ifdef CONFIG_PAX_KERNEXEC
19273+ jae resume_userspace
19274+
19275+ pax_exit_kernel
19276+ jmp resume_kernel
19277+#else
19278 jb resume_kernel # not returning to v8086 or userspace
19279+#endif
19280
19281 ENTRY(resume_userspace)
19282 LOCKDEP_SYS_EXIT
19283@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
19284 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
19285 # int/exception return?
19286 jne work_pending
19287- jmp restore_all
19288-END(ret_from_exception)
19289+ jmp restore_all_pax
19290+ENDPROC(ret_from_exception)
19291
19292 #ifdef CONFIG_PREEMPT
19293 ENTRY(resume_kernel)
19294@@ -372,7 +529,7 @@ need_resched:
19295 jz restore_all
19296 call preempt_schedule_irq
19297 jmp need_resched
19298-END(resume_kernel)
19299+ENDPROC(resume_kernel)
19300 #endif
19301 CFI_ENDPROC
19302 /*
19303@@ -406,30 +563,45 @@ sysenter_past_esp:
19304 /*CFI_REL_OFFSET cs, 0*/
19305 /*
19306 * Push current_thread_info()->sysenter_return to the stack.
19307- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
19308- * pushed above; +8 corresponds to copy_thread's esp0 setting.
19309 */
19310- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
19311+ pushl_cfi $0
19312 CFI_REL_OFFSET eip, 0
19313
19314 pushl_cfi %eax
19315 SAVE_ALL
19316+ GET_THREAD_INFO(%ebp)
19317+ movl TI_sysenter_return(%ebp),%ebp
19318+ movl %ebp,PT_EIP(%esp)
19319 ENABLE_INTERRUPTS(CLBR_NONE)
19320
19321 /*
19322 * Load the potential sixth argument from user stack.
19323 * Careful about security.
19324 */
19325+ movl PT_OLDESP(%esp),%ebp
19326+
19327+#ifdef CONFIG_PAX_MEMORY_UDEREF
19328+ mov PT_OLDSS(%esp),%ds
19329+1: movl %ds:(%ebp),%ebp
19330+ push %ss
19331+ pop %ds
19332+#else
19333 cmpl $__PAGE_OFFSET-3,%ebp
19334 jae syscall_fault
19335 ASM_STAC
19336 1: movl (%ebp),%ebp
19337 ASM_CLAC
19338+#endif
19339+
19340 movl %ebp,PT_EBP(%esp)
19341 _ASM_EXTABLE(1b,syscall_fault)
19342
19343 GET_THREAD_INFO(%ebp)
19344
19345+#ifdef CONFIG_PAX_RANDKSTACK
19346+ pax_erase_kstack
19347+#endif
19348+
19349 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
19350 jnz sysenter_audit
19351 sysenter_do_call:
19352@@ -444,12 +616,24 @@ sysenter_do_call:
19353 testl $_TIF_ALLWORK_MASK, %ecx
19354 jne sysexit_audit
19355 sysenter_exit:
19356+
19357+#ifdef CONFIG_PAX_RANDKSTACK
19358+ pushl_cfi %eax
19359+ movl %esp, %eax
19360+ call pax_randomize_kstack
19361+ popl_cfi %eax
19362+#endif
19363+
19364+ pax_erase_kstack
19365+
19366 /* if something modifies registers it must also disable sysexit */
19367 movl PT_EIP(%esp), %edx
19368 movl PT_OLDESP(%esp), %ecx
19369 xorl %ebp,%ebp
19370 TRACE_IRQS_ON
19371 1: mov PT_FS(%esp), %fs
19372+2: mov PT_DS(%esp), %ds
19373+3: mov PT_ES(%esp), %es
19374 PTGS_TO_GS
19375 ENABLE_INTERRUPTS_SYSEXIT
19376
19377@@ -466,6 +650,9 @@ sysenter_audit:
19378 movl %eax,%edx /* 2nd arg: syscall number */
19379 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
19380 call __audit_syscall_entry
19381+
19382+ pax_erase_kstack
19383+
19384 pushl_cfi %ebx
19385 movl PT_EAX(%esp),%eax /* reload syscall number */
19386 jmp sysenter_do_call
19387@@ -491,10 +678,16 @@ sysexit_audit:
19388
19389 CFI_ENDPROC
19390 .pushsection .fixup,"ax"
19391-2: movl $0,PT_FS(%esp)
19392+4: movl $0,PT_FS(%esp)
19393+ jmp 1b
19394+5: movl $0,PT_DS(%esp)
19395+ jmp 1b
19396+6: movl $0,PT_ES(%esp)
19397 jmp 1b
19398 .popsection
19399- _ASM_EXTABLE(1b,2b)
19400+ _ASM_EXTABLE(1b,4b)
19401+ _ASM_EXTABLE(2b,5b)
19402+ _ASM_EXTABLE(3b,6b)
19403 PTGS_TO_GS_EX
19404 ENDPROC(ia32_sysenter_target)
19405
19406@@ -509,6 +702,11 @@ ENTRY(system_call)
19407 pushl_cfi %eax # save orig_eax
19408 SAVE_ALL
19409 GET_THREAD_INFO(%ebp)
19410+
19411+#ifdef CONFIG_PAX_RANDKSTACK
19412+ pax_erase_kstack
19413+#endif
19414+
19415 # system call tracing in operation / emulation
19416 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
19417 jnz syscall_trace_entry
19418@@ -527,6 +725,15 @@ syscall_exit:
19419 testl $_TIF_ALLWORK_MASK, %ecx # current->work
19420 jne syscall_exit_work
19421
19422+restore_all_pax:
19423+
19424+#ifdef CONFIG_PAX_RANDKSTACK
19425+ movl %esp, %eax
19426+ call pax_randomize_kstack
19427+#endif
19428+
19429+ pax_erase_kstack
19430+
19431 restore_all:
19432 TRACE_IRQS_IRET
19433 restore_all_notrace:
19434@@ -583,14 +790,34 @@ ldt_ss:
19435 * compensating for the offset by changing to the ESPFIX segment with
19436 * a base address that matches for the difference.
19437 */
19438-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
19439+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
19440 mov %esp, %edx /* load kernel esp */
19441 mov PT_OLDESP(%esp), %eax /* load userspace esp */
19442 mov %dx, %ax /* eax: new kernel esp */
19443 sub %eax, %edx /* offset (low word is 0) */
19444+#ifdef CONFIG_SMP
19445+ movl PER_CPU_VAR(cpu_number), %ebx
19446+ shll $PAGE_SHIFT_asm, %ebx
19447+ addl $cpu_gdt_table, %ebx
19448+#else
19449+ movl $cpu_gdt_table, %ebx
19450+#endif
19451 shr $16, %edx
19452- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
19453- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
19454+
19455+#ifdef CONFIG_PAX_KERNEXEC
19456+ mov %cr0, %esi
19457+ btr $16, %esi
19458+ mov %esi, %cr0
19459+#endif
19460+
19461+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
19462+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
19463+
19464+#ifdef CONFIG_PAX_KERNEXEC
19465+ bts $16, %esi
19466+ mov %esi, %cr0
19467+#endif
19468+
19469 pushl_cfi $__ESPFIX_SS
19470 pushl_cfi %eax /* new kernel esp */
19471 /* Disable interrupts, but do not irqtrace this section: we
19472@@ -619,20 +846,18 @@ work_resched:
19473 movl TI_flags(%ebp), %ecx
19474 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
19475 # than syscall tracing?
19476- jz restore_all
19477+ jz restore_all_pax
19478 testb $_TIF_NEED_RESCHED, %cl
19479 jnz work_resched
19480
19481 work_notifysig: # deal with pending signals and
19482 # notify-resume requests
19483+ movl %esp, %eax
19484 #ifdef CONFIG_VM86
19485 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
19486- movl %esp, %eax
19487 jne work_notifysig_v86 # returning to kernel-space or
19488 # vm86-space
19489 1:
19490-#else
19491- movl %esp, %eax
19492 #endif
19493 TRACE_IRQS_ON
19494 ENABLE_INTERRUPTS(CLBR_NONE)
19495@@ -653,7 +878,7 @@ work_notifysig_v86:
19496 movl %eax, %esp
19497 jmp 1b
19498 #endif
19499-END(work_pending)
19500+ENDPROC(work_pending)
19501
19502 # perform syscall exit tracing
19503 ALIGN
19504@@ -661,11 +886,14 @@ syscall_trace_entry:
19505 movl $-ENOSYS,PT_EAX(%esp)
19506 movl %esp, %eax
19507 call syscall_trace_enter
19508+
19509+ pax_erase_kstack
19510+
19511 /* What it returned is what we'll actually use. */
19512 cmpl $(NR_syscalls), %eax
19513 jnae syscall_call
19514 jmp syscall_exit
19515-END(syscall_trace_entry)
19516+ENDPROC(syscall_trace_entry)
19517
19518 # perform syscall exit tracing
19519 ALIGN
19520@@ -678,21 +906,25 @@ syscall_exit_work:
19521 movl %esp, %eax
19522 call syscall_trace_leave
19523 jmp resume_userspace
19524-END(syscall_exit_work)
19525+ENDPROC(syscall_exit_work)
19526 CFI_ENDPROC
19527
19528 RING0_INT_FRAME # can't unwind into user space anyway
19529 syscall_fault:
19530+#ifdef CONFIG_PAX_MEMORY_UDEREF
19531+ push %ss
19532+ pop %ds
19533+#endif
19534 ASM_CLAC
19535 GET_THREAD_INFO(%ebp)
19536 movl $-EFAULT,PT_EAX(%esp)
19537 jmp resume_userspace
19538-END(syscall_fault)
19539+ENDPROC(syscall_fault)
19540
19541 syscall_badsys:
19542 movl $-ENOSYS,PT_EAX(%esp)
19543 jmp resume_userspace
19544-END(syscall_badsys)
19545+ENDPROC(syscall_badsys)
19546 CFI_ENDPROC
19547 /*
19548 * End of kprobes section
19549@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
19550 * normal stack and adjusts ESP with the matching offset.
19551 */
19552 /* fixup the stack */
19553- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
19554- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
19555+#ifdef CONFIG_SMP
19556+ movl PER_CPU_VAR(cpu_number), %ebx
19557+ shll $PAGE_SHIFT_asm, %ebx
19558+ addl $cpu_gdt_table, %ebx
19559+#else
19560+ movl $cpu_gdt_table, %ebx
19561+#endif
19562+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
19563+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
19564 shl $16, %eax
19565 addl %esp, %eax /* the adjusted stack pointer */
19566 pushl_cfi $__KERNEL_DS
19567@@ -807,7 +1046,7 @@ vector=vector+1
19568 .endr
19569 2: jmp common_interrupt
19570 .endr
19571-END(irq_entries_start)
19572+ENDPROC(irq_entries_start)
19573
19574 .previous
19575 END(interrupt)
19576@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
19577 pushl_cfi $do_coprocessor_error
19578 jmp error_code
19579 CFI_ENDPROC
19580-END(coprocessor_error)
19581+ENDPROC(coprocessor_error)
19582
19583 ENTRY(simd_coprocessor_error)
19584 RING0_INT_FRAME
19585@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
19586 #endif
19587 jmp error_code
19588 CFI_ENDPROC
19589-END(simd_coprocessor_error)
19590+ENDPROC(simd_coprocessor_error)
19591
19592 ENTRY(device_not_available)
19593 RING0_INT_FRAME
19594@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
19595 pushl_cfi $do_device_not_available
19596 jmp error_code
19597 CFI_ENDPROC
19598-END(device_not_available)
19599+ENDPROC(device_not_available)
19600
19601 #ifdef CONFIG_PARAVIRT
19602 ENTRY(native_iret)
19603 iret
19604 _ASM_EXTABLE(native_iret, iret_exc)
19605-END(native_iret)
19606+ENDPROC(native_iret)
19607
19608 ENTRY(native_irq_enable_sysexit)
19609 sti
19610 sysexit
19611-END(native_irq_enable_sysexit)
19612+ENDPROC(native_irq_enable_sysexit)
19613 #endif
19614
19615 ENTRY(overflow)
19616@@ -910,7 +1149,7 @@ ENTRY(overflow)
19617 pushl_cfi $do_overflow
19618 jmp error_code
19619 CFI_ENDPROC
19620-END(overflow)
19621+ENDPROC(overflow)
19622
19623 ENTRY(bounds)
19624 RING0_INT_FRAME
19625@@ -919,7 +1158,7 @@ ENTRY(bounds)
19626 pushl_cfi $do_bounds
19627 jmp error_code
19628 CFI_ENDPROC
19629-END(bounds)
19630+ENDPROC(bounds)
19631
19632 ENTRY(invalid_op)
19633 RING0_INT_FRAME
19634@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
19635 pushl_cfi $do_invalid_op
19636 jmp error_code
19637 CFI_ENDPROC
19638-END(invalid_op)
19639+ENDPROC(invalid_op)
19640
19641 ENTRY(coprocessor_segment_overrun)
19642 RING0_INT_FRAME
19643@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
19644 pushl_cfi $do_coprocessor_segment_overrun
19645 jmp error_code
19646 CFI_ENDPROC
19647-END(coprocessor_segment_overrun)
19648+ENDPROC(coprocessor_segment_overrun)
19649
19650 ENTRY(invalid_TSS)
19651 RING0_EC_FRAME
19652@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
19653 pushl_cfi $do_invalid_TSS
19654 jmp error_code
19655 CFI_ENDPROC
19656-END(invalid_TSS)
19657+ENDPROC(invalid_TSS)
19658
19659 ENTRY(segment_not_present)
19660 RING0_EC_FRAME
19661@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
19662 pushl_cfi $do_segment_not_present
19663 jmp error_code
19664 CFI_ENDPROC
19665-END(segment_not_present)
19666+ENDPROC(segment_not_present)
19667
19668 ENTRY(stack_segment)
19669 RING0_EC_FRAME
19670@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
19671 pushl_cfi $do_stack_segment
19672 jmp error_code
19673 CFI_ENDPROC
19674-END(stack_segment)
19675+ENDPROC(stack_segment)
19676
19677 ENTRY(alignment_check)
19678 RING0_EC_FRAME
19679@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
19680 pushl_cfi $do_alignment_check
19681 jmp error_code
19682 CFI_ENDPROC
19683-END(alignment_check)
19684+ENDPROC(alignment_check)
19685
19686 ENTRY(divide_error)
19687 RING0_INT_FRAME
19688@@ -978,7 +1217,7 @@ ENTRY(divide_error)
19689 pushl_cfi $do_divide_error
19690 jmp error_code
19691 CFI_ENDPROC
19692-END(divide_error)
19693+ENDPROC(divide_error)
19694
19695 #ifdef CONFIG_X86_MCE
19696 ENTRY(machine_check)
19697@@ -988,7 +1227,7 @@ ENTRY(machine_check)
19698 pushl_cfi machine_check_vector
19699 jmp error_code
19700 CFI_ENDPROC
19701-END(machine_check)
19702+ENDPROC(machine_check)
19703 #endif
19704
19705 ENTRY(spurious_interrupt_bug)
19706@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
19707 pushl_cfi $do_spurious_interrupt_bug
19708 jmp error_code
19709 CFI_ENDPROC
19710-END(spurious_interrupt_bug)
19711+ENDPROC(spurious_interrupt_bug)
19712 /*
19713 * End of kprobes section
19714 */
19715@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
19716
19717 ENTRY(mcount)
19718 ret
19719-END(mcount)
19720+ENDPROC(mcount)
19721
19722 ENTRY(ftrace_caller)
19723 cmpl $0, function_trace_stop
19724@@ -1134,7 +1373,7 @@ ftrace_graph_call:
19725 .globl ftrace_stub
19726 ftrace_stub:
19727 ret
19728-END(ftrace_caller)
19729+ENDPROC(ftrace_caller)
19730
19731 ENTRY(ftrace_regs_caller)
19732 pushf /* push flags before compare (in cs location) */
19733@@ -1235,7 +1474,7 @@ trace:
19734 popl %ecx
19735 popl %eax
19736 jmp ftrace_stub
19737-END(mcount)
19738+ENDPROC(mcount)
19739 #endif /* CONFIG_DYNAMIC_FTRACE */
19740 #endif /* CONFIG_FUNCTION_TRACER */
19741
19742@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
19743 popl %ecx
19744 popl %eax
19745 ret
19746-END(ftrace_graph_caller)
19747+ENDPROC(ftrace_graph_caller)
19748
19749 .globl return_to_handler
19750 return_to_handler:
19751@@ -1309,15 +1548,18 @@ error_code:
19752 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
19753 REG_TO_PTGS %ecx
19754 SET_KERNEL_GS %ecx
19755- movl $(__USER_DS), %ecx
19756+ movl $(__KERNEL_DS), %ecx
19757 movl %ecx, %ds
19758 movl %ecx, %es
19759+
19760+ pax_enter_kernel
19761+
19762 TRACE_IRQS_OFF
19763 movl %esp,%eax # pt_regs pointer
19764 call *%edi
19765 jmp ret_from_exception
19766 CFI_ENDPROC
19767-END(page_fault)
19768+ENDPROC(page_fault)
19769
19770 /*
19771 * Debug traps and NMI can happen at the one SYSENTER instruction
19772@@ -1360,7 +1602,7 @@ debug_stack_correct:
19773 call do_debug
19774 jmp ret_from_exception
19775 CFI_ENDPROC
19776-END(debug)
19777+ENDPROC(debug)
19778
19779 /*
19780 * NMI is doubly nasty. It can happen _while_ we're handling
19781@@ -1398,6 +1640,9 @@ nmi_stack_correct:
19782 xorl %edx,%edx # zero error code
19783 movl %esp,%eax # pt_regs pointer
19784 call do_nmi
19785+
19786+ pax_exit_kernel
19787+
19788 jmp restore_all_notrace
19789 CFI_ENDPROC
19790
19791@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
19792 FIXUP_ESPFIX_STACK # %eax == %esp
19793 xorl %edx,%edx # zero error code
19794 call do_nmi
19795+
19796+ pax_exit_kernel
19797+
19798 RESTORE_REGS
19799 lss 12+4(%esp), %esp # back to espfix stack
19800 CFI_ADJUST_CFA_OFFSET -24
19801 jmp irq_return
19802 CFI_ENDPROC
19803-END(nmi)
19804+ENDPROC(nmi)
19805
19806 ENTRY(int3)
19807 RING0_INT_FRAME
19808@@ -1452,14 +1700,14 @@ ENTRY(int3)
19809 call do_int3
19810 jmp ret_from_exception
19811 CFI_ENDPROC
19812-END(int3)
19813+ENDPROC(int3)
19814
19815 ENTRY(general_protection)
19816 RING0_EC_FRAME
19817 pushl_cfi $do_general_protection
19818 jmp error_code
19819 CFI_ENDPROC
19820-END(general_protection)
19821+ENDPROC(general_protection)
19822
19823 #ifdef CONFIG_KVM_GUEST
19824 ENTRY(async_page_fault)
19825@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
19826 pushl_cfi $do_async_page_fault
19827 jmp error_code
19828 CFI_ENDPROC
19829-END(async_page_fault)
19830+ENDPROC(async_page_fault)
19831 #endif
19832
19833 /*
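
Throughout the entry_32.S additions, pax_enter_kernel uses bts $16 on the CR0 image to test-and-set the WP (write-protect) bit in one step: when the carry flag reports WP was clear, the code takes the __KERNEXEC_KERNEL_CS far-jump path and writes CR0 back. A small C model of that test-and-set decision, with the carry flag expressed as a return value and no real CR0 access:

#include <stdio.h>

#define CR0_WP (1UL << 16)              /* write-protect bit in CR0 */

static unsigned long cr0;               /* stand-in for the control register */

/* returns the previous bit state, like the carry flag after bts */
static int test_and_set_wp(void)
{
    int was_set = !!(cr0 & CR0_WP);
    cr0 |= CR0_WP;
    return was_set;
}

int main(void)
{
    if (!test_and_set_wp())
        printf("WP was off: take the KERNEXEC code-segment path, set WP\n");
    else
        printf("WP already on: normalize CS to the regular kernel segment\n");
    printf("cr0 = %#lx\n", cr0);
    return 0;
}
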
19834diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
19835index cb3c591..0617fa7 100644
19836--- a/arch/x86/kernel/entry_64.S
19837+++ b/arch/x86/kernel/entry_64.S
19838@@ -59,6 +59,8 @@
19839 #include <asm/context_tracking.h>
19840 #include <asm/smap.h>
19841 #include <linux/err.h>
19842+#include <asm/pgtable.h>
19843+#include <asm/alternative-asm.h>
19844
19845 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
19846 #include <linux/elf-em.h>
19847@@ -80,8 +82,9 @@
19848 #ifdef CONFIG_DYNAMIC_FTRACE
19849
19850 ENTRY(function_hook)
19851+ pax_force_retaddr
19852 retq
19853-END(function_hook)
19854+ENDPROC(function_hook)
19855
19856 /* skip is set if stack has been adjusted */
19857 .macro ftrace_caller_setup skip=0
19858@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
19859 #endif
19860
19861 GLOBAL(ftrace_stub)
19862+ pax_force_retaddr
19863 retq
19864-END(ftrace_caller)
19865+ENDPROC(ftrace_caller)
19866
19867 ENTRY(ftrace_regs_caller)
19868 /* Save the current flags before compare (in SS location)*/
19869@@ -191,7 +195,7 @@ ftrace_restore_flags:
19870 popfq
19871 jmp ftrace_stub
19872
19873-END(ftrace_regs_caller)
19874+ENDPROC(ftrace_regs_caller)
19875
19876
19877 #else /* ! CONFIG_DYNAMIC_FTRACE */
19878@@ -212,6 +216,7 @@ ENTRY(function_hook)
19879 #endif
19880
19881 GLOBAL(ftrace_stub)
19882+ pax_force_retaddr
19883 retq
19884
19885 trace:
19886@@ -225,12 +230,13 @@ trace:
19887 #endif
19888 subq $MCOUNT_INSN_SIZE, %rdi
19889
19890+ pax_force_fptr ftrace_trace_function
19891 call *ftrace_trace_function
19892
19893 MCOUNT_RESTORE_FRAME
19894
19895 jmp ftrace_stub
19896-END(function_hook)
19897+ENDPROC(function_hook)
19898 #endif /* CONFIG_DYNAMIC_FTRACE */
19899 #endif /* CONFIG_FUNCTION_TRACER */
19900
19901@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
19902
19903 MCOUNT_RESTORE_FRAME
19904
19905+ pax_force_retaddr
19906 retq
19907-END(ftrace_graph_caller)
19908+ENDPROC(ftrace_graph_caller)
19909
19910 GLOBAL(return_to_handler)
19911 subq $24, %rsp
19912@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
19913 movq 8(%rsp), %rdx
19914 movq (%rsp), %rax
19915 addq $24, %rsp
19916+ pax_force_fptr %rdi
19917 jmp *%rdi
19918+ENDPROC(return_to_handler)
19919 #endif
19920
19921
19922@@ -284,6 +293,282 @@ ENTRY(native_usergs_sysret64)
19923 ENDPROC(native_usergs_sysret64)
19924 #endif /* CONFIG_PARAVIRT */
19925
19926+ .macro ljmpq sel, off
19927+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
19928+ .byte 0x48; ljmp *1234f(%rip)
19929+ .pushsection .rodata
19930+ .align 16
19931+ 1234: .quad \off; .word \sel
19932+ .popsection
19933+#else
19934+ pushq $\sel
19935+ pushq $\off
19936+ lretq
19937+#endif
19938+ .endm
19939+
19940+ .macro pax_enter_kernel
19941+ pax_set_fptr_mask
19942+#ifdef CONFIG_PAX_KERNEXEC
19943+ call pax_enter_kernel
19944+#endif
19945+ .endm
19946+
19947+ .macro pax_exit_kernel
19948+#ifdef CONFIG_PAX_KERNEXEC
19949+ call pax_exit_kernel
19950+#endif
19951+ .endm
19952+
19953+#ifdef CONFIG_PAX_KERNEXEC
19954+ENTRY(pax_enter_kernel)
19955+ pushq %rdi
19956+
19957+#ifdef CONFIG_PARAVIRT
19958+ PV_SAVE_REGS(CLBR_RDI)
19959+#endif
19960+
19961+ GET_CR0_INTO_RDI
19962+ bts $16,%rdi
19963+ jnc 3f
19964+ mov %cs,%edi
19965+ cmp $__KERNEL_CS,%edi
19966+ jnz 2f
19967+1:
19968+
19969+#ifdef CONFIG_PARAVIRT
19970+ PV_RESTORE_REGS(CLBR_RDI)
19971+#endif
19972+
19973+ popq %rdi
19974+ pax_force_retaddr
19975+ retq
19976+
19977+2: ljmpq __KERNEL_CS,1b
19978+3: ljmpq __KERNEXEC_KERNEL_CS,4f
19979+4: SET_RDI_INTO_CR0
19980+ jmp 1b
19981+ENDPROC(pax_enter_kernel)
19982+
19983+ENTRY(pax_exit_kernel)
19984+ pushq %rdi
19985+
19986+#ifdef CONFIG_PARAVIRT
19987+ PV_SAVE_REGS(CLBR_RDI)
19988+#endif
19989+
19990+ mov %cs,%rdi
19991+ cmp $__KERNEXEC_KERNEL_CS,%edi
19992+ jz 2f
19993+ GET_CR0_INTO_RDI
19994+ bts $16,%rdi
19995+ jnc 4f
19996+1:
19997+
19998+#ifdef CONFIG_PARAVIRT
19999+ PV_RESTORE_REGS(CLBR_RDI);
20000+#endif
20001+
20002+ popq %rdi
20003+ pax_force_retaddr
20004+ retq
20005+
20006+2: GET_CR0_INTO_RDI
20007+ btr $16,%rdi
20008+ jnc 4f
20009+ ljmpq __KERNEL_CS,3f
20010+3: SET_RDI_INTO_CR0
20011+ jmp 1b
20012+4: ud2
20013+ jmp 4b
20014+ENDPROC(pax_exit_kernel)
20015+#endif
20016+
20017+ .macro pax_enter_kernel_user
20018+ pax_set_fptr_mask
20019+#ifdef CONFIG_PAX_MEMORY_UDEREF
20020+ call pax_enter_kernel_user
20021+#endif
20022+ .endm
20023+
20024+ .macro pax_exit_kernel_user
20025+#ifdef CONFIG_PAX_MEMORY_UDEREF
20026+ call pax_exit_kernel_user
20027+#endif
20028+#ifdef CONFIG_PAX_RANDKSTACK
20029+ pushq %rax
20030+ call pax_randomize_kstack
20031+ popq %rax
20032+#endif
20033+ .endm
20034+
20035+#ifdef CONFIG_PAX_MEMORY_UDEREF
20036+ENTRY(pax_enter_kernel_user)
20037+ pushq %rdi
20038+ pushq %rbx
20039+
20040+#ifdef CONFIG_PARAVIRT
20041+ PV_SAVE_REGS(CLBR_RDI)
20042+#endif
20043+
20044+ GET_CR3_INTO_RDI
20045+ mov %rdi,%rbx
20046+ add $__START_KERNEL_map,%rbx
20047+ sub phys_base(%rip),%rbx
20048+
20049+#ifdef CONFIG_PARAVIRT
20050+ pushq %rdi
20051+ cmpl $0, pv_info+PARAVIRT_enabled
20052+ jz 1f
20053+ i = 0
20054+ .rept USER_PGD_PTRS
20055+ mov i*8(%rbx),%rsi
20056+ mov $0,%sil
20057+ lea i*8(%rbx),%rdi
20058+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
20059+ i = i + 1
20060+ .endr
20061+ jmp 2f
20062+1:
20063+#endif
20064+
20065+ i = 0
20066+ .rept USER_PGD_PTRS
20067+ movb $0,i*8(%rbx)
20068+ i = i + 1
20069+ .endr
20070+
20071+#ifdef CONFIG_PARAVIRT
20072+2: popq %rdi
20073+#endif
20074+ SET_RDI_INTO_CR3
20075+
20076+#ifdef CONFIG_PAX_KERNEXEC
20077+ GET_CR0_INTO_RDI
20078+ bts $16,%rdi
20079+ SET_RDI_INTO_CR0
20080+#endif
20081+
20082+#ifdef CONFIG_PARAVIRT
20083+ PV_RESTORE_REGS(CLBR_RDI)
20084+#endif
20085+
20086+ popq %rbx
20087+ popq %rdi
20088+ pax_force_retaddr
20089+ retq
20090+ENDPROC(pax_enter_kernel_user)
20091+
20092+ENTRY(pax_exit_kernel_user)
20093+ push %rdi
20094+
20095+#ifdef CONFIG_PARAVIRT
20096+ pushq %rbx
20097+ PV_SAVE_REGS(CLBR_RDI)
20098+#endif
20099+
20100+#ifdef CONFIG_PAX_KERNEXEC
20101+ GET_CR0_INTO_RDI
20102+ btr $16,%rdi
20103+ jnc 3f
20104+ SET_RDI_INTO_CR0
20105+#endif
20106+
20107+ GET_CR3_INTO_RDI
20108+ add $__START_KERNEL_map,%rdi
20109+ sub phys_base(%rip),%rdi
20110+
20111+#ifdef CONFIG_PARAVIRT
20112+ cmpl $0, pv_info+PARAVIRT_enabled
20113+ jz 1f
20114+ mov %rdi,%rbx
20115+ i = 0
20116+ .rept USER_PGD_PTRS
20117+ mov i*8(%rbx),%rsi
20118+ mov $0x67,%sil
20119+ lea i*8(%rbx),%rdi
20120+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
20121+ i = i + 1
20122+ .endr
20123+ jmp 2f
20124+1:
20125+#endif
20126+
20127+ i = 0
20128+ .rept USER_PGD_PTRS
20129+ movb $0x67,i*8(%rdi)
20130+ i = i + 1
20131+ .endr
20132+
20133+#ifdef CONFIG_PARAVIRT
20134+2: PV_RESTORE_REGS(CLBR_RDI)
20135+ popq %rbx
20136+#endif
20137+
20138+ popq %rdi
20139+ pax_force_retaddr
20140+ retq
20141+3: ud2
20142+ jmp 3b
20143+ENDPROC(pax_exit_kernel_user)
20144+#endif
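The UDEREF pair above unmaps and remaps userland around kernel entry by rewriting the low byte of each user PGD slot: zeroing it drops the Present bit, writing 0x67 brings the entry back. By my reading, 0x67 is PRESENT|RW|USER|ACCESSED|DIRTY, and USER_PGD_PTRS covers the lower, user half of the PGD; treat both, and the slot count below, as assumptions. A little-endian model of the byte stores:

    /* Model of the PGD walk in pax_enter_kernel_user/pax_exit_kernel_user:
     * "movb $0,i*8(%rbx)" clears the low byte of each user slot on entry,
     * "movb $0x67,i*8(%rdi)" restores it on exit; on little-endian that
     * low byte holds the permission flags. */
    #include <stdio.h>
    #include <stdint.h>

    #define PRESENT  0x01
    #define RW       0x02
    #define USER     0x04
    #define ACCESSED 0x20
    #define DIRTY    0x40
    #define USER_PGD_PTRS 256   /* assumed: lower half of a 512-entry PGD */

    int main(void)
    {
        static uint64_t pgd[512];
        int i;

        pgd[0] = 0x1234000 | PRESENT | RW | USER | ACCESSED | DIRTY;

        for (i = 0; i < USER_PGD_PTRS; i++)       /* kernel entry */
            pgd[i] &= ~0xffULL;
        printf("entry 0 after enter: %#llx (not present)\n",
               (unsigned long long)pgd[0]);

        for (i = 0; i < USER_PGD_PTRS; i++)       /* kernel exit */
            pgd[i] = (pgd[i] & ~0xffULL) | 0x67;
        printf("entry 0 after exit:  %#llx\n", (unsigned long long)pgd[0]);
        printf("0x67 == %#x\n", PRESENT | RW | USER | ACCESSED | DIRTY);
        return 0;
    }

The paravirt branch does the same rewrite through pv_mmu_ops.set_pgd_batched instead of direct byte stores.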
20145+
20146+.macro pax_erase_kstack
20147+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20148+ call pax_erase_kstack
20149+#endif
20150+.endm
20151+
20152+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20153+ENTRY(pax_erase_kstack)
20154+ pushq %rdi
20155+ pushq %rcx
20156+ pushq %rax
20157+ pushq %r11
20158+
20159+ GET_THREAD_INFO(%r11)
20160+ mov TI_lowest_stack(%r11), %rdi
20161+ mov $-0xBEEF, %rax
20162+ std
20163+
20164+1: mov %edi, %ecx
20165+ and $THREAD_SIZE_asm - 1, %ecx
20166+ shr $3, %ecx
20167+ repne scasq
20168+ jecxz 2f
20169+
20170+ cmp $2*8, %ecx
20171+ jc 2f
20172+
20173+ mov $2*8, %ecx
20174+ repe scasq
20175+ jecxz 2f
20176+ jne 1b
20177+
20178+2: cld
20179+ mov %esp, %ecx
20180+ sub %edi, %ecx
20181+
20182+ cmp $THREAD_SIZE_asm, %rcx
20183+ jb 3f
20184+ ud2
20185+3:
20186+
20187+ shr $3, %ecx
20188+ rep stosq
20189+
20190+ mov TI_task_thread_sp0(%r11), %rdi
20191+ sub $256, %rdi
20192+ mov %rdi, TI_lowest_stack(%r11)
20193+
20194+ popq %r11
20195+ popq %rax
20196+ popq %rcx
20197+ popq %rdi
20198+ pax_force_retaddr
20199+ ret
20200+ENDPROC(pax_erase_kstack)
20201+#endif
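pax_erase_kstack above is the STACKLEAK scrubber: starting from the thread's recorded low-water mark it scans for the poison qword -0xBEEF (0xffffffffffff4111 once sign-extended) with std/repne scasq, refills everything between the deepest dirtied word and the live stack pointer via rep stosq, and resets TI_lowest_stack near the thread's sp0. A simplified user-space model, where sizes, orientation and the run-length heuristic are stand-ins:

    #include <stdio.h>
    #include <stdint.h>

    #define POISON ((uint64_t)-0xBEEF)   /* sign-extends to 0xffffffffffff4111 */
    #define WORDS  512                   /* stand-in for THREAD_SIZE / 8 */

    int main(void)
    {
        static uint64_t stack[WORDS];    /* index ~ depth: higher = deeper */
        int i, sp, deepest;

        for (i = 0; i < WORDS; i++)
            stack[i] = POISON;           /* freshly poisoned stack */
        for (i = 0; i < 300; i++)
            stack[i] = i;                /* region dirtied by earlier calls */

        deepest = WORDS - 1;             /* the std/repne scasq scan: walk */
        while (deepest > 0 && stack[deepest] == POISON)
            deepest--;                   /* ...down to a non-poison word */

        sp = 100;                        /* current live stack depth */
        for (i = sp; i <= deepest; i++)  /* the rep stosq refill */
            stack[i] = POISON;

        printf("re-poisoned words [%d, %d]\n", sp, deepest);
        return 0;
    }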
20202
20203 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
20204 #ifdef CONFIG_TRACE_IRQFLAGS
20205@@ -375,8 +660,8 @@ ENDPROC(native_usergs_sysret64)
20206 .endm
20207
20208 .macro UNFAKE_STACK_FRAME
20209- addq $8*6, %rsp
20210- CFI_ADJUST_CFA_OFFSET -(6*8)
20211+ addq $8*6 + ARG_SKIP, %rsp
20212+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
20213 .endm
20214
20215 /*
20216@@ -463,7 +748,7 @@ ENDPROC(native_usergs_sysret64)
20217 movq %rsp, %rsi
20218
20219 leaq -RBP(%rsp),%rdi /* arg1 for handler */
20220- testl $3, CS-RBP(%rsi)
20221+ testb $3, CS-RBP(%rsi)
20222 je 1f
20223 SWAPGS
20224 /*
20225@@ -498,9 +783,10 @@ ENTRY(save_rest)
20226 movq_cfi r15, R15+16
20227 movq %r11, 8(%rsp) /* return address */
20228 FIXUP_TOP_OF_STACK %r11, 16
20229+ pax_force_retaddr
20230 ret
20231 CFI_ENDPROC
20232-END(save_rest)
20233+ENDPROC(save_rest)
20234
20235 /* save complete stack frame */
20236 .pushsection .kprobes.text, "ax"
20237@@ -529,9 +815,10 @@ ENTRY(save_paranoid)
20238 js 1f /* negative -> in kernel */
20239 SWAPGS
20240 xorl %ebx,%ebx
20241-1: ret
20242+1: pax_force_retaddr_bts
20243+ ret
20244 CFI_ENDPROC
20245-END(save_paranoid)
20246+ENDPROC(save_paranoid)
20247 .popsection
20248
20249 /*
20250@@ -553,7 +840,7 @@ ENTRY(ret_from_fork)
20251
20252 RESTORE_REST
20253
20254- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
20255+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
20256 jz 1f
20257
20258 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
20259@@ -571,7 +858,7 @@ ENTRY(ret_from_fork)
20260 RESTORE_REST
20261 jmp int_ret_from_sys_call
20262 CFI_ENDPROC
20263-END(ret_from_fork)
20264+ENDPROC(ret_from_fork)
20265
20266 /*
20267 * System call entry. Up to 6 arguments in registers are supported.
20268@@ -608,7 +895,7 @@ END(ret_from_fork)
20269 ENTRY(system_call)
20270 CFI_STARTPROC simple
20271 CFI_SIGNAL_FRAME
20272- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
20273+ CFI_DEF_CFA rsp,0
20274 CFI_REGISTER rip,rcx
20275 /*CFI_REGISTER rflags,r11*/
20276 SWAPGS_UNSAFE_STACK
20277@@ -621,16 +908,23 @@ GLOBAL(system_call_after_swapgs)
20278
20279 movq %rsp,PER_CPU_VAR(old_rsp)
20280 movq PER_CPU_VAR(kernel_stack),%rsp
20281+ SAVE_ARGS 8*6,0
20282+ pax_enter_kernel_user
20283+
20284+#ifdef CONFIG_PAX_RANDKSTACK
20285+ pax_erase_kstack
20286+#endif
20287+
20288 /*
20289 * No need to follow this irqs off/on section - it's straight
20290 * and short:
20291 */
20292 ENABLE_INTERRUPTS(CLBR_NONE)
20293- SAVE_ARGS 8,0
20294 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
20295 movq %rcx,RIP-ARGOFFSET(%rsp)
20296 CFI_REL_OFFSET rip,RIP-ARGOFFSET
20297- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
20298+ GET_THREAD_INFO(%rcx)
20299+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
20300 jnz tracesys
20301 system_call_fastpath:
20302 #if __SYSCALL_MASK == ~0
20303@@ -640,7 +934,7 @@ system_call_fastpath:
20304 cmpl $__NR_syscall_max,%eax
20305 #endif
20306 ja badsys
20307- movq %r10,%rcx
20308+ movq R10-ARGOFFSET(%rsp),%rcx
20309 call *sys_call_table(,%rax,8) # XXX: rip relative
20310 movq %rax,RAX-ARGOFFSET(%rsp)
20311 /*
20312@@ -654,10 +948,13 @@ sysret_check:
20313 LOCKDEP_SYS_EXIT
20314 DISABLE_INTERRUPTS(CLBR_NONE)
20315 TRACE_IRQS_OFF
20316- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
20317+ GET_THREAD_INFO(%rcx)
20318+ movl TI_flags(%rcx),%edx
20319 andl %edi,%edx
20320 jnz sysret_careful
20321 CFI_REMEMBER_STATE
20322+ pax_exit_kernel_user
20323+ pax_erase_kstack
20324 /*
20325 * sysretq will re-enable interrupts:
20326 */
20327@@ -709,14 +1006,18 @@ badsys:
20328 * jump back to the normal fast path.
20329 */
20330 auditsys:
20331- movq %r10,%r9 /* 6th arg: 4th syscall arg */
20332+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
20333 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
20334 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
20335 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
20336 movq %rax,%rsi /* 2nd arg: syscall number */
20337 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
20338 call __audit_syscall_entry
20339+
20340+ pax_erase_kstack
20341+
20342 LOAD_ARGS 0 /* reload call-clobbered registers */
20343+ pax_set_fptr_mask
20344 jmp system_call_fastpath
20345
20346 /*
20347@@ -737,7 +1038,7 @@ sysret_audit:
20348 /* Do syscall tracing */
20349 tracesys:
20350 #ifdef CONFIG_AUDITSYSCALL
20351- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
20352+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
20353 jz auditsys
20354 #endif
20355 SAVE_REST
20356@@ -745,12 +1046,16 @@ tracesys:
20357 FIXUP_TOP_OF_STACK %rdi
20358 movq %rsp,%rdi
20359 call syscall_trace_enter
20360+
20361+ pax_erase_kstack
20362+
20363 /*
20364 * Reload arg registers from stack in case ptrace changed them.
20365 * We don't reload %rax because syscall_trace_enter() returned
20366 * the value it wants us to use in the table lookup.
20367 */
20368 LOAD_ARGS ARGOFFSET, 1
20369+ pax_set_fptr_mask
20370 RESTORE_REST
20371 #if __SYSCALL_MASK == ~0
20372 cmpq $__NR_syscall_max,%rax
20373@@ -759,7 +1064,7 @@ tracesys:
20374 cmpl $__NR_syscall_max,%eax
20375 #endif
20376 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
20377- movq %r10,%rcx /* fixup for C */
20378+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
20379 call *sys_call_table(,%rax,8)
20380 movq %rax,RAX-ARGOFFSET(%rsp)
20381 /* Use IRET because user could have changed frame */
20382@@ -780,7 +1085,9 @@ GLOBAL(int_with_check)
20383 andl %edi,%edx
20384 jnz int_careful
20385 andl $~TS_COMPAT,TI_status(%rcx)
20386- jmp retint_swapgs
20387+ pax_exit_kernel_user
20388+ pax_erase_kstack
20389+ jmp retint_swapgs_pax
20390
20391 /* Either reschedule or signal or syscall exit tracking needed. */
20392 /* First do a reschedule test. */
20393@@ -826,7 +1133,7 @@ int_restore_rest:
20394 TRACE_IRQS_OFF
20395 jmp int_with_check
20396 CFI_ENDPROC
20397-END(system_call)
20398+ENDPROC(system_call)
20399
20400 /*
20401 * Certain special system calls that need to save a complete full stack frame.
20402@@ -842,7 +1149,7 @@ ENTRY(\label)
20403 call \func
20404 jmp ptregscall_common
20405 CFI_ENDPROC
20406-END(\label)
20407+ENDPROC(\label)
20408 .endm
20409
20410 .macro FORK_LIKE func
20411@@ -856,9 +1163,10 @@ ENTRY(stub_\func)
20412 DEFAULT_FRAME 0 8 /* offset 8: return address */
20413 call sys_\func
20414 RESTORE_TOP_OF_STACK %r11, 8
20415+ pax_force_retaddr
20416 ret $REST_SKIP /* pop extended registers */
20417 CFI_ENDPROC
20418-END(stub_\func)
20419+ENDPROC(stub_\func)
20420 .endm
20421
20422 FORK_LIKE clone
20423@@ -875,9 +1183,10 @@ ENTRY(ptregscall_common)
20424 movq_cfi_restore R12+8, r12
20425 movq_cfi_restore RBP+8, rbp
20426 movq_cfi_restore RBX+8, rbx
20427+ pax_force_retaddr
20428 ret $REST_SKIP /* pop extended registers */
20429 CFI_ENDPROC
20430-END(ptregscall_common)
20431+ENDPROC(ptregscall_common)
20432
20433 ENTRY(stub_execve)
20434 CFI_STARTPROC
20435@@ -891,7 +1200,7 @@ ENTRY(stub_execve)
20436 RESTORE_REST
20437 jmp int_ret_from_sys_call
20438 CFI_ENDPROC
20439-END(stub_execve)
20440+ENDPROC(stub_execve)
20441
20442 /*
20443 * sigreturn is special because it needs to restore all registers on return.
20444@@ -909,7 +1218,7 @@ ENTRY(stub_rt_sigreturn)
20445 RESTORE_REST
20446 jmp int_ret_from_sys_call
20447 CFI_ENDPROC
20448-END(stub_rt_sigreturn)
20449+ENDPROC(stub_rt_sigreturn)
20450
20451 #ifdef CONFIG_X86_X32_ABI
20452 ENTRY(stub_x32_rt_sigreturn)
20453@@ -975,7 +1284,7 @@ vector=vector+1
20454 2: jmp common_interrupt
20455 .endr
20456 CFI_ENDPROC
20457-END(irq_entries_start)
20458+ENDPROC(irq_entries_start)
20459
20460 .previous
20461 END(interrupt)
20462@@ -995,6 +1304,16 @@ END(interrupt)
20463 subq $ORIG_RAX-RBP, %rsp
20464 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
20465 SAVE_ARGS_IRQ
20466+#ifdef CONFIG_PAX_MEMORY_UDEREF
20467+ testb $3, CS(%rdi)
20468+ jnz 1f
20469+ pax_enter_kernel
20470+ jmp 2f
20471+1: pax_enter_kernel_user
20472+2:
20473+#else
20474+ pax_enter_kernel
20475+#endif
20476 call \func
20477 .endm
20478
20479@@ -1027,7 +1346,7 @@ ret_from_intr:
20480
20481 exit_intr:
20482 GET_THREAD_INFO(%rcx)
20483- testl $3,CS-ARGOFFSET(%rsp)
20484+ testb $3,CS-ARGOFFSET(%rsp)
20485 je retint_kernel
20486
20487 /* Interrupt came from user space */
20488@@ -1049,12 +1368,16 @@ retint_swapgs: /* return to user-space */
20489 * The iretq could re-enable interrupts:
20490 */
20491 DISABLE_INTERRUPTS(CLBR_ANY)
20492+ pax_exit_kernel_user
20493+retint_swapgs_pax:
20494 TRACE_IRQS_IRETQ
20495 SWAPGS
20496 jmp restore_args
20497
20498 retint_restore_args: /* return to kernel space */
20499 DISABLE_INTERRUPTS(CLBR_ANY)
20500+ pax_exit_kernel
20501+ pax_force_retaddr (RIP-ARGOFFSET)
20502 /*
20503 * The iretq could re-enable interrupts:
20504 */
20505@@ -1137,7 +1460,7 @@ ENTRY(retint_kernel)
20506 #endif
20507
20508 CFI_ENDPROC
20509-END(common_interrupt)
20510+ENDPROC(common_interrupt)
20511 /*
20512 * End of kprobes section
20513 */
20514@@ -1155,7 +1478,7 @@ ENTRY(\sym)
20515 interrupt \do_sym
20516 jmp ret_from_intr
20517 CFI_ENDPROC
20518-END(\sym)
20519+ENDPROC(\sym)
20520 .endm
20521
20522 #ifdef CONFIG_SMP
20523@@ -1211,12 +1534,22 @@ ENTRY(\sym)
20524 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
20525 call error_entry
20526 DEFAULT_FRAME 0
20527+#ifdef CONFIG_PAX_MEMORY_UDEREF
20528+ testb $3, CS(%rsp)
20529+ jnz 1f
20530+ pax_enter_kernel
20531+ jmp 2f
20532+1: pax_enter_kernel_user
20533+2:
20534+#else
20535+ pax_enter_kernel
20536+#endif
20537 movq %rsp,%rdi /* pt_regs pointer */
20538 xorl %esi,%esi /* no error code */
20539 call \do_sym
20540 jmp error_exit /* %ebx: no swapgs flag */
20541 CFI_ENDPROC
20542-END(\sym)
20543+ENDPROC(\sym)
20544 .endm
20545
20546 .macro paranoidzeroentry sym do_sym
20547@@ -1229,15 +1562,25 @@ ENTRY(\sym)
20548 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
20549 call save_paranoid
20550 TRACE_IRQS_OFF
20551+#ifdef CONFIG_PAX_MEMORY_UDEREF
20552+ testb $3, CS(%rsp)
20553+ jnz 1f
20554+ pax_enter_kernel
20555+ jmp 2f
20556+1: pax_enter_kernel_user
20557+2:
20558+#else
20559+ pax_enter_kernel
20560+#endif
20561 movq %rsp,%rdi /* pt_regs pointer */
20562 xorl %esi,%esi /* no error code */
20563 call \do_sym
20564 jmp paranoid_exit /* %ebx: no swapgs flag */
20565 CFI_ENDPROC
20566-END(\sym)
20567+ENDPROC(\sym)
20568 .endm
20569
20570-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
20571+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
20572 .macro paranoidzeroentry_ist sym do_sym ist
20573 ENTRY(\sym)
20574 INTR_FRAME
20575@@ -1248,14 +1591,30 @@ ENTRY(\sym)
20576 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
20577 call save_paranoid
20578 TRACE_IRQS_OFF_DEBUG
20579+#ifdef CONFIG_PAX_MEMORY_UDEREF
20580+ testb $3, CS(%rsp)
20581+ jnz 1f
20582+ pax_enter_kernel
20583+ jmp 2f
20584+1: pax_enter_kernel_user
20585+2:
20586+#else
20587+ pax_enter_kernel
20588+#endif
20589 movq %rsp,%rdi /* pt_regs pointer */
20590 xorl %esi,%esi /* no error code */
20591+#ifdef CONFIG_SMP
20592+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
20593+ lea init_tss(%r12), %r12
20594+#else
20595+ lea init_tss(%rip), %r12
20596+#endif
20597 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
20598 call \do_sym
20599 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
20600 jmp paranoid_exit /* %ebx: no swapgs flag */
20601 CFI_ENDPROC
20602-END(\sym)
20603+ENDPROC(\sym)
20604 .endm
20605
20606 .macro errorentry sym do_sym
20607@@ -1267,13 +1626,23 @@ ENTRY(\sym)
20608 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
20609 call error_entry
20610 DEFAULT_FRAME 0
20611+#ifdef CONFIG_PAX_MEMORY_UDEREF
20612+ testb $3, CS(%rsp)
20613+ jnz 1f
20614+ pax_enter_kernel
20615+ jmp 2f
20616+1: pax_enter_kernel_user
20617+2:
20618+#else
20619+ pax_enter_kernel
20620+#endif
20621 movq %rsp,%rdi /* pt_regs pointer */
20622 movq ORIG_RAX(%rsp),%rsi /* get error code */
20623 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
20624 call \do_sym
20625 jmp error_exit /* %ebx: no swapgs flag */
20626 CFI_ENDPROC
20627-END(\sym)
20628+ENDPROC(\sym)
20629 .endm
20630
20631 /* error code is on the stack already */
20632@@ -1287,13 +1656,23 @@ ENTRY(\sym)
20633 call save_paranoid
20634 DEFAULT_FRAME 0
20635 TRACE_IRQS_OFF
20636+#ifdef CONFIG_PAX_MEMORY_UDEREF
20637+ testb $3, CS(%rsp)
20638+ jnz 1f
20639+ pax_enter_kernel
20640+ jmp 2f
20641+1: pax_enter_kernel_user
20642+2:
20643+#else
20644+ pax_enter_kernel
20645+#endif
20646 movq %rsp,%rdi /* pt_regs pointer */
20647 movq ORIG_RAX(%rsp),%rsi /* get error code */
20648 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
20649 call \do_sym
20650 jmp paranoid_exit /* %ebx: no swapgs flag */
20651 CFI_ENDPROC
20652-END(\sym)
20653+ENDPROC(\sym)
20654 .endm
20655
20656 zeroentry divide_error do_divide_error
20657@@ -1323,9 +1702,10 @@ gs_change:
20658 2: mfence /* workaround */
20659 SWAPGS
20660 popfq_cfi
20661+ pax_force_retaddr
20662 ret
20663 CFI_ENDPROC
20664-END(native_load_gs_index)
20665+ENDPROC(native_load_gs_index)
20666
20667 _ASM_EXTABLE(gs_change,bad_gs)
20668 .section .fixup,"ax"
20669@@ -1353,9 +1733,10 @@ ENTRY(call_softirq)
20670 CFI_DEF_CFA_REGISTER rsp
20671 CFI_ADJUST_CFA_OFFSET -8
20672 decl PER_CPU_VAR(irq_count)
20673+ pax_force_retaddr
20674 ret
20675 CFI_ENDPROC
20676-END(call_softirq)
20677+ENDPROC(call_softirq)
20678
20679 #ifdef CONFIG_XEN
20680 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
20681@@ -1393,7 +1774,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
20682 decl PER_CPU_VAR(irq_count)
20683 jmp error_exit
20684 CFI_ENDPROC
20685-END(xen_do_hypervisor_callback)
20686+ENDPROC(xen_do_hypervisor_callback)
20687
20688 /*
20689 * Hypervisor uses this for application faults while it executes.
20690@@ -1452,7 +1833,7 @@ ENTRY(xen_failsafe_callback)
20691 SAVE_ALL
20692 jmp error_exit
20693 CFI_ENDPROC
20694-END(xen_failsafe_callback)
20695+ENDPROC(xen_failsafe_callback)
20696
20697 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
20698 xen_hvm_callback_vector xen_evtchn_do_upcall
20699@@ -1501,16 +1882,31 @@ ENTRY(paranoid_exit)
20700 TRACE_IRQS_OFF_DEBUG
20701 testl %ebx,%ebx /* swapgs needed? */
20702 jnz paranoid_restore
20703- testl $3,CS(%rsp)
20704+ testb $3,CS(%rsp)
20705 jnz paranoid_userspace
20706+#ifdef CONFIG_PAX_MEMORY_UDEREF
20707+ pax_exit_kernel
20708+ TRACE_IRQS_IRETQ 0
20709+ SWAPGS_UNSAFE_STACK
20710+ RESTORE_ALL 8
20711+ pax_force_retaddr_bts
20712+ jmp irq_return
20713+#endif
20714 paranoid_swapgs:
20715+#ifdef CONFIG_PAX_MEMORY_UDEREF
20716+ pax_exit_kernel_user
20717+#else
20718+ pax_exit_kernel
20719+#endif
20720 TRACE_IRQS_IRETQ 0
20721 SWAPGS_UNSAFE_STACK
20722 RESTORE_ALL 8
20723 jmp irq_return
20724 paranoid_restore:
20725+ pax_exit_kernel
20726 TRACE_IRQS_IRETQ_DEBUG 0
20727 RESTORE_ALL 8
20728+ pax_force_retaddr_bts
20729 jmp irq_return
20730 paranoid_userspace:
20731 GET_THREAD_INFO(%rcx)
20732@@ -1539,7 +1935,7 @@ paranoid_schedule:
20733 TRACE_IRQS_OFF
20734 jmp paranoid_userspace
20735 CFI_ENDPROC
20736-END(paranoid_exit)
20737+ENDPROC(paranoid_exit)
20738
20739 /*
20740 * Exception entry point. This expects an error code/orig_rax on the stack.
20741@@ -1566,12 +1962,13 @@ ENTRY(error_entry)
20742 movq_cfi r14, R14+8
20743 movq_cfi r15, R15+8
20744 xorl %ebx,%ebx
20745- testl $3,CS+8(%rsp)
20746+ testb $3,CS+8(%rsp)
20747 je error_kernelspace
20748 error_swapgs:
20749 SWAPGS
20750 error_sti:
20751 TRACE_IRQS_OFF
20752+ pax_force_retaddr_bts
20753 ret
20754
20755 /*
20756@@ -1598,7 +1995,7 @@ bstep_iret:
20757 movq %rcx,RIP+8(%rsp)
20758 jmp error_swapgs
20759 CFI_ENDPROC
20760-END(error_entry)
20761+ENDPROC(error_entry)
20762
20763
20764 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
20765@@ -1618,7 +2015,7 @@ ENTRY(error_exit)
20766 jnz retint_careful
20767 jmp retint_swapgs
20768 CFI_ENDPROC
20769-END(error_exit)
20770+ENDPROC(error_exit)
20771
20772 /*
20773 * Test if a given stack is an NMI stack or not.
20774@@ -1676,9 +2073,11 @@ ENTRY(nmi)
20775 * If %cs was not the kernel segment, then the NMI triggered in user
20776 * space, which means it is definitely not nested.
20777 */
20778+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
20779+ je 1f
20780 cmpl $__KERNEL_CS, 16(%rsp)
20781 jne first_nmi
20782-
20783+1:
20784 /*
20785 * Check the special variable on the stack to see if NMIs are
20786 * executing.
20787@@ -1712,8 +2111,7 @@ nested_nmi:
20788
20789 1:
20790 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
20791- leaq -1*8(%rsp), %rdx
20792- movq %rdx, %rsp
20793+ subq $8, %rsp
20794 CFI_ADJUST_CFA_OFFSET 1*8
20795 leaq -10*8(%rsp), %rdx
20796 pushq_cfi $__KERNEL_DS
20797@@ -1731,6 +2129,7 @@ nested_nmi_out:
20798 CFI_RESTORE rdx
20799
20800 /* No need to check faults here */
20801+ pax_force_retaddr_bts
20802 INTERRUPT_RETURN
20803
20804 CFI_RESTORE_STATE
20805@@ -1847,6 +2246,17 @@ end_repeat_nmi:
20806 */
20807 movq %cr2, %r12
20808
20809+#ifdef CONFIG_PAX_MEMORY_UDEREF
20810+ testb $3, CS(%rsp)
20811+ jnz 1f
20812+ pax_enter_kernel
20813+ jmp 2f
20814+1: pax_enter_kernel_user
20815+2:
20816+#else
20817+ pax_enter_kernel
20818+#endif
20819+
20820 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
20821 movq %rsp,%rdi
20822 movq $-1,%rsi
20823@@ -1862,23 +2272,34 @@ end_repeat_nmi:
20824 testl %ebx,%ebx /* swapgs needed? */
20825 jnz nmi_restore
20826 nmi_swapgs:
20827+#ifdef CONFIG_PAX_MEMORY_UDEREF
20828+ pax_exit_kernel_user
20829+#else
20830+ pax_exit_kernel
20831+#endif
20832 SWAPGS_UNSAFE_STACK
20833+ RESTORE_ALL 6*8
20834+ /* Clear the NMI executing stack variable */
20835+ movq $0, 5*8(%rsp)
20836+ jmp irq_return
20837 nmi_restore:
20838+ pax_exit_kernel
20839 /* Pop the extra iret frame at once */
20840 RESTORE_ALL 6*8
20841+ pax_force_retaddr_bts
20842
20843 /* Clear the NMI executing stack variable */
20844 movq $0, 5*8(%rsp)
20845 jmp irq_return
20846 CFI_ENDPROC
20847-END(nmi)
20848+ENDPROC(nmi)
20849
20850 ENTRY(ignore_sysret)
20851 CFI_STARTPROC
20852 mov $-ENOSYS,%eax
20853 sysret
20854 CFI_ENDPROC
20855-END(ignore_sysret)
20856+ENDPROC(ignore_sysret)
20857
20858 /*
20859 * End of kprobes section
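A recurring change in the system_call path above replaces the fixed-offset form TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) with an explicit GET_THREAD_INFO into a register: with RANDKSTACK randomizing the syscall stack offset, thread_info can no longer sit at a compile-time displacement from %rsp. (The END to ENDPROC conversions throughout mark the symbols as functions, which the pax_force_retaddr instrumentation appears to rely on.) For reference, this is the mask-based lookup the 32-bit side uses to find thread_info at the base of the kernel stack; 64-bit reads a per-cpu kernel_stack pointer instead, and THREAD_SIZE here is the two-page stack of this era, by recollection:

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192   /* two-page kernel stacks (assumption) */

    int main(void)
    {
        uintptr_t esp = 0xc12345a7;                       /* arbitrary depth */
        uintptr_t ti  = esp & ~((uintptr_t)THREAD_SIZE - 1);
        printf("esp=%#lx -> thread_info=%#lx\n",
               (unsigned long)esp, (unsigned long)ti);
        return 0;
    }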
20860diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
20861index 1d41402..af9a46a 100644
20862--- a/arch/x86/kernel/ftrace.c
20863+++ b/arch/x86/kernel/ftrace.c
20864@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
20865 {
20866 unsigned char replaced[MCOUNT_INSN_SIZE];
20867
20868+ ip = ktla_ktva(ip);
20869+
20870 /*
20871 * Note: Due to modules and __init, code can
20872 * disappear and change, we need to protect against faulting
20873@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20874 unsigned char old[MCOUNT_INSN_SIZE], *new;
20875 int ret;
20876
20877- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
20878+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
20879 new = ftrace_call_replace(ip, (unsigned long)func);
20880
20881 /* See comment above by declaration of modifying_ftrace_code */
20882@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20883 /* Also update the regs callback function */
20884 if (!ret) {
20885 ip = (unsigned long)(&ftrace_regs_call);
20886- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
20887+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
20888 new = ftrace_call_replace(ip, (unsigned long)func);
20889 ret = ftrace_modify_code(ip, old, new);
20890 }
20891@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
20892 * kernel identity mapping to modify code.
20893 */
20894 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
20895- ip = (unsigned long)__va(__pa(ip));
20896+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
20897
20898 return probe_kernel_write((void *)ip, val, size);
20899 }
20900@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
20901 unsigned char replaced[MCOUNT_INSN_SIZE];
20902 unsigned char brk = BREAKPOINT_INSTRUCTION;
20903
20904- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
20905+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
20906 return -EFAULT;
20907
20908 /* Make sure it is what we expect it to be */
20909@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20910 return ret;
20911
20912 fail_update:
20913- probe_kernel_write((void *)ip, &old_code[0], 1);
20914+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20915 goto out;
20916 }
20917
20918@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20919 {
20920 unsigned char code[MCOUNT_INSN_SIZE];
20921
20922+ ip = ktla_ktva(ip);
20923+
20924 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20925 return -EFAULT;
20926
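Every ftrace probe and patch address in the hunks above is first run through ktla_ktva(). Under KERNEXEC the address kernel text executes at and the address it can be read or patched through differ by a constant rebase, so code modification must translate first. The expansion below is a hypothetical stand-in with a made-up delta, only to show the shape of the call sites:

    /* Hypothetical model of ktla_ktva(): rebase an execution address to
     * the alias through which the text is readable/patchable. The delta
     * is invented for illustration, not the real constant. */
    #include <stdio.h>

    #define KTEXT_DELTA 0x10000000UL                 /* assumed constant */
    #define ktla_ktva(addr) ((addr) + KTEXT_DELTA)

    int main(void)
    {
        unsigned long ip = 0xc1000000UL;             /* example text address */
        printf("patch %#lx through alias %#lx\n", ip, ktla_ktva(ip));
        return 0;
    }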
20927diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
20928index c18f59d..9c0c9f6 100644
20929--- a/arch/x86/kernel/head32.c
20930+++ b/arch/x86/kernel/head32.c
20931@@ -18,6 +18,7 @@
20932 #include <asm/io_apic.h>
20933 #include <asm/bios_ebda.h>
20934 #include <asm/tlbflush.h>
20935+#include <asm/boot.h>
20936
20937 static void __init i386_default_early_setup(void)
20938 {
20939@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
20940
20941 void __init i386_start_kernel(void)
20942 {
20943- memblock_reserve(__pa_symbol(&_text),
20944- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
20945+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
20946
20947 #ifdef CONFIG_BLK_DEV_INITRD
20948 /* Reserve INITRD */
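The head32.c hunk above re-anchors the boot reservation at LOAD_PHYSICAL_ADDR instead of __pa_symbol(&_text), presumably because with KERNEXEC's shifted text mapping the symbol-based conversion no longer names the physical start of the image. The arithmetic, with example stand-in addresses:

    /* Range covered by the memblock_reserve() call above: from the
     * physical load address to the end of .bss. Values are examples. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long load_physical_addr = 0x1000000;  /* 16 MiB, typical */
        unsigned long pa_bss_stop        = 0x1c00000;  /* example __pa(__bss_stop) */
        printf("reserve base=%#lx size=%#lx\n",
               load_physical_addr, pa_bss_stop - load_physical_addr);
        return 0;
    }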
20949diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20950index c8932c7..d56b622 100644
20951--- a/arch/x86/kernel/head_32.S
20952+++ b/arch/x86/kernel/head_32.S
20953@@ -26,6 +26,12 @@
20954 /* Physical address */
20955 #define pa(X) ((X) - __PAGE_OFFSET)
20956
20957+#ifdef CONFIG_PAX_KERNEXEC
20958+#define ta(X) (X)
20959+#else
20960+#define ta(X) ((X) - __PAGE_OFFSET)
20961+#endif
20962+
20963 /*
20964 * References to members of the new_cpu_data structure.
20965 */
20966@@ -55,11 +61,7 @@
20967 * and smaller than max_low_pfn, otherwise it will waste some page table entries
20968 */
20969
20970-#if PTRS_PER_PMD > 1
20971-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20972-#else
20973-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20974-#endif
20975+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20976
20977 /* Number of possible pages in the lowmem region */
20978 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20979@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20980 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20981
20982 /*
20983+ * Real beginning of normal "text" segment
20984+ */
20985+ENTRY(stext)
20986+ENTRY(_stext)
20987+
20988+/*
20989 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20990 * %esi points to the real-mode code as a 32-bit pointer.
20991 * CS and DS must be 4 GB flat segments, but we don't depend on
20992@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20993 * can.
20994 */
20995 __HEAD
20996+
20997+#ifdef CONFIG_PAX_KERNEXEC
20998+ jmp startup_32
20999+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
21000+.fill PAGE_SIZE-5,1,0xcc
21001+#endif
21002+
21003 ENTRY(startup_32)
21004 movl pa(stack_start),%ecx
21005
21006@@ -106,6 +121,59 @@ ENTRY(startup_32)
21007 2:
21008 leal -__PAGE_OFFSET(%ecx),%esp
21009
21010+#ifdef CONFIG_SMP
21011+ movl $pa(cpu_gdt_table),%edi
21012+ movl $__per_cpu_load,%eax
21013+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
21014+ rorl $16,%eax
21015+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
21016+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
21017+ movl $__per_cpu_end - 1,%eax
21018+ subl $__per_cpu_start,%eax
21019+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
21020+#endif
21021+
21022+#ifdef CONFIG_PAX_MEMORY_UDEREF
21023+ movl $NR_CPUS,%ecx
21024+ movl $pa(cpu_gdt_table),%edi
21025+1:
21026+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
21027+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
21028+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
21029+ addl $PAGE_SIZE_asm,%edi
21030+ loop 1b
21031+#endif
21032+
21033+#ifdef CONFIG_PAX_KERNEXEC
21034+ movl $pa(boot_gdt),%edi
21035+ movl $__LOAD_PHYSICAL_ADDR,%eax
21036+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
21037+ rorl $16,%eax
21038+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
21039+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
21040+ rorl $16,%eax
21041+
21042+ ljmp $(__BOOT_CS),$1f
21043+1:
21044+
21045+ movl $NR_CPUS,%ecx
21046+ movl $pa(cpu_gdt_table),%edi
21047+ addl $__PAGE_OFFSET,%eax
21048+1:
21049+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
21050+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
21051+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
21052+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
21053+ rorl $16,%eax
21054+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
21055+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
21056+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
21057+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
21058+ rorl $16,%eax
21059+ addl $PAGE_SIZE_asm,%edi
21060+ loop 1b
21061+#endif
21062+
21063 /*
21064 * Clear BSS first so that there are no surprises...
21065 */
21066@@ -196,8 +264,11 @@ ENTRY(startup_32)
21067 movl %eax, pa(max_pfn_mapped)
21068
21069 /* Do early initialization of the fixmap area */
21070- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
21071- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
21072+#ifdef CONFIG_COMPAT_VDSO
21073+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
21074+#else
21075+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
21076+#endif
21077 #else /* Not PAE */
21078
21079 page_pde_offset = (__PAGE_OFFSET >> 20);
21080@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
21081 movl %eax, pa(max_pfn_mapped)
21082
21083 /* Do early initialization of the fixmap area */
21084- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
21085- movl %eax,pa(initial_page_table+0xffc)
21086+#ifdef CONFIG_COMPAT_VDSO
21087+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
21088+#else
21089+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
21090+#endif
21091 #endif
21092
21093 #ifdef CONFIG_PARAVIRT
21094@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
21095 cmpl $num_subarch_entries, %eax
21096 jae bad_subarch
21097
21098- movl pa(subarch_entries)(,%eax,4), %eax
21099- subl $__PAGE_OFFSET, %eax
21100- jmp *%eax
21101+ jmp *pa(subarch_entries)(,%eax,4)
21102
21103 bad_subarch:
21104 WEAK(lguest_entry)
21105@@ -256,10 +328,10 @@ WEAK(xen_entry)
21106 __INITDATA
21107
21108 subarch_entries:
21109- .long default_entry /* normal x86/PC */
21110- .long lguest_entry /* lguest hypervisor */
21111- .long xen_entry /* Xen hypervisor */
21112- .long default_entry /* Moorestown MID */
21113+ .long ta(default_entry) /* normal x86/PC */
21114+ .long ta(lguest_entry) /* lguest hypervisor */
21115+ .long ta(xen_entry) /* Xen hypervisor */
21116+ .long ta(default_entry) /* Moorestown MID */
21117 num_subarch_entries = (. - subarch_entries) / 4
21118 .previous
21119 #else
21120@@ -335,6 +407,7 @@ default_entry:
21121 movl pa(mmu_cr4_features),%eax
21122 movl %eax,%cr4
21123
21124+#ifdef CONFIG_X86_PAE
21125 testb $X86_CR4_PAE, %al # check if PAE is enabled
21126 jz 6f
21127
21128@@ -363,6 +436,9 @@ default_entry:
21129 /* Make changes effective */
21130 wrmsr
21131
21132+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
21133+#endif
21134+
21135 6:
21136
21137 /*
21138@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
21139 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
21140 movl %eax,%ss # after changing gdt.
21141
21142- movl $(__USER_DS),%eax # DS/ES contains default USER segment
21143+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
21144 movl %eax,%ds
21145 movl %eax,%es
21146
21147 movl $(__KERNEL_PERCPU), %eax
21148 movl %eax,%fs # set this cpu's percpu
21149
21150+#ifdef CONFIG_CC_STACKPROTECTOR
21151 movl $(__KERNEL_STACK_CANARY),%eax
21152+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21153+ movl $(__USER_DS),%eax
21154+#else
21155+ xorl %eax,%eax
21156+#endif
21157 movl %eax,%gs
21158
21159 xorl %eax,%eax # Clear LDT
21160@@ -544,8 +626,11 @@ setup_once:
21161 * relocation. Manually set base address in stack canary
21162 * segment descriptor.
21163 */
21164- movl $gdt_page,%eax
21165+ movl $cpu_gdt_table,%eax
21166 movl $stack_canary,%ecx
21167+#ifdef CONFIG_SMP
21168+ addl $__per_cpu_load,%ecx
21169+#endif
21170 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
21171 shrl $16, %ecx
21172 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
21173@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
21174 /* This is global to keep gas from relaxing the jumps */
21175 ENTRY(early_idt_handler)
21176 cld
21177- cmpl $2,%ss:early_recursion_flag
21178+ cmpl $1,%ss:early_recursion_flag
21179 je hlt_loop
21180 incl %ss:early_recursion_flag
21181
21182@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
21183 pushl (20+6*4)(%esp) /* trapno */
21184 pushl $fault_msg
21185 call printk
21186-#endif
21187 call dump_stack
21188+#endif
21189 hlt_loop:
21190 hlt
21191 jmp hlt_loop
21192@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
21193 /* This is the default interrupt "handler" :-) */
21194 ALIGN
21195 ignore_int:
21196- cld
21197 #ifdef CONFIG_PRINTK
21198+ cmpl $2,%ss:early_recursion_flag
21199+ je hlt_loop
21200+ incl %ss:early_recursion_flag
21201+ cld
21202 pushl %eax
21203 pushl %ecx
21204 pushl %edx
21205@@ -644,9 +732,6 @@ ignore_int:
21206 movl $(__KERNEL_DS),%eax
21207 movl %eax,%ds
21208 movl %eax,%es
21209- cmpl $2,early_recursion_flag
21210- je hlt_loop
21211- incl early_recursion_flag
21212 pushl 16(%esp)
21213 pushl 24(%esp)
21214 pushl 32(%esp)
21215@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
21216 /*
21217 * BSS section
21218 */
21219-__PAGE_ALIGNED_BSS
21220- .align PAGE_SIZE
21221 #ifdef CONFIG_X86_PAE
21222+.section .initial_pg_pmd,"a",@progbits
21223 initial_pg_pmd:
21224 .fill 1024*KPMDS,4,0
21225 #else
21226+.section .initial_page_table,"a",@progbits
21227 ENTRY(initial_page_table)
21228 .fill 1024,4,0
21229 #endif
21230+.section .initial_pg_fixmap,"a",@progbits
21231 initial_pg_fixmap:
21232 .fill 1024,4,0
21233+.section .empty_zero_page,"a",@progbits
21234 ENTRY(empty_zero_page)
21235 .fill 4096,1,0
21236+.section .swapper_pg_dir,"a",@progbits
21237 ENTRY(swapper_pg_dir)
21238+#ifdef CONFIG_X86_PAE
21239+ .fill 4,8,0
21240+#else
21241 .fill 1024,4,0
21242+#endif
21243+
21244+/*
21245+ * The IDT has to be page-aligned to simplify the Pentium
21246+ * F0 0F bug workaround. We have a special link segment
21247+ * for this.
21248+ */
21249+.section .idt,"a",@progbits
21250+ENTRY(idt_table)
21251+ .fill 256,8,0
21252
21253 /*
21254 * This starts the data section.
21255 */
21256 #ifdef CONFIG_X86_PAE
21257-__PAGE_ALIGNED_DATA
21258- /* Page-aligned for the benefit of paravirt? */
21259- .align PAGE_SIZE
21260+.section .initial_page_table,"a",@progbits
21261 ENTRY(initial_page_table)
21262 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
21263 # if KPMDS == 3
21264@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
21265 # error "Kernel PMDs should be 1, 2 or 3"
21266 # endif
21267 .align PAGE_SIZE /* needs to be page-sized too */
21268+
21269+#ifdef CONFIG_PAX_PER_CPU_PGD
21270+ENTRY(cpu_pgd)
21271+ .rept NR_CPUS
21272+ .fill 4,8,0
21273+ .endr
21274+#endif
21275+
21276 #endif
21277
21278 .data
21279 .balign 4
21280 ENTRY(stack_start)
21281- .long init_thread_union+THREAD_SIZE
21282+ .long init_thread_union+THREAD_SIZE-8
21283
21284 __INITRODATA
21285 int_msg:
21286@@ -754,7 +861,7 @@ fault_msg:
21287 * segment size, and 32-bit linear address value:
21288 */
21289
21290- .data
21291+.section .rodata,"a",@progbits
21292 .globl boot_gdt_descr
21293 .globl idt_descr
21294
21295@@ -763,7 +870,7 @@ fault_msg:
21296 .word 0 # 32 bit align gdt_desc.address
21297 boot_gdt_descr:
21298 .word __BOOT_DS+7
21299- .long boot_gdt - __PAGE_OFFSET
21300+ .long pa(boot_gdt)
21301
21302 .word 0 # 32-bit align idt_desc.address
21303 idt_descr:
21304@@ -774,7 +881,7 @@ idt_descr:
21305 .word 0 # 32 bit align gdt_desc.address
21306 ENTRY(early_gdt_descr)
21307 .word GDT_ENTRIES*8-1
21308- .long gdt_page /* Overwritten for secondary CPUs */
21309+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
21310
21311 /*
21312 * The boot_gdt must mirror the equivalent in setup.S and is
21313@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
21314 .align L1_CACHE_BYTES
21315 ENTRY(boot_gdt)
21316 .fill GDT_ENTRY_BOOT_CS,8,0
21317- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
21318- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
21319+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
21320+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
21321+
21322+ .align PAGE_SIZE_asm
21323+ENTRY(cpu_gdt_table)
21324+ .rept NR_CPUS
21325+ .quad 0x0000000000000000 /* NULL descriptor */
21326+ .quad 0x0000000000000000 /* 0x0b reserved */
21327+ .quad 0x0000000000000000 /* 0x13 reserved */
21328+ .quad 0x0000000000000000 /* 0x1b reserved */
21329+
21330+#ifdef CONFIG_PAX_KERNEXEC
21331+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
21332+#else
21333+ .quad 0x0000000000000000 /* 0x20 unused */
21334+#endif
21335+
21336+ .quad 0x0000000000000000 /* 0x28 unused */
21337+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
21338+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
21339+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
21340+ .quad 0x0000000000000000 /* 0x4b reserved */
21341+ .quad 0x0000000000000000 /* 0x53 reserved */
21342+ .quad 0x0000000000000000 /* 0x5b reserved */
21343+
21344+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
21345+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
21346+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
21347+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
21348+
21349+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
21350+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
21351+
21352+ /*
21353+ * Segments used for calling PnP BIOS have byte granularity.
21354+ * The code segments and data segments have fixed 64k limits,
21355+ * the transfer segment sizes are set at run time.
21356+ */
21357+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
21358+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
21359+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
21360+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
21361+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
21362+
21363+ /*
21364+ * The APM segments have byte granularity and their bases
21365+ * are set at run time. All have 64k limits.
21366+ */
21367+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
21368+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
21369+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
21370+
21371+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
21372+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
21373+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
21374+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
21375+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
21376+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
21377+
21378+ /* Be sure this is zeroed to avoid false validations in Xen */
21379+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
21380+ .endr
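The new per-CPU cpu_gdt_table above spells out every descriptor as a raw quad, and the kernel code/data quads change from 0x...9a/0x...92 to 0x...9b/0x...93, i.e. with the accessed bit pre-set, presumably so the CPU never has to write that bit into a GDT that may now live in read-only memory. A decoder for the hand-written quads, per the i386 segment-descriptor layout:

    #include <stdio.h>
    #include <stdint.h>

    static void decode(uint64_t d)
    {
        uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
        uint32_t base   = (uint32_t)((d >> 16) & 0xffffff)
                        | (uint32_t)(((d >> 56) & 0xff) << 24);
        unsigned access = (unsigned)((d >> 40) & 0xff);
        unsigned flags  = (unsigned)((d >> 52) & 0x0f);

        if (flags & 0x8)                 /* G bit: limit counts 4K pages */
            limit = (limit << 12) | 0xfff;
        printf("%#018llx: base=%#010x limit=%#010x access=%#04x flags=%#x\n",
               (unsigned long long)d, base, limit, access, flags);
    }

    int main(void)
    {
        decode(0x00cf9b000000ffffULL);  /* kernel 4GB code: 0x9b = P|DPL0|code|accessed */
        decode(0x00cf93000000ffffULL);  /* kernel 4GB data: 0x93 = P|DPL0|data|accessed */
        decode(0x00cffb000000ffffULL);  /* user 4GB code:   0xfb = P|DPL3|code|accessed */
        return 0;
    }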
21381diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
21382index 980053c..74d3b44 100644
21383--- a/arch/x86/kernel/head_64.S
21384+++ b/arch/x86/kernel/head_64.S
21385@@ -20,6 +20,8 @@
21386 #include <asm/processor-flags.h>
21387 #include <asm/percpu.h>
21388 #include <asm/nops.h>
21389+#include <asm/cpufeature.h>
21390+#include <asm/alternative-asm.h>
21391
21392 #ifdef CONFIG_PARAVIRT
21393 #include <asm/asm-offsets.h>
21394@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
21395 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
21396 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
21397 L3_START_KERNEL = pud_index(__START_KERNEL_map)
21398+L4_VMALLOC_START = pgd_index(VMALLOC_START)
21399+L3_VMALLOC_START = pud_index(VMALLOC_START)
21400+L4_VMALLOC_END = pgd_index(VMALLOC_END)
21401+L3_VMALLOC_END = pud_index(VMALLOC_END)
21402+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
21403+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
21404
21405 .text
21406 __HEAD
21407@@ -88,35 +96,23 @@ startup_64:
21408 */
21409 addq %rbp, init_level4_pgt + 0(%rip)
21410 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
21411+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
21412+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
21413+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
21414 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
21415
21416 addq %rbp, level3_ident_pgt + 0(%rip)
21417+#ifndef CONFIG_XEN
21418+ addq %rbp, level3_ident_pgt + 8(%rip)
21419+#endif
21420
21421- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
21422- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
21423+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
21424+
21425+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
21426+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
21427
21428 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
21429-
21430- /* Add an Identity mapping if I am above 1G */
21431- leaq _text(%rip), %rdi
21432- andq $PMD_PAGE_MASK, %rdi
21433-
21434- movq %rdi, %rax
21435- shrq $PUD_SHIFT, %rax
21436- andq $(PTRS_PER_PUD - 1), %rax
21437- jz ident_complete
21438-
21439- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
21440- leaq level3_ident_pgt(%rip), %rbx
21441- movq %rdx, 0(%rbx, %rax, 8)
21442-
21443- movq %rdi, %rax
21444- shrq $PMD_SHIFT, %rax
21445- andq $(PTRS_PER_PMD - 1), %rax
21446- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
21447- leaq level2_spare_pgt(%rip), %rbx
21448- movq %rdx, 0(%rbx, %rax, 8)
21449-ident_complete:
21450+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
21451
21452 /*
21453 * Fixup the kernel text+data virtual addresses. Note that
21454@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
21455 * after the boot processor executes this code.
21456 */
21457
21458- /* Enable PAE mode and PGE */
21459- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
21460+ /* Enable PAE mode and PSE/PGE */
21461+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
21462 movq %rax, %cr4
21463
21464 /* Setup early boot stage 4 level pagetables. */
21465@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
21466 movl $MSR_EFER, %ecx
21467 rdmsr
21468 btsl $_EFER_SCE, %eax /* Enable System Call */
21469- btl $20,%edi /* No Execute supported? */
21470+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
21471 jnc 1f
21472 btsl $_EFER_NX, %eax
21473+ leaq init_level4_pgt(%rip), %rdi
21474+#ifndef CONFIG_EFI
21475+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
21476+#endif
21477+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
21478+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
21479+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
21480+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
21481 1: wrmsr /* Make changes effective */
21482
21483 /* Setup cr0 */
21484@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
21485 * jump. In addition we need to ensure %cs is set so we make this
21486 * a far return.
21487 */
21488+ pax_set_fptr_mask
21489 movq initial_code(%rip),%rax
21490 pushq $0 # fake return address to stop unwinder
21491 pushq $__KERNEL_CS # set correct cs
21492@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
21493 bad_address:
21494 jmp bad_address
21495
21496- .section ".init.text","ax"
21497+ __INIT
21498 .globl early_idt_handlers
21499 early_idt_handlers:
21500 # 104(%rsp) %rflags
21501@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
21502 call dump_stack
21503 #ifdef CONFIG_KALLSYMS
21504 leaq early_idt_ripmsg(%rip),%rdi
21505- movq 40(%rsp),%rsi # %rip again
21506+ movq 88(%rsp),%rsi # %rip again
21507 call __print_symbol
21508 #endif
21509 #endif /* EARLY_PRINTK */
21510@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
21511 addq $16,%rsp # drop vector number and error code
21512 decl early_recursion_flag(%rip)
21513 INTERRUPT_RETURN
21514+ .previous
21515
21516+ __INITDATA
21517 .balign 4
21518 early_recursion_flag:
21519 .long 0
21520+ .previous
21521
21522+ .section .rodata,"a",@progbits
21523 #ifdef CONFIG_EARLY_PRINTK
21524 early_idt_msg:
21525 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
21526@@ -376,6 +385,7 @@ early_idt_ripmsg:
21527 #endif /* CONFIG_EARLY_PRINTK */
21528 .previous
21529
21530+ .section .rodata,"a",@progbits
21531 #define NEXT_PAGE(name) \
21532 .balign PAGE_SIZE; \
21533 ENTRY(name)
21534@@ -388,7 +398,6 @@ ENTRY(name)
21535 i = i + 1 ; \
21536 .endr
21537
21538- .data
21539 /*
21540 * This default setting generates an ident mapping at address 0x100000
21541 * and a mapping for the kernel that precisely maps virtual address
21542@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
21543 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
21544 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
21545 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
21546+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
21547+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
21548+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
21549+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
21550+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
21551+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
21552 .org init_level4_pgt + L4_START_KERNEL*8, 0
21553 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
21554 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
21555
21556+#ifdef CONFIG_PAX_PER_CPU_PGD
21557+NEXT_PAGE(cpu_pgd)
21558+ .rept NR_CPUS
21559+ .fill 512,8,0
21560+ .endr
21561+#endif
21562+
21563 NEXT_PAGE(level3_ident_pgt)
21564 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
21565+#ifdef CONFIG_XEN
21566 .fill 511,8,0
21567+#else
21568+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
21569+ .fill 510,8,0
21570+#endif
21571+
21572+NEXT_PAGE(level3_vmalloc_start_pgt)
21573+ .fill 512,8,0
21574+
21575+NEXT_PAGE(level3_vmalloc_end_pgt)
21576+ .fill 512,8,0
21577+
21578+NEXT_PAGE(level3_vmemmap_pgt)
21579+ .fill L3_VMEMMAP_START,8,0
21580+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
21581
21582 NEXT_PAGE(level3_kernel_pgt)
21583 .fill L3_START_KERNEL,8,0
21584@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
21585 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
21586 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
21587
21588+NEXT_PAGE(level2_vmemmap_pgt)
21589+ .fill 512,8,0
21590+
21591 NEXT_PAGE(level2_fixmap_pgt)
21592- .fill 506,8,0
21593- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
21594- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
21595- .fill 5,8,0
21596+ .fill 507,8,0
21597+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
21598+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
21599+ .fill 4,8,0
21600
21601-NEXT_PAGE(level1_fixmap_pgt)
21602+NEXT_PAGE(level1_vsyscall_pgt)
21603 .fill 512,8,0
21604
21605-NEXT_PAGE(level2_ident_pgt)
21606- /* Since I easily can, map the first 1G.
21607+ /* Since I easily can, map the first 2G.
21608 * Don't set NX because code runs from these pages.
21609 */
21610- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
21611+NEXT_PAGE(level2_ident_pgt)
21612+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
21613
21614 NEXT_PAGE(level2_kernel_pgt)
21615 /*
21616@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
21617 * If you want to increase this then increase MODULES_VADDR
21618 * too.)
21619 */
21620- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
21621- KERNEL_IMAGE_SIZE/PMD_SIZE)
21622-
21623-NEXT_PAGE(level2_spare_pgt)
21624- .fill 512, 8, 0
21625+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
21626
21627 #undef PMDS
21628 #undef NEXT_PAGE
21629
21630- .data
21631+ .align PAGE_SIZE
21632+ENTRY(cpu_gdt_table)
21633+ .rept NR_CPUS
21634+ .quad 0x0000000000000000 /* NULL descriptor */
21635+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
21636+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
21637+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
21638+ .quad 0x00cffb000000ffff /* __USER32_CS */
21639+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
21640+ .quad 0x00affb000000ffff /* __USER_CS */
21641+
21642+#ifdef CONFIG_PAX_KERNEXEC
21643+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
21644+#else
21645+ .quad 0x0 /* unused */
21646+#endif
21647+
21648+ .quad 0,0 /* TSS */
21649+ .quad 0,0 /* LDT */
21650+ .quad 0,0,0 /* three TLS descriptors */
21651+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
21652+ /* asm/segment.h:GDT_ENTRIES must match this */
21653+
21654+ /* zero the remaining page */
21655+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
21656+ .endr
21657+
21658 .align 16
21659 .globl early_gdt_descr
21660 early_gdt_descr:
21661 .word GDT_ENTRIES*8-1
21662 early_gdt_descr_base:
21663- .quad INIT_PER_CPU_VAR(gdt_page)
21664+ .quad cpu_gdt_table
21665
21666 ENTRY(phys_base)
21667 /* This must match the first entry in level2_kernel_pgt */
21668 .quad 0x0000000000000000
21669
21670 #include "../../x86/xen/xen-head.S"
21671-
21672- .section .bss, "aw", @nobits
21673+
21674+ .section .rodata,"a",@progbits
21675 .align L1_CACHE_BYTES
21676 ENTRY(idt_table)
21677- .skip IDT_ENTRIES * 16
21678+ .fill 512,8,0
21679
21680 .align L1_CACHE_BYTES
21681 ENTRY(nmi_idt_table)
21682- .skip IDT_ENTRIES * 16
21683+ .fill 512,8,0
21684
21685 __PAGE_ALIGNED_BSS
21686 .align PAGE_SIZE
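head_64.S above precomputes PGD and PUD slot numbers for the vmalloc and vmemmap regions so the %rbp relocation fixups and the NX btsq instructions can address init_level4_pgt entries directly. The index arithmetic for 4-level paging, using the 3.8-era layout constants as I remember them (treat the addresses as assumptions):

    /* pgd_index()/pud_index() as used for the L4_ and L3_ slot constants:
     * 9 bits per level, PGD field starting at bit 39. */
    #include <stdio.h>

    #define pgd_index(a) (((a) >> 39) & 511)
    #define pud_index(a) (((a) >> 30) & 511)

    int main(void)
    {
        unsigned long long page_offset      = 0xffff880000000000ULL;
        unsigned long long vmalloc_start    = 0xffffc90000000000ULL;
        unsigned long long vmemmap_start    = 0xffffea0000000000ULL;
        unsigned long long start_kernel_map = 0xffffffff80000000ULL;

        printf("L4_PAGE_OFFSET   = %llu\n", pgd_index(page_offset));
        printf("L4_VMALLOC_START = %llu\n", pgd_index(vmalloc_start));
        printf("L4_VMEMMAP_START = %llu\n", pgd_index(vmemmap_start));
        printf("L4_START_KERNEL  = %llu\n", pgd_index(start_kernel_map)); /* 511 */
        return 0;
    }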
21687diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
21688index 9c3bd4a..e1d9b35 100644
21689--- a/arch/x86/kernel/i386_ksyms_32.c
21690+++ b/arch/x86/kernel/i386_ksyms_32.c
21691@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
21692 EXPORT_SYMBOL(cmpxchg8b_emu);
21693 #endif
21694
21695+EXPORT_SYMBOL_GPL(cpu_gdt_table);
21696+
21697 /* Networking helper routines. */
21698 EXPORT_SYMBOL(csum_partial_copy_generic);
21699+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
21700+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
21701
21702 EXPORT_SYMBOL(__get_user_1);
21703 EXPORT_SYMBOL(__get_user_2);
21704@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
21705
21706 EXPORT_SYMBOL(csum_partial);
21707 EXPORT_SYMBOL(empty_zero_page);
21708+
21709+#ifdef CONFIG_PAX_KERNEXEC
21710+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
21711+#endif
21712diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
21713index 245a71d..89d9ce4 100644
21714--- a/arch/x86/kernel/i387.c
21715+++ b/arch/x86/kernel/i387.c
21716@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
21717 static inline bool interrupted_user_mode(void)
21718 {
21719 struct pt_regs *regs = get_irq_regs();
21720- return regs && user_mode_vm(regs);
21721+ return regs && user_mode(regs);
21722 }
21723
21724 /*
21725diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
21726index 9a5c460..84868423 100644
21727--- a/arch/x86/kernel/i8259.c
21728+++ b/arch/x86/kernel/i8259.c
21729@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
21730 static void make_8259A_irq(unsigned int irq)
21731 {
21732 disable_irq_nosync(irq);
21733- io_apic_irqs &= ~(1<<irq);
21734+ io_apic_irqs &= ~(1UL<<irq);
21735 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
21736 i8259A_chip.name);
21737 enable_irq(irq);
21738@@ -209,7 +209,7 @@ spurious_8259A_irq:
21739 "spurious 8259A interrupt: IRQ%d.\n", irq);
21740 spurious_irq_mask |= irqmask;
21741 }
21742- atomic_inc(&irq_err_count);
21743+ atomic_inc_unchecked(&irq_err_count);
21744 /*
21745 * Theoretically we do not have to handle this IRQ,
21746 * but in Linux this does not cause problems and is
21747@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
21748 /* (slave's support for AEOI in flat mode is to be investigated) */
21749 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
21750
21751+ pax_open_kernel();
21752 if (auto_eoi)
21753 /*
21754 * In AEOI mode we just have to mask the interrupt
21755 * when acking.
21756 */
21757- i8259A_chip.irq_mask_ack = disable_8259A_irq;
21758+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
21759 else
21760- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21761+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21762+ pax_close_kernel();
21763
21764 udelay(100); /* wait for 8259A to initialize */
21765
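The 1<<irq to 1UL<<irq widening in the i8259 hunk above keeps the mask math in the type of io_apic_irqs (unsigned long). Computed in int, the complement of bit 31 is a positive int that zero-extends and silently clears bits 32..63 of a 64-bit mask; the 8259 only has 16 IRQs, so this reads as hardening hygiene rather than a live bug. The edge case, on an LP64 host (1<<31 is formally undefined in C; shown purely for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned long mask = ~0UL;
        int irq = 31;                      /* the edge case */

        unsigned long narrow = mask & ~(1 << irq);    /* int math, then widened */
        unsigned long wide   = mask & ~(1UL << irq);  /* all in unsigned long */

        printf("narrow: %#018lx\n", narrow);  /* 0x7fffffff: top half wiped */
        printf("wide:   %#018lx\n", wide);    /* 0xffffffff7fffffff */
        return 0;
    }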
21766diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
21767index a979b5b..1d6db75 100644
21768--- a/arch/x86/kernel/io_delay.c
21769+++ b/arch/x86/kernel/io_delay.c
21770@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
21771 * Quirk table for systems that misbehave (lock up, etc.) if port
21772 * 0x80 is used:
21773 */
21774-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
21775+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
21776 {
21777 .callback = dmi_io_delay_0xed_port,
21778 .ident = "Compaq Presario V6000",
21779diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
21780index 8c96897..be66bfa 100644
21781--- a/arch/x86/kernel/ioport.c
21782+++ b/arch/x86/kernel/ioport.c
21783@@ -6,6 +6,7 @@
21784 #include <linux/sched.h>
21785 #include <linux/kernel.h>
21786 #include <linux/capability.h>
21787+#include <linux/security.h>
21788 #include <linux/errno.h>
21789 #include <linux/types.h>
21790 #include <linux/ioport.h>
21791@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21792
21793 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
21794 return -EINVAL;
21795+#ifdef CONFIG_GRKERNSEC_IO
21796+ if (turn_on && grsec_disable_privio) {
21797+ gr_handle_ioperm();
21798+ return -EPERM;
21799+ }
21800+#endif
21801 if (turn_on && !capable(CAP_SYS_RAWIO))
21802 return -EPERM;
21803
21804@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21805 * because the ->io_bitmap_max value must match the bitmap
21806 * contents:
21807 */
21808- tss = &per_cpu(init_tss, get_cpu());
21809+ tss = init_tss + get_cpu();
21810
21811 if (turn_on)
21812 bitmap_clear(t->io_bitmap_ptr, from, num);
21813@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
21814 return -EINVAL;
21815 /* Trying to gain more privileges? */
21816 if (level > old) {
21817+#ifdef CONFIG_GRKERNSEC_IO
21818+ if (grsec_disable_privio) {
21819+ gr_handle_iopl();
21820+ return -EPERM;
21821+ }
21822+#endif
21823 if (!capable(CAP_SYS_RAWIO))
21824 return -EPERM;
21825 }
21826diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
21827index e4595f1..ee3bfb8 100644
21828--- a/arch/x86/kernel/irq.c
21829+++ b/arch/x86/kernel/irq.c
21830@@ -18,7 +18,7 @@
21831 #include <asm/mce.h>
21832 #include <asm/hw_irq.h>
21833
21834-atomic_t irq_err_count;
21835+atomic_unchecked_t irq_err_count;
21836
21837 /* Function pointer for generic interrupt vector handling */
21838 void (*x86_platform_ipi_callback)(void) = NULL;
21839@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
21840 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
21841 seq_printf(p, " Machine check polls\n");
21842 #endif
21843- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
21844+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
21845 #if defined(CONFIG_X86_IO_APIC)
21846- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
21847+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
21848 #endif
21849 return 0;
21850 }
21851@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
21852
21853 u64 arch_irq_stat(void)
21854 {
21855- u64 sum = atomic_read(&irq_err_count);
21856+ u64 sum = atomic_read_unchecked(&irq_err_count);
21857
21858 #ifdef CONFIG_X86_IO_APIC
21859- sum += atomic_read(&irq_mis_count);
21860+ sum += atomic_read_unchecked(&irq_mis_count);
21861 #endif
21862 return sum;
21863 }
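irq_err_count and irq_mis_count above become atomic_unchecked_t. Under PaX's refcount hardening, plain atomic_t operations trap on overflow to stop use-after-free via reference-count wraps; pure statistics counters opt out through the _unchecked variants, where wrapping is harmless. A user-space model of the split, with the abort standing in for the real instrumentation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    typedef struct { int counter; } atomic_t;
    typedef struct { int counter; } atomic_unchecked_t;

    static void atomic_inc(atomic_t *v)
    {
        if (v->counter == INT_MAX) {      /* about to overflow */
            fprintf(stderr, "refcount overflow detected\n");
            abort();                      /* model of the PaX report+kill */
        }
        v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* wrap is tolerated; done via unsigned math to stay well-defined */
        v->counter = (int)((unsigned)v->counter + 1u);
    }

    int main(void)
    {
        atomic_t ref = { INT_MAX - 1 };
        atomic_unchecked_t err_count = { INT_MAX };

        atomic_inc(&ref);                 /* fine: one below the trap point */
        printf("ref=%d (one more would trap)\n", ref.counter);

        atomic_inc_unchecked(&err_count); /* statistics: wrap is fine */
        printf("err_count wrapped to %d\n", err_count.counter);
        return 0;
    }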
21864diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
21865index 344faf8..355f60d 100644
21866--- a/arch/x86/kernel/irq_32.c
21867+++ b/arch/x86/kernel/irq_32.c
21868@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
21869 __asm__ __volatile__("andl %%esp,%0" :
21870 "=r" (sp) : "0" (THREAD_SIZE - 1));
21871
21872- return sp < (sizeof(struct thread_info) + STACK_WARN);
21873+ return sp < STACK_WARN;
21874 }
21875
21876 static void print_stack_overflow(void)
21877@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
21878 * per-CPU IRQ handling contexts (thread information and stack)
21879 */
21880 union irq_ctx {
21881- struct thread_info tinfo;
21882- u32 stack[THREAD_SIZE/sizeof(u32)];
21883+ unsigned long previous_esp;
21884+ u32 stack[THREAD_SIZE/sizeof(u32)];
21885 } __attribute__((aligned(THREAD_SIZE)));
21886
21887 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
21888@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
21889 static inline int
21890 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21891 {
21892- union irq_ctx *curctx, *irqctx;
21893+ union irq_ctx *irqctx;
21894 u32 *isp, arg1, arg2;
21895
21896- curctx = (union irq_ctx *) current_thread_info();
21897 irqctx = __this_cpu_read(hardirq_ctx);
21898
21899 /*
21900@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21901 * handler) we can't do that and just have to keep using the
21902 * current stack (which is the irq stack already after all)
21903 */
21904- if (unlikely(curctx == irqctx))
21905+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
21906 return 0;
21907
21908 /* build the stack frame on the IRQ stack */
21909- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21910- irqctx->tinfo.task = curctx->tinfo.task;
21911- irqctx->tinfo.previous_esp = current_stack_pointer;
21912+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21913+ irqctx->previous_esp = current_stack_pointer;
21914
21915- /* Copy the preempt_count so that the [soft]irq checks work. */
21916- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
21917+#ifdef CONFIG_PAX_MEMORY_UDEREF
21918+ __set_fs(MAKE_MM_SEG(0));
21919+#endif
21920
21921 if (unlikely(overflow))
21922 call_on_stack(print_stack_overflow, isp);
21923@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21924 : "0" (irq), "1" (desc), "2" (isp),
21925 "D" (desc->handle_irq)
21926 : "memory", "cc", "ecx");
21927+
21928+#ifdef CONFIG_PAX_MEMORY_UDEREF
21929+ __set_fs(current_thread_info()->addr_limit);
21930+#endif
21931+
21932 return 1;
21933 }
21934
21935@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21936 */
21937 void __cpuinit irq_ctx_init(int cpu)
21938 {
21939- union irq_ctx *irqctx;
21940-
21941 if (per_cpu(hardirq_ctx, cpu))
21942 return;
21943
21944- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21945- THREADINFO_GFP,
21946- THREAD_SIZE_ORDER));
21947- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21948- irqctx->tinfo.cpu = cpu;
21949- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
21950- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21951-
21952- per_cpu(hardirq_ctx, cpu) = irqctx;
21953-
21954- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21955- THREADINFO_GFP,
21956- THREAD_SIZE_ORDER));
21957- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21958- irqctx->tinfo.cpu = cpu;
21959- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21960-
21961- per_cpu(softirq_ctx, cpu) = irqctx;
21962+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21963+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21964+
21965+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21966+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21967
21968 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21969 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21970@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
21971 asmlinkage void do_softirq(void)
21972 {
21973 unsigned long flags;
21974- struct thread_info *curctx;
21975 union irq_ctx *irqctx;
21976 u32 *isp;
21977
21978@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
21979 local_irq_save(flags);
21980
21981 if (local_softirq_pending()) {
21982- curctx = current_thread_info();
21983 irqctx = __this_cpu_read(softirq_ctx);
21984- irqctx->tinfo.task = curctx->task;
21985- irqctx->tinfo.previous_esp = current_stack_pointer;
21986+ irqctx->previous_esp = current_stack_pointer;
21987
21988 /* build the stack frame on the softirq stack */
21989- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21990+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21991+
21992+#ifdef CONFIG_PAX_MEMORY_UDEREF
21993+ __set_fs(MAKE_MM_SEG(0));
21994+#endif
21995
21996 call_on_stack(__do_softirq, isp);
21997+
21998+#ifdef CONFIG_PAX_MEMORY_UDEREF
21999+ __set_fs(current_thread_info()->addr_limit);
22000+#endif
22001+
22002 /*
22003 * Shouldn't happen, we returned above if in_interrupt():
22004 */
22005@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
22006 if (unlikely(!desc))
22007 return false;
22008
22009- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
22010+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
22011 if (unlikely(overflow))
22012 print_stack_overflow();
22013 desc->handle_irq(irq, desc);
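
[Note: with struct thread_info gone from the irq_ctx union, the old "already on the IRQ stack?" test (curctx == irqctx) can no longer compare thread_info pointers; the hunk above replaces it with a range check against the THREAD_SIZE-sized IRQ stack region. A userspace sketch of that predicate with an assumed THREAD_SIZE; the patch subtracts void pointers, while unsigned wraparound gives the same containment test here.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* assumed; a 2-page i386 stack */

/* true when sp lies within [irqctx_base, irqctx_base + THREAD_SIZE) */
static bool on_irq_stack(uintptr_t sp, uintptr_t irqctx_base)
{
        return sp - irqctx_base < THREAD_SIZE;
}

int main(void)
{
        uintptr_t irqctx = 0x10000;

        printf("%d\n", on_irq_stack(0x10100, irqctx)); /* 1: already on it */
        printf("%d\n", on_irq_stack(0x30000, irqctx)); /* 0: must switch   */
        return 0;
}
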
22014diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
22015index d04d3ec..ea4b374 100644
22016--- a/arch/x86/kernel/irq_64.c
22017+++ b/arch/x86/kernel/irq_64.c
22018@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
22019 u64 estack_top, estack_bottom;
22020 u64 curbase = (u64)task_stack_page(current);
22021
22022- if (user_mode_vm(regs))
22023+ if (user_mode(regs))
22024 return;
22025
22026 if (regs->sp >= curbase + sizeof(struct thread_info) +
22027diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
22028index dc1404b..bbc43e7 100644
22029--- a/arch/x86/kernel/kdebugfs.c
22030+++ b/arch/x86/kernel/kdebugfs.c
22031@@ -27,7 +27,7 @@ struct setup_data_node {
22032 u32 len;
22033 };
22034
22035-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
22036+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
22037 size_t count, loff_t *ppos)
22038 {
22039 struct setup_data_node *node = file->private_data;
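
[Note: __size_overflow(3) is consumed by grsecurity's size_overflow gcc plugin and marks the third parameter (count) for integer-overflow instrumentation of the expressions feeding it. Builds without the plugin need the attribute to expand to nothing; a plausible fallback sketch, with a hypothetical SIZE_OVERFLOW_PLUGIN guard for illustration.]

#include <stddef.h>
#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN            /* hypothetical guard, for illustration */
#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
#else
#define __size_overflow(...)           /* expands to nothing without the plugin */
#endif

/* parameter 3 (count) is the one marked for overflow instrumentation */
static long __size_overflow(3) demo_read(char *buf, size_t off, size_t count)
{
        (void)buf; (void)off;
        return (long)count;
}

int main(void)
{
        char buf[16];

        printf("%ld\n", demo_read(buf, 0, sizeof(buf)));
        return 0;
}
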
22040diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
22041index 836f832..a8bda67 100644
22042--- a/arch/x86/kernel/kgdb.c
22043+++ b/arch/x86/kernel/kgdb.c
22044@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
22045 #ifdef CONFIG_X86_32
22046 switch (regno) {
22047 case GDB_SS:
22048- if (!user_mode_vm(regs))
22049+ if (!user_mode(regs))
22050 *(unsigned long *)mem = __KERNEL_DS;
22051 break;
22052 case GDB_SP:
22053- if (!user_mode_vm(regs))
22054+ if (!user_mode(regs))
22055 *(unsigned long *)mem = kernel_stack_pointer(regs);
22056 break;
22057 case GDB_GS:
22058@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
22059 bp->attr.bp_addr = breakinfo[breakno].addr;
22060 bp->attr.bp_len = breakinfo[breakno].len;
22061 bp->attr.bp_type = breakinfo[breakno].type;
22062- info->address = breakinfo[breakno].addr;
22063+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
22064+ info->address = ktla_ktva(breakinfo[breakno].addr);
22065+ else
22066+ info->address = breakinfo[breakno].addr;
22067 info->len = breakinfo[breakno].len;
22068 info->type = breakinfo[breakno].type;
22069 val = arch_install_hw_breakpoint(bp);
22070@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
22071 case 'k':
22072 /* clear the trace bit */
22073 linux_regs->flags &= ~X86_EFLAGS_TF;
22074- atomic_set(&kgdb_cpu_doing_single_step, -1);
22075+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
22076
22077 /* set the trace bit if we're stepping */
22078 if (remcomInBuffer[0] == 's') {
22079 linux_regs->flags |= X86_EFLAGS_TF;
22080- atomic_set(&kgdb_cpu_doing_single_step,
22081+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
22082 raw_smp_processor_id());
22083 }
22084
22085@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
22086
22087 switch (cmd) {
22088 case DIE_DEBUG:
22089- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
22090+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
22091 if (user_mode(regs))
22092 return single_step_cont(regs, args);
22093 break;
22094@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
22095 #endif /* CONFIG_DEBUG_RODATA */
22096
22097 bpt->type = BP_BREAKPOINT;
22098- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
22099+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
22100 BREAK_INSTR_SIZE);
22101 if (err)
22102 return err;
22103- err = probe_kernel_write((char *)bpt->bpt_addr,
22104+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
22105 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
22106 #ifdef CONFIG_DEBUG_RODATA
22107 if (!err)
22108@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
22109 return -EBUSY;
22110 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
22111 BREAK_INSTR_SIZE);
22112- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
22113+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
22114 if (err)
22115 return err;
22116 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
22117@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
22118 if (mutex_is_locked(&text_mutex))
22119 goto knl_write;
22120 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
22121- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
22122+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
22123 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
22124 goto knl_write;
22125 return err;
22126 knl_write:
22127 #endif /* CONFIG_DEBUG_RODATA */
22128- return probe_kernel_write((char *)bpt->bpt_addr,
22129+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
22130 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
22131 }
22132
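
[Note: every kgdb read or write of breakpoint bytes above goes through ktla_ktva(): under PAX_KERNEXEC on i386 the kernel text has separate linear (execution) and virtual (data access) views, so text bytes must be addressed through the translated alias. A sketch of the translation pair, assuming the i386 constant-offset case; the offset value is a stand-in, not the real layout.]

#include <stdint.h>
#include <stdio.h>

#define __KERNEL_TEXT_OFFSET 0x00400000UL  /* stand-in value for illustration */

static inline uintptr_t ktla_ktva(uintptr_t addr) { return addr + __KERNEL_TEXT_OFFSET; }
static inline uintptr_t ktva_ktla(uintptr_t addr) { return addr - __KERNEL_TEXT_OFFSET; }

int main(void)
{
        uintptr_t bpt_addr = 0xc1000000UL;  /* hypothetical breakpoint site */

        printf("patch bytes at %#lx, not at %#lx\n",
               (unsigned long)ktla_ktva(bpt_addr), (unsigned long)bpt_addr);
        printf("round trip ok: %d\n", ktva_ktla(ktla_ktva(bpt_addr)) == bpt_addr);
        return 0;
}
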
22133diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
22134index c5e410e..ed5a7f0 100644
22135--- a/arch/x86/kernel/kprobes-opt.c
22136+++ b/arch/x86/kernel/kprobes-opt.c
22137@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
22138 * Verify if the address gap is in 2GB range, because this uses
22139 * a relative jump.
22140 */
22141- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
22142+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
22143 if (abs(rel) > 0x7fffffff)
22144 return -ERANGE;
22145
22146@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
22147 op->optinsn.size = ret;
22148
22149 /* Copy arch-dep-instance from template */
22150- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
22151+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
22152
22153 /* Set probe information */
22154 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
22155
22156 /* Set probe function call */
22157- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
22158+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
22159
22160 /* Set returning jmp instruction at the tail of out-of-line buffer */
22161- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
22162+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
22163 (u8 *)op->kp.addr + op->optinsn.size);
22164
22165 flush_icache_range((unsigned long) buf,
22166@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
22167 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
22168
22169 /* Backup instructions which will be replaced by jump address */
22170- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
22171+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
22172 RELATIVE_ADDR_SIZE);
22173
22174 insn_buf[0] = RELATIVEJUMP_OPCODE;
22175@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
22176 /* This kprobe is really able to run optimized path. */
22177 op = container_of(p, struct optimized_kprobe, kp);
22178 /* Detour through copied instructions */
22179- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
22180+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
22181 if (!reenter)
22182 reset_current_kprobe();
22183 preempt_enable_no_resched();
22184diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
22185index 57916c0..9e0b9d0 100644
22186--- a/arch/x86/kernel/kprobes.c
22187+++ b/arch/x86/kernel/kprobes.c
22188@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
22189 s32 raddr;
22190 } __attribute__((packed)) *insn;
22191
22192- insn = (struct __arch_relative_insn *)from;
22193+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
22194+
22195+ pax_open_kernel();
22196 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
22197 insn->op = op;
22198+ pax_close_kernel();
22199 }
22200
22201 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
22202@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
22203 kprobe_opcode_t opcode;
22204 kprobe_opcode_t *orig_opcodes = opcodes;
22205
22206- if (search_exception_tables((unsigned long)opcodes))
22207+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
22208 return 0; /* Page fault may occur on this address. */
22209
22210 retry:
22211@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
22212 * for the first byte, we can recover the original instruction
22213 * from it and kp->opcode.
22214 */
22215- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
22216+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
22217 buf[0] = kp->opcode;
22218- return (unsigned long)buf;
22219+ return ktva_ktla((unsigned long)buf);
22220 }
22221
22222 /*
22223@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
22224 /* Another subsystem puts a breakpoint, failed to recover */
22225 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
22226 return 0;
22227+ pax_open_kernel();
22228 memcpy(dest, insn.kaddr, insn.length);
22229+ pax_close_kernel();
22230
22231 #ifdef CONFIG_X86_64
22232 if (insn_rip_relative(&insn)) {
22233@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
22234 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
22235 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
22236 disp = (u8 *) dest + insn_offset_displacement(&insn);
22237+ pax_open_kernel();
22238 *(s32 *) disp = (s32) newdisp;
22239+ pax_close_kernel();
22240 }
22241 #endif
22242 return insn.length;
22243@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
22244 * nor set current_kprobe, because it doesn't use single
22245 * stepping.
22246 */
22247- regs->ip = (unsigned long)p->ainsn.insn;
22248+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
22249 preempt_enable_no_resched();
22250 return;
22251 }
22252@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
22253 regs->flags &= ~X86_EFLAGS_IF;
22254 /* single step inline if the instruction is an int3 */
22255 if (p->opcode == BREAKPOINT_INSTRUCTION)
22256- regs->ip = (unsigned long)p->addr;
22257+ regs->ip = ktla_ktva((unsigned long)p->addr);
22258 else
22259- regs->ip = (unsigned long)p->ainsn.insn;
22260+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
22261 }
22262
22263 /*
22264@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
22265 setup_singlestep(p, regs, kcb, 0);
22266 return 1;
22267 }
22268- } else if (*addr != BREAKPOINT_INSTRUCTION) {
22269+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
22270 /*
22271 * The breakpoint instruction was removed right
22272 * after we hit it. Another cpu has removed
22273@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
22274 " movq %rax, 152(%rsp)\n"
22275 RESTORE_REGS_STRING
22276 " popfq\n"
22277+#ifdef KERNEXEC_PLUGIN
22278+ " btsq $63,(%rsp)\n"
22279+#endif
22280 #else
22281 " pushf\n"
22282 SAVE_REGS_STRING
22283@@ -788,7 +798,7 @@ static void __kprobes
22284 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
22285 {
22286 unsigned long *tos = stack_addr(regs);
22287- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
22288+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
22289 unsigned long orig_ip = (unsigned long)p->addr;
22290 kprobe_opcode_t *insn = p->ainsn.insn;
22291
22292@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
22293 struct die_args *args = data;
22294 int ret = NOTIFY_DONE;
22295
22296- if (args->regs && user_mode_vm(args->regs))
22297+ if (args->regs && user_mode(args->regs))
22298 return ret;
22299
22300 switch (val) {
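
[Note: kprobes writes instruction copies into memory that KERNEXEC keeps read-only, so the hunks above bracket each store with pax_open_kernel()/pax_close_kernel(). A userspace analogue of that bracketing pattern, with mprotect() standing in for the real primitives, which toggle CR0.WP or PTE permissions in-kernel.]

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *buf = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;

        memset(buf, 0x90, pagesz);                     /* fake instruction bytes */
        mprotect(buf, pagesz, PROT_READ);              /* text becomes read-only */

        mprotect(buf, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()      */
        buf[0] = 0xcc;                                 /* plant the breakpoint   */
        mprotect(buf, pagesz, PROT_READ);              /* pax_close_kernel()     */

        printf("patched byte: %#x\n", buf[0]);
        munmap(buf, pagesz);
        return 0;
}
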
22301diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
22302index 9c2bd8b..bb1131c 100644
22303--- a/arch/x86/kernel/kvm.c
22304+++ b/arch/x86/kernel/kvm.c
22305@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
22306 return NOTIFY_OK;
22307 }
22308
22309-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
22310+static struct notifier_block kvm_cpu_notifier = {
22311 .notifier_call = kvm_cpu_notify,
22312 };
22313 #endif
22314diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
22315index ebc9873..1b9724b 100644
22316--- a/arch/x86/kernel/ldt.c
22317+++ b/arch/x86/kernel/ldt.c
22318@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
22319 if (reload) {
22320 #ifdef CONFIG_SMP
22321 preempt_disable();
22322- load_LDT(pc);
22323+ load_LDT_nolock(pc);
22324 if (!cpumask_equal(mm_cpumask(current->mm),
22325 cpumask_of(smp_processor_id())))
22326 smp_call_function(flush_ldt, current->mm, 1);
22327 preempt_enable();
22328 #else
22329- load_LDT(pc);
22330+ load_LDT_nolock(pc);
22331 #endif
22332 }
22333 if (oldsize) {
22334@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
22335 return err;
22336
22337 for (i = 0; i < old->size; i++)
22338- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
22339+ write_ldt_entry(new->ldt, i, old->ldt + i);
22340 return 0;
22341 }
22342
22343@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
22344 retval = copy_ldt(&mm->context, &old_mm->context);
22345 mutex_unlock(&old_mm->context.lock);
22346 }
22347+
22348+ if (tsk == current) {
22349+ mm->context.vdso = 0;
22350+
22351+#ifdef CONFIG_X86_32
22352+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22353+ mm->context.user_cs_base = 0UL;
22354+ mm->context.user_cs_limit = ~0UL;
22355+
22356+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
22357+ cpus_clear(mm->context.cpu_user_cs_mask);
22358+#endif
22359+
22360+#endif
22361+#endif
22362+
22363+ }
22364+
22365 return retval;
22366 }
22367
22368@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
22369 }
22370 }
22371
22372+#ifdef CONFIG_PAX_SEGMEXEC
22373+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
22374+ error = -EINVAL;
22375+ goto out_unlock;
22376+ }
22377+#endif
22378+
22379 fill_ldt(&ldt, &ldt_info);
22380 if (oldmode)
22381 ldt.avl = 0;
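
[Note: the write_ldt() hunk refuses user-supplied LDT code descriptors for SEGMEXEC tasks, since a custom code segment would bypass the segmentation-based NX emulation. The check, lifted into a standalone sketch; MODIFY_LDT_CONTENTS_CODE matches the modify_ldt(2) ABI, while the MF_PAX_SEGMEXEC value is assumed for illustration.]

#include <stdio.h>

#define MODIFY_LDT_CONTENTS_CODE 2      /* as in asm/ldt.h */
#define MF_PAX_SEGMEXEC          0x01UL /* assumed flag value */

struct ldt_info { unsigned int contents; };

static int check_ldt_entry(unsigned long pax_flags, const struct ldt_info *ldt_info)
{
        if ((pax_flags & MF_PAX_SEGMEXEC) &&
            (ldt_info->contents & MODIFY_LDT_CONTENTS_CODE))
                return -22;     /* -EINVAL, as in the hunk */
        return 0;
}

int main(void)
{
        struct ldt_info code = { MODIFY_LDT_CONTENTS_CODE };
        struct ldt_info data = { 0 };

        printf("code entry: %d, data entry: %d\n",
               check_ldt_entry(MF_PAX_SEGMEXEC, &code),
               check_ldt_entry(MF_PAX_SEGMEXEC, &data));
        return 0;
}
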
22382diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
22383index 5b19e4d..6476a76 100644
22384--- a/arch/x86/kernel/machine_kexec_32.c
22385+++ b/arch/x86/kernel/machine_kexec_32.c
22386@@ -26,7 +26,7 @@
22387 #include <asm/cacheflush.h>
22388 #include <asm/debugreg.h>
22389
22390-static void set_idt(void *newidt, __u16 limit)
22391+static void set_idt(struct desc_struct *newidt, __u16 limit)
22392 {
22393 struct desc_ptr curidt;
22394
22395@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
22396 }
22397
22398
22399-static void set_gdt(void *newgdt, __u16 limit)
22400+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
22401 {
22402 struct desc_ptr curgdt;
22403
22404@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
22405 }
22406
22407 control_page = page_address(image->control_code_page);
22408- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
22409+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
22410
22411 relocate_kernel_ptr = control_page;
22412 page_list[PA_CONTROL_PAGE] = __pa(control_page);
22413diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
22414index 3a04b22..1d2eb09 100644
22415--- a/arch/x86/kernel/microcode_core.c
22416+++ b/arch/x86/kernel/microcode_core.c
22417@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
22418 return NOTIFY_OK;
22419 }
22420
22421-static struct notifier_block __refdata mc_cpu_notifier = {
22422+static struct notifier_block mc_cpu_notifier = {
22423 .notifier_call = mc_cpu_callback,
22424 };
22425
22426diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
22427index 3544aed..01ddc1c 100644
22428--- a/arch/x86/kernel/microcode_intel.c
22429+++ b/arch/x86/kernel/microcode_intel.c
22430@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
22431
22432 static int get_ucode_user(void *to, const void *from, size_t n)
22433 {
22434- return copy_from_user(to, from, n);
22435+ return copy_from_user(to, (const void __force_user *)from, n);
22436 }
22437
22438 static enum ucode_state
22439 request_microcode_user(int cpu, const void __user *buf, size_t size)
22440 {
22441- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
22442+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
22443 }
22444
22445 static void microcode_fini_cpu(int cpu)
22446diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
22447index 216a4d7..228255a 100644
22448--- a/arch/x86/kernel/module.c
22449+++ b/arch/x86/kernel/module.c
22450@@ -43,15 +43,60 @@ do { \
22451 } while (0)
22452 #endif
22453
22454-void *module_alloc(unsigned long size)
22455+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
22456 {
22457- if (PAGE_ALIGN(size) > MODULES_LEN)
22458+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
22459 return NULL;
22460 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
22461- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
22462+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
22463 -1, __builtin_return_address(0));
22464 }
22465
22466+void *module_alloc(unsigned long size)
22467+{
22468+
22469+#ifdef CONFIG_PAX_KERNEXEC
22470+ return __module_alloc(size, PAGE_KERNEL);
22471+#else
22472+ return __module_alloc(size, PAGE_KERNEL_EXEC);
22473+#endif
22474+
22475+}
22476+
22477+#ifdef CONFIG_PAX_KERNEXEC
22478+#ifdef CONFIG_X86_32
22479+void *module_alloc_exec(unsigned long size)
22480+{
22481+ struct vm_struct *area;
22482+
22483+ if (size == 0)
22484+ return NULL;
22485+
22486+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
22487+ return area ? area->addr : NULL;
22488+}
22489+EXPORT_SYMBOL(module_alloc_exec);
22490+
22491+void module_free_exec(struct module *mod, void *module_region)
22492+{
22493+ vunmap(module_region);
22494+}
22495+EXPORT_SYMBOL(module_free_exec);
22496+#else
22497+void module_free_exec(struct module *mod, void *module_region)
22498+{
22499+ module_free(mod, module_region);
22500+}
22501+EXPORT_SYMBOL(module_free_exec);
22502+
22503+void *module_alloc_exec(unsigned long size)
22504+{
22505+ return __module_alloc(size, PAGE_KERNEL_RX);
22506+}
22507+EXPORT_SYMBOL(module_alloc_exec);
22508+#endif
22509+#endif
22510+
22511 #ifdef CONFIG_X86_32
22512 int apply_relocate(Elf32_Shdr *sechdrs,
22513 const char *strtab,
22514@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
22515 unsigned int i;
22516 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
22517 Elf32_Sym *sym;
22518- uint32_t *location;
22519+ uint32_t *plocation, location;
22520
22521 DEBUGP("Applying relocate section %u to %u\n",
22522 relsec, sechdrs[relsec].sh_info);
22523 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
22524 /* This is where to make the change */
22525- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
22526- + rel[i].r_offset;
22527+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
22528+ location = (uint32_t)plocation;
22529+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
22530+ plocation = ktla_ktva((void *)plocation);
22531 /* This is the symbol it is referring to. Note that all
22532 undefined symbols have been resolved. */
22533 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
22534@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
22535 switch (ELF32_R_TYPE(rel[i].r_info)) {
22536 case R_386_32:
22537 /* We add the value into the location given */
22538- *location += sym->st_value;
22539+ pax_open_kernel();
22540+ *plocation += sym->st_value;
22541+ pax_close_kernel();
22542 break;
22543 case R_386_PC32:
22544 /* Add the value, subtract its position */
22545- *location += sym->st_value - (uint32_t)location;
22546+ pax_open_kernel();
22547+ *plocation += sym->st_value - location;
22548+ pax_close_kernel();
22549 break;
22550 default:
22551 pr_err("%s: Unknown relocation: %u\n",
22552@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
22553 case R_X86_64_NONE:
22554 break;
22555 case R_X86_64_64:
22556+ pax_open_kernel();
22557 *(u64 *)loc = val;
22558+ pax_close_kernel();
22559 break;
22560 case R_X86_64_32:
22561+ pax_open_kernel();
22562 *(u32 *)loc = val;
22563+ pax_close_kernel();
22564 if (val != *(u32 *)loc)
22565 goto overflow;
22566 break;
22567 case R_X86_64_32S:
22568+ pax_open_kernel();
22569 *(s32 *)loc = val;
22570+ pax_close_kernel();
22571 if ((s64)val != *(s32 *)loc)
22572 goto overflow;
22573 break;
22574 case R_X86_64_PC32:
22575 val -= (u64)loc;
22576+ pax_open_kernel();
22577 *(u32 *)loc = val;
22578+ pax_close_kernel();
22579+
22580 #if 0
22581 if ((s64)val != *(s32 *)loc)
22582 goto overflow;
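
[Note: under PAX_KERNEXEC module memory is split: module_alloc() now hands out non-executable pages for data, and the new module_alloc_exec() hands out read+execute pages for code, so no module mapping is ever writable and executable at once. A userspace analogue with mmap() standing in for the kernel allocators; in the patch itself the i386 executable pool comes from a dedicated MODULES_EXEC_VADDR region, and relocations are applied through pax_open_kernel() as the relocation hunks above show.]

#include <stdio.h>
#include <sys/mman.h>

/* RW pool for module data (never executable) */
static void *module_alloc(unsigned long size)
{
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

/* RX pool for module text (never writable through this mapping) */
static void *module_alloc_exec(unsigned long size)
{
        return mmap(NULL, size, PROT_READ | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
        void *data = module_alloc(4096);
        void *text = module_alloc_exec(4096);

        printf("data pool %p (rw-), text pool %p (r-x)\n", data, text);
        return 0;
}
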
22583diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
22584index 4929502..686c291 100644
22585--- a/arch/x86/kernel/msr.c
22586+++ b/arch/x86/kernel/msr.c
22587@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
22588 return notifier_from_errno(err);
22589 }
22590
22591-static struct notifier_block __refdata msr_class_cpu_notifier = {
22592+static struct notifier_block msr_class_cpu_notifier = {
22593 .notifier_call = msr_class_cpu_callback,
22594 };
22595
22596diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
22597index f84f5c5..f404e81 100644
22598--- a/arch/x86/kernel/nmi.c
22599+++ b/arch/x86/kernel/nmi.c
22600@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
22601 return handled;
22602 }
22603
22604-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
22605+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
22606 {
22607 struct nmi_desc *desc = nmi_to_desc(type);
22608 unsigned long flags;
22609@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
22610 * event confuses some handlers (kdump uses this flag)
22611 */
22612 if (action->flags & NMI_FLAG_FIRST)
22613- list_add_rcu(&action->list, &desc->head);
22614+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
22615 else
22616- list_add_tail_rcu(&action->list, &desc->head);
22617+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
22618
22619 spin_unlock_irqrestore(&desc->lock, flags);
22620 return 0;
22621@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
22622 if (!strcmp(n->name, name)) {
22623 WARN(in_nmi(),
22624 "Trying to free NMI (%s) from NMI context!\n", n->name);
22625- list_del_rcu(&n->list);
22626+ pax_list_del_rcu((struct list_head *)&n->list);
22627 break;
22628 }
22629 }
22630@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
22631 dotraplinkage notrace __kprobes void
22632 do_nmi(struct pt_regs *regs, long error_code)
22633 {
22634+
22635+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22636+ if (!user_mode(regs)) {
22637+ unsigned long cs = regs->cs & 0xFFFF;
22638+ unsigned long ip = ktva_ktla(regs->ip);
22639+
22640+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
22641+ regs->ip = ip;
22642+ }
22643+#endif
22644+
22645 nmi_nesting_preprocess(regs);
22646
22647 nmi_enter();
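
[Note: because the nmiaction tables become const, plain list_add_rcu()/list_del_rcu() on their embedded list_head would write through a read-only object; the pax_list_* helpers perform the same pointer surgery inside a pax_open_kernel() window. A simplified, non-RCU sketch of such a helper; the real variant also publishes entry->next with rcu_assign_pointer().]

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void pax_list_add_tail(struct list_head *entry, struct list_head *head)
{
        struct list_head *prev = head->prev;

        /* pax_open_kernel() would lift write protection on 'entry' here */
        entry->next = head;
        entry->prev = prev;
        prev->next = entry;
        head->prev = entry;
        /* pax_close_kernel() would restore it here */
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head action = { NULL, NULL };

        pax_list_add_tail(&action, &head);
        printf("linked: %d\n", head.next == &action && head.prev == &action);
        return 0;
}
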
22648diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
22649index 6d9582e..f746287 100644
22650--- a/arch/x86/kernel/nmi_selftest.c
22651+++ b/arch/x86/kernel/nmi_selftest.c
22652@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
22653 {
22654 /* trap all the unknown NMIs we may generate */
22655 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
22656- __initdata);
22657+ __initconst);
22658 }
22659
22660 static void __init cleanup_nmi_testsuite(void)
22661@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
22662 unsigned long timeout;
22663
22664 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
22665- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
22666+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
22667 nmi_fail = FAILURE;
22668 return;
22669 }
22670diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
22671index 676b8c7..870ba04 100644
22672--- a/arch/x86/kernel/paravirt-spinlocks.c
22673+++ b/arch/x86/kernel/paravirt-spinlocks.c
22674@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
22675 arch_spin_lock(lock);
22676 }
22677
22678-struct pv_lock_ops pv_lock_ops = {
22679+struct pv_lock_ops pv_lock_ops __read_only = {
22680 #ifdef CONFIG_SMP
22681 .spin_is_locked = __ticket_spin_is_locked,
22682 .spin_is_contended = __ticket_spin_is_contended,
22683diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
22684index 8bfb335..c1463c6 100644
22685--- a/arch/x86/kernel/paravirt.c
22686+++ b/arch/x86/kernel/paravirt.c
22687@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
22688 {
22689 return x;
22690 }
22691+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22692+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
22693+#endif
22694
22695 void __init default_banner(void)
22696 {
22697@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
22698 if (opfunc == NULL)
22699 /* If there's no function, patch it with a ud2a (BUG) */
22700 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
22701- else if (opfunc == _paravirt_nop)
22702+ else if (opfunc == (void *)_paravirt_nop)
22703 /* If the operation is a nop, then nop the callsite */
22704 ret = paravirt_patch_nop();
22705
22706 /* identity functions just return their single argument */
22707- else if (opfunc == _paravirt_ident_32)
22708+ else if (opfunc == (void *)_paravirt_ident_32)
22709 ret = paravirt_patch_ident_32(insnbuf, len);
22710- else if (opfunc == _paravirt_ident_64)
22711+ else if (opfunc == (void *)_paravirt_ident_64)
22712 ret = paravirt_patch_ident_64(insnbuf, len);
22713+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22714+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
22715+ ret = paravirt_patch_ident_64(insnbuf, len);
22716+#endif
22717
22718 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
22719 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
22720@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
22721 if (insn_len > len || start == NULL)
22722 insn_len = len;
22723 else
22724- memcpy(insnbuf, start, insn_len);
22725+ memcpy(insnbuf, ktla_ktva(start), insn_len);
22726
22727 return insn_len;
22728 }
22729@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
22730 return this_cpu_read(paravirt_lazy_mode);
22731 }
22732
22733-struct pv_info pv_info = {
22734+struct pv_info pv_info __read_only = {
22735 .name = "bare hardware",
22736 .paravirt_enabled = 0,
22737 .kernel_rpl = 0,
22738@@ -315,16 +322,16 @@ struct pv_info pv_info = {
22739 #endif
22740 };
22741
22742-struct pv_init_ops pv_init_ops = {
22743+struct pv_init_ops pv_init_ops __read_only = {
22744 .patch = native_patch,
22745 };
22746
22747-struct pv_time_ops pv_time_ops = {
22748+struct pv_time_ops pv_time_ops __read_only = {
22749 .sched_clock = native_sched_clock,
22750 .steal_clock = native_steal_clock,
22751 };
22752
22753-struct pv_irq_ops pv_irq_ops = {
22754+struct pv_irq_ops pv_irq_ops __read_only = {
22755 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
22756 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
22757 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
22758@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
22759 #endif
22760 };
22761
22762-struct pv_cpu_ops pv_cpu_ops = {
22763+struct pv_cpu_ops pv_cpu_ops __read_only = {
22764 .cpuid = native_cpuid,
22765 .get_debugreg = native_get_debugreg,
22766 .set_debugreg = native_set_debugreg,
22767@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
22768 .end_context_switch = paravirt_nop,
22769 };
22770
22771-struct pv_apic_ops pv_apic_ops = {
22772+struct pv_apic_ops pv_apic_ops __read_only = {
22773 #ifdef CONFIG_X86_LOCAL_APIC
22774 .startup_ipi_hook = paravirt_nop,
22775 #endif
22776 };
22777
22778-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
22779+#ifdef CONFIG_X86_32
22780+#ifdef CONFIG_X86_PAE
22781+/* 64-bit pagetable entries */
22782+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
22783+#else
22784 /* 32-bit pagetable entries */
22785 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
22786+#endif
22787 #else
22788 /* 64-bit pagetable entries */
22789 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
22790 #endif
22791
22792-struct pv_mmu_ops pv_mmu_ops = {
22793+struct pv_mmu_ops pv_mmu_ops __read_only = {
22794
22795 .read_cr2 = native_read_cr2,
22796 .write_cr2 = native_write_cr2,
22797@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
22798 .make_pud = PTE_IDENT,
22799
22800 .set_pgd = native_set_pgd,
22801+ .set_pgd_batched = native_set_pgd_batched,
22802 #endif
22803 #endif /* PAGETABLE_LEVELS >= 3 */
22804
22805@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
22806 },
22807
22808 .set_fixmap = native_set_fixmap,
22809+
22810+#ifdef CONFIG_PAX_KERNEXEC
22811+ .pax_open_kernel = native_pax_open_kernel,
22812+ .pax_close_kernel = native_pax_close_kernel,
22813+#endif
22814+
22815 };
22816
22817 EXPORT_SYMBOL_GPL(pv_time_ops);
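
[Note: all pv_*_ops tables gain __read_only, moving these function-pointer structs into memory that is write-protected after init; writable ops tables are a classic kernel exploitation target. A userspace approximation of the effect, with const playing the role of the patch's section attribute.]

#include <stdio.h>

struct pv_time_ops {
        unsigned long long (*sched_clock)(void);
};

static unsigned long long native_sched_clock(void) { return 42; }

/* "const" here stands in for the patch's __read_only attribute */
static const struct pv_time_ops pv_time_ops = {
        .sched_clock = native_sched_clock,
};

int main(void)
{
        printf("clock: %llu\n", pv_time_ops.sched_clock());
        /* pv_time_ops.sched_clock = NULL;  would be rejected at compile time */
        return 0;
}
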
22818diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
22819index 299d493..2ccb0ee 100644
22820--- a/arch/x86/kernel/pci-calgary_64.c
22821+++ b/arch/x86/kernel/pci-calgary_64.c
22822@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
22823 tce_space = be64_to_cpu(readq(target));
22824 tce_space = tce_space & TAR_SW_BITS;
22825
22826- tce_space = tce_space & (~specified_table_size);
22827+ tce_space = tce_space & (~(unsigned long)specified_table_size);
22828 info->tce_space = (u64 *)__va(tce_space);
22829 }
22830 }
22831diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
22832index 35ccf75..7a15747 100644
22833--- a/arch/x86/kernel/pci-iommu_table.c
22834+++ b/arch/x86/kernel/pci-iommu_table.c
22835@@ -2,7 +2,7 @@
22836 #include <asm/iommu_table.h>
22837 #include <linux/string.h>
22838 #include <linux/kallsyms.h>
22839-
22840+#include <linux/sched.h>
22841
22842 #define DEBUG 1
22843
22844diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
22845index 6c483ba..d10ce2f 100644
22846--- a/arch/x86/kernel/pci-swiotlb.c
22847+++ b/arch/x86/kernel/pci-swiotlb.c
22848@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
22849 void *vaddr, dma_addr_t dma_addr,
22850 struct dma_attrs *attrs)
22851 {
22852- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
22853+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
22854 }
22855
22856 static struct dma_map_ops swiotlb_dma_ops = {
22857diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
22858index 2ed787f..f70c9f6 100644
22859--- a/arch/x86/kernel/process.c
22860+++ b/arch/x86/kernel/process.c
22861@@ -36,7 +36,8 @@
22862 * section. Since TSS's are completely CPU-local, we want them
22863 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
22864 */
22865-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
22866+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
22867+EXPORT_SYMBOL(init_tss);
22868
22869 #ifdef CONFIG_X86_64
22870 static DEFINE_PER_CPU(unsigned char, is_idle);
22871@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
22872 task_xstate_cachep =
22873 kmem_cache_create("task_xstate", xstate_size,
22874 __alignof__(union thread_xstate),
22875- SLAB_PANIC | SLAB_NOTRACK, NULL);
22876+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
22877 }
22878
22879 /*
22880@@ -105,7 +106,7 @@ void exit_thread(void)
22881 unsigned long *bp = t->io_bitmap_ptr;
22882
22883 if (bp) {
22884- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
22885+ struct tss_struct *tss = init_tss + get_cpu();
22886
22887 t->io_bitmap_ptr = NULL;
22888 clear_thread_flag(TIF_IO_BITMAP);
22889@@ -136,7 +137,7 @@ void show_regs_common(void)
22890 board = dmi_get_system_info(DMI_BOARD_NAME);
22891
22892 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
22893- current->pid, current->comm, print_tainted(),
22894+ task_pid_nr(current), current->comm, print_tainted(),
22895 init_utsname()->release,
22896 (int)strcspn(init_utsname()->version, " "),
22897 init_utsname()->version,
22898@@ -149,6 +150,9 @@ void flush_thread(void)
22899 {
22900 struct task_struct *tsk = current;
22901
22902+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
22903+ loadsegment(gs, 0);
22904+#endif
22905 flush_ptrace_hw_breakpoint(tsk);
22906 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
22907 drop_init_fpu(tsk);
22908@@ -301,7 +305,7 @@ static void __exit_idle(void)
22909 void exit_idle(void)
22910 {
22911 /* idle loop has pid 0 */
22912- if (current->pid)
22913+ if (task_pid_nr(current))
22914 return;
22915 __exit_idle();
22916 }
22917@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
22918
22919 return ret;
22920 }
22921-void stop_this_cpu(void *dummy)
22922+__noreturn void stop_this_cpu(void *dummy)
22923 {
22924 local_irq_disable();
22925 /*
22926@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
22927 }
22928 early_param("idle", idle_setup);
22929
22930-unsigned long arch_align_stack(unsigned long sp)
22931+#ifdef CONFIG_PAX_RANDKSTACK
22932+void pax_randomize_kstack(struct pt_regs *regs)
22933 {
22934- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
22935- sp -= get_random_int() % 8192;
22936- return sp & ~0xf;
22937-}
22938+ struct thread_struct *thread = &current->thread;
22939+ unsigned long time;
22940
22941-unsigned long arch_randomize_brk(struct mm_struct *mm)
22942-{
22943- unsigned long range_end = mm->brk + 0x02000000;
22944- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
22945-}
22946+ if (!randomize_va_space)
22947+ return;
22948+
22949+ if (v8086_mode(regs))
22950+ return;
22951
22952+ rdtscl(time);
22953+
22954+ /* P4 seems to return a 0 LSB, ignore it */
22955+#ifdef CONFIG_MPENTIUM4
22956+ time &= 0x3EUL;
22957+ time <<= 2;
22958+#elif defined(CONFIG_X86_64)
22959+ time &= 0xFUL;
22960+ time <<= 4;
22961+#else
22962+ time &= 0x1FUL;
22963+ time <<= 3;
22964+#endif
22965+
22966+ thread->sp0 ^= time;
22967+ load_sp0(init_tss + smp_processor_id(), thread);
22968+
22969+#ifdef CONFIG_X86_64
22970+ this_cpu_write(kernel_stack, thread->sp0);
22971+#endif
22972+}
22973+#endif
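
[Note: pax_randomize_kstack() replaces the stock arch_align_stack()/arch_randomize_brk() logic: on kernel entry it XORs the thread's sp0 with masked low TSC bits, with the shift chosen so stack alignment survives. A sketch of the x86-64 mask/shift from the hunk, applied to a hypothetical 64-bit stack top.]

#include <stdint.h>
#include <stdio.h>

/* x86-64 branch of the patch: keep 4 TSC bits, shifted so 16-byte
 * stack alignment is preserved */
static uintptr_t randomize_sp0(uintptr_t sp0, uint32_t tsc)
{
        uintptr_t time = tsc;

        time &= 0xFUL;
        time <<= 4;
        return sp0 ^ time;
}

int main(void)
{
        uintptr_t sp0 = 0xffff880012345000UL;  /* hypothetical stack top (64-bit) */

        printf("%#lx -> %#lx\n", (unsigned long)sp0,
               (unsigned long)randomize_sp0(sp0, 0xdeadbeef));
        return 0;
}
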
22974diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22975index b5a8905..d9cacac 100644
22976--- a/arch/x86/kernel/process_32.c
22977+++ b/arch/x86/kernel/process_32.c
22978@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22979 unsigned long thread_saved_pc(struct task_struct *tsk)
22980 {
22981 return ((unsigned long *)tsk->thread.sp)[3];
22982+//XXX return tsk->thread.eip;
22983 }
22984
22985 void __show_regs(struct pt_regs *regs, int all)
22986@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
22987 unsigned long sp;
22988 unsigned short ss, gs;
22989
22990- if (user_mode_vm(regs)) {
22991+ if (user_mode(regs)) {
22992 sp = regs->sp;
22993 ss = regs->ss & 0xffff;
22994- gs = get_user_gs(regs);
22995 } else {
22996 sp = kernel_stack_pointer(regs);
22997 savesegment(ss, ss);
22998- savesegment(gs, gs);
22999 }
23000+ gs = get_user_gs(regs);
23001
23002 show_regs_common();
23003
23004 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
23005 (u16)regs->cs, regs->ip, regs->flags,
23006- smp_processor_id());
23007+ raw_smp_processor_id());
23008 print_symbol("EIP is at %s\n", regs->ip);
23009
23010 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
23011@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
23012 int copy_thread(unsigned long clone_flags, unsigned long sp,
23013 unsigned long arg, struct task_struct *p)
23014 {
23015- struct pt_regs *childregs = task_pt_regs(p);
23016+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
23017 struct task_struct *tsk;
23018 int err;
23019
23020 p->thread.sp = (unsigned long) childregs;
23021 p->thread.sp0 = (unsigned long) (childregs+1);
23022+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
23023
23024 if (unlikely(p->flags & PF_KTHREAD)) {
23025 /* kernel thread */
23026 memset(childregs, 0, sizeof(struct pt_regs));
23027 p->thread.ip = (unsigned long) ret_from_kernel_thread;
23028- task_user_gs(p) = __KERNEL_STACK_CANARY;
23029- childregs->ds = __USER_DS;
23030- childregs->es = __USER_DS;
23031+ savesegment(gs, childregs->gs);
23032+ childregs->ds = __KERNEL_DS;
23033+ childregs->es = __KERNEL_DS;
23034 childregs->fs = __KERNEL_PERCPU;
23035 childregs->bx = sp; /* function */
23036 childregs->bp = arg;
23037@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23038 struct thread_struct *prev = &prev_p->thread,
23039 *next = &next_p->thread;
23040 int cpu = smp_processor_id();
23041- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23042+ struct tss_struct *tss = init_tss + cpu;
23043 fpu_switch_t fpu;
23044
23045 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
23046@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23047 */
23048 lazy_save_gs(prev->gs);
23049
23050+#ifdef CONFIG_PAX_MEMORY_UDEREF
23051+ __set_fs(task_thread_info(next_p)->addr_limit);
23052+#endif
23053+
23054 /*
23055 * Load the per-thread Thread-Local Storage descriptor.
23056 */
23057@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23058 */
23059 arch_end_context_switch(next_p);
23060
23061+ this_cpu_write(current_task, next_p);
23062+ this_cpu_write(current_tinfo, &next_p->tinfo);
23063+
23064 /*
23065 * Restore %gs if needed (which is common)
23066 */
23067@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23068
23069 switch_fpu_finish(next_p, fpu);
23070
23071- this_cpu_write(current_task, next_p);
23072-
23073 return prev_p;
23074 }
23075
23076@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
23077 } while (count++ < 16);
23078 return 0;
23079 }
23080-
23081diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
23082index 6e68a61..955a9a5 100644
23083--- a/arch/x86/kernel/process_64.c
23084+++ b/arch/x86/kernel/process_64.c
23085@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
23086 struct pt_regs *childregs;
23087 struct task_struct *me = current;
23088
23089- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
23090+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
23091 childregs = task_pt_regs(p);
23092 p->thread.sp = (unsigned long) childregs;
23093 p->thread.usersp = me->thread.usersp;
23094+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
23095 set_tsk_thread_flag(p, TIF_FORK);
23096 p->fpu_counter = 0;
23097 p->thread.io_bitmap_ptr = NULL;
23098@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23099 struct thread_struct *prev = &prev_p->thread;
23100 struct thread_struct *next = &next_p->thread;
23101 int cpu = smp_processor_id();
23102- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23103+ struct tss_struct *tss = init_tss + cpu;
23104 unsigned fsindex, gsindex;
23105 fpu_switch_t fpu;
23106
23107@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23108 prev->usersp = this_cpu_read(old_rsp);
23109 this_cpu_write(old_rsp, next->usersp);
23110 this_cpu_write(current_task, next_p);
23111+ this_cpu_write(current_tinfo, &next_p->tinfo);
23112
23113- this_cpu_write(kernel_stack,
23114- (unsigned long)task_stack_page(next_p) +
23115- THREAD_SIZE - KERNEL_STACK_OFFSET);
23116+ this_cpu_write(kernel_stack, next->sp0);
23117
23118 /*
23119 * Now maybe reload the debug registers and handle I/O bitmaps
23120@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
23121 if (!p || p == current || p->state == TASK_RUNNING)
23122 return 0;
23123 stack = (unsigned long)task_stack_page(p);
23124- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
23125+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
23126 return 0;
23127 fp = *(u64 *)(p->thread.sp);
23128 do {
23129- if (fp < (unsigned long)stack ||
23130- fp >= (unsigned long)stack+THREAD_SIZE)
23131+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
23132 return 0;
23133 ip = *(u64 *)(fp+8);
23134 if (!in_sched_functions(ip))
23135diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
23136index b629bbe..0fa615a 100644
23137--- a/arch/x86/kernel/ptrace.c
23138+++ b/arch/x86/kernel/ptrace.c
23139@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
23140 {
23141 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
23142 unsigned long sp = (unsigned long)&regs->sp;
23143- struct thread_info *tinfo;
23144
23145- if (context == (sp & ~(THREAD_SIZE - 1)))
23146+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
23147 return sp;
23148
23149- tinfo = (struct thread_info *)context;
23150- if (tinfo->previous_esp)
23151- return tinfo->previous_esp;
23152+ sp = *(unsigned long *)context;
23153+ if (sp)
23154+ return sp;
23155
23156 return (unsigned long)regs;
23157 }
23158@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
23159 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
23160 {
23161 int i;
23162- int dr7 = 0;
23163+ unsigned long dr7 = 0;
23164 struct arch_hw_breakpoint *info;
23165
23166 for (i = 0; i < HBP_NUM; i++) {
23167@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
23168 unsigned long addr, unsigned long data)
23169 {
23170 int ret;
23171- unsigned long __user *datap = (unsigned long __user *)data;
23172+ unsigned long __user *datap = (__force unsigned long __user *)data;
23173
23174 switch (request) {
23175 /* read the word at location addr in the USER area. */
23176@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
23177 if ((int) addr < 0)
23178 return -EIO;
23179 ret = do_get_thread_area(child, addr,
23180- (struct user_desc __user *)data);
23181+ (__force struct user_desc __user *) data);
23182 break;
23183
23184 case PTRACE_SET_THREAD_AREA:
23185 if ((int) addr < 0)
23186 return -EIO;
23187 ret = do_set_thread_area(child, addr,
23188- (struct user_desc __user *)data, 0);
23189+ (__force struct user_desc __user *) data, 0);
23190 break;
23191 #endif
23192
23193@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
23194
23195 #ifdef CONFIG_X86_64
23196
23197-static struct user_regset x86_64_regsets[] __read_mostly = {
23198+static user_regset_no_const x86_64_regsets[] __read_only = {
23199 [REGSET_GENERAL] = {
23200 .core_note_type = NT_PRSTATUS,
23201 .n = sizeof(struct user_regs_struct) / sizeof(long),
23202@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
23203 #endif /* CONFIG_X86_64 */
23204
23205 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
23206-static struct user_regset x86_32_regsets[] __read_mostly = {
23207+static user_regset_no_const x86_32_regsets[] __read_only = {
23208 [REGSET_GENERAL] = {
23209 .core_note_type = NT_PRSTATUS,
23210 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
23211@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
23212 */
23213 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
23214
23215-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
23216+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
23217 {
23218 #ifdef CONFIG_X86_64
23219 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
23220@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
23221 memset(info, 0, sizeof(*info));
23222 info->si_signo = SIGTRAP;
23223 info->si_code = si_code;
23224- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
23225+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
23226 }
23227
23228 void user_single_step_siginfo(struct task_struct *tsk,
23229@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
23230 # define IS_IA32 0
23231 #endif
23232
23233+#ifdef CONFIG_GRKERNSEC_SETXID
23234+extern void gr_delayed_cred_worker(void);
23235+#endif
23236+
23237 /*
23238 * We must return the syscall number to actually look up in the table.
23239 * This can be -1L to skip running any syscall at all.
23240@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
23241
23242 user_exit();
23243
23244+#ifdef CONFIG_GRKERNSEC_SETXID
23245+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
23246+ gr_delayed_cred_worker();
23247+#endif
23248+
23249 /*
23250 * If we stepped into a sysenter/syscall insn, it trapped in
23251 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
23252@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
23253 */
23254 user_exit();
23255
23256+#ifdef CONFIG_GRKERNSEC_SETXID
23257+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
23258+ gr_delayed_cred_worker();
23259+#endif
23260+
23261 audit_syscall_exit(regs);
23262
23263 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
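
[Note: kernel_stack_pointer() follows the irq_32.c layout change: thread_info no longer sits at the stack base, so the saved previous stack pointer is the first word of the THREAD_SIZE-aligned region and is read by dereferencing the region base. A userspace sketch of that lookup, with an assumed THREAD_SIZE.]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192UL   /* assumed i386 stack size */

/* previous esp lives in the first word of the THREAD_SIZE-aligned region */
static uintptr_t stack_prev_sp(const void *any_addr_on_stack)
{
        uintptr_t context = (uintptr_t)any_addr_on_stack & ~(THREAD_SIZE - 1);

        return *(const uintptr_t *)context;
}

int main(void)
{
        unsigned char *irqstack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);

        if (!irqstack)
                return 1;

        *(uintptr_t *)irqstack = 0xc0def00dUL;  /* hypothetical saved task sp */
        printf("previous sp: %#lx\n",
               (unsigned long)stack_prev_sp(irqstack + 1234));
        free(irqstack);
        return 0;
}
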
23264diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
23265index 2cb9470..ff1fd80 100644
23266--- a/arch/x86/kernel/pvclock.c
23267+++ b/arch/x86/kernel/pvclock.c
23268@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
23269 return pv_tsc_khz;
23270 }
23271
23272-static atomic64_t last_value = ATOMIC64_INIT(0);
23273+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
23274
23275 void pvclock_resume(void)
23276 {
23277- atomic64_set(&last_value, 0);
23278+ atomic64_set_unchecked(&last_value, 0);
23279 }
23280
23281 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
23282@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
23283 * updating at the same time, and one of them could be slightly behind,
23284 * making the assumption that last_value always go forward fail to hold.
23285 */
23286- last = atomic64_read(&last_value);
23287+ last = atomic64_read_unchecked(&last_value);
23288 do {
23289 if (ret < last)
23290 return last;
23291- last = atomic64_cmpxchg(&last_value, last, ret);
23292+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
23293 } while (unlikely(last != ret));
23294
23295 return ret;
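
[Note: the pvclock hunk only changes the counter type; the surrounding logic is the classic monotonic clamp, where a global last_value plus a cmpxchg loop guarantees the returned cycle count never moves backwards across vCPUs. The loop shape, reproduced with GCC builtins in a userspace sketch.]

#include <stdint.h>
#include <stdio.h>

static uint64_t last_value;  /* atomic64_unchecked_t last_value in the patch */

static uint64_t clamp_monotonic(uint64_t ret)
{
        uint64_t last = __atomic_load_n(&last_value, __ATOMIC_RELAXED);

        do {
                if (ret < last)
                        return last;  /* another reader already saw a later time */
        } while (!__atomic_compare_exchange_n(&last_value, &last, ret, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return ret;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)clamp_monotonic(100)); /* 100 */
        printf("%llu\n", (unsigned long long)clamp_monotonic(90));  /* clamped to 100 */
        return 0;
}
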
23296diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
23297index 76fa1e9..abf09ea 100644
23298--- a/arch/x86/kernel/reboot.c
23299+++ b/arch/x86/kernel/reboot.c
23300@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
23301 EXPORT_SYMBOL(pm_power_off);
23302
23303 static const struct desc_ptr no_idt = {};
23304-static int reboot_mode;
23305+static unsigned short reboot_mode;
23306 enum reboot_type reboot_type = BOOT_ACPI;
23307 int reboot_force;
23308
23309@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
23310
23311 void __noreturn machine_real_restart(unsigned int type)
23312 {
23313+
23314+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
23315+ struct desc_struct *gdt;
23316+#endif
23317+
23318 local_irq_disable();
23319
23320 /*
23321@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
23322
23323 /* Jump to the identity-mapped low memory code */
23324 #ifdef CONFIG_X86_32
23325- asm volatile("jmpl *%0" : :
23326+
23327+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23328+ gdt = get_cpu_gdt_table(smp_processor_id());
23329+ pax_open_kernel();
23330+#ifdef CONFIG_PAX_MEMORY_UDEREF
23331+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
23332+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
23333+ loadsegment(ds, __KERNEL_DS);
23334+ loadsegment(es, __KERNEL_DS);
23335+ loadsegment(ss, __KERNEL_DS);
23336+#endif
23337+#ifdef CONFIG_PAX_KERNEXEC
23338+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
23339+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
23340+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
23341+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
23342+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
23343+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
23344+#endif
23345+ pax_close_kernel();
23346+#endif
23347+
23348+ asm volatile("ljmpl *%0" : :
23349 "rm" (real_mode_header->machine_real_restart_asm),
23350 "a" (type));
23351 #else
23352@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
23353 * try to force a triple fault and then cycle between hitting the keyboard
23354 * controller and doing that
23355 */
23356-static void native_machine_emergency_restart(void)
23357+static void __noreturn native_machine_emergency_restart(void)
23358 {
23359 int i;
23360 int attempt = 0;
23361@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
23362 #endif
23363 }
23364
23365-static void __machine_emergency_restart(int emergency)
23366+static void __noreturn __machine_emergency_restart(int emergency)
23367 {
23368 reboot_emergency = emergency;
23369 machine_ops.emergency_restart();
23370 }
23371
23372-static void native_machine_restart(char *__unused)
23373+static void __noreturn native_machine_restart(char *__unused)
23374 {
23375 pr_notice("machine restart\n");
23376
23377@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
23378 __machine_emergency_restart(0);
23379 }
23380
23381-static void native_machine_halt(void)
23382+static void __noreturn native_machine_halt(void)
23383 {
23384 /* Stop other cpus and apics */
23385 machine_shutdown();
23386@@ -679,7 +706,7 @@ static void native_machine_halt(void)
23387 stop_this_cpu(NULL);
23388 }
23389
23390-static void native_machine_power_off(void)
23391+static void __noreturn native_machine_power_off(void)
23392 {
23393 if (pm_power_off) {
23394 if (!reboot_force)
23395@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
23396 }
23397 /* A fallback in case there is no PM info available */
23398 tboot_shutdown(TB_SHUTDOWN_HALT);
23399+ unreachable();
23400 }
23401
23402-struct machine_ops machine_ops = {
23403+struct machine_ops machine_ops __read_only = {
23404 .power_off = native_machine_power_off,
23405 .shutdown = native_machine_shutdown,
23406 .emergency_restart = native_machine_emergency_restart,
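
[Note: machine_real_restart() gains descriptor fixups: UDEREF shrinks the kernel data segments and KERNEXEC rebases kernel CS, so before the jump to the identity-mapped trampoline (now an ljmpl, which also reloads CS) the affected GDT entries are rewritten back to flat 0..4 GiB segments inside a pax_open_kernel() window. A standalone sketch of the "flatten the descriptor" edit; the struct mirrors x86's desc_struct, and the values are illustrative.]

#include <stdint.h>
#include <stdio.h>

/* field layout mirrors x86 struct desc_struct */
struct desc_struct {
        uint16_t limit0;
        uint16_t base0;
        unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
};

/* rewrite a descriptor back to a flat 0..4 GiB segment */
static void make_flat(struct desc_struct *d)
{
        d->base0 = 0; d->base1 = 0; d->base2 = 0;  /* base 0                    */
        d->limit0 = 0xffff; d->limit = 0xf;        /* limit 0xfffff units       */
        d->g = 1;                                  /* 4 KiB granularity: 4 GiB  */
}

int main(void)
{
        struct desc_struct cs = { 0 };

        make_flat(&cs);
        printf("flat segment: base=0 limit=%#x g=%u\n",
               (unsigned)((cs.limit << 16) | cs.limit0), (unsigned)cs.g);
        return 0;
}
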
23407diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
23408index 7a6f3b3..bed145d7 100644
23409--- a/arch/x86/kernel/relocate_kernel_64.S
23410+++ b/arch/x86/kernel/relocate_kernel_64.S
23411@@ -11,6 +11,7 @@
23412 #include <asm/kexec.h>
23413 #include <asm/processor-flags.h>
23414 #include <asm/pgtable_types.h>
23415+#include <asm/alternative-asm.h>
23416
23417 /*
23418 * Must be relocatable PIC code callable as a C function
23419@@ -160,13 +161,14 @@ identity_mapped:
23420 xorq %rbp, %rbp
23421 xorq %r8, %r8
23422 xorq %r9, %r9
23423- xorq %r10, %r9
23424+ xorq %r10, %r10
23425 xorq %r11, %r11
23426 xorq %r12, %r12
23427 xorq %r13, %r13
23428 xorq %r14, %r14
23429 xorq %r15, %r15
23430
23431+ pax_force_retaddr 0, 1
23432 ret
23433
23434 1:
23435diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
23436index 8b24289..d37b58b 100644
23437--- a/arch/x86/kernel/setup.c
23438+++ b/arch/x86/kernel/setup.c
23439@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
23440
23441 switch (data->type) {
23442 case SETUP_E820_EXT:
23443- parse_e820_ext(data);
23444+ parse_e820_ext((struct setup_data __force_kernel *)data);
23445 break;
23446 case SETUP_DTB:
23447 add_dtb(pa_data);
23448@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
23449 * area (640->1Mb) as ram even though it is not.
23450 * take them out.
23451 */
23452- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
23453+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
23454
23455 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
23456 }
23457@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
23458
23459 if (!boot_params.hdr.root_flags)
23460 root_mountflags &= ~MS_RDONLY;
23461- init_mm.start_code = (unsigned long) _text;
23462- init_mm.end_code = (unsigned long) _etext;
23463+ init_mm.start_code = ktla_ktva((unsigned long) _text);
23464+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
23465 init_mm.end_data = (unsigned long) _edata;
23466 init_mm.brk = _brk_end;
23467
23468- code_resource.start = virt_to_phys(_text);
23469- code_resource.end = virt_to_phys(_etext)-1;
23470- data_resource.start = virt_to_phys(_etext);
23471+ code_resource.start = virt_to_phys(ktla_ktva(_text));
23472+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
23473+ data_resource.start = virt_to_phys(_sdata);
23474 data_resource.end = virt_to_phys(_edata)-1;
23475 bss_resource.start = virt_to_phys(&__bss_start);
23476 bss_resource.end = virt_to_phys(&__bss_stop)-1;
23477diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
23478index 5cdff03..80fa283 100644
23479--- a/arch/x86/kernel/setup_percpu.c
23480+++ b/arch/x86/kernel/setup_percpu.c
23481@@ -21,19 +21,17 @@
23482 #include <asm/cpu.h>
23483 #include <asm/stackprotector.h>
23484
23485-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
23486+#ifdef CONFIG_SMP
23487+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
23488 EXPORT_PER_CPU_SYMBOL(cpu_number);
23489+#endif
23490
23491-#ifdef CONFIG_X86_64
23492 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
23493-#else
23494-#define BOOT_PERCPU_OFFSET 0
23495-#endif
23496
23497 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
23498 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
23499
23500-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
23501+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
23502 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
23503 };
23504 EXPORT_SYMBOL(__per_cpu_offset);
23505@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
23506 {
23507 #ifdef CONFIG_NEED_MULTIPLE_NODES
23508 pg_data_t *last = NULL;
23509- unsigned int cpu;
23510+ int cpu;
23511
23512 for_each_possible_cpu(cpu) {
23513 int node = early_cpu_to_node(cpu);
23514@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
23515 {
23516 #ifdef CONFIG_X86_32
23517 struct desc_struct gdt;
23518+ unsigned long base = per_cpu_offset(cpu);
23519
23520- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
23521- 0x2 | DESCTYPE_S, 0x8);
23522- gdt.s = 1;
23523+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
23524+ 0x83 | DESCTYPE_S, 0xC);
23525 write_gdt_entry(get_cpu_gdt_table(cpu),
23526 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
23527 #endif
23528@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
23529 /* alrighty, percpu areas up and running */
23530 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
23531 for_each_possible_cpu(cpu) {
23532+#ifdef CONFIG_CC_STACKPROTECTOR
23533+#ifdef CONFIG_X86_32
23534+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
23535+#endif
23536+#endif
23537 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
23538 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
23539 per_cpu(cpu_number, cpu) = cpu;
23540@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
23541 */
23542 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
23543 #endif
23544+#ifdef CONFIG_CC_STACKPROTECTOR
23545+#ifdef CONFIG_X86_32
23546+ if (!cpu)
23547+ per_cpu(stack_canary.canary, cpu) = canary;
23548+#endif
23549+#endif
23550 /*
23551 * Up to this point, the boot CPU has been using .init.data
23552 * area. Reload any changed state for the boot CPU.
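The 32-bit per-cpu segment above used to be flat (limit 0xFFFFF pages, i.e. 4 GiB); the replacement derives the limit from the per-cpu base so the segment stops near VMALLOC_END, with the 0xC flags nibble keeping page granularity and a 32-bit default. A rough check of the limit arithmetic, with made-up addresses standing in for per_cpu_offset() and VMALLOC_END:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* hypothetical stand-ins for per_cpu_offset(cpu) and VMALLOC_END */
	unsigned long base = 0xc1a00000UL;
	unsigned long vmalloc_end = 0xf7fe0000UL;

	/* with the granularity bit set, the descriptor limit counts 4 KiB
	 * pages, so the segment spans base .. base + (limit + 1) * 4K - 1 */
	unsigned long limit = (vmalloc_end - base - 1) >> PAGE_SHIFT;

	printf("limit = %#lx pages, segment top = %#lx\n",
	       limit, base + ((limit + 1) << PAGE_SHIFT) - 1);
	return 0;
}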
23553diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
23554index d6bf1f3..3ffce5a 100644
23555--- a/arch/x86/kernel/signal.c
23556+++ b/arch/x86/kernel/signal.c
23557@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
23558 * Align the stack pointer according to the i386 ABI,
23559 * i.e. so that on function entry ((sp + 4) & 15) == 0.
23560 */
23561- sp = ((sp + 4) & -16ul) - 4;
23562+ sp = ((sp - 12) & -16ul) - 4;
23563 #else /* !CONFIG_X86_32 */
23564 sp = round_down(sp, 16) - 8;
23565 #endif
23566@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
23567 }
23568
23569 if (current->mm->context.vdso)
23570- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
23571+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
23572 else
23573- restorer = &frame->retcode;
23574+ restorer = (void __user *)&frame->retcode;
23575 if (ka->sa.sa_flags & SA_RESTORER)
23576 restorer = ka->sa.sa_restorer;
23577
23578@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
23579 * reasons and because gdb uses it as a signature to notice
23580 * signal handler stack frames.
23581 */
23582- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
23583+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
23584
23585 if (err)
23586 return -EFAULT;
23587@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
23588 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
23589
23590 /* Set up to return from userspace. */
23591- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
23592+ if (current->mm->context.vdso)
23593+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
23594+ else
23595+ restorer = (void __user *)&frame->retcode;
23596 if (ka->sa.sa_flags & SA_RESTORER)
23597 restorer = ka->sa.sa_restorer;
23598 put_user_ex(restorer, &frame->pretcode);
23599@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
23600 * reasons and because gdb uses it as a signature to notice
23601 * signal handler stack frames.
23602 */
23603- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
23604+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
23605 } put_user_catch(err);
23606
23607 err |= copy_siginfo_to_user(&frame->info, info);
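Both alignment formulas in the signal.c hunk satisfy the stated ABI rule that (sp + 4) be 16-byte aligned at handler entry; the difference is that the old form could leave an already-aligned sp untouched, while the patched form always lands at least 16 bytes below it. A quick brute-force check of that arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned long sp = 0xbffff000UL; sp < 0xbffff020UL; sp++) {
		unsigned long before = ((sp + 4) & -16UL) - 4;	/* old */
		unsigned long after  = ((sp - 12) & -16UL) - 4;	/* patched */

		/* both meet the i386 ABI: ((sp + 4) & 15) == 0 on entry */
		assert((before + 4) % 16 == 0);
		assert((after + 4) % 16 == 0);

		/* only the patched form always reserves room below sp */
		assert(after <= sp - 16);
		if (before > sp - 16)
			printf("sp=%#lx: old form kept sp at %#lx\n",
			       sp, before);
	}
	return 0;
}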
23608diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
23609index 48d2b7d..90d328a 100644
23610--- a/arch/x86/kernel/smp.c
23611+++ b/arch/x86/kernel/smp.c
23612@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
23613
23614 __setup("nonmi_ipi", nonmi_ipi_setup);
23615
23616-struct smp_ops smp_ops = {
23617+struct smp_ops smp_ops __read_only = {
23618 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
23619 .smp_prepare_cpus = native_smp_prepare_cpus,
23620 .smp_cpus_done = native_smp_cpus_done,
23621diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
23622index ed0fe38..87fc692 100644
23623--- a/arch/x86/kernel/smpboot.c
23624+++ b/arch/x86/kernel/smpboot.c
23625@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
23626 idle->thread.sp = (unsigned long) (((struct pt_regs *)
23627 (THREAD_SIZE + task_stack_page(idle))) - 1);
23628 per_cpu(current_task, cpu) = idle;
23629+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23630
23631 #ifdef CONFIG_X86_32
23632 /* Stack for startup_32 can be just as for start_secondary onwards */
23633@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
23634 #else
23635 clear_tsk_thread_flag(idle, TIF_FORK);
23636 initial_gs = per_cpu_offset(cpu);
23637- per_cpu(kernel_stack, cpu) =
23638- (unsigned long)task_stack_page(idle) -
23639- KERNEL_STACK_OFFSET + THREAD_SIZE;
23640+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23641 #endif
23642+
23643+ pax_open_kernel();
23644 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
23645+ pax_close_kernel();
23646+
23647 initial_code = (unsigned long)start_secondary;
23648 stack_start = idle->thread.sp;
23649
23650@@ -908,6 +911,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
23651 /* the FPU context is blank, nobody can own it */
23652 __cpu_disable_lazy_restore(cpu);
23653
23654+#ifdef CONFIG_PAX_PER_CPU_PGD
23655+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
23656+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23657+ KERNEL_PGD_PTRS);
23658+#endif
23659+
23663 err = do_boot_cpu(apicid, cpu, tidle);
23664 if (err) {
23665 pr_debug("do_boot_cpu failed %d\n", err);
23666diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
23667index 9b4d51d..5d28b58 100644
23668--- a/arch/x86/kernel/step.c
23669+++ b/arch/x86/kernel/step.c
23670@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
23671 struct desc_struct *desc;
23672 unsigned long base;
23673
23674- seg &= ~7UL;
23675+ seg >>= 3;
23676
23677 mutex_lock(&child->mm->context.lock);
23678- if (unlikely((seg >> 3) >= child->mm->context.size))
23679+ if (unlikely(seg >= child->mm->context.size))
23680 addr = -1L; /* bogus selector, access would fault */
23681 else {
23682 desc = child->mm->context.ldt + seg;
23683@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
23684 addr += base;
23685 }
23686 mutex_unlock(&child->mm->context.lock);
23687- }
23688+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
23689+ addr = ktla_ktva(addr);
23690
23691 return addr;
23692 }
23693@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
23694 unsigned char opcode[15];
23695 unsigned long addr = convert_ip_to_linear(child, regs);
23696
23697+	if (addr == -1L)
23698+ return 0;
23699+
23700 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
23701 for (i = 0; i < copied; i++) {
23702 switch (opcode[i]) {
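For reference, the step.c change normalizes the selector to a descriptor-table index up front (seg >>= 3) instead of carrying the masked byte form (seg &= ~7UL) and shifting inside the bounds check, and it routes kernel-CS addresses through ktla_ktva() for the KERNEXEC relocation. The selector anatomy the shift relies on, as a plain C sketch:

#include <stdio.h>

int main(void)
{
	unsigned short sel = 0x002b;	/* arbitrary example selector */

	unsigned index = sel >> 3;	 /* descriptor table index */
	unsigned ti    = (sel >> 2) & 1; /* table indicator: 0 GDT, 1 LDT */
	unsigned rpl   = sel & 3;	 /* requested privilege level */

	printf("index=%u ti=%u rpl=%u\n", index, ti, rpl);
	return 0;
}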
23703diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
23704new file mode 100644
23705index 0000000..207bec6
23706--- /dev/null
23707+++ b/arch/x86/kernel/sys_i386_32.c
23708@@ -0,0 +1,250 @@
23709+/*
23710+ * This file contains various random system calls that
23711+ * have a non-standard calling sequence on the Linux/i386
23712+ * platform.
23713+ */
23714+
23715+#include <linux/errno.h>
23716+#include <linux/sched.h>
23717+#include <linux/mm.h>
23718+#include <linux/fs.h>
23719+#include <linux/smp.h>
23720+#include <linux/sem.h>
23721+#include <linux/msg.h>
23722+#include <linux/shm.h>
23723+#include <linux/stat.h>
23724+#include <linux/syscalls.h>
23725+#include <linux/mman.h>
23726+#include <linux/file.h>
23727+#include <linux/utsname.h>
23728+#include <linux/ipc.h>
23729+
23730+#include <linux/uaccess.h>
23731+#include <linux/unistd.h>
23732+
23733+#include <asm/syscalls.h>
23734+
23735+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
23736+{
23737+ unsigned long pax_task_size = TASK_SIZE;
23738+
23739+#ifdef CONFIG_PAX_SEGMEXEC
23740+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
23741+ pax_task_size = SEGMEXEC_TASK_SIZE;
23742+#endif
23743+
23744+ if (flags & MAP_FIXED)
23745+ if (len > pax_task_size || addr > pax_task_size - len)
23746+ return -EINVAL;
23747+
23748+ return 0;
23749+}
23750+
23751+unsigned long
23752+arch_get_unmapped_area(struct file *filp, unsigned long addr,
23753+ unsigned long len, unsigned long pgoff, unsigned long flags)
23754+{
23755+ struct mm_struct *mm = current->mm;
23756+ struct vm_area_struct *vma;
23757+ unsigned long start_addr, pax_task_size = TASK_SIZE;
23758+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23759+
23760+#ifdef CONFIG_PAX_SEGMEXEC
23761+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23762+ pax_task_size = SEGMEXEC_TASK_SIZE;
23763+#endif
23764+
23765+ pax_task_size -= PAGE_SIZE;
23766+
23767+ if (len > pax_task_size)
23768+ return -ENOMEM;
23769+
23770+ if (flags & MAP_FIXED)
23771+ return addr;
23772+
23773+#ifdef CONFIG_PAX_RANDMMAP
23774+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23775+#endif
23776+
23777+ if (addr) {
23778+ addr = PAGE_ALIGN(addr);
23779+ if (pax_task_size - len >= addr) {
23780+ vma = find_vma(mm, addr);
23781+ if (check_heap_stack_gap(vma, addr, len, offset))
23782+ return addr;
23783+ }
23784+ }
23785+ if (len > mm->cached_hole_size) {
23786+ start_addr = addr = mm->free_area_cache;
23787+ } else {
23788+ start_addr = addr = mm->mmap_base;
23789+ mm->cached_hole_size = 0;
23790+ }
23791+
23792+#ifdef CONFIG_PAX_PAGEEXEC
23793+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
23794+ start_addr = 0x00110000UL;
23795+
23796+#ifdef CONFIG_PAX_RANDMMAP
23797+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23798+ start_addr += mm->delta_mmap & 0x03FFF000UL;
23799+#endif
23800+
23801+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
23802+ start_addr = addr = mm->mmap_base;
23803+ else
23804+ addr = start_addr;
23805+ }
23806+#endif
23807+
23808+full_search:
23809+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23810+ /* At this point: (!vma || addr < vma->vm_end). */
23811+ if (pax_task_size - len < addr) {
23812+ /*
23813+ * Start a new search - just in case we missed
23814+ * some holes.
23815+ */
23816+ if (start_addr != mm->mmap_base) {
23817+ start_addr = addr = mm->mmap_base;
23818+ mm->cached_hole_size = 0;
23819+ goto full_search;
23820+ }
23821+ return -ENOMEM;
23822+ }
23823+ if (check_heap_stack_gap(vma, addr, len, offset))
23824+ break;
23825+ if (addr + mm->cached_hole_size < vma->vm_start)
23826+ mm->cached_hole_size = vma->vm_start - addr;
23827+ addr = vma->vm_end;
23828+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
23829+ start_addr = addr = mm->mmap_base;
23830+ mm->cached_hole_size = 0;
23831+ goto full_search;
23832+ }
23833+ }
23834+
23835+ /*
23836+ * Remember the place where we stopped the search:
23837+ */
23838+ mm->free_area_cache = addr + len;
23839+ return addr;
23840+}
23841+
23842+unsigned long
23843+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23844+ const unsigned long len, const unsigned long pgoff,
23845+ const unsigned long flags)
23846+{
23847+ struct vm_area_struct *vma;
23848+ struct mm_struct *mm = current->mm;
23849+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
23850+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23851+
23852+#ifdef CONFIG_PAX_SEGMEXEC
23853+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23854+ pax_task_size = SEGMEXEC_TASK_SIZE;
23855+#endif
23856+
23857+ pax_task_size -= PAGE_SIZE;
23858+
23859+ /* requested length too big for entire address space */
23860+ if (len > pax_task_size)
23861+ return -ENOMEM;
23862+
23863+ if (flags & MAP_FIXED)
23864+ return addr;
23865+
23866+#ifdef CONFIG_PAX_PAGEEXEC
23867+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
23868+ goto bottomup;
23869+#endif
23870+
23871+#ifdef CONFIG_PAX_RANDMMAP
23872+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23873+#endif
23874+
23875+ /* requesting a specific address */
23876+ if (addr) {
23877+ addr = PAGE_ALIGN(addr);
23878+ if (pax_task_size - len >= addr) {
23879+ vma = find_vma(mm, addr);
23880+ if (check_heap_stack_gap(vma, addr, len, offset))
23881+ return addr;
23882+ }
23883+ }
23884+
23885+ /* check if free_area_cache is useful for us */
23886+ if (len <= mm->cached_hole_size) {
23887+ mm->cached_hole_size = 0;
23888+ mm->free_area_cache = mm->mmap_base;
23889+ }
23890+
23891+ /* either no address requested or can't fit in requested address hole */
23892+ addr = mm->free_area_cache;
23893+
23894+ /* make sure it can fit in the remaining address space */
23895+ if (addr > len) {
23896+ vma = find_vma(mm, addr-len);
23897+ if (check_heap_stack_gap(vma, addr - len, len, offset))
23898+ /* remember the address as a hint for next time */
23899+ return (mm->free_area_cache = addr-len);
23900+ }
23901+
23902+ if (mm->mmap_base < len)
23903+ goto bottomup;
23904+
23905+ addr = mm->mmap_base-len;
23906+
23907+ do {
23908+ /*
23909+ * Lookup failure means no vma is above this address,
23910+ * else if new region fits below vma->vm_start,
23911+ * return with success:
23912+ */
23913+ vma = find_vma(mm, addr);
23914+ if (check_heap_stack_gap(vma, addr, len, offset))
23915+ /* remember the address as a hint for next time */
23916+ return (mm->free_area_cache = addr);
23917+
23918+ /* remember the largest hole we saw so far */
23919+ if (addr + mm->cached_hole_size < vma->vm_start)
23920+ mm->cached_hole_size = vma->vm_start - addr;
23921+
23922+ /* try just below the current vma->vm_start */
23923+ addr = skip_heap_stack_gap(vma, len, offset);
23924+ } while (!IS_ERR_VALUE(addr));
23925+
23926+bottomup:
23927+ /*
23928+ * A failed mmap() very likely causes application failure,
23929+ * so fall back to the bottom-up function here. This scenario
23930+ * can happen with large stack limits and large mmap()
23931+ * allocations.
23932+ */
23933+
23934+#ifdef CONFIG_PAX_SEGMEXEC
23935+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23936+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23937+ else
23938+#endif
23939+
23940+ mm->mmap_base = TASK_UNMAPPED_BASE;
23941+
23942+#ifdef CONFIG_PAX_RANDMMAP
23943+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23944+ mm->mmap_base += mm->delta_mmap;
23945+#endif
23946+
23947+ mm->free_area_cache = mm->mmap_base;
23948+ mm->cached_hole_size = ~0UL;
23949+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23950+ /*
23951+ * Restore the topdown base:
23952+ */
23953+ mm->mmap_base = base;
23954+ mm->free_area_cache = base;
23955+ mm->cached_hole_size = ~0UL;
23956+
23957+ return addr;
23958+}
23959diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23960index 97ef74b..57a1882 100644
23961--- a/arch/x86/kernel/sys_x86_64.c
23962+++ b/arch/x86/kernel/sys_x86_64.c
23963@@ -81,8 +81,8 @@ out:
23964 return error;
23965 }
23966
23967-static void find_start_end(unsigned long flags, unsigned long *begin,
23968- unsigned long *end)
23969+static void find_start_end(struct mm_struct *mm, unsigned long flags,
23970+ unsigned long *begin, unsigned long *end)
23971 {
23972 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23973 unsigned long new_begin;
23974@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23975 *begin = new_begin;
23976 }
23977 } else {
23978- *begin = TASK_UNMAPPED_BASE;
23979+ *begin = mm->mmap_base;
23980 *end = TASK_SIZE;
23981 }
23982 }
23983@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23984 struct vm_area_struct *vma;
23985 struct vm_unmapped_area_info info;
23986 unsigned long begin, end;
23987+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23988
23989 if (flags & MAP_FIXED)
23990 return addr;
23991
23992- find_start_end(flags, &begin, &end);
23993+ find_start_end(mm, flags, &begin, &end);
23994
23995 if (len > end)
23996 return -ENOMEM;
23997
23998+#ifdef CONFIG_PAX_RANDMMAP
23999+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24000+#endif
24001+
24002 if (addr) {
24003 addr = PAGE_ALIGN(addr);
24004 vma = find_vma(mm, addr);
24005- if (end - len >= addr &&
24006- (!vma || addr + len <= vma->vm_start))
24007+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
24008 return addr;
24009 }
24010
24011@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24012 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
24013 goto bottomup;
24014
24015+#ifdef CONFIG_PAX_RANDMMAP
24016+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24017+#endif
24018+
24019 /* requesting a specific address */
24020 if (addr) {
24021 addr = PAGE_ALIGN(addr);
24022diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
24023index f84fe00..f41d9f1 100644
24024--- a/arch/x86/kernel/tboot.c
24025+++ b/arch/x86/kernel/tboot.c
24026@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
24027
24028 void tboot_shutdown(u32 shutdown_type)
24029 {
24030- void (*shutdown)(void);
24031+ void (* __noreturn shutdown)(void);
24032
24033 if (!tboot_enabled())
24034 return;
24035@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
24036
24037 switch_to_tboot_pt();
24038
24039- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
24040+ shutdown = (void *)tboot->shutdown_entry;
24041 shutdown();
24042
24043 /* should not reach here */
24044@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
24045 return 0;
24046 }
24047
24048-static atomic_t ap_wfs_count;
24049+static atomic_unchecked_t ap_wfs_count;
24050
24051 static int tboot_wait_for_aps(int num_aps)
24052 {
24053@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
24054 {
24055 switch (action) {
24056 case CPU_DYING:
24057- atomic_inc(&ap_wfs_count);
24058+ atomic_inc_unchecked(&ap_wfs_count);
24059 if (num_online_cpus() == 1)
24060- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
24061+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
24062 return NOTIFY_BAD;
24063 break;
24064 }
24065 return NOTIFY_OK;
24066 }
24067
24068-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
24069+static struct notifier_block tboot_cpu_notifier =
24070 {
24071 .notifier_call = tboot_cpu_callback,
24072 };
24073@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
24074
24075 tboot_create_trampoline();
24076
24077- atomic_set(&ap_wfs_count, 0);
24078+ atomic_set_unchecked(&ap_wfs_count, 0);
24079 register_hotcpu_notifier(&tboot_cpu_notifier);
24080
24081 acpi_os_set_prepare_sleep(&tboot_sleep);
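ap_wfs_count above is a plain progress counter, not a reference count, so the patch moves it to atomic_unchecked_t: under PAX_REFCOUNT (see the atomic64 assembly later in this patch) the checked atomics trap on signed overflow, and counters that may legitimately grow are exempted. A hedged, non-atomic C sketch of the checked-increment idea, using a compiler builtin in place of the patch's inline asm:

#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for a PAX_REFCOUNT-style increment: detect signed
 * overflow and trap rather than wrapping into a bogus count
 * (atomicity is ignored here for brevity) */
static int checked_inc(int *v)
{
	int out;

	if (__builtin_add_overflow(*v, 1, &out)) {
		/* the real patch raises int $4, the overflow exception */
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	*v = out;
	return out;
}

int main(void)
{
	int refs = 0x7ffffffe;

	checked_inc(&refs);	/* ok: reaches INT_MAX */
	checked_inc(&refs);	/* aborts instead of going negative */
	return 0;
}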
24082diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
24083index 24d3c91..d06b473 100644
24084--- a/arch/x86/kernel/time.c
24085+++ b/arch/x86/kernel/time.c
24086@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
24087 {
24088 unsigned long pc = instruction_pointer(regs);
24089
24090- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
24091+ if (!user_mode(regs) && in_lock_functions(pc)) {
24092 #ifdef CONFIG_FRAME_POINTER
24093- return *(unsigned long *)(regs->bp + sizeof(long));
24094+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
24095 #else
24096 unsigned long *sp =
24097 (unsigned long *)kernel_stack_pointer(regs);
24098@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
24099 * or above a saved flags. Eflags has bits 22-31 zero,
24100 * kernel addresses don't.
24101 */
24102+
24103+#ifdef CONFIG_PAX_KERNEXEC
24104+ return ktla_ktva(sp[0]);
24105+#else
24106 if (sp[0] >> 22)
24107 return sp[0];
24108 if (sp[1] >> 22)
24109 return sp[1];
24110 #endif
24111+
24112+#endif
24113 }
24114 return pc;
24115 }
24116diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
24117index 9d9d2f9..cad418a 100644
24118--- a/arch/x86/kernel/tls.c
24119+++ b/arch/x86/kernel/tls.c
24120@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
24121 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
24122 return -EINVAL;
24123
24124+#ifdef CONFIG_PAX_SEGMEXEC
24125+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
24126+ return -EINVAL;
24127+#endif
24128+
24129 set_tls_desc(p, idx, &info, 1);
24130
24131 return 0;
24132@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
24133
24134 if (kbuf)
24135 info = kbuf;
24136- else if (__copy_from_user(infobuf, ubuf, count))
24137+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
24138 return -EFAULT;
24139 else
24140 info = infobuf;
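The tls.c hunk guards the __copy_from_user() into the on-stack infobuf with a size check: a count larger than the buffer would otherwise let the copy write past the end of the stack buffer. The shape of the fix as a userspace sketch, with memcpy standing in for __copy_from_user and invented names throughout:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the regset path: ubuf and count are caller-supplied */
static int set_regs(const void *ubuf, size_t count)
{
	char infobuf[64];	/* fixed-size staging buffer */

	/* reject oversized requests before copying; without this the
	 * copy itself writes past infobuf */
	if (count > sizeof(infobuf))
		return -EFAULT;
	memcpy(infobuf, ubuf, count);	/* __copy_from_user() in the patch */
	return 0;
}

int main(void)
{
	char big[128] = { 0 };

	printf("%d\n", set_regs(big, sizeof(big)));	/* -14: rejected */
	printf("%d\n", set_regs(big, 32));		/* 0: accepted */
	return 0;
}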
24141diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
24142index ecffca1..95c4d13 100644
24143--- a/arch/x86/kernel/traps.c
24144+++ b/arch/x86/kernel/traps.c
24145@@ -68,12 +68,6 @@
24146 #include <asm/setup.h>
24147
24148 asmlinkage int system_call(void);
24149-
24150-/*
24151- * The IDT has to be page-aligned to simplify the Pentium
24152- * F0 0F bug workaround.
24153- */
24154-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
24155 #endif
24156
24157 DECLARE_BITMAP(used_vectors, NR_VECTORS);
24158@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
24159 }
24160
24161 static int __kprobes
24162-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
24163+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
24164 struct pt_regs *regs, long error_code)
24165 {
24166 #ifdef CONFIG_X86_32
24167- if (regs->flags & X86_VM_MASK) {
24168+ if (v8086_mode(regs)) {
24169 /*
24170 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
24171 * On nmi (interrupt 2), do_trap should not be called.
24172@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
24173 return -1;
24174 }
24175 #endif
24176- if (!user_mode(regs)) {
24177+ if (!user_mode_novm(regs)) {
24178 if (!fixup_exception(regs)) {
24179 tsk->thread.error_code = error_code;
24180 tsk->thread.trap_nr = trapnr;
24181+
24182+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24183+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
24184+ str = "PAX: suspicious stack segment fault";
24185+#endif
24186+
24187 die(str, regs, error_code);
24188 }
24189+
24190+#ifdef CONFIG_PAX_REFCOUNT
24191+ if (trapnr == 4)
24192+ pax_report_refcount_overflow(regs);
24193+#endif
24194+
24195 return 0;
24196 }
24197
24198@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
24199 }
24200
24201 static void __kprobes
24202-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
24203+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
24204 long error_code, siginfo_t *info)
24205 {
24206 struct task_struct *tsk = current;
24207@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
24208 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
24209 printk_ratelimit()) {
24210 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
24211- tsk->comm, tsk->pid, str,
24212+ tsk->comm, task_pid_nr(tsk), str,
24213 regs->ip, regs->sp, error_code);
24214 print_vma_addr(" in ", regs->ip);
24215 pr_cont("\n");
24216@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
24217 conditional_sti(regs);
24218
24219 #ifdef CONFIG_X86_32
24220- if (regs->flags & X86_VM_MASK) {
24221+ if (v8086_mode(regs)) {
24222 local_irq_enable();
24223 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
24224 goto exit;
24225@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
24226 #endif
24227
24228 tsk = current;
24229- if (!user_mode(regs)) {
24230+ if (!user_mode_novm(regs)) {
24231 if (fixup_exception(regs))
24232 goto exit;
24233
24234 tsk->thread.error_code = error_code;
24235 tsk->thread.trap_nr = X86_TRAP_GP;
24236 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
24237- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
24238+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
24239+
24240+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24241+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
24242+ die("PAX: suspicious general protection fault", regs, error_code);
24243+ else
24244+#endif
24245+
24246 die("general protection fault", regs, error_code);
24247+ }
24248 goto exit;
24249 }
24250
24251+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24252+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
24253+ struct mm_struct *mm = tsk->mm;
24254+ unsigned long limit;
24255+
24256+ down_write(&mm->mmap_sem);
24257+ limit = mm->context.user_cs_limit;
24258+ if (limit < TASK_SIZE) {
24259+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
24260+ up_write(&mm->mmap_sem);
24261+ return;
24262+ }
24263+ up_write(&mm->mmap_sem);
24264+ }
24265+#endif
24266+
24267 tsk->thread.error_code = error_code;
24268 tsk->thread.trap_nr = X86_TRAP_GP;
24269
24270@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
24271 /* It's safe to allow irq's after DR6 has been saved */
24272 preempt_conditional_sti(regs);
24273
24274- if (regs->flags & X86_VM_MASK) {
24275+ if (v8086_mode(regs)) {
24276 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
24277 X86_TRAP_DB);
24278 preempt_conditional_cli(regs);
24279@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
24280 * We already checked v86 mode above, so we can check for kernel mode
24281 * by just checking the CPL of CS.
24282 */
24283- if ((dr6 & DR_STEP) && !user_mode(regs)) {
24284+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
24285 tsk->thread.debugreg6 &= ~DR_STEP;
24286 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
24287 regs->flags &= ~X86_EFLAGS_TF;
24288@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
24289 return;
24290 conditional_sti(regs);
24291
24292- if (!user_mode_vm(regs))
24293+ if (!user_mode(regs))
24294 {
24295 if (!fixup_exception(regs)) {
24296 task->thread.error_code = error_code;
24297diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
24298index c71025b..b117501 100644
24299--- a/arch/x86/kernel/uprobes.c
24300+++ b/arch/x86/kernel/uprobes.c
24301@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
24302 int ret = NOTIFY_DONE;
24303
24304 /* We are only interested in userspace traps */
24305- if (regs && !user_mode_vm(regs))
24306+ if (regs && !user_mode(regs))
24307 return NOTIFY_DONE;
24308
24309 switch (val) {
24310diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
24311index b9242ba..50c5edd 100644
24312--- a/arch/x86/kernel/verify_cpu.S
24313+++ b/arch/x86/kernel/verify_cpu.S
24314@@ -20,6 +20,7 @@
24315 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
24316 * arch/x86/kernel/trampoline_64.S: secondary processor verification
24317 * arch/x86/kernel/head_32.S: processor startup
24318+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
24319 *
24320 * verify_cpu, returns the status of longmode and SSE in register %eax.
24321 * 0: Success 1: Failure
24322diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
24323index 1dfe69c..a3df6f6 100644
24324--- a/arch/x86/kernel/vm86_32.c
24325+++ b/arch/x86/kernel/vm86_32.c
24326@@ -43,6 +43,7 @@
24327 #include <linux/ptrace.h>
24328 #include <linux/audit.h>
24329 #include <linux/stddef.h>
24330+#include <linux/grsecurity.h>
24331
24332 #include <asm/uaccess.h>
24333 #include <asm/io.h>
24334@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
24335 do_exit(SIGSEGV);
24336 }
24337
24338- tss = &per_cpu(init_tss, get_cpu());
24339+ tss = init_tss + get_cpu();
24340 current->thread.sp0 = current->thread.saved_sp0;
24341 current->thread.sysenter_cs = __KERNEL_CS;
24342 load_sp0(tss, &current->thread);
24343@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
24344 struct task_struct *tsk;
24345 int tmp, ret = -EPERM;
24346
24347+#ifdef CONFIG_GRKERNSEC_VM86
24348+ if (!capable(CAP_SYS_RAWIO)) {
24349+ gr_handle_vm86();
24350+ goto out;
24351+ }
24352+#endif
24353+
24354 tsk = current;
24355 if (tsk->thread.saved_sp0)
24356 goto out;
24357@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
24358 int tmp, ret;
24359 struct vm86plus_struct __user *v86;
24360
24361+#ifdef CONFIG_GRKERNSEC_VM86
24362+ if (!capable(CAP_SYS_RAWIO)) {
24363+ gr_handle_vm86();
24364+ ret = -EPERM;
24365+ goto out;
24366+ }
24367+#endif
24368+
24369 tsk = current;
24370 switch (cmd) {
24371 case VM86_REQUEST_IRQ:
24372@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
24373 tsk->thread.saved_fs = info->regs32->fs;
24374 tsk->thread.saved_gs = get_user_gs(info->regs32);
24375
24376- tss = &per_cpu(init_tss, get_cpu());
24377+ tss = init_tss + get_cpu();
24378 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
24379 if (cpu_has_sep)
24380 tsk->thread.sysenter_cs = 0;
24381@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
24382 goto cannot_handle;
24383 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
24384 goto cannot_handle;
24385- intr_ptr = (unsigned long __user *) (i << 2);
24386+ intr_ptr = (__force unsigned long __user *) (i << 2);
24387 if (get_user(segoffs, intr_ptr))
24388 goto cannot_handle;
24389 if ((segoffs >> 16) == BIOSSEG)
24390diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
24391index 22a1530..8fbaaad 100644
24392--- a/arch/x86/kernel/vmlinux.lds.S
24393+++ b/arch/x86/kernel/vmlinux.lds.S
24394@@ -26,6 +26,13 @@
24395 #include <asm/page_types.h>
24396 #include <asm/cache.h>
24397 #include <asm/boot.h>
24398+#include <asm/segment.h>
24399+
24400+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24401+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
24402+#else
24403+#define __KERNEL_TEXT_OFFSET 0
24404+#endif
24405
24406 #undef i386 /* in case the preprocessor is a 32bit one */
24407
24408@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
24409
24410 PHDRS {
24411 text PT_LOAD FLAGS(5); /* R_E */
24412+#ifdef CONFIG_X86_32
24413+ module PT_LOAD FLAGS(5); /* R_E */
24414+#endif
24415+#ifdef CONFIG_XEN
24416+ rodata PT_LOAD FLAGS(5); /* R_E */
24417+#else
24418+ rodata PT_LOAD FLAGS(4); /* R__ */
24419+#endif
24420 data PT_LOAD FLAGS(6); /* RW_ */
24421-#ifdef CONFIG_X86_64
24422+ init.begin PT_LOAD FLAGS(6); /* RW_ */
24423 #ifdef CONFIG_SMP
24424 percpu PT_LOAD FLAGS(6); /* RW_ */
24425 #endif
24426+ text.init PT_LOAD FLAGS(5); /* R_E */
24427+ text.exit PT_LOAD FLAGS(5); /* R_E */
24428 init PT_LOAD FLAGS(7); /* RWE */
24429-#endif
24430 note PT_NOTE FLAGS(0); /* ___ */
24431 }
24432
24433 SECTIONS
24434 {
24435 #ifdef CONFIG_X86_32
24436- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
24437- phys_startup_32 = startup_32 - LOAD_OFFSET;
24438+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
24439 #else
24440- . = __START_KERNEL;
24441- phys_startup_64 = startup_64 - LOAD_OFFSET;
24442+ . = __START_KERNEL;
24443 #endif
24444
24445 /* Text and read-only data */
24446- .text : AT(ADDR(.text) - LOAD_OFFSET) {
24447- _text = .;
24448+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
24449 /* bootstrapping code */
24450+#ifdef CONFIG_X86_32
24451+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
24452+#else
24453+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
24454+#endif
24455+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
24456+ _text = .;
24457 HEAD_TEXT
24458 #ifdef CONFIG_X86_32
24459 . = ALIGN(PAGE_SIZE);
24460@@ -108,13 +128,48 @@ SECTIONS
24461 IRQENTRY_TEXT
24462 *(.fixup)
24463 *(.gnu.warning)
24464- /* End of text section */
24465- _etext = .;
24466 } :text = 0x9090
24467
24468- NOTES :text :note
24469+ . += __KERNEL_TEXT_OFFSET;
24470
24471- EXCEPTION_TABLE(16) :text = 0x9090
24472+#ifdef CONFIG_X86_32
24473+ . = ALIGN(PAGE_SIZE);
24474+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
24475+
24476+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
24477+ MODULES_EXEC_VADDR = .;
24478+ BYTE(0)
24479+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
24480+ . = ALIGN(HPAGE_SIZE) - 1;
24481+ MODULES_EXEC_END = .;
24482+#endif
24483+
24484+ } :module
24485+#endif
24486+
24487+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
24488+ /* End of text section */
24489+ BYTE(0)
24490+ _etext = . - __KERNEL_TEXT_OFFSET;
24491+ }
24492+
24493+#ifdef CONFIG_X86_32
24494+ . = ALIGN(PAGE_SIZE);
24495+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
24496+ *(.idt)
24497+ . = ALIGN(PAGE_SIZE);
24498+ *(.empty_zero_page)
24499+ *(.initial_pg_fixmap)
24500+ *(.initial_pg_pmd)
24501+ *(.initial_page_table)
24502+ *(.swapper_pg_dir)
24503+ } :rodata
24504+#endif
24505+
24506+ . = ALIGN(PAGE_SIZE);
24507+ NOTES :rodata :note
24508+
24509+ EXCEPTION_TABLE(16) :rodata
24510
24511 #if defined(CONFIG_DEBUG_RODATA)
24512 /* .text should occupy whole number of pages */
24513@@ -126,16 +181,20 @@ SECTIONS
24514
24515 /* Data */
24516 .data : AT(ADDR(.data) - LOAD_OFFSET) {
24517+
24518+#ifdef CONFIG_PAX_KERNEXEC
24519+ . = ALIGN(HPAGE_SIZE);
24520+#else
24521+ . = ALIGN(PAGE_SIZE);
24522+#endif
24523+
24524 /* Start of data section */
24525 _sdata = .;
24526
24527 /* init_task */
24528 INIT_TASK_DATA(THREAD_SIZE)
24529
24530-#ifdef CONFIG_X86_32
24531- /* 32 bit has nosave before _edata */
24532 NOSAVE_DATA
24533-#endif
24534
24535 PAGE_ALIGNED_DATA(PAGE_SIZE)
24536
24537@@ -176,12 +235,19 @@ SECTIONS
24538 #endif /* CONFIG_X86_64 */
24539
24540 /* Init code and data - will be freed after init */
24541- . = ALIGN(PAGE_SIZE);
24542 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
24543+ BYTE(0)
24544+
24545+#ifdef CONFIG_PAX_KERNEXEC
24546+ . = ALIGN(HPAGE_SIZE);
24547+#else
24548+ . = ALIGN(PAGE_SIZE);
24549+#endif
24550+
24551 __init_begin = .; /* paired with __init_end */
24552- }
24553+ } :init.begin
24554
24555-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
24556+#ifdef CONFIG_SMP
24557 /*
24558 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
24559 * output PHDR, so the next output section - .init.text - should
24560@@ -190,12 +256,27 @@ SECTIONS
24561 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
24562 #endif
24563
24564- INIT_TEXT_SECTION(PAGE_SIZE)
24565-#ifdef CONFIG_X86_64
24566- :init
24567-#endif
24568+ . = ALIGN(PAGE_SIZE);
24569+ init_begin = .;
24570+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
24571+ VMLINUX_SYMBOL(_sinittext) = .;
24572+ INIT_TEXT
24573+ VMLINUX_SYMBOL(_einittext) = .;
24574+ . = ALIGN(PAGE_SIZE);
24575+ } :text.init
24576
24577- INIT_DATA_SECTION(16)
24578+ /*
24579+	 * .exit.text is discarded at runtime, not link time, to deal with
24580+ * references from .altinstructions and .eh_frame
24581+ */
24582+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
24583+ EXIT_TEXT
24584+ . = ALIGN(16);
24585+ } :text.exit
24586+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
24587+
24588+ . = ALIGN(PAGE_SIZE);
24589+ INIT_DATA_SECTION(16) :init
24590
24591 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
24592 __x86_cpu_dev_start = .;
24593@@ -257,19 +338,12 @@ SECTIONS
24594 }
24595
24596 . = ALIGN(8);
24597- /*
24598- * .exit.text is discard at runtime, not link time, to deal with
24599- * references from .altinstructions and .eh_frame
24600- */
24601- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
24602- EXIT_TEXT
24603- }
24604
24605 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
24606 EXIT_DATA
24607 }
24608
24609-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
24610+#ifndef CONFIG_SMP
24611 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
24612 #endif
24613
24614@@ -288,16 +362,10 @@ SECTIONS
24615 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
24616 __smp_locks = .;
24617 *(.smp_locks)
24618- . = ALIGN(PAGE_SIZE);
24619 __smp_locks_end = .;
24620+ . = ALIGN(PAGE_SIZE);
24621 }
24622
24623-#ifdef CONFIG_X86_64
24624- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
24625- NOSAVE_DATA
24626- }
24627-#endif
24628-
24629 /* BSS */
24630 . = ALIGN(PAGE_SIZE);
24631 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
24632@@ -313,6 +381,7 @@ SECTIONS
24633 __brk_base = .;
24634 . += 64 * 1024; /* 64k alignment slop space */
24635 *(.brk_reservation) /* areas brk users have reserved */
24636+ . = ALIGN(HPAGE_SIZE);
24637 __brk_limit = .;
24638 }
24639
24640@@ -339,13 +408,12 @@ SECTIONS
24641 * for the boot processor.
24642 */
24643 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
24644-INIT_PER_CPU(gdt_page);
24645 INIT_PER_CPU(irq_stack_union);
24646
24647 /*
24648 * Build-time check on the image size:
24649 */
24650-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
24651+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
24652 "kernel image bigger than KERNEL_IMAGE_SIZE");
24653
24654 #ifdef CONFIG_SMP
24655diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
24656index 9a907a6..f83f921 100644
24657--- a/arch/x86/kernel/vsyscall_64.c
24658+++ b/arch/x86/kernel/vsyscall_64.c
24659@@ -56,15 +56,13 @@
24660 DEFINE_VVAR(int, vgetcpu_mode);
24661 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
24662
24663-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
24664+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
24665
24666 static int __init vsyscall_setup(char *str)
24667 {
24668 if (str) {
24669 if (!strcmp("emulate", str))
24670 vsyscall_mode = EMULATE;
24671- else if (!strcmp("native", str))
24672- vsyscall_mode = NATIVE;
24673 else if (!strcmp("none", str))
24674 vsyscall_mode = NONE;
24675 else
24676@@ -323,8 +321,7 @@ do_ret:
24677 return true;
24678
24679 sigsegv:
24680- force_sig(SIGSEGV, current);
24681- return true;
24682+ do_group_exit(SIGKILL);
24683 }
24684
24685 /*
24686@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
24687 extern char __vvar_page;
24688 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
24689
24690- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
24691- vsyscall_mode == NATIVE
24692- ? PAGE_KERNEL_VSYSCALL
24693- : PAGE_KERNEL_VVAR);
24694+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
24695 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
24696 (unsigned long)VSYSCALL_START);
24697
24698diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
24699index 1330dd1..d220b99 100644
24700--- a/arch/x86/kernel/x8664_ksyms_64.c
24701+++ b/arch/x86/kernel/x8664_ksyms_64.c
24702@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
24703 EXPORT_SYMBOL(copy_user_generic_unrolled);
24704 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
24705 EXPORT_SYMBOL(__copy_user_nocache);
24706-EXPORT_SYMBOL(_copy_from_user);
24707-EXPORT_SYMBOL(_copy_to_user);
24708
24709 EXPORT_SYMBOL(copy_page);
24710 EXPORT_SYMBOL(clear_page);
24711diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
24712index 7a3d075..6cb373d 100644
24713--- a/arch/x86/kernel/x86_init.c
24714+++ b/arch/x86/kernel/x86_init.c
24715@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
24716 },
24717 };
24718
24719-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24720+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
24721 .early_percpu_clock_init = x86_init_noop,
24722 .setup_percpu_clockev = setup_secondary_APIC_clock,
24723 };
24724@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24725 static void default_nmi_init(void) { };
24726 static int default_i8042_detect(void) { return 1; };
24727
24728-struct x86_platform_ops x86_platform = {
24729+struct x86_platform_ops x86_platform __read_only = {
24730 .calibrate_tsc = native_calibrate_tsc,
24731 .get_wallclock = mach_get_cmos_time,
24732 .set_wallclock = mach_set_rtc_mmss,
24733@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
24734 };
24735
24736 EXPORT_SYMBOL_GPL(x86_platform);
24737-struct x86_msi_ops x86_msi = {
24738+struct x86_msi_ops x86_msi __read_only = {
24739 .setup_msi_irqs = native_setup_msi_irqs,
24740 .teardown_msi_irq = native_teardown_msi_irq,
24741 .teardown_msi_irqs = default_teardown_msi_irqs,
24742 .restore_msi_irqs = default_restore_msi_irqs,
24743 };
24744
24745-struct x86_io_apic_ops x86_io_apic_ops = {
24746+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
24747 .init = native_io_apic_init_mappings,
24748 .read = native_io_apic_read,
24749 .write = native_io_apic_write,
24750diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
24751index ada87a3..afea76d 100644
24752--- a/arch/x86/kernel/xsave.c
24753+++ b/arch/x86/kernel/xsave.c
24754@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
24755 {
24756 int err;
24757
24758+ buf = (struct xsave_struct __user *)____m(buf);
24759 if (use_xsave())
24760 err = xsave_user(buf);
24761 else if (use_fxsr())
24762@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
24763 */
24764 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
24765 {
24766+ buf = (void __user *)____m(buf);
24767 if (use_xsave()) {
24768 if ((unsigned long)buf % 64 || fx_only) {
24769 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
24770diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
24771index a20ecb5..d0e2194 100644
24772--- a/arch/x86/kvm/cpuid.c
24773+++ b/arch/x86/kvm/cpuid.c
24774@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
24775 struct kvm_cpuid2 *cpuid,
24776 struct kvm_cpuid_entry2 __user *entries)
24777 {
24778- int r;
24779+ int r, i;
24780
24781 r = -E2BIG;
24782 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
24783 goto out;
24784 r = -EFAULT;
24785- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
24786- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24787+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24788 goto out;
24789+ for (i = 0; i < cpuid->nent; ++i) {
24790+ struct kvm_cpuid_entry2 cpuid_entry;
24791+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
24792+ goto out;
24793+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
24794+ }
24795 vcpu->arch.cpuid_nent = cpuid->nent;
24796 kvm_apic_set_version(vcpu);
24797 kvm_x86_ops->cpuid_update(vcpu);
24798@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
24799 struct kvm_cpuid2 *cpuid,
24800 struct kvm_cpuid_entry2 __user *entries)
24801 {
24802- int r;
24803+ int r, i;
24804
24805 r = -E2BIG;
24806 if (cpuid->nent < vcpu->arch.cpuid_nent)
24807 goto out;
24808 r = -EFAULT;
24809- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
24810- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24811+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24812 goto out;
24813+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
24814+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
24815+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
24816+ goto out;
24817+ }
24818 return 0;
24819
24820 out:
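Rather than one copy_from_user() spanning all cpuid->nent entries, the hunks above check access_ok() once and then move each entry through a stack local, so every individual copy is bounded by sizeof(struct kvm_cpuid_entry2). A userspace sketch of that pattern; the struct is abbreviated and memcpy stands in for __copy_from_user:

#include <stdio.h>
#include <string.h>

struct entry { unsigned function, index, flags; };	/* abbreviated */

#define MAX_ENTRIES 80	/* stand-in for KVM_MAX_CPUID_ENTRIES */

static struct entry table[MAX_ENTRIES];

static int set_entries(const struct entry *uentries, unsigned nent)
{
	unsigned i;

	if (nent > MAX_ENTRIES)
		return -1;	/* -E2BIG in the real code */

	/* one bounded copy per entry via a local, instead of a single
	 * nent * sizeof(entry) copy straight into the destination */
	for (i = 0; i < nent; i++) {
		struct entry e;

		memcpy(&e, &uentries[i], sizeof(e));
		table[i] = e;
	}
	return 0;
}

int main(void)
{
	struct entry src[2] = { { 1, 0, 0 }, { 2, 0, 0 } };

	printf("%d\n", set_entries(src, 2));
	return 0;
}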
24821diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
24822index a27e763..54bfe43 100644
24823--- a/arch/x86/kvm/emulate.c
24824+++ b/arch/x86/kvm/emulate.c
24825@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24826
24827 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
24828 do { \
24829+ unsigned long _tmp; \
24830 __asm__ __volatile__ ( \
24831 _PRE_EFLAGS("0", "4", "2") \
24832 _op _suffix " %"_x"3,%1; " \
24833@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24834 /* Raw emulation: instruction has two explicit operands. */
24835 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
24836 do { \
24837- unsigned long _tmp; \
24838- \
24839 switch ((ctxt)->dst.bytes) { \
24840 case 2: \
24841 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
24842@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24843
24844 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
24845 do { \
24846- unsigned long _tmp; \
24847 switch ((ctxt)->dst.bytes) { \
24848 case 1: \
24849 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
24850diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
24851index a2f492c..899e107 100644
24852--- a/arch/x86/kvm/lapic.c
24853+++ b/arch/x86/kvm/lapic.c
24854@@ -55,7 +55,7 @@
24855 #define APIC_BUS_CYCLE_NS 1
24856
24857 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
24858-#define apic_debug(fmt, arg...)
24859+#define apic_debug(fmt, arg...) do {} while (0)
24860
24861 #define APIC_LVT_NUM 6
24862 /* 14 is the version for Xeon and Pentium 8.4.8*/
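The lapic.c change gives the disabled apic_debug() a do {} while (0) body instead of an empty expansion: the do-while form is a genuine single statement, so `if (x) apic_debug(...);` keeps a real body (no bare `;` for -Wempty-body to flag) and the macro composes safely wherever a statement is expected. A small illustration:

#include <stdio.h>

/* disabled-debug macro in the do { } while (0) style */
#define apic_dbg(fmt, ...)	do {} while (0)

int main(void)
{
	int vector = 32;

	if (vector < 16)
		apic_dbg("low vector %d\n", vector);	/* real statement */
	else
		printf("vector %d looks fine\n", vector);
	return 0;
}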
24863diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
24864index 891eb6d..e027900 100644
24865--- a/arch/x86/kvm/paging_tmpl.h
24866+++ b/arch/x86/kvm/paging_tmpl.h
24867@@ -208,7 +208,7 @@ retry_walk:
24868 if (unlikely(kvm_is_error_hva(host_addr)))
24869 goto error;
24870
24871- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
24872+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
24873 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
24874 goto error;
24875 walker->ptep_user[walker->level - 1] = ptep_user;
24876diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
24877index d29d3cd..ec9d522 100644
24878--- a/arch/x86/kvm/svm.c
24879+++ b/arch/x86/kvm/svm.c
24880@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
24881 int cpu = raw_smp_processor_id();
24882
24883 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
24884+
24885+ pax_open_kernel();
24886 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
24887+ pax_close_kernel();
24888+
24889 load_TR_desc();
24890 }
24891
24892@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24893 #endif
24894 #endif
24895
24896+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24897+ __set_fs(current_thread_info()->addr_limit);
24898+#endif
24899+
24900 reload_tss(vcpu);
24901
24902 local_irq_disable();
24903diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24904index 9120ae1..aca46d0 100644
24905--- a/arch/x86/kvm/vmx.c
24906+++ b/arch/x86/kvm/vmx.c
24907@@ -1164,12 +1164,12 @@ static void vmcs_write64(unsigned long field, u64 value)
24908 #endif
24909 }
24910
24911-static void vmcs_clear_bits(unsigned long field, u32 mask)
24912+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
24913 {
24914 vmcs_writel(field, vmcs_readl(field) & ~mask);
24915 }
24916
24917-static void vmcs_set_bits(unsigned long field, u32 mask)
24918+static void vmcs_set_bits(unsigned long field, unsigned long mask)
24919 {
24920 vmcs_writel(field, vmcs_readl(field) | mask);
24921 }
24922@@ -1370,7 +1370,11 @@ static void reload_tss(void)
24923 struct desc_struct *descs;
24924
24925 descs = (void *)gdt->address;
24926+
24927+ pax_open_kernel();
24928 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24929+ pax_close_kernel();
24930+
24931 load_TR_desc();
24932 }
24933
24934@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24935 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24936 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24937
24938+#ifdef CONFIG_PAX_PER_CPU_PGD
24939+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24940+#endif
24941+
24942 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24943 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24944 vmx->loaded_vmcs->cpu = cpu;
24945@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
24946 if (!cpu_has_vmx_flexpriority())
24947 flexpriority_enabled = 0;
24948
24949- if (!cpu_has_vmx_tpr_shadow())
24950- kvm_x86_ops->update_cr8_intercept = NULL;
24951+ if (!cpu_has_vmx_tpr_shadow()) {
24952+ pax_open_kernel();
24953+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24954+ pax_close_kernel();
24955+ }
24956
24957 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24958 kvm_disable_largepages();
24959@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
24960
24961 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24962 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24963+
24964+#ifndef CONFIG_PAX_PER_CPU_PGD
24965 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24966+#endif
24967
24968 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24969 #ifdef CONFIG_X86_64
24970@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
24971 native_store_idt(&dt);
24972 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24973
24974- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24975+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24976
24977 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24978 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24979@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24980 "jmp 2f \n\t"
24981 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24982 "2: "
24983+
24984+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24985+ "ljmp %[cs],$3f\n\t"
24986+ "3: "
24987+#endif
24988+
24989 /* Save guest registers, load host registers, keep flags */
24990 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24991 "pop %0 \n\t"
24992@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24993 #endif
24994 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24995 [wordsize]"i"(sizeof(ulong))
24996+
24997+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24998+ ,[cs]"i"(__KERNEL_CS)
24999+#endif
25000+
25001 : "cc", "memory"
25002 #ifdef CONFIG_X86_64
25003 , "rax", "rbx", "rdi", "rsi"
25004@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25005 if (debugctlmsr)
25006 update_debugctlmsr(debugctlmsr);
25007
25008-#ifndef CONFIG_X86_64
25009+#ifdef CONFIG_X86_32
25010 /*
25011 * The sysexit path does not restore ds/es, so we must set them to
25012 * a reasonable value ourselves.
25013@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25014 * may be executed in interrupt context, which saves and restore segments
25015 * around it, nullifying its effect.
25016 */
25017- loadsegment(ds, __USER_DS);
25018- loadsegment(es, __USER_DS);
25019+ loadsegment(ds, __KERNEL_DS);
25020+ loadsegment(es, __KERNEL_DS);
25021+ loadsegment(ss, __KERNEL_DS);
25022+
25023+#ifdef CONFIG_PAX_KERNEXEC
25024+ loadsegment(fs, __KERNEL_PERCPU);
25025+#endif
25026+
25027+#ifdef CONFIG_PAX_MEMORY_UDEREF
25028+ __set_fs(current_thread_info()->addr_limit);
25029+#endif
25030+
25031 #endif
25032
25033 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
25034diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
25035index 9a51121..f739a79 100644
25036--- a/arch/x86/kvm/x86.c
25037+++ b/arch/x86/kvm/x86.c
25038@@ -1688,8 +1688,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
25039 {
25040 struct kvm *kvm = vcpu->kvm;
25041 int lm = is_long_mode(vcpu);
25042- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
25043- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
25044+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
25045+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
25046 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
25047 : kvm->arch.xen_hvm_config.blob_size_32;
25048 u32 page_num = data & ~PAGE_MASK;
25049@@ -2567,6 +2567,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
25050 if (n < msr_list.nmsrs)
25051 goto out;
25052 r = -EFAULT;
25053+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
25054+ goto out;
25055 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
25056 num_msrs_to_save * sizeof(u32)))
25057 goto out;
25058@@ -2696,7 +2698,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
25059 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
25060 struct kvm_interrupt *irq)
25061 {
25062- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
25063+ if (irq->irq >= KVM_NR_INTERRUPTS)
25064 return -EINVAL;
25065 if (irqchip_in_kernel(vcpu->kvm))
25066 return -ENXIO;
25067@@ -5209,7 +5211,7 @@ static struct notifier_block pvclock_gtod_notifier = {
25068 };
25069 #endif
25070
25071-int kvm_arch_init(void *opaque)
25072+int kvm_arch_init(const void *opaque)
25073 {
25074 int r;
25075 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
25076diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
25077index 20a4fd4..d806083 100644
25078--- a/arch/x86/lguest/boot.c
25079+++ b/arch/x86/lguest/boot.c
25080@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
25081 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
25082 * Launcher to reboot us.
25083 */
25084-static void lguest_restart(char *reason)
25085+static __noreturn void lguest_restart(char *reason)
25086 {
25087 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
25088+ BUG();
25089 }
25090
25091 /*G:050
25092diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
25093index 00933d5..3a64af9 100644
25094--- a/arch/x86/lib/atomic64_386_32.S
25095+++ b/arch/x86/lib/atomic64_386_32.S
25096@@ -48,6 +48,10 @@ BEGIN(read)
25097 movl (v), %eax
25098 movl 4(v), %edx
25099 RET_ENDP
25100+BEGIN(read_unchecked)
25101+ movl (v), %eax
25102+ movl 4(v), %edx
25103+RET_ENDP
25104 #undef v
25105
25106 #define v %esi
25107@@ -55,6 +59,10 @@ BEGIN(set)
25108 movl %ebx, (v)
25109 movl %ecx, 4(v)
25110 RET_ENDP
25111+BEGIN(set_unchecked)
25112+ movl %ebx, (v)
25113+ movl %ecx, 4(v)
25114+RET_ENDP
25115 #undef v
25116
25117 #define v %esi
25118@@ -70,6 +78,20 @@ RET_ENDP
25119 BEGIN(add)
25120 addl %eax, (v)
25121 adcl %edx, 4(v)
25122+
25123+#ifdef CONFIG_PAX_REFCOUNT
25124+ jno 0f
25125+ subl %eax, (v)
25126+ sbbl %edx, 4(v)
25127+ int $4
25128+0:
25129+ _ASM_EXTABLE(0b, 0b)
25130+#endif
25131+
25132+RET_ENDP
25133+BEGIN(add_unchecked)
25134+ addl %eax, (v)
25135+ adcl %edx, 4(v)
25136 RET_ENDP
25137 #undef v
25138
25139@@ -77,6 +99,24 @@ RET_ENDP
25140 BEGIN(add_return)
25141 addl (v), %eax
25142 adcl 4(v), %edx
25143+
25144+#ifdef CONFIG_PAX_REFCOUNT
25145+ into
25146+1234:
25147+ _ASM_EXTABLE(1234b, 2f)
25148+#endif
25149+
25150+ movl %eax, (v)
25151+ movl %edx, 4(v)
25152+
25153+#ifdef CONFIG_PAX_REFCOUNT
25154+2:
25155+#endif
25156+
25157+RET_ENDP
25158+BEGIN(add_return_unchecked)
25159+ addl (v), %eax
25160+ adcl 4(v), %edx
25161 movl %eax, (v)
25162 movl %edx, 4(v)
25163 RET_ENDP
25164@@ -86,6 +126,20 @@ RET_ENDP
25165 BEGIN(sub)
25166 subl %eax, (v)
25167 sbbl %edx, 4(v)
25168+
25169+#ifdef CONFIG_PAX_REFCOUNT
25170+ jno 0f
25171+ addl %eax, (v)
25172+ adcl %edx, 4(v)
25173+ int $4
25174+0:
25175+ _ASM_EXTABLE(0b, 0b)
25176+#endif
25177+
25178+RET_ENDP
25179+BEGIN(sub_unchecked)
25180+ subl %eax, (v)
25181+ sbbl %edx, 4(v)
25182 RET_ENDP
25183 #undef v
25184
25185@@ -96,6 +150,27 @@ BEGIN(sub_return)
25186 sbbl $0, %edx
25187 addl (v), %eax
25188 adcl 4(v), %edx
25189+
25190+#ifdef CONFIG_PAX_REFCOUNT
25191+ into
25192+1234:
25193+ _ASM_EXTABLE(1234b, 2f)
25194+#endif
25195+
25196+ movl %eax, (v)
25197+ movl %edx, 4(v)
25198+
25199+#ifdef CONFIG_PAX_REFCOUNT
25200+2:
25201+#endif
25202+
25203+RET_ENDP
25204+BEGIN(sub_return_unchecked)
25205+ negl %edx
25206+ negl %eax
25207+ sbbl $0, %edx
25208+ addl (v), %eax
25209+ adcl 4(v), %edx
25210 movl %eax, (v)
25211 movl %edx, 4(v)
25212 RET_ENDP
25213@@ -105,6 +180,20 @@ RET_ENDP
25214 BEGIN(inc)
25215 addl $1, (v)
25216 adcl $0, 4(v)
25217+
25218+#ifdef CONFIG_PAX_REFCOUNT
25219+ jno 0f
25220+ subl $1, (v)
25221+ sbbl $0, 4(v)
25222+ int $4
25223+0:
25224+ _ASM_EXTABLE(0b, 0b)
25225+#endif
25226+
25227+RET_ENDP
25228+BEGIN(inc_unchecked)
25229+ addl $1, (v)
25230+ adcl $0, 4(v)
25231 RET_ENDP
25232 #undef v
25233
25234@@ -114,6 +203,26 @@ BEGIN(inc_return)
25235 movl 4(v), %edx
25236 addl $1, %eax
25237 adcl $0, %edx
25238+
25239+#ifdef CONFIG_PAX_REFCOUNT
25240+ into
25241+1234:
25242+ _ASM_EXTABLE(1234b, 2f)
25243+#endif
25244+
25245+ movl %eax, (v)
25246+ movl %edx, 4(v)
25247+
25248+#ifdef CONFIG_PAX_REFCOUNT
25249+2:
25250+#endif
25251+
25252+RET_ENDP
25253+BEGIN(inc_return_unchecked)
25254+ movl (v), %eax
25255+ movl 4(v), %edx
25256+ addl $1, %eax
25257+ adcl $0, %edx
25258 movl %eax, (v)
25259 movl %edx, 4(v)
25260 RET_ENDP
25261@@ -123,6 +232,20 @@ RET_ENDP
25262 BEGIN(dec)
25263 subl $1, (v)
25264 sbbl $0, 4(v)
25265+
25266+#ifdef CONFIG_PAX_REFCOUNT
25267+ jno 0f
25268+ addl $1, (v)
25269+ adcl $0, 4(v)
25270+ int $4
25271+0:
25272+ _ASM_EXTABLE(0b, 0b)
25273+#endif
25274+
25275+RET_ENDP
25276+BEGIN(dec_unchecked)
25277+ subl $1, (v)
25278+ sbbl $0, 4(v)
25279 RET_ENDP
25280 #undef v
25281
25282@@ -132,6 +255,26 @@ BEGIN(dec_return)
25283 movl 4(v), %edx
25284 subl $1, %eax
25285 sbbl $0, %edx
25286+
25287+#ifdef CONFIG_PAX_REFCOUNT
25288+ into
25289+1234:
25290+ _ASM_EXTABLE(1234b, 2f)
25291+#endif
25292+
25293+ movl %eax, (v)
25294+ movl %edx, 4(v)
25295+
25296+#ifdef CONFIG_PAX_REFCOUNT
25297+2:
25298+#endif
25299+
25300+RET_ENDP
25301+BEGIN(dec_return_unchecked)
25302+ movl (v), %eax
25303+ movl 4(v), %edx
25304+ subl $1, %eax
25305+ sbbl $0, %edx
25306 movl %eax, (v)
25307 movl %edx, 4(v)
25308 RET_ENDP
25309@@ -143,6 +286,13 @@ BEGIN(add_unless)
25310 adcl %edx, %edi
25311 addl (v), %eax
25312 adcl 4(v), %edx
25313+
25314+#ifdef CONFIG_PAX_REFCOUNT
25315+ into
25316+1234:
25317+ _ASM_EXTABLE(1234b, 2f)
25318+#endif
25319+
25320 cmpl %eax, %ecx
25321 je 3f
25322 1:
25323@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
25324 1:
25325 addl $1, %eax
25326 adcl $0, %edx
25327+
25328+#ifdef CONFIG_PAX_REFCOUNT
25329+ into
25330+1234:
25331+ _ASM_EXTABLE(1234b, 2f)
25332+#endif
25333+
25334 movl %eax, (v)
25335 movl %edx, 4(v)
25336 movl $1, %eax
25337@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
25338 movl 4(v), %edx
25339 subl $1, %eax
25340 sbbl $0, %edx
25341+
25342+#ifdef CONFIG_PAX_REFCOUNT
25343+ into
25344+1234:
25345+ _ASM_EXTABLE(1234b, 1f)
25346+#endif
25347+
25348 js 1f
25349 movl %eax, (v)
25350 movl %edx, 4(v)
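
Every checked op in this file follows the same PAX_REFCOUNT pattern: perform the 64-bit add/sub, take the jno fast path when the signed result did not overflow, otherwise undo the operation and raise int $4 (#OF) so the PaX trap handler can report the overflow; _ASM_EXTABLE resumes execution past the trap. A GCC inline-asm sketch of the pattern on a plain 32-bit counter (illustrative names; in userspace the trap would simply kill the process, in the patched kernel the #OF handler logs it):

/* Checked add: on signed overflow, undo and trap. */
static inline void refcount_add_checked(int i, int *v)
{
	asm volatile("addl %1, %0\n\t"
		     "jno 0f\n\t"	/* no signed overflow: done */
		     "subl %1, %0\n\t"	/* undo, leaving the counter sane */
		     "int $4\n\t"	/* raise #OF for the handler */
		     "0:"
		     : "+m" (*v)
		     : "ir" (i)
		     : "cc");
}

The _unchecked variants added alongside are the old bodies kept verbatim, for counters (statistics, cursors) where wrapping is legitimate.
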
25351diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
25352index f5cc9eb..51fa319 100644
25353--- a/arch/x86/lib/atomic64_cx8_32.S
25354+++ b/arch/x86/lib/atomic64_cx8_32.S
25355@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
25356 CFI_STARTPROC
25357
25358 read64 %ecx
25359+ pax_force_retaddr
25360 ret
25361 CFI_ENDPROC
25362 ENDPROC(atomic64_read_cx8)
25363
25364+ENTRY(atomic64_read_unchecked_cx8)
25365+ CFI_STARTPROC
25366+
25367+ read64 %ecx
25368+ pax_force_retaddr
25369+ ret
25370+ CFI_ENDPROC
25371+ENDPROC(atomic64_read_unchecked_cx8)
25372+
25373 ENTRY(atomic64_set_cx8)
25374 CFI_STARTPROC
25375
25376@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
25377 cmpxchg8b (%esi)
25378 jne 1b
25379
25380+ pax_force_retaddr
25381 ret
25382 CFI_ENDPROC
25383 ENDPROC(atomic64_set_cx8)
25384
25385+ENTRY(atomic64_set_unchecked_cx8)
25386+ CFI_STARTPROC
25387+
25388+1:
25389+/* we don't need LOCK_PREFIX since aligned 64-bit writes
25390+ * are atomic on 586 and newer */
25391+ cmpxchg8b (%esi)
25392+ jne 1b
25393+
25394+ pax_force_retaddr
25395+ ret
25396+ CFI_ENDPROC
25397+ENDPROC(atomic64_set_unchecked_cx8)
25398+
25399 ENTRY(atomic64_xchg_cx8)
25400 CFI_STARTPROC
25401
25402@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
25403 cmpxchg8b (%esi)
25404 jne 1b
25405
25406+ pax_force_retaddr
25407 ret
25408 CFI_ENDPROC
25409 ENDPROC(atomic64_xchg_cx8)
25410
25411-.macro addsub_return func ins insc
25412-ENTRY(atomic64_\func\()_return_cx8)
25413+.macro addsub_return func ins insc unchecked=""
25414+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
25415 CFI_STARTPROC
25416 SAVE ebp
25417 SAVE ebx
25418@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
25419 movl %edx, %ecx
25420 \ins\()l %esi, %ebx
25421 \insc\()l %edi, %ecx
25422+
25423+.ifb \unchecked
25424+#ifdef CONFIG_PAX_REFCOUNT
25425+ into
25426+2:
25427+ _ASM_EXTABLE(2b, 3f)
25428+#endif
25429+.endif
25430+
25431 LOCK_PREFIX
25432 cmpxchg8b (%ebp)
25433 jne 1b
25434-
25435-10:
25436 movl %ebx, %eax
25437 movl %ecx, %edx
25438+
25439+.ifb \unchecked
25440+#ifdef CONFIG_PAX_REFCOUNT
25441+3:
25442+#endif
25443+.endif
25444+
25445 RESTORE edi
25446 RESTORE esi
25447 RESTORE ebx
25448 RESTORE ebp
25449+ pax_force_retaddr
25450 ret
25451 CFI_ENDPROC
25452-ENDPROC(atomic64_\func\()_return_cx8)
25453+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
25454 .endm
25455
25456 addsub_return add add adc
25457 addsub_return sub sub sbb
25458+addsub_return add add adc _unchecked
25459+addsub_return sub sub sbb _unchecked
25460
25461-.macro incdec_return func ins insc
25462-ENTRY(atomic64_\func\()_return_cx8)
25463+.macro incdec_return func ins insc unchecked=""
25464+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
25465 CFI_STARTPROC
25466 SAVE ebx
25467
25468@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
25469 movl %edx, %ecx
25470 \ins\()l $1, %ebx
25471 \insc\()l $0, %ecx
25472+
25473+.ifb \unchecked
25474+#ifdef CONFIG_PAX_REFCOUNT
25475+ into
25476+2:
25477+ _ASM_EXTABLE(2b, 3f)
25478+#endif
25479+.endif
25480+
25481 LOCK_PREFIX
25482 cmpxchg8b (%esi)
25483 jne 1b
25484
25485-10:
25486 movl %ebx, %eax
25487 movl %ecx, %edx
25488+
25489+.ifb \unchecked
25490+#ifdef CONFIG_PAX_REFCOUNT
25491+3:
25492+#endif
25493+.endif
25494+
25495 RESTORE ebx
25496+ pax_force_retaddr
25497 ret
25498 CFI_ENDPROC
25499-ENDPROC(atomic64_\func\()_return_cx8)
25500+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
25501 .endm
25502
25503 incdec_return inc add adc
25504 incdec_return dec sub sbb
25505+incdec_return inc add adc _unchecked
25506+incdec_return dec sub sbb _unchecked
25507
25508 ENTRY(atomic64_dec_if_positive_cx8)
25509 CFI_STARTPROC
25510@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
25511 movl %edx, %ecx
25512 subl $1, %ebx
25513 sbb $0, %ecx
25514+
25515+#ifdef CONFIG_PAX_REFCOUNT
25516+ into
25517+1234:
25518+ _ASM_EXTABLE(1234b, 2f)
25519+#endif
25520+
25521 js 2f
25522 LOCK_PREFIX
25523 cmpxchg8b (%esi)
25524@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
25525 movl %ebx, %eax
25526 movl %ecx, %edx
25527 RESTORE ebx
25528+ pax_force_retaddr
25529 ret
25530 CFI_ENDPROC
25531 ENDPROC(atomic64_dec_if_positive_cx8)
25532@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
25533 movl %edx, %ecx
25534 addl %ebp, %ebx
25535 adcl %edi, %ecx
25536+
25537+#ifdef CONFIG_PAX_REFCOUNT
25538+ into
25539+1234:
25540+ _ASM_EXTABLE(1234b, 3f)
25541+#endif
25542+
25543 LOCK_PREFIX
25544 cmpxchg8b (%esi)
25545 jne 1b
25546@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
25547 CFI_ADJUST_CFA_OFFSET -8
25548 RESTORE ebx
25549 RESTORE ebp
25550+ pax_force_retaddr
25551 ret
25552 4:
25553 cmpl %edx, 4(%esp)
25554@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
25555 xorl %ecx, %ecx
25556 addl $1, %ebx
25557 adcl %edx, %ecx
25558+
25559+#ifdef CONFIG_PAX_REFCOUNT
25560+ into
25561+1234:
25562+ _ASM_EXTABLE(1234b, 3f)
25563+#endif
25564+
25565 LOCK_PREFIX
25566 cmpxchg8b (%esi)
25567 jne 1b
25568@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
25569 movl $1, %eax
25570 3:
25571 RESTORE ebx
25572+ pax_force_retaddr
25573 ret
25574 CFI_ENDPROC
25575 ENDPROC(atomic64_inc_not_zero_cx8)
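
All the cx8 variants funnel through the same retry loop: read the current 64-bit value, compute the new one in ebx:ecx, and lock cmpxchg8b until no other CPU raced in between. The equivalent shape in C, using a GCC builtin that compiles to cmpxchg8b on i586+ (function name illustrative):

static long long atomic64_add_return_sketch(long long i, long long *v)
{
	long long old, new;

	do {
		old = *v;	/* the read64 step */
		new = old + i;	/* the \ins/\insc pair */
	} while (!__sync_bool_compare_and_swap(v, old, new));

	return new;
}

The checked variants only add the into/#OF overflow test between the arithmetic and the cmpxchg8b; the _unchecked ones are this loop verbatim.
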
25576diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
25577index 2af5df3..62b1a5a 100644
25578--- a/arch/x86/lib/checksum_32.S
25579+++ b/arch/x86/lib/checksum_32.S
25580@@ -29,7 +29,8 @@
25581 #include <asm/dwarf2.h>
25582 #include <asm/errno.h>
25583 #include <asm/asm.h>
25584-
25585+#include <asm/segment.h>
25586+
25587 /*
25588 * computes a partial checksum, e.g. for TCP/UDP fragments
25589 */
25590@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
25591
25592 #define ARGBASE 16
25593 #define FP 12
25594-
25595-ENTRY(csum_partial_copy_generic)
25596+
25597+ENTRY(csum_partial_copy_generic_to_user)
25598 CFI_STARTPROC
25599+
25600+#ifdef CONFIG_PAX_MEMORY_UDEREF
25601+ pushl_cfi %gs
25602+ popl_cfi %es
25603+ jmp csum_partial_copy_generic
25604+#endif
25605+
25606+ENTRY(csum_partial_copy_generic_from_user)
25607+
25608+#ifdef CONFIG_PAX_MEMORY_UDEREF
25609+ pushl_cfi %gs
25610+ popl_cfi %ds
25611+#endif
25612+
25613+ENTRY(csum_partial_copy_generic)
25614 subl $4,%esp
25615 CFI_ADJUST_CFA_OFFSET 4
25616 pushl_cfi %edi
25617@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
25618 jmp 4f
25619 SRC(1: movw (%esi), %bx )
25620 addl $2, %esi
25621-DST( movw %bx, (%edi) )
25622+DST( movw %bx, %es:(%edi) )
25623 addl $2, %edi
25624 addw %bx, %ax
25625 adcl $0, %eax
25626@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
25627 SRC(1: movl (%esi), %ebx )
25628 SRC( movl 4(%esi), %edx )
25629 adcl %ebx, %eax
25630-DST( movl %ebx, (%edi) )
25631+DST( movl %ebx, %es:(%edi) )
25632 adcl %edx, %eax
25633-DST( movl %edx, 4(%edi) )
25634+DST( movl %edx, %es:4(%edi) )
25635
25636 SRC( movl 8(%esi), %ebx )
25637 SRC( movl 12(%esi), %edx )
25638 adcl %ebx, %eax
25639-DST( movl %ebx, 8(%edi) )
25640+DST( movl %ebx, %es:8(%edi) )
25641 adcl %edx, %eax
25642-DST( movl %edx, 12(%edi) )
25643+DST( movl %edx, %es:12(%edi) )
25644
25645 SRC( movl 16(%esi), %ebx )
25646 SRC( movl 20(%esi), %edx )
25647 adcl %ebx, %eax
25648-DST( movl %ebx, 16(%edi) )
25649+DST( movl %ebx, %es:16(%edi) )
25650 adcl %edx, %eax
25651-DST( movl %edx, 20(%edi) )
25652+DST( movl %edx, %es:20(%edi) )
25653
25654 SRC( movl 24(%esi), %ebx )
25655 SRC( movl 28(%esi), %edx )
25656 adcl %ebx, %eax
25657-DST( movl %ebx, 24(%edi) )
25658+DST( movl %ebx, %es:24(%edi) )
25659 adcl %edx, %eax
25660-DST( movl %edx, 28(%edi) )
25661+DST( movl %edx, %es:28(%edi) )
25662
25663 lea 32(%esi), %esi
25664 lea 32(%edi), %edi
25665@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
25666 shrl $2, %edx # This clears CF
25667 SRC(3: movl (%esi), %ebx )
25668 adcl %ebx, %eax
25669-DST( movl %ebx, (%edi) )
25670+DST( movl %ebx, %es:(%edi) )
25671 lea 4(%esi), %esi
25672 lea 4(%edi), %edi
25673 dec %edx
25674@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
25675 jb 5f
25676 SRC( movw (%esi), %cx )
25677 leal 2(%esi), %esi
25678-DST( movw %cx, (%edi) )
25679+DST( movw %cx, %es:(%edi) )
25680 leal 2(%edi), %edi
25681 je 6f
25682 shll $16,%ecx
25683 SRC(5: movb (%esi), %cl )
25684-DST( movb %cl, (%edi) )
25685+DST( movb %cl, %es:(%edi) )
25686 6: addl %ecx, %eax
25687 adcl $0, %eax
25688 7:
25689@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
25690
25691 6001:
25692 movl ARGBASE+20(%esp), %ebx # src_err_ptr
25693- movl $-EFAULT, (%ebx)
25694+ movl $-EFAULT, %ss:(%ebx)
25695
25696 # zero the complete destination - computing the rest
25697 # is too much work
25698@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
25699
25700 6002:
25701 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25702- movl $-EFAULT,(%ebx)
25703+ movl $-EFAULT,%ss:(%ebx)
25704 jmp 5000b
25705
25706 .previous
25707
25708+ pushl_cfi %ss
25709+ popl_cfi %ds
25710+ pushl_cfi %ss
25711+ popl_cfi %es
25712 popl_cfi %ebx
25713 CFI_RESTORE ebx
25714 popl_cfi %esi
25715@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
25716 popl_cfi %ecx # equivalent to addl $4,%esp
25717 ret
25718 CFI_ENDPROC
25719-ENDPROC(csum_partial_copy_generic)
25720+ENDPROC(csum_partial_copy_generic_to_user)
25721
25722 #else
25723
25724 /* Version for PentiumII/PPro */
25725
25726 #define ROUND1(x) \
25727+ nop; nop; nop; \
25728 SRC(movl x(%esi), %ebx ) ; \
25729 addl %ebx, %eax ; \
25730- DST(movl %ebx, x(%edi) ) ;
25731+ DST(movl %ebx, %es:x(%edi)) ;
25732
25733 #define ROUND(x) \
25734+ nop; nop; nop; \
25735 SRC(movl x(%esi), %ebx ) ; \
25736 adcl %ebx, %eax ; \
25737- DST(movl %ebx, x(%edi) ) ;
25738+ DST(movl %ebx, %es:x(%edi)) ;
25739
25740 #define ARGBASE 12
25741-
25742-ENTRY(csum_partial_copy_generic)
25743+
25744+ENTRY(csum_partial_copy_generic_to_user)
25745 CFI_STARTPROC
25746+
25747+#ifdef CONFIG_PAX_MEMORY_UDEREF
25748+ pushl_cfi %gs
25749+ popl_cfi %es
25750+ jmp csum_partial_copy_generic
25751+#endif
25752+
25753+ENTRY(csum_partial_copy_generic_from_user)
25754+
25755+#ifdef CONFIG_PAX_MEMORY_UDEREF
25756+ pushl_cfi %gs
25757+ popl_cfi %ds
25758+#endif
25759+
25760+ENTRY(csum_partial_copy_generic)
25761 pushl_cfi %ebx
25762 CFI_REL_OFFSET ebx, 0
25763 pushl_cfi %edi
25764@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
25765 subl %ebx, %edi
25766 lea -1(%esi),%edx
25767 andl $-32,%edx
25768- lea 3f(%ebx,%ebx), %ebx
25769+ lea 3f(%ebx,%ebx,2), %ebx
25770 testl %esi, %esi
25771 jmp *%ebx
25772 1: addl $64,%esi
25773@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
25774 jb 5f
25775 SRC( movw (%esi), %dx )
25776 leal 2(%esi), %esi
25777-DST( movw %dx, (%edi) )
25778+DST( movw %dx, %es:(%edi) )
25779 leal 2(%edi), %edi
25780 je 6f
25781 shll $16,%edx
25782 5:
25783 SRC( movb (%esi), %dl )
25784-DST( movb %dl, (%edi) )
25785+DST( movb %dl, %es:(%edi) )
25786 6: addl %edx, %eax
25787 adcl $0, %eax
25788 7:
25789 .section .fixup, "ax"
25790 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
25791- movl $-EFAULT, (%ebx)
25792+ movl $-EFAULT, %ss:(%ebx)
25793 # zero the complete destination (computing the rest is too much work)
25794 movl ARGBASE+8(%esp),%edi # dst
25795 movl ARGBASE+12(%esp),%ecx # len
25796@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
25797 rep; stosb
25798 jmp 7b
25799 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25800- movl $-EFAULT, (%ebx)
25801+ movl $-EFAULT, %ss:(%ebx)
25802 jmp 7b
25803 .previous
25804
25805+#ifdef CONFIG_PAX_MEMORY_UDEREF
25806+ pushl_cfi %ss
25807+ popl_cfi %ds
25808+ pushl_cfi %ss
25809+ popl_cfi %es
25810+#endif
25811+
25812 popl_cfi %esi
25813 CFI_RESTORE esi
25814 popl_cfi %edi
25815@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
25816 CFI_RESTORE ebx
25817 ret
25818 CFI_ENDPROC
25819-ENDPROC(csum_partial_copy_generic)
25820+ENDPROC(csum_partial_copy_generic_to_user)
25821
25822 #undef ROUND
25823 #undef ROUND1
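
The segment overrides (%es:/%ss:) only change where the bytes land under UDEREF; the arithmetic itself is still the Internet ones'-complement sum accumulated with adcl. For reference, the final fold the callers apply to the 32-bit accumulator, in C:

/* Fold a 32-bit ones'-complement accumulator to the 16-bit checksum. */
static unsigned short csum_fold_sketch(unsigned int sum)
{
	sum = (sum & 0xffffu) + (sum >> 16);	/* fold carries into low word */
	sum = (sum & 0xffffu) + (sum >> 16);	/* at most one carry remains */
	return (unsigned short)~sum;
}
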
25824diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
25825index f2145cf..cea889d 100644
25826--- a/arch/x86/lib/clear_page_64.S
25827+++ b/arch/x86/lib/clear_page_64.S
25828@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
25829 movl $4096/8,%ecx
25830 xorl %eax,%eax
25831 rep stosq
25832+ pax_force_retaddr
25833 ret
25834 CFI_ENDPROC
25835 ENDPROC(clear_page_c)
25836@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
25837 movl $4096,%ecx
25838 xorl %eax,%eax
25839 rep stosb
25840+ pax_force_retaddr
25841 ret
25842 CFI_ENDPROC
25843 ENDPROC(clear_page_c_e)
25844@@ -43,6 +45,7 @@ ENTRY(clear_page)
25845 leaq 64(%rdi),%rdi
25846 jnz .Lloop
25847 nop
25848+ pax_force_retaddr
25849 ret
25850 CFI_ENDPROC
25851 .Lclear_page_end:
25852@@ -58,7 +61,7 @@ ENDPROC(clear_page)
25853
25854 #include <asm/cpufeature.h>
25855
25856- .section .altinstr_replacement,"ax"
25857+ .section .altinstr_replacement,"a"
25858 1: .byte 0xeb /* jmp <disp8> */
25859 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
25860 2: .byte 0xeb /* jmp <disp8> */
25861diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
25862index 1e572c5..2a162cd 100644
25863--- a/arch/x86/lib/cmpxchg16b_emu.S
25864+++ b/arch/x86/lib/cmpxchg16b_emu.S
25865@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
25866
25867 popf
25868 mov $1, %al
25869+ pax_force_retaddr
25870 ret
25871
25872 not_same:
25873 popf
25874 xor %al,%al
25875+ pax_force_retaddr
25876 ret
25877
25878 CFI_ENDPROC
25879diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25880index 176cca6..1166c50 100644
25881--- a/arch/x86/lib/copy_page_64.S
25882+++ b/arch/x86/lib/copy_page_64.S
25883@@ -9,6 +9,7 @@ copy_page_rep:
25884 CFI_STARTPROC
25885 movl $4096/8, %ecx
25886 rep movsq
25887+ pax_force_retaddr
25888 ret
25889 CFI_ENDPROC
25890 ENDPROC(copy_page_rep)
25891@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25892
25893 ENTRY(copy_page)
25894 CFI_STARTPROC
25895- subq $2*8, %rsp
25896- CFI_ADJUST_CFA_OFFSET 2*8
25897+ subq $3*8, %rsp
25898+ CFI_ADJUST_CFA_OFFSET 3*8
25899 movq %rbx, (%rsp)
25900 CFI_REL_OFFSET rbx, 0
25901 movq %r12, 1*8(%rsp)
25902 CFI_REL_OFFSET r12, 1*8
25903+ movq %r13, 2*8(%rsp)
25904+ CFI_REL_OFFSET r13, 2*8
25905
25906 movl $(4096/64)-5, %ecx
25907 .p2align 4
25908@@ -36,7 +39,7 @@ ENTRY(copy_page)
25909 movq 0x8*2(%rsi), %rdx
25910 movq 0x8*3(%rsi), %r8
25911 movq 0x8*4(%rsi), %r9
25912- movq 0x8*5(%rsi), %r10
25913+ movq 0x8*5(%rsi), %r13
25914 movq 0x8*6(%rsi), %r11
25915 movq 0x8*7(%rsi), %r12
25916
25917@@ -47,7 +50,7 @@ ENTRY(copy_page)
25918 movq %rdx, 0x8*2(%rdi)
25919 movq %r8, 0x8*3(%rdi)
25920 movq %r9, 0x8*4(%rdi)
25921- movq %r10, 0x8*5(%rdi)
25922+ movq %r13, 0x8*5(%rdi)
25923 movq %r11, 0x8*6(%rdi)
25924 movq %r12, 0x8*7(%rdi)
25925
25926@@ -66,7 +69,7 @@ ENTRY(copy_page)
25927 movq 0x8*2(%rsi), %rdx
25928 movq 0x8*3(%rsi), %r8
25929 movq 0x8*4(%rsi), %r9
25930- movq 0x8*5(%rsi), %r10
25931+ movq 0x8*5(%rsi), %r13
25932 movq 0x8*6(%rsi), %r11
25933 movq 0x8*7(%rsi), %r12
25934
25935@@ -75,7 +78,7 @@ ENTRY(copy_page)
25936 movq %rdx, 0x8*2(%rdi)
25937 movq %r8, 0x8*3(%rdi)
25938 movq %r9, 0x8*4(%rdi)
25939- movq %r10, 0x8*5(%rdi)
25940+ movq %r13, 0x8*5(%rdi)
25941 movq %r11, 0x8*6(%rdi)
25942 movq %r12, 0x8*7(%rdi)
25943
25944@@ -87,8 +90,11 @@ ENTRY(copy_page)
25945 CFI_RESTORE rbx
25946 movq 1*8(%rsp), %r12
25947 CFI_RESTORE r12
25948- addq $2*8, %rsp
25949- CFI_ADJUST_CFA_OFFSET -2*8
25950+ movq 2*8(%rsp), %r13
25951+ CFI_RESTORE r13
25952+ addq $3*8, %rsp
25953+ CFI_ADJUST_CFA_OFFSET -3*8
25954+ pax_force_retaddr
25955 ret
25956 .Lcopy_page_end:
25957 CFI_ENDPROC
25958@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25959
25960 #include <asm/cpufeature.h>
25961
25962- .section .altinstr_replacement,"ax"
25963+ .section .altinstr_replacement,"a"
25964 1: .byte 0xeb /* jmp <disp8> */
25965 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25966 2:
25967diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25968index a30ca15..d25fab6 100644
25969--- a/arch/x86/lib/copy_user_64.S
25970+++ b/arch/x86/lib/copy_user_64.S
25971@@ -18,6 +18,7 @@
25972 #include <asm/alternative-asm.h>
25973 #include <asm/asm.h>
25974 #include <asm/smap.h>
25975+#include <asm/pgtable.h>
25976
25977 /*
25978 * By placing feature2 after feature1 in altinstructions section, we logically
25979@@ -31,7 +32,7 @@
25980 .byte 0xe9 /* 32bit jump */
25981 .long \orig-1f /* by default jump to orig */
25982 1:
25983- .section .altinstr_replacement,"ax"
25984+ .section .altinstr_replacement,"a"
25985 2: .byte 0xe9 /* near jump with 32bit immediate */
25986 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25987 3: .byte 0xe9 /* near jump with 32bit immediate */
25988@@ -70,47 +71,20 @@
25989 #endif
25990 .endm
25991
25992-/* Standard copy_to_user with segment limit checking */
25993-ENTRY(_copy_to_user)
25994- CFI_STARTPROC
25995- GET_THREAD_INFO(%rax)
25996- movq %rdi,%rcx
25997- addq %rdx,%rcx
25998- jc bad_to_user
25999- cmpq TI_addr_limit(%rax),%rcx
26000- ja bad_to_user
26001- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
26002- copy_user_generic_unrolled,copy_user_generic_string, \
26003- copy_user_enhanced_fast_string
26004- CFI_ENDPROC
26005-ENDPROC(_copy_to_user)
26006-
26007-/* Standard copy_from_user with segment limit checking */
26008-ENTRY(_copy_from_user)
26009- CFI_STARTPROC
26010- GET_THREAD_INFO(%rax)
26011- movq %rsi,%rcx
26012- addq %rdx,%rcx
26013- jc bad_from_user
26014- cmpq TI_addr_limit(%rax),%rcx
26015- ja bad_from_user
26016- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
26017- copy_user_generic_unrolled,copy_user_generic_string, \
26018- copy_user_enhanced_fast_string
26019- CFI_ENDPROC
26020-ENDPROC(_copy_from_user)
26021-
26022 .section .fixup,"ax"
26023 /* must zero dest */
26024 ENTRY(bad_from_user)
26025 bad_from_user:
26026 CFI_STARTPROC
26027+ testl %edx,%edx
26028+ js bad_to_user
26029 movl %edx,%ecx
26030 xorl %eax,%eax
26031 rep
26032 stosb
26033 bad_to_user:
26034 movl %edx,%eax
26035+ pax_force_retaddr
26036 ret
26037 CFI_ENDPROC
26038 ENDPROC(bad_from_user)
26039@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
26040 jz 17f
26041 1: movq (%rsi),%r8
26042 2: movq 1*8(%rsi),%r9
26043-3: movq 2*8(%rsi),%r10
26044+3: movq 2*8(%rsi),%rax
26045 4: movq 3*8(%rsi),%r11
26046 5: movq %r8,(%rdi)
26047 6: movq %r9,1*8(%rdi)
26048-7: movq %r10,2*8(%rdi)
26049+7: movq %rax,2*8(%rdi)
26050 8: movq %r11,3*8(%rdi)
26051 9: movq 4*8(%rsi),%r8
26052 10: movq 5*8(%rsi),%r9
26053-11: movq 6*8(%rsi),%r10
26054+11: movq 6*8(%rsi),%rax
26055 12: movq 7*8(%rsi),%r11
26056 13: movq %r8,4*8(%rdi)
26057 14: movq %r9,5*8(%rdi)
26058-15: movq %r10,6*8(%rdi)
26059+15: movq %rax,6*8(%rdi)
26060 16: movq %r11,7*8(%rdi)
26061 leaq 64(%rsi),%rsi
26062 leaq 64(%rdi),%rdi
26063@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
26064 jnz 21b
26065 23: xor %eax,%eax
26066 ASM_CLAC
26067+ pax_force_retaddr
26068 ret
26069
26070 .section .fixup,"ax"
26071@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
26072 movsb
26073 4: xorl %eax,%eax
26074 ASM_CLAC
26075+ pax_force_retaddr
26076 ret
26077
26078 .section .fixup,"ax"
26079@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
26080 movsb
26081 2: xorl %eax,%eax
26082 ASM_CLAC
26083+ pax_force_retaddr
26084 ret
26085
26086 .section .fixup,"ax"
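
The deleted _copy_to_user/_copy_from_user stubs performed an overflow-safe range check against the thread's addr_limit before dispatching (the patch moves that, plus size checking, into C callers); the new testl in bad_from_user additionally skips the destination zeroing when a negative size reaches the fixup. The shape of the removed check, as a self-contained C sketch (limit stands in for TI_addr_limit):

static int range_ok(unsigned long ptr, unsigned long n, unsigned long limit)
{
	unsigned long end = ptr + n;

	if (end < ptr)		/* the "jc bad_*_user" wrap-around check */
		return 0;
	return end <= limit;	/* the "cmpq TI_addr_limit ... ja" check */
}
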
26087diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
26088index 6a4f43c..f5f9e26 100644
26089--- a/arch/x86/lib/copy_user_nocache_64.S
26090+++ b/arch/x86/lib/copy_user_nocache_64.S
26091@@ -8,6 +8,7 @@
26092
26093 #include <linux/linkage.h>
26094 #include <asm/dwarf2.h>
26095+#include <asm/alternative-asm.h>
26096
26097 #define FIX_ALIGNMENT 1
26098
26099@@ -16,6 +17,7 @@
26100 #include <asm/thread_info.h>
26101 #include <asm/asm.h>
26102 #include <asm/smap.h>
26103+#include <asm/pgtable.h>
26104
26105 .macro ALIGN_DESTINATION
26106 #ifdef FIX_ALIGNMENT
26107@@ -49,6 +51,15 @@
26108 */
26109 ENTRY(__copy_user_nocache)
26110 CFI_STARTPROC
26111+
26112+#ifdef CONFIG_PAX_MEMORY_UDEREF
26113+ mov $PAX_USER_SHADOW_BASE,%rcx
26114+ cmp %rcx,%rsi
26115+ jae 1f
26116+ add %rcx,%rsi
26117+1:
26118+#endif
26119+
26120 ASM_STAC
26121 cmpl $8,%edx
26122 	jb 20f		/* less than 8 bytes, go to byte copy loop */
26123@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
26124 jz 17f
26125 1: movq (%rsi),%r8
26126 2: movq 1*8(%rsi),%r9
26127-3: movq 2*8(%rsi),%r10
26128+3: movq 2*8(%rsi),%rax
26129 4: movq 3*8(%rsi),%r11
26130 5: movnti %r8,(%rdi)
26131 6: movnti %r9,1*8(%rdi)
26132-7: movnti %r10,2*8(%rdi)
26133+7: movnti %rax,2*8(%rdi)
26134 8: movnti %r11,3*8(%rdi)
26135 9: movq 4*8(%rsi),%r8
26136 10: movq 5*8(%rsi),%r9
26137-11: movq 6*8(%rsi),%r10
26138+11: movq 6*8(%rsi),%rax
26139 12: movq 7*8(%rsi),%r11
26140 13: movnti %r8,4*8(%rdi)
26141 14: movnti %r9,5*8(%rdi)
26142-15: movnti %r10,6*8(%rdi)
26143+15: movnti %rax,6*8(%rdi)
26144 16: movnti %r11,7*8(%rdi)
26145 leaq 64(%rsi),%rsi
26146 leaq 64(%rdi),%rdi
26147@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
26148 23: xorl %eax,%eax
26149 ASM_CLAC
26150 sfence
26151+ pax_force_retaddr
26152 ret
26153
26154 .section .fixup,"ax"
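
__copy_user_nocache moves data with movnti, non-temporal stores that bypass the cache, and the existing sfence before ret makes them globally visible; the UDEREF prologue only rebases the source pointer first. A userspace sketch of the store loop using the matching SSE2 intrinsics (x86-64 only):

#include <emmintrin.h>
#include <stddef.h>

/* Non-temporal 8-byte copies, fenced like the tail of the routine. */
static void copy_nocache(long long *dst, const long long *src, size_t qwords)
{
	size_t i;

	for (i = 0; i < qwords; i++)
		_mm_stream_si64(dst + i, src[i]);	/* movnti */
	_mm_sfence();	/* matches the sfence before the final ret */
}
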
26155diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
26156index 2419d5f..953ee51 100644
26157--- a/arch/x86/lib/csum-copy_64.S
26158+++ b/arch/x86/lib/csum-copy_64.S
26159@@ -9,6 +9,7 @@
26160 #include <asm/dwarf2.h>
26161 #include <asm/errno.h>
26162 #include <asm/asm.h>
26163+#include <asm/alternative-asm.h>
26164
26165 /*
26166 * Checksum copy with exception handling.
26167@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
26168 CFI_RESTORE rbp
26169 addq $7*8, %rsp
26170 CFI_ADJUST_CFA_OFFSET -7*8
26171+ pax_force_retaddr 0, 1
26172 ret
26173 CFI_RESTORE_STATE
26174
26175diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
26176index 25b7ae8..169fafc 100644
26177--- a/arch/x86/lib/csum-wrappers_64.c
26178+++ b/arch/x86/lib/csum-wrappers_64.c
26179@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
26180 len -= 2;
26181 }
26182 }
26183- isum = csum_partial_copy_generic((__force const void *)src,
26184+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
26185 dst, len, isum, errp, NULL);
26186 if (unlikely(*errp))
26187 goto out_err;
26188@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
26189 }
26190
26191 *errp = 0;
26192- return csum_partial_copy_generic(src, (void __force *)dst,
26193+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
26194 len, isum, NULL, errp);
26195 }
26196 EXPORT_SYMBOL(csum_partial_copy_to_user);
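
The __force_kernel/____m casts are sparse annotations: __user pointers live in a separate address space and may not be dereferenced directly, so crossing into a kernel pointer must be spelled out (on amd64 UDEREF, ____m also applies the shadow rebase). For reference, how the stock annotations are wired up when sparse (__CHECKER__) runs; __force_kernel itself is a grsecurity addition along the same lines:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
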
26197diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
26198index 156b9c8..b144132 100644
26199--- a/arch/x86/lib/getuser.S
26200+++ b/arch/x86/lib/getuser.S
26201@@ -34,17 +34,40 @@
26202 #include <asm/thread_info.h>
26203 #include <asm/asm.h>
26204 #include <asm/smap.h>
26205+#include <asm/segment.h>
26206+#include <asm/pgtable.h>
26207+#include <asm/alternative-asm.h>
26208+
26209+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26210+#define __copyuser_seg gs;
26211+#else
26212+#define __copyuser_seg
26213+#endif
26214
26215 .text
26216 ENTRY(__get_user_1)
26217 CFI_STARTPROC
26218+
26219+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26220 GET_THREAD_INFO(%_ASM_DX)
26221 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
26222 jae bad_get_user
26223 ASM_STAC
26224-1: movzb (%_ASM_AX),%edx
26225+
26226+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26227+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
26228+ cmp %_ASM_DX,%_ASM_AX
26229+ jae 1234f
26230+ add %_ASM_DX,%_ASM_AX
26231+1234:
26232+#endif
26233+
26234+#endif
26235+
26236+1: __copyuser_seg movzb (%_ASM_AX),%edx
26237 xor %eax,%eax
26238 ASM_CLAC
26239+ pax_force_retaddr
26240 ret
26241 CFI_ENDPROC
26242 ENDPROC(__get_user_1)
26243@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
26244 ENTRY(__get_user_2)
26245 CFI_STARTPROC
26246 add $1,%_ASM_AX
26247+
26248+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26249 jc bad_get_user
26250 GET_THREAD_INFO(%_ASM_DX)
26251 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
26252 jae bad_get_user
26253 ASM_STAC
26254-2: movzwl -1(%_ASM_AX),%edx
26255+
26256+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26257+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
26258+ cmp %_ASM_DX,%_ASM_AX
26259+ jae 1234f
26260+ add %_ASM_DX,%_ASM_AX
26261+1234:
26262+#endif
26263+
26264+#endif
26265+
26266+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
26267 xor %eax,%eax
26268 ASM_CLAC
26269+ pax_force_retaddr
26270 ret
26271 CFI_ENDPROC
26272 ENDPROC(__get_user_2)
26273@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
26274 ENTRY(__get_user_4)
26275 CFI_STARTPROC
26276 add $3,%_ASM_AX
26277+
26278+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26279 jc bad_get_user
26280 GET_THREAD_INFO(%_ASM_DX)
26281 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
26282 jae bad_get_user
26283 ASM_STAC
26284-3: mov -3(%_ASM_AX),%edx
26285+
26286+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26287+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
26288+ cmp %_ASM_DX,%_ASM_AX
26289+ jae 1234f
26290+ add %_ASM_DX,%_ASM_AX
26291+1234:
26292+#endif
26293+
26294+#endif
26295+
26296+3: __copyuser_seg mov -3(%_ASM_AX),%edx
26297 xor %eax,%eax
26298 ASM_CLAC
26299+ pax_force_retaddr
26300 ret
26301 CFI_ENDPROC
26302 ENDPROC(__get_user_4)
26303@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
26304 GET_THREAD_INFO(%_ASM_DX)
26305 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
26306 jae bad_get_user
26307+
26308+#ifdef CONFIG_PAX_MEMORY_UDEREF
26309+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
26310+ cmp %_ASM_DX,%_ASM_AX
26311+ jae 1234f
26312+ add %_ASM_DX,%_ASM_AX
26313+1234:
26314+#endif
26315+
26316 ASM_STAC
26317 4: movq -7(%_ASM_AX),%_ASM_DX
26318 xor %eax,%eax
26319 ASM_CLAC
26320+ pax_force_retaddr
26321 ret
26322 CFI_ENDPROC
26323 ENDPROC(__get_user_8)
26324@@ -101,6 +162,7 @@ bad_get_user:
26325 xor %edx,%edx
26326 mov $(-EFAULT),%_ASM_AX
26327 ASM_CLAC
26328+ pax_force_retaddr
26329 ret
26330 CFI_ENDPROC
26331 END(bad_get_user)
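
On amd64 UDEREF, userland is additionally mapped at a shadow alias starting at PAX_USER_SHADOW_BASE, and the __get_user stubs rebase any pointer below that base before dereferencing it; kernel-range pointers pass through untouched. The cmp/jae/add triplet in C (the base value here is a stand-in, not the real constant):

#ifndef PAX_USER_SHADOW_BASE
# define PAX_USER_SHADOW_BASE 0x1000000000000ul	/* stand-in value */
#endif

static const void *uderef_rebase(const void *uaddr)
{
	unsigned long a = (unsigned long)uaddr;

	if (a < PAX_USER_SHADOW_BASE)		/* userland pointer... */
		a += PAX_USER_SHADOW_BASE;	/* ...use the shadow alias */
	return (const void *)a;
}
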
26332diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
26333index 54fcffe..7be149e 100644
26334--- a/arch/x86/lib/insn.c
26335+++ b/arch/x86/lib/insn.c
26336@@ -20,8 +20,10 @@
26337
26338 #ifdef __KERNEL__
26339 #include <linux/string.h>
26340+#include <asm/pgtable_types.h>
26341 #else
26342 #include <string.h>
26343+#define ktla_ktva(addr) addr
26344 #endif
26345 #include <asm/inat.h>
26346 #include <asm/insn.h>
26347@@ -53,8 +55,8 @@
26348 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
26349 {
26350 memset(insn, 0, sizeof(*insn));
26351- insn->kaddr = kaddr;
26352- insn->next_byte = kaddr;
26353+ insn->kaddr = ktla_ktva(kaddr);
26354+ insn->next_byte = ktla_ktva(kaddr);
26355 insn->x86_64 = x86_64 ? 1 : 0;
26356 insn->opnd_bytes = 4;
26357 if (x86_64)
26358diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
26359index 05a95e7..326f2fa 100644
26360--- a/arch/x86/lib/iomap_copy_64.S
26361+++ b/arch/x86/lib/iomap_copy_64.S
26362@@ -17,6 +17,7 @@
26363
26364 #include <linux/linkage.h>
26365 #include <asm/dwarf2.h>
26366+#include <asm/alternative-asm.h>
26367
26368 /*
26369 * override generic version in lib/iomap_copy.c
26370@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
26371 CFI_STARTPROC
26372 movl %edx,%ecx
26373 rep movsd
26374+ pax_force_retaddr
26375 ret
26376 CFI_ENDPROC
26377 ENDPROC(__iowrite32_copy)
26378diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
26379index 1c273be..da9cc0e 100644
26380--- a/arch/x86/lib/memcpy_64.S
26381+++ b/arch/x86/lib/memcpy_64.S
26382@@ -33,6 +33,7 @@
26383 rep movsq
26384 movl %edx, %ecx
26385 rep movsb
26386+ pax_force_retaddr
26387 ret
26388 .Lmemcpy_e:
26389 .previous
26390@@ -49,6 +50,7 @@
26391 movq %rdi, %rax
26392 movq %rdx, %rcx
26393 rep movsb
26394+ pax_force_retaddr
26395 ret
26396 .Lmemcpy_e_e:
26397 .previous
26398@@ -76,13 +78,13 @@ ENTRY(memcpy)
26399 */
26400 movq 0*8(%rsi), %r8
26401 movq 1*8(%rsi), %r9
26402- movq 2*8(%rsi), %r10
26403+ movq 2*8(%rsi), %rcx
26404 movq 3*8(%rsi), %r11
26405 leaq 4*8(%rsi), %rsi
26406
26407 movq %r8, 0*8(%rdi)
26408 movq %r9, 1*8(%rdi)
26409- movq %r10, 2*8(%rdi)
26410+ movq %rcx, 2*8(%rdi)
26411 movq %r11, 3*8(%rdi)
26412 leaq 4*8(%rdi), %rdi
26413 jae .Lcopy_forward_loop
26414@@ -105,12 +107,12 @@ ENTRY(memcpy)
26415 subq $0x20, %rdx
26416 movq -1*8(%rsi), %r8
26417 movq -2*8(%rsi), %r9
26418- movq -3*8(%rsi), %r10
26419+ movq -3*8(%rsi), %rcx
26420 movq -4*8(%rsi), %r11
26421 leaq -4*8(%rsi), %rsi
26422 movq %r8, -1*8(%rdi)
26423 movq %r9, -2*8(%rdi)
26424- movq %r10, -3*8(%rdi)
26425+ movq %rcx, -3*8(%rdi)
26426 movq %r11, -4*8(%rdi)
26427 leaq -4*8(%rdi), %rdi
26428 jae .Lcopy_backward_loop
26429@@ -130,12 +132,13 @@ ENTRY(memcpy)
26430 */
26431 movq 0*8(%rsi), %r8
26432 movq 1*8(%rsi), %r9
26433- movq -2*8(%rsi, %rdx), %r10
26434+ movq -2*8(%rsi, %rdx), %rcx
26435 movq -1*8(%rsi, %rdx), %r11
26436 movq %r8, 0*8(%rdi)
26437 movq %r9, 1*8(%rdi)
26438- movq %r10, -2*8(%rdi, %rdx)
26439+ movq %rcx, -2*8(%rdi, %rdx)
26440 movq %r11, -1*8(%rdi, %rdx)
26441+ pax_force_retaddr
26442 retq
26443 .p2align 4
26444 .Lless_16bytes:
26445@@ -148,6 +151,7 @@ ENTRY(memcpy)
26446 movq -1*8(%rsi, %rdx), %r9
26447 movq %r8, 0*8(%rdi)
26448 movq %r9, -1*8(%rdi, %rdx)
26449+ pax_force_retaddr
26450 retq
26451 .p2align 4
26452 .Lless_8bytes:
26453@@ -161,6 +165,7 @@ ENTRY(memcpy)
26454 movl -4(%rsi, %rdx), %r8d
26455 movl %ecx, (%rdi)
26456 movl %r8d, -4(%rdi, %rdx)
26457+ pax_force_retaddr
26458 retq
26459 .p2align 4
26460 .Lless_3bytes:
26461@@ -179,6 +184,7 @@ ENTRY(memcpy)
26462 movb %cl, (%rdi)
26463
26464 .Lend:
26465+ pax_force_retaddr
26466 retq
26467 CFI_ENDPROC
26468 ENDPROC(memcpy)
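
Register names aside, the core of memcpy here is the unrolled loop moving 32 bytes per iteration through four scratch registers, with separate tail paths below 32 bytes. A rough C rendering of the forward loop (memcpy stands in for the tail handling):

#include <stddef.h>
#include <string.h>

static void copy_forward(unsigned char *d, const unsigned char *s, size_t n)
{
	while (n >= 32) {
		unsigned long long a, b, c, e;	/* the four scratch regs */

		memcpy(&a, s +  0, 8);
		memcpy(&b, s +  8, 8);
		memcpy(&c, s + 16, 8);
		memcpy(&e, s + 24, 8);
		memcpy(d +  0, &a, 8);
		memcpy(d +  8, &b, 8);
		memcpy(d + 16, &c, 8);
		memcpy(d + 24, &e, 8);
		s += 32; d += 32; n -= 32;
	}
	memcpy(d, s, n);	/* the sub-32-byte tail paths */
}
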
26469diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
26470index ee16461..c39c199 100644
26471--- a/arch/x86/lib/memmove_64.S
26472+++ b/arch/x86/lib/memmove_64.S
26473@@ -61,13 +61,13 @@ ENTRY(memmove)
26474 5:
26475 sub $0x20, %rdx
26476 movq 0*8(%rsi), %r11
26477- movq 1*8(%rsi), %r10
26478+ movq 1*8(%rsi), %rcx
26479 movq 2*8(%rsi), %r9
26480 movq 3*8(%rsi), %r8
26481 leaq 4*8(%rsi), %rsi
26482
26483 movq %r11, 0*8(%rdi)
26484- movq %r10, 1*8(%rdi)
26485+ movq %rcx, 1*8(%rdi)
26486 movq %r9, 2*8(%rdi)
26487 movq %r8, 3*8(%rdi)
26488 leaq 4*8(%rdi), %rdi
26489@@ -81,10 +81,10 @@ ENTRY(memmove)
26490 4:
26491 movq %rdx, %rcx
26492 movq -8(%rsi, %rdx), %r11
26493- lea -8(%rdi, %rdx), %r10
26494+ lea -8(%rdi, %rdx), %r9
26495 shrq $3, %rcx
26496 rep movsq
26497- movq %r11, (%r10)
26498+ movq %r11, (%r9)
26499 jmp 13f
26500 .Lmemmove_end_forward:
26501
26502@@ -95,14 +95,14 @@ ENTRY(memmove)
26503 7:
26504 movq %rdx, %rcx
26505 movq (%rsi), %r11
26506- movq %rdi, %r10
26507+ movq %rdi, %r9
26508 leaq -8(%rsi, %rdx), %rsi
26509 leaq -8(%rdi, %rdx), %rdi
26510 shrq $3, %rcx
26511 std
26512 rep movsq
26513 cld
26514- movq %r11, (%r10)
26515+ movq %r11, (%r9)
26516 jmp 13f
26517
26518 /*
26519@@ -127,13 +127,13 @@ ENTRY(memmove)
26520 8:
26521 subq $0x20, %rdx
26522 movq -1*8(%rsi), %r11
26523- movq -2*8(%rsi), %r10
26524+ movq -2*8(%rsi), %rcx
26525 movq -3*8(%rsi), %r9
26526 movq -4*8(%rsi), %r8
26527 leaq -4*8(%rsi), %rsi
26528
26529 movq %r11, -1*8(%rdi)
26530- movq %r10, -2*8(%rdi)
26531+ movq %rcx, -2*8(%rdi)
26532 movq %r9, -3*8(%rdi)
26533 movq %r8, -4*8(%rdi)
26534 leaq -4*8(%rdi), %rdi
26535@@ -151,11 +151,11 @@ ENTRY(memmove)
26536 * Move data from 16 bytes to 31 bytes.
26537 */
26538 movq 0*8(%rsi), %r11
26539- movq 1*8(%rsi), %r10
26540+ movq 1*8(%rsi), %rcx
26541 movq -2*8(%rsi, %rdx), %r9
26542 movq -1*8(%rsi, %rdx), %r8
26543 movq %r11, 0*8(%rdi)
26544- movq %r10, 1*8(%rdi)
26545+ movq %rcx, 1*8(%rdi)
26546 movq %r9, -2*8(%rdi, %rdx)
26547 movq %r8, -1*8(%rdi, %rdx)
26548 jmp 13f
26549@@ -167,9 +167,9 @@ ENTRY(memmove)
26550 * Move data from 8 bytes to 15 bytes.
26551 */
26552 movq 0*8(%rsi), %r11
26553- movq -1*8(%rsi, %rdx), %r10
26554+ movq -1*8(%rsi, %rdx), %r9
26555 movq %r11, 0*8(%rdi)
26556- movq %r10, -1*8(%rdi, %rdx)
26557+ movq %r9, -1*8(%rdi, %rdx)
26558 jmp 13f
26559 10:
26560 cmpq $4, %rdx
26561@@ -178,9 +178,9 @@ ENTRY(memmove)
26562 * Move data from 4 bytes to 7 bytes.
26563 */
26564 movl (%rsi), %r11d
26565- movl -4(%rsi, %rdx), %r10d
26566+ movl -4(%rsi, %rdx), %r9d
26567 movl %r11d, (%rdi)
26568- movl %r10d, -4(%rdi, %rdx)
26569+ movl %r9d, -4(%rdi, %rdx)
26570 jmp 13f
26571 11:
26572 cmp $2, %rdx
26573@@ -189,9 +189,9 @@ ENTRY(memmove)
26574 * Move data from 2 bytes to 3 bytes.
26575 */
26576 movw (%rsi), %r11w
26577- movw -2(%rsi, %rdx), %r10w
26578+ movw -2(%rsi, %rdx), %r9w
26579 movw %r11w, (%rdi)
26580- movw %r10w, -2(%rdi, %rdx)
26581+ movw %r9w, -2(%rdi, %rdx)
26582 jmp 13f
26583 12:
26584 cmp $1, %rdx
26585@@ -202,6 +202,7 @@ ENTRY(memmove)
26586 movb (%rsi), %r11b
26587 movb %r11b, (%rdi)
26588 13:
26589+ pax_force_retaddr
26590 retq
26591 CFI_ENDPROC
26592
26593@@ -210,6 +211,7 @@ ENTRY(memmove)
26594 /* Forward moving data. */
26595 movq %rdx, %rcx
26596 rep movsb
26597+ pax_force_retaddr
26598 retq
26599 .Lmemmove_end_forward_efs:
26600 .previous
26601diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
26602index 2dcb380..963660a 100644
26603--- a/arch/x86/lib/memset_64.S
26604+++ b/arch/x86/lib/memset_64.S
26605@@ -30,6 +30,7 @@
26606 movl %edx,%ecx
26607 rep stosb
26608 movq %r9,%rax
26609+ pax_force_retaddr
26610 ret
26611 .Lmemset_e:
26612 .previous
26613@@ -52,6 +53,7 @@
26614 movq %rdx,%rcx
26615 rep stosb
26616 movq %r9,%rax
26617+ pax_force_retaddr
26618 ret
26619 .Lmemset_e_e:
26620 .previous
26621@@ -59,7 +61,7 @@
26622 ENTRY(memset)
26623 ENTRY(__memset)
26624 CFI_STARTPROC
26625- movq %rdi,%r10
26626+ movq %rdi,%r11
26627
26628 /* expand byte value */
26629 movzbl %sil,%ecx
26630@@ -117,7 +119,8 @@ ENTRY(__memset)
26631 jnz .Lloop_1
26632
26633 .Lende:
26634- movq %r10,%rax
26635+ movq %r11,%rax
26636+ pax_force_retaddr
26637 ret
26638
26639 CFI_RESTORE_STATE
26640diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
26641index c9f2d9b..e7fd2c0 100644
26642--- a/arch/x86/lib/mmx_32.c
26643+++ b/arch/x86/lib/mmx_32.c
26644@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26645 {
26646 void *p;
26647 int i;
26648+ unsigned long cr0;
26649
26650 if (unlikely(in_interrupt()))
26651 return __memcpy(to, from, len);
26652@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26653 kernel_fpu_begin();
26654
26655 __asm__ __volatile__ (
26656- "1: prefetch (%0)\n" /* This set is 28 bytes */
26657- " prefetch 64(%0)\n"
26658- " prefetch 128(%0)\n"
26659- " prefetch 192(%0)\n"
26660- " prefetch 256(%0)\n"
26661+ "1: prefetch (%1)\n" /* This set is 28 bytes */
26662+ " prefetch 64(%1)\n"
26663+ " prefetch 128(%1)\n"
26664+ " prefetch 192(%1)\n"
26665+ " prefetch 256(%1)\n"
26666 "2: \n"
26667 ".section .fixup, \"ax\"\n"
26668- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26669+ "3: \n"
26670+
26671+#ifdef CONFIG_PAX_KERNEXEC
26672+ " movl %%cr0, %0\n"
26673+ " movl %0, %%eax\n"
26674+ " andl $0xFFFEFFFF, %%eax\n"
26675+ " movl %%eax, %%cr0\n"
26676+#endif
26677+
26678+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26679+
26680+#ifdef CONFIG_PAX_KERNEXEC
26681+ " movl %0, %%cr0\n"
26682+#endif
26683+
26684 " jmp 2b\n"
26685 ".previous\n"
26686 _ASM_EXTABLE(1b, 3b)
26687- : : "r" (from));
26688+ : "=&r" (cr0) : "r" (from) : "ax");
26689
26690 for ( ; i > 5; i--) {
26691 __asm__ __volatile__ (
26692- "1: prefetch 320(%0)\n"
26693- "2: movq (%0), %%mm0\n"
26694- " movq 8(%0), %%mm1\n"
26695- " movq 16(%0), %%mm2\n"
26696- " movq 24(%0), %%mm3\n"
26697- " movq %%mm0, (%1)\n"
26698- " movq %%mm1, 8(%1)\n"
26699- " movq %%mm2, 16(%1)\n"
26700- " movq %%mm3, 24(%1)\n"
26701- " movq 32(%0), %%mm0\n"
26702- " movq 40(%0), %%mm1\n"
26703- " movq 48(%0), %%mm2\n"
26704- " movq 56(%0), %%mm3\n"
26705- " movq %%mm0, 32(%1)\n"
26706- " movq %%mm1, 40(%1)\n"
26707- " movq %%mm2, 48(%1)\n"
26708- " movq %%mm3, 56(%1)\n"
26709+ "1: prefetch 320(%1)\n"
26710+ "2: movq (%1), %%mm0\n"
26711+ " movq 8(%1), %%mm1\n"
26712+ " movq 16(%1), %%mm2\n"
26713+ " movq 24(%1), %%mm3\n"
26714+ " movq %%mm0, (%2)\n"
26715+ " movq %%mm1, 8(%2)\n"
26716+ " movq %%mm2, 16(%2)\n"
26717+ " movq %%mm3, 24(%2)\n"
26718+ " movq 32(%1), %%mm0\n"
26719+ " movq 40(%1), %%mm1\n"
26720+ " movq 48(%1), %%mm2\n"
26721+ " movq 56(%1), %%mm3\n"
26722+ " movq %%mm0, 32(%2)\n"
26723+ " movq %%mm1, 40(%2)\n"
26724+ " movq %%mm2, 48(%2)\n"
26725+ " movq %%mm3, 56(%2)\n"
26726 ".section .fixup, \"ax\"\n"
26727- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26728+ "3:\n"
26729+
26730+#ifdef CONFIG_PAX_KERNEXEC
26731+ " movl %%cr0, %0\n"
26732+ " movl %0, %%eax\n"
26733+ " andl $0xFFFEFFFF, %%eax\n"
26734+ " movl %%eax, %%cr0\n"
26735+#endif
26736+
26737+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26738+
26739+#ifdef CONFIG_PAX_KERNEXEC
26740+ " movl %0, %%cr0\n"
26741+#endif
26742+
26743 " jmp 2b\n"
26744 ".previous\n"
26745 _ASM_EXTABLE(1b, 3b)
26746- : : "r" (from), "r" (to) : "memory");
26747+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26748
26749 from += 64;
26750 to += 64;
26751@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
26752 static void fast_copy_page(void *to, void *from)
26753 {
26754 int i;
26755+ unsigned long cr0;
26756
26757 kernel_fpu_begin();
26758
26759@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
26760 * but that is for later. -AV
26761 */
26762 __asm__ __volatile__(
26763- "1: prefetch (%0)\n"
26764- " prefetch 64(%0)\n"
26765- " prefetch 128(%0)\n"
26766- " prefetch 192(%0)\n"
26767- " prefetch 256(%0)\n"
26768+ "1: prefetch (%1)\n"
26769+ " prefetch 64(%1)\n"
26770+ " prefetch 128(%1)\n"
26771+ " prefetch 192(%1)\n"
26772+ " prefetch 256(%1)\n"
26773 "2: \n"
26774 ".section .fixup, \"ax\"\n"
26775- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26776+ "3: \n"
26777+
26778+#ifdef CONFIG_PAX_KERNEXEC
26779+ " movl %%cr0, %0\n"
26780+ " movl %0, %%eax\n"
26781+ " andl $0xFFFEFFFF, %%eax\n"
26782+ " movl %%eax, %%cr0\n"
26783+#endif
26784+
26785+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26786+
26787+#ifdef CONFIG_PAX_KERNEXEC
26788+ " movl %0, %%cr0\n"
26789+#endif
26790+
26791 " jmp 2b\n"
26792 ".previous\n"
26793- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26794+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26795
26796 for (i = 0; i < (4096-320)/64; i++) {
26797 __asm__ __volatile__ (
26798- "1: prefetch 320(%0)\n"
26799- "2: movq (%0), %%mm0\n"
26800- " movntq %%mm0, (%1)\n"
26801- " movq 8(%0), %%mm1\n"
26802- " movntq %%mm1, 8(%1)\n"
26803- " movq 16(%0), %%mm2\n"
26804- " movntq %%mm2, 16(%1)\n"
26805- " movq 24(%0), %%mm3\n"
26806- " movntq %%mm3, 24(%1)\n"
26807- " movq 32(%0), %%mm4\n"
26808- " movntq %%mm4, 32(%1)\n"
26809- " movq 40(%0), %%mm5\n"
26810- " movntq %%mm5, 40(%1)\n"
26811- " movq 48(%0), %%mm6\n"
26812- " movntq %%mm6, 48(%1)\n"
26813- " movq 56(%0), %%mm7\n"
26814- " movntq %%mm7, 56(%1)\n"
26815+ "1: prefetch 320(%1)\n"
26816+ "2: movq (%1), %%mm0\n"
26817+ " movntq %%mm0, (%2)\n"
26818+ " movq 8(%1), %%mm1\n"
26819+ " movntq %%mm1, 8(%2)\n"
26820+ " movq 16(%1), %%mm2\n"
26821+ " movntq %%mm2, 16(%2)\n"
26822+ " movq 24(%1), %%mm3\n"
26823+ " movntq %%mm3, 24(%2)\n"
26824+ " movq 32(%1), %%mm4\n"
26825+ " movntq %%mm4, 32(%2)\n"
26826+ " movq 40(%1), %%mm5\n"
26827+ " movntq %%mm5, 40(%2)\n"
26828+ " movq 48(%1), %%mm6\n"
26829+ " movntq %%mm6, 48(%2)\n"
26830+ " movq 56(%1), %%mm7\n"
26831+ " movntq %%mm7, 56(%2)\n"
26832 ".section .fixup, \"ax\"\n"
26833- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26834+ "3:\n"
26835+
26836+#ifdef CONFIG_PAX_KERNEXEC
26837+ " movl %%cr0, %0\n"
26838+ " movl %0, %%eax\n"
26839+ " andl $0xFFFEFFFF, %%eax\n"
26840+ " movl %%eax, %%cr0\n"
26841+#endif
26842+
26843+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26844+
26845+#ifdef CONFIG_PAX_KERNEXEC
26846+ " movl %0, %%cr0\n"
26847+#endif
26848+
26849 " jmp 2b\n"
26850 ".previous\n"
26851- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
26852+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26853
26854 from += 64;
26855 to += 64;
26856@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
26857 static void fast_copy_page(void *to, void *from)
26858 {
26859 int i;
26860+ unsigned long cr0;
26861
26862 kernel_fpu_begin();
26863
26864 __asm__ __volatile__ (
26865- "1: prefetch (%0)\n"
26866- " prefetch 64(%0)\n"
26867- " prefetch 128(%0)\n"
26868- " prefetch 192(%0)\n"
26869- " prefetch 256(%0)\n"
26870+ "1: prefetch (%1)\n"
26871+ " prefetch 64(%1)\n"
26872+ " prefetch 128(%1)\n"
26873+ " prefetch 192(%1)\n"
26874+ " prefetch 256(%1)\n"
26875 "2: \n"
26876 ".section .fixup, \"ax\"\n"
26877- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26878+ "3: \n"
26879+
26880+#ifdef CONFIG_PAX_KERNEXEC
26881+ " movl %%cr0, %0\n"
26882+ " movl %0, %%eax\n"
26883+ " andl $0xFFFEFFFF, %%eax\n"
26884+ " movl %%eax, %%cr0\n"
26885+#endif
26886+
26887+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26888+
26889+#ifdef CONFIG_PAX_KERNEXEC
26890+ " movl %0, %%cr0\n"
26891+#endif
26892+
26893 " jmp 2b\n"
26894 ".previous\n"
26895- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26896+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26897
26898 for (i = 0; i < 4096/64; i++) {
26899 __asm__ __volatile__ (
26900- "1: prefetch 320(%0)\n"
26901- "2: movq (%0), %%mm0\n"
26902- " movq 8(%0), %%mm1\n"
26903- " movq 16(%0), %%mm2\n"
26904- " movq 24(%0), %%mm3\n"
26905- " movq %%mm0, (%1)\n"
26906- " movq %%mm1, 8(%1)\n"
26907- " movq %%mm2, 16(%1)\n"
26908- " movq %%mm3, 24(%1)\n"
26909- " movq 32(%0), %%mm0\n"
26910- " movq 40(%0), %%mm1\n"
26911- " movq 48(%0), %%mm2\n"
26912- " movq 56(%0), %%mm3\n"
26913- " movq %%mm0, 32(%1)\n"
26914- " movq %%mm1, 40(%1)\n"
26915- " movq %%mm2, 48(%1)\n"
26916- " movq %%mm3, 56(%1)\n"
26917+ "1: prefetch 320(%1)\n"
26918+ "2: movq (%1), %%mm0\n"
26919+ " movq 8(%1), %%mm1\n"
26920+ " movq 16(%1), %%mm2\n"
26921+ " movq 24(%1), %%mm3\n"
26922+ " movq %%mm0, (%2)\n"
26923+ " movq %%mm1, 8(%2)\n"
26924+ " movq %%mm2, 16(%2)\n"
26925+ " movq %%mm3, 24(%2)\n"
26926+ " movq 32(%1), %%mm0\n"
26927+ " movq 40(%1), %%mm1\n"
26928+ " movq 48(%1), %%mm2\n"
26929+ " movq 56(%1), %%mm3\n"
26930+ " movq %%mm0, 32(%2)\n"
26931+ " movq %%mm1, 40(%2)\n"
26932+ " movq %%mm2, 48(%2)\n"
26933+ " movq %%mm3, 56(%2)\n"
26934 ".section .fixup, \"ax\"\n"
26935- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26936+ "3:\n"
26937+
26938+#ifdef CONFIG_PAX_KERNEXEC
26939+ " movl %%cr0, %0\n"
26940+ " movl %0, %%eax\n"
26941+ " andl $0xFFFEFFFF, %%eax\n"
26942+ " movl %%eax, %%cr0\n"
26943+#endif
26944+
26945+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26946+
26947+#ifdef CONFIG_PAX_KERNEXEC
26948+ " movl %0, %%cr0\n"
26949+#endif
26950+
26951 " jmp 2b\n"
26952 ".previous\n"
26953 _ASM_EXTABLE(1b, 3b)
26954- : : "r" (from), "r" (to) : "memory");
26955+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26956
26957 from += 64;
26958 to += 64;
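
The fixup paths in this file self-patch the faulting prefetch into a short jmp (the 0x1AEB/0x05EB words written over label 1b are the little-endian bytes of "jmp +26" and "jmp +5"). Under KERNEXEC, kernel text is write-protected, so the patch brackets that write with a CR0 toggle: the 0xFFFEFFFF mask clears bit 16, CR0.WP. A ring-0-only C sketch of the window (this faults anywhere but kernel context):

#define CR0_WP (1ul << 16)

static inline unsigned long cr0_wp_open(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~CR0_WP));
	return cr0;	/* caller passes this back to cr0_wp_close() */
}

static inline void cr0_wp_close(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0));	/* restore WP */
}
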
26959diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26960index f6d13ee..aca5f0b 100644
26961--- a/arch/x86/lib/msr-reg.S
26962+++ b/arch/x86/lib/msr-reg.S
26963@@ -3,6 +3,7 @@
26964 #include <asm/dwarf2.h>
26965 #include <asm/asm.h>
26966 #include <asm/msr.h>
26967+#include <asm/alternative-asm.h>
26968
26969 #ifdef CONFIG_X86_64
26970 /*
26971@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26972 CFI_STARTPROC
26973 pushq_cfi %rbx
26974 pushq_cfi %rbp
26975- movq %rdi, %r10 /* Save pointer */
26976+ movq %rdi, %r9 /* Save pointer */
26977 xorl %r11d, %r11d /* Return value */
26978 movl (%rdi), %eax
26979 movl 4(%rdi), %ecx
26980@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26981 movl 28(%rdi), %edi
26982 CFI_REMEMBER_STATE
26983 1: \op
26984-2: movl %eax, (%r10)
26985+2: movl %eax, (%r9)
26986 movl %r11d, %eax /* Return value */
26987- movl %ecx, 4(%r10)
26988- movl %edx, 8(%r10)
26989- movl %ebx, 12(%r10)
26990- movl %ebp, 20(%r10)
26991- movl %esi, 24(%r10)
26992- movl %edi, 28(%r10)
26993+ movl %ecx, 4(%r9)
26994+ movl %edx, 8(%r9)
26995+ movl %ebx, 12(%r9)
26996+ movl %ebp, 20(%r9)
26997+ movl %esi, 24(%r9)
26998+ movl %edi, 28(%r9)
26999 popq_cfi %rbp
27000 popq_cfi %rbx
27001+ pax_force_retaddr
27002 ret
27003 3:
27004 CFI_RESTORE_STATE
27005diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
27006index fc6ba17..04471c5 100644
27007--- a/arch/x86/lib/putuser.S
27008+++ b/arch/x86/lib/putuser.S
27009@@ -16,7 +16,9 @@
27010 #include <asm/errno.h>
27011 #include <asm/asm.h>
27012 #include <asm/smap.h>
27013-
27014+#include <asm/segment.h>
27015+#include <asm/pgtable.h>
27016+#include <asm/alternative-asm.h>
27017
27018 /*
27019 * __put_user_X
27020@@ -30,57 +32,125 @@
27021 * as they get called from within inline assembly.
27022 */
27023
27024-#define ENTER CFI_STARTPROC ; \
27025- GET_THREAD_INFO(%_ASM_BX)
27026-#define EXIT ASM_CLAC ; \
27027- ret ; \
27028+#define ENTER CFI_STARTPROC
27029+#define EXIT ASM_CLAC ; \
27030+ pax_force_retaddr ; \
27031+ ret ; \
27032 CFI_ENDPROC
27033
27034+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27035+#define _DEST %_ASM_CX,%_ASM_BX
27036+#else
27037+#define _DEST %_ASM_CX
27038+#endif
27039+
27040+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27041+#define __copyuser_seg gs;
27042+#else
27043+#define __copyuser_seg
27044+#endif
27045+
27046 .text
27047 ENTRY(__put_user_1)
27048 ENTER
27049+
27050+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27051+ GET_THREAD_INFO(%_ASM_BX)
27052 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
27053 jae bad_put_user
27054 ASM_STAC
27055-1: movb %al,(%_ASM_CX)
27056+
27057+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27058+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
27059+ cmp %_ASM_BX,%_ASM_CX
27060+ jb 1234f
27061+ xor %ebx,%ebx
27062+1234:
27063+#endif
27064+
27065+#endif
27066+
27067+1: __copyuser_seg movb %al,(_DEST)
27068 xor %eax,%eax
27069 EXIT
27070 ENDPROC(__put_user_1)
27071
27072 ENTRY(__put_user_2)
27073 ENTER
27074+
27075+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27076+ GET_THREAD_INFO(%_ASM_BX)
27077 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
27078 sub $1,%_ASM_BX
27079 cmp %_ASM_BX,%_ASM_CX
27080 jae bad_put_user
27081 ASM_STAC
27082-2: movw %ax,(%_ASM_CX)
27083+
27084+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27085+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
27086+ cmp %_ASM_BX,%_ASM_CX
27087+ jb 1234f
27088+ xor %ebx,%ebx
27089+1234:
27090+#endif
27091+
27092+#endif
27093+
27094+2: __copyuser_seg movw %ax,(_DEST)
27095 xor %eax,%eax
27096 EXIT
27097 ENDPROC(__put_user_2)
27098
27099 ENTRY(__put_user_4)
27100 ENTER
27101+
27102+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27103+ GET_THREAD_INFO(%_ASM_BX)
27104 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
27105 sub $3,%_ASM_BX
27106 cmp %_ASM_BX,%_ASM_CX
27107 jae bad_put_user
27108 ASM_STAC
27109-3: movl %eax,(%_ASM_CX)
27110+
27111+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27112+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
27113+ cmp %_ASM_BX,%_ASM_CX
27114+ jb 1234f
27115+ xor %ebx,%ebx
27116+1234:
27117+#endif
27118+
27119+#endif
27120+
27121+3: __copyuser_seg movl %eax,(_DEST)
27122 xor %eax,%eax
27123 EXIT
27124 ENDPROC(__put_user_4)
27125
27126 ENTRY(__put_user_8)
27127 ENTER
27128+
27129+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27130+ GET_THREAD_INFO(%_ASM_BX)
27131 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
27132 sub $7,%_ASM_BX
27133 cmp %_ASM_BX,%_ASM_CX
27134 jae bad_put_user
27135 ASM_STAC
27136-4: mov %_ASM_AX,(%_ASM_CX)
27137+
27138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27139+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
27140+ cmp %_ASM_BX,%_ASM_CX
27141+ jb 1234f
27142+ xor %ebx,%ebx
27143+1234:
27144+#endif
27145+
27146+#endif
27147+
27148+4: __copyuser_seg mov %_ASM_AX,(_DEST)
27149 #ifdef CONFIG_X86_32
27150-5: movl %edx,4(%_ASM_CX)
27151+5: __copyuser_seg movl %edx,4(_DEST)
27152 #endif
27153 xor %eax,%eax
27154 EXIT
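
The put_user stubs fold the UDEREF rebase into the addressing mode instead of rewriting the pointer: %_ASM_BX is preloaded with PAX_USER_SHADOW_BASE and zeroed only when the destination is already above the base, so the single store through (%_ASM_CX,%_ASM_BX) covers both cases without a branch around the access itself. The same selection in C (base value is a stand-in):

#ifndef PAX_USER_SHADOW_BASE
# define PAX_USER_SHADOW_BASE 0x1000000000000ul	/* stand-in value */
#endif

static void put_byte_uderef(unsigned long dest, unsigned char val)
{
	/* bx = dest < base ? base : 0, then one store at dest + bx */
	unsigned long bx = dest < PAX_USER_SHADOW_BASE ? PAX_USER_SHADOW_BASE : 0;

	*(volatile unsigned char *)(dest + bx) = val;
}
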
27155diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
27156index 1cad221..de671ee 100644
27157--- a/arch/x86/lib/rwlock.S
27158+++ b/arch/x86/lib/rwlock.S
27159@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
27160 FRAME
27161 0: LOCK_PREFIX
27162 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
27163+
27164+#ifdef CONFIG_PAX_REFCOUNT
27165+ jno 1234f
27166+ LOCK_PREFIX
27167+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
27168+ int $4
27169+1234:
27170+ _ASM_EXTABLE(1234b, 1234b)
27171+#endif
27172+
27173 1: rep; nop
27174 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
27175 jne 1b
27176 LOCK_PREFIX
27177 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
27178+
27179+#ifdef CONFIG_PAX_REFCOUNT
27180+ jno 1234f
27181+ LOCK_PREFIX
27182+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
27183+ int $4
27184+1234:
27185+ _ASM_EXTABLE(1234b, 1234b)
27186+#endif
27187+
27188 jnz 0b
27189 ENDFRAME
27190+ pax_force_retaddr
27191 ret
27192 CFI_ENDPROC
27193 END(__write_lock_failed)
27194@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
27195 FRAME
27196 0: LOCK_PREFIX
27197 READ_LOCK_SIZE(inc) (%__lock_ptr)
27198+
27199+#ifdef CONFIG_PAX_REFCOUNT
27200+ jno 1234f
27201+ LOCK_PREFIX
27202+ READ_LOCK_SIZE(dec) (%__lock_ptr)
27203+ int $4
27204+1234:
27205+ _ASM_EXTABLE(1234b, 1234b)
27206+#endif
27207+
27208 1: rep; nop
27209 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
27210 js 1b
27211 LOCK_PREFIX
27212 READ_LOCK_SIZE(dec) (%__lock_ptr)
27213+
27214+#ifdef CONFIG_PAX_REFCOUNT
27215+ jno 1234f
27216+ LOCK_PREFIX
27217+ READ_LOCK_SIZE(inc) (%__lock_ptr)
27218+ int $4
27219+1234:
27220+ _ASM_EXTABLE(1234b, 1234b)
27221+#endif
27222+
27223 js 0b
27224 ENDFRAME
27225+ pax_force_retaddr
27226 ret
27227 CFI_ENDPROC
27228 END(__read_lock_failed)
27229diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
27230index 5dff5f0..cadebf4 100644
27231--- a/arch/x86/lib/rwsem.S
27232+++ b/arch/x86/lib/rwsem.S
27233@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
27234 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
27235 CFI_RESTORE __ASM_REG(dx)
27236 restore_common_regs
27237+ pax_force_retaddr
27238 ret
27239 CFI_ENDPROC
27240 ENDPROC(call_rwsem_down_read_failed)
27241@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
27242 movq %rax,%rdi
27243 call rwsem_down_write_failed
27244 restore_common_regs
27245+ pax_force_retaddr
27246 ret
27247 CFI_ENDPROC
27248 ENDPROC(call_rwsem_down_write_failed)
27249@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
27250 movq %rax,%rdi
27251 call rwsem_wake
27252 restore_common_regs
27253-1: ret
27254+1: pax_force_retaddr
27255+ ret
27256 CFI_ENDPROC
27257 ENDPROC(call_rwsem_wake)
27258
27259@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
27260 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
27261 CFI_RESTORE __ASM_REG(dx)
27262 restore_common_regs
27263+ pax_force_retaddr
27264 ret
27265 CFI_ENDPROC
27266 ENDPROC(call_rwsem_downgrade_wake)
27267diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
27268index a63efd6..ccecad8 100644
27269--- a/arch/x86/lib/thunk_64.S
27270+++ b/arch/x86/lib/thunk_64.S
27271@@ -8,6 +8,7 @@
27272 #include <linux/linkage.h>
27273 #include <asm/dwarf2.h>
27274 #include <asm/calling.h>
27275+#include <asm/alternative-asm.h>
27276
27277 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
27278 .macro THUNK name, func, put_ret_addr_in_rdi=0
27279@@ -41,5 +42,6 @@
27280 SAVE_ARGS
27281 restore:
27282 RESTORE_ARGS
27283+ pax_force_retaddr
27284 ret
27285 CFI_ENDPROC
27286diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
27287index f0312d7..9c39d63 100644
27288--- a/arch/x86/lib/usercopy_32.c
27289+++ b/arch/x86/lib/usercopy_32.c
27290@@ -42,11 +42,13 @@ do { \
27291 int __d0; \
27292 might_fault(); \
27293 __asm__ __volatile__( \
27294+ __COPYUSER_SET_ES \
27295 ASM_STAC "\n" \
27296 "0: rep; stosl\n" \
27297 " movl %2,%0\n" \
27298 "1: rep; stosb\n" \
27299 "2: " ASM_CLAC "\n" \
27300+ __COPYUSER_RESTORE_ES \
27301 ".section .fixup,\"ax\"\n" \
27302 "3: lea 0(%2,%0,4),%0\n" \
27303 " jmp 2b\n" \
27304@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
27305
27306 #ifdef CONFIG_X86_INTEL_USERCOPY
27307 static unsigned long
27308-__copy_user_intel(void __user *to, const void *from, unsigned long size)
27309+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
27310 {
27311 int d0, d1;
27312 __asm__ __volatile__(
27313@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
27314 " .align 2,0x90\n"
27315 "3: movl 0(%4), %%eax\n"
27316 "4: movl 4(%4), %%edx\n"
27317- "5: movl %%eax, 0(%3)\n"
27318- "6: movl %%edx, 4(%3)\n"
27319+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
27320+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
27321 "7: movl 8(%4), %%eax\n"
27322 "8: movl 12(%4),%%edx\n"
27323- "9: movl %%eax, 8(%3)\n"
27324- "10: movl %%edx, 12(%3)\n"
27325+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
27326+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
27327 "11: movl 16(%4), %%eax\n"
27328 "12: movl 20(%4), %%edx\n"
27329- "13: movl %%eax, 16(%3)\n"
27330- "14: movl %%edx, 20(%3)\n"
27331+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
27332+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
27333 "15: movl 24(%4), %%eax\n"
27334 "16: movl 28(%4), %%edx\n"
27335- "17: movl %%eax, 24(%3)\n"
27336- "18: movl %%edx, 28(%3)\n"
27337+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
27338+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
27339 "19: movl 32(%4), %%eax\n"
27340 "20: movl 36(%4), %%edx\n"
27341- "21: movl %%eax, 32(%3)\n"
27342- "22: movl %%edx, 36(%3)\n"
27343+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
27344+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
27345 "23: movl 40(%4), %%eax\n"
27346 "24: movl 44(%4), %%edx\n"
27347- "25: movl %%eax, 40(%3)\n"
27348- "26: movl %%edx, 44(%3)\n"
27349+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
27350+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
27351 "27: movl 48(%4), %%eax\n"
27352 "28: movl 52(%4), %%edx\n"
27353- "29: movl %%eax, 48(%3)\n"
27354- "30: movl %%edx, 52(%3)\n"
27355+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
27356+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
27357 "31: movl 56(%4), %%eax\n"
27358 "32: movl 60(%4), %%edx\n"
27359- "33: movl %%eax, 56(%3)\n"
27360- "34: movl %%edx, 60(%3)\n"
27361+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
27362+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
27363 " addl $-64, %0\n"
27364 " addl $64, %4\n"
27365 " addl $64, %3\n"
27366@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
27367 " shrl $2, %0\n"
27368 " andl $3, %%eax\n"
27369 " cld\n"
27370+ __COPYUSER_SET_ES
27371 "99: rep; movsl\n"
27372 "36: movl %%eax, %0\n"
27373 "37: rep; movsb\n"
27374 "100:\n"
27375+ __COPYUSER_RESTORE_ES
27376 ".section .fixup,\"ax\"\n"
27377 "101: lea 0(%%eax,%0,4),%0\n"
27378 " jmp 100b\n"
27379@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
27380 }
27381
27382 static unsigned long
27383+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
27384+{
27385+ int d0, d1;
27386+ __asm__ __volatile__(
27387+ " .align 2,0x90\n"
27388+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
27389+ " cmpl $67, %0\n"
27390+ " jbe 3f\n"
27391+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
27392+ " .align 2,0x90\n"
27393+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
27394+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
27395+ "5: movl %%eax, 0(%3)\n"
27396+ "6: movl %%edx, 4(%3)\n"
27397+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
27398+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
27399+ "9: movl %%eax, 8(%3)\n"
27400+ "10: movl %%edx, 12(%3)\n"
27401+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
27402+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
27403+ "13: movl %%eax, 16(%3)\n"
27404+ "14: movl %%edx, 20(%3)\n"
27405+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
27406+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
27407+ "17: movl %%eax, 24(%3)\n"
27408+ "18: movl %%edx, 28(%3)\n"
27409+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
27410+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
27411+ "21: movl %%eax, 32(%3)\n"
27412+ "22: movl %%edx, 36(%3)\n"
27413+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
27414+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
27415+ "25: movl %%eax, 40(%3)\n"
27416+ "26: movl %%edx, 44(%3)\n"
27417+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
27418+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
27419+ "29: movl %%eax, 48(%3)\n"
27420+ "30: movl %%edx, 52(%3)\n"
27421+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
27422+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
27423+ "33: movl %%eax, 56(%3)\n"
27424+ "34: movl %%edx, 60(%3)\n"
27425+ " addl $-64, %0\n"
27426+ " addl $64, %4\n"
27427+ " addl $64, %3\n"
27428+ " cmpl $63, %0\n"
27429+ " ja 1b\n"
27430+ "35: movl %0, %%eax\n"
27431+ " shrl $2, %0\n"
27432+ " andl $3, %%eax\n"
27433+ " cld\n"
27434+ "99: rep; "__copyuser_seg" movsl\n"
27435+ "36: movl %%eax, %0\n"
27436+ "37: rep; "__copyuser_seg" movsb\n"
27437+ "100:\n"
27438+ ".section .fixup,\"ax\"\n"
27439+ "101: lea 0(%%eax,%0,4),%0\n"
27440+ " jmp 100b\n"
27441+ ".previous\n"
27442+ _ASM_EXTABLE(1b,100b)
27443+ _ASM_EXTABLE(2b,100b)
27444+ _ASM_EXTABLE(3b,100b)
27445+ _ASM_EXTABLE(4b,100b)
27446+ _ASM_EXTABLE(5b,100b)
27447+ _ASM_EXTABLE(6b,100b)
27448+ _ASM_EXTABLE(7b,100b)
27449+ _ASM_EXTABLE(8b,100b)
27450+ _ASM_EXTABLE(9b,100b)
27451+ _ASM_EXTABLE(10b,100b)
27452+ _ASM_EXTABLE(11b,100b)
27453+ _ASM_EXTABLE(12b,100b)
27454+ _ASM_EXTABLE(13b,100b)
27455+ _ASM_EXTABLE(14b,100b)
27456+ _ASM_EXTABLE(15b,100b)
27457+ _ASM_EXTABLE(16b,100b)
27458+ _ASM_EXTABLE(17b,100b)
27459+ _ASM_EXTABLE(18b,100b)
27460+ _ASM_EXTABLE(19b,100b)
27461+ _ASM_EXTABLE(20b,100b)
27462+ _ASM_EXTABLE(21b,100b)
27463+ _ASM_EXTABLE(22b,100b)
27464+ _ASM_EXTABLE(23b,100b)
27465+ _ASM_EXTABLE(24b,100b)
27466+ _ASM_EXTABLE(25b,100b)
27467+ _ASM_EXTABLE(26b,100b)
27468+ _ASM_EXTABLE(27b,100b)
27469+ _ASM_EXTABLE(28b,100b)
27470+ _ASM_EXTABLE(29b,100b)
27471+ _ASM_EXTABLE(30b,100b)
27472+ _ASM_EXTABLE(31b,100b)
27473+ _ASM_EXTABLE(32b,100b)
27474+ _ASM_EXTABLE(33b,100b)
27475+ _ASM_EXTABLE(34b,100b)
27476+ _ASM_EXTABLE(35b,100b)
27477+ _ASM_EXTABLE(36b,100b)
27478+ _ASM_EXTABLE(37b,100b)
27479+ _ASM_EXTABLE(99b,101b)
27480+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
27481+ : "1"(to), "2"(from), "0"(size)
27482+ : "eax", "edx", "memory");
27483+ return size;
27484+}
27485+
27486+static unsigned long __size_overflow(3)
27487 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
27488 {
27489 int d0, d1;
27490 __asm__ __volatile__(
27491 " .align 2,0x90\n"
27492- "0: movl 32(%4), %%eax\n"
27493+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27494 " cmpl $67, %0\n"
27495 " jbe 2f\n"
27496- "1: movl 64(%4), %%eax\n"
27497+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27498 " .align 2,0x90\n"
27499- "2: movl 0(%4), %%eax\n"
27500- "21: movl 4(%4), %%edx\n"
27501+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27502+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27503 " movl %%eax, 0(%3)\n"
27504 " movl %%edx, 4(%3)\n"
27505- "3: movl 8(%4), %%eax\n"
27506- "31: movl 12(%4),%%edx\n"
27507+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27508+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27509 " movl %%eax, 8(%3)\n"
27510 " movl %%edx, 12(%3)\n"
27511- "4: movl 16(%4), %%eax\n"
27512- "41: movl 20(%4), %%edx\n"
27513+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27514+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27515 " movl %%eax, 16(%3)\n"
27516 " movl %%edx, 20(%3)\n"
27517- "10: movl 24(%4), %%eax\n"
27518- "51: movl 28(%4), %%edx\n"
27519+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27520+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27521 " movl %%eax, 24(%3)\n"
27522 " movl %%edx, 28(%3)\n"
27523- "11: movl 32(%4), %%eax\n"
27524- "61: movl 36(%4), %%edx\n"
27525+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27526+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27527 " movl %%eax, 32(%3)\n"
27528 " movl %%edx, 36(%3)\n"
27529- "12: movl 40(%4), %%eax\n"
27530- "71: movl 44(%4), %%edx\n"
27531+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27532+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27533 " movl %%eax, 40(%3)\n"
27534 " movl %%edx, 44(%3)\n"
27535- "13: movl 48(%4), %%eax\n"
27536- "81: movl 52(%4), %%edx\n"
27537+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27538+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27539 " movl %%eax, 48(%3)\n"
27540 " movl %%edx, 52(%3)\n"
27541- "14: movl 56(%4), %%eax\n"
27542- "91: movl 60(%4), %%edx\n"
27543+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27544+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27545 " movl %%eax, 56(%3)\n"
27546 " movl %%edx, 60(%3)\n"
27547 " addl $-64, %0\n"
27548@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
27549 " shrl $2, %0\n"
27550 " andl $3, %%eax\n"
27551 " cld\n"
27552- "6: rep; movsl\n"
27553+ "6: rep; "__copyuser_seg" movsl\n"
27554 " movl %%eax,%0\n"
27555- "7: rep; movsb\n"
27556+ "7: rep; "__copyuser_seg" movsb\n"
27557 "8:\n"
27558 ".section .fixup,\"ax\"\n"
27559 "9: lea 0(%%eax,%0,4),%0\n"
27560@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
27561 * hyoshiok@miraclelinux.com
27562 */
27563
27564-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27565+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
27566 const void __user *from, unsigned long size)
27567 {
27568 int d0, d1;
27569
27570 __asm__ __volatile__(
27571 " .align 2,0x90\n"
27572- "0: movl 32(%4), %%eax\n"
27573+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27574 " cmpl $67, %0\n"
27575 " jbe 2f\n"
27576- "1: movl 64(%4), %%eax\n"
27577+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27578 " .align 2,0x90\n"
27579- "2: movl 0(%4), %%eax\n"
27580- "21: movl 4(%4), %%edx\n"
27581+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27582+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27583 " movnti %%eax, 0(%3)\n"
27584 " movnti %%edx, 4(%3)\n"
27585- "3: movl 8(%4), %%eax\n"
27586- "31: movl 12(%4),%%edx\n"
27587+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27588+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27589 " movnti %%eax, 8(%3)\n"
27590 " movnti %%edx, 12(%3)\n"
27591- "4: movl 16(%4), %%eax\n"
27592- "41: movl 20(%4), %%edx\n"
27593+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27594+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27595 " movnti %%eax, 16(%3)\n"
27596 " movnti %%edx, 20(%3)\n"
27597- "10: movl 24(%4), %%eax\n"
27598- "51: movl 28(%4), %%edx\n"
27599+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27600+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27601 " movnti %%eax, 24(%3)\n"
27602 " movnti %%edx, 28(%3)\n"
27603- "11: movl 32(%4), %%eax\n"
27604- "61: movl 36(%4), %%edx\n"
27605+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27606+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27607 " movnti %%eax, 32(%3)\n"
27608 " movnti %%edx, 36(%3)\n"
27609- "12: movl 40(%4), %%eax\n"
27610- "71: movl 44(%4), %%edx\n"
27611+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27612+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27613 " movnti %%eax, 40(%3)\n"
27614 " movnti %%edx, 44(%3)\n"
27615- "13: movl 48(%4), %%eax\n"
27616- "81: movl 52(%4), %%edx\n"
27617+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27618+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27619 " movnti %%eax, 48(%3)\n"
27620 " movnti %%edx, 52(%3)\n"
27621- "14: movl 56(%4), %%eax\n"
27622- "91: movl 60(%4), %%edx\n"
27623+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27624+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27625 " movnti %%eax, 56(%3)\n"
27626 " movnti %%edx, 60(%3)\n"
27627 " addl $-64, %0\n"
27628@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27629 " shrl $2, %0\n"
27630 " andl $3, %%eax\n"
27631 " cld\n"
27632- "6: rep; movsl\n"
27633+ "6: rep; "__copyuser_seg" movsl\n"
27634 " movl %%eax,%0\n"
27635- "7: rep; movsb\n"
27636+ "7: rep; "__copyuser_seg" movsb\n"
27637 "8:\n"
27638 ".section .fixup,\"ax\"\n"
27639 "9: lea 0(%%eax,%0,4),%0\n"
27640@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27641 return size;
27642 }
27643
27644-static unsigned long __copy_user_intel_nocache(void *to,
27645+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
27646 const void __user *from, unsigned long size)
27647 {
27648 int d0, d1;
27649
27650 __asm__ __volatile__(
27651 " .align 2,0x90\n"
27652- "0: movl 32(%4), %%eax\n"
27653+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27654 " cmpl $67, %0\n"
27655 " jbe 2f\n"
27656- "1: movl 64(%4), %%eax\n"
27657+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27658 " .align 2,0x90\n"
27659- "2: movl 0(%4), %%eax\n"
27660- "21: movl 4(%4), %%edx\n"
27661+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27662+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27663 " movnti %%eax, 0(%3)\n"
27664 " movnti %%edx, 4(%3)\n"
27665- "3: movl 8(%4), %%eax\n"
27666- "31: movl 12(%4),%%edx\n"
27667+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27668+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27669 " movnti %%eax, 8(%3)\n"
27670 " movnti %%edx, 12(%3)\n"
27671- "4: movl 16(%4), %%eax\n"
27672- "41: movl 20(%4), %%edx\n"
27673+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27674+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27675 " movnti %%eax, 16(%3)\n"
27676 " movnti %%edx, 20(%3)\n"
27677- "10: movl 24(%4), %%eax\n"
27678- "51: movl 28(%4), %%edx\n"
27679+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27680+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27681 " movnti %%eax, 24(%3)\n"
27682 " movnti %%edx, 28(%3)\n"
27683- "11: movl 32(%4), %%eax\n"
27684- "61: movl 36(%4), %%edx\n"
27685+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27686+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27687 " movnti %%eax, 32(%3)\n"
27688 " movnti %%edx, 36(%3)\n"
27689- "12: movl 40(%4), %%eax\n"
27690- "71: movl 44(%4), %%edx\n"
27691+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27692+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27693 " movnti %%eax, 40(%3)\n"
27694 " movnti %%edx, 44(%3)\n"
27695- "13: movl 48(%4), %%eax\n"
27696- "81: movl 52(%4), %%edx\n"
27697+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27698+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27699 " movnti %%eax, 48(%3)\n"
27700 " movnti %%edx, 52(%3)\n"
27701- "14: movl 56(%4), %%eax\n"
27702- "91: movl 60(%4), %%edx\n"
27703+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27704+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27705 " movnti %%eax, 56(%3)\n"
27706 " movnti %%edx, 60(%3)\n"
27707 " addl $-64, %0\n"
27708@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
27709 " shrl $2, %0\n"
27710 " andl $3, %%eax\n"
27711 " cld\n"
27712- "6: rep; movsl\n"
27713+ "6: rep; "__copyuser_seg" movsl\n"
27714 " movl %%eax,%0\n"
27715- "7: rep; movsb\n"
27716+ "7: rep; "__copyuser_seg" movsb\n"
27717 "8:\n"
27718 ".section .fixup,\"ax\"\n"
27719 "9: lea 0(%%eax,%0,4),%0\n"
27720@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
27721 */
27722 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
27723 unsigned long size);
27724-unsigned long __copy_user_intel(void __user *to, const void *from,
27725+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
27726+ unsigned long size);
27727+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
27728 unsigned long size);
27729 unsigned long __copy_user_zeroing_intel_nocache(void *to,
27730 const void __user *from, unsigned long size);
27731 #endif /* CONFIG_X86_INTEL_USERCOPY */
27732
27733 /* Generic arbitrary sized copy. */
27734-#define __copy_user(to, from, size) \
27735+#define __copy_user(to, from, size, prefix, set, restore) \
27736 do { \
27737 int __d0, __d1, __d2; \
27738 __asm__ __volatile__( \
27739+ set \
27740 " cmp $7,%0\n" \
27741 " jbe 1f\n" \
27742 " movl %1,%0\n" \
27743 " negl %0\n" \
27744 " andl $7,%0\n" \
27745 " subl %0,%3\n" \
27746- "4: rep; movsb\n" \
27747+ "4: rep; "prefix"movsb\n" \
27748 " movl %3,%0\n" \
27749 " shrl $2,%0\n" \
27750 " andl $3,%3\n" \
27751 " .align 2,0x90\n" \
27752- "0: rep; movsl\n" \
27753+ "0: rep; "prefix"movsl\n" \
27754 " movl %3,%0\n" \
27755- "1: rep; movsb\n" \
27756+ "1: rep; "prefix"movsb\n" \
27757 "2:\n" \
27758+ restore \
27759 ".section .fixup,\"ax\"\n" \
27760 "5: addl %3,%0\n" \
27761 " jmp 2b\n" \
27762@@ -538,14 +650,14 @@ do { \
27763 " negl %0\n" \
27764 " andl $7,%0\n" \
27765 " subl %0,%3\n" \
27766- "4: rep; movsb\n" \
27767+ "4: rep; "__copyuser_seg"movsb\n" \
27768 " movl %3,%0\n" \
27769 " shrl $2,%0\n" \
27770 " andl $3,%3\n" \
27771 " .align 2,0x90\n" \
27772- "0: rep; movsl\n" \
27773+ "0: rep; "__copyuser_seg"movsl\n" \
27774 " movl %3,%0\n" \
27775- "1: rep; movsb\n" \
27776+ "1: rep; "__copyuser_seg"movsb\n" \
27777 "2:\n" \
27778 ".section .fixup,\"ax\"\n" \
27779 "5: addl %3,%0\n" \
27780@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
27781 {
27782 stac();
27783 if (movsl_is_ok(to, from, n))
27784- __copy_user(to, from, n);
27785+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
27786 else
27787- n = __copy_user_intel(to, from, n);
27788+ n = __generic_copy_to_user_intel(to, from, n);
27789 clac();
27790 return n;
27791 }
27792@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
27793 {
27794 stac();
27795 if (movsl_is_ok(to, from, n))
27796- __copy_user(to, from, n);
27797+ __copy_user(to, from, n, __copyuser_seg, "", "");
27798 else
27799- n = __copy_user_intel((void __user *)to,
27800- (const void *)from, n);
27801+ n = __generic_copy_from_user_intel(to, from, n);
27802 clac();
27803 return n;
27804 }
27805@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
27806 if (n > 64 && cpu_has_xmm2)
27807 n = __copy_user_intel_nocache(to, from, n);
27808 else
27809- __copy_user(to, from, n);
27810+ __copy_user(to, from, n, __copyuser_seg, "", "");
27811 #else
27812- __copy_user(to, from, n);
27813+ __copy_user(to, from, n, __copyuser_seg, "", "");
27814 #endif
27815 clac();
27816 return n;
27817 }
27818 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
27819
27820-/**
27821- * copy_to_user: - Copy a block of data into user space.
27822- * @to: Destination address, in user space.
27823- * @from: Source address, in kernel space.
27824- * @n: Number of bytes to copy.
27825- *
27826- * Context: User context only. This function may sleep.
27827- *
27828- * Copy data from kernel space to user space.
27829- *
27830- * Returns number of bytes that could not be copied.
27831- * On success, this will be zero.
27832- */
27833-unsigned long
27834-copy_to_user(void __user *to, const void *from, unsigned long n)
27835-{
27836- if (access_ok(VERIFY_WRITE, to, n))
27837- n = __copy_to_user(to, from, n);
27838- return n;
27839-}
27840-EXPORT_SYMBOL(copy_to_user);
27841-
27842-/**
27843- * copy_from_user: - Copy a block of data from user space.
27844- * @to: Destination address, in kernel space.
27845- * @from: Source address, in user space.
27846- * @n: Number of bytes to copy.
27847- *
27848- * Context: User context only. This function may sleep.
27849- *
27850- * Copy data from user space to kernel space.
27851- *
27852- * Returns number of bytes that could not be copied.
27853- * On success, this will be zero.
27854- *
27855- * If some data could not be copied, this function will pad the copied
27856- * data to the requested size using zero bytes.
27857- */
27858-unsigned long
27859-_copy_from_user(void *to, const void __user *from, unsigned long n)
27860-{
27861- if (access_ok(VERIFY_READ, from, n))
27862- n = __copy_from_user(to, from, n);
27863- else
27864- memset(to, 0, n);
27865- return n;
27866-}
27867-EXPORT_SYMBOL(_copy_from_user);
27868-
27869 void copy_from_user_overflow(void)
27870 {
27871 WARN(1, "Buffer overflow detected!\n");
27872 }
27873 EXPORT_SYMBOL(copy_from_user_overflow);
27874+
27875+void copy_to_user_overflow(void)
27876+{
27877+ WARN(1, "Buffer overflow detected!\n");
27878+}
27879+EXPORT_SYMBOL(copy_to_user_overflow);
27880+
27881+#ifdef CONFIG_PAX_MEMORY_UDEREF
27882+void __set_fs(mm_segment_t x)
27883+{
27884+ switch (x.seg) {
27885+ case 0:
27886+ loadsegment(gs, 0);
27887+ break;
27888+ case TASK_SIZE_MAX:
27889+ loadsegment(gs, __USER_DS);
27890+ break;
27891+ case -1UL:
27892+ loadsegment(gs, __KERNEL_DS);
27893+ break;
27894+ default:
27895+ BUG();
27896+ }
27897+ return;
27898+}
27899+EXPORT_SYMBOL(__set_fs);
27900+
27901+void set_fs(mm_segment_t x)
27902+{
27903+ current_thread_info()->addr_limit = x;
27904+ __set_fs(x);
27905+}
27906+EXPORT_SYMBOL(set_fs);
27907+#endif
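
Under PAX_MEMORY_UDEREF the __copy_user() macro above grows three string parameters (prefix, set, restore) so one asm template can run either with a per-instruction segment override on the user-side accesses or with a temporary ES reload around the whole copy. The splice is plain string-literal concatenation into the asm template. A compilable x86 sketch of that mechanism, shown with an empty prefix since a real override (something like "gs; ") needs the kernel's segment setup:

#include <stdio.h>
#include <string.h>

/* 'prefix' is a string literal concatenated straight into the instruction
 * template: pass "" for an ordinary copy, or a segment-override mnemonic
 * in an environment where that segment is set up for user accesses. */
#define COPY_BYTES(dst, src, n, prefix)                         \
do {                                                            \
    unsigned long __d0, __d1, __d2;                             \
    __asm__ __volatile__(                                       \
        "rep; " prefix "movsb\n"                                \
        : "=&c"(__d0), "=&D"(__d1), "=&S"(__d2)                 \
        : "0"((unsigned long)(n)), "1"(dst), "2"(src)           \
        : "memory");                                            \
} while (0)

int main(void)
{
    char src[] = "hello", dst[8] = { 0 };
    COPY_BYTES(dst, src, sizeof(src), "");   /* empty prefix: plain copy */
    puts(dst);
    return 0;
}
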
27908diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27909index 906fea3..ee8a097 100644
27910--- a/arch/x86/lib/usercopy_64.c
27911+++ b/arch/x86/lib/usercopy_64.c
27912@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27913 _ASM_EXTABLE(0b,3b)
27914 _ASM_EXTABLE(1b,2b)
27915 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27916- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27917+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27918 [zero] "r" (0UL), [eight] "r" (8UL));
27919 clac();
27920 return size;
27921@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27922 }
27923 EXPORT_SYMBOL(clear_user);
27924
27925-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27926+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27927 {
27928- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27929- return copy_user_generic((__force void *)to, (__force void *)from, len);
27930- }
27931- return len;
27932+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27933+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27934+ return len;
27935 }
27936 EXPORT_SYMBOL(copy_in_user);
27937
27938@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27939 * it is not necessary to optimize tail handling.
27940 */
27941 unsigned long
27942-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27943+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27944 {
27945 char c;
27946 unsigned zero_len;
27947@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27948 clac();
27949 return len;
27950 }
27951+
27952+void copy_from_user_overflow(void)
27953+{
27954+ WARN(1, "Buffer overflow detected!\n");
27955+}
27956+EXPORT_SYMBOL(copy_from_user_overflow);
27957+
27958+void copy_to_user_overflow(void)
27959+{
27960+ WARN(1, "Buffer overflow detected!\n");
27961+}
27962+EXPORT_SYMBOL(copy_to_user_overflow);
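
The copy_from_user_overflow()/copy_to_user_overflow() stubs added in both files are the report side of compile-time copy-size checking: the copy_*_user wrappers compare the requested length against the destination size the compiler can see (via __compiletime_object_size) and route provably-too-large copies to the overflow stub instead of the real copy. A small standalone sketch of the check itself, with made-up helper names and a sizeof-based bound (so it needs an array argument) in place of the kernel's builtin-based one:

#include <stdio.h>
#include <string.h>

static void copy_overflow_report(size_t want, size_t have)
{
    /* mirrors the WARN(1, "Buffer overflow detected!\n") stubs above */
    fprintf(stderr, "Buffer overflow detected! (%zu > %zu)\n", want, have);
}

/* 'dst' must be an array lvalue so sizeof() yields its real size. */
#define checked_copy(dst, src, n)                               \
do {                                                            \
    if ((n) > sizeof(dst))                                      \
        copy_overflow_report((n), sizeof(dst));                 \
    else                                                        \
        memcpy((dst), (src), (n));                              \
} while (0)

int main(void)
{
    char small[4];
    const char big[] = "0123456789";
    checked_copy(small, big, sizeof(big));   /* reports, copies nothing */
    return 0;
}
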
27963diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27964index 903ec1e..c4166b2 100644
27965--- a/arch/x86/mm/extable.c
27966+++ b/arch/x86/mm/extable.c
27967@@ -6,12 +6,24 @@
27968 static inline unsigned long
27969 ex_insn_addr(const struct exception_table_entry *x)
27970 {
27971- return (unsigned long)&x->insn + x->insn;
27972+ unsigned long reloc = 0;
27973+
27974+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27975+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27976+#endif
27977+
27978+ return (unsigned long)&x->insn + x->insn + reloc;
27979 }
27980 static inline unsigned long
27981 ex_fixup_addr(const struct exception_table_entry *x)
27982 {
27983- return (unsigned long)&x->fixup + x->fixup;
27984+ unsigned long reloc = 0;
27985+
27986+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27987+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27988+#endif
27989+
27990+ return (unsigned long)&x->fixup + x->fixup + reloc;
27991 }
27992
27993 int fixup_exception(struct pt_regs *regs)
27994@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27995 unsigned long new_ip;
27996
27997 #ifdef CONFIG_PNPBIOS
27998- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27999+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
28000 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
28001 extern u32 pnp_bios_is_utter_crap;
28002 pnp_bios_is_utter_crap = 1;
28003@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
28004 i += 4;
28005 p->fixup -= i;
28006 i += 4;
28007+
28008+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28009+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
28010+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
28011+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
28012+#endif
28013+
28014 }
28015 }
28016
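
The exception table stores each address as a 32-bit offset relative to the field holding it, so ex_insn_addr()/ex_fixup_addr() rebuild absolute addresses by adding the field's own address; the KERNEXEC hunks additionally fold in the kernel's physical relocation delta and subtract it again at sort time. A self-contained sketch of the self-relative encoding, assuming the two statics sit within +/-2 GiB of each other as in the kernel's small code model:

#include <stdint.h>
#include <stdio.h>

struct exception_table_entry { int32_t insn, fixup; };

/* Self-relative: stored value = target address - address of the field.
 * The table stays valid wherever the image loads, as long as the code
 * and the table move together. */
static uintptr_t ex_insn_addr(const struct exception_table_entry *x)
{
    return (uintptr_t)&x->insn + x->insn;
}

static char fault_site;                 /* stands in for a faulting insn */
static struct exception_table_entry entry;

int main(void)
{
    entry.insn = (int32_t)((uintptr_t)&fault_site - (uintptr_t)&entry.insn);
    printf("round-trip ok: %d\n",
           ex_insn_addr(&entry) == (uintptr_t)&fault_site);
    return 0;
}
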
28017diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
28018index 4f7d793..165a8be 100644
28019--- a/arch/x86/mm/fault.c
28020+++ b/arch/x86/mm/fault.c
28021@@ -13,12 +13,19 @@
28022 #include <linux/perf_event.h> /* perf_sw_event */
28023 #include <linux/hugetlb.h> /* hstate_index_to_shift */
28024 #include <linux/prefetch.h> /* prefetchw */
28025+#include <linux/unistd.h>
28026+#include <linux/compiler.h>
28027
28028 #include <asm/traps.h> /* dotraplinkage, ... */
28029 #include <asm/pgalloc.h> /* pgd_*(), ... */
28030 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
28031 #include <asm/fixmap.h> /* VSYSCALL_START */
28032 #include <asm/context_tracking.h> /* exception_enter(), ... */
28033+#include <asm/tlbflush.h>
28034+
28035+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28036+#include <asm/stacktrace.h>
28037+#endif
28038
28039 /*
28040 * Page fault error code bits:
28041@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
28042 int ret = 0;
28043
28044 /* kprobe_running() needs smp_processor_id() */
28045- if (kprobes_built_in() && !user_mode_vm(regs)) {
28046+ if (kprobes_built_in() && !user_mode(regs)) {
28047 preempt_disable();
28048 if (kprobe_running() && kprobe_fault_handler(regs, 14))
28049 ret = 1;
28050@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
28051 return !instr_lo || (instr_lo>>1) == 1;
28052 case 0x00:
28053 /* Prefetch instruction is 0x0F0D or 0x0F18 */
28054- if (probe_kernel_address(instr, opcode))
28055+ if (user_mode(regs)) {
28056+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
28057+ return 0;
28058+ } else if (probe_kernel_address(instr, opcode))
28059 return 0;
28060
28061 *prefetch = (instr_lo == 0xF) &&
28062@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
28063 while (instr < max_instr) {
28064 unsigned char opcode;
28065
28066- if (probe_kernel_address(instr, opcode))
28067+ if (user_mode(regs)) {
28068+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
28069+ break;
28070+ } else if (probe_kernel_address(instr, opcode))
28071 break;
28072
28073 instr++;
28074@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
28075 force_sig_info(si_signo, &info, tsk);
28076 }
28077
28078+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28079+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
28080+#endif
28081+
28082+#ifdef CONFIG_PAX_EMUTRAMP
28083+static int pax_handle_fetch_fault(struct pt_regs *regs);
28084+#endif
28085+
28086+#ifdef CONFIG_PAX_PAGEEXEC
28087+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
28088+{
28089+ pgd_t *pgd;
28090+ pud_t *pud;
28091+ pmd_t *pmd;
28092+
28093+ pgd = pgd_offset(mm, address);
28094+ if (!pgd_present(*pgd))
28095+ return NULL;
28096+ pud = pud_offset(pgd, address);
28097+ if (!pud_present(*pud))
28098+ return NULL;
28099+ pmd = pmd_offset(pud, address);
28100+ if (!pmd_present(*pmd))
28101+ return NULL;
28102+ return pmd;
28103+}
28104+#endif
28105+
28106 DEFINE_SPINLOCK(pgd_lock);
28107 LIST_HEAD(pgd_list);
28108
28109@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
28110 for (address = VMALLOC_START & PMD_MASK;
28111 address >= TASK_SIZE && address < FIXADDR_TOP;
28112 address += PMD_SIZE) {
28113+
28114+#ifdef CONFIG_PAX_PER_CPU_PGD
28115+ unsigned long cpu;
28116+#else
28117 struct page *page;
28118+#endif
28119
28120 spin_lock(&pgd_lock);
28121+
28122+#ifdef CONFIG_PAX_PER_CPU_PGD
28123+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28124+ pgd_t *pgd = get_cpu_pgd(cpu);
28125+ pmd_t *ret;
28126+#else
28127 list_for_each_entry(page, &pgd_list, lru) {
28128+ pgd_t *pgd;
28129 spinlock_t *pgt_lock;
28130 pmd_t *ret;
28131
28132@@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
28133 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28134
28135 spin_lock(pgt_lock);
28136- ret = vmalloc_sync_one(page_address(page), address);
28137+ pgd = page_address(page);
28138+#endif
28139+
28140+ ret = vmalloc_sync_one(pgd, address);
28141+
28142+#ifndef CONFIG_PAX_PER_CPU_PGD
28143 spin_unlock(pgt_lock);
28144+#endif
28145
28146 if (!ret)
28147 break;
28148@@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
28149 * an interrupt in the middle of a task switch..
28150 */
28151 pgd_paddr = read_cr3();
28152+
28153+#ifdef CONFIG_PAX_PER_CPU_PGD
28154+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
28155+#endif
28156+
28157 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
28158 if (!pmd_k)
28159 return -1;
28160@@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
28161 * happen within a race in page table update. In the later
28162 * case just flush:
28163 */
28164+
28165+#ifdef CONFIG_PAX_PER_CPU_PGD
28166+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
28167+ pgd = pgd_offset_cpu(smp_processor_id(), address);
28168+#else
28169 pgd = pgd_offset(current->active_mm, address);
28170+#endif
28171+
28172 pgd_ref = pgd_offset_k(address);
28173 if (pgd_none(*pgd_ref))
28174 return -1;
28175@@ -543,7 +614,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
28176 static int is_errata100(struct pt_regs *regs, unsigned long address)
28177 {
28178 #ifdef CONFIG_X86_64
28179- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
28180+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
28181 return 1;
28182 #endif
28183 return 0;
28184@@ -570,7 +641,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
28185 }
28186
28187 static const char nx_warning[] = KERN_CRIT
28188-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
28189+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
28190
28191 static void
28192 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
28193@@ -579,15 +650,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
28194 if (!oops_may_print())
28195 return;
28196
28197- if (error_code & PF_INSTR) {
28198+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
28199 unsigned int level;
28200
28201 pte_t *pte = lookup_address(address, &level);
28202
28203 if (pte && pte_present(*pte) && !pte_exec(*pte))
28204- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
28205+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
28206 }
28207
28208+#ifdef CONFIG_PAX_KERNEXEC
28209+ if (init_mm.start_code <= address && address < init_mm.end_code) {
28210+ if (current->signal->curr_ip)
28211+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
28212+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
28213+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
28214+ else
28215+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
28216+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
28217+ }
28218+#endif
28219+
28220 printk(KERN_ALERT "BUG: unable to handle kernel ");
28221 if (address < PAGE_SIZE)
28222 printk(KERN_CONT "NULL pointer dereference");
28223@@ -750,6 +833,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
28224 return;
28225 }
28226 #endif
28227+
28228+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28229+ if (pax_is_fetch_fault(regs, error_code, address)) {
28230+
28231+#ifdef CONFIG_PAX_EMUTRAMP
28232+ switch (pax_handle_fetch_fault(regs)) {
28233+ case 2:
28234+ return;
28235+ }
28236+#endif
28237+
28238+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
28239+ do_group_exit(SIGKILL);
28240+ }
28241+#endif
28242+
28243 /* Kernel addresses are always protection faults: */
28244 if (address >= TASK_SIZE)
28245 error_code |= PF_PROT;
28246@@ -835,7 +934,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
28247 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
28248 printk(KERN_ERR
28249 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
28250- tsk->comm, tsk->pid, address);
28251+ tsk->comm, task_pid_nr(tsk), address);
28252 code = BUS_MCEERR_AR;
28253 }
28254 #endif
28255@@ -898,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
28256 return 1;
28257 }
28258
28259+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28260+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
28261+{
28262+ pte_t *pte;
28263+ pmd_t *pmd;
28264+ spinlock_t *ptl;
28265+ unsigned char pte_mask;
28266+
28267+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
28268+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
28269+ return 0;
28270+
28271+ /* PaX: it's our fault, let's handle it if we can */
28272+
28273+ /* PaX: take a look at read faults before acquiring any locks */
28274+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
28275+ /* instruction fetch attempt from a protected page in user mode */
28276+ up_read(&mm->mmap_sem);
28277+
28278+#ifdef CONFIG_PAX_EMUTRAMP
28279+ switch (pax_handle_fetch_fault(regs)) {
28280+ case 2:
28281+ return 1;
28282+ }
28283+#endif
28284+
28285+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
28286+ do_group_exit(SIGKILL);
28287+ }
28288+
28289+ pmd = pax_get_pmd(mm, address);
28290+ if (unlikely(!pmd))
28291+ return 0;
28292+
28293+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
28294+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
28295+ pte_unmap_unlock(pte, ptl);
28296+ return 0;
28297+ }
28298+
28299+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
28300+ /* write attempt to a protected page in user mode */
28301+ pte_unmap_unlock(pte, ptl);
28302+ return 0;
28303+ }
28304+
28305+#ifdef CONFIG_SMP
28306+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
28307+#else
28308+ if (likely(address > get_limit(regs->cs)))
28309+#endif
28310+ {
28311+ set_pte(pte, pte_mkread(*pte));
28312+ __flush_tlb_one(address);
28313+ pte_unmap_unlock(pte, ptl);
28314+ up_read(&mm->mmap_sem);
28315+ return 1;
28316+ }
28317+
28318+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
28319+
28320+ /*
28321+ * PaX: fill DTLB with user rights and retry
28322+ */
28323+ __asm__ __volatile__ (
28324+ "orb %2,(%1)\n"
28325+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
28326+/*
28327+ * PaX: let this conditionally compiled 'invlpg' remind us of the behaviour
28328+ * of Intel's (and AMD's) TLBs: they do not cache PTEs that would raise *any*
28329+ * page fault when examined during a TLB load attempt. This is true not only
28330+ * for PTEs holding a non-present entry but also for present entries that
28331+ * would raise a page fault (such as those set up by PaX, or the copy-on-write
28332+ * mechanism). In effect, we do *not* need to flush the TLBs for our target
28333+ * pages, since their PTEs are never in the TLBs in the first place.
28334+ *
28335+ * The best thing about omitting the flush is that we gain around 15-20% speed
28336+ * in the fast path of the page fault handler and can drop the tracing, since
28337+ * we can no longer flush unintended entries.
28338+ */
28339+ "invlpg (%0)\n"
28340+#endif
28341+ __copyuser_seg"testb $0,(%0)\n"
28342+ "xorb %3,(%1)\n"
28343+ :
28344+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
28345+ : "memory", "cc");
28346+ pte_unmap_unlock(pte, ptl);
28347+ up_read(&mm->mmap_sem);
28348+ return 1;
28349+}
28350+#endif
28351+
28352 /*
28353 * Handle a spurious fault caused by a stale TLB entry.
28354 *
28355@@ -970,6 +1162,9 @@ int show_unhandled_signals = 1;
28356 static inline int
28357 access_error(unsigned long error_code, struct vm_area_struct *vma)
28358 {
28359+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
28360+ return 1;
28361+
28362 if (error_code & PF_WRITE) {
28363 /* write, present and write, not present: */
28364 if (unlikely(!(vma->vm_flags & VM_WRITE)))
28365@@ -998,7 +1193,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
28366 if (error_code & PF_USER)
28367 return false;
28368
28369- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
28370+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
28371 return false;
28372
28373 return true;
28374@@ -1014,18 +1209,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
28375 {
28376 struct vm_area_struct *vma;
28377 struct task_struct *tsk;
28378- unsigned long address;
28379 struct mm_struct *mm;
28380 int fault;
28381 int write = error_code & PF_WRITE;
28382 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
28383 (write ? FAULT_FLAG_WRITE : 0);
28384
28385- tsk = current;
28386- mm = tsk->mm;
28387-
28388 /* Get the faulting address: */
28389- address = read_cr2();
28390+ unsigned long address = read_cr2();
28391+
28392+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28393+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
28394+ if (!search_exception_tables(regs->ip)) {
28395+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
28396+ bad_area_nosemaphore(regs, error_code, address);
28397+ return;
28398+ }
28399+ if (address < PAX_USER_SHADOW_BASE) {
28400+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
28401+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
28402+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
28403+ } else
28404+ address -= PAX_USER_SHADOW_BASE;
28405+ }
28406+#endif
28407+
28408+ tsk = current;
28409+ mm = tsk->mm;
28410
28411 /*
28412 * Detect and handle instructions that would cause a page fault for
28413@@ -1086,7 +1296,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
28414 * User-mode registers count as a user access even for any
28415 * potential system fault or CPU buglet:
28416 */
28417- if (user_mode_vm(regs)) {
28418+ if (user_mode(regs)) {
28419 local_irq_enable();
28420 error_code |= PF_USER;
28421 } else {
28422@@ -1148,6 +1358,11 @@ retry:
28423 might_sleep();
28424 }
28425
28426+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28427+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
28428+ return;
28429+#endif
28430+
28431 vma = find_vma(mm, address);
28432 if (unlikely(!vma)) {
28433 bad_area(regs, error_code, address);
28434@@ -1159,18 +1374,24 @@ retry:
28435 bad_area(regs, error_code, address);
28436 return;
28437 }
28438- if (error_code & PF_USER) {
28439- /*
28440- * Accessing the stack below %sp is always a bug.
28441- * The large cushion allows instructions like enter
28442- * and pusha to work. ("enter $65535, $31" pushes
28443- * 32 pointers and then decrements %sp by 65535.)
28444- */
28445- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
28446- bad_area(regs, error_code, address);
28447- return;
28448- }
28449+ /*
28450+ * Accessing the stack below %sp is always a bug.
28451+ * The large cushion allows instructions like enter
28452+ * and pusha to work. ("enter $65535, $31" pushes
28453+ * 32 pointers and then decrements %sp by 65535.)
28454+ */
28455+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
28456+ bad_area(regs, error_code, address);
28457+ return;
28458 }
28459+
28460+#ifdef CONFIG_PAX_SEGMEXEC
28461+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
28462+ bad_area(regs, error_code, address);
28463+ return;
28464+ }
28465+#endif
28466+
28467 if (unlikely(expand_stack(vma, address))) {
28468 bad_area(regs, error_code, address);
28469 return;
28470@@ -1234,3 +1455,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
28471 __do_page_fault(regs, error_code);
28472 exception_exit(regs);
28473 }
28474+
28475+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28476+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
28477+{
28478+ struct mm_struct *mm = current->mm;
28479+ unsigned long ip = regs->ip;
28480+
28481+ if (v8086_mode(regs))
28482+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
28483+
28484+#ifdef CONFIG_PAX_PAGEEXEC
28485+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
28486+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
28487+ return true;
28488+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
28489+ return true;
28490+ return false;
28491+ }
28492+#endif
28493+
28494+#ifdef CONFIG_PAX_SEGMEXEC
28495+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
28496+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
28497+ return true;
28498+ return false;
28499+ }
28500+#endif
28501+
28502+ return false;
28503+}
28504+#endif
28505+
28506+#ifdef CONFIG_PAX_EMUTRAMP
28507+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
28508+{
28509+ int err;
28510+
28511+ do { /* PaX: libffi trampoline emulation */
28512+ unsigned char mov, jmp;
28513+ unsigned int addr1, addr2;
28514+
28515+#ifdef CONFIG_X86_64
28516+ if ((regs->ip + 9) >> 32)
28517+ break;
28518+#endif
28519+
28520+ err = get_user(mov, (unsigned char __user *)regs->ip);
28521+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
28522+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
28523+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
28524+
28525+ if (err)
28526+ break;
28527+
28528+ if (mov == 0xB8 && jmp == 0xE9) {
28529+ regs->ax = addr1;
28530+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
28531+ return 2;
28532+ }
28533+ } while (0);
28534+
28535+ do { /* PaX: gcc trampoline emulation #1 */
28536+ unsigned char mov1, mov2;
28537+ unsigned short jmp;
28538+ unsigned int addr1, addr2;
28539+
28540+#ifdef CONFIG_X86_64
28541+ if ((regs->ip + 11) >> 32)
28542+ break;
28543+#endif
28544+
28545+ err = get_user(mov1, (unsigned char __user *)regs->ip);
28546+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
28547+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
28548+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
28549+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
28550+
28551+ if (err)
28552+ break;
28553+
28554+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
28555+ regs->cx = addr1;
28556+ regs->ax = addr2;
28557+ regs->ip = addr2;
28558+ return 2;
28559+ }
28560+ } while (0);
28561+
28562+ do { /* PaX: gcc trampoline emulation #2 */
28563+ unsigned char mov, jmp;
28564+ unsigned int addr1, addr2;
28565+
28566+#ifdef CONFIG_X86_64
28567+ if ((regs->ip + 9) >> 32)
28568+ break;
28569+#endif
28570+
28571+ err = get_user(mov, (unsigned char __user *)regs->ip);
28572+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
28573+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
28574+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
28575+
28576+ if (err)
28577+ break;
28578+
28579+ if (mov == 0xB9 && jmp == 0xE9) {
28580+ regs->cx = addr1;
28581+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
28582+ return 2;
28583+ }
28584+ } while (0);
28585+
28586+ return 1; /* PaX in action */
28587+}
28588+
28589+#ifdef CONFIG_X86_64
28590+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
28591+{
28592+ int err;
28593+
28594+ do { /* PaX: libffi trampoline emulation */
28595+ unsigned short mov1, mov2, jmp1;
28596+ unsigned char stcclc, jmp2;
28597+ unsigned long addr1, addr2;
28598+
28599+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28600+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28601+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28602+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28603+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
28604+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
28605+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
28606+
28607+ if (err)
28608+ break;
28609+
28610+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28611+ regs->r11 = addr1;
28612+ regs->r10 = addr2;
28613+ if (stcclc == 0xF8)
28614+ regs->flags &= ~X86_EFLAGS_CF;
28615+ else
28616+ regs->flags |= X86_EFLAGS_CF;
28617+ regs->ip = addr1;
28618+ return 2;
28619+ }
28620+ } while (0);
28621+
28622+ do { /* PaX: gcc trampoline emulation #1 */
28623+ unsigned short mov1, mov2, jmp1;
28624+ unsigned char jmp2;
28625+ unsigned int addr1;
28626+ unsigned long addr2;
28627+
28628+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28629+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
28630+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
28631+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
28632+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
28633+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
28634+
28635+ if (err)
28636+ break;
28637+
28638+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28639+ regs->r11 = addr1;
28640+ regs->r10 = addr2;
28641+ regs->ip = addr1;
28642+ return 2;
28643+ }
28644+ } while (0);
28645+
28646+ do { /* PaX: gcc trampoline emulation #2 */
28647+ unsigned short mov1, mov2, jmp1;
28648+ unsigned char jmp2;
28649+ unsigned long addr1, addr2;
28650+
28651+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28652+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28653+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28654+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28655+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
28656+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
28657+
28658+ if (err)
28659+ break;
28660+
28661+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28662+ regs->r11 = addr1;
28663+ regs->r10 = addr2;
28664+ regs->ip = addr1;
28665+ return 2;
28666+ }
28667+ } while (0);
28668+
28669+ return 1; /* PaX in action */
28670+}
28671+#endif
28672+
28673+/*
28674+ * PaX: decide what to do with offenders (regs->ip = fault address)
28675+ *
28676+ * returns 1 when task should be killed
28677+ * 2 when gcc trampoline was detected
28678+ */
28679+static int pax_handle_fetch_fault(struct pt_regs *regs)
28680+{
28681+ if (v8086_mode(regs))
28682+ return 1;
28683+
28684+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
28685+ return 1;
28686+
28687+#ifdef CONFIG_X86_32
28688+ return pax_handle_fetch_fault_32(regs);
28689+#else
28690+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
28691+ return pax_handle_fetch_fault_32(regs);
28692+ else
28693+ return pax_handle_fetch_fault_64(regs);
28694+#endif
28695+}
28696+#endif
28697+
28698+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28699+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
28700+{
28701+ long i;
28702+
28703+ printk(KERN_ERR "PAX: bytes at PC: ");
28704+ for (i = 0; i < 20; i++) {
28705+ unsigned char c;
28706+ if (get_user(c, (unsigned char __force_user *)pc+i))
28707+ printk(KERN_CONT "?? ");
28708+ else
28709+ printk(KERN_CONT "%02x ", c);
28710+ }
28711+ printk("\n");
28712+
28713+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
28714+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
28715+ unsigned long c;
28716+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
28717+#ifdef CONFIG_X86_32
28718+ printk(KERN_CONT "???????? ");
28719+#else
28720+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
28721+ printk(KERN_CONT "???????? ???????? ");
28722+ else
28723+ printk(KERN_CONT "???????????????? ");
28724+#endif
28725+ } else {
28726+#ifdef CONFIG_X86_64
28727+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
28728+ printk(KERN_CONT "%08x ", (unsigned int)c);
28729+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
28730+ } else
28731+#endif
28732+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
28733+ }
28734+ }
28735+ printk("\n");
28736+}
28737+#endif
28738+
28739+/**
28740+ * probe_kernel_write(): safely attempt to write to a location
28741+ * @dst: address to write to
28742+ * @src: pointer to the data that shall be written
28743+ * @size: size of the data chunk
28744+ *
28745+ * Safely write to address @dst from the buffer at @src. If a kernel fault
28746+ * happens, handle that and return -EFAULT.
28747+ */
28748+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
28749+{
28750+ long ret;
28751+ mm_segment_t old_fs = get_fs();
28752+
28753+ set_fs(KERNEL_DS);
28754+ pagefault_disable();
28755+ pax_open_kernel();
28756+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
28757+ pax_close_kernel();
28758+ pagefault_enable();
28759+ set_fs(old_fs);
28760+
28761+ return ret ? -EFAULT : 0;
28762+}
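
pax_handle_fetch_fault_32() above recognizes a handful of fixed byte patterns (libffi and gcc nested-function trampolines) at the faulting IP and emulates them instead of executing from a non-executable page. A userspace sketch of the first pattern — B8 imm32 (mov $imm32,%eax) followed by E9 rel32 (jmp rel32) — decoded from a byte buffer; the struct is a stand-in for pt_regs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct regs32 { uint32_t ax, ip; };     /* stand-in for pt_regs */

/* Returns 2 when the trampoline was emulated, 1 when the pattern does
 * not match (the caller would then kill the task). */
static int emulate_libffi_trampoline(struct regs32 *r, const uint8_t *mem)
{
    uint32_t imm, rel;

    if (mem[0] != 0xB8 || mem[5] != 0xE9)
        return 1;
    memcpy(&imm, mem + 1, 4);           /* mov $imm32, %eax */
    memcpy(&rel, mem + 6, 4);           /* jmp rel32 */
    r->ax = imm;
    r->ip = r->ip + rel + 10;           /* rel32 counts from the next insn */
    return 2;
}

int main(void)
{
    uint8_t tramp[10] = { 0xB8, 0x78, 0x56, 0x34, 0x12,   /* mov $0x12345678,%eax */
                          0xE9, 0x10, 0x00, 0x00, 0x00 }; /* jmp +0x10 */
    struct regs32 r = { 0, 0x1000 };
    if (emulate_libffi_trampoline(&r, tramp) == 2)
        printf("ax=%#x ip=%#x\n", r.ax, r.ip);   /* ax=0x12345678 ip=0x101a */
    return 0;
}
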
28763diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
28764index dd74e46..7d26398 100644
28765--- a/arch/x86/mm/gup.c
28766+++ b/arch/x86/mm/gup.c
28767@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
28768 addr = start;
28769 len = (unsigned long) nr_pages << PAGE_SHIFT;
28770 end = start + len;
28771- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28772+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28773 (void __user *)start, len)))
28774 return 0;
28775
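
The gup change swaps access_ok() for __access_ok(), presumably to keep this lockless fast path clear of whatever extra work the instrumented access_ok() does under these patches; either way the underlying test is a wrap-safe range check. A sketch of that check:

#include <stdio.h>

/* The range [addr, addr+len) must fit below 'limit' without wrapping. */
static int range_ok(unsigned long addr, unsigned long len, unsigned long limit)
{
    return len <= limit && addr <= limit - len;
}

int main(void)
{
    unsigned long limit = 0xC0000000UL;          /* illustrative TASK_SIZE */
    printf("%d %d\n",
           range_ok(0x1000, 0x100, limit),       /* 1: fits */
           range_ok(limit - 8, 0x100, limit));   /* 0: crosses the limit */
    return 0;
}
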
28776diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
28777index 6f31ee5..8ee4164 100644
28778--- a/arch/x86/mm/highmem_32.c
28779+++ b/arch/x86/mm/highmem_32.c
28780@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
28781 idx = type + KM_TYPE_NR*smp_processor_id();
28782 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28783 BUG_ON(!pte_none(*(kmap_pte-idx)));
28784+
28785+ pax_open_kernel();
28786 set_pte(kmap_pte-idx, mk_pte(page, prot));
28787+ pax_close_kernel();
28788+
28789 arch_flush_lazy_mmu_mode();
28790
28791 return (void *)vaddr;
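
kmap_atomic_prot()'s set_pte() is bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift the KERNEXEC write protection for the one sanctioned page-table update. A userspace analogy of that open-write-close discipline, using mprotect() on a read-only page (illustrative only; the kernel toggles CR0.WP or PTE bits, not mprotect):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "initial");
    mprotect(p, pagesz, PROT_READ);              /* steady state: read-only */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()" */
    strcpy(p, "updated");                        /* the one sanctioned write */
    mprotect(p, pagesz, PROT_READ);              /* "pax_close_kernel()" */

    puts(p);
    munmap(p, pagesz);
    return 0;
}
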
28792diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
28793index ae1aa71..56316db 100644
28794--- a/arch/x86/mm/hugetlbpage.c
28795+++ b/arch/x86/mm/hugetlbpage.c
28796@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
28797 info.flags = 0;
28798 info.length = len;
28799 info.low_limit = TASK_UNMAPPED_BASE;
28800+
28801+#ifdef CONFIG_PAX_RANDMMAP
28802+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28803+ info.low_limit += current->mm->delta_mmap;
28804+#endif
28805+
28806 info.high_limit = TASK_SIZE;
28807 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28808 info.align_offset = 0;
28809@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28810 VM_BUG_ON(addr != -ENOMEM);
28811 info.flags = 0;
28812 info.low_limit = TASK_UNMAPPED_BASE;
28813+
28814+#ifdef CONFIG_PAX_RANDMMAP
28815+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28816+ info.low_limit += current->mm->delta_mmap;
28817+#endif
28818+
28819 info.high_limit = TASK_SIZE;
28820 addr = vm_unmapped_area(&info);
28821 }
28822@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28823 struct hstate *h = hstate_file(file);
28824 struct mm_struct *mm = current->mm;
28825 struct vm_area_struct *vma;
28826+ unsigned long pax_task_size = TASK_SIZE;
28827+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
28828
28829 if (len & ~huge_page_mask(h))
28830 return -EINVAL;
28831- if (len > TASK_SIZE)
28832+
28833+#ifdef CONFIG_PAX_SEGMEXEC
28834+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28835+ pax_task_size = SEGMEXEC_TASK_SIZE;
28836+#endif
28837+
28838+ pax_task_size -= PAGE_SIZE;
28839+
28840+ if (len > pax_task_size)
28841 return -ENOMEM;
28842
28843 if (flags & MAP_FIXED) {
28844@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28845 return addr;
28846 }
28847
28848+#ifdef CONFIG_PAX_RANDMMAP
28849+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28850+#endif
28851+
28852 if (addr) {
28853 addr = ALIGN(addr, huge_page_size(h));
28854 vma = find_vma(mm, addr);
28855- if (TASK_SIZE - len >= addr &&
28856- (!vma || addr + len <= vma->vm_start))
28857+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28858 return addr;
28859 }
28860 if (mm->get_unmapped_area == arch_get_unmapped_area)
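
Both search paths above shift info.low_limit by mm->delta_mmap when RANDMMAP is active, so the bottom-up search for a free huge-page slot starts from a per-process random base rather than a fixed one. A sketch of that bias; the constants, helper name, and use of rand() are illustrative, not the kernel's entropy source:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT           12
#define TASK_UNMAPPED_BASE   0x40000000UL   /* illustrative 32-bit layout */

/* Per-process random page offset, decided once at exec time. */
static unsigned long pick_delta_mmap(unsigned int bits)
{
    return ((unsigned long)rand() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
    srand((unsigned)time(NULL));
    unsigned long delta = pick_delta_mmap(16);  /* up to 64Ki pages */
    printf("search starts at %#lx\n", TASK_UNMAPPED_BASE + delta);
    return 0;
}
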
28861diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28862index d7aea41..0fc945b 100644
28863--- a/arch/x86/mm/init.c
28864+++ b/arch/x86/mm/init.c
28865@@ -4,6 +4,7 @@
28866 #include <linux/swap.h>
28867 #include <linux/memblock.h>
28868 #include <linux/bootmem.h> /* for max_low_pfn */
28869+#include <linux/tboot.h>
28870
28871 #include <asm/cacheflush.h>
28872 #include <asm/e820.h>
28873@@ -16,6 +17,8 @@
28874 #include <asm/tlb.h>
28875 #include <asm/proto.h>
28876 #include <asm/dma.h> /* for MAX_DMA_PFN */
28877+#include <asm/desc.h>
28878+#include <asm/bios_ebda.h>
28879
28880 unsigned long __initdata pgt_buf_start;
28881 unsigned long __meminitdata pgt_buf_end;
28882@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
28883 {
28884 int i;
28885 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
28886- unsigned long start = 0, good_end;
28887+ unsigned long start = 0x100000, good_end;
28888 phys_addr_t base;
28889
28890 for (i = 0; i < nr_range; i++) {
28891@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
28892 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28893 * mmio resources as well as potential bios/acpi data regions.
28894 */
28895+
28896+#ifdef CONFIG_GRKERNSEC_KMEM
28897+static unsigned int ebda_start __read_only;
28898+static unsigned int ebda_end __read_only;
28899+#endif
28900+
28901 int devmem_is_allowed(unsigned long pagenr)
28902 {
28903- if (pagenr < 256)
28904+#ifdef CONFIG_GRKERNSEC_KMEM
28905+ /* allow BDA */
28906+ if (!pagenr)
28907 return 1;
28908+ /* allow EBDA */
28909+ if (pagenr >= ebda_start && pagenr < ebda_end)
28910+ return 1;
28911+ /* if tboot is in use, allow access to its hardcoded serial log range */
28912+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28913+ return 1;
28914+#else
28915+ if (!pagenr)
28916+ return 1;
28917+#ifdef CONFIG_VM86
28918+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28919+ return 1;
28920+#endif
28921+#endif
28922+
28923+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28924+ return 1;
28925+#ifdef CONFIG_GRKERNSEC_KMEM
28926+ /* throw out everything else below 1MB */
28927+ if (pagenr <= 256)
28928+ return 0;
28929+#endif
28930 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28931 return 0;
28932 if (!page_is_ram(pagenr))
28933@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28934 #endif
28935 }
28936
28937+#ifdef CONFIG_GRKERNSEC_KMEM
28938+static inline void gr_init_ebda(void)
28939+{
28940+ unsigned int ebda_addr;
28941+ unsigned int ebda_size = 0;
28942+
28943+ ebda_addr = get_bios_ebda();
28944+ if (ebda_addr) {
28945+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28946+ ebda_size <<= 10;
28947+ }
28948+ if (ebda_addr && ebda_size) {
28949+ ebda_start = ebda_addr >> PAGE_SHIFT;
28950+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28951+ } else {
28952+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28953+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28954+ }
28955+}
28956+#else
28957+static inline void gr_init_ebda(void) { }
28958+#endif
28959+
28960 void free_initmem(void)
28961 {
28962+#ifdef CONFIG_PAX_KERNEXEC
28963+#ifdef CONFIG_X86_32
28964+ /* PaX: limit KERNEL_CS to actual size */
28965+ unsigned long addr, limit;
28966+ struct desc_struct d;
28967+ int cpu;
28968+#else
28969+ pgd_t *pgd;
28970+ pud_t *pud;
28971+ pmd_t *pmd;
28972+ unsigned long addr, end;
28973+#endif
28974+#endif
28975+
28976+ gr_init_ebda();
28977+
28978+#ifdef CONFIG_PAX_KERNEXEC
28979+#ifdef CONFIG_X86_32
28980+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28981+ limit = (limit - 1UL) >> PAGE_SHIFT;
28982+
28983+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28984+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28985+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28986+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28987+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28988+ }
28989+
28990+ /* PaX: make KERNEL_CS read-only */
28991+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28992+ if (!paravirt_enabled())
28993+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28994+/*
28995+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28996+ pgd = pgd_offset_k(addr);
28997+ pud = pud_offset(pgd, addr);
28998+ pmd = pmd_offset(pud, addr);
28999+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
29000+ }
29001+*/
29002+#ifdef CONFIG_X86_PAE
29003+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
29004+/*
29005+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
29006+ pgd = pgd_offset_k(addr);
29007+ pud = pud_offset(pgd, addr);
29008+ pmd = pmd_offset(pud, addr);
29009+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
29010+ }
29011+*/
29012+#endif
29013+
29014+#ifdef CONFIG_MODULES
29015+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
29016+#endif
29017+
29018+#else
29019+ /* PaX: make kernel code/rodata read-only, rest non-executable */
29020+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
29021+ pgd = pgd_offset_k(addr);
29022+ pud = pud_offset(pgd, addr);
29023+ pmd = pmd_offset(pud, addr);
29024+ if (!pmd_present(*pmd))
29025+ continue;
29026+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
29027+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
29028+ else
29029+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
29030+ }
29031+
29032+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
29033+ end = addr + KERNEL_IMAGE_SIZE;
29034+ for (; addr < end; addr += PMD_SIZE) {
29035+ pgd = pgd_offset_k(addr);
29036+ pud = pud_offset(pgd, addr);
29037+ pmd = pmd_offset(pud, addr);
29038+ if (!pmd_present(*pmd))
29039+ continue;
29040+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
29041+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
29042+ }
29043+#endif
29044+
29045+ flush_tlb_all();
29046+#endif
29047+
29048 free_init_pages("unused kernel memory",
29049 (unsigned long)(&__init_begin),
29050 (unsigned long)(&__init_end));
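gr_init_ebda() above derives the EBDA page range from BIOS data: the first byte of the EBDA holds its size in KiB, and the computed range is clamped at 0xa0000 (the start of legacy video RAM), with a fallback of the last page below that boundary. A standalone sketch of the same arithmetic, assuming the usual x86 page size and constants:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static void ebda_page_range(unsigned long ebda_addr, unsigned int size_kib,
                                unsigned long *start, unsigned long *end)
    {
        unsigned long ebda_size = (unsigned long)size_kib << 10;

        if (ebda_addr && ebda_size) {
            unsigned long top = PAGE_ALIGN(ebda_addr + ebda_size);
            if (top > 0xa0000UL)                 /* clamp at legacy video RAM */
                top = 0xa0000UL;
            *start = ebda_addr >> PAGE_SHIFT;
            *end   = top >> PAGE_SHIFT;
        } else {                                 /* fallback: last page below 0xa0000 */
            *start = 0x9f000UL >> PAGE_SHIFT;
            *end   = 0xa0000UL >> PAGE_SHIFT;
        }
    }

    int main(void)
    {
        unsigned long s, e;
        ebda_page_range(0x9fc00UL, 1, &s, &e);   /* a typical 1 KiB EBDA */
        printf("EBDA pages: %#lx..%#lx\n", s, e); /* 0x9f..0xa0 */
        return 0;
    }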
29051diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
29052index 745d66b..56bf568 100644
29053--- a/arch/x86/mm/init_32.c
29054+++ b/arch/x86/mm/init_32.c
29055@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
29056 }
29057
29058 /*
29059- * Creates a middle page table and puts a pointer to it in the
29060- * given global directory entry. This only returns the gd entry
29061- * in non-PAE compilation mode, since the middle layer is folded.
29062- */
29063-static pmd_t * __init one_md_table_init(pgd_t *pgd)
29064-{
29065- pud_t *pud;
29066- pmd_t *pmd_table;
29067-
29068-#ifdef CONFIG_X86_PAE
29069- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
29070- if (after_bootmem)
29071- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
29072- else
29073- pmd_table = (pmd_t *)alloc_low_page();
29074- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
29075- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
29076- pud = pud_offset(pgd, 0);
29077- BUG_ON(pmd_table != pmd_offset(pud, 0));
29078-
29079- return pmd_table;
29080- }
29081-#endif
29082- pud = pud_offset(pgd, 0);
29083- pmd_table = pmd_offset(pud, 0);
29084-
29085- return pmd_table;
29086-}
29087-
29088-/*
29089 * Create a page table and place a pointer to it in a middle page
29090 * directory entry:
29091 */
29092@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
29093 page_table = (pte_t *)alloc_low_page();
29094
29095 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
29096+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29097+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
29098+#else
29099 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
29100+#endif
29101 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
29102 }
29103
29104 return pte_offset_kernel(pmd, 0);
29105 }
29106
29107+static pmd_t * __init one_md_table_init(pgd_t *pgd)
29108+{
29109+ pud_t *pud;
29110+ pmd_t *pmd_table;
29111+
29112+ pud = pud_offset(pgd, 0);
29113+ pmd_table = pmd_offset(pud, 0);
29114+
29115+ return pmd_table;
29116+}
29117+
29118 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
29119 {
29120 int pgd_idx = pgd_index(vaddr);
29121@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
29122 int pgd_idx, pmd_idx;
29123 unsigned long vaddr;
29124 pgd_t *pgd;
29125+ pud_t *pud;
29126 pmd_t *pmd;
29127 pte_t *pte = NULL;
29128
29129@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
29130 pgd = pgd_base + pgd_idx;
29131
29132 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
29133- pmd = one_md_table_init(pgd);
29134- pmd = pmd + pmd_index(vaddr);
29135+ pud = pud_offset(pgd, vaddr);
29136+ pmd = pmd_offset(pud, vaddr);
29137+
29138+#ifdef CONFIG_X86_PAE
29139+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
29140+#endif
29141+
29142 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
29143 pmd++, pmd_idx++) {
29144 pte = page_table_kmap_check(one_page_table_init(pmd),
29145@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
29146 }
29147 }
29148
29149-static inline int is_kernel_text(unsigned long addr)
29150+static inline int is_kernel_text(unsigned long start, unsigned long end)
29151 {
29152- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
29153- return 1;
29154- return 0;
29155+ if ((start > ktla_ktva((unsigned long)_etext) ||
29156+ end <= ktla_ktva((unsigned long)_stext)) &&
29157+ (start > ktla_ktva((unsigned long)_einittext) ||
29158+ end <= ktla_ktva((unsigned long)_sinittext)) &&
29159+
29160+#ifdef CONFIG_ACPI_SLEEP
29161+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
29162+#endif
29163+
29164+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
29165+ return 0;
29166+ return 1;
29167 }
29168
29169 /*
29170@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
29171 unsigned long last_map_addr = end;
29172 unsigned long start_pfn, end_pfn;
29173 pgd_t *pgd_base = swapper_pg_dir;
29174- int pgd_idx, pmd_idx, pte_ofs;
29175+ unsigned int pgd_idx, pmd_idx, pte_ofs;
29176 unsigned long pfn;
29177 pgd_t *pgd;
29178+ pud_t *pud;
29179 pmd_t *pmd;
29180 pte_t *pte;
29181 unsigned pages_2m, pages_4k;
29182@@ -280,8 +281,13 @@ repeat:
29183 pfn = start_pfn;
29184 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
29185 pgd = pgd_base + pgd_idx;
29186- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
29187- pmd = one_md_table_init(pgd);
29188+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
29189+ pud = pud_offset(pgd, 0);
29190+ pmd = pmd_offset(pud, 0);
29191+
29192+#ifdef CONFIG_X86_PAE
29193+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
29194+#endif
29195
29196 if (pfn >= end_pfn)
29197 continue;
29198@@ -293,14 +299,13 @@ repeat:
29199 #endif
29200 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
29201 pmd++, pmd_idx++) {
29202- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
29203+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
29204
29205 /*
29206 * Map with big pages if possible, otherwise
29207 * create normal page tables:
29208 */
29209 if (use_pse) {
29210- unsigned int addr2;
29211 pgprot_t prot = PAGE_KERNEL_LARGE;
29212 /*
29213 * first pass will use the same initial
29214@@ -310,11 +315,7 @@ repeat:
29215 __pgprot(PTE_IDENT_ATTR |
29216 _PAGE_PSE);
29217
29218- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
29219- PAGE_OFFSET + PAGE_SIZE-1;
29220-
29221- if (is_kernel_text(addr) ||
29222- is_kernel_text(addr2))
29223+ if (is_kernel_text(address, address + PMD_SIZE))
29224 prot = PAGE_KERNEL_LARGE_EXEC;
29225
29226 pages_2m++;
29227@@ -331,7 +332,7 @@ repeat:
29228 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
29229 pte += pte_ofs;
29230 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
29231- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
29232+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
29233 pgprot_t prot = PAGE_KERNEL;
29234 /*
29235 * first pass will use the same initial
29236@@ -339,7 +340,7 @@ repeat:
29237 */
29238 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
29239
29240- if (is_kernel_text(addr))
29241+ if (is_kernel_text(address, address + PAGE_SIZE))
29242 prot = PAGE_KERNEL_EXEC;
29243
29244 pages_4k++;
29245@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
29246
29247 pud = pud_offset(pgd, va);
29248 pmd = pmd_offset(pud, va);
29249- if (!pmd_present(*pmd))
29250+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
29251 break;
29252
29253 pte = pte_offset_kernel(pmd, va);
29254@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
29255
29256 static void __init pagetable_init(void)
29257 {
29258- pgd_t *pgd_base = swapper_pg_dir;
29259-
29260- permanent_kmaps_init(pgd_base);
29261+ permanent_kmaps_init(swapper_pg_dir);
29262 }
29263
29264-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
29265+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
29266 EXPORT_SYMBOL_GPL(__supported_pte_mask);
29267
29268 /* user-defined highmem size */
29269@@ -728,6 +727,12 @@ void __init mem_init(void)
29270
29271 pci_iommu_alloc();
29272
29273+#ifdef CONFIG_PAX_PER_CPU_PGD
29274+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
29275+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
29276+ KERNEL_PGD_PTRS);
29277+#endif
29278+
29279 #ifdef CONFIG_FLATMEM
29280 BUG_ON(!mem_map);
29281 #endif
29282@@ -754,7 +759,7 @@ void __init mem_init(void)
29283 reservedpages++;
29284
29285 codesize = (unsigned long) &_etext - (unsigned long) &_text;
29286- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
29287+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
29288 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
29289
29290 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
29291@@ -795,10 +800,10 @@ void __init mem_init(void)
29292 ((unsigned long)&__init_end -
29293 (unsigned long)&__init_begin) >> 10,
29294
29295- (unsigned long)&_etext, (unsigned long)&_edata,
29296- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
29297+ (unsigned long)&_sdata, (unsigned long)&_edata,
29298+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
29299
29300- (unsigned long)&_text, (unsigned long)&_etext,
29301+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
29302 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
29303
29304 /*
29305@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
29306 if (!kernel_set_to_readonly)
29307 return;
29308
29309+ start = ktla_ktva(start);
29310 pr_debug("Set kernel text: %lx - %lx for read write\n",
29311 start, start+size);
29312
29313@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
29314 if (!kernel_set_to_readonly)
29315 return;
29316
29317+ start = ktla_ktva(start);
29318 pr_debug("Set kernel text: %lx - %lx for read only\n",
29319 start, start+size);
29320
29321@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
29322 unsigned long start = PFN_ALIGN(_text);
29323 unsigned long size = PFN_ALIGN(_etext) - start;
29324
29325+ start = ktla_ktva(start);
29326 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
29327 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
29328 size >> 10);
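The rewritten is_kernel_text() above takes a [start, end) range and answers whether it intersects any executable region: kernel text, init text, the ACPI wakeup trampoline, or the low BIOS area. Each clause is the standard disjointness test for half-open intervals, and the overall result is the negation of "disjoint from all of them". The predicate in isolation, with made-up addresses:

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long lo, hi; };      /* half-open: [lo, hi) */

    /* [start, end) and r are disjoint iff start >= r.hi or end <= r.lo;
     * any other relation means they overlap. */
    static bool overlaps(unsigned long start, unsigned long end, struct range r)
    {
        return !(start >= r.hi || end <= r.lo);
    }

    int main(void)
    {
        struct range text = { 0xc1000000UL, 0xc1400000UL };  /* illustrative _stext.._etext */
        printf("%d\n", overlaps(0xc13ff000UL, 0xc1400000UL, text)); /* 1: last text page   */
        printf("%d\n", overlaps(0xc1400000UL, 0xc1600000UL, text)); /* 0: starts at _etext */
        return 0;
    }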
29329diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
29330index 75c9a6a..498d677 100644
29331--- a/arch/x86/mm/init_64.c
29332+++ b/arch/x86/mm/init_64.c
29333@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
29334 * around without checking the pgd every time.
29335 */
29336
29337-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
29338+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
29339 EXPORT_SYMBOL_GPL(__supported_pte_mask);
29340
29341 int force_personality32;
29342@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
29343
29344 for (address = start; address <= end; address += PGDIR_SIZE) {
29345 const pgd_t *pgd_ref = pgd_offset_k(address);
29346+
29347+#ifdef CONFIG_PAX_PER_CPU_PGD
29348+ unsigned long cpu;
29349+#else
29350 struct page *page;
29351+#endif
29352
29353 if (pgd_none(*pgd_ref))
29354 continue;
29355
29356 spin_lock(&pgd_lock);
29357+
29358+#ifdef CONFIG_PAX_PER_CPU_PGD
29359+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29360+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
29361+#else
29362 list_for_each_entry(page, &pgd_list, lru) {
29363 pgd_t *pgd;
29364 spinlock_t *pgt_lock;
29365@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
29366 /* the pgt_lock only for Xen */
29367 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
29368 spin_lock(pgt_lock);
29369+#endif
29370
29371 if (pgd_none(*pgd))
29372 set_pgd(pgd, *pgd_ref);
29373@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
29374 BUG_ON(pgd_page_vaddr(*pgd)
29375 != pgd_page_vaddr(*pgd_ref));
29376
29377+#ifndef CONFIG_PAX_PER_CPU_PGD
29378 spin_unlock(pgt_lock);
29379+#endif
29380+
29381 }
29382 spin_unlock(&pgd_lock);
29383 }
29384@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
29385 {
29386 if (pgd_none(*pgd)) {
29387 pud_t *pud = (pud_t *)spp_getpage();
29388- pgd_populate(&init_mm, pgd, pud);
29389+ pgd_populate_kernel(&init_mm, pgd, pud);
29390 if (pud != pud_offset(pgd, 0))
29391 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
29392 pud, pud_offset(pgd, 0));
29393@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
29394 {
29395 if (pud_none(*pud)) {
29396 pmd_t *pmd = (pmd_t *) spp_getpage();
29397- pud_populate(&init_mm, pud, pmd);
29398+ pud_populate_kernel(&init_mm, pud, pmd);
29399 if (pmd != pmd_offset(pud, 0))
29400 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
29401 pmd, pmd_offset(pud, 0));
29402@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
29403 pmd = fill_pmd(pud, vaddr);
29404 pte = fill_pte(pmd, vaddr);
29405
29406+ pax_open_kernel();
29407 set_pte(pte, new_pte);
29408+ pax_close_kernel();
29409
29410 /*
29411 * It's enough to flush this one mapping.
29412@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
29413 pgd = pgd_offset_k((unsigned long)__va(phys));
29414 if (pgd_none(*pgd)) {
29415 pud = (pud_t *) spp_getpage();
29416- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
29417- _PAGE_USER));
29418+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
29419 }
29420 pud = pud_offset(pgd, (unsigned long)__va(phys));
29421 if (pud_none(*pud)) {
29422 pmd = (pmd_t *) spp_getpage();
29423- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
29424- _PAGE_USER));
29425+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
29426 }
29427 pmd = pmd_offset(pud, phys);
29428 BUG_ON(!pmd_none(*pmd));
29429@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
29430 if (pfn >= pgt_buf_top)
29431 panic("alloc_low_page: ran out of memory");
29432
29433- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
29434+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
29435 clear_page(adr);
29436 *phys = pfn * PAGE_SIZE;
29437 return adr;
29438@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
29439
29440 phys = __pa(virt);
29441 left = phys & (PAGE_SIZE - 1);
29442- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
29443+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
29444 adr = (void *)(((unsigned long)adr) | left);
29445
29446 return adr;
29447@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
29448 unmap_low_page(pmd);
29449
29450 spin_lock(&init_mm.page_table_lock);
29451- pud_populate(&init_mm, pud, __va(pmd_phys));
29452+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
29453 spin_unlock(&init_mm.page_table_lock);
29454 }
29455 __flush_tlb_all();
29456@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
29457 unmap_low_page(pud);
29458
29459 spin_lock(&init_mm.page_table_lock);
29460- pgd_populate(&init_mm, pgd, __va(pud_phys));
29461+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
29462 spin_unlock(&init_mm.page_table_lock);
29463 pgd_changed = true;
29464 }
29465@@ -693,6 +707,12 @@ void __init mem_init(void)
29466
29467 pci_iommu_alloc();
29468
29469+#ifdef CONFIG_PAX_PER_CPU_PGD
29470+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
29471+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
29472+ KERNEL_PGD_PTRS);
29473+#endif
29474+
29475 /* clear_bss() already clear the empty_zero_page */
29476
29477 reservedpages = 0;
29478@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
29479 static struct vm_area_struct gate_vma = {
29480 .vm_start = VSYSCALL_START,
29481 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
29482- .vm_page_prot = PAGE_READONLY_EXEC,
29483- .vm_flags = VM_READ | VM_EXEC
29484+ .vm_page_prot = PAGE_READONLY,
29485+ .vm_flags = VM_READ
29486 };
29487
29488 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
29489@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
29490
29491 const char *arch_vma_name(struct vm_area_struct *vma)
29492 {
29493- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
29494+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
29495 return "[vdso]";
29496 if (vma == &gate_vma)
29497 return "[vsyscall]";
29498diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
29499index 7b179b4..6bd17777 100644
29500--- a/arch/x86/mm/iomap_32.c
29501+++ b/arch/x86/mm/iomap_32.c
29502@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
29503 type = kmap_atomic_idx_push();
29504 idx = type + KM_TYPE_NR * smp_processor_id();
29505 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
29506+
29507+ pax_open_kernel();
29508 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
29509+ pax_close_kernel();
29510+
29511 arch_flush_lazy_mmu_mode();
29512
29513 return (void *)vaddr;
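The pax_open_kernel()/pax_close_kernel() pair above brackets a single page-table store so that otherwise read-only kernel state is writable for exactly that window. A userspace analogue of the same bracket pattern, using mprotect() in place of the kernel primitive (which toggles CR0.WP or switches page tables instead):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "pte");
        mprotect(p, psz, PROT_READ);              /* steady state: read-only   */

        mprotect(p, psz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()"       */
        p[0] = 'P';                               /* the one privileged store  */
        mprotect(p, psz, PROT_READ);              /* "pax_close_kernel()"      */

        printf("%s\n", p);                        /* Pte */
        return 0;
    }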
29514diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
29515index 78fe3f1..73b95e2 100644
29516--- a/arch/x86/mm/ioremap.c
29517+++ b/arch/x86/mm/ioremap.c
29518@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
29519 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
29520 int is_ram = page_is_ram(pfn);
29521
29522- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
29523+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
29524 return NULL;
29525 WARN_ON_ONCE(is_ram);
29526 }
29527@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
29528 *
29529 * Caller must ensure there is only one unmapping for the same pointer.
29530 */
29531-void iounmap(volatile void __iomem *addr)
29532+void iounmap(const volatile void __iomem *addr)
29533 {
29534 struct vm_struct *p, *o;
29535
29536@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
29537
29538 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
29539 if (page_is_ram(start >> PAGE_SHIFT))
29540+#ifdef CONFIG_HIGHMEM
29541+ if ((start >> PAGE_SHIFT) < max_low_pfn)
29542+#endif
29543 return __va(phys);
29544
29545 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
29546@@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
29547 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
29548 {
29549 if (page_is_ram(phys >> PAGE_SHIFT))
29550+#ifdef CONFIG_HIGHMEM
29551+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
29552+#endif
29553 return;
29554
29555 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
29556@@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
29557 early_param("early_ioremap_debug", early_ioremap_debug_setup);
29558
29559 static __initdata int after_paging_init;
29560-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
29561+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
29562
29563 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
29564 {
29565@@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
29566 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
29567
29568 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
29569- memset(bm_pte, 0, sizeof(bm_pte));
29570- pmd_populate_kernel(&init_mm, pmd, bm_pte);
29571+ pmd_populate_user(&init_mm, pmd, bm_pte);
29572
29573 /*
29574 * The boot-ioremap range spans multiple pmds, for which
29575diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
29576index d87dd6d..bf3fa66 100644
29577--- a/arch/x86/mm/kmemcheck/kmemcheck.c
29578+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
29579@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
29580 * memory (e.g. tracked pages)? For now, we need this to avoid
29581 * invoking kmemcheck for PnP BIOS calls.
29582 */
29583- if (regs->flags & X86_VM_MASK)
29584+ if (v8086_mode(regs))
29585 return false;
29586- if (regs->cs != __KERNEL_CS)
29587+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
29588 return false;
29589
29590 pte = kmemcheck_pte_lookup(address);
29591diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
29592index 845df68..1d8d29f 100644
29593--- a/arch/x86/mm/mmap.c
29594+++ b/arch/x86/mm/mmap.c
29595@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
29596 * Leave an at least ~128 MB hole with possible stack randomization.
29597 */
29598 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
29599-#define MAX_GAP (TASK_SIZE/6*5)
29600+#define MAX_GAP (pax_task_size/6*5)
29601
29602 static int mmap_is_legacy(void)
29603 {
29604@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
29605 return rnd << PAGE_SHIFT;
29606 }
29607
29608-static unsigned long mmap_base(void)
29609+static unsigned long mmap_base(struct mm_struct *mm)
29610 {
29611 unsigned long gap = rlimit(RLIMIT_STACK);
29612+ unsigned long pax_task_size = TASK_SIZE;
29613+
29614+#ifdef CONFIG_PAX_SEGMEXEC
29615+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29616+ pax_task_size = SEGMEXEC_TASK_SIZE;
29617+#endif
29618
29619 if (gap < MIN_GAP)
29620 gap = MIN_GAP;
29621 else if (gap > MAX_GAP)
29622 gap = MAX_GAP;
29623
29624- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
29625+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
29626 }
29627
29628 /*
29629 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
29630 * does, but not when emulating X86_32
29631 */
29632-static unsigned long mmap_legacy_base(void)
29633+static unsigned long mmap_legacy_base(struct mm_struct *mm)
29634 {
29635- if (mmap_is_ia32())
29636+ if (mmap_is_ia32()) {
29637+
29638+#ifdef CONFIG_PAX_SEGMEXEC
29639+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29640+ return SEGMEXEC_TASK_UNMAPPED_BASE;
29641+ else
29642+#endif
29643+
29644 return TASK_UNMAPPED_BASE;
29645- else
29646+ } else
29647 return TASK_UNMAPPED_BASE + mmap_rnd();
29648 }
29649
29650@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
29651 void arch_pick_mmap_layout(struct mm_struct *mm)
29652 {
29653 if (mmap_is_legacy()) {
29654- mm->mmap_base = mmap_legacy_base();
29655+ mm->mmap_base = mmap_legacy_base(mm);
29656+
29657+#ifdef CONFIG_PAX_RANDMMAP
29658+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29659+ mm->mmap_base += mm->delta_mmap;
29660+#endif
29661+
29662 mm->get_unmapped_area = arch_get_unmapped_area;
29663 mm->unmap_area = arch_unmap_area;
29664 } else {
29665- mm->mmap_base = mmap_base();
29666+ mm->mmap_base = mmap_base(mm);
29667+
29668+#ifdef CONFIG_PAX_RANDMMAP
29669+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29670+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
29671+#endif
29672+
29673 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
29674 mm->unmap_area = arch_unmap_area_topdown;
29675 }
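mmap_base() above clamps the stack gap between MIN_GAP and MAX_GAP and subtracts it, together with the random offset, from the (possibly SEGMEXEC-reduced) task size. The arithmetic in isolation, with illustrative 32-bit values:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long mmap_base(unsigned long task_size, unsigned long stack_rlimit,
                                   unsigned long min_gap, unsigned long max_gap,
                                   unsigned long rnd)
    {
        unsigned long gap = stack_rlimit;

        if (gap < min_gap)
            gap = min_gap;
        else if (gap > max_gap)
            gap = max_gap;
        return PAGE_ALIGN(task_size - gap - rnd);
    }

    int main(void)
    {
        /* 3 GiB task size, 8 MiB stack rlimit, 128 MiB minimum gap, no rnd */
        unsigned long base = mmap_base(0xC0000000UL, 8UL << 20,
                                       128UL << 20, 0xC0000000UL / 6 * 5, 0);
        printf("mmap_base = %#lx\n", base);      /* 0xb8000000 */
        return 0;
    }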
29676diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
29677index dc0b727..f612039 100644
29678--- a/arch/x86/mm/mmio-mod.c
29679+++ b/arch/x86/mm/mmio-mod.c
29680@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
29681 break;
29682 default:
29683 {
29684- unsigned char *ip = (unsigned char *)instptr;
29685+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
29686 my_trace->opcode = MMIO_UNKNOWN_OP;
29687 my_trace->width = 0;
29688 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
29689@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
29690 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29691 void __iomem *addr)
29692 {
29693- static atomic_t next_id;
29694+ static atomic_unchecked_t next_id;
29695 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
29696 /* These are page-unaligned. */
29697 struct mmiotrace_map map = {
29698@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29699 .private = trace
29700 },
29701 .phys = offset,
29702- .id = atomic_inc_return(&next_id)
29703+ .id = atomic_inc_return_unchecked(&next_id)
29704 };
29705 map.map_id = trace->id;
29706
29707@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
29708 ioremap_trace_core(offset, size, addr);
29709 }
29710
29711-static void iounmap_trace_core(volatile void __iomem *addr)
29712+static void iounmap_trace_core(const volatile void __iomem *addr)
29713 {
29714 struct mmiotrace_map map = {
29715 .phys = 0,
29716@@ -328,7 +328,7 @@ not_enabled:
29717 }
29718 }
29719
29720-void mmiotrace_iounmap(volatile void __iomem *addr)
29721+void mmiotrace_iounmap(const volatile void __iomem *addr)
29722 {
29723 might_sleep();
29724 if (is_enabled()) /* recheck and proper locking in *_core() */
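next_id above becomes atomic_unchecked_t because a trace id may legitimately wrap around; the _unchecked variants opt such counters out of PaX's REFCOUNT overflow detection while keeping the operation atomic. The intent in portable C11 terms (atomic_inc_return() corresponds to a fetch-add plus one):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint next_id;                     /* wraparound is harmless for an id */

    static unsigned int new_trace_id(void)
    {
        return atomic_fetch_add(&next_id, 1) + 1;   /* atomic_inc_return()-style */
    }

    int main(void)
    {
        unsigned int a = new_trace_id();
        unsigned int b = new_trace_id();
        printf("%u %u\n", a, b);                    /* 1 2 */
        return 0;
    }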
29725diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
29726index 8504f36..5fc68f2 100644
29727--- a/arch/x86/mm/numa.c
29728+++ b/arch/x86/mm/numa.c
29729@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
29730 return true;
29731 }
29732
29733-static int __init numa_register_memblks(struct numa_meminfo *mi)
29734+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
29735 {
29736 unsigned long uninitialized_var(pfn_align);
29737 int i, nid;
29738diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
29739index b008656..773eac2 100644
29740--- a/arch/x86/mm/pageattr-test.c
29741+++ b/arch/x86/mm/pageattr-test.c
29742@@ -36,7 +36,7 @@ enum {
29743
29744 static int pte_testbit(pte_t pte)
29745 {
29746- return pte_flags(pte) & _PAGE_UNUSED1;
29747+ return pte_flags(pte) & _PAGE_CPA_TEST;
29748 }
29749
29750 struct split_state {
29751diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
29752index a718e0d..77419bc 100644
29753--- a/arch/x86/mm/pageattr.c
29754+++ b/arch/x86/mm/pageattr.c
29755@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29756 */
29757 #ifdef CONFIG_PCI_BIOS
29758 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
29759- pgprot_val(forbidden) |= _PAGE_NX;
29760+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29761 #endif
29762
29763 /*
29764@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29765 * Does not cover __inittext since that is gone later on. On
29766 * 64bit we do not enforce !NX on the low mapping
29767 */
29768- if (within(address, (unsigned long)_text, (unsigned long)_etext))
29769- pgprot_val(forbidden) |= _PAGE_NX;
29770+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
29771+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29772
29773+#ifdef CONFIG_DEBUG_RODATA
29774 /*
29775 * The .rodata section needs to be read-only. Using the pfn
29776 * catches all aliases.
29777@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29778 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
29779 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
29780 pgprot_val(forbidden) |= _PAGE_RW;
29781+#endif
29782
29783 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
29784 /*
29785@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29786 }
29787 #endif
29788
29789+#ifdef CONFIG_PAX_KERNEXEC
29790+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
29791+ pgprot_val(forbidden) |= _PAGE_RW;
29792+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29793+ }
29794+#endif
29795+
29796 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
29797
29798 return prot;
29799@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
29800 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
29801 {
29802 /* change init_mm */
29803+ pax_open_kernel();
29804 set_pte_atomic(kpte, pte);
29805+
29806 #ifdef CONFIG_X86_32
29807 if (!SHARED_KERNEL_PMD) {
29808+
29809+#ifdef CONFIG_PAX_PER_CPU_PGD
29810+ unsigned long cpu;
29811+#else
29812 struct page *page;
29813+#endif
29814
29815+#ifdef CONFIG_PAX_PER_CPU_PGD
29816+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29817+ pgd_t *pgd = get_cpu_pgd(cpu);
29818+#else
29819 list_for_each_entry(page, &pgd_list, lru) {
29820- pgd_t *pgd;
29821+ pgd_t *pgd = (pgd_t *)page_address(page);
29822+#endif
29823+
29824 pud_t *pud;
29825 pmd_t *pmd;
29826
29827- pgd = (pgd_t *)page_address(page) + pgd_index(address);
29828+ pgd += pgd_index(address);
29829 pud = pud_offset(pgd, address);
29830 pmd = pmd_offset(pud, address);
29831 set_pte_atomic((pte_t *)pmd, pte);
29832 }
29833 }
29834 #endif
29835+ pax_close_kernel();
29836 }
29837
29838 static int
29839diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
29840index 0eb572e..92f5c1e 100644
29841--- a/arch/x86/mm/pat.c
29842+++ b/arch/x86/mm/pat.c
29843@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
29844
29845 if (!entry) {
29846 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
29847- current->comm, current->pid, start, end - 1);
29848+ current->comm, task_pid_nr(current), start, end - 1);
29849 return -EINVAL;
29850 }
29851
29852@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29853
29854 while (cursor < to) {
29855 if (!devmem_is_allowed(pfn)) {
29856- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29857- current->comm, from, to - 1);
29858+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29859+ current->comm, from, to - 1, cursor);
29860 return 0;
29861 }
29862 cursor += PAGE_SIZE;
29863@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29864 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29865 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29866 "for [mem %#010Lx-%#010Lx]\n",
29867- current->comm, current->pid,
29868+ current->comm, task_pid_nr(current),
29869 cattr_name(flags),
29870 base, (unsigned long long)(base + size-1));
29871 return -EINVAL;
29872@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29873 flags = lookup_memtype(paddr);
29874 if (want_flags != flags) {
29875 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29876- current->comm, current->pid,
29877+ current->comm, task_pid_nr(current),
29878 cattr_name(want_flags),
29879 (unsigned long long)paddr,
29880 (unsigned long long)(paddr + size - 1),
29881@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29882 free_memtype(paddr, paddr + size);
29883 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29884 " for [mem %#010Lx-%#010Lx], got %s\n",
29885- current->comm, current->pid,
29886+ current->comm, task_pid_nr(current),
29887 cattr_name(want_flags),
29888 (unsigned long long)paddr,
29889 (unsigned long long)(paddr + size - 1),
29890diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29891index 9f0614d..92ae64a 100644
29892--- a/arch/x86/mm/pf_in.c
29893+++ b/arch/x86/mm/pf_in.c
29894@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29895 int i;
29896 enum reason_type rv = OTHERS;
29897
29898- p = (unsigned char *)ins_addr;
29899+ p = (unsigned char *)ktla_ktva(ins_addr);
29900 p += skip_prefix(p, &prf);
29901 p += get_opcode(p, &opcode);
29902
29903@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29904 struct prefix_bits prf;
29905 int i;
29906
29907- p = (unsigned char *)ins_addr;
29908+ p = (unsigned char *)ktla_ktva(ins_addr);
29909 p += skip_prefix(p, &prf);
29910 p += get_opcode(p, &opcode);
29911
29912@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29913 struct prefix_bits prf;
29914 int i;
29915
29916- p = (unsigned char *)ins_addr;
29917+ p = (unsigned char *)ktla_ktva(ins_addr);
29918 p += skip_prefix(p, &prf);
29919 p += get_opcode(p, &opcode);
29920
29921@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29922 struct prefix_bits prf;
29923 int i;
29924
29925- p = (unsigned char *)ins_addr;
29926+ p = (unsigned char *)ktla_ktva(ins_addr);
29927 p += skip_prefix(p, &prf);
29928 p += get_opcode(p, &opcode);
29929 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29930@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29931 struct prefix_bits prf;
29932 int i;
29933
29934- p = (unsigned char *)ins_addr;
29935+ p = (unsigned char *)ktla_ktva(ins_addr);
29936 p += skip_prefix(p, &prf);
29937 p += get_opcode(p, &opcode);
29938 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
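Every instruction-decoding site in pf_in.c now passes the faulting address through ktla_ktva() first: under KERNEXEC on i386 the kernel text executes at a different linear address than the one at which the same bytes are readable, and the two differ by a constant. A hypothetical illustration of such an alias translation (the offset value is made up; the real one is configuration-dependent):

    #include <stdio.h>

    /* Assumed constant separating the executable alias from the readable
     * canonical mapping; purely illustrative. */
    #define KERNEL_TEXT_OFFSET 0x10000000UL

    #define ktla_ktva(addr) ((addr) + KERNEL_TEXT_OFFSET)   /* exec alias -> readable */
    #define ktva_ktla(addr) ((addr) - KERNEL_TEXT_OFFSET)   /* readable -> exec alias */

    int main(void)
    {
        unsigned long ins_addr = 0xc0123456UL;   /* address at which the fault hit */
        printf("decode bytes at %#lx\n", ktla_ktva(ins_addr));
        return 0;
    }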
29939diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29940index 395b3b4a..213e72b 100644
29941--- a/arch/x86/mm/pgtable.c
29942+++ b/arch/x86/mm/pgtable.c
29943@@ -91,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29944 list_del(&page->lru);
29945 }
29946
29947-#define UNSHARED_PTRS_PER_PGD \
29948- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29949+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29950+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29951
29952+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29953+{
29954+ unsigned int count = USER_PGD_PTRS;
29955
29956+ while (count--)
29957+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29958+}
29959+#endif
29960+
29961+#ifdef CONFIG_PAX_PER_CPU_PGD
29962+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29963+{
29964+ unsigned int count = USER_PGD_PTRS;
29965+
29966+ while (count--) {
29967+ pgd_t pgd;
29968+
29969+#ifdef CONFIG_X86_64
29970+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29971+#else
29972+ pgd = *src++;
29973+#endif
29974+
29975+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29976+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29977+#endif
29978+
29979+ *dst++ = pgd;
29980+ }
29981+
29982+}
29983+#endif
29984+
29985+#ifdef CONFIG_X86_64
29986+#define pxd_t pud_t
29987+#define pyd_t pgd_t
29988+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29989+#define pxd_free(mm, pud) pud_free((mm), (pud))
29990+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29991+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29992+#define PYD_SIZE PGDIR_SIZE
29993+#else
29994+#define pxd_t pmd_t
29995+#define pyd_t pud_t
29996+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29997+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29998+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29999+#define pyd_offset(mm, address) pud_offset((mm), (address))
30000+#define PYD_SIZE PUD_SIZE
30001+#endif
30002+
30003+#ifdef CONFIG_PAX_PER_CPU_PGD
30004+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
30005+static inline void pgd_dtor(pgd_t *pgd) {}
30006+#else
30007 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
30008 {
30009 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
30010@@ -135,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
30011 pgd_list_del(pgd);
30012 spin_unlock(&pgd_lock);
30013 }
30014+#endif
30015
30016 /*
30017 * List of all pgd's needed for non-PAE so it can invalidate entries
30018@@ -147,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
30019 * -- nyc
30020 */
30021
30022-#ifdef CONFIG_X86_PAE
30023+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
30024 /*
30025 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
30026 * updating the top-level pagetable entries to guarantee the
30027@@ -159,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
30028 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
30029 * and initialize the kernel pmds here.
30030 */
30031-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
30032+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
30033
30034 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
30035 {
30036@@ -177,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
30037 */
30038 flush_tlb_mm(mm);
30039 }
30040+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
30041+#define PREALLOCATED_PXDS USER_PGD_PTRS
30042 #else /* !CONFIG_X86_PAE */
30043
30044 /* No need to prepopulate any pagetable entries in non-PAE modes. */
30045-#define PREALLOCATED_PMDS 0
30046+#define PREALLOCATED_PXDS 0
30047
30048 #endif /* CONFIG_X86_PAE */
30049
30050-static void free_pmds(pmd_t *pmds[])
30051+static void free_pxds(pxd_t *pxds[])
30052 {
30053 int i;
30054
30055- for(i = 0; i < PREALLOCATED_PMDS; i++)
30056- if (pmds[i])
30057- free_page((unsigned long)pmds[i]);
30058+ for(i = 0; i < PREALLOCATED_PXDS; i++)
30059+ if (pxds[i])
30060+ free_page((unsigned long)pxds[i]);
30061 }
30062
30063-static int preallocate_pmds(pmd_t *pmds[])
30064+static int preallocate_pxds(pxd_t *pxds[])
30065 {
30066 int i;
30067 bool failed = false;
30068
30069- for(i = 0; i < PREALLOCATED_PMDS; i++) {
30070- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
30071- if (pmd == NULL)
30072+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
30073+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
30074+ if (pxd == NULL)
30075 failed = true;
30076- pmds[i] = pmd;
30077+ pxds[i] = pxd;
30078 }
30079
30080 if (failed) {
30081- free_pmds(pmds);
30082+ free_pxds(pxds);
30083 return -ENOMEM;
30084 }
30085
30086@@ -219,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
30087 * preallocate which never got a corresponding vma will need to be
30088 * freed manually.
30089 */
30090-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
30091+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
30092 {
30093 int i;
30094
30095- for(i = 0; i < PREALLOCATED_PMDS; i++) {
30096+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
30097 pgd_t pgd = pgdp[i];
30098
30099 if (pgd_val(pgd) != 0) {
30100- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
30101+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
30102
30103- pgdp[i] = native_make_pgd(0);
30104+ set_pgd(pgdp + i, native_make_pgd(0));
30105
30106- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
30107- pmd_free(mm, pmd);
30108+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
30109+ pxd_free(mm, pxd);
30110 }
30111 }
30112 }
30113
30114-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
30115+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
30116 {
30117- pud_t *pud;
30118+ pyd_t *pyd;
30119 unsigned long addr;
30120 int i;
30121
30122- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
30123+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
30124 return;
30125
30126- pud = pud_offset(pgd, 0);
30127+#ifdef CONFIG_X86_64
30128+ pyd = pyd_offset(mm, 0L);
30129+#else
30130+ pyd = pyd_offset(pgd, 0L);
30131+#endif
30132
30133- for (addr = i = 0; i < PREALLOCATED_PMDS;
30134- i++, pud++, addr += PUD_SIZE) {
30135- pmd_t *pmd = pmds[i];
30136+ for (addr = i = 0; i < PREALLOCATED_PXDS;
30137+ i++, pyd++, addr += PYD_SIZE) {
30138+ pxd_t *pxd = pxds[i];
30139
30140 if (i >= KERNEL_PGD_BOUNDARY)
30141- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
30142- sizeof(pmd_t) * PTRS_PER_PMD);
30143+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
30144+ sizeof(pxd_t) * PTRS_PER_PMD);
30145
30146- pud_populate(mm, pud, pmd);
30147+ pyd_populate(mm, pyd, pxd);
30148 }
30149 }
30150
30151 pgd_t *pgd_alloc(struct mm_struct *mm)
30152 {
30153 pgd_t *pgd;
30154- pmd_t *pmds[PREALLOCATED_PMDS];
30155+ pxd_t *pxds[PREALLOCATED_PXDS];
30156
30157 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
30158
30159@@ -272,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
30160
30161 mm->pgd = pgd;
30162
30163- if (preallocate_pmds(pmds) != 0)
30164+ if (preallocate_pxds(pxds) != 0)
30165 goto out_free_pgd;
30166
30167 if (paravirt_pgd_alloc(mm) != 0)
30168- goto out_free_pmds;
30169+ goto out_free_pxds;
30170
30171 /*
30172 * Make sure that pre-populating the pmds is atomic with
30173@@ -286,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
30174 spin_lock(&pgd_lock);
30175
30176 pgd_ctor(mm, pgd);
30177- pgd_prepopulate_pmd(mm, pgd, pmds);
30178+ pgd_prepopulate_pxd(mm, pgd, pxds);
30179
30180 spin_unlock(&pgd_lock);
30181
30182 return pgd;
30183
30184-out_free_pmds:
30185- free_pmds(pmds);
30186+out_free_pxds:
30187+ free_pxds(pxds);
30188 out_free_pgd:
30189 free_page((unsigned long)pgd);
30190 out:
30191@@ -302,7 +363,7 @@ out:
30192
30193 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
30194 {
30195- pgd_mop_up_pmds(mm, pgd);
30196+ pgd_mop_up_pxds(mm, pgd);
30197 pgd_dtor(pgd);
30198 paravirt_pgd_free(mm, pgd);
30199 free_page((unsigned long)pgd);
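The pxd/pyd macros above let one preallocation body serve both layouts: on x86_64 the preallocated level is the pud under the pgd, on 32-bit PAE the pmd under the pud. The renamed preallocate_pxds()/free_pxds() pair keeps its all-or-nothing behaviour, which in plain C is just:

    #include <stdlib.h>

    #define PREALLOCATED_PXDS 4                  /* placeholder count */

    static void free_pxds(void *pxds[])
    {
        for (int i = 0; i < PREALLOCATED_PXDS; i++)
            free(pxds[i]);                       /* free(NULL) is a no-op */
    }

    /* Fill every slot; on any failure release them all and report. */
    static int preallocate_pxds(void *pxds[])
    {
        int failed = 0;

        for (int i = 0; i < PREALLOCATED_PXDS; i++) {
            pxds[i] = calloc(1, 4096);
            if (pxds[i] == NULL)
                failed = 1;
        }
        if (failed) {
            free_pxds(pxds);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        void *pxds[PREALLOCATED_PXDS];

        if (preallocate_pxds(pxds) == 0)
            free_pxds(pxds);
        return 0;
    }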
30200diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
30201index a69bcb8..19068ab 100644
30202--- a/arch/x86/mm/pgtable_32.c
30203+++ b/arch/x86/mm/pgtable_32.c
30204@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
30205 return;
30206 }
30207 pte = pte_offset_kernel(pmd, vaddr);
30208+
30209+ pax_open_kernel();
30210 if (pte_val(pteval))
30211 set_pte_at(&init_mm, vaddr, pte, pteval);
30212 else
30213 pte_clear(&init_mm, vaddr, pte);
30214+ pax_close_kernel();
30215
30216 /*
30217 * It's enough to flush this one mapping.
30218diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
30219index d2e2735..5c6586f 100644
30220--- a/arch/x86/mm/physaddr.c
30221+++ b/arch/x86/mm/physaddr.c
30222@@ -8,7 +8,7 @@
30223
30224 #ifdef CONFIG_X86_64
30225
30226-unsigned long __phys_addr(unsigned long x)
30227+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
30228 {
30229 if (x >= __START_KERNEL_map) {
30230 x -= __START_KERNEL_map;
30231@@ -45,7 +45,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
30232 #else
30233
30234 #ifdef CONFIG_DEBUG_VIRTUAL
30235-unsigned long __phys_addr(unsigned long x)
30236+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
30237 {
30238 /* VMALLOC_* aren't constants */
30239 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
30240diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
30241index 410531d..0f16030 100644
30242--- a/arch/x86/mm/setup_nx.c
30243+++ b/arch/x86/mm/setup_nx.c
30244@@ -5,8 +5,10 @@
30245 #include <asm/pgtable.h>
30246 #include <asm/proto.h>
30247
30248+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
30249 static int disable_nx __cpuinitdata;
30250
30251+#ifndef CONFIG_PAX_PAGEEXEC
30252 /*
30253 * noexec = on|off
30254 *
30255@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
30256 return 0;
30257 }
30258 early_param("noexec", noexec_setup);
30259+#endif
30260+
30261+#endif
30262
30263 void __cpuinit x86_configure_nx(void)
30264 {
30265+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
30266 if (cpu_has_nx && !disable_nx)
30267 __supported_pte_mask |= _PAGE_NX;
30268 else
30269+#endif
30270 __supported_pte_mask &= ~_PAGE_NX;
30271 }
30272
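The setup_nx.c change above compiles the NX plumbing only where the page-table format has an NX bit at all (64-bit or PAE), and drops the "noexec=" toggle when PAX_PAGEEXEC makes non-executable pages mandatory. The resulting decision, as a pure function:

    #include <stdbool.h>
    #include <stdio.h>

    #define _PAGE_NX (1ULL << 63)

    /* NX is set only when the paging mode can express it, the CPU supports
     * it, and it was not disabled; in every other case it is forced off. */
    static unsigned long long configure_nx(bool fmt_has_nx, bool cpu_has_nx,
                                           bool disable_nx, unsigned long long mask)
    {
        if (fmt_has_nx && cpu_has_nx && !disable_nx)
            mask |= _PAGE_NX;
        else
            mask &= ~_PAGE_NX;
        return mask;
    }

    int main(void)
    {
        printf("%#llx\n", configure_nx(true, true, false, 0));      /* NX on  */
        printf("%#llx\n", configure_nx(false, true, false, ~0ULL)); /* NX off */
        return 0;
    }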
30273diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
30274index 13a6b29..c2fff23 100644
30275--- a/arch/x86/mm/tlb.c
30276+++ b/arch/x86/mm/tlb.c
30277@@ -48,7 +48,11 @@ void leave_mm(int cpu)
30278 BUG();
30279 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
30280 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
30281+
30282+#ifndef CONFIG_PAX_PER_CPU_PGD
30283 load_cr3(swapper_pg_dir);
30284+#endif
30285+
30286 }
30287 }
30288 EXPORT_SYMBOL_GPL(leave_mm);
30289diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
30290index 877b9a1..a8ecf42 100644
30291--- a/arch/x86/net/bpf_jit.S
30292+++ b/arch/x86/net/bpf_jit.S
30293@@ -9,6 +9,7 @@
30294 */
30295 #include <linux/linkage.h>
30296 #include <asm/dwarf2.h>
30297+#include <asm/alternative-asm.h>
30298
30299 /*
30300 * Calling convention :
30301@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
30302 jle bpf_slow_path_word
30303 mov (SKBDATA,%rsi),%eax
30304 bswap %eax /* ntohl() */
30305+ pax_force_retaddr
30306 ret
30307
30308 sk_load_half:
30309@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
30310 jle bpf_slow_path_half
30311 movzwl (SKBDATA,%rsi),%eax
30312 rol $8,%ax # ntohs()
30313+ pax_force_retaddr
30314 ret
30315
30316 sk_load_byte:
30317@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
30318 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
30319 jle bpf_slow_path_byte
30320 movzbl (SKBDATA,%rsi),%eax
30321+ pax_force_retaddr
30322 ret
30323
30324 /**
30325@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
30326 movzbl (SKBDATA,%rsi),%ebx
30327 and $15,%bl
30328 shl $2,%bl
30329+ pax_force_retaddr
30330 ret
30331
30332 /* rsi contains offset and can be scratched */
30333@@ -109,6 +114,7 @@ bpf_slow_path_word:
30334 js bpf_error
30335 mov -12(%rbp),%eax
30336 bswap %eax
30337+ pax_force_retaddr
30338 ret
30339
30340 bpf_slow_path_half:
30341@@ -117,12 +123,14 @@ bpf_slow_path_half:
30342 mov -12(%rbp),%ax
30343 rol $8,%ax
30344 movzwl %ax,%eax
30345+ pax_force_retaddr
30346 ret
30347
30348 bpf_slow_path_byte:
30349 bpf_slow_path_common(1)
30350 js bpf_error
30351 movzbl -12(%rbp),%eax
30352+ pax_force_retaddr
30353 ret
30354
30355 bpf_slow_path_byte_msh:
30356@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
30357 and $15,%al
30358 shl $2,%al
30359 xchg %eax,%ebx
30360+ pax_force_retaddr
30361 ret
30362
30363 #define sk_negative_common(SIZE) \
30364@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
30365 sk_negative_common(4)
30366 mov (%rax), %eax
30367 bswap %eax
30368+ pax_force_retaddr
30369 ret
30370
30371 bpf_slow_path_half_neg:
30372@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
30373 mov (%rax),%ax
30374 rol $8,%ax
30375 movzwl %ax,%eax
30376+ pax_force_retaddr
30377 ret
30378
30379 bpf_slow_path_byte_neg:
30380@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
30381 .globl sk_load_byte_negative_offset
30382 sk_negative_common(1)
30383 movzbl (%rax), %eax
30384+ pax_force_retaddr
30385 ret
30386
30387 bpf_slow_path_byte_msh_neg:
30388@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
30389 and $15,%al
30390 shl $2,%al
30391 xchg %eax,%ebx
30392+ pax_force_retaddr
30393 ret
30394
30395 bpf_error:
30396@@ -197,4 +210,5 @@ bpf_error:
30397 xor %eax,%eax
30398 mov -8(%rbp),%rbx
30399 leaveq
30400+ pax_force_retaddr
30401 ret
30402diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
30403index d11a470..3f9adff3 100644
30404--- a/arch/x86/net/bpf_jit_comp.c
30405+++ b/arch/x86/net/bpf_jit_comp.c
30406@@ -12,6 +12,7 @@
30407 #include <linux/netdevice.h>
30408 #include <linux/filter.h>
30409 #include <linux/if_vlan.h>
30410+#include <linux/random.h>
30411
30412 /*
30413 * Conventions :
30414@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
30415 return ptr + len;
30416 }
30417
30418+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30419+#define MAX_INSTR_CODE_SIZE 96
30420+#else
30421+#define MAX_INSTR_CODE_SIZE 64
30422+#endif
30423+
30424 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
30425
30426 #define EMIT1(b1) EMIT(b1, 1)
30427 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
30428 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
30429 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
30430+
30431+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30432+/* original constant will appear in ecx */
30433+#define DILUTE_CONST_SEQUENCE(_off, _key) \
30434+do { \
30435+ /* mov ecx, randkey */ \
30436+ EMIT1(0xb9); \
30437+ EMIT(_key, 4); \
30438+ /* xor ecx, randkey ^ off */ \
30439+ EMIT2(0x81, 0xf1); \
30440+ EMIT((_key) ^ (_off), 4); \
30441+} while (0)
30442+
30443+#define EMIT1_off32(b1, _off) \
30444+do { \
30445+ switch (b1) { \
30446+ case 0x05: /* add eax, imm32 */ \
30447+ case 0x2d: /* sub eax, imm32 */ \
30448+ case 0x25: /* and eax, imm32 */ \
30449+ case 0x0d: /* or eax, imm32 */ \
30450+ case 0xb8: /* mov eax, imm32 */ \
30451+ case 0x3d: /* cmp eax, imm32 */ \
30452+ case 0xa9: /* test eax, imm32 */ \
30453+ DILUTE_CONST_SEQUENCE(_off, randkey); \
30454+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
30455+ break; \
30456+ case 0xbb: /* mov ebx, imm32 */ \
30457+ DILUTE_CONST_SEQUENCE(_off, randkey); \
30458+ /* mov ebx, ecx */ \
30459+ EMIT2(0x89, 0xcb); \
30460+ break; \
30461+ case 0xbe: /* mov esi, imm32 */ \
30462+ DILUTE_CONST_SEQUENCE(_off, randkey); \
30463+ /* mov esi, ecx */ \
30464+ EMIT2(0x89, 0xce); \
30465+ break; \
30466+ case 0xe9: /* jmp rel imm32 */ \
30467+ EMIT1(b1); \
30468+ EMIT(_off, 4); \
30469+ /* prevent fall-through, we're not called if off = 0 */ \
30470+ EMIT(0xcccccccc, 4); \
30471+ EMIT(0xcccccccc, 4); \
30472+ break; \
30473+ default: \
30474+ EMIT1(b1); \
30475+ EMIT(_off, 4); \
30476+ } \
30477+} while (0)
30478+
30479+#define EMIT2_off32(b1, b2, _off) \
30480+do { \
30481+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
30482+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
30483+ EMIT(randkey, 4); \
30484+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
30485+ EMIT((_off) - randkey, 4); \
30486+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
30487+ DILUTE_CONST_SEQUENCE(_off, randkey); \
30488+ /* imul eax, ecx */ \
30489+ EMIT3(0x0f, 0xaf, 0xc1); \
30490+ } else { \
30491+ EMIT2(b1, b2); \
30492+ EMIT(_off, 4); \
30493+ } \
30494+} while (0)
30495+#else
30496 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
30497+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
30498+#endif
30499
30500 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
30501 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
30502@@ -90,6 +165,24 @@ do { \
30503 #define X86_JBE 0x76
30504 #define X86_JA 0x77
30505
30506+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30507+#define APPEND_FLOW_VERIFY() \
30508+do { \
30509+ /* mov ecx, randkey */ \
30510+ EMIT1(0xb9); \
30511+ EMIT(randkey, 4); \
30512+ /* cmp ecx, randkey */ \
30513+ EMIT2(0x81, 0xf9); \
30514+ EMIT(randkey, 4); \
30515+ /* jz after 8 int 3s */ \
30516+ EMIT2(0x74, 0x08); \
30517+ EMIT(0xcccccccc, 4); \
30518+ EMIT(0xcccccccc, 4); \
30519+} while (0)
30520+#else
30521+#define APPEND_FLOW_VERIFY() do { } while (0)
30522+#endif
30523+
30524 #define EMIT_COND_JMP(op, offset) \
30525 do { \
30526 if (is_near(offset)) \
30527@@ -97,6 +190,7 @@ do { \
30528 else { \
30529 EMIT2(0x0f, op + 0x10); \
30530 EMIT(offset, 4); /* jxx .+off32 */ \
30531+ APPEND_FLOW_VERIFY(); \
30532 } \
30533 } while (0)
30534
30535@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
30536 set_fs(old_fs);
30537 }
30538
30539+struct bpf_jit_work {
30540+ struct work_struct work;
30541+ void *image;
30542+};
30543+
30544 #define CHOOSE_LOAD_FUNC(K, func) \
30545 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
30546
30547 void bpf_jit_compile(struct sk_filter *fp)
30548 {
30549- u8 temp[64];
30550+ u8 temp[MAX_INSTR_CODE_SIZE];
30551 u8 *prog;
30552 unsigned int proglen, oldproglen = 0;
30553 int ilen, i;
30554@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
30555 unsigned int *addrs;
30556 const struct sock_filter *filter = fp->insns;
30557 int flen = fp->len;
30558+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30559+ unsigned int randkey;
30560+#endif
30561
30562 if (!bpf_jit_enable)
30563 return;
30564@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
30565 if (addrs == NULL)
30566 return;
30567
30568+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
30569+ if (!fp->work)
30570+ goto out;
30571+
30572+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30573+ randkey = get_random_int();
30574+#endif
30575+
30576 /* Before first pass, make a rough estimation of addrs[]
30577- * each bpf instruction is translated to less than 64 bytes
30578+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
30579 */
30580 for (proglen = 0, i = 0; i < flen; i++) {
30581- proglen += 64;
30582+ proglen += MAX_INSTR_CODE_SIZE;
30583 addrs[i] = proglen;
30584 }
30585 cleanup_addr = proglen; /* epilogue address */
30586@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
30587 case BPF_S_ALU_MUL_K: /* A *= K */
30588 if (is_imm8(K))
30589 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
30590- else {
30591- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
30592- EMIT(K, 4);
30593- }
30594+ else
30595+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
30596 break;
30597 case BPF_S_ALU_DIV_X: /* A /= X; */
30598 seen |= SEEN_XREG;
30599@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
30600 break;
30601 case BPF_S_ALU_MOD_K: /* A %= K; */
30602 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
30603+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30604+ DILUTE_CONST_SEQUENCE(K, randkey);
30605+#else
30606 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
30607+#endif
30608 EMIT2(0xf7, 0xf1); /* div %ecx */
30609 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
30610 break;
30611 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
30612+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30613+ DILUTE_CONST_SEQUENCE(K, randkey);
30614+ // imul rax, rcx
30615+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
30616+#else
30617 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
30618 EMIT(K, 4);
30619+#endif
30620 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
30621 break;
30622 case BPF_S_ALU_AND_X:
30623@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
30624 if (is_imm8(K)) {
30625 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
30626 } else {
30627- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
30628- EMIT(K, 4);
30629+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
30630 }
30631 } else {
30632 EMIT2(0x89,0xde); /* mov %ebx,%esi */
30633@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30634 break;
30635 default:
30636 /* hmm, too complex filter, give up with jit compiler */
30637- goto out;
30638+ goto error;
30639 }
30640 ilen = prog - temp;
30641 if (image) {
30642 if (unlikely(proglen + ilen > oldproglen)) {
30643 pr_err("bpb_jit_compile fatal error\n");
30644- kfree(addrs);
30645- module_free(NULL, image);
30646- return;
30647+ module_free_exec(NULL, image);
30648+ goto error;
30649 }
30650+ pax_open_kernel();
30651 memcpy(image + proglen, temp, ilen);
30652+ pax_close_kernel();
30653 }
30654 proglen += ilen;
30655 addrs[i] = proglen;
30656@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30657 break;
30658 }
30659 if (proglen == oldproglen) {
30660- image = module_alloc(max_t(unsigned int,
30661- proglen,
30662- sizeof(struct work_struct)));
30663+ image = module_alloc_exec(proglen);
30664 if (!image)
30665- goto out;
30666+ goto error;
30667 }
30668 oldproglen = proglen;
30669 }
30670@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30671 bpf_flush_icache(image, image + proglen);
30672
30673 fp->bpf_func = (void *)image;
30674- }
30675+ } else
30676+error:
30677+ kfree(fp->work);
30678+
30679 out:
30680 kfree(addrs);
30681 return;
30682@@ -707,18 +826,20 @@ out:
30683
30684 static void jit_free_defer(struct work_struct *arg)
30685 {
30686- module_free(NULL, arg);
30687+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
30688+ kfree(arg);
30689 }
30690
30691 /* run from softirq, we must use a work_struct to call
30692- * module_free() from process context
30693+ * module_free_exec() from process context
30694 */
30695 void bpf_jit_free(struct sk_filter *fp)
30696 {
30697 if (fp->bpf_func != sk_run_filter) {
30698- struct work_struct *work = (struct work_struct *)fp->bpf_func;
30699+ struct work_struct *work = &fp->work->work;
30700
30701 INIT_WORK(work, jit_free_defer);
30702+ fp->work->image = fp->bpf_func;
30703 schedule_work(work);
30704 }
30705 }
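
The bpf_jit_comp.c hunks above make two separable changes. First, under CONFIG_GRKERNSEC_JIT_HARDEN the 32-bit immediates fed to div and imul are no longer emitted verbatim: DILUTE_CONST_SEQUENCE combines the attacker-controlled constant K with a per-compilation random key, so no chosen byte sequence lands directly in executable JIT memory. Second, because bpf_jit_free() can run from softirq context while module_free_exec() needs process context, the image pointer is parked in the new struct bpf_jit_work and freed from a workqueue. Below is a minimal userspace sketch of the blinding arithmetic; the macro body is not shown in this hunk, so the xor scheme is an assumption consistent with how randkey is used here, not the literal emitted instruction sequence.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Constant blinding sketch: embed K ^ key instead of K and recombine at
 * runtime, so no attacker-chosen 32-bit immediate appears verbatim in
 * executable memory. Illustrative only, not the DILUTE_CONST_SEQUENCE
 * expansion itself. */
static uint32_t load_blinded(uint32_t embedded, uint32_t key)
{
	return embedded ^ key;	/* what the emitted mov+xor pair computes */
}

int main(void)
{
	uint32_t K = 0xdeadbeef;	/* filter-supplied constant */
	uint32_t key, embedded;

	srand((unsigned)time(NULL));
	key = (uint32_t)rand();		/* per-compile random key */
	embedded = K ^ key;		/* value actually placed in the image */

	printf("embedded %#x, recovered %#x\n",
	       embedded, load_blinded(embedded, key));
	return 0;
}
```

The deferred-free half keeps the stock work_struct pattern; the difference is that the callback now receives a container that remembers both the work item and the image to release.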
30706diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
30707index d6aa6e8..266395a 100644
30708--- a/arch/x86/oprofile/backtrace.c
30709+++ b/arch/x86/oprofile/backtrace.c
30710@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
30711 struct stack_frame_ia32 *fp;
30712 unsigned long bytes;
30713
30714- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30715+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30716 if (bytes != sizeof(bufhead))
30717 return NULL;
30718
30719- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
30720+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
30721
30722 oprofile_add_trace(bufhead[0].return_address);
30723
30724@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
30725 struct stack_frame bufhead[2];
30726 unsigned long bytes;
30727
30728- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30729+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30730 if (bytes != sizeof(bufhead))
30731 return NULL;
30732
30733@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
30734 {
30735 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
30736
30737- if (!user_mode_vm(regs)) {
30738+ if (!user_mode(regs)) {
30739 unsigned long stack = kernel_stack_pointer(regs);
30740 if (depth)
30741 dump_trace(NULL, regs, (unsigned long *)stack, 0,
30742diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
30743index 48768df..ba9143c 100644
30744--- a/arch/x86/oprofile/nmi_int.c
30745+++ b/arch/x86/oprofile/nmi_int.c
30746@@ -23,6 +23,7 @@
30747 #include <asm/nmi.h>
30748 #include <asm/msr.h>
30749 #include <asm/apic.h>
30750+#include <asm/pgtable.h>
30751
30752 #include "op_counter.h"
30753 #include "op_x86_model.h"
30754@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
30755 if (ret)
30756 return ret;
30757
30758- if (!model->num_virt_counters)
30759- model->num_virt_counters = model->num_counters;
30760+ if (!model->num_virt_counters) {
30761+ pax_open_kernel();
30762+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
30763+ pax_close_kernel();
30764+ }
30765
30766 mux_init(ops);
30767
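
Several hunks in this region (nmi_int.c here, op_model_amd.c and op_model_ppro.c just below, and pci/mrst.c further down) share one idiom: the ops descriptors become read-only data (note the __do_const later added to op_x86_model_spec), so their one-time, probe-dependent fields are written through a pax_open_kernel()/pax_close_kernel() window plus a cast that strips the const. A runnable userspace analogue of that window, using mprotect() in place of the PaX primitives and assuming 4 KiB pages:

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define PAGE_SZ 4096	/* assumption: 4 KiB pages, for brevity */

int main(void)
{
	char *page = mmap(NULL, PAGE_SZ, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	strcpy(page, "defaults");
	mprotect(page, PAGE_SZ, PROT_READ);		 /* now effectively const */

	mprotect(page, PAGE_SZ, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue */
	strcpy(page, "probed values");			 /* the one sanctioned write */
	mprotect(page, PAGE_SZ, PROT_READ);		 /* pax_close_kernel() analogue */

	puts(page);
	return 0;
}
```

Outside the brief window, any stray write to the structure faults instead of silently redirecting a function pointer.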
30768diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
30769index b2b9443..be58856 100644
30770--- a/arch/x86/oprofile/op_model_amd.c
30771+++ b/arch/x86/oprofile/op_model_amd.c
30772@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
30773 num_counters = AMD64_NUM_COUNTERS;
30774 }
30775
30776- op_amd_spec.num_counters = num_counters;
30777- op_amd_spec.num_controls = num_counters;
30778- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30779+ pax_open_kernel();
30780+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
30781+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
30782+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30783+ pax_close_kernel();
30784
30785 return 0;
30786 }
30787diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
30788index d90528e..0127e2b 100644
30789--- a/arch/x86/oprofile/op_model_ppro.c
30790+++ b/arch/x86/oprofile/op_model_ppro.c
30791@@ -19,6 +19,7 @@
30792 #include <asm/msr.h>
30793 #include <asm/apic.h>
30794 #include <asm/nmi.h>
30795+#include <asm/pgtable.h>
30796
30797 #include "op_x86_model.h"
30798 #include "op_counter.h"
30799@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
30800
30801 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
30802
30803- op_arch_perfmon_spec.num_counters = num_counters;
30804- op_arch_perfmon_spec.num_controls = num_counters;
30805+ pax_open_kernel();
30806+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
30807+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
30808+ pax_close_kernel();
30809 }
30810
30811 static int arch_perfmon_init(struct oprofile_operations *ignore)
30812diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
30813index 71e8a67..6a313bb 100644
30814--- a/arch/x86/oprofile/op_x86_model.h
30815+++ b/arch/x86/oprofile/op_x86_model.h
30816@@ -52,7 +52,7 @@ struct op_x86_model_spec {
30817 void (*switch_ctrl)(struct op_x86_model_spec const *model,
30818 struct op_msrs const * const msrs);
30819 #endif
30820-};
30821+} __do_const;
30822
30823 struct op_counter_config;
30824
30825diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
30826index e9e6ed5..e47ae67 100644
30827--- a/arch/x86/pci/amd_bus.c
30828+++ b/arch/x86/pci/amd_bus.c
30829@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
30830 return NOTIFY_OK;
30831 }
30832
30833-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
30834+static struct notifier_block amd_cpu_notifier = {
30835 .notifier_call = amd_cpu_notify,
30836 };
30837
30838diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
30839index 372e9b8..e775a6c 100644
30840--- a/arch/x86/pci/irq.c
30841+++ b/arch/x86/pci/irq.c
30842@@ -50,7 +50,7 @@ struct irq_router {
30843 struct irq_router_handler {
30844 u16 vendor;
30845 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
30846-};
30847+} __do_const;
30848
30849 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
30850 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30851@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30852 return 0;
30853 }
30854
30855-static __initdata struct irq_router_handler pirq_routers[] = {
30856+static __initconst const struct irq_router_handler pirq_routers[] = {
30857 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30858 { PCI_VENDOR_ID_AL, ali_router_probe },
30859 { PCI_VENDOR_ID_ITE, ite_router_probe },
30860@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30861 static void __init pirq_find_router(struct irq_router *r)
30862 {
30863 struct irq_routing_table *rt = pirq_table;
30864- struct irq_router_handler *h;
30865+ const struct irq_router_handler *h;
30866
30867 #ifdef CONFIG_PCI_BIOS
30868 if (!rt->signature) {
30869@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30870 return 0;
30871 }
30872
30873-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30874+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30875 {
30876 .callback = fix_broken_hp_bios_irq9,
30877 .ident = "HP Pavilion N5400 Series Laptop",
30878diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30879index 6eb18c4..20d83de 100644
30880--- a/arch/x86/pci/mrst.c
30881+++ b/arch/x86/pci/mrst.c
30882@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30883 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30884 pci_mmcfg_late_init();
30885 pcibios_enable_irq = mrst_pci_irq_enable;
30886- pci_root_ops = pci_mrst_ops;
30887+ pax_open_kernel();
30888+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30889+ pax_close_kernel();
30890 pci_soc_mode = 1;
30891 /* Continue with standard init */
30892 return 1;
30893diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30894index c77b24a..c979855 100644
30895--- a/arch/x86/pci/pcbios.c
30896+++ b/arch/x86/pci/pcbios.c
30897@@ -79,7 +79,7 @@ union bios32 {
30898 static struct {
30899 unsigned long address;
30900 unsigned short segment;
30901-} bios32_indirect = { 0, __KERNEL_CS };
30902+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30903
30904 /*
30905 * Returns the entry point for the given service, NULL on error
30906@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30907 unsigned long length; /* %ecx */
30908 unsigned long entry; /* %edx */
30909 unsigned long flags;
30910+ struct desc_struct d, *gdt;
30911
30912 local_irq_save(flags);
30913- __asm__("lcall *(%%edi); cld"
30914+
30915+ gdt = get_cpu_gdt_table(smp_processor_id());
30916+
30917+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30918+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30919+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30920+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30921+
30922+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30923 : "=a" (return_code),
30924 "=b" (address),
30925 "=c" (length),
30926 "=d" (entry)
30927 : "0" (service),
30928 "1" (0),
30929- "D" (&bios32_indirect));
30930+ "D" (&bios32_indirect),
30931+ "r"(__PCIBIOS_DS)
30932+ : "memory");
30933+
30934+ pax_open_kernel();
30935+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30936+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30937+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30938+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30939+ pax_close_kernel();
30940+
30941 local_irq_restore(flags);
30942
30943 switch (return_code) {
30944- case 0:
30945- return address + entry;
30946- case 0x80: /* Not present */
30947- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30948- return 0;
30949- default: /* Shouldn't happen */
30950- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30951- service, return_code);
30952+ case 0: {
30953+ int cpu;
30954+ unsigned char flags;
30955+
30956+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30957+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30958+ printk(KERN_WARNING "bios32_service: not valid\n");
30959 return 0;
30960+ }
30961+ address = address + PAGE_OFFSET;
30962+ length += 16UL; /* some BIOSs underreport this... */
30963+ flags = 4;
30964+ if (length >= 64*1024*1024) {
30965+ length >>= PAGE_SHIFT;
30966+ flags |= 8;
30967+ }
30968+
30969+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30970+ gdt = get_cpu_gdt_table(cpu);
30971+ pack_descriptor(&d, address, length, 0x9b, flags);
30972+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30973+ pack_descriptor(&d, address, length, 0x93, flags);
30974+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30975+ }
30976+ return entry;
30977+ }
30978+ case 0x80: /* Not present */
30979+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30980+ return 0;
30981+ default: /* Shouldn't happen */
30982+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30983+ service, return_code);
30984+ return 0;
30985 }
30986 }
30987
30988 static struct {
30989 unsigned long address;
30990 unsigned short segment;
30991-} pci_indirect = { 0, __KERNEL_CS };
30992+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30993
30994-static int pci_bios_present;
30995+static int pci_bios_present __read_only;
30996
30997 static int check_pcibios(void)
30998 {
30999@@ -131,11 +174,13 @@ static int check_pcibios(void)
31000 unsigned long flags, pcibios_entry;
31001
31002 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
31003- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
31004+ pci_indirect.address = pcibios_entry;
31005
31006 local_irq_save(flags);
31007- __asm__(
31008- "lcall *(%%edi); cld\n\t"
31009+ __asm__("movw %w6, %%ds\n\t"
31010+ "lcall *%%ss:(%%edi); cld\n\t"
31011+ "push %%ss\n\t"
31012+ "pop %%ds\n\t"
31013 "jc 1f\n\t"
31014 "xor %%ah, %%ah\n"
31015 "1:"
31016@@ -144,7 +189,8 @@ static int check_pcibios(void)
31017 "=b" (ebx),
31018 "=c" (ecx)
31019 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
31020- "D" (&pci_indirect)
31021+ "D" (&pci_indirect),
31022+ "r" (__PCIBIOS_DS)
31023 : "memory");
31024 local_irq_restore(flags);
31025
31026@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
31027
31028 switch (len) {
31029 case 1:
31030- __asm__("lcall *(%%esi); cld\n\t"
31031+ __asm__("movw %w6, %%ds\n\t"
31032+ "lcall *%%ss:(%%esi); cld\n\t"
31033+ "push %%ss\n\t"
31034+ "pop %%ds\n\t"
31035 "jc 1f\n\t"
31036 "xor %%ah, %%ah\n"
31037 "1:"
31038@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
31039 : "1" (PCIBIOS_READ_CONFIG_BYTE),
31040 "b" (bx),
31041 "D" ((long)reg),
31042- "S" (&pci_indirect));
31043+ "S" (&pci_indirect),
31044+ "r" (__PCIBIOS_DS));
31045 /*
31046 * Zero-extend the result beyond 8 bits, do not trust the
31047 * BIOS having done it:
31048@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
31049 *value &= 0xff;
31050 break;
31051 case 2:
31052- __asm__("lcall *(%%esi); cld\n\t"
31053+ __asm__("movw %w6, %%ds\n\t"
31054+ "lcall *%%ss:(%%esi); cld\n\t"
31055+ "push %%ss\n\t"
31056+ "pop %%ds\n\t"
31057 "jc 1f\n\t"
31058 "xor %%ah, %%ah\n"
31059 "1:"
31060@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
31061 : "1" (PCIBIOS_READ_CONFIG_WORD),
31062 "b" (bx),
31063 "D" ((long)reg),
31064- "S" (&pci_indirect));
31065+ "S" (&pci_indirect),
31066+ "r" (__PCIBIOS_DS));
31067 /*
31068 * Zero-extend the result beyond 16 bits, do not trust the
31069 * BIOS having done it:
31070@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
31071 *value &= 0xffff;
31072 break;
31073 case 4:
31074- __asm__("lcall *(%%esi); cld\n\t"
31075+ __asm__("movw %w6, %%ds\n\t"
31076+ "lcall *%%ss:(%%esi); cld\n\t"
31077+ "push %%ss\n\t"
31078+ "pop %%ds\n\t"
31079 "jc 1f\n\t"
31080 "xor %%ah, %%ah\n"
31081 "1:"
31082@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
31083 : "1" (PCIBIOS_READ_CONFIG_DWORD),
31084 "b" (bx),
31085 "D" ((long)reg),
31086- "S" (&pci_indirect));
31087+ "S" (&pci_indirect),
31088+ "r" (__PCIBIOS_DS));
31089 break;
31090 }
31091
31092@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
31093
31094 switch (len) {
31095 case 1:
31096- __asm__("lcall *(%%esi); cld\n\t"
31097+ __asm__("movw %w6, %%ds\n\t"
31098+ "lcall *%%ss:(%%esi); cld\n\t"
31099+ "push %%ss\n\t"
31100+ "pop %%ds\n\t"
31101 "jc 1f\n\t"
31102 "xor %%ah, %%ah\n"
31103 "1:"
31104@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
31105 "c" (value),
31106 "b" (bx),
31107 "D" ((long)reg),
31108- "S" (&pci_indirect));
31109+ "S" (&pci_indirect),
31110+ "r" (__PCIBIOS_DS));
31111 break;
31112 case 2:
31113- __asm__("lcall *(%%esi); cld\n\t"
31114+ __asm__("movw %w6, %%ds\n\t"
31115+ "lcall *%%ss:(%%esi); cld\n\t"
31116+ "push %%ss\n\t"
31117+ "pop %%ds\n\t"
31118 "jc 1f\n\t"
31119 "xor %%ah, %%ah\n"
31120 "1:"
31121@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
31122 "c" (value),
31123 "b" (bx),
31124 "D" ((long)reg),
31125- "S" (&pci_indirect));
31126+ "S" (&pci_indirect),
31127+ "r" (__PCIBIOS_DS));
31128 break;
31129 case 4:
31130- __asm__("lcall *(%%esi); cld\n\t"
31131+ __asm__("movw %w6, %%ds\n\t"
31132+ "lcall *%%ss:(%%esi); cld\n\t"
31133+ "push %%ss\n\t"
31134+ "pop %%ds\n\t"
31135 "jc 1f\n\t"
31136 "xor %%ah, %%ah\n"
31137 "1:"
31138@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
31139 "c" (value),
31140 "b" (bx),
31141 "D" ((long)reg),
31142- "S" (&pci_indirect));
31143+ "S" (&pci_indirect),
31144+ "r" (__PCIBIOS_DS));
31145 break;
31146 }
31147
31148@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
31149
31150 DBG("PCI: Fetching IRQ routing table... ");
31151 __asm__("push %%es\n\t"
31152+ "movw %w8, %%ds\n\t"
31153 "push %%ds\n\t"
31154 "pop %%es\n\t"
31155- "lcall *(%%esi); cld\n\t"
31156+ "lcall *%%ss:(%%esi); cld\n\t"
31157 "pop %%es\n\t"
31158+ "push %%ss\n\t"
31159+ "pop %%ds\n"
31160 "jc 1f\n\t"
31161 "xor %%ah, %%ah\n"
31162 "1:"
31163@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
31164 "1" (0),
31165 "D" ((long) &opt),
31166 "S" (&pci_indirect),
31167- "m" (opt)
31168+ "m" (opt),
31169+ "r" (__PCIBIOS_DS)
31170 : "memory");
31171 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
31172 if (ret & 0xff00)
31173@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
31174 {
31175 int ret;
31176
31177- __asm__("lcall *(%%esi); cld\n\t"
31178+ __asm__("movw %w5, %%ds\n\t"
31179+ "lcall *%%ss:(%%esi); cld\n\t"
31180+ "push %%ss\n\t"
31181+ "pop %%ds\n"
31182 "jc 1f\n\t"
31183 "xor %%ah, %%ah\n"
31184 "1:"
31185@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
31186 : "0" (PCIBIOS_SET_PCI_HW_INT),
31187 "b" ((dev->bus->number << 8) | dev->devfn),
31188 "c" ((irq << 8) | (pin + 10)),
31189- "S" (&pci_indirect));
31190+ "S" (&pci_indirect),
31191+ "r" (__PCIBIOS_DS));
31192 return !(ret & 0xff00);
31193 }
31194 EXPORT_SYMBOL(pcibios_set_irq_routing);
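
The pcbios.c rework stops calling the 32-bit PCI BIOS through the flat kernel segments: every lcall now loads a dedicated __PCIBIOS_DS, and bios32_service() first probes the service through temporary flat descriptors, then installs per-CPU __PCIBIOS_CS/__PCIBIOS_DS entries whose base and limit cover only the discovered BIOS image, so a hijacked BIOS entry point cannot address arbitrary kernel memory. A standalone re-derivation of the descriptor packing behind those write_gdt_entry() calls; the bit layout mirrors the kernel's pack_descriptor(), with a simplified two-word struct:

```c
#include <stdint.h>
#include <stdio.h>

struct desc { uint32_t a, b; };	/* simplified desc_struct: two 32-bit words */

/* Mirrors the kernel's pack_descriptor() bit layout; the present bit is
 * carried in the type byte here. */
static void pack_descriptor(struct desc *d, uint32_t base, uint32_t limit,
			    uint8_t type, uint8_t flags)
{
	d->a = ((base & 0xffffu) << 16) | (limit & 0xffffu);
	d->b = (base & 0xff000000u) | ((base & 0x00ff0000u) >> 16) |
	       (limit & 0x000f0000u) | ((uint32_t)type << 8) |
	       ((uint32_t)(flags & 0xfu) << 20);
}

int main(void)
{
	struct desc cs, ds;

	/* 0x9B: present, DPL0, code, readable; 0x93: present, DPL0, data,
	 * writable; flags 0xC: 32-bit, 4 KiB granularity, so limit 0xFFFFF
	 * spans 4 GiB -- these are the temporary flat probe descriptors. */
	pack_descriptor(&cs, 0, 0xFFFFF, 0x9B, 0xC);
	pack_descriptor(&ds, 0, 0xFFFFF, 0x93, 0xC);
	printf("CS %08x%08x, DS %08x%08x\n", cs.b, cs.a, ds.b, ds.a);
	return 0;
}
```

Once the probe succeeds, the same helper runs again with the BIOS image's real base and length; that second, narrow pass is the hardening that matters.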
31195diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
31196index 40e4469..1ab536e 100644
31197--- a/arch/x86/platform/efi/efi_32.c
31198+++ b/arch/x86/platform/efi/efi_32.c
31199@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
31200 {
31201 struct desc_ptr gdt_descr;
31202
31203+#ifdef CONFIG_PAX_KERNEXEC
31204+ struct desc_struct d;
31205+#endif
31206+
31207 local_irq_save(efi_rt_eflags);
31208
31209 load_cr3(initial_page_table);
31210 __flush_tlb_all();
31211
31212+#ifdef CONFIG_PAX_KERNEXEC
31213+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
31214+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
31215+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
31216+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
31217+#endif
31218+
31219 gdt_descr.address = __pa(get_cpu_gdt_table(0));
31220 gdt_descr.size = GDT_SIZE - 1;
31221 load_gdt(&gdt_descr);
31222@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
31223 {
31224 struct desc_ptr gdt_descr;
31225
31226+#ifdef CONFIG_PAX_KERNEXEC
31227+ struct desc_struct d;
31228+
31229+ memset(&d, 0, sizeof d);
31230+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
31231+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
31232+#endif
31233+
31234 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
31235 gdt_descr.size = GDT_SIZE - 1;
31236 load_gdt(&gdt_descr);
31237diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
31238index fbe66e6..eae5e38 100644
31239--- a/arch/x86/platform/efi/efi_stub_32.S
31240+++ b/arch/x86/platform/efi/efi_stub_32.S
31241@@ -6,7 +6,9 @@
31242 */
31243
31244 #include <linux/linkage.h>
31245+#include <linux/init.h>
31246 #include <asm/page_types.h>
31247+#include <asm/segment.h>
31248
31249 /*
31250 * efi_call_phys(void *, ...) is a function with variable parameters.
31251@@ -20,7 +22,7 @@
31252 * service functions will comply with gcc calling convention, too.
31253 */
31254
31255-.text
31256+__INIT
31257 ENTRY(efi_call_phys)
31258 /*
31259 * 0. The function can only be called in Linux kernel. So CS has been
31260@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
31261 * The mapping of lower virtual memory has been created in prelog and
31262 * epilog.
31263 */
31264- movl $1f, %edx
31265- subl $__PAGE_OFFSET, %edx
31266- jmp *%edx
31267+#ifdef CONFIG_PAX_KERNEXEC
31268+ movl $(__KERNEXEC_EFI_DS), %edx
31269+ mov %edx, %ds
31270+ mov %edx, %es
31271+ mov %edx, %ss
31272+ addl $2f,(1f)
31273+ ljmp *(1f)
31274+
31275+__INITDATA
31276+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
31277+.previous
31278+
31279+2:
31280+ subl $2b,(1b)
31281+#else
31282+ jmp 1f-__PAGE_OFFSET
31283 1:
31284+#endif
31285
31286 /*
31287 * 2. Now on the top of stack is the return
31288@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
31289 * parameter 2, ..., param n. To make things easy, we save the return
31290 * address of efi_call_phys in a global variable.
31291 */
31292- popl %edx
31293- movl %edx, saved_return_addr
31294- /* get the function pointer into ECX*/
31295- popl %ecx
31296- movl %ecx, efi_rt_function_ptr
31297- movl $2f, %edx
31298- subl $__PAGE_OFFSET, %edx
31299- pushl %edx
31300+ popl (saved_return_addr)
31301+ popl (efi_rt_function_ptr)
31302
31303 /*
31304 * 3. Clear PG bit in %CR0.
31305@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
31306 /*
31307 * 5. Call the physical function.
31308 */
31309- jmp *%ecx
31310+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
31311
31312-2:
31313 /*
31314 * 6. After EFI runtime service returns, control will return to
31315 * following instruction. We'd better readjust stack pointer first.
31316@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
31317 movl %cr0, %edx
31318 orl $0x80000000, %edx
31319 movl %edx, %cr0
31320- jmp 1f
31321-1:
31322+
31323 /*
31324 * 8. Now restore the virtual mode from flat mode by
31325 * adding EIP with PAGE_OFFSET.
31326 */
31327- movl $1f, %edx
31328- jmp *%edx
31329+#ifdef CONFIG_PAX_KERNEXEC
31330+ movl $(__KERNEL_DS), %edx
31331+ mov %edx, %ds
31332+ mov %edx, %es
31333+ mov %edx, %ss
31334+ ljmp $(__KERNEL_CS),$1f
31335+#else
31336+ jmp 1f+__PAGE_OFFSET
31337+#endif
31338 1:
31339
31340 /*
31341 * 9. Balance the stack. And because EAX contain the return value,
31342 * we'd better not clobber it.
31343 */
31344- leal efi_rt_function_ptr, %edx
31345- movl (%edx), %ecx
31346- pushl %ecx
31347+ pushl (efi_rt_function_ptr)
31348
31349 /*
31350- * 10. Push the saved return address onto the stack and return.
31351+ * 10. Return to the saved return address.
31352 */
31353- leal saved_return_addr, %edx
31354- movl (%edx), %ecx
31355- pushl %ecx
31356- ret
31357+ jmpl *(saved_return_addr)
31358 ENDPROC(efi_call_phys)
31359 .previous
31360
31361-.data
31362+__INITDATA
31363 saved_return_addr:
31364 .long 0
31365 efi_rt_function_ptr:
31366diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
31367index 4c07cca..2c8427d 100644
31368--- a/arch/x86/platform/efi/efi_stub_64.S
31369+++ b/arch/x86/platform/efi/efi_stub_64.S
31370@@ -7,6 +7,7 @@
31371 */
31372
31373 #include <linux/linkage.h>
31374+#include <asm/alternative-asm.h>
31375
31376 #define SAVE_XMM \
31377 mov %rsp, %rax; \
31378@@ -40,6 +41,7 @@ ENTRY(efi_call0)
31379 call *%rdi
31380 addq $32, %rsp
31381 RESTORE_XMM
31382+ pax_force_retaddr 0, 1
31383 ret
31384 ENDPROC(efi_call0)
31385
31386@@ -50,6 +52,7 @@ ENTRY(efi_call1)
31387 call *%rdi
31388 addq $32, %rsp
31389 RESTORE_XMM
31390+ pax_force_retaddr 0, 1
31391 ret
31392 ENDPROC(efi_call1)
31393
31394@@ -60,6 +63,7 @@ ENTRY(efi_call2)
31395 call *%rdi
31396 addq $32, %rsp
31397 RESTORE_XMM
31398+ pax_force_retaddr 0, 1
31399 ret
31400 ENDPROC(efi_call2)
31401
31402@@ -71,6 +75,7 @@ ENTRY(efi_call3)
31403 call *%rdi
31404 addq $32, %rsp
31405 RESTORE_XMM
31406+ pax_force_retaddr 0, 1
31407 ret
31408 ENDPROC(efi_call3)
31409
31410@@ -83,6 +88,7 @@ ENTRY(efi_call4)
31411 call *%rdi
31412 addq $32, %rsp
31413 RESTORE_XMM
31414+ pax_force_retaddr 0, 1
31415 ret
31416 ENDPROC(efi_call4)
31417
31418@@ -96,6 +102,7 @@ ENTRY(efi_call5)
31419 call *%rdi
31420 addq $48, %rsp
31421 RESTORE_XMM
31422+ pax_force_retaddr 0, 1
31423 ret
31424 ENDPROC(efi_call5)
31425
31426@@ -112,5 +119,6 @@ ENTRY(efi_call6)
31427 call *%rdi
31428 addq $48, %rsp
31429 RESTORE_XMM
31430+ pax_force_retaddr 0, 1
31431 ret
31432 ENDPROC(efi_call6)
31433diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
31434index e31bcd8..f12dc46 100644
31435--- a/arch/x86/platform/mrst/mrst.c
31436+++ b/arch/x86/platform/mrst/mrst.c
31437@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
31438 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
31439 int sfi_mrtc_num;
31440
31441-static void mrst_power_off(void)
31442+static __noreturn void mrst_power_off(void)
31443 {
31444+ BUG();
31445 }
31446
31447-static void mrst_reboot(void)
31448+static __noreturn void mrst_reboot(void)
31449 {
31450 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
31451+ BUG();
31452 }
31453
31454 /* parse all the mtimer info to a static mtimer array */
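
mrst_power_off() and mrst_reboot() gain __noreturn plus a trailing BUG(): the annotation lets the compiler assume control never comes back, and the BUG() makes that assumption hold even if the SCU IPC command fails. A minimal hosted stand-in for the contract, with exit() playing the role of BUG():

```c
#include <stdio.h>
#include <stdlib.h>

#define __noreturn __attribute__((noreturn))

/* A __noreturn function must end in something that provably does not
 * return; falling off the end is undefined behaviour and draws a
 * compiler warning. */
static __noreturn void machine_power_off(void)
{
	puts("power off requested (simulated)");
	exit(0);	/* the kernel hunk uses BUG() as the terminal statement */
}

int main(void)
{
	machine_power_off();
	/* not reached; the compiler may drop everything after the call */
}
```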
31455diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
31456index d6ee929..3637cb5 100644
31457--- a/arch/x86/platform/olpc/olpc_dt.c
31458+++ b/arch/x86/platform/olpc/olpc_dt.c
31459@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
31460 return res;
31461 }
31462
31463-static struct of_pdt_ops prom_olpc_ops __initdata = {
31464+static struct of_pdt_ops prom_olpc_ops __initconst = {
31465 .nextprop = olpc_dt_nextprop,
31466 .getproplen = olpc_dt_getproplen,
31467 .getproperty = olpc_dt_getproperty,
31468diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
31469index 3c68768..07e82b8 100644
31470--- a/arch/x86/power/cpu.c
31471+++ b/arch/x86/power/cpu.c
31472@@ -134,7 +134,7 @@ static void do_fpu_end(void)
31473 static void fix_processor_context(void)
31474 {
31475 int cpu = smp_processor_id();
31476- struct tss_struct *t = &per_cpu(init_tss, cpu);
31477+ struct tss_struct *t = init_tss + cpu;
31478
31479 set_tss_desc(cpu, t); /*
31480 * This just modifies memory; should not be
31481@@ -144,8 +144,6 @@ static void fix_processor_context(void)
31482 */
31483
31484 #ifdef CONFIG_X86_64
31485- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
31486-
31487 syscall_init(); /* This sets MSR_*STAR and related */
31488 #endif
31489 load_TR_desc(); /* This does ltr */
31490diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
31491index cbca565..bae7133 100644
31492--- a/arch/x86/realmode/init.c
31493+++ b/arch/x86/realmode/init.c
31494@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
31495 __va(real_mode_header->trampoline_header);
31496
31497 #ifdef CONFIG_X86_32
31498- trampoline_header->start = __pa(startup_32_smp);
31499+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
31500+
31501+#ifdef CONFIG_PAX_KERNEXEC
31502+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
31503+#endif
31504+
31505+ trampoline_header->boot_cs = __BOOT_CS;
31506 trampoline_header->gdt_limit = __BOOT_DS + 7;
31507 trampoline_header->gdt_base = __pa(boot_gdt);
31508 #else
31509diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
31510index 8869287..d577672 100644
31511--- a/arch/x86/realmode/rm/Makefile
31512+++ b/arch/x86/realmode/rm/Makefile
31513@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
31514 $(call cc-option, -fno-unit-at-a-time)) \
31515 $(call cc-option, -fno-stack-protector) \
31516 $(call cc-option, -mpreferred-stack-boundary=2)
31517+ifdef CONSTIFY_PLUGIN
31518+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
31519+endif
31520 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
31521 GCOV_PROFILE := n
31522diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
31523index a28221d..93c40f1 100644
31524--- a/arch/x86/realmode/rm/header.S
31525+++ b/arch/x86/realmode/rm/header.S
31526@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
31527 #endif
31528 /* APM/BIOS reboot */
31529 .long pa_machine_real_restart_asm
31530-#ifdef CONFIG_X86_64
31531+#ifdef CONFIG_X86_32
31532+ .long __KERNEL_CS
31533+#else
31534 .long __KERNEL32_CS
31535 #endif
31536 END(real_mode_header)
31537diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
31538index c1b2791..f9e31c7 100644
31539--- a/arch/x86/realmode/rm/trampoline_32.S
31540+++ b/arch/x86/realmode/rm/trampoline_32.S
31541@@ -25,6 +25,12 @@
31542 #include <asm/page_types.h>
31543 #include "realmode.h"
31544
31545+#ifdef CONFIG_PAX_KERNEXEC
31546+#define ta(X) (X)
31547+#else
31548+#define ta(X) (pa_ ## X)
31549+#endif
31550+
31551 .text
31552 .code16
31553
31554@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
31555
31556 cli # We should be safe anyway
31557
31558- movl tr_start, %eax # where we need to go
31559-
31560 movl $0xA5A5A5A5, trampoline_status
31561 # write marker for master knows we're running
31562
31563@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
31564 movw $1, %dx # protected mode (PE) bit
31565 lmsw %dx # into protected mode
31566
31567- ljmpl $__BOOT_CS, $pa_startup_32
31568+ ljmpl *(trampoline_header)
31569
31570 .section ".text32","ax"
31571 .code32
31572@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
31573 .balign 8
31574 GLOBAL(trampoline_header)
31575 tr_start: .space 4
31576- tr_gdt_pad: .space 2
31577+ tr_boot_cs: .space 2
31578 tr_gdt: .space 6
31579 END(trampoline_header)
31580
31581diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
31582index bb360dc..3e5945f 100644
31583--- a/arch/x86/realmode/rm/trampoline_64.S
31584+++ b/arch/x86/realmode/rm/trampoline_64.S
31585@@ -107,7 +107,7 @@ ENTRY(startup_32)
31586 wrmsr
31587
31588 # Enable paging and in turn activate Long Mode
31589- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
31590+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
31591 movl %eax, %cr0
31592
31593 /*
31594diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
31595index 79d67bd..c7e1b90 100644
31596--- a/arch/x86/tools/relocs.c
31597+++ b/arch/x86/tools/relocs.c
31598@@ -12,10 +12,13 @@
31599 #include <regex.h>
31600 #include <tools/le_byteshift.h>
31601
31602+#include "../../../include/generated/autoconf.h"
31603+
31604 static void die(char *fmt, ...);
31605
31606 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
31607 static Elf32_Ehdr ehdr;
31608+static Elf32_Phdr *phdr;
31609 static unsigned long reloc_count, reloc_idx;
31610 static unsigned long *relocs;
31611 static unsigned long reloc16_count, reloc16_idx;
31612@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
31613 }
31614 }
31615
31616+static void read_phdrs(FILE *fp)
31617+{
31618+ unsigned int i;
31619+
31620+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
31621+ if (!phdr) {
31622+ die("Unable to allocate %d program headers\n",
31623+ ehdr.e_phnum);
31624+ }
31625+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
31626+ die("Seek to %d failed: %s\n",
31627+ ehdr.e_phoff, strerror(errno));
31628+ }
31629+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
31630+ die("Cannot read ELF program headers: %s\n",
31631+ strerror(errno));
31632+ }
31633+ for(i = 0; i < ehdr.e_phnum; i++) {
31634+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
31635+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
31636+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
31637+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
31638+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
31639+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
31640+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
31641+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
31642+ }
31643+
31644+}
31645+
31646 static void read_shdrs(FILE *fp)
31647 {
31648- int i;
31649+ unsigned int i;
31650 Elf32_Shdr shdr;
31651
31652 secs = calloc(ehdr.e_shnum, sizeof(struct section));
31653@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
31654
31655 static void read_strtabs(FILE *fp)
31656 {
31657- int i;
31658+ unsigned int i;
31659 for (i = 0; i < ehdr.e_shnum; i++) {
31660 struct section *sec = &secs[i];
31661 if (sec->shdr.sh_type != SHT_STRTAB) {
31662@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
31663
31664 static void read_symtabs(FILE *fp)
31665 {
31666- int i,j;
31667+ unsigned int i,j;
31668 for (i = 0; i < ehdr.e_shnum; i++) {
31669 struct section *sec = &secs[i];
31670 if (sec->shdr.sh_type != SHT_SYMTAB) {
31671@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
31672 }
31673
31674
31675-static void read_relocs(FILE *fp)
31676+static void read_relocs(FILE *fp, int use_real_mode)
31677 {
31678- int i,j;
31679+ unsigned int i,j;
31680+ uint32_t base;
31681+
31682 for (i = 0; i < ehdr.e_shnum; i++) {
31683 struct section *sec = &secs[i];
31684 if (sec->shdr.sh_type != SHT_REL) {
31685@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
31686 die("Cannot read symbol table: %s\n",
31687 strerror(errno));
31688 }
31689+ base = 0;
31690+
31691+#ifdef CONFIG_X86_32
31692+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
31693+ if (phdr[j].p_type != PT_LOAD )
31694+ continue;
31695+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
31696+ continue;
31697+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
31698+ break;
31699+ }
31700+#endif
31701+
31702 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
31703 Elf32_Rel *rel = &sec->reltab[j];
31704- rel->r_offset = elf32_to_cpu(rel->r_offset);
31705+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
31706 rel->r_info = elf32_to_cpu(rel->r_info);
31707 }
31708 }
31709@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
31710
31711 static void print_absolute_symbols(void)
31712 {
31713- int i;
31714+ unsigned int i;
31715 printf("Absolute symbols\n");
31716 printf(" Num: Value Size Type Bind Visibility Name\n");
31717 for (i = 0; i < ehdr.e_shnum; i++) {
31718 struct section *sec = &secs[i];
31719 char *sym_strtab;
31720- int j;
31721+ unsigned int j;
31722
31723 if (sec->shdr.sh_type != SHT_SYMTAB) {
31724 continue;
31725@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
31726
31727 static void print_absolute_relocs(void)
31728 {
31729- int i, printed = 0;
31730+ unsigned int i, printed = 0;
31731
31732 for (i = 0; i < ehdr.e_shnum; i++) {
31733 struct section *sec = &secs[i];
31734 struct section *sec_applies, *sec_symtab;
31735 char *sym_strtab;
31736 Elf32_Sym *sh_symtab;
31737- int j;
31738+ unsigned int j;
31739 if (sec->shdr.sh_type != SHT_REL) {
31740 continue;
31741 }
31742@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
31743 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31744 int use_real_mode)
31745 {
31746- int i;
31747+ unsigned int i;
31748 /* Walk through the relocations */
31749 for (i = 0; i < ehdr.e_shnum; i++) {
31750 char *sym_strtab;
31751 Elf32_Sym *sh_symtab;
31752 struct section *sec_applies, *sec_symtab;
31753- int j;
31754+ unsigned int j;
31755 struct section *sec = &secs[i];
31756
31757 if (sec->shdr.sh_type != SHT_REL) {
31758@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31759 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
31760 r_type = ELF32_R_TYPE(rel->r_info);
31761
31762+ if (!use_real_mode) {
31763+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
31764+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
31765+ continue;
31766+
31767+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
31768+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
31769+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
31770+ continue;
31771+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
31772+ continue;
31773+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
31774+ continue;
31775+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
31776+ continue;
31777+#endif
31778+ }
31779+
31780 shn_abs = sym->st_shndx == SHN_ABS;
31781
31782 switch (r_type) {
31783@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
31784
31785 static void emit_relocs(int as_text, int use_real_mode)
31786 {
31787- int i;
31788+ unsigned int i;
31789 /* Count how many relocations I have and allocate space for them. */
31790 reloc_count = 0;
31791 walk_relocs(count_reloc, use_real_mode);
31792@@ -808,10 +874,11 @@ int main(int argc, char **argv)
31793 fname, strerror(errno));
31794 }
31795 read_ehdr(fp);
31796+ read_phdrs(fp);
31797 read_shdrs(fp);
31798 read_strtabs(fp);
31799 read_symtabs(fp);
31800- read_relocs(fp);
31801+ read_relocs(fp, use_real_mode);
31802 if (show_absolute_syms) {
31803 print_absolute_symbols();
31804 goto out;
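
read_phdrs() and the reworked read_relocs() above carry the interesting part of the relocs tool change: for 32-bit kernels, each relocation's file-resident r_offset is rebased by the delta between its PT_LOAD segment's physical and virtual addresses, lifted into the CONFIG_PAGE_OFFSET view. A worked example with invented numbers (on a real build they come from the vmlinux program headers):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented example values; read_phdrs() above obtains the real
	 * ones from the ELF program headers. */
	uint32_t page_offset = 0xC0000000u;	/* CONFIG_PAGE_OFFSET, i386 default */
	uint32_t p_vaddr     = 0xC1000000u;	/* link-time address of the PT_LOAD */
	uint32_t p_paddr     = 0x11000000u;	/* load address of the same segment */
	uint32_t r_offset    = 0xC1234567u;	/* a relocation inside that segment */

	uint32_t base = page_offset + p_paddr - p_vaddr;	/* 0x10000000 */

	printf("base %#x -> rebased r_offset %#x\n", base, r_offset + base);
	return 0;
}
```

In the common identity-mapped case, where p_paddr equals p_vaddr minus the page offset, the base works out to zero and the behaviour matches the old code; the bias only bites when KERNEXEC has shifted the mapping.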
31805diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
31806index fd14be1..e3c79c0 100644
31807--- a/arch/x86/vdso/Makefile
31808+++ b/arch/x86/vdso/Makefile
31809@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
31810 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
31811 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
31812
31813-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31814+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31815 GCOV_PROFILE := n
31816
31817 #
31818diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
31819index 0faad64..39ef157 100644
31820--- a/arch/x86/vdso/vdso32-setup.c
31821+++ b/arch/x86/vdso/vdso32-setup.c
31822@@ -25,6 +25,7 @@
31823 #include <asm/tlbflush.h>
31824 #include <asm/vdso.h>
31825 #include <asm/proto.h>
31826+#include <asm/mman.h>
31827
31828 enum {
31829 VDSO_DISABLED = 0,
31830@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
31831 void enable_sep_cpu(void)
31832 {
31833 int cpu = get_cpu();
31834- struct tss_struct *tss = &per_cpu(init_tss, cpu);
31835+ struct tss_struct *tss = init_tss + cpu;
31836
31837 if (!boot_cpu_has(X86_FEATURE_SEP)) {
31838 put_cpu();
31839@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
31840 gate_vma.vm_start = FIXADDR_USER_START;
31841 gate_vma.vm_end = FIXADDR_USER_END;
31842 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31843- gate_vma.vm_page_prot = __P101;
31844+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31845
31846 return 0;
31847 }
31848@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31849 if (compat)
31850 addr = VDSO_HIGH_BASE;
31851 else {
31852- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31853+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31854 if (IS_ERR_VALUE(addr)) {
31855 ret = addr;
31856 goto up_fail;
31857 }
31858 }
31859
31860- current->mm->context.vdso = (void *)addr;
31861+ current->mm->context.vdso = addr;
31862
31863 if (compat_uses_vma || !compat) {
31864 /*
31865@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31866 }
31867
31868 current_thread_info()->sysenter_return =
31869- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31870+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31871
31872 up_fail:
31873 if (ret)
31874- current->mm->context.vdso = NULL;
31875+ current->mm->context.vdso = 0;
31876
31877 up_write(&mm->mmap_sem);
31878
31879@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31880
31881 const char *arch_vma_name(struct vm_area_struct *vma)
31882 {
31883- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31884+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31885 return "[vdso]";
31886+
31887+#ifdef CONFIG_PAX_SEGMEXEC
31888+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31889+ return "[vdso]";
31890+#endif
31891+
31892 return NULL;
31893 }
31894
31895@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31896 * Check to see if the corresponding task was created in compat vdso
31897 * mode.
31898 */
31899- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31900+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31901 return &gate_vma;
31902 return NULL;
31903 }
31904diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31905index 431e875..cbb23f3 100644
31906--- a/arch/x86/vdso/vma.c
31907+++ b/arch/x86/vdso/vma.c
31908@@ -16,8 +16,6 @@
31909 #include <asm/vdso.h>
31910 #include <asm/page.h>
31911
31912-unsigned int __read_mostly vdso_enabled = 1;
31913-
31914 extern char vdso_start[], vdso_end[];
31915 extern unsigned short vdso_sync_cpuid;
31916
31917@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31918 * unaligned here as a result of stack start randomization.
31919 */
31920 addr = PAGE_ALIGN(addr);
31921- addr = align_vdso_addr(addr);
31922
31923 return addr;
31924 }
31925@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31926 unsigned size)
31927 {
31928 struct mm_struct *mm = current->mm;
31929- unsigned long addr;
31930+ unsigned long addr = 0;
31931 int ret;
31932
31933- if (!vdso_enabled)
31934- return 0;
31935-
31936 down_write(&mm->mmap_sem);
31937+
31938+#ifdef CONFIG_PAX_RANDMMAP
31939+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31940+#endif
31941+
31942 addr = vdso_addr(mm->start_stack, size);
31943+ addr = align_vdso_addr(addr);
31944 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31945 if (IS_ERR_VALUE(addr)) {
31946 ret = addr;
31947 goto up_fail;
31948 }
31949
31950- current->mm->context.vdso = (void *)addr;
31951+ mm->context.vdso = addr;
31952
31953 ret = install_special_mapping(mm, addr, size,
31954 VM_READ|VM_EXEC|
31955 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31956 pages);
31957- if (ret) {
31958- current->mm->context.vdso = NULL;
31959- goto up_fail;
31960- }
31961+ if (ret)
31962+ mm->context.vdso = 0;
31963
31964 up_fail:
31965 up_write(&mm->mmap_sem);
31966@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31967 vdsox32_size);
31968 }
31969 #endif
31970-
31971-static __init int vdso_setup(char *s)
31972-{
31973- vdso_enabled = simple_strtoul(s, NULL, 0);
31974- return 0;
31975-}
31976-__setup("vdso=", vdso_setup);
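
The vma.c changes retire the vdso_enabled toggle entirely and fold vdso placement into the general mmap policy: when the PaX MF_PAX_RANDMMAP flag is set, the hint stays 0 and get_unmapped_area() places the vdso like any other randomized mapping; otherwise the stack-relative hint is computed as before and only then aligned. A sketch of the resulting hint selection, where the flag name, jitter range, and stack-top value are all illustrative stand-ins:

```c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE     4096ul
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define MF_RANDMMAP   0x1	/* stand-in for the PaX MF_PAX_RANDMMAP bit */

static unsigned long vdso_hint(unsigned long start_stack, unsigned flags)
{
	unsigned long addr = 0;

	if (!(flags & MF_RANDMMAP)) {
		/* old behaviour: land near the stack, with some jitter */
		addr = start_stack + ((unsigned long)rand() & 0x3ff) * PAGE_SIZE;
		addr = PAGE_ALIGN(addr);
	}
	return addr;	/* 0 lets the allocator pick under full randomization */
}

int main(void)
{
	srand((unsigned)time(NULL));
	printf("plain:    %#lx\n", vdso_hint(0x7ffd0000ul, 0));
	printf("randmmap: %#lx\n", vdso_hint(0x7ffd0000ul, MF_RANDMMAP));
	return 0;
}
```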
31977diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31978index 2262003..3ee61cf 100644
31979--- a/arch/x86/xen/enlighten.c
31980+++ b/arch/x86/xen/enlighten.c
31981@@ -100,8 +100,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31982
31983 struct shared_info xen_dummy_shared_info;
31984
31985-void *xen_initial_gdt;
31986-
31987 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31988 __read_mostly int xen_have_vector_callback;
31989 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31990@@ -496,8 +494,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31991 {
31992 unsigned long va = dtr->address;
31993 unsigned int size = dtr->size + 1;
31994- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31995- unsigned long frames[pages];
31996+ unsigned long frames[65536 / PAGE_SIZE];
31997 int f;
31998
31999 /*
32000@@ -545,8 +542,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
32001 {
32002 unsigned long va = dtr->address;
32003 unsigned int size = dtr->size + 1;
32004- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
32005- unsigned long frames[pages];
32006+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
32007 int f;
32008
32009 /*
32010@@ -554,7 +550,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
32011 * 8-byte entries, or 16 4k pages..
32012 */
32013
32014- BUG_ON(size > 65536);
32015+ BUG_ON(size > GDT_SIZE);
32016 BUG_ON(va & ~PAGE_MASK);
32017
32018 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
32019@@ -939,7 +935,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
32020 return 0;
32021 }
32022
32023-static void set_xen_basic_apic_ops(void)
32024+static void __init set_xen_basic_apic_ops(void)
32025 {
32026 apic->read = xen_apic_read;
32027 apic->write = xen_apic_write;
32028@@ -1245,30 +1241,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
32029 #endif
32030 };
32031
32032-static void xen_reboot(int reason)
32033+static __noreturn void xen_reboot(int reason)
32034 {
32035 struct sched_shutdown r = { .reason = reason };
32036
32037- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
32038- BUG();
32039+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
32040+ BUG();
32041 }
32042
32043-static void xen_restart(char *msg)
32044+static __noreturn void xen_restart(char *msg)
32045 {
32046 xen_reboot(SHUTDOWN_reboot);
32047 }
32048
32049-static void xen_emergency_restart(void)
32050+static __noreturn void xen_emergency_restart(void)
32051 {
32052 xen_reboot(SHUTDOWN_reboot);
32053 }
32054
32055-static void xen_machine_halt(void)
32056+static __noreturn void xen_machine_halt(void)
32057 {
32058 xen_reboot(SHUTDOWN_poweroff);
32059 }
32060
32061-static void xen_machine_power_off(void)
32062+static __noreturn void xen_machine_power_off(void)
32063 {
32064 if (pm_power_off)
32065 pm_power_off();
32066@@ -1370,7 +1366,17 @@ asmlinkage void __init xen_start_kernel(void)
32067 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
32068
32069 /* Work out if we support NX */
32070- x86_configure_nx();
32071+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32072+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
32073+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
32074+ unsigned l, h;
32075+
32076+ __supported_pte_mask |= _PAGE_NX;
32077+ rdmsr(MSR_EFER, l, h);
32078+ l |= EFER_NX;
32079+ wrmsr(MSR_EFER, l, h);
32080+ }
32081+#endif
32082
32083 xen_setup_features();
32084
32085@@ -1401,13 +1407,6 @@ asmlinkage void __init xen_start_kernel(void)
32086
32087 machine_ops = xen_machine_ops;
32088
32089- /*
32090- * The only reliable way to retain the initial address of the
32091- * percpu gdt_page is to remember it here, so we can go and
32092- * mark it RW later, when the initial percpu area is freed.
32093- */
32094- xen_initial_gdt = &per_cpu(gdt_page, 0);
32095-
32096 xen_smp_init();
32097
32098 #ifdef CONFIG_ACPI_NUMA
32099@@ -1598,7 +1597,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
32100 return NOTIFY_OK;
32101 }
32102
32103-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
32104+static struct notifier_block xen_hvm_cpu_notifier = {
32105 .notifier_call = xen_hvm_cpu_notify,
32106 };
32107
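
Both xen_load_gdt() hunks above replace `unsigned long frames[pages]`, a variable-length array sized from the incoming descriptor, with a fixed worst-case array: a runtime-sized VLA on the kernel stack is an overflow hazard, and the architecture already bounds the GDT at 64 KiB (the existing BUG_ON, now expressed via GDT_SIZE). A sketch of the after shape:

```c
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define GDT_MAX   65536	/* the x86 GDT limit field caps the table at 64 KiB */

/* After the change: the array has a compile-time bound derived from the
 * architectural maximum (16 page frames), and the size read from the
 * descriptor is merely validated against it. */
static void load_gdt_fixed(unsigned size)
{
	unsigned long frames[(GDT_MAX + PAGE_SIZE - 1) / PAGE_SIZE];

	assert(size <= GDT_MAX);	/* stands in for BUG_ON(size > GDT_SIZE) */
	printf("size %u fits in %zu fixed slots\n",
	       size, sizeof(frames) / sizeof(frames[0]));
	(void)frames;
}

int main(void)
{
	load_gdt_fixed(4096);
	return 0;
}
```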
32108diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
32109index cab96b6..8c629ba 100644
32110--- a/arch/x86/xen/mmu.c
32111+++ b/arch/x86/xen/mmu.c
32112@@ -1739,14 +1739,18 @@ static void *m2v(phys_addr_t maddr)
32113 }
32114
32115 /* Set the page permissions on an identity-mapped pages */
32116-static void set_page_prot(void *addr, pgprot_t prot)
32117+static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
32118 {
32119 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
32120 pte_t pte = pfn_pte(pfn, prot);
32121
32122- if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
32123+ if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
32124 BUG();
32125 }
32126+static void set_page_prot(void *addr, pgprot_t prot)
32127+{
32128+ return set_page_prot_flags(addr, prot, UVMF_NONE);
32129+}
32130 #ifdef CONFIG_X86_32
32131 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
32132 {
32133@@ -1830,12 +1834,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
32134 unsigned long addr)
32135 {
32136 if (*pt_base == PFN_DOWN(__pa(addr))) {
32137- set_page_prot((void *)addr, PAGE_KERNEL);
32138+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
32139 clear_page((void *)addr);
32140 (*pt_base)++;
32141 }
32142 if (*pt_end == PFN_DOWN(__pa(addr))) {
32143- set_page_prot((void *)addr, PAGE_KERNEL);
32144+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
32145 clear_page((void *)addr);
32146 (*pt_end)--;
32147 }
32148@@ -1881,6 +1885,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
32149 /* L3_k[510] -> level2_kernel_pgt
32150 * L3_i[511] -> level2_fixmap_pgt */
32151 convert_pfn_mfn(level3_kernel_pgt);
32152+ convert_pfn_mfn(level3_vmalloc_start_pgt);
32153+ convert_pfn_mfn(level3_vmalloc_end_pgt);
32154+ convert_pfn_mfn(level3_vmemmap_pgt);
32155
32156 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
32157 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
32158@@ -1910,8 +1917,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
32159 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
32160 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
32161 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
32162+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
32163+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
32164+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
32165 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
32166 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
32167+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
32168 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
32169 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
32170
32171@@ -2097,6 +2108,7 @@ static void __init xen_post_allocator_init(void)
32172 pv_mmu_ops.set_pud = xen_set_pud;
32173 #if PAGETABLE_LEVELS == 4
32174 pv_mmu_ops.set_pgd = xen_set_pgd;
32175+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
32176 #endif
32177
32178 /* This will work as long as patching hasn't happened yet
32179@@ -2178,6 +2190,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
32180 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
32181 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
32182 .set_pgd = xen_set_pgd_hyper,
32183+ .set_pgd_batched = xen_set_pgd_hyper,
32184
32185 .alloc_pud = xen_alloc_pmd_init,
32186 .release_pud = xen_release_pmd_init,
32187diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
32188index 34bc4ce..c34aa24 100644
32189--- a/arch/x86/xen/smp.c
32190+++ b/arch/x86/xen/smp.c
32191@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
32192 {
32193 BUG_ON(smp_processor_id() != 0);
32194 native_smp_prepare_boot_cpu();
32195-
32196- /* We've switched to the "real" per-cpu gdt, so make sure the
32197- old memory can be recycled */
32198- make_lowmem_page_readwrite(xen_initial_gdt);
32199-
32200 xen_filter_cpu_maps();
32201 xen_setup_vcpu_info_placement();
32202 }
32203@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
32204 gdt = get_cpu_gdt_table(cpu);
32205
32206 ctxt->flags = VGCF_IN_KERNEL;
32207- ctxt->user_regs.ds = __USER_DS;
32208- ctxt->user_regs.es = __USER_DS;
32209+ ctxt->user_regs.ds = __KERNEL_DS;
32210+ ctxt->user_regs.es = __KERNEL_DS;
32211 ctxt->user_regs.ss = __KERNEL_DS;
32212 #ifdef CONFIG_X86_32
32213 ctxt->user_regs.fs = __KERNEL_PERCPU;
32214- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
32215+ savesegment(gs, ctxt->user_regs.gs);
32216 #else
32217 ctxt->gs_base_kernel = per_cpu_offset(cpu);
32218 #endif
32219@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
32220 int rc;
32221
32222 per_cpu(current_task, cpu) = idle;
32223+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
32224 #ifdef CONFIG_X86_32
32225 irq_ctx_init(cpu);
32226 #else
32227 clear_tsk_thread_flag(idle, TIF_FORK);
32228- per_cpu(kernel_stack, cpu) =
32229- (unsigned long)task_stack_page(idle) -
32230- KERNEL_STACK_OFFSET + THREAD_SIZE;
32231+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
32232 #endif
32233 xen_setup_runstate_info(cpu);
32234 xen_setup_timer(cpu);
32235@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
32236
32237 void __init xen_smp_init(void)
32238 {
32239- smp_ops = xen_smp_ops;
32240+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
32241 xen_fill_possible_map();
32242 xen_init_spinlocks();
32243 }
32244diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
32245index 33ca6e4..0ded929 100644
32246--- a/arch/x86/xen/xen-asm_32.S
32247+++ b/arch/x86/xen/xen-asm_32.S
32248@@ -84,14 +84,14 @@ ENTRY(xen_iret)
32249 ESP_OFFSET=4 # bytes pushed onto stack
32250
32251 /*
32252- * Store vcpu_info pointer for easy access. Do it this way to
32253- * avoid having to reload %fs
32254+ * Store vcpu_info pointer for easy access.
32255 */
32256 #ifdef CONFIG_SMP
32257- GET_THREAD_INFO(%eax)
32258- movl %ss:TI_cpu(%eax), %eax
32259- movl %ss:__per_cpu_offset(,%eax,4), %eax
32260- mov %ss:xen_vcpu(%eax), %eax
32261+ push %fs
32262+ mov $(__KERNEL_PERCPU), %eax
32263+ mov %eax, %fs
32264+ mov PER_CPU_VAR(xen_vcpu), %eax
32265+ pop %fs
32266 #else
32267 movl %ss:xen_vcpu, %eax
32268 #endif
32269diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
32270index 7faed58..ba4427c 100644
32271--- a/arch/x86/xen/xen-head.S
32272+++ b/arch/x86/xen/xen-head.S
32273@@ -19,6 +19,17 @@ ENTRY(startup_xen)
32274 #ifdef CONFIG_X86_32
32275 mov %esi,xen_start_info
32276 mov $init_thread_union+THREAD_SIZE,%esp
32277+#ifdef CONFIG_SMP
32278+ movl $cpu_gdt_table,%edi
32279+ movl $__per_cpu_load,%eax
32280+ movw %ax,__KERNEL_PERCPU + 2(%edi)
32281+ rorl $16,%eax
32282+ movb %al,__KERNEL_PERCPU + 4(%edi)
32283+ movb %ah,__KERNEL_PERCPU + 7(%edi)
32284+ movl $__per_cpu_end - 1,%eax
32285+ subl $__per_cpu_start,%eax
32286+ movw %ax,__KERNEL_PERCPU + 0(%edi)
32287+#endif
32288 #else
32289 mov %rsi,xen_start_info
32290 mov $init_thread_union+THREAD_SIZE,%rsp
32291diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
32292index a95b417..b6dbd0b 100644
32293--- a/arch/x86/xen/xen-ops.h
32294+++ b/arch/x86/xen/xen-ops.h
32295@@ -10,8 +10,6 @@
32296 extern const char xen_hypervisor_callback[];
32297 extern const char xen_failsafe_callback[];
32298
32299-extern void *xen_initial_gdt;
32300-
32301 struct trap_info;
32302 void xen_copy_trap_info(struct trap_info *traps);
32303
32304diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
32305index 525bd3d..ef888b1 100644
32306--- a/arch/xtensa/variants/dc232b/include/variant/core.h
32307+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
32308@@ -119,9 +119,9 @@
32309 ----------------------------------------------------------------------*/
32310
32311 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
32312-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
32313 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
32314 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
32315+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
32316
32317 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
32318 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
32319diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
32320index 2f33760..835e50a 100644
32321--- a/arch/xtensa/variants/fsf/include/variant/core.h
32322+++ b/arch/xtensa/variants/fsf/include/variant/core.h
32323@@ -11,6 +11,7 @@
32324 #ifndef _XTENSA_CORE_H
32325 #define _XTENSA_CORE_H
32326
32327+#include <linux/const.h>
32328
32329 /****************************************************************************
32330 Parameters Useful for Any Code, USER or PRIVILEGED
32331@@ -112,9 +113,9 @@
32332 ----------------------------------------------------------------------*/
32333
32334 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
32335-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
32336 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
32337 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
32338+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
32339
32340 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
32341 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
32342diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
32343index af00795..2bb8105 100644
32344--- a/arch/xtensa/variants/s6000/include/variant/core.h
32345+++ b/arch/xtensa/variants/s6000/include/variant/core.h
32346@@ -11,6 +11,7 @@
32347 #ifndef _XTENSA_CORE_CONFIGURATION_H
32348 #define _XTENSA_CORE_CONFIGURATION_H
32349
32350+#include <linux/const.h>
32351
32352 /****************************************************************************
32353 Parameters Useful for Any Code, USER or PRIVILEGED
32354@@ -118,9 +119,9 @@
32355 ----------------------------------------------------------------------*/
32356
32357 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
32358-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
32359 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
32360 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
32361+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
32362
32363 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
32364 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
32365diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
32366index 58916af..eb9dbcf6 100644
32367--- a/block/blk-iopoll.c
32368+++ b/block/blk-iopoll.c
32369@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
32370 }
32371 EXPORT_SYMBOL(blk_iopoll_complete);
32372
32373-static void blk_iopoll_softirq(struct softirq_action *h)
32374+static void blk_iopoll_softirq(void)
32375 {
32376 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
32377 int rearm = 0, budget = blk_iopoll_budget;
32378@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
32379 return NOTIFY_OK;
32380 }
32381
32382-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
32383+static struct notifier_block blk_iopoll_cpu_notifier = {
32384 .notifier_call = blk_iopoll_cpu_notify,
32385 };
32386
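
blk_iopoll_softirq loses its unused struct softirq_action * argument, and the same change recurs for blk_done_softirq and other softirq handlers throughout the patch. Collapsing every handler to the exact type void (*)(void) means an indirect call through the softirq vector can be checked against one precise signature, which is what PaX's function-pointer hardening wants; an unused parameter only widens the set of functions the pointer could legally reach. A minimal sketch of the narrowed-type idea (all names illustrative):

    #include <stdio.h>

    /* Every entry has the exact same zero-argument signature, so an
     * indirect-call integrity check can match the target's type precisely. */
    typedef void (*softirq_handler_t)(void);

    static void blk_iopoll_handler(void) { puts("iopoll"); }
    static void blk_done_handler(void)   { puts("done"); }

    static softirq_handler_t handlers[] = { blk_iopoll_handler, blk_done_handler };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
            handlers[i]();   /* indirect call through the narrowed type */
        return 0;
    }
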
32387diff --git a/block/blk-map.c b/block/blk-map.c
32388index 623e1cd..ca1e109 100644
32389--- a/block/blk-map.c
32390+++ b/block/blk-map.c
32391@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
32392 if (!len || !kbuf)
32393 return -EINVAL;
32394
32395- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
32396+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
32397 if (do_copy)
32398 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
32399 else
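
blk_rq_map_kern must bounce-copy any buffer that lives on a kernel stack before handing it to the block layer. The rename to object_starts_on_stack states what the check can actually decide: whether the buffer's first byte lies within the current task's stack. A hedged sketch of such a predicate (kernel-flavored, not standalone; the real helper's form may differ):

    /* Sketch, assuming the usual single-THREAD_SIZE stack layout. */
    static inline int object_starts_on_stack(const void *obj)
    {
        const char *stack = task_stack_page(current);  /* stack base */
        const char *p = obj;

        return p >= stack && p < stack + THREAD_SIZE;
    }
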
32400diff --git a/block/blk-softirq.c b/block/blk-softirq.c
32401index 467c8de..f3628c5 100644
32402--- a/block/blk-softirq.c
32403+++ b/block/blk-softirq.c
32404@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
32405 * Softirq action handler - move entries to local list and loop over them
32406 * while passing them to the queue registered handler.
32407 */
32408-static void blk_done_softirq(struct softirq_action *h)
32409+static void blk_done_softirq(void)
32410 {
32411 struct list_head *cpu_list, local_list;
32412
32413@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
32414 return NOTIFY_OK;
32415 }
32416
32417-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
32418+static struct notifier_block blk_cpu_notifier = {
32419 .notifier_call = blk_cpu_notify,
32420 };
32421
32422diff --git a/block/bsg.c b/block/bsg.c
32423index ff64ae3..593560c 100644
32424--- a/block/bsg.c
32425+++ b/block/bsg.c
32426@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
32427 struct sg_io_v4 *hdr, struct bsg_device *bd,
32428 fmode_t has_write_perm)
32429 {
32430+ unsigned char tmpcmd[sizeof(rq->__cmd)];
32431+ unsigned char *cmdptr;
32432+
32433 if (hdr->request_len > BLK_MAX_CDB) {
32434 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
32435 if (!rq->cmd)
32436 return -ENOMEM;
32437- }
32438+ cmdptr = rq->cmd;
32439+ } else
32440+ cmdptr = tmpcmd;
32441
32442- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
32443+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
32444 hdr->request_len))
32445 return -EFAULT;
32446
32447+ if (cmdptr != rq->cmd)
32448+ memcpy(rq->cmd, cmdptr, hdr->request_len);
32449+
32450 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
32451 if (blk_verify_command(rq->cmd, has_write_perm))
32452 return -EPERM;
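
The bsg change stages short SCSI commands in an on-stack tmpcmd buffer and memcpy's them into place, instead of letting copy_from_user write straight into rq->cmd. When rq->cmd points at the __cmd array embedded in struct request, a direct user copy into the middle of a slab object is exactly what PAX_USERCOPY's bounds checking flags; the stack buffer has the compile-time size sizeof(rq->__cmd), so the user copy becomes provably bounded and only a plain kernel memcpy touches the request. Long commands that got their own kzalloc'ed buffer are still copied into directly. A userspace sketch of the staging pattern, with memcpy standing in for copy_from_user and hypothetical sizes:

    #include <string.h>

    #define BLK_MAX_CDB 16

    struct request_like {                 /* stand-in for struct request */
        unsigned char *cmd;               /* points at __cmd or a heap buffer */
        unsigned char __cmd[BLK_MAX_CDB];
    };

    static int fill_cmd(struct request_like *rq, const unsigned char *user_buf,
                        size_t len)
    {
        unsigned char tmpcmd[sizeof(rq->__cmd)];
        unsigned char *cmdptr;

        /* Stage short commands in a buffer whose size the checker can prove;
         * separately allocated long commands are written into directly. */
        cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

        if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
            return -1;                    /* would overflow the stage buffer */
        memcpy(cmdptr, user_buf, len);    /* copy_from_user() in the kernel */

        if (cmdptr != rq->cmd)
            memcpy(rq->cmd, cmdptr, len); /* single well-bounded kernel copy */
        return 0;
    }

    int main(void)
    {
        struct request_like rq;
        unsigned char user_buf[6] = "inqry";
        rq.cmd = rq.__cmd;                /* short command: embedded array */
        return fill_cmd(&rq, user_buf, sizeof(user_buf));
    }
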
32453diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
32454index 7c668c8..db3521c 100644
32455--- a/block/compat_ioctl.c
32456+++ b/block/compat_ioctl.c
32457@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
32458 err |= __get_user(f->spec1, &uf->spec1);
32459 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
32460 err |= __get_user(name, &uf->name);
32461- f->name = compat_ptr(name);
32462+ f->name = (void __force_kernel *)compat_ptr(name);
32463 if (err) {
32464 err = -EFAULT;
32465 goto out;
32466diff --git a/block/partitions/efi.c b/block/partitions/efi.c
32467index b62fb88..bdab4c4 100644
32468--- a/block/partitions/efi.c
32469+++ b/block/partitions/efi.c
32470@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
32471 if (!gpt)
32472 return NULL;
32473
32474+ if (!le32_to_cpu(gpt->num_partition_entries))
32475+ return NULL;
32476+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
32477+ if (!pte)
32478+ return NULL;
32479+
32480 count = le32_to_cpu(gpt->num_partition_entries) *
32481 le32_to_cpu(gpt->sizeof_partition_entry);
32482- if (!count)
32483- return NULL;
32484- pte = kzalloc(count, GFP_KERNEL);
32485- if (!pte)
32486- return NULL;
32487-
32488 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
32489 (u8 *) pte,
32490 count) < count) {
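
The GPT fix swaps kzalloc(count) for kcalloc(nmemb, size). Both factors come from an attacker-supplied on-disk header, so the open-coded num * size multiplication can wrap to a small count and under-allocate pte; kcalloc performs the same multiplication with an overflow check and fails cleanly instead, and the zero check is correspondingly moved onto num_partition_entries itself. Userspace demo of both behaviors (values invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Hypothetical attacker-controlled GPT header fields. */
        uint32_t nmemb = 0x20000001u;        /* num_partition_entries  */
        uint32_t size  = 0x80u;              /* sizeof_partition_entry */

        uint32_t count = nmemb * size;       /* wraps to 0x80: under-allocation */
        printf("open-coded count wraps to %u bytes\n", count);

        /* calloc, like kcalloc, checks the multiplication itself. */
        void *p = calloc(SIZE_MAX / 2, 4);   /* product overflows size_t */
        printf("calloc on overflow: %p\n", p);   /* NULL */
        free(p);
        return 0;
    }
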
32491diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
32492index 9a87daa..fb17486 100644
32493--- a/block/scsi_ioctl.c
32494+++ b/block/scsi_ioctl.c
32495@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
32496 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
32497 struct sg_io_hdr *hdr, fmode_t mode)
32498 {
32499- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
32500+ unsigned char tmpcmd[sizeof(rq->__cmd)];
32501+ unsigned char *cmdptr;
32502+
32503+ if (rq->cmd != rq->__cmd)
32504+ cmdptr = rq->cmd;
32505+ else
32506+ cmdptr = tmpcmd;
32507+
32508+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
32509 return -EFAULT;
32510+
32511+ if (cmdptr != rq->cmd)
32512+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
32513+
32514 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
32515 return -EPERM;
32516
32517@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
32518 int err;
32519 unsigned int in_len, out_len, bytes, opcode, cmdlen;
32520 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
32521+ unsigned char tmpcmd[sizeof(rq->__cmd)];
32522+ unsigned char *cmdptr;
32523
32524 if (!sic)
32525 return -EINVAL;
32526@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
32527 */
32528 err = -EFAULT;
32529 rq->cmd_len = cmdlen;
32530- if (copy_from_user(rq->cmd, sic->data, cmdlen))
32531+
32532+ if (rq->cmd != rq->__cmd)
32533+ cmdptr = rq->cmd;
32534+ else
32535+ cmdptr = tmpcmd;
32536+
32537+ if (copy_from_user(cmdptr, sic->data, cmdlen))
32538 goto error;
32539
32540+ if (rq->cmd != cmdptr)
32541+ memcpy(rq->cmd, cmdptr, cmdlen);
32542+
32543 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
32544 goto error;
32545
32546diff --git a/crypto/cryptd.c b/crypto/cryptd.c
32547index 7bdd61b..afec999 100644
32548--- a/crypto/cryptd.c
32549+++ b/crypto/cryptd.c
32550@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
32551
32552 struct cryptd_blkcipher_request_ctx {
32553 crypto_completion_t complete;
32554-};
32555+} __no_const;
32556
32557 struct cryptd_hash_ctx {
32558 struct crypto_shash *child;
32559@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
32560
32561 struct cryptd_aead_request_ctx {
32562 crypto_completion_t complete;
32563-};
32564+} __no_const;
32565
32566 static void cryptd_queue_worker(struct work_struct *work);
32567
32568diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
32569index f6d9baf..dfd511f 100644
32570--- a/crypto/crypto_user.c
32571+++ b/crypto/crypto_user.c
32572@@ -30,6 +30,8 @@
32573
32574 #include "internal.h"
32575
32576+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
32577+
32578 static DEFINE_MUTEX(crypto_cfg_mutex);
32579
32580 /* The crypto netlink socket */
32581@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
32582 struct crypto_dump_info info;
32583 int err;
32584
32585- if (!p->cru_driver_name)
32586+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32587+ return -EINVAL;
32588+
32589+ if (!p->cru_driver_name[0])
32590 return -EINVAL;
32591
32592 alg = crypto_alg_match(p, 1);
32593@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32594 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
32595 LIST_HEAD(list);
32596
32597+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32598+ return -EINVAL;
32599+
32600 if (priority && !strlen(p->cru_driver_name))
32601 return -EINVAL;
32602
32603@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32604 struct crypto_alg *alg;
32605 struct crypto_user_alg *p = nlmsg_data(nlh);
32606
32607+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32608+ return -EINVAL;
32609+
32610 alg = crypto_alg_match(p, 1);
32611 if (!alg)
32612 return -ENOENT;
32613@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32614 struct crypto_user_alg *p = nlmsg_data(nlh);
32615 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
32616
32617+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32618+ return -EINVAL;
32619+
32620 if (strlen(p->cru_driver_name))
32621 exact = 1;
32622
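
crypto_user receives struct crypto_user_alg from a netlink message and immediately treats cru_name and cru_driver_name as C strings, yet nothing guarantees the fixed-size arrays are NUL-terminated, so strlen and the matching logic could read past the structure. The null_terminated() macro rejects such messages up front: strnlen bounded by sizeof(x) returns sizeof(x) exactly when no terminator exists inside the array. Small demo (struct simplified):

    #include <stdio.h>
    #include <string.h>

    #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))

    struct crypto_user_alg_like {            /* simplified stand-in */
        char cru_name[64];
        char cru_driver_name[64];
    };

    int main(void)
    {
        struct crypto_user_alg_like p;

        memset(p.cru_name, 'A', sizeof(p.cru_name));   /* no NUL anywhere */
        strcpy(p.cru_driver_name, "aes-generic");

        printf("cru_name ok? %d\n", null_terminated(p.cru_name));              /* 0 */
        printf("cru_driver_name ok? %d\n", null_terminated(p.cru_driver_name)); /* 1 */
        return 0;
    }
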
32623diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
32624index f220d64..d359ad6 100644
32625--- a/drivers/acpi/apei/apei-internal.h
32626+++ b/drivers/acpi/apei/apei-internal.h
32627@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
32628 struct apei_exec_ins_type {
32629 u32 flags;
32630 apei_exec_ins_func_t run;
32631-};
32632+} __do_const;
32633
32634 struct apei_exec_context {
32635 u32 ip;
32636diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
32637index e6defd8..c26a225 100644
32638--- a/drivers/acpi/apei/cper.c
32639+++ b/drivers/acpi/apei/cper.c
32640@@ -38,12 +38,12 @@
32641 */
32642 u64 cper_next_record_id(void)
32643 {
32644- static atomic64_t seq;
32645+ static atomic64_unchecked_t seq;
32646
32647- if (!atomic64_read(&seq))
32648- atomic64_set(&seq, ((u64)get_seconds()) << 32);
32649+ if (!atomic64_read_unchecked(&seq))
32650+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
32651
32652- return atomic64_inc_return(&seq);
32653+ return atomic64_inc_return_unchecked(&seq);
32654 }
32655 EXPORT_SYMBOL_GPL(cper_next_record_id);
32656
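
cper_next_record_id switches its static sequence counter to the *_unchecked atomic64 API. Under PaX REFCOUNT the ordinary atomic primitives are instrumented to catch overflow, wrapped reference counts being a classic use-after-free primitive; a record-ID generator is expected to wrap eventually and must neither saturate nor trip the detector, so it opts out. A userspace caricature of the distinction, with saturation standing in for whatever REFCOUNT actually does on overflow:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* "Checked" increments saturate at the limit; "unchecked" ones wrap. */
    static uint64_t inc_return_checked(_Atomic uint64_t *v)
    {
        uint64_t old = atomic_fetch_add(v, 1);
        if (old == UINT64_MAX) {             /* overflow would have occurred */
            atomic_store(v, UINT64_MAX);     /* saturate instead of wrapping */
            return UINT64_MAX;
        }
        return old + 1;
    }

    static uint64_t inc_return_unchecked(_Atomic uint64_t *v)
    {
        return atomic_fetch_add(v, 1) + 1;   /* plain wrapping semantics */
    }

    int main(void)
    {
        _Atomic uint64_t seq = UINT64_MAX;
        printf("checked:   %llu\n", (unsigned long long)inc_return_checked(&seq));
        atomic_store(&seq, UINT64_MAX);
        printf("unchecked: %llu\n", (unsigned long long)inc_return_unchecked(&seq));
        return 0;
    }
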
32657diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
32658index be60399..778b33e8 100644
32659--- a/drivers/acpi/bgrt.c
32660+++ b/drivers/acpi/bgrt.c
32661@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
32662 return -ENODEV;
32663
32664 sysfs_bin_attr_init(&image_attr);
32665- image_attr.private = bgrt_image;
32666- image_attr.size = bgrt_image_size;
32667+ pax_open_kernel();
32668+ *(void **)&image_attr.private = bgrt_image;
32669+ *(size_t *)&image_attr.size = bgrt_image_size;
32670+ pax_close_kernel();
32671
32672 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
32673 if (!bgrt_kobj)
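
Under PAX_KERNEXEC, data that is only initialized once (here a bin_attribute) is kept read-only; pax_open_kernel()/pax_close_kernel() bracket the rare legitimate writes, and the *(void **)&... casts manufacture writable lvalues for fields the type system now treats as const. A userspace analogue of the same discipline using mprotect, purely illustrative (the kernel mechanism is different, e.g. toggling CR0.WP on x86; error checks omitted):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static char *page;

    static void open_kernel(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(page, 4096, PROT_READ); }

    int main(void)
    {
        page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        strcpy(page, "initial");
        close_kernel();               /* data now read-only, as under KERNEXEC */

        open_kernel();                /* bracketed one-time update */
        strcpy(page, "updated");
        close_kernel();

        puts(page);
        return 0;
    }
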
32674diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
32675index cb96296..b81293b 100644
32676--- a/drivers/acpi/blacklist.c
32677+++ b/drivers/acpi/blacklist.c
32678@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
32679 u32 is_critical_error;
32680 };
32681
32682-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
32683+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
32684
32685 /*
32686 * POLICY: If *anything* doesn't work, put it on the blacklist.
32687@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
32688 return 0;
32689 }
32690
32691-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
32692+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
32693 {
32694 .callback = dmi_disable_osi_vista,
32695 .ident = "Fujitsu Siemens",
32696diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
32697index 7586544..636a2f0 100644
32698--- a/drivers/acpi/ec_sys.c
32699+++ b/drivers/acpi/ec_sys.c
32700@@ -12,6 +12,7 @@
32701 #include <linux/acpi.h>
32702 #include <linux/debugfs.h>
32703 #include <linux/module.h>
32704+#include <linux/uaccess.h>
32705 #include "internal.h"
32706
32707 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
32708@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32709 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
32710 */
32711 unsigned int size = EC_SPACE_SIZE;
32712- u8 *data = (u8 *) buf;
32713+ u8 data;
32714 loff_t init_off = *off;
32715 int err = 0;
32716
32717@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32718 size = count;
32719
32720 while (size) {
32721- err = ec_read(*off, &data[*off - init_off]);
32722+ err = ec_read(*off, &data);
32723 if (err)
32724 return err;
32725+ if (put_user(data, &buf[*off - init_off]))
32726+ return -EFAULT;
32727 *off += 1;
32728 size--;
32729 }
32730@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32731
32732 unsigned int size = count;
32733 loff_t init_off = *off;
32734- u8 *data = (u8 *) buf;
32735 int err = 0;
32736
32737 if (*off >= EC_SPACE_SIZE)
32738@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32739 }
32740
32741 while (size) {
32742- u8 byte_write = data[*off - init_off];
32743+ u8 byte_write;
32744+ if (get_user(byte_write, &buf[*off - init_off]))
32745+ return -EFAULT;
32746 err = ec_write(*off, byte_write);
32747 if (err)
32748 return err;
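
The ec_sys hunks fix an outright bug rather than hardening: acpi_ec_read_io and acpi_ec_write_io cast the __user buffer to u8 * and dereferenced it in kernel context, which skips all access checking and can oops (or worse) whenever the page is unmapped. The fix moves each byte through a kernel-local variable with put_user/get_user, matching the loop's existing one-byte granularity. The shape of the change (kernel-flavored sketch, not standalone code):

    /* Before: kernel dereferences a userspace pointer directly; broken. */
    u8 *data = (u8 *)buf;                          /* buf is char __user * */
    err = ec_read(*off, &data[*off - init_off]);   /* writes through user ptr */

    /* After: keep the byte in a kernel variable and move it across the
     * boundary with the checked accessor, which may fault safely. */
    u8 byte;
    err = ec_read(*off, &byte);
    if (err)
        return err;
    if (put_user(byte, &buf[*off - init_off]))
        return -EFAULT;
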
32749diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
32750index e83311b..142b5cc 100644
32751--- a/drivers/acpi/processor_driver.c
32752+++ b/drivers/acpi/processor_driver.c
32753@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
32754 return 0;
32755 #endif
32756
32757- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
32758+ BUG_ON(pr->id >= nr_cpu_ids);
32759
32760 /*
32761 * Buggy BIOS check
32762diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
32763index ed9a1cc..f4a354c 100644
32764--- a/drivers/acpi/processor_idle.c
32765+++ b/drivers/acpi/processor_idle.c
32766@@ -1005,7 +1005,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
32767 {
32768 int i, count = CPUIDLE_DRIVER_STATE_START;
32769 struct acpi_processor_cx *cx;
32770- struct cpuidle_state *state;
32771+ cpuidle_state_no_const *state;
32772 struct cpuidle_driver *drv = &acpi_idle_driver;
32773
32774 if (!pr->flags.power_setup_done)
32775diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
32776index ea61ca9..3fdd70d 100644
32777--- a/drivers/acpi/sysfs.c
32778+++ b/drivers/acpi/sysfs.c
32779@@ -420,11 +420,11 @@ static u32 num_counters;
32780 static struct attribute **all_attrs;
32781 static u32 acpi_gpe_count;
32782
32783-static struct attribute_group interrupt_stats_attr_group = {
32784+static attribute_group_no_const interrupt_stats_attr_group = {
32785 .name = "interrupts",
32786 };
32787
32788-static struct kobj_attribute *counter_attrs;
32789+static kobj_attribute_no_const *counter_attrs;
32790
32791 static void delete_gpe_attr_array(void)
32792 {
32793diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
32794index 6cd7805..07facb3 100644
32795--- a/drivers/ata/libahci.c
32796+++ b/drivers/ata/libahci.c
32797@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
32798 }
32799 EXPORT_SYMBOL_GPL(ahci_kick_engine);
32800
32801-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32802+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32803 struct ata_taskfile *tf, int is_cmd, u16 flags,
32804 unsigned long timeout_msec)
32805 {
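
__intentional_overflow(-1) is an annotation for the size_overflow GCC plugin this patch carries: the plugin instruments integer expressions flowing into sizes and traps on unexpected overflow, and the attribute exempts a function from that treatment, with -1 conventionally naming the return value and positive indices naming parameters. Roughly how the macro is wired up (sketch only; the real guard macro and attribute spelling live elsewhere in the patch):

    /* With the plugin active the annotation becomes an attribute the plugin
     * recognizes; otherwise it compiles away.  Guard name illustrative. */
    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif
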
32806diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
32807index 501c209..5f28b4d 100644
32808--- a/drivers/ata/libata-core.c
32809+++ b/drivers/ata/libata-core.c
32810@@ -4784,7 +4784,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
32811 struct ata_port *ap;
32812 unsigned int tag;
32813
32814- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32815+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32816 ap = qc->ap;
32817
32818 qc->flags = 0;
32819@@ -4800,7 +4800,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
32820 struct ata_port *ap;
32821 struct ata_link *link;
32822
32823- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32824+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32825 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
32826 ap = qc->ap;
32827 link = qc->dev->link;
32828@@ -5896,6 +5896,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32829 return;
32830
32831 spin_lock(&lock);
32832+ pax_open_kernel();
32833
32834 for (cur = ops->inherits; cur; cur = cur->inherits) {
32835 void **inherit = (void **)cur;
32836@@ -5909,8 +5910,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32837 if (IS_ERR(*pp))
32838 *pp = NULL;
32839
32840- ops->inherits = NULL;
32841+ *(struct ata_port_operations **)&ops->inherits = NULL;
32842
32843+ pax_close_kernel();
32844 spin_unlock(&lock);
32845 }
32846
32847diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
32848index 405022d..fb70e53 100644
32849--- a/drivers/ata/pata_arasan_cf.c
32850+++ b/drivers/ata/pata_arasan_cf.c
32851@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
32852 /* Handle platform specific quirks */
32853 if (pdata->quirk) {
32854 if (pdata->quirk & CF_BROKEN_PIO) {
32855- ap->ops->set_piomode = NULL;
32856+ pax_open_kernel();
32857+ *(void **)&ap->ops->set_piomode = NULL;
32858+ pax_close_kernel();
32859 ap->pio_mask = 0;
32860 }
32861 if (pdata->quirk & CF_BROKEN_MWDMA)
32862diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
32863index f9b983a..887b9d8 100644
32864--- a/drivers/atm/adummy.c
32865+++ b/drivers/atm/adummy.c
32866@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
32867 vcc->pop(vcc, skb);
32868 else
32869 dev_kfree_skb_any(skb);
32870- atomic_inc(&vcc->stats->tx);
32871+ atomic_inc_unchecked(&vcc->stats->tx);
32872
32873 return 0;
32874 }
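
The long run of ATM driver hunks that follows (ambassador, atmtcp, eni, firestream, fore200e, he, horizon, idt77252, iphase, lanai, nicstar, solos-pci, suni, uPD98402, zatm) is one mechanical conversion: the tx/rx/error counters in the VCC and SONET stats structures become atomic_unchecked_t, because packet counters are expected to wrap and must not trigger REFCOUNT overflow detection; the suni and uPD98402 ADD_LIMITED macros get the matching _unchecked primitives. The underlying type change looks like this (simplified sketch of the corresponding atmdev.h edit; field list abbreviated, atomic_unchecked_t is the PaX type):

    struct k_atm_aal_stats {
    #define __HANDLE_ITEM(i) atomic_unchecked_t i   /* was: atomic_t i */
            __HANDLE_ITEM(tx);
            __HANDLE_ITEM(tx_err);
            __HANDLE_ITEM(rx);
            __HANDLE_ITEM(rx_err);
            __HANDLE_ITEM(rx_drop);
    #undef __HANDLE_ITEM
    };
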
32875diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
32876index 77a7480..05cde58 100644
32877--- a/drivers/atm/ambassador.c
32878+++ b/drivers/atm/ambassador.c
32879@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
32880 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
32881
32882 // VC layer stats
32883- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32884+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32885
32886 // free the descriptor
32887 kfree (tx_descr);
32888@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32889 dump_skb ("<<<", vc, skb);
32890
32891 // VC layer stats
32892- atomic_inc(&atm_vcc->stats->rx);
32893+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32894 __net_timestamp(skb);
32895 // end of our responsibility
32896 atm_vcc->push (atm_vcc, skb);
32897@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32898 } else {
32899 PRINTK (KERN_INFO, "dropped over-size frame");
32900 // should we count this?
32901- atomic_inc(&atm_vcc->stats->rx_drop);
32902+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32903 }
32904
32905 } else {
32906@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
32907 }
32908
32909 if (check_area (skb->data, skb->len)) {
32910- atomic_inc(&atm_vcc->stats->tx_err);
32911+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
32912 return -ENOMEM; // ?
32913 }
32914
32915diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
32916index b22d71c..d6e1049 100644
32917--- a/drivers/atm/atmtcp.c
32918+++ b/drivers/atm/atmtcp.c
32919@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32920 if (vcc->pop) vcc->pop(vcc,skb);
32921 else dev_kfree_skb(skb);
32922 if (dev_data) return 0;
32923- atomic_inc(&vcc->stats->tx_err);
32924+ atomic_inc_unchecked(&vcc->stats->tx_err);
32925 return -ENOLINK;
32926 }
32927 size = skb->len+sizeof(struct atmtcp_hdr);
32928@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32929 if (!new_skb) {
32930 if (vcc->pop) vcc->pop(vcc,skb);
32931 else dev_kfree_skb(skb);
32932- atomic_inc(&vcc->stats->tx_err);
32933+ atomic_inc_unchecked(&vcc->stats->tx_err);
32934 return -ENOBUFS;
32935 }
32936 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
32937@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32938 if (vcc->pop) vcc->pop(vcc,skb);
32939 else dev_kfree_skb(skb);
32940 out_vcc->push(out_vcc,new_skb);
32941- atomic_inc(&vcc->stats->tx);
32942- atomic_inc(&out_vcc->stats->rx);
32943+ atomic_inc_unchecked(&vcc->stats->tx);
32944+ atomic_inc_unchecked(&out_vcc->stats->rx);
32945 return 0;
32946 }
32947
32948@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32949 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32950 read_unlock(&vcc_sklist_lock);
32951 if (!out_vcc) {
32952- atomic_inc(&vcc->stats->tx_err);
32953+ atomic_inc_unchecked(&vcc->stats->tx_err);
32954 goto done;
32955 }
32956 skb_pull(skb,sizeof(struct atmtcp_hdr));
32957@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32958 __net_timestamp(new_skb);
32959 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32960 out_vcc->push(out_vcc,new_skb);
32961- atomic_inc(&vcc->stats->tx);
32962- atomic_inc(&out_vcc->stats->rx);
32963+ atomic_inc_unchecked(&vcc->stats->tx);
32964+ atomic_inc_unchecked(&out_vcc->stats->rx);
32965 done:
32966 if (vcc->pop) vcc->pop(vcc,skb);
32967 else dev_kfree_skb(skb);
32968diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32969index c1eb6fa..4c71be9 100644
32970--- a/drivers/atm/eni.c
32971+++ b/drivers/atm/eni.c
32972@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32973 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32974 vcc->dev->number);
32975 length = 0;
32976- atomic_inc(&vcc->stats->rx_err);
32977+ atomic_inc_unchecked(&vcc->stats->rx_err);
32978 }
32979 else {
32980 length = ATM_CELL_SIZE-1; /* no HEC */
32981@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32982 size);
32983 }
32984 eff = length = 0;
32985- atomic_inc(&vcc->stats->rx_err);
32986+ atomic_inc_unchecked(&vcc->stats->rx_err);
32987 }
32988 else {
32989 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32990@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32991 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32992 vcc->dev->number,vcc->vci,length,size << 2,descr);
32993 length = eff = 0;
32994- atomic_inc(&vcc->stats->rx_err);
32995+ atomic_inc_unchecked(&vcc->stats->rx_err);
32996 }
32997 }
32998 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32999@@ -767,7 +767,7 @@ rx_dequeued++;
33000 vcc->push(vcc,skb);
33001 pushed++;
33002 }
33003- atomic_inc(&vcc->stats->rx);
33004+ atomic_inc_unchecked(&vcc->stats->rx);
33005 }
33006 wake_up(&eni_dev->rx_wait);
33007 }
33008@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
33009 PCI_DMA_TODEVICE);
33010 if (vcc->pop) vcc->pop(vcc,skb);
33011 else dev_kfree_skb_irq(skb);
33012- atomic_inc(&vcc->stats->tx);
33013+ atomic_inc_unchecked(&vcc->stats->tx);
33014 wake_up(&eni_dev->tx_wait);
33015 dma_complete++;
33016 }
33017diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
33018index b41c948..a002b17 100644
33019--- a/drivers/atm/firestream.c
33020+++ b/drivers/atm/firestream.c
33021@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
33022 }
33023 }
33024
33025- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
33026+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
33027
33028 fs_dprintk (FS_DEBUG_TXMEM, "i");
33029 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
33030@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
33031 #endif
33032 skb_put (skb, qe->p1 & 0xffff);
33033 ATM_SKB(skb)->vcc = atm_vcc;
33034- atomic_inc(&atm_vcc->stats->rx);
33035+ atomic_inc_unchecked(&atm_vcc->stats->rx);
33036 __net_timestamp(skb);
33037 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
33038 atm_vcc->push (atm_vcc, skb);
33039@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
33040 kfree (pe);
33041 }
33042 if (atm_vcc)
33043- atomic_inc(&atm_vcc->stats->rx_drop);
33044+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
33045 break;
33046 case 0x1f: /* Reassembly abort: no buffers. */
33047 /* Silently increment error counter. */
33048 if (atm_vcc)
33049- atomic_inc(&atm_vcc->stats->rx_drop);
33050+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
33051 break;
33052 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
33053 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
33054diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
33055index 204814e..cede831 100644
33056--- a/drivers/atm/fore200e.c
33057+++ b/drivers/atm/fore200e.c
33058@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
33059 #endif
33060 /* check error condition */
33061 if (*entry->status & STATUS_ERROR)
33062- atomic_inc(&vcc->stats->tx_err);
33063+ atomic_inc_unchecked(&vcc->stats->tx_err);
33064 else
33065- atomic_inc(&vcc->stats->tx);
33066+ atomic_inc_unchecked(&vcc->stats->tx);
33067 }
33068 }
33069
33070@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
33071 if (skb == NULL) {
33072 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
33073
33074- atomic_inc(&vcc->stats->rx_drop);
33075+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33076 return -ENOMEM;
33077 }
33078
33079@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
33080
33081 dev_kfree_skb_any(skb);
33082
33083- atomic_inc(&vcc->stats->rx_drop);
33084+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33085 return -ENOMEM;
33086 }
33087
33088 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
33089
33090 vcc->push(vcc, skb);
33091- atomic_inc(&vcc->stats->rx);
33092+ atomic_inc_unchecked(&vcc->stats->rx);
33093
33094 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
33095
33096@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
33097 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
33098 fore200e->atm_dev->number,
33099 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
33100- atomic_inc(&vcc->stats->rx_err);
33101+ atomic_inc_unchecked(&vcc->stats->rx_err);
33102 }
33103 }
33104
33105@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
33106 goto retry_here;
33107 }
33108
33109- atomic_inc(&vcc->stats->tx_err);
33110+ atomic_inc_unchecked(&vcc->stats->tx_err);
33111
33112 fore200e->tx_sat++;
33113 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
33114diff --git a/drivers/atm/he.c b/drivers/atm/he.c
33115index 72b6960..cf9167a 100644
33116--- a/drivers/atm/he.c
33117+++ b/drivers/atm/he.c
33118@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
33119
33120 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
33121 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
33122- atomic_inc(&vcc->stats->rx_drop);
33123+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33124 goto return_host_buffers;
33125 }
33126
33127@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
33128 RBRQ_LEN_ERR(he_dev->rbrq_head)
33129 ? "LEN_ERR" : "",
33130 vcc->vpi, vcc->vci);
33131- atomic_inc(&vcc->stats->rx_err);
33132+ atomic_inc_unchecked(&vcc->stats->rx_err);
33133 goto return_host_buffers;
33134 }
33135
33136@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
33137 vcc->push(vcc, skb);
33138 spin_lock(&he_dev->global_lock);
33139
33140- atomic_inc(&vcc->stats->rx);
33141+ atomic_inc_unchecked(&vcc->stats->rx);
33142
33143 return_host_buffers:
33144 ++pdus_assembled;
33145@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
33146 tpd->vcc->pop(tpd->vcc, tpd->skb);
33147 else
33148 dev_kfree_skb_any(tpd->skb);
33149- atomic_inc(&tpd->vcc->stats->tx_err);
33150+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
33151 }
33152 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
33153 return;
33154@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
33155 vcc->pop(vcc, skb);
33156 else
33157 dev_kfree_skb_any(skb);
33158- atomic_inc(&vcc->stats->tx_err);
33159+ atomic_inc_unchecked(&vcc->stats->tx_err);
33160 return -EINVAL;
33161 }
33162
33163@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
33164 vcc->pop(vcc, skb);
33165 else
33166 dev_kfree_skb_any(skb);
33167- atomic_inc(&vcc->stats->tx_err);
33168+ atomic_inc_unchecked(&vcc->stats->tx_err);
33169 return -EINVAL;
33170 }
33171 #endif
33172@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
33173 vcc->pop(vcc, skb);
33174 else
33175 dev_kfree_skb_any(skb);
33176- atomic_inc(&vcc->stats->tx_err);
33177+ atomic_inc_unchecked(&vcc->stats->tx_err);
33178 spin_unlock_irqrestore(&he_dev->global_lock, flags);
33179 return -ENOMEM;
33180 }
33181@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
33182 vcc->pop(vcc, skb);
33183 else
33184 dev_kfree_skb_any(skb);
33185- atomic_inc(&vcc->stats->tx_err);
33186+ atomic_inc_unchecked(&vcc->stats->tx_err);
33187 spin_unlock_irqrestore(&he_dev->global_lock, flags);
33188 return -ENOMEM;
33189 }
33190@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
33191 __enqueue_tpd(he_dev, tpd, cid);
33192 spin_unlock_irqrestore(&he_dev->global_lock, flags);
33193
33194- atomic_inc(&vcc->stats->tx);
33195+ atomic_inc_unchecked(&vcc->stats->tx);
33196
33197 return 0;
33198 }
33199diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
33200index 1dc0519..1aadaf7 100644
33201--- a/drivers/atm/horizon.c
33202+++ b/drivers/atm/horizon.c
33203@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
33204 {
33205 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
33206 // VC layer stats
33207- atomic_inc(&vcc->stats->rx);
33208+ atomic_inc_unchecked(&vcc->stats->rx);
33209 __net_timestamp(skb);
33210 // end of our responsibility
33211 vcc->push (vcc, skb);
33212@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
33213 dev->tx_iovec = NULL;
33214
33215 // VC layer stats
33216- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
33217+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
33218
33219 // free the skb
33220 hrz_kfree_skb (skb);
33221diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
33222index 272f009..a18ba55 100644
33223--- a/drivers/atm/idt77252.c
33224+++ b/drivers/atm/idt77252.c
33225@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
33226 else
33227 dev_kfree_skb(skb);
33228
33229- atomic_inc(&vcc->stats->tx);
33230+ atomic_inc_unchecked(&vcc->stats->tx);
33231 }
33232
33233 atomic_dec(&scq->used);
33234@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
33235 if ((sb = dev_alloc_skb(64)) == NULL) {
33236 printk("%s: Can't allocate buffers for aal0.\n",
33237 card->name);
33238- atomic_add(i, &vcc->stats->rx_drop);
33239+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
33240 break;
33241 }
33242 if (!atm_charge(vcc, sb->truesize)) {
33243 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
33244 card->name);
33245- atomic_add(i - 1, &vcc->stats->rx_drop);
33246+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
33247 dev_kfree_skb(sb);
33248 break;
33249 }
33250@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
33251 ATM_SKB(sb)->vcc = vcc;
33252 __net_timestamp(sb);
33253 vcc->push(vcc, sb);
33254- atomic_inc(&vcc->stats->rx);
33255+ atomic_inc_unchecked(&vcc->stats->rx);
33256
33257 cell += ATM_CELL_PAYLOAD;
33258 }
33259@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
33260 "(CDC: %08x)\n",
33261 card->name, len, rpp->len, readl(SAR_REG_CDC));
33262 recycle_rx_pool_skb(card, rpp);
33263- atomic_inc(&vcc->stats->rx_err);
33264+ atomic_inc_unchecked(&vcc->stats->rx_err);
33265 return;
33266 }
33267 if (stat & SAR_RSQE_CRC) {
33268 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
33269 recycle_rx_pool_skb(card, rpp);
33270- atomic_inc(&vcc->stats->rx_err);
33271+ atomic_inc_unchecked(&vcc->stats->rx_err);
33272 return;
33273 }
33274 if (skb_queue_len(&rpp->queue) > 1) {
33275@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
33276 RXPRINTK("%s: Can't alloc RX skb.\n",
33277 card->name);
33278 recycle_rx_pool_skb(card, rpp);
33279- atomic_inc(&vcc->stats->rx_err);
33280+ atomic_inc_unchecked(&vcc->stats->rx_err);
33281 return;
33282 }
33283 if (!atm_charge(vcc, skb->truesize)) {
33284@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
33285 __net_timestamp(skb);
33286
33287 vcc->push(vcc, skb);
33288- atomic_inc(&vcc->stats->rx);
33289+ atomic_inc_unchecked(&vcc->stats->rx);
33290
33291 return;
33292 }
33293@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
33294 __net_timestamp(skb);
33295
33296 vcc->push(vcc, skb);
33297- atomic_inc(&vcc->stats->rx);
33298+ atomic_inc_unchecked(&vcc->stats->rx);
33299
33300 if (skb->truesize > SAR_FB_SIZE_3)
33301 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
33302@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
33303 if (vcc->qos.aal != ATM_AAL0) {
33304 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
33305 card->name, vpi, vci);
33306- atomic_inc(&vcc->stats->rx_drop);
33307+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33308 goto drop;
33309 }
33310
33311 if ((sb = dev_alloc_skb(64)) == NULL) {
33312 printk("%s: Can't allocate buffers for AAL0.\n",
33313 card->name);
33314- atomic_inc(&vcc->stats->rx_err);
33315+ atomic_inc_unchecked(&vcc->stats->rx_err);
33316 goto drop;
33317 }
33318
33319@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
33320 ATM_SKB(sb)->vcc = vcc;
33321 __net_timestamp(sb);
33322 vcc->push(vcc, sb);
33323- atomic_inc(&vcc->stats->rx);
33324+ atomic_inc_unchecked(&vcc->stats->rx);
33325
33326 drop:
33327 skb_pull(queue, 64);
33328@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
33329
33330 if (vc == NULL) {
33331 printk("%s: NULL connection in send().\n", card->name);
33332- atomic_inc(&vcc->stats->tx_err);
33333+ atomic_inc_unchecked(&vcc->stats->tx_err);
33334 dev_kfree_skb(skb);
33335 return -EINVAL;
33336 }
33337 if (!test_bit(VCF_TX, &vc->flags)) {
33338 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
33339- atomic_inc(&vcc->stats->tx_err);
33340+ atomic_inc_unchecked(&vcc->stats->tx_err);
33341 dev_kfree_skb(skb);
33342 return -EINVAL;
33343 }
33344@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
33345 break;
33346 default:
33347 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
33348- atomic_inc(&vcc->stats->tx_err);
33349+ atomic_inc_unchecked(&vcc->stats->tx_err);
33350 dev_kfree_skb(skb);
33351 return -EINVAL;
33352 }
33353
33354 if (skb_shinfo(skb)->nr_frags != 0) {
33355 printk("%s: No scatter-gather yet.\n", card->name);
33356- atomic_inc(&vcc->stats->tx_err);
33357+ atomic_inc_unchecked(&vcc->stats->tx_err);
33358 dev_kfree_skb(skb);
33359 return -EINVAL;
33360 }
33361@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
33362
33363 err = queue_skb(card, vc, skb, oam);
33364 if (err) {
33365- atomic_inc(&vcc->stats->tx_err);
33366+ atomic_inc_unchecked(&vcc->stats->tx_err);
33367 dev_kfree_skb(skb);
33368 return err;
33369 }
33370@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
33371 skb = dev_alloc_skb(64);
33372 if (!skb) {
33373 printk("%s: Out of memory in send_oam().\n", card->name);
33374- atomic_inc(&vcc->stats->tx_err);
33375+ atomic_inc_unchecked(&vcc->stats->tx_err);
33376 return -ENOMEM;
33377 }
33378 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
33379diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
33380index 4217f29..88f547a 100644
33381--- a/drivers/atm/iphase.c
33382+++ b/drivers/atm/iphase.c
33383@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
33384 status = (u_short) (buf_desc_ptr->desc_mode);
33385 if (status & (RX_CER | RX_PTE | RX_OFL))
33386 {
33387- atomic_inc(&vcc->stats->rx_err);
33388+ atomic_inc_unchecked(&vcc->stats->rx_err);
33389 IF_ERR(printk("IA: bad packet, dropping it");)
33390 if (status & RX_CER) {
33391 IF_ERR(printk(" cause: packet CRC error\n");)
33392@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
33393 len = dma_addr - buf_addr;
33394 if (len > iadev->rx_buf_sz) {
33395 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
33396- atomic_inc(&vcc->stats->rx_err);
33397+ atomic_inc_unchecked(&vcc->stats->rx_err);
33398 goto out_free_desc;
33399 }
33400
33401@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
33402 ia_vcc = INPH_IA_VCC(vcc);
33403 if (ia_vcc == NULL)
33404 {
33405- atomic_inc(&vcc->stats->rx_err);
33406+ atomic_inc_unchecked(&vcc->stats->rx_err);
33407 atm_return(vcc, skb->truesize);
33408 dev_kfree_skb_any(skb);
33409 goto INCR_DLE;
33410@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
33411 if ((length > iadev->rx_buf_sz) || (length >
33412 (skb->len - sizeof(struct cpcs_trailer))))
33413 {
33414- atomic_inc(&vcc->stats->rx_err);
33415+ atomic_inc_unchecked(&vcc->stats->rx_err);
33416 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
33417 length, skb->len);)
33418 atm_return(vcc, skb->truesize);
33419@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
33420
33421 IF_RX(printk("rx_dle_intr: skb push");)
33422 vcc->push(vcc,skb);
33423- atomic_inc(&vcc->stats->rx);
33424+ atomic_inc_unchecked(&vcc->stats->rx);
33425 iadev->rx_pkt_cnt++;
33426 }
33427 INCR_DLE:
33428@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
33429 {
33430 struct k_sonet_stats *stats;
33431 stats = &PRIV(_ia_dev[board])->sonet_stats;
33432- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
33433- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
33434- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
33435- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
33436- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
33437- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
33438- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
33439- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
33440- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
33441+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
33442+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
33443+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
33444+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
33445+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
33446+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
33447+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
33448+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
33449+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
33450 }
33451 ia_cmds.status = 0;
33452 break;
33453@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
33454 if ((desc == 0) || (desc > iadev->num_tx_desc))
33455 {
33456 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
33457- atomic_inc(&vcc->stats->tx);
33458+ atomic_inc_unchecked(&vcc->stats->tx);
33459 if (vcc->pop)
33460 vcc->pop(vcc, skb);
33461 else
33462@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
33463 ATM_DESC(skb) = vcc->vci;
33464 skb_queue_tail(&iadev->tx_dma_q, skb);
33465
33466- atomic_inc(&vcc->stats->tx);
33467+ atomic_inc_unchecked(&vcc->stats->tx);
33468 iadev->tx_pkt_cnt++;
33469 /* Increment transaction counter */
33470 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
33471
33472 #if 0
33473 /* add flow control logic */
33474- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
33475+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
33476 if (iavcc->vc_desc_cnt > 10) {
33477 vcc->tx_quota = vcc->tx_quota * 3 / 4;
33478 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
33479diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
33480index fa7d701..1e404c7 100644
33481--- a/drivers/atm/lanai.c
33482+++ b/drivers/atm/lanai.c
33483@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
33484 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
33485 lanai_endtx(lanai, lvcc);
33486 lanai_free_skb(lvcc->tx.atmvcc, skb);
33487- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
33488+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
33489 }
33490
33491 /* Try to fill the buffer - don't call unless there is backlog */
33492@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
33493 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
33494 __net_timestamp(skb);
33495 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
33496- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
33497+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
33498 out:
33499 lvcc->rx.buf.ptr = end;
33500 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
33501@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
33502 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
33503 "vcc %d\n", lanai->number, (unsigned int) s, vci);
33504 lanai->stats.service_rxnotaal5++;
33505- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
33506+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
33507 return 0;
33508 }
33509 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
33510@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
33511 int bytes;
33512 read_unlock(&vcc_sklist_lock);
33513 DPRINTK("got trashed rx pdu on vci %d\n", vci);
33514- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
33515+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
33516 lvcc->stats.x.aal5.service_trash++;
33517 bytes = (SERVICE_GET_END(s) * 16) -
33518 (((unsigned long) lvcc->rx.buf.ptr) -
33519@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
33520 }
33521 if (s & SERVICE_STREAM) {
33522 read_unlock(&vcc_sklist_lock);
33523- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
33524+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
33525 lvcc->stats.x.aal5.service_stream++;
33526 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
33527 "PDU on VCI %d!\n", lanai->number, vci);
33528@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
33529 return 0;
33530 }
33531 DPRINTK("got rx crc error on vci %d\n", vci);
33532- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
33533+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
33534 lvcc->stats.x.aal5.service_rxcrc++;
33535 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
33536 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
33537diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
33538index ed1d2b7..8cffc1f 100644
33539--- a/drivers/atm/nicstar.c
33540+++ b/drivers/atm/nicstar.c
33541@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
33542 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
33543 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
33544 card->index);
33545- atomic_inc(&vcc->stats->tx_err);
33546+ atomic_inc_unchecked(&vcc->stats->tx_err);
33547 dev_kfree_skb_any(skb);
33548 return -EINVAL;
33549 }
33550@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
33551 if (!vc->tx) {
33552 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
33553 card->index);
33554- atomic_inc(&vcc->stats->tx_err);
33555+ atomic_inc_unchecked(&vcc->stats->tx_err);
33556 dev_kfree_skb_any(skb);
33557 return -EINVAL;
33558 }
33559@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
33560 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
33561 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
33562 card->index);
33563- atomic_inc(&vcc->stats->tx_err);
33564+ atomic_inc_unchecked(&vcc->stats->tx_err);
33565 dev_kfree_skb_any(skb);
33566 return -EINVAL;
33567 }
33568
33569 if (skb_shinfo(skb)->nr_frags != 0) {
33570 printk("nicstar%d: No scatter-gather yet.\n", card->index);
33571- atomic_inc(&vcc->stats->tx_err);
33572+ atomic_inc_unchecked(&vcc->stats->tx_err);
33573 dev_kfree_skb_any(skb);
33574 return -EINVAL;
33575 }
33576@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
33577 }
33578
33579 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
33580- atomic_inc(&vcc->stats->tx_err);
33581+ atomic_inc_unchecked(&vcc->stats->tx_err);
33582 dev_kfree_skb_any(skb);
33583 return -EIO;
33584 }
33585- atomic_inc(&vcc->stats->tx);
33586+ atomic_inc_unchecked(&vcc->stats->tx);
33587
33588 return 0;
33589 }
33590@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33591 printk
33592 ("nicstar%d: Can't allocate buffers for aal0.\n",
33593 card->index);
33594- atomic_add(i, &vcc->stats->rx_drop);
33595+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
33596 break;
33597 }
33598 if (!atm_charge(vcc, sb->truesize)) {
33599 RXPRINTK
33600 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
33601 card->index);
33602- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
33603+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
33604 dev_kfree_skb_any(sb);
33605 break;
33606 }
33607@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33608 ATM_SKB(sb)->vcc = vcc;
33609 __net_timestamp(sb);
33610 vcc->push(vcc, sb);
33611- atomic_inc(&vcc->stats->rx);
33612+ atomic_inc_unchecked(&vcc->stats->rx);
33613 cell += ATM_CELL_PAYLOAD;
33614 }
33615
33616@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33617 if (iovb == NULL) {
33618 printk("nicstar%d: Out of iovec buffers.\n",
33619 card->index);
33620- atomic_inc(&vcc->stats->rx_drop);
33621+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33622 recycle_rx_buf(card, skb);
33623 return;
33624 }
33625@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33626 small or large buffer itself. */
33627 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
33628 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
33629- atomic_inc(&vcc->stats->rx_err);
33630+ atomic_inc_unchecked(&vcc->stats->rx_err);
33631 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33632 NS_MAX_IOVECS);
33633 NS_PRV_IOVCNT(iovb) = 0;
33634@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33635 ("nicstar%d: Expected a small buffer, and this is not one.\n",
33636 card->index);
33637 which_list(card, skb);
33638- atomic_inc(&vcc->stats->rx_err);
33639+ atomic_inc_unchecked(&vcc->stats->rx_err);
33640 recycle_rx_buf(card, skb);
33641 vc->rx_iov = NULL;
33642 recycle_iov_buf(card, iovb);
33643@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33644 ("nicstar%d: Expected a large buffer, and this is not one.\n",
33645 card->index);
33646 which_list(card, skb);
33647- atomic_inc(&vcc->stats->rx_err);
33648+ atomic_inc_unchecked(&vcc->stats->rx_err);
33649 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33650 NS_PRV_IOVCNT(iovb));
33651 vc->rx_iov = NULL;
33652@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33653 printk(" - PDU size mismatch.\n");
33654 else
33655 printk(".\n");
33656- atomic_inc(&vcc->stats->rx_err);
33657+ atomic_inc_unchecked(&vcc->stats->rx_err);
33658 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33659 NS_PRV_IOVCNT(iovb));
33660 vc->rx_iov = NULL;
33661@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33662 /* skb points to a small buffer */
33663 if (!atm_charge(vcc, skb->truesize)) {
33664 push_rxbufs(card, skb);
33665- atomic_inc(&vcc->stats->rx_drop);
33666+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33667 } else {
33668 skb_put(skb, len);
33669 dequeue_sm_buf(card, skb);
33670@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33671 ATM_SKB(skb)->vcc = vcc;
33672 __net_timestamp(skb);
33673 vcc->push(vcc, skb);
33674- atomic_inc(&vcc->stats->rx);
33675+ atomic_inc_unchecked(&vcc->stats->rx);
33676 }
33677 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
33678 struct sk_buff *sb;
33679@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33680 if (len <= NS_SMBUFSIZE) {
33681 if (!atm_charge(vcc, sb->truesize)) {
33682 push_rxbufs(card, sb);
33683- atomic_inc(&vcc->stats->rx_drop);
33684+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33685 } else {
33686 skb_put(sb, len);
33687 dequeue_sm_buf(card, sb);
33688@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33689 ATM_SKB(sb)->vcc = vcc;
33690 __net_timestamp(sb);
33691 vcc->push(vcc, sb);
33692- atomic_inc(&vcc->stats->rx);
33693+ atomic_inc_unchecked(&vcc->stats->rx);
33694 }
33695
33696 push_rxbufs(card, skb);
33697@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33698
33699 if (!atm_charge(vcc, skb->truesize)) {
33700 push_rxbufs(card, skb);
33701- atomic_inc(&vcc->stats->rx_drop);
33702+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33703 } else {
33704 dequeue_lg_buf(card, skb);
33705 #ifdef NS_USE_DESTRUCTORS
33706@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33707 ATM_SKB(skb)->vcc = vcc;
33708 __net_timestamp(skb);
33709 vcc->push(vcc, skb);
33710- atomic_inc(&vcc->stats->rx);
33711+ atomic_inc_unchecked(&vcc->stats->rx);
33712 }
33713
33714 push_rxbufs(card, sb);
33715@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33716 printk
33717 ("nicstar%d: Out of huge buffers.\n",
33718 card->index);
33719- atomic_inc(&vcc->stats->rx_drop);
33720+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33721 recycle_iovec_rx_bufs(card,
33722 (struct iovec *)
33723 iovb->data,
33724@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33725 card->hbpool.count++;
33726 } else
33727 dev_kfree_skb_any(hb);
33728- atomic_inc(&vcc->stats->rx_drop);
33729+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33730 } else {
33731 /* Copy the small buffer to the huge buffer */
33732 sb = (struct sk_buff *)iov->iov_base;
33733@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33734 #endif /* NS_USE_DESTRUCTORS */
33735 __net_timestamp(hb);
33736 vcc->push(vcc, hb);
33737- atomic_inc(&vcc->stats->rx);
33738+ atomic_inc_unchecked(&vcc->stats->rx);
33739 }
33740 }
33741
33742diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
33743index 0474a89..06ea4a1 100644
33744--- a/drivers/atm/solos-pci.c
33745+++ b/drivers/atm/solos-pci.c
33746@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
33747 }
33748 atm_charge(vcc, skb->truesize);
33749 vcc->push(vcc, skb);
33750- atomic_inc(&vcc->stats->rx);
33751+ atomic_inc_unchecked(&vcc->stats->rx);
33752 break;
33753
33754 case PKT_STATUS:
33755@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
33756 vcc = SKB_CB(oldskb)->vcc;
33757
33758 if (vcc) {
33759- atomic_inc(&vcc->stats->tx);
33760+ atomic_inc_unchecked(&vcc->stats->tx);
33761 solos_pop(vcc, oldskb);
33762 } else {
33763 dev_kfree_skb_irq(oldskb);
33764diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
33765index 0215934..ce9f5b1 100644
33766--- a/drivers/atm/suni.c
33767+++ b/drivers/atm/suni.c
33768@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
33769
33770
33771 #define ADD_LIMITED(s,v) \
33772- atomic_add((v),&stats->s); \
33773- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
33774+ atomic_add_unchecked((v),&stats->s); \
33775+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
33776
33777
33778 static void suni_hz(unsigned long from_timer)
33779diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
33780index 5120a96..e2572bd 100644
33781--- a/drivers/atm/uPD98402.c
33782+++ b/drivers/atm/uPD98402.c
33783@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
33784 struct sonet_stats tmp;
33785 int error = 0;
33786
33787- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33788+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33789 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
33790 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
33791 if (zero && !error) {
33792@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
33793
33794
33795 #define ADD_LIMITED(s,v) \
33796- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
33797- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
33798- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33799+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
33800+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
33801+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33802
33803
33804 static void stat_event(struct atm_dev *dev)
33805@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
33806 if (reason & uPD98402_INT_PFM) stat_event(dev);
33807 if (reason & uPD98402_INT_PCO) {
33808 (void) GET(PCOCR); /* clear interrupt cause */
33809- atomic_add(GET(HECCT),
33810+ atomic_add_unchecked(GET(HECCT),
33811 &PRIV(dev)->sonet_stats.uncorr_hcs);
33812 }
33813 if ((reason & uPD98402_INT_RFO) &&
33814@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
33815 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
33816 uPD98402_INT_LOS),PIMR); /* enable them */
33817 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
33818- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33819- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
33820- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
33821+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33822+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
33823+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
33824 return 0;
33825 }
33826
33827diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
33828index 969c3c2..9b72956 100644
33829--- a/drivers/atm/zatm.c
33830+++ b/drivers/atm/zatm.c
33831@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33832 }
33833 if (!size) {
33834 dev_kfree_skb_irq(skb);
33835- if (vcc) atomic_inc(&vcc->stats->rx_err);
33836+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
33837 continue;
33838 }
33839 if (!atm_charge(vcc,skb->truesize)) {
33840@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33841 skb->len = size;
33842 ATM_SKB(skb)->vcc = vcc;
33843 vcc->push(vcc,skb);
33844- atomic_inc(&vcc->stats->rx);
33845+ atomic_inc_unchecked(&vcc->stats->rx);
33846 }
33847 zout(pos & 0xffff,MTA(mbx));
33848 #if 0 /* probably a stupid idea */
33849@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
33850 skb_queue_head(&zatm_vcc->backlog,skb);
33851 break;
33852 }
33853- atomic_inc(&vcc->stats->tx);
33854+ atomic_inc_unchecked(&vcc->stats->tx);
33855 wake_up(&zatm_vcc->tx_wait);
33856 }
33857
33858diff --git a/drivers/base/bus.c b/drivers/base/bus.c
33859index 6856303..0602d70 100644
33860--- a/drivers/base/bus.c
33861+++ b/drivers/base/bus.c
33862@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
33863 return -EINVAL;
33864
33865 mutex_lock(&subsys->p->mutex);
33866- list_add_tail(&sif->node, &subsys->p->interfaces);
33867+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
33868 if (sif->add_dev) {
33869 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33870 while ((dev = subsys_dev_iter_next(&iter)))
33871@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
33872 subsys = sif->subsys;
33873
33874 mutex_lock(&subsys->p->mutex);
33875- list_del_init(&sif->node);
33876+ pax_list_del_init((struct list_head *)&sif->node);
33877 if (sif->remove_dev) {
33878 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33879 while ((dev = subsys_dev_iter_next(&iter)))
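
pax_list_add_tail()/pax_list_del_init() exist because the constify plugin can place structures like these in read-only memory, leaving their embedded list_head const-qualified at the call site; the casts in the hunk strip that qualifier before handing the node to the helper, which opens a kernel write window internally. A hedged userspace sketch of the shape of this, with an ordinary tail insert standing in for the real helper (there is no direct userspace analog of toggling kernel write protection):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Stand-in for the kernel helper: a plain circular tail insert.
     * The real pax_list_add_tail() additionally makes the protected
     * node writable for the duration of the update. */
    static void pax_list_add_tail(struct list_head *new_node,
                                  struct list_head *head)
    {
        new_node->prev = head->prev;
        new_node->next = head;
        head->prev->next = new_node;
        head->prev = new_node;
    }

    struct subsys_interface_model {
        const char *name;
        struct list_head node;
    };

    static struct list_head interfaces = { &interfaces, &interfaces };
    static struct subsys_interface_model sif_storage = { "demo", { NULL, NULL } };

    /* Callers only ever see a pointer-to-const, as constified code would. */
    static const struct subsys_interface_model *sif = &sif_storage;

    int main(void)
    {
        /* Mirrors the hunk: &sif->node is const-qualified, so the
         * qualifier must be cast away before the helper links it in. */
        pax_list_add_tail((struct list_head *)&sif->node, &interfaces);
        printf("linked: %s\n", sif->name);
        return 0;
    }
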
33880diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
33881index 17cf7ca..7e553e1 100644
33882--- a/drivers/base/devtmpfs.c
33883+++ b/drivers/base/devtmpfs.c
33884@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
33885 if (!thread)
33886 return 0;
33887
33888- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
33889+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
33890 if (err)
33891 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
33892 else
33893diff --git a/drivers/base/node.c b/drivers/base/node.c
33894index fac124a..66bd4ab 100644
33895--- a/drivers/base/node.c
33896+++ b/drivers/base/node.c
33897@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
33898 struct node_attr {
33899 struct device_attribute attr;
33900 enum node_states state;
33901-};
33902+} __do_const;
33903
33904 static ssize_t show_node_state(struct device *dev,
33905 struct device_attribute *attr, char *buf)
33906diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
33907index acc3a8d..981c236 100644
33908--- a/drivers/base/power/domain.c
33909+++ b/drivers/base/power/domain.c
33910@@ -1851,7 +1851,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
33911 {
33912 struct cpuidle_driver *cpuidle_drv;
33913 struct gpd_cpu_data *cpu_data;
33914- struct cpuidle_state *idle_state;
33915+ cpuidle_state_no_const *idle_state;
33916 int ret = 0;
33917
33918 if (IS_ERR_OR_NULL(genpd) || state < 0)
33919@@ -1919,7 +1919,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
33920 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
33921 {
33922 struct gpd_cpu_data *cpu_data;
33923- struct cpuidle_state *idle_state;
33924+ cpuidle_state_no_const *idle_state;
33925 int ret = 0;
33926
33927 if (IS_ERR_OR_NULL(genpd))
33928diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
33929index e6ee5e8..98ad7fc 100644
33930--- a/drivers/base/power/wakeup.c
33931+++ b/drivers/base/power/wakeup.c
33932@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
33933 * They need to be modified together atomically, so it's better to use one
33934 * atomic variable to hold them both.
33935 */
33936-static atomic_t combined_event_count = ATOMIC_INIT(0);
33937+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
33938
33939 #define IN_PROGRESS_BITS (sizeof(int) * 4)
33940 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
33941
33942 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33943 {
33944- unsigned int comb = atomic_read(&combined_event_count);
33945+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
33946
33947 *cnt = (comb >> IN_PROGRESS_BITS);
33948 *inpr = comb & MAX_IN_PROGRESS;
33949@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33950 ws->start_prevent_time = ws->last_time;
33951
33952 /* Increment the counter of events in progress. */
33953- cec = atomic_inc_return(&combined_event_count);
33954+ cec = atomic_inc_return_unchecked(&combined_event_count);
33955
33956 trace_wakeup_source_activate(ws->name, cec);
33957 }
33958@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33959 * Increment the counter of registered wakeup events and decrement the
33960 	 * counter of wakeup events in progress simultaneously.
33961 */
33962- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33963+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33964 trace_wakeup_source_deactivate(ws->name, cec);
33965
33966 split_counters(&cnt, &inpr);
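
The wakeup code keeps two logically paired counters, total registered events in the high half and events in progress in the low half, inside one atomic word so both can change in a single operation; it is marked unchecked because the packed value is expected to wrap eventually. A compact model assuming C11 atomics and a 32-bit int:

    #include <stdatomic.h>
    #include <stdio.h>

    #define IN_PROGRESS_BITS (sizeof(int) * 4)           /* 16 for 32-bit int */
    #define MAX_IN_PROGRESS  ((1U << IN_PROGRESS_BITS) - 1)

    static atomic_uint combined_event_count;

    static void split_counters(unsigned int *cnt, unsigned int *inpr)
    {
        unsigned int comb = atomic_load(&combined_event_count);

        *cnt  = comb >> IN_PROGRESS_BITS;
        *inpr = comb & MAX_IN_PROGRESS;
    }

    int main(void)
    {
        unsigned int cnt, inpr;

        /* Activate: one more wakeup event in progress (low field += 1). */
        atomic_fetch_add(&combined_event_count, 1);

        /* Deactivate: adding MAX_IN_PROGRESS carries out of the low field,
         * doing "registered += 1, in_progress -= 1" in ONE atomic op. */
        atomic_fetch_add(&combined_event_count, MAX_IN_PROGRESS);

        split_counters(&cnt, &inpr);
        printf("registered=%u in_progress=%u\n", cnt, inpr);   /* 1 and 0 */
        return 0;
    }
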
33967diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33968index e8d11b6..7b1b36f 100644
33969--- a/drivers/base/syscore.c
33970+++ b/drivers/base/syscore.c
33971@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33972 void register_syscore_ops(struct syscore_ops *ops)
33973 {
33974 mutex_lock(&syscore_ops_lock);
33975- list_add_tail(&ops->node, &syscore_ops_list);
33976+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33977 mutex_unlock(&syscore_ops_lock);
33978 }
33979 EXPORT_SYMBOL_GPL(register_syscore_ops);
33980@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33981 void unregister_syscore_ops(struct syscore_ops *ops)
33982 {
33983 mutex_lock(&syscore_ops_lock);
33984- list_del(&ops->node);
33985+ pax_list_del((struct list_head *)&ops->node);
33986 mutex_unlock(&syscore_ops_lock);
33987 }
33988 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33989diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33990index ade58bc..867143d 100644
33991--- a/drivers/block/cciss.c
33992+++ b/drivers/block/cciss.c
33993@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33994 int err;
33995 u32 cp;
33996
33997+ memset(&arg64, 0, sizeof(arg64));
33998+
33999 err = 0;
34000 err |=
34001 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
34002@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
34003 while (!list_empty(&h->reqQ)) {
34004 c = list_entry(h->reqQ.next, CommandList_struct, list);
34005 /* can't do anything if fifo is full */
34006- if ((h->access.fifo_full(h))) {
34007+ if ((h->access->fifo_full(h))) {
34008 dev_warn(&h->pdev->dev, "fifo full\n");
34009 break;
34010 }
34011@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
34012 h->Qdepth--;
34013
34014 /* Tell the controller execute command */
34015- h->access.submit_command(h, c);
34016+ h->access->submit_command(h, c);
34017
34018 /* Put job onto the completed Q */
34019 addQ(&h->cmpQ, c);
34020@@ -3441,17 +3443,17 @@ startio:
34021
34022 static inline unsigned long get_next_completion(ctlr_info_t *h)
34023 {
34024- return h->access.command_completed(h);
34025+ return h->access->command_completed(h);
34026 }
34027
34028 static inline int interrupt_pending(ctlr_info_t *h)
34029 {
34030- return h->access.intr_pending(h);
34031+ return h->access->intr_pending(h);
34032 }
34033
34034 static inline long interrupt_not_for_us(ctlr_info_t *h)
34035 {
34036- return ((h->access.intr_pending(h) == 0) ||
34037+ return ((h->access->intr_pending(h) == 0) ||
34038 (h->interrupts_enabled == 0));
34039 }
34040
34041@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
34042 u32 a;
34043
34044 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34045- return h->access.command_completed(h);
34046+ return h->access->command_completed(h);
34047
34048 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34049 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34050@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
34051 trans_support & CFGTBL_Trans_use_short_tags);
34052
34053 /* Change the access methods to the performant access methods */
34054- h->access = SA5_performant_access;
34055+ h->access = &SA5_performant_access;
34056 h->transMethod = CFGTBL_Trans_Performant;
34057
34058 return;
34059@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
34060 if (prod_index < 0)
34061 return -ENODEV;
34062 h->product_name = products[prod_index].product_name;
34063- h->access = *(products[prod_index].access);
34064+ h->access = products[prod_index].access;
34065
34066 if (cciss_board_disabled(h)) {
34067 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34068@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
34069 }
34070
34071 /* make sure the board interrupts are off */
34072- h->access.set_intr_mask(h, CCISS_INTR_OFF);
34073+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
34074 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
34075 if (rc)
34076 goto clean2;
34077@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
34078 * fake ones to scoop up any residual completions.
34079 */
34080 spin_lock_irqsave(&h->lock, flags);
34081- h->access.set_intr_mask(h, CCISS_INTR_OFF);
34082+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
34083 spin_unlock_irqrestore(&h->lock, flags);
34084 free_irq(h->intr[h->intr_mode], h);
34085 rc = cciss_request_irq(h, cciss_msix_discard_completions,
34086@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
34087 dev_info(&h->pdev->dev, "Board READY.\n");
34088 dev_info(&h->pdev->dev,
34089 "Waiting for stale completions to drain.\n");
34090- h->access.set_intr_mask(h, CCISS_INTR_ON);
34091+ h->access->set_intr_mask(h, CCISS_INTR_ON);
34092 msleep(10000);
34093- h->access.set_intr_mask(h, CCISS_INTR_OFF);
34094+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
34095
34096 rc = controller_reset_failed(h->cfgtable);
34097 if (rc)
34098@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
34099 cciss_scsi_setup(h);
34100
34101 /* Turn the interrupts on so we can service requests */
34102- h->access.set_intr_mask(h, CCISS_INTR_ON);
34103+ h->access->set_intr_mask(h, CCISS_INTR_ON);
34104
34105 /* Get the firmware version */
34106 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
34107@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
34108 kfree(flush_buf);
34109 if (return_code != IO_OK)
34110 dev_warn(&h->pdev->dev, "Error flushing cache\n");
34111- h->access.set_intr_mask(h, CCISS_INTR_OFF);
34112+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
34113 free_irq(h->intr[h->intr_mode], h);
34114 }
34115
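
The added memset() in cciss_ioctl32_passthru() near the top of this file is an information-leak fix: arg64 is assembled field by field from the 32-bit layout and later copied back toward userspace, so any padding holes would otherwise carry stale kernel stack bytes. A small userspace illustration of why per-field initialization is not enough (struct name and fields are invented):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* A struct with a compiler-inserted padding hole after 'mode'
     * (7 bytes on most LP64 ABIs). */
    struct passthru64 {
        uint8_t  mode;
        uint64_t len;
    };

    /* Filling the struct field by field leaves the padding holding
     * whatever was on the stack; zeroing it first makes every byte
     * well defined before the struct is copied anywhere else. */
    static void fill(struct passthru64 *p)
    {
        memset(p, 0, sizeof(*p));   /* the fix from the hunk above */
        p->mode = 1;
        p->len = 512;
    }

    int main(void)
    {
        struct passthru64 p;
        unsigned char *b = (unsigned char *)&p;

        fill(&p);
        for (size_t i = 0; i < sizeof(p); i++)
            printf("%02x ", b[i]);          /* all bytes deterministic */
        putchar('\n');
        return 0;
    }
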
34116diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
34117index 7fda30e..eb5dfe0 100644
34118--- a/drivers/block/cciss.h
34119+++ b/drivers/block/cciss.h
34120@@ -101,7 +101,7 @@ struct ctlr_info
34121 /* information about each logical volume */
34122 drive_info_struct *drv[CISS_MAX_LUN];
34123
34124- struct access_method access;
34125+ struct access_method *access;
34126
34127 /* queue and queue Info */
34128 struct list_head reqQ;
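
The cciss (and, below, cpqarray) conversion from an embedded struct access_method to a pointer is a constification enabler: with one shared table referenced by pointer, the function-pointer table itself can live in read-only memory instead of being a writable per-controller copy, which is also why every h->access.fn(...) in the hunks becomes h->access->fn(...). A simplified sketch of the before/after shape:

    #include <stdio.h>

    struct access_method {
        void (*submit_command)(const char *what);
        int  (*intr_pending)(void);
    };

    static void sa5_submit(const char *what) { printf("SA5 submit: %s\n", what); }
    static int  sa5_pending(void)            { return 0; }

    /* One shared, read-only method table instead of a writable copy
     * inside every controller structure. */
    static const struct access_method SA5_access = {
        .submit_command = sa5_submit,
        .intr_pending   = sa5_pending,
    };

    struct ctlr_info {
        const struct access_method *access;   /* was: struct access_method access */
    };

    int main(void)
    {
        struct ctlr_info h = { .access = &SA5_access };

        h.access->submit_command("read capacity");   /* was: h.access.submit_command */
        return 0;
    }
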
34129diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
34130index 3f08713..56a586a 100644
34131--- a/drivers/block/cpqarray.c
34132+++ b/drivers/block/cpqarray.c
34133@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
34134 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
34135 goto Enomem4;
34136 }
34137- hba[i]->access.set_intr_mask(hba[i], 0);
34138+ hba[i]->access->set_intr_mask(hba[i], 0);
34139 if (request_irq(hba[i]->intr, do_ida_intr,
34140 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
34141 {
34142@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
34143 add_timer(&hba[i]->timer);
34144
34145 /* Enable IRQ now that spinlock and rate limit timer are set up */
34146- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
34147+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
34148
34149 for(j=0; j<NWD; j++) {
34150 struct gendisk *disk = ida_gendisk[i][j];
34151@@ -694,7 +694,7 @@ DBGINFO(
34152 for(i=0; i<NR_PRODUCTS; i++) {
34153 if (board_id == products[i].board_id) {
34154 c->product_name = products[i].product_name;
34155- c->access = *(products[i].access);
34156+ c->access = products[i].access;
34157 break;
34158 }
34159 }
34160@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
34161 hba[ctlr]->intr = intr;
34162 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
34163 hba[ctlr]->product_name = products[j].product_name;
34164- hba[ctlr]->access = *(products[j].access);
34165+ hba[ctlr]->access = products[j].access;
34166 hba[ctlr]->ctlr = ctlr;
34167 hba[ctlr]->board_id = board_id;
34168 hba[ctlr]->pci_dev = NULL; /* not PCI */
34169@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
34170
34171 while((c = h->reqQ) != NULL) {
34172 /* Can't do anything if we're busy */
34173- if (h->access.fifo_full(h) == 0)
34174+ if (h->access->fifo_full(h) == 0)
34175 return;
34176
34177 /* Get the first entry from the request Q */
34178@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
34179 h->Qdepth--;
34180
34181 /* Tell the controller to do our bidding */
34182- h->access.submit_command(h, c);
34183+ h->access->submit_command(h, c);
34184
34185 /* Get onto the completion Q */
34186 addQ(&h->cmpQ, c);
34187@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
34188 unsigned long flags;
34189 __u32 a,a1;
34190
34191- istat = h->access.intr_pending(h);
34192+ istat = h->access->intr_pending(h);
34193 /* Is this interrupt for us? */
34194 if (istat == 0)
34195 return IRQ_NONE;
34196@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
34197 */
34198 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
34199 if (istat & FIFO_NOT_EMPTY) {
34200- while((a = h->access.command_completed(h))) {
34201+ while((a = h->access->command_completed(h))) {
34202 a1 = a; a &= ~3;
34203 if ((c = h->cmpQ) == NULL)
34204 {
34205@@ -1449,11 +1449,11 @@ static int sendcmd(
34206 /*
34207 * Disable interrupt
34208 */
34209- info_p->access.set_intr_mask(info_p, 0);
34210+ info_p->access->set_intr_mask(info_p, 0);
34211 /* Make sure there is room in the command FIFO */
34212 /* Actually it should be completely empty at this time. */
34213 for (i = 200000; i > 0; i--) {
34214- temp = info_p->access.fifo_full(info_p);
34215+ temp = info_p->access->fifo_full(info_p);
34216 if (temp != 0) {
34217 break;
34218 }
34219@@ -1466,7 +1466,7 @@ DBG(
34220 /*
34221 * Send the cmd
34222 */
34223- info_p->access.submit_command(info_p, c);
34224+ info_p->access->submit_command(info_p, c);
34225 complete = pollcomplete(ctlr);
34226
34227 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
34228@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
34229 * we check the new geometry. Then turn interrupts back on when
34230 * we're done.
34231 */
34232- host->access.set_intr_mask(host, 0);
34233+ host->access->set_intr_mask(host, 0);
34234 getgeometry(ctlr);
34235- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
34236+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
34237
34238 for(i=0; i<NWD; i++) {
34239 struct gendisk *disk = ida_gendisk[ctlr][i];
34240@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
34241 /* Wait (up to 2 seconds) for a command to complete */
34242
34243 for (i = 200000; i > 0; i--) {
34244- done = hba[ctlr]->access.command_completed(hba[ctlr]);
34245+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
34246 if (done == 0) {
34247 udelay(10); /* a short fixed delay */
34248 } else
34249diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
34250index be73e9d..7fbf140 100644
34251--- a/drivers/block/cpqarray.h
34252+++ b/drivers/block/cpqarray.h
34253@@ -99,7 +99,7 @@ struct ctlr_info {
34254 drv_info_t drv[NWD];
34255 struct proc_dir_entry *proc;
34256
34257- struct access_method access;
34258+ struct access_method *access;
34259
34260 cmdlist_t *reqQ;
34261 cmdlist_t *cmpQ;
34262diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
34263index 6b51afa..17e1191 100644
34264--- a/drivers/block/drbd/drbd_int.h
34265+++ b/drivers/block/drbd/drbd_int.h
34266@@ -582,7 +582,7 @@ struct drbd_epoch {
34267 struct drbd_tconn *tconn;
34268 struct list_head list;
34269 unsigned int barrier_nr;
34270- atomic_t epoch_size; /* increased on every request added. */
34271+ atomic_unchecked_t epoch_size; /* increased on every request added. */
34272 atomic_t active; /* increased on every req. added, and dec on every finished. */
34273 unsigned long flags;
34274 };
34275@@ -1011,7 +1011,7 @@ struct drbd_conf {
34276 int al_tr_cycle;
34277 int al_tr_pos; /* position of the next transaction in the journal */
34278 wait_queue_head_t seq_wait;
34279- atomic_t packet_seq;
34280+ atomic_unchecked_t packet_seq;
34281 unsigned int peer_seq;
34282 spinlock_t peer_seq_lock;
34283 unsigned int minor;
34284@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
34285 char __user *uoptval;
34286 int err;
34287
34288- uoptval = (char __user __force *)optval;
34289+ uoptval = (char __force_user *)optval;
34290
34291 set_fs(KERNEL_DS);
34292 if (level == SOL_SOCKET)
34293diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
34294index 8c13eeb..217adee 100644
34295--- a/drivers/block/drbd/drbd_main.c
34296+++ b/drivers/block/drbd/drbd_main.c
34297@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
34298 p->sector = sector;
34299 p->block_id = block_id;
34300 p->blksize = blksize;
34301- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
34302+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
34303 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
34304 }
34305
34306@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
34307 return -EIO;
34308 p->sector = cpu_to_be64(req->i.sector);
34309 p->block_id = (unsigned long)req;
34310- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
34311+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
34312 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
34313 if (mdev->state.conn >= C_SYNC_SOURCE &&
34314 mdev->state.conn <= C_PAUSED_SYNC_T)
34315@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
34316 {
34317 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
34318
34319- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
34320- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
34321+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
34322+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
34323 kfree(tconn->current_epoch);
34324
34325 idr_destroy(&tconn->volumes);
34326diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
34327index a9eccfc..f5efe87 100644
34328--- a/drivers/block/drbd/drbd_receiver.c
34329+++ b/drivers/block/drbd/drbd_receiver.c
34330@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
34331 {
34332 int err;
34333
34334- atomic_set(&mdev->packet_seq, 0);
34335+ atomic_set_unchecked(&mdev->packet_seq, 0);
34336 mdev->peer_seq = 0;
34337
34338 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
34339@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
34340 do {
34341 next_epoch = NULL;
34342
34343- epoch_size = atomic_read(&epoch->epoch_size);
34344+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
34345
34346 switch (ev & ~EV_CLEANUP) {
34347 case EV_PUT:
34348@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
34349 rv = FE_DESTROYED;
34350 } else {
34351 epoch->flags = 0;
34352- atomic_set(&epoch->epoch_size, 0);
34353+ atomic_set_unchecked(&epoch->epoch_size, 0);
34354 /* atomic_set(&epoch->active, 0); is already zero */
34355 if (rv == FE_STILL_LIVE)
34356 rv = FE_RECYCLED;
34357@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
34358 conn_wait_active_ee_empty(tconn);
34359 drbd_flush(tconn);
34360
34361- if (atomic_read(&tconn->current_epoch->epoch_size)) {
34362+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
34363 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
34364 if (epoch)
34365 break;
34366@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
34367 }
34368
34369 epoch->flags = 0;
34370- atomic_set(&epoch->epoch_size, 0);
34371+ atomic_set_unchecked(&epoch->epoch_size, 0);
34372 atomic_set(&epoch->active, 0);
34373
34374 spin_lock(&tconn->epoch_lock);
34375- if (atomic_read(&tconn->current_epoch->epoch_size)) {
34376+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
34377 list_add(&epoch->list, &tconn->current_epoch->list);
34378 tconn->current_epoch = epoch;
34379 tconn->epochs++;
34380@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
34381
34382 err = wait_for_and_update_peer_seq(mdev, peer_seq);
34383 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
34384- atomic_inc(&tconn->current_epoch->epoch_size);
34385+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
34386 err2 = drbd_drain_block(mdev, pi->size);
34387 if (!err)
34388 err = err2;
34389@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
34390
34391 spin_lock(&tconn->epoch_lock);
34392 peer_req->epoch = tconn->current_epoch;
34393- atomic_inc(&peer_req->epoch->epoch_size);
34394+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
34395 atomic_inc(&peer_req->epoch->active);
34396 spin_unlock(&tconn->epoch_lock);
34397
34398@@ -4346,7 +4346,7 @@ struct data_cmd {
34399 int expect_payload;
34400 size_t pkt_size;
34401 int (*fn)(struct drbd_tconn *, struct packet_info *);
34402-};
34403+} __do_const;
34404
34405 static struct data_cmd drbd_cmd_handler[] = {
34406 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
34407@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
34408 if (!list_empty(&tconn->current_epoch->list))
34409 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
34410 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
34411- atomic_set(&tconn->current_epoch->epoch_size, 0);
34412+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
34413 tconn->send.seen_any_write_yet = false;
34414
34415 conn_info(tconn, "Connection closed\n");
34416@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
34417 struct asender_cmd {
34418 size_t pkt_size;
34419 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
34420-};
34421+} __do_const;
34422
34423 static struct asender_cmd asender_tbl[] = {
34424 [P_PING] = { 0, got_Ping },
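
__do_const comes from the PaX constify plugin; tagging the element type of dispatch tables such as drbd_cmd_handler[] and asender_tbl[] forces the whole array of { size, handler } pairs into read-only memory, removing a writable function-pointer table as an overwrite target. The plain-C analog is simply declaring the table const, roughly like this (handler bodies are made up):

    #include <stdio.h>
    #include <stddef.h>

    struct packet_info { int cmd; };

    static int got_ping(struct packet_info *pi) { (void)pi; return puts("ping"); }
    static int got_data(struct packet_info *pi) { (void)pi; return puts("data"); }

    struct data_cmd {
        size_t pkt_size;
        int (*fn)(struct packet_info *);
    };

    /* const puts the dispatch table in .rodata, so a memory-corruption
     * bug cannot redirect its function pointers at runtime. */
    static const struct data_cmd handlers[] = {
        [0] = { 0,  got_ping },
        [1] = { 16, got_data },
    };

    int main(void)
    {
        struct packet_info pi = { .cmd = 1 };

        if (pi.cmd >= 0 && (size_t)pi.cmd < sizeof(handlers) / sizeof(handlers[0]))
            handlers[pi.cmd].fn(&pi);
        return 0;
    }
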
34425diff --git a/drivers/block/loop.c b/drivers/block/loop.c
34426index f74f2c0..bb668af 100644
34427--- a/drivers/block/loop.c
34428+++ b/drivers/block/loop.c
34429@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
34430 mm_segment_t old_fs = get_fs();
34431
34432 set_fs(get_ds());
34433- bw = file->f_op->write(file, buf, len, &pos);
34434+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
34435 set_fs(old_fs);
34436 if (likely(bw == len))
34437 return 0;
34438diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
34439index 2e7de7a..ed86dc0 100644
34440--- a/drivers/block/pktcdvd.c
34441+++ b/drivers/block/pktcdvd.c
34442@@ -83,7 +83,7 @@
34443
34444 #define MAX_SPEED 0xffff
34445
34446-#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
34447+#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1UL))
34448
34449 static DEFINE_MUTEX(pktcdvd_mutex);
34450 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
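
The one-character pktcdvd change is an integer-width fix: settings.size is a 32-bit field, so ~(size - 1) was computed as a 32-bit mask that zero-extends when applied to a 64-bit sector number, silently clearing its upper half; spelling the constant 1UL keeps the complement at full width. A demonstration, assuming an LP64 target where long is 64 bits:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t sector = 0x100000123ULL;   /* needs more than 32 bits */
        uint32_t size   = 32;               /* zone size, power of two */

        /* 32-bit mask: ~(size - 1) is evaluated in 32 bits, then
         * zero-extended, destroying the high half of the sector. */
        uint64_t bad  = sector & ~(size - 1);

        /* Widening the constant keeps the complement at 64 bits. */
        uint64_t good = sector & ~(size - 1UL);

        printf("bad  = 0x%" PRIx64 "\n", bad);    /* 0x120        */
        printf("good = 0x%" PRIx64 "\n", good);   /* 0x100000120  */
        return 0;
    }
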
34451diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
34452index d620b44..587561e 100644
34453--- a/drivers/cdrom/cdrom.c
34454+++ b/drivers/cdrom/cdrom.c
34455@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
34456 ENSURE(reset, CDC_RESET);
34457 ENSURE(generic_packet, CDC_GENERIC_PACKET);
34458 cdi->mc_flags = 0;
34459- cdo->n_minors = 0;
34460 cdi->options = CDO_USE_FFLAGS;
34461
34462 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
34463@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
34464 else
34465 cdi->cdda_method = CDDA_OLD;
34466
34467- if (!cdo->generic_packet)
34468- cdo->generic_packet = cdrom_dummy_generic_packet;
34469+ if (!cdo->generic_packet) {
34470+ pax_open_kernel();
34471+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
34472+ pax_close_kernel();
34473+ }
34474
34475 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
34476 mutex_lock(&cdrom_mutex);
34477@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
34478 if (cdi->exit)
34479 cdi->exit(cdi);
34480
34481- cdi->ops->n_minors--;
34482 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
34483 }
34484
34485diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
34486index d59cdcb..11afddf 100644
34487--- a/drivers/cdrom/gdrom.c
34488+++ b/drivers/cdrom/gdrom.c
34489@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
34490 .audio_ioctl = gdrom_audio_ioctl,
34491 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
34492 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
34493- .n_minors = 1,
34494 };
34495
34496 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
34497diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
34498index 72bedad..8181ce1 100644
34499--- a/drivers/char/Kconfig
34500+++ b/drivers/char/Kconfig
34501@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
34502
34503 config DEVKMEM
34504 bool "/dev/kmem virtual device support"
34505- default y
34506+ default n
34507+ depends on !GRKERNSEC_KMEM
34508 help
34509 Say Y here if you want to support the /dev/kmem device. The
34510 /dev/kmem device is rarely used, but can be used for certain
34511@@ -581,6 +582,7 @@ config DEVPORT
34512 bool
34513 depends on !M68K
34514 depends on ISA || PCI
34515+ depends on !GRKERNSEC_KMEM
34516 default y
34517
34518 source "drivers/s390/char/Kconfig"
34519diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
34520index 2e04433..22afc64 100644
34521--- a/drivers/char/agp/frontend.c
34522+++ b/drivers/char/agp/frontend.c
34523@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
34524 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
34525 return -EFAULT;
34526
34527- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
34528+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
34529 return -EFAULT;
34530
34531 client = agp_find_client_by_pid(reserve.pid);
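
The agp frontend bound exists to keep seg_count times the element size from overflowing before the segment array is allocated, and the fix checks against the kernel-side struct agp_segment_priv that is actually allocated rather than the smaller user-visible struct agp_segment. The generic pattern, as a userspace sketch with an invented struct:

    #include <limits.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct seg_priv { void *owner; unsigned long start, len; int prot; };

    /* Refuse counts where count * sizeof(elem) would wrap before the
     * allocator sees it; the divisor must be the size of what is
     * REALLY allocated, which is the point of the hunk above. */
    static void *alloc_segments(unsigned int count)
    {
        if (count >= UINT_MAX / sizeof(struct seg_priv))
            return NULL;
        return malloc(count * sizeof(struct seg_priv));
    }

    int main(void)
    {
        printf("%p\n", alloc_segments(4));             /* fine     */
        printf("%p\n", alloc_segments(UINT_MAX / 2));  /* rejected */
        return 0;
    }
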
34532diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
34533index 21cb980..f15107c 100644
34534--- a/drivers/char/genrtc.c
34535+++ b/drivers/char/genrtc.c
34536@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
34537 switch (cmd) {
34538
34539 case RTC_PLL_GET:
34540+ memset(&pll, 0, sizeof(pll));
34541 if (get_rtc_pll(&pll))
34542 return -EINVAL;
34543 else
34544diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
34545index 615d262..15d5c9d 100644
34546--- a/drivers/char/hpet.c
34547+++ b/drivers/char/hpet.c
34548@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
34549 }
34550
34551 static int
34552-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
34553+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
34554 struct hpet_info *info)
34555 {
34556 struct hpet_timer __iomem *timer;
34557diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
34558index 053201b0..8335cce 100644
34559--- a/drivers/char/ipmi/ipmi_msghandler.c
34560+++ b/drivers/char/ipmi/ipmi_msghandler.c
34561@@ -420,7 +420,7 @@ struct ipmi_smi {
34562 struct proc_dir_entry *proc_dir;
34563 char proc_dir_name[10];
34564
34565- atomic_t stats[IPMI_NUM_STATS];
34566+ atomic_unchecked_t stats[IPMI_NUM_STATS];
34567
34568 /*
34569 * run_to_completion duplicate of smb_info, smi_info
34570@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
34571
34572
34573 #define ipmi_inc_stat(intf, stat) \
34574- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
34575+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
34576 #define ipmi_get_stat(intf, stat) \
34577- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
34578+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
34579
34580 static int is_lan_addr(struct ipmi_addr *addr)
34581 {
34582@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
34583 INIT_LIST_HEAD(&intf->cmd_rcvrs);
34584 init_waitqueue_head(&intf->waitq);
34585 for (i = 0; i < IPMI_NUM_STATS; i++)
34586- atomic_set(&intf->stats[i], 0);
34587+ atomic_set_unchecked(&intf->stats[i], 0);
34588
34589 intf->proc_dir = NULL;
34590
34591diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
34592index 1c7fdcd..4899100 100644
34593--- a/drivers/char/ipmi/ipmi_si_intf.c
34594+++ b/drivers/char/ipmi/ipmi_si_intf.c
34595@@ -275,7 +275,7 @@ struct smi_info {
34596 unsigned char slave_addr;
34597
34598 /* Counters and things for the proc filesystem. */
34599- atomic_t stats[SI_NUM_STATS];
34600+ atomic_unchecked_t stats[SI_NUM_STATS];
34601
34602 struct task_struct *thread;
34603
34604@@ -284,9 +284,9 @@ struct smi_info {
34605 };
34606
34607 #define smi_inc_stat(smi, stat) \
34608- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
34609+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
34610 #define smi_get_stat(smi, stat) \
34611- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
34612+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
34613
34614 #define SI_MAX_PARMS 4
34615
34616@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
34617 atomic_set(&new_smi->req_events, 0);
34618 new_smi->run_to_completion = 0;
34619 for (i = 0; i < SI_NUM_STATS; i++)
34620- atomic_set(&new_smi->stats[i], 0);
34621+ atomic_set_unchecked(&new_smi->stats[i], 0);
34622
34623 new_smi->interrupt_disabled = 1;
34624 atomic_set(&new_smi->stop_operation, 0);
34625diff --git a/drivers/char/mem.c b/drivers/char/mem.c
34626index c6fa3bc..4ca3e42 100644
34627--- a/drivers/char/mem.c
34628+++ b/drivers/char/mem.c
34629@@ -18,6 +18,7 @@
34630 #include <linux/raw.h>
34631 #include <linux/tty.h>
34632 #include <linux/capability.h>
34633+#include <linux/security.h>
34634 #include <linux/ptrace.h>
34635 #include <linux/device.h>
34636 #include <linux/highmem.h>
34637@@ -37,6 +38,10 @@
34638
34639 #define DEVPORT_MINOR 4
34640
34641+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34642+extern const struct file_operations grsec_fops;
34643+#endif
34644+
34645 static inline unsigned long size_inside_page(unsigned long start,
34646 unsigned long size)
34647 {
34648@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34649
34650 while (cursor < to) {
34651 if (!devmem_is_allowed(pfn)) {
34652+#ifdef CONFIG_GRKERNSEC_KMEM
34653+ gr_handle_mem_readwrite(from, to);
34654+#else
34655 printk(KERN_INFO
34656 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
34657 current->comm, from, to);
34658+#endif
34659 return 0;
34660 }
34661 cursor += PAGE_SIZE;
34662@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34663 }
34664 return 1;
34665 }
34666+#elif defined(CONFIG_GRKERNSEC_KMEM)
34667+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34668+{
34669+ return 0;
34670+}
34671 #else
34672 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34673 {
34674@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34675
34676 while (count > 0) {
34677 unsigned long remaining;
34678+ char *temp;
34679
34680 sz = size_inside_page(p, count);
34681
34682@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34683 if (!ptr)
34684 return -EFAULT;
34685
34686- remaining = copy_to_user(buf, ptr, sz);
34687+#ifdef CONFIG_PAX_USERCOPY
34688+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34689+ if (!temp) {
34690+ unxlate_dev_mem_ptr(p, ptr);
34691+ return -ENOMEM;
34692+ }
34693+ memcpy(temp, ptr, sz);
34694+#else
34695+ temp = ptr;
34696+#endif
34697+
34698+ remaining = copy_to_user(buf, temp, sz);
34699+
34700+#ifdef CONFIG_PAX_USERCOPY
34701+ kfree(temp);
34702+#endif
34703+
34704 unxlate_dev_mem_ptr(p, ptr);
34705 if (remaining)
34706 return -EFAULT;
34707@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34708 size_t count, loff_t *ppos)
34709 {
34710 unsigned long p = *ppos;
34711- ssize_t low_count, read, sz;
34712+ ssize_t low_count, read, sz, err = 0;
34713 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
34714- int err = 0;
34715
34716 read = 0;
34717 if (p < (unsigned long) high_memory) {
34718@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34719 }
34720 #endif
34721 while (low_count > 0) {
34722+ char *temp;
34723+
34724 sz = size_inside_page(p, low_count);
34725
34726 /*
34727@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34728 */
34729 kbuf = xlate_dev_kmem_ptr((char *)p);
34730
34731- if (copy_to_user(buf, kbuf, sz))
34732+#ifdef CONFIG_PAX_USERCOPY
34733+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34734+ if (!temp)
34735+ return -ENOMEM;
34736+ memcpy(temp, kbuf, sz);
34737+#else
34738+ temp = kbuf;
34739+#endif
34740+
34741+ err = copy_to_user(buf, temp, sz);
34742+
34743+#ifdef CONFIG_PAX_USERCOPY
34744+ kfree(temp);
34745+#endif
34746+
34747+ if (err)
34748 return -EFAULT;
34749 buf += sz;
34750 p += sz;
34751@@ -833,6 +880,9 @@ static const struct memdev {
34752 #ifdef CONFIG_CRASH_DUMP
34753 [12] = { "oldmem", 0, &oldmem_fops, NULL },
34754 #endif
34755+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34756+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
34757+#endif
34758 };
34759
34760 static int memory_open(struct inode *inode, struct file *filp)
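
Both read_mem() and read_kmem() gain the same CONFIG_PAX_USERCOPY shape above: allocate a heap buffer of exactly the transfer size, copy the device or kernel memory into it, and copy_to_user() only from that bounce buffer, so the USERCOPY checker can verify the object's bounds instead of seeing an arbitrary kernel pointer. A userspace model of the control flow (the helper and error codes are stand-ins):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* Stand-in for copy_to_user(): returns bytes NOT copied. */
    static size_t copy_to_user_model(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    /* Model of the PAX_USERCOPY hunks: bounce through a freshly
     * allocated buffer whose size the usercopy checker can verify. */
    static int read_mem_model(char *ubuf, const char *ptr, size_t sz)
    {
        char *temp = malloc(sz);
        size_t remaining;

        if (!temp)
            return -1;                 /* -ENOMEM in the kernel */
        memcpy(temp, ptr, sz);
        remaining = copy_to_user_model(ubuf, temp, sz);
        free(temp);
        return remaining ? -2 : 0;     /* -EFAULT in the kernel */
    }

    int main(void)
    {
        char src[8] = "devmem!", dst[8] = { 0 };

        printf("%d %s\n", read_mem_model(dst, src, sizeof(src)), dst);
        return 0;
    }
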
34761diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
34762index 9df78e2..01ba9ae 100644
34763--- a/drivers/char/nvram.c
34764+++ b/drivers/char/nvram.c
34765@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
34766
34767 spin_unlock_irq(&rtc_lock);
34768
34769- if (copy_to_user(buf, contents, tmp - contents))
34770+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
34771 return -EFAULT;
34772
34773 *ppos = i;
34774diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
34775index b66eaa0..2619d1b 100644
34776--- a/drivers/char/pcmcia/synclink_cs.c
34777+++ b/drivers/char/pcmcia/synclink_cs.c
34778@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34779
34780 if (debug_level >= DEBUG_LEVEL_INFO)
34781 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
34782- __FILE__,__LINE__, info->device_name, port->count);
34783+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
34784
34785- WARN_ON(!port->count);
34786+ WARN_ON(!atomic_read(&port->count));
34787
34788 if (tty_port_close_start(port, tty, filp) == 0)
34789 goto cleanup;
34790@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34791 cleanup:
34792 if (debug_level >= DEBUG_LEVEL_INFO)
34793 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
34794- tty->driver->name, port->count);
34795+ tty->driver->name, atomic_read(&port->count));
34796 }
34797
34798 /* Wait until the transmitter is empty.
34799@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34800
34801 if (debug_level >= DEBUG_LEVEL_INFO)
34802 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
34803- __FILE__,__LINE__,tty->driver->name, port->count);
34804+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
34805
34806 /* If port is closing, signal caller to try again */
34807 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
34808@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34809 goto cleanup;
34810 }
34811 spin_lock(&port->lock);
34812- port->count++;
34813+ atomic_inc(&port->count);
34814 spin_unlock(&port->lock);
34815 spin_unlock_irqrestore(&info->netlock, flags);
34816
34817- if (port->count == 1) {
34818+ if (atomic_read(&port->count) == 1) {
34819 /* 1st open on this device, init hardware */
34820 retval = startup(info, tty);
34821 if (retval < 0)
34822@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
34823 unsigned short new_crctype;
34824
34825 /* return error if TTY interface open */
34826- if (info->port.count)
34827+ if (atomic_read(&info->port.count))
34828 return -EBUSY;
34829
34830 switch (encoding)
34831@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
34832
34833 /* arbitrate between network and tty opens */
34834 spin_lock_irqsave(&info->netlock, flags);
34835- if (info->port.count != 0 || info->netcount != 0) {
34836+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
34837 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
34838 spin_unlock_irqrestore(&info->netlock, flags);
34839 return -EBUSY;
34840@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34841 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
34842
34843 /* return error if TTY interface open */
34844- if (info->port.count)
34845+ if (atomic_read(&info->port.count))
34846 return -EBUSY;
34847
34848 if (cmd != SIOCWANDEV)
34849diff --git a/drivers/char/random.c b/drivers/char/random.c
34850index 57d4b15..253207b 100644
34851--- a/drivers/char/random.c
34852+++ b/drivers/char/random.c
34853@@ -272,8 +272,13 @@
34854 /*
34855 * Configuration information
34856 */
34857+#ifdef CONFIG_GRKERNSEC_RANDNET
34858+#define INPUT_POOL_WORDS 512
34859+#define OUTPUT_POOL_WORDS 128
34860+#else
34861 #define INPUT_POOL_WORDS 128
34862 #define OUTPUT_POOL_WORDS 32
34863+#endif
34864 #define SEC_XFER_SIZE 512
34865 #define EXTRACT_SIZE 10
34866
34867@@ -313,10 +318,17 @@ static struct poolinfo {
34868 int poolwords;
34869 int tap1, tap2, tap3, tap4, tap5;
34870 } poolinfo_table[] = {
34871+#ifdef CONFIG_GRKERNSEC_RANDNET
34872+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
34873+ { 512, 411, 308, 208, 104, 1 },
34874+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
34875+ { 128, 103, 76, 51, 25, 1 },
34876+#else
34877 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
34878 { 128, 103, 76, 51, 25, 1 },
34879 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
34880 { 32, 26, 20, 14, 7, 1 },
34881+#endif
34882 #if 0
34883 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
34884 { 2048, 1638, 1231, 819, 411, 1 },
34885@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
34886 input_rotate += i ? 7 : 14;
34887 }
34888
34889- ACCESS_ONCE(r->input_rotate) = input_rotate;
34890- ACCESS_ONCE(r->add_ptr) = i;
34891+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
34892+ ACCESS_ONCE_RW(r->add_ptr) = i;
34893 smp_wmb();
34894
34895 if (out)
34896@@ -1024,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
34897
34898 extract_buf(r, tmp);
34899 i = min_t(int, nbytes, EXTRACT_SIZE);
34900- if (copy_to_user(buf, tmp, i)) {
34901+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
34902 ret = -EFAULT;
34903 break;
34904 }
34905@@ -1360,7 +1372,7 @@ EXPORT_SYMBOL(generate_random_uuid);
34906 #include <linux/sysctl.h>
34907
34908 static int min_read_thresh = 8, min_write_thresh;
34909-static int max_read_thresh = INPUT_POOL_WORDS * 32;
34910+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
34911 static int max_write_thresh = INPUT_POOL_WORDS * 32;
34912 static char sysctl_bootid[16];
34913
34914@@ -1376,7 +1388,7 @@ static char sysctl_bootid[16];
34915 static int proc_do_uuid(ctl_table *table, int write,
34916 void __user *buffer, size_t *lenp, loff_t *ppos)
34917 {
34918- ctl_table fake_table;
34919+ ctl_table_no_const fake_table;
34920 unsigned char buf[64], tmp_uuid[16], *uuid;
34921
34922 uuid = table->data;
34923diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
34924index d780295..b29f3a8 100644
34925--- a/drivers/char/sonypi.c
34926+++ b/drivers/char/sonypi.c
34927@@ -54,6 +54,7 @@
34928
34929 #include <asm/uaccess.h>
34930 #include <asm/io.h>
34931+#include <asm/local.h>
34932
34933 #include <linux/sonypi.h>
34934
34935@@ -490,7 +491,7 @@ static struct sonypi_device {
34936 spinlock_t fifo_lock;
34937 wait_queue_head_t fifo_proc_list;
34938 struct fasync_struct *fifo_async;
34939- int open_count;
34940+ local_t open_count;
34941 int model;
34942 struct input_dev *input_jog_dev;
34943 struct input_dev *input_key_dev;
34944@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34945 static int sonypi_misc_release(struct inode *inode, struct file *file)
34946 {
34947 mutex_lock(&sonypi_device.lock);
34948- sonypi_device.open_count--;
34949+ local_dec(&sonypi_device.open_count);
34950 mutex_unlock(&sonypi_device.lock);
34951 return 0;
34952 }
34953@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34954 {
34955 mutex_lock(&sonypi_device.lock);
34956 /* Flush input queue on first open */
34957- if (!sonypi_device.open_count)
34958+ if (!local_read(&sonypi_device.open_count))
34959 kfifo_reset(&sonypi_device.fifo);
34960- sonypi_device.open_count++;
34961+ local_inc(&sonypi_device.open_count);
34962 mutex_unlock(&sonypi_device.lock);
34963
34964 return 0;
34965diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
34966index 93211df..c7805f7 100644
34967--- a/drivers/char/tpm/tpm.c
34968+++ b/drivers/char/tpm/tpm.c
34969@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
34970 chip->vendor.req_complete_val)
34971 goto out_recv;
34972
34973- if ((status == chip->vendor.req_canceled)) {
34974+ if (status == chip->vendor.req_canceled) {
34975 dev_err(chip->dev, "Operation Canceled\n");
34976 rc = -ECANCELED;
34977 goto out;
34978diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34979index 56051d0..11cf3b7 100644
34980--- a/drivers/char/tpm/tpm_acpi.c
34981+++ b/drivers/char/tpm/tpm_acpi.c
34982@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34983 virt = acpi_os_map_memory(start, len);
34984 if (!virt) {
34985 kfree(log->bios_event_log);
34986+ log->bios_event_log = NULL;
34987 printk("%s: ERROR - Unable to map memory\n", __func__);
34988 return -EIO;
34989 }
34990
34991- memcpy_fromio(log->bios_event_log, virt, len);
34992+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34993
34994 acpi_os_unmap_memory(virt, len);
34995 return 0;
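
The tpm_acpi hunk adds log->bios_event_log = NULL after the error-path kfree(), so a caller that later frees or tests the cached pointer cannot double-free or reuse it. A minimal sketch of the idiom:

    #include <stdlib.h>

    struct bios_log { char *bios_event_log; };

    static int read_log_model(struct bios_log *log)
    {
        log->bios_event_log = malloc(64);
        if (!log->bios_event_log)
            return -1;

        /* ... mapping the firmware log fails ... */
        free(log->bios_event_log);
        log->bios_event_log = NULL;   /* the added line: callers that
                                       * free again or test the field
                                       * stay safe */
        return -5;                    /* -EIO in the kernel */
    }

    int main(void)
    {
        struct bios_log log;

        read_log_model(&log);
        free(log.bios_event_log);     /* free(NULL) is a no-op, not a
                                       * double free */
        return 0;
    }
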
34996diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34997index 84ddc55..1d32f1e 100644
34998--- a/drivers/char/tpm/tpm_eventlog.c
34999+++ b/drivers/char/tpm/tpm_eventlog.c
35000@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
35001 event = addr;
35002
35003 if ((event->event_type == 0 && event->event_size == 0) ||
35004- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
35005+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
35006 return NULL;
35007
35008 return addr;
35009@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
35010 return NULL;
35011
35012 if ((event->event_type == 0 && event->event_size == 0) ||
35013- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
35014+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
35015 return NULL;
35016
35017 (*pos)++;
35018@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
35019 int i;
35020
35021 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
35022- seq_putc(m, data[i]);
35023+ if (!seq_putc(m, data[i]))
35024+ return -EFAULT;
35025
35026 return 0;
35027 }
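
The two tpm_eventlog hunks replace an additive bounds check with a subtractive one: with an attacker-influenced event_size, addr + sizeof(struct tcpa_event) + event_size can wrap past limit and the old test passes. Comparing the size against the remaining room cannot wrap, assuming addr is at or below limit (the header size and helper below are illustrative):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define HDR 32u   /* stand-in for sizeof(struct tcpa_event) */

    /* 'addr + HDR + event_size >= limit' can overflow and pass when
     * event_size is huge; checking event_size against the remaining
     * room keeps every operand small. */
    static bool event_fits(uintptr_t addr, uintptr_t limit, size_t event_size)
    {
        if (limit - addr < HDR)        /* not even room for the header */
            return false;
        return event_size < limit - addr - HDR;
    }

    int main(void)
    {
        uintptr_t base = 0x1000, limit = 0x2000;

        printf("%d\n", event_fits(base, limit, 64));         /* 1: fits   */
        printf("%d\n", event_fits(base, limit, SIZE_MAX));   /* 0: caught */
        return 0;
    }
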
35028diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
35029index a4b7aa0..2faa0bc 100644
35030--- a/drivers/char/virtio_console.c
35031+++ b/drivers/char/virtio_console.c
35032@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
35033 if (to_user) {
35034 ssize_t ret;
35035
35036- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
35037+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
35038 if (ret)
35039 return -EFAULT;
35040 } else {
35041@@ -784,7 +784,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
35042 if (!port_has_data(port) && !port->host_connected)
35043 return 0;
35044
35045- return fill_readbuf(port, ubuf, count, true);
35046+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
35047 }
35048
35049 static int wait_port_writable(struct port *port, bool nonblock)
35050diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
35051index 8ae1a61..9c00613 100644
35052--- a/drivers/clocksource/arm_generic.c
35053+++ b/drivers/clocksource/arm_generic.c
35054@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
35055 return NOTIFY_OK;
35056 }
35057
35058-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
35059+static struct notifier_block arch_timer_cpu_nb = {
35060 .notifier_call = arch_timer_cpu_notify,
35061 };
35062
35063diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
35064index 7b0d49d..134fac9 100644
35065--- a/drivers/cpufreq/acpi-cpufreq.c
35066+++ b/drivers/cpufreq/acpi-cpufreq.c
35067@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
35068 return sprintf(buf, "%u\n", boost_enabled);
35069 }
35070
35071-static struct global_attr global_boost = __ATTR(boost, 0644,
35072+static global_attr_no_const global_boost = __ATTR(boost, 0644,
35073 show_global_boost,
35074 store_global_boost);
35075
35076@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
35077 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
35078 per_cpu(acfreq_data, cpu) = data;
35079
35080- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
35081- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
35082+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
35083+ pax_open_kernel();
35084+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
35085+ pax_close_kernel();
35086+ }
35087
35088 result = acpi_processor_register_performance(data->acpi_data, cpu);
35089 if (result)
35090@@ -835,7 +838,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
35091 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
35092 break;
35093 case ACPI_ADR_SPACE_FIXED_HARDWARE:
35094- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
35095+ pax_open_kernel();
35096+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
35097+ pax_close_kernel();
35098 policy->cur = get_cur_freq_on_cpu(cpu);
35099 break;
35100 default:
35101@@ -846,8 +851,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
35102 acpi_processor_notify_smm(THIS_MODULE);
35103
35104 /* Check for APERF/MPERF support in hardware */
35105- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
35106- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
35107+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
35108+ pax_open_kernel();
35109+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
35110+ pax_close_kernel();
35111+ }
35112
35113 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
35114 for (i = 0; i < perf->state_count; i++)
35115diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
35116index 1f93dbd..305cef1 100644
35117--- a/drivers/cpufreq/cpufreq.c
35118+++ b/drivers/cpufreq/cpufreq.c
35119@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
35120 return NOTIFY_OK;
35121 }
35122
35123-static struct notifier_block __refdata cpufreq_cpu_notifier = {
35124+static struct notifier_block cpufreq_cpu_notifier = {
35125 .notifier_call = cpufreq_cpu_callback,
35126 };
35127
35128@@ -1875,8 +1875,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
35129
35130 pr_debug("trying to register driver %s\n", driver_data->name);
35131
35132- if (driver_data->setpolicy)
35133- driver_data->flags |= CPUFREQ_CONST_LOOPS;
35134+ if (driver_data->setpolicy) {
35135+ pax_open_kernel();
35136+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
35137+ pax_close_kernel();
35138+ }
35139
35140 spin_lock_irqsave(&cpufreq_driver_lock, flags);
35141 if (cpufreq_driver) {
35142diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
35143index 6c5f1d3..c7e2f35e 100644
35144--- a/drivers/cpufreq/cpufreq_governor.c
35145+++ b/drivers/cpufreq/cpufreq_governor.c
35146@@ -243,7 +243,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
35147 * governor, thus we are bound to jiffes/HZ
35148 	 * governor, thus we are bound to jiffies/HZ
35149 if (dbs_data->governor == GOV_CONSERVATIVE) {
35150- struct cs_ops *ops = dbs_data->gov_ops;
35151+ const struct cs_ops *ops = dbs_data->gov_ops;
35152
35153 cpufreq_register_notifier(ops->notifier_block,
35154 CPUFREQ_TRANSITION_NOTIFIER);
35155@@ -251,7 +251,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
35156 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
35157 jiffies_to_usecs(10);
35158 } else {
35159- struct od_ops *ops = dbs_data->gov_ops;
35160+ const struct od_ops *ops = dbs_data->gov_ops;
35161
35162 od_tuners->io_is_busy = ops->io_busy();
35163 }
35164@@ -268,7 +268,7 @@ second_time:
35165 cs_dbs_info->enable = 1;
35166 cs_dbs_info->requested_freq = policy->cur;
35167 } else {
35168- struct od_ops *ops = dbs_data->gov_ops;
35169+ const struct od_ops *ops = dbs_data->gov_ops;
35170 od_dbs_info->rate_mult = 1;
35171 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
35172 ops->powersave_bias_init_cpu(cpu);
35173@@ -289,7 +289,7 @@ second_time:
35174 mutex_destroy(&cpu_cdbs->timer_mutex);
35175 dbs_data->enable--;
35176 if (!dbs_data->enable) {
35177- struct cs_ops *ops = dbs_data->gov_ops;
35178+ const struct cs_ops *ops = dbs_data->gov_ops;
35179
35180 sysfs_remove_group(cpufreq_global_kobject,
35181 dbs_data->attr_group);
35182diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
35183index f661654..6c8e638 100644
35184--- a/drivers/cpufreq/cpufreq_governor.h
35185+++ b/drivers/cpufreq/cpufreq_governor.h
35186@@ -142,7 +142,7 @@ struct dbs_data {
35187 void (*gov_check_cpu)(int cpu, unsigned int load);
35188
35189 /* Governor specific ops, see below */
35190- void *gov_ops;
35191+ const void *gov_ops;
35192 };
35193
35194 /* Governor specific ops, will be passed to dbs_data->gov_ops */
35195diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
35196index 9d7732b..0b1a793 100644
35197--- a/drivers/cpufreq/cpufreq_stats.c
35198+++ b/drivers/cpufreq/cpufreq_stats.c
35199@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
35200 }
35201
35202 /* priority=1 so this will get called before cpufreq_remove_dev */
35203-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
35204+static struct notifier_block cpufreq_stat_cpu_notifier = {
35205 .notifier_call = cpufreq_stat_cpu_callback,
35206 .priority = 1,
35207 };
35208diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
35209index 827629c9..0bc6a03 100644
35210--- a/drivers/cpufreq/p4-clockmod.c
35211+++ b/drivers/cpufreq/p4-clockmod.c
35212@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
35213 case 0x0F: /* Core Duo */
35214 case 0x16: /* Celeron Core */
35215 case 0x1C: /* Atom */
35216- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
35217+ pax_open_kernel();
35218+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
35219+ pax_close_kernel();
35220 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
35221 case 0x0D: /* Pentium M (Dothan) */
35222- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
35223+ pax_open_kernel();
35224+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
35225+ pax_close_kernel();
35226 /* fall through */
35227 case 0x09: /* Pentium M (Banias) */
35228 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
35229@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
35230
35231 /* on P-4s, the TSC runs with constant frequency independent whether
35232 * throttling is active or not. */
35233- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
35234+ pax_open_kernel();
35235+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
35236+ pax_close_kernel();
35237
35238 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
35239 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
35240diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
35241index 3a953d5..f5993f6 100644
35242--- a/drivers/cpufreq/speedstep-centrino.c
35243+++ b/drivers/cpufreq/speedstep-centrino.c
35244@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
35245 !cpu_has(cpu, X86_FEATURE_EST))
35246 return -ENODEV;
35247
35248- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
35249- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
35250+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
35251+ pax_open_kernel();
35252+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
35253+ pax_close_kernel();
35254+ }
35255
35256 if (policy->cpu != 0)
35257 return -ENODEV;
35258diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
35259index e1f6860..f8de20b 100644
35260--- a/drivers/cpuidle/cpuidle.c
35261+++ b/drivers/cpuidle/cpuidle.c
35262@@ -279,7 +279,7 @@ static int poll_idle(struct cpuidle_device *dev,
35263
35264 static void poll_idle_init(struct cpuidle_driver *drv)
35265 {
35266- struct cpuidle_state *state = &drv->states[0];
35267+ cpuidle_state_no_const *state = &drv->states[0];
35268
35269 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
35270 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
35271diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
35272index ea2f8e7..70ac501 100644
35273--- a/drivers/cpuidle/governor.c
35274+++ b/drivers/cpuidle/governor.c
35275@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
35276 mutex_lock(&cpuidle_lock);
35277 if (__cpuidle_find_governor(gov->name) == NULL) {
35278 ret = 0;
35279- list_add_tail(&gov->governor_list, &cpuidle_governors);
35280+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
35281 if (!cpuidle_curr_governor ||
35282 cpuidle_curr_governor->rating < gov->rating)
35283 cpuidle_switch_governor(gov);
35284@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
35285 new_gov = cpuidle_replace_governor(gov->rating);
35286 cpuidle_switch_governor(new_gov);
35287 }
35288- list_del(&gov->governor_list);
35289+ pax_list_del((struct list_head *)&gov->governor_list);
35290 mutex_unlock(&cpuidle_lock);
35291 }
35292
35293diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
35294index 428754a..8bdf9cc 100644
35295--- a/drivers/cpuidle/sysfs.c
35296+++ b/drivers/cpuidle/sysfs.c
35297@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
35298 NULL
35299 };
35300
35301-static struct attribute_group cpuidle_attr_group = {
35302+static attribute_group_no_const cpuidle_attr_group = {
35303 .attrs = cpuidle_default_attrs,
35304 .name = "cpuidle",
35305 };
35306diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
35307index 3b36797..289c16a 100644
35308--- a/drivers/devfreq/devfreq.c
35309+++ b/drivers/devfreq/devfreq.c
35310@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
35311 goto err_out;
35312 }
35313
35314- list_add(&governor->node, &devfreq_governor_list);
35315+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
35316
35317 list_for_each_entry(devfreq, &devfreq_list, node) {
35318 int ret = 0;
35319@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
35320 }
35321 }
35322
35323- list_del(&governor->node);
35324+ pax_list_del((struct list_head *)&governor->node);
35325 err_out:
35326 mutex_unlock(&devfreq_list_lock);
35327
35328diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
35329index b70709b..1d8d02a 100644
35330--- a/drivers/dma/sh/shdma.c
35331+++ b/drivers/dma/sh/shdma.c
35332@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
35333 return ret;
35334 }
35335
35336-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
35337+static struct notifier_block sh_dmae_nmi_notifier = {
35338 .notifier_call = sh_dmae_nmi_handler,
35339
35340 /* Run before NMI debug handler and KGDB */
35341diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
35342index 0ca1ca7..6e6f454 100644
35343--- a/drivers/edac/edac_mc_sysfs.c
35344+++ b/drivers/edac/edac_mc_sysfs.c
35345@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
35346 struct dev_ch_attribute {
35347 struct device_attribute attr;
35348 int channel;
35349-};
35350+} __do_const;
35351
35352 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
35353 struct dev_ch_attribute dev_attr_legacy_##_name = \
35354diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
35355index 0056c4d..23b54d9 100644
35356--- a/drivers/edac/edac_pci_sysfs.c
35357+++ b/drivers/edac/edac_pci_sysfs.c
35358@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
35359 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
35360 static int edac_pci_poll_msec = 1000; /* one second workq period */
35361
35362-static atomic_t pci_parity_count = ATOMIC_INIT(0);
35363-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
35364+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
35365+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
35366
35367 static struct kobject *edac_pci_top_main_kobj;
35368 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
35369@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
35370 void *value;
35371 ssize_t(*show) (void *, char *);
35372 ssize_t(*store) (void *, const char *, size_t);
35373-};
35374+} __do_const;
35375
35376 /* Set of show/store abstract level functions for PCI Parity object */
35377 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
35378@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
35379 edac_printk(KERN_CRIT, EDAC_PCI,
35380 "Signaled System Error on %s\n",
35381 pci_name(dev));
35382- atomic_inc(&pci_nonparity_count);
35383+ atomic_inc_unchecked(&pci_nonparity_count);
35384 }
35385
35386 if (status & (PCI_STATUS_PARITY)) {
35387@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
35388 "Master Data Parity Error on %s\n",
35389 pci_name(dev));
35390
35391- atomic_inc(&pci_parity_count);
35392+ atomic_inc_unchecked(&pci_parity_count);
35393 }
35394
35395 if (status & (PCI_STATUS_DETECTED_PARITY)) {
35396@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
35397 "Detected Parity Error on %s\n",
35398 pci_name(dev));
35399
35400- atomic_inc(&pci_parity_count);
35401+ atomic_inc_unchecked(&pci_parity_count);
35402 }
35403 }
35404
35405@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
35406 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
35407 "Signaled System Error on %s\n",
35408 pci_name(dev));
35409- atomic_inc(&pci_nonparity_count);
35410+ atomic_inc_unchecked(&pci_nonparity_count);
35411 }
35412
35413 if (status & (PCI_STATUS_PARITY)) {
35414@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
35415 "Master Data Parity Error on "
35416 "%s\n", pci_name(dev));
35417
35418- atomic_inc(&pci_parity_count);
35419+ atomic_inc_unchecked(&pci_parity_count);
35420 }
35421
35422 if (status & (PCI_STATUS_DETECTED_PARITY)) {
35423@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
35424 "Detected Parity Error on %s\n",
35425 pci_name(dev));
35426
35427- atomic_inc(&pci_parity_count);
35428+ atomic_inc_unchecked(&pci_parity_count);
35429 }
35430 }
35431 }
35432@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
35433 if (!check_pci_errors)
35434 return;
35435
35436- before_count = atomic_read(&pci_parity_count);
35437+ before_count = atomic_read_unchecked(&pci_parity_count);
35438
35439 /* scan all PCI devices looking for a Parity Error on devices and
35440 * bridges.
35441@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
35442 /* Only if operator has selected panic on PCI Error */
35443 if (edac_pci_get_panic_on_pe()) {
35444 /* If the count is different 'after' from 'before' */
35445- if (before_count != atomic_read(&pci_parity_count))
35446+ if (before_count != atomic_read_unchecked(&pci_parity_count))
35447 panic("EDAC: PCI Parity Error");
35448 }
35449 }
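
The EDAC counters converted above are pure statistics: nothing is freed or reused when they wrap, so they do not need the overflow trap that PaX's REFCOUNT hardening arms on every atomic_t. atomic_unchecked_t opts such counters out while real reference counts keep the protection. A short sketch, assuming the _unchecked API from the PaX headers:

	static atomic_unchecked_t stat_errors = ATOMIC_INIT(0);	/* may wrap */
	static atomic_t obj_refs = ATOMIC_INIT(1);		/* overflow must trap */

	static void log_error(void)
	{
		atomic_inc_unchecked(&stat_errors);	/* benign wraparound */
	}

	static int errors_seen(void)
	{
		return atomic_read_unchecked(&stat_errors);
	}
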
35450diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
35451index 6796799..99e8377 100644
35452--- a/drivers/edac/mce_amd.h
35453+++ b/drivers/edac/mce_amd.h
35454@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
35455 struct amd_decoder_ops {
35456 bool (*mc0_mce)(u16, u8);
35457 bool (*mc1_mce)(u16, u8);
35458-};
35459+} __no_const;
35460
35461 void amd_report_gart_errors(bool);
35462 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
35463diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
35464index 57ea7f4..789e3c3 100644
35465--- a/drivers/firewire/core-card.c
35466+++ b/drivers/firewire/core-card.c
35467@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
35468
35469 void fw_core_remove_card(struct fw_card *card)
35470 {
35471- struct fw_card_driver dummy_driver = dummy_driver_template;
35472+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
35473
35474 card->driver->update_phy_reg(card, 4,
35475 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
35476diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
35477index f8d2287..5aaf4db 100644
35478--- a/drivers/firewire/core-cdev.c
35479+++ b/drivers/firewire/core-cdev.c
35480@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
35481 int ret;
35482
35483 if ((request->channels == 0 && request->bandwidth == 0) ||
35484- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
35485- request->bandwidth < 0)
35486+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
35487 return -EINVAL;
35488
35489 r = kmalloc(sizeof(*r), GFP_KERNEL);
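
The dropped `request->bandwidth < 0` test is dead code provided the UAPI field is unsigned (the fw_cdev structures use __u32 fields), and gcc's -Wtype-limits flags it. A standalone illustration of why the branch can never fire:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bandwidth = 42;

		/* 0 is converted to unsigned here, so the comparison is
		 * always false -- removing it cannot change behaviour */
		if (bandwidth < 0)
			puts("unreachable");
		else
			puts("bandwidth >= 0 by construction");
		return 0;
	}
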
35490diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
35491index af3e8aa..eb2f227 100644
35492--- a/drivers/firewire/core-device.c
35493+++ b/drivers/firewire/core-device.c
35494@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
35495 struct config_rom_attribute {
35496 struct device_attribute attr;
35497 u32 key;
35498-};
35499+} __do_const;
35500
35501 static ssize_t show_immediate(struct device *dev,
35502 struct device_attribute *dattr, char *buf)
35503diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
35504index 28a94c7..58da63a 100644
35505--- a/drivers/firewire/core-transaction.c
35506+++ b/drivers/firewire/core-transaction.c
35507@@ -38,6 +38,7 @@
35508 #include <linux/timer.h>
35509 #include <linux/types.h>
35510 #include <linux/workqueue.h>
35511+#include <linux/sched.h>
35512
35513 #include <asm/byteorder.h>
35514
35515diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
35516index 515a42c..5ecf3ba 100644
35517--- a/drivers/firewire/core.h
35518+++ b/drivers/firewire/core.h
35519@@ -111,6 +111,7 @@ struct fw_card_driver {
35520
35521 int (*stop_iso)(struct fw_iso_context *ctx);
35522 };
35523+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
35524
35525 void fw_card_initialize(struct fw_card *card,
35526 const struct fw_card_driver *driver, struct device *device);
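
This typedef is the escape hatch that pairs with the constify plugin: ordinary `struct fw_card_driver` instances become read-only, while the __no_const variant stays writable for the one case above where fw_core_remove_card() builds a dummy driver on the stack. The same pairing recurs all through the patch (drm_ioctl_no_const_t, efivar_operations_no_const, cpuidle_state_no_const, and so on). Condensed, assuming the plugin's __no_const attribute:

	struct example_ops {
		int (*start)(void *priv);
		int (*stop)(void *priv);
	};				/* instances constified by default */

	typedef struct example_ops __no_const example_ops_no_const;

	static const struct example_ops template = { 0 };

	static void use_dummy(void)
	{
		example_ops_no_const dummy = template;	/* writable copy */
		dummy.stop = NULL;			/* patch freely */
	}
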
35527diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
35528index 94a58a0..f5eba42 100644
35529--- a/drivers/firmware/dmi-id.c
35530+++ b/drivers/firmware/dmi-id.c
35531@@ -16,7 +16,7 @@
35532 struct dmi_device_attribute{
35533 struct device_attribute dev_attr;
35534 int field;
35535-};
35536+} __do_const;
35537 #define to_dmi_dev_attr(_dev_attr) \
35538 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
35539
35540diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
35541index 4cd392d..4b629e1 100644
35542--- a/drivers/firmware/dmi_scan.c
35543+++ b/drivers/firmware/dmi_scan.c
35544@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
35545 }
35546 }
35547 else {
35548- /*
35549- * no iounmap() for that ioremap(); it would be a no-op, but
35550- * it's so early in setup that sucker gets confused into doing
35551- * what it shouldn't if we actually call it.
35552- */
35553 p = dmi_ioremap(0xF0000, 0x10000);
35554 if (p == NULL)
35555 goto error;
35556@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
35557 if (buf == NULL)
35558 return -1;
35559
35560- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
35561+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
35562
35563 iounmap(buf);
35564 return 0;
35565diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
35566index b07cb37..2a51037 100644
35567--- a/drivers/firmware/efivars.c
35568+++ b/drivers/firmware/efivars.c
35569@@ -138,7 +138,7 @@ struct efivar_attribute {
35570 };
35571
35572 static struct efivars __efivars;
35573-static struct efivar_operations ops;
35574+static efivar_operations_no_const ops __read_only;
35575
35576 #define PSTORE_EFI_ATTRIBUTES \
35577 (EFI_VARIABLE_NON_VOLATILE | \
35578@@ -1834,7 +1834,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
35579 static int
35580 create_efivars_bin_attributes(struct efivars *efivars)
35581 {
35582- struct bin_attribute *attr;
35583+ bin_attribute_no_const *attr;
35584 int error;
35585
35586 /* new_var */
35587diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
35588index 2a90ba6..07f3733 100644
35589--- a/drivers/firmware/google/memconsole.c
35590+++ b/drivers/firmware/google/memconsole.c
35591@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
35592 if (!found_memconsole())
35593 return -ENODEV;
35594
35595- memconsole_bin_attr.size = memconsole_length;
35596+ pax_open_kernel();
35597+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
35598+ pax_close_kernel();
35599
35600 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
35601
35602diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
35603index 6f2306d..af9476a 100644
35604--- a/drivers/gpio/gpio-ich.c
35605+++ b/drivers/gpio/gpio-ich.c
35606@@ -69,7 +69,7 @@ struct ichx_desc {
35607 /* Some chipsets have quirks, let these use their own request/get */
35608 int (*request)(struct gpio_chip *chip, unsigned offset);
35609 int (*get)(struct gpio_chip *chip, unsigned offset);
35610-};
35611+} __do_const;
35612
35613 static struct {
35614 spinlock_t lock;
35615diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
35616index 9902732..64b62dd 100644
35617--- a/drivers/gpio/gpio-vr41xx.c
35618+++ b/drivers/gpio/gpio-vr41xx.c
35619@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
35620 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
35621 maskl, pendl, maskh, pendh);
35622
35623- atomic_inc(&irq_err_count);
35624+ atomic_inc_unchecked(&irq_err_count);
35625
35626 return -EINVAL;
35627 }
35628diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
35629index 7b2d378..cc947ea 100644
35630--- a/drivers/gpu/drm/drm_crtc_helper.c
35631+++ b/drivers/gpu/drm/drm_crtc_helper.c
35632@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
35633 struct drm_crtc *tmp;
35634 int crtc_mask = 1;
35635
35636- WARN(!crtc, "checking null crtc?\n");
35637+ BUG_ON(!crtc);
35638
35639 dev = crtc->dev;
35640
35641diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
35642index be174ca..7f38143 100644
35643--- a/drivers/gpu/drm/drm_drv.c
35644+++ b/drivers/gpu/drm/drm_drv.c
35645@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
35646 /**
35647 * Copy and IOCTL return string to user space
35648 */
35649-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
35650+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
35651 {
35652 int len;
35653
35654@@ -377,7 +377,7 @@ long drm_ioctl(struct file *filp,
35655 struct drm_file *file_priv = filp->private_data;
35656 struct drm_device *dev;
35657 struct drm_ioctl_desc *ioctl;
35658- drm_ioctl_t *func;
35659+ drm_ioctl_no_const_t func;
35660 unsigned int nr = DRM_IOCTL_NR(cmd);
35661 int retcode = -EINVAL;
35662 char stack_kdata[128];
35663@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
35664 return -ENODEV;
35665
35666 atomic_inc(&dev->ioctl_count);
35667- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
35668+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
35669 ++file_priv->ioctl_count;
35670
35671 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
35672diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
35673index 32d7775..c8be5e1 100644
35674--- a/drivers/gpu/drm/drm_fops.c
35675+++ b/drivers/gpu/drm/drm_fops.c
35676@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
35677 }
35678
35679 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
35680- atomic_set(&dev->counts[i], 0);
35681+ atomic_set_unchecked(&dev->counts[i], 0);
35682
35683 dev->sigdata.lock = NULL;
35684
35685@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
35686 if (drm_device_is_unplugged(dev))
35687 return -ENODEV;
35688
35689- if (!dev->open_count++)
35690+ if (local_inc_return(&dev->open_count) == 1)
35691 need_setup = 1;
35692 mutex_lock(&dev->struct_mutex);
35693 old_imapping = inode->i_mapping;
35694@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
35695 retcode = drm_open_helper(inode, filp, dev);
35696 if (retcode)
35697 goto err_undo;
35698- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
35699+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
35700 if (need_setup) {
35701 retcode = drm_setup(dev);
35702 if (retcode)
35703@@ -166,7 +166,7 @@ err_undo:
35704 iput(container_of(dev->dev_mapping, struct inode, i_data));
35705 dev->dev_mapping = old_mapping;
35706 mutex_unlock(&dev->struct_mutex);
35707- dev->open_count--;
35708+ local_dec(&dev->open_count);
35709 return retcode;
35710 }
35711 EXPORT_SYMBOL(drm_open);
35712@@ -440,7 +440,7 @@ int drm_release(struct inode *inode, struct file *filp)
35713
35714 mutex_lock(&drm_global_mutex);
35715
35716- DRM_DEBUG("open_count = %d\n", dev->open_count);
35717+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
35718
35719 if (dev->driver->preclose)
35720 dev->driver->preclose(dev, file_priv);
35721@@ -449,10 +449,10 @@ int drm_release(struct inode *inode, struct file *filp)
35722 * Begin inline drm_release
35723 */
35724
35725- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
35726+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
35727 task_pid_nr(current),
35728 (long)old_encode_dev(file_priv->minor->device),
35729- dev->open_count);
35730+ local_read(&dev->open_count));
35731
35732 /* Release any auth tokens that might point to this file_priv,
35733 (do that under the drm_global_mutex) */
35734@@ -549,8 +549,8 @@ int drm_release(struct inode *inode, struct file *filp)
35735 * End inline drm_release
35736 */
35737
35738- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
35739- if (!--dev->open_count) {
35740+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
35741+ if (local_dec_and_test(&dev->open_count)) {
35742 if (atomic_read(&dev->ioctl_count)) {
35743 DRM_ERROR("Device busy: %d\n",
35744 atomic_read(&dev->ioctl_count));
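
dev->open_count goes from a bare int to a local_t here, so the first-open and last-close decisions collapse into single read-modify-write operations — local_inc_return() == 1 and local_dec_and_test() — instead of unlocked ++/-- on a plain integer; the DRM_DEBUG format strings switch to %ld to match local_read()'s long return. A userspace model of the idiom with C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long open_count;

	static void device_open(void)
	{
		/* "was zero, now one" decided in one atomic op */
		if (atomic_fetch_add(&open_count, 1) + 1 == 1)
			puts("first opener: one-time setup");
	}

	static void device_release(void)
	{
		/* "was one, now zero" decided in one atomic op */
		if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
			puts("last closer: tear down");
	}

	int main(void)
	{
		device_open();
		device_open();
		device_release();
		device_release();
		return 0;
	}
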
35745diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
35746index f731116..629842c 100644
35747--- a/drivers/gpu/drm/drm_global.c
35748+++ b/drivers/gpu/drm/drm_global.c
35749@@ -36,7 +36,7 @@
35750 struct drm_global_item {
35751 struct mutex mutex;
35752 void *object;
35753- int refcount;
35754+ atomic_t refcount;
35755 };
35756
35757 static struct drm_global_item glob[DRM_GLOBAL_NUM];
35758@@ -49,7 +49,7 @@ void drm_global_init(void)
35759 struct drm_global_item *item = &glob[i];
35760 mutex_init(&item->mutex);
35761 item->object = NULL;
35762- item->refcount = 0;
35763+ atomic_set(&item->refcount, 0);
35764 }
35765 }
35766
35767@@ -59,7 +59,7 @@ void drm_global_release(void)
35768 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
35769 struct drm_global_item *item = &glob[i];
35770 BUG_ON(item->object != NULL);
35771- BUG_ON(item->refcount != 0);
35772+ BUG_ON(atomic_read(&item->refcount) != 0);
35773 }
35774 }
35775
35776@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35777 void *object;
35778
35779 mutex_lock(&item->mutex);
35780- if (item->refcount == 0) {
35781+ if (atomic_read(&item->refcount) == 0) {
35782 item->object = kzalloc(ref->size, GFP_KERNEL);
35783 if (unlikely(item->object == NULL)) {
35784 ret = -ENOMEM;
35785@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35786 goto out_err;
35787
35788 }
35789- ++item->refcount;
35790+ atomic_inc(&item->refcount);
35791 ref->object = item->object;
35792 object = item->object;
35793 mutex_unlock(&item->mutex);
35794@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
35795 struct drm_global_item *item = &glob[ref->global_type];
35796
35797 mutex_lock(&item->mutex);
35798- BUG_ON(item->refcount == 0);
35799+ BUG_ON(atomic_read(&item->refcount) == 0);
35800 BUG_ON(ref->object != item->object);
35801- if (--item->refcount == 0) {
35802+ if (atomic_dec_and_test(&item->refcount)) {
35803 ref->release(ref);
35804 item->object = NULL;
35805 }
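
item->refcount was already serialized by item->mutex, so the atomic_t conversion above is not about a race: under PaX's REFCOUNT feature every atomic_t increment is overflow-checked, so expressing reference counts as atomic_t buys saturation against refcount-overflow bugs. The resulting get/put shape, modeled in userspace:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct item {
		atomic_int refcount;
		void *object;
	};

	static void item_get(struct item *it)
	{
		atomic_fetch_add(&it->refcount, 1);
	}

	static void item_put(struct item *it)
	{
		/* the atomic_dec_and_test() idiom: release on the
		 * 1 -> 0 edge */
		if (atomic_fetch_sub(&it->refcount, 1) == 1) {
			free(it->object);
			it->object = NULL;
		}
	}
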
35806diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
35807index d4b20ce..77a8d41 100644
35808--- a/drivers/gpu/drm/drm_info.c
35809+++ b/drivers/gpu/drm/drm_info.c
35810@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
35811 struct drm_local_map *map;
35812 struct drm_map_list *r_list;
35813
35814- /* Hardcoded from _DRM_FRAME_BUFFER,
35815- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
35816- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
35817- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
35818+ static const char * const types[] = {
35819+ [_DRM_FRAME_BUFFER] = "FB",
35820+ [_DRM_REGISTERS] = "REG",
35821+ [_DRM_SHM] = "SHM",
35822+ [_DRM_AGP] = "AGP",
35823+ [_DRM_SCATTER_GATHER] = "SG",
35824+ [_DRM_CONSISTENT] = "PCI",
35825+ [_DRM_GEM] = "GEM" };
35826 const char *type;
35827 int i;
35828
35829@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
35830 map = r_list->map;
35831 if (!map)
35832 continue;
35833- if (map->type < 0 || map->type > 5)
35834+ if (map->type >= ARRAY_SIZE(types))
35835 type = "??";
35836 else
35837 type = types[map->type];
35838@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
35839 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
35840 vma->vm_flags & VM_LOCKED ? 'l' : '-',
35841 vma->vm_flags & VM_IO ? 'i' : '-',
35842+#ifdef CONFIG_GRKERNSEC_HIDESYM
35843+ 0);
35844+#else
35845 vma->vm_pgoff);
35846+#endif
35847
35848 #if defined(__i386__)
35849 pgprot = pgprot_val(vma->vm_page_prot);
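
Two things improve in the drm_info hunk: the map-type names are keyed by the _DRM_* enumerators instead of by position, and the bound derives from ARRAY_SIZE() instead of the hard-coded 5 that previously left _DRM_GEM unnamed. A compilable miniature of the pattern:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP,
			MAP_SG, MAP_PCI, MAP_GEM };

	/* designated initializers survive enum reordering */
	static const char * const types[] = {
		[MAP_FB]  = "FB",  [MAP_REG] = "REG", [MAP_SHM] = "SHM",
		[MAP_AGP] = "AGP", [MAP_SG]  = "SG",  [MAP_PCI] = "PCI",
		[MAP_GEM] = "GEM",
	};

	int main(void)
	{
		unsigned int t = MAP_GEM;

		/* the bound tracks the table, not a magic constant */
		puts(t < ARRAY_SIZE(types) ? types[t] : "??");
		return 0;
	}
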
35850diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
35851index 2f4c434..dd12cd2 100644
35852--- a/drivers/gpu/drm/drm_ioc32.c
35853+++ b/drivers/gpu/drm/drm_ioc32.c
35854@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
35855 request = compat_alloc_user_space(nbytes);
35856 if (!access_ok(VERIFY_WRITE, request, nbytes))
35857 return -EFAULT;
35858- list = (struct drm_buf_desc *) (request + 1);
35859+ list = (struct drm_buf_desc __user *) (request + 1);
35860
35861 if (__put_user(count, &request->count)
35862 || __put_user(list, &request->list))
35863@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
35864 request = compat_alloc_user_space(nbytes);
35865 if (!access_ok(VERIFY_WRITE, request, nbytes))
35866 return -EFAULT;
35867- list = (struct drm_buf_pub *) (request + 1);
35868+ list = (struct drm_buf_pub __user *) (request + 1);
35869
35870 if (__put_user(count, &request->count)
35871 || __put_user(list, &request->list))
35872@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
35873 return 0;
35874 }
35875
35876-drm_ioctl_compat_t *drm_compat_ioctls[] = {
35877+drm_ioctl_compat_t drm_compat_ioctls[] = {
35878 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
35879 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
35880 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
35881@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
35882 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35883 {
35884 unsigned int nr = DRM_IOCTL_NR(cmd);
35885- drm_ioctl_compat_t *fn;
35886 int ret;
35887
35888 /* Assume that ioctls without an explicit compat routine will just
35889@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35890 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
35891 return drm_ioctl(filp, cmd, arg);
35892
35893- fn = drm_compat_ioctls[nr];
35894-
35895- if (fn != NULL)
35896- ret = (*fn) (filp, cmd, arg);
35897+ if (drm_compat_ioctls[nr] != NULL)
35898+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
35899 else
35900 ret = drm_ioctl(filp, cmd, arg);
35901
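
Dropping the `*` from the table declaration only type-checks because drm_ioctl_compat_t is presumably redefined elsewhere in this patch as a const function-pointer type (drm_ioctl_no_const_t, seen in drm_drv.c above, would be its writable twin). The dispatch tables then become arrays of non-retargetable pointers — one less kernel-write target. A sketch with hypothetical names:

	/* hypothetical typedef pair mirroring the drm one */
	typedef int (* const ioctl_fn_t)(unsigned int cmd);
	typedef int (*ioctl_no_const_fn_t)(unsigned int cmd);

	static int do_version(unsigned int cmd) { return 0; }

	static ioctl_fn_t ioctls[] = {	/* entries cannot be rewritten */
		do_version,
	};

	int dispatch(unsigned int nr, unsigned int cmd)
	{
		if (nr >= sizeof(ioctls) / sizeof(ioctls[0]) || !ioctls[nr])
			return -1;
		return ioctls[nr](cmd);	/* reads of const entries are fine */
	}
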
35902diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
35903index e77bd8b..1571b85 100644
35904--- a/drivers/gpu/drm/drm_ioctl.c
35905+++ b/drivers/gpu/drm/drm_ioctl.c
35906@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
35907 stats->data[i].value =
35908 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
35909 else
35910- stats->data[i].value = atomic_read(&dev->counts[i]);
35911+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
35912 stats->data[i].type = dev->types[i];
35913 }
35914
35915diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
35916index d752c96..fe08455 100644
35917--- a/drivers/gpu/drm/drm_lock.c
35918+++ b/drivers/gpu/drm/drm_lock.c
35919@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35920 if (drm_lock_take(&master->lock, lock->context)) {
35921 master->lock.file_priv = file_priv;
35922 master->lock.lock_time = jiffies;
35923- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
35924+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
35925 break; /* Got lock */
35926 }
35927
35928@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35929 return -EINVAL;
35930 }
35931
35932- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
35933+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
35934
35935 if (drm_lock_free(&master->lock, lock->context)) {
35936 /* FIXME: Should really bail out here. */
35937diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
35938index 200e104..59facda 100644
35939--- a/drivers/gpu/drm/drm_stub.c
35940+++ b/drivers/gpu/drm/drm_stub.c
35941@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
35942
35943 drm_device_set_unplugged(dev);
35944
35945- if (dev->open_count == 0) {
35946+ if (local_read(&dev->open_count) == 0) {
35947 drm_put_dev(dev);
35948 }
35949 mutex_unlock(&drm_global_mutex);
35950diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35951index 004ecdf..db1f6e0 100644
35952--- a/drivers/gpu/drm/i810/i810_dma.c
35953+++ b/drivers/gpu/drm/i810/i810_dma.c
35954@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35955 dma->buflist[vertex->idx],
35956 vertex->discard, vertex->used);
35957
35958- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35959- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35960+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35961+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35962 sarea_priv->last_enqueue = dev_priv->counter - 1;
35963 sarea_priv->last_dispatch = (int)hw_status[5];
35964
35965@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35966 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35967 mc->last_render);
35968
35969- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35970- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35971+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35972+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35973 sarea_priv->last_enqueue = dev_priv->counter - 1;
35974 sarea_priv->last_dispatch = (int)hw_status[5];
35975
35976diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35977index 6e0acad..93c8289 100644
35978--- a/drivers/gpu/drm/i810/i810_drv.h
35979+++ b/drivers/gpu/drm/i810/i810_drv.h
35980@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35981 int page_flipping;
35982
35983 wait_queue_head_t irq_queue;
35984- atomic_t irq_received;
35985- atomic_t irq_emitted;
35986+ atomic_unchecked_t irq_received;
35987+ atomic_unchecked_t irq_emitted;
35988
35989 int front_offset;
35990 } drm_i810_private_t;
35991diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35992index 261efc8e..27af8a5 100644
35993--- a/drivers/gpu/drm/i915/i915_debugfs.c
35994+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35995@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35996 I915_READ(GTIMR));
35997 }
35998 seq_printf(m, "Interrupts received: %d\n",
35999- atomic_read(&dev_priv->irq_received));
36000+ atomic_read_unchecked(&dev_priv->irq_received));
36001 for_each_ring(ring, dev_priv, i) {
36002 if (IS_GEN6(dev) || IS_GEN7(dev)) {
36003 seq_printf(m,
36004diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
36005index 99daa89..84ebd44 100644
36006--- a/drivers/gpu/drm/i915/i915_dma.c
36007+++ b/drivers/gpu/drm/i915/i915_dma.c
36008@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
36009 bool can_switch;
36010
36011 spin_lock(&dev->count_lock);
36012- can_switch = (dev->open_count == 0);
36013+ can_switch = (local_read(&dev->open_count) == 0);
36014 spin_unlock(&dev->count_lock);
36015 return can_switch;
36016 }
36017diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
36018index 7339a4b..445aaba 100644
36019--- a/drivers/gpu/drm/i915/i915_drv.h
36020+++ b/drivers/gpu/drm/i915/i915_drv.h
36021@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
36022 drm_dma_handle_t *status_page_dmah;
36023 struct resource mch_res;
36024
36025- atomic_t irq_received;
36026+ atomic_unchecked_t irq_received;
36027
36028 /* protects the irq masks */
36029 spinlock_t irq_lock;
36030@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
36031 * will be page flipped away on the next vblank. When it
36032 * reaches 0, dev_priv->pending_flip_queue will be woken up.
36033 */
36034- atomic_t pending_flip;
36035+ atomic_unchecked_t pending_flip;
36036 };
36037 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
36038
36039@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
36040 struct drm_i915_private *dev_priv, unsigned port);
36041 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
36042 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
36043-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
36044+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
36045 {
36046 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
36047 }
36048diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
36049index ba8805a..39d5330 100644
36050--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
36051+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
36052@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
36053 i915_gem_clflush_object(obj);
36054
36055 if (obj->base.pending_write_domain)
36056- flips |= atomic_read(&obj->pending_flip);
36057+ flips |= atomic_read_unchecked(&obj->pending_flip);
36058
36059 flush_domains |= obj->base.write_domain;
36060 }
36061@@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
36062
36063 static int
36064 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
36065- int count)
36066+ unsigned int count)
36067 {
36068- int i;
36069+ unsigned int i;
36070 int relocs_total = 0;
36071 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
36072
36073@@ -1202,7 +1202,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
36074 return -ENOMEM;
36075 }
36076 ret = copy_from_user(exec2_list,
36077- (struct drm_i915_relocation_entry __user *)
36078+ (struct drm_i915_gem_exec_object2 __user *)
36079 (uintptr_t) args->buffers_ptr,
36080 sizeof(*exec2_list) * args->buffer_count);
36081 if (ret != 0) {
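
args->buffer_count comes straight from userspace as a 32-bit value; with validate_exec_list() taking it as a signed int, any count above INT_MAX went negative, the `i < count` validation loop ran zero times, and the entries were still consumed by later code. Making count and the loop index unsigned keeps the validation bound consistent with every subsequent use. A toy demonstration of the mismatch:

	#include <stdio.h>

	static unsigned int validated;

	static void validate(int count)		/* buggy: signed */
	{
		for (int i = 0; i < count; i++)
			validated++;
	}

	int main(void)
	{
		unsigned int buffer_count = 0x80000000u; /* user-supplied */

		validate((int)buffer_count);	/* count < 0: loop skipped */
		printf("validated %u of %u entries\n",
		       validated, buffer_count);
		return 0;
	}
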
36082diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
36083index 3c59584..500f2e9 100644
36084--- a/drivers/gpu/drm/i915/i915_ioc32.c
36085+++ b/drivers/gpu/drm/i915/i915_ioc32.c
36086@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
36087 (unsigned long)request);
36088 }
36089
36090-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
36091+static drm_ioctl_compat_t i915_compat_ioctls[] = {
36092 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
36093 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
36094 [DRM_I915_GETPARAM] = compat_i915_getparam,
36095@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
36096 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36097 {
36098 unsigned int nr = DRM_IOCTL_NR(cmd);
36099- drm_ioctl_compat_t *fn = NULL;
36100 int ret;
36101
36102 if (nr < DRM_COMMAND_BASE)
36103 return drm_compat_ioctl(filp, cmd, arg);
36104
36105- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
36106- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
36107-
36108- if (fn != NULL)
36109+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
36110+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
36111 ret = (*fn) (filp, cmd, arg);
36112- else
36113+ } else
36114 ret = drm_ioctl(filp, cmd, arg);
36115
36116 return ret;
36117diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
36118index fe84338..a863190 100644
36119--- a/drivers/gpu/drm/i915/i915_irq.c
36120+++ b/drivers/gpu/drm/i915/i915_irq.c
36121@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
36122 u32 pipe_stats[I915_MAX_PIPES];
36123 bool blc_event;
36124
36125- atomic_inc(&dev_priv->irq_received);
36126+ atomic_inc_unchecked(&dev_priv->irq_received);
36127
36128 while (true) {
36129 iir = I915_READ(VLV_IIR);
36130@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
36131 irqreturn_t ret = IRQ_NONE;
36132 int i;
36133
36134- atomic_inc(&dev_priv->irq_received);
36135+ atomic_inc_unchecked(&dev_priv->irq_received);
36136
36137 /* disable master interrupt before clearing iir */
36138 de_ier = I915_READ(DEIER);
36139@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
36140 int ret = IRQ_NONE;
36141 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
36142
36143- atomic_inc(&dev_priv->irq_received);
36144+ atomic_inc_unchecked(&dev_priv->irq_received);
36145
36146 /* disable master interrupt before clearing iir */
36147 de_ier = I915_READ(DEIER);
36148@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
36149 {
36150 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
36151
36152- atomic_set(&dev_priv->irq_received, 0);
36153+ atomic_set_unchecked(&dev_priv->irq_received, 0);
36154
36155 I915_WRITE(HWSTAM, 0xeffe);
36156
36157@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
36158 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
36159 int pipe;
36160
36161- atomic_set(&dev_priv->irq_received, 0);
36162+ atomic_set_unchecked(&dev_priv->irq_received, 0);
36163
36164 /* VLV magic */
36165 I915_WRITE(VLV_IMR, 0);
36166@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
36167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
36168 int pipe;
36169
36170- atomic_set(&dev_priv->irq_received, 0);
36171+ atomic_set_unchecked(&dev_priv->irq_received, 0);
36172
36173 for_each_pipe(pipe)
36174 I915_WRITE(PIPESTAT(pipe), 0);
36175@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
36176 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
36177 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
36178
36179- atomic_inc(&dev_priv->irq_received);
36180+ atomic_inc_unchecked(&dev_priv->irq_received);
36181
36182 iir = I915_READ16(IIR);
36183 if (iir == 0)
36184@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
36185 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
36186 int pipe;
36187
36188- atomic_set(&dev_priv->irq_received, 0);
36189+ atomic_set_unchecked(&dev_priv->irq_received, 0);
36190
36191 if (I915_HAS_HOTPLUG(dev)) {
36192 I915_WRITE(PORT_HOTPLUG_EN, 0);
36193@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
36194 };
36195 int pipe, ret = IRQ_NONE;
36196
36197- atomic_inc(&dev_priv->irq_received);
36198+ atomic_inc_unchecked(&dev_priv->irq_received);
36199
36200 iir = I915_READ(IIR);
36201 do {
36202@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
36203 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
36204 int pipe;
36205
36206- atomic_set(&dev_priv->irq_received, 0);
36207+ atomic_set_unchecked(&dev_priv->irq_received, 0);
36208
36209 I915_WRITE(PORT_HOTPLUG_EN, 0);
36210 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
36211@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
36212 int irq_received;
36213 int ret = IRQ_NONE, pipe;
36214
36215- atomic_inc(&dev_priv->irq_received);
36216+ atomic_inc_unchecked(&dev_priv->irq_received);
36217
36218 iir = I915_READ(IIR);
36219
36220diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
36221index d3f834a..0ad1b37 100644
36222--- a/drivers/gpu/drm/i915/intel_display.c
36223+++ b/drivers/gpu/drm/i915/intel_display.c
36224@@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
36225
36226 wait_event(dev_priv->pending_flip_queue,
36227 atomic_read(&dev_priv->mm.wedged) ||
36228- atomic_read(&obj->pending_flip) == 0);
36229+ atomic_read_unchecked(&obj->pending_flip) == 0);
36230
36231 /* Big Hammer, we also need to ensure that any pending
36232 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
36233@@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
36234
36235 obj = work->old_fb_obj;
36236
36237- atomic_clear_mask(1 << intel_crtc->plane,
36238- &obj->pending_flip.counter);
36239+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
36240 wake_up(&dev_priv->pending_flip_queue);
36241
36242 queue_work(dev_priv->wq, &work->work);
36243@@ -7486,7 +7485,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
36244 /* Block clients from rendering to the new back buffer until
36245 * the flip occurs and the object is no longer visible.
36246 */
36247- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
36248+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
36249 atomic_inc(&intel_crtc->unpin_work_count);
36250
36251 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
36252@@ -7504,7 +7503,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
36253 cleanup_pending:
36254 atomic_dec(&intel_crtc->unpin_work_count);
36255 crtc->fb = old_fb;
36256- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
36257+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
36258 drm_gem_object_unreference(&work->old_fb_obj->base);
36259 drm_gem_object_unreference(&obj->base);
36260 mutex_unlock(&dev->struct_mutex);
36261@@ -8846,13 +8845,13 @@ struct intel_quirk {
36262 int subsystem_vendor;
36263 int subsystem_device;
36264 void (*hook)(struct drm_device *dev);
36265-};
36266+} __do_const;
36267
36268 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
36269 struct intel_dmi_quirk {
36270 void (*hook)(struct drm_device *dev);
36271 const struct dmi_system_id (*dmi_id_list)[];
36272-};
36273+} __do_const;
36274
36275 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
36276 {
36277@@ -8860,18 +8859,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
36278 return 1;
36279 }
36280
36281+static const struct dmi_system_id intel_dmi_quirks_table[] = {
36282+ {
36283+ .callback = intel_dmi_reverse_brightness,
36284+ .ident = "NCR Corporation",
36285+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
36286+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
36287+ },
36288+ },
36289+ { } /* terminating entry */
36290+};
36291+
36292 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
36293 {
36294- .dmi_id_list = &(const struct dmi_system_id[]) {
36295- {
36296- .callback = intel_dmi_reverse_brightness,
36297- .ident = "NCR Corporation",
36298- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
36299- DMI_MATCH(DMI_PRODUCT_NAME, ""),
36300- },
36301- },
36302- { } /* terminating entry */
36303- },
36304+ .dmi_id_list = &intel_dmi_quirks_table,
36305 .hook = quirk_invert_brightness,
36306 },
36307 };
36308diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
36309index 54558a0..2d97005 100644
36310--- a/drivers/gpu/drm/mga/mga_drv.h
36311+++ b/drivers/gpu/drm/mga/mga_drv.h
36312@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
36313 u32 clear_cmd;
36314 u32 maccess;
36315
36316- atomic_t vbl_received; /**< Number of vblanks received. */
36317+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
36318 wait_queue_head_t fence_queue;
36319- atomic_t last_fence_retired;
36320+ atomic_unchecked_t last_fence_retired;
36321 u32 next_fence_to_post;
36322
36323 unsigned int fb_cpp;
36324diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
36325index 709e90d..89a1c0d 100644
36326--- a/drivers/gpu/drm/mga/mga_ioc32.c
36327+++ b/drivers/gpu/drm/mga/mga_ioc32.c
36328@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
36329 return 0;
36330 }
36331
36332-drm_ioctl_compat_t *mga_compat_ioctls[] = {
36333+drm_ioctl_compat_t mga_compat_ioctls[] = {
36334 [DRM_MGA_INIT] = compat_mga_init,
36335 [DRM_MGA_GETPARAM] = compat_mga_getparam,
36336 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
36337@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
36338 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36339 {
36340 unsigned int nr = DRM_IOCTL_NR(cmd);
36341- drm_ioctl_compat_t *fn = NULL;
36342 int ret;
36343
36344 if (nr < DRM_COMMAND_BASE)
36345 return drm_compat_ioctl(filp, cmd, arg);
36346
36347- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
36348- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
36349-
36350- if (fn != NULL)
36351+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
36352+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
36353 ret = (*fn) (filp, cmd, arg);
36354- else
36355+ } else
36356 ret = drm_ioctl(filp, cmd, arg);
36357
36358 return ret;
36359diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
36360index 598c281..60d590e 100644
36361--- a/drivers/gpu/drm/mga/mga_irq.c
36362+++ b/drivers/gpu/drm/mga/mga_irq.c
36363@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
36364 if (crtc != 0)
36365 return 0;
36366
36367- return atomic_read(&dev_priv->vbl_received);
36368+ return atomic_read_unchecked(&dev_priv->vbl_received);
36369 }
36370
36371
36372@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
36373 /* VBLANK interrupt */
36374 if (status & MGA_VLINEPEN) {
36375 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
36376- atomic_inc(&dev_priv->vbl_received);
36377+ atomic_inc_unchecked(&dev_priv->vbl_received);
36378 drm_handle_vblank(dev, 0);
36379 handled = 1;
36380 }
36381@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
36382 if ((prim_start & ~0x03) != (prim_end & ~0x03))
36383 MGA_WRITE(MGA_PRIMEND, prim_end);
36384
36385- atomic_inc(&dev_priv->last_fence_retired);
36386+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
36387 DRM_WAKEUP(&dev_priv->fence_queue);
36388 handled = 1;
36389 }
36390@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
36391 * using fences.
36392 */
36393 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
36394- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
36395+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
36396 - *sequence) <= (1 << 23)));
36397
36398 *sequence = cur_fence;
36399diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
36400index 865eddf..62c4cc3 100644
36401--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
36402+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
36403@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
36404 struct bit_table {
36405 const char id;
36406 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
36407-};
36408+} __no_const;
36409
36410 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
36411
36412diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
36413index aa89eb9..d45d38b 100644
36414--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
36415+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
36416@@ -80,7 +80,7 @@ struct nouveau_drm {
36417 struct drm_global_reference mem_global_ref;
36418 struct ttm_bo_global_ref bo_global_ref;
36419 struct ttm_bo_device bdev;
36420- atomic_t validate_sequence;
36421+ atomic_unchecked_t validate_sequence;
36422 int (*move)(struct nouveau_channel *,
36423 struct ttm_buffer_object *,
36424 struct ttm_mem_reg *, struct ttm_mem_reg *);
36425diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
36426index cdb83ac..27f0a16 100644
36427--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
36428+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
36429@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
36430 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
36431 struct nouveau_channel *);
36432 u32 (*read)(struct nouveau_channel *);
36433-};
36434+} __no_const;
36435
36436 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
36437
36438diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
36439index 8bf695c..9fbc90a 100644
36440--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
36441+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
36442@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
36443 int trycnt = 0;
36444 int ret, i;
36445
36446- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
36447+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
36448 retry:
36449 if (++trycnt > 100000) {
36450 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
36451diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
36452index 08214bc..9208577 100644
36453--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
36454+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
36455@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
36456 unsigned long arg)
36457 {
36458 unsigned int nr = DRM_IOCTL_NR(cmd);
36459- drm_ioctl_compat_t *fn = NULL;
36460+ drm_ioctl_compat_t fn = NULL;
36461 int ret;
36462
36463 if (nr < DRM_COMMAND_BASE)
36464diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
36465index 25d3495..d81aaf6 100644
36466--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
36467+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
36468@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
36469 bool can_switch;
36470
36471 spin_lock(&dev->count_lock);
36472- can_switch = (dev->open_count == 0);
36473+ can_switch = (local_read(&dev->open_count) == 0);
36474 spin_unlock(&dev->count_lock);
36475 return can_switch;
36476 }
36477diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
36478index d4660cf..70dbe65 100644
36479--- a/drivers/gpu/drm/r128/r128_cce.c
36480+++ b/drivers/gpu/drm/r128/r128_cce.c
36481@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
36482
36483 /* GH: Simple idle check.
36484 */
36485- atomic_set(&dev_priv->idle_count, 0);
36486+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36487
36488 /* We don't support anything other than bus-mastering ring mode,
36489 * but the ring can be in either AGP or PCI space for the ring
36490diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
36491index 930c71b..499aded 100644
36492--- a/drivers/gpu/drm/r128/r128_drv.h
36493+++ b/drivers/gpu/drm/r128/r128_drv.h
36494@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
36495 int is_pci;
36496 unsigned long cce_buffers_offset;
36497
36498- atomic_t idle_count;
36499+ atomic_unchecked_t idle_count;
36500
36501 int page_flipping;
36502 int current_page;
36503 u32 crtc_offset;
36504 u32 crtc_offset_cntl;
36505
36506- atomic_t vbl_received;
36507+ atomic_unchecked_t vbl_received;
36508
36509 u32 color_fmt;
36510 unsigned int front_offset;
36511diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
36512index a954c54..9cc595c 100644
36513--- a/drivers/gpu/drm/r128/r128_ioc32.c
36514+++ b/drivers/gpu/drm/r128/r128_ioc32.c
36515@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
36516 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
36517 }
36518
36519-drm_ioctl_compat_t *r128_compat_ioctls[] = {
36520+drm_ioctl_compat_t r128_compat_ioctls[] = {
36521 [DRM_R128_INIT] = compat_r128_init,
36522 [DRM_R128_DEPTH] = compat_r128_depth,
36523 [DRM_R128_STIPPLE] = compat_r128_stipple,
36524@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
36525 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36526 {
36527 unsigned int nr = DRM_IOCTL_NR(cmd);
36528- drm_ioctl_compat_t *fn = NULL;
36529 int ret;
36530
36531 if (nr < DRM_COMMAND_BASE)
36532 return drm_compat_ioctl(filp, cmd, arg);
36533
36534- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
36535- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
36536-
36537- if (fn != NULL)
36538+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
36539+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
36540 ret = (*fn) (filp, cmd, arg);
36541- else
36542+ } else
36543 ret = drm_ioctl(filp, cmd, arg);
36544
36545 return ret;
36546diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
36547index 2ea4f09..d391371 100644
36548--- a/drivers/gpu/drm/r128/r128_irq.c
36549+++ b/drivers/gpu/drm/r128/r128_irq.c
36550@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
36551 if (crtc != 0)
36552 return 0;
36553
36554- return atomic_read(&dev_priv->vbl_received);
36555+ return atomic_read_unchecked(&dev_priv->vbl_received);
36556 }
36557
36558 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36559@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36560 /* VBLANK interrupt */
36561 if (status & R128_CRTC_VBLANK_INT) {
36562 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
36563- atomic_inc(&dev_priv->vbl_received);
36564+ atomic_inc_unchecked(&dev_priv->vbl_received);
36565 drm_handle_vblank(dev, 0);
36566 return IRQ_HANDLED;
36567 }
36568diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
36569index 19bb7e6..de7e2a2 100644
36570--- a/drivers/gpu/drm/r128/r128_state.c
36571+++ b/drivers/gpu/drm/r128/r128_state.c
36572@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
36573
36574 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
36575 {
36576- if (atomic_read(&dev_priv->idle_count) == 0)
36577+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
36578 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
36579 else
36580- atomic_set(&dev_priv->idle_count, 0);
36581+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36582 }
36583
36584 #endif
36585diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
36586index 5a82b6b..9e69c73 100644
36587--- a/drivers/gpu/drm/radeon/mkregtable.c
36588+++ b/drivers/gpu/drm/radeon/mkregtable.c
36589@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
36590 regex_t mask_rex;
36591 regmatch_t match[4];
36592 char buf[1024];
36593- size_t end;
36594+ long end;
36595 int len;
36596 int done = 0;
36597 int r;
36598 unsigned o;
36599 struct offset *offset;
36600 char last_reg_s[10];
36601- int last_reg;
36602+ unsigned long last_reg;
36603
36604 if (regcomp
36605 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
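
mkregtable is a host-side table generator, so these are plain C correctness fixes: `end` presumably takes part in arithmetic or comparisons that can go negative, which an unsigned size_t silently underflows, and last_reg is matched against register offsets parsed as unsigned long, which an int would truncate. The classic size_t trap, runnable:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "   ";		/* all-blank input */
		long end = (long)strlen(buf) - 1;

		/* with "size_t end" the condition end >= 0 is always true
		 * and the index underflows past the buffer start */
		while (end >= 0 && buf[end] == ' ')
			buf[end--] = '\0';

		printf("trimmed length: %zu\n", strlen(buf));
		return 0;
	}
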
36606diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
36607index 0d6562b..a154330 100644
36608--- a/drivers/gpu/drm/radeon/radeon_device.c
36609+++ b/drivers/gpu/drm/radeon/radeon_device.c
36610@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
36611 bool can_switch;
36612
36613 spin_lock(&dev->count_lock);
36614- can_switch = (dev->open_count == 0);
36615+ can_switch = (local_read(&dev->open_count) == 0);
36616 spin_unlock(&dev->count_lock);
36617 return can_switch;
36618 }
36619diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
36620index e7fdf16..f4f6490 100644
36621--- a/drivers/gpu/drm/radeon/radeon_drv.h
36622+++ b/drivers/gpu/drm/radeon/radeon_drv.h
36623@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
36624
36625 /* SW interrupt */
36626 wait_queue_head_t swi_queue;
36627- atomic_t swi_emitted;
36628+ atomic_unchecked_t swi_emitted;
36629 int vblank_crtc;
36630 uint32_t irq_enable_reg;
36631 uint32_t r500_disp_irq_reg;
36632diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
36633index c180df8..5fd8186 100644
36634--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
36635+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
36636@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36637 request = compat_alloc_user_space(sizeof(*request));
36638 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
36639 || __put_user(req32.param, &request->param)
36640- || __put_user((void __user *)(unsigned long)req32.value,
36641+ || __put_user((unsigned long)req32.value,
36642 &request->value))
36643 return -EFAULT;
36644
36645@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36646 #define compat_radeon_cp_setparam NULL
36647 #endif /* X86_64 || IA64 */
36648
36649-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36650+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
36651 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
36652 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
36653 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
36654@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36655 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36656 {
36657 unsigned int nr = DRM_IOCTL_NR(cmd);
36658- drm_ioctl_compat_t *fn = NULL;
36659 int ret;
36660
36661 if (nr < DRM_COMMAND_BASE)
36662 return drm_compat_ioctl(filp, cmd, arg);
36663
36664- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
36665- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36666-
36667- if (fn != NULL)
36668+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
36669+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36670 ret = (*fn) (filp, cmd, arg);
36671- else
36672+ } else
36673 ret = drm_ioctl(filp, cmd, arg);
36674
36675 return ret;
36676diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
36677index e771033..a0bc6b3 100644
36678--- a/drivers/gpu/drm/radeon/radeon_irq.c
36679+++ b/drivers/gpu/drm/radeon/radeon_irq.c
36680@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
36681 unsigned int ret;
36682 RING_LOCALS;
36683
36684- atomic_inc(&dev_priv->swi_emitted);
36685- ret = atomic_read(&dev_priv->swi_emitted);
36686+ atomic_inc_unchecked(&dev_priv->swi_emitted);
36687+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
36688
36689 BEGIN_RING(4);
36690 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
36691@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
36692 drm_radeon_private_t *dev_priv =
36693 (drm_radeon_private_t *) dev->dev_private;
36694
36695- atomic_set(&dev_priv->swi_emitted, 0);
36696+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
36697 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
36698
36699 dev->max_vblank_count = 0x001fffff;
36700diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
36701index 8e9057b..af6dacb 100644
36702--- a/drivers/gpu/drm/radeon/radeon_state.c
36703+++ b/drivers/gpu/drm/radeon/radeon_state.c
36704@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
36705 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
36706 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
36707
36708- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36709+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36710 sarea_priv->nbox * sizeof(depth_boxes[0])))
36711 return -EFAULT;
36712
36713@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
36714 {
36715 drm_radeon_private_t *dev_priv = dev->dev_private;
36716 drm_radeon_getparam_t *param = data;
36717- int value;
36718+ int value = 0;
36719
36720 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
36721
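
sarea_priv lives in the SAREA, a mapping userspace can write, so clamping nbox and then reading it again as the copy size is a double fetch: the value may legally change between check and use. The added re-validation bounds it at the moment it is used. The general shape of the guard:

	#include <stdint.h>
	#include <string.h>

	#define MAX_BOXES 8u

	struct box { int x1, y1, x2, y2; };

	/* shared_nbox models a counter another context can rewrite
	 * between our two reads */
	int copy_boxes(volatile uint32_t *shared_nbox,
		       const struct box *src, struct box *dst)
	{
		/* fetch #1: the original clamp, written back to shared
		 * memory */
		if (*shared_nbox > MAX_BOXES)
			*shared_nbox = MAX_BOXES;

		/* fetch #2: the size actually used may have been
		 * rewritten since the clamp, so bound it again here */
		uint32_t n = *shared_nbox;
		if (n > MAX_BOXES)
			return -1;

		memcpy(dst, src, n * sizeof(*dst));
		return 0;
	}
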
36722diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
36723index 93f760e..8088227 100644
36724--- a/drivers/gpu/drm/radeon/radeon_ttm.c
36725+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
36726@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
36727 man->size = size >> PAGE_SHIFT;
36728 }
36729
36730-static struct vm_operations_struct radeon_ttm_vm_ops;
36731+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
36732 static const struct vm_operations_struct *ttm_vm_ops = NULL;
36733
36734 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36735@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
36736 }
36737 if (unlikely(ttm_vm_ops == NULL)) {
36738 ttm_vm_ops = vma->vm_ops;
36739+ pax_open_kernel();
36740 radeon_ttm_vm_ops = *ttm_vm_ops;
36741 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
36742+ pax_close_kernel();
36743 }
36744 vma->vm_ops = &radeon_ttm_vm_ops;
36745 return 0;
36746@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
36747 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
36748 else
36749 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
36750- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36751- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36752- radeon_mem_types_list[i].driver_features = 0;
36753+ pax_open_kernel();
36754+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36755+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36756+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36757 if (i == 0)
36758- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36759+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36760 else
36761- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36762-
36763+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36764+ pax_close_kernel();
36765 }
36766 /* Add ttm page pool to debugfs */
36767 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
36768- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36769- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36770- radeon_mem_types_list[i].driver_features = 0;
36771- radeon_mem_types_list[i++].data = NULL;
36772+ pax_open_kernel();
36773+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36774+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36775+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36776+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36777+ pax_close_kernel();
36778 #ifdef CONFIG_SWIOTLB
36779 if (swiotlb_nr_tbl()) {
36780 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
36781- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36782- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36783- radeon_mem_types_list[i].driver_features = 0;
36784- radeon_mem_types_list[i++].data = NULL;
36785+ pax_open_kernel();
36786+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36787+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36788+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36789+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36790+ pax_close_kernel();
36791 }
36792 #endif
36793 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
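
In the radeon_ttm.c hunks above, radeon_ttm_vm_ops is moved into read-only memory (__read_only, with the constified debugfs tables treated the same way), so the rare legitimate writes must be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (e.g. by toggling CR0.WP on x86). A userspace analogy of the pattern using mprotect, offered as an illustration only and not the kernel primitive:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char ro_page[4096] __attribute__((aligned(4096))) = "fault handler v1";

static void pax_open_kernel_demo(void)
{
	mprotect(ro_page, sizeof(ro_page), PROT_READ | PROT_WRITE);
}

static void pax_close_kernel_demo(void)
{
	mprotect(ro_page, sizeof(ro_page), PROT_READ);
}

int main(void)
{
	pax_close_kernel_demo();		/* the ops data spends its life read-only */
	pax_open_kernel_demo();			/* one-time, tightly bracketed update */
	strcpy(ro_page, "fault handler v2");
	pax_close_kernel_demo();
	puts(ro_page);
	return 0;
}
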
36794diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
36795index 5706d2a..17aedaa 100644
36796--- a/drivers/gpu/drm/radeon/rs690.c
36797+++ b/drivers/gpu/drm/radeon/rs690.c
36798@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
36799 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
36800 rdev->pm.sideport_bandwidth.full)
36801 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
36802- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
36803+ read_delay_latency.full = dfixed_const(800 * 1000);
36804 read_delay_latency.full = dfixed_div(read_delay_latency,
36805 rdev->pm.igp_sideport_mclk);
36806+ a.full = dfixed_const(370);
36807+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
36808 } else {
36809 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
36810 rdev->pm.k8_bandwidth.full)
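
The rs690.c hunk fixes a fixed-point overflow. dfixed_const(A) packs A into drm's 20.12 fixed point (A << 12 in a u32), whose integer part tops out at 2^20 - 1 = 1,048,575. The old constant 370 * 800 * 1000 = 296,000,000 cannot be represented, so the shift silently truncated; the fix keeps only 800 * 1000 = 800,000 (which fits) inside dfixed_const, divides by the sideport clock first, then scales the much smaller quotient by 370 with dfixed_mul. A quick check of the arithmetic, assuming dfixed matches drm's 20.12 format:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t intended  = (uint64_t)(370u * 800u * 1000u) << 12; /* ~2^40: needs 41 bits */
	uint32_t truncated = (uint32_t)(370u * 800u * 1000u) << 12; /* what a u32 keeps */
	uint32_t fixed_ok  = (uint32_t)(800u * 1000u) << 12;        /* 800,000 < 2^20: fits */

	printf("intended=%llu truncated=%u ok=%u\n",
	       (unsigned long long)intended, truncated, fixed_ok);
	return 0;
}
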
36811diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36812index bd2a3b4..122d9ad 100644
36813--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
36814+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36815@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
36816 static int ttm_pool_mm_shrink(struct shrinker *shrink,
36817 struct shrink_control *sc)
36818 {
36819- static atomic_t start_pool = ATOMIC_INIT(0);
36820+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
36821 unsigned i;
36822- unsigned pool_offset = atomic_add_return(1, &start_pool);
36823+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
36824 struct ttm_page_pool *pool;
36825 int shrink_pages = sc->nr_to_scan;
36826
36827diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
36828index 1eb060c..188b1fc 100644
36829--- a/drivers/gpu/drm/udl/udl_fb.c
36830+++ b/drivers/gpu/drm/udl/udl_fb.c
36831@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
36832 fb_deferred_io_cleanup(info);
36833 kfree(info->fbdefio);
36834 info->fbdefio = NULL;
36835- info->fbops->fb_mmap = udl_fb_mmap;
36836 }
36837
36838 pr_warn("released /dev/fb%d user=%d count=%d\n",
36839diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
36840index 893a650..6190d3b 100644
36841--- a/drivers/gpu/drm/via/via_drv.h
36842+++ b/drivers/gpu/drm/via/via_drv.h
36843@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
36844 typedef uint32_t maskarray_t[5];
36845
36846 typedef struct drm_via_irq {
36847- atomic_t irq_received;
36848+ atomic_unchecked_t irq_received;
36849 uint32_t pending_mask;
36850 uint32_t enable_mask;
36851 wait_queue_head_t irq_queue;
36852@@ -75,7 +75,7 @@ typedef struct drm_via_private {
36853 struct timeval last_vblank;
36854 int last_vblank_valid;
36855 unsigned usec_per_vblank;
36856- atomic_t vbl_received;
36857+ atomic_unchecked_t vbl_received;
36858 drm_via_state_t hc_state;
36859 char pci_buf[VIA_PCI_BUF_SIZE];
36860 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
36861diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
36862index ac98964..5dbf512 100644
36863--- a/drivers/gpu/drm/via/via_irq.c
36864+++ b/drivers/gpu/drm/via/via_irq.c
36865@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
36866 if (crtc != 0)
36867 return 0;
36868
36869- return atomic_read(&dev_priv->vbl_received);
36870+ return atomic_read_unchecked(&dev_priv->vbl_received);
36871 }
36872
36873 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36874@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36875
36876 status = VIA_READ(VIA_REG_INTERRUPT);
36877 if (status & VIA_IRQ_VBLANK_PENDING) {
36878- atomic_inc(&dev_priv->vbl_received);
36879- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
36880+ atomic_inc_unchecked(&dev_priv->vbl_received);
36881+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
36882 do_gettimeofday(&cur_vblank);
36883 if (dev_priv->last_vblank_valid) {
36884 dev_priv->usec_per_vblank =
36885@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36886 dev_priv->last_vblank = cur_vblank;
36887 dev_priv->last_vblank_valid = 1;
36888 }
36889- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
36890+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
36891 DRM_DEBUG("US per vblank is: %u\n",
36892 dev_priv->usec_per_vblank);
36893 }
36894@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36895
36896 for (i = 0; i < dev_priv->num_irqs; ++i) {
36897 if (status & cur_irq->pending_mask) {
36898- atomic_inc(&cur_irq->irq_received);
36899+ atomic_inc_unchecked(&cur_irq->irq_received);
36900 DRM_WAKEUP(&cur_irq->irq_queue);
36901 handled = 1;
36902 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
36903@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
36904 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36905 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
36906 masks[irq][4]));
36907- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
36908+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
36909 } else {
36910 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36911 (((cur_irq_sequence =
36912- atomic_read(&cur_irq->irq_received)) -
36913+ atomic_read_unchecked(&cur_irq->irq_received)) -
36914 *sequence) <= (1 << 23)));
36915 }
36916 *sequence = cur_irq_sequence;
36917@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
36918 }
36919
36920 for (i = 0; i < dev_priv->num_irqs; ++i) {
36921- atomic_set(&cur_irq->irq_received, 0);
36922+ atomic_set_unchecked(&cur_irq->irq_received, 0);
36923 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
36924 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
36925 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
36926@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
36927 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
36928 case VIA_IRQ_RELATIVE:
36929 irqwait->request.sequence +=
36930- atomic_read(&cur_irq->irq_received);
36931+ atomic_read_unchecked(&cur_irq->irq_received);
36932 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
36933 case VIA_IRQ_ABSOLUTE:
36934 break;
36935diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36936index 13aeda7..4a952d1 100644
36937--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36938+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36939@@ -290,7 +290,7 @@ struct vmw_private {
36940 * Fencing and IRQs.
36941 */
36942
36943- atomic_t marker_seq;
36944+ atomic_unchecked_t marker_seq;
36945 wait_queue_head_t fence_queue;
36946 wait_queue_head_t fifo_queue;
36947 int fence_queue_waiters; /* Protected by hw_mutex */
36948diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36949index 3eb1486..0a47ee9 100644
36950--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36951+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36952@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
36953 (unsigned int) min,
36954 (unsigned int) fifo->capabilities);
36955
36956- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36957+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36958 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
36959 vmw_marker_queue_init(&fifo->marker_queue);
36960 return vmw_fifo_send_fence(dev_priv, &dummy);
36961@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36962 if (reserveable)
36963 iowrite32(bytes, fifo_mem +
36964 SVGA_FIFO_RESERVED);
36965- return fifo_mem + (next_cmd >> 2);
36966+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36967 } else {
36968 need_bounce = true;
36969 }
36970@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36971
36972 fm = vmw_fifo_reserve(dev_priv, bytes);
36973 if (unlikely(fm == NULL)) {
36974- *seqno = atomic_read(&dev_priv->marker_seq);
36975+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36976 ret = -ENOMEM;
36977 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36978 false, 3*HZ);
36979@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36980 }
36981
36982 do {
36983- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36984+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36985 } while (*seqno == 0);
36986
36987 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
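
Besides the marker_seq counter conversion, the vmw_fifo_reserve() hunk above adds a __force_kernel cast when handing the iomem-backed FIFO pointer back as a plain kernel pointer. This looks like grsecurity's checker-plugin extension of sparse's address-space annotations, which catch accidental mixing of pointer spaces; the __force-style cast documents that this particular crossing is deliberate. For reference, the stock sparse machinery looks roughly like this (assumption: __force_kernel itself is a grsecurity addition, sketched here only by analogy):

#include <stdint.h>

#ifdef __CHECKER__
# define __iomem	__attribute__((noderef, address_space(2)))
# define __force	__attribute__((force))
#else
# define __iomem
# define __force
#endif

/* the deliberate crossing between pointer spaces is spelled out with __force */
static inline uint32_t *fifo_slot_demo(uint32_t __iomem *fifo_mem, unsigned next_cmd)
{
	return (uint32_t __force *)fifo_mem + (next_cmd >> 2);
}

int main(void)
{
	static uint32_t fifo[64];
	return fifo_slot_demo(fifo, 8) == &fifo[2] ? 0 : 1;
}
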
36988diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36989index 4640adb..e1384ed 100644
36990--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36991+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36992@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36993 * emitted. Then the fence is stale and signaled.
36994 */
36995
36996- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36997+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36998 > VMW_FENCE_WRAP);
36999
37000 return ret;
37001@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
37002
37003 if (fifo_idle)
37004 down_read(&fifo_state->rwsem);
37005- signal_seq = atomic_read(&dev_priv->marker_seq);
37006+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
37007 ret = 0;
37008
37009 for (;;) {
37010diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
37011index 8a8725c..afed796 100644
37012--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
37013+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
37014@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
37015 while (!vmw_lag_lt(queue, us)) {
37016 spin_lock(&queue->lock);
37017 if (list_empty(&queue->head))
37018- seqno = atomic_read(&dev_priv->marker_seq);
37019+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
37020 else {
37021 marker = list_first_entry(&queue->head,
37022 struct vmw_marker, head);
37023diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
37024index ceb3040..6160c5c 100644
37025--- a/drivers/hid/hid-core.c
37026+++ b/drivers/hid/hid-core.c
37027@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
37028
37029 int hid_add_device(struct hid_device *hdev)
37030 {
37031- static atomic_t id = ATOMIC_INIT(0);
37032+ static atomic_unchecked_t id = ATOMIC_INIT(0);
37033 int ret;
37034
37035 if (WARN_ON(hdev->status & HID_STAT_ADDED))
37036@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
37037 /* XXX hack, any other cleaner solution after the driver core
37038 * is converted to allow more than 20 bytes as the device name? */
37039 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
37040- hdev->vendor, hdev->product, atomic_inc_return(&id));
37041+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
37042
37043 hid_debug_register(hdev, dev_name(&hdev->dev));
37044 ret = device_add(&hdev->dev);
37045diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
37046index eec3291..8ed706b 100644
37047--- a/drivers/hid/hid-wiimote-debug.c
37048+++ b/drivers/hid/hid-wiimote-debug.c
37049@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
37050 else if (size == 0)
37051 return -EIO;
37052
37053- if (copy_to_user(u, buf, size))
37054+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
37055 return -EFAULT;
37056
37057 *off += size;
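
The hid-wiimote-debug hunk bounds the device-derived size against the on-stack staging buffer before copy_to_user(), closing a potential kernel stack infoleak: copying more than sizeof(buf) would disclose adjacent stack memory. The shape of the fix, assuming a simplified read-handler skeleton with a stub in place of copy_to_user:

#include <stddef.h>
#include <string.h>

#define EFAULT 14

static long copy_to_user_stub(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static long eeprom_read_demo(char *u, size_t size)
{
	char buf[16];
	size_t produced = size;		/* length reported by the device */

	memset(buf, 0, sizeof(buf));
	if (produced > sizeof(buf) || copy_to_user_stub(u, buf, produced))
		return -EFAULT;	/* reject before a byte past buf can leak */
	return (long)produced;
}

int main(void)
{
	char out[16];
	return (int)eeprom_read_demo(out, sizeof(out));
}
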
37058diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
37059index 773a2f2..7ce08bc 100644
37060--- a/drivers/hv/channel.c
37061+++ b/drivers/hv/channel.c
37062@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
37063 int ret = 0;
37064 int t;
37065
37066- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
37067- atomic_inc(&vmbus_connection.next_gpadl_handle);
37068+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
37069+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
37070
37071 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
37072 if (ret)
37073diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
37074index 3648f8f..30ef30d 100644
37075--- a/drivers/hv/hv.c
37076+++ b/drivers/hv/hv.c
37077@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
37078 u64 output_address = (output) ? virt_to_phys(output) : 0;
37079 u32 output_address_hi = output_address >> 32;
37080 u32 output_address_lo = output_address & 0xFFFFFFFF;
37081- void *hypercall_page = hv_context.hypercall_page;
37082+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
37083
37084 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
37085 "=a"(hv_status_lo) : "d" (control_hi),
37086diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
37087index d8d1fad..b91caf7 100644
37088--- a/drivers/hv/hyperv_vmbus.h
37089+++ b/drivers/hv/hyperv_vmbus.h
37090@@ -594,7 +594,7 @@ enum vmbus_connect_state {
37091 struct vmbus_connection {
37092 enum vmbus_connect_state conn_state;
37093
37094- atomic_t next_gpadl_handle;
37095+ atomic_unchecked_t next_gpadl_handle;
37096
37097 /*
37098 * Represents channel interrupts. Each bit position represents a
37099diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
37100index 8e1a9ec..4687821 100644
37101--- a/drivers/hv/vmbus_drv.c
37102+++ b/drivers/hv/vmbus_drv.c
37103@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
37104 {
37105 int ret = 0;
37106
37107- static atomic_t device_num = ATOMIC_INIT(0);
37108+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37109
37110 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
37111- atomic_inc_return(&device_num));
37112+ atomic_inc_return_unchecked(&device_num));
37113
37114 child_device_obj->device.bus = &hv_bus;
37115 child_device_obj->device.parent = &hv_acpi_dev->dev;
37116diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
37117index 1672e2a..4a6297c 100644
37118--- a/drivers/hwmon/acpi_power_meter.c
37119+++ b/drivers/hwmon/acpi_power_meter.c
37120@@ -117,7 +117,7 @@ struct sensor_template {
37121 struct device_attribute *devattr,
37122 const char *buf, size_t count);
37123 int index;
37124-};
37125+} __do_const;
37126
37127 /* Averaging interval */
37128 static int update_avg_interval(struct acpi_power_meter_resource *resource)
37129@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
37130 struct sensor_template *attrs)
37131 {
37132 struct device *dev = &resource->acpi_dev->dev;
37133- struct sensor_device_attribute *sensors =
37134+ sensor_device_attribute_no_const *sensors =
37135 &resource->sensors[resource->num_sensors];
37136 int res = 0;
37137
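
The hwmon hunks here and below mark attribute-template structs __do_const and switch runtime-built attribute instances to *_no_const typedefs. Under grsecurity's constify GCC plugin, structures consisting largely of function pointers are treated as const and placed in read-only memory so the pointers cannot be hijacked; objects that genuinely must be filled in at probe time use the generated _no_const alias. A simplified stand-alone sketch, assuming hand-written stand-ins for what the plugin does automatically:

#include <stdio.h>

struct sensor_template {
	const char *label;
	int (*show)(void);
};	/* with __do_const, the plugin would force all instances const */

typedef struct sensor_template sensor_template_no_const;	/* runtime-filled opt-out */

static int show_temp(void) { return 42; }

/* never modified after boot: lives in .rodata, pointers can't be hijacked */
static const struct sensor_template fixed_attr = { "temp1", show_temp };

int main(void)
{
	sensor_template_no_const dyn;	/* assembled at probe time, so it stays writable */

	dyn.label = "temp2";
	dyn.show  = show_temp;
	printf("%s=%d %s=%d\n", fixed_attr.label, fixed_attr.show(),
	       dyn.label, dyn.show());
	return 0;
}
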
37138diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
37139index b41baff..4953e4d 100644
37140--- a/drivers/hwmon/applesmc.c
37141+++ b/drivers/hwmon/applesmc.c
37142@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
37143 {
37144 struct applesmc_node_group *grp;
37145 struct applesmc_dev_attr *node;
37146- struct attribute *attr;
37147+ attribute_no_const *attr;
37148 int ret, i;
37149
37150 for (grp = groups; grp->format; grp++) {
37151diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
37152index 56dbcfb..9874bf1 100644
37153--- a/drivers/hwmon/asus_atk0110.c
37154+++ b/drivers/hwmon/asus_atk0110.c
37155@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
37156 struct atk_sensor_data {
37157 struct list_head list;
37158 struct atk_data *data;
37159- struct device_attribute label_attr;
37160- struct device_attribute input_attr;
37161- struct device_attribute limit1_attr;
37162- struct device_attribute limit2_attr;
37163+ device_attribute_no_const label_attr;
37164+ device_attribute_no_const input_attr;
37165+ device_attribute_no_const limit1_attr;
37166+ device_attribute_no_const limit2_attr;
37167 char label_attr_name[ATTR_NAME_SIZE];
37168 char input_attr_name[ATTR_NAME_SIZE];
37169 char limit1_attr_name[ATTR_NAME_SIZE];
37170@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
37171 static struct device_attribute atk_name_attr =
37172 __ATTR(name, 0444, atk_name_show, NULL);
37173
37174-static void atk_init_attribute(struct device_attribute *attr, char *name,
37175+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
37176 sysfs_show_func show)
37177 {
37178 sysfs_attr_init(&attr->attr);
37179diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
37180index d64923d..72591e8 100644
37181--- a/drivers/hwmon/coretemp.c
37182+++ b/drivers/hwmon/coretemp.c
37183@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
37184 return NOTIFY_OK;
37185 }
37186
37187-static struct notifier_block coretemp_cpu_notifier __refdata = {
37188+static struct notifier_block coretemp_cpu_notifier = {
37189 .notifier_call = coretemp_cpu_callback,
37190 };
37191
37192diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
37193index a14f634..2916ee2 100644
37194--- a/drivers/hwmon/ibmaem.c
37195+++ b/drivers/hwmon/ibmaem.c
37196@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
37197 struct aem_rw_sensor_template *rw)
37198 {
37199 struct device *dev = &data->pdev->dev;
37200- struct sensor_device_attribute *sensors = data->sensors;
37201+ sensor_device_attribute_no_const *sensors = data->sensors;
37202 int err;
37203
37204 /* Set up read-only sensors */
37205diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
37206index 7d19b1b..8fdaaac 100644
37207--- a/drivers/hwmon/pmbus/pmbus_core.c
37208+++ b/drivers/hwmon/pmbus/pmbus_core.c
37209@@ -811,7 +811,7 @@ static ssize_t pmbus_show_label(struct device *dev,
37210
37211 #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
37212 do { \
37213- struct sensor_device_attribute *a \
37214+ sensor_device_attribute_no_const *a \
37215 = &data->_type##s[data->num_##_type##s].attribute; \
37216 BUG_ON(data->num_attributes >= data->max_attributes); \
37217 sysfs_attr_init(&a->dev_attr.attr); \
37218diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
37219index 8047fed..1e956f0 100644
37220--- a/drivers/hwmon/sht15.c
37221+++ b/drivers/hwmon/sht15.c
37222@@ -169,7 +169,7 @@ struct sht15_data {
37223 int supply_uV;
37224 bool supply_uV_valid;
37225 struct work_struct update_supply_work;
37226- atomic_t interrupt_handled;
37227+ atomic_unchecked_t interrupt_handled;
37228 };
37229
37230 /**
37231@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
37232 return ret;
37233
37234 gpio_direction_input(data->pdata->gpio_data);
37235- atomic_set(&data->interrupt_handled, 0);
37236+ atomic_set_unchecked(&data->interrupt_handled, 0);
37237
37238 enable_irq(gpio_to_irq(data->pdata->gpio_data));
37239 if (gpio_get_value(data->pdata->gpio_data) == 0) {
37240 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
37241 /* Only relevant if the interrupt hasn't occurred. */
37242- if (!atomic_read(&data->interrupt_handled))
37243+ if (!atomic_read_unchecked(&data->interrupt_handled))
37244 schedule_work(&data->read_work);
37245 }
37246 ret = wait_event_timeout(data->wait_queue,
37247@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
37248
37249 /* First disable the interrupt */
37250 disable_irq_nosync(irq);
37251- atomic_inc(&data->interrupt_handled);
37252+ atomic_inc_unchecked(&data->interrupt_handled);
37253 /* Then schedule a reading work struct */
37254 if (data->state != SHT15_READING_NOTHING)
37255 schedule_work(&data->read_work);
37256@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
37257 * If not, then start the interrupt again - care here as could
37258 * have gone low in meantime so verify it hasn't!
37259 */
37260- atomic_set(&data->interrupt_handled, 0);
37261+ atomic_set_unchecked(&data->interrupt_handled, 0);
37262 enable_irq(gpio_to_irq(data->pdata->gpio_data));
37263 /* If still not occurred or another handler was scheduled */
37264 if (gpio_get_value(data->pdata->gpio_data)
37265- || atomic_read(&data->interrupt_handled))
37266+ || atomic_read_unchecked(&data->interrupt_handled))
37267 return;
37268 }
37269
37270diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
37271index 76f157b..9c0db1b 100644
37272--- a/drivers/hwmon/via-cputemp.c
37273+++ b/drivers/hwmon/via-cputemp.c
37274@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
37275 return NOTIFY_OK;
37276 }
37277
37278-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
37279+static struct notifier_block via_cputemp_cpu_notifier = {
37280 .notifier_call = via_cputemp_cpu_callback,
37281 };
37282
37283diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
37284index 378fcb5..5e91fa8 100644
37285--- a/drivers/i2c/busses/i2c-amd756-s4882.c
37286+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
37287@@ -43,7 +43,7 @@
37288 extern struct i2c_adapter amd756_smbus;
37289
37290 static struct i2c_adapter *s4882_adapter;
37291-static struct i2c_algorithm *s4882_algo;
37292+static i2c_algorithm_no_const *s4882_algo;
37293
37294 /* Wrapper access functions for multiplexed SMBus */
37295 static DEFINE_MUTEX(amd756_lock);
37296diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
37297index 29015eb..af2d8e9 100644
37298--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
37299+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
37300@@ -41,7 +41,7 @@
37301 extern struct i2c_adapter *nforce2_smbus;
37302
37303 static struct i2c_adapter *s4985_adapter;
37304-static struct i2c_algorithm *s4985_algo;
37305+static i2c_algorithm_no_const *s4985_algo;
37306
37307 /* Wrapper access functions for multiplexed SMBus */
37308 static DEFINE_MUTEX(nforce2_lock);
37309diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
37310index 8126824..55a2798 100644
37311--- a/drivers/ide/ide-cd.c
37312+++ b/drivers/ide/ide-cd.c
37313@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
37314 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
37315 if ((unsigned long)buf & alignment
37316 || blk_rq_bytes(rq) & q->dma_pad_mask
37317- || object_is_on_stack(buf))
37318+ || object_starts_on_stack(buf))
37319 drive->dma = 0;
37320 }
37321 }
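
The ide-cd hunk swaps object_is_on_stack() for grsecurity's object_starts_on_stack() when deciding whether a request buffer is stack memory and DMA must therefore be disabled. The renamed helper tests the buffer's start address against the task stack range. A userspace illustration of the range check (assumptions: demo-only names, a frame address standing in for current->stack, and a made-up stack size):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE_DEMO (16 * 1024)

static int starts_on_stack_demo(const void *obj, const void *stack_base)
{
	uintptr_t p = (uintptr_t)obj, base = (uintptr_t)stack_base;

	return p >= base && p < base + THREAD_SIZE_DEMO;	/* start address only */
}

int main(void)
{
	char on_stack[32];
	static char not_on_stack[32];
	/* crude stand-in for the task's stack base */
	const char *base = (const char *)__builtin_frame_address(0) - 8 * 1024;

	printf("%d %d\n", starts_on_stack_demo(on_stack, base),
	       starts_on_stack_demo(not_on_stack, base));
	return 0;
}
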
37322diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
37323index 8848f16..f8e6dd8 100644
37324--- a/drivers/iio/industrialio-core.c
37325+++ b/drivers/iio/industrialio-core.c
37326@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
37327 }
37328
37329 static
37330-int __iio_device_attr_init(struct device_attribute *dev_attr,
37331+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
37332 const char *postfix,
37333 struct iio_chan_spec const *chan,
37334 ssize_t (*readfunc)(struct device *dev,
37335diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
37336index 394fea2..c833880 100644
37337--- a/drivers/infiniband/core/cm.c
37338+++ b/drivers/infiniband/core/cm.c
37339@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
37340
37341 struct cm_counter_group {
37342 struct kobject obj;
37343- atomic_long_t counter[CM_ATTR_COUNT];
37344+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
37345 };
37346
37347 struct cm_counter_attribute {
37348@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
37349 struct ib_mad_send_buf *msg = NULL;
37350 int ret;
37351
37352- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37353+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37354 counter[CM_REQ_COUNTER]);
37355
37356 /* Quick state check to discard duplicate REQs. */
37357@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
37358 if (!cm_id_priv)
37359 return;
37360
37361- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37362+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37363 counter[CM_REP_COUNTER]);
37364 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
37365 if (ret)
37366@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
37367 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
37368 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
37369 spin_unlock_irq(&cm_id_priv->lock);
37370- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37371+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37372 counter[CM_RTU_COUNTER]);
37373 goto out;
37374 }
37375@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
37376 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
37377 dreq_msg->local_comm_id);
37378 if (!cm_id_priv) {
37379- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37380+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37381 counter[CM_DREQ_COUNTER]);
37382 cm_issue_drep(work->port, work->mad_recv_wc);
37383 return -EINVAL;
37384@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
37385 case IB_CM_MRA_REP_RCVD:
37386 break;
37387 case IB_CM_TIMEWAIT:
37388- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37389+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37390 counter[CM_DREQ_COUNTER]);
37391 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
37392 goto unlock;
37393@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
37394 cm_free_msg(msg);
37395 goto deref;
37396 case IB_CM_DREQ_RCVD:
37397- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37398+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37399 counter[CM_DREQ_COUNTER]);
37400 goto unlock;
37401 default:
37402@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
37403 ib_modify_mad(cm_id_priv->av.port->mad_agent,
37404 cm_id_priv->msg, timeout)) {
37405 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
37406- atomic_long_inc(&work->port->
37407+ atomic_long_inc_unchecked(&work->port->
37408 counter_group[CM_RECV_DUPLICATES].
37409 counter[CM_MRA_COUNTER]);
37410 goto out;
37411@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
37412 break;
37413 case IB_CM_MRA_REQ_RCVD:
37414 case IB_CM_MRA_REP_RCVD:
37415- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37416+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37417 counter[CM_MRA_COUNTER]);
37418 /* fall through */
37419 default:
37420@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
37421 case IB_CM_LAP_IDLE:
37422 break;
37423 case IB_CM_MRA_LAP_SENT:
37424- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37425+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37426 counter[CM_LAP_COUNTER]);
37427 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
37428 goto unlock;
37429@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
37430 cm_free_msg(msg);
37431 goto deref;
37432 case IB_CM_LAP_RCVD:
37433- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37434+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37435 counter[CM_LAP_COUNTER]);
37436 goto unlock;
37437 default:
37438@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
37439 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
37440 if (cur_cm_id_priv) {
37441 spin_unlock_irq(&cm.lock);
37442- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37443+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37444 counter[CM_SIDR_REQ_COUNTER]);
37445 goto out; /* Duplicate message. */
37446 }
37447@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
37448 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
37449 msg->retries = 1;
37450
37451- atomic_long_add(1 + msg->retries,
37452+ atomic_long_add_unchecked(1 + msg->retries,
37453 &port->counter_group[CM_XMIT].counter[attr_index]);
37454 if (msg->retries)
37455- atomic_long_add(msg->retries,
37456+ atomic_long_add_unchecked(msg->retries,
37457 &port->counter_group[CM_XMIT_RETRIES].
37458 counter[attr_index]);
37459
37460@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
37461 }
37462
37463 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
37464- atomic_long_inc(&port->counter_group[CM_RECV].
37465+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
37466 counter[attr_id - CM_ATTR_ID_OFFSET]);
37467
37468 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
37469@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
37470 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
37471
37472 return sprintf(buf, "%ld\n",
37473- atomic_long_read(&group->counter[cm_attr->index]));
37474+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
37475 }
37476
37477 static const struct sysfs_ops cm_counter_ops = {
37478diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
37479index 176c8f9..2627b62 100644
37480--- a/drivers/infiniband/core/fmr_pool.c
37481+++ b/drivers/infiniband/core/fmr_pool.c
37482@@ -98,8 +98,8 @@ struct ib_fmr_pool {
37483
37484 struct task_struct *thread;
37485
37486- atomic_t req_ser;
37487- atomic_t flush_ser;
37488+ atomic_unchecked_t req_ser;
37489+ atomic_unchecked_t flush_ser;
37490
37491 wait_queue_head_t force_wait;
37492 };
37493@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37494 struct ib_fmr_pool *pool = pool_ptr;
37495
37496 do {
37497- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
37498+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
37499 ib_fmr_batch_release(pool);
37500
37501- atomic_inc(&pool->flush_ser);
37502+ atomic_inc_unchecked(&pool->flush_ser);
37503 wake_up_interruptible(&pool->force_wait);
37504
37505 if (pool->flush_function)
37506@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37507 }
37508
37509 set_current_state(TASK_INTERRUPTIBLE);
37510- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
37511+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
37512 !kthread_should_stop())
37513 schedule();
37514 __set_current_state(TASK_RUNNING);
37515@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
37516 pool->dirty_watermark = params->dirty_watermark;
37517 pool->dirty_len = 0;
37518 spin_lock_init(&pool->pool_lock);
37519- atomic_set(&pool->req_ser, 0);
37520- atomic_set(&pool->flush_ser, 0);
37521+ atomic_set_unchecked(&pool->req_ser, 0);
37522+ atomic_set_unchecked(&pool->flush_ser, 0);
37523 init_waitqueue_head(&pool->force_wait);
37524
37525 pool->thread = kthread_run(ib_fmr_cleanup_thread,
37526@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
37527 }
37528 spin_unlock_irq(&pool->pool_lock);
37529
37530- serial = atomic_inc_return(&pool->req_ser);
37531+ serial = atomic_inc_return_unchecked(&pool->req_ser);
37532 wake_up_process(pool->thread);
37533
37534 if (wait_event_interruptible(pool->force_wait,
37535- atomic_read(&pool->flush_ser) - serial >= 0))
37536+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
37537 return -EINTR;
37538
37539 return 0;
37540@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
37541 } else {
37542 list_add_tail(&fmr->list, &pool->dirty_list);
37543 if (++pool->dirty_len >= pool->dirty_watermark) {
37544- atomic_inc(&pool->req_ser);
37545+ atomic_inc_unchecked(&pool->req_ser);
37546 wake_up_process(pool->thread);
37547 }
37548 }
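
The FMR pool's req_ser/flush_ser pair is compared as `flush_ser - req_ser < 0`, classic wrap-safe serial-number arithmetic: the signed difference stays meaningful across 32-bit wraparound as long as the two values are within 2^31 of each other. That legitimate wrap is exactly what PaX's checked atomics would trap on, hence the _unchecked conversion. A sketch of the comparison, assuming the two's-complement cast behavior Linux relies on throughout:

#include <stdio.h>

static int seq_before(unsigned a, unsigned b)
{
	return (int)(a - b) < 0;	/* signed difference handles wrap */
}

int main(void)
{
	printf("%d\n", seq_before(0xfffffffeu, 1u));	/* 1: still ordered across wrap */
	printf("%d\n", seq_before(5u, 3u));		/* 0: 5 is not before 3 */
	return 0;
}
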
37549diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
37550index afd8179..598063f 100644
37551--- a/drivers/infiniband/hw/cxgb4/mem.c
37552+++ b/drivers/infiniband/hw/cxgb4/mem.c
37553@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37554 int err;
37555 struct fw_ri_tpte tpt;
37556 u32 stag_idx;
37557- static atomic_t key;
37558+ static atomic_unchecked_t key;
37559
37560 if (c4iw_fatal_error(rdev))
37561 return -EIO;
37562@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37563 if (rdev->stats.stag.cur > rdev->stats.stag.max)
37564 rdev->stats.stag.max = rdev->stats.stag.cur;
37565 mutex_unlock(&rdev->stats.lock);
37566- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
37567+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
37568 }
37569 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
37570 __func__, stag_state, type, pdid, stag_idx);
37571diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
37572index 79b3dbc..96e5fcc 100644
37573--- a/drivers/infiniband/hw/ipath/ipath_rc.c
37574+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
37575@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37576 struct ib_atomic_eth *ateth;
37577 struct ipath_ack_entry *e;
37578 u64 vaddr;
37579- atomic64_t *maddr;
37580+ atomic64_unchecked_t *maddr;
37581 u64 sdata;
37582 u32 rkey;
37583 u8 next;
37584@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37585 IB_ACCESS_REMOTE_ATOMIC)))
37586 goto nack_acc_unlck;
37587 /* Perform atomic OP and save result. */
37588- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37589+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37590 sdata = be64_to_cpu(ateth->swap_data);
37591 e = &qp->s_ack_queue[qp->r_head_ack_queue];
37592 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
37593- (u64) atomic64_add_return(sdata, maddr) - sdata :
37594+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37595 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37596 be64_to_cpu(ateth->compare_data),
37597 sdata);
37598diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
37599index 1f95bba..9530f87 100644
37600--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
37601+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
37602@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
37603 unsigned long flags;
37604 struct ib_wc wc;
37605 u64 sdata;
37606- atomic64_t *maddr;
37607+ atomic64_unchecked_t *maddr;
37608 enum ib_wc_status send_status;
37609
37610 /*
37611@@ -382,11 +382,11 @@ again:
37612 IB_ACCESS_REMOTE_ATOMIC)))
37613 goto acc_err;
37614 /* Perform atomic OP and save result. */
37615- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37616+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37617 sdata = wqe->wr.wr.atomic.compare_add;
37618 *(u64 *) sqp->s_sge.sge.vaddr =
37619 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
37620- (u64) atomic64_add_return(sdata, maddr) - sdata :
37621+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37622 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37623 sdata, wqe->wr.wr.atomic.swap);
37624 goto send_comp;
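
The ipath hunks implement the InfiniBand FETCH_ADD atomic on the target QP's memory: atomic64_add_return() is applied to the mapped buffer and the swap data is subtracted back out to recover the old value the wire protocol must return. Since 64-bit wrap is legal for the remote peer, a trapping (checked) atomic64 would be a correctness bug, so the unchecked variant is used. A sketch of the semantics, assuming a simplified userspace stand-in:

#include <stdint.h>
#include <stdio.h>

static uint64_t ib_fetch_add_demo(uint64_t *maddr, uint64_t sdata)
{
	/* add_fetch gives the *new* value; subtract sdata to report the old
	 * one, mirroring "atomic64_add_return(sdata, maddr) - sdata" above */
	return __atomic_add_fetch(maddr, sdata, __ATOMIC_SEQ_CST) - sdata;
}

int main(void)
{
	uint64_t target = UINT64_MAX;		/* one step from wrapping */
	uint64_t old = ib_fetch_add_demo(&target, 2);

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old, (unsigned long long)target);
	return 0;
}
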
37625diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
37626index 9d3e5c1..d9afe4a 100644
37627--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
37628+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
37629@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
37630 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
37631 }
37632
37633-int mthca_QUERY_FW(struct mthca_dev *dev)
37634+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
37635 {
37636 struct mthca_mailbox *mailbox;
37637 u32 *outbox;
37638diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
37639index ed9a989..e0c5871 100644
37640--- a/drivers/infiniband/hw/mthca/mthca_mr.c
37641+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
37642@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
37643 return key;
37644 }
37645
37646-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37647+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37648 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
37649 {
37650 struct mthca_mailbox *mailbox;
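
The two mthca hunks annotate functions with __intentional_overflow(-1). grsecurity's size_overflow GCC plugin instruments integer expressions that feed sizes and kills the offender on unexpected overflow; this attribute marks arithmetic that is known and intended to wrap, with -1 apparently exempting the function's result. A hedged guess at how such an annotation stays buildable without the plugin (assumption: macro shape only; the real definition lives in the grsecurity compiler-plugin headers):

/* assumption: illustrative config name and fallback */
#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif

static int __intentional_overflow(-1) wrap_demo(int x)
{
	return x + 1;	/* may wrap by design; the plugin would leave it alone */
}

int main(void)
{
	return wrap_demo(0) - 1;
}
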
37651diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
37652index 5b152a3..c1f3e83 100644
37653--- a/drivers/infiniband/hw/nes/nes.c
37654+++ b/drivers/infiniband/hw/nes/nes.c
37655@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
37656 LIST_HEAD(nes_adapter_list);
37657 static LIST_HEAD(nes_dev_list);
37658
37659-atomic_t qps_destroyed;
37660+atomic_unchecked_t qps_destroyed;
37661
37662 static unsigned int ee_flsh_adapter;
37663 static unsigned int sysfs_nonidx_addr;
37664@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
37665 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
37666 struct nes_adapter *nesadapter = nesdev->nesadapter;
37667
37668- atomic_inc(&qps_destroyed);
37669+ atomic_inc_unchecked(&qps_destroyed);
37670
37671 /* Free the control structures */
37672
37673diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
37674index 33cc589..3bd6538 100644
37675--- a/drivers/infiniband/hw/nes/nes.h
37676+++ b/drivers/infiniband/hw/nes/nes.h
37677@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
37678 extern unsigned int wqm_quanta;
37679 extern struct list_head nes_adapter_list;
37680
37681-extern atomic_t cm_connects;
37682-extern atomic_t cm_accepts;
37683-extern atomic_t cm_disconnects;
37684-extern atomic_t cm_closes;
37685-extern atomic_t cm_connecteds;
37686-extern atomic_t cm_connect_reqs;
37687-extern atomic_t cm_rejects;
37688-extern atomic_t mod_qp_timouts;
37689-extern atomic_t qps_created;
37690-extern atomic_t qps_destroyed;
37691-extern atomic_t sw_qps_destroyed;
37692+extern atomic_unchecked_t cm_connects;
37693+extern atomic_unchecked_t cm_accepts;
37694+extern atomic_unchecked_t cm_disconnects;
37695+extern atomic_unchecked_t cm_closes;
37696+extern atomic_unchecked_t cm_connecteds;
37697+extern atomic_unchecked_t cm_connect_reqs;
37698+extern atomic_unchecked_t cm_rejects;
37699+extern atomic_unchecked_t mod_qp_timouts;
37700+extern atomic_unchecked_t qps_created;
37701+extern atomic_unchecked_t qps_destroyed;
37702+extern atomic_unchecked_t sw_qps_destroyed;
37703 extern u32 mh_detected;
37704 extern u32 mh_pauses_sent;
37705 extern u32 cm_packets_sent;
37706@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
37707 extern u32 cm_packets_received;
37708 extern u32 cm_packets_dropped;
37709 extern u32 cm_packets_retrans;
37710-extern atomic_t cm_listens_created;
37711-extern atomic_t cm_listens_destroyed;
37712+extern atomic_unchecked_t cm_listens_created;
37713+extern atomic_unchecked_t cm_listens_destroyed;
37714 extern u32 cm_backlog_drops;
37715-extern atomic_t cm_loopbacks;
37716-extern atomic_t cm_nodes_created;
37717-extern atomic_t cm_nodes_destroyed;
37718-extern atomic_t cm_accel_dropped_pkts;
37719-extern atomic_t cm_resets_recvd;
37720-extern atomic_t pau_qps_created;
37721-extern atomic_t pau_qps_destroyed;
37722+extern atomic_unchecked_t cm_loopbacks;
37723+extern atomic_unchecked_t cm_nodes_created;
37724+extern atomic_unchecked_t cm_nodes_destroyed;
37725+extern atomic_unchecked_t cm_accel_dropped_pkts;
37726+extern atomic_unchecked_t cm_resets_recvd;
37727+extern atomic_unchecked_t pau_qps_created;
37728+extern atomic_unchecked_t pau_qps_destroyed;
37729
37730 extern u32 int_mod_timer_init;
37731 extern u32 int_mod_cq_depth_256;
37732diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
37733index 22ea67e..dcbe3bc 100644
37734--- a/drivers/infiniband/hw/nes/nes_cm.c
37735+++ b/drivers/infiniband/hw/nes/nes_cm.c
37736@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
37737 u32 cm_packets_retrans;
37738 u32 cm_packets_created;
37739 u32 cm_packets_received;
37740-atomic_t cm_listens_created;
37741-atomic_t cm_listens_destroyed;
37742+atomic_unchecked_t cm_listens_created;
37743+atomic_unchecked_t cm_listens_destroyed;
37744 u32 cm_backlog_drops;
37745-atomic_t cm_loopbacks;
37746-atomic_t cm_nodes_created;
37747-atomic_t cm_nodes_destroyed;
37748-atomic_t cm_accel_dropped_pkts;
37749-atomic_t cm_resets_recvd;
37750+atomic_unchecked_t cm_loopbacks;
37751+atomic_unchecked_t cm_nodes_created;
37752+atomic_unchecked_t cm_nodes_destroyed;
37753+atomic_unchecked_t cm_accel_dropped_pkts;
37754+atomic_unchecked_t cm_resets_recvd;
37755
37756 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
37757 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
37758@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
37759
37760 static struct nes_cm_core *g_cm_core;
37761
37762-atomic_t cm_connects;
37763-atomic_t cm_accepts;
37764-atomic_t cm_disconnects;
37765-atomic_t cm_closes;
37766-atomic_t cm_connecteds;
37767-atomic_t cm_connect_reqs;
37768-atomic_t cm_rejects;
37769+atomic_unchecked_t cm_connects;
37770+atomic_unchecked_t cm_accepts;
37771+atomic_unchecked_t cm_disconnects;
37772+atomic_unchecked_t cm_closes;
37773+atomic_unchecked_t cm_connecteds;
37774+atomic_unchecked_t cm_connect_reqs;
37775+atomic_unchecked_t cm_rejects;
37776
37777 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
37778 {
37779@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
37780 kfree(listener);
37781 listener = NULL;
37782 ret = 0;
37783- atomic_inc(&cm_listens_destroyed);
37784+ atomic_inc_unchecked(&cm_listens_destroyed);
37785 } else {
37786 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
37787 }
37788@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
37789 cm_node->rem_mac);
37790
37791 add_hte_node(cm_core, cm_node);
37792- atomic_inc(&cm_nodes_created);
37793+ atomic_inc_unchecked(&cm_nodes_created);
37794
37795 return cm_node;
37796 }
37797@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
37798 }
37799
37800 atomic_dec(&cm_core->node_cnt);
37801- atomic_inc(&cm_nodes_destroyed);
37802+ atomic_inc_unchecked(&cm_nodes_destroyed);
37803 nesqp = cm_node->nesqp;
37804 if (nesqp) {
37805 nesqp->cm_node = NULL;
37806@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
37807
37808 static void drop_packet(struct sk_buff *skb)
37809 {
37810- atomic_inc(&cm_accel_dropped_pkts);
37811+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37812 dev_kfree_skb_any(skb);
37813 }
37814
37815@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
37816 {
37817
37818 int reset = 0; /* whether to send reset in case of err.. */
37819- atomic_inc(&cm_resets_recvd);
37820+ atomic_inc_unchecked(&cm_resets_recvd);
37821 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
37822 " refcnt=%d\n", cm_node, cm_node->state,
37823 atomic_read(&cm_node->ref_count));
37824@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
37825 rem_ref_cm_node(cm_node->cm_core, cm_node);
37826 return NULL;
37827 }
37828- atomic_inc(&cm_loopbacks);
37829+ atomic_inc_unchecked(&cm_loopbacks);
37830 loopbackremotenode->loopbackpartner = cm_node;
37831 loopbackremotenode->tcp_cntxt.rcv_wscale =
37832 NES_CM_DEFAULT_RCV_WND_SCALE;
37833@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
37834 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
37835 else {
37836 rem_ref_cm_node(cm_core, cm_node);
37837- atomic_inc(&cm_accel_dropped_pkts);
37838+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37839 dev_kfree_skb_any(skb);
37840 }
37841 break;
37842@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37843
37844 if ((cm_id) && (cm_id->event_handler)) {
37845 if (issue_disconn) {
37846- atomic_inc(&cm_disconnects);
37847+ atomic_inc_unchecked(&cm_disconnects);
37848 cm_event.event = IW_CM_EVENT_DISCONNECT;
37849 cm_event.status = disconn_status;
37850 cm_event.local_addr = cm_id->local_addr;
37851@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37852 }
37853
37854 if (issue_close) {
37855- atomic_inc(&cm_closes);
37856+ atomic_inc_unchecked(&cm_closes);
37857 nes_disconnect(nesqp, 1);
37858
37859 cm_id->provider_data = nesqp;
37860@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37861
37862 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
37863 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
37864- atomic_inc(&cm_accepts);
37865+ atomic_inc_unchecked(&cm_accepts);
37866
37867 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
37868 netdev_refcnt_read(nesvnic->netdev));
37869@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
37870 struct nes_cm_core *cm_core;
37871 u8 *start_buff;
37872
37873- atomic_inc(&cm_rejects);
37874+ atomic_inc_unchecked(&cm_rejects);
37875 cm_node = (struct nes_cm_node *)cm_id->provider_data;
37876 loopback = cm_node->loopbackpartner;
37877 cm_core = cm_node->cm_core;
37878@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37879 ntohl(cm_id->local_addr.sin_addr.s_addr),
37880 ntohs(cm_id->local_addr.sin_port));
37881
37882- atomic_inc(&cm_connects);
37883+ atomic_inc_unchecked(&cm_connects);
37884 nesqp->active_conn = 1;
37885
37886 /* cache the cm_id in the qp */
37887@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
37888 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
37889 return err;
37890 }
37891- atomic_inc(&cm_listens_created);
37892+ atomic_inc_unchecked(&cm_listens_created);
37893 }
37894
37895 cm_id->add_ref(cm_id);
37896@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
37897
37898 if (nesqp->destroyed)
37899 return;
37900- atomic_inc(&cm_connecteds);
37901+ atomic_inc_unchecked(&cm_connecteds);
37902 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
37903 " local port 0x%04X. jiffies = %lu.\n",
37904 nesqp->hwqp.qp_id,
37905@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
37906
37907 cm_id->add_ref(cm_id);
37908 ret = cm_id->event_handler(cm_id, &cm_event);
37909- atomic_inc(&cm_closes);
37910+ atomic_inc_unchecked(&cm_closes);
37911 cm_event.event = IW_CM_EVENT_CLOSE;
37912 cm_event.status = 0;
37913 cm_event.provider_data = cm_id->provider_data;
37914@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
37915 return;
37916 cm_id = cm_node->cm_id;
37917
37918- atomic_inc(&cm_connect_reqs);
37919+ atomic_inc_unchecked(&cm_connect_reqs);
37920 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37921 cm_node, cm_id, jiffies);
37922
37923@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
37924 return;
37925 cm_id = cm_node->cm_id;
37926
37927- atomic_inc(&cm_connect_reqs);
37928+ atomic_inc_unchecked(&cm_connect_reqs);
37929 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37930 cm_node, cm_id, jiffies);
37931
37932diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
37933index 4166452..fc952c3 100644
37934--- a/drivers/infiniband/hw/nes/nes_mgt.c
37935+++ b/drivers/infiniband/hw/nes/nes_mgt.c
37936@@ -40,8 +40,8 @@
37937 #include "nes.h"
37938 #include "nes_mgt.h"
37939
37940-atomic_t pau_qps_created;
37941-atomic_t pau_qps_destroyed;
37942+atomic_unchecked_t pau_qps_created;
37943+atomic_unchecked_t pau_qps_destroyed;
37944
37945 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
37946 {
37947@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
37948 {
37949 struct sk_buff *skb;
37950 unsigned long flags;
37951- atomic_inc(&pau_qps_destroyed);
37952+ atomic_inc_unchecked(&pau_qps_destroyed);
37953
37954 /* Free packets that have not yet been forwarded */
37955 /* Lock is acquired by skb_dequeue when removing the skb */
37956@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
37957 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
37958 skb_queue_head_init(&nesqp->pau_list);
37959 spin_lock_init(&nesqp->pau_lock);
37960- atomic_inc(&pau_qps_created);
37961+ atomic_inc_unchecked(&pau_qps_created);
37962 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37963 }
37964
37965diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37966index 9542e16..a008c40 100644
37967--- a/drivers/infiniband/hw/nes/nes_nic.c
37968+++ b/drivers/infiniband/hw/nes/nes_nic.c
37969@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37970 target_stat_values[++index] = mh_detected;
37971 target_stat_values[++index] = mh_pauses_sent;
37972 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37973- target_stat_values[++index] = atomic_read(&cm_connects);
37974- target_stat_values[++index] = atomic_read(&cm_accepts);
37975- target_stat_values[++index] = atomic_read(&cm_disconnects);
37976- target_stat_values[++index] = atomic_read(&cm_connecteds);
37977- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37978- target_stat_values[++index] = atomic_read(&cm_rejects);
37979- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37980- target_stat_values[++index] = atomic_read(&qps_created);
37981- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37982- target_stat_values[++index] = atomic_read(&qps_destroyed);
37983- target_stat_values[++index] = atomic_read(&cm_closes);
37984+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37985+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37986+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37987+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37988+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37989+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37990+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37991+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37992+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37993+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37994+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37995 target_stat_values[++index] = cm_packets_sent;
37996 target_stat_values[++index] = cm_packets_bounced;
37997 target_stat_values[++index] = cm_packets_created;
37998 target_stat_values[++index] = cm_packets_received;
37999 target_stat_values[++index] = cm_packets_dropped;
38000 target_stat_values[++index] = cm_packets_retrans;
38001- target_stat_values[++index] = atomic_read(&cm_listens_created);
38002- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
38003+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
38004+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
38005 target_stat_values[++index] = cm_backlog_drops;
38006- target_stat_values[++index] = atomic_read(&cm_loopbacks);
38007- target_stat_values[++index] = atomic_read(&cm_nodes_created);
38008- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
38009- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
38010- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
38011+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
38012+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
38013+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
38014+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
38015+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
38016 target_stat_values[++index] = nesadapter->free_4kpbl;
38017 target_stat_values[++index] = nesadapter->free_256pbl;
38018 target_stat_values[++index] = int_mod_timer_init;
38019 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
38020 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
38021 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
38022- target_stat_values[++index] = atomic_read(&pau_qps_created);
38023- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
38024+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
38025+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
38026 }
38027
38028 /**
38029diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
38030index 07e4fba..685f041 100644
38031--- a/drivers/infiniband/hw/nes/nes_verbs.c
38032+++ b/drivers/infiniband/hw/nes/nes_verbs.c
38033@@ -46,9 +46,9 @@
38034
38035 #include <rdma/ib_umem.h>
38036
38037-atomic_t mod_qp_timouts;
38038-atomic_t qps_created;
38039-atomic_t sw_qps_destroyed;
38040+atomic_unchecked_t mod_qp_timouts;
38041+atomic_unchecked_t qps_created;
38042+atomic_unchecked_t sw_qps_destroyed;
38043
38044 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
38045
38046@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
38047 if (init_attr->create_flags)
38048 return ERR_PTR(-EINVAL);
38049
38050- atomic_inc(&qps_created);
38051+ atomic_inc_unchecked(&qps_created);
38052 switch (init_attr->qp_type) {
38053 case IB_QPT_RC:
38054 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
38055@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
38056 struct iw_cm_event cm_event;
38057 int ret = 0;
38058
38059- atomic_inc(&sw_qps_destroyed);
38060+ atomic_inc_unchecked(&sw_qps_destroyed);
38061 nesqp->destroyed = 1;
38062
38063 /* Blow away the connection if it exists. */
38064diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
38065index 4d11575..3e890e5 100644
38066--- a/drivers/infiniband/hw/qib/qib.h
38067+++ b/drivers/infiniband/hw/qib/qib.h
38068@@ -51,6 +51,7 @@
38069 #include <linux/completion.h>
38070 #include <linux/kref.h>
38071 #include <linux/sched.h>
38072+#include <linux/slab.h>
38073
38074 #include "qib_common.h"
38075 #include "qib_verbs.h"
38076diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
38077index da739d9..da1c7f4 100644
38078--- a/drivers/input/gameport/gameport.c
38079+++ b/drivers/input/gameport/gameport.c
38080@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
38081 */
38082 static void gameport_init_port(struct gameport *gameport)
38083 {
38084- static atomic_t gameport_no = ATOMIC_INIT(0);
38085+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
38086
38087 __module_get(THIS_MODULE);
38088
38089 mutex_init(&gameport->drv_mutex);
38090 device_initialize(&gameport->dev);
38091 dev_set_name(&gameport->dev, "gameport%lu",
38092- (unsigned long)atomic_inc_return(&gameport_no) - 1);
38093+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
38094 gameport->dev.bus = &gameport_bus;
38095 gameport->dev.release = gameport_release_port;
38096 if (gameport->parent)
38097diff --git a/drivers/input/input.c b/drivers/input/input.c
38098index c044699..174d71a 100644
38099--- a/drivers/input/input.c
38100+++ b/drivers/input/input.c
38101@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
38102 */
38103 int input_register_device(struct input_dev *dev)
38104 {
38105- static atomic_t input_no = ATOMIC_INIT(0);
38106+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
38107 struct input_devres *devres = NULL;
38108 struct input_handler *handler;
38109 unsigned int packet_size;
38110@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
38111 dev->setkeycode = input_default_setkeycode;
38112
38113 dev_set_name(&dev->dev, "input%ld",
38114- (unsigned long) atomic_inc_return(&input_no) - 1);
38115+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
38116
38117 error = device_add(&dev->dev);
38118 if (error)
38119diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
38120index 04c69af..5f92d00 100644
38121--- a/drivers/input/joystick/sidewinder.c
38122+++ b/drivers/input/joystick/sidewinder.c
38123@@ -30,6 +30,7 @@
38124 #include <linux/kernel.h>
38125 #include <linux/module.h>
38126 #include <linux/slab.h>
38127+#include <linux/sched.h>
38128 #include <linux/init.h>
38129 #include <linux/input.h>
38130 #include <linux/gameport.h>
38131diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
38132index d6cbfe9..6225402 100644
38133--- a/drivers/input/joystick/xpad.c
38134+++ b/drivers/input/joystick/xpad.c
38135@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
38136
38137 static int xpad_led_probe(struct usb_xpad *xpad)
38138 {
38139- static atomic_t led_seq = ATOMIC_INIT(0);
38140+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
38141 long led_no;
38142 struct xpad_led *led;
38143 struct led_classdev *led_cdev;
38144@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
38145 if (!led)
38146 return -ENOMEM;
38147
38148- led_no = (long)atomic_inc_return(&led_seq) - 1;
38149+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
38150
38151 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
38152 led->xpad = xpad;
38153diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
38154index fe1df23..5b710f3 100644
38155--- a/drivers/input/mouse/psmouse.h
38156+++ b/drivers/input/mouse/psmouse.h
38157@@ -115,7 +115,7 @@ struct psmouse_attribute {
38158 ssize_t (*set)(struct psmouse *psmouse, void *data,
38159 const char *buf, size_t count);
38160 bool protect;
38161-};
38162+} __do_const;
38163 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
38164
38165 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
38166diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
38167index 4c842c3..590b0bf 100644
38168--- a/drivers/input/mousedev.c
38169+++ b/drivers/input/mousedev.c
38170@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
38171
38172 spin_unlock_irq(&client->packet_lock);
38173
38174- if (copy_to_user(buffer, data, count))
38175+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
38176 return -EFAULT;
38177
38178 return count;
38179diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
38180index 25fc597..558bf3b 100644
38181--- a/drivers/input/serio/serio.c
38182+++ b/drivers/input/serio/serio.c
38183@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
38184 */
38185 static void serio_init_port(struct serio *serio)
38186 {
38187- static atomic_t serio_no = ATOMIC_INIT(0);
38188+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
38189
38190 __module_get(THIS_MODULE);
38191
38192@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
38193 mutex_init(&serio->drv_mutex);
38194 device_initialize(&serio->dev);
38195 dev_set_name(&serio->dev, "serio%ld",
38196- (long)atomic_inc_return(&serio_no) - 1);
38197+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
38198 serio->dev.bus = &serio_bus;
38199 serio->dev.release = serio_release_port;
38200 serio->dev.groups = serio_device_attr_groups;
38201diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
38202index ddbdaca..be18a78 100644
38203--- a/drivers/iommu/iommu.c
38204+++ b/drivers/iommu/iommu.c
38205@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
38206 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
38207 {
38208 bus_register_notifier(bus, &iommu_bus_nb);
38209- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
38210+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
38211 }
38212
38213 /**
38214diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
38215index 89562a8..218999b 100644
38216--- a/drivers/isdn/capi/capi.c
38217+++ b/drivers/isdn/capi/capi.c
38218@@ -81,8 +81,8 @@ struct capiminor {
38219
38220 struct capi20_appl *ap;
38221 u32 ncci;
38222- atomic_t datahandle;
38223- atomic_t msgid;
38224+ atomic_unchecked_t datahandle;
38225+ atomic_unchecked_t msgid;
38226
38227 struct tty_port port;
38228 int ttyinstop;
38229@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
38230 capimsg_setu16(s, 2, mp->ap->applid);
38231 capimsg_setu8 (s, 4, CAPI_DATA_B3);
38232 capimsg_setu8 (s, 5, CAPI_RESP);
38233- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
38234+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
38235 capimsg_setu32(s, 8, mp->ncci);
38236 capimsg_setu16(s, 12, datahandle);
38237 }
38238@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
38239 mp->outbytes -= len;
38240 spin_unlock_bh(&mp->outlock);
38241
38242- datahandle = atomic_inc_return(&mp->datahandle);
38243+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
38244 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
38245 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
38246 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
38247 capimsg_setu16(skb->data, 2, mp->ap->applid);
38248 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
38249 capimsg_setu8 (skb->data, 5, CAPI_REQ);
38250- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
38251+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
38252 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
38253 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
38254 capimsg_setu16(skb->data, 16, len); /* Data length */
38255diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
38256index 67abf3f..076b3a6 100644
38257--- a/drivers/isdn/gigaset/interface.c
38258+++ b/drivers/isdn/gigaset/interface.c
38259@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
38260 }
38261 tty->driver_data = cs;
38262
38263- ++cs->port.count;
38264+ atomic_inc(&cs->port.count);
38265
38266- if (cs->port.count == 1) {
38267+ if (atomic_read(&cs->port.count) == 1) {
38268 tty_port_tty_set(&cs->port, tty);
38269 tty->low_latency = 1;
38270 }
38271@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
38272
38273 if (!cs->connected)
38274 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
38275- else if (!cs->port.count)
38276+ else if (!atomic_read(&cs->port.count))
38277 dev_warn(cs->dev, "%s: device not opened\n", __func__);
38278- else if (!--cs->port.count)
38279+ else if (!atomic_dec_return(&cs->port.count))
38280 tty_port_tty_set(&cs->port, NULL);
38281
38282 mutex_unlock(&cs->mutex);
38283diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
38284index 821f7ac..28d4030 100644
38285--- a/drivers/isdn/hardware/avm/b1.c
38286+++ b/drivers/isdn/hardware/avm/b1.c
38287@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
38288 }
38289 if (left) {
38290 if (t4file->user) {
38291- if (copy_from_user(buf, dp, left))
38292+ if (left > sizeof buf || copy_from_user(buf, dp, left))
38293 return -EFAULT;
38294 } else {
38295 memcpy(buf, dp, left);
38296@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
38297 }
38298 if (left) {
38299 if (config->user) {
38300- if (copy_from_user(buf, dp, left))
38301+ if (left > sizeof buf || copy_from_user(buf, dp, left))
38302 return -EFAULT;
38303 } else {
38304 memcpy(buf, dp, left);
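The b1.c hunks above add a length check in front of copy_from_user() so a caller-supplied size can never overrun the fixed on-stack buffer; stock copy_from_user() validates access to the user source, not the size of the kernel destination. The same idiom as a self-contained userspace model (names are illustrative; memcpy stands in for copy_from_user):

	#include <string.h>

	static int load_chunk(char *buf, size_t bufsz, const char *dp, size_t left)
	{
		/* Reject oversized requests before any byte is copied;
		 * the driver returns -EFAULT in this case. */
		if (left > bufsz)
			return -1;
		memcpy(buf, dp, left);
		return 0;
	}
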
38305diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
38306index e09dc8a..15e2efb 100644
38307--- a/drivers/isdn/i4l/isdn_tty.c
38308+++ b/drivers/isdn/i4l/isdn_tty.c
38309@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
38310
38311 #ifdef ISDN_DEBUG_MODEM_OPEN
38312 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
38313- port->count);
38314+ atomic_read(&port->count));
38315 #endif
38316- port->count++;
38317+ atomic_inc(&port->count);
38318 port->tty = tty;
38319 /*
38320 * Start up serial port
38321@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
38322 #endif
38323 return;
38324 }
38325- if ((tty->count == 1) && (port->count != 1)) {
38326+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
38327 /*
38328 * Uh, oh. tty->count is 1, which means that the tty
38329 * structure will be freed. Info->count should always
38330@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
38331 * serial port won't be shutdown.
38332 */
38333 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
38334- "info->count is %d\n", port->count);
38335- port->count = 1;
38336+ "info->count is %d\n", atomic_read(&port->count));
38337+ atomic_set(&port->count, 1);
38338 }
38339- if (--port->count < 0) {
38340+ if (atomic_dec_return(&port->count) < 0) {
38341 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
38342- info->line, port->count);
38343- port->count = 0;
38344+ info->line, atomic_read(&port->count));
38345+ atomic_set(&port->count, 0);
38346 }
38347- if (port->count) {
38348+ if (atomic_read(&port->count)) {
38349 #ifdef ISDN_DEBUG_MODEM_OPEN
38350 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
38351 #endif
38352@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
38353 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
38354 return;
38355 isdn_tty_shutdown(info);
38356- port->count = 0;
38357+ atomic_set(&port->count, 0);
38358 port->flags &= ~ASYNC_NORMAL_ACTIVE;
38359 port->tty = NULL;
38360 wake_up_interruptible(&port->open_wait);
38361@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
38362 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
38363 modem_info *info = &dev->mdm.info[i];
38364
38365- if (info->port.count == 0)
38366+ if (atomic_read(&info->port.count) == 0)
38367 continue;
38368 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
38369 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
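The isdn_tty and gigaset hunks convert struct tty_port's open count to atomic operations; the corresponding change of the count field itself to atomic_t in the tty headers appears elsewhere in this patch, and every driver that touched port->count directly is rewritten to match. The open/close bookkeeping then looks roughly like this sketch (C11 atomics standing in for the kernel's atomic_t API):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct port { atomic_int count; };

	/* First opener attaches the tty to the port ... */
	static bool port_first_open(struct port *p)
	{
		return atomic_fetch_add(&p->count, 1) + 1 == 1;
	}

	/* ... and the last closer detaches it. */
	static bool port_last_close(struct port *p)
	{
		return atomic_fetch_sub(&p->count, 1) - 1 == 0;
	}
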
38370diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
38371index e74df7c..03a03ba 100644
38372--- a/drivers/isdn/icn/icn.c
38373+++ b/drivers/isdn/icn/icn.c
38374@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
38375 if (count > len)
38376 count = len;
38377 if (user) {
38378- if (copy_from_user(msg, buf, count))
38379+ if (count > sizeof msg || copy_from_user(msg, buf, count))
38380 return -EFAULT;
38381 } else
38382 memcpy(msg, buf, count);
38383diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
38384index 6a8405d..0bd1c7e 100644
38385--- a/drivers/leds/leds-clevo-mail.c
38386+++ b/drivers/leds/leds-clevo-mail.c
38387@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
38388 * detected as working, but in reality it is not) as low as
38389 * possible.
38390 */
38391-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
38392+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
38393 {
38394 .callback = clevo_mail_led_dmi_callback,
38395 .ident = "Clevo D410J",
38396diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
38397index ec9b287..65c9bf4 100644
38398--- a/drivers/leds/leds-ss4200.c
38399+++ b/drivers/leds/leds-ss4200.c
38400@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
38401 * detected as working, but in reality it is not) as low as
38402 * possible.
38403 */
38404-static struct dmi_system_id __initdata nas_led_whitelist[] = {
38405+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
38406 {
38407 .callback = ss4200_led_dmi_callback,
38408 .ident = "Intel SS4200-E",
38409diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
38410index a5ebc00..982886f 100644
38411--- a/drivers/lguest/core.c
38412+++ b/drivers/lguest/core.c
38413@@ -92,9 +92,17 @@ static __init int map_switcher(void)
38414 * it's worked so far. The end address needs +1 because __get_vm_area
38415 * allocates an extra guard page, so we need space for that.
38416 */
38417+
38418+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
38419+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38420+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
38421+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38422+#else
38423 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38424 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
38425 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38426+#endif
38427+
38428 if (!switcher_vma) {
38429 err = -ENOMEM;
38430 printk("lguest: could not map switcher pages high\n");
38431@@ -119,7 +127,7 @@ static __init int map_switcher(void)
38432 * Now the Switcher is mapped at the right address, we can't fail!
38433 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
38434 */
38435- memcpy(switcher_vma->addr, start_switcher_text,
38436+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
38437 end_switcher_text - start_switcher_text);
38438
38439 printk(KERN_INFO "lguest: mapped switcher at %p\n",
38440diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
38441index 3b62be16..e33134a 100644
38442--- a/drivers/lguest/page_tables.c
38443+++ b/drivers/lguest/page_tables.c
38444@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
38445 /*:*/
38446
38447 #ifdef CONFIG_X86_PAE
38448-static void release_pmd(pmd_t *spmd)
38449+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
38450 {
38451 /* If the entry's not present, there's nothing to release. */
38452 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
38453diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
38454index 4af12e1..0e89afe 100644
38455--- a/drivers/lguest/x86/core.c
38456+++ b/drivers/lguest/x86/core.c
38457@@ -59,7 +59,7 @@ static struct {
38458 /* Offset from where switcher.S was compiled to where we've copied it */
38459 static unsigned long switcher_offset(void)
38460 {
38461- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
38462+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
38463 }
38464
38465 /* This cpu's struct lguest_pages. */
38466@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
38467 * These copies are pretty cheap, so we do them unconditionally: */
38468 /* Save the current Host top-level page directory.
38469 */
38470+
38471+#ifdef CONFIG_PAX_PER_CPU_PGD
38472+ pages->state.host_cr3 = read_cr3();
38473+#else
38474 pages->state.host_cr3 = __pa(current->mm->pgd);
38475+#endif
38476+
38477 /*
38478 * Set up the Guest's page tables to see this CPU's pages (and no
38479 * other CPU's pages).
38480@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
38481 * compiled-in switcher code and the high-mapped copy we just made.
38482 */
38483 for (i = 0; i < IDT_ENTRIES; i++)
38484- default_idt_entries[i] += switcher_offset();
38485+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
38486
38487 /*
38488 * Set up the Switcher's per-cpu areas.
38489@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
38490 * it will be undisturbed when we switch. To change %cs and jump we
38491 * need this structure to feed to Intel's "lcall" instruction.
38492 */
38493- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
38494+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
38495 lguest_entry.segment = LGUEST_CS;
38496
38497 /*
38498diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
38499index 40634b0..4f5855e 100644
38500--- a/drivers/lguest/x86/switcher_32.S
38501+++ b/drivers/lguest/x86/switcher_32.S
38502@@ -87,6 +87,7 @@
38503 #include <asm/page.h>
38504 #include <asm/segment.h>
38505 #include <asm/lguest.h>
38506+#include <asm/processor-flags.h>
38507
38508 // We mark the start of the code to copy
38509 // It's placed in .text tho it's never run here
38510@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
38511 // Changes type when we load it: damn Intel!
38512 // For after we switch over our page tables
38513 // That entry will be read-only: we'd crash.
38514+
38515+#ifdef CONFIG_PAX_KERNEXEC
38516+ mov %cr0, %edx
38517+ xor $X86_CR0_WP, %edx
38518+ mov %edx, %cr0
38519+#endif
38520+
38521 movl $(GDT_ENTRY_TSS*8), %edx
38522 ltr %dx
38523
38524@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
38525 // Let's clear it again for our return.
38526 // The GDT descriptor of the Host
38527 // Points to the table after two "size" bytes
38528- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
38529+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
38530 // Clear "used" from type field (byte 5, bit 2)
38531- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
38532+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
38533+
38534+#ifdef CONFIG_PAX_KERNEXEC
38535+ mov %cr0, %eax
38536+ xor $X86_CR0_WP, %eax
38537+ mov %eax, %cr0
38538+#endif
38539
38540 // Once our page table's switched, the Guest is live!
38541 // The Host fades as we run this final step.
38542@@ -295,13 +309,12 @@ deliver_to_host:
38543 // I consulted gcc, and it gave
38544 // These instructions, which I gladly credit:
38545 leal (%edx,%ebx,8), %eax
38546- movzwl (%eax),%edx
38547- movl 4(%eax), %eax
38548- xorw %ax, %ax
38549- orl %eax, %edx
38550+ movl 4(%eax), %edx
38551+ movw (%eax), %dx
38552 // Now the address of the handler's in %edx
38553 // We call it now: its "iret" drops us home.
38554- jmp *%edx
38555+ ljmp $__KERNEL_CS, $1f
38556+1: jmp *%edx
38557
38558 // Every interrupt can come to us here
38559 // But we must truly tell each apart.
38560diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
38561index 7155945..4bcc562 100644
38562--- a/drivers/md/bitmap.c
38563+++ b/drivers/md/bitmap.c
38564@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
38565 chunk_kb ? "KB" : "B");
38566 if (bitmap->storage.file) {
38567 seq_printf(seq, ", file: ");
38568- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
38569+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
38570 }
38571
38572 seq_printf(seq, "\n");
38573diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
38574index eee353d..74504c4 100644
38575--- a/drivers/md/dm-ioctl.c
38576+++ b/drivers/md/dm-ioctl.c
38577@@ -1632,7 +1632,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
38578 cmd == DM_LIST_VERSIONS_CMD)
38579 return 0;
38580
38581- if ((cmd == DM_DEV_CREATE_CMD)) {
38582+ if (cmd == DM_DEV_CREATE_CMD) {
38583 if (!*param->name) {
38584 DMWARN("name not supplied when creating device");
38585 return -EINVAL;
38586diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
38587index 7f24190..0e18099 100644
38588--- a/drivers/md/dm-raid1.c
38589+++ b/drivers/md/dm-raid1.c
38590@@ -40,7 +40,7 @@ enum dm_raid1_error {
38591
38592 struct mirror {
38593 struct mirror_set *ms;
38594- atomic_t error_count;
38595+ atomic_unchecked_t error_count;
38596 unsigned long error_type;
38597 struct dm_dev *dev;
38598 sector_t offset;
38599@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
38600 struct mirror *m;
38601
38602 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
38603- if (!atomic_read(&m->error_count))
38604+ if (!atomic_read_unchecked(&m->error_count))
38605 return m;
38606
38607 return NULL;
38608@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
38609 * simple way to tell if a device has encountered
38610 * errors.
38611 */
38612- atomic_inc(&m->error_count);
38613+ atomic_inc_unchecked(&m->error_count);
38614
38615 if (test_and_set_bit(error_type, &m->error_type))
38616 return;
38617@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
38618 struct mirror *m = get_default_mirror(ms);
38619
38620 do {
38621- if (likely(!atomic_read(&m->error_count)))
38622+ if (likely(!atomic_read_unchecked(&m->error_count)))
38623 return m;
38624
38625 if (m-- == ms->mirror)
38626@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
38627 {
38628 struct mirror *default_mirror = get_default_mirror(m->ms);
38629
38630- return !atomic_read(&default_mirror->error_count);
38631+ return !atomic_read_unchecked(&default_mirror->error_count);
38632 }
38633
38634 static int mirror_available(struct mirror_set *ms, struct bio *bio)
38635@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
38636 */
38637 if (likely(region_in_sync(ms, region, 1)))
38638 m = choose_mirror(ms, bio->bi_sector);
38639- else if (m && atomic_read(&m->error_count))
38640+ else if (m && atomic_read_unchecked(&m->error_count))
38641 m = NULL;
38642
38643 if (likely(m))
38644@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
38645 }
38646
38647 ms->mirror[mirror].ms = ms;
38648- atomic_set(&(ms->mirror[mirror].error_count), 0);
38649+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
38650 ms->mirror[mirror].error_type = 0;
38651 ms->mirror[mirror].offset = offset;
38652
38653@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
38654 */
38655 static char device_status_char(struct mirror *m)
38656 {
38657- if (!atomic_read(&(m->error_count)))
38658+ if (!atomic_read_unchecked(&(m->error_count)))
38659 return 'A';
38660
38661 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
38662diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
38663index aaecefa..23b3026 100644
38664--- a/drivers/md/dm-stripe.c
38665+++ b/drivers/md/dm-stripe.c
38666@@ -20,7 +20,7 @@ struct stripe {
38667 struct dm_dev *dev;
38668 sector_t physical_start;
38669
38670- atomic_t error_count;
38671+ atomic_unchecked_t error_count;
38672 };
38673
38674 struct stripe_c {
38675@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38676 kfree(sc);
38677 return r;
38678 }
38679- atomic_set(&(sc->stripe[i].error_count), 0);
38680+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
38681 }
38682
38683 ti->private = sc;
38684@@ -325,7 +325,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
38685 DMEMIT("%d ", sc->stripes);
38686 for (i = 0; i < sc->stripes; i++) {
38687 DMEMIT("%s ", sc->stripe[i].dev->name);
38688- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
38689+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
38690 'D' : 'A';
38691 }
38692 buffer[i] = '\0';
38693@@ -370,8 +370,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
38694 */
38695 for (i = 0; i < sc->stripes; i++)
38696 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
38697- atomic_inc(&(sc->stripe[i].error_count));
38698- if (atomic_read(&(sc->stripe[i].error_count)) <
38699+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
38700+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
38701 DM_IO_ERROR_THRESHOLD)
38702 schedule_work(&sc->trigger_event);
38703 }
38704diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
38705index daf25d0..d74f49f 100644
38706--- a/drivers/md/dm-table.c
38707+++ b/drivers/md/dm-table.c
38708@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
38709 if (!dev_size)
38710 return 0;
38711
38712- if ((start >= dev_size) || (start + len > dev_size)) {
38713+ if ((start >= dev_size) || (len > dev_size - start)) {
38714 DMWARN("%s: %s too small for target: "
38715 "start=%llu, len=%llu, dev_size=%llu",
38716 dm_device_name(ti->table->md), bdevname(bdev, b),
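The dm-table change is an integer-overflow fix: for unsigned sector counts, start + len can wrap past zero and sneak a huge len under the dev_size limit, while len > dev_size - start cannot misbehave once start >= dev_size has been rejected. As a standalone check:

	#include <stdbool.h>
	#include <stdint.h>

	/* true if [start, start + len) does not fit inside dev_size.
	 * The subtraction form avoids the wrap-around that makes
	 * 'start + len > dev_size' bypassable with a large len. */
	static bool range_invalid(uint64_t start, uint64_t len, uint64_t dev_size)
	{
		return start >= dev_size || len > dev_size - start;
	}
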
38717diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
38718index 4d6e853..a234157 100644
38719--- a/drivers/md/dm-thin-metadata.c
38720+++ b/drivers/md/dm-thin-metadata.c
38721@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38722 {
38723 pmd->info.tm = pmd->tm;
38724 pmd->info.levels = 2;
38725- pmd->info.value_type.context = pmd->data_sm;
38726+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38727 pmd->info.value_type.size = sizeof(__le64);
38728 pmd->info.value_type.inc = data_block_inc;
38729 pmd->info.value_type.dec = data_block_dec;
38730@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38731
38732 pmd->bl_info.tm = pmd->tm;
38733 pmd->bl_info.levels = 1;
38734- pmd->bl_info.value_type.context = pmd->data_sm;
38735+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38736 pmd->bl_info.value_type.size = sizeof(__le64);
38737 pmd->bl_info.value_type.inc = data_block_inc;
38738 pmd->bl_info.value_type.dec = data_block_dec;
38739diff --git a/drivers/md/dm.c b/drivers/md/dm.c
38740index 0d8f086..f5a91d5 100644
38741--- a/drivers/md/dm.c
38742+++ b/drivers/md/dm.c
38743@@ -170,9 +170,9 @@ struct mapped_device {
38744 /*
38745 * Event handling.
38746 */
38747- atomic_t event_nr;
38748+ atomic_unchecked_t event_nr;
38749 wait_queue_head_t eventq;
38750- atomic_t uevent_seq;
38751+ atomic_unchecked_t uevent_seq;
38752 struct list_head uevent_list;
38753 spinlock_t uevent_lock; /* Protect access to uevent_list */
38754
38755@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
38756 rwlock_init(&md->map_lock);
38757 atomic_set(&md->holders, 1);
38758 atomic_set(&md->open_count, 0);
38759- atomic_set(&md->event_nr, 0);
38760- atomic_set(&md->uevent_seq, 0);
38761+ atomic_set_unchecked(&md->event_nr, 0);
38762+ atomic_set_unchecked(&md->uevent_seq, 0);
38763 INIT_LIST_HEAD(&md->uevent_list);
38764 spin_lock_init(&md->uevent_lock);
38765
38766@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
38767
38768 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
38769
38770- atomic_inc(&md->event_nr);
38771+ atomic_inc_unchecked(&md->event_nr);
38772 wake_up(&md->eventq);
38773 }
38774
38775@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
38776
38777 uint32_t dm_next_uevent_seq(struct mapped_device *md)
38778 {
38779- return atomic_add_return(1, &md->uevent_seq);
38780+ return atomic_add_return_unchecked(1, &md->uevent_seq);
38781 }
38782
38783 uint32_t dm_get_event_nr(struct mapped_device *md)
38784 {
38785- return atomic_read(&md->event_nr);
38786+ return atomic_read_unchecked(&md->event_nr);
38787 }
38788
38789 int dm_wait_event(struct mapped_device *md, int event_nr)
38790 {
38791 return wait_event_interruptible(md->eventq,
38792- (event_nr != atomic_read(&md->event_nr)));
38793+ (event_nr != atomic_read_unchecked(&md->event_nr)));
38794 }
38795
38796 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
38797diff --git a/drivers/md/md.c b/drivers/md/md.c
38798index f363135..9b38815 100644
38799--- a/drivers/md/md.c
38800+++ b/drivers/md/md.c
38801@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
38802 * start build, activate spare
38803 */
38804 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
38805-static atomic_t md_event_count;
38806+static atomic_unchecked_t md_event_count;
38807 void md_new_event(struct mddev *mddev)
38808 {
38809- atomic_inc(&md_event_count);
38810+ atomic_inc_unchecked(&md_event_count);
38811 wake_up(&md_event_waiters);
38812 }
38813 EXPORT_SYMBOL_GPL(md_new_event);
38814@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38815 */
38816 static void md_new_event_inintr(struct mddev *mddev)
38817 {
38818- atomic_inc(&md_event_count);
38819+ atomic_inc_unchecked(&md_event_count);
38820 wake_up(&md_event_waiters);
38821 }
38822
38823@@ -1507,7 +1507,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
38824 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
38825 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
38826 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
38827- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38828+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38829
38830 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38831 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38832@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
38833 else
38834 sb->resync_offset = cpu_to_le64(0);
38835
38836- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38837+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38838
38839 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38840 sb->size = cpu_to_le64(mddev->dev_sectors);
38841@@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38842 static ssize_t
38843 errors_show(struct md_rdev *rdev, char *page)
38844 {
38845- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38846+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38847 }
38848
38849 static ssize_t
38850@@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
38851 char *e;
38852 unsigned long n = simple_strtoul(buf, &e, 10);
38853 if (*buf && (*e == 0 || *e == '\n')) {
38854- atomic_set(&rdev->corrected_errors, n);
38855+ atomic_set_unchecked(&rdev->corrected_errors, n);
38856 return len;
38857 }
38858 return -EINVAL;
38859@@ -3210,8 +3210,8 @@ int md_rdev_init(struct md_rdev *rdev)
38860 rdev->sb_loaded = 0;
38861 rdev->bb_page = NULL;
38862 atomic_set(&rdev->nr_pending, 0);
38863- atomic_set(&rdev->read_errors, 0);
38864- atomic_set(&rdev->corrected_errors, 0);
38865+ atomic_set_unchecked(&rdev->read_errors, 0);
38866+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38867
38868 INIT_LIST_HEAD(&rdev->same_set);
38869 init_waitqueue_head(&rdev->blocked_wait);
38870@@ -6987,7 +6987,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38871
38872 spin_unlock(&pers_lock);
38873 seq_printf(seq, "\n");
38874- seq->poll_event = atomic_read(&md_event_count);
38875+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38876 return 0;
38877 }
38878 if (v == (void*)2) {
38879@@ -7090,7 +7090,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38880 return error;
38881
38882 seq = file->private_data;
38883- seq->poll_event = atomic_read(&md_event_count);
38884+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38885 return error;
38886 }
38887
38888@@ -7104,7 +7104,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38889 /* always allow read */
38890 mask = POLLIN | POLLRDNORM;
38891
38892- if (seq->poll_event != atomic_read(&md_event_count))
38893+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
38894 mask |= POLLERR | POLLPRI;
38895 return mask;
38896 }
38897@@ -7148,7 +7148,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
38898 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38899 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38900 (int)part_stat_read(&disk->part0, sectors[1]) -
38901- atomic_read(&disk->sync_io);
38902+ atomic_read_unchecked(&disk->sync_io);
38903 /* sync IO will cause sync_io to increase before the disk_stats
38904 * as sync_io is counted when a request starts, and
38905 * disk_stats is counted when it completes.
38906diff --git a/drivers/md/md.h b/drivers/md/md.h
38907index eca59c3..7c42285 100644
38908--- a/drivers/md/md.h
38909+++ b/drivers/md/md.h
38910@@ -94,13 +94,13 @@ struct md_rdev {
38911 * only maintained for arrays that
38912 * support hot removal
38913 */
38914- atomic_t read_errors; /* number of consecutive read errors that
38915+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38916 * we have tried to ignore.
38917 */
38918 struct timespec last_read_error; /* monotonic time since our
38919 * last read error
38920 */
38921- atomic_t corrected_errors; /* number of corrected read errors,
38922+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38923 * for reporting to userspace and storing
38924 * in superblock.
38925 */
38926@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38927
38928 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38929 {
38930- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38931+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38932 }
38933
38934 struct md_personality
38935diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38936index 1cbfc6b..56e1dbb 100644
38937--- a/drivers/md/persistent-data/dm-space-map.h
38938+++ b/drivers/md/persistent-data/dm-space-map.h
38939@@ -60,6 +60,7 @@ struct dm_space_map {
38940 int (*root_size)(struct dm_space_map *sm, size_t *result);
38941 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
38942 };
38943+typedef struct dm_space_map __no_const dm_space_map_no_const;
38944
38945 /*----------------------------------------------------------------*/
38946
38947diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38948index fd86b37..a5389ef 100644
38949--- a/drivers/md/raid1.c
38950+++ b/drivers/md/raid1.c
38951@@ -1821,7 +1821,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38952 if (r1_sync_page_io(rdev, sect, s,
38953 bio->bi_io_vec[idx].bv_page,
38954 READ) != 0)
38955- atomic_add(s, &rdev->corrected_errors);
38956+ atomic_add_unchecked(s, &rdev->corrected_errors);
38957 }
38958 sectors -= s;
38959 sect += s;
38960@@ -2043,7 +2043,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38961 test_bit(In_sync, &rdev->flags)) {
38962 if (r1_sync_page_io(rdev, sect, s,
38963 conf->tmppage, READ)) {
38964- atomic_add(s, &rdev->corrected_errors);
38965+ atomic_add_unchecked(s, &rdev->corrected_errors);
38966 printk(KERN_INFO
38967 "md/raid1:%s: read error corrected "
38968 "(%d sectors at %llu on %s)\n",
38969diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38970index b3898d4..23a462b 100644
38971--- a/drivers/md/raid10.c
38972+++ b/drivers/md/raid10.c
38973@@ -1881,7 +1881,7 @@ static void end_sync_read(struct bio *bio, int error)
38974 /* The write handler will notice the lack of
38975 * R10BIO_Uptodate and record any errors etc
38976 */
38977- atomic_add(r10_bio->sectors,
38978+ atomic_add_unchecked(r10_bio->sectors,
38979 &conf->mirrors[d].rdev->corrected_errors);
38980
38981 /* for reconstruct, we always reschedule after a read.
38982@@ -2230,7 +2230,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38983 {
38984 struct timespec cur_time_mon;
38985 unsigned long hours_since_last;
38986- unsigned int read_errors = atomic_read(&rdev->read_errors);
38987+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38988
38989 ktime_get_ts(&cur_time_mon);
38990
38991@@ -2252,9 +2252,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38992 * overflowing the shift of read_errors by hours_since_last.
38993 */
38994 if (hours_since_last >= 8 * sizeof(read_errors))
38995- atomic_set(&rdev->read_errors, 0);
38996+ atomic_set_unchecked(&rdev->read_errors, 0);
38997 else
38998- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38999+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
39000 }
39001
39002 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
39003@@ -2308,8 +2308,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
39004 return;
39005
39006 check_decay_read_errors(mddev, rdev);
39007- atomic_inc(&rdev->read_errors);
39008- if (atomic_read(&rdev->read_errors) > max_read_errors) {
39009+ atomic_inc_unchecked(&rdev->read_errors);
39010+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
39011 char b[BDEVNAME_SIZE];
39012 bdevname(rdev->bdev, b);
39013
39014@@ -2317,7 +2317,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
39015 "md/raid10:%s: %s: Raid device exceeded "
39016 "read_error threshold [cur %d:max %d]\n",
39017 mdname(mddev), b,
39018- atomic_read(&rdev->read_errors), max_read_errors);
39019+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
39020 printk(KERN_NOTICE
39021 "md/raid10:%s: %s: Failing raid device\n",
39022 mdname(mddev), b);
39023@@ -2472,7 +2472,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
39024 sect +
39025 choose_data_offset(r10_bio, rdev)),
39026 bdevname(rdev->bdev, b));
39027- atomic_add(s, &rdev->corrected_errors);
39028+ atomic_add_unchecked(s, &rdev->corrected_errors);
39029 }
39030
39031 rdev_dec_pending(rdev, mddev);
39032diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
39033index 94ce78e..df99e24 100644
39034--- a/drivers/md/raid5.c
39035+++ b/drivers/md/raid5.c
39036@@ -1800,21 +1800,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
39037 mdname(conf->mddev), STRIPE_SECTORS,
39038 (unsigned long long)s,
39039 bdevname(rdev->bdev, b));
39040- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
39041+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
39042 clear_bit(R5_ReadError, &sh->dev[i].flags);
39043 clear_bit(R5_ReWrite, &sh->dev[i].flags);
39044 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
39045 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
39046
39047- if (atomic_read(&rdev->read_errors))
39048- atomic_set(&rdev->read_errors, 0);
39049+ if (atomic_read_unchecked(&rdev->read_errors))
39050+ atomic_set_unchecked(&rdev->read_errors, 0);
39051 } else {
39052 const char *bdn = bdevname(rdev->bdev, b);
39053 int retry = 0;
39054 int set_bad = 0;
39055
39056 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
39057- atomic_inc(&rdev->read_errors);
39058+ atomic_inc_unchecked(&rdev->read_errors);
39059 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
39060 printk_ratelimited(
39061 KERN_WARNING
39062@@ -1842,7 +1842,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
39063 mdname(conf->mddev),
39064 (unsigned long long)s,
39065 bdn);
39066- } else if (atomic_read(&rdev->read_errors)
39067+ } else if (atomic_read_unchecked(&rdev->read_errors)
39068 > conf->max_nr_stripes)
39069 printk(KERN_WARNING
39070 "md/raid:%s: Too many read errors, failing device %s.\n",
39071diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
39072index d33101a..6b13069 100644
39073--- a/drivers/media/dvb-core/dvbdev.c
39074+++ b/drivers/media/dvb-core/dvbdev.c
39075@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
39076 const struct dvb_device *template, void *priv, int type)
39077 {
39078 struct dvb_device *dvbdev;
39079- struct file_operations *dvbdevfops;
39080+ file_operations_no_const *dvbdevfops;
39081 struct device *clsdev;
39082 int minor;
39083 int id;
39084diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
39085index 404f63a..4796533 100644
39086--- a/drivers/media/dvb-frontends/dib3000.h
39087+++ b/drivers/media/dvb-frontends/dib3000.h
39088@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
39089 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
39090 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
39091 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
39092-};
39093+} __no_const;
39094
39095 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
39096 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
39097diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
39098index bc78354..42c9459 100644
39099--- a/drivers/media/pci/cx88/cx88-video.c
39100+++ b/drivers/media/pci/cx88/cx88-video.c
39101@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
39102
39103 /* ------------------------------------------------------------------ */
39104
39105-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
39106-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
39107-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
39108+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
39109+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
39110+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
39111
39112 module_param_array(video_nr, int, NULL, 0444);
39113 module_param_array(vbi_nr, int, NULL, 0444);
39114diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
39115index 8e9a668..78d6310 100644
39116--- a/drivers/media/platform/omap/omap_vout.c
39117+++ b/drivers/media/platform/omap/omap_vout.c
39118@@ -63,7 +63,6 @@ enum omap_vout_channels {
39119 OMAP_VIDEO2,
39120 };
39121
39122-static struct videobuf_queue_ops video_vbq_ops;
39123 /* Variables configurable through module params*/
39124 static u32 video1_numbuffers = 3;
39125 static u32 video2_numbuffers = 3;
39126@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
39127 {
39128 struct videobuf_queue *q;
39129 struct omap_vout_device *vout = NULL;
39130+ static struct videobuf_queue_ops video_vbq_ops = {
39131+ .buf_setup = omap_vout_buffer_setup,
39132+ .buf_prepare = omap_vout_buffer_prepare,
39133+ .buf_release = omap_vout_buffer_release,
39134+ .buf_queue = omap_vout_buffer_queue,
39135+ };
39136
39137 vout = video_drvdata(file);
39138 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
39139@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
39140 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
39141
39142 q = &vout->vbq;
39143- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
39144- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
39145- video_vbq_ops.buf_release = omap_vout_buffer_release;
39146- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
39147 spin_lock_init(&vout->vbq_lock);
39148
39149 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
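The omap_vout change converts a writable file-scope ops table that was populated at open() time into a function-local static with a full initializer, so no runtime stores into the function-pointer table remain and the constify plugin can treat it as read-only data. The shape of the transformation, reduced to a toy:

	struct ops { void (*setup)(void); };
	static void do_setup(void) { }

	/* Before: zero-filled writable global, patched at runtime. */
	static struct ops vbq_ops_old;
	static void open_old(void) { vbq_ops_old.setup = do_setup; }

	/* After: initialized where it is defined; nothing writes to it. */
	static void open_new(void)
	{
		static struct ops vbq_ops = { .setup = do_setup };
		(void)vbq_ops;
	}
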
39150diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
39151index b671e20..34088b7 100644
39152--- a/drivers/media/platform/s5p-tv/mixer.h
39153+++ b/drivers/media/platform/s5p-tv/mixer.h
39154@@ -155,7 +155,7 @@ struct mxr_layer {
39155 /** layer index (unique identifier) */
39156 int idx;
39157 /** callbacks for layer methods */
39158- struct mxr_layer_ops ops;
39159+ struct mxr_layer_ops *ops;
39160 /** format array */
39161 const struct mxr_format **fmt_array;
39162 /** size of format array */
39163diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
39164index b93a21f..2535195 100644
39165--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
39166+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
39167@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
39168 {
39169 struct mxr_layer *layer;
39170 int ret;
39171- struct mxr_layer_ops ops = {
39172+ static struct mxr_layer_ops ops = {
39173 .release = mxr_graph_layer_release,
39174 .buffer_set = mxr_graph_buffer_set,
39175 .stream_set = mxr_graph_stream_set,
39176diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
39177index 3b1670a..595c939 100644
39178--- a/drivers/media/platform/s5p-tv/mixer_reg.c
39179+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
39180@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
39181 layer->update_buf = next;
39182 }
39183
39184- layer->ops.buffer_set(layer, layer->update_buf);
39185+ layer->ops->buffer_set(layer, layer->update_buf);
39186
39187 if (done && done != layer->shadow_buf)
39188 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
39189diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
39190index 1f3b743..e839271 100644
39191--- a/drivers/media/platform/s5p-tv/mixer_video.c
39192+++ b/drivers/media/platform/s5p-tv/mixer_video.c
39193@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
39194 layer->geo.src.height = layer->geo.src.full_height;
39195
39196 mxr_geometry_dump(mdev, &layer->geo);
39197- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
39198+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
39199 mxr_geometry_dump(mdev, &layer->geo);
39200 }
39201
39202@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
39203 layer->geo.dst.full_width = mbus_fmt.width;
39204 layer->geo.dst.full_height = mbus_fmt.height;
39205 layer->geo.dst.field = mbus_fmt.field;
39206- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
39207+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
39208
39209 mxr_geometry_dump(mdev, &layer->geo);
39210 }
39211@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
39212 /* set source size to highest accepted value */
39213 geo->src.full_width = max(geo->dst.full_width, pix->width);
39214 geo->src.full_height = max(geo->dst.full_height, pix->height);
39215- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39216+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39217 mxr_geometry_dump(mdev, &layer->geo);
39218 /* set cropping to total visible screen */
39219 geo->src.width = pix->width;
39220@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
39221 geo->src.x_offset = 0;
39222 geo->src.y_offset = 0;
39223 /* assure consistency of geometry */
39224- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
39225+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
39226 mxr_geometry_dump(mdev, &layer->geo);
39227 /* set full size to lowest possible value */
39228 geo->src.full_width = 0;
39229 geo->src.full_height = 0;
39230- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39231+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39232 mxr_geometry_dump(mdev, &layer->geo);
39233
39234 /* returning results */
39235@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
39236 target->width = s->r.width;
39237 target->height = s->r.height;
39238
39239- layer->ops.fix_geometry(layer, stage, s->flags);
39240+ layer->ops->fix_geometry(layer, stage, s->flags);
39241
39242 /* retrieve update selection rectangle */
39243 res.left = target->x_offset;
39244@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
39245 mxr_output_get(mdev);
39246
39247 mxr_layer_update_output(layer);
39248- layer->ops.format_set(layer);
39249+ layer->ops->format_set(layer);
39250 /* enabling layer in hardware */
39251 spin_lock_irqsave(&layer->enq_slock, flags);
39252 layer->state = MXR_LAYER_STREAMING;
39253 spin_unlock_irqrestore(&layer->enq_slock, flags);
39254
39255- layer->ops.stream_set(layer, MXR_ENABLE);
39256+ layer->ops->stream_set(layer, MXR_ENABLE);
39257 mxr_streamer_get(mdev);
39258
39259 return 0;
39260@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
39261 spin_unlock_irqrestore(&layer->enq_slock, flags);
39262
39263 /* disabling layer in hardware */
39264- layer->ops.stream_set(layer, MXR_DISABLE);
39265+ layer->ops->stream_set(layer, MXR_DISABLE);
39266 /* remove one streamer */
39267 mxr_streamer_put(mdev);
39268 /* allow changes in output configuration */
39269@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
39270
39271 void mxr_layer_release(struct mxr_layer *layer)
39272 {
39273- if (layer->ops.release)
39274- layer->ops.release(layer);
39275+ if (layer->ops->release)
39276+ layer->ops->release(layer);
39277 }
39278
39279 void mxr_base_layer_release(struct mxr_layer *layer)
39280@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
39281
39282 layer->mdev = mdev;
39283 layer->idx = idx;
39284- layer->ops = *ops;
39285+ layer->ops = ops;
39286
39287 spin_lock_init(&layer->enq_slock);
39288 INIT_LIST_HEAD(&layer->enq_list);
39289diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39290index 3d13a63..da31bf1 100644
39291--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39292+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39293@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
39294 {
39295 struct mxr_layer *layer;
39296 int ret;
39297- struct mxr_layer_ops ops = {
39298+ static struct mxr_layer_ops ops = {
39299 .release = mxr_vp_layer_release,
39300 .buffer_set = mxr_vp_buffer_set,
39301 .stream_set = mxr_vp_stream_set,
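Across the s5p-tv mixer files the embedded struct mxr_layer_ops (copied by value in mxr_base_layer_create) becomes a pointer to a static table, and every layer->ops.fn() call site becomes layer->ops->fn(). The layer no longer carries a writable copy of the function pointers; all layers of a kind share one table that can be made read-only. In miniature (types reduced for illustration):

	struct layer_ops { void (*release)(void *layer); };

	struct layer {
		const struct layer_ops *ops;	/* was: struct layer_ops ops; */
	};

	static void graph_release(void *layer) { (void)layer; }
	static const struct layer_ops graph_ops = { .release = graph_release };

	static void layer_create(struct layer *l)
	{
		l->ops = &graph_ops;		/* was: l->ops = *ops; (struct copy) */
	}
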
39302diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
39303index 643d80a..56bb96b 100644
39304--- a/drivers/media/radio/radio-cadet.c
39305+++ b/drivers/media/radio/radio-cadet.c
39306@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
39307 unsigned char readbuf[RDS_BUFFER];
39308 int i = 0;
39309
39310+ if (count > RDS_BUFFER)
39311+ return -EFAULT;
39312 mutex_lock(&dev->lock);
39313 if (dev->rdsstat == 0)
39314 cadet_start_rds(dev);
39315@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
39316 while (i < count && dev->rdsin != dev->rdsout)
39317 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
39318
39319- if (i && copy_to_user(data, readbuf, i))
39320+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
39321 i = -EFAULT;
39322 unlock:
39323 mutex_unlock(&dev->lock);
39324diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
39325index 3940bb0..fb3952a 100644
39326--- a/drivers/media/usb/dvb-usb/cxusb.c
39327+++ b/drivers/media/usb/dvb-usb/cxusb.c
39328@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
39329
39330 struct dib0700_adapter_state {
39331 int (*set_param_save) (struct dvb_frontend *);
39332-};
39333+} __no_const;
39334
39335 static int dib7070_set_param_override(struct dvb_frontend *fe)
39336 {
39337diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
39338index 9382895..ac8093c 100644
39339--- a/drivers/media/usb/dvb-usb/dw2102.c
39340+++ b/drivers/media/usb/dvb-usb/dw2102.c
39341@@ -95,7 +95,7 @@ struct su3000_state {
39342
39343 struct s6x0_state {
39344 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
39345-};
39346+} __no_const;
39347
39348 /* debug */
39349 static int dvb_usb_dw2102_debug;
39350diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
39351index aa6e7c7..4cd8061 100644
39352--- a/drivers/media/v4l2-core/v4l2-ioctl.c
39353+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
39354@@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
39355 struct file *file, void *fh, void *p);
39356 } u;
39357 void (*debug)(const void *arg, bool write_only);
39358-};
39359+} __do_const;
39360+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
39361
39362 /* This control needs a priority check */
39363 #define INFO_FL_PRIO (1 << 0)
39364@@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
39365 struct video_device *vfd = video_devdata(file);
39366 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
39367 bool write_only = false;
39368- struct v4l2_ioctl_info default_info;
39369+ v4l2_ioctl_info_no_const default_info;
39370 const struct v4l2_ioctl_info *info;
39371 void *fh = file->private_data;
39372 struct v4l2_fh *vfh = NULL;
39373diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
39374index 29b2172..a7c5b31 100644
39375--- a/drivers/memstick/host/r592.c
39376+++ b/drivers/memstick/host/r592.c
39377@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
39378 /* Executes one TPC (data is read/written from small or large fifo) */
39379 static void r592_execute_tpc(struct r592_device *dev)
39380 {
39381- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
39382+ bool is_write;
39383 int len, error;
39384 u32 status, reg;
39385
39386@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
39387 return;
39388 }
39389
39390+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
39391 len = dev->req->long_data ?
39392 dev->req->sg.length : dev->req->data_len;
39393
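The r592 fix is an ordering bug: is_write was initialized from dev->req at its declaration, before the function's early-return check for a missing request (in the elided context lines just above), so a NULL dev->req was dereferenced on the error path. Moving the assignment below the check restores the intended order:

	struct req { int tpc; };
	struct dev { struct req *req; };

	static void execute_tpc(struct dev *dev)
	{
		int is_write;			/* no initializer here */

		if (!dev->req)			/* guard runs first */
			return;

		/* Only now is the dereference safe; 1 stands in for
		 * MS_TPC_SET_RW_REG_ADRS. */
		is_write = dev->req->tpc >= 1;
		(void)is_write;
	}
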
39394diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
39395index fb69baa..3aeea2e 100644
39396--- a/drivers/message/fusion/mptbase.c
39397+++ b/drivers/message/fusion/mptbase.c
39398@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39399 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
39400 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
39401
39402+#ifdef CONFIG_GRKERNSEC_HIDESYM
39403+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
39404+#else
39405 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
39406 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
39407+#endif
39408+
39409 /*
39410 * Rounding UP to nearest 4-kB boundary here...
39411 */
39412@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39413 ioc->facts.GlobalCredits);
39414
39415 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
39416+#ifdef CONFIG_GRKERNSEC_HIDESYM
39417+ NULL, NULL);
39418+#else
39419 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
39420+#endif
39421 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
39422 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
39423 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
39424diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
39425index fa43c39..daeb158 100644
39426--- a/drivers/message/fusion/mptsas.c
39427+++ b/drivers/message/fusion/mptsas.c
39428@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
39429 return 0;
39430 }
39431
39432+static inline void
39433+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39434+{
39435+ if (phy_info->port_details) {
39436+ phy_info->port_details->rphy = rphy;
39437+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39438+ ioc->name, rphy));
39439+ }
39440+
39441+ if (rphy) {
39442+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39443+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39444+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39445+ ioc->name, rphy, rphy->dev.release));
39446+ }
39447+}
39448+
39449 /* no mutex */
39450 static void
39451 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
39452@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
39453 return NULL;
39454 }
39455
39456-static inline void
39457-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39458-{
39459- if (phy_info->port_details) {
39460- phy_info->port_details->rphy = rphy;
39461- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39462- ioc->name, rphy));
39463- }
39464-
39465- if (rphy) {
39466- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39467- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39468- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39469- ioc->name, rphy, rphy->dev.release));
39470- }
39471-}
39472-
39473 static inline struct sas_port *
39474 mptsas_get_port(struct mptsas_phyinfo *phy_info)
39475 {
39476diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
39477index 164afa7..b6b2e74 100644
39478--- a/drivers/message/fusion/mptscsih.c
39479+++ b/drivers/message/fusion/mptscsih.c
39480@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
39481
39482 h = shost_priv(SChost);
39483
39484- if (h) {
39485- if (h->info_kbuf == NULL)
39486- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39487- return h->info_kbuf;
39488- h->info_kbuf[0] = '\0';
39489+ if (!h)
39490+ return NULL;
39491
39492- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39493- h->info_kbuf[size-1] = '\0';
39494- }
39495+ if (h->info_kbuf == NULL)
39496+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39497+ return h->info_kbuf;
39498+ h->info_kbuf[0] = '\0';
39499+
39500+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39501+ h->info_kbuf[size-1] = '\0';
39502
39503 return h->info_kbuf;
39504 }
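The mptscsih_info() hunk is a pure control-flow cleanup: the outer "if (h) { ... }" wrapper becomes an early return, dropping one indentation level without changing behaviour. The same guard-clause shape in a self-contained sketch, with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    struct host { char *info_kbuf; };

    static const char *host_info(struct host *h)
    {
        if (!h)                      /* guard clause replaces the outer if */
            return NULL;

        if (h->info_kbuf == NULL &&
            (h->info_kbuf = malloc(0x1000)) == NULL)
            return NULL;             /* allocation failed */

        snprintf(h->info_kbuf, 0x1000, "ioc summary");
        return h->info_kbuf;
    }

    int main(void)
    {
        struct host h = { NULL };
        const char *s = host_info(&h);
        puts(s ? s : "(none)");
        free(h.info_kbuf);
        return 0;
    }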
39505diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
39506index 8001aa6..b137580 100644
39507--- a/drivers/message/i2o/i2o_proc.c
39508+++ b/drivers/message/i2o/i2o_proc.c
39509@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
39510 "Array Controller Device"
39511 };
39512
39513-static char *chtostr(char *tmp, u8 *chars, int n)
39514-{
39515- tmp[0] = 0;
39516- return strncat(tmp, (char *)chars, n);
39517-}
39518-
39519 static int i2o_report_query_status(struct seq_file *seq, int block_status,
39520 char *group)
39521 {
39522@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39523 } *result;
39524
39525 i2o_exec_execute_ddm_table ddm_table;
39526- char tmp[28 + 1];
39527
39528 result = kmalloc(sizeof(*result), GFP_KERNEL);
39529 if (!result)
39530@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39531
39532 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
39533 seq_printf(seq, "%-#8x", ddm_table.module_id);
39534- seq_printf(seq, "%-29s",
39535- chtostr(tmp, ddm_table.module_name_version, 28));
39536+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
39537 seq_printf(seq, "%9d ", ddm_table.data_size);
39538 seq_printf(seq, "%8d", ddm_table.code_size);
39539
39540@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39541
39542 i2o_driver_result_table *result;
39543 i2o_driver_store_table *dst;
39544- char tmp[28 + 1];
39545
39546 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
39547 if (result == NULL)
39548@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39549
39550 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
39551 seq_printf(seq, "%-#8x", dst->module_id);
39552- seq_printf(seq, "%-29s",
39553- chtostr(tmp, dst->module_name_version, 28));
39554- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
39555+ seq_printf(seq, "%-.28s", dst->module_name_version);
39556+ seq_printf(seq, "%-.8s", dst->date);
39557 seq_printf(seq, "%8d ", dst->module_size);
39558 seq_printf(seq, "%8d ", dst->mpb_size);
39559 seq_printf(seq, "0x%04x", dst->module_flags);
39560@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39561 // == (allow) 512d bytes (max)
39562 static u16 *work16 = (u16 *) work32;
39563 int token;
39564- char tmp[16 + 1];
39565
39566 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
39567
39568@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39569 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
39570 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
39571 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
39572- seq_printf(seq, "Vendor info : %s\n",
39573- chtostr(tmp, (u8 *) (work32 + 2), 16));
39574- seq_printf(seq, "Product info : %s\n",
39575- chtostr(tmp, (u8 *) (work32 + 6), 16));
39576- seq_printf(seq, "Description : %s\n",
39577- chtostr(tmp, (u8 *) (work32 + 10), 16));
39578- seq_printf(seq, "Product rev. : %s\n",
39579- chtostr(tmp, (u8 *) (work32 + 14), 8));
39580+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
39581+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
39582+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
39583+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
39584
39585 seq_printf(seq, "Serial number : ");
39586 print_serial_number(seq, (u8 *) (work32 + 16),
39587@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39588 u8 pad[256]; // allow up to 256 byte (max) serial number
39589 } result;
39590
39591- char tmp[24 + 1];
39592-
39593 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
39594
39595 if (token < 0) {
39596@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39597 }
39598
39599 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
39600- seq_printf(seq, "Module name : %s\n",
39601- chtostr(tmp, result.module_name, 24));
39602- seq_printf(seq, "Module revision : %s\n",
39603- chtostr(tmp, result.module_rev, 8));
39604+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
39605+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
39606
39607 seq_printf(seq, "Serial number : ");
39608 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
39609@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39610 u8 instance_number[4];
39611 } result;
39612
39613- char tmp[64 + 1];
39614-
39615 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
39616
39617 if (token < 0) {
39618@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39619 return 0;
39620 }
39621
39622- seq_printf(seq, "Device name : %s\n",
39623- chtostr(tmp, result.device_name, 64));
39624- seq_printf(seq, "Service name : %s\n",
39625- chtostr(tmp, result.service_name, 64));
39626- seq_printf(seq, "Physical name : %s\n",
39627- chtostr(tmp, result.physical_location, 64));
39628- seq_printf(seq, "Instance number : %s\n",
39629- chtostr(tmp, result.instance_number, 4));
39630+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
39631+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
39632+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
39633+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
39634
39635 return 0;
39636 }
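All of the i2o_proc.c hunks replace the chtostr() bounce buffer with a printf precision such as "%.28s". The fields being printed are fixed-width and not guaranteed to be NUL-terminated; "%.Ns" reads at most N bytes and also stops at an earlier NUL, so the temporary array and the strncat() copy become unnecessary. A short demonstration:

    #include <stdio.h>

    int main(void)
    {
        /* fixed-width field with no terminating NUL, like the 28-byte
         * module_name_version field in the I2O tables */
        char field[8] = { 'M', 'O', 'D', 'U', 'L', 'E', '-', 'X' };

        printf("name: %.8s\n", field);  /* reads at most 8 bytes: safe */
        return 0;
    }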
39637diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
39638index a8c08f3..155fe3d 100644
39639--- a/drivers/message/i2o/iop.c
39640+++ b/drivers/message/i2o/iop.c
39641@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
39642
39643 spin_lock_irqsave(&c->context_list_lock, flags);
39644
39645- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
39646- atomic_inc(&c->context_list_counter);
39647+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
39648+ atomic_inc_unchecked(&c->context_list_counter);
39649
39650- entry->context = atomic_read(&c->context_list_counter);
39651+ entry->context = atomic_read_unchecked(&c->context_list_counter);
39652
39653 list_add(&entry->list, &c->context_list);
39654
39655@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
39656
39657 #if BITS_PER_LONG == 64
39658 spin_lock_init(&c->context_list_lock);
39659- atomic_set(&c->context_list_counter, 0);
39660+ atomic_set_unchecked(&c->context_list_counter, 0);
39661 INIT_LIST_HEAD(&c->context_list);
39662 #endif
39663
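The *_unchecked atomic variants in the iop.c hunk are grsecurity/PaX additions, not mainline API: PaX's REFCOUNT hardening traps on signed atomic overflow, so counters that are expected to wrap, like this context-ID allocator, must be converted to the unchecked type to opt out. A hedged kernel-style sketch of the intent; atomic_unchecked_t and its helpers exist only in PaX-patched trees:

    /* sketch, not mainline: requires a grsecurity/PaX tree */
    static atomic_unchecked_t context_id = ATOMIC_INIT(0);

    static u32 next_context_id(void)
    {
        /* an ID generator may legitimately wrap around; "unchecked"
         * keeps the REFCOUNT overflow detector from treating the
         * wrap as an exploit attempt */
        return (u32)atomic_inc_return_unchecked(&context_id);
    }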
39664diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
39665index 45ece11..8efa218 100644
39666--- a/drivers/mfd/janz-cmodio.c
39667+++ b/drivers/mfd/janz-cmodio.c
39668@@ -13,6 +13,7 @@
39669
39670 #include <linux/kernel.h>
39671 #include <linux/module.h>
39672+#include <linux/slab.h>
39673 #include <linux/init.h>
39674 #include <linux/pci.h>
39675 #include <linux/interrupt.h>
39676diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
39677index a5f9888..1c0ed56 100644
39678--- a/drivers/mfd/twl4030-irq.c
39679+++ b/drivers/mfd/twl4030-irq.c
39680@@ -35,6 +35,7 @@
39681 #include <linux/of.h>
39682 #include <linux/irqdomain.h>
39683 #include <linux/i2c/twl.h>
39684+#include <asm/pgtable.h>
39685
39686 #include "twl-core.h"
39687
39688@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
39689 * Install an irq handler for each of the SIH modules;
39690 * clone dummy irq_chip since PIH can't *do* anything
39691 */
39692- twl4030_irq_chip = dummy_irq_chip;
39693- twl4030_irq_chip.name = "twl4030";
39694+ pax_open_kernel();
39695+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
39696+ *(const char **)&twl4030_irq_chip.name = "twl4030";
39697
39698- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39699+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39700+ pax_close_kernel();
39701
39702 for (i = irq_base; i < irq_end; i++) {
39703 irq_set_chip_and_handler(i, &twl4030_irq_chip,
39704diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
39705index 277a8db..0e0b754 100644
39706--- a/drivers/mfd/twl6030-irq.c
39707+++ b/drivers/mfd/twl6030-irq.c
39708@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
39709 * install an irq handler for each of the modules;
39710 * clone dummy irq_chip since PIH can't *do* anything
39711 */
39712- twl6030_irq_chip = dummy_irq_chip;
39713- twl6030_irq_chip.name = "twl6030";
39714- twl6030_irq_chip.irq_set_type = NULL;
39715- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39716+ pax_open_kernel();
39717+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
39718+ *(const char **)&twl6030_irq_chip.name = "twl6030";
39719+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
39720+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39721+ pax_close_kernel();
39722
39723 for (i = irq_base; i < irq_end; i++) {
39724 irq_set_chip_and_handler(i, &twl6030_irq_chip,
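The twl4030/twl6030 hunks exist because PaX's constify plugin moves structures made up of function pointers, such as struct irq_chip, into read-only memory; plain assignments to them fault, so writes are bracketed by pax_open_kernel()/pax_close_kernel() and the casts strip the implied const. Those helpers are PaX-only, but the idea can be shown portably in userspace with mprotect():

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);

        /* stand-in for a constified ops structure */
        char *ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        strcpy(ops, "dummy_irq_chip");

        mprotect(ops, page, PROT_READ);              /* normally read-only */

        mprotect(ops, page, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
        strcpy(ops, "twl6030");                      /* the patched write  */
        mprotect(ops, page, PROT_READ);              /* pax_close_kernel() */

        puts(ops);
        return 0;
    }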
39725diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
39726index f428d86..274c368 100644
39727--- a/drivers/misc/c2port/core.c
39728+++ b/drivers/misc/c2port/core.c
39729@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
39730 mutex_init(&c2dev->mutex);
39731
39732 /* Create binary file */
39733- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39734+ pax_open_kernel();
39735+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39736+ pax_close_kernel();
39737 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
39738 if (unlikely(ret))
39739 goto error_device_create_bin_file;
39740diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39741index 3aa9a96..59cf685 100644
39742--- a/drivers/misc/kgdbts.c
39743+++ b/drivers/misc/kgdbts.c
39744@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
39745 char before[BREAK_INSTR_SIZE];
39746 char after[BREAK_INSTR_SIZE];
39747
39748- probe_kernel_read(before, (char *)kgdbts_break_test,
39749+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
39750 BREAK_INSTR_SIZE);
39751 init_simple_test();
39752 ts.tst = plant_and_detach_test;
39753@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
39754 /* Activate test with initial breakpoint */
39755 if (!is_early)
39756 kgdb_breakpoint();
39757- probe_kernel_read(after, (char *)kgdbts_break_test,
39758+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
39759 BREAK_INSTR_SIZE);
39760 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
39761 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
39762diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39763index 4a87e5c..76bdf5c 100644
39764--- a/drivers/misc/lis3lv02d/lis3lv02d.c
39765+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39766@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
39767 * the lid is closed. This leads to interrupts as soon as a little move
39768 * is done.
39769 */
39770- atomic_inc(&lis3->count);
39771+ atomic_inc_unchecked(&lis3->count);
39772
39773 wake_up_interruptible(&lis3->misc_wait);
39774 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
39775@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
39776 if (lis3->pm_dev)
39777 pm_runtime_get_sync(lis3->pm_dev);
39778
39779- atomic_set(&lis3->count, 0);
39780+ atomic_set_unchecked(&lis3->count, 0);
39781 return 0;
39782 }
39783
39784@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
39785 add_wait_queue(&lis3->misc_wait, &wait);
39786 while (true) {
39787 set_current_state(TASK_INTERRUPTIBLE);
39788- data = atomic_xchg(&lis3->count, 0);
39789+ data = atomic_xchg_unchecked(&lis3->count, 0);
39790 if (data)
39791 break;
39792
39793@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
39794 struct lis3lv02d, miscdev);
39795
39796 poll_wait(file, &lis3->misc_wait, wait);
39797- if (atomic_read(&lis3->count))
39798+ if (atomic_read_unchecked(&lis3->count))
39799 return POLLIN | POLLRDNORM;
39800 return 0;
39801 }
39802diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
39803index c439c82..1f20f57 100644
39804--- a/drivers/misc/lis3lv02d/lis3lv02d.h
39805+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
39806@@ -297,7 +297,7 @@ struct lis3lv02d {
39807 struct input_polled_dev *idev; /* input device */
39808 struct platform_device *pdev; /* platform device */
39809 struct regulator_bulk_data regulators[2];
39810- atomic_t count; /* interrupt count after last read */
39811+ atomic_unchecked_t count; /* interrupt count after last read */
39812 union axis_conversion ac; /* hw -> logical axis */
39813 int mapped_btns[3];
39814
39815diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39816index 2f30bad..c4c13d0 100644
39817--- a/drivers/misc/sgi-gru/gruhandles.c
39818+++ b/drivers/misc/sgi-gru/gruhandles.c
39819@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39820 unsigned long nsec;
39821
39822 nsec = CLKS2NSEC(clks);
39823- atomic_long_inc(&mcs_op_statistics[op].count);
39824- atomic_long_add(nsec, &mcs_op_statistics[op].total);
39825+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39826+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
39827 if (mcs_op_statistics[op].max < nsec)
39828 mcs_op_statistics[op].max = nsec;
39829 }
39830diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39831index 950dbe9..eeef0f8 100644
39832--- a/drivers/misc/sgi-gru/gruprocfs.c
39833+++ b/drivers/misc/sgi-gru/gruprocfs.c
39834@@ -32,9 +32,9 @@
39835
39836 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39837
39838-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39839+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39840 {
39841- unsigned long val = atomic_long_read(v);
39842+ unsigned long val = atomic_long_read_unchecked(v);
39843
39844 seq_printf(s, "%16lu %s\n", val, id);
39845 }
39846@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39847
39848 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
39849 for (op = 0; op < mcsop_last; op++) {
39850- count = atomic_long_read(&mcs_op_statistics[op].count);
39851- total = atomic_long_read(&mcs_op_statistics[op].total);
39852+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39853+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39854 max = mcs_op_statistics[op].max;
39855 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39856 count ? total / count : 0, max);
39857diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39858index 5c3ce24..4915ccb 100644
39859--- a/drivers/misc/sgi-gru/grutables.h
39860+++ b/drivers/misc/sgi-gru/grutables.h
39861@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
39862 * GRU statistics.
39863 */
39864 struct gru_stats_s {
39865- atomic_long_t vdata_alloc;
39866- atomic_long_t vdata_free;
39867- atomic_long_t gts_alloc;
39868- atomic_long_t gts_free;
39869- atomic_long_t gms_alloc;
39870- atomic_long_t gms_free;
39871- atomic_long_t gts_double_allocate;
39872- atomic_long_t assign_context;
39873- atomic_long_t assign_context_failed;
39874- atomic_long_t free_context;
39875- atomic_long_t load_user_context;
39876- atomic_long_t load_kernel_context;
39877- atomic_long_t lock_kernel_context;
39878- atomic_long_t unlock_kernel_context;
39879- atomic_long_t steal_user_context;
39880- atomic_long_t steal_kernel_context;
39881- atomic_long_t steal_context_failed;
39882- atomic_long_t nopfn;
39883- atomic_long_t asid_new;
39884- atomic_long_t asid_next;
39885- atomic_long_t asid_wrap;
39886- atomic_long_t asid_reuse;
39887- atomic_long_t intr;
39888- atomic_long_t intr_cbr;
39889- atomic_long_t intr_tfh;
39890- atomic_long_t intr_spurious;
39891- atomic_long_t intr_mm_lock_failed;
39892- atomic_long_t call_os;
39893- atomic_long_t call_os_wait_queue;
39894- atomic_long_t user_flush_tlb;
39895- atomic_long_t user_unload_context;
39896- atomic_long_t user_exception;
39897- atomic_long_t set_context_option;
39898- atomic_long_t check_context_retarget_intr;
39899- atomic_long_t check_context_unload;
39900- atomic_long_t tlb_dropin;
39901- atomic_long_t tlb_preload_page;
39902- atomic_long_t tlb_dropin_fail_no_asid;
39903- atomic_long_t tlb_dropin_fail_upm;
39904- atomic_long_t tlb_dropin_fail_invalid;
39905- atomic_long_t tlb_dropin_fail_range_active;
39906- atomic_long_t tlb_dropin_fail_idle;
39907- atomic_long_t tlb_dropin_fail_fmm;
39908- atomic_long_t tlb_dropin_fail_no_exception;
39909- atomic_long_t tfh_stale_on_fault;
39910- atomic_long_t mmu_invalidate_range;
39911- atomic_long_t mmu_invalidate_page;
39912- atomic_long_t flush_tlb;
39913- atomic_long_t flush_tlb_gru;
39914- atomic_long_t flush_tlb_gru_tgh;
39915- atomic_long_t flush_tlb_gru_zero_asid;
39916+ atomic_long_unchecked_t vdata_alloc;
39917+ atomic_long_unchecked_t vdata_free;
39918+ atomic_long_unchecked_t gts_alloc;
39919+ atomic_long_unchecked_t gts_free;
39920+ atomic_long_unchecked_t gms_alloc;
39921+ atomic_long_unchecked_t gms_free;
39922+ atomic_long_unchecked_t gts_double_allocate;
39923+ atomic_long_unchecked_t assign_context;
39924+ atomic_long_unchecked_t assign_context_failed;
39925+ atomic_long_unchecked_t free_context;
39926+ atomic_long_unchecked_t load_user_context;
39927+ atomic_long_unchecked_t load_kernel_context;
39928+ atomic_long_unchecked_t lock_kernel_context;
39929+ atomic_long_unchecked_t unlock_kernel_context;
39930+ atomic_long_unchecked_t steal_user_context;
39931+ atomic_long_unchecked_t steal_kernel_context;
39932+ atomic_long_unchecked_t steal_context_failed;
39933+ atomic_long_unchecked_t nopfn;
39934+ atomic_long_unchecked_t asid_new;
39935+ atomic_long_unchecked_t asid_next;
39936+ atomic_long_unchecked_t asid_wrap;
39937+ atomic_long_unchecked_t asid_reuse;
39938+ atomic_long_unchecked_t intr;
39939+ atomic_long_unchecked_t intr_cbr;
39940+ atomic_long_unchecked_t intr_tfh;
39941+ atomic_long_unchecked_t intr_spurious;
39942+ atomic_long_unchecked_t intr_mm_lock_failed;
39943+ atomic_long_unchecked_t call_os;
39944+ atomic_long_unchecked_t call_os_wait_queue;
39945+ atomic_long_unchecked_t user_flush_tlb;
39946+ atomic_long_unchecked_t user_unload_context;
39947+ atomic_long_unchecked_t user_exception;
39948+ atomic_long_unchecked_t set_context_option;
39949+ atomic_long_unchecked_t check_context_retarget_intr;
39950+ atomic_long_unchecked_t check_context_unload;
39951+ atomic_long_unchecked_t tlb_dropin;
39952+ atomic_long_unchecked_t tlb_preload_page;
39953+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39954+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39955+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39956+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39957+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39958+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39959+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39960+ atomic_long_unchecked_t tfh_stale_on_fault;
39961+ atomic_long_unchecked_t mmu_invalidate_range;
39962+ atomic_long_unchecked_t mmu_invalidate_page;
39963+ atomic_long_unchecked_t flush_tlb;
39964+ atomic_long_unchecked_t flush_tlb_gru;
39965+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39966+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39967
39968- atomic_long_t copy_gpa;
39969- atomic_long_t read_gpa;
39970+ atomic_long_unchecked_t copy_gpa;
39971+ atomic_long_unchecked_t read_gpa;
39972
39973- atomic_long_t mesq_receive;
39974- atomic_long_t mesq_receive_none;
39975- atomic_long_t mesq_send;
39976- atomic_long_t mesq_send_failed;
39977- atomic_long_t mesq_noop;
39978- atomic_long_t mesq_send_unexpected_error;
39979- atomic_long_t mesq_send_lb_overflow;
39980- atomic_long_t mesq_send_qlimit_reached;
39981- atomic_long_t mesq_send_amo_nacked;
39982- atomic_long_t mesq_send_put_nacked;
39983- atomic_long_t mesq_page_overflow;
39984- atomic_long_t mesq_qf_locked;
39985- atomic_long_t mesq_qf_noop_not_full;
39986- atomic_long_t mesq_qf_switch_head_failed;
39987- atomic_long_t mesq_qf_unexpected_error;
39988- atomic_long_t mesq_noop_unexpected_error;
39989- atomic_long_t mesq_noop_lb_overflow;
39990- atomic_long_t mesq_noop_qlimit_reached;
39991- atomic_long_t mesq_noop_amo_nacked;
39992- atomic_long_t mesq_noop_put_nacked;
39993- atomic_long_t mesq_noop_page_overflow;
39994+ atomic_long_unchecked_t mesq_receive;
39995+ atomic_long_unchecked_t mesq_receive_none;
39996+ atomic_long_unchecked_t mesq_send;
39997+ atomic_long_unchecked_t mesq_send_failed;
39998+ atomic_long_unchecked_t mesq_noop;
39999+ atomic_long_unchecked_t mesq_send_unexpected_error;
40000+ atomic_long_unchecked_t mesq_send_lb_overflow;
40001+ atomic_long_unchecked_t mesq_send_qlimit_reached;
40002+ atomic_long_unchecked_t mesq_send_amo_nacked;
40003+ atomic_long_unchecked_t mesq_send_put_nacked;
40004+ atomic_long_unchecked_t mesq_page_overflow;
40005+ atomic_long_unchecked_t mesq_qf_locked;
40006+ atomic_long_unchecked_t mesq_qf_noop_not_full;
40007+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
40008+ atomic_long_unchecked_t mesq_qf_unexpected_error;
40009+ atomic_long_unchecked_t mesq_noop_unexpected_error;
40010+ atomic_long_unchecked_t mesq_noop_lb_overflow;
40011+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
40012+ atomic_long_unchecked_t mesq_noop_amo_nacked;
40013+ atomic_long_unchecked_t mesq_noop_put_nacked;
40014+ atomic_long_unchecked_t mesq_noop_page_overflow;
40015
40016 };
40017
40018@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
40019 tghop_invalidate, mcsop_last};
40020
40021 struct mcs_op_statistic {
40022- atomic_long_t count;
40023- atomic_long_t total;
40024+ atomic_long_unchecked_t count;
40025+ atomic_long_unchecked_t total;
40026 unsigned long max;
40027 };
40028
40029@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
40030
40031 #define STAT(id) do { \
40032 if (gru_options & OPT_STATS) \
40033- atomic_long_inc(&gru_stats.id); \
40034+ atomic_long_inc_unchecked(&gru_stats.id); \
40035 } while (0)
40036
40037 #ifdef CONFIG_SGI_GRU_DEBUG
40038diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
40039index c862cd4..0d176fe 100644
40040--- a/drivers/misc/sgi-xp/xp.h
40041+++ b/drivers/misc/sgi-xp/xp.h
40042@@ -288,7 +288,7 @@ struct xpc_interface {
40043 xpc_notify_func, void *);
40044 void (*received) (short, int, void *);
40045 enum xp_retval (*partid_to_nasids) (short, void *);
40046-};
40047+} __no_const;
40048
40049 extern struct xpc_interface xpc_interface;
40050
40051diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
40052index b94d5f7..7f494c5 100644
40053--- a/drivers/misc/sgi-xp/xpc.h
40054+++ b/drivers/misc/sgi-xp/xpc.h
40055@@ -835,6 +835,7 @@ struct xpc_arch_operations {
40056 void (*received_payload) (struct xpc_channel *, void *);
40057 void (*notify_senders_of_disconnect) (struct xpc_channel *);
40058 };
40059+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
40060
40061 /* struct xpc_partition act_state values (for XPC HB) */
40062
40063@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
40064 /* found in xpc_main.c */
40065 extern struct device *xpc_part;
40066 extern struct device *xpc_chan;
40067-extern struct xpc_arch_operations xpc_arch_ops;
40068+extern xpc_arch_operations_no_const xpc_arch_ops;
40069 extern int xpc_disengage_timelimit;
40070 extern int xpc_disengage_timedout;
40071 extern int xpc_activate_IRQ_rcvd;
40072diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
40073index d971817..33bdca5 100644
40074--- a/drivers/misc/sgi-xp/xpc_main.c
40075+++ b/drivers/misc/sgi-xp/xpc_main.c
40076@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
40077 .notifier_call = xpc_system_die,
40078 };
40079
40080-struct xpc_arch_operations xpc_arch_ops;
40081+xpc_arch_operations_no_const xpc_arch_ops;
40082
40083 /*
40084 * Timer function to enforce the timelimit on the partition disengage.
40085@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
40086
40087 if (((die_args->trapnr == X86_TRAP_MF) ||
40088 (die_args->trapnr == X86_TRAP_XF)) &&
40089- !user_mode_vm(die_args->regs))
40090+ !user_mode(die_args->regs))
40091 xpc_die_deactivate();
40092
40093 break;
40094diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
40095index 6d8f701..35b6369 100644
40096--- a/drivers/mmc/core/mmc_ops.c
40097+++ b/drivers/mmc/core/mmc_ops.c
40098@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
40099 void *data_buf;
40100 int is_on_stack;
40101
40102- is_on_stack = object_is_on_stack(buf);
40103+ is_on_stack = object_starts_on_stack(buf);
40104 if (is_on_stack) {
40105 /*
40106 * dma onto stack is unsafe/nonportable, but callers to this
40107diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
40108index 53b8fd9..615b462 100644
40109--- a/drivers/mmc/host/dw_mmc.h
40110+++ b/drivers/mmc/host/dw_mmc.h
40111@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
40112 int (*parse_dt)(struct dw_mci *host);
40113 int (*setup_bus)(struct dw_mci *host,
40114 struct device_node *slot_np, u8 bus_width);
40115-};
40116+} __do_const;
40117 #endif /* _DW_MMC_H_ */
40118diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
40119index 82a8de1..3c56ccb 100644
40120--- a/drivers/mmc/host/sdhci-s3c.c
40121+++ b/drivers/mmc/host/sdhci-s3c.c
40122@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
40123 * we can use overriding functions instead of default.
40124 */
40125 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
40126- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
40127- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
40128- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
40129+ pax_open_kernel();
40130+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
40131+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
40132+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
40133+ pax_close_kernel();
40134 }
40135
40136 /* It supports additional host capabilities if needed */
40137diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
40138index a4eb8b5..8c0628f 100644
40139--- a/drivers/mtd/devices/doc2000.c
40140+++ b/drivers/mtd/devices/doc2000.c
40141@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
40142
40143 /* The ECC will not be calculated correctly if less than 512 is written */
40144 /* DBB-
40145- if (len != 0x200 && eccbuf)
40146+ if (len != 0x200)
40147 printk(KERN_WARNING
40148 "ECC needs a full sector write (adr: %lx size %lx)\n",
40149 (long) to, (long) len);
40150diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
40151index 0c8bb6b..6f35deb 100644
40152--- a/drivers/mtd/nand/denali.c
40153+++ b/drivers/mtd/nand/denali.c
40154@@ -24,6 +24,7 @@
40155 #include <linux/slab.h>
40156 #include <linux/mtd/mtd.h>
40157 #include <linux/module.h>
40158+#include <linux/slab.h>
40159
40160 #include "denali.h"
40161
40162diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
40163index 51b9d6a..52af9a7 100644
40164--- a/drivers/mtd/nftlmount.c
40165+++ b/drivers/mtd/nftlmount.c
40166@@ -24,6 +24,7 @@
40167 #include <asm/errno.h>
40168 #include <linux/delay.h>
40169 #include <linux/slab.h>
40170+#include <linux/sched.h>
40171 #include <linux/mtd/mtd.h>
40172 #include <linux/mtd/nand.h>
40173 #include <linux/mtd/nftl.h>
40174diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
40175index 8dd6ba5..419cc1d 100644
40176--- a/drivers/mtd/sm_ftl.c
40177+++ b/drivers/mtd/sm_ftl.c
40178@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
40179 #define SM_CIS_VENDOR_OFFSET 0x59
40180 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
40181 {
40182- struct attribute_group *attr_group;
40183+ attribute_group_no_const *attr_group;
40184 struct attribute **attributes;
40185 struct sm_sysfs_attribute *vendor_attribute;
40186
40187diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
40188index 27cdf1f..8c37357 100644
40189--- a/drivers/net/bonding/bond_main.c
40190+++ b/drivers/net/bonding/bond_main.c
40191@@ -4859,7 +4859,7 @@ static unsigned int bond_get_num_tx_queues(void)
40192 return tx_queues;
40193 }
40194
40195-static struct rtnl_link_ops bond_link_ops __read_mostly = {
40196+static struct rtnl_link_ops bond_link_ops = {
40197 .kind = "bond",
40198 .priv_size = sizeof(struct bonding),
40199 .setup = bond_setup,
40200@@ -4975,8 +4975,8 @@ static void __exit bonding_exit(void)
40201
40202 bond_destroy_debugfs();
40203
40204- rtnl_link_unregister(&bond_link_ops);
40205 unregister_pernet_subsys(&bond_net_ops);
40206+ rtnl_link_unregister(&bond_link_ops);
40207
40208 #ifdef CONFIG_NET_POLL_CONTROLLER
40209 /*
40210diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
40211index 70dba5d..11a0919 100644
40212--- a/drivers/net/ethernet/8390/ax88796.c
40213+++ b/drivers/net/ethernet/8390/ax88796.c
40214@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
40215 if (ax->plat->reg_offsets)
40216 ei_local->reg_offset = ax->plat->reg_offsets;
40217 else {
40218+ resource_size_t _mem_size = mem_size;
40219+ do_div(_mem_size, 0x18);
40220 ei_local->reg_offset = ax->reg_offsets;
40221 for (ret = 0; ret < 0x18; ret++)
40222- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
40223+ ax->reg_offsets[ret] = _mem_size * ret;
40224 }
40225
40226 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
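The ax88796 hunk swaps a plain division for do_div() because resource_size_t can be 64-bit even on 32-bit ARM, and dividing it with "/" makes gcc emit a call to the libgcc helper __udivdi3, which the kernel does not link against. do_div(x, base) divides the u64 x in place and returns the remainder. A hedged kernel-style sketch of that contract; it compiles in-tree, not standalone:

    #include <linux/types.h>
    #include <asm/div64.h>           /* do_div() */

    static u32 reg_step(resource_size_t mem_size)
    {
        u64 tmp = mem_size;          /* do_div() needs a u64 lvalue */
        u32 rem = do_div(tmp, 0x18); /* tmp /= 0x18; returns tmp % 0x18 */

        (void)rem;                   /* the driver only wants the quotient */
        return (u32)tmp;
    }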
40227diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40228index 0991534..8098e92 100644
40229--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40230+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40231@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
40232 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
40233 {
40234 /* RX_MODE controlling object */
40235- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
40236+ bnx2x_init_rx_mode_obj(bp);
40237
40238 /* multicast configuration controlling object */
40239 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
40240diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
40241index 10bc093..a2fb42a 100644
40242--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
40243+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
40244@@ -2136,12 +2136,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
40245 break;
40246 default:
40247 BNX2X_ERR("Non valid capability ID\n");
40248- rval = -EINVAL;
40249+ rval = 1;
40250 break;
40251 }
40252 } else {
40253 DP(BNX2X_MSG_DCB, "DCB disabled\n");
40254- rval = -EINVAL;
40255+ rval = 1;
40256 }
40257
40258 DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
40259@@ -2167,12 +2167,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
40260 break;
40261 default:
40262 BNX2X_ERR("Non valid TC-ID\n");
40263- rval = -EINVAL;
40264+ rval = 1;
40265 break;
40266 }
40267 } else {
40268 DP(BNX2X_MSG_DCB, "DCB disabled\n");
40269- rval = -EINVAL;
40270+ rval = 1;
40271 }
40272
40273 return rval;
40274@@ -2185,7 +2185,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
40275 return -EINVAL;
40276 }
40277
40278-static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
40279+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
40280 {
40281 struct bnx2x *bp = netdev_priv(netdev);
40282 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
40283@@ -2387,12 +2387,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
40284 break;
40285 default:
40286 BNX2X_ERR("Non valid featrue-ID\n");
40287- rval = -EINVAL;
40288+ rval = 1;
40289 break;
40290 }
40291 } else {
40292 DP(BNX2X_MSG_DCB, "DCB disabled\n");
40293- rval = -EINVAL;
40294+ rval = 1;
40295 }
40296
40297 return rval;
40298@@ -2428,12 +2428,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
40299 break;
40300 default:
40301 BNX2X_ERR("Non valid featrue-ID\n");
40302- rval = -EINVAL;
40303+ rval = 1;
40304 break;
40305 }
40306 } else {
40307 DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
40308- rval = -EINVAL;
40309+ rval = 1;
40310 }
40311
40312 return rval;
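The repeated -EINVAL to 1 changes in bnx2x_dcb.c are not a style choice: these dcbnl hooks return u8, and stuffing -EINVAL (-22 on Linux) into an unsigned byte yields 234, which callers cannot recognize as an errno; the contract for these hooks is simply nonzero on failure. The truncation in a few lines of portable C:

    #include <stdio.h>
    #include <errno.h>   /* EINVAL is 22 on Linux */

    static unsigned char get_cap_bad(void)  { return -EINVAL; } /* 234 */
    static unsigned char get_cap_good(void) { return 1; }       /* fail */

    int main(void)
    {
        printf("bad=%u good=%u\n",
               (unsigned)get_cap_bad(), (unsigned)get_cap_good());
        return 0;
    }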
40313diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40314index 5523da3..4fcf274 100644
40315--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40316+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40317@@ -4767,7 +4767,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
40318 q);
40319 }
40320
40321- if (!NO_FCOE(bp)) {
40322+ if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
40323 fp = &bp->fp[FCOE_IDX(bp)];
40324 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
40325
40326@@ -13047,6 +13047,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
40327 RCU_INIT_POINTER(bp->cnic_ops, NULL);
40328 mutex_unlock(&bp->cnic_mutex);
40329 synchronize_rcu();
40330+ bp->cnic_enabled = false;
40331 kfree(bp->cnic_kwq);
40332 bp->cnic_kwq = NULL;
40333
40334diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40335index 09b625e..15b16fe 100644
40336--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40337+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40338@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
40339 return rc;
40340 }
40341
40342-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
40343- struct bnx2x_rx_mode_obj *o)
40344+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
40345 {
40346 if (CHIP_IS_E1x(bp)) {
40347- o->wait_comp = bnx2x_empty_rx_mode_wait;
40348- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
40349+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
40350+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
40351 } else {
40352- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
40353- o->config_rx_mode = bnx2x_set_rx_mode_e2;
40354+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
40355+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
40356 }
40357 }
40358
40359diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40360index adbd91b..58ec94a 100644
40361--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40362+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40363@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
40364
40365 /********************* RX MODE ****************/
40366
40367-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
40368- struct bnx2x_rx_mode_obj *o);
40369+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
40370
40371 /**
40372 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
40373diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
40374index 6f9b74c..7f219b8 100644
40375--- a/drivers/net/ethernet/broadcom/tg3.h
40376+++ b/drivers/net/ethernet/broadcom/tg3.h
40377@@ -146,6 +146,7 @@
40378 #define CHIPREV_ID_5750_A0 0x4000
40379 #define CHIPREV_ID_5750_A1 0x4001
40380 #define CHIPREV_ID_5750_A3 0x4003
40381+#define CHIPREV_ID_5750_C1 0x4201
40382 #define CHIPREV_ID_5750_C2 0x4202
40383 #define CHIPREV_ID_5752_A0_HW 0x5000
40384 #define CHIPREV_ID_5752_A0 0x6000
40385diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40386index 8cffcdf..aadf043 100644
40387--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40388+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40389@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
40390 */
40391 struct l2t_skb_cb {
40392 arp_failure_handler_func arp_failure_handler;
40393-};
40394+} __no_const;
40395
40396 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
40397
40398diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
40399index 4c83003..2a2a5b9 100644
40400--- a/drivers/net/ethernet/dec/tulip/de4x5.c
40401+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
40402@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40403 for (i=0; i<ETH_ALEN; i++) {
40404 tmp.addr[i] = dev->dev_addr[i];
40405 }
40406- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40407+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40408 break;
40409
40410 case DE4X5_SET_HWADDR: /* Set the hardware address */
40411@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40412 spin_lock_irqsave(&lp->lock, flags);
40413 memcpy(&statbuf, &lp->pktStats, ioc->len);
40414 spin_unlock_irqrestore(&lp->lock, flags);
40415- if (copy_to_user(ioc->data, &statbuf, ioc->len))
40416+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
40417 return -EFAULT;
40418 break;
40419 }
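Both de4x5 ioctl hunks clamp a user-supplied length before copy_to_user(): ioc->len comes straight from userspace, and without the bound the copy would read past the end of the fixed-size kernel buffer and disclose adjacent stack memory. The same check in a self-contained sketch; memcpy stands in for copy_to_user and the request struct is illustrative:

    #include <stdio.h>
    #include <string.h>

    struct ioctl_req { void *data; unsigned int len; }; /* len: untrusted */

    static int get_hwaddr(const struct ioctl_req *ioc,
                          const unsigned char addr[6])
    {
        if (ioc->len > 6)        /* reject over-long reads before copying */
            return -14;          /* -EFAULT in the kernel version */
        memcpy(ioc->data, addr, ioc->len);
        return 0;
    }

    int main(void)
    {
        unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char out[6];
        struct ioctl_req ioc = { out, sizeof out };

        if (get_hwaddr(&ioc, mac) == 0)
            printf("%02x:...:%02x\n", out[0], out[5]);
        return 0;
    }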
40420diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
40421index 4d6f3c5..449bc5c 100644
40422--- a/drivers/net/ethernet/emulex/benet/be_main.c
40423+++ b/drivers/net/ethernet/emulex/benet/be_main.c
40424@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
40425
40426 if (wrapped)
40427 newacc += 65536;
40428- ACCESS_ONCE(*acc) = newacc;
40429+ ACCESS_ONCE_RW(*acc) = newacc;
40430 }
40431
40432 void be_parse_stats(struct be_adapter *adapter)
40433@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
40434
40435 if (vlan_tx_tag_present(skb)) {
40436 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
40437- __vlan_put_tag(skb, vlan_tag);
40438- skb->vlan_tci = 0;
40439+ skb = __vlan_put_tag(skb, vlan_tag);
40440+ if (skb)
40441+ skb->vlan_tci = 0;
40442 }
40443
40444 return skb;
40445diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
40446index 74d749e..eefb1bd 100644
40447--- a/drivers/net/ethernet/faraday/ftgmac100.c
40448+++ b/drivers/net/ethernet/faraday/ftgmac100.c
40449@@ -31,6 +31,8 @@
40450 #include <linux/netdevice.h>
40451 #include <linux/phy.h>
40452 #include <linux/platform_device.h>
40453+#include <linux/interrupt.h>
40454+#include <linux/irqreturn.h>
40455 #include <net/ip.h>
40456
40457 #include "ftgmac100.h"
40458diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
40459index b901a01..1ff32ee 100644
40460--- a/drivers/net/ethernet/faraday/ftmac100.c
40461+++ b/drivers/net/ethernet/faraday/ftmac100.c
40462@@ -31,6 +31,8 @@
40463 #include <linux/module.h>
40464 #include <linux/netdevice.h>
40465 #include <linux/platform_device.h>
40466+#include <linux/interrupt.h>
40467+#include <linux/irqreturn.h>
40468
40469 #include "ftmac100.h"
40470
40471diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
40472index a59f077..7925d77 100644
40473--- a/drivers/net/ethernet/intel/e100.c
40474+++ b/drivers/net/ethernet/intel/e100.c
40475@@ -870,7 +870,7 @@ err_unlock:
40476 }
40477
40478 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
40479- void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
40480+ int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
40481 {
40482 struct cb *cb;
40483 unsigned long flags;
40484@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
40485 nic->cbs_avail--;
40486 cb->skb = skb;
40487
40488+ err = cb_prepare(nic, cb, skb);
40489+ if (err)
40490+ goto err_unlock;
40491+
40492 if (unlikely(!nic->cbs_avail))
40493 err = -ENOSPC;
40494
40495- cb_prepare(nic, cb, skb);
40496
40497 /* Order is important otherwise we'll be in a race with h/w:
40498 * set S-bit in current first, then clear S-bit in previous. */
40499@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
40500 nic->mii.mdio_write = mdio_write;
40501 }
40502
40503-static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40504+static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40505 {
40506 struct config *config = &cb->u.config;
40507 u8 *c = (u8 *)config;
40508@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40509 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
40510 "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
40511 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
40512+ return 0;
40513 }
40514
40515 /*************************************************************************
40516@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
40517 return fw;
40518 }
40519
40520-static void e100_setup_ucode(struct nic *nic, struct cb *cb,
40521+static int e100_setup_ucode(struct nic *nic, struct cb *cb,
40522 struct sk_buff *skb)
40523 {
40524 const struct firmware *fw = (void *)skb;
40525@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
40526 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
40527
40528 cb->command = cpu_to_le16(cb_ucode | cb_el);
40529+ return 0;
40530 }
40531
40532 static inline int e100_load_ucode_wait(struct nic *nic)
40533@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
40534 return err;
40535 }
40536
40537-static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
40538+static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
40539 struct sk_buff *skb)
40540 {
40541 cb->command = cpu_to_le16(cb_iaaddr);
40542 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
40543+ return 0;
40544 }
40545
40546-static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40547+static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40548 {
40549 cb->command = cpu_to_le16(cb_dump);
40550 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
40551 offsetof(struct mem, dump_buf));
40552+ return 0;
40553 }
40554
40555 static int e100_phy_check_without_mii(struct nic *nic)
40556@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
40557 return 0;
40558 }
40559
40560-static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40561+static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40562 {
40563 struct net_device *netdev = nic->netdev;
40564 struct netdev_hw_addr *ha;
40565@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
40566 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
40567 ETH_ALEN);
40568 }
40569+ return 0;
40570 }
40571
40572 static void e100_set_multicast_list(struct net_device *netdev)
40573@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
40574 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
40575 }
40576
40577-static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
40578+static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
40579 struct sk_buff *skb)
40580 {
40581+ dma_addr_t dma_addr;
40582 cb->command = nic->tx_command;
40583
40584+ dma_addr = pci_map_single(nic->pdev,
40585+ skb->data, skb->len, PCI_DMA_TODEVICE);
40586+ /* If we can't map the skb, have the upper layer try later */
40587+ if (pci_dma_mapping_error(nic->pdev, dma_addr))
40588+ return -ENOMEM;
40589+
40590 /*
40591 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
40592 * testing, ie sending frames with bad CRC.
40593@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
40594 cb->u.tcb.tcb_byte_count = 0;
40595 cb->u.tcb.threshold = nic->tx_threshold;
40596 cb->u.tcb.tbd_count = 1;
40597- cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
40598- skb->data, skb->len, PCI_DMA_TODEVICE));
40599- /* check for mapping failure? */
40600+ cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
40601 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
40602 skb_tx_timestamp(skb);
40603+ return 0;
40604 }
40605
40606 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
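The e100 hunks change every cb_prepare callback from void to int so that e100_xmit_prepare can map the skb first and propagate a mapping failure instead of handing the hardware a bad descriptor; the old code even carried a "check for mapping failure?" comment and never did. The essential shape, as a hedged kernel-style sketch that compiles only against the driver's own types:

    /* sketch of the patched flow, abbreviated from the e100 driver */
    static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
                                 struct sk_buff *skb)
    {
        dma_addr_t dma_addr = pci_map_single(nic->pdev, skb->data,
                                             skb->len, PCI_DMA_TODEVICE);

        /* fail early: the upper layer will retry the frame later */
        if (pci_dma_mapping_error(nic->pdev, dma_addr))
            return -ENOMEM;

        cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
        cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
        return 0;
    }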
40607diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40608index bb9256a..56d8752 100644
40609--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40610+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40611@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
40612 }
40613
40614 /* update the base incval used to calculate frequency adjustment */
40615- ACCESS_ONCE(adapter->base_incval) = incval;
40616+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
40617 smp_mb();
40618
40619 /* need lock to prevent incorrect read while modifying cyclecounter */
40620diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
40621index c124e67..db9b897 100644
40622--- a/drivers/net/ethernet/lantiq_etop.c
40623+++ b/drivers/net/ethernet/lantiq_etop.c
40624@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
40625 return 0;
40626
40627 err_free:
40628- kfree(dev);
40629+ free_netdev(dev);
40630 err_out:
40631 return err;
40632 }
40633diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40634index fbe5363..266b4e3 100644
40635--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
40636+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40637@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40638 struct __vxge_hw_fifo *fifo;
40639 struct vxge_hw_fifo_config *config;
40640 u32 txdl_size, txdl_per_memblock;
40641- struct vxge_hw_mempool_cbs fifo_mp_callback;
40642+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
40643+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
40644+ };
40645+
40646 struct __vxge_hw_virtualpath *vpath;
40647
40648 if ((vp == NULL) || (attr == NULL)) {
40649@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40650 goto exit;
40651 }
40652
40653- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
40654-
40655 fifo->mempool =
40656 __vxge_hw_mempool_create(vpath->hldev,
40657 fifo->config->memblock_size,
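The vxge hunk turns a stack-local callback table into a function-scope static with a designated initializer: the runtime assignment to item_func_alloc goes away, which is what the constify plugin requires of function-pointer structures, and a static object's address also stays valid if the callee keeps the pointer beyond the call. The pattern in miniature, with illustrative names:

    #include <stdio.h>

    struct mempool_cbs { void (*item_alloc)(void); };

    static void my_item_alloc(void) { puts("alloc"); }

    static const struct mempool_cbs *get_cbs(void)
    {
        /* static: initialized at compile time, no runtime writes, and
         * its address outlives the call frame, unlike a stack local */
        static const struct mempool_cbs cbs = {
            .item_alloc = my_item_alloc,
        };
        return &cbs;
    }

    int main(void)
    {
        get_cbs()->item_alloc();
        return 0;
    }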
40658diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
40659index 2d849da..23bba3b 100644
40660--- a/drivers/net/ethernet/realtek/r8169.c
40661+++ b/drivers/net/ethernet/realtek/r8169.c
40662@@ -741,22 +741,22 @@ struct rtl8169_private {
40663 struct mdio_ops {
40664 void (*write)(struct rtl8169_private *, int, int);
40665 int (*read)(struct rtl8169_private *, int);
40666- } mdio_ops;
40667+ } __no_const mdio_ops;
40668
40669 struct pll_power_ops {
40670 void (*down)(struct rtl8169_private *);
40671 void (*up)(struct rtl8169_private *);
40672- } pll_power_ops;
40673+ } __no_const pll_power_ops;
40674
40675 struct jumbo_ops {
40676 void (*enable)(struct rtl8169_private *);
40677 void (*disable)(struct rtl8169_private *);
40678- } jumbo_ops;
40679+ } __no_const jumbo_ops;
40680
40681 struct csi_ops {
40682 void (*write)(struct rtl8169_private *, int, int);
40683 u32 (*read)(struct rtl8169_private *, int);
40684- } csi_ops;
40685+ } __no_const csi_ops;
40686
40687 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
40688 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
40689diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
40690index 3f93624..cf01144 100644
40691--- a/drivers/net/ethernet/sfc/ptp.c
40692+++ b/drivers/net/ethernet/sfc/ptp.c
40693@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
40694 (u32)((u64)ptp->start.dma_addr >> 32));
40695
40696 /* Clear flag that signals MC ready */
40697- ACCESS_ONCE(*start) = 0;
40698+ ACCESS_ONCE_RW(*start) = 0;
40699 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
40700 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
40701
40702diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40703index 0c74a70..3bc6f68 100644
40704--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40705+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40706@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
40707
40708 writel(value, ioaddr + MMC_CNTRL);
40709
40710- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40711- MMC_CNTRL, value);
40712+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40713+// MMC_CNTRL, value);
40714 }
40715
40716 /* To mask all all interrupts.*/
40717diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
40718index e6fe0d8..2b7d752 100644
40719--- a/drivers/net/hyperv/hyperv_net.h
40720+++ b/drivers/net/hyperv/hyperv_net.h
40721@@ -101,7 +101,7 @@ struct rndis_device {
40722
40723 enum rndis_device_state state;
40724 bool link_state;
40725- atomic_t new_req_id;
40726+ atomic_unchecked_t new_req_id;
40727
40728 spinlock_t request_lock;
40729 struct list_head req_list;
40730diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
40731index 2b657d4..9903bc0 100644
40732--- a/drivers/net/hyperv/rndis_filter.c
40733+++ b/drivers/net/hyperv/rndis_filter.c
40734@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
40735 * template
40736 */
40737 set = &rndis_msg->msg.set_req;
40738- set->req_id = atomic_inc_return(&dev->new_req_id);
40739+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40740
40741 /* Add to the request list */
40742 spin_lock_irqsave(&dev->request_lock, flags);
40743@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
40744
40745 /* Setup the rndis set */
40746 halt = &request->request_msg.msg.halt_req;
40747- halt->req_id = atomic_inc_return(&dev->new_req_id);
40748+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40749
40750 /* Ignore return since this msg is optional. */
40751 rndis_filter_send_request(dev, request);
40752diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
40753index 1e9cb0b..7839125 100644
40754--- a/drivers/net/ieee802154/fakehard.c
40755+++ b/drivers/net/ieee802154/fakehard.c
40756@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
40757 phy->transmit_power = 0xbf;
40758
40759 dev->netdev_ops = &fake_ops;
40760- dev->ml_priv = &fake_mlme;
40761+ dev->ml_priv = (void *)&fake_mlme;
40762
40763 priv = netdev_priv(dev);
40764 priv->phy = phy;
40765diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
40766index e5cb723..1fc0461 100644
40767--- a/drivers/net/macvlan.c
40768+++ b/drivers/net/macvlan.c
40769@@ -852,13 +852,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
40770 int macvlan_link_register(struct rtnl_link_ops *ops)
40771 {
40772 /* common fields */
40773- ops->priv_size = sizeof(struct macvlan_dev);
40774- ops->validate = macvlan_validate;
40775- ops->maxtype = IFLA_MACVLAN_MAX;
40776- ops->policy = macvlan_policy;
40777- ops->changelink = macvlan_changelink;
40778- ops->get_size = macvlan_get_size;
40779- ops->fill_info = macvlan_fill_info;
40780+ pax_open_kernel();
40781+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
40782+ *(void **)&ops->validate = macvlan_validate;
40783+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
40784+ *(const void **)&ops->policy = macvlan_policy;
40785+ *(void **)&ops->changelink = macvlan_changelink;
40786+ *(void **)&ops->get_size = macvlan_get_size;
40787+ *(void **)&ops->fill_info = macvlan_fill_info;
40788+ pax_close_kernel();
40789
40790 return rtnl_link_register(ops);
40791 };
40792@@ -914,7 +916,7 @@ static int macvlan_device_event(struct notifier_block *unused,
40793 return NOTIFY_DONE;
40794 }
40795
40796-static struct notifier_block macvlan_notifier_block __read_mostly = {
40797+static struct notifier_block macvlan_notifier_block = {
40798 .notifier_call = macvlan_device_event,
40799 };
40800
40801diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
40802index 0f0f9ce..0ca5819 100644
40803--- a/drivers/net/macvtap.c
40804+++ b/drivers/net/macvtap.c
40805@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
40806 return NOTIFY_DONE;
40807 }
40808
40809-static struct notifier_block macvtap_notifier_block __read_mostly = {
40810+static struct notifier_block macvtap_notifier_block = {
40811 .notifier_call = macvtap_device_event,
40812 };
40813
40814diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
40815index daec9b0..6428fcb 100644
40816--- a/drivers/net/phy/mdio-bitbang.c
40817+++ b/drivers/net/phy/mdio-bitbang.c
40818@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
40819 struct mdiobb_ctrl *ctrl = bus->priv;
40820
40821 module_put(ctrl->ops->owner);
40822+ mdiobus_unregister(bus);
40823 mdiobus_free(bus);
40824 }
40825 EXPORT_SYMBOL(free_mdio_bitbang);
40826diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
40827index 508570e..f706dc7 100644
40828--- a/drivers/net/ppp/ppp_generic.c
40829+++ b/drivers/net/ppp/ppp_generic.c
40830@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40831 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
40832 struct ppp_stats stats;
40833 struct ppp_comp_stats cstats;
40834- char *vers;
40835
40836 switch (cmd) {
40837 case SIOCGPPPSTATS:
40838@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40839 break;
40840
40841 case SIOCGPPPVER:
40842- vers = PPP_VERSION;
40843- if (copy_to_user(addr, vers, strlen(vers) + 1))
40844+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
40845 break;
40846 err = 0;
40847 break;
40848diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
40849index 8efe47a..a8075c5 100644
40850--- a/drivers/net/team/team.c
40851+++ b/drivers/net/team/team.c
40852@@ -2603,7 +2603,7 @@ static int team_device_event(struct notifier_block *unused,
40853 return NOTIFY_DONE;
40854 }
40855
40856-static struct notifier_block team_notifier_block __read_mostly = {
40857+static struct notifier_block team_notifier_block = {
40858 .notifier_call = team_device_event,
40859 };
40860
40861diff --git a/drivers/net/tun.c b/drivers/net/tun.c
40862index cb95fe5..16909e2 100644
40863--- a/drivers/net/tun.c
40864+++ b/drivers/net/tun.c
40865@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
40866
40867 if (tun->flags & TUN_TAP_MQ &&
40868 (tun->numqueues + tun->numdisabled > 1))
40869- return err;
40870+ return -EBUSY;
40871 }
40872 else {
40873 char *name;
40874@@ -1838,7 +1838,7 @@ unlock:
40875 }
40876
40877 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40878- unsigned long arg, int ifreq_len)
40879+ unsigned long arg, size_t ifreq_len)
40880 {
40881 struct tun_file *tfile = file->private_data;
40882 struct tun_struct *tun;
40883@@ -1850,6 +1850,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40884 int vnet_hdr_sz;
40885 int ret;
40886
40887+ if (ifreq_len > sizeof ifr)
40888+ return -EFAULT;
40889+
40890 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
40891 if (copy_from_user(&ifr, argp, ifreq_len))
40892 return -EFAULT;
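
[annotation] The tun hunk above tightens __tun_chr_ioctl() in two ways: ifreq_len becomes size_t, so a negative caller value converts to a huge unsigned one instead of wrapping through signed arithmetic, and the length is checked against sizeof(ifr) before any copy_from_user() runs. A sketch of the same bounded-copy pattern with hypothetical names (memcpy standing in for copy_from_user):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    struct req { char data[40]; };   /* fixed-size destination, like struct ifreq */

    static int copy_req(struct req *dst, const void *src, size_t len)
    {
        if (len > sizeof(*dst))      /* reject before touching the buffer */
            return -EFAULT;
        memcpy(dst, src, len);       /* stand-in for copy_from_user() */
        return 0;
    }

A negative int passed as len arrives here as a near-SIZE_MAX value and is rejected by the bound check rather than copied.
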
40893diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
40894index 16c8429..6bd9167 100644
40895--- a/drivers/net/usb/cdc_mbim.c
40896+++ b/drivers/net/usb/cdc_mbim.c
40897@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
40898 goto error;
40899
40900 if (skb) {
40901- if (skb->len <= sizeof(ETH_HLEN))
40902+ if (skb->len <= ETH_HLEN)
40903 goto error;
40904
40905 /* mapping VLANs to MBIM sessions:
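
[annotation] The one-character cdc_mbim fix above is a classic sizeof-on-a-macro bug: ETH_HLEN expands to the integer constant 14, so sizeof(ETH_HLEN) is sizeof(int), which is 4 on the relevant ABIs, and the old test let short frames between 5 and 14 bytes through. A sketch that makes the mismatch visible:

    #include <stdio.h>

    #define ETH_HLEN 14   /* same definition as <linux/if_ether.h> */

    int main(void)
    {
        /* prints "14 4" on common ABIs: the old code compared against 4 */
        printf("%d %zu\n", ETH_HLEN, sizeof(ETH_HLEN));
        return 0;
    }
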
40906diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40907index cd8ccb2..cff5144 100644
40908--- a/drivers/net/usb/hso.c
40909+++ b/drivers/net/usb/hso.c
40910@@ -71,7 +71,7 @@
40911 #include <asm/byteorder.h>
40912 #include <linux/serial_core.h>
40913 #include <linux/serial.h>
40914-
40915+#include <asm/local.h>
40916
40917 #define MOD_AUTHOR "Option Wireless"
40918 #define MOD_DESCRIPTION "USB High Speed Option driver"
40919@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
40920 struct urb *urb;
40921
40922 urb = serial->rx_urb[0];
40923- if (serial->port.count > 0) {
40924+ if (atomic_read(&serial->port.count) > 0) {
40925 count = put_rxbuf_data(urb, serial);
40926 if (count == -1)
40927 return;
40928@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
40929 DUMP1(urb->transfer_buffer, urb->actual_length);
40930
40931 /* Anyone listening? */
40932- if (serial->port.count == 0)
40933+ if (atomic_read(&serial->port.count) == 0)
40934 return;
40935
40936 if (status == 0) {
40937@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40938 tty_port_tty_set(&serial->port, tty);
40939
40940 /* check for port already opened, if not set the termios */
40941- serial->port.count++;
40942- if (serial->port.count == 1) {
40943+ if (atomic_inc_return(&serial->port.count) == 1) {
40944 serial->rx_state = RX_IDLE;
40945 /* Force default termio settings */
40946 _hso_serial_set_termios(tty, NULL);
40947@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40948 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40949 if (result) {
40950 hso_stop_serial_device(serial->parent);
40951- serial->port.count--;
40952+ atomic_dec(&serial->port.count);
40953 kref_put(&serial->parent->ref, hso_serial_ref_free);
40954 }
40955 } else {
40956@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40957
40958 /* reset the rts and dtr */
40959 /* do the actual close */
40960- serial->port.count--;
40961+ atomic_dec(&serial->port.count);
40962
40963- if (serial->port.count <= 0) {
40964- serial->port.count = 0;
40965+ if (atomic_read(&serial->port.count) <= 0) {
40966+ atomic_set(&serial->port.count, 0);
40967 tty_port_tty_set(&serial->port, NULL);
40968 if (!usb_gone)
40969 hso_stop_serial_device(serial->parent);
40970@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40971
40972 /* the actual setup */
40973 spin_lock_irqsave(&serial->serial_lock, flags);
40974- if (serial->port.count)
40975+ if (atomic_read(&serial->port.count))
40976 _hso_serial_set_termios(tty, old);
40977 else
40978 tty->termios = *old;
40979@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
40980 D1("Pending read interrupt on port %d\n", i);
40981 spin_lock(&serial->serial_lock);
40982 if (serial->rx_state == RX_IDLE &&
40983- serial->port.count > 0) {
40984+ atomic_read(&serial->port.count) > 0) {
40985 /* Setup and send a ctrl req read on
40986 * port i */
40987 if (!serial->rx_urb_filled[0]) {
40988@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
40989 /* Start all serial ports */
40990 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40991 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40992- if (dev2ser(serial_table[i])->port.count) {
40993+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
40994 result =
40995 hso_start_serial_device(serial_table[i], GFP_NOIO);
40996 hso_kick_transmit(dev2ser(serial_table[i]));
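
[annotation] The hso conversion above is more than annotation churn. Assuming concurrent openers are possible, "port.count++; if (port.count == 1)" is a read-modify-write followed by a separate read, so two openers can both observe 1; atomic_inc_return() folds the increment and the "am I first?" decision into one atomic step. A userspace sketch of the same pattern with C11 atomics in place of the kernel's atomic_t:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int open_count;

    static bool port_open(void)
    {
        /* true only for the opener that took the count from 0 to 1 */
        return atomic_fetch_add(&open_count, 1) + 1 == 1;
    }

    static bool port_close(void)
    {
        /* true only for the closer that dropped the last reference */
        return atomic_fetch_sub(&open_count, 1) - 1 == 0;
    }

    int main(void)
    {
        assert(port_open());     /* first open initializes the device */
        assert(!port_open());    /* second open must not re-init */
        assert(!port_close());
        assert(port_close());    /* last close tears down */
        return 0;
    }
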
40997diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
40998index 6993bfa..9053a34 100644
40999--- a/drivers/net/vxlan.c
41000+++ b/drivers/net/vxlan.c
41001@@ -1428,7 +1428,7 @@ nla_put_failure:
41002 return -EMSGSIZE;
41003 }
41004
41005-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
41006+static struct rtnl_link_ops vxlan_link_ops = {
41007 .kind = "vxlan",
41008 .maxtype = IFLA_VXLAN_MAX,
41009 .policy = vxlan_policy,
41010diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
41011index 77fa428..996b355 100644
41012--- a/drivers/net/wireless/at76c50x-usb.c
41013+++ b/drivers/net/wireless/at76c50x-usb.c
41014@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
41015 }
41016
41017 /* Convert timeout from the DFU status to jiffies */
41018-static inline unsigned long at76_get_timeout(struct dfu_status *s)
41019+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
41020 {
41021 return msecs_to_jiffies((s->poll_timeout[2] << 16)
41022 | (s->poll_timeout[1] << 8)
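
[annotation] __intentional_overflow() above is an annotation for the PaX size_overflow GCC plugin: it marks arithmetic that is allowed to wrap (here, assembling a timeout from three bytes) so the plugin does not instrument it, and -1 appears to cover the function's return value. How such a marker is typically wired up is sketched below; this is illustrative, not the plugin's actual header, and compiles away when the plugin is absent:

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
             __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)   /* no plugin: expands to nothing */
    #endif
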
41023diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
41024index 8d78253..bebbb68 100644
41025--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
41026+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
41027@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
41028 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
41029 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
41030
41031- ACCESS_ONCE(ads->ds_link) = i->link;
41032- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
41033+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
41034+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
41035
41036 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
41037 ctl6 = SM(i->keytype, AR_EncrType);
41038@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
41039
41040 if ((i->is_first || i->is_last) &&
41041 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
41042- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
41043+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
41044 | set11nTries(i->rates, 1)
41045 | set11nTries(i->rates, 2)
41046 | set11nTries(i->rates, 3)
41047 | (i->dur_update ? AR_DurUpdateEna : 0)
41048 | SM(0, AR_BurstDur);
41049
41050- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
41051+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
41052 | set11nRate(i->rates, 1)
41053 | set11nRate(i->rates, 2)
41054 | set11nRate(i->rates, 3);
41055 } else {
41056- ACCESS_ONCE(ads->ds_ctl2) = 0;
41057- ACCESS_ONCE(ads->ds_ctl3) = 0;
41058+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
41059+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
41060 }
41061
41062 if (!i->is_first) {
41063- ACCESS_ONCE(ads->ds_ctl0) = 0;
41064- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
41065- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
41066+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
41067+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
41068+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
41069 return;
41070 }
41071
41072@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
41073 break;
41074 }
41075
41076- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
41077+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
41078 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
41079 | SM(i->txpower, AR_XmitPower)
41080 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
41081@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
41082 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
41083 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
41084
41085- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
41086- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
41087+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
41088+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
41089
41090 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
41091 return;
41092
41093- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
41094+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
41095 | set11nPktDurRTSCTS(i->rates, 1);
41096
41097- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
41098+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
41099 | set11nPktDurRTSCTS(i->rates, 3);
41100
41101- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
41102+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
41103 | set11nRateFlags(i->rates, 1)
41104 | set11nRateFlags(i->rates, 2)
41105 | set11nRateFlags(i->rates, 3)
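
[annotation] The ACCESS_ONCE to ACCESS_ONCE_RW churn in these DMA-descriptor writers exists because, under PaX constification, the plain macro goes through a const volatile lvalue, turning any store through it into a compile error; the _RW variant keeps the historical writable behavior for descriptor fields that are legitimately stored to. A sketch following the kernel's classic ACCESS_ONCE shape:

    /* read-only flavor: assignment through it fails to compile */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
    /* writable flavor for fields that must be stored to */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
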
41106diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
41107index 301bf72..3f5654f 100644
41108--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
41109+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
41110@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
41111 (i->qcu << AR_TxQcuNum_S) | desc_len;
41112
41113 checksum += val;
41114- ACCESS_ONCE(ads->info) = val;
41115+ ACCESS_ONCE_RW(ads->info) = val;
41116
41117 checksum += i->link;
41118- ACCESS_ONCE(ads->link) = i->link;
41119+ ACCESS_ONCE_RW(ads->link) = i->link;
41120
41121 checksum += i->buf_addr[0];
41122- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
41123+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
41124 checksum += i->buf_addr[1];
41125- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
41126+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
41127 checksum += i->buf_addr[2];
41128- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
41129+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
41130 checksum += i->buf_addr[3];
41131- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
41132+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
41133
41134 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
41135- ACCESS_ONCE(ads->ctl3) = val;
41136+ ACCESS_ONCE_RW(ads->ctl3) = val;
41137 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
41138- ACCESS_ONCE(ads->ctl5) = val;
41139+ ACCESS_ONCE_RW(ads->ctl5) = val;
41140 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
41141- ACCESS_ONCE(ads->ctl7) = val;
41142+ ACCESS_ONCE_RW(ads->ctl7) = val;
41143 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
41144- ACCESS_ONCE(ads->ctl9) = val;
41145+ ACCESS_ONCE_RW(ads->ctl9) = val;
41146
41147 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
41148- ACCESS_ONCE(ads->ctl10) = checksum;
41149+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
41150
41151 if (i->is_first || i->is_last) {
41152- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
41153+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
41154 | set11nTries(i->rates, 1)
41155 | set11nTries(i->rates, 2)
41156 | set11nTries(i->rates, 3)
41157 | (i->dur_update ? AR_DurUpdateEna : 0)
41158 | SM(0, AR_BurstDur);
41159
41160- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
41161+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
41162 | set11nRate(i->rates, 1)
41163 | set11nRate(i->rates, 2)
41164 | set11nRate(i->rates, 3);
41165 } else {
41166- ACCESS_ONCE(ads->ctl13) = 0;
41167- ACCESS_ONCE(ads->ctl14) = 0;
41168+ ACCESS_ONCE_RW(ads->ctl13) = 0;
41169+ ACCESS_ONCE_RW(ads->ctl14) = 0;
41170 }
41171
41172 ads->ctl20 = 0;
41173@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
41174
41175 ctl17 = SM(i->keytype, AR_EncrType);
41176 if (!i->is_first) {
41177- ACCESS_ONCE(ads->ctl11) = 0;
41178- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
41179- ACCESS_ONCE(ads->ctl15) = 0;
41180- ACCESS_ONCE(ads->ctl16) = 0;
41181- ACCESS_ONCE(ads->ctl17) = ctl17;
41182- ACCESS_ONCE(ads->ctl18) = 0;
41183- ACCESS_ONCE(ads->ctl19) = 0;
41184+ ACCESS_ONCE_RW(ads->ctl11) = 0;
41185+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
41186+ ACCESS_ONCE_RW(ads->ctl15) = 0;
41187+ ACCESS_ONCE_RW(ads->ctl16) = 0;
41188+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
41189+ ACCESS_ONCE_RW(ads->ctl18) = 0;
41190+ ACCESS_ONCE_RW(ads->ctl19) = 0;
41191 return;
41192 }
41193
41194- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
41195+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
41196 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
41197 | SM(i->txpower, AR_XmitPower)
41198 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
41199@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
41200 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
41201 ctl12 |= SM(val, AR_PAPRDChainMask);
41202
41203- ACCESS_ONCE(ads->ctl12) = ctl12;
41204- ACCESS_ONCE(ads->ctl17) = ctl17;
41205+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
41206+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
41207
41208- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
41209+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
41210 | set11nPktDurRTSCTS(i->rates, 1);
41211
41212- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
41213+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
41214 | set11nPktDurRTSCTS(i->rates, 3);
41215
41216- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
41217+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
41218 | set11nRateFlags(i->rates, 1)
41219 | set11nRateFlags(i->rates, 2)
41220 | set11nRateFlags(i->rates, 3)
41221 | SM(i->rtscts_rate, AR_RTSCTSRate);
41222
41223- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
41224+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
41225 }
41226
41227 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
41228diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
41229index 9d26fc5..60d9f14 100644
41230--- a/drivers/net/wireless/ath/ath9k/hw.h
41231+++ b/drivers/net/wireless/ath/ath9k/hw.h
41232@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
41233
41234 /* ANI */
41235 void (*ani_cache_ini_regs)(struct ath_hw *ah);
41236-};
41237+} __no_const;
41238
41239 /**
41240 * struct ath_hw_ops - callbacks used by hardware code and driver code
41241@@ -688,7 +688,7 @@ struct ath_hw_ops {
41242 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
41243 struct ath_hw_antcomb_conf *antconf);
41244 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
41245-};
41246+} __no_const;
41247
41248 struct ath_nf_limits {
41249 s16 max;
41250diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
41251index 3726cd6..b655808 100644
41252--- a/drivers/net/wireless/iwlegacy/3945-mac.c
41253+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
41254@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
41255 */
41256 if (il3945_mod_params.disable_hw_scan) {
41257 D_INFO("Disabling hw_scan\n");
41258- il3945_mac_ops.hw_scan = NULL;
41259+ pax_open_kernel();
41260+ *(void **)&il3945_mac_ops.hw_scan = NULL;
41261+ pax_close_kernel();
41262 }
41263
41264 D_INFO("*** LOAD DRIVER ***\n");
41265diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
41266index 5b9533e..7733880 100644
41267--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
41268+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
41269@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
41270 {
41271 struct iwl_priv *priv = file->private_data;
41272 char buf[64];
41273- int buf_size;
41274+ size_t buf_size;
41275 u32 offset, len;
41276
41277 memset(buf, 0, sizeof(buf));
41278@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
41279 struct iwl_priv *priv = file->private_data;
41280
41281 char buf[8];
41282- int buf_size;
41283+ size_t buf_size;
41284 u32 reset_flag;
41285
41286 memset(buf, 0, sizeof(buf));
41287@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
41288 {
41289 struct iwl_priv *priv = file->private_data;
41290 char buf[8];
41291- int buf_size;
41292+ size_t buf_size;
41293 int ht40;
41294
41295 memset(buf, 0, sizeof(buf));
41296@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
41297 {
41298 struct iwl_priv *priv = file->private_data;
41299 char buf[8];
41300- int buf_size;
41301+ size_t buf_size;
41302 int value;
41303
41304 memset(buf, 0, sizeof(buf));
41305@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
41306 {
41307 struct iwl_priv *priv = file->private_data;
41308 char buf[8];
41309- int buf_size;
41310+ size_t buf_size;
41311 int clear;
41312
41313 memset(buf, 0, sizeof(buf));
41314@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
41315 {
41316 struct iwl_priv *priv = file->private_data;
41317 char buf[8];
41318- int buf_size;
41319+ size_t buf_size;
41320 int trace;
41321
41322 memset(buf, 0, sizeof(buf));
41323@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
41324 {
41325 struct iwl_priv *priv = file->private_data;
41326 char buf[8];
41327- int buf_size;
41328+ size_t buf_size;
41329 int missed;
41330
41331 memset(buf, 0, sizeof(buf));
41332@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
41333
41334 struct iwl_priv *priv = file->private_data;
41335 char buf[8];
41336- int buf_size;
41337+ size_t buf_size;
41338 int plcp;
41339
41340 memset(buf, 0, sizeof(buf));
41341@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
41342
41343 struct iwl_priv *priv = file->private_data;
41344 char buf[8];
41345- int buf_size;
41346+ size_t buf_size;
41347 int flush;
41348
41349 memset(buf, 0, sizeof(buf));
41350@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
41351
41352 struct iwl_priv *priv = file->private_data;
41353 char buf[8];
41354- int buf_size;
41355+ size_t buf_size;
41356 int rts;
41357
41358 if (!priv->cfg->ht_params)
41359@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
41360 {
41361 struct iwl_priv *priv = file->private_data;
41362 char buf[8];
41363- int buf_size;
41364+ size_t buf_size;
41365
41366 memset(buf, 0, sizeof(buf));
41367 buf_size = min(count, sizeof(buf) - 1);
41368@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
41369 struct iwl_priv *priv = file->private_data;
41370 u32 event_log_flag;
41371 char buf[8];
41372- int buf_size;
41373+ size_t buf_size;
41374
41375 /* check that the interface is up */
41376 if (!iwl_is_ready(priv))
41377@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
41378 struct iwl_priv *priv = file->private_data;
41379 char buf[8];
41380 u32 calib_disabled;
41381- int buf_size;
41382+ size_t buf_size;
41383
41384 memset(buf, 0, sizeof(buf));
41385 buf_size = min(count, sizeof(buf) - 1);
41386diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
41387index 35708b9..31f7754 100644
41388--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
41389+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
41390@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
41391 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
41392
41393 char buf[8];
41394- int buf_size;
41395+ size_t buf_size;
41396 u32 reset_flag;
41397
41398 memset(buf, 0, sizeof(buf));
41399@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
41400 {
41401 struct iwl_trans *trans = file->private_data;
41402 char buf[8];
41403- int buf_size;
41404+ size_t buf_size;
41405 int csr;
41406
41407 memset(buf, 0, sizeof(buf));
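
[annotation] All of the buf_size retypings in the two iwlwifi files above follow one rule: a length that flows into copy_from_user() should be size_t end to end. With an int, any path that lets the value go negative becomes a near-SIZE_MAX copy at the size_t parameter boundary, and the unsigned type also keeps min(count, sizeof(buf) - 1) type-consistent. A sketch of the hazard class with a hypothetical helper:

    #include <stdio.h>
    #include <stddef.h>

    static void bounded_copy(size_t n)   /* stands in for copy_from_user() */
    {
        printf("asked to copy %zu bytes\n", n);
    }

    int main(void)
    {
        int len = -1;          /* a signed length computation gone wrong */
        bounded_copy(len);     /* implicit conversion: 18446744073709551615 on LP64 */
        return 0;
    }
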
41408diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
41409index ff90855..e46d223 100644
41410--- a/drivers/net/wireless/mac80211_hwsim.c
41411+++ b/drivers/net/wireless/mac80211_hwsim.c
41412@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
41413
41414 if (channels > 1) {
41415 hwsim_if_comb.num_different_channels = channels;
41416- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
41417- mac80211_hwsim_ops.cancel_hw_scan =
41418- mac80211_hwsim_cancel_hw_scan;
41419- mac80211_hwsim_ops.sw_scan_start = NULL;
41420- mac80211_hwsim_ops.sw_scan_complete = NULL;
41421- mac80211_hwsim_ops.remain_on_channel =
41422- mac80211_hwsim_roc;
41423- mac80211_hwsim_ops.cancel_remain_on_channel =
41424- mac80211_hwsim_croc;
41425- mac80211_hwsim_ops.add_chanctx =
41426- mac80211_hwsim_add_chanctx;
41427- mac80211_hwsim_ops.remove_chanctx =
41428- mac80211_hwsim_remove_chanctx;
41429- mac80211_hwsim_ops.change_chanctx =
41430- mac80211_hwsim_change_chanctx;
41431- mac80211_hwsim_ops.assign_vif_chanctx =
41432- mac80211_hwsim_assign_vif_chanctx;
41433- mac80211_hwsim_ops.unassign_vif_chanctx =
41434- mac80211_hwsim_unassign_vif_chanctx;
41435+ pax_open_kernel();
41436+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
41437+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
41438+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
41439+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
41440+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
41441+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
41442+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
41443+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
41444+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
41445+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
41446+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
41447+ pax_close_kernel();
41448 }
41449
41450 spin_lock_init(&hwsim_radio_lock);
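
[annotation] The hwsim rewrite above (and the similar il3945, wl1251, wl12xx and wl18xx hunks nearby) all serve PaX constification: the ops tables are read-only after boot, so the rare legitimate runtime patch must cast away const field by field and bracket the stores with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A userspace analogy with mprotect(), hypothetical names throughout:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { void (*hook)(void); };

    static void real_hook(void) { puts("hook called"); }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct ops *t = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (t == MAP_FAILED)
            return 1;

        mprotect(t, pg, PROT_READ);               /* "constified": table is RO   */

        mprotect(t, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel() analogue  */
        t->hook = real_hook;                      /* the one sanctioned write    */
        mprotect(t, pg, PROT_READ);               /* pax_close_kernel() analogue */

        t->hook();
        return 0;
    }
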
41451diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
41452index abe1d03..fb02c22 100644
41453--- a/drivers/net/wireless/rndis_wlan.c
41454+++ b/drivers/net/wireless/rndis_wlan.c
41455@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
41456
41457 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
41458
41459- if (rts_threshold < 0 || rts_threshold > 2347)
41460+ if (rts_threshold > 2347)
41461 rts_threshold = 2347;
41462
41463 tmp = cpu_to_le32(rts_threshold);
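
[annotation] rts_threshold is a u32 here, so the dropped half of the test was dead code: "x < 0" on an unsigned type is always false (and trips -Wtype-limits), while a negative caller value arrives converted to a huge positive one and is caught by the upper clamp. Quick demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rts = (uint32_t)-1;   /* "negative" input after conversion */
        if (rts > 2347)                /* the only test that can ever fire  */
            rts = 2347;
        printf("%u\n", rts);           /* prints 2347 */
        return 0;
    }

The drivers/pnp/resource.c hunks further down drop the same kind of always-false test for *irq and *dma.
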
41464diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
41465index 0751b35..246ba3e 100644
41466--- a/drivers/net/wireless/rt2x00/rt2x00.h
41467+++ b/drivers/net/wireless/rt2x00/rt2x00.h
41468@@ -398,7 +398,7 @@ struct rt2x00_intf {
41469 * for hardware which doesn't support hardware
41470 * sequence counting.
41471 */
41472- atomic_t seqno;
41473+ atomic_unchecked_t seqno;
41474 };
41475
41476 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
41477diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
41478index e488b94..14b6a0c 100644
41479--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
41480+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
41481@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
41482 * sequence counter given by mac80211.
41483 */
41484 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
41485- seqno = atomic_add_return(0x10, &intf->seqno);
41486+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
41487 else
41488- seqno = atomic_read(&intf->seqno);
41489+ seqno = atomic_read_unchecked(&intf->seqno);
41490
41491 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
41492 hdr->seq_ctrl |= cpu_to_le16(seqno);
41493diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
41494index e57ee48..541cf6c 100644
41495--- a/drivers/net/wireless/ti/wl1251/sdio.c
41496+++ b/drivers/net/wireless/ti/wl1251/sdio.c
41497@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
41498
41499 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
41500
41501- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41502- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41503+ pax_open_kernel();
41504+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41505+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41506+ pax_close_kernel();
41507
41508 wl1251_info("using dedicated interrupt line");
41509 } else {
41510- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41511- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41512+ pax_open_kernel();
41513+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41514+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41515+ pax_close_kernel();
41516
41517 wl1251_info("using SDIO interrupt");
41518 }
41519diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
41520index e5f5f8f..fdf15b7 100644
41521--- a/drivers/net/wireless/ti/wl12xx/main.c
41522+++ b/drivers/net/wireless/ti/wl12xx/main.c
41523@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41524 sizeof(wl->conf.mem));
41525
41526 /* read data preparation is only needed by wl127x */
41527- wl->ops->prepare_read = wl127x_prepare_read;
41528+ pax_open_kernel();
41529+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41530+ pax_close_kernel();
41531
41532 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
41533 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
41534@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41535 sizeof(wl->conf.mem));
41536
41537 /* read data preparation is only needed by wl127x */
41538- wl->ops->prepare_read = wl127x_prepare_read;
41539+ pax_open_kernel();
41540+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41541+ pax_close_kernel();
41542
41543 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
41544 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
41545diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
41546index 8d8c1f8..e754844 100644
41547--- a/drivers/net/wireless/ti/wl18xx/main.c
41548+++ b/drivers/net/wireless/ti/wl18xx/main.c
41549@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
41550 }
41551
41552 if (!checksum_param) {
41553- wl18xx_ops.set_rx_csum = NULL;
41554- wl18xx_ops.init_vif = NULL;
41555+ pax_open_kernel();
41556+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
41557+ *(void **)&wl18xx_ops.init_vif = NULL;
41558+ pax_close_kernel();
41559 }
41560
41561 /* Enable 11a Band only if we have 5G antennas */
41562diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
41563index ef2b171..bb513a6 100644
41564--- a/drivers/net/wireless/zd1211rw/zd_usb.c
41565+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
41566@@ -387,7 +387,7 @@ static inline void handle_regs_int(struct urb *urb)
41567 {
41568 struct zd_usb *usb = urb->context;
41569 struct zd_usb_interrupt *intr = &usb->intr;
41570- int len;
41571+ unsigned int len;
41572 u16 int_num;
41573
41574 ZD_ASSERT(in_interrupt());
41575diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
41576index d93b2b6..ae50401 100644
41577--- a/drivers/oprofile/buffer_sync.c
41578+++ b/drivers/oprofile/buffer_sync.c
41579@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
41580 if (cookie == NO_COOKIE)
41581 offset = pc;
41582 if (cookie == INVALID_COOKIE) {
41583- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41584+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41585 offset = pc;
41586 }
41587 if (cookie != last_cookie) {
41588@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
41589 /* add userspace sample */
41590
41591 if (!mm) {
41592- atomic_inc(&oprofile_stats.sample_lost_no_mm);
41593+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
41594 return 0;
41595 }
41596
41597 cookie = lookup_dcookie(mm, s->eip, &offset);
41598
41599 if (cookie == INVALID_COOKIE) {
41600- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41601+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41602 return 0;
41603 }
41604
41605@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
41606 /* ignore backtraces if failed to add a sample */
41607 if (state == sb_bt_start) {
41608 state = sb_bt_ignore;
41609- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
41610+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
41611 }
41612 }
41613 release_mm(mm);
41614diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
41615index c0cc4e7..44d4e54 100644
41616--- a/drivers/oprofile/event_buffer.c
41617+++ b/drivers/oprofile/event_buffer.c
41618@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
41619 }
41620
41621 if (buffer_pos == buffer_size) {
41622- atomic_inc(&oprofile_stats.event_lost_overflow);
41623+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
41624 return;
41625 }
41626
41627diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
41628index ed2c3ec..deda85a 100644
41629--- a/drivers/oprofile/oprof.c
41630+++ b/drivers/oprofile/oprof.c
41631@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
41632 if (oprofile_ops.switch_events())
41633 return;
41634
41635- atomic_inc(&oprofile_stats.multiplex_counter);
41636+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
41637 start_switch_worker();
41638 }
41639
41640diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
41641index 84a208d..d61b0a1 100644
41642--- a/drivers/oprofile/oprofile_files.c
41643+++ b/drivers/oprofile/oprofile_files.c
41644@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
41645
41646 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
41647
41648-static ssize_t timeout_read(struct file *file, char __user *buf,
41649+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
41650 size_t count, loff_t *offset)
41651 {
41652 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
41653diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
41654index 917d28e..d62d981 100644
41655--- a/drivers/oprofile/oprofile_stats.c
41656+++ b/drivers/oprofile/oprofile_stats.c
41657@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
41658 cpu_buf->sample_invalid_eip = 0;
41659 }
41660
41661- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
41662- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
41663- atomic_set(&oprofile_stats.event_lost_overflow, 0);
41664- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
41665- atomic_set(&oprofile_stats.multiplex_counter, 0);
41666+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
41667+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
41668+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
41669+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
41670+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
41671 }
41672
41673
41674diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
41675index 38b6fc0..b5cbfce 100644
41676--- a/drivers/oprofile/oprofile_stats.h
41677+++ b/drivers/oprofile/oprofile_stats.h
41678@@ -13,11 +13,11 @@
41679 #include <linux/atomic.h>
41680
41681 struct oprofile_stat_struct {
41682- atomic_t sample_lost_no_mm;
41683- atomic_t sample_lost_no_mapping;
41684- atomic_t bt_lost_no_mapping;
41685- atomic_t event_lost_overflow;
41686- atomic_t multiplex_counter;
41687+ atomic_unchecked_t sample_lost_no_mm;
41688+ atomic_unchecked_t sample_lost_no_mapping;
41689+ atomic_unchecked_t bt_lost_no_mapping;
41690+ atomic_unchecked_t event_lost_overflow;
41691+ atomic_unchecked_t multiplex_counter;
41692 };
41693
41694 extern struct oprofile_stat_struct oprofile_stats;
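
[annotation] The _unchecked churn across the oprofile hunks reflects the PAX_REFCOUNT split: plain atomic_t arithmetic is instrumented to trap on overflow (to stop reference-count-overflow exploits), while atomic_unchecked_t keeps plain wrapping semantics for values, like these lost-sample statistics, where wraparound is harmless. A sketch of the shape of that second type, assuming GCC atomic builtins; illustrative, not the PaX header:

    typedef struct { int counter; } atomic_unchecked_t;

    #define ATOMIC_UNCHECKED_INIT(i) { (i) }

    /* plain wrapping increment: no overflow trap, fine for statistics */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
        __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
    }
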
41695diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
41696index 849357c..b83c1e0 100644
41697--- a/drivers/oprofile/oprofilefs.c
41698+++ b/drivers/oprofile/oprofilefs.c
41699@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
41700
41701
41702 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
41703- char const *name, atomic_t *val)
41704+ char const *name, atomic_unchecked_t *val)
41705 {
41706 return __oprofilefs_create_file(sb, root, name,
41707 &atomic_ro_fops, 0444, val);
41708diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
41709index 93404f7..4a313d8 100644
41710--- a/drivers/oprofile/timer_int.c
41711+++ b/drivers/oprofile/timer_int.c
41712@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
41713 return NOTIFY_OK;
41714 }
41715
41716-static struct notifier_block __refdata oprofile_cpu_notifier = {
41717+static struct notifier_block oprofile_cpu_notifier = {
41718 .notifier_call = oprofile_cpu_notify,
41719 };
41720
41721diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
41722index 3f56bc0..707d642 100644
41723--- a/drivers/parport/procfs.c
41724+++ b/drivers/parport/procfs.c
41725@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
41726
41727 *ppos += len;
41728
41729- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
41730+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
41731 }
41732
41733 #ifdef CONFIG_PARPORT_1284
41734@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
41735
41736 *ppos += len;
41737
41738- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
41739+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
41740 }
41741 #endif /* IEEE1284.3 support. */
41742
41743diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
41744index c35e8ad..fc33beb 100644
41745--- a/drivers/pci/hotplug/acpiphp_ibm.c
41746+++ b/drivers/pci/hotplug/acpiphp_ibm.c
41747@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
41748 goto init_cleanup;
41749 }
41750
41751- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41752+ pax_open_kernel();
41753+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41754+ pax_close_kernel();
41755 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
41756
41757 return retval;
41758diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
41759index a6a71c4..c91097b 100644
41760--- a/drivers/pci/hotplug/cpcihp_generic.c
41761+++ b/drivers/pci/hotplug/cpcihp_generic.c
41762@@ -73,7 +73,6 @@ static u16 port;
41763 static unsigned int enum_bit;
41764 static u8 enum_mask;
41765
41766-static struct cpci_hp_controller_ops generic_hpc_ops;
41767 static struct cpci_hp_controller generic_hpc;
41768
41769 static int __init validate_parameters(void)
41770@@ -139,6 +138,10 @@ static int query_enum(void)
41771 return ((value & enum_mask) == enum_mask);
41772 }
41773
41774+static struct cpci_hp_controller_ops generic_hpc_ops = {
41775+ .query_enum = query_enum,
41776+};
41777+
41778 static int __init cpcihp_generic_init(void)
41779 {
41780 int status;
41781@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
41782 pci_dev_put(dev);
41783
41784 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
41785- generic_hpc_ops.query_enum = query_enum;
41786 generic_hpc.ops = &generic_hpc_ops;
41787
41788 status = cpci_hp_register_controller(&generic_hpc);
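
[annotation] This hunk and the zt5550 one below share a goal: move method-pointer assignment out of probe time and into designated initializers, so the ops structure never needs a writable phase and can be constified. Reduced to its shape, with hypothetical names:

    struct hpc_ops { int (*query_enum)(void); };

    static int my_query_enum(void) { return 1; }

    /* before: static struct hpc_ops ops;  ...  ops.query_enum = my_query_enum;
       after: the binding is fixed at compile/link time                        */
    static struct hpc_ops ops = {
        .query_enum = my_query_enum,
    };

Members that genuinely depend on runtime state, like the zt5550 IRQ methods, still get written, but only inside a pax_open_kernel() window.
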
41789diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
41790index 449b4bb..257e2e8 100644
41791--- a/drivers/pci/hotplug/cpcihp_zt5550.c
41792+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
41793@@ -59,7 +59,6 @@
41794 /* local variables */
41795 static bool debug;
41796 static bool poll;
41797-static struct cpci_hp_controller_ops zt5550_hpc_ops;
41798 static struct cpci_hp_controller zt5550_hpc;
41799
41800 /* Primary cPCI bus bridge device */
41801@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
41802 return 0;
41803 }
41804
41805+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
41806+ .query_enum = zt5550_hc_query_enum,
41807+};
41808+
41809 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
41810 {
41811 int status;
41812@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
41813 dbg("returned from zt5550_hc_config");
41814
41815 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
41816- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
41817 zt5550_hpc.ops = &zt5550_hpc_ops;
41818 if(!poll) {
41819 zt5550_hpc.irq = hc_dev->irq;
41820 zt5550_hpc.irq_flags = IRQF_SHARED;
41821 zt5550_hpc.dev_id = hc_dev;
41822
41823- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41824- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41825- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41826+ pax_open_kernel();
41827+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41828+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41829+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41830+			pax_close_kernel();
41831 } else {
41832 info("using ENUM# polling mode");
41833 }
41834diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
41835index 76ba8a1..20ca857 100644
41836--- a/drivers/pci/hotplug/cpqphp_nvram.c
41837+++ b/drivers/pci/hotplug/cpqphp_nvram.c
41838@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
41839
41840 void compaq_nvram_init (void __iomem *rom_start)
41841 {
41842+
41843+#ifndef CONFIG_PAX_KERNEXEC
41844 if (rom_start) {
41845 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
41846 }
41847+#endif
41848+
41849 dbg("int15 entry = %p\n", compaq_int15_entry_point);
41850
41851 /* initialize our int15 lock */
41852diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
41853index 202f4a9..8ee47d0 100644
41854--- a/drivers/pci/hotplug/pci_hotplug_core.c
41855+++ b/drivers/pci/hotplug/pci_hotplug_core.c
41856@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
41857 return -EINVAL;
41858 }
41859
41860- slot->ops->owner = owner;
41861- slot->ops->mod_name = mod_name;
41862+ pax_open_kernel();
41863+ *(struct module **)&slot->ops->owner = owner;
41864+ *(const char **)&slot->ops->mod_name = mod_name;
41865+ pax_close_kernel();
41866
41867 mutex_lock(&pci_hp_mutex);
41868 /*
41869diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
41870index 939bd1d..a1459c9 100644
41871--- a/drivers/pci/hotplug/pciehp_core.c
41872+++ b/drivers/pci/hotplug/pciehp_core.c
41873@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
41874 struct slot *slot = ctrl->slot;
41875 struct hotplug_slot *hotplug = NULL;
41876 struct hotplug_slot_info *info = NULL;
41877- struct hotplug_slot_ops *ops = NULL;
41878+ hotplug_slot_ops_no_const *ops = NULL;
41879 char name[SLOT_NAME_SIZE];
41880 int retval = -ENOMEM;
41881
41882diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
41883index 9c6e9bb..2916736 100644
41884--- a/drivers/pci/pci-sysfs.c
41885+++ b/drivers/pci/pci-sysfs.c
41886@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
41887 {
41888 /* allocate attribute structure, piggyback attribute name */
41889 int name_len = write_combine ? 13 : 10;
41890- struct bin_attribute *res_attr;
41891+ bin_attribute_no_const *res_attr;
41892 int retval;
41893
41894 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
41895@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
41896 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
41897 {
41898 int retval;
41899- struct bin_attribute *attr;
41900+ bin_attribute_no_const *attr;
41901
41902 /* If the device has VPD, try to expose it in sysfs. */
41903 if (dev->vpd) {
41904@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
41905 {
41906 int retval;
41907 int rom_size = 0;
41908- struct bin_attribute *attr;
41909+ bin_attribute_no_const *attr;
41910
41911 if (!sysfs_initialized)
41912 return -EACCES;
41913diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
41914index e851829..a1a7196 100644
41915--- a/drivers/pci/pci.h
41916+++ b/drivers/pci/pci.h
41917@@ -98,7 +98,7 @@ struct pci_vpd_ops {
41918 struct pci_vpd {
41919 unsigned int len;
41920 const struct pci_vpd_ops *ops;
41921- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
41922+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
41923 };
41924
41925 extern int pci_vpd_pci22_init(struct pci_dev *dev);
41926diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
41927index 8474b6a..ee81993 100644
41928--- a/drivers/pci/pcie/aspm.c
41929+++ b/drivers/pci/pcie/aspm.c
41930@@ -27,9 +27,9 @@
41931 #define MODULE_PARAM_PREFIX "pcie_aspm."
41932
41933 /* Note: those are not register definitions */
41934-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
41935-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
41936-#define ASPM_STATE_L1 (4) /* L1 state */
41937+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
41938+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
41939+#define ASPM_STATE_L1 (4U) /* L1 state */
41940 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41941 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
41942
41943diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
41944index 6186f03..1a78714 100644
41945--- a/drivers/pci/probe.c
41946+++ b/drivers/pci/probe.c
41947@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
41948 struct pci_bus_region region;
41949 bool bar_too_big = false, bar_disabled = false;
41950
41951- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
41952+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
41953
41954 /* No printks while decoding is disabled! */
41955 if (!dev->mmio_always_on) {
41956diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
41957index 9b8505c..f00870a 100644
41958--- a/drivers/pci/proc.c
41959+++ b/drivers/pci/proc.c
41960@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
41961 static int __init pci_proc_init(void)
41962 {
41963 struct pci_dev *dev = NULL;
41964+
41965+#ifdef CONFIG_GRKERNSEC_PROC_ADD
41966+#ifdef CONFIG_GRKERNSEC_PROC_USER
41967+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
41968+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41969+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
41970+#endif
41971+#else
41972 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
41973+#endif
41974 proc_create("devices", 0, proc_bus_pci_dir,
41975 &proc_bus_pci_dev_operations);
41976 proc_initialized = 1;
41977diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41978index 2111dbb..79e434b 100644
41979--- a/drivers/platform/x86/msi-laptop.c
41980+++ b/drivers/platform/x86/msi-laptop.c
41981@@ -820,12 +820,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
41982 int result;
41983
41984 /* allow userland write sysfs file */
41985- dev_attr_bluetooth.store = store_bluetooth;
41986- dev_attr_wlan.store = store_wlan;
41987- dev_attr_threeg.store = store_threeg;
41988- dev_attr_bluetooth.attr.mode |= S_IWUSR;
41989- dev_attr_wlan.attr.mode |= S_IWUSR;
41990- dev_attr_threeg.attr.mode |= S_IWUSR;
41991+ pax_open_kernel();
41992+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
41993+ *(void **)&dev_attr_wlan.store = store_wlan;
41994+ *(void **)&dev_attr_threeg.store = store_threeg;
41995+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
41996+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
41997+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
41998+ pax_close_kernel();
41999
42000 /* disable hardware control by fn key */
42001 result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
42002diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
42003index 0fe987f..6f3d5c3 100644
42004--- a/drivers/platform/x86/sony-laptop.c
42005+++ b/drivers/platform/x86/sony-laptop.c
42006@@ -2356,7 +2356,7 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
42007 }
42008
42009 /* High speed charging function */
42010-static struct device_attribute *hsc_handle;
42011+static device_attribute_no_const *hsc_handle;
42012
42013 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
42014 struct device_attribute *attr,
42015diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
42016index f946ca7..f25c833 100644
42017--- a/drivers/platform/x86/thinkpad_acpi.c
42018+++ b/drivers/platform/x86/thinkpad_acpi.c
42019@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
42020 return 0;
42021 }
42022
42023-void static hotkey_mask_warn_incomplete_mask(void)
42024+static void hotkey_mask_warn_incomplete_mask(void)
42025 {
42026 /* log only what the user can fix... */
42027 const u32 wantedmask = hotkey_driver_mask &
42028@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
42029 }
42030 }
42031
42032-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
42033- struct tp_nvram_state *newn,
42034- const u32 event_mask)
42035-{
42036-
42037 #define TPACPI_COMPARE_KEY(__scancode, __member) \
42038 do { \
42039 if ((event_mask & (1 << __scancode)) && \
42040@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
42041 tpacpi_hotkey_send_key(__scancode); \
42042 } while (0)
42043
42044- void issue_volchange(const unsigned int oldvol,
42045- const unsigned int newvol)
42046- {
42047- unsigned int i = oldvol;
42048+static void issue_volchange(const unsigned int oldvol,
42049+ const unsigned int newvol,
42050+ const u32 event_mask)
42051+{
42052+ unsigned int i = oldvol;
42053
42054- while (i > newvol) {
42055- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
42056- i--;
42057- }
42058- while (i < newvol) {
42059- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
42060- i++;
42061- }
42062+ while (i > newvol) {
42063+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
42064+ i--;
42065 }
42066+ while (i < newvol) {
42067+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
42068+ i++;
42069+ }
42070+}
42071
42072- void issue_brightnesschange(const unsigned int oldbrt,
42073- const unsigned int newbrt)
42074- {
42075- unsigned int i = oldbrt;
42076+static void issue_brightnesschange(const unsigned int oldbrt,
42077+ const unsigned int newbrt,
42078+ const u32 event_mask)
42079+{
42080+ unsigned int i = oldbrt;
42081
42082- while (i > newbrt) {
42083- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
42084- i--;
42085- }
42086- while (i < newbrt) {
42087- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
42088- i++;
42089- }
42090+ while (i > newbrt) {
42091+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
42092+ i--;
42093+ }
42094+ while (i < newbrt) {
42095+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
42096+ i++;
42097 }
42098+}
42099
42100+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
42101+ struct tp_nvram_state *newn,
42102+ const u32 event_mask)
42103+{
42104 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
42105 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
42106 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
42107@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
42108 oldn->volume_level != newn->volume_level) {
42109 /* recently muted, or repeated mute keypress, or
42110 * multiple presses ending in mute */
42111- issue_volchange(oldn->volume_level, newn->volume_level);
42112+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
42113 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
42114 }
42115 } else {
42116@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
42117 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
42118 }
42119 if (oldn->volume_level != newn->volume_level) {
42120- issue_volchange(oldn->volume_level, newn->volume_level);
42121+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
42122 } else if (oldn->volume_toggle != newn->volume_toggle) {
42123 /* repeated vol up/down keypress at end of scale ? */
42124 if (newn->volume_level == 0)
42125@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
42126 /* handle brightness */
42127 if (oldn->brightness_level != newn->brightness_level) {
42128 issue_brightnesschange(oldn->brightness_level,
42129- newn->brightness_level);
42130+ newn->brightness_level,
42131+ event_mask);
42132 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
42133 /* repeated key presses that didn't change state */
42134 if (newn->brightness_level == 0)
42135@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
42136 && !tp_features.bright_unkfw)
42137 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
42138 }
42139+}
42140
42141 #undef TPACPI_COMPARE_KEY
42142 #undef TPACPI_MAY_SEND_KEY
42143-}
42144
42145 /*
42146 * Polling driver
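
[annotation] The thinkpad_acpi rewrite above hoists two GCC nested functions out of hotkey_compare_and_issue_event(). Nested functions that capture enclosing locals need stack trampolines once their address escapes, which demands an executable stack, incompatible with hardened non-exec stack policies; even when only called directly, the construct is avoided here, and the refactor instead passes the captured event_mask explicitly. The pattern, reduced to a sketch:

    /* before (GNU C extension): the inner function captured event_mask
       void handler(unsigned event_mask) {
           void issue(unsigned v) { consume(v, event_mask); }
           issue(1);
       }
    */

    static void consume(unsigned v, unsigned event_mask)
    {
        (void)v; (void)event_mask;    /* stub for the real key dispatch */
    }

    static void issue(unsigned v, unsigned event_mask)  /* capture made explicit */
    {
        consume(v, event_mask);
    }

    static void handler(unsigned event_mask)
    {
        issue(1, event_mask);
    }
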
42147diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
42148index 769d265..a3a05ca 100644
42149--- a/drivers/pnp/pnpbios/bioscalls.c
42150+++ b/drivers/pnp/pnpbios/bioscalls.c
42151@@ -58,7 +58,7 @@ do { \
42152 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
42153 } while(0)
42154
42155-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
42156+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
42157 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
42158
42159 /*
42160@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
42161
42162 cpu = get_cpu();
42163 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
42164+
42165+ pax_open_kernel();
42166 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
42167+ pax_close_kernel();
42168
42169 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
42170 spin_lock_irqsave(&pnp_bios_lock, flags);
42171@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
42172 :"memory");
42173 spin_unlock_irqrestore(&pnp_bios_lock, flags);
42174
42175+ pax_open_kernel();
42176 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
42177+ pax_close_kernel();
42178+
42179 put_cpu();
42180
42181 /* If we get here and this is set then the PnP BIOS faulted on us. */
42182@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
42183 return status;
42184 }
42185
42186-void pnpbios_calls_init(union pnp_bios_install_struct *header)
42187+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
42188 {
42189 int i;
42190
42191@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
42192 pnp_bios_callpoint.offset = header->fields.pm16offset;
42193 pnp_bios_callpoint.segment = PNP_CS16;
42194
42195+ pax_open_kernel();
42196+
42197 for_each_possible_cpu(i) {
42198 struct desc_struct *gdt = get_cpu_gdt_table(i);
42199 if (!gdt)
42200@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
42201 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
42202 (unsigned long)__va(header->fields.pm16dseg));
42203 }
42204+
42205+ pax_close_kernel();
42206 }
42207diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
42208index 3e6db1c..1fbbdae 100644
42209--- a/drivers/pnp/resource.c
42210+++ b/drivers/pnp/resource.c
42211@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
42212 return 1;
42213
42214 /* check if the resource is valid */
42215- if (*irq < 0 || *irq > 15)
42216+ if (*irq > 15)
42217 return 0;
42218
42219 /* check if the resource is reserved */
42220@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
42221 return 1;
42222
42223 /* check if the resource is valid */
42224- if (*dma < 0 || *dma == 4 || *dma > 7)
42225+ if (*dma == 4 || *dma > 7)
42226 return 0;
42227
42228 /* check if the resource is reserved */
42229diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
42230index 7df7c5f..bd48c47 100644
42231--- a/drivers/power/pda_power.c
42232+++ b/drivers/power/pda_power.c
42233@@ -37,7 +37,11 @@ static int polling;
42234
42235 #ifdef CONFIG_USB_OTG_UTILS
42236 static struct usb_phy *transceiver;
42237-static struct notifier_block otg_nb;
42238+static int otg_handle_notification(struct notifier_block *nb,
42239+ unsigned long event, void *unused);
42240+static struct notifier_block otg_nb = {
42241+ .notifier_call = otg_handle_notification
42242+};
42243 #endif
42244
42245 static struct regulator *ac_draw;
42246@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
42247
42248 #ifdef CONFIG_USB_OTG_UTILS
42249 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
42250- otg_nb.notifier_call = otg_handle_notification;
42251 ret = usb_register_notifier(transceiver, &otg_nb);
42252 if (ret) {
42253 dev_err(dev, "failure to register otg notifier\n");
42254diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
42255index cc439fd..8fa30df 100644
42256--- a/drivers/power/power_supply.h
42257+++ b/drivers/power/power_supply.h
42258@@ -16,12 +16,12 @@ struct power_supply;
42259
42260 #ifdef CONFIG_SYSFS
42261
42262-extern void power_supply_init_attrs(struct device_type *dev_type);
42263+extern void power_supply_init_attrs(void);
42264 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
42265
42266 #else
42267
42268-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
42269+static inline void power_supply_init_attrs(void) {}
42270 #define power_supply_uevent NULL
42271
42272 #endif /* CONFIG_SYSFS */
42273diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
42274index 8a7cfb3..72e6e9b 100644
42275--- a/drivers/power/power_supply_core.c
42276+++ b/drivers/power/power_supply_core.c
42277@@ -24,7 +24,10 @@
42278 struct class *power_supply_class;
42279 EXPORT_SYMBOL_GPL(power_supply_class);
42280
42281-static struct device_type power_supply_dev_type;
42282+extern const struct attribute_group *power_supply_attr_groups[];
42283+static struct device_type power_supply_dev_type = {
42284+ .groups = power_supply_attr_groups,
42285+};
42286
42287 static int __power_supply_changed_work(struct device *dev, void *data)
42288 {
42289@@ -393,7 +396,7 @@ static int __init power_supply_class_init(void)
42290 return PTR_ERR(power_supply_class);
42291
42292 power_supply_class->dev_uevent = power_supply_uevent;
42293- power_supply_init_attrs(&power_supply_dev_type);
42294+ power_supply_init_attrs();
42295
42296 return 0;
42297 }
42298diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
42299index 40fa3b7..d9c2e0e 100644
42300--- a/drivers/power/power_supply_sysfs.c
42301+++ b/drivers/power/power_supply_sysfs.c
42302@@ -229,17 +229,15 @@ static struct attribute_group power_supply_attr_group = {
42303 .is_visible = power_supply_attr_is_visible,
42304 };
42305
42306-static const struct attribute_group *power_supply_attr_groups[] = {
42307+const struct attribute_group *power_supply_attr_groups[] = {
42308 &power_supply_attr_group,
42309 NULL,
42310 };
42311
42312-void power_supply_init_attrs(struct device_type *dev_type)
42313+void power_supply_init_attrs(void)
42314 {
42315 int i;
42316
42317- dev_type->groups = power_supply_attr_groups;
42318-
42319 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
42320 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
42321 }
42322diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
42323index 4d7c635..9860196 100644
42324--- a/drivers/regulator/max8660.c
42325+++ b/drivers/regulator/max8660.c
42326@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
42327 max8660->shadow_regs[MAX8660_OVER1] = 5;
42328 } else {
42329 /* Otherwise devices can be toggled via software */
42330- max8660_dcdc_ops.enable = max8660_dcdc_enable;
42331- max8660_dcdc_ops.disable = max8660_dcdc_disable;
42332+ pax_open_kernel();
42333+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
42334+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
42335+ pax_close_kernel();
42336 }
42337
42338 /*
42339diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
42340index 9a8ea91..c483dd9 100644
42341--- a/drivers/regulator/max8973-regulator.c
42342+++ b/drivers/regulator/max8973-regulator.c
42343@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
42344 if (!pdata->enable_ext_control) {
42345 max->desc.enable_reg = MAX8973_VOUT;
42346 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
42347- max8973_dcdc_ops.enable = regulator_enable_regmap;
42348- max8973_dcdc_ops.disable = regulator_disable_regmap;
42349- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
42350+ pax_open_kernel();
42351+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
42352+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
42353+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
42354+ pax_close_kernel();
42355 }
42356
42357 max->enable_external_control = pdata->enable_ext_control;
42358diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
42359index 0d84b1f..c2da6ac 100644
42360--- a/drivers/regulator/mc13892-regulator.c
42361+++ b/drivers/regulator/mc13892-regulator.c
42362@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
42363 }
42364 mc13xxx_unlock(mc13892);
42365
42366- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
42367+ pax_open_kernel();
42368+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
42369 = mc13892_vcam_set_mode;
42370- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
42371+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
42372 = mc13892_vcam_get_mode;
42373+ pax_close_kernel();
42374
42375 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
42376 ARRAY_SIZE(mc13892_regulators));
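
The max8660, max8973 and mc13892 hunks above (and the lpfc and qla2xxx hunks further down) all apply the same idiom: under PAX_KERNEXEC/constify the ops tables become read-only, so the few legitimate boot-time writes are bracketed by pax_open_kernel()/pax_close_kernel() and performed through a (void **) cast that defeats the const qualifier. Schematically, assuming a constified ops structure (names illustrative):

    static struct regulator_ops example_ops = {	/* read-only at runtime */
    	.get_voltage_sel = NULL,		/* filled during probe */
    };

    static void example_probe_fixup(void)
    {
    	pax_open_kernel();	/* briefly lift kernel write protection */
    	*(void **)&example_ops.enable  = regulator_enable_regmap;
    	*(void **)&example_ops.disable = regulator_disable_regmap;
    	pax_close_kernel();	/* restore write protection */
    }

Roughly, pax_open_kernel() disables the CPU's kernel write protection (CR0.WP on x86), which is why the window is kept as short as it is in these hunks.
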
42377diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
42378index 16630aa..6afc992 100644
42379--- a/drivers/rtc/rtc-cmos.c
42380+++ b/drivers/rtc/rtc-cmos.c
42381@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
42382 hpet_rtc_timer_init();
42383
42384 /* export at least the first block of NVRAM */
42385- nvram.size = address_space - NVRAM_OFFSET;
42386+ pax_open_kernel();
42387+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
42388+ pax_close_kernel();
42389 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
42390 if (retval < 0) {
42391 dev_dbg(dev, "can't create nvram file? %d\n", retval);
42392diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
42393index 9a86b4b..3a383dc 100644
42394--- a/drivers/rtc/rtc-dev.c
42395+++ b/drivers/rtc/rtc-dev.c
42396@@ -14,6 +14,7 @@
42397 #include <linux/module.h>
42398 #include <linux/rtc.h>
42399 #include <linux/sched.h>
42400+#include <linux/grsecurity.h>
42401 #include "rtc-core.h"
42402
42403 static dev_t rtc_devt;
42404@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
42405 if (copy_from_user(&tm, uarg, sizeof(tm)))
42406 return -EFAULT;
42407
42408+ gr_log_timechange();
42409+
42410 return rtc_set_time(rtc, &tm);
42411
42412 case RTC_PIE_ON:
42413diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
42414index e0d0ba4..3c65868 100644
42415--- a/drivers/rtc/rtc-ds1307.c
42416+++ b/drivers/rtc/rtc-ds1307.c
42417@@ -106,7 +106,7 @@ struct ds1307 {
42418 u8 offset; /* register's offset */
42419 u8 regs[11];
42420 u16 nvram_offset;
42421- struct bin_attribute *nvram;
42422+ bin_attribute_no_const *nvram;
42423 enum ds_type type;
42424 unsigned long flags;
42425 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
42426diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
42427index 130f29a..6179d03 100644
42428--- a/drivers/rtc/rtc-m48t59.c
42429+++ b/drivers/rtc/rtc-m48t59.c
42430@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
42431 goto out;
42432 }
42433
42434- m48t59_nvram_attr.size = pdata->offset;
42435+ pax_open_kernel();
42436+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
42437+ pax_close_kernel();
42438
42439 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
42440 if (ret) {
42441diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
42442index e693af6..2e525b6 100644
42443--- a/drivers/scsi/bfa/bfa_fcpim.h
42444+++ b/drivers/scsi/bfa/bfa_fcpim.h
42445@@ -36,7 +36,7 @@ struct bfa_iotag_s {
42446
42447 struct bfa_itn_s {
42448 bfa_isr_func_t isr;
42449-};
42450+} __no_const;
42451
42452 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
42453 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
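
bfa_itn_s here, and the two bfa_ioc.h structures just below, gain __no_const: the PaX constify plugin treats a struct consisting only of function pointers as implicitly const, and this attribute opts out the ones that really are assigned at runtime. The *_no_const typedefs seen elsewhere in this patch (bin_attribute_no_const, fc_function_template_no_const, sensor_device_attribute_no_const, net_device_ops_no_const) are the per-declaration form of the same escape hatch. In outline:

    /* an all-function-pointer struct: constify would force every
     * instance into .rodata unless the type is marked __no_const */
    struct example_cbs {
    	void (*isr)(void *arg);
    } __no_const;

    static struct example_cbs cbs;

    static void example_isr(void *arg) { }

    static void example_setup(void)
    {
    	cbs.isr = example_isr;	/* needs the struct left writable */
    }
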
42454diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
42455index 23a90e7..9cf04ee 100644
42456--- a/drivers/scsi/bfa/bfa_ioc.h
42457+++ b/drivers/scsi/bfa/bfa_ioc.h
42458@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
42459 bfa_ioc_disable_cbfn_t disable_cbfn;
42460 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
42461 bfa_ioc_reset_cbfn_t reset_cbfn;
42462-};
42463+} __no_const;
42464
42465 /*
42466 * IOC event notification mechanism.
42467@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
42468 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
42469 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
42470 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
42471-};
42472+} __no_const;
42473
42474 /*
42475 * Queue element to wait for room in request queue. FIFO order is
42476diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
42477index 593085a..47aa999 100644
42478--- a/drivers/scsi/hosts.c
42479+++ b/drivers/scsi/hosts.c
42480@@ -42,7 +42,7 @@
42481 #include "scsi_logging.h"
42482
42483
42484-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42485+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42486
42487
42488 static void scsi_host_cls_release(struct device *dev)
42489@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
42490 * subtract one because we increment first then return, but we need to
42491 * know what the next host number was before increment
42492 */
42493- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
42494+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
42495 shost->dma_channel = 0xff;
42496
42497 /* These three are default values which can be overridden */
42498diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
42499index 4f33806..afd6f60 100644
42500--- a/drivers/scsi/hpsa.c
42501+++ b/drivers/scsi/hpsa.c
42502@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
42503 unsigned long flags;
42504
42505 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
42506- return h->access.command_completed(h, q);
42507+ return h->access->command_completed(h, q);
42508
42509 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
42510 a = rq->head[rq->current_entry];
42511@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
42512 while (!list_empty(&h->reqQ)) {
42513 c = list_entry(h->reqQ.next, struct CommandList, list);
42514 /* can't do anything if fifo is full */
42515- if ((h->access.fifo_full(h))) {
42516+ if ((h->access->fifo_full(h))) {
42517 dev_warn(&h->pdev->dev, "fifo full\n");
42518 break;
42519 }
42520@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
42521
42522 /* Tell the controller execute command */
42523 spin_unlock_irqrestore(&h->lock, flags);
42524- h->access.submit_command(h, c);
42525+ h->access->submit_command(h, c);
42526 spin_lock_irqsave(&h->lock, flags);
42527 }
42528 spin_unlock_irqrestore(&h->lock, flags);
42529@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
42530
42531 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
42532 {
42533- return h->access.command_completed(h, q);
42534+ return h->access->command_completed(h, q);
42535 }
42536
42537 static inline bool interrupt_pending(struct ctlr_info *h)
42538 {
42539- return h->access.intr_pending(h);
42540+ return h->access->intr_pending(h);
42541 }
42542
42543 static inline long interrupt_not_for_us(struct ctlr_info *h)
42544 {
42545- return (h->access.intr_pending(h) == 0) ||
42546+ return (h->access->intr_pending(h) == 0) ||
42547 (h->interrupts_enabled == 0);
42548 }
42549
42550@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
42551 if (prod_index < 0)
42552 return -ENODEV;
42553 h->product_name = products[prod_index].product_name;
42554- h->access = *(products[prod_index].access);
42555+ h->access = products[prod_index].access;
42556
42557 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
42558 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
42559@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
42560
42561 assert_spin_locked(&lockup_detector_lock);
42562 remove_ctlr_from_lockup_detector_list(h);
42563- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42564+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42565 spin_lock_irqsave(&h->lock, flags);
42566 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
42567 spin_unlock_irqrestore(&h->lock, flags);
42568@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
42569 }
42570
42571 /* make sure the board interrupts are off */
42572- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42573+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42574
42575 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
42576 goto clean2;
42577@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
42578 * fake ones to scoop up any residual completions.
42579 */
42580 spin_lock_irqsave(&h->lock, flags);
42581- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42582+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42583 spin_unlock_irqrestore(&h->lock, flags);
42584 free_irqs(h);
42585 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
42586@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
42587 dev_info(&h->pdev->dev, "Board READY.\n");
42588 dev_info(&h->pdev->dev,
42589 "Waiting for stale completions to drain.\n");
42590- h->access.set_intr_mask(h, HPSA_INTR_ON);
42591+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42592 msleep(10000);
42593- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42594+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42595
42596 rc = controller_reset_failed(h->cfgtable);
42597 if (rc)
42598@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
42599 }
42600
42601 /* Turn the interrupts on so we can service requests */
42602- h->access.set_intr_mask(h, HPSA_INTR_ON);
42603+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42604
42605 hpsa_hba_inquiry(h);
42606 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
42607@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
42608 * To write all data in the battery backed cache to disks
42609 */
42610 hpsa_flush_cache(h);
42611- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42612+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42613 hpsa_free_irqs_and_disable_msix(h);
42614 }
42615
42616@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
42617 return;
42618 }
42619 /* Change the access methods to the performant access methods */
42620- h->access = SA5_performant_access;
42621+ h->access = &SA5_performant_access;
42622 h->transMethod = CFGTBL_Trans_Performant;
42623 }
42624
42625diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
42626index 9816479..c5d4e97 100644
42627--- a/drivers/scsi/hpsa.h
42628+++ b/drivers/scsi/hpsa.h
42629@@ -79,7 +79,7 @@ struct ctlr_info {
42630 unsigned int msix_vector;
42631 unsigned int msi_vector;
42632 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
42633- struct access_method access;
42634+ struct access_method *access;
42635
42636 /* queue and queue Info */
42637 struct list_head reqQ;
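
The hpsa changes are a single conversion: struct ctlr_info used to hold a writable copy of the access_method table (h->access = *(products[prod_index].access)), which would break once the shared tables are constified; storing a pointer instead keeps SA5_access/SA5_performant_access read-only, at the cost of rewriting every call site from h->access.fn() to h->access->fn(). The shape of the change, with hypothetical names:

    struct example_ctlr;

    struct example_access {
    	void (*submit)(struct example_ctlr *h);
    };

    struct example_ctlr {
    	struct example_access *access;	/* was: struct example_access access; */
    };

    static struct example_access SA5_example = {
    	.submit = NULL,			/* placeholder */
    };

    static void example_pci_init(struct example_ctlr *h)
    {
    	h->access = &SA5_example;	/* was a struct copy */
    }

Call sites then change mechanically, exactly as in the hunks above.
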
42638diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
42639index c772d8d..35c362c 100644
42640--- a/drivers/scsi/libfc/fc_exch.c
42641+++ b/drivers/scsi/libfc/fc_exch.c
42642@@ -100,12 +100,12 @@ struct fc_exch_mgr {
42643 u16 pool_max_index;
42644
42645 struct {
42646- atomic_t no_free_exch;
42647- atomic_t no_free_exch_xid;
42648- atomic_t xid_not_found;
42649- atomic_t xid_busy;
42650- atomic_t seq_not_found;
42651- atomic_t non_bls_resp;
42652+ atomic_unchecked_t no_free_exch;
42653+ atomic_unchecked_t no_free_exch_xid;
42654+ atomic_unchecked_t xid_not_found;
42655+ atomic_unchecked_t xid_busy;
42656+ atomic_unchecked_t seq_not_found;
42657+ atomic_unchecked_t non_bls_resp;
42658 } stats;
42659 };
42660
42661@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
42662 /* allocate memory for exchange */
42663 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
42664 if (!ep) {
42665- atomic_inc(&mp->stats.no_free_exch);
42666+ atomic_inc_unchecked(&mp->stats.no_free_exch);
42667 goto out;
42668 }
42669 memset(ep, 0, sizeof(*ep));
42670@@ -786,7 +786,7 @@ out:
42671 return ep;
42672 err:
42673 spin_unlock_bh(&pool->lock);
42674- atomic_inc(&mp->stats.no_free_exch_xid);
42675+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
42676 mempool_free(ep, mp->ep_pool);
42677 return NULL;
42678 }
42679@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42680 xid = ntohs(fh->fh_ox_id); /* we originated exch */
42681 ep = fc_exch_find(mp, xid);
42682 if (!ep) {
42683- atomic_inc(&mp->stats.xid_not_found);
42684+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42685 reject = FC_RJT_OX_ID;
42686 goto out;
42687 }
42688@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42689 ep = fc_exch_find(mp, xid);
42690 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
42691 if (ep) {
42692- atomic_inc(&mp->stats.xid_busy);
42693+ atomic_inc_unchecked(&mp->stats.xid_busy);
42694 reject = FC_RJT_RX_ID;
42695 goto rel;
42696 }
42697@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42698 }
42699 xid = ep->xid; /* get our XID */
42700 } else if (!ep) {
42701- atomic_inc(&mp->stats.xid_not_found);
42702+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42703 reject = FC_RJT_RX_ID; /* XID not found */
42704 goto out;
42705 }
42706@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42707 } else {
42708 sp = &ep->seq;
42709 if (sp->id != fh->fh_seq_id) {
42710- atomic_inc(&mp->stats.seq_not_found);
42711+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42712 if (f_ctl & FC_FC_END_SEQ) {
42713 /*
42714 * Update sequence_id based on incoming last
42715@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42716
42717 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
42718 if (!ep) {
42719- atomic_inc(&mp->stats.xid_not_found);
42720+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42721 goto out;
42722 }
42723 if (ep->esb_stat & ESB_ST_COMPLETE) {
42724- atomic_inc(&mp->stats.xid_not_found);
42725+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42726 goto rel;
42727 }
42728 if (ep->rxid == FC_XID_UNKNOWN)
42729 ep->rxid = ntohs(fh->fh_rx_id);
42730 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
42731- atomic_inc(&mp->stats.xid_not_found);
42732+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42733 goto rel;
42734 }
42735 if (ep->did != ntoh24(fh->fh_s_id) &&
42736 ep->did != FC_FID_FLOGI) {
42737- atomic_inc(&mp->stats.xid_not_found);
42738+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42739 goto rel;
42740 }
42741 sof = fr_sof(fp);
42742@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42743 sp->ssb_stat |= SSB_ST_RESP;
42744 sp->id = fh->fh_seq_id;
42745 } else if (sp->id != fh->fh_seq_id) {
42746- atomic_inc(&mp->stats.seq_not_found);
42747+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42748 goto rel;
42749 }
42750
42751@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42752 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
42753
42754 if (!sp)
42755- atomic_inc(&mp->stats.xid_not_found);
42756+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42757 else
42758- atomic_inc(&mp->stats.non_bls_resp);
42759+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
42760
42761 fc_frame_free(fp);
42762 }
42763@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
42764
42765 list_for_each_entry(ema, &lport->ema_list, ema_list) {
42766 mp = ema->mp;
42767- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
42768+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
42769 st->fc_no_free_exch_xid +=
42770- atomic_read(&mp->stats.no_free_exch_xid);
42771- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
42772- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
42773- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
42774- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
42775+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
42776+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
42777+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
42778+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
42779+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
42780 }
42781 }
42782 EXPORT_SYMBOL(fc_exch_update_stats);
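
Every counter converted above (and in the lpfc, pmcraid, qla4xxx, SCSI core, fc/iscsi/srp transport and usbip hunks that follow) is a pure statistics or sequence counter: wrap-around is harmless there, so PAX_REFCOUNT's overflow detection would only raise false positives. atomic_unchecked_t keeps the layout and API of atomic_t but skips the overflow check that the hardened atomic_t carries. Side by side:

    static atomic_t           refcnt = ATOMIC_INIT(1);	/* overflow-checked */
    static atomic_unchecked_t events = ATOMIC_INIT(0);	/* free-running */

    static void example_tick(void)
    {
    	atomic_inc(&refcnt);		/* overflow -> PaX report, saturation */
    	atomic_inc_unchecked(&events);	/* plain increment, may wrap */
    }

This split is what makes the protection usable: real reference counts stay checked while event counters cannot trip the detector.
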
42783diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
42784index bdb81cd..d3c7c2c 100644
42785--- a/drivers/scsi/libsas/sas_ata.c
42786+++ b/drivers/scsi/libsas/sas_ata.c
42787@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
42788 .postreset = ata_std_postreset,
42789 .error_handler = ata_std_error_handler,
42790 .post_internal_cmd = sas_ata_post_internal,
42791- .qc_defer = ata_std_qc_defer,
42792+ .qc_defer = ata_std_qc_defer,
42793 .qc_prep = ata_noop_qc_prep,
42794 .qc_issue = sas_ata_qc_issue,
42795 .qc_fill_rtf = sas_ata_qc_fill_rtf,
42796diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
42797index df4c13a..a51e90c 100644
42798--- a/drivers/scsi/lpfc/lpfc.h
42799+++ b/drivers/scsi/lpfc/lpfc.h
42800@@ -424,7 +424,7 @@ struct lpfc_vport {
42801 struct dentry *debug_nodelist;
42802 struct dentry *vport_debugfs_root;
42803 struct lpfc_debugfs_trc *disc_trc;
42804- atomic_t disc_trc_cnt;
42805+ atomic_unchecked_t disc_trc_cnt;
42806 #endif
42807 uint8_t stat_data_enabled;
42808 uint8_t stat_data_blocked;
42809@@ -842,8 +842,8 @@ struct lpfc_hba {
42810 struct timer_list fabric_block_timer;
42811 unsigned long bit_flags;
42812 #define FABRIC_COMANDS_BLOCKED 0
42813- atomic_t num_rsrc_err;
42814- atomic_t num_cmd_success;
42815+ atomic_unchecked_t num_rsrc_err;
42816+ atomic_unchecked_t num_cmd_success;
42817 unsigned long last_rsrc_error_time;
42818 unsigned long last_ramp_down_time;
42819 unsigned long last_ramp_up_time;
42820@@ -879,7 +879,7 @@ struct lpfc_hba {
42821
42822 struct dentry *debug_slow_ring_trc;
42823 struct lpfc_debugfs_trc *slow_ring_trc;
42824- atomic_t slow_ring_trc_cnt;
42825+ atomic_unchecked_t slow_ring_trc_cnt;
42826 /* iDiag debugfs sub-directory */
42827 struct dentry *idiag_root;
42828 struct dentry *idiag_pci_cfg;
42829diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
42830index f63f5ff..de29189 100644
42831--- a/drivers/scsi/lpfc/lpfc_debugfs.c
42832+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
42833@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
42834
42835 #include <linux/debugfs.h>
42836
42837-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42838+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42839 static unsigned long lpfc_debugfs_start_time = 0L;
42840
42841 /* iDiag */
42842@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
42843 lpfc_debugfs_enable = 0;
42844
42845 len = 0;
42846- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
42847+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
42848 (lpfc_debugfs_max_disc_trc - 1);
42849 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
42850 dtp = vport->disc_trc + i;
42851@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
42852 lpfc_debugfs_enable = 0;
42853
42854 len = 0;
42855- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
42856+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
42857 (lpfc_debugfs_max_slow_ring_trc - 1);
42858 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
42859 dtp = phba->slow_ring_trc + i;
42860@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
42861 !vport || !vport->disc_trc)
42862 return;
42863
42864- index = atomic_inc_return(&vport->disc_trc_cnt) &
42865+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
42866 (lpfc_debugfs_max_disc_trc - 1);
42867 dtp = vport->disc_trc + index;
42868 dtp->fmt = fmt;
42869 dtp->data1 = data1;
42870 dtp->data2 = data2;
42871 dtp->data3 = data3;
42872- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42873+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42874 dtp->jif = jiffies;
42875 #endif
42876 return;
42877@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
42878 !phba || !phba->slow_ring_trc)
42879 return;
42880
42881- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
42882+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
42883 (lpfc_debugfs_max_slow_ring_trc - 1);
42884 dtp = phba->slow_ring_trc + index;
42885 dtp->fmt = fmt;
42886 dtp->data1 = data1;
42887 dtp->data2 = data2;
42888 dtp->data3 = data3;
42889- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42890+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42891 dtp->jif = jiffies;
42892 #endif
42893 return;
42894@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42895 "slow_ring buffer\n");
42896 goto debug_failed;
42897 }
42898- atomic_set(&phba->slow_ring_trc_cnt, 0);
42899+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
42900 memset(phba->slow_ring_trc, 0,
42901 (sizeof(struct lpfc_debugfs_trc) *
42902 lpfc_debugfs_max_slow_ring_trc));
42903@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42904 "buffer\n");
42905 goto debug_failed;
42906 }
42907- atomic_set(&vport->disc_trc_cnt, 0);
42908+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
42909
42910 snprintf(name, sizeof(name), "discovery_trace");
42911 vport->debug_disc_trc =
42912diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
42913index 89ad558..76956c4 100644
42914--- a/drivers/scsi/lpfc/lpfc_init.c
42915+++ b/drivers/scsi/lpfc/lpfc_init.c
42916@@ -10618,8 +10618,10 @@ lpfc_init(void)
42917 "misc_register returned with status %d", error);
42918
42919 if (lpfc_enable_npiv) {
42920- lpfc_transport_functions.vport_create = lpfc_vport_create;
42921- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42922+ pax_open_kernel();
42923+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
42924+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42925+ pax_close_kernel();
42926 }
42927 lpfc_transport_template =
42928 fc_attach_transport(&lpfc_transport_functions);
42929diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
42930index 60e5a17..ff7a793 100644
42931--- a/drivers/scsi/lpfc/lpfc_scsi.c
42932+++ b/drivers/scsi/lpfc/lpfc_scsi.c
42933@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
42934 uint32_t evt_posted;
42935
42936 spin_lock_irqsave(&phba->hbalock, flags);
42937- atomic_inc(&phba->num_rsrc_err);
42938+ atomic_inc_unchecked(&phba->num_rsrc_err);
42939 phba->last_rsrc_error_time = jiffies;
42940
42941 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
42942@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
42943 unsigned long flags;
42944 struct lpfc_hba *phba = vport->phba;
42945 uint32_t evt_posted;
42946- atomic_inc(&phba->num_cmd_success);
42947+ atomic_inc_unchecked(&phba->num_cmd_success);
42948
42949 if (vport->cfg_lun_queue_depth <= queue_depth)
42950 return;
42951@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42952 unsigned long num_rsrc_err, num_cmd_success;
42953 int i;
42954
42955- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
42956- num_cmd_success = atomic_read(&phba->num_cmd_success);
42957+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
42958+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
42959
42960 /*
42961 * The error and success command counters are global per
42962@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42963 }
42964 }
42965 lpfc_destroy_vport_work_array(phba, vports);
42966- atomic_set(&phba->num_rsrc_err, 0);
42967- atomic_set(&phba->num_cmd_success, 0);
42968+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42969+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42970 }
42971
42972 /**
42973@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
42974 }
42975 }
42976 lpfc_destroy_vport_work_array(phba, vports);
42977- atomic_set(&phba->num_rsrc_err, 0);
42978- atomic_set(&phba->num_cmd_success, 0);
42979+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42980+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42981 }
42982
42983 /**
42984diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
42985index b46f5e9..c4c4ccb 100644
42986--- a/drivers/scsi/pmcraid.c
42987+++ b/drivers/scsi/pmcraid.c
42988@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
42989 res->scsi_dev = scsi_dev;
42990 scsi_dev->hostdata = res;
42991 res->change_detected = 0;
42992- atomic_set(&res->read_failures, 0);
42993- atomic_set(&res->write_failures, 0);
42994+ atomic_set_unchecked(&res->read_failures, 0);
42995+ atomic_set_unchecked(&res->write_failures, 0);
42996 rc = 0;
42997 }
42998 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
42999@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
43000
43001 /* If this was a SCSI read/write command keep count of errors */
43002 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
43003- atomic_inc(&res->read_failures);
43004+ atomic_inc_unchecked(&res->read_failures);
43005 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
43006- atomic_inc(&res->write_failures);
43007+ atomic_inc_unchecked(&res->write_failures);
43008
43009 if (!RES_IS_GSCSI(res->cfg_entry) &&
43010 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
43011@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
43012 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
43013 * hrrq_id assigned here in queuecommand
43014 */
43015- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
43016+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
43017 pinstance->num_hrrq;
43018 cmd->cmd_done = pmcraid_io_done;
43019
43020@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
43021 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
43022 * hrrq_id assigned here in queuecommand
43023 */
43024- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
43025+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
43026 pinstance->num_hrrq;
43027
43028 if (request_size) {
43029@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
43030
43031 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
43032 /* add resources only after host is added into system */
43033- if (!atomic_read(&pinstance->expose_resources))
43034+ if (!atomic_read_unchecked(&pinstance->expose_resources))
43035 return;
43036
43037 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
43038@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
43039 init_waitqueue_head(&pinstance->reset_wait_q);
43040
43041 atomic_set(&pinstance->outstanding_cmds, 0);
43042- atomic_set(&pinstance->last_message_id, 0);
43043- atomic_set(&pinstance->expose_resources, 0);
43044+ atomic_set_unchecked(&pinstance->last_message_id, 0);
43045+ atomic_set_unchecked(&pinstance->expose_resources, 0);
43046
43047 INIT_LIST_HEAD(&pinstance->free_res_q);
43048 INIT_LIST_HEAD(&pinstance->used_res_q);
43049@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
43050 /* Schedule worker thread to handle CCN and take care of adding and
43051 * removing devices to OS
43052 */
43053- atomic_set(&pinstance->expose_resources, 1);
43054+ atomic_set_unchecked(&pinstance->expose_resources, 1);
43055 schedule_work(&pinstance->worker_q);
43056 return rc;
43057
43058diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
43059index e1d150f..6c6df44 100644
43060--- a/drivers/scsi/pmcraid.h
43061+++ b/drivers/scsi/pmcraid.h
43062@@ -748,7 +748,7 @@ struct pmcraid_instance {
43063 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
43064
43065 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
43066- atomic_t last_message_id;
43067+ atomic_unchecked_t last_message_id;
43068
43069 /* configuration table */
43070 struct pmcraid_config_table *cfg_table;
43071@@ -777,7 +777,7 @@ struct pmcraid_instance {
43072 atomic_t outstanding_cmds;
43073
43074 /* should add/delete resources to mid-layer now ?*/
43075- atomic_t expose_resources;
43076+ atomic_unchecked_t expose_resources;
43077
43078
43079
43080@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
43081 struct pmcraid_config_table_entry_ext cfg_entry_ext;
43082 };
43083 struct scsi_device *scsi_dev; /* Link scsi_device structure */
43084- atomic_t read_failures; /* count of failed READ commands */
43085- atomic_t write_failures; /* count of failed WRITE commands */
43086+ atomic_unchecked_t read_failures; /* count of failed READ commands */
43087+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
43088
43089 /* To indicate add/delete/modify during CCN */
43090 u8 change_detected;
43091diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
43092index 83d7984..a27d947 100644
43093--- a/drivers/scsi/qla2xxx/qla_attr.c
43094+++ b/drivers/scsi/qla2xxx/qla_attr.c
43095@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
43096 return 0;
43097 }
43098
43099-struct fc_function_template qla2xxx_transport_functions = {
43100+fc_function_template_no_const qla2xxx_transport_functions = {
43101
43102 .show_host_node_name = 1,
43103 .show_host_port_name = 1,
43104@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
43105 .bsg_timeout = qla24xx_bsg_timeout,
43106 };
43107
43108-struct fc_function_template qla2xxx_transport_vport_functions = {
43109+fc_function_template_no_const qla2xxx_transport_vport_functions = {
43110
43111 .show_host_node_name = 1,
43112 .show_host_port_name = 1,
43113diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
43114index 2411d1a..4673766 100644
43115--- a/drivers/scsi/qla2xxx/qla_gbl.h
43116+++ b/drivers/scsi/qla2xxx/qla_gbl.h
43117@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
43118 struct device_attribute;
43119 extern struct device_attribute *qla2x00_host_attrs[];
43120 struct fc_function_template;
43121-extern struct fc_function_template qla2xxx_transport_functions;
43122-extern struct fc_function_template qla2xxx_transport_vport_functions;
43123+extern fc_function_template_no_const qla2xxx_transport_functions;
43124+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
43125 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
43126 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
43127 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
43128diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
43129index 10d23f8..a7d5d4c 100644
43130--- a/drivers/scsi/qla2xxx/qla_os.c
43131+++ b/drivers/scsi/qla2xxx/qla_os.c
43132@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
43133 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
43134 /* Ok, a 64bit DMA mask is applicable. */
43135 ha->flags.enable_64bit_addressing = 1;
43136- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
43137- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
43138+ pax_open_kernel();
43139+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
43140+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
43141+ pax_close_kernel();
43142 return;
43143 }
43144 }
43145diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
43146index 329d553..f20d31d 100644
43147--- a/drivers/scsi/qla4xxx/ql4_def.h
43148+++ b/drivers/scsi/qla4xxx/ql4_def.h
43149@@ -273,7 +273,7 @@ struct ddb_entry {
43150 * (4000 only) */
43151 atomic_t relogin_timer; /* Max Time to wait for
43152 * relogin to complete */
43153- atomic_t relogin_retry_count; /* Num of times relogin has been
43154+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
43155 * retried */
43156 uint32_t default_time2wait; /* Default Min time between
43157 * relogins (+aens) */
43158diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
43159index 4cec123..7c1329f 100644
43160--- a/drivers/scsi/qla4xxx/ql4_os.c
43161+++ b/drivers/scsi/qla4xxx/ql4_os.c
43162@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
43163 */
43164 if (!iscsi_is_session_online(cls_sess)) {
43165 /* Reset retry relogin timer */
43166- atomic_inc(&ddb_entry->relogin_retry_count);
43167+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
43168 DEBUG2(ql4_printk(KERN_INFO, ha,
43169 "%s: index[%d] relogin timed out-retrying"
43170 " relogin (%d), retry (%d)\n", __func__,
43171 ddb_entry->fw_ddb_index,
43172- atomic_read(&ddb_entry->relogin_retry_count),
43173+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
43174 ddb_entry->default_time2wait + 4));
43175 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
43176 atomic_set(&ddb_entry->retry_relogin_timer,
43177@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
43178
43179 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
43180 atomic_set(&ddb_entry->relogin_timer, 0);
43181- atomic_set(&ddb_entry->relogin_retry_count, 0);
43182+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
43183 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
43184 ddb_entry->default_relogin_timeout =
43185 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
43186diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
43187index 2c0d0ec..4e8681a 100644
43188--- a/drivers/scsi/scsi.c
43189+++ b/drivers/scsi/scsi.c
43190@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
43191 unsigned long timeout;
43192 int rtn = 0;
43193
43194- atomic_inc(&cmd->device->iorequest_cnt);
43195+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
43196
43197 /* check if the device is still usable */
43198 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
43199diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
43200index f1bf5af..f67e943 100644
43201--- a/drivers/scsi/scsi_lib.c
43202+++ b/drivers/scsi/scsi_lib.c
43203@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
43204 shost = sdev->host;
43205 scsi_init_cmd_errh(cmd);
43206 cmd->result = DID_NO_CONNECT << 16;
43207- atomic_inc(&cmd->device->iorequest_cnt);
43208+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
43209
43210 /*
43211 * SCSI request completion path will do scsi_device_unbusy(),
43212@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
43213
43214 INIT_LIST_HEAD(&cmd->eh_entry);
43215
43216- atomic_inc(&cmd->device->iodone_cnt);
43217+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
43218 if (cmd->result)
43219- atomic_inc(&cmd->device->ioerr_cnt);
43220+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
43221
43222 disposition = scsi_decide_disposition(cmd);
43223 if (disposition != SUCCESS &&
43224diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
43225index 931a7d9..0c2a754 100644
43226--- a/drivers/scsi/scsi_sysfs.c
43227+++ b/drivers/scsi/scsi_sysfs.c
43228@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
43229 char *buf) \
43230 { \
43231 struct scsi_device *sdev = to_scsi_device(dev); \
43232- unsigned long long count = atomic_read(&sdev->field); \
43233+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
43234 return snprintf(buf, 20, "0x%llx\n", count); \
43235 } \
43236 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
43237diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
43238index 84a1fdf..693b0d6 100644
43239--- a/drivers/scsi/scsi_tgt_lib.c
43240+++ b/drivers/scsi/scsi_tgt_lib.c
43241@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
43242 int err;
43243
43244 dprintk("%lx %u\n", uaddr, len);
43245- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
43246+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
43247 if (err) {
43248 /*
43249 * TODO: need to fixup sg_tablesize, max_segment_size,
43250diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
43251index e894ca7..de9d7660 100644
43252--- a/drivers/scsi/scsi_transport_fc.c
43253+++ b/drivers/scsi/scsi_transport_fc.c
43254@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
43255 * Netlink Infrastructure
43256 */
43257
43258-static atomic_t fc_event_seq;
43259+static atomic_unchecked_t fc_event_seq;
43260
43261 /**
43262 * fc_get_event_number - Obtain the next sequential FC event number
43263@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
43264 u32
43265 fc_get_event_number(void)
43266 {
43267- return atomic_add_return(1, &fc_event_seq);
43268+ return atomic_add_return_unchecked(1, &fc_event_seq);
43269 }
43270 EXPORT_SYMBOL(fc_get_event_number);
43271
43272@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
43273 {
43274 int error;
43275
43276- atomic_set(&fc_event_seq, 0);
43277+ atomic_set_unchecked(&fc_event_seq, 0);
43278
43279 error = transport_class_register(&fc_host_class);
43280 if (error)
43281@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
43282 char *cp;
43283
43284 *val = simple_strtoul(buf, &cp, 0);
43285- if ((*cp && (*cp != '\n')) || (*val < 0))
43286+ if (*cp && (*cp != '\n'))
43287 return -EINVAL;
43288 /*
43289 * Check for overflow; dev_loss_tmo is u32
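
The fc_str_to_dev_loss change above deletes `(*val < 0)`: *val is an unsigned long, so the comparison is always false, and newer compilers warn about it. The corrected parse in isolation, with a hypothetical name:

    static int example_parse_ul(const char *buf, unsigned long *val)
    {
    	char *cp;

    	*val = simple_strtoul(buf, &cp, 0);
    	/* `*val < 0' was dead code for an unsigned type; checking
    	 * the terminator is the whole validation at this point */
    	if (*cp && *cp != '\n')
    		return -EINVAL;
    	return 0;
    }
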
43290diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
43291index 31969f2..2b348f0 100644
43292--- a/drivers/scsi/scsi_transport_iscsi.c
43293+++ b/drivers/scsi/scsi_transport_iscsi.c
43294@@ -79,7 +79,7 @@ struct iscsi_internal {
43295 struct transport_container session_cont;
43296 };
43297
43298-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
43299+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
43300 static struct workqueue_struct *iscsi_eh_timer_workq;
43301
43302 static DEFINE_IDA(iscsi_sess_ida);
43303@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
43304 int err;
43305
43306 ihost = shost->shost_data;
43307- session->sid = atomic_add_return(1, &iscsi_session_nr);
43308+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
43309
43310 if (target_id == ISCSI_MAX_TARGET) {
43311 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
43312@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
43313 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
43314 ISCSI_TRANSPORT_VERSION);
43315
43316- atomic_set(&iscsi_session_nr, 0);
43317+ atomic_set_unchecked(&iscsi_session_nr, 0);
43318
43319 err = class_register(&iscsi_transport_class);
43320 if (err)
43321diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
43322index f379c7f..e8fc69c 100644
43323--- a/drivers/scsi/scsi_transport_srp.c
43324+++ b/drivers/scsi/scsi_transport_srp.c
43325@@ -33,7 +33,7 @@
43326 #include "scsi_transport_srp_internal.h"
43327
43328 struct srp_host_attrs {
43329- atomic_t next_port_id;
43330+ atomic_unchecked_t next_port_id;
43331 };
43332 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
43333
43334@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
43335 struct Scsi_Host *shost = dev_to_shost(dev);
43336 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
43337
43338- atomic_set(&srp_host->next_port_id, 0);
43339+ atomic_set_unchecked(&srp_host->next_port_id, 0);
43340 return 0;
43341 }
43342
43343@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
43344 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
43345 rport->roles = ids->roles;
43346
43347- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
43348+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
43349 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
43350
43351 transport_setup_device(&rport->dev);
43352diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
43353index 7992635..609faf8 100644
43354--- a/drivers/scsi/sd.c
43355+++ b/drivers/scsi/sd.c
43356@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
43357 sdkp->disk = gd;
43358 sdkp->index = index;
43359 atomic_set(&sdkp->openers, 0);
43360- atomic_set(&sdkp->device->ioerr_cnt, 0);
43361+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
43362
43363 if (!sdp->request_queue->rq_timeout) {
43364 if (sdp->type != TYPE_MOD)
43365diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
43366index be2c9a6..275525c 100644
43367--- a/drivers/scsi/sg.c
43368+++ b/drivers/scsi/sg.c
43369@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
43370 sdp->disk->disk_name,
43371 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
43372 NULL,
43373- (char *)arg);
43374+ (char __user *)arg);
43375 case BLKTRACESTART:
43376 return blk_trace_startstop(sdp->device->request_queue, 1);
43377 case BLKTRACESTOP:
43378diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
43379index 19ee901..6e8c2ef 100644
43380--- a/drivers/spi/spi.c
43381+++ b/drivers/spi/spi.c
43382@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
43383 EXPORT_SYMBOL_GPL(spi_bus_unlock);
43384
43385 /* portable code must never pass more than 32 bytes */
43386-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
43387+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
43388
43389 static u8 *buf;
43390
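
The SPI_BUFSIZ tweak above follows from the kernel's max() macro (include/linux/kernel.h), which type-checks its operands at compile time; since this patch elsewhere makes x86's L1_CACHE_BYTES an unsigned long (1UL << L1_CACHE_SHIFT), the plain int constant 32 would no longer match SMP_CACHE_BYTES and must become 32UL. The 3.8-era macro for reference:

    #define max(x, y) ({			\
    	typeof(x) _max1 = (x);			\
    	typeof(y) _max2 = (y);			\
    	(void) (&_max1 == &_max2);		\
    	_max1 > _max2 ? _max1 : _max2; })

The pointer comparison on the third line is the type check: comparing int * against unsigned long * draws a "distinct pointer types" warning, so both operands must share a type.
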
43391diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
43392index c7a5f97..71ecd35 100644
43393--- a/drivers/staging/iio/iio_hwmon.c
43394+++ b/drivers/staging/iio/iio_hwmon.c
43395@@ -72,7 +72,7 @@ static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
43396 static int iio_hwmon_probe(struct platform_device *pdev)
43397 {
43398 struct iio_hwmon_state *st;
43399- struct sensor_device_attribute *a;
43400+ sensor_device_attribute_no_const *a;
43401 int ret, i;
43402 int in_i = 1, temp_i = 1, curr_i = 1;
43403 enum iio_chan_type type;
43404diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
43405index 34afc16..ffe44dd 100644
43406--- a/drivers/staging/octeon/ethernet-rx.c
43407+++ b/drivers/staging/octeon/ethernet-rx.c
43408@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
43409 /* Increment RX stats for virtual ports */
43410 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
43411 #ifdef CONFIG_64BIT
43412- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
43413- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
43414+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
43415+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
43416 #else
43417- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
43418- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
43419+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
43420+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
43421 #endif
43422 }
43423 netif_receive_skb(skb);
43424@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
43425 dev->name);
43426 */
43427 #ifdef CONFIG_64BIT
43428- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
43429+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);

43430 #else
43431- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
43432+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
43433 #endif
43434 dev_kfree_skb_irq(skb);
43435 }
43436diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
43437index ef32dc1..a159d68 100644
43438--- a/drivers/staging/octeon/ethernet.c
43439+++ b/drivers/staging/octeon/ethernet.c
43440@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
43441 * since the RX tasklet also increments it.
43442 */
43443 #ifdef CONFIG_64BIT
43444- atomic64_add(rx_status.dropped_packets,
43445- (atomic64_t *)&priv->stats.rx_dropped);
43446+ atomic64_add_unchecked(rx_status.dropped_packets,
43447+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
43448 #else
43449- atomic_add(rx_status.dropped_packets,
43450- (atomic_t *)&priv->stats.rx_dropped);
43451+ atomic_add_unchecked(rx_status.dropped_packets,
43452+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
43453 #endif
43454 }
43455
43456diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
43457index a2b7e03..aaf3630 100644
43458--- a/drivers/staging/ramster/tmem.c
43459+++ b/drivers/staging/ramster/tmem.c
43460@@ -50,25 +50,25 @@
43461 * A tmem host implementation must use this function to register callbacks
43462 * for memory allocation.
43463 */
43464-static struct tmem_hostops tmem_hostops;
43465+static struct tmem_hostops *tmem_hostops;
43466
43467 static void tmem_objnode_tree_init(void);
43468
43469 void tmem_register_hostops(struct tmem_hostops *m)
43470 {
43471 tmem_objnode_tree_init();
43472- tmem_hostops = *m;
43473+ tmem_hostops = m;
43474 }
43475
43476 /*
43477 * A tmem host implementation must use this function to register
43478 * callbacks for a page-accessible memory (PAM) implementation.
43479 */
43480-static struct tmem_pamops tmem_pamops;
43481+static struct tmem_pamops *tmem_pamops;
43482
43483 void tmem_register_pamops(struct tmem_pamops *m)
43484 {
43485- tmem_pamops = *m;
43486+ tmem_pamops = m;
43487 }
43488
43489 /*
43490@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
43491 obj->pampd_count = 0;
43492 #ifdef CONFIG_RAMSTER
43493 if (tmem_pamops.new_obj != NULL)
43494- (*tmem_pamops.new_obj)(obj);
43495+ (tmem_pamops->new_obj)(obj);
43496 #endif
43497 SET_SENTINEL(obj, OBJ);
43498
43499@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
43500 rbnode = rb_next(rbnode);
43501 tmem_pampd_destroy_all_in_obj(obj, true);
43502 tmem_obj_free(obj, hb);
43503- (*tmem_hostops.obj_free)(obj, pool);
43504+ (tmem_hostops->obj_free)(obj, pool);
43505 }
43506 spin_unlock(&hb->lock);
43507 }
43508@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
43509 ASSERT_SENTINEL(obj, OBJ);
43510 BUG_ON(obj->pool == NULL);
43511 ASSERT_SENTINEL(obj->pool, POOL);
43512- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
43513+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
43514 if (unlikely(objnode == NULL))
43515 goto out;
43516 objnode->obj = obj;
43517@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
43518 ASSERT_SENTINEL(pool, POOL);
43519 objnode->obj->objnode_count--;
43520 objnode->obj = NULL;
43521- (*tmem_hostops.objnode_free)(objnode, pool);
43522+ (tmem_hostops->objnode_free)(objnode, pool);
43523 }
43524
43525 /*
43526@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
43527 void *old_pampd = *(void **)slot;
43528 *(void **)slot = new_pampd;
43529 if (!no_free)
43530- (*tmem_pamops.free)(old_pampd, obj->pool,
43531+ (tmem_pamops->free)(old_pampd, obj->pool,
43532 NULL, 0, false);
43533 ret = new_pampd;
43534 }
43535@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
43536 if (objnode->slots[i]) {
43537 if (ht == 1) {
43538 obj->pampd_count--;
43539- (*tmem_pamops.free)(objnode->slots[i],
43540+ (tmem_pamops->free)(objnode->slots[i],
43541 obj->pool, NULL, 0, true);
43542 objnode->slots[i] = NULL;
43543 continue;
43544@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
43545 return;
43546 if (obj->objnode_tree_height == 0) {
43547 obj->pampd_count--;
43548- (*tmem_pamops.free)(obj->objnode_tree_root,
43549+ (tmem_pamops->free)(obj->objnode_tree_root,
43550 obj->pool, NULL, 0, true);
43551 } else {
43552 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
43553@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
43554 obj->objnode_tree_root = NULL;
43555 #ifdef CONFIG_RAMSTER
43556 if (tmem_pamops.free_obj != NULL)
43557- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
43558+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
43559 #endif
43560 }
43561
43562@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
43563 /* if found, is a dup put, flush the old one */
43564 pampd_del = tmem_pampd_delete_from_obj(obj, index);
43565 BUG_ON(pampd_del != pampd);
43566- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
43567+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
43568 if (obj->pampd_count == 0) {
43569 objnew = obj;
43570 objfound = NULL;
43571@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
43572 pampd = NULL;
43573 }
43574 } else {
43575- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
43576+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
43577 if (unlikely(obj == NULL)) {
43578 ret = -ENOMEM;
43579 goto out;
43580@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
43581 if (unlikely(ret == -ENOMEM))
43582 /* may have partially built objnode tree ("stump") */
43583 goto delete_and_free;
43584- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
43585+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
43586 goto out;
43587
43588 delete_and_free:
43589 (void)tmem_pampd_delete_from_obj(obj, index);
43590 if (pampd)
43591- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
43592+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
43593 if (objnew) {
43594 tmem_obj_free(objnew, hb);
43595- (*tmem_hostops.obj_free)(objnew, pool);
43596+ (tmem_hostops->obj_free)(objnew, pool);
43597 }
43598 out:
43599 spin_unlock(&hb->lock);
43600@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
43601 if (pampd != NULL) {
43602 BUG_ON(obj == NULL);
43603 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
43604- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
43605+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
43606 } else if (delete) {
43607 BUG_ON(obj == NULL);
43608 (void)tmem_pampd_delete_from_obj(obj, index);
43609@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
43610 int ret = 0;
43611
43612 if (!is_ephemeral(pool))
43613- new_pampd = (*tmem_pamops.repatriate_preload)(
43614+ new_pampd = (tmem_pamops->repatriate_preload)(
43615 old_pampd, pool, oidp, index, &intransit);
43616 if (intransit)
43617 ret = -EAGAIN;
43618@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
43619 /* must release the hb->lock else repatriate can't sleep */
43620 spin_unlock(&hb->lock);
43621 if (!intransit)
43622- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
43623+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
43624 oidp, index, free, data);
43625 if (ret == -EAGAIN) {
43626 /* rare I think, but should cond_resched()??? */
43627@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
43628 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
43629 /* if we bug here, pamops wasn't properly set up for ramster */
43630 BUG_ON(tmem_pamops.replace_in_obj == NULL);
43631- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
43632+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
43633 out:
43634 spin_unlock(&hb->lock);
43635 return ret;
43636@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
43637 if (free) {
43638 if (obj->pampd_count == 0) {
43639 tmem_obj_free(obj, hb);
43640- (*tmem_hostops.obj_free)(obj, pool);
43641+ (tmem_hostops->obj_free)(obj, pool);
43642 obj = NULL;
43643 }
43644 }
43645 if (free)
43646- ret = (*tmem_pamops.get_data_and_free)(
43647+ ret = (tmem_pamops->get_data_and_free)(
43648 data, sizep, raw, pampd, pool, oidp, index);
43649 else
43650- ret = (*tmem_pamops.get_data)(
43651+ ret = (tmem_pamops->get_data)(
43652 data, sizep, raw, pampd, pool, oidp, index);
43653 if (ret < 0)
43654 goto out;
43655@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
43656 pampd = tmem_pampd_delete_from_obj(obj, index);
43657 if (pampd == NULL)
43658 goto out;
43659- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
43660+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
43661 if (obj->pampd_count == 0) {
43662 tmem_obj_free(obj, hb);
43663- (*tmem_hostops.obj_free)(obj, pool);
43664+ (tmem_hostops->obj_free)(obj, pool);
43665 }
43666 ret = 0;
43667
43668@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
43669 goto out;
43670 tmem_pampd_destroy_all_in_obj(obj, false);
43671 tmem_obj_free(obj, hb);
43672- (*tmem_hostops.obj_free)(obj, pool);
43673+ (tmem_hostops->obj_free)(obj, pool);
43674 ret = 0;
43675
43676 out:
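
The tmem.c conversion replaces by-value registration of the host and PAM callback tables with pointer registration: tmem_register_hostops()/tmem_register_pamops() now just store the caller's pointer, and every call site switches from (*tmem_hostops.fn)(...) to (tmem_hostops->fn)(...). This lets the provider keep its ops table static (and constifiable) instead of having it copied into a writable file-scope struct. The pattern, reduced to hypothetical names:

    struct example_hostops {
    	void *(*obj_alloc)(void *pool);
    	void  (*obj_free)(void *obj, void *pool);
    };

    static struct example_hostops *example_hostops;	/* was a struct copy */

    void example_register_hostops(struct example_hostops *m)
    {
    	example_hostops = m;		/* just remember the table */
    }

    static void *example_obj_alloc(void *pool)
    {
    	return (example_hostops->obj_alloc)(pool);
    }
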
43677diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
43678index dc23395..cf7e9b1 100644
43679--- a/drivers/staging/rtl8712/rtl871x_io.h
43680+++ b/drivers/staging/rtl8712/rtl871x_io.h
43681@@ -108,7 +108,7 @@ struct _io_ops {
43682 u8 *pmem);
43683 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
43684 u8 *pmem);
43685-};
43686+} __no_const;
43687
43688 struct io_req {
43689 struct list_head list;
43690diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
43691index 1f5088b..0e59820 100644
43692--- a/drivers/staging/sbe-2t3e3/netdev.c
43693+++ b/drivers/staging/sbe-2t3e3/netdev.c
43694@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43695 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
43696
43697 if (rlen)
43698- if (copy_to_user(data, &resp, rlen))
43699+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
43700 return -EFAULT;
43701
43702 return 0;
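
The sbe-2t3e3 fix above bounds a firmware-supplied length before copy_to_user(): resp is an on-stack reply buffer, and an rlen larger than sizeof(resp) would have leaked adjacent kernel stack to userspace. The hardened shape in isolation, with hypothetical names:

    static int example_reply(void __user *data, const void *resp,
    			 size_t resp_size, size_t rlen)
    {
    	/* refuse to copy more than the reply buffer actually holds */
    	if (rlen > resp_size || copy_to_user(data, resp, rlen))
    		return -EFAULT;
    	return 0;
    }
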
43703diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
43704index 5dddc4d..34fcb2f 100644
43705--- a/drivers/staging/usbip/vhci.h
43706+++ b/drivers/staging/usbip/vhci.h
43707@@ -83,7 +83,7 @@ struct vhci_hcd {
43708 unsigned resuming:1;
43709 unsigned long re_timeout;
43710
43711- atomic_t seqnum;
43712+ atomic_unchecked_t seqnum;
43713
43714 /*
43715 * NOTE:
43716diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
43717index c3aa219..bf8b3de 100644
43718--- a/drivers/staging/usbip/vhci_hcd.c
43719+++ b/drivers/staging/usbip/vhci_hcd.c
43720@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
43721 return;
43722 }
43723
43724- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
43725+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43726 if (priv->seqnum == 0xffff)
43727 dev_info(&urb->dev->dev, "seqnum max\n");
43728
43729@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
43730 return -ENOMEM;
43731 }
43732
43733- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
43734+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43735 if (unlink->seqnum == 0xffff)
43736 pr_info("seqnum max\n");
43737
43738@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
43739 vdev->rhport = rhport;
43740 }
43741
43742- atomic_set(&vhci->seqnum, 0);
43743+ atomic_set_unchecked(&vhci->seqnum, 0);
43744 spin_lock_init(&vhci->lock);
43745
43746 hcd->power_budget = 0; /* no limit */
43747diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
43748index ba5f1c0..11d8122 100644
43749--- a/drivers/staging/usbip/vhci_rx.c
43750+++ b/drivers/staging/usbip/vhci_rx.c
43751@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
43752 if (!urb) {
43753 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
43754 pr_info("max seqnum %d\n",
43755- atomic_read(&the_controller->seqnum));
43756+ atomic_read_unchecked(&the_controller->seqnum));
43757 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
43758 return;
43759 }
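
This seqnum conversion is the atomic_unchecked_t pattern that recurs through the rest of the patch (usbip, uio, usbatm, target_core): under PAX_REFCOUNT, plain atomic_t arithmetic is instrumented to catch signed overflow, which would otherwise let a wrapped reference count become a use-after-free. Counters that are expected to wrap, such as this sequence number, are moved to atomic_unchecked_t so they keep ordinary modular behaviour. A rough userspace analogue with C11 atomics (the detection helper is illustrative, not the kernel's actual mechanism):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <limits.h>

    /* "checked" increment: report the wrap instead of silently passing
     * INT_MAX -> INT_MIN, roughly what PAX_REFCOUNT traps on */
    static int checked_inc(atomic_int *v)
    {
        int old = atomic_fetch_add(v, 1);
        if (old == INT_MAX) {            /* overflowed */
            atomic_fetch_sub(v, 1);      /* saturate */
            return -1;
        }
        return old + 1;
    }

    int main(void)
    {
        atomic_int  refcnt = INT_MAX;    /* a refcount must never wrap */
        atomic_uint seqnum = UINT_MAX;   /* a sequence number may wrap */

        if (checked_inc(&refcnt) < 0)
            puts("refcount overflow caught and saturated");
        printf("seqnum wraps to %u\n",
               atomic_fetch_add(&seqnum, 1) + 1u);   /* 0, by design */
        return 0;
    }
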
43760diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
43761index 5f13890..36a044b 100644
43762--- a/drivers/staging/vt6655/hostap.c
43763+++ b/drivers/staging/vt6655/hostap.c
43764@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
43765 *
43766 */
43767
43768+static net_device_ops_no_const apdev_netdev_ops;
43769+
43770 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43771 {
43772 PSDevice apdev_priv;
43773 struct net_device *dev = pDevice->dev;
43774 int ret;
43775- const struct net_device_ops apdev_netdev_ops = {
43776- .ndo_start_xmit = pDevice->tx_80211,
43777- };
43778
43779 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43780
43781@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43782 *apdev_priv = *pDevice;
43783 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43784
43785+ /* only half broken now */
43786+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43787 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43788
43789 pDevice->apdev->type = ARPHRD_IEEE80211;
43790diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
43791index 26a7d0e..897b083 100644
43792--- a/drivers/staging/vt6656/hostap.c
43793+++ b/drivers/staging/vt6656/hostap.c
43794@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
43795 *
43796 */
43797
43798+static net_device_ops_no_const apdev_netdev_ops;
43799+
43800 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43801 {
43802 PSDevice apdev_priv;
43803 struct net_device *dev = pDevice->dev;
43804 int ret;
43805- const struct net_device_ops apdev_netdev_ops = {
43806- .ndo_start_xmit = pDevice->tx_80211,
43807- };
43808
43809 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43810
43811@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43812 *apdev_priv = *pDevice;
43813 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43814
43815+ /* only half broken now */
43816+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43817 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43818
43819 pDevice->apdev->type = ARPHRD_IEEE80211;
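
Both vt6655 and vt6656 hit the same constify problem: with every net_device_ops instance forced const at build time, the driver can no longer assemble one on the stack around the runtime value pDevice->tx_80211. The fix keeps a single static net_device_ops_no_const and fills the one dynamic slot just before publishing it, which is what the wry "only half broken now" comment concedes: the structure stays writable, but there is now one long-lived shared copy instead of a per-call stack object. The shape of the change, condensed into a compilable sketch (types simplified):

    #include <stdio.h>

    struct ops { int (*start_xmit)(void); };

    static int tx_a(void) { return 1; }

    /* one static, writable instance (net_device_ops_no_const); the
     * runtime-known slot is filled just before the struct is published */
    static struct ops apdev_ops;

    static const struct ops *enable(int (*tx)(void))
    {
        apdev_ops.start_xmit = tx;      /* runtime fill */
        return &apdev_ops;              /* publish */
    }

    int main(void)
    {
        const struct ops *o = enable(tx_a);
        printf("%d\n", o->start_xmit());
        return 0;
    }
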
43820diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
43821index 56c8e60..1920c63 100644
43822--- a/drivers/staging/zcache/tmem.c
43823+++ b/drivers/staging/zcache/tmem.c
43824@@ -39,7 +39,7 @@
43825 * A tmem host implementation must use this function to register callbacks
43826 * for memory allocation.
43827 */
43828-static struct tmem_hostops tmem_hostops;
43829+static tmem_hostops_no_const tmem_hostops;
43830
43831 static void tmem_objnode_tree_init(void);
43832
43833@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
43834 * A tmem host implementation must use this function to register
43835 * callbacks for a page-accessible memory (PAM) implementation
43836 */
43837-static struct tmem_pamops tmem_pamops;
43838+static tmem_pamops_no_const tmem_pamops;
43839
43840 void tmem_register_pamops(struct tmem_pamops *m)
43841 {
43842diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
43843index 0d4aa82..f7832d4 100644
43844--- a/drivers/staging/zcache/tmem.h
43845+++ b/drivers/staging/zcache/tmem.h
43846@@ -180,6 +180,7 @@ struct tmem_pamops {
43847 void (*new_obj)(struct tmem_obj *);
43848 int (*replace_in_obj)(void *, struct tmem_obj *);
43849 };
43850+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
43851 extern void tmem_register_pamops(struct tmem_pamops *m);
43852
43853 /* memory allocation methods provided by the host implementation */
43854@@ -189,6 +190,7 @@ struct tmem_hostops {
43855 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
43856 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
43857 };
43858+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
43859 extern void tmem_register_hostops(struct tmem_hostops *m);
43860
43861 /* core tmem accessor functions */
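
tmem registers its host and PAM callbacks at runtime via tmem_register_hostops()/tmem_register_pamops(), so these two ops structures are an exception the constify plugin has to be told about. The typedefs added here do exactly that: in grsecurity's compiler.h, __no_const expands to a plugin-recognized attribute when the constify plugin is active and to nothing on a stock toolchain, so the alias costs nothing otherwise. A compilable sketch of the pattern (attribute stubbed out, names shortened):

    #include <stdio.h>
    #include <stdlib.h>

    /* under the constify gcc plugin: __attribute__((no_const)) */
    #define __no_const

    struct hostops {
        void *(*obj_alloc)(void);
        void  (*obj_free)(void *);
    };
    typedef struct hostops __no_const hostops_no_const;

    static hostops_no_const tmem_hostops;   /* must remain writable */

    static void register_hostops(const struct hostops *m)
    {
        tmem_hostops = *m;                   /* runtime registration */
    }

    static void *my_alloc(void) { return malloc(16); }
    static void  my_free(void *p) { free(p); }

    int main(void)
    {
        const struct hostops mine = { my_alloc, my_free };
        register_hostops(&mine);
        tmem_hostops.obj_free(tmem_hostops.obj_alloc());
        puts("callbacks registered and invoked");
        return 0;
    }
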
43862diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
43863index 96f4981..4daaa7e 100644
43864--- a/drivers/target/target_core_device.c
43865+++ b/drivers/target/target_core_device.c
43866@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
43867 spin_lock_init(&dev->se_port_lock);
43868 spin_lock_init(&dev->se_tmr_lock);
43869 spin_lock_init(&dev->qf_cmd_lock);
43870- atomic_set(&dev->dev_ordered_id, 0);
43871+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
43872 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
43873 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
43874 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
43875diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
43876index fcf880f..a4d1e8f 100644
43877--- a/drivers/target/target_core_transport.c
43878+++ b/drivers/target/target_core_transport.c
43879@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
43880 * Used to determine when ORDERED commands should go from
43881 * Dormant to Active status.
43882 */
43883- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
43884+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
43885 smp_mb__after_atomic_inc();
43886 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
43887 cmd->se_ordered_id, cmd->sam_task_attr,
43888diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
43889index b09c8d1f..c4225c0 100644
43890--- a/drivers/tty/cyclades.c
43891+++ b/drivers/tty/cyclades.c
43892@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
43893 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
43894 info->port.count);
43895 #endif
43896- info->port.count++;
43897+ atomic_inc(&info->port.count);
43898 #ifdef CY_DEBUG_COUNT
43899 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
43900- current->pid, info->port.count);
43901+ current->pid, atomic_read(&info->port.count));
43902 #endif
43903
43904 /*
43905@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
43906 for (j = 0; j < cy_card[i].nports; j++) {
43907 info = &cy_card[i].ports[j];
43908
43909- if (info->port.count) {
43910+ if (atomic_read(&info->port.count)) {
43911 /* XXX is the ldisc num worth this? */
43912 struct tty_struct *tty;
43913 struct tty_ldisc *ld;
43914diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
43915index 13ee53b..418d164 100644
43916--- a/drivers/tty/hvc/hvc_console.c
43917+++ b/drivers/tty/hvc/hvc_console.c
43918@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
43919
43920 spin_lock_irqsave(&hp->port.lock, flags);
43921 /* Check and then increment for fast path open. */
43922- if (hp->port.count++ > 0) {
43923+ if (atomic_inc_return(&hp->port.count) > 1) {
43924 spin_unlock_irqrestore(&hp->port.lock, flags);
43925 hvc_kick();
43926 return 0;
43927@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43928
43929 spin_lock_irqsave(&hp->port.lock, flags);
43930
43931- if (--hp->port.count == 0) {
43932+ if (atomic_dec_return(&hp->port.count) == 0) {
43933 spin_unlock_irqrestore(&hp->port.lock, flags);
43934 /* We are done with the tty pointer now. */
43935 tty_port_tty_set(&hp->port, NULL);
43936@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43937 */
43938 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
43939 } else {
43940- if (hp->port.count < 0)
43941+ if (atomic_read(&hp->port.count) < 0)
43942 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
43943- hp->vtermno, hp->port.count);
43944+ hp->vtermno, atomic_read(&hp->port.count));
43945 spin_unlock_irqrestore(&hp->port.lock, flags);
43946 }
43947 }
43948@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
43949 * open->hangup case this can be called after the final close so prevent
43950 * that from happening for now.
43951 */
43952- if (hp->port.count <= 0) {
43953+ if (atomic_read(&hp->port.count) <= 0) {
43954 spin_unlock_irqrestore(&hp->port.lock, flags);
43955 return;
43956 }
43957
43958- hp->port.count = 0;
43959+ atomic_set(&hp->port.count, 0);
43960 spin_unlock_irqrestore(&hp->port.lock, flags);
43961 tty_port_tty_set(&hp->port, NULL);
43962
43963@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
43964 return -EPIPE;
43965
43966 /* FIXME what's this (unprotected) check for? */
43967- if (hp->port.count <= 0)
43968+ if (atomic_read(&hp->port.count) <= 0)
43969 return -EIO;
43970
43971 spin_lock_irqsave(&hp->lock, flags);
43972diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
43973index 8776357..b2d4afd 100644
43974--- a/drivers/tty/hvc/hvcs.c
43975+++ b/drivers/tty/hvc/hvcs.c
43976@@ -83,6 +83,7 @@
43977 #include <asm/hvcserver.h>
43978 #include <asm/uaccess.h>
43979 #include <asm/vio.h>
43980+#include <asm/local.h>
43981
43982 /*
43983 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
43984@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
43985
43986 spin_lock_irqsave(&hvcsd->lock, flags);
43987
43988- if (hvcsd->port.count > 0) {
43989+ if (atomic_read(&hvcsd->port.count) > 0) {
43990 spin_unlock_irqrestore(&hvcsd->lock, flags);
43991 printk(KERN_INFO "HVCS: vterm state unchanged. "
43992 "The hvcs device node is still in use.\n");
43993@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
43994 }
43995 }
43996
43997- hvcsd->port.count = 0;
43998+ atomic_set(&hvcsd->port.count, 0);
43999 hvcsd->port.tty = tty;
44000 tty->driver_data = hvcsd;
44001
44002@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
44003 unsigned long flags;
44004
44005 spin_lock_irqsave(&hvcsd->lock, flags);
44006- hvcsd->port.count++;
44007+ atomic_inc(&hvcsd->port.count);
44008 hvcsd->todo_mask |= HVCS_SCHED_READ;
44009 spin_unlock_irqrestore(&hvcsd->lock, flags);
44010
44011@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
44012 hvcsd = tty->driver_data;
44013
44014 spin_lock_irqsave(&hvcsd->lock, flags);
44015- if (--hvcsd->port.count == 0) {
44016+ if (atomic_dec_and_test(&hvcsd->port.count)) {
44017
44018 vio_disable_interrupts(hvcsd->vdev);
44019
44020@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
44021
44022 free_irq(irq, hvcsd);
44023 return;
44024- } else if (hvcsd->port.count < 0) {
44025+ } else if (atomic_read(&hvcsd->port.count) < 0) {
44026 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
44027 " is missmanaged.\n",
44028- hvcsd->vdev->unit_address, hvcsd->port.count);
44029+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
44030 }
44031
44032 spin_unlock_irqrestore(&hvcsd->lock, flags);
44033@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
44034
44035 spin_lock_irqsave(&hvcsd->lock, flags);
44036 /* Preserve this so that we know how many kref refs to put */
44037- temp_open_count = hvcsd->port.count;
44038+ temp_open_count = atomic_read(&hvcsd->port.count);
44039
44040 /*
44041 * Don't kref put inside the spinlock because the destruction
44042@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
44043 tty->driver_data = NULL;
44044 hvcsd->port.tty = NULL;
44045
44046- hvcsd->port.count = 0;
44047+ atomic_set(&hvcsd->port.count, 0);
44048
44049 /* This will drop any buffered data on the floor which is OK in a hangup
44050 * scenario. */
44051@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
44052 * the middle of a write operation? This is a crummy place to do this
44053 * but we want to keep it all in the spinlock.
44054 */
44055- if (hvcsd->port.count <= 0) {
44056+ if (atomic_read(&hvcsd->port.count) <= 0) {
44057 spin_unlock_irqrestore(&hvcsd->lock, flags);
44058 return -ENODEV;
44059 }
44060@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
44061 {
44062 struct hvcs_struct *hvcsd = tty->driver_data;
44063
44064- if (!hvcsd || hvcsd->port.count <= 0)
44065+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
44066 return 0;
44067
44068 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
44069diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
44070index 2cde13d..645d78f 100644
44071--- a/drivers/tty/ipwireless/tty.c
44072+++ b/drivers/tty/ipwireless/tty.c
44073@@ -29,6 +29,7 @@
44074 #include <linux/tty_driver.h>
44075 #include <linux/tty_flip.h>
44076 #include <linux/uaccess.h>
44077+#include <asm/local.h>
44078
44079 #include "tty.h"
44080 #include "network.h"
44081@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
44082 mutex_unlock(&tty->ipw_tty_mutex);
44083 return -ENODEV;
44084 }
44085- if (tty->port.count == 0)
44086+ if (atomic_read(&tty->port.count) == 0)
44087 tty->tx_bytes_queued = 0;
44088
44089- tty->port.count++;
44090+ atomic_inc(&tty->port.count);
44091
44092 tty->port.tty = linux_tty;
44093 linux_tty->driver_data = tty;
44094@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
44095
44096 static void do_ipw_close(struct ipw_tty *tty)
44097 {
44098- tty->port.count--;
44099-
44100- if (tty->port.count == 0) {
44101+ if (atomic_dec_return(&tty->port.count) == 0) {
44102 struct tty_struct *linux_tty = tty->port.tty;
44103
44104 if (linux_tty != NULL) {
44105@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
44106 return;
44107
44108 mutex_lock(&tty->ipw_tty_mutex);
44109- if (tty->port.count == 0) {
44110+ if (atomic_read(&tty->port.count) == 0) {
44111 mutex_unlock(&tty->ipw_tty_mutex);
44112 return;
44113 }
44114@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
44115 return;
44116 }
44117
44118- if (!tty->port.count) {
44119+ if (!atomic_read(&tty->port.count)) {
44120 mutex_unlock(&tty->ipw_tty_mutex);
44121 return;
44122 }
44123@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
44124 return -ENODEV;
44125
44126 mutex_lock(&tty->ipw_tty_mutex);
44127- if (!tty->port.count) {
44128+ if (!atomic_read(&tty->port.count)) {
44129 mutex_unlock(&tty->ipw_tty_mutex);
44130 return -EINVAL;
44131 }
44132@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
44133 if (!tty)
44134 return -ENODEV;
44135
44136- if (!tty->port.count)
44137+ if (!atomic_read(&tty->port.count))
44138 return -EINVAL;
44139
44140 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
44141@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
44142 if (!tty)
44143 return 0;
44144
44145- if (!tty->port.count)
44146+ if (!atomic_read(&tty->port.count))
44147 return 0;
44148
44149 return tty->tx_bytes_queued;
44150@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
44151 if (!tty)
44152 return -ENODEV;
44153
44154- if (!tty->port.count)
44155+ if (!atomic_read(&tty->port.count))
44156 return -EINVAL;
44157
44158 return get_control_lines(tty);
44159@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
44160 if (!tty)
44161 return -ENODEV;
44162
44163- if (!tty->port.count)
44164+ if (!atomic_read(&tty->port.count))
44165 return -EINVAL;
44166
44167 return set_control_lines(tty, set, clear);
44168@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
44169 if (!tty)
44170 return -ENODEV;
44171
44172- if (!tty->port.count)
44173+ if (!atomic_read(&tty->port.count))
44174 return -EINVAL;
44175
44176 /* FIXME: Exactly how is the tty object locked here .. */
44177@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
44178 * are gone */
44179 mutex_lock(&ttyj->ipw_tty_mutex);
44180 }
44181- while (ttyj->port.count)
44182+ while (atomic_read(&ttyj->port.count))
44183 do_ipw_close(ttyj);
44184 ipwireless_disassociate_network_ttys(network,
44185 ttyj->channel_idx);
44186diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
44187index f9d2850..b006f04 100644
44188--- a/drivers/tty/moxa.c
44189+++ b/drivers/tty/moxa.c
44190@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
44191 }
44192
44193 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
44194- ch->port.count++;
44195+ atomic_inc(&ch->port.count);
44196 tty->driver_data = ch;
44197 tty_port_tty_set(&ch->port, tty);
44198 mutex_lock(&ch->port.mutex);
44199diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
44200index bfd6771..e0d93c4 100644
44201--- a/drivers/tty/n_gsm.c
44202+++ b/drivers/tty/n_gsm.c
44203@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
44204 spin_lock_init(&dlci->lock);
44205 mutex_init(&dlci->mutex);
44206 dlci->fifo = &dlci->_fifo;
44207- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
44208+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
44209 kfree(dlci);
44210 return NULL;
44211 }
44212@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
44213 struct gsm_dlci *dlci = tty->driver_data;
44214 struct tty_port *port = &dlci->port;
44215
44216- port->count++;
44217+ atomic_inc(&port->count);
44218 dlci_get(dlci);
44219 dlci_get(dlci->gsm->dlci[0]);
44220 mux_get(dlci->gsm);
44221diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
44222index 19083ef..6e34e97 100644
44223--- a/drivers/tty/n_tty.c
44224+++ b/drivers/tty/n_tty.c
44225@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
44226 {
44227 *ops = tty_ldisc_N_TTY;
44228 ops->owner = NULL;
44229- ops->refcount = ops->flags = 0;
44230+ atomic_set(&ops->refcount, 0);
44231+ ops->flags = 0;
44232 }
44233 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
44234diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
44235index ac35c90..c47deac 100644
44236--- a/drivers/tty/pty.c
44237+++ b/drivers/tty/pty.c
44238@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
44239 panic("Couldn't register Unix98 pts driver");
44240
44241 /* Now create the /dev/ptmx special device */
44242+ pax_open_kernel();
44243 tty_default_fops(&ptmx_fops);
44244- ptmx_fops.open = ptmx_open;
44245+ *(void **)&ptmx_fops.open = ptmx_open;
44246+ pax_close_kernel();
44247
44248 cdev_init(&ptmx_cdev, &ptmx_fops);
44249 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
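
With file_operations constified, ptmx_fops sits in read-only memory, so the one legitimate late write (installing ptmx_open after tty_default_fops() seeds the defaults) is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, by toggling CR0.WP), while the *(void **)& cast strips the const for that single store. The tty_default_fops() change later in this patch, a memcpy through a cast, exists for the same reason. A userspace analogue using page protections (all names illustrative):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct fops { int (*open)(void); };

    static int default_open(void) { return 0; }
    static int ptmx_open(void)    { return 1; }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct fops *f = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (f == MAP_FAILED)
            return 1;

        f->open = default_open;
        mprotect(f, pg, PROT_READ);               /* steady state: read-only  */

        mprotect(f, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel()        */
        f->open = ptmx_open;                      /* the one sanctioned write */
        mprotect(f, pg, PROT_READ);               /* pax_close_kernel()       */

        printf("open() -> %d\n", f->open());      /* prints 1 */
        return 0;
    }
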
44250diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
44251index e42009a..566a036 100644
44252--- a/drivers/tty/rocket.c
44253+++ b/drivers/tty/rocket.c
44254@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
44255 tty->driver_data = info;
44256 tty_port_tty_set(port, tty);
44257
44258- if (port->count++ == 0) {
44259+ if (atomic_inc_return(&port->count) == 1) {
44260 atomic_inc(&rp_num_ports_open);
44261
44262 #ifdef ROCKET_DEBUG_OPEN
44263@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
44264 #endif
44265 }
44266 #ifdef ROCKET_DEBUG_OPEN
44267- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
44268+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
44269 #endif
44270
44271 /*
44272@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
44273 spin_unlock_irqrestore(&info->port.lock, flags);
44274 return;
44275 }
44276- if (info->port.count)
44277+ if (atomic_read(&info->port.count))
44278 atomic_dec(&rp_num_ports_open);
44279 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
44280 spin_unlock_irqrestore(&info->port.lock, flags);
44281diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
44282index 1002054..dd644a8 100644
44283--- a/drivers/tty/serial/kgdboc.c
44284+++ b/drivers/tty/serial/kgdboc.c
44285@@ -24,8 +24,9 @@
44286 #define MAX_CONFIG_LEN 40
44287
44288 static struct kgdb_io kgdboc_io_ops;
44289+static struct kgdb_io kgdboc_io_ops_console;
44290
44291-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
44292+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
44293 static int configured = -1;
44294
44295 static char config[MAX_CONFIG_LEN];
44296@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
44297 kgdboc_unregister_kbd();
44298 if (configured == 1)
44299 kgdb_unregister_io_module(&kgdboc_io_ops);
44300+ else if (configured == 2)
44301+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
44302 }
44303
44304 static int configure_kgdboc(void)
44305@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
44306 int err;
44307 char *cptr = config;
44308 struct console *cons;
44309+ int is_console = 0;
44310
44311 err = kgdboc_option_setup(config);
44312 if (err || !strlen(config) || isspace(config[0]))
44313 goto noconfig;
44314
44315 err = -ENODEV;
44316- kgdboc_io_ops.is_console = 0;
44317 kgdb_tty_driver = NULL;
44318
44319 kgdboc_use_kms = 0;
44320@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
44321 int idx;
44322 if (cons->device && cons->device(cons, &idx) == p &&
44323 idx == tty_line) {
44324- kgdboc_io_ops.is_console = 1;
44325+ is_console = 1;
44326 break;
44327 }
44328 cons = cons->next;
44329@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
44330 kgdb_tty_line = tty_line;
44331
44332 do_register:
44333- err = kgdb_register_io_module(&kgdboc_io_ops);
44334+ if (is_console) {
44335+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
44336+ configured = 2;
44337+ } else {
44338+ err = kgdb_register_io_module(&kgdboc_io_ops);
44339+ configured = 1;
44340+ }
44341 if (err)
44342 goto noconfig;
44343
44344@@ -205,8 +214,6 @@ do_register:
44345 if (err)
44346 goto nmi_con_failed;
44347
44348- configured = 1;
44349-
44350 return 0;
44351
44352 nmi_con_failed:
44353@@ -223,7 +230,7 @@ noconfig:
44354 static int __init init_kgdboc(void)
44355 {
44356 /* Already configured? */
44357- if (configured == 1)
44358+ if (configured >= 1)
44359 return 0;
44360
44361 return configure_kgdboc();
44362@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
44363 if (config[len - 1] == '\n')
44364 config[len - 1] = '\0';
44365
44366- if (configured == 1)
44367+ if (configured >= 1)
44368 cleanup_kgdboc();
44369
44370 /* Go and configure with the new params. */
44371@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
44372 .post_exception = kgdboc_post_exp_handler,
44373 };
44374
44375+static struct kgdb_io kgdboc_io_ops_console = {
44376+ .name = "kgdboc",
44377+ .read_char = kgdboc_get_char,
44378+ .write_char = kgdboc_put_char,
44379+ .pre_exception = kgdboc_pre_exp_handler,
44380+ .post_exception = kgdboc_post_exp_handler,
44381+ .is_console = 1
44382+};
44383+
44384 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
44385 /* This is only available if kgdboc is a built in for early debugging */
44386 static int __init kgdboc_early_init(char *opt)
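
kgdboc previously flipped is_console inside its single kgdb_io structure at configure time; once that structure is const, the write has to go. The patch instead keeps two fully initialized variants and widens the configured flag to record which one was registered (1 = plain, 2 = console) so cleanup_kgdboc() can unregister the matching instance. Reduced to its essentials:

    #include <stdio.h>

    struct io_ops { const char *name; int is_console; };

    static const struct io_ops io_plain   = { "kgdboc", 0 };
    static const struct io_ops io_console = { "kgdboc", 1 };

    static const struct io_ops *registered;
    static int configured = -1;       /* -1 unset, 1 plain, 2 console */

    static void configure(int is_console)
    {
        registered = is_console ? &io_console : &io_plain;
        configured = is_console ? 2 : 1;
    }

    int main(void)
    {
        configure(1);
        printf("registered %s variant (configured=%d)\n",
               registered->is_console ? "console" : "plain", configured);
        return 0;
    }
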
44387diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
44388index e514b3a..c73d614 100644
44389--- a/drivers/tty/serial/samsung.c
44390+++ b/drivers/tty/serial/samsung.c
44391@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
44392 }
44393 }
44394
44395+static int s3c64xx_serial_startup(struct uart_port *port);
44396 static int s3c24xx_serial_startup(struct uart_port *port)
44397 {
44398 struct s3c24xx_uart_port *ourport = to_ourport(port);
44399 int ret;
44400
44401+ /* Startup sequence is different for s3c64xx and higher SoC's */
44402+ if (s3c24xx_serial_has_interrupt_mask(port))
44403+ return s3c64xx_serial_startup(port);
44404+
44405 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
44406 port->mapbase, port->membase);
44407
44408@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
44409 /* setup info for port */
44410 port->dev = &platdev->dev;
44411
44412- /* Startup sequence is different for s3c64xx and higher SoC's */
44413- if (s3c24xx_serial_has_interrupt_mask(port))
44414- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
44415-
44416 port->uartclk = 1;
44417
44418 if (cfg->uart_flags & UPF_CONS_FLOW) {
44419diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
44420index 2c7230a..2104f16 100644
44421--- a/drivers/tty/serial/serial_core.c
44422+++ b/drivers/tty/serial/serial_core.c
44423@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
44424 uart_flush_buffer(tty);
44425 uart_shutdown(tty, state);
44426 spin_lock_irqsave(&port->lock, flags);
44427- port->count = 0;
44428+ atomic_set(&port->count, 0);
44429 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
44430 spin_unlock_irqrestore(&port->lock, flags);
44431 tty_port_tty_set(port, NULL);
44432@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
44433 goto end;
44434 }
44435
44436- port->count++;
44437+ atomic_inc(&port->count);
44438 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
44439 retval = -ENXIO;
44440 goto err_dec_count;
44441@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
44442 /*
44443 * Make sure the device is in D0 state.
44444 */
44445- if (port->count == 1)
44446+ if (atomic_read(&port->count) == 1)
44447 uart_change_pm(state, 0);
44448
44449 /*
44450@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
44451 end:
44452 return retval;
44453 err_dec_count:
44454- port->count--;
44455+ atomic_dec(&port->count);
44456 mutex_unlock(&port->mutex);
44457 goto end;
44458 }
44459diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
44460index 9e071f6..f30ae69 100644
44461--- a/drivers/tty/synclink.c
44462+++ b/drivers/tty/synclink.c
44463@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
44464
44465 if (debug_level >= DEBUG_LEVEL_INFO)
44466 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
44467- __FILE__,__LINE__, info->device_name, info->port.count);
44468+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
44469
44470 if (tty_port_close_start(&info->port, tty, filp) == 0)
44471 goto cleanup;
44472@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
44473 cleanup:
44474 if (debug_level >= DEBUG_LEVEL_INFO)
44475 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
44476- tty->driver->name, info->port.count);
44477+ tty->driver->name, atomic_read(&info->port.count));
44478
44479 } /* end of mgsl_close() */
44480
44481@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
44482
44483 mgsl_flush_buffer(tty);
44484 shutdown(info);
44485-
44486- info->port.count = 0;
44487+
44488+ atomic_set(&info->port.count, 0);
44489 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44490 info->port.tty = NULL;
44491
44492@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
44493
44494 if (debug_level >= DEBUG_LEVEL_INFO)
44495 printk("%s(%d):block_til_ready before block on %s count=%d\n",
44496- __FILE__,__LINE__, tty->driver->name, port->count );
44497+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44498
44499 spin_lock_irqsave(&info->irq_spinlock, flags);
44500 if (!tty_hung_up_p(filp)) {
44501 extra_count = true;
44502- port->count--;
44503+ atomic_dec(&port->count);
44504 }
44505 spin_unlock_irqrestore(&info->irq_spinlock, flags);
44506 port->blocked_open++;
44507@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
44508
44509 if (debug_level >= DEBUG_LEVEL_INFO)
44510 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
44511- __FILE__,__LINE__, tty->driver->name, port->count );
44512+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44513
44514 tty_unlock(tty);
44515 schedule();
44516@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
44517
44518 /* FIXME: Racy on hangup during close wait */
44519 if (extra_count)
44520- port->count++;
44521+ atomic_inc(&port->count);
44522 port->blocked_open--;
44523
44524 if (debug_level >= DEBUG_LEVEL_INFO)
44525 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
44526- __FILE__,__LINE__, tty->driver->name, port->count );
44527+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44528
44529 if (!retval)
44530 port->flags |= ASYNC_NORMAL_ACTIVE;
44531@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
44532
44533 if (debug_level >= DEBUG_LEVEL_INFO)
44534 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
44535- __FILE__,__LINE__,tty->driver->name, info->port.count);
44536+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
44537
44538 /* If port is closing, signal caller to try again */
44539 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44540@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
44541 spin_unlock_irqrestore(&info->netlock, flags);
44542 goto cleanup;
44543 }
44544- info->port.count++;
44545+ atomic_inc(&info->port.count);
44546 spin_unlock_irqrestore(&info->netlock, flags);
44547
44548- if (info->port.count == 1) {
44549+ if (atomic_read(&info->port.count) == 1) {
44550 /* 1st open on this device, init hardware */
44551 retval = startup(info);
44552 if (retval < 0)
44553@@ -3451,8 +3451,8 @@ cleanup:
44554 if (retval) {
44555 if (tty->count == 1)
44556 info->port.tty = NULL; /* tty layer will release tty struct */
44557- if(info->port.count)
44558- info->port.count--;
44559+ if (atomic_read(&info->port.count))
44560+ atomic_dec(&info->port.count);
44561 }
44562
44563 return retval;
44564@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44565 unsigned short new_crctype;
44566
44567 /* return error if TTY interface open */
44568- if (info->port.count)
44569+ if (atomic_read(&info->port.count))
44570 return -EBUSY;
44571
44572 switch (encoding)
44573@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
44574
44575 /* arbitrate between network and tty opens */
44576 spin_lock_irqsave(&info->netlock, flags);
44577- if (info->port.count != 0 || info->netcount != 0) {
44578+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44579 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44580 spin_unlock_irqrestore(&info->netlock, flags);
44581 return -EBUSY;
44582@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44583 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44584
44585 /* return error if TTY interface open */
44586- if (info->port.count)
44587+ if (atomic_read(&info->port.count))
44588 return -EBUSY;
44589
44590 if (cmd != SIOCWANDEV)
44591diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
44592index aba1e59..877ac33 100644
44593--- a/drivers/tty/synclink_gt.c
44594+++ b/drivers/tty/synclink_gt.c
44595@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
44596 tty->driver_data = info;
44597 info->port.tty = tty;
44598
44599- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
44600+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
44601
44602 /* If port is closing, signal caller to try again */
44603 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44604@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
44605 mutex_unlock(&info->port.mutex);
44606 goto cleanup;
44607 }
44608- info->port.count++;
44609+ atomic_inc(&info->port.count);
44610 spin_unlock_irqrestore(&info->netlock, flags);
44611
44612- if (info->port.count == 1) {
44613+ if (atomic_read(&info->port.count) == 1) {
44614 /* 1st open on this device, init hardware */
44615 retval = startup(info);
44616 if (retval < 0) {
44617@@ -716,8 +716,8 @@ cleanup:
44618 if (retval) {
44619 if (tty->count == 1)
44620 info->port.tty = NULL; /* tty layer will release tty struct */
44621- if(info->port.count)
44622- info->port.count--;
44623+ if(atomic_read(&info->port.count))
44624+ atomic_dec(&info->port.count);
44625 }
44626
44627 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
44628@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44629
44630 if (sanity_check(info, tty->name, "close"))
44631 return;
44632- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
44633+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
44634
44635 if (tty_port_close_start(&info->port, tty, filp) == 0)
44636 goto cleanup;
44637@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44638 tty_port_close_end(&info->port, tty);
44639 info->port.tty = NULL;
44640 cleanup:
44641- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
44642+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
44643 }
44644
44645 static void hangup(struct tty_struct *tty)
44646@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
44647 shutdown(info);
44648
44649 spin_lock_irqsave(&info->port.lock, flags);
44650- info->port.count = 0;
44651+ atomic_set(&info->port.count, 0);
44652 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44653 info->port.tty = NULL;
44654 spin_unlock_irqrestore(&info->port.lock, flags);
44655@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44656 unsigned short new_crctype;
44657
44658 /* return error if TTY interface open */
44659- if (info->port.count)
44660+ if (atomic_read(&info->port.count))
44661 return -EBUSY;
44662
44663 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
44664@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
44665
44666 /* arbitrate between network and tty opens */
44667 spin_lock_irqsave(&info->netlock, flags);
44668- if (info->port.count != 0 || info->netcount != 0) {
44669+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44670 DBGINFO(("%s hdlc_open busy\n", dev->name));
44671 spin_unlock_irqrestore(&info->netlock, flags);
44672 return -EBUSY;
44673@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44674 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
44675
44676 /* return error if TTY interface open */
44677- if (info->port.count)
44678+ if (atomic_read(&info->port.count))
44679 return -EBUSY;
44680
44681 if (cmd != SIOCWANDEV)
44682@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
44683 if (port == NULL)
44684 continue;
44685 spin_lock(&port->lock);
44686- if ((port->port.count || port->netcount) &&
44687+ if ((atomic_read(&port->port.count) || port->netcount) &&
44688 port->pending_bh && !port->bh_running &&
44689 !port->bh_requested) {
44690 DBGISR(("%s bh queued\n", port->device_name));
44691@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44692 spin_lock_irqsave(&info->lock, flags);
44693 if (!tty_hung_up_p(filp)) {
44694 extra_count = true;
44695- port->count--;
44696+ atomic_dec(&port->count);
44697 }
44698 spin_unlock_irqrestore(&info->lock, flags);
44699 port->blocked_open++;
44700@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44701 remove_wait_queue(&port->open_wait, &wait);
44702
44703 if (extra_count)
44704- port->count++;
44705+ atomic_inc(&port->count);
44706 port->blocked_open--;
44707
44708 if (!retval)
44709diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
44710index fd43fb6..34704ad 100644
44711--- a/drivers/tty/synclinkmp.c
44712+++ b/drivers/tty/synclinkmp.c
44713@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
44714
44715 if (debug_level >= DEBUG_LEVEL_INFO)
44716 printk("%s(%d):%s open(), old ref count = %d\n",
44717- __FILE__,__LINE__,tty->driver->name, info->port.count);
44718+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
44719
44720 /* If port is closing, signal caller to try again */
44721 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44722@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
44723 spin_unlock_irqrestore(&info->netlock, flags);
44724 goto cleanup;
44725 }
44726- info->port.count++;
44727+ atomic_inc(&info->port.count);
44728 spin_unlock_irqrestore(&info->netlock, flags);
44729
44730- if (info->port.count == 1) {
44731+ if (atomic_read(&info->port.count) == 1) {
44732 /* 1st open on this device, init hardware */
44733 retval = startup(info);
44734 if (retval < 0)
44735@@ -797,8 +797,8 @@ cleanup:
44736 if (retval) {
44737 if (tty->count == 1)
44738 info->port.tty = NULL; /* tty layer will release tty struct */
44739- if(info->port.count)
44740- info->port.count--;
44741+ if(atomic_read(&info->port.count))
44742+ atomic_dec(&info->port.count);
44743 }
44744
44745 return retval;
44746@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44747
44748 if (debug_level >= DEBUG_LEVEL_INFO)
44749 printk("%s(%d):%s close() entry, count=%d\n",
44750- __FILE__,__LINE__, info->device_name, info->port.count);
44751+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
44752
44753 if (tty_port_close_start(&info->port, tty, filp) == 0)
44754 goto cleanup;
44755@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44756 cleanup:
44757 if (debug_level >= DEBUG_LEVEL_INFO)
44758 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
44759- tty->driver->name, info->port.count);
44760+ tty->driver->name, atomic_read(&info->port.count));
44761 }
44762
44763 /* Called by tty_hangup() when a hangup is signaled.
44764@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
44765 shutdown(info);
44766
44767 spin_lock_irqsave(&info->port.lock, flags);
44768- info->port.count = 0;
44769+ atomic_set(&info->port.count, 0);
44770 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44771 info->port.tty = NULL;
44772 spin_unlock_irqrestore(&info->port.lock, flags);
44773@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44774 unsigned short new_crctype;
44775
44776 /* return error if TTY interface open */
44777- if (info->port.count)
44778+ if (atomic_read(&info->port.count))
44779 return -EBUSY;
44780
44781 switch (encoding)
44782@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
44783
44784 /* arbitrate between network and tty opens */
44785 spin_lock_irqsave(&info->netlock, flags);
44786- if (info->port.count != 0 || info->netcount != 0) {
44787+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44788 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44789 spin_unlock_irqrestore(&info->netlock, flags);
44790 return -EBUSY;
44791@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44792 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44793
44794 /* return error if TTY interface open */
44795- if (info->port.count)
44796+ if (atomic_read(&info->port.count))
44797 return -EBUSY;
44798
44799 if (cmd != SIOCWANDEV)
44800@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
44801 * do not request bottom half processing if the
44802 * device is not open in a normal mode.
44803 */
44804- if ( port && (port->port.count || port->netcount) &&
44805+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
44806 port->pending_bh && !port->bh_running &&
44807 !port->bh_requested ) {
44808 if ( debug_level >= DEBUG_LEVEL_ISR )
44809@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44810
44811 if (debug_level >= DEBUG_LEVEL_INFO)
44812 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
44813- __FILE__,__LINE__, tty->driver->name, port->count );
44814+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44815
44816 spin_lock_irqsave(&info->lock, flags);
44817 if (!tty_hung_up_p(filp)) {
44818 extra_count = true;
44819- port->count--;
44820+ atomic_dec(&port->count);
44821 }
44822 spin_unlock_irqrestore(&info->lock, flags);
44823 port->blocked_open++;
44824@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44825
44826 if (debug_level >= DEBUG_LEVEL_INFO)
44827 printk("%s(%d):%s block_til_ready() count=%d\n",
44828- __FILE__,__LINE__, tty->driver->name, port->count );
44829+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44830
44831 tty_unlock(tty);
44832 schedule();
44833@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44834 remove_wait_queue(&port->open_wait, &wait);
44835
44836 if (extra_count)
44837- port->count++;
44838+ atomic_inc(&port->count);
44839 port->blocked_open--;
44840
44841 if (debug_level >= DEBUG_LEVEL_INFO)
44842 printk("%s(%d):%s block_til_ready() after, count=%d\n",
44843- __FILE__,__LINE__, tty->driver->name, port->count );
44844+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44845
44846 if (!retval)
44847 port->flags |= ASYNC_NORMAL_ACTIVE;
44848diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
44849index b3c4a25..723916f 100644
44850--- a/drivers/tty/sysrq.c
44851+++ b/drivers/tty/sysrq.c
44852@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
44853 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
44854 size_t count, loff_t *ppos)
44855 {
44856- if (count) {
44857+ if (count && capable(CAP_SYS_ADMIN)) {
44858 char c;
44859
44860 if (get_user(c, buf))
44861diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
44862index da9fde8..621d6dc 100644
44863--- a/drivers/tty/tty_io.c
44864+++ b/drivers/tty/tty_io.c
44865@@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty)
44866
44867 EXPORT_SYMBOL(start_tty);
44868
44869+static void tty_update_time(struct timespec *time)
44870+{
44871+ unsigned long sec = get_seconds();
44872+ sec -= sec % 60;
44873+ if ((long)(sec - time->tv_sec) > 0)
44874+ time->tv_sec = sec;
44875+}
44876+
44877 /**
44878 * tty_read - read method for tty device files
44879 * @file: pointer to tty file
44880@@ -977,8 +985,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
44881 else
44882 i = -EIO;
44883 tty_ldisc_deref(ld);
44884+
44885 if (i > 0)
44886- inode->i_atime = current_fs_time(inode->i_sb);
44887+ tty_update_time(&inode->i_atime);
44888+
44889 return i;
44890 }
44891
44892@@ -1080,8 +1090,7 @@ static inline ssize_t do_tty_write(
44893 cond_resched();
44894 }
44895 if (written) {
44896- struct inode *inode = file->f_path.dentry->d_inode;
44897- inode->i_mtime = current_fs_time(inode->i_sb);
44898+ tty_update_time(&file->f_path.dentry->d_inode->i_mtime);
44899 ret = written;
44900 }
44901 out:
44902@@ -3391,7 +3400,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
44903
44904 void tty_default_fops(struct file_operations *fops)
44905 {
44906- *fops = tty_fops;
44907+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
44908 }
44909
44910 /*
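
tty_update_time() trades timestamp precision for privacy: stamping a pty's atime/mtime on every read and write lets any local user who can stat the device node reconstruct another user's keystroke timing, so the patch rounds the clock down to the minute and only ever advances the stored value. The arithmetic, extracted into a standalone form:

    #include <stdio.h>
    #include <time.h>

    /* same policy as the tty_update_time() added above: minute
     * granularity, and a timestamp that only moves forward
     * (wrap-safe signed comparison) */
    static void tty_update_time(time_t *stamp)
    {
        time_t sec = time(NULL);
        sec -= sec % 60;                  /* round down to the minute */
        if ((long)(sec - *stamp) > 0)     /* advance, never rewind */
            *stamp = sec;
    }

    int main(void)
    {
        time_t atime = 0;
        tty_update_time(&atime);
        printf("atime=%ld, multiple of 60: %s\n",
               (long)atime, atime % 60 == 0 ? "yes" : "no");
        return 0;
    }
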
44911diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
44912index 78f1be2..3e98910 100644
44913--- a/drivers/tty/tty_ldisc.c
44914+++ b/drivers/tty/tty_ldisc.c
44915@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
44916 if (atomic_dec_and_test(&ld->users)) {
44917 struct tty_ldisc_ops *ldo = ld->ops;
44918
44919- ldo->refcount--;
44920+ atomic_dec(&ldo->refcount);
44921 module_put(ldo->owner);
44922 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44923
44924@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
44925 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44926 tty_ldiscs[disc] = new_ldisc;
44927 new_ldisc->num = disc;
44928- new_ldisc->refcount = 0;
44929+ atomic_set(&new_ldisc->refcount, 0);
44930 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44931
44932 return ret;
44933@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
44934 return -EINVAL;
44935
44936 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44937- if (tty_ldiscs[disc]->refcount)
44938+ if (atomic_read(&tty_ldiscs[disc]->refcount))
44939 ret = -EBUSY;
44940 else
44941 tty_ldiscs[disc] = NULL;
44942@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
44943 if (ldops) {
44944 ret = ERR_PTR(-EAGAIN);
44945 if (try_module_get(ldops->owner)) {
44946- ldops->refcount++;
44947+ atomic_inc(&ldops->refcount);
44948 ret = ldops;
44949 }
44950 }
44951@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
44952 unsigned long flags;
44953
44954 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44955- ldops->refcount--;
44956+ atomic_dec(&ldops->refcount);
44957 module_put(ldops->owner);
44958 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44959 }
44960diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
44961index b7ff59d..7c6105e 100644
44962--- a/drivers/tty/tty_port.c
44963+++ b/drivers/tty/tty_port.c
44964@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
44965 unsigned long flags;
44966
44967 spin_lock_irqsave(&port->lock, flags);
44968- port->count = 0;
44969+ atomic_set(&port->count, 0);
44970 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44971 if (port->tty) {
44972 set_bit(TTY_IO_ERROR, &port->tty->flags);
44973@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44974 /* The port lock protects the port counts */
44975 spin_lock_irqsave(&port->lock, flags);
44976 if (!tty_hung_up_p(filp))
44977- port->count--;
44978+ atomic_dec(&port->count);
44979 port->blocked_open++;
44980 spin_unlock_irqrestore(&port->lock, flags);
44981
44982@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44983 we must not mess that up further */
44984 spin_lock_irqsave(&port->lock, flags);
44985 if (!tty_hung_up_p(filp))
44986- port->count++;
44987+ atomic_inc(&port->count);
44988 port->blocked_open--;
44989 if (retval == 0)
44990 port->flags |= ASYNC_NORMAL_ACTIVE;
44991@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
44992 return 0;
44993 }
44994
44995- if (tty->count == 1 && port->count != 1) {
44996+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
44997 printk(KERN_WARNING
44998 "tty_port_close_start: tty->count = 1 port count = %d.\n",
44999- port->count);
45000- port->count = 1;
45001+ atomic_read(&port->count));
45002+ atomic_set(&port->count, 1);
45003 }
45004- if (--port->count < 0) {
45005+ if (atomic_dec_return(&port->count) < 0) {
45006 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
45007- port->count);
45008- port->count = 0;
45009+ atomic_read(&port->count));
45010+ atomic_set(&port->count, 0);
45011 }
45012
45013- if (port->count) {
45014+ if (atomic_read(&port->count)) {
45015 spin_unlock_irqrestore(&port->lock, flags);
45016 if (port->ops->drop)
45017 port->ops->drop(port);
45018@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
45019 {
45020 spin_lock_irq(&port->lock);
45021 if (!tty_hung_up_p(filp))
45022- ++port->count;
45023+ atomic_inc(&port->count);
45024 spin_unlock_irq(&port->lock);
45025 tty_port_tty_set(port, tty);
45026
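
This tty_port.c hunk is the hub of the port-count conversion threaded through the cyclades, hvc, hvcs, ipwireless, moxa, n_gsm, rocket, serial_core and synclink hunks above: tty_port.count becomes atomic_t, first-open is detected with atomic_inc_return(...) == 1 and last-close with atomic_dec_return(...) == 0, so increments cannot tear on the paths that touch the count outside the port lock and, under PAX_REFCOUNT, cannot silently overflow. The get/put skeleton in C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    struct port { atomic_int count; };

    static void port_open(struct port *p)
    {
        /* atomic_inc_return(&count) == 1 in the drivers above */
        if (atomic_fetch_add(&p->count, 1) + 1 == 1)
            puts("first open: init hardware");
    }

    static void port_close(struct port *p)
    {
        /* atomic_dec_return(&count) == 0 */
        if (atomic_fetch_sub(&p->count, 1) - 1 == 0)
            puts("last close: shut hardware down");
    }

    int main(void)
    {
        struct port p = { 0 };
        port_open(&p);    /* first open */
        port_open(&p);
        port_close(&p);
        port_close(&p);   /* last close */
        return 0;
    }
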
45027diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
45028index 681765b..d3ccdf2 100644
45029--- a/drivers/tty/vt/keyboard.c
45030+++ b/drivers/tty/vt/keyboard.c
45031@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
45032 kbd->kbdmode == VC_OFF) &&
45033 value != KVAL(K_SAK))
45034 return; /* SAK is allowed even in raw mode */
45035+
45036+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45037+ {
45038+ void *func = fn_handler[value];
45039+ if (func == fn_show_state || func == fn_show_ptregs ||
45040+ func == fn_show_mem)
45041+ return;
45042+ }
45043+#endif
45044+
45045 fn_handler[value](vc);
45046 }
45047
45048@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
45049 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
45050 return -EFAULT;
45051
45052- if (!capable(CAP_SYS_TTY_CONFIG))
45053- perm = 0;
45054-
45055 switch (cmd) {
45056 case KDGKBENT:
45057 /* Ensure another thread doesn't free it under us */
45058@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
45059 spin_unlock_irqrestore(&kbd_event_lock, flags);
45060 return put_user(val, &user_kbe->kb_value);
45061 case KDSKBENT:
45062+ if (!capable(CAP_SYS_TTY_CONFIG))
45063+ perm = 0;
45064+
45065 if (!perm)
45066 return -EPERM;
45067 if (!i && v == K_NOSUCHMAP) {
45068@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
45069 int i, j, k;
45070 int ret;
45071
45072- if (!capable(CAP_SYS_TTY_CONFIG))
45073- perm = 0;
45074-
45075 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
45076 if (!kbs) {
45077 ret = -ENOMEM;
45078@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
45079 kfree(kbs);
45080 return ((p && *p) ? -EOVERFLOW : 0);
45081 case KDSKBSENT:
45082+ if (!capable(CAP_SYS_TTY_CONFIG))
45083+ perm = 0;
45084+
45085 if (!perm) {
45086 ret = -EPERM;
45087 goto reterr;
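
Two related tightenings in keyboard.c: the fn_handler filter keeps the show-state/show-ptregs/show-mem helpers, which dump task and memory information to the console, unreachable from the keyboard when the GRKERNSEC_PROC options hide the same data in /proc; and the CAP_SYS_TTY_CONFIG test moves out of the common path into the KDSKBENT/KDSKBSENT (write) branches, so read-only keymap queries no longer trip a capability check they do not need. The reordered gate, heavily simplified:

    #include <stdio.h>
    #include <stdbool.h>

    enum { KDGKBENT, KDSKBENT };          /* get vs. set keymap entry */

    /* stand-in for capable(CAP_SYS_TTY_CONFIG) */
    static bool capable_tty_config = false;

    static int kbd_ioctl(int cmd, bool perm)
    {
        switch (cmd) {
        case KDGKBENT:                    /* read: no capability needed */
            return 0;
        case KDSKBENT:                    /* write: gate here, not up front */
            if (!capable_tty_config)
                perm = false;
            return perm ? 0 : -1;         /* -EPERM in the kernel */
        }
        return -1;
    }

    int main(void)
    {
        printf("KDGKBENT -> %d\n", kbd_ioctl(KDGKBENT, true));  /* 0  */
        printf("KDSKBENT -> %d\n", kbd_ioctl(KDSKBENT, true));  /* -1 */
        return 0;
    }
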
45088diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
45089index 5110f36..8dc0a74 100644
45090--- a/drivers/uio/uio.c
45091+++ b/drivers/uio/uio.c
45092@@ -25,6 +25,7 @@
45093 #include <linux/kobject.h>
45094 #include <linux/cdev.h>
45095 #include <linux/uio_driver.h>
45096+#include <asm/local.h>
45097
45098 #define UIO_MAX_DEVICES (1U << MINORBITS)
45099
45100@@ -32,10 +33,10 @@ struct uio_device {
45101 struct module *owner;
45102 struct device *dev;
45103 int minor;
45104- atomic_t event;
45105+ atomic_unchecked_t event;
45106 struct fasync_struct *async_queue;
45107 wait_queue_head_t wait;
45108- int vma_count;
45109+ local_t vma_count;
45110 struct uio_info *info;
45111 struct kobject *map_dir;
45112 struct kobject *portio_dir;
45113@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
45114 struct device_attribute *attr, char *buf)
45115 {
45116 struct uio_device *idev = dev_get_drvdata(dev);
45117- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
45118+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
45119 }
45120
45121 static struct device_attribute uio_class_attributes[] = {
45122@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
45123 {
45124 struct uio_device *idev = info->uio_dev;
45125
45126- atomic_inc(&idev->event);
45127+ atomic_inc_unchecked(&idev->event);
45128 wake_up_interruptible(&idev->wait);
45129 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
45130 }
45131@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
45132 }
45133
45134 listener->dev = idev;
45135- listener->event_count = atomic_read(&idev->event);
45136+ listener->event_count = atomic_read_unchecked(&idev->event);
45137 filep->private_data = listener;
45138
45139 if (idev->info->open) {
45140@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
45141 return -EIO;
45142
45143 poll_wait(filep, &idev->wait, wait);
45144- if (listener->event_count != atomic_read(&idev->event))
45145+ if (listener->event_count != atomic_read_unchecked(&idev->event))
45146 return POLLIN | POLLRDNORM;
45147 return 0;
45148 }
45149@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
45150 do {
45151 set_current_state(TASK_INTERRUPTIBLE);
45152
45153- event_count = atomic_read(&idev->event);
45154+ event_count = atomic_read_unchecked(&idev->event);
45155 if (event_count != listener->event_count) {
45156 if (copy_to_user(buf, &event_count, count))
45157 retval = -EFAULT;
45158@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
45159 static void uio_vma_open(struct vm_area_struct *vma)
45160 {
45161 struct uio_device *idev = vma->vm_private_data;
45162- idev->vma_count++;
45163+ local_inc(&idev->vma_count);
45164 }
45165
45166 static void uio_vma_close(struct vm_area_struct *vma)
45167 {
45168 struct uio_device *idev = vma->vm_private_data;
45169- idev->vma_count--;
45170+ local_dec(&idev->vma_count);
45171 }
45172
45173 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
45174@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
45175 idev->owner = owner;
45176 idev->info = info;
45177 init_waitqueue_head(&idev->wait);
45178- atomic_set(&idev->event, 0);
45179+ atomic_set_unchecked(&idev->event, 0);
45180
45181 ret = uio_get_minor(idev);
45182 if (ret)
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index b7eb86a..36d28af 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
 if (ret < 2)
 return -EINVAL;
- if (index < 0 || index > 0x7f)
+ if (index > 0x7f)
 return -EINVAL;
 pos += tmp;

diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 35f10bf..6a38a0b 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
 if (printk_ratelimit())
 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
 __func__, vpi, vci);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 return;
 }

@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
 if (length > ATM_MAX_AAL5_PDU) {
 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
 __func__, length, vcc);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 goto out;
 }

@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
 if (sarb->len < pdu_length) {
 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
 __func__, pdu_length, sarb->len, vcc);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 goto out;
 }

 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
 __func__, vcc);
- atomic_inc(&vcc->stats->rx_err);
+ atomic_inc_unchecked(&vcc->stats->rx_err);
 goto out;
 }

@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
 if (printk_ratelimit())
 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
 __func__, length);
- atomic_inc(&vcc->stats->rx_drop);
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
 goto out;
 }

@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char

 vcc->push(vcc, skb);

- atomic_inc(&vcc->stats->rx);
+ atomic_inc_unchecked(&vcc->stats->rx);
 out:
 skb_trim(sarb, 0);
 }
@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;

 usbatm_pop(vcc, skb);
- atomic_inc(&vcc->stats->tx);
+ atomic_inc_unchecked(&vcc->stats->tx);

 skb = skb_dequeue(&instance->sndqueue);
 }
@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
 if (!left--)
 return sprintf(page,
 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
- atomic_read(&atm_dev->stats.aal5.tx),
- atomic_read(&atm_dev->stats.aal5.tx_err),
- atomic_read(&atm_dev->stats.aal5.rx),
- atomic_read(&atm_dev->stats.aal5.rx_err),
- atomic_read(&atm_dev->stats.aal5.rx_drop));
+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));

 if (!left--) {
 if (instance->disconnected)
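
The atomic_inc_unchecked/atomic_read_unchecked conversions throughout these ATM hunks are the PaX REFCOUNT escape hatch: under that feature, plain atomic_t operations trap on overflow to catch reference-count bugs, so pure statistics counters that may legitimately wrap are moved to a separate unchecked type. A simplified sketch of the idea (condensed from the x86 definitions elsewhere in this patch; the real versions cover the full atomic API):

        /* Counters allowed to wrap use this type; atomic_t keeps the
         * overflow check. Sketch only, not the complete definition. */
        typedef struct {
                int counter;
        } atomic_unchecked_t;

        static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
        {
                asm volatile("lock incl %0"     /* no overflow trap here */
                             : "+m" (v->counter));
        }

        static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
        {
                return (*(volatile const int *)&v->counter);
        }
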
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index cbacea9..246cccd 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -126,7 +126,7 @@ static const char format_endpt[] =
 * time it gets called.
 */
 static struct device_connect_event {
- atomic_t count;
+ atomic_unchecked_t count;
 wait_queue_head_t wait;
 } device_event = {
 .count = ATOMIC_INIT(1),
@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {

 void usbfs_conn_disc_event(void)
 {
- atomic_add(2, &device_event.count);
+ atomic_add_unchecked(2, &device_event.count);
 wake_up(&device_event.wait);
 }

@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,

 poll_wait(file, &device_event.wait, wait);

- event_count = atomic_read(&device_event.count);
+ event_count = atomic_read_unchecked(&device_event.count);
 if (file->f_version != event_count) {
 file->f_version = event_count;
 return POLLIN | POLLRDNORM;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8e64adf..9a33a3c 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
 */
 usb_get_urb(urb);
 atomic_inc(&urb->use_count);
- atomic_inc(&urb->dev->urbnum);
+ atomic_inc_unchecked(&urb->dev->urbnum);
 usbmon_urb_submit(&hcd->self, urb);

 /* NOTE requirements on root-hub callers (usbfs and the hub
@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
 urb->hcpriv = NULL;
 INIT_LIST_HEAD(&urb->urb_list);
 atomic_dec(&urb->use_count);
- atomic_dec(&urb->dev->urbnum);
+ atomic_dec_unchecked(&urb->dev->urbnum);
 if (atomic_read(&urb->reject))
 wake_up(&usb_kill_urb_queue);
 usb_put_urb(urb);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 131f736..99004c3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
 * method can wait for it to complete. Since you don't have a handle on the
 * URB used, you can't cancel the request.
 */
-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
 __u8 requesttype, __u16 value, __u16 index, void *data,
 __u16 size, int timeout)
 {
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 818e4a0..0fc9589 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
 struct usb_device *udev;

 udev = to_usb_device(dev);
- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
 }
 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);

diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index f81b925..78d22ec 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
 set_dev_node(&dev->dev, dev_to_node(bus->controller));
 dev->state = USB_STATE_ATTACHED;
 dev->lpm_disable_count = 1;
- atomic_set(&dev->urbnum, 0);
+ atomic_set_unchecked(&dev->urbnum, 0);

 INIT_LIST_HEAD(&dev->ep0.urb_list);
 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 5e29dde..eca992f 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)

 #ifdef CONFIG_KGDB
 static struct kgdb_io kgdbdbgp_io_ops;
-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+static struct kgdb_io kgdbdbgp_io_ops_console;
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
 #else
 #define dbgp_kgdb_mode (0)
 #endif
@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
 .write_char = kgdbdbgp_write_char,
 };

+static struct kgdb_io kgdbdbgp_io_ops_console = {
+ .name = "kgdbdbgp",
+ .read_char = kgdbdbgp_read_char,
+ .write_char = kgdbdbgp_write_char,
+ .is_console = 1
+};
+
 static int kgdbdbgp_wait_time;

 static int __init kgdbdbgp_parse_config(char *str)
@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
 ptr++;
 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
 }
- kgdb_register_io_module(&kgdbdbgp_io_ops);
- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+ if (early_dbgp_console.index != -1)
+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
+ else
+ kgdb_register_io_module(&kgdbdbgp_io_ops);

 return 0;
 }
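
The ehci-dbgp hunk replaces a runtime write to kgdbdbgp_io_ops.is_console with two statically initialized ops structures, one per configuration; once nothing mutates the structure after init, it can eventually live in read-only memory. A hedged sketch of the same pattern for a hypothetical driver (example_* names and the acting_as_console flag are illustrative, not from the patch):

        #include <linux/init.h>
        #include <linux/kgdb.h>
        #include <linux/types.h>

        static bool acting_as_console;  /* hypothetical: set from cmdline parsing */

        static int example_read_char(void) { return 0; }        /* stub */
        static void example_write_char(u8 c) { }                /* stub */

        static struct kgdb_io example_io_ops = {
                .name       = "example",
                .read_char  = example_read_char,
                .write_char = example_write_char,
        };

        static struct kgdb_io example_io_ops_console = {
                .name       = "example",
                .read_char  = example_read_char,
                .write_char = example_write_char,
                .is_console = 1,        /* the only field that differed */
        };

        static int __init example_init(void)
        {
                /* choose the right table once instead of patching .is_console */
                return kgdb_register_io_module(acting_as_console ?
                                               &example_io_ops_console :
                                               &example_io_ops);
        }
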
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 598dcc1..032dd4f 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
 spin_lock_irq(&port->port_lock);

 /* already open? Great. */
- if (port->port.count) {
+ if (atomic_read(&port->port.count)) {
 status = 0;
- port->port.count++;
+ atomic_inc(&port->port.count);

 /* currently opening/closing? wait ... */
 } else if (port->openclose) {
@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
 tty->driver_data = port;
 port->port.tty = tty;

- port->port.count = 1;
+ atomic_set(&port->port.count, 1);
 port->openclose = false;

 /* if connected, start the I/O stream */
@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)

 spin_lock_irq(&port->port_lock);

- if (port->port.count != 1) {
- if (port->port.count == 0)
+ if (atomic_read(&port->port.count) != 1) {
+ if (atomic_read(&port->port.count) == 0)
 WARN_ON(1);
 else
- --port->port.count;
+ atomic_dec(&port->port.count);
 goto exit;
 }

@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
 * and sleep if necessary
 */
 port->openclose = true;
- port->port.count = 0;
+ atomic_set(&port->port.count, 0);

 gser = port->port_usb;
 if (gser && gser->disconnect)
@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
 int cond;

 spin_lock_irq(&port->port_lock);
- cond = (port->port.count == 0) && !port->openclose;
+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
 spin_unlock_irq(&port->port_lock);
 return cond;
 }
@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
 /* if it's already open, start I/O ... and notify the serial
 * protocol about open/close status (connect/disconnect).
 */
- if (port->port.count) {
+ if (atomic_read(&port->port.count)) {
 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
 gs_start_io(port);
 if (gser->connect)
@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)

 port->port_usb = NULL;
 gser->ioport = NULL;
- if (port->port.count > 0 || port->openclose) {
+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
 wake_up_interruptible(&port->drain_wait);
 if (port->port.tty)
 tty_hangup(port->port.tty);
@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)

 /* finally, free any unused/unusable I/O buffers */
 spin_lock_irqsave(&port->port_lock, flags);
- if (port->port.count == 0 && !port->openclose)
+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
 gs_buf_free(&port->port_write_buf);
 gs_free_requests(gser->out, &port->read_pool, NULL);
 gs_free_requests(gser->out, &port->read_queue, NULL);
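
These u_serial hunks (and the console.c hunks below) follow from the patch converting tty_port's open count from a plain int to an atomic_t, so every reader and writer has to go through atomic_read/atomic_inc/atomic_set. A minimal sketch of the resulting open/close shape, mirroring the gs_open/gs_close logic above with a hypothetical example_port (the real code additionally holds port_lock around these sections):

        #include <linux/atomic.h>
        #include <linux/bug.h>

        struct example_port {
                atomic_t count;         /* open handles; readable without the lock */
        };

        static int example_open(struct example_port *port)
        {
                if (atomic_read(&port->count)) {        /* already open? ref it */
                        atomic_inc(&port->count);
                        return 0;
                }
                atomic_set(&port->count, 1);            /* first opener */
                return 1;                               /* caller starts I/O */
        }

        static void example_close(struct example_port *port)
        {
                if (WARN_ON(atomic_read(&port->count) == 0))
                        return;
                if (atomic_read(&port->count) != 1) {
                        atomic_dec(&port->count);       /* not the last close */
                        return;
                }
                atomic_set(&port->count, 0);            /* last close: tear down */
        }
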
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 5f3bcd3..bfca43f 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)

 info->port = port;

- ++port->port.count;
+ atomic_inc(&port->port.count);
 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
 if (serial->type->set_termios) {
 /*
@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
 }
 /* Now that any required fake tty operations are completed restore
 * the tty port count */
- --port->port.count;
+ atomic_dec(&port->port.count);
 /* The console is special in terms of closing the device so
 * indicate this port is now acting as a system console. */
 port->port.console = 1;
@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
 free_tty:
 kfree(tty);
 reset_open_count:
- port->port.count = 0;
+ atomic_set(&port->port.count, 0);
 usb_autopm_put_interface(serial->interface);
 error_get_interface:
 usb_serial_put(serial);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 6c3586a..a94e621 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -429,7 +429,7 @@ static int rts51x_read_status(struct us_data *us,

 buf = kmalloc(len, GFP_NOIO);
 if (buf == NULL)
- return USB_STOR_TRANSPORT_ERROR;
+ return -ENOMEM;

 US_DEBUGP("%s, lun = %d\n", __func__, lun);

diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 75f70f0..d467e1a 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -63,7 +63,7 @@ struct us_unusual_dev {
 __u8 useProtocol;
 __u8 useTransport;
 int (*initFunction)(struct us_data *);
-};
+} __do_const;


 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index d6bea3e..60b250e 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -192,7 +192,7 @@ struct wahc {
 struct list_head xfer_delayed_list;
 spinlock_t xfer_list_lock;
 struct work_struct xfer_work;
- atomic_t xfer_id_count;
+ atomic_unchecked_t xfer_id_count;
 };


@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
 INIT_LIST_HEAD(&wa->xfer_delayed_list);
 spin_lock_init(&wa->xfer_list_lock);
 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
- atomic_set(&wa->xfer_id_count, 1);
+ atomic_set_unchecked(&wa->xfer_id_count, 1);
 }

 /**
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 57c01ab..8a05959 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -296,7 +296,7 @@ out:
 */
 static void wa_xfer_id_init(struct wa_xfer *xfer)
 {
- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
 }

 /*
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 8c55011..eed4ae1a 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -149,7 +149,7 @@ enum {
 };

 /* Must match above enum */
-static char * const r128_family[] = {
+static const char * const r128_family[] = {
 "AGP",
 "PCI",
 "PRO AGP",
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 4f27fdc..d3537e6 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
 par->accel_flags = var->accel_flags; /* hack */

 if (var->accel_flags) {
- info->fbops->fb_sync = atyfb_sync;
+ pax_open_kernel();
+ *(void **)&info->fbops->fb_sync = atyfb_sync;
+ pax_close_kernel();
 info->flags &= ~FBINFO_HWACCEL_DISABLED;
 } else {
- info->fbops->fb_sync = NULL;
+ pax_open_kernel();
+ *(void **)&info->fbops->fb_sync = NULL;
+ pax_close_kernel();
 info->flags |= FBINFO_HWACCEL_DISABLED;
 }

diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
index 95ec042..e6affdd 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/aty/mach64_cursor.c
@@ -7,6 +7,7 @@
 #include <linux/string.h>

 #include <asm/io.h>
+#include <asm/pgtable.h>

 #ifdef __sparc__
 #include <asm/fbio.h>
@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
 info->sprite.buf_align = 16; /* and 64 lines tall. */
 info->sprite.flags = FB_PIXMAP_IO;

- info->fbops->fb_cursor = atyfb_cursor;
+ pax_open_kernel();
+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
+ pax_close_kernel();

 return 0;
 }
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 6c5ed6b..b727c88 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
 static unsigned long kb3886bl_flags;
 #define KB3886BL_SUSPENDED 0x01

-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
 {
 .ident = "Sahara Touch-iT",
 .matches = {
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 88cad6b..dd746c7 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)

 BUG_ON(!fbdefio);
 mutex_init(&fbdefio->lock);
- info->fbops->fb_mmap = fb_deferred_io_mmap;
+ pax_open_kernel();
+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
+ pax_close_kernel();
 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
 INIT_LIST_HEAD(&fbdefio->pagelist);
 if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 page->mapping = NULL;
 }

- info->fbops->fb_mmap = NULL;
+ *(void **)&info->fbops->fb_mmap = NULL;
 mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
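
The pax_open_kernel()/pax_close_kernel() pairs in the atyfb, mach64_cursor, and fb_defio hunks all serve one purpose: with the patch's constification, struct fb_ops lives in read-only memory, so the few places that legitimately swap a method pointer at runtime must lift kernel write protection around a single store, and the *(void **)& cast defeats the const qualifier. A minimal sketch of the pattern, assuming a PaX-patched kernel where these helpers exist (set_fb_sync is a hypothetical wrapper, not from the patch):

        #include <linux/fb.h>

        /* Write one method of an otherwise read-only ops table. On x86 the
         * helpers toggle write protection (CR0.WP) just for this window. */
        static void set_fb_sync(struct fb_info *info,
                                int (*sync)(struct fb_info *))
        {
                pax_open_kernel();
                *(void **)&info->fbops->fb_sync = sync; /* cast strips const */
                pax_close_kernel();
        }

The design point is that the writable window is kept as narrow as possible: one store between open and close, never a whole configuration phase.
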
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index 5c3960d..15cf8fc 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
 rc = -ENODEV;
 goto out;
 }
- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
- !info->fbops->fb_setcmap)) {
+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
 rc = -EINVAL;
 goto out1;
 }
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 0a49456..fd5be1b 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
 image->dx += image->width + 8;
 }
 } else if (rotate == FB_ROTATE_UD) {
- for (x = 0; x < num && image->dx >= 0; x++) {
+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
 info->fbops->fb_imageblit(info, image);
 image->dx -= image->width + 8;
 }
@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
 image->dy += image->height + 8;
 }
 } else if (rotate == FB_ROTATE_CCW) {
- for (x = 0; x < num && image->dy >= 0; x++) {
+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
 info->fbops->fb_imageblit(info, image);
 image->dy -= image->height + 8;
 }
@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
 return -EFAULT;
 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
 return -EINVAL;
- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
+ if (con2fb.framebuffer >= FB_MAX)
 return -EINVAL;
 if (!registered_fb[con2fb.framebuffer])
 request_module("fb%d", con2fb.framebuffer);
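
The fbmem.c loop fixes and the dropped "< 0" tests in fbcmap.c, fbmem.c, and cxacru.c above are two faces of the same issue: fields like image->dx and con2fb.framebuffer are unsigned, so a "< 0" comparison is always false and a ">= 0" loop guard is always true; the (__s32) cast makes the post-wraparound value negative so the countdown loop actually terminates. A small standalone demo of the wraparound (hypothetical values, userspace C just to show the arithmetic):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t dx = 20;       /* mimics struct fb_image's __u32 dx */
                int steps = 0;

                /* with plain "dx >= 0" this loop would never stop on its own */
                while ((int32_t)dx >= 0 && steps < 100) {
                        dx -= 16;       /* 20 -> 4 -> wraps to 0xfffffff4 */
                        steps++;
                }
                printf("stopped after %d steps, dx = 0x%08x\n", steps, dx);
                return 0;
        }

Run, this prints "stopped after 2 steps, dx = 0xfffffff4": the third iteration never happens because the wrapped value reads as -12 once cast to a signed type.
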
diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
index 7672d2e..b56437f 100644
--- a/drivers/video/i810/i810_accel.c
+++ b/drivers/video/i810/i810_accel.c
@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
 }
 }
 printk("ringbuffer lockup!!!\n");
+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
 i810_report_error(mmio);
 par->dev_flags |= LOCKUP;
 info->pixmap.scan_align = 1;
diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
index 3c14e43..eafa544 100644
--- a/drivers/video/logo/logo_linux_clut224.ppm
+++ b/drivers/video/logo/logo_linux_clut224.ppm
@@ -1,1604 +1,1123 @@
 P3
-# Standard 224-color Linux logo
 80 80
 255
[~1,100 lines of raw PPM pixel triplets elided: the remainder of this hunk deletes the standard 80x80, 224-color Linux logo bitmap row by row; the section is truncated before the replacement "+" rows appear]
46861-253 253 253 253 253 253 253 253 253 246 230 190
46862-238 204 91 238 204 91 181 142 44 37 26 9
46863- 2 2 6 2 2 6 2 2 6 2 2 6
46864- 2 2 6 2 2 6 38 38 38 46 46 46
46865- 26 26 26 106 106 106 54 54 54 18 18 18
46866- 6 6 6 0 0 0 0 0 0 0 0 0
46867- 0 0 0 0 0 0 0 0 0 0 0 0
46868- 0 0 0 0 0 0 0 0 0 0 0 0
46869- 0 0 0 0 0 0 0 0 0 0 0 0
46870- 0 0 0 0 0 0 0 0 0 0 0 0
46871- 0 0 0 6 6 6 14 14 14 22 22 22
46872- 30 30 30 38 38 38 50 50 50 70 70 70
46873-106 106 106 190 142 34 226 170 11 242 186 14
46874-246 190 14 246 190 14 246 190 14 154 114 10
46875- 6 6 6 74 74 74 226 226 226 253 253 253
46876-253 253 253 253 253 253 253 253 253 253 253 253
46877-253 253 253 253 253 253 231 231 231 250 250 250
46878-253 253 253 253 253 253 253 253 253 253 253 253
46879-253 253 253 253 253 253 253 253 253 253 253 253
46880-253 253 253 253 253 253 253 253 253 253 253 253
46881-253 253 253 253 253 253 253 253 253 228 184 62
46882-241 196 14 241 208 19 232 195 16 38 30 10
46883- 2 2 6 2 2 6 2 2 6 2 2 6
46884- 2 2 6 6 6 6 30 30 30 26 26 26
46885-203 166 17 154 142 90 66 66 66 26 26 26
46886- 6 6 6 0 0 0 0 0 0 0 0 0
46887- 0 0 0 0 0 0 0 0 0 0 0 0
46888- 0 0 0 0 0 0 0 0 0 0 0 0
46889- 0 0 0 0 0 0 0 0 0 0 0 0
46890- 0 0 0 0 0 0 0 0 0 0 0 0
46891- 6 6 6 18 18 18 38 38 38 58 58 58
46892- 78 78 78 86 86 86 101 101 101 123 123 123
46893-175 146 61 210 150 10 234 174 13 246 186 14
46894-246 190 14 246 190 14 246 190 14 238 190 10
46895-102 78 10 2 2 6 46 46 46 198 198 198
46896-253 253 253 253 253 253 253 253 253 253 253 253
46897-253 253 253 253 253 253 234 234 234 242 242 242
46898-253 253 253 253 253 253 253 253 253 253 253 253
46899-253 253 253 253 253 253 253 253 253 253 253 253
46900-253 253 253 253 253 253 253 253 253 253 253 253
46901-253 253 253 253 253 253 253 253 253 224 178 62
46902-242 186 14 241 196 14 210 166 10 22 18 6
46903- 2 2 6 2 2 6 2 2 6 2 2 6
46904- 2 2 6 2 2 6 6 6 6 121 92 8
46905-238 202 15 232 195 16 82 82 82 34 34 34
46906- 10 10 10 0 0 0 0 0 0 0 0 0
46907- 0 0 0 0 0 0 0 0 0 0 0 0
46908- 0 0 0 0 0 0 0 0 0 0 0 0
46909- 0 0 0 0 0 0 0 0 0 0 0 0
46910- 0 0 0 0 0 0 0 0 0 0 0 0
46911- 14 14 14 38 38 38 70 70 70 154 122 46
46912-190 142 34 200 144 11 197 138 11 197 138 11
46913-213 154 11 226 170 11 242 186 14 246 190 14
46914-246 190 14 246 190 14 246 190 14 246 190 14
46915-225 175 15 46 32 6 2 2 6 22 22 22
46916-158 158 158 250 250 250 253 253 253 253 253 253
46917-253 253 253 253 253 253 253 253 253 253 253 253
46918-253 253 253 253 253 253 253 253 253 253 253 253
46919-253 253 253 253 253 253 253 253 253 253 253 253
46920-253 253 253 253 253 253 253 253 253 253 253 253
46921-253 253 253 250 250 250 242 242 242 224 178 62
46922-239 182 13 236 186 11 213 154 11 46 32 6
46923- 2 2 6 2 2 6 2 2 6 2 2 6
46924- 2 2 6 2 2 6 61 42 6 225 175 15
46925-238 190 10 236 186 11 112 100 78 42 42 42
46926- 14 14 14 0 0 0 0 0 0 0 0 0
46927- 0 0 0 0 0 0 0 0 0 0 0 0
46928- 0 0 0 0 0 0 0 0 0 0 0 0
46929- 0 0 0 0 0 0 0 0 0 0 0 0
46930- 0 0 0 0 0 0 0 0 0 6 6 6
46931- 22 22 22 54 54 54 154 122 46 213 154 11
46932-226 170 11 230 174 11 226 170 11 226 170 11
46933-236 178 12 242 186 14 246 190 14 246 190 14
46934-246 190 14 246 190 14 246 190 14 246 190 14
46935-241 196 14 184 144 12 10 10 10 2 2 6
46936- 6 6 6 116 116 116 242 242 242 253 253 253
46937-253 253 253 253 253 253 253 253 253 253 253 253
46938-253 253 253 253 253 253 253 253 253 253 253 253
46939-253 253 253 253 253 253 253 253 253 253 253 253
46940-253 253 253 253 253 253 253 253 253 253 253 253
46941-253 253 253 231 231 231 198 198 198 214 170 54
46942-236 178 12 236 178 12 210 150 10 137 92 6
46943- 18 14 6 2 2 6 2 2 6 2 2 6
46944- 6 6 6 70 47 6 200 144 11 236 178 12
46945-239 182 13 239 182 13 124 112 88 58 58 58
46946- 22 22 22 6 6 6 0 0 0 0 0 0
46947- 0 0 0 0 0 0 0 0 0 0 0 0
46948- 0 0 0 0 0 0 0 0 0 0 0 0
46949- 0 0 0 0 0 0 0 0 0 0 0 0
46950- 0 0 0 0 0 0 0 0 0 10 10 10
46951- 30 30 30 70 70 70 180 133 36 226 170 11
46952-239 182 13 242 186 14 242 186 14 246 186 14
46953-246 190 14 246 190 14 246 190 14 246 190 14
46954-246 190 14 246 190 14 246 190 14 246 190 14
46955-246 190 14 232 195 16 98 70 6 2 2 6
46956- 2 2 6 2 2 6 66 66 66 221 221 221
46957-253 253 253 253 253 253 253 253 253 253 253 253
46958-253 253 253 253 253 253 253 253 253 253 253 253
46959-253 253 253 253 253 253 253 253 253 253 253 253
46960-253 253 253 253 253 253 253 253 253 253 253 253
46961-253 253 253 206 206 206 198 198 198 214 166 58
46962-230 174 11 230 174 11 216 158 10 192 133 9
46963-163 110 8 116 81 8 102 78 10 116 81 8
46964-167 114 7 197 138 11 226 170 11 239 182 13
46965-242 186 14 242 186 14 162 146 94 78 78 78
46966- 34 34 34 14 14 14 6 6 6 0 0 0
46967- 0 0 0 0 0 0 0 0 0 0 0 0
46968- 0 0 0 0 0 0 0 0 0 0 0 0
46969- 0 0 0 0 0 0 0 0 0 0 0 0
46970- 0 0 0 0 0 0 0 0 0 6 6 6
46971- 30 30 30 78 78 78 190 142 34 226 170 11
46972-239 182 13 246 190 14 246 190 14 246 190 14
46973-246 190 14 246 190 14 246 190 14 246 190 14
46974-246 190 14 246 190 14 246 190 14 246 190 14
46975-246 190 14 241 196 14 203 166 17 22 18 6
46976- 2 2 6 2 2 6 2 2 6 38 38 38
46977-218 218 218 253 253 253 253 253 253 253 253 253
46978-253 253 253 253 253 253 253 253 253 253 253 253
46979-253 253 253 253 253 253 253 253 253 253 253 253
46980-253 253 253 253 253 253 253 253 253 253 253 253
46981-250 250 250 206 206 206 198 198 198 202 162 69
46982-226 170 11 236 178 12 224 166 10 210 150 10
46983-200 144 11 197 138 11 192 133 9 197 138 11
46984-210 150 10 226 170 11 242 186 14 246 190 14
46985-246 190 14 246 186 14 225 175 15 124 112 88
46986- 62 62 62 30 30 30 14 14 14 6 6 6
46987- 0 0 0 0 0 0 0 0 0 0 0 0
46988- 0 0 0 0 0 0 0 0 0 0 0 0
46989- 0 0 0 0 0 0 0 0 0 0 0 0
46990- 0 0 0 0 0 0 0 0 0 10 10 10
46991- 30 30 30 78 78 78 174 135 50 224 166 10
46992-239 182 13 246 190 14 246 190 14 246 190 14
46993-246 190 14 246 190 14 246 190 14 246 190 14
46994-246 190 14 246 190 14 246 190 14 246 190 14
46995-246 190 14 246 190 14 241 196 14 139 102 15
46996- 2 2 6 2 2 6 2 2 6 2 2 6
46997- 78 78 78 250 250 250 253 253 253 253 253 253
46998-253 253 253 253 253 253 253 253 253 253 253 253
46999-253 253 253 253 253 253 253 253 253 253 253 253
47000-253 253 253 253 253 253 253 253 253 253 253 253
47001-250 250 250 214 214 214 198 198 198 190 150 46
47002-219 162 10 236 178 12 234 174 13 224 166 10
47003-216 158 10 213 154 11 213 154 11 216 158 10
47004-226 170 11 239 182 13 246 190 14 246 190 14
47005-246 190 14 246 190 14 242 186 14 206 162 42
47006-101 101 101 58 58 58 30 30 30 14 14 14
47007- 6 6 6 0 0 0 0 0 0 0 0 0
47008- 0 0 0 0 0 0 0 0 0 0 0 0
47009- 0 0 0 0 0 0 0 0 0 0 0 0
47010- 0 0 0 0 0 0 0 0 0 10 10 10
47011- 30 30 30 74 74 74 174 135 50 216 158 10
47012-236 178 12 246 190 14 246 190 14 246 190 14
47013-246 190 14 246 190 14 246 190 14 246 190 14
47014-246 190 14 246 190 14 246 190 14 246 190 14
47015-246 190 14 246 190 14 241 196 14 226 184 13
47016- 61 42 6 2 2 6 2 2 6 2 2 6
47017- 22 22 22 238 238 238 253 253 253 253 253 253
47018-253 253 253 253 253 253 253 253 253 253 253 253
47019-253 253 253 253 253 253 253 253 253 253 253 253
47020-253 253 253 253 253 253 253 253 253 253 253 253
47021-253 253 253 226 226 226 187 187 187 180 133 36
47022-216 158 10 236 178 12 239 182 13 236 178 12
47023-230 174 11 226 170 11 226 170 11 230 174 11
47024-236 178 12 242 186 14 246 190 14 246 190 14
47025-246 190 14 246 190 14 246 186 14 239 182 13
47026-206 162 42 106 106 106 66 66 66 34 34 34
47027- 14 14 14 6 6 6 0 0 0 0 0 0
47028- 0 0 0 0 0 0 0 0 0 0 0 0
47029- 0 0 0 0 0 0 0 0 0 0 0 0
47030- 0 0 0 0 0 0 0 0 0 6 6 6
47031- 26 26 26 70 70 70 163 133 67 213 154 11
47032-236 178 12 246 190 14 246 190 14 246 190 14
47033-246 190 14 246 190 14 246 190 14 246 190 14
47034-246 190 14 246 190 14 246 190 14 246 190 14
47035-246 190 14 246 190 14 246 190 14 241 196 14
47036-190 146 13 18 14 6 2 2 6 2 2 6
47037- 46 46 46 246 246 246 253 253 253 253 253 253
47038-253 253 253 253 253 253 253 253 253 253 253 253
47039-253 253 253 253 253 253 253 253 253 253 253 253
47040-253 253 253 253 253 253 253 253 253 253 253 253
47041-253 253 253 221 221 221 86 86 86 156 107 11
47042-216 158 10 236 178 12 242 186 14 246 186 14
47043-242 186 14 239 182 13 239 182 13 242 186 14
47044-242 186 14 246 186 14 246 190 14 246 190 14
47045-246 190 14 246 190 14 246 190 14 246 190 14
47046-242 186 14 225 175 15 142 122 72 66 66 66
47047- 30 30 30 10 10 10 0 0 0 0 0 0
47048- 0 0 0 0 0 0 0 0 0 0 0 0
47049- 0 0 0 0 0 0 0 0 0 0 0 0
47050- 0 0 0 0 0 0 0 0 0 6 6 6
47051- 26 26 26 70 70 70 163 133 67 210 150 10
47052-236 178 12 246 190 14 246 190 14 246 190 14
47053-246 190 14 246 190 14 246 190 14 246 190 14
47054-246 190 14 246 190 14 246 190 14 246 190 14
47055-246 190 14 246 190 14 246 190 14 246 190 14
47056-232 195 16 121 92 8 34 34 34 106 106 106
47057-221 221 221 253 253 253 253 253 253 253 253 253
47058-253 253 253 253 253 253 253 253 253 253 253 253
47059-253 253 253 253 253 253 253 253 253 253 253 253
47060-253 253 253 253 253 253 253 253 253 253 253 253
47061-242 242 242 82 82 82 18 14 6 163 110 8
47062-216 158 10 236 178 12 242 186 14 246 190 14
47063-246 190 14 246 190 14 246 190 14 246 190 14
47064-246 190 14 246 190 14 246 190 14 246 190 14
47065-246 190 14 246 190 14 246 190 14 246 190 14
47066-246 190 14 246 190 14 242 186 14 163 133 67
47067- 46 46 46 18 18 18 6 6 6 0 0 0
47068- 0 0 0 0 0 0 0 0 0 0 0 0
47069- 0 0 0 0 0 0 0 0 0 0 0 0
47070- 0 0 0 0 0 0 0 0 0 10 10 10
47071- 30 30 30 78 78 78 163 133 67 210 150 10
47072-236 178 12 246 186 14 246 190 14 246 190 14
47073-246 190 14 246 190 14 246 190 14 246 190 14
47074-246 190 14 246 190 14 246 190 14 246 190 14
47075-246 190 14 246 190 14 246 190 14 246 190 14
47076-241 196 14 215 174 15 190 178 144 253 253 253
47077-253 253 253 253 253 253 253 253 253 253 253 253
47078-253 253 253 253 253 253 253 253 253 253 253 253
47079-253 253 253 253 253 253 253 253 253 253 253 253
47080-253 253 253 253 253 253 253 253 253 218 218 218
47081- 58 58 58 2 2 6 22 18 6 167 114 7
47082-216 158 10 236 178 12 246 186 14 246 190 14
47083-246 190 14 246 190 14 246 190 14 246 190 14
47084-246 190 14 246 190 14 246 190 14 246 190 14
47085-246 190 14 246 190 14 246 190 14 246 190 14
47086-246 190 14 246 186 14 242 186 14 190 150 46
47087- 54 54 54 22 22 22 6 6 6 0 0 0
47088- 0 0 0 0 0 0 0 0 0 0 0 0
47089- 0 0 0 0 0 0 0 0 0 0 0 0
47090- 0 0 0 0 0 0 0 0 0 14 14 14
47091- 38 38 38 86 86 86 180 133 36 213 154 11
47092-236 178 12 246 186 14 246 190 14 246 190 14
47093-246 190 14 246 190 14 246 190 14 246 190 14
47094-246 190 14 246 190 14 246 190 14 246 190 14
47095-246 190 14 246 190 14 246 190 14 246 190 14
47096-246 190 14 232 195 16 190 146 13 214 214 214
47097-253 253 253 253 253 253 253 253 253 253 253 253
47098-253 253 253 253 253 253 253 253 253 253 253 253
47099-253 253 253 253 253 253 253 253 253 253 253 253
47100-253 253 253 250 250 250 170 170 170 26 26 26
47101- 2 2 6 2 2 6 37 26 9 163 110 8
47102-219 162 10 239 182 13 246 186 14 246 190 14
47103-246 190 14 246 190 14 246 190 14 246 190 14
47104-246 190 14 246 190 14 246 190 14 246 190 14
47105-246 190 14 246 190 14 246 190 14 246 190 14
47106-246 186 14 236 178 12 224 166 10 142 122 72
47107- 46 46 46 18 18 18 6 6 6 0 0 0
47108- 0 0 0 0 0 0 0 0 0 0 0 0
47109- 0 0 0 0 0 0 0 0 0 0 0 0
47110- 0 0 0 0 0 0 6 6 6 18 18 18
47111- 50 50 50 109 106 95 192 133 9 224 166 10
47112-242 186 14 246 190 14 246 190 14 246 190 14
47113-246 190 14 246 190 14 246 190 14 246 190 14
47114-246 190 14 246 190 14 246 190 14 246 190 14
47115-246 190 14 246 190 14 246 190 14 246 190 14
47116-242 186 14 226 184 13 210 162 10 142 110 46
47117-226 226 226 253 253 253 253 253 253 253 253 253
47118-253 253 253 253 253 253 253 253 253 253 253 253
47119-253 253 253 253 253 253 253 253 253 253 253 253
47120-198 198 198 66 66 66 2 2 6 2 2 6
47121- 2 2 6 2 2 6 50 34 6 156 107 11
47122-219 162 10 239 182 13 246 186 14 246 190 14
47123-246 190 14 246 190 14 246 190 14 246 190 14
47124-246 190 14 246 190 14 246 190 14 246 190 14
47125-246 190 14 246 190 14 246 190 14 242 186 14
47126-234 174 13 213 154 11 154 122 46 66 66 66
47127- 30 30 30 10 10 10 0 0 0 0 0 0
47128- 0 0 0 0 0 0 0 0 0 0 0 0
47129- 0 0 0 0 0 0 0 0 0 0 0 0
47130- 0 0 0 0 0 0 6 6 6 22 22 22
47131- 58 58 58 154 121 60 206 145 10 234 174 13
47132-242 186 14 246 186 14 246 190 14 246 190 14
47133-246 190 14 246 190 14 246 190 14 246 190 14
47134-246 190 14 246 190 14 246 190 14 246 190 14
47135-246 190 14 246 190 14 246 190 14 246 190 14
47136-246 186 14 236 178 12 210 162 10 163 110 8
47137- 61 42 6 138 138 138 218 218 218 250 250 250
47138-253 253 253 253 253 253 253 253 253 250 250 250
47139-242 242 242 210 210 210 144 144 144 66 66 66
47140- 6 6 6 2 2 6 2 2 6 2 2 6
47141- 2 2 6 2 2 6 61 42 6 163 110 8
47142-216 158 10 236 178 12 246 190 14 246 190 14
47143-246 190 14 246 190 14 246 190 14 246 190 14
47144-246 190 14 246 190 14 246 190 14 246 190 14
47145-246 190 14 239 182 13 230 174 11 216 158 10
47146-190 142 34 124 112 88 70 70 70 38 38 38
47147- 18 18 18 6 6 6 0 0 0 0 0 0
47148- 0 0 0 0 0 0 0 0 0 0 0 0
47149- 0 0 0 0 0 0 0 0 0 0 0 0
47150- 0 0 0 0 0 0 6 6 6 22 22 22
47151- 62 62 62 168 124 44 206 145 10 224 166 10
47152-236 178 12 239 182 13 242 186 14 242 186 14
47153-246 186 14 246 190 14 246 190 14 246 190 14
47154-246 190 14 246 190 14 246 190 14 246 190 14
47155-246 190 14 246 190 14 246 190 14 246 190 14
47156-246 190 14 236 178 12 216 158 10 175 118 6
47157- 80 54 7 2 2 6 6 6 6 30 30 30
47158- 54 54 54 62 62 62 50 50 50 38 38 38
47159- 14 14 14 2 2 6 2 2 6 2 2 6
47160- 2 2 6 2 2 6 2 2 6 2 2 6
47161- 2 2 6 6 6 6 80 54 7 167 114 7
47162-213 154 11 236 178 12 246 190 14 246 190 14
47163-246 190 14 246 190 14 246 190 14 246 190 14
47164-246 190 14 242 186 14 239 182 13 239 182 13
47165-230 174 11 210 150 10 174 135 50 124 112 88
47166- 82 82 82 54 54 54 34 34 34 18 18 18
47167- 6 6 6 0 0 0 0 0 0 0 0 0
47168- 0 0 0 0 0 0 0 0 0 0 0 0
47169- 0 0 0 0 0 0 0 0 0 0 0 0
47170- 0 0 0 0 0 0 6 6 6 18 18 18
47171- 50 50 50 158 118 36 192 133 9 200 144 11
47172-216 158 10 219 162 10 224 166 10 226 170 11
47173-230 174 11 236 178 12 239 182 13 239 182 13
47174-242 186 14 246 186 14 246 190 14 246 190 14
47175-246 190 14 246 190 14 246 190 14 246 190 14
47176-246 186 14 230 174 11 210 150 10 163 110 8
47177-104 69 6 10 10 10 2 2 6 2 2 6
47178- 2 2 6 2 2 6 2 2 6 2 2 6
47179- 2 2 6 2 2 6 2 2 6 2 2 6
47180- 2 2 6 2 2 6 2 2 6 2 2 6
47181- 2 2 6 6 6 6 91 60 6 167 114 7
47182-206 145 10 230 174 11 242 186 14 246 190 14
47183-246 190 14 246 190 14 246 186 14 242 186 14
47184-239 182 13 230 174 11 224 166 10 213 154 11
47185-180 133 36 124 112 88 86 86 86 58 58 58
47186- 38 38 38 22 22 22 10 10 10 6 6 6
47187- 0 0 0 0 0 0 0 0 0 0 0 0
47188- 0 0 0 0 0 0 0 0 0 0 0 0
47189- 0 0 0 0 0 0 0 0 0 0 0 0
47190- 0 0 0 0 0 0 0 0 0 14 14 14
47191- 34 34 34 70 70 70 138 110 50 158 118 36
47192-167 114 7 180 123 7 192 133 9 197 138 11
47193-200 144 11 206 145 10 213 154 11 219 162 10
47194-224 166 10 230 174 11 239 182 13 242 186 14
47195-246 186 14 246 186 14 246 186 14 246 186 14
47196-239 182 13 216 158 10 185 133 11 152 99 6
47197-104 69 6 18 14 6 2 2 6 2 2 6
47198- 2 2 6 2 2 6 2 2 6 2 2 6
47199- 2 2 6 2 2 6 2 2 6 2 2 6
47200- 2 2 6 2 2 6 2 2 6 2 2 6
47201- 2 2 6 6 6 6 80 54 7 152 99 6
47202-192 133 9 219 162 10 236 178 12 239 182 13
47203-246 186 14 242 186 14 239 182 13 236 178 12
47204-224 166 10 206 145 10 192 133 9 154 121 60
47205- 94 94 94 62 62 62 42 42 42 22 22 22
47206- 14 14 14 6 6 6 0 0 0 0 0 0
47207- 0 0 0 0 0 0 0 0 0 0 0 0
47208- 0 0 0 0 0 0 0 0 0 0 0 0
47209- 0 0 0 0 0 0 0 0 0 0 0 0
47210- 0 0 0 0 0 0 0 0 0 6 6 6
47211- 18 18 18 34 34 34 58 58 58 78 78 78
47212-101 98 89 124 112 88 142 110 46 156 107 11
47213-163 110 8 167 114 7 175 118 6 180 123 7
47214-185 133 11 197 138 11 210 150 10 219 162 10
47215-226 170 11 236 178 12 236 178 12 234 174 13
47216-219 162 10 197 138 11 163 110 8 130 83 6
47217- 91 60 6 10 10 10 2 2 6 2 2 6
47218- 18 18 18 38 38 38 38 38 38 38 38 38
47219- 38 38 38 38 38 38 38 38 38 38 38 38
47220- 38 38 38 38 38 38 26 26 26 2 2 6
47221- 2 2 6 6 6 6 70 47 6 137 92 6
47222-175 118 6 200 144 11 219 162 10 230 174 11
47223-234 174 13 230 174 11 219 162 10 210 150 10
47224-192 133 9 163 110 8 124 112 88 82 82 82
47225- 50 50 50 30 30 30 14 14 14 6 6 6
47226- 0 0 0 0 0 0 0 0 0 0 0 0
47227- 0 0 0 0 0 0 0 0 0 0 0 0
47228- 0 0 0 0 0 0 0 0 0 0 0 0
47229- 0 0 0 0 0 0 0 0 0 0 0 0
47230- 0 0 0 0 0 0 0 0 0 0 0 0
47231- 6 6 6 14 14 14 22 22 22 34 34 34
47232- 42 42 42 58 58 58 74 74 74 86 86 86
47233-101 98 89 122 102 70 130 98 46 121 87 25
47234-137 92 6 152 99 6 163 110 8 180 123 7
47235-185 133 11 197 138 11 206 145 10 200 144 11
47236-180 123 7 156 107 11 130 83 6 104 69 6
47237- 50 34 6 54 54 54 110 110 110 101 98 89
47238- 86 86 86 82 82 82 78 78 78 78 78 78
47239- 78 78 78 78 78 78 78 78 78 78 78 78
47240- 78 78 78 82 82 82 86 86 86 94 94 94
47241-106 106 106 101 101 101 86 66 34 124 80 6
47242-156 107 11 180 123 7 192 133 9 200 144 11
47243-206 145 10 200 144 11 192 133 9 175 118 6
47244-139 102 15 109 106 95 70 70 70 42 42 42
47245- 22 22 22 10 10 10 0 0 0 0 0 0
47246- 0 0 0 0 0 0 0 0 0 0 0 0
47247- 0 0 0 0 0 0 0 0 0 0 0 0
47248- 0 0 0 0 0 0 0 0 0 0 0 0
47249- 0 0 0 0 0 0 0 0 0 0 0 0
47250- 0 0 0 0 0 0 0 0 0 0 0 0
47251- 0 0 0 0 0 0 6 6 6 10 10 10
47252- 14 14 14 22 22 22 30 30 30 38 38 38
47253- 50 50 50 62 62 62 74 74 74 90 90 90
47254-101 98 89 112 100 78 121 87 25 124 80 6
47255-137 92 6 152 99 6 152 99 6 152 99 6
47256-138 86 6 124 80 6 98 70 6 86 66 30
47257-101 98 89 82 82 82 58 58 58 46 46 46
47258- 38 38 38 34 34 34 34 34 34 34 34 34
47259- 34 34 34 34 34 34 34 34 34 34 34 34
47260- 34 34 34 34 34 34 38 38 38 42 42 42
47261- 54 54 54 82 82 82 94 86 76 91 60 6
47262-134 86 6 156 107 11 167 114 7 175 118 6
47263-175 118 6 167 114 7 152 99 6 121 87 25
47264-101 98 89 62 62 62 34 34 34 18 18 18
47265- 6 6 6 0 0 0 0 0 0 0 0 0
47266- 0 0 0 0 0 0 0 0 0 0 0 0
47267- 0 0 0 0 0 0 0 0 0 0 0 0
47268- 0 0 0 0 0 0 0 0 0 0 0 0
47269- 0 0 0 0 0 0 0 0 0 0 0 0
47270- 0 0 0 0 0 0 0 0 0 0 0 0
47271- 0 0 0 0 0 0 0 0 0 0 0 0
47272- 0 0 0 6 6 6 6 6 6 10 10 10
47273- 18 18 18 22 22 22 30 30 30 42 42 42
47274- 50 50 50 66 66 66 86 86 86 101 98 89
47275-106 86 58 98 70 6 104 69 6 104 69 6
47276-104 69 6 91 60 6 82 62 34 90 90 90
47277- 62 62 62 38 38 38 22 22 22 14 14 14
47278- 10 10 10 10 10 10 10 10 10 10 10 10
47279- 10 10 10 10 10 10 6 6 6 10 10 10
47280- 10 10 10 10 10 10 10 10 10 14 14 14
47281- 22 22 22 42 42 42 70 70 70 89 81 66
47282- 80 54 7 104 69 6 124 80 6 137 92 6
47283-134 86 6 116 81 8 100 82 52 86 86 86
47284- 58 58 58 30 30 30 14 14 14 6 6 6
47285- 0 0 0 0 0 0 0 0 0 0 0 0
47286- 0 0 0 0 0 0 0 0 0 0 0 0
47287- 0 0 0 0 0 0 0 0 0 0 0 0
47288- 0 0 0 0 0 0 0 0 0 0 0 0
47289- 0 0 0 0 0 0 0 0 0 0 0 0
47290- 0 0 0 0 0 0 0 0 0 0 0 0
47291- 0 0 0 0 0 0 0 0 0 0 0 0
47292- 0 0 0 0 0 0 0 0 0 0 0 0
47293- 0 0 0 6 6 6 10 10 10 14 14 14
47294- 18 18 18 26 26 26 38 38 38 54 54 54
47295- 70 70 70 86 86 86 94 86 76 89 81 66
47296- 89 81 66 86 86 86 74 74 74 50 50 50
47297- 30 30 30 14 14 14 6 6 6 0 0 0
47298- 0 0 0 0 0 0 0 0 0 0 0 0
47299- 0 0 0 0 0 0 0 0 0 0 0 0
47300- 0 0 0 0 0 0 0 0 0 0 0 0
47301- 6 6 6 18 18 18 34 34 34 58 58 58
47302- 82 82 82 89 81 66 89 81 66 89 81 66
47303- 94 86 66 94 86 76 74 74 74 50 50 50
47304- 26 26 26 14 14 14 6 6 6 0 0 0
47305- 0 0 0 0 0 0 0 0 0 0 0 0
47306- 0 0 0 0 0 0 0 0 0 0 0 0
47307- 0 0 0 0 0 0 0 0 0 0 0 0
47308- 0 0 0 0 0 0 0 0 0 0 0 0
47309- 0 0 0 0 0 0 0 0 0 0 0 0
47310- 0 0 0 0 0 0 0 0 0 0 0 0
47311- 0 0 0 0 0 0 0 0 0 0 0 0
47312- 0 0 0 0 0 0 0 0 0 0 0 0
47313- 0 0 0 0 0 0 0 0 0 0 0 0
47314- 6 6 6 6 6 6 14 14 14 18 18 18
47315- 30 30 30 38 38 38 46 46 46 54 54 54
47316- 50 50 50 42 42 42 30 30 30 18 18 18
47317- 10 10 10 0 0 0 0 0 0 0 0 0
47318- 0 0 0 0 0 0 0 0 0 0 0 0
47319- 0 0 0 0 0 0 0 0 0 0 0 0
47320- 0 0 0 0 0 0 0 0 0 0 0 0
47321- 0 0 0 6 6 6 14 14 14 26 26 26
47322- 38 38 38 50 50 50 58 58 58 58 58 58
47323- 54 54 54 42 42 42 30 30 30 18 18 18
47324- 10 10 10 0 0 0 0 0 0 0 0 0
47325- 0 0 0 0 0 0 0 0 0 0 0 0
47326- 0 0 0 0 0 0 0 0 0 0 0 0
47327- 0 0 0 0 0 0 0 0 0 0 0 0
47328- 0 0 0 0 0 0 0 0 0 0 0 0
47329- 0 0 0 0 0 0 0 0 0 0 0 0
47330- 0 0 0 0 0 0 0 0 0 0 0 0
47331- 0 0 0 0 0 0 0 0 0 0 0 0
47332- 0 0 0 0 0 0 0 0 0 0 0 0
47333- 0 0 0 0 0 0 0 0 0 0 0 0
47334- 0 0 0 0 0 0 0 0 0 6 6 6
47335- 6 6 6 10 10 10 14 14 14 18 18 18
47336- 18 18 18 14 14 14 10 10 10 6 6 6
47337- 0 0 0 0 0 0 0 0 0 0 0 0
47338- 0 0 0 0 0 0 0 0 0 0 0 0
47339- 0 0 0 0 0 0 0 0 0 0 0 0
47340- 0 0 0 0 0 0 0 0 0 0 0 0
47341- 0 0 0 0 0 0 0 0 0 6 6 6
47342- 14 14 14 18 18 18 22 22 22 22 22 22
47343- 18 18 18 14 14 14 10 10 10 6 6 6
47344- 0 0 0 0 0 0 0 0 0 0 0 0
47345- 0 0 0 0 0 0 0 0 0 0 0 0
47346- 0 0 0 0 0 0 0 0 0 0 0 0
47347- 0 0 0 0 0 0 0 0 0 0 0 0
47348- 0 0 0 0 0 0 0 0 0 0 0 0
47349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47362+4 4 4 4 4 4
47363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47376+4 4 4 4 4 4
47377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47390+4 4 4 4 4 4
47391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47404+4 4 4 4 4 4
47405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47418+4 4 4 4 4 4
47419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47432+4 4 4 4 4 4
47433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47437+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
47438+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
47439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47442+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
47443+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
47444+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
47445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47446+4 4 4 4 4 4
47447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47451+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
47452+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
47453+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47456+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
47457+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
47458+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
47459+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47460+4 4 4 4 4 4
47461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47465+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
47466+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
47467+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
47468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47470+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
47471+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
47472+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
47473+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
47474+4 4 4 4 4 4
47475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47478+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
47479+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
47480+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
47481+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
47482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47483+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
47484+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
47485+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
47486+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
47487+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
47488+4 4 4 4 4 4
47489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47492+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
47493+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
47494+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
47495+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
47496+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47497+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
47498+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
47499+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
47500+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
47501+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
47502+4 4 4 4 4 4
47503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47506+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
47507+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
47508+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
47509+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
47510+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
47511+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
47512+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
47513+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
47514+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
47515+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
47516+4 4 4 4 4 4
47517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47519+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
47520+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
47521+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
47522+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
47523+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
47524+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
47525+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
47526+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
47527+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
47528+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
47529+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
47530+4 4 4 4 4 4
47531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47533+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
47534+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
47535+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
47536+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
47537+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
47538+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
47539+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
47540+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
47541+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
47542+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
47543+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
47544+4 4 4 4 4 4
47545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47547+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
47548+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
47549+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
47550+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
47551+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
47552+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
47553+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
47554+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
47555+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
47556+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
47557+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
47558+4 4 4 4 4 4
47559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47561+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
47562+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
47563+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
47564+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
47565+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
47566+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
47567+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
47568+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
47569+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
47570+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
47571+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
47572+4 4 4 4 4 4
47573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47574+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
47575+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
47576+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
47577+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
47578+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
47579+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
47580+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
47581+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
47582+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
47583+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
47584+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
47585+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
47586+4 4 4 4 4 4
47587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47588+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
47589+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
47590+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
47591+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
47592+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
47593+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
47594+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
47595+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
47596+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
47597+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
47598+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
47599+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
47600+0 0 0 4 4 4
47601+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
47602+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
47603+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
47604+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
47605+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
47606+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
47607+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
47608+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
47609+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
47610+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
47611+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
47612+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
47613+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
47614+2 0 0 0 0 0
47615+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
47616+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
47617+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
47618+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
47619+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
47620+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
47621+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
47622+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
47623+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
47624+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
47625+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
47626+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
47627+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
47628+37 38 37 0 0 0
47629+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
47630+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
47631+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
47632+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
47633+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
47634+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
47635+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
47636+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
47637+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
47638+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
47639+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
47640+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
47641+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
47642+85 115 134 4 0 0
47643+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
47644+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
47645+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
47646+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
47647+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
47648+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
47649+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
47650+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
47651+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
47652+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
47653+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
47654+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
47655+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
47656+60 73 81 4 0 0
47657+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
47658+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
47659+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
47660+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
47661+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
47662+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
47663+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
47664+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
47665+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
47666+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
47667+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
47668+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
47669+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
47670+16 19 21 4 0 0
47671+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
47672+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
47673+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
47674+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
47675+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
47676+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
47677+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
47678+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
47679+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
47680+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
47681+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
47682+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
47683+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
47684+4 0 0 4 3 3
47685+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
47686+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
47687+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
47688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
47689+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
47690+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
47691+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
47692+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
47693+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
47694+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
47695+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
47696+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
47697+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
47698+3 2 2 4 4 4
47699+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
47700+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
47701+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
47702+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
47703+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
47704+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
47705+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
47706+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
47707+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
47708+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
47709+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
47710+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
47711+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
47712+4 4 4 4 4 4
47713+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
47714+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
47715+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
47716+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
47717+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
47718+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
47719+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
47720+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
47721+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
47722+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
47723+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
47724+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
47725+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
47726+4 4 4 4 4 4
47727+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
47728+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
47729+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
47730+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
47731+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
47732+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
47733+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
47734+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
47735+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
47736+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
47737+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
47738+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
47739+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
47740+5 5 5 5 5 5
47741+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
47742+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
47743+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
47744+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
47745+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
47746+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47747+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
47748+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
47749+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
47750+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
47751+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
47752+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
47753+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47754+5 5 5 4 4 4
47755+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
47756+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
47757+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
47758+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
47759+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47760+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
47761+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
47762+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
47763+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
47764+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
47765+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
47766+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
47767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47768+4 4 4 4 4 4
47769+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
47770+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
47771+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
47772+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
47773+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
47774+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47775+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47776+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
47777+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
47778+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
47779+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
47780+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
47781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47782+4 4 4 4 4 4
47783+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
47784+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
47785+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
47786+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
47787+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47788+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
47789+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
47790+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
47791+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
47792+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
47793+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
47794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47796+4 4 4 4 4 4
47797+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
47798+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
47799+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
47800+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
47801+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47802+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47803+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47804+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
47805+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
47806+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
47807+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
47808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47810+4 4 4 4 4 4
47811+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
47812+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
47813+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
47814+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
47815+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47816+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
47817+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
47818+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
47819+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
47820+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
47821+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47824+4 4 4 4 4 4
47825+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
47826+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
47827+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
47828+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
47829+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47830+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
47831+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
47832+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
47833+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
47834+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
47835+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
47836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47838+4 4 4 4 4 4
47839+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
47840+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
47841+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
47842+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
47843+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47844+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
47845+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
47846+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
47847+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
47848+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
47849+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
47850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47852+4 4 4 4 4 4
47853+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
47854+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
47855+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
47856+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47857+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
47858+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
47859+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
47860+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
47861+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
47862+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
47863+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47866+4 4 4 4 4 4
47867+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
47868+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
47869+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
47870+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
47871+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47872+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
47873+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
47874+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
47875+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
47876+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
47877+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47880+4 4 4 4 4 4
47881+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
47882+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
47883+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
47884+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47885+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47886+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
47887+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
47888+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
47889+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
47890+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
[ ...continuation of several hundred '+' lines of ASCII RGB pixel triplets: the tail of a PPM image hunk (apparently the patch's replacement logo under drivers/video/logo), elided here as raw image payload... ]
48469diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
48470index fe92eed..106e085 100644
48471--- a/drivers/video/mb862xx/mb862xxfb_accel.c
48472+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
48473@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
48474 struct mb862xxfb_par *par = info->par;
48475
48476 if (info->var.bits_per_pixel == 32) {
48477- info->fbops->fb_fillrect = cfb_fillrect;
48478- info->fbops->fb_copyarea = cfb_copyarea;
48479- info->fbops->fb_imageblit = cfb_imageblit;
48480+ pax_open_kernel();
48481+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
48482+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
48483+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
48484+ pax_close_kernel();
48485 } else {
48486 outreg(disp, GC_L0EM, 3);
48487- info->fbops->fb_fillrect = mb86290fb_fillrect;
48488- info->fbops->fb_copyarea = mb86290fb_copyarea;
48489- info->fbops->fb_imageblit = mb86290fb_imageblit;
48490+ pax_open_kernel();
48491+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
48492+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
48493+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
48494+ pax_close_kernel();
48495 }
48496 outreg(draw, GDC_REG_DRAW_BASE, 0);
48497 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
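The hunk above, and the nvidia, s1d13xxx, smscufx, udlfb, uvesafb and vesafb hunks that follow, all rewrite plain stores to info->fbops members as casts through *(void **)& bracketed by pax_open_kernel()/pax_close_kernel(). Elsewhere this patch constifies such ops tables and maps them read-only, so a driver that reassigns its hooks at run time needs an explicit write window; the cast also silences the const qualifier that constification adds. A minimal sketch of the idiom, with only the two helpers taken from the patch itself and the surrounding function invented for illustration:

    /* Sketch of the PaX read-only-ops write idiom used by these hunks.
     * pax_open_kernel()/pax_close_kernel() are the patch's helpers;
     * the function around them is illustrative only. */
    static void example_enable_accel(struct fb_info *info)
    {
            pax_open_kernel();                      /* lift write protection */
            *(void **)&info->fbops->fb_fillrect  = cfb_fillrect;
            *(void **)&info->fbops->fb_copyarea  = cfb_copyarea;
            *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
            pax_close_kernel();                     /* restore protection */
    }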
48498diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
48499index ff22871..b129bed 100644
48500--- a/drivers/video/nvidia/nvidia.c
48501+++ b/drivers/video/nvidia/nvidia.c
48502@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
48503 info->fix.line_length = (info->var.xres_virtual *
48504 info->var.bits_per_pixel) >> 3;
48505 if (info->var.accel_flags) {
48506- info->fbops->fb_imageblit = nvidiafb_imageblit;
48507- info->fbops->fb_fillrect = nvidiafb_fillrect;
48508- info->fbops->fb_copyarea = nvidiafb_copyarea;
48509- info->fbops->fb_sync = nvidiafb_sync;
48510+ pax_open_kernel();
48511+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
48512+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
48513+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
48514+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
48515+ pax_close_kernel();
48516 info->pixmap.scan_align = 4;
48517 info->flags &= ~FBINFO_HWACCEL_DISABLED;
48518 info->flags |= FBINFO_READS_FAST;
48519 NVResetGraphics(info);
48520 } else {
48521- info->fbops->fb_imageblit = cfb_imageblit;
48522- info->fbops->fb_fillrect = cfb_fillrect;
48523- info->fbops->fb_copyarea = cfb_copyarea;
48524- info->fbops->fb_sync = NULL;
48525+ pax_open_kernel();
48526+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
48527+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
48528+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
48529+ *(void **)&info->fbops->fb_sync = NULL;
48530+ pax_close_kernel();
48531 info->pixmap.scan_align = 1;
48532 info->flags |= FBINFO_HWACCEL_DISABLED;
48533 info->flags &= ~FBINFO_READS_FAST;
48534@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
48535 info->pixmap.size = 8 * 1024;
48536 info->pixmap.flags = FB_PIXMAP_SYSTEM;
48537
48538- if (!hwcur)
48539- info->fbops->fb_cursor = NULL;
48540+ if (!hwcur) {
48541+ pax_open_kernel();
48542+ *(void **)&info->fbops->fb_cursor = NULL;
48543+ pax_close_kernel();
48544+ }
48545
48546 info->var.accel_flags = (!noaccel);
48547
48548diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
48549index 76d9053..dec2bfd 100644
48550--- a/drivers/video/s1d13xxxfb.c
48551+++ b/drivers/video/s1d13xxxfb.c
48552@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
48553
48554 switch(prod_id) {
48555 case S1D13506_PROD_ID: /* activate acceleration */
48556- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
48557- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
48558+ pax_open_kernel();
48559+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
48560+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
48561+ pax_close_kernel();
48562 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
48563 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
48564 break;
48565diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
48566index 97bd662..39fab85 100644
48567--- a/drivers/video/smscufx.c
48568+++ b/drivers/video/smscufx.c
48569@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
48570 fb_deferred_io_cleanup(info);
48571 kfree(info->fbdefio);
48572 info->fbdefio = NULL;
48573- info->fbops->fb_mmap = ufx_ops_mmap;
48574+ pax_open_kernel();
48575+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
48576+ pax_close_kernel();
48577 }
48578
48579 pr_debug("released /dev/fb%d user=%d count=%d",
48580diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
48581index 86d449e..8e04dc5 100644
48582--- a/drivers/video/udlfb.c
48583+++ b/drivers/video/udlfb.c
48584@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
48585 dlfb_urb_completion(urb);
48586
48587 error:
48588- atomic_add(bytes_sent, &dev->bytes_sent);
48589- atomic_add(bytes_identical, &dev->bytes_identical);
48590- atomic_add(width*height*2, &dev->bytes_rendered);
48591+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
48592+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
48593+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
48594 end_cycles = get_cycles();
48595- atomic_add(((unsigned int) ((end_cycles - start_cycles)
48596+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
48597 >> 10)), /* Kcycles */
48598 &dev->cpu_kcycles_used);
48599
48600@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
48601 dlfb_urb_completion(urb);
48602
48603 error:
48604- atomic_add(bytes_sent, &dev->bytes_sent);
48605- atomic_add(bytes_identical, &dev->bytes_identical);
48606- atomic_add(bytes_rendered, &dev->bytes_rendered);
48607+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
48608+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
48609+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
48610 end_cycles = get_cycles();
48611- atomic_add(((unsigned int) ((end_cycles - start_cycles)
48612+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
48613 >> 10)), /* Kcycles */
48614 &dev->cpu_kcycles_used);
48615 }
48616@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
48617 fb_deferred_io_cleanup(info);
48618 kfree(info->fbdefio);
48619 info->fbdefio = NULL;
48620- info->fbops->fb_mmap = dlfb_ops_mmap;
48621+ pax_open_kernel();
48622+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
48623+ pax_close_kernel();
48624 }
48625
48626 pr_warn("released /dev/fb%d user=%d count=%d\n",
48627@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
48628 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48629 struct dlfb_data *dev = fb_info->par;
48630 return snprintf(buf, PAGE_SIZE, "%u\n",
48631- atomic_read(&dev->bytes_rendered));
48632+ atomic_read_unchecked(&dev->bytes_rendered));
48633 }
48634
48635 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
48636@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
48637 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48638 struct dlfb_data *dev = fb_info->par;
48639 return snprintf(buf, PAGE_SIZE, "%u\n",
48640- atomic_read(&dev->bytes_identical));
48641+ atomic_read_unchecked(&dev->bytes_identical));
48642 }
48643
48644 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
48645@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
48646 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48647 struct dlfb_data *dev = fb_info->par;
48648 return snprintf(buf, PAGE_SIZE, "%u\n",
48649- atomic_read(&dev->bytes_sent));
48650+ atomic_read_unchecked(&dev->bytes_sent));
48651 }
48652
48653 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
48654@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
48655 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48656 struct dlfb_data *dev = fb_info->par;
48657 return snprintf(buf, PAGE_SIZE, "%u\n",
48658- atomic_read(&dev->cpu_kcycles_used));
48659+ atomic_read_unchecked(&dev->cpu_kcycles_used));
48660 }
48661
48662 static ssize_t edid_show(
48663@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
48664 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48665 struct dlfb_data *dev = fb_info->par;
48666
48667- atomic_set(&dev->bytes_rendered, 0);
48668- atomic_set(&dev->bytes_identical, 0);
48669- atomic_set(&dev->bytes_sent, 0);
48670- atomic_set(&dev->cpu_kcycles_used, 0);
48671+ atomic_set_unchecked(&dev->bytes_rendered, 0);
48672+ atomic_set_unchecked(&dev->bytes_identical, 0);
48673+ atomic_set_unchecked(&dev->bytes_sent, 0);
48674+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
48675
48676 return count;
48677 }
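The udlfb counters changed above (bytes_sent, bytes_identical, bytes_rendered, cpu_kcycles_used) are pure statistics, which is why the hunk moves every access to the _unchecked API: under PAX_REFCOUNT, ordinary atomic_t arithmetic saturates on overflow to stop reference-count exploits, and atomic_unchecked_t opts counters that may legitimately wrap out of that check. A condensed sketch under those assumptions (the counter and function names here are illustrative):

    /* Sketch of the PAX_REFCOUNT opt-out: a statistic that may wrap
     * uses the _unchecked variants so overflow saturation is skipped. */
    static atomic_unchecked_t bytes_sent = ATOMIC_INIT(0);

    static void note_transfer(unsigned int bytes)
    {
            atomic_add_unchecked(bytes, &bytes_sent);       /* no saturation */
    }

    static unsigned int bytes_sent_total(void)
    {
            return atomic_read_unchecked(&bytes_sent);
    }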
48678diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
48679index b75db01..ad2f34a 100644
48680--- a/drivers/video/uvesafb.c
48681+++ b/drivers/video/uvesafb.c
48682@@ -19,6 +19,7 @@
48683 #include <linux/io.h>
48684 #include <linux/mutex.h>
48685 #include <linux/slab.h>
48686+#include <linux/moduleloader.h>
48687 #include <video/edid.h>
48688 #include <video/uvesafb.h>
48689 #ifdef CONFIG_X86
48690@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
48691 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
48692 par->pmi_setpal = par->ypan = 0;
48693 } else {
48694+
48695+#ifdef CONFIG_PAX_KERNEXEC
48696+#ifdef CONFIG_MODULES
48697+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
48698+#endif
48699+ if (!par->pmi_code) {
48700+ par->pmi_setpal = par->ypan = 0;
48701+ return 0;
48702+ }
48703+#endif
48704+
48705 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
48706 + task->t.regs.edi);
48707+
48708+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48709+ pax_open_kernel();
48710+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
48711+ pax_close_kernel();
48712+
48713+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
48714+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
48715+#else
48716 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
48717 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
48718+#endif
48719+
48720 printk(KERN_INFO "uvesafb: protected mode interface info at "
48721 "%04x:%04x\n",
48722 (u16)task->t.regs.es, (u16)task->t.regs.edi);
48723@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
48724 par->ypan = ypan;
48725
48726 if (par->pmi_setpal || par->ypan) {
48727+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
48728 if (__supported_pte_mask & _PAGE_NX) {
48729 par->pmi_setpal = par->ypan = 0;
48730 printk(KERN_WARNING "uvesafb: NX protection is actively."
48731 "We have better not to use the PMI.\n");
48732- } else {
48733+ } else
48734+#endif
48735 uvesafb_vbe_getpmi(task, par);
48736- }
48737 }
48738 #else
48739 /* The protected mode interface is not available on non-x86. */
48740@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48741 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
48742
48743 /* Disable blanking if the user requested so. */
48744- if (!blank)
48745- info->fbops->fb_blank = NULL;
48746+ if (!blank) {
48747+ pax_open_kernel();
48748+ *(void **)&info->fbops->fb_blank = NULL;
48749+ pax_close_kernel();
48750+ }
48751
48752 /*
48753 * Find out how much IO memory is required for the mode with
48754@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48755 info->flags = FBINFO_FLAG_DEFAULT |
48756 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
48757
48758- if (!par->ypan)
48759- info->fbops->fb_pan_display = NULL;
48760+ if (!par->ypan) {
48761+ pax_open_kernel();
48762+ *(void **)&info->fbops->fb_pan_display = NULL;
48763+ pax_close_kernel();
48764+ }
48765 }
48766
48767 static void uvesafb_init_mtrr(struct fb_info *info)
48768@@ -1836,6 +1866,11 @@ out:
48769 if (par->vbe_modes)
48770 kfree(par->vbe_modes);
48771
48772+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48773+ if (par->pmi_code)
48774+ module_free_exec(NULL, par->pmi_code);
48775+#endif
48776+
48777 framebuffer_release(info);
48778 return err;
48779 }
48780@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
48781 kfree(par->vbe_state_orig);
48782 if (par->vbe_state_saved)
48783 kfree(par->vbe_state_saved);
48784+
48785+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48786+ if (par->pmi_code)
48787+ module_free_exec(NULL, par->pmi_code);
48788+#endif
48789+
48790 }
48791
48792 framebuffer_release(info);
48793diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
48794index 501b340..d80aa17 100644
48795--- a/drivers/video/vesafb.c
48796+++ b/drivers/video/vesafb.c
48797@@ -9,6 +9,7 @@
48798 */
48799
48800 #include <linux/module.h>
48801+#include <linux/moduleloader.h>
48802 #include <linux/kernel.h>
48803 #include <linux/errno.h>
48804 #include <linux/string.h>
48805@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
48806 static int vram_total __initdata; /* Set total amount of memory */
48807 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
48808 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
48809-static void (*pmi_start)(void) __read_mostly;
48810-static void (*pmi_pal) (void) __read_mostly;
48811+static void (*pmi_start)(void) __read_only;
48812+static void (*pmi_pal) (void) __read_only;
48813 static int depth __read_mostly;
48814 static int vga_compat __read_mostly;
48815 /* --------------------------------------------------------------------- */
48816@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
48817 unsigned int size_vmode;
48818 unsigned int size_remap;
48819 unsigned int size_total;
48820+ void *pmi_code = NULL;
48821
48822 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
48823 return -ENODEV;
48824@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
48825 size_remap = size_total;
48826 vesafb_fix.smem_len = size_remap;
48827
48828-#ifndef __i386__
48829- screen_info.vesapm_seg = 0;
48830-#endif
48831-
48832 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
48833 printk(KERN_WARNING
48834 "vesafb: cannot reserve video memory at 0x%lx\n",
48835@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
48836 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
48837 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
48838
48839+#ifdef __i386__
48840+
48841+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48842+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
48843+ if (!pmi_code)
48844+#elif !defined(CONFIG_PAX_KERNEXEC)
48845+ if (0)
48846+#endif
48847+
48848+#endif
48849+ screen_info.vesapm_seg = 0;
48850+
48851 if (screen_info.vesapm_seg) {
48852- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
48853- screen_info.vesapm_seg,screen_info.vesapm_off);
48854+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
48855+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
48856 }
48857
48858 if (screen_info.vesapm_seg < 0xc000)
48859@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
48860
48861 if (ypan || pmi_setpal) {
48862 unsigned short *pmi_base;
48863+
48864 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
48865- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
48866- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
48867+
48868+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48869+ pax_open_kernel();
48870+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
48871+#else
48872+ pmi_code = pmi_base;
48873+#endif
48874+
48875+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
48876+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
48877+
48878+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48879+ pmi_start = ktva_ktla(pmi_start);
48880+ pmi_pal = ktva_ktla(pmi_pal);
48881+ pax_close_kernel();
48882+#endif
48883+
48884 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
48885 if (pmi_base[3]) {
48886 printk(KERN_INFO "vesafb: pmi: ports = ");
48887@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48888 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
48889 (ypan ? FBINFO_HWACCEL_YPAN : 0);
48890
48891- if (!ypan)
48892- info->fbops->fb_pan_display = NULL;
48893+ if (!ypan) {
48894+ pax_open_kernel();
48895+ *(void **)&info->fbops->fb_pan_display = NULL;
48896+ pax_close_kernel();
48897+ }
48898
48899 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
48900 err = -ENOMEM;
48901@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48902 info->node, info->fix.id);
48903 return 0;
48904 err:
48905+
48906+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48907+ module_free_exec(NULL, pmi_code);
48908+#endif
48909+
48910 if (info->screen_base)
48911 iounmap(info->screen_base);
48912 framebuffer_release(info);
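The uvesafb and vesafb hunks above share one theme: under PAX_KERNEXEC the VESA protected-mode interface (PMI) stub supplied by the video BIOS can no longer be executed in place, so it is copied into an executable allocation from module_alloc_exec() and the two entry points are rebased with ktva_ktla(). All helper names below come from the patch; the fragment is a condensed sketch, not a drop-in:

    /* Sketch of the KERNEXEC PMI relocation performed above: copy the
     * BIOS stub into an executable region, then point the entry
     * pointers at the executable alias of the copy. */
    void *pmi_code = module_alloc_exec(pmi_size);   /* RX-capable region */

    if (pmi_code) {
            pax_open_kernel();
            memcpy(pmi_code, pmi_base, pmi_size);   /* relocate the stub */
            pax_close_kernel();
            pmi_start = ktva_ktla(pmi_code + pmi_base[1]);
            pmi_pal   = ktva_ktla(pmi_code + pmi_base[2]);
    }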
48913diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
48914index 88714ae..16c2e11 100644
48915--- a/drivers/video/via/via_clock.h
48916+++ b/drivers/video/via/via_clock.h
48917@@ -56,7 +56,7 @@ struct via_clock {
48918
48919 void (*set_engine_pll_state)(u8 state);
48920 void (*set_engine_pll)(struct via_pll_config config);
48921-};
48922+} __no_const;
48923
48924
48925 static inline u32 get_pll_internal_frequency(u32 ref_freq,
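__no_const, added to struct via_clock here, is the opt-out for the patch's constify gcc plugin: structures consisting only of function pointers are otherwise forced read-only at build time, and this one has its hooks filled in at run time. The annotated shape, taken directly from the hunk:

    /* Sketch: __no_const keeps an all-function-pointer struct writable
     * so drivers can assign its hooks at probe time. */
    struct via_clock {
            void (*set_engine_pll_state)(u8 state);
            void (*set_engine_pll)(struct via_pll_config config);
            /* ... */
    } __no_const;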
48926diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
48927index fef20db..d28b1ab 100644
48928--- a/drivers/xen/xenfs/xenstored.c
48929+++ b/drivers/xen/xenfs/xenstored.c
48930@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
48931 static int xsd_kva_open(struct inode *inode, struct file *file)
48932 {
48933 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
48934+#ifdef CONFIG_GRKERNSEC_HIDESYM
48935+ NULL);
48936+#else
48937 xen_store_interface);
48938+#endif
48939+
48940 if (!file->private_data)
48941 return -ENOMEM;
48942 return 0;
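The xenstored hunk is a GRKERNSEC_HIDESYM change: xsd_kva_open() formats a kernel virtual address into a string that userland can read back, so with HIDESYM enabled the patch prints NULL instead. The idiom, as used above:

    /* Sketch of the HIDESYM idiom: blank any kernel pointer that is
     * formatted into userland-visible output. */
    file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
    #ifdef CONFIG_GRKERNSEC_HIDESYM
                                           NULL);                  /* hidden */
    #else
                                           xen_store_interface);   /* real */
    #endif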
48943diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
48944index 890bed5..17ae73e 100644
48945--- a/fs/9p/vfs_inode.c
48946+++ b/fs/9p/vfs_inode.c
48947@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48948 void
48949 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48950 {
48951- char *s = nd_get_link(nd);
48952+ const char *s = nd_get_link(nd);
48953
48954 p9_debug(P9_DEBUG_VFS, " %s %s\n",
48955 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
48956diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
48957index 0efd152..b5802ad 100644
48958--- a/fs/Kconfig.binfmt
48959+++ b/fs/Kconfig.binfmt
48960@@ -89,7 +89,7 @@ config HAVE_AOUT
48961
48962 config BINFMT_AOUT
48963 tristate "Kernel support for a.out and ECOFF binaries"
48964- depends on HAVE_AOUT
48965+ depends on HAVE_AOUT && BROKEN
48966 ---help---
48967 A.out (Assembler.OUTput) is a set of formats for libraries and
48968 executables used in the earliest versions of UNIX. Linux used
48969diff --git a/fs/aio.c b/fs/aio.c
48970index 71f613c..ee07789 100644
48971--- a/fs/aio.c
48972+++ b/fs/aio.c
48973@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
48974 size += sizeof(struct io_event) * nr_events;
48975 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
48976
48977- if (nr_pages < 0)
48978+ if (nr_pages <= 0)
48979 return -EINVAL;
48980
48981 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
48982@@ -1027,9 +1027,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
48983 spin_unlock(&info->ring_lock);
48984
48985 out:
48986- kunmap_atomic(ring);
48987 dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
48988 (unsigned long)ring->head, (unsigned long)ring->tail);
48989+ kunmap_atomic(ring);
48990 return ret;
48991 }
48992
48993@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
48994 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48995 {
48996 ssize_t ret;
48997+ struct iovec iovstack;
48998
48999 #ifdef CONFIG_COMPAT
49000 if (compat)
49001 ret = compat_rw_copy_check_uvector(type,
49002 (struct compat_iovec __user *)kiocb->ki_buf,
49003- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
49004+ kiocb->ki_nbytes, 1, &iovstack,
49005 &kiocb->ki_iovec);
49006 else
49007 #endif
49008 ret = rw_copy_check_uvector(type,
49009 (struct iovec __user *)kiocb->ki_buf,
49010- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
49011+ kiocb->ki_nbytes, 1, &iovstack,
49012 &kiocb->ki_iovec);
49013 if (ret < 0)
49014 goto out;
49015@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
49016 if (ret < 0)
49017 goto out;
49018
49019+ if (kiocb->ki_iovec == &iovstack) {
49020+ kiocb->ki_inline_vec = iovstack;
49021+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
49022+ }
49023 kiocb->ki_nr_segs = kiocb->ki_nbytes;
49024 kiocb->ki_cur_seg = 0;
49025 /* ki_nbytes/left now reflect bytes instead of segs */
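In the aio hunk, aio_setup_vectored_rw() now validates the user's iovec into a stack temporary and installs it into the kiocb only after the checks have passed; comparing kiocb->ki_iovec against &iovstack detects that the helper chose the single-segment fast path. Condensed control flow using the hunk's own names (the intermediate error handling is elided):

    /* Sketch: validate into a stack iovec first, publish into the
     * kiocb's inline vector only on the successful fast path. */
    struct iovec iovstack;
    ssize_t ret = rw_copy_check_uvector(type,
                    (struct iovec __user *)kiocb->ki_buf,
                    kiocb->ki_nbytes, 1, &iovstack, &kiocb->ki_iovec);
    if (ret >= 0 && kiocb->ki_iovec == &iovstack) { /* fast path taken */
            kiocb->ki_inline_vec = iovstack;
            kiocb->ki_iovec = &kiocb->ki_inline_vec;
    }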
49026diff --git a/fs/attr.c b/fs/attr.c
49027index 1449adb..a2038c2 100644
49028--- a/fs/attr.c
49029+++ b/fs/attr.c
49030@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
49031 unsigned long limit;
49032
49033 limit = rlimit(RLIMIT_FSIZE);
49034+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
49035 if (limit != RLIM_INFINITY && offset > limit)
49036 goto out_sig;
49037 if (offset > inode->i_sb->s_maxbytes)
49038diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
49039index 03bc1d3..6205356 100644
49040--- a/fs/autofs4/waitq.c
49041+++ b/fs/autofs4/waitq.c
49042@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
49043 {
49044 unsigned long sigpipe, flags;
49045 mm_segment_t fs;
49046- const char *data = (const char *)addr;
49047+ const char __user *data = (const char __force_user *)addr;
49048 ssize_t wr = 0;
49049
49050 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
49051@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
49052 return 1;
49053 }
49054
49055+#ifdef CONFIG_GRKERNSEC_HIDESYM
49056+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
49057+#endif
49058+
49059 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
49060 enum autofs_notify notify)
49061 {
49062@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
49063
49064 /* If this is a direct mount request create a dummy name */
49065 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
49066+#ifdef CONFIG_GRKERNSEC_HIDESYM
49067+ /* this name does get written to userland via autofs4_write() */
49068+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
49069+#else
49070 qstr.len = sprintf(name, "%p", dentry);
49071+#endif
49072 else {
49073 qstr.len = autofs4_getpath(sbi, dentry, &name);
49074 if (!qstr.len) {
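The autofs4 hunk is another HIDESYM fix: the dummy name for a direct mount was generated from the dentry address with "%p" and, as the added comment notes, reaches userland through autofs4_write(). With HIDESYM the name is derived from a global counter instead, so no kernel pointer leaks. The replacement, essentially verbatim from the hunk:

    /* Sketch: a monotonically increasing id stands in for the dentry
     * address in names that are written out to userland. */
    static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);

    qstr.len = sprintf(name, "%08x",
                       atomic_inc_return_unchecked(&autofs_dummy_name_id));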
49075diff --git a/fs/befs/endian.h b/fs/befs/endian.h
49076index 2722387..c8dd2a7 100644
49077--- a/fs/befs/endian.h
49078+++ b/fs/befs/endian.h
49079@@ -11,7 +11,7 @@
49080
49081 #include <asm/byteorder.h>
49082
49083-static inline u64
49084+static inline u64 __intentional_overflow(-1)
49085 fs64_to_cpu(const struct super_block *sb, fs64 n)
49086 {
49087 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
49088@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
49089 return (__force fs64)cpu_to_be64(n);
49090 }
49091
49092-static inline u32
49093+static inline u32 __intentional_overflow(-1)
49094 fs32_to_cpu(const struct super_block *sb, fs32 n)
49095 {
49096 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
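__intentional_overflow(-1) in the befs hunk appears to be an annotation for the patch's size_overflow gcc plugin, marking helpers whose arithmetic or forced casts may wrap by design so their results are not instrumented; that reading is inferred from the annotation's use across the patch rather than stated by it. An annotated helper in the hunk's style (the function body is reconstructed, not quoted):

    /* Sketch: tell the size_overflow plugin that this byteswap
     * helper's value may wrap without being a bug. */
    static inline u32 __intentional_overflow(-1)
    fs32_to_cpu(const struct super_block *sb, fs32 n)
    {
            if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
                    return le32_to_cpu((__force __le32)n);
            return be32_to_cpu((__force __be32)n);
    }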
49097diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
49098index 2b3bda8..6a2d4be 100644
49099--- a/fs/befs/linuxvfs.c
49100+++ b/fs/befs/linuxvfs.c
49101@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
49102 {
49103 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
49104 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
49105- char *link = nd_get_link(nd);
49106+ const char *link = nd_get_link(nd);
49107 if (!IS_ERR(link))
49108 kfree(link);
49109 }
49110diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
49111index 6043567..16a9239 100644
49112--- a/fs/binfmt_aout.c
49113+++ b/fs/binfmt_aout.c
49114@@ -16,6 +16,7 @@
49115 #include <linux/string.h>
49116 #include <linux/fs.h>
49117 #include <linux/file.h>
49118+#include <linux/security.h>
49119 #include <linux/stat.h>
49120 #include <linux/fcntl.h>
49121 #include <linux/ptrace.h>
49122@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
49123 #endif
49124 # define START_STACK(u) ((void __user *)u.start_stack)
49125
49126+ memset(&dump, 0, sizeof(dump));
49127+
49128 fs = get_fs();
49129 set_fs(KERNEL_DS);
49130 has_dumped = 1;
49131@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
49132
49133 /* If the size of the dump file exceeds the rlimit, then see what would happen
49134 if we wrote the stack, but not the data area. */
49135+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
49136 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
49137 dump.u_dsize = 0;
49138
49139 /* Make sure we have enough room to write the stack and data areas. */
49140+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
49141 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
49142 dump.u_ssize = 0;
49143
49144@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
49145 rlim = rlimit(RLIMIT_DATA);
49146 if (rlim >= RLIM_INFINITY)
49147 rlim = ~0;
49148+
49149+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
49150 if (ex.a_data + ex.a_bss > rlim)
49151 return -ENOMEM;
49152
49153@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
49154
49155 install_exec_creds(bprm);
49156
49157+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49158+ current->mm->pax_flags = 0UL;
49159+#endif
49160+
49161+#ifdef CONFIG_PAX_PAGEEXEC
49162+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
49163+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
49164+
49165+#ifdef CONFIG_PAX_EMUTRAMP
49166+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
49167+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
49168+#endif
49169+
49170+#ifdef CONFIG_PAX_MPROTECT
49171+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
49172+ current->mm->pax_flags |= MF_PAX_MPROTECT;
49173+#endif
49174+
49175+ }
49176+#endif
49177+
49178 if (N_MAGIC(ex) == OMAGIC) {
49179 unsigned long text_addr, map_size;
49180 loff_t pos;
49181@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
49182 }
49183
49184 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
49185- PROT_READ | PROT_WRITE | PROT_EXEC,
49186+ PROT_READ | PROT_WRITE,
49187 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
49188 fd_offset + ex.a_text);
49189 if (error != N_DATADDR(ex)) {
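The a.out loader hunk wires per-binary PaX flags into the new process: bits in the a.out header's N_FLAGS toggle MF_PAX_* bits in mm->pax_flags (note the inverted sense: PAGEEXEC and MPROTECT are enabled unless the header bit opts out), and the data segment mapping additionally loses PROT_EXEC. The flag translation, distilled from the hunk:

    /* Sketch of the load_aout_binary() flag translation: header bits
     * opt individual PaX features off for this binary. */
    current->mm->pax_flags = 0UL;
    #ifdef CONFIG_PAX_PAGEEXEC
    if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
            current->mm->pax_flags |= MF_PAX_PAGEEXEC;
    #ifdef CONFIG_PAX_MPROTECT
            if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
                    current->mm->pax_flags |= MF_PAX_MPROTECT;
    #endif
    }
    #endif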
49190diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
49191index 5843a47..160fbe2 100644
49192--- a/fs/binfmt_elf.c
49193+++ b/fs/binfmt_elf.c
49194@@ -33,6 +33,7 @@
49195 #include <linux/elf.h>
49196 #include <linux/utsname.h>
49197 #include <linux/coredump.h>
49198+#include <linux/xattr.h>
49199 #include <asm/uaccess.h>
49200 #include <asm/param.h>
49201 #include <asm/page.h>
49202@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
49203 #define elf_core_dump NULL
49204 #endif
49205
49206+#ifdef CONFIG_PAX_MPROTECT
49207+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
49208+#endif
49209+
49210 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
49211 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
49212 #else
49213@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
49214 .load_binary = load_elf_binary,
49215 .load_shlib = load_elf_library,
49216 .core_dump = elf_core_dump,
49217+
49218+#ifdef CONFIG_PAX_MPROTECT
49219+ .handle_mprotect= elf_handle_mprotect,
49220+#endif
49221+
49222 .min_coredump = ELF_EXEC_PAGESIZE,
49223 };
49224
49225@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
49226
49227 static int set_brk(unsigned long start, unsigned long end)
49228 {
49229+ unsigned long e = end;
49230+
49231 start = ELF_PAGEALIGN(start);
49232 end = ELF_PAGEALIGN(end);
49233 if (end > start) {
49234@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
49235 if (BAD_ADDR(addr))
49236 return addr;
49237 }
49238- current->mm->start_brk = current->mm->brk = end;
49239+ current->mm->start_brk = current->mm->brk = e;
49240 return 0;
49241 }
49242
49243@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
49244 elf_addr_t __user *u_rand_bytes;
49245 const char *k_platform = ELF_PLATFORM;
49246 const char *k_base_platform = ELF_BASE_PLATFORM;
49247- unsigned char k_rand_bytes[16];
49248+ u32 k_rand_bytes[4];
49249 int items;
49250 elf_addr_t *elf_info;
49251 int ei_index = 0;
49252 const struct cred *cred = current_cred();
49253 struct vm_area_struct *vma;
49254+ unsigned long saved_auxv[AT_VECTOR_SIZE];
49255
49256 /*
49257 * In some cases (e.g. Hyper-Threading), we want to avoid L1
49258@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
49259 * Generate 16 random bytes for userspace PRNG seeding.
49260 */
49261 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
49262- u_rand_bytes = (elf_addr_t __user *)
49263- STACK_ALLOC(p, sizeof(k_rand_bytes));
49264+ srandom32(k_rand_bytes[0] ^ random32());
49265+ srandom32(k_rand_bytes[1] ^ random32());
49266+ srandom32(k_rand_bytes[2] ^ random32());
49267+ srandom32(k_rand_bytes[3] ^ random32());
49268+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
49269+ u_rand_bytes = (elf_addr_t __user *) p;
49270 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
49271 return -EFAULT;
49272
49273@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
49274 return -EFAULT;
49275 current->mm->env_end = p;
49276
49277+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
49278+
49279 /* Put the elf_info on the stack in the right place. */
49280 sp = (elf_addr_t __user *)envp + 1;
49281- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
49282+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
49283 return -EFAULT;
49284 return 0;
49285 }
49286@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
49287 an ELF header */
49288
49289 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
49290- struct file *interpreter, unsigned long *interp_map_addr,
49291- unsigned long no_base)
49292+ struct file *interpreter, unsigned long no_base)
49293 {
49294 struct elf_phdr *elf_phdata;
49295 struct elf_phdr *eppnt;
49296- unsigned long load_addr = 0;
49297+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
49298 int load_addr_set = 0;
49299 unsigned long last_bss = 0, elf_bss = 0;
49300- unsigned long error = ~0UL;
49301+ unsigned long error = -EINVAL;
49302 unsigned long total_size;
49303 int retval, i, size;
49304
49305@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
49306 goto out_close;
49307 }
49308
49309+#ifdef CONFIG_PAX_SEGMEXEC
49310+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
49311+ pax_task_size = SEGMEXEC_TASK_SIZE;
49312+#endif
49313+
49314 eppnt = elf_phdata;
49315 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
49316 if (eppnt->p_type == PT_LOAD) {
49317@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
49318 map_addr = elf_map(interpreter, load_addr + vaddr,
49319 eppnt, elf_prot, elf_type, total_size);
49320 total_size = 0;
49321- if (!*interp_map_addr)
49322- *interp_map_addr = map_addr;
49323 error = map_addr;
49324 if (BAD_ADDR(map_addr))
49325 goto out_close;
49326@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
49327 k = load_addr + eppnt->p_vaddr;
49328 if (BAD_ADDR(k) ||
49329 eppnt->p_filesz > eppnt->p_memsz ||
49330- eppnt->p_memsz > TASK_SIZE ||
49331- TASK_SIZE - eppnt->p_memsz < k) {
49332+ eppnt->p_memsz > pax_task_size ||
49333+ pax_task_size - eppnt->p_memsz < k) {
49334 error = -ENOMEM;
49335 goto out_close;
49336 }
49337@@ -530,6 +551,315 @@ out:
49338 return error;
49339 }
49340
49341+#ifdef CONFIG_PAX_PT_PAX_FLAGS
49342+#ifdef CONFIG_PAX_SOFTMODE
49343+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
49344+{
49345+ unsigned long pax_flags = 0UL;
49346+
49347+#ifdef CONFIG_PAX_PAGEEXEC
49348+ if (elf_phdata->p_flags & PF_PAGEEXEC)
49349+ pax_flags |= MF_PAX_PAGEEXEC;
49350+#endif
49351+
49352+#ifdef CONFIG_PAX_SEGMEXEC
49353+ if (elf_phdata->p_flags & PF_SEGMEXEC)
49354+ pax_flags |= MF_PAX_SEGMEXEC;
49355+#endif
49356+
49357+#ifdef CONFIG_PAX_EMUTRAMP
49358+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
49359+ pax_flags |= MF_PAX_EMUTRAMP;
49360+#endif
49361+
49362+#ifdef CONFIG_PAX_MPROTECT
49363+ if (elf_phdata->p_flags & PF_MPROTECT)
49364+ pax_flags |= MF_PAX_MPROTECT;
49365+#endif
49366+
49367+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
49368+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
49369+ pax_flags |= MF_PAX_RANDMMAP;
49370+#endif
49371+
49372+ return pax_flags;
49373+}
49374+#endif
49375+
49376+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
49377+{
49378+ unsigned long pax_flags = 0UL;
49379+
49380+#ifdef CONFIG_PAX_PAGEEXEC
49381+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
49382+ pax_flags |= MF_PAX_PAGEEXEC;
49383+#endif
49384+
49385+#ifdef CONFIG_PAX_SEGMEXEC
49386+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
49387+ pax_flags |= MF_PAX_SEGMEXEC;
49388+#endif
49389+
49390+#ifdef CONFIG_PAX_EMUTRAMP
49391+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
49392+ pax_flags |= MF_PAX_EMUTRAMP;
49393+#endif
49394+
49395+#ifdef CONFIG_PAX_MPROTECT
49396+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
49397+ pax_flags |= MF_PAX_MPROTECT;
49398+#endif
49399+
49400+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
49401+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
49402+ pax_flags |= MF_PAX_RANDMMAP;
49403+#endif
49404+
49405+ return pax_flags;
49406+}
49407+#endif
49408+
49409+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
49410+#ifdef CONFIG_PAX_SOFTMODE
49411+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
49412+{
49413+ unsigned long pax_flags = 0UL;
49414+
49415+#ifdef CONFIG_PAX_PAGEEXEC
49416+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
49417+ pax_flags |= MF_PAX_PAGEEXEC;
49418+#endif
49419+
49420+#ifdef CONFIG_PAX_SEGMEXEC
49421+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
49422+ pax_flags |= MF_PAX_SEGMEXEC;
49423+#endif
49424+
49425+#ifdef CONFIG_PAX_EMUTRAMP
49426+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
49427+ pax_flags |= MF_PAX_EMUTRAMP;
49428+#endif
49429+
49430+#ifdef CONFIG_PAX_MPROTECT
49431+ if (pax_flags_softmode & MF_PAX_MPROTECT)
49432+ pax_flags |= MF_PAX_MPROTECT;
49433+#endif
49434+
49435+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
49436+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
49437+ pax_flags |= MF_PAX_RANDMMAP;
49438+#endif
49439+
49440+ return pax_flags;
49441+}
49442+#endif
49443+
49444+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
49445+{
49446+ unsigned long pax_flags = 0UL;
49447+
49448+#ifdef CONFIG_PAX_PAGEEXEC
49449+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
49450+ pax_flags |= MF_PAX_PAGEEXEC;
49451+#endif
49452+
49453+#ifdef CONFIG_PAX_SEGMEXEC
49454+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
49455+ pax_flags |= MF_PAX_SEGMEXEC;
49456+#endif
49457+
49458+#ifdef CONFIG_PAX_EMUTRAMP
49459+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
49460+ pax_flags |= MF_PAX_EMUTRAMP;
49461+#endif
49462+
49463+#ifdef CONFIG_PAX_MPROTECT
49464+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
49465+ pax_flags |= MF_PAX_MPROTECT;
49466+#endif
49467+
49468+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
49469+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
49470+ pax_flags |= MF_PAX_RANDMMAP;
49471+#endif
49472+
49473+ return pax_flags;
49474+}
49475+#endif
49476+
49477+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49478+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
49479+{
49480+ unsigned long pax_flags = 0UL;
49481+
49482+#ifdef CONFIG_PAX_EI_PAX
49483+
49484+#ifdef CONFIG_PAX_PAGEEXEC
49485+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
49486+ pax_flags |= MF_PAX_PAGEEXEC;
49487+#endif
49488+
49489+#ifdef CONFIG_PAX_SEGMEXEC
49490+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
49491+ pax_flags |= MF_PAX_SEGMEXEC;
49492+#endif
49493+
49494+#ifdef CONFIG_PAX_EMUTRAMP
49495+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
49496+ pax_flags |= MF_PAX_EMUTRAMP;
49497+#endif
49498+
49499+#ifdef CONFIG_PAX_MPROTECT
49500+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
49501+ pax_flags |= MF_PAX_MPROTECT;
49502+#endif
49503+
49504+#ifdef CONFIG_PAX_ASLR
49505+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
49506+ pax_flags |= MF_PAX_RANDMMAP;
49507+#endif
49508+
49509+#else
49510+
49511+#ifdef CONFIG_PAX_PAGEEXEC
49512+ pax_flags |= MF_PAX_PAGEEXEC;
49513+#endif
49514+
49515+#ifdef CONFIG_PAX_SEGMEXEC
49516+ pax_flags |= MF_PAX_SEGMEXEC;
49517+#endif
49518+
49519+#ifdef CONFIG_PAX_MPROTECT
49520+ pax_flags |= MF_PAX_MPROTECT;
49521+#endif
49522+
49523+#ifdef CONFIG_PAX_RANDMMAP
49524+ if (randomize_va_space)
49525+ pax_flags |= MF_PAX_RANDMMAP;
49526+#endif
49527+
49528+#endif
49529+
49530+ return pax_flags;
49531+}
49532+
49533+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
49534+{
49535+
49536+#ifdef CONFIG_PAX_PT_PAX_FLAGS
49537+ unsigned long i;
49538+
49539+ for (i = 0UL; i < elf_ex->e_phnum; i++)
49540+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
49541+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
49542+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
49543+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
49544+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
49545+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
49546+ return ~0UL;
49547+
49548+#ifdef CONFIG_PAX_SOFTMODE
49549+ if (pax_softmode)
49550+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
49551+ else
49552+#endif
49553+
49554+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
49555+ break;
49556+ }
49557+#endif
49558+
49559+ return ~0UL;
49560+}
49561+
49562+static unsigned long pax_parse_xattr_pax(struct file * const file)
49563+{
49564+
49565+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
49566+ ssize_t xattr_size, i;
49567+ unsigned char xattr_value[5];
49568+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
49569+
49570+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
49571+ if (xattr_size <= 0 || xattr_size > 5)
49572+ return ~0UL;
49573+
49574+ for (i = 0; i < xattr_size; i++)
49575+ switch (xattr_value[i]) {
49576+ default:
49577+ return ~0UL;
49578+
49579+#define parse_flag(option1, option2, flag) \
49580+ case option1: \
49581+ if (pax_flags_hardmode & MF_PAX_##flag) \
49582+ return ~0UL; \
49583+ pax_flags_hardmode |= MF_PAX_##flag; \
49584+ break; \
49585+ case option2: \
49586+ if (pax_flags_softmode & MF_PAX_##flag) \
49587+ return ~0UL; \
49588+ pax_flags_softmode |= MF_PAX_##flag; \
49589+ break;
49590+
49591+ parse_flag('p', 'P', PAGEEXEC);
49592+ parse_flag('e', 'E', EMUTRAMP);
49593+ parse_flag('m', 'M', MPROTECT);
49594+ parse_flag('r', 'R', RANDMMAP);
49595+ parse_flag('s', 'S', SEGMEXEC);
49596+
49597+#undef parse_flag
49598+ }
49599+
49600+ if (pax_flags_hardmode & pax_flags_softmode)
49601+ return ~0UL;
49602+
49603+#ifdef CONFIG_PAX_SOFTMODE
49604+ if (pax_softmode)
49605+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
49606+ else
49607+#endif
49608+
49609+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
49610+#else
49611+ return ~0UL;
49612+#endif
49613+
49614+}
49615+
49616+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
49617+{
49618+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
49619+
49620+ pax_flags = pax_parse_ei_pax(elf_ex);
49621+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
49622+ xattr_pax_flags = pax_parse_xattr_pax(file);
49623+
49624+ if (pt_pax_flags == ~0UL)
49625+ pt_pax_flags = xattr_pax_flags;
49626+ else if (xattr_pax_flags == ~0UL)
49627+ xattr_pax_flags = pt_pax_flags;
49628+ if (pt_pax_flags != xattr_pax_flags)
49629+ return -EINVAL;
49630+ if (pt_pax_flags != ~0UL)
49631+ pax_flags = pt_pax_flags;
49632+
49633+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
49634+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49635+ if ((__supported_pte_mask & _PAGE_NX))
49636+ pax_flags &= ~MF_PAX_SEGMEXEC;
49637+ else
49638+ pax_flags &= ~MF_PAX_PAGEEXEC;
49639+ }
49640+#endif
49641+
49642+ if (0 > pax_check_flags(&pax_flags))
49643+ return -EINVAL;
49644+
49645+ current->mm->pax_flags = pax_flags;
49646+ return 0;
49647+}
49648+#endif
49649+
49650 /*
49651 * These are the functions used to load ELF style executables and shared
49652 * libraries. There is no binary dependent code anywhere else.
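
Annotation: the block added above gives load_elf_binary() three sources of per-binary PaX flags: the legacy EI_PAX byte in e_ident, a PT_PAX_FLAGS program header, and the PAX extended attribute. pax_parse_pax_flags() at the end requires the last two to agree when both are present (the ~0UL sentinel means "not present") and lets either override the EI_PAX default. A small userland sketch of the xattr value parser, mirroring the parse_flag macro: lowercase letters collect into one mask, uppercase into the other, and a repeated flag, or the same flag in both sets, is rejected (the kernel additionally caps the value at five bytes). The MF_* bit values here are illustrative, not the kernel's:

    #include <stdio.h>

    #define MF_PAGEEXEC 0x01
    #define MF_EMUTRAMP 0x02
    #define MF_MPROTECT 0x04
    #define MF_RANDMMAP 0x08
    #define MF_SEGMEXEC 0x10

    /* returns 0 on success, -1 on a malformed or conflicting value */
    static int parse_pax_xattr(const char *v, unsigned *hard, unsigned *soft)
    {
        *hard = *soft = 0;
        for (; *v; v++) {
            unsigned flag, *set;
            switch (*v) {
            case 'p': case 'P': flag = MF_PAGEEXEC; break;
            case 'e': case 'E': flag = MF_EMUTRAMP; break;
            case 'm': case 'M': flag = MF_MPROTECT; break;
            case 'r': case 'R': flag = MF_RANDMMAP; break;
            case 's': case 'S': flag = MF_SEGMEXEC; break;
            default: return -1;                /* unknown character */
            }
            set = (*v >= 'a') ? hard : soft;   /* lowercase vs uppercase */
            if (*set & flag)
                return -1;                     /* repeated flag */
            *set |= flag;
        }
        return (*hard & *soft) ? -1 : 0;       /* same flag in both sets */
    }

    int main(void)
    {
        unsigned hard, soft;
        printf("\"PmRs\" -> %d\n", parse_pax_xattr("PmRs", &hard, &soft)); /*  0 */
        printf("\"pP\"   -> %d\n", parse_pax_xattr("pP", &hard, &soft));   /* -1 */
        return 0;
    }
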
49653@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
49654 {
49655 unsigned int random_variable = 0;
49656
49657+#ifdef CONFIG_PAX_RANDUSTACK
49658+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
49659+ return stack_top - current->mm->delta_stack;
49660+#endif
49661+
49662 if ((current->flags & PF_RANDOMIZE) &&
49663 !(current->personality & ADDR_NO_RANDOMIZE)) {
49664 random_variable = get_random_int() & STACK_RND_MASK;
49665@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
49666 unsigned long load_addr = 0, load_bias = 0;
49667 int load_addr_set = 0;
49668 char * elf_interpreter = NULL;
49669- unsigned long error;
49670+ unsigned long error = 0;
49671 struct elf_phdr *elf_ppnt, *elf_phdata;
49672 unsigned long elf_bss, elf_brk;
49673 int retval, i;
49674@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
49675 unsigned long start_code, end_code, start_data, end_data;
49676 unsigned long reloc_func_desc __maybe_unused = 0;
49677 int executable_stack = EXSTACK_DEFAULT;
49678- unsigned long def_flags = 0;
49679 struct pt_regs *regs = current_pt_regs();
49680 struct {
49681 struct elfhdr elf_ex;
49682 struct elfhdr interp_elf_ex;
49683 } *loc;
49684+ unsigned long pax_task_size = TASK_SIZE;
49685
49686 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
49687 if (!loc) {
49688@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
49689 goto out_free_dentry;
49690
49691 /* OK, This is the point of no return */
49692- current->mm->def_flags = def_flags;
49693+
49694+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49695+ current->mm->pax_flags = 0UL;
49696+#endif
49697+
49698+#ifdef CONFIG_PAX_DLRESOLVE
49699+ current->mm->call_dl_resolve = 0UL;
49700+#endif
49701+
49702+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
49703+ current->mm->call_syscall = 0UL;
49704+#endif
49705+
49706+#ifdef CONFIG_PAX_ASLR
49707+ current->mm->delta_mmap = 0UL;
49708+ current->mm->delta_stack = 0UL;
49709+#endif
49710+
49711+ current->mm->def_flags = 0;
49712+
49713+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49714+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
49715+ send_sig(SIGKILL, current, 0);
49716+ goto out_free_dentry;
49717+ }
49718+#endif
49719+
49720+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49721+ pax_set_initial_flags(bprm);
49722+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
49723+ if (pax_set_initial_flags_func)
49724+ (pax_set_initial_flags_func)(bprm);
49725+#endif
49726+
49727+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49728+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
49729+ current->mm->context.user_cs_limit = PAGE_SIZE;
49730+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
49731+ }
49732+#endif
49733+
49734+#ifdef CONFIG_PAX_SEGMEXEC
49735+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
49736+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
49737+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
49738+ pax_task_size = SEGMEXEC_TASK_SIZE;
49739+ current->mm->def_flags |= VM_NOHUGEPAGE;
49740+ }
49741+#endif
49742+
49743+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
49744+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49745+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
49746+ put_cpu();
49747+ }
49748+#endif
49749
49750 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
49751 may depend on the personality. */
49752 SET_PERSONALITY(loc->elf_ex);
49753+
49754+#ifdef CONFIG_PAX_ASLR
49755+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49756+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
49757+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
49758+ }
49759+#endif
49760+
49761+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49762+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49763+ executable_stack = EXSTACK_DISABLE_X;
49764+ current->personality &= ~READ_IMPLIES_EXEC;
49765+ } else
49766+#endif
49767+
49768 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
49769 current->personality |= READ_IMPLIES_EXEC;
49770
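
Annotation: the initialization block above resets all per-mm PaX state at the point of no return, parses the flag sources (killing the task on a conflict), programs the SEGMEXEC code-segment split, and then draws the ASLR deltas. A userland sketch of the delta arithmetic, assuming 4 KiB pages and 16-bit delta widths (PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are per-architecture constants in the patch):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAX_DELTA_MMAP_LEN  16   /* assumed; arch-specific in the patch */
    #define PAX_DELTA_STACK_LEN 16   /* assumed; arch-specific in the patch */

    int main(void)
    {
        unsigned long r1 = (unsigned long)rand();
        unsigned long r2 = (unsigned long)rand();
        unsigned long delta_mmap  = (r1 & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
        unsigned long delta_stack = (r2 & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;

        /* 16 random bits shifted by 12 -> offsets up to 256 MiB - 4 KiB */
        printf("delta_mmap  %#lx\ndelta_stack %#lx\n", delta_mmap, delta_stack);
        return 0;
    }
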
49771@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
49772 #else
49773 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
49774 #endif
49775+
49776+#ifdef CONFIG_PAX_RANDMMAP
49777+ /* PaX: randomize base address at the default exe base if requested */
49778+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
49779+#ifdef CONFIG_SPARC64
49780+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
49781+#else
49782+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
49783+#endif
49784+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
49785+ elf_flags |= MAP_FIXED;
49786+ }
49787+#endif
49788+
49789 }
49790
49791 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
49792@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
49793 * allowed task size. Note that p_filesz must always be
49794 * <= p_memsz so it is only necessary to check p_memsz.
49795 */
49796- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49797- elf_ppnt->p_memsz > TASK_SIZE ||
49798- TASK_SIZE - elf_ppnt->p_memsz < k) {
49799+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49800+ elf_ppnt->p_memsz > pax_task_size ||
49801+ pax_task_size - elf_ppnt->p_memsz < k) {
49802 /* set_brk can never work. Avoid overflows. */
49803 send_sig(SIGKILL, current, 0);
49804 retval = -EINVAL;
49805@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
49806 goto out_free_dentry;
49807 }
49808 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
49809- send_sig(SIGSEGV, current, 0);
49810- retval = -EFAULT; /* Nobody gets to see this, but.. */
49811- goto out_free_dentry;
49812+ /*
49813+ * This bss-zeroing can fail if the ELF
49814+ * file specifies odd protections. So
49815+ * we don't check the return value
49816+ */
49817 }
49818
49819+#ifdef CONFIG_PAX_RANDMMAP
49820+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49821+ unsigned long start, size, flags, vm_flags;
49822+
49823+ start = ELF_PAGEALIGN(elf_brk);
49824+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
49825+ flags = MAP_FIXED | MAP_PRIVATE;
49826+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
49827+
49828+ down_write(&current->mm->mmap_sem);
49829+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
49830+ retval = -ENOMEM;
49831+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
49832+// if (current->personality & ADDR_NO_RANDOMIZE)
49833+// vm_flags |= VM_READ | VM_MAYREAD;
49834+ start = mmap_region(NULL, start, PAGE_ALIGN(size), flags, vm_flags, 0);
49835+ retval = IS_ERR_VALUE(start) ? start : 0;
49836+ }
49837+ up_write(&current->mm->mmap_sem);
49838+ if (retval == 0)
49839+ retval = set_brk(start + size, start + size + PAGE_SIZE);
49840+ if (retval < 0) {
49841+ send_sig(SIGKILL, current, 0);
49842+ goto out_free_dentry;
49843+ }
49844+ }
49845+#endif
49846+
49847 if (elf_interpreter) {
49848- unsigned long interp_map_addr = 0;
49849-
49850 elf_entry = load_elf_interp(&loc->interp_elf_ex,
49851 interpreter,
49852- &interp_map_addr,
49853 load_bias);
49854 if (!IS_ERR((void *)elf_entry)) {
49855 /*
49856@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
49857 * Decide what to dump of a segment, part, all or none.
49858 */
49859 static unsigned long vma_dump_size(struct vm_area_struct *vma,
49860- unsigned long mm_flags)
49861+ unsigned long mm_flags, long signr)
49862 {
49863 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
49864
49865@@ -1153,7 +1599,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
49866 if (vma->vm_file == NULL)
49867 return 0;
49868
49869- if (FILTER(MAPPED_PRIVATE))
49870+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
49871 goto whole;
49872
49873 /*
49874@@ -1375,9 +1821,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
49875 {
49876 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
49877 int i = 0;
49878- do
49879+ do {
49880 i += 2;
49881- while (auxv[i - 2] != AT_NULL);
49882+ } while (auxv[i - 2] != AT_NULL);
49883 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
49884 }
49885
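
Annotation: the do/while rewrite above is purely cosmetic (braces around the loop body); the walk itself still counts auxv words in (key, value) pairs until it hits the AT_NULL terminator. A standalone sketch of the same walk:

    #include <stdio.h>
    #include <elf.h>   /* AT_NULL, AT_PAGESZ, AT_ENTRY, ... */

    int main(void)
    {
        unsigned long auxv[] = { AT_PAGESZ, 4096, AT_ENTRY, 0x400000,
                                 AT_NULL, 0 };
        int i = 0;

        do {
            i += 2;                      /* one (key, value) pair per step */
        } while (auxv[i - 2] != AT_NULL);

        printf("%d words including the terminator\n", i);   /* prints 6 */
        return 0;
    }
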
49886@@ -2007,14 +2453,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
49887 }
49888
49889 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
49890- unsigned long mm_flags)
49891+ struct coredump_params *cprm)
49892 {
49893 struct vm_area_struct *vma;
49894 size_t size = 0;
49895
49896 for (vma = first_vma(current, gate_vma); vma != NULL;
49897 vma = next_vma(vma, gate_vma))
49898- size += vma_dump_size(vma, mm_flags);
49899+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49900 return size;
49901 }
49902
49903@@ -2108,7 +2554,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49904
49905 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
49906
49907- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
49908+ offset += elf_core_vma_data_size(gate_vma, cprm);
49909 offset += elf_core_extra_data_size();
49910 e_shoff = offset;
49911
49912@@ -2122,10 +2568,12 @@ static int elf_core_dump(struct coredump_params *cprm)
49913 offset = dataoff;
49914
49915 size += sizeof(*elf);
49916+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49917 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
49918 goto end_coredump;
49919
49920 size += sizeof(*phdr4note);
49921+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49922 if (size > cprm->limit
49923 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
49924 goto end_coredump;
49925@@ -2139,7 +2587,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49926 phdr.p_offset = offset;
49927 phdr.p_vaddr = vma->vm_start;
49928 phdr.p_paddr = 0;
49929- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
49930+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49931 phdr.p_memsz = vma->vm_end - vma->vm_start;
49932 offset += phdr.p_filesz;
49933 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
49934@@ -2150,6 +2598,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49935 phdr.p_align = ELF_EXEC_PAGESIZE;
49936
49937 size += sizeof(phdr);
49938+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49939 if (size > cprm->limit
49940 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
49941 goto end_coredump;
49942@@ -2174,7 +2623,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49943 unsigned long addr;
49944 unsigned long end;
49945
49946- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
49947+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49948
49949 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
49950 struct page *page;
49951@@ -2183,6 +2632,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49952 page = get_dump_page(addr);
49953 if (page) {
49954 void *kaddr = kmap(page);
49955+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
49956 stop = ((size += PAGE_SIZE) > cprm->limit) ||
49957 !dump_write(cprm->file, kaddr,
49958 PAGE_SIZE);
49959@@ -2200,6 +2650,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49960
49961 if (e_phnum == PN_XNUM) {
49962 size += sizeof(*shdr4extnum);
49963+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49964 if (size > cprm->limit
49965 || !dump_write(cprm->file, shdr4extnum,
49966 sizeof(*shdr4extnum)))
49967@@ -2220,6 +2671,97 @@ out:
49968
49969 #endif /* CONFIG_ELF_CORE */
49970
49971+#ifdef CONFIG_PAX_MPROTECT
49972+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
49973+ * so we'll grant them VM_MAYWRITE once during their life. Similarly,
49974+ * we'll remove VM_MAYWRITE for good on RELRO segments.
49975+ *
49976+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
49977+ * basis because we want to allow the common case and not the special ones.
49978+ */
49979+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
49980+{
49981+ struct elfhdr elf_h;
49982+ struct elf_phdr elf_p;
49983+ unsigned long i;
49984+ unsigned long oldflags;
49985+ bool is_textrel_rw, is_textrel_rx, is_relro;
49986+
49987+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
49988+ return;
49989+
49990+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
49991+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
49992+
49993+#ifdef CONFIG_PAX_ELFRELOCS
49994+ /* possible TEXTREL */
49995+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
49996+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
49997+#else
49998+ is_textrel_rw = false;
49999+ is_textrel_rx = false;
50000+#endif
50001+
50002+ /* possible RELRO */
50003+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
50004+
50005+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
50006+ return;
50007+
50008+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
50009+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
50010+
50011+#ifdef CONFIG_PAX_ETEXECRELOCS
50012+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
50013+#else
50014+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
50015+#endif
50016+
50017+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
50018+ !elf_check_arch(&elf_h) ||
50019+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
50020+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
50021+ return;
50022+
50023+ for (i = 0UL; i < elf_h.e_phnum; i++) {
50024+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
50025+ return;
50026+ switch (elf_p.p_type) {
50027+ case PT_DYNAMIC:
50028+ if (!is_textrel_rw && !is_textrel_rx)
50029+ continue;
50030+ i = 0UL;
50031+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
50032+ elf_dyn dyn;
50033+
50034+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
50035+ return;
50036+ if (dyn.d_tag == DT_NULL)
50037+ return;
50038+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
50039+ gr_log_textrel(vma);
50040+ if (is_textrel_rw)
50041+ vma->vm_flags |= VM_MAYWRITE;
50042+ else
50043+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
50044+ vma->vm_flags &= ~VM_MAYWRITE;
50045+ return;
50046+ }
50047+ i++;
50048+ }
50049+ return;
50050+
50051+ case PT_GNU_RELRO:
50052+ if (!is_relro)
50053+ continue;
50054+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
50055+ vma->vm_flags &= ~VM_MAYWRITE;
50056+ return;
50057+ }
50058+ }
50059+}
50060+#endif
50061+
50062 static int __init init_elf_binfmt(void)
50063 {
50064 register_binfmt(&elf_format);
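
Annotation: elf_handle_mprotect() above is the PaX MPROTECT escape hatch. When an mprotect() transition looks like a text relocation (RW requested on a clean executable mapping, or RX on a dirtied writable one) or like the dynamic linker sealing a RELRO segment, it re-reads the backing ELF headers and either grants a one-time VM_MAYWRITE for DT_TEXTREL objects or strips VM_MAYWRITE for a matching PT_GNU_RELRO. A userland sketch of the same file-side classification using <elf.h>, for 64-bit objects only (the kernel code is word-size generic and also inspects the mapping's flags, which a file scanner cannot see):

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        Elf64_Ehdr eh;
        Elf64_Phdr ph;
        int fd, textrel = 0, relro = 0;

        if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        if (pread(fd, &eh, sizeof(eh), 0) != sizeof(eh) ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG) ||
            eh.e_phentsize != sizeof(ph))
            return 1;

        for (unsigned i = 0; i < eh.e_phnum; i++) {
            if (pread(fd, &ph, sizeof(ph), eh.e_phoff + i * sizeof(ph)) != sizeof(ph))
                return 1;
            if (ph.p_type == PT_GNU_RELRO)
                relro = 1;
            if (ph.p_type == PT_DYNAMIC) {
                Elf64_Dyn dyn;
                /* scan the dynamic section for DT_TEXTREL / DF_TEXTREL,
                 * as the kernel loop above does */
                for (Elf64_Off off = ph.p_offset;
                     off + sizeof(dyn) <= ph.p_offset + ph.p_filesz;
                     off += sizeof(dyn)) {
                    if (pread(fd, &dyn, sizeof(dyn), off) != sizeof(dyn) ||
                        dyn.d_tag == DT_NULL)
                        break;
                    if (dyn.d_tag == DT_TEXTREL ||
                        (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
                        textrel = 1;
                }
            }
        }
        printf("%s: TEXTREL=%d RELRO=%d\n", argv[1], textrel, relro);
        close(fd);
        return 0;
    }
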
50065diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
50066index b563719..3868998 100644
50067--- a/fs/binfmt_flat.c
50068+++ b/fs/binfmt_flat.c
50069@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
50070 realdatastart = (unsigned long) -ENOMEM;
50071 printk("Unable to allocate RAM for process data, errno %d\n",
50072 (int)-realdatastart);
50073+ down_write(&current->mm->mmap_sem);
50074 vm_munmap(textpos, text_len);
50075+ up_write(&current->mm->mmap_sem);
50076 ret = realdatastart;
50077 goto err;
50078 }
50079@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
50080 }
50081 if (IS_ERR_VALUE(result)) {
50082 printk("Unable to read data+bss, errno %d\n", (int)-result);
50083+ down_write(&current->mm->mmap_sem);
50084 vm_munmap(textpos, text_len);
50085 vm_munmap(realdatastart, len);
50086+ up_write(&current->mm->mmap_sem);
50087 ret = result;
50088 goto err;
50089 }
50090@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
50091 }
50092 if (IS_ERR_VALUE(result)) {
50093 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
50094+ down_write(&current->mm->mmap_sem);
50095 vm_munmap(textpos, text_len + data_len + extra +
50096 MAX_SHARED_LIBS * sizeof(unsigned long));
50097+ up_write(&current->mm->mmap_sem);
50098 ret = result;
50099 goto err;
50100 }
50101diff --git a/fs/bio.c b/fs/bio.c
50102index b96fc6c..431d628 100644
50103--- a/fs/bio.c
50104+++ b/fs/bio.c
50105@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
50106 /*
50107 * Overflow, abort
50108 */
50109- if (end < start)
50110+ if (end < start || end - start > INT_MAX - nr_pages)
50111 return ERR_PTR(-EINVAL);
50112
50113 nr_pages += end - start;
50114@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
50115 /*
50116 * Overflow, abort
50117 */
50118- if (end < start)
50119+ if (end < start || end - start > INT_MAX - nr_pages)
50120 return ERR_PTR(-EINVAL);
50121
50122 nr_pages += end - start;
50123@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
50124 const int read = bio_data_dir(bio) == READ;
50125 struct bio_map_data *bmd = bio->bi_private;
50126 int i;
50127- char *p = bmd->sgvecs[0].iov_base;
50128+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
50129
50130 __bio_for_each_segment(bvec, bio, i, 0) {
50131 char *addr = page_address(bvec->bv_page);
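
Annotation: both bio hunks harden the page-count accumulation. On a multi-segment iov, `end - start` is added to an int nr_pages per segment, so besides the wrap check (`end < start`) the patch also rejects any segment that would push nr_pages past INT_MAX. A self-contained demonstration of why the second clause matters:

    #include <stdio.h>
    #include <limits.h>

    static int add_pages(int nr_pages, unsigned long start, unsigned long end)
    {
        if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
            return -1;                       /* would overflow the int counter */
        return nr_pages + (int)(end - start);
    }

    int main(void)
    {
        /* a huge but non-wrapping range overflows the signed counter
         * even though end >= start */
        printf("%d\n", add_pages(10, 0, 0x80000000UL));   /* -1 */
        printf("%d\n", add_pages(10, 0, 1024));           /* 1034 */
        return 0;
    }
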
50132diff --git a/fs/block_dev.c b/fs/block_dev.c
50133index 883dc49..f27794a 100644
50134--- a/fs/block_dev.c
50135+++ b/fs/block_dev.c
50136@@ -652,7 +652,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
50137 else if (bdev->bd_contains == bdev)
50138 return true; /* is a whole device which isn't held */
50139
50140- else if (whole->bd_holder == bd_may_claim)
50141+ else if (whole->bd_holder == (void *)bd_may_claim)
50142 return true; /* is a partition of a device that is being partitioned */
50143 else if (whole->bd_holder != NULL)
50144 return false; /* is a partition of a held device */
50145diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
50146index ce1c169..1ef484f 100644
50147--- a/fs/btrfs/ctree.c
50148+++ b/fs/btrfs/ctree.c
50149@@ -1036,9 +1036,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
50150 free_extent_buffer(buf);
50151 add_root_to_dirty_list(root);
50152 } else {
50153- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
50154- parent_start = parent->start;
50155- else
50156+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
50157+ if (parent)
50158+ parent_start = parent->start;
50159+ else
50160+ parent_start = 0;
50161+ } else
50162 parent_start = 0;
50163
50164 WARN_ON(trans->transid != btrfs_header_generation(parent));
50165diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
50166index d170412..a575d77 100644
50167--- a/fs/btrfs/extent-tree.c
50168+++ b/fs/btrfs/extent-tree.c
50169@@ -6019,7 +6019,7 @@ again:
50170 if (ret == -ENOSPC) {
50171 if (!final_tried) {
50172 num_bytes = num_bytes >> 1;
50173- num_bytes = num_bytes & ~(root->sectorsize - 1);
50174+ num_bytes = num_bytes & ~((u64)root->sectorsize - 1);
50175 num_bytes = max(num_bytes, min_alloc_size);
50176 if (num_bytes == min_alloc_size)
50177 final_tried = true;
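
Annotation: the one-character-looking btrfs fix above is a 64-bit masking bug. root->sectorsize is a 32-bit value, so ~(root->sectorsize - 1) is computed in 32 bits and then zero-extended, which wipes the upper half of the u64 num_bytes instead of merely rounding it down; casting the operand to u64 first keeps the high bits. A runnable illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t num_bytes = 0x100000000ULL + 8192;  /* > 4 GiB */
        uint32_t sectorsize = 4096;

        uint64_t bad  = num_bytes & ~(sectorsize - 1);           /* 32-bit mask */
        uint64_t good = num_bytes & ~((uint64_t)sectorsize - 1); /* 64-bit mask */

        printf("bad  %#llx\n", (unsigned long long)bad);   /* 0x2000 */
        printf("good %#llx\n", (unsigned long long)good);  /* 0x100002000 */
        return 0;
    }
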
50178diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
50179index 7c4e6cc..8ad78b2 100644
50180--- a/fs/btrfs/inode.c
50181+++ b/fs/btrfs/inode.c
50182@@ -17,6 +17,7 @@
50183 */
50184
50185 #include <linux/kernel.h>
50186+#include <linux/module.h>
50187 #include <linux/bio.h>
50188 #include <linux/buffer_head.h>
50189 #include <linux/file.h>
50190@@ -7314,7 +7315,7 @@ fail:
50191 return -ENOMEM;
50192 }
50193
50194-static int btrfs_getattr(struct vfsmount *mnt,
50195+int btrfs_getattr(struct vfsmount *mnt,
50196 struct dentry *dentry, struct kstat *stat)
50197 {
50198 struct inode *inode = dentry->d_inode;
50199@@ -7328,6 +7329,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
50200 return 0;
50201 }
50202
50203+EXPORT_SYMBOL(btrfs_getattr);
50204+
50205+dev_t get_btrfs_dev_from_inode(struct inode *inode)
50206+{
50207+ return BTRFS_I(inode)->root->anon_dev;
50208+}
50209+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
50210+
50211 /*
50212 * If a file is moved, it will inherit the cow and compression flags of the new
50213 * directory.
50214diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
50215index 338f259..b657640 100644
50216--- a/fs/btrfs/ioctl.c
50217+++ b/fs/btrfs/ioctl.c
50218@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
50219 for (i = 0; i < num_types; i++) {
50220 struct btrfs_space_info *tmp;
50221
50222+ /* Don't copy in more than we allocated */
50223 if (!slot_count)
50224 break;
50225
50226+ slot_count--;
50227+
50228 info = NULL;
50229 rcu_read_lock();
50230 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
50231@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
50232 memcpy(dest, &space, sizeof(space));
50233 dest++;
50234 space_args.total_spaces++;
50235- slot_count--;
50236 }
50237- if (!slot_count)
50238- break;
50239 }
50240 up_read(&info->groups_sem);
50241 }
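
Annotation: the ioctl hunk above fixes a copy-out overrun. slot_count is the number of slots the user buffer (and the kernel allocation sized from it) can hold, so it must be spent as a budget check before each copy rather than decremented afterwards, and the early break moves accordingly. The pattern, reduced to a sketch:

    #include <stdio.h>

    int main(void)
    {
        int src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        int dest[3], slot_count = 3, copied = 0;

        for (int i = 0; i < 8; i++) {
            if (!slot_count)        /* don't copy in more than we allocated */
                break;
            slot_count--;           /* spend the slot up front */
            dest[copied++] = src[i];
        }
        printf("copied %d entries\n", copied);   /* 3, never more */
        return 0;
    }
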
50242diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
50243index 300e09a..9fe4539 100644
50244--- a/fs/btrfs/relocation.c
50245+++ b/fs/btrfs/relocation.c
50246@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
50247 }
50248 spin_unlock(&rc->reloc_root_tree.lock);
50249
50250- BUG_ON((struct btrfs_root *)node->data != root);
50251+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
50252
50253 if (!del) {
50254 spin_lock(&rc->reloc_root_tree.lock);
50255diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
50256index d8982e9..29a85fa 100644
50257--- a/fs/btrfs/super.c
50258+++ b/fs/btrfs/super.c
50259@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
50260 function, line, errstr);
50261 return;
50262 }
50263- ACCESS_ONCE(trans->transaction->aborted) = errno;
50264+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
50265 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
50266 }
50267 /*
50268diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
50269index 622f469..e8d2d55 100644
50270--- a/fs/cachefiles/bind.c
50271+++ b/fs/cachefiles/bind.c
50272@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
50273 args);
50274
50275 /* start by checking things over */
50276- ASSERT(cache->fstop_percent >= 0 &&
50277- cache->fstop_percent < cache->fcull_percent &&
50278+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
50279 cache->fcull_percent < cache->frun_percent &&
50280 cache->frun_percent < 100);
50281
50282- ASSERT(cache->bstop_percent >= 0 &&
50283- cache->bstop_percent < cache->bcull_percent &&
50284+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
50285 cache->bcull_percent < cache->brun_percent &&
50286 cache->brun_percent < 100);
50287
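
Annotation: the ASSERT trimming above is not a weakening. fstop_percent and its siblings are declared `unsigned` (see the cachefiles_cache struct later in this patch), so the removed `>= 0` clauses were tautologies that modern compilers flag under -Wtype-limits. Minimal demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned fstop_percent = 7;

        /* with gcc -Wtype-limits: "comparison is always true" */
        if (fstop_percent >= 0)
            printf("always reached for an unsigned value\n");
        return 0;
    }
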
50288diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
50289index 0a1467b..6a53245 100644
50290--- a/fs/cachefiles/daemon.c
50291+++ b/fs/cachefiles/daemon.c
50292@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
50293 if (n > buflen)
50294 return -EMSGSIZE;
50295
50296- if (copy_to_user(_buffer, buffer, n) != 0)
50297+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
50298 return -EFAULT;
50299
50300 return n;
50301@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
50302 if (test_bit(CACHEFILES_DEAD, &cache->flags))
50303 return -EIO;
50304
50305- if (datalen < 0 || datalen > PAGE_SIZE - 1)
50306+ if (datalen > PAGE_SIZE - 1)
50307 return -EOPNOTSUPP;
50308
50309 /* drag the command string into the kernel so we can parse it */
50310@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
50311 if (args[0] != '%' || args[1] != '\0')
50312 return -EINVAL;
50313
50314- if (fstop < 0 || fstop >= cache->fcull_percent)
50315+ if (fstop >= cache->fcull_percent)
50316 return cachefiles_daemon_range_error(cache, args);
50317
50318 cache->fstop_percent = fstop;
50319@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
50320 if (args[0] != '%' || args[1] != '\0')
50321 return -EINVAL;
50322
50323- if (bstop < 0 || bstop >= cache->bcull_percent)
50324+ if (bstop >= cache->bcull_percent)
50325 return cachefiles_daemon_range_error(cache, args);
50326
50327 cache->bstop_percent = bstop;
50328diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
50329index 4938251..7e01445 100644
50330--- a/fs/cachefiles/internal.h
50331+++ b/fs/cachefiles/internal.h
50332@@ -59,7 +59,7 @@ struct cachefiles_cache {
50333 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
50334 struct rb_root active_nodes; /* active nodes (can't be culled) */
50335 rwlock_t active_lock; /* lock for active_nodes */
50336- atomic_t gravecounter; /* graveyard uniquifier */
50337+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
50338 unsigned frun_percent; /* when to stop culling (% files) */
50339 unsigned fcull_percent; /* when to start culling (% files) */
50340 unsigned fstop_percent; /* when to stop allocating (% files) */
50341@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
50342 * proc.c
50343 */
50344 #ifdef CONFIG_CACHEFILES_HISTOGRAM
50345-extern atomic_t cachefiles_lookup_histogram[HZ];
50346-extern atomic_t cachefiles_mkdir_histogram[HZ];
50347-extern atomic_t cachefiles_create_histogram[HZ];
50348+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
50349+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
50350+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
50351
50352 extern int __init cachefiles_proc_init(void);
50353 extern void cachefiles_proc_cleanup(void);
50354 static inline
50355-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
50356+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
50357 {
50358 unsigned long jif = jiffies - start_jif;
50359 if (jif >= HZ)
50360 jif = HZ - 1;
50361- atomic_inc(&histogram[jif]);
50362+ atomic_inc_unchecked(&histogram[jif]);
50363 }
50364
50365 #else
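
Annotation: from here on, large stretches of the patch convert pure statistics counters (the cachefiles histograms above, the cifs counters below) from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, ordinary atomic_t increments trap on overflow to stop reference-count exploits; the _unchecked variants opt plain counters out of that check so a wrapping statistic cannot take the box down. A userland sketch of the API shape (the kernel versions are per-arch assembly; only the names mirror the patch, the bodies here are just GCC builtins):

    #include <stdio.h>

    typedef struct { int counter; } atomic_unchecked_t;

    /* plain wrapping increment: no overflow trap, unlike PaX's atomic_inc() */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        atomic_unchecked_t stat = { 0 };
        atomic_inc_unchecked(&stat);
        printf("stat = %d\n", atomic_read_unchecked(&stat));
        return 0;
    }
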
50366diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
50367index 8c01c5fc..15f982e 100644
50368--- a/fs/cachefiles/namei.c
50369+++ b/fs/cachefiles/namei.c
50370@@ -317,7 +317,7 @@ try_again:
50371 /* first step is to make up a grave dentry in the graveyard */
50372 sprintf(nbuffer, "%08x%08x",
50373 (uint32_t) get_seconds(),
50374- (uint32_t) atomic_inc_return(&cache->gravecounter));
50375+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
50376
50377 /* do the multiway lock magic */
50378 trap = lock_rename(cache->graveyard, dir);
50379diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
50380index eccd339..4c1d995 100644
50381--- a/fs/cachefiles/proc.c
50382+++ b/fs/cachefiles/proc.c
50383@@ -14,9 +14,9 @@
50384 #include <linux/seq_file.h>
50385 #include "internal.h"
50386
50387-atomic_t cachefiles_lookup_histogram[HZ];
50388-atomic_t cachefiles_mkdir_histogram[HZ];
50389-atomic_t cachefiles_create_histogram[HZ];
50390+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
50391+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
50392+atomic_unchecked_t cachefiles_create_histogram[HZ];
50393
50394 /*
50395 * display the latency histogram
50396@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
50397 return 0;
50398 default:
50399 index = (unsigned long) v - 3;
50400- x = atomic_read(&cachefiles_lookup_histogram[index]);
50401- y = atomic_read(&cachefiles_mkdir_histogram[index]);
50402- z = atomic_read(&cachefiles_create_histogram[index]);
50403+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
50404+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
50405+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
50406 if (x == 0 && y == 0 && z == 0)
50407 return 0;
50408
50409diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
50410index 4809922..aab2c39 100644
50411--- a/fs/cachefiles/rdwr.c
50412+++ b/fs/cachefiles/rdwr.c
50413@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
50414 old_fs = get_fs();
50415 set_fs(KERNEL_DS);
50416 ret = file->f_op->write(
50417- file, (const void __user *) data, len, &pos);
50418+ file, (const void __force_user *) data, len, &pos);
50419 set_fs(old_fs);
50420 kunmap(page);
50421 if (ret != len)
50422diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
50423index 8c1aabe..bbf856a 100644
50424--- a/fs/ceph/dir.c
50425+++ b/fs/ceph/dir.c
50426@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
50427 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
50428 struct ceph_mds_client *mdsc = fsc->mdsc;
50429 unsigned frag = fpos_frag(filp->f_pos);
50430- int off = fpos_off(filp->f_pos);
50431+ unsigned int off = fpos_off(filp->f_pos);
50432 int err;
50433 u32 ftype;
50434 struct ceph_mds_reply_info_parsed *rinfo;
50435diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
50436index d9ea6ed..1e6c8ac 100644
50437--- a/fs/cifs/cifs_debug.c
50438+++ b/fs/cifs/cifs_debug.c
50439@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
50440
50441 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
50442 #ifdef CONFIG_CIFS_STATS2
50443- atomic_set(&totBufAllocCount, 0);
50444- atomic_set(&totSmBufAllocCount, 0);
50445+ atomic_set_unchecked(&totBufAllocCount, 0);
50446+ atomic_set_unchecked(&totSmBufAllocCount, 0);
50447 #endif /* CONFIG_CIFS_STATS2 */
50448 spin_lock(&cifs_tcp_ses_lock);
50449 list_for_each(tmp1, &cifs_tcp_ses_list) {
50450@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
50451 tcon = list_entry(tmp3,
50452 struct cifs_tcon,
50453 tcon_list);
50454- atomic_set(&tcon->num_smbs_sent, 0);
50455+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
50456 if (server->ops->clear_stats)
50457 server->ops->clear_stats(tcon);
50458 }
50459@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
50460 smBufAllocCount.counter, cifs_min_small);
50461 #ifdef CONFIG_CIFS_STATS2
50462 seq_printf(m, "Total Large %d Small %d Allocations\n",
50463- atomic_read(&totBufAllocCount),
50464- atomic_read(&totSmBufAllocCount));
50465+ atomic_read_unchecked(&totBufAllocCount),
50466+ atomic_read_unchecked(&totSmBufAllocCount));
50467 #endif /* CONFIG_CIFS_STATS2 */
50468
50469 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
50470@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
50471 if (tcon->need_reconnect)
50472 seq_puts(m, "\tDISCONNECTED ");
50473 seq_printf(m, "\nSMBs: %d",
50474- atomic_read(&tcon->num_smbs_sent));
50475+ atomic_read_unchecked(&tcon->num_smbs_sent));
50476 if (server->ops->print_stats)
50477 server->ops->print_stats(m, tcon);
50478 }
50479diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
50480index b9db388..9a73d6d 100644
50481--- a/fs/cifs/cifsfs.c
50482+++ b/fs/cifs/cifsfs.c
50483@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
50484 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
50485 cifs_req_cachep = kmem_cache_create("cifs_request",
50486 CIFSMaxBufSize + max_hdr_size, 0,
50487- SLAB_HWCACHE_ALIGN, NULL);
50488+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
50489 if (cifs_req_cachep == NULL)
50490 return -ENOMEM;
50491
50492@@ -1053,7 +1053,7 @@ cifs_init_request_bufs(void)
50493 efficient to alloc 1 per page off the slab compared to 17K (5page)
50494 alloc of large cifs buffers even when page debugging is on */
50495 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
50496- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
50497+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
50498 NULL);
50499 if (cifs_sm_req_cachep == NULL) {
50500 mempool_destroy(cifs_req_poolp);
50501@@ -1138,8 +1138,8 @@ init_cifs(void)
50502 atomic_set(&bufAllocCount, 0);
50503 atomic_set(&smBufAllocCount, 0);
50504 #ifdef CONFIG_CIFS_STATS2
50505- atomic_set(&totBufAllocCount, 0);
50506- atomic_set(&totSmBufAllocCount, 0);
50507+ atomic_set_unchecked(&totBufAllocCount, 0);
50508+ atomic_set_unchecked(&totSmBufAllocCount, 0);
50509 #endif /* CONFIG_CIFS_STATS2 */
50510
50511 atomic_set(&midCount, 0);
50512diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
50513index e6899ce..d6b2920 100644
50514--- a/fs/cifs/cifsglob.h
50515+++ b/fs/cifs/cifsglob.h
50516@@ -751,35 +751,35 @@ struct cifs_tcon {
50517 __u16 Flags; /* optional support bits */
50518 enum statusEnum tidStatus;
50519 #ifdef CONFIG_CIFS_STATS
50520- atomic_t num_smbs_sent;
50521+ atomic_unchecked_t num_smbs_sent;
50522 union {
50523 struct {
50524- atomic_t num_writes;
50525- atomic_t num_reads;
50526- atomic_t num_flushes;
50527- atomic_t num_oplock_brks;
50528- atomic_t num_opens;
50529- atomic_t num_closes;
50530- atomic_t num_deletes;
50531- atomic_t num_mkdirs;
50532- atomic_t num_posixopens;
50533- atomic_t num_posixmkdirs;
50534- atomic_t num_rmdirs;
50535- atomic_t num_renames;
50536- atomic_t num_t2renames;
50537- atomic_t num_ffirst;
50538- atomic_t num_fnext;
50539- atomic_t num_fclose;
50540- atomic_t num_hardlinks;
50541- atomic_t num_symlinks;
50542- atomic_t num_locks;
50543- atomic_t num_acl_get;
50544- atomic_t num_acl_set;
50545+ atomic_unchecked_t num_writes;
50546+ atomic_unchecked_t num_reads;
50547+ atomic_unchecked_t num_flushes;
50548+ atomic_unchecked_t num_oplock_brks;
50549+ atomic_unchecked_t num_opens;
50550+ atomic_unchecked_t num_closes;
50551+ atomic_unchecked_t num_deletes;
50552+ atomic_unchecked_t num_mkdirs;
50553+ atomic_unchecked_t num_posixopens;
50554+ atomic_unchecked_t num_posixmkdirs;
50555+ atomic_unchecked_t num_rmdirs;
50556+ atomic_unchecked_t num_renames;
50557+ atomic_unchecked_t num_t2renames;
50558+ atomic_unchecked_t num_ffirst;
50559+ atomic_unchecked_t num_fnext;
50560+ atomic_unchecked_t num_fclose;
50561+ atomic_unchecked_t num_hardlinks;
50562+ atomic_unchecked_t num_symlinks;
50563+ atomic_unchecked_t num_locks;
50564+ atomic_unchecked_t num_acl_get;
50565+ atomic_unchecked_t num_acl_set;
50566 } cifs_stats;
50567 #ifdef CONFIG_CIFS_SMB2
50568 struct {
50569- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
50570- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
50571+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
50572+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
50573 } smb2_stats;
50574 #endif /* CONFIG_CIFS_SMB2 */
50575 } stats;
50576@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
50577 }
50578
50579 #ifdef CONFIG_CIFS_STATS
50580-#define cifs_stats_inc atomic_inc
50581+#define cifs_stats_inc atomic_inc_unchecked
50582
50583 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
50584 unsigned int bytes)
50585@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
50586 /* Various Debug counters */
50587 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
50588 #ifdef CONFIG_CIFS_STATS2
50589-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
50590-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
50591+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
50592+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
50593 #endif
50594 GLOBAL_EXTERN atomic_t smBufAllocCount;
50595 GLOBAL_EXTERN atomic_t midCount;
50596diff --git a/fs/cifs/link.c b/fs/cifs/link.c
50597index 51dc2fb..1e12a33 100644
50598--- a/fs/cifs/link.c
50599+++ b/fs/cifs/link.c
50600@@ -616,7 +616,7 @@ symlink_exit:
50601
50602 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
50603 {
50604- char *p = nd_get_link(nd);
50605+ const char *p = nd_get_link(nd);
50606 if (!IS_ERR(p))
50607 kfree(p);
50608 }
50609diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
50610index 3a00c0d..42d901c 100644
50611--- a/fs/cifs/misc.c
50612+++ b/fs/cifs/misc.c
50613@@ -169,7 +169,7 @@ cifs_buf_get(void)
50614 memset(ret_buf, 0, buf_size + 3);
50615 atomic_inc(&bufAllocCount);
50616 #ifdef CONFIG_CIFS_STATS2
50617- atomic_inc(&totBufAllocCount);
50618+ atomic_inc_unchecked(&totBufAllocCount);
50619 #endif /* CONFIG_CIFS_STATS2 */
50620 }
50621
50622@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
50623 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
50624 atomic_inc(&smBufAllocCount);
50625 #ifdef CONFIG_CIFS_STATS2
50626- atomic_inc(&totSmBufAllocCount);
50627+ atomic_inc_unchecked(&totSmBufAllocCount);
50628 #endif /* CONFIG_CIFS_STATS2 */
50629
50630 }
50631diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
50632index 47bc5a8..10decbe 100644
50633--- a/fs/cifs/smb1ops.c
50634+++ b/fs/cifs/smb1ops.c
50635@@ -586,27 +586,27 @@ static void
50636 cifs_clear_stats(struct cifs_tcon *tcon)
50637 {
50638 #ifdef CONFIG_CIFS_STATS
50639- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
50640- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
50641- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
50642- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
50643- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
50644- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
50645- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
50646- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
50647- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
50648- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
50649- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
50650- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
50651- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
50652- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
50653- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
50654- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
50655- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
50656- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
50657- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
50658- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
50659- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
50660+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
50661+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
50662+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
50663+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
50664+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
50665+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
50666+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
50667+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
50668+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
50669+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
50670+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
50671+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
50672+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
50673+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
50674+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
50675+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
50676+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
50677+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
50678+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
50679+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
50680+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
50681 #endif
50682 }
50683
50684@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
50685 {
50686 #ifdef CONFIG_CIFS_STATS
50687 seq_printf(m, " Oplocks breaks: %d",
50688- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
50689+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
50690 seq_printf(m, "\nReads: %d Bytes: %llu",
50691- atomic_read(&tcon->stats.cifs_stats.num_reads),
50692+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
50693 (long long)(tcon->bytes_read));
50694 seq_printf(m, "\nWrites: %d Bytes: %llu",
50695- atomic_read(&tcon->stats.cifs_stats.num_writes),
50696+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
50697 (long long)(tcon->bytes_written));
50698 seq_printf(m, "\nFlushes: %d",
50699- atomic_read(&tcon->stats.cifs_stats.num_flushes));
50700+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
50701 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
50702- atomic_read(&tcon->stats.cifs_stats.num_locks),
50703- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
50704- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
50705+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
50706+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
50707+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
50708 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
50709- atomic_read(&tcon->stats.cifs_stats.num_opens),
50710- atomic_read(&tcon->stats.cifs_stats.num_closes),
50711- atomic_read(&tcon->stats.cifs_stats.num_deletes));
50712+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
50713+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
50714+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
50715 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
50716- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
50717- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
50718+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
50719+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
50720 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
50721- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
50722- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
50723+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
50724+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
50725 seq_printf(m, "\nRenames: %d T2 Renames %d",
50726- atomic_read(&tcon->stats.cifs_stats.num_renames),
50727- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
50728+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
50729+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
50730 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
50731- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
50732- atomic_read(&tcon->stats.cifs_stats.num_fnext),
50733- atomic_read(&tcon->stats.cifs_stats.num_fclose));
50734+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
50735+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
50736+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
50737 #endif
50738 }
50739
50740diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
50741index bceffe7..cd1ae59 100644
50742--- a/fs/cifs/smb2ops.c
50743+++ b/fs/cifs/smb2ops.c
50744@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
50745 #ifdef CONFIG_CIFS_STATS
50746 int i;
50747 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
50748- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50749- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50750+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50751+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50752 }
50753 #endif
50754 }
50755@@ -284,66 +284,66 @@ static void
50756 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
50757 {
50758 #ifdef CONFIG_CIFS_STATS
50759- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50760- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50761+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50762+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50763 seq_printf(m, "\nNegotiates: %d sent %d failed",
50764- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
50765- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
50766+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
50767+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
50768 seq_printf(m, "\nSessionSetups: %d sent %d failed",
50769- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
50770- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
50771+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
50772+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
50773 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
50774 seq_printf(m, "\nLogoffs: %d sent %d failed",
50775- atomic_read(&sent[SMB2_LOGOFF_HE]),
50776- atomic_read(&failed[SMB2_LOGOFF_HE]));
50777+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
50778+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
50779 seq_printf(m, "\nTreeConnects: %d sent %d failed",
50780- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
50781- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
50782+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
50783+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
50784 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
50785- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
50786- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
50787+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
50788+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
50789 seq_printf(m, "\nCreates: %d sent %d failed",
50790- atomic_read(&sent[SMB2_CREATE_HE]),
50791- atomic_read(&failed[SMB2_CREATE_HE]));
50792+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
50793+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
50794 seq_printf(m, "\nCloses: %d sent %d failed",
50795- atomic_read(&sent[SMB2_CLOSE_HE]),
50796- atomic_read(&failed[SMB2_CLOSE_HE]));
50797+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
50798+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
50799 seq_printf(m, "\nFlushes: %d sent %d failed",
50800- atomic_read(&sent[SMB2_FLUSH_HE]),
50801- atomic_read(&failed[SMB2_FLUSH_HE]));
50802+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
50803+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
50804 seq_printf(m, "\nReads: %d sent %d failed",
50805- atomic_read(&sent[SMB2_READ_HE]),
50806- atomic_read(&failed[SMB2_READ_HE]));
50807+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
50808+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
50809 seq_printf(m, "\nWrites: %d sent %d failed",
50810- atomic_read(&sent[SMB2_WRITE_HE]),
50811- atomic_read(&failed[SMB2_WRITE_HE]));
50812+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
50813+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
50814 seq_printf(m, "\nLocks: %d sent %d failed",
50815- atomic_read(&sent[SMB2_LOCK_HE]),
50816- atomic_read(&failed[SMB2_LOCK_HE]));
50817+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
50818+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
50819 seq_printf(m, "\nIOCTLs: %d sent %d failed",
50820- atomic_read(&sent[SMB2_IOCTL_HE]),
50821- atomic_read(&failed[SMB2_IOCTL_HE]));
50822+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
50823+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
50824 seq_printf(m, "\nCancels: %d sent %d failed",
50825- atomic_read(&sent[SMB2_CANCEL_HE]),
50826- atomic_read(&failed[SMB2_CANCEL_HE]));
50827+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
50828+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
50829 seq_printf(m, "\nEchos: %d sent %d failed",
50830- atomic_read(&sent[SMB2_ECHO_HE]),
50831- atomic_read(&failed[SMB2_ECHO_HE]));
50832+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
50833+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
50834 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
50835- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
50836- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
50837+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
50838+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
50839 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
50840- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
50841- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
50842+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
50843+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
50844 seq_printf(m, "\nQueryInfos: %d sent %d failed",
50845- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
50846- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
50847+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
50848+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
50849 seq_printf(m, "\nSetInfos: %d sent %d failed",
50850- atomic_read(&sent[SMB2_SET_INFO_HE]),
50851- atomic_read(&failed[SMB2_SET_INFO_HE]));
50852+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
50853+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
50854 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
50855- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
50856- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
50857+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
50858+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
50859 #endif
50860 }
50861
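
This long run of atomic_read/atomic_set conversions to their _unchecked variants is the standard PaX REFCOUNT pattern: instrumented atomic_t operations kill the task on overflow, so counters that are pure statistics, where wrapping is harmless and saturating would itself be wrong, are switched to atomic_unchecked_t to opt out of the check. A userspace model of the distinction, in C11 (all names here are invented for illustration, not taken from the patch):

    /* Userspace model: a "checked" refcount that refuses to overflow
       (what PaX REFCOUNT enforces for atomic_t) next to a plain
       statistics counter that is allowed to wrap (atomic_unchecked_t). */
    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcount;    /* overflow here would be exploitable */
    static atomic_uint stat_hits;  /* wrapping here is harmless */

    static int refcount_inc_checked(atomic_int *v)
    {
        int old = atomic_load(v);
        do {
            if (old == INT_MAX)
                return -1;         /* refuse instead of wrapping negative */
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));
        return 0;
    }

    static void stat_inc(atomic_uint *v)
    {
        atomic_fetch_add(v, 1);    /* no check: nothing trusts this value */
    }

    int main(void)
    {
        refcount_inc_checked(&refcount);
        stat_inc(&stat_hits);
        printf("ref=%d hits=%u\n", atomic_load(&refcount),
               atomic_load(&stat_hits));
        return 0;
    }
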
50862diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
50863index 41d9d07..dbb4772 100644
50864--- a/fs/cifs/smb2pdu.c
50865+++ b/fs/cifs/smb2pdu.c
50866@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
50867 default:
50868 cERROR(1, "info level %u isn't supported",
50869 srch_inf->info_level);
50870- rc = -EINVAL;
50871- goto qdir_exit;
50872+ return -EINVAL;
50873 }
50874
50875 req->FileIndex = cpu_to_le32(index);
50876diff --git a/fs/coda/cache.c b/fs/coda/cache.c
50877index 958ae0e..505c9d0 100644
50878--- a/fs/coda/cache.c
50879+++ b/fs/coda/cache.c
50880@@ -24,7 +24,7 @@
50881 #include "coda_linux.h"
50882 #include "coda_cache.h"
50883
50884-static atomic_t permission_epoch = ATOMIC_INIT(0);
50885+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
50886
50887 /* replace or extend an acl cache hit */
50888 void coda_cache_enter(struct inode *inode, int mask)
50889@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
50890 struct coda_inode_info *cii = ITOC(inode);
50891
50892 spin_lock(&cii->c_lock);
50893- cii->c_cached_epoch = atomic_read(&permission_epoch);
50894+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
50895 if (cii->c_uid != current_fsuid()) {
50896 cii->c_uid = current_fsuid();
50897 cii->c_cached_perm = mask;
50898@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
50899 {
50900 struct coda_inode_info *cii = ITOC(inode);
50901 spin_lock(&cii->c_lock);
50902- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
50903+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
50904 spin_unlock(&cii->c_lock);
50905 }
50906
50907 /* remove all acl caches */
50908 void coda_cache_clear_all(struct super_block *sb)
50909 {
50910- atomic_inc(&permission_epoch);
50911+ atomic_inc_unchecked(&permission_epoch);
50912 }
50913
50914
50915@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
50916 spin_lock(&cii->c_lock);
50917 hit = (mask & cii->c_cached_perm) == mask &&
50918 cii->c_uid == current_fsuid() &&
50919- cii->c_cached_epoch == atomic_read(&permission_epoch);
50920+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
50921 spin_unlock(&cii->c_lock);
50922
50923 return hit;
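
The coda hunk above also shows why an unchecked counter can be correct by design: permission_epoch is a generation number, and bumping it once invalidates every cached ACL entry at the cost of a single increment, so wraparound merely risks a spurious hit or miss, never a use-after-free. A minimal runnable model of epoch-based invalidation (illustrative names, C11 atomics standing in for the kernel's atomic_unchecked_t):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int permission_epoch;   /* global generation counter */

    struct cache_entry {
        int cached_epoch;                 /* epoch when the entry was filled */
        int cached_perm;
    };

    /* Record a result together with the current epoch. */
    static void cache_enter(struct cache_entry *e, int perm)
    {
        e->cached_epoch = atomic_load(&permission_epoch);
        e->cached_perm = perm;
    }

    /* A hit requires the entry's epoch to still match the global one. */
    static bool cache_check(const struct cache_entry *e, int mask)
    {
        return (mask & e->cached_perm) == mask &&
               e->cached_epoch == atomic_load(&permission_epoch);
    }

    /* Invalidate every entry at once: O(1), no list walk needed. */
    static void cache_clear_all(void)
    {
        atomic_fetch_add(&permission_epoch, 1);
    }

    int main(void)
    {
        struct cache_entry e;
        cache_enter(&e, 7);
        printf("hit before clear: %d\n", cache_check(&e, 4)); /* 1 */
        cache_clear_all();
        printf("hit after clear:  %d\n", cache_check(&e, 4)); /* 0 */
        return 0;
    }
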
50924diff --git a/fs/compat.c b/fs/compat.c
50925index a06dcbc..dacb6d3 100644
50926--- a/fs/compat.c
50927+++ b/fs/compat.c
50928@@ -54,7 +54,7 @@
50929 #include <asm/ioctls.h>
50930 #include "internal.h"
50931
50932-int compat_log = 1;
50933+int compat_log = 0;
50934
50935 int compat_printk(const char *fmt, ...)
50936 {
50937@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
50938
50939 set_fs(KERNEL_DS);
50940 /* The __user pointer cast is valid because of the set_fs() */
50941- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
50942+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
50943 set_fs(oldfs);
50944 /* truncating is ok because it's a user address */
50945 if (!ret)
50946@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
50947 goto out;
50948
50949 ret = -EINVAL;
50950- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
50951+ if (nr_segs > UIO_MAXIOV)
50952 goto out;
50953 if (nr_segs > fast_segs) {
50954 ret = -ENOMEM;
50955@@ -835,6 +835,7 @@ struct compat_old_linux_dirent {
50956
50957 struct compat_readdir_callback {
50958 struct compat_old_linux_dirent __user *dirent;
50959+ struct file * file;
50960 int result;
50961 };
50962
50963@@ -852,6 +853,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
50964 buf->result = -EOVERFLOW;
50965 return -EOVERFLOW;
50966 }
50967+
50968+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50969+ return 0;
50970+
50971 buf->result++;
50972 dirent = buf->dirent;
50973 if (!access_ok(VERIFY_WRITE, dirent,
50974@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
50975
50976 buf.result = 0;
50977 buf.dirent = dirent;
50978+ buf.file = f.file;
50979
50980 error = vfs_readdir(f.file, compat_fillonedir, &buf);
50981 if (buf.result)
50982@@ -901,6 +907,7 @@ struct compat_linux_dirent {
50983 struct compat_getdents_callback {
50984 struct compat_linux_dirent __user *current_dir;
50985 struct compat_linux_dirent __user *previous;
50986+ struct file * file;
50987 int count;
50988 int error;
50989 };
50990@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
50991 buf->error = -EOVERFLOW;
50992 return -EOVERFLOW;
50993 }
50994+
50995+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50996+ return 0;
50997+
50998 dirent = buf->previous;
50999 if (dirent) {
51000 if (__put_user(offset, &dirent->d_off))
51001@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
51002 buf.previous = NULL;
51003 buf.count = count;
51004 buf.error = 0;
51005+ buf.file = f.file;
51006
51007 error = vfs_readdir(f.file, compat_filldir, &buf);
51008 if (error >= 0)
51009@@ -987,6 +999,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
51010 struct compat_getdents_callback64 {
51011 struct linux_dirent64 __user *current_dir;
51012 struct linux_dirent64 __user *previous;
51013+ struct file * file;
51014 int count;
51015 int error;
51016 };
51017@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
51018 buf->error = -EINVAL; /* only used if we fail.. */
51019 if (reclen > buf->count)
51020 return -EINVAL;
51021+
51022+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51023+ return 0;
51024+
51025 dirent = buf->previous;
51026
51027 if (dirent) {
51028@@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
51029 buf.previous = NULL;
51030 buf.count = count;
51031 buf.error = 0;
51032+ buf.file = f.file;
51033
51034 error = vfs_readdir(f.file, compat_filldir64, &buf);
51035 if (error >= 0)
51036 error = buf.error;
51037 lastdirent = buf.previous;
51038 if (lastdirent) {
51039- typeof(lastdirent->d_off) d_off = f.file->f_pos;
51040+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
51041 if (__put_user_unaligned(d_off, &lastdirent->d_off))
51042 error = -EFAULT;
51043 else
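
All three compat.c readdir hunks apply one mechanical change: the filldir callback only receives an opaque __buf pointer, so a struct file * member is added to each callback-state struct, filled in before vfs_readdir(), and consulted by the gr_acl_handle_filldir() hook to silently drop entries the policy hides. A sketch of the same plumbing in plain C (the policy function and all types are invented stand-ins):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Callback-state struct: the extra 'ctx' member mirrors the added
       'struct file *file' in compat_readdir_callback. */
    struct filldir_state {
        const char *ctx;          /* context the filter needs */
        int emitted;
    };

    /* Hypothetical policy hook, like gr_acl_handle_filldir(): return
       false to silently drop an entry. */
    static bool policy_allows(const char *ctx, const char *name)
    {
        (void)ctx;
        return strcmp(name, "secret") != 0;
    }

    /* The iterator only hands the callback an opaque pointer... */
    static int fill_one(void *opaque, const char *name)
    {
        struct filldir_state *st = opaque;
        if (!policy_allows(st->ctx, name))
            return 0;             /* skip entry, keep iterating */
        printf("%s\n", name);
        st->emitted++;
        return 0;
    }

    static void list_dir(int (*cb)(void *, const char *), void *opaque)
    {
        static const char *names[] = { "a", "secret", "b" };
        for (unsigned i = 0; i < 3; i++)
            if (cb(opaque, names[i]))
                break;
    }

    int main(void)
    {
        struct filldir_state st = { .ctx = "/some/dir", .emitted = 0 };
        list_dir(fill_one, &st);  /* prints "a" and "b" only */
        return 0;
    }
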
51044diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
51045index a81147e..20bf2b5 100644
51046--- a/fs/compat_binfmt_elf.c
51047+++ b/fs/compat_binfmt_elf.c
51048@@ -30,11 +30,13 @@
51049 #undef elf_phdr
51050 #undef elf_shdr
51051 #undef elf_note
51052+#undef elf_dyn
51053 #undef elf_addr_t
51054 #define elfhdr elf32_hdr
51055 #define elf_phdr elf32_phdr
51056 #define elf_shdr elf32_shdr
51057 #define elf_note elf32_note
51058+#define elf_dyn Elf32_Dyn
51059 #define elf_addr_t Elf32_Addr
51060
51061 /*
51062diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
51063index e2f57a0..3c78771 100644
51064--- a/fs/compat_ioctl.c
51065+++ b/fs/compat_ioctl.c
51066@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
51067 return -EFAULT;
51068 if (__get_user(udata, &ss32->iomem_base))
51069 return -EFAULT;
51070- ss.iomem_base = compat_ptr(udata);
51071+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
51072 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
51073 __get_user(ss.port_high, &ss32->port_high))
51074 return -EFAULT;
51075@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
51076 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
51077 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
51078 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
51079- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
51080+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
51081 return -EFAULT;
51082
51083 return ioctl_preallocate(file, p);
51084@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
51085 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
51086 {
51087 unsigned int a, b;
51088- a = *(unsigned int *)p;
51089- b = *(unsigned int *)q;
51090+ a = *(const unsigned int *)p;
51091+ b = *(const unsigned int *)q;
51092 if (a > b)
51093 return 1;
51094 if (a < b)
51095diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
51096index 712b10f..c33c4ca 100644
51097--- a/fs/configfs/dir.c
51098+++ b/fs/configfs/dir.c
51099@@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
51100 static int configfs_depend_prep(struct dentry *origin,
51101 struct config_item *target)
51102 {
51103- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
51104+ struct configfs_dirent *child_sd, *sd;
51105 int ret = 0;
51106
51107- BUG_ON(!origin || !sd);
51108+ BUG_ON(!origin || !origin->d_fsdata);
51109+ sd = origin->d_fsdata;
51110
51111 if (sd->s_element == target) /* Boo-yah */
51112 goto out;
51113@@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
51114 }
51115 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
51116 struct configfs_dirent *next;
51117- const char * name;
51118+ const unsigned char * name;
51119+ char d_name[sizeof(next->s_dentry->d_iname)];
51120 int len;
51121 struct inode *inode = NULL;
51122
51123@@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
51124 continue;
51125
51126 name = configfs_get_name(next);
51127- len = strlen(name);
51128+ if (next->s_dentry && name == next->s_dentry->d_iname) {
51129+ len = next->s_dentry->d_name.len;
51130+ memcpy(d_name, name, len);
51131+ name = d_name;
51132+ } else
51133+ len = strlen(name);
51134
51135 /*
51136 * We'll have a dentry and an inode for
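
The configfs_readdir change guards against a rename racing with the directory walk: when the returned name points into the dentry's inline d_iname storage, it is snapshotted into a same-sized stack buffer before use, so a concurrent rename can at worst produce a stale name rather than a read past the inline buffer. A compact model of the idiom (the clamp below is defensive and not part of the patch):

    #include <stdio.h>
    #include <string.h>

    #define INLINE_NAME_LEN 32            /* stands in for sizeof(d_iname) */

    struct dentry_like {
        char iname[INLINE_NAME_LEN];      /* inline short-name storage */
        size_t name_len;
        const char *name;                 /* points at iname for short names */
    };

    /* Take a private snapshot before use: a concurrent rename may rewrite
       iname, but the copy is bounded by the buffer size, so the worst
       case is a stale name, not an out-of-bounds read. */
    static void emit_name(const struct dentry_like *d)
    {
        char snap[INLINE_NAME_LEN];
        const char *name = d->name;
        size_t len;

        if (name == d->iname) {
            len = d->name_len;
            if (len >= sizeof(snap))
                len = sizeof(snap) - 1;   /* defensive clamp, not in the patch */
            memcpy(snap, name, len);
            snap[len] = '\0';
            name = snap;
        } else {
            len = strlen(name);
        }
        printf("%.*s\n", (int)len, name);
    }

    int main(void)
    {
        struct dentry_like d = { .iname = "hello", .name_len = 5 };
        d.name = d.iname;
        emit_name(&d);
        return 0;
    }
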
51137diff --git a/fs/coredump.c b/fs/coredump.c
51138index 1774932..5812106 100644
51139--- a/fs/coredump.c
51140+++ b/fs/coredump.c
51141@@ -52,7 +52,7 @@ struct core_name {
51142 char *corename;
51143 int used, size;
51144 };
51145-static atomic_t call_count = ATOMIC_INIT(1);
51146+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
51147
51148 /* The maximal length of core_pattern is also specified in sysctl.c */
51149
51150@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
51151 {
51152 char *old_corename = cn->corename;
51153
51154- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
51155+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
51156 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
51157
51158 if (!cn->corename) {
51159@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
51160 int pid_in_pattern = 0;
51161 int err = 0;
51162
51163- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
51164+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
51165 cn->corename = kmalloc(cn->size, GFP_KERNEL);
51166 cn->used = 0;
51167
51168@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
51169 pipe = file->f_path.dentry->d_inode->i_pipe;
51170
51171 pipe_lock(pipe);
51172- pipe->readers++;
51173- pipe->writers--;
51174+ atomic_inc(&pipe->readers);
51175+ atomic_dec(&pipe->writers);
51176
51177- while ((pipe->readers > 1) && (!signal_pending(current))) {
51178+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
51179 wake_up_interruptible_sync(&pipe->wait);
51180 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
51181 pipe_wait(pipe);
51182 }
51183
51184- pipe->readers--;
51185- pipe->writers++;
51186+ atomic_dec(&pipe->readers);
51187+ atomic_inc(&pipe->writers);
51188 pipe_unlock(pipe);
51189
51190 }
51191@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
51192 int ispipe;
51193 struct files_struct *displaced;
51194 bool need_nonrelative = false;
51195- static atomic_t core_dump_count = ATOMIC_INIT(0);
51196+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
51197+ long signr = siginfo->si_signo;
51198 struct coredump_params cprm = {
51199 .siginfo = siginfo,
51200 .regs = signal_pt_regs(),
51201@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
51202 .mm_flags = mm->flags,
51203 };
51204
51205- audit_core_dumps(siginfo->si_signo);
51206+ audit_core_dumps(signr);
51207+
51208+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
51209+ gr_handle_brute_attach(cprm.mm_flags);
51210
51211 binfmt = mm->binfmt;
51212 if (!binfmt || !binfmt->core_dump)
51213@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
51214 need_nonrelative = true;
51215 }
51216
51217- retval = coredump_wait(siginfo->si_signo, &core_state);
51218+ retval = coredump_wait(signr, &core_state);
51219 if (retval < 0)
51220 goto fail_creds;
51221
51222@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
51223 }
51224 cprm.limit = RLIM_INFINITY;
51225
51226- dump_count = atomic_inc_return(&core_dump_count);
51227+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
51228 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
51229 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
51230 task_tgid_vnr(current), current->comm);
51231@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
51232 } else {
51233 struct inode *inode;
51234
51235+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
51236+
51237 if (cprm.limit < binfmt->min_coredump)
51238 goto fail_unlock;
51239
51240@@ -640,7 +646,7 @@ close_fail:
51241 filp_close(cprm.file, NULL);
51242 fail_dropcount:
51243 if (ispipe)
51244- atomic_dec(&core_dump_count);
51245+ atomic_dec_unchecked(&core_dump_count);
51246 fail_unlock:
51247 kfree(cn.corename);
51248 fail_corename:
51249@@ -659,7 +665,7 @@ fail:
51250 */
51251 int dump_write(struct file *file, const void *addr, int nr)
51252 {
51253- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
51254+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
51255 }
51256 EXPORT_SYMBOL(dump_write);
51257
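
In coredump.c, call_count only ever sizes an allocation: each expand_corename() bumps it and reallocates to CORENAME_MAX_SIZE times the new value, which is why the patch can demote it to an unchecked counter. A runnable model of that grow-by-shared-counter scheme (realloc standing in for krealloc, constants invented):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CORENAME_CHUNK 128            /* stands in for CORENAME_MAX_SIZE */

    static atomic_int call_count = 1;     /* like the patch's ATOMIC_INIT(1) */

    struct core_name {
        char *buf;
        size_t size;
    };

    /* Each expansion bumps the shared counter, so the buffer grows by
       one more chunk than any previous caller ever needed. */
    static int expand_corename(struct core_name *cn)
    {
        size_t nsize = CORENAME_CHUNK *
                       (size_t)(atomic_fetch_add(&call_count, 1) + 1);
        char *nbuf = realloc(cn->buf, nsize);

        if (!nbuf)
            return -1;
        cn->buf = nbuf;
        cn->size = nsize;
        return 0;
    }

    int main(void)
    {
        struct core_name cn = { NULL, 0 };
        for (int i = 0; i < 3; i++) {
            if (expand_corename(&cn))
                return 1;
            printf("size now %zu\n", cn.size);  /* 256, 384, 512 */
        }
        free(cn.buf);
        return 0;
    }
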
51258diff --git a/fs/dcache.c b/fs/dcache.c
51259index c3bbf85..5b71101 100644
51260--- a/fs/dcache.c
51261+++ b/fs/dcache.c
51262@@ -3139,7 +3139,7 @@ void __init vfs_caches_init(unsigned long mempages)
51263 mempages -= reserve;
51264
51265 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
51266- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
51267+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
51268
51269 dcache_init();
51270 inode_init();
51271diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
51272index a5f12b7..4ee8a6f 100644
51273--- a/fs/debugfs/inode.c
51274+++ b/fs/debugfs/inode.c
51275@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
51276 */
51277 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
51278 {
51279+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
51280+ return __create_file(name, S_IFDIR | S_IRWXU,
51281+#else
51282 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
51283+#endif
51284 parent, NULL, NULL);
51285 }
51286 EXPORT_SYMBOL_GPL(debugfs_create_dir);
51287diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
51288index cc7709e..7e7211f 100644
51289--- a/fs/ecryptfs/inode.c
51290+++ b/fs/ecryptfs/inode.c
51291@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
51292 old_fs = get_fs();
51293 set_fs(get_ds());
51294 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
51295- (char __user *)lower_buf,
51296+ (char __force_user *)lower_buf,
51297 PATH_MAX);
51298 set_fs(old_fs);
51299 if (rc < 0)
51300@@ -706,7 +706,7 @@ out:
51301 static void
51302 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
51303 {
51304- char *buf = nd_get_link(nd);
51305+ const char *buf = nd_get_link(nd);
51306 if (!IS_ERR(buf)) {
51307 /* Free the char* */
51308 kfree(buf);
51309diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
51310index 412e6ed..d8263e8 100644
51311--- a/fs/ecryptfs/miscdev.c
51312+++ b/fs/ecryptfs/miscdev.c
51313@@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
51314 int rc;
51315
51316 mutex_lock(&ecryptfs_daemon_hash_mux);
51317- rc = try_module_get(THIS_MODULE);
51318- if (rc == 0) {
51319- rc = -EIO;
51320- printk(KERN_ERR "%s: Error attempting to increment module use "
51321- "count; rc = [%d]\n", __func__, rc);
51322- goto out_unlock_daemon_list;
51323- }
51324 rc = ecryptfs_find_daemon_by_euid(&daemon);
51325 if (!rc) {
51326 rc = -EINVAL;
51327@@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
51328 if (rc) {
51329 printk(KERN_ERR "%s: Error attempting to spawn daemon; "
51330 "rc = [%d]\n", __func__, rc);
51331- goto out_module_put_unlock_daemon_list;
51332+ goto out_unlock_daemon_list;
51333 }
51334 mutex_lock(&daemon->mux);
51335 if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
51336@@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
51337 atomic_inc(&ecryptfs_num_miscdev_opens);
51338 out_unlock_daemon:
51339 mutex_unlock(&daemon->mux);
51340-out_module_put_unlock_daemon_list:
51341- if (rc)
51342- module_put(THIS_MODULE);
51343 out_unlock_daemon_list:
51344 mutex_unlock(&ecryptfs_daemon_hash_mux);
51345 return rc;
51346@@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
51347 "bug.\n", __func__, rc);
51348 BUG();
51349 }
51350- module_put(THIS_MODULE);
51351 return rc;
51352 }
51353
51354@@ -315,7 +304,7 @@ check_list:
51355 goto out_unlock_msg_ctx;
51356 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
51357 if (msg_ctx->msg) {
51358- if (copy_to_user(&buf[i], packet_length, packet_length_size))
51359+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
51360 goto out_unlock_msg_ctx;
51361 i += packet_length_size;
51362 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
51363@@ -471,6 +460,7 @@ out_free:
51364
51365
51366 static const struct file_operations ecryptfs_miscdev_fops = {
51367+ .owner = THIS_MODULE,
51368 .open = ecryptfs_miscdev_open,
51369 .poll = ecryptfs_miscdev_poll,
51370 .read = ecryptfs_miscdev_read,
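
The miscdev hunks delete the hand-rolled try_module_get()/module_put() pairs and instead set .owner in the file_operations; the VFS then pins the module in fops_get() before ->open() can run, which both shortens the code and closes the window where the device is opened while the module is mid-unload. A kernel-style sketch of the resulting shape (not a standalone program):

    #include <linux/fs.h>
    #include <linux/module.h>

    static int example_open(struct inode *inode, struct file *file)
    {
        return 0;                  /* module already pinned by the VFS */
    }

    static const struct file_operations example_fops = {
        .owner = THIS_MODULE,      /* the one line the patch adds */
        .open  = example_open,
    };
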
51371diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
51372index b2a34a1..162fa69 100644
51373--- a/fs/ecryptfs/read_write.c
51374+++ b/fs/ecryptfs/read_write.c
51375@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
51376 return -EIO;
51377 fs_save = get_fs();
51378 set_fs(get_ds());
51379- rc = vfs_write(lower_file, data, size, &offset);
51380+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
51381 set_fs(fs_save);
51382 mark_inode_dirty_sync(ecryptfs_inode);
51383 return rc;
51384@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
51385 return -EIO;
51386 fs_save = get_fs();
51387 set_fs(get_ds());
51388- rc = vfs_read(lower_file, data, size, &offset);
51389+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
51390 set_fs(fs_save);
51391 return rc;
51392 }
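
Both read_write.c hunks keep the existing set_fs(get_ds()) window and only change the cast: under the PaX sparse annotations, casting a kernel pointer straight to __user is an error, and __force_user documents that the cast is deliberate and valid only while the address limit is raised. A kernel-style sketch of the idiom (not standalone; __force_user is a PaX annotation, mainline would spell it (__force char __user *)):

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static ssize_t read_lower(struct file *lower_file, char *data,
                              size_t size, loff_t offset)
    {
        mm_segment_t old_fs = get_fs();
        ssize_t rc;

        set_fs(get_ds());   /* "user" accesses may now hit kernel memory */
        rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
        set_fs(old_fs);     /* always restore the previous limit */
        return rc;
    }
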
51393diff --git a/fs/exec.c b/fs/exec.c
51394index 20df02c..9a87617 100644
51395--- a/fs/exec.c
51396+++ b/fs/exec.c
51397@@ -55,8 +55,20 @@
51398 #include <linux/pipe_fs_i.h>
51399 #include <linux/oom.h>
51400 #include <linux/compat.h>
51401+#include <linux/random.h>
51402+#include <linux/seq_file.h>
51403+#include <linux/coredump.h>
51404+#include <linux/mman.h>
51405+
51406+#ifdef CONFIG_PAX_REFCOUNT
51407+#include <linux/kallsyms.h>
51408+#include <linux/kdebug.h>
51409+#endif
51410+
51411+#include <trace/events/fs.h>
51412
51413 #include <asm/uaccess.h>
51414+#include <asm/sections.h>
51415 #include <asm/mmu_context.h>
51416 #include <asm/tlb.h>
51417
51418@@ -66,6 +78,18 @@
51419
51420 #include <trace/events/sched.h>
51421
51422+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
51423+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
51424+{
51425+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
51426+}
51427+#endif
51428+
51429+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
51430+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
51431+EXPORT_SYMBOL(pax_set_initial_flags_func);
51432+#endif
51433+
51434 int suid_dumpable = 0;
51435
51436 static LIST_HEAD(formats);
51437@@ -75,8 +99,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
51438 {
51439 BUG_ON(!fmt);
51440 write_lock(&binfmt_lock);
51441- insert ? list_add(&fmt->lh, &formats) :
51442- list_add_tail(&fmt->lh, &formats);
51443+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
51444+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
51445 write_unlock(&binfmt_lock);
51446 }
51447
51448@@ -85,7 +109,7 @@ EXPORT_SYMBOL(__register_binfmt);
51449 void unregister_binfmt(struct linux_binfmt * fmt)
51450 {
51451 write_lock(&binfmt_lock);
51452- list_del(&fmt->lh);
51453+ pax_list_del((struct list_head *)&fmt->lh);
51454 write_unlock(&binfmt_lock);
51455 }
51456
51457@@ -180,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
51458 int write)
51459 {
51460 struct page *page;
51461- int ret;
51462
51463-#ifdef CONFIG_STACK_GROWSUP
51464- if (write) {
51465- ret = expand_downwards(bprm->vma, pos);
51466- if (ret < 0)
51467- return NULL;
51468- }
51469-#endif
51470- ret = get_user_pages(current, bprm->mm, pos,
51471- 1, write, 1, &page, NULL);
51472- if (ret <= 0)
51473+ if (0 > expand_downwards(bprm->vma, pos))
51474+ return NULL;
51475+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
51476 return NULL;
51477
51478 if (write) {
51479@@ -207,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
51480 if (size <= ARG_MAX)
51481 return page;
51482
51483+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51484+ // only allow 512KB for argv+env on suid/sgid binaries
51485+ // to prevent easy ASLR exhaustion
51486+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
51487+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
51488+ (size > (512 * 1024))) {
51489+ put_page(page);
51490+ return NULL;
51491+ }
51492+#endif
51493+
51494 /*
51495 * Limit to 1/4-th the stack size for the argv+env strings.
51496 * This ensures that:
51497@@ -266,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
51498 vma->vm_end = STACK_TOP_MAX;
51499 vma->vm_start = vma->vm_end - PAGE_SIZE;
51500 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
51501+
51502+#ifdef CONFIG_PAX_SEGMEXEC
51503+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
51504+#endif
51505+
51506 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
51507 INIT_LIST_HEAD(&vma->anon_vma_chain);
51508
51509@@ -276,6 +308,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
51510 mm->stack_vm = mm->total_vm = 1;
51511 up_write(&mm->mmap_sem);
51512 bprm->p = vma->vm_end - sizeof(void *);
51513+
51514+#ifdef CONFIG_PAX_RANDUSTACK
51515+ if (randomize_va_space)
51516+ bprm->p ^= random32() & ~PAGE_MASK;
51517+#endif
51518+
51519 return 0;
51520 err:
51521 up_write(&mm->mmap_sem);
51522@@ -396,7 +434,7 @@ struct user_arg_ptr {
51523 } ptr;
51524 };
51525
51526-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
51527+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
51528 {
51529 const char __user *native;
51530
51531@@ -405,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
51532 compat_uptr_t compat;
51533
51534 if (get_user(compat, argv.ptr.compat + nr))
51535- return ERR_PTR(-EFAULT);
51536+ return (const char __force_user *)ERR_PTR(-EFAULT);
51537
51538 return compat_ptr(compat);
51539 }
51540 #endif
51541
51542 if (get_user(native, argv.ptr.native + nr))
51543- return ERR_PTR(-EFAULT);
51544+ return (const char __force_user *)ERR_PTR(-EFAULT);
51545
51546 return native;
51547 }
51548@@ -431,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
51549 if (!p)
51550 break;
51551
51552- if (IS_ERR(p))
51553+ if (IS_ERR((const char __force_kernel *)p))
51554 return -EFAULT;
51555
51556 if (i >= max)
51557@@ -466,7 +504,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
51558
51559 ret = -EFAULT;
51560 str = get_user_arg_ptr(argv, argc);
51561- if (IS_ERR(str))
51562+ if (IS_ERR((const char __force_kernel *)str))
51563 goto out;
51564
51565 len = strnlen_user(str, MAX_ARG_STRLEN);
51566@@ -548,7 +586,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
51567 int r;
51568 mm_segment_t oldfs = get_fs();
51569 struct user_arg_ptr argv = {
51570- .ptr.native = (const char __user *const __user *)__argv,
51571+ .ptr.native = (const char __force_user *const __force_user *)__argv,
51572 };
51573
51574 set_fs(KERNEL_DS);
51575@@ -583,7 +621,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
51576 unsigned long new_end = old_end - shift;
51577 struct mmu_gather tlb;
51578
51579- BUG_ON(new_start > new_end);
51580+ if (new_start >= new_end || new_start < mmap_min_addr)
51581+ return -ENOMEM;
51582
51583 /*
51584 * ensure there are no vmas between where we want to go
51585@@ -592,6 +631,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
51586 if (vma != find_vma(mm, new_start))
51587 return -EFAULT;
51588
51589+#ifdef CONFIG_PAX_SEGMEXEC
51590+ BUG_ON(pax_find_mirror_vma(vma));
51591+#endif
51592+
51593 /*
51594 * cover the whole range: [new_start, old_end)
51595 */
51596@@ -672,10 +715,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
51597 stack_top = arch_align_stack(stack_top);
51598 stack_top = PAGE_ALIGN(stack_top);
51599
51600- if (unlikely(stack_top < mmap_min_addr) ||
51601- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
51602- return -ENOMEM;
51603-
51604 stack_shift = vma->vm_end - stack_top;
51605
51606 bprm->p -= stack_shift;
51607@@ -687,8 +726,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
51608 bprm->exec -= stack_shift;
51609
51610 down_write(&mm->mmap_sem);
51611+
51612+ /* Move stack pages down in memory. */
51613+ if (stack_shift) {
51614+ ret = shift_arg_pages(vma, stack_shift);
51615+ if (ret)
51616+ goto out_unlock;
51617+ }
51618+
51619 vm_flags = VM_STACK_FLAGS;
51620
51621+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51622+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
51623+ vm_flags &= ~VM_EXEC;
51624+
51625+#ifdef CONFIG_PAX_MPROTECT
51626+ if (mm->pax_flags & MF_PAX_MPROTECT)
51627+ vm_flags &= ~VM_MAYEXEC;
51628+#endif
51629+
51630+ }
51631+#endif
51632+
51633 /*
51634 * Adjust stack execute permissions; explicitly enable for
51635 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
51636@@ -707,13 +766,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
51637 goto out_unlock;
51638 BUG_ON(prev != vma);
51639
51640- /* Move stack pages down in memory. */
51641- if (stack_shift) {
51642- ret = shift_arg_pages(vma, stack_shift);
51643- if (ret)
51644- goto out_unlock;
51645- }
51646-
51647 /* mprotect_fixup is overkill to remove the temporary stack flags */
51648 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
51649
51650@@ -737,6 +789,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
51651 #endif
51652 current->mm->start_stack = bprm->p;
51653 ret = expand_stack(vma, stack_base);
51654+
51655+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
51656+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
51657+ unsigned long size, flags, vm_flags;
51658+
51659+ size = STACK_TOP - vma->vm_end;
51660+ flags = MAP_FIXED | MAP_PRIVATE;
51661+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
51662+
51663+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
51664+
51665+#ifdef CONFIG_X86
51666+ if (!ret) {
51667+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
51668+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), flags, vm_flags, 0);
51669+ }
51670+#endif
51671+
51672+ }
51673+#endif
51674+
51675 if (ret)
51676 ret = -EFAULT;
51677
51678@@ -772,6 +845,8 @@ struct file *open_exec(const char *name)
51679
51680 fsnotify_open(file);
51681
51682+ trace_open_exec(name);
51683+
51684 err = deny_write_access(file);
51685 if (err)
51686 goto exit;
51687@@ -795,7 +870,7 @@ int kernel_read(struct file *file, loff_t offset,
51688 old_fs = get_fs();
51689 set_fs(get_ds());
51690 /* The cast to a user pointer is valid due to the set_fs() */
51691- result = vfs_read(file, (void __user *)addr, count, &pos);
51692+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
51693 set_fs(old_fs);
51694 return result;
51695 }
51696@@ -1247,7 +1322,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
51697 }
51698 rcu_read_unlock();
51699
51700- if (p->fs->users > n_fs) {
51701+ if (atomic_read(&p->fs->users) > n_fs) {
51702 bprm->unsafe |= LSM_UNSAFE_SHARE;
51703 } else {
51704 res = -EAGAIN;
51705@@ -1447,6 +1522,31 @@ int search_binary_handler(struct linux_binprm *bprm)
51706
51707 EXPORT_SYMBOL(search_binary_handler);
51708
51709+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51710+static DEFINE_PER_CPU(u64, exec_counter);
51711+static int __init init_exec_counters(void)
51712+{
51713+ unsigned int cpu;
51714+
51715+ for_each_possible_cpu(cpu) {
51716+ per_cpu(exec_counter, cpu) = (u64)cpu;
51717+ }
51718+
51719+ return 0;
51720+}
51721+early_initcall(init_exec_counters);
51722+static inline void increment_exec_counter(void)
51723+{
51724+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
51725+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
51726+}
51727+#else
51728+static inline void increment_exec_counter(void) {}
51729+#endif
51730+
51731+extern void gr_handle_exec_args(struct linux_binprm *bprm,
51732+ struct user_arg_ptr argv);
51733+
51734 /*
51735 * sys_execve() executes a new program.
51736 */
51737@@ -1454,6 +1554,11 @@ static int do_execve_common(const char *filename,
51738 struct user_arg_ptr argv,
51739 struct user_arg_ptr envp)
51740 {
51741+#ifdef CONFIG_GRKERNSEC
51742+ struct file *old_exec_file;
51743+ struct acl_subject_label *old_acl;
51744+ struct rlimit old_rlim[RLIM_NLIMITS];
51745+#endif
51746 struct linux_binprm *bprm;
51747 struct file *file;
51748 struct files_struct *displaced;
51749@@ -1461,6 +1566,8 @@ static int do_execve_common(const char *filename,
51750 int retval;
51751 const struct cred *cred = current_cred();
51752
51753+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
51754+
51755 /*
51756 * We move the actual failure in case of RLIMIT_NPROC excess from
51757 * set*uid() to execve() because too many poorly written programs
51758@@ -1501,12 +1608,27 @@ static int do_execve_common(const char *filename,
51759 if (IS_ERR(file))
51760 goto out_unmark;
51761
51762+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
51763+ retval = -EPERM;
51764+ goto out_file;
51765+ }
51766+
51767 sched_exec();
51768
51769 bprm->file = file;
51770 bprm->filename = filename;
51771 bprm->interp = filename;
51772
51773+ if (gr_process_user_ban()) {
51774+ retval = -EPERM;
51775+ goto out_file;
51776+ }
51777+
51778+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
51779+ retval = -EACCES;
51780+ goto out_file;
51781+ }
51782+
51783 retval = bprm_mm_init(bprm);
51784 if (retval)
51785 goto out_file;
51786@@ -1523,24 +1645,65 @@ static int do_execve_common(const char *filename,
51787 if (retval < 0)
51788 goto out;
51789
51790+#ifdef CONFIG_GRKERNSEC
51791+ old_acl = current->acl;
51792+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
51793+ old_exec_file = current->exec_file;
51794+ get_file(file);
51795+ current->exec_file = file;
51796+#endif
51797+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51798+ /* limit suid stack to 8MB
51799+ * we saved the old limits above and will restore them if this exec fails
51800+ */
51801+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
51802+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
51803+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
51804+#endif
51805+
51806+ if (!gr_tpe_allow(file)) {
51807+ retval = -EACCES;
51808+ goto out_fail;
51809+ }
51810+
51811+ if (gr_check_crash_exec(file)) {
51812+ retval = -EACCES;
51813+ goto out_fail;
51814+ }
51815+
51816+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
51817+ bprm->unsafe);
51818+ if (retval < 0)
51819+ goto out_fail;
51820+
51821 retval = copy_strings_kernel(1, &bprm->filename, bprm);
51822 if (retval < 0)
51823- goto out;
51824+ goto out_fail;
51825
51826 bprm->exec = bprm->p;
51827 retval = copy_strings(bprm->envc, envp, bprm);
51828 if (retval < 0)
51829- goto out;
51830+ goto out_fail;
51831
51832 retval = copy_strings(bprm->argc, argv, bprm);
51833 if (retval < 0)
51834- goto out;
51835+ goto out_fail;
51836+
51837+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
51838+
51839+ gr_handle_exec_args(bprm, argv);
51840
51841 retval = search_binary_handler(bprm);
51842 if (retval < 0)
51843- goto out;
51844+ goto out_fail;
51845+#ifdef CONFIG_GRKERNSEC
51846+ if (old_exec_file)
51847+ fput(old_exec_file);
51848+#endif
51849
51850 /* execve succeeded */
51851+
51852+ increment_exec_counter();
51853 current->fs->in_exec = 0;
51854 current->in_execve = 0;
51855 acct_update_integrals(current);
51856@@ -1549,6 +1712,14 @@ static int do_execve_common(const char *filename,
51857 put_files_struct(displaced);
51858 return retval;
51859
51860+out_fail:
51861+#ifdef CONFIG_GRKERNSEC
51862+ current->acl = old_acl;
51863+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
51864+ fput(current->exec_file);
51865+ current->exec_file = old_exec_file;
51866+#endif
51867+
51868 out:
51869 if (bprm->mm) {
51870 acct_arg_size(bprm, 0);
51871@@ -1697,3 +1868,278 @@ asmlinkage long compat_sys_execve(const char __user * filename,
51872 return error;
51873 }
51874 #endif
51875+
51876+int pax_check_flags(unsigned long *flags)
51877+{
51878+ int retval = 0;
51879+
51880+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
51881+ if (*flags & MF_PAX_SEGMEXEC)
51882+ {
51883+ *flags &= ~MF_PAX_SEGMEXEC;
51884+ retval = -EINVAL;
51885+ }
51886+#endif
51887+
51888+ if ((*flags & MF_PAX_PAGEEXEC)
51889+
51890+#ifdef CONFIG_PAX_PAGEEXEC
51891+ && (*flags & MF_PAX_SEGMEXEC)
51892+#endif
51893+
51894+ )
51895+ {
51896+ *flags &= ~MF_PAX_PAGEEXEC;
51897+ retval = -EINVAL;
51898+ }
51899+
51900+ if ((*flags & MF_PAX_MPROTECT)
51901+
51902+#ifdef CONFIG_PAX_MPROTECT
51903+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51904+#endif
51905+
51906+ )
51907+ {
51908+ *flags &= ~MF_PAX_MPROTECT;
51909+ retval = -EINVAL;
51910+ }
51911+
51912+ if ((*flags & MF_PAX_EMUTRAMP)
51913+
51914+#ifdef CONFIG_PAX_EMUTRAMP
51915+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51916+#endif
51917+
51918+ )
51919+ {
51920+ *flags &= ~MF_PAX_EMUTRAMP;
51921+ retval = -EINVAL;
51922+ }
51923+
51924+ return retval;
51925+}
51926+
51927+EXPORT_SYMBOL(pax_check_flags);
51928+
51929+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51930+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
51931+{
51932+ struct task_struct *tsk = current;
51933+ struct mm_struct *mm = current->mm;
51934+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
51935+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
51936+ char *path_exec = NULL;
51937+ char *path_fault = NULL;
51938+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
51939+ siginfo_t info = { };
51940+
51941+ if (buffer_exec && buffer_fault) {
51942+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
51943+
51944+ down_read(&mm->mmap_sem);
51945+ vma = mm->mmap;
51946+ while (vma && (!vma_exec || !vma_fault)) {
51947+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
51948+ vma_exec = vma;
51949+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
51950+ vma_fault = vma;
51951+ vma = vma->vm_next;
51952+ }
51953+ if (vma_exec) {
51954+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
51955+ if (IS_ERR(path_exec))
51956+ path_exec = "<path too long>";
51957+ else {
51958+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
51959+ if (path_exec) {
51960+ *path_exec = 0;
51961+ path_exec = buffer_exec;
51962+ } else
51963+ path_exec = "<path too long>";
51964+ }
51965+ }
51966+ if (vma_fault) {
51967+ start = vma_fault->vm_start;
51968+ end = vma_fault->vm_end;
51969+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
51970+ if (vma_fault->vm_file) {
51971+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
51972+ if (IS_ERR(path_fault))
51973+ path_fault = "<path too long>";
51974+ else {
51975+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
51976+ if (path_fault) {
51977+ *path_fault = 0;
51978+ path_fault = buffer_fault;
51979+ } else
51980+ path_fault = "<path too long>";
51981+ }
51982+ } else
51983+ path_fault = "<anonymous mapping>";
51984+ }
51985+ up_read(&mm->mmap_sem);
51986+ }
51987+ if (tsk->signal->curr_ip)
51988+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
51989+ else
51990+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
51991+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
51992+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
51993+ free_page((unsigned long)buffer_exec);
51994+ free_page((unsigned long)buffer_fault);
51995+ pax_report_insns(regs, pc, sp);
51996+ info.si_signo = SIGKILL;
51997+ info.si_errno = 0;
51998+ info.si_code = SI_KERNEL;
51999+ info.si_pid = 0;
52000+ info.si_uid = 0;
52001+ do_coredump(&info);
52002+}
52003+#endif
52004+
52005+#ifdef CONFIG_PAX_REFCOUNT
52006+void pax_report_refcount_overflow(struct pt_regs *regs)
52007+{
52008+ if (current->signal->curr_ip)
52009+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
52010+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
52011+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
52012+ else
52013+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
52014+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
52015+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
52016+ show_regs(regs);
52017+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
52018+}
52019+#endif
52020+
52021+#ifdef CONFIG_PAX_USERCOPY
52022+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
52023+static noinline int check_stack_object(const void *obj, unsigned long len)
52024+{
52025+ const void * const stack = task_stack_page(current);
52026+ const void * const stackend = stack + THREAD_SIZE;
52027+
52028+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
52029+ const void *frame = NULL;
52030+ const void *oldframe;
52031+#endif
52032+
52033+ if (obj + len < obj)
52034+ return -1;
52035+
52036+ if (obj + len <= stack || stackend <= obj)
52037+ return 0;
52038+
52039+ if (obj < stack || stackend < obj + len)
52040+ return -1;
52041+
52042+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
52043+ oldframe = __builtin_frame_address(1);
52044+ if (oldframe)
52045+ frame = __builtin_frame_address(2);
52046+ /*
52047+ low ----------------------------------------------> high
52048+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
52049+ ^----------------^
52050+ allow copies only within here
52051+ */
52052+ while (stack <= frame && frame < stackend) {
52053+ /* if obj + len extends past the last frame, this
52054+ check won't pass and the next frame will be 0,
52055+ causing us to bail out and correctly report
52056+ the copy as invalid
52057+ */
52058+ if (obj + len <= frame)
52059+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
52060+ oldframe = frame;
52061+ frame = *(const void * const *)frame;
52062+ }
52063+ return -1;
52064+#else
52065+ return 1;
52066+#endif
52067+}
52068+
52069+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
52070+{
52071+ if (current->signal->curr_ip)
52072+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
52073+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
52074+ else
52075+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
52076+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
52077+ dump_stack();
52078+ gr_handle_kernel_exploit();
52079+ do_group_exit(SIGKILL);
52080+}
52081+#endif
52082+
52083+#ifdef CONFIG_PAX_USERCOPY
52084+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
52085+{
52086+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
52087+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
52088+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
52089+#else
52090+ unsigned long textlow = _stext;
52091+ unsigned long texthigh = _etext;
52092+#endif
52093+
52094+ if (high < textlow || low > texthigh)
52095+ return false;
52096+ else
52097+ return true;
52098+}
52099+#endif
52100+
52101+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
52102+{
52103+
52104+#ifdef CONFIG_PAX_USERCOPY
52105+ const char *type;
52106+
52107+ if (!n)
52108+ return;
52109+
52110+ type = check_heap_object(ptr, n);
52111+ if (!type) {
52112+ int ret = check_stack_object(ptr, n);
52113+ if (ret == 1 || ret == 2)
52114+ return;
52115+ if (ret == 0) {
52116+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
52117+ type = "<kernel text>";
52118+ else
52119+ return;
52120+ } else
52121+ type = "<process stack>";
52122+ }
52123+
52124+ pax_report_usercopy(ptr, n, to_user, type);
52125+#endif
52126+
52127+}
52128+EXPORT_SYMBOL(__check_object_size);
52129+
52130+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
52131+void pax_track_stack(void)
52132+{
52133+ unsigned long sp = (unsigned long)&sp;
52134+ if (sp < current_thread_info()->lowest_stack &&
52135+ sp > (unsigned long)task_stack_page(current))
52136+ current_thread_info()->lowest_stack = sp;
52137+}
52138+EXPORT_SYMBOL(pax_track_stack);
52139+#endif
52140+
52141+#ifdef CONFIG_PAX_SIZE_OVERFLOW
52142+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
52143+{
52144+	printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
52145+ dump_stack();
52146+ do_group_exit(SIGKILL);
52147+}
52148+EXPORT_SYMBOL(report_size_overflow);
52149+#endif
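
The per-CPU exec counter added to exec.c generates globally unique exec_id values without any shared atomic: each CPU's counter is seeded with its own cpu number in the low 16 bits and every exec adds 1 << 16, so two CPUs can never produce the same value; the BUILD_BUG_ON pins the scheme to NR_CPUS <= 65536. A runnable arithmetic model (array indexing standing in for per-CPU storage):

    #include <stdint.h>
    #include <stdio.h>

    #define CPU_BITS 16u                   /* matches the 1 << 16 stride */

    /* One counter per CPU, seeded with the cpu id in the low bits.
       Incrementing by 1 << CPU_BITS keeps the cpu id intact, so values
       from different CPUs never collide and no cross-CPU atomic is
       required. */
    static uint64_t exec_counter[4];       /* pretend NR_CPUS == 4 */

    static void init_exec_counters(void)
    {
        for (uint64_t cpu = 0; cpu < 4; cpu++)
            exec_counter[cpu] = cpu;
    }

    static uint64_t next_exec_id(unsigned cpu)
    {
        return exec_counter[cpu] += (uint64_t)1 << CPU_BITS;
    }

    int main(void)
    {
        init_exec_counters();
        printf("%#llx %#llx %#llx\n",
               (unsigned long long)next_exec_id(0),
               (unsigned long long)next_exec_id(1),
               (unsigned long long)next_exec_id(0));
        /* 0x10000 0x10001 0x20000 -- all distinct */
        return 0;
    }
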
52150diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
52151index 2616d0e..2ffdec9 100644
52152--- a/fs/ext2/balloc.c
52153+++ b/fs/ext2/balloc.c
52154@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
52155
52156 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
52157 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
52158- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
52159+ if (free_blocks < root_blocks + 1 &&
52160 !uid_eq(sbi->s_resuid, current_fsuid()) &&
52161 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
52162- !in_group_p (sbi->s_resgid))) {
52163+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
52164 return 0;
52165 }
52166 return 1;
52167diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
52168index 22548f5..41521d8 100644
52169--- a/fs/ext3/balloc.c
52170+++ b/fs/ext3/balloc.c
52171@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
52172
52173 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
52174 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
52175- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
52176+ if (free_blocks < root_blocks + 1 &&
52177 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
52178 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
52179- !in_group_p (sbi->s_resgid))) {
52180+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
52181 return 0;
52182 }
52183 return 1;
52184diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
52185index 92e68b3..115d987 100644
52186--- a/fs/ext4/balloc.c
52187+++ b/fs/ext4/balloc.c
52188@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
52189 /* Hm, nope. Are (enough) root reserved clusters available? */
52190 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
52191 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
52192- capable(CAP_SYS_RESOURCE) ||
52193- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
52194+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
52195+ capable_nolog(CAP_SYS_RESOURCE)) {
52196
52197 if (free_clusters >= (nclusters + dirty_clusters))
52198 return 1;
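
The ext2, ext3 and ext4 balloc hunks all reorder the same condition so the capability test runs last, and as capable_nolog(): capable() has a side effect (it records and, under grsecurity, logs the capability use), so with && short-circuiting, moving it to the end means it only executes when the cheap uid/gid checks have not already decided the outcome. A small model of why the ordering matters:

    #include <stdbool.h>
    #include <stdio.h>

    static int audit_events;

    /* Stand-in for capable(): a side effect (audit log) plus a result. */
    static bool capable_logged(void)
    {
        audit_events++;
        return true;
    }

    static bool uid_is_reserved(int uid) { return uid == 0; }

    /* Put the side-effectful test last so && short-circuits past it
       whenever the cheaper checks already decide the answer. */
    static bool has_free_blocks(long free, long reserved, int uid)
    {
        if (free < reserved + 1 && !uid_is_reserved(uid) && !capable_logged())
            return false;
        return true;
    }

    int main(void)
    {
        has_free_blocks(100, 10, 1000); /* plenty free: capable() never runs */
        has_free_blocks(5, 10, 0);      /* low space, reserved uid decides */
        printf("audit events: %d\n", audit_events);  /* 0 */
        return 0;
    }
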
52199diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
52200index bbcd6a0..2824592 100644
52201--- a/fs/ext4/ext4.h
52202+++ b/fs/ext4/ext4.h
52203@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
52204 unsigned long s_mb_last_start;
52205
52206 /* stats for buddy allocator */
52207- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
52208- atomic_t s_bal_success; /* we found long enough chunks */
52209- atomic_t s_bal_allocated; /* in blocks */
52210- atomic_t s_bal_ex_scanned; /* total extents scanned */
52211- atomic_t s_bal_goals; /* goal hits */
52212- atomic_t s_bal_breaks; /* too long searches */
52213- atomic_t s_bal_2orders; /* 2^order hits */
52214+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
52215+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
52216+ atomic_unchecked_t s_bal_allocated; /* in blocks */
52217+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
52218+ atomic_unchecked_t s_bal_goals; /* goal hits */
52219+ atomic_unchecked_t s_bal_breaks; /* too long searches */
52220+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
52221 spinlock_t s_bal_lock;
52222 unsigned long s_mb_buddies_generated;
52223 unsigned long long s_mb_generation_time;
52224- atomic_t s_mb_lost_chunks;
52225- atomic_t s_mb_preallocated;
52226- atomic_t s_mb_discarded;
52227+ atomic_unchecked_t s_mb_lost_chunks;
52228+ atomic_unchecked_t s_mb_preallocated;
52229+ atomic_unchecked_t s_mb_discarded;
52230 atomic_t s_lock_busy;
52231
52232 /* locality groups */
52233diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
52234index 82f8c2d..ce7c889 100644
52235--- a/fs/ext4/mballoc.c
52236+++ b/fs/ext4/mballoc.c
52237@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
52238 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
52239
52240 if (EXT4_SB(sb)->s_mb_stats)
52241- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
52242+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
52243
52244 break;
52245 }
52246@@ -2044,7 +2044,7 @@ repeat:
52247 ac->ac_status = AC_STATUS_CONTINUE;
52248 ac->ac_flags |= EXT4_MB_HINT_FIRST;
52249 cr = 3;
52250- atomic_inc(&sbi->s_mb_lost_chunks);
52251+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
52252 goto repeat;
52253 }
52254 }
52255@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
52256 if (sbi->s_mb_stats) {
52257 ext4_msg(sb, KERN_INFO,
52258 "mballoc: %u blocks %u reqs (%u success)",
52259- atomic_read(&sbi->s_bal_allocated),
52260- atomic_read(&sbi->s_bal_reqs),
52261- atomic_read(&sbi->s_bal_success));
52262+ atomic_read_unchecked(&sbi->s_bal_allocated),
52263+ atomic_read_unchecked(&sbi->s_bal_reqs),
52264+ atomic_read_unchecked(&sbi->s_bal_success));
52265 ext4_msg(sb, KERN_INFO,
52266 "mballoc: %u extents scanned, %u goal hits, "
52267 "%u 2^N hits, %u breaks, %u lost",
52268- atomic_read(&sbi->s_bal_ex_scanned),
52269- atomic_read(&sbi->s_bal_goals),
52270- atomic_read(&sbi->s_bal_2orders),
52271- atomic_read(&sbi->s_bal_breaks),
52272- atomic_read(&sbi->s_mb_lost_chunks));
52273+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
52274+ atomic_read_unchecked(&sbi->s_bal_goals),
52275+ atomic_read_unchecked(&sbi->s_bal_2orders),
52276+ atomic_read_unchecked(&sbi->s_bal_breaks),
52277+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
52278 ext4_msg(sb, KERN_INFO,
52279 "mballoc: %lu generated and it took %Lu",
52280 sbi->s_mb_buddies_generated,
52281 sbi->s_mb_generation_time);
52282 ext4_msg(sb, KERN_INFO,
52283 "mballoc: %u preallocated, %u discarded",
52284- atomic_read(&sbi->s_mb_preallocated),
52285- atomic_read(&sbi->s_mb_discarded));
52286+ atomic_read_unchecked(&sbi->s_mb_preallocated),
52287+ atomic_read_unchecked(&sbi->s_mb_discarded));
52288 }
52289
52290 free_percpu(sbi->s_locality_groups);
52291@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
52292 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
52293
52294 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
52295- atomic_inc(&sbi->s_bal_reqs);
52296- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
52297+ atomic_inc_unchecked(&sbi->s_bal_reqs);
52298+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
52299 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
52300- atomic_inc(&sbi->s_bal_success);
52301- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
52302+ atomic_inc_unchecked(&sbi->s_bal_success);
52303+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
52304 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
52305 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
52306- atomic_inc(&sbi->s_bal_goals);
52307+ atomic_inc_unchecked(&sbi->s_bal_goals);
52308 if (ac->ac_found > sbi->s_mb_max_to_scan)
52309- atomic_inc(&sbi->s_bal_breaks);
52310+ atomic_inc_unchecked(&sbi->s_bal_breaks);
52311 }
52312
52313 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
52314@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
52315 trace_ext4_mb_new_inode_pa(ac, pa);
52316
52317 ext4_mb_use_inode_pa(ac, pa);
52318- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
52319+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
52320
52321 ei = EXT4_I(ac->ac_inode);
52322 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
52323@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
52324 trace_ext4_mb_new_group_pa(ac, pa);
52325
52326 ext4_mb_use_group_pa(ac, pa);
52327- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
52328+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
52329
52330 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
52331 lg = ac->ac_lg;
52332@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
52333 * from the bitmap and continue.
52334 */
52335 }
52336- atomic_add(free, &sbi->s_mb_discarded);
52337+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
52338
52339 return err;
52340 }
52341@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
52342 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
52343 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
52344 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
52345- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
52346+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
52347 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
52348
52349 return 0;
52350diff --git a/fs/ext4/super.c b/fs/ext4/super.c
52351index 24c767d..893aa55 100644
52352--- a/fs/ext4/super.c
52353+++ b/fs/ext4/super.c
52354@@ -2429,7 +2429,7 @@ struct ext4_attr {
52355 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
52356 const char *, size_t);
52357 int offset;
52358-};
52359+} __do_const;
52360
52361 static int parse_strtoul(const char *buf,
52362 unsigned long max, unsigned long *value)
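
The __do_const added to struct ext4_attr is a PaX constify-plugin annotation: structures made almost entirely of function pointers are forced into read-only memory so the ->show/->store hooks cannot be overwritten at runtime. Without the plugin, the same effect for statically defined tables comes from plain const placement, as in this sketch:

    #include <stdio.h>

    struct attr_ops {
        long (*show)(char *buf);
    };

    static long dummy_show(char *buf) { (void)buf; return 0; }

    /* 'const' puts the function-pointer table in .rodata, the effect the
       PaX constify plugin's __do_const forces for whole struct types. */
    static const struct attr_ops example_ops = { .show = dummy_show };

    int main(void)
    {
        char buf[1];
        return (int)example_ops.show(buf);
    }
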
52363diff --git a/fs/fcntl.c b/fs/fcntl.c
52364index 71a600a..20d87b1 100644
52365--- a/fs/fcntl.c
52366+++ b/fs/fcntl.c
52367@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
52368 if (err)
52369 return err;
52370
52371+ if (gr_handle_chroot_fowner(pid, type))
52372+ return -ENOENT;
52373+ if (gr_check_protected_task_fowner(pid, type))
52374+ return -EACCES;
52375+
52376 f_modown(filp, pid, type, force);
52377 return 0;
52378 }
52379diff --git a/fs/fhandle.c b/fs/fhandle.c
52380index 999ff5c..41f4109 100644
52381--- a/fs/fhandle.c
52382+++ b/fs/fhandle.c
52383@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
52384 } else
52385 retval = 0;
52386 /* copy the mount id */
52387- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
52388- sizeof(*mnt_id)) ||
52389+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
52390 copy_to_user(ufh, handle,
52391 sizeof(struct file_handle) + handle_bytes))
52392 retval = -EFAULT;
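
For a single scalar such as mnt_id, put_user() is the more natural primitive than copy_to_user(): it takes the value rather than a kernel address, derives the transfer size from the destination pointer's type (so the size expression cannot drift out of sync with the type), and is typically a cheaper inline store. A userspace model of the two forms, assuming trivially-faultless "user memory":

#include <stdio.h>
#include <string.h>

/* Userspace model: "user memory" is just a buffer here; the real
 * primitives also validate the pointer and handle faults. */
static int copy_to_user_model(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

/* put_user takes the value, and sizeof(*(ptr)) supplies the size. */
#define put_user_model(x, ptr) \
	({ __typeof__(*(ptr)) __v = (x); \
	   copy_to_user_model((ptr), &__v, sizeof(*(ptr))); })

int main(void)
{
	int mnt_id_user = 0, mnt_id_kernel = 42;

	copy_to_user_model(&mnt_id_user, &mnt_id_kernel, sizeof(mnt_id_user));
	put_user_model(43, &mnt_id_user);
	printf("%d\n", mnt_id_user); /* 43 */
	return 0;
}
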
52393diff --git a/fs/fifo.c b/fs/fifo.c
52394index cf6f434..3d7942c 100644
52395--- a/fs/fifo.c
52396+++ b/fs/fifo.c
52397@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
52398 */
52399 filp->f_op = &read_pipefifo_fops;
52400 pipe->r_counter++;
52401- if (pipe->readers++ == 0)
52402+ if (atomic_inc_return(&pipe->readers) == 1)
52403 wake_up_partner(inode);
52404
52405- if (!pipe->writers) {
52406+ if (!atomic_read(&pipe->writers)) {
52407 if ((filp->f_flags & O_NONBLOCK)) {
52408 /* suppress POLLHUP until we have
52409 * seen a writer */
52410@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
52411 * errno=ENXIO when there is no process reading the FIFO.
52412 */
52413 ret = -ENXIO;
52414- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
52415+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
52416 goto err;
52417
52418 filp->f_op = &write_pipefifo_fops;
52419 pipe->w_counter++;
52420- if (!pipe->writers++)
52421+ if (atomic_inc_return(&pipe->writers) == 1)
52422 wake_up_partner(inode);
52423
52424- if (!pipe->readers) {
52425+ if (!atomic_read(&pipe->readers)) {
52426 if (wait_for_partner(inode, &pipe->r_counter))
52427 goto err_wr;
52428 }
52429@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
52430 */
52431 filp->f_op = &rdwr_pipefifo_fops;
52432
52433- pipe->readers++;
52434- pipe->writers++;
52435+ atomic_inc(&pipe->readers);
52436+ atomic_inc(&pipe->writers);
52437 pipe->r_counter++;
52438 pipe->w_counter++;
52439- if (pipe->readers == 1 || pipe->writers == 1)
52440+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
52441 wake_up_partner(inode);
52442 break;
52443
52444@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
52445 return 0;
52446
52447 err_rd:
52448- if (!--pipe->readers)
52449+ if (atomic_dec_and_test(&pipe->readers))
52450 wake_up_interruptible(&pipe->wait);
52451 ret = -ERESTARTSYS;
52452 goto err;
52453
52454 err_wr:
52455- if (!--pipe->writers)
52456+ if (atomic_dec_and_test(&pipe->writers))
52457 wake_up_interruptible(&pipe->wait);
52458 ret = -ERESTARTSYS;
52459 goto err;
52460
52461 err:
52462- if (!pipe->readers && !pipe->writers)
52463+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
52464 free_pipe_info(inode);
52465
52466 err_nocleanup:
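
The fifo_open() conversion is mechanical once pipe->readers and pipe->writers become atomic_t (a refcount-overflow hardening move): the post-increment test (x++ == 0) becomes (atomic_inc_return(&x) == 1), (!--x) becomes atomic_dec_and_test(&x), and bare reads become atomic_read(). The C11 snippet below checks the two equivalences that matter; the atomic type here is standard C, not the kernel's.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int readers = 0;

	/* (readers++ == 0)  <=>  (atomic_inc_return(&readers) == 1):
	 * fetch_add returns the OLD value, inc_return the NEW one. */
	assert(atomic_fetch_add(&readers, 1) + 1 == 1);

	/* (!--readers)  <=>  atomic_dec_and_test(&readers):
	 * both are true exactly when the counter reaches zero. */
	assert(atomic_fetch_sub(&readers, 1) - 1 == 0);

	printf("final: %d\n", atomic_load(&readers)); /* 0 */
	return 0;
}
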
52467diff --git a/fs/file.c b/fs/file.c
52468index 2b3570b..c57924b 100644
52469--- a/fs/file.c
52470+++ b/fs/file.c
52471@@ -16,6 +16,7 @@
52472 #include <linux/slab.h>
52473 #include <linux/vmalloc.h>
52474 #include <linux/file.h>
52475+#include <linux/security.h>
52476 #include <linux/fdtable.h>
52477 #include <linux/bitops.h>
52478 #include <linux/interrupt.h>
52479@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
52480 if (!file)
52481 return __close_fd(files, fd);
52482
52483+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
52484 if (fd >= rlimit(RLIMIT_NOFILE))
52485 return -EBADF;
52486
52487@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
52488 if (unlikely(oldfd == newfd))
52489 return -EINVAL;
52490
52491+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
52492 if (newfd >= rlimit(RLIMIT_NOFILE))
52493 return -EBADF;
52494
52495@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
52496 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
52497 {
52498 int err;
52499+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
52500 if (from >= rlimit(RLIMIT_NOFILE))
52501 return -EINVAL;
52502 err = alloc_fd(from, flags);
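
Each site in fs/file.c that bounds an fd against RLIMIT_NOFILE first calls gr_learn_resource(current, RLIMIT_NOFILE, fd, 0): in grsecurity's learning mode this records the observed resource demand so a least-privilege RBAC policy, including per-subject resource limits, can be generated from real workloads. The pattern is observe first, then enforce; the sketch below is a hypothetical shape, not the real gr_learn_resource(), whose logging backend lives in the RBAC code.

#include <stdbool.h>
#include <stdio.h>

static bool learning_enabled = true;
static unsigned long high_water[16];

/* Hypothetical learn-then-enforce hook: record the demand regardless
 * of whether the subsequent limit check rejects it. */
static void learn_resource(unsigned int res, unsigned long want)
{
	if (learning_enabled && want > high_water[res])
		high_water[res] = want;
}

static int check_fd(unsigned int fd, unsigned long rlim_nofile)
{
	learn_resource(7 /* RLIMIT_NOFILE */, fd);	/* observe first... */
	return fd >= rlim_nofile ? -9 /* -EBADF */ : 0;	/* ...then enforce */
}

int main(void)
{
	check_fd(5000, 1024);
	printf("learned NOFILE demand: %lu\n", high_water[7]); /* 5000 */
	return 0;
}
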
52503diff --git a/fs/filesystems.c b/fs/filesystems.c
52504index da165f6..3671bdb 100644
52505--- a/fs/filesystems.c
52506+++ b/fs/filesystems.c
52507@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
52508 int len = dot ? dot - name : strlen(name);
52509
52510 fs = __get_fs_type(name, len);
52511+
52512+#ifdef CONFIG_GRKERNSEC_MODHARDEN
52513+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
52514+#else
52515 if (!fs && (request_module("%.*s", len, name) == 0))
52516+#endif
52517 fs = __get_fs_type(name, len);
52518
52519 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
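
Under CONFIG_GRKERNSEC_MODHARDEN, get_fs_type() no longer feeds the raw, user-influenced filesystem name to request_module(); it calls the grsecurity-extended ___request_module() with a "grsec_modharden_fs" tag, so the module-loading policy can refuse or restrict auto-load requests triggered by unprivileged users. The dispatch is roughly the following shape; this is a hypothetical sketch, and deny_unprivileged_autoload() does not exist under that name in the real patch.

#include <errno.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical policy hook: stand-in for the real MODHARDEN logic,
 * which decides based on the calling task's credentials. */
static bool deny_unprivileged_autoload(const char *tag)
{
	(void)tag;
	return true;		/* pretend the caller is unprivileged */
}

/* Sketch of a tagged request_module(): the tag tells the policy *why*
 * the load was triggered (here: a filesystem-name lookup). */
static int request_module_tagged(const char *tag, const char *fmt, ...)
{
	va_list ap;
	char name[64];

	if (deny_unprivileged_autoload(tag))
		return -EPERM;

	va_start(ap, fmt);
	vsnprintf(name, sizeof(name), fmt, ap);
	va_end(ap);
	printf("would modprobe %s (tag %s)\n", name, tag);
	return 0;
}

int main(void)
{
	int err = request_module_tagged("grsec_modharden_fs", "%.*s", 4, "ext4fs");
	printf("err = %d\n", err);	/* -EPERM for the unprivileged caller */
	return 0;
}
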
52520diff --git a/fs/fs_struct.c b/fs/fs_struct.c
52521index fe6ca58..65318cf 100644
52522--- a/fs/fs_struct.c
52523+++ b/fs/fs_struct.c
52524@@ -4,6 +4,7 @@
52525 #include <linux/path.h>
52526 #include <linux/slab.h>
52527 #include <linux/fs_struct.h>
52528+#include <linux/grsecurity.h>
52529 #include "internal.h"
52530
52531 /*
52532@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
52533 write_seqcount_begin(&fs->seq);
52534 old_root = fs->root;
52535 fs->root = *path;
52536+ gr_set_chroot_entries(current, path);
52537 write_seqcount_end(&fs->seq);
52538 spin_unlock(&fs->lock);
52539 if (old_root.dentry)
52540@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
52541 return 1;
52542 }
52543
52544+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
52545+{
52546+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
52547+ return 0;
52548+ *p = *new;
52549+
52550+ /* This function is only called from pivot_root(). Leave our
52551+ gr_chroot_dentry and is_chrooted flags as-is, so that a
52552+ pivoted root isn't treated as a chroot
52553+ */
52554+ //gr_set_chroot_entries(task, new);
52555+
52556+ return 1;
52557+}
52558+
52559 void chroot_fs_refs(struct path *old_root, struct path *new_root)
52560 {
52561 struct task_struct *g, *p;
52562@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
52563 int hits = 0;
52564 spin_lock(&fs->lock);
52565 write_seqcount_begin(&fs->seq);
52566- hits += replace_path(&fs->root, old_root, new_root);
52567+ hits += replace_root_path(p, &fs->root, old_root, new_root);
52568 hits += replace_path(&fs->pwd, old_root, new_root);
52569 write_seqcount_end(&fs->seq);
52570 while (hits--) {
52571@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
52572 task_lock(tsk);
52573 spin_lock(&fs->lock);
52574 tsk->fs = NULL;
52575- kill = !--fs->users;
52576+ gr_clear_chroot_entries(tsk);
52577+ kill = !atomic_dec_return(&fs->users);
52578 spin_unlock(&fs->lock);
52579 task_unlock(tsk);
52580 if (kill)
52581@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
52582 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
52583 /* We don't need to lock fs - think why ;-) */
52584 if (fs) {
52585- fs->users = 1;
52586+ atomic_set(&fs->users, 1);
52587 fs->in_exec = 0;
52588 spin_lock_init(&fs->lock);
52589 seqcount_init(&fs->seq);
52590@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
52591 spin_lock(&old->lock);
52592 fs->root = old->root;
52593 path_get(&fs->root);
52594+ /* instead of calling gr_set_chroot_entries here,
52595+ we call it from every caller of this function
52596+ */
52597 fs->pwd = old->pwd;
52598 path_get(&fs->pwd);
52599 spin_unlock(&old->lock);
52600@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
52601
52602 task_lock(current);
52603 spin_lock(&fs->lock);
52604- kill = !--fs->users;
52605+ kill = !atomic_dec_return(&fs->users);
52606 current->fs = new_fs;
52607+ gr_set_chroot_entries(current, &new_fs->root);
52608 spin_unlock(&fs->lock);
52609 task_unlock(current);
52610
52611@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
52612
52613 int current_umask(void)
52614 {
52615- return current->fs->umask;
52616+ return current->fs->umask | gr_acl_umask();
52617 }
52618 EXPORT_SYMBOL(current_umask);
52619
52620 /* to be mentioned only in INIT_TASK */
52621 struct fs_struct init_fs = {
52622- .users = 1,
52623+ .users = ATOMIC_INIT(1),
52624 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
52625 .seq = SEQCNT_ZERO,
52626 .umask = 0022,
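
Besides the fs->users refcount conversion, note the current_umask() change above: it ORs in gr_acl_umask(), the umask required by the active RBAC policy (as the name suggests). Because umask bits clear permission bits, OR-ing two masks can only tighten the resulting mode, never loosen it, so a process cannot use a permissive personal umask to sidestep policy. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int task_umask = 0022;	/* process's own umask           */
	unsigned int acl_umask  = 0077;	/* umask demanded by RBAC policy */
	unsigned int mode       = 0666;	/* mode passed to open/creat     */

	unsigned int eff = task_umask | acl_umask;
	printf("effective umask %04o -> file mode %04o\n",
	       eff, mode & ~eff);	/* 0077 -> 0600 */
	return 0;
}
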
52627diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
52628index 8dcb114..b1072e2 100644
52629--- a/fs/fscache/cookie.c
52630+++ b/fs/fscache/cookie.c
52631@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
52632 parent ? (char *) parent->def->name : "<no-parent>",
52633 def->name, netfs_data);
52634
52635- fscache_stat(&fscache_n_acquires);
52636+ fscache_stat_unchecked(&fscache_n_acquires);
52637
52638 /* if there's no parent cookie, then we don't create one here either */
52639 if (!parent) {
52640- fscache_stat(&fscache_n_acquires_null);
52641+ fscache_stat_unchecked(&fscache_n_acquires_null);
52642 _leave(" [no parent]");
52643 return NULL;
52644 }
52645@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
52646 /* allocate and initialise a cookie */
52647 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
52648 if (!cookie) {
52649- fscache_stat(&fscache_n_acquires_oom);
52650+ fscache_stat_unchecked(&fscache_n_acquires_oom);
52651 _leave(" [ENOMEM]");
52652 return NULL;
52653 }
52654@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
52655
52656 switch (cookie->def->type) {
52657 case FSCACHE_COOKIE_TYPE_INDEX:
52658- fscache_stat(&fscache_n_cookie_index);
52659+ fscache_stat_unchecked(&fscache_n_cookie_index);
52660 break;
52661 case FSCACHE_COOKIE_TYPE_DATAFILE:
52662- fscache_stat(&fscache_n_cookie_data);
52663+ fscache_stat_unchecked(&fscache_n_cookie_data);
52664 break;
52665 default:
52666- fscache_stat(&fscache_n_cookie_special);
52667+ fscache_stat_unchecked(&fscache_n_cookie_special);
52668 break;
52669 }
52670
52671@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
52672 if (fscache_acquire_non_index_cookie(cookie) < 0) {
52673 atomic_dec(&parent->n_children);
52674 __fscache_cookie_put(cookie);
52675- fscache_stat(&fscache_n_acquires_nobufs);
52676+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
52677 _leave(" = NULL");
52678 return NULL;
52679 }
52680 }
52681
52682- fscache_stat(&fscache_n_acquires_ok);
52683+ fscache_stat_unchecked(&fscache_n_acquires_ok);
52684 _leave(" = %p", cookie);
52685 return cookie;
52686 }
52687@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
52688 cache = fscache_select_cache_for_object(cookie->parent);
52689 if (!cache) {
52690 up_read(&fscache_addremove_sem);
52691- fscache_stat(&fscache_n_acquires_no_cache);
52692+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
52693 _leave(" = -ENOMEDIUM [no cache]");
52694 return -ENOMEDIUM;
52695 }
52696@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
52697 object = cache->ops->alloc_object(cache, cookie);
52698 fscache_stat_d(&fscache_n_cop_alloc_object);
52699 if (IS_ERR(object)) {
52700- fscache_stat(&fscache_n_object_no_alloc);
52701+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
52702 ret = PTR_ERR(object);
52703 goto error;
52704 }
52705
52706- fscache_stat(&fscache_n_object_alloc);
52707+ fscache_stat_unchecked(&fscache_n_object_alloc);
52708
52709 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
52710
52711@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
52712
52713 _enter("{%s}", cookie->def->name);
52714
52715- fscache_stat(&fscache_n_invalidates);
52716+ fscache_stat_unchecked(&fscache_n_invalidates);
52717
52718 /* Only permit invalidation of data files. Invalidating an index will
52719 * require the caller to release all its attachments to the tree rooted
52720@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
52721 struct fscache_object *object;
52722 struct hlist_node *_p;
52723
52724- fscache_stat(&fscache_n_updates);
52725+ fscache_stat_unchecked(&fscache_n_updates);
52726
52727 if (!cookie) {
52728- fscache_stat(&fscache_n_updates_null);
52729+ fscache_stat_unchecked(&fscache_n_updates_null);
52730 _leave(" [no cookie]");
52731 return;
52732 }
52733@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
52734 struct fscache_object *object;
52735 unsigned long event;
52736
52737- fscache_stat(&fscache_n_relinquishes);
52738+ fscache_stat_unchecked(&fscache_n_relinquishes);
52739 if (retire)
52740- fscache_stat(&fscache_n_relinquishes_retire);
52741+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
52742
52743 if (!cookie) {
52744- fscache_stat(&fscache_n_relinquishes_null);
52745+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
52746 _leave(" [no cookie]");
52747 return;
52748 }
52749@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
52750
52751 /* wait for the cookie to finish being instantiated (or to fail) */
52752 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
52753- fscache_stat(&fscache_n_relinquishes_waitcrt);
52754+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
52755 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
52756 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
52757 }
52758diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
52759index ee38fef..0a326d4 100644
52760--- a/fs/fscache/internal.h
52761+++ b/fs/fscache/internal.h
52762@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
52763 * stats.c
52764 */
52765 #ifdef CONFIG_FSCACHE_STATS
52766-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52767-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52768+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52769+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52770
52771-extern atomic_t fscache_n_op_pend;
52772-extern atomic_t fscache_n_op_run;
52773-extern atomic_t fscache_n_op_enqueue;
52774-extern atomic_t fscache_n_op_deferred_release;
52775-extern atomic_t fscache_n_op_release;
52776-extern atomic_t fscache_n_op_gc;
52777-extern atomic_t fscache_n_op_cancelled;
52778-extern atomic_t fscache_n_op_rejected;
52779+extern atomic_unchecked_t fscache_n_op_pend;
52780+extern atomic_unchecked_t fscache_n_op_run;
52781+extern atomic_unchecked_t fscache_n_op_enqueue;
52782+extern atomic_unchecked_t fscache_n_op_deferred_release;
52783+extern atomic_unchecked_t fscache_n_op_release;
52784+extern atomic_unchecked_t fscache_n_op_gc;
52785+extern atomic_unchecked_t fscache_n_op_cancelled;
52786+extern atomic_unchecked_t fscache_n_op_rejected;
52787
52788-extern atomic_t fscache_n_attr_changed;
52789-extern atomic_t fscache_n_attr_changed_ok;
52790-extern atomic_t fscache_n_attr_changed_nobufs;
52791-extern atomic_t fscache_n_attr_changed_nomem;
52792-extern atomic_t fscache_n_attr_changed_calls;
52793+extern atomic_unchecked_t fscache_n_attr_changed;
52794+extern atomic_unchecked_t fscache_n_attr_changed_ok;
52795+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
52796+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
52797+extern atomic_unchecked_t fscache_n_attr_changed_calls;
52798
52799-extern atomic_t fscache_n_allocs;
52800-extern atomic_t fscache_n_allocs_ok;
52801-extern atomic_t fscache_n_allocs_wait;
52802-extern atomic_t fscache_n_allocs_nobufs;
52803-extern atomic_t fscache_n_allocs_intr;
52804-extern atomic_t fscache_n_allocs_object_dead;
52805-extern atomic_t fscache_n_alloc_ops;
52806-extern atomic_t fscache_n_alloc_op_waits;
52807+extern atomic_unchecked_t fscache_n_allocs;
52808+extern atomic_unchecked_t fscache_n_allocs_ok;
52809+extern atomic_unchecked_t fscache_n_allocs_wait;
52810+extern atomic_unchecked_t fscache_n_allocs_nobufs;
52811+extern atomic_unchecked_t fscache_n_allocs_intr;
52812+extern atomic_unchecked_t fscache_n_allocs_object_dead;
52813+extern atomic_unchecked_t fscache_n_alloc_ops;
52814+extern atomic_unchecked_t fscache_n_alloc_op_waits;
52815
52816-extern atomic_t fscache_n_retrievals;
52817-extern atomic_t fscache_n_retrievals_ok;
52818-extern atomic_t fscache_n_retrievals_wait;
52819-extern atomic_t fscache_n_retrievals_nodata;
52820-extern atomic_t fscache_n_retrievals_nobufs;
52821-extern atomic_t fscache_n_retrievals_intr;
52822-extern atomic_t fscache_n_retrievals_nomem;
52823-extern atomic_t fscache_n_retrievals_object_dead;
52824-extern atomic_t fscache_n_retrieval_ops;
52825-extern atomic_t fscache_n_retrieval_op_waits;
52826+extern atomic_unchecked_t fscache_n_retrievals;
52827+extern atomic_unchecked_t fscache_n_retrievals_ok;
52828+extern atomic_unchecked_t fscache_n_retrievals_wait;
52829+extern atomic_unchecked_t fscache_n_retrievals_nodata;
52830+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
52831+extern atomic_unchecked_t fscache_n_retrievals_intr;
52832+extern atomic_unchecked_t fscache_n_retrievals_nomem;
52833+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
52834+extern atomic_unchecked_t fscache_n_retrieval_ops;
52835+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
52836
52837-extern atomic_t fscache_n_stores;
52838-extern atomic_t fscache_n_stores_ok;
52839-extern atomic_t fscache_n_stores_again;
52840-extern atomic_t fscache_n_stores_nobufs;
52841-extern atomic_t fscache_n_stores_oom;
52842-extern atomic_t fscache_n_store_ops;
52843-extern atomic_t fscache_n_store_calls;
52844-extern atomic_t fscache_n_store_pages;
52845-extern atomic_t fscache_n_store_radix_deletes;
52846-extern atomic_t fscache_n_store_pages_over_limit;
52847+extern atomic_unchecked_t fscache_n_stores;
52848+extern atomic_unchecked_t fscache_n_stores_ok;
52849+extern atomic_unchecked_t fscache_n_stores_again;
52850+extern atomic_unchecked_t fscache_n_stores_nobufs;
52851+extern atomic_unchecked_t fscache_n_stores_oom;
52852+extern atomic_unchecked_t fscache_n_store_ops;
52853+extern atomic_unchecked_t fscache_n_store_calls;
52854+extern atomic_unchecked_t fscache_n_store_pages;
52855+extern atomic_unchecked_t fscache_n_store_radix_deletes;
52856+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
52857
52858-extern atomic_t fscache_n_store_vmscan_not_storing;
52859-extern atomic_t fscache_n_store_vmscan_gone;
52860-extern atomic_t fscache_n_store_vmscan_busy;
52861-extern atomic_t fscache_n_store_vmscan_cancelled;
52862-extern atomic_t fscache_n_store_vmscan_wait;
52863+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52864+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
52865+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
52866+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52867+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
52868
52869-extern atomic_t fscache_n_marks;
52870-extern atomic_t fscache_n_uncaches;
52871+extern atomic_unchecked_t fscache_n_marks;
52872+extern atomic_unchecked_t fscache_n_uncaches;
52873
52874-extern atomic_t fscache_n_acquires;
52875-extern atomic_t fscache_n_acquires_null;
52876-extern atomic_t fscache_n_acquires_no_cache;
52877-extern atomic_t fscache_n_acquires_ok;
52878-extern atomic_t fscache_n_acquires_nobufs;
52879-extern atomic_t fscache_n_acquires_oom;
52880+extern atomic_unchecked_t fscache_n_acquires;
52881+extern atomic_unchecked_t fscache_n_acquires_null;
52882+extern atomic_unchecked_t fscache_n_acquires_no_cache;
52883+extern atomic_unchecked_t fscache_n_acquires_ok;
52884+extern atomic_unchecked_t fscache_n_acquires_nobufs;
52885+extern atomic_unchecked_t fscache_n_acquires_oom;
52886
52887-extern atomic_t fscache_n_invalidates;
52888-extern atomic_t fscache_n_invalidates_run;
52889+extern atomic_unchecked_t fscache_n_invalidates;
52890+extern atomic_unchecked_t fscache_n_invalidates_run;
52891
52892-extern atomic_t fscache_n_updates;
52893-extern atomic_t fscache_n_updates_null;
52894-extern atomic_t fscache_n_updates_run;
52895+extern atomic_unchecked_t fscache_n_updates;
52896+extern atomic_unchecked_t fscache_n_updates_null;
52897+extern atomic_unchecked_t fscache_n_updates_run;
52898
52899-extern atomic_t fscache_n_relinquishes;
52900-extern atomic_t fscache_n_relinquishes_null;
52901-extern atomic_t fscache_n_relinquishes_waitcrt;
52902-extern atomic_t fscache_n_relinquishes_retire;
52903+extern atomic_unchecked_t fscache_n_relinquishes;
52904+extern atomic_unchecked_t fscache_n_relinquishes_null;
52905+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52906+extern atomic_unchecked_t fscache_n_relinquishes_retire;
52907
52908-extern atomic_t fscache_n_cookie_index;
52909-extern atomic_t fscache_n_cookie_data;
52910-extern atomic_t fscache_n_cookie_special;
52911+extern atomic_unchecked_t fscache_n_cookie_index;
52912+extern atomic_unchecked_t fscache_n_cookie_data;
52913+extern atomic_unchecked_t fscache_n_cookie_special;
52914
52915-extern atomic_t fscache_n_object_alloc;
52916-extern atomic_t fscache_n_object_no_alloc;
52917-extern atomic_t fscache_n_object_lookups;
52918-extern atomic_t fscache_n_object_lookups_negative;
52919-extern atomic_t fscache_n_object_lookups_positive;
52920-extern atomic_t fscache_n_object_lookups_timed_out;
52921-extern atomic_t fscache_n_object_created;
52922-extern atomic_t fscache_n_object_avail;
52923-extern atomic_t fscache_n_object_dead;
52924+extern atomic_unchecked_t fscache_n_object_alloc;
52925+extern atomic_unchecked_t fscache_n_object_no_alloc;
52926+extern atomic_unchecked_t fscache_n_object_lookups;
52927+extern atomic_unchecked_t fscache_n_object_lookups_negative;
52928+extern atomic_unchecked_t fscache_n_object_lookups_positive;
52929+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
52930+extern atomic_unchecked_t fscache_n_object_created;
52931+extern atomic_unchecked_t fscache_n_object_avail;
52932+extern atomic_unchecked_t fscache_n_object_dead;
52933
52934-extern atomic_t fscache_n_checkaux_none;
52935-extern atomic_t fscache_n_checkaux_okay;
52936-extern atomic_t fscache_n_checkaux_update;
52937-extern atomic_t fscache_n_checkaux_obsolete;
52938+extern atomic_unchecked_t fscache_n_checkaux_none;
52939+extern atomic_unchecked_t fscache_n_checkaux_okay;
52940+extern atomic_unchecked_t fscache_n_checkaux_update;
52941+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
52942
52943 extern atomic_t fscache_n_cop_alloc_object;
52944 extern atomic_t fscache_n_cop_lookup_object;
52945@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
52946 atomic_inc(stat);
52947 }
52948
52949+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
52950+{
52951+ atomic_inc_unchecked(stat);
52952+}
52953+
52954 static inline void fscache_stat_d(atomic_t *stat)
52955 {
52956 atomic_dec(stat);
52957@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
52958
52959 #define __fscache_stat(stat) (NULL)
52960 #define fscache_stat(stat) do {} while (0)
52961+#define fscache_stat_unchecked(stat) do {} while (0)
52962 #define fscache_stat_d(stat) do {} while (0)
52963 #endif
52964
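
When CONFIG_FSCACHE_STATS is off, the hunk above defines fscache_stat_unchecked(stat), like its siblings, as do {} while (0). That idiom, rather than an empty expansion or bare {}, keeps the macro behaving as a single statement that swallows the trailing semicolon, so a brace-less if/else around a stat call still parses the way it reads; the argument is discarded entirely and never evaluated. A standalone illustration:

#include <stdio.h>

/* Compiled-out statistics: expands to one statement, argument discarded. */
#define fscache_stat_unchecked(stat) do {} while (0)

int main(void)
{
	int hit = 1, hits = 0;

	(void)hits;	/* only ever passed to the no-op macro */
	if (hit)
		fscache_stat_unchecked(&hits);	/* semicolon safely eaten */
	else
		printf("miss\n");		/* else still binds here  */
	return 0;
}
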
52965diff --git a/fs/fscache/object.c b/fs/fscache/object.c
52966index 50d41c1..10ee117 100644
52967--- a/fs/fscache/object.c
52968+++ b/fs/fscache/object.c
52969@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52970 /* Invalidate an object on disk */
52971 case FSCACHE_OBJECT_INVALIDATING:
52972 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
52973- fscache_stat(&fscache_n_invalidates_run);
52974+ fscache_stat_unchecked(&fscache_n_invalidates_run);
52975 fscache_stat(&fscache_n_cop_invalidate_object);
52976 fscache_invalidate_object(object);
52977 fscache_stat_d(&fscache_n_cop_invalidate_object);
52978@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52979 /* update the object metadata on disk */
52980 case FSCACHE_OBJECT_UPDATING:
52981 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
52982- fscache_stat(&fscache_n_updates_run);
52983+ fscache_stat_unchecked(&fscache_n_updates_run);
52984 fscache_stat(&fscache_n_cop_update_object);
52985 object->cache->ops->update_object(object);
52986 fscache_stat_d(&fscache_n_cop_update_object);
52987@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52988 spin_lock(&object->lock);
52989 object->state = FSCACHE_OBJECT_DEAD;
52990 spin_unlock(&object->lock);
52991- fscache_stat(&fscache_n_object_dead);
52992+ fscache_stat_unchecked(&fscache_n_object_dead);
52993 goto terminal_transit;
52994
52995 /* handle the parent cache of this object being withdrawn from
52996@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52997 spin_lock(&object->lock);
52998 object->state = FSCACHE_OBJECT_DEAD;
52999 spin_unlock(&object->lock);
53000- fscache_stat(&fscache_n_object_dead);
53001+ fscache_stat_unchecked(&fscache_n_object_dead);
53002 goto terminal_transit;
53003
53004 /* complain about the object being woken up once it is
53005@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
53006 parent->cookie->def->name, cookie->def->name,
53007 object->cache->tag->name);
53008
53009- fscache_stat(&fscache_n_object_lookups);
53010+ fscache_stat_unchecked(&fscache_n_object_lookups);
53011 fscache_stat(&fscache_n_cop_lookup_object);
53012 ret = object->cache->ops->lookup_object(object);
53013 fscache_stat_d(&fscache_n_cop_lookup_object);
53014@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
53015 if (ret == -ETIMEDOUT) {
53016 /* probably stuck behind another object, so move this one to
53017 * the back of the queue */
53018- fscache_stat(&fscache_n_object_lookups_timed_out);
53019+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
53020 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
53021 }
53022
53023@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
53024
53025 spin_lock(&object->lock);
53026 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
53027- fscache_stat(&fscache_n_object_lookups_negative);
53028+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
53029
53030 /* transit here to allow write requests to begin stacking up
53031 * and read requests to begin returning ENODATA */
53032@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
53033 * result, in which case there may be data available */
53034 spin_lock(&object->lock);
53035 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
53036- fscache_stat(&fscache_n_object_lookups_positive);
53037+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
53038
53039 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
53040
53041@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
53042 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
53043 } else {
53044 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
53045- fscache_stat(&fscache_n_object_created);
53046+ fscache_stat_unchecked(&fscache_n_object_created);
53047
53048 object->state = FSCACHE_OBJECT_AVAILABLE;
53049 spin_unlock(&object->lock);
53050@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
53051 fscache_enqueue_dependents(object);
53052
53053 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
53054- fscache_stat(&fscache_n_object_avail);
53055+ fscache_stat_unchecked(&fscache_n_object_avail);
53056
53057 _leave("");
53058 }
53059@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
53060 enum fscache_checkaux result;
53061
53062 if (!object->cookie->def->check_aux) {
53063- fscache_stat(&fscache_n_checkaux_none);
53064+ fscache_stat_unchecked(&fscache_n_checkaux_none);
53065 return FSCACHE_CHECKAUX_OKAY;
53066 }
53067
53068@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
53069 switch (result) {
53070 /* entry okay as is */
53071 case FSCACHE_CHECKAUX_OKAY:
53072- fscache_stat(&fscache_n_checkaux_okay);
53073+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
53074 break;
53075
53076 /* entry requires update */
53077 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
53078- fscache_stat(&fscache_n_checkaux_update);
53079+ fscache_stat_unchecked(&fscache_n_checkaux_update);
53080 break;
53081
53082 /* entry requires deletion */
53083 case FSCACHE_CHECKAUX_OBSOLETE:
53084- fscache_stat(&fscache_n_checkaux_obsolete);
53085+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
53086 break;
53087
53088 default:
53089diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
53090index 762a9ec..2023284 100644
53091--- a/fs/fscache/operation.c
53092+++ b/fs/fscache/operation.c
53093@@ -17,7 +17,7 @@
53094 #include <linux/slab.h>
53095 #include "internal.h"
53096
53097-atomic_t fscache_op_debug_id;
53098+atomic_unchecked_t fscache_op_debug_id;
53099 EXPORT_SYMBOL(fscache_op_debug_id);
53100
53101 /**
53102@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
53103 ASSERTCMP(atomic_read(&op->usage), >, 0);
53104 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
53105
53106- fscache_stat(&fscache_n_op_enqueue);
53107+ fscache_stat_unchecked(&fscache_n_op_enqueue);
53108 switch (op->flags & FSCACHE_OP_TYPE) {
53109 case FSCACHE_OP_ASYNC:
53110 _debug("queue async");
53111@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
53112 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
53113 if (op->processor)
53114 fscache_enqueue_operation(op);
53115- fscache_stat(&fscache_n_op_run);
53116+ fscache_stat_unchecked(&fscache_n_op_run);
53117 }
53118
53119 /*
53120@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
53121 if (object->n_in_progress > 0) {
53122 atomic_inc(&op->usage);
53123 list_add_tail(&op->pend_link, &object->pending_ops);
53124- fscache_stat(&fscache_n_op_pend);
53125+ fscache_stat_unchecked(&fscache_n_op_pend);
53126 } else if (!list_empty(&object->pending_ops)) {
53127 atomic_inc(&op->usage);
53128 list_add_tail(&op->pend_link, &object->pending_ops);
53129- fscache_stat(&fscache_n_op_pend);
53130+ fscache_stat_unchecked(&fscache_n_op_pend);
53131 fscache_start_operations(object);
53132 } else {
53133 ASSERTCMP(object->n_in_progress, ==, 0);
53134@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
53135 object->n_exclusive++; /* reads and writes must wait */
53136 atomic_inc(&op->usage);
53137 list_add_tail(&op->pend_link, &object->pending_ops);
53138- fscache_stat(&fscache_n_op_pend);
53139+ fscache_stat_unchecked(&fscache_n_op_pend);
53140 ret = 0;
53141 } else {
53142 /* If we're in any other state, there must have been an I/O
53143@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
53144 if (object->n_exclusive > 0) {
53145 atomic_inc(&op->usage);
53146 list_add_tail(&op->pend_link, &object->pending_ops);
53147- fscache_stat(&fscache_n_op_pend);
53148+ fscache_stat_unchecked(&fscache_n_op_pend);
53149 } else if (!list_empty(&object->pending_ops)) {
53150 atomic_inc(&op->usage);
53151 list_add_tail(&op->pend_link, &object->pending_ops);
53152- fscache_stat(&fscache_n_op_pend);
53153+ fscache_stat_unchecked(&fscache_n_op_pend);
53154 fscache_start_operations(object);
53155 } else {
53156 ASSERTCMP(object->n_exclusive, ==, 0);
53157@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
53158 object->n_ops++;
53159 atomic_inc(&op->usage);
53160 list_add_tail(&op->pend_link, &object->pending_ops);
53161- fscache_stat(&fscache_n_op_pend);
53162+ fscache_stat_unchecked(&fscache_n_op_pend);
53163 ret = 0;
53164 } else if (object->state == FSCACHE_OBJECT_DYING ||
53165 object->state == FSCACHE_OBJECT_LC_DYING ||
53166 object->state == FSCACHE_OBJECT_WITHDRAWING) {
53167- fscache_stat(&fscache_n_op_rejected);
53168+ fscache_stat_unchecked(&fscache_n_op_rejected);
53169 op->state = FSCACHE_OP_ST_CANCELLED;
53170 ret = -ENOBUFS;
53171 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
53172@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
53173 ret = -EBUSY;
53174 if (op->state == FSCACHE_OP_ST_PENDING) {
53175 ASSERT(!list_empty(&op->pend_link));
53176- fscache_stat(&fscache_n_op_cancelled);
53177+ fscache_stat_unchecked(&fscache_n_op_cancelled);
53178 list_del_init(&op->pend_link);
53179 if (do_cancel)
53180 do_cancel(op);
53181@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
53182 while (!list_empty(&object->pending_ops)) {
53183 op = list_entry(object->pending_ops.next,
53184 struct fscache_operation, pend_link);
53185- fscache_stat(&fscache_n_op_cancelled);
53186+ fscache_stat_unchecked(&fscache_n_op_cancelled);
53187 list_del_init(&op->pend_link);
53188
53189 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
53190@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
53191 op->state, ==, FSCACHE_OP_ST_CANCELLED);
53192 op->state = FSCACHE_OP_ST_DEAD;
53193
53194- fscache_stat(&fscache_n_op_release);
53195+ fscache_stat_unchecked(&fscache_n_op_release);
53196
53197 if (op->release) {
53198 op->release(op);
53199@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
53200 * lock, and defer it otherwise */
53201 if (!spin_trylock(&object->lock)) {
53202 _debug("defer put");
53203- fscache_stat(&fscache_n_op_deferred_release);
53204+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
53205
53206 cache = object->cache;
53207 spin_lock(&cache->op_gc_list_lock);
53208@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
53209
53210 _debug("GC DEFERRED REL OBJ%x OP%x",
53211 object->debug_id, op->debug_id);
53212- fscache_stat(&fscache_n_op_gc);
53213+ fscache_stat_unchecked(&fscache_n_op_gc);
53214
53215 ASSERTCMP(atomic_read(&op->usage), ==, 0);
53216 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
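
fscache_op_debug_id shows the other legitimate _unchecked user besides statistics: a sequence counter for debug IDs, where wrapping after 2^32 allocations is harmless and an overflow trap would be pure noise. A minimal allocator in that style, using C11 atomics rather than the kernel's:

#include <stdatomic.h>
#include <stdio.h>

/* Debug-ID allocator in the style of fscache_op_debug_id. */
static atomic_uint debug_id_seq;

static unsigned int new_debug_id(void)
{
	return atomic_fetch_add(&debug_id_seq, 1) + 1; /* inc_return semantics */
}

int main(void)
{
	printf("OP%x\n", new_debug_id());	/* OP1 */
	printf("OP%x\n", new_debug_id());	/* OP2 */
	printf("OP%x\n", new_debug_id());	/* OP3 */
	return 0;
}
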
53217diff --git a/fs/fscache/page.c b/fs/fscache/page.c
53218index ff000e5..c44ec6d 100644
53219--- a/fs/fscache/page.c
53220+++ b/fs/fscache/page.c
53221@@ -61,7 +61,7 @@ try_again:
53222 val = radix_tree_lookup(&cookie->stores, page->index);
53223 if (!val) {
53224 rcu_read_unlock();
53225- fscache_stat(&fscache_n_store_vmscan_not_storing);
53226+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
53227 __fscache_uncache_page(cookie, page);
53228 return true;
53229 }
53230@@ -91,11 +91,11 @@ try_again:
53231 spin_unlock(&cookie->stores_lock);
53232
53233 if (xpage) {
53234- fscache_stat(&fscache_n_store_vmscan_cancelled);
53235- fscache_stat(&fscache_n_store_radix_deletes);
53236+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
53237+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
53238 ASSERTCMP(xpage, ==, page);
53239 } else {
53240- fscache_stat(&fscache_n_store_vmscan_gone);
53241+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
53242 }
53243
53244 wake_up_bit(&cookie->flags, 0);
53245@@ -110,11 +110,11 @@ page_busy:
53246 * sleeping on memory allocation, so we may need to impose a timeout
53247 * too. */
53248 if (!(gfp & __GFP_WAIT)) {
53249- fscache_stat(&fscache_n_store_vmscan_busy);
53250+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
53251 return false;
53252 }
53253
53254- fscache_stat(&fscache_n_store_vmscan_wait);
53255+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
53256 __fscache_wait_on_page_write(cookie, page);
53257 gfp &= ~__GFP_WAIT;
53258 goto try_again;
53259@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
53260 FSCACHE_COOKIE_STORING_TAG);
53261 if (!radix_tree_tag_get(&cookie->stores, page->index,
53262 FSCACHE_COOKIE_PENDING_TAG)) {
53263- fscache_stat(&fscache_n_store_radix_deletes);
53264+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
53265 xpage = radix_tree_delete(&cookie->stores, page->index);
53266 }
53267 spin_unlock(&cookie->stores_lock);
53268@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
53269
53270 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
53271
53272- fscache_stat(&fscache_n_attr_changed_calls);
53273+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
53274
53275 if (fscache_object_is_active(object)) {
53276 fscache_stat(&fscache_n_cop_attr_changed);
53277@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
53278
53279 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
53280
53281- fscache_stat(&fscache_n_attr_changed);
53282+ fscache_stat_unchecked(&fscache_n_attr_changed);
53283
53284 op = kzalloc(sizeof(*op), GFP_KERNEL);
53285 if (!op) {
53286- fscache_stat(&fscache_n_attr_changed_nomem);
53287+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
53288 _leave(" = -ENOMEM");
53289 return -ENOMEM;
53290 }
53291@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
53292 if (fscache_submit_exclusive_op(object, op) < 0)
53293 goto nobufs;
53294 spin_unlock(&cookie->lock);
53295- fscache_stat(&fscache_n_attr_changed_ok);
53296+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
53297 fscache_put_operation(op);
53298 _leave(" = 0");
53299 return 0;
53300@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
53301 nobufs:
53302 spin_unlock(&cookie->lock);
53303 kfree(op);
53304- fscache_stat(&fscache_n_attr_changed_nobufs);
53305+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
53306 _leave(" = %d", -ENOBUFS);
53307 return -ENOBUFS;
53308 }
53309@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
53310 /* allocate a retrieval operation and attempt to submit it */
53311 op = kzalloc(sizeof(*op), GFP_NOIO);
53312 if (!op) {
53313- fscache_stat(&fscache_n_retrievals_nomem);
53314+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
53315 return NULL;
53316 }
53317
53318@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
53319 return 0;
53320 }
53321
53322- fscache_stat(&fscache_n_retrievals_wait);
53323+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
53324
53325 jif = jiffies;
53326 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
53327 fscache_wait_bit_interruptible,
53328 TASK_INTERRUPTIBLE) != 0) {
53329- fscache_stat(&fscache_n_retrievals_intr);
53330+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
53331 _leave(" = -ERESTARTSYS");
53332 return -ERESTARTSYS;
53333 }
53334@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
53335 */
53336 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
53337 struct fscache_retrieval *op,
53338- atomic_t *stat_op_waits,
53339- atomic_t *stat_object_dead)
53340+ atomic_unchecked_t *stat_op_waits,
53341+ atomic_unchecked_t *stat_object_dead)
53342 {
53343 int ret;
53344
53345@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
53346 goto check_if_dead;
53347
53348 _debug(">>> WT");
53349- fscache_stat(stat_op_waits);
53350+ fscache_stat_unchecked(stat_op_waits);
53351 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
53352 fscache_wait_bit_interruptible,
53353 TASK_INTERRUPTIBLE) != 0) {
53354@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
53355
53356 check_if_dead:
53357 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
53358- fscache_stat(stat_object_dead);
53359+ fscache_stat_unchecked(stat_object_dead);
53360 _leave(" = -ENOBUFS [cancelled]");
53361 return -ENOBUFS;
53362 }
53363 if (unlikely(fscache_object_is_dead(object))) {
53364 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
53365 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
53366- fscache_stat(stat_object_dead);
53367+ fscache_stat_unchecked(stat_object_dead);
53368 return -ENOBUFS;
53369 }
53370 return 0;
53371@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
53372
53373 _enter("%p,%p,,,", cookie, page);
53374
53375- fscache_stat(&fscache_n_retrievals);
53376+ fscache_stat_unchecked(&fscache_n_retrievals);
53377
53378 if (hlist_empty(&cookie->backing_objects))
53379 goto nobufs;
53380@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
53381 goto nobufs_unlock_dec;
53382 spin_unlock(&cookie->lock);
53383
53384- fscache_stat(&fscache_n_retrieval_ops);
53385+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
53386
53387 /* pin the netfs read context in case we need to do the actual netfs
53388 * read because we've encountered a cache read failure */
53389@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
53390
53391 error:
53392 if (ret == -ENOMEM)
53393- fscache_stat(&fscache_n_retrievals_nomem);
53394+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
53395 else if (ret == -ERESTARTSYS)
53396- fscache_stat(&fscache_n_retrievals_intr);
53397+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
53398 else if (ret == -ENODATA)
53399- fscache_stat(&fscache_n_retrievals_nodata);
53400+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
53401 else if (ret < 0)
53402- fscache_stat(&fscache_n_retrievals_nobufs);
53403+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
53404 else
53405- fscache_stat(&fscache_n_retrievals_ok);
53406+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
53407
53408 fscache_put_retrieval(op);
53409 _leave(" = %d", ret);
53410@@ -467,7 +467,7 @@ nobufs_unlock:
53411 spin_unlock(&cookie->lock);
53412 kfree(op);
53413 nobufs:
53414- fscache_stat(&fscache_n_retrievals_nobufs);
53415+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
53416 _leave(" = -ENOBUFS");
53417 return -ENOBUFS;
53418 }
53419@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
53420
53421 _enter("%p,,%d,,,", cookie, *nr_pages);
53422
53423- fscache_stat(&fscache_n_retrievals);
53424+ fscache_stat_unchecked(&fscache_n_retrievals);
53425
53426 if (hlist_empty(&cookie->backing_objects))
53427 goto nobufs;
53428@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
53429 goto nobufs_unlock_dec;
53430 spin_unlock(&cookie->lock);
53431
53432- fscache_stat(&fscache_n_retrieval_ops);
53433+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
53434
53435 /* pin the netfs read context in case we need to do the actual netfs
53436 * read because we've encountered a cache read failure */
53437@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
53438
53439 error:
53440 if (ret == -ENOMEM)
53441- fscache_stat(&fscache_n_retrievals_nomem);
53442+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
53443 else if (ret == -ERESTARTSYS)
53444- fscache_stat(&fscache_n_retrievals_intr);
53445+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
53446 else if (ret == -ENODATA)
53447- fscache_stat(&fscache_n_retrievals_nodata);
53448+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
53449 else if (ret < 0)
53450- fscache_stat(&fscache_n_retrievals_nobufs);
53451+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
53452 else
53453- fscache_stat(&fscache_n_retrievals_ok);
53454+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
53455
53456 fscache_put_retrieval(op);
53457 _leave(" = %d", ret);
53458@@ -591,7 +591,7 @@ nobufs_unlock:
53459 spin_unlock(&cookie->lock);
53460 kfree(op);
53461 nobufs:
53462- fscache_stat(&fscache_n_retrievals_nobufs);
53463+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
53464 _leave(" = -ENOBUFS");
53465 return -ENOBUFS;
53466 }
53467@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
53468
53469 _enter("%p,%p,,,", cookie, page);
53470
53471- fscache_stat(&fscache_n_allocs);
53472+ fscache_stat_unchecked(&fscache_n_allocs);
53473
53474 if (hlist_empty(&cookie->backing_objects))
53475 goto nobufs;
53476@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
53477 goto nobufs_unlock;
53478 spin_unlock(&cookie->lock);
53479
53480- fscache_stat(&fscache_n_alloc_ops);
53481+ fscache_stat_unchecked(&fscache_n_alloc_ops);
53482
53483 ret = fscache_wait_for_retrieval_activation(
53484 object, op,
53485@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
53486
53487 error:
53488 if (ret == -ERESTARTSYS)
53489- fscache_stat(&fscache_n_allocs_intr);
53490+ fscache_stat_unchecked(&fscache_n_allocs_intr);
53491 else if (ret < 0)
53492- fscache_stat(&fscache_n_allocs_nobufs);
53493+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
53494 else
53495- fscache_stat(&fscache_n_allocs_ok);
53496+ fscache_stat_unchecked(&fscache_n_allocs_ok);
53497
53498 fscache_put_retrieval(op);
53499 _leave(" = %d", ret);
53500@@ -677,7 +677,7 @@ nobufs_unlock:
53501 spin_unlock(&cookie->lock);
53502 kfree(op);
53503 nobufs:
53504- fscache_stat(&fscache_n_allocs_nobufs);
53505+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
53506 _leave(" = -ENOBUFS");
53507 return -ENOBUFS;
53508 }
53509@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
53510
53511 spin_lock(&cookie->stores_lock);
53512
53513- fscache_stat(&fscache_n_store_calls);
53514+ fscache_stat_unchecked(&fscache_n_store_calls);
53515
53516 /* find a page to store */
53517 page = NULL;
53518@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
53519 page = results[0];
53520 _debug("gang %d [%lx]", n, page->index);
53521 if (page->index > op->store_limit) {
53522- fscache_stat(&fscache_n_store_pages_over_limit);
53523+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
53524 goto superseded;
53525 }
53526
53527@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
53528 spin_unlock(&cookie->stores_lock);
53529 spin_unlock(&object->lock);
53530
53531- fscache_stat(&fscache_n_store_pages);
53532+ fscache_stat_unchecked(&fscache_n_store_pages);
53533 fscache_stat(&fscache_n_cop_write_page);
53534 ret = object->cache->ops->write_page(op, page);
53535 fscache_stat_d(&fscache_n_cop_write_page);
53536@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
53537 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
53538 ASSERT(PageFsCache(page));
53539
53540- fscache_stat(&fscache_n_stores);
53541+ fscache_stat_unchecked(&fscache_n_stores);
53542
53543 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
53544 _leave(" = -ENOBUFS [invalidating]");
53545@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
53546 spin_unlock(&cookie->stores_lock);
53547 spin_unlock(&object->lock);
53548
53549- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
53550+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
53551 op->store_limit = object->store_limit;
53552
53553 if (fscache_submit_op(object, &op->op) < 0)
53554@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
53555
53556 spin_unlock(&cookie->lock);
53557 radix_tree_preload_end();
53558- fscache_stat(&fscache_n_store_ops);
53559- fscache_stat(&fscache_n_stores_ok);
53560+ fscache_stat_unchecked(&fscache_n_store_ops);
53561+ fscache_stat_unchecked(&fscache_n_stores_ok);
53562
53563 /* the work queue now carries its own ref on the object */
53564 fscache_put_operation(&op->op);
53565@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
53566 return 0;
53567
53568 already_queued:
53569- fscache_stat(&fscache_n_stores_again);
53570+ fscache_stat_unchecked(&fscache_n_stores_again);
53571 already_pending:
53572 spin_unlock(&cookie->stores_lock);
53573 spin_unlock(&object->lock);
53574 spin_unlock(&cookie->lock);
53575 radix_tree_preload_end();
53576 kfree(op);
53577- fscache_stat(&fscache_n_stores_ok);
53578+ fscache_stat_unchecked(&fscache_n_stores_ok);
53579 _leave(" = 0");
53580 return 0;
53581
53582@@ -959,14 +959,14 @@ nobufs:
53583 spin_unlock(&cookie->lock);
53584 radix_tree_preload_end();
53585 kfree(op);
53586- fscache_stat(&fscache_n_stores_nobufs);
53587+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
53588 _leave(" = -ENOBUFS");
53589 return -ENOBUFS;
53590
53591 nomem_free:
53592 kfree(op);
53593 nomem:
53594- fscache_stat(&fscache_n_stores_oom);
53595+ fscache_stat_unchecked(&fscache_n_stores_oom);
53596 _leave(" = -ENOMEM");
53597 return -ENOMEM;
53598 }
53599@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
53600 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
53601 ASSERTCMP(page, !=, NULL);
53602
53603- fscache_stat(&fscache_n_uncaches);
53604+ fscache_stat_unchecked(&fscache_n_uncaches);
53605
53606 /* cache withdrawal may beat us to it */
53607 if (!PageFsCache(page))
53608@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
53609 struct fscache_cookie *cookie = op->op.object->cookie;
53610
53611 #ifdef CONFIG_FSCACHE_STATS
53612- atomic_inc(&fscache_n_marks);
53613+ atomic_inc_unchecked(&fscache_n_marks);
53614 #endif
53615
53616 _debug("- mark %p{%lx}", page, page->index);
53617diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
53618index 8179e8b..5072cc7 100644
53619--- a/fs/fscache/stats.c
53620+++ b/fs/fscache/stats.c
53621@@ -18,99 +18,99 @@
53622 /*
53623 * operation counters
53624 */
53625-atomic_t fscache_n_op_pend;
53626-atomic_t fscache_n_op_run;
53627-atomic_t fscache_n_op_enqueue;
53628-atomic_t fscache_n_op_requeue;
53629-atomic_t fscache_n_op_deferred_release;
53630-atomic_t fscache_n_op_release;
53631-atomic_t fscache_n_op_gc;
53632-atomic_t fscache_n_op_cancelled;
53633-atomic_t fscache_n_op_rejected;
53634+atomic_unchecked_t fscache_n_op_pend;
53635+atomic_unchecked_t fscache_n_op_run;
53636+atomic_unchecked_t fscache_n_op_enqueue;
53637+atomic_unchecked_t fscache_n_op_requeue;
53638+atomic_unchecked_t fscache_n_op_deferred_release;
53639+atomic_unchecked_t fscache_n_op_release;
53640+atomic_unchecked_t fscache_n_op_gc;
53641+atomic_unchecked_t fscache_n_op_cancelled;
53642+atomic_unchecked_t fscache_n_op_rejected;
53643
53644-atomic_t fscache_n_attr_changed;
53645-atomic_t fscache_n_attr_changed_ok;
53646-atomic_t fscache_n_attr_changed_nobufs;
53647-atomic_t fscache_n_attr_changed_nomem;
53648-atomic_t fscache_n_attr_changed_calls;
53649+atomic_unchecked_t fscache_n_attr_changed;
53650+atomic_unchecked_t fscache_n_attr_changed_ok;
53651+atomic_unchecked_t fscache_n_attr_changed_nobufs;
53652+atomic_unchecked_t fscache_n_attr_changed_nomem;
53653+atomic_unchecked_t fscache_n_attr_changed_calls;
53654
53655-atomic_t fscache_n_allocs;
53656-atomic_t fscache_n_allocs_ok;
53657-atomic_t fscache_n_allocs_wait;
53658-atomic_t fscache_n_allocs_nobufs;
53659-atomic_t fscache_n_allocs_intr;
53660-atomic_t fscache_n_allocs_object_dead;
53661-atomic_t fscache_n_alloc_ops;
53662-atomic_t fscache_n_alloc_op_waits;
53663+atomic_unchecked_t fscache_n_allocs;
53664+atomic_unchecked_t fscache_n_allocs_ok;
53665+atomic_unchecked_t fscache_n_allocs_wait;
53666+atomic_unchecked_t fscache_n_allocs_nobufs;
53667+atomic_unchecked_t fscache_n_allocs_intr;
53668+atomic_unchecked_t fscache_n_allocs_object_dead;
53669+atomic_unchecked_t fscache_n_alloc_ops;
53670+atomic_unchecked_t fscache_n_alloc_op_waits;
53671
53672-atomic_t fscache_n_retrievals;
53673-atomic_t fscache_n_retrievals_ok;
53674-atomic_t fscache_n_retrievals_wait;
53675-atomic_t fscache_n_retrievals_nodata;
53676-atomic_t fscache_n_retrievals_nobufs;
53677-atomic_t fscache_n_retrievals_intr;
53678-atomic_t fscache_n_retrievals_nomem;
53679-atomic_t fscache_n_retrievals_object_dead;
53680-atomic_t fscache_n_retrieval_ops;
53681-atomic_t fscache_n_retrieval_op_waits;
53682+atomic_unchecked_t fscache_n_retrievals;
53683+atomic_unchecked_t fscache_n_retrievals_ok;
53684+atomic_unchecked_t fscache_n_retrievals_wait;
53685+atomic_unchecked_t fscache_n_retrievals_nodata;
53686+atomic_unchecked_t fscache_n_retrievals_nobufs;
53687+atomic_unchecked_t fscache_n_retrievals_intr;
53688+atomic_unchecked_t fscache_n_retrievals_nomem;
53689+atomic_unchecked_t fscache_n_retrievals_object_dead;
53690+atomic_unchecked_t fscache_n_retrieval_ops;
53691+atomic_unchecked_t fscache_n_retrieval_op_waits;
53692
53693-atomic_t fscache_n_stores;
53694-atomic_t fscache_n_stores_ok;
53695-atomic_t fscache_n_stores_again;
53696-atomic_t fscache_n_stores_nobufs;
53697-atomic_t fscache_n_stores_oom;
53698-atomic_t fscache_n_store_ops;
53699-atomic_t fscache_n_store_calls;
53700-atomic_t fscache_n_store_pages;
53701-atomic_t fscache_n_store_radix_deletes;
53702-atomic_t fscache_n_store_pages_over_limit;
53703+atomic_unchecked_t fscache_n_stores;
53704+atomic_unchecked_t fscache_n_stores_ok;
53705+atomic_unchecked_t fscache_n_stores_again;
53706+atomic_unchecked_t fscache_n_stores_nobufs;
53707+atomic_unchecked_t fscache_n_stores_oom;
53708+atomic_unchecked_t fscache_n_store_ops;
53709+atomic_unchecked_t fscache_n_store_calls;
53710+atomic_unchecked_t fscache_n_store_pages;
53711+atomic_unchecked_t fscache_n_store_radix_deletes;
53712+atomic_unchecked_t fscache_n_store_pages_over_limit;
53713
53714-atomic_t fscache_n_store_vmscan_not_storing;
53715-atomic_t fscache_n_store_vmscan_gone;
53716-atomic_t fscache_n_store_vmscan_busy;
53717-atomic_t fscache_n_store_vmscan_cancelled;
53718-atomic_t fscache_n_store_vmscan_wait;
53719+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
53720+atomic_unchecked_t fscache_n_store_vmscan_gone;
53721+atomic_unchecked_t fscache_n_store_vmscan_busy;
53722+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
53723+atomic_unchecked_t fscache_n_store_vmscan_wait;
53724
53725-atomic_t fscache_n_marks;
53726-atomic_t fscache_n_uncaches;
53727+atomic_unchecked_t fscache_n_marks;
53728+atomic_unchecked_t fscache_n_uncaches;
53729
53730-atomic_t fscache_n_acquires;
53731-atomic_t fscache_n_acquires_null;
53732-atomic_t fscache_n_acquires_no_cache;
53733-atomic_t fscache_n_acquires_ok;
53734-atomic_t fscache_n_acquires_nobufs;
53735-atomic_t fscache_n_acquires_oom;
53736+atomic_unchecked_t fscache_n_acquires;
53737+atomic_unchecked_t fscache_n_acquires_null;
53738+atomic_unchecked_t fscache_n_acquires_no_cache;
53739+atomic_unchecked_t fscache_n_acquires_ok;
53740+atomic_unchecked_t fscache_n_acquires_nobufs;
53741+atomic_unchecked_t fscache_n_acquires_oom;
53742
53743-atomic_t fscache_n_invalidates;
53744-atomic_t fscache_n_invalidates_run;
53745+atomic_unchecked_t fscache_n_invalidates;
53746+atomic_unchecked_t fscache_n_invalidates_run;
53747
53748-atomic_t fscache_n_updates;
53749-atomic_t fscache_n_updates_null;
53750-atomic_t fscache_n_updates_run;
53751+atomic_unchecked_t fscache_n_updates;
53752+atomic_unchecked_t fscache_n_updates_null;
53753+atomic_unchecked_t fscache_n_updates_run;
53754
53755-atomic_t fscache_n_relinquishes;
53756-atomic_t fscache_n_relinquishes_null;
53757-atomic_t fscache_n_relinquishes_waitcrt;
53758-atomic_t fscache_n_relinquishes_retire;
53759+atomic_unchecked_t fscache_n_relinquishes;
53760+atomic_unchecked_t fscache_n_relinquishes_null;
53761+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
53762+atomic_unchecked_t fscache_n_relinquishes_retire;
53763
53764-atomic_t fscache_n_cookie_index;
53765-atomic_t fscache_n_cookie_data;
53766-atomic_t fscache_n_cookie_special;
53767+atomic_unchecked_t fscache_n_cookie_index;
53768+atomic_unchecked_t fscache_n_cookie_data;
53769+atomic_unchecked_t fscache_n_cookie_special;
53770
53771-atomic_t fscache_n_object_alloc;
53772-atomic_t fscache_n_object_no_alloc;
53773-atomic_t fscache_n_object_lookups;
53774-atomic_t fscache_n_object_lookups_negative;
53775-atomic_t fscache_n_object_lookups_positive;
53776-atomic_t fscache_n_object_lookups_timed_out;
53777-atomic_t fscache_n_object_created;
53778-atomic_t fscache_n_object_avail;
53779-atomic_t fscache_n_object_dead;
53780+atomic_unchecked_t fscache_n_object_alloc;
53781+atomic_unchecked_t fscache_n_object_no_alloc;
53782+atomic_unchecked_t fscache_n_object_lookups;
53783+atomic_unchecked_t fscache_n_object_lookups_negative;
53784+atomic_unchecked_t fscache_n_object_lookups_positive;
53785+atomic_unchecked_t fscache_n_object_lookups_timed_out;
53786+atomic_unchecked_t fscache_n_object_created;
53787+atomic_unchecked_t fscache_n_object_avail;
53788+atomic_unchecked_t fscache_n_object_dead;
53789
53790-atomic_t fscache_n_checkaux_none;
53791-atomic_t fscache_n_checkaux_okay;
53792-atomic_t fscache_n_checkaux_update;
53793-atomic_t fscache_n_checkaux_obsolete;
53794+atomic_unchecked_t fscache_n_checkaux_none;
53795+atomic_unchecked_t fscache_n_checkaux_okay;
53796+atomic_unchecked_t fscache_n_checkaux_update;
53797+atomic_unchecked_t fscache_n_checkaux_obsolete;
53798
53799 atomic_t fscache_n_cop_alloc_object;
53800 atomic_t fscache_n_cop_lookup_object;
53801@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
53802 seq_puts(m, "FS-Cache statistics\n");
53803
53804 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
53805- atomic_read(&fscache_n_cookie_index),
53806- atomic_read(&fscache_n_cookie_data),
53807- atomic_read(&fscache_n_cookie_special));
53808+ atomic_read_unchecked(&fscache_n_cookie_index),
53809+ atomic_read_unchecked(&fscache_n_cookie_data),
53810+ atomic_read_unchecked(&fscache_n_cookie_special));
53811
53812 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
53813- atomic_read(&fscache_n_object_alloc),
53814- atomic_read(&fscache_n_object_no_alloc),
53815- atomic_read(&fscache_n_object_avail),
53816- atomic_read(&fscache_n_object_dead));
53817+ atomic_read_unchecked(&fscache_n_object_alloc),
53818+ atomic_read_unchecked(&fscache_n_object_no_alloc),
53819+ atomic_read_unchecked(&fscache_n_object_avail),
53820+ atomic_read_unchecked(&fscache_n_object_dead));
53821 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
53822- atomic_read(&fscache_n_checkaux_none),
53823- atomic_read(&fscache_n_checkaux_okay),
53824- atomic_read(&fscache_n_checkaux_update),
53825- atomic_read(&fscache_n_checkaux_obsolete));
53826+ atomic_read_unchecked(&fscache_n_checkaux_none),
53827+ atomic_read_unchecked(&fscache_n_checkaux_okay),
53828+ atomic_read_unchecked(&fscache_n_checkaux_update),
53829+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
53830
53831 seq_printf(m, "Pages : mrk=%u unc=%u\n",
53832- atomic_read(&fscache_n_marks),
53833- atomic_read(&fscache_n_uncaches));
53834+ atomic_read_unchecked(&fscache_n_marks),
53835+ atomic_read_unchecked(&fscache_n_uncaches));
53836
53837 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
53838 " oom=%u\n",
53839- atomic_read(&fscache_n_acquires),
53840- atomic_read(&fscache_n_acquires_null),
53841- atomic_read(&fscache_n_acquires_no_cache),
53842- atomic_read(&fscache_n_acquires_ok),
53843- atomic_read(&fscache_n_acquires_nobufs),
53844- atomic_read(&fscache_n_acquires_oom));
53845+ atomic_read_unchecked(&fscache_n_acquires),
53846+ atomic_read_unchecked(&fscache_n_acquires_null),
53847+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
53848+ atomic_read_unchecked(&fscache_n_acquires_ok),
53849+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
53850+ atomic_read_unchecked(&fscache_n_acquires_oom));
53851
53852 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
53853- atomic_read(&fscache_n_object_lookups),
53854- atomic_read(&fscache_n_object_lookups_negative),
53855- atomic_read(&fscache_n_object_lookups_positive),
53856- atomic_read(&fscache_n_object_created),
53857- atomic_read(&fscache_n_object_lookups_timed_out));
53858+ atomic_read_unchecked(&fscache_n_object_lookups),
53859+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
53860+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
53861+ atomic_read_unchecked(&fscache_n_object_created),
53862+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
53863
53864 seq_printf(m, "Invals : n=%u run=%u\n",
53865- atomic_read(&fscache_n_invalidates),
53866- atomic_read(&fscache_n_invalidates_run));
53867+ atomic_read_unchecked(&fscache_n_invalidates),
53868+ atomic_read_unchecked(&fscache_n_invalidates_run));
53869
53870 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
53871- atomic_read(&fscache_n_updates),
53872- atomic_read(&fscache_n_updates_null),
53873- atomic_read(&fscache_n_updates_run));
53874+ atomic_read_unchecked(&fscache_n_updates),
53875+ atomic_read_unchecked(&fscache_n_updates_null),
53876+ atomic_read_unchecked(&fscache_n_updates_run));
53877
53878 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
53879- atomic_read(&fscache_n_relinquishes),
53880- atomic_read(&fscache_n_relinquishes_null),
53881- atomic_read(&fscache_n_relinquishes_waitcrt),
53882- atomic_read(&fscache_n_relinquishes_retire));
53883+ atomic_read_unchecked(&fscache_n_relinquishes),
53884+ atomic_read_unchecked(&fscache_n_relinquishes_null),
53885+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
53886+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
53887
53888 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
53889- atomic_read(&fscache_n_attr_changed),
53890- atomic_read(&fscache_n_attr_changed_ok),
53891- atomic_read(&fscache_n_attr_changed_nobufs),
53892- atomic_read(&fscache_n_attr_changed_nomem),
53893- atomic_read(&fscache_n_attr_changed_calls));
53894+ atomic_read_unchecked(&fscache_n_attr_changed),
53895+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
53896+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
53897+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
53898+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
53899
53900 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
53901- atomic_read(&fscache_n_allocs),
53902- atomic_read(&fscache_n_allocs_ok),
53903- atomic_read(&fscache_n_allocs_wait),
53904- atomic_read(&fscache_n_allocs_nobufs),
53905- atomic_read(&fscache_n_allocs_intr));
53906+ atomic_read_unchecked(&fscache_n_allocs),
53907+ atomic_read_unchecked(&fscache_n_allocs_ok),
53908+ atomic_read_unchecked(&fscache_n_allocs_wait),
53909+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
53910+ atomic_read_unchecked(&fscache_n_allocs_intr));
53911 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
53912- atomic_read(&fscache_n_alloc_ops),
53913- atomic_read(&fscache_n_alloc_op_waits),
53914- atomic_read(&fscache_n_allocs_object_dead));
53915+ atomic_read_unchecked(&fscache_n_alloc_ops),
53916+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
53917+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
53918
53919 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
53920 " int=%u oom=%u\n",
53921- atomic_read(&fscache_n_retrievals),
53922- atomic_read(&fscache_n_retrievals_ok),
53923- atomic_read(&fscache_n_retrievals_wait),
53924- atomic_read(&fscache_n_retrievals_nodata),
53925- atomic_read(&fscache_n_retrievals_nobufs),
53926- atomic_read(&fscache_n_retrievals_intr),
53927- atomic_read(&fscache_n_retrievals_nomem));
53928+ atomic_read_unchecked(&fscache_n_retrievals),
53929+ atomic_read_unchecked(&fscache_n_retrievals_ok),
53930+ atomic_read_unchecked(&fscache_n_retrievals_wait),
53931+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
53932+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
53933+ atomic_read_unchecked(&fscache_n_retrievals_intr),
53934+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
53935 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
53936- atomic_read(&fscache_n_retrieval_ops),
53937- atomic_read(&fscache_n_retrieval_op_waits),
53938- atomic_read(&fscache_n_retrievals_object_dead));
53939+ atomic_read_unchecked(&fscache_n_retrieval_ops),
53940+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
53941+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
53942
53943 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
53944- atomic_read(&fscache_n_stores),
53945- atomic_read(&fscache_n_stores_ok),
53946- atomic_read(&fscache_n_stores_again),
53947- atomic_read(&fscache_n_stores_nobufs),
53948- atomic_read(&fscache_n_stores_oom));
53949+ atomic_read_unchecked(&fscache_n_stores),
53950+ atomic_read_unchecked(&fscache_n_stores_ok),
53951+ atomic_read_unchecked(&fscache_n_stores_again),
53952+ atomic_read_unchecked(&fscache_n_stores_nobufs),
53953+ atomic_read_unchecked(&fscache_n_stores_oom));
53954 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
53955- atomic_read(&fscache_n_store_ops),
53956- atomic_read(&fscache_n_store_calls),
53957- atomic_read(&fscache_n_store_pages),
53958- atomic_read(&fscache_n_store_radix_deletes),
53959- atomic_read(&fscache_n_store_pages_over_limit));
53960+ atomic_read_unchecked(&fscache_n_store_ops),
53961+ atomic_read_unchecked(&fscache_n_store_calls),
53962+ atomic_read_unchecked(&fscache_n_store_pages),
53963+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
53964+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
53965
53966 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
53967- atomic_read(&fscache_n_store_vmscan_not_storing),
53968- atomic_read(&fscache_n_store_vmscan_gone),
53969- atomic_read(&fscache_n_store_vmscan_busy),
53970- atomic_read(&fscache_n_store_vmscan_cancelled),
53971- atomic_read(&fscache_n_store_vmscan_wait));
53972+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
53973+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
53974+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
53975+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
53976+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
53977
53978 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
53979- atomic_read(&fscache_n_op_pend),
53980- atomic_read(&fscache_n_op_run),
53981- atomic_read(&fscache_n_op_enqueue),
53982- atomic_read(&fscache_n_op_cancelled),
53983- atomic_read(&fscache_n_op_rejected));
53984+ atomic_read_unchecked(&fscache_n_op_pend),
53985+ atomic_read_unchecked(&fscache_n_op_run),
53986+ atomic_read_unchecked(&fscache_n_op_enqueue),
53987+ atomic_read_unchecked(&fscache_n_op_cancelled),
53988+ atomic_read_unchecked(&fscache_n_op_rejected));
53989 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
53990- atomic_read(&fscache_n_op_deferred_release),
53991- atomic_read(&fscache_n_op_release),
53992- atomic_read(&fscache_n_op_gc));
53993+ atomic_read_unchecked(&fscache_n_op_deferred_release),
53994+ atomic_read_unchecked(&fscache_n_op_release),
53995+ atomic_read_unchecked(&fscache_n_op_gc));
53996
53997 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
53998 atomic_read(&fscache_n_cop_alloc_object),
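The fscache hunks above are one instance of a sweep that runs through this whole patch: statistics counters move from atomic_t to atomic_unchecked_t. Under PaX REFCOUNT, ordinary atomic_t arithmetic traps on overflow to stop reference-count exploits; counters that never guard an object's lifetime opt out through the _unchecked API so they may wrap harmlessly. A minimal sketch of the unchecked type, assuming a GCC-builtin implementation (the in-tree versions are per-architecture):

	/* sketch only; the real kernel versions are arch-specific asm */
	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return *(volatile const int *)&v->counter;
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* no overflow trap, unlike a REFCOUNT-hardened atomic_inc() */
		__sync_fetch_and_add(&v->counter, 1);
	}

atomic_read_unchecked() is the matching reader used in the seq_printf() dump above, so the /proc/fs/fscache/stats output is unchanged.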
53999diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
54000index e397b67..b0d8709 100644
54001--- a/fs/fuse/cuse.c
54002+++ b/fs/fuse/cuse.c
54003@@ -593,10 +593,12 @@ static int __init cuse_init(void)
54004 INIT_LIST_HEAD(&cuse_conntbl[i]);
54005
54006 /* inherit and extend fuse_dev_operations */
54007- cuse_channel_fops = fuse_dev_operations;
54008- cuse_channel_fops.owner = THIS_MODULE;
54009- cuse_channel_fops.open = cuse_channel_open;
54010- cuse_channel_fops.release = cuse_channel_release;
54011+ pax_open_kernel();
54012+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
54013+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
54014+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
54015+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
54016+ pax_close_kernel();
54017
54018 cuse_class = class_create(THIS_MODULE, "cuse");
54019 if (IS_ERR(cuse_class))
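With the constify plugin active, cuse_channel_fops becomes effectively const, so the old field-by-field assignment can no longer work; the replacement copies the fuse template and patches individual pointers inside a pax_open_kernel()/pax_close_kernel() window, which briefly lifts kernel write protection (on x86 by toggling CR0.WP) and is why the casts through (void *) are needed. A userspace analogue of the same open-write-close dance, assuming the object fits in one page and skipping error handling:

	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* make a read-only object writable, update it, re-protect it */
	static void write_protected(void *obj, const void *src, size_t len)
	{
		unsigned long pg = (unsigned long)sysconf(_SC_PAGESIZE);
		void *page = (void *)((unsigned long)obj & ~(pg - 1));

		mprotect(page, pg, PROT_READ | PROT_WRITE);	/* "open" */
		memcpy(obj, src, len);
		mprotect(page, pg, PROT_READ);			/* "close" */
	}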
54020diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
54021index e83351a..41e3c9c 100644
54022--- a/fs/fuse/dev.c
54023+++ b/fs/fuse/dev.c
54024@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
54025 ret = 0;
54026 pipe_lock(pipe);
54027
54028- if (!pipe->readers) {
54029+ if (!atomic_read(&pipe->readers)) {
54030 send_sig(SIGPIPE, current, 0);
54031 if (!ret)
54032 ret = -EPIPE;
54033diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
54034index 315e1f8..91f890c 100644
54035--- a/fs/fuse/dir.c
54036+++ b/fs/fuse/dir.c
54037@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
54038 return link;
54039 }
54040
54041-static void free_link(char *link)
54042+static void free_link(const char *link)
54043 {
54044 if (!IS_ERR(link))
54045 free_page((unsigned long) link);
54046diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
54047index 2b6f569..fcb4d1f 100644
54048--- a/fs/gfs2/inode.c
54049+++ b/fs/gfs2/inode.c
54050@@ -1499,7 +1499,7 @@ out:
54051
54052 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54053 {
54054- char *s = nd_get_link(nd);
54055+ const char *s = nd_get_link(nd);
54056 if (!IS_ERR(s))
54057 kfree(s);
54058 }
54059diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
54060index ccee8cc..144b5d7 100644
54061--- a/fs/hugetlbfs/inode.c
54062+++ b/fs/hugetlbfs/inode.c
54063@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
54064 struct mm_struct *mm = current->mm;
54065 struct vm_area_struct *vma;
54066 struct hstate *h = hstate_file(file);
54067+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
54068 struct vm_unmapped_area_info info;
54069
54070 if (len & ~huge_page_mask(h))
54071@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
54072 return addr;
54073 }
54074
54075+#ifdef CONFIG_PAX_RANDMMAP
54076+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
54077+#endif
54078+
54079 if (addr) {
54080 addr = ALIGN(addr, huge_page_size(h));
54081 vma = find_vma(mm, addr);
54082- if (TASK_SIZE - len >= addr &&
54083- (!vma || addr + len <= vma->vm_start))
54084+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
54085 return addr;
54086 }
54087
54088 info.flags = 0;
54089 info.length = len;
54090 info.low_limit = TASK_UNMAPPED_BASE;
54091+
54092+#ifdef CONFIG_PAX_RANDMMAP
54093+ if (mm->pax_flags & MF_PAX_RANDMMAP)
54094+ info.low_limit += mm->delta_mmap;
54095+#endif
54096+
54097 info.high_limit = TASK_SIZE;
54098 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
54099 info.align_offset = 0;
54100@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
54101 .kill_sb = kill_litter_super,
54102 };
54103
54104-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
54105+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
54106
54107 static int can_do_hugetlb_shm(void)
54108 {
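Two hardening knobs land in the hugetlbfs search for free address space: PAX_RANDMMAP biases the bottom of the search window by the per-process mm->delta_mmap, and check_heap_stack_gap() (fed by gr_rand_threadstack_offset()) enforces a randomized guard gap between the candidate mapping and the stack. Note also that an explicit addr hint is only honoured when RANDMMAP is off for the task, so a fixed hint cannot defeat the randomized base. The window arithmetic in isolation, as a sketch using the field names from the hunk:

	unsigned long low_limit = TASK_UNMAPPED_BASE;

	#ifdef CONFIG_PAX_RANDMMAP
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			low_limit += mm->delta_mmap;	/* fixed per exec() */
	#endif
	/* high_limit stays at TASK_SIZE; only the floor is shifted */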
54109diff --git a/fs/inode.c b/fs/inode.c
54110index b98540e..6a439ea 100644
54111--- a/fs/inode.c
54112+++ b/fs/inode.c
54113@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
54114
54115 #ifdef CONFIG_SMP
54116 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
54117- static atomic_t shared_last_ino;
54118- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
54119+ static atomic_unchecked_t shared_last_ino;
54120+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
54121
54122 res = next - LAST_INO_BATCH;
54123 }
54124diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
54125index 4a6cf28..d3a29d3 100644
54126--- a/fs/jffs2/erase.c
54127+++ b/fs/jffs2/erase.c
54128@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
54129 struct jffs2_unknown_node marker = {
54130 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
54131 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
54132- .totlen = cpu_to_je32(c->cleanmarker_size)
54133+ .totlen = cpu_to_je32(c->cleanmarker_size),
54134+ .hdr_crc = cpu_to_je32(0)
54135 };
54136
54137 jffs2_prealloc_raw_node_refs(c, jeb, 1);
54138diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
54139index a6597d6..41b30ec 100644
54140--- a/fs/jffs2/wbuf.c
54141+++ b/fs/jffs2/wbuf.c
54142@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
54143 {
54144 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
54145 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
54146- .totlen = constant_cpu_to_je32(8)
54147+ .totlen = constant_cpu_to_je32(8),
54148+ .hdr_crc = constant_cpu_to_je32(0)
54149 };
54150
54151 /*
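Both jffs2 hunks are behaviour-neutral: in a designated initializer, members left unnamed are zero-initialized anyway (C99 6.7.8p21), so adding .hdr_crc = cpu_to_je32(0) makes the cleanmarker's zero CRC explicit rather than implied. A standalone check of that language rule:

	#include <assert.h>

	struct node { unsigned magic, nodetype, totlen, hdr_crc; };

	int main(void)
	{
		struct node a = { .magic = 0x1985, .totlen = 8 };
		struct node b = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };

		assert(a.hdr_crc == 0);	/* implicitly zeroed */
		assert(b.hdr_crc == 0);	/* explicitly zeroed */
		return 0;
	}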
54152diff --git a/fs/jfs/super.c b/fs/jfs/super.c
54153index 1a543be..a4e1363 100644
54154--- a/fs/jfs/super.c
54155+++ b/fs/jfs/super.c
54156@@ -225,7 +225,7 @@ static const match_table_t tokens = {
54157 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
54158 int *flag)
54159 {
54160- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
54161+ const void *nls_map = (const void *)-1; /* -1: no change; NULL: none */
54162 char *p;
54163 struct jfs_sb_info *sbi = JFS_SBI(sb);
54164
54165@@ -253,7 +253,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
54166 /* Don't do anything ;-) */
54167 break;
54168 case Opt_iocharset:
54169- if (nls_map && nls_map != (void *) -1)
54170+ if (nls_map && nls_map != (const void *) -1)
54171 unload_nls(nls_map);
54172 if (!strcmp(args[0].from, "none"))
54173 nls_map = NULL;
54174@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
54175
54176 jfs_inode_cachep =
54177 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
54178- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
54179+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
54180 init_once);
54181 if (jfs_inode_cachep == NULL)
54182 return -ENOMEM;
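SLAB_USERCOPY is the PAX_USERCOPY whitelist flag: with that feature enabled, copies between slab memory and userspace are refused unless the cache is explicitly marked, and the copy must stay within a single object. jfs_ip objects hold inline data that does get copied out, hence the flag on the cache. Roughly the shape of the enforcement (helper names here are illustrative, not the in-tree ones):

	static int usercopy_ok(const void *kptr, size_t len)
	{
		struct kmem_cache *s = lookup_cache(kptr);	/* illustrative */

		if (!s)
			return 1;			/* not slab memory */
		if (!(s->flags & SLAB_USERCOPY))
			return 0;			/* cache not whitelisted */
		return object_span_ok(s, kptr, len);	/* one object only */
	}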
54183diff --git a/fs/libfs.c b/fs/libfs.c
54184index 916da8c..1588998 100644
54185--- a/fs/libfs.c
54186+++ b/fs/libfs.c
54187@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
54188
54189 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
54190 struct dentry *next;
54191+ char d_name[sizeof(next->d_iname)];
54192+ const unsigned char *name;
54193+
54194 next = list_entry(p, struct dentry, d_u.d_child);
54195 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
54196 if (!simple_positive(next)) {
54197@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
54198
54199 spin_unlock(&next->d_lock);
54200 spin_unlock(&dentry->d_lock);
54201- if (filldir(dirent, next->d_name.name,
54202+ name = next->d_name.name;
54203+ if (name == next->d_iname) {
54204+ memcpy(d_name, name, next->d_name.len);
54205+ name = d_name;
54206+ }
54207+ if (filldir(dirent, name,
54208 next->d_name.len, filp->f_pos,
54209 next->d_inode->i_ino,
54210 dt_type(next->d_inode)) < 0)
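The dcache_readdir() change shields the filldir callback, which copies to userspace and may block: when the entry's short name is stored inline in the dentry (d_iname), a concurrent rename or dentry teardown could change the bytes behind the name pointer mid-copy, so the name is snapshotted into a stack buffer first. The pattern in isolation, as a runnable sketch:

	#include <stdio.h>
	#include <string.h>

	#define INLINE_LEN 40

	struct entry {
		const char *name;		/* may alias iname */
		int len;
		char iname[INLINE_LEN];		/* short names stored inline */
	};

	static void emit(struct entry *e)	/* stands in for filldir */
	{
		char snap[INLINE_LEN];
		const char *name = e->name;

		if (name == e->iname) {		/* inline: take a snapshot */
			memcpy(snap, name, e->len);
			name = snap;		/* immune to later changes */
		}
		printf("%.*s\n", e->len, name);
	}

	int main(void)
	{
		struct entry e = { .len = 5 };

		memcpy(e.iname, "hello", 5);
		e.name = e.iname;
		emit(&e);
		return 0;
	}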
54211diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
54212index 52e5120..808936e 100644
54213--- a/fs/lockd/clntproc.c
54214+++ b/fs/lockd/clntproc.c
54215@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
54216 /*
54217 * Cookie counter for NLM requests
54218 */
54219-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
54220+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
54221
54222 void nlmclnt_next_cookie(struct nlm_cookie *c)
54223 {
54224- u32 cookie = atomic_inc_return(&nlm_cookie);
54225+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
54226
54227 memcpy(c->data, &cookie, 4);
54228 c->len=4;
54229diff --git a/fs/locks.c b/fs/locks.c
54230index a94e331..060bce3 100644
54231--- a/fs/locks.c
54232+++ b/fs/locks.c
54233@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
54234 return;
54235
54236 if (filp->f_op && filp->f_op->flock) {
54237- struct file_lock fl = {
54238+ struct file_lock flock = {
54239 .fl_pid = current->tgid,
54240 .fl_file = filp,
54241 .fl_flags = FL_FLOCK,
54242 .fl_type = F_UNLCK,
54243 .fl_end = OFFSET_MAX,
54244 };
54245- filp->f_op->flock(filp, F_SETLKW, &fl);
54246- if (fl.fl_ops && fl.fl_ops->fl_release_private)
54247- fl.fl_ops->fl_release_private(&fl);
54248+ filp->f_op->flock(filp, F_SETLKW, &flock);
54249+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
54250+ flock.fl_ops->fl_release_private(&flock);
54251 }
54252
54253 lock_flocks();
54254diff --git a/fs/namei.c b/fs/namei.c
54255index ec97aef..e67718d 100644
54256--- a/fs/namei.c
54257+++ b/fs/namei.c
54258@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
54259 if (ret != -EACCES)
54260 return ret;
54261
54262+#ifdef CONFIG_GRKERNSEC
54263+ /* we'll block if we have to log due to a denied capability use */
54264+ if (mask & MAY_NOT_BLOCK)
54265+ return -ECHILD;
54266+#endif
54267+
54268 if (S_ISDIR(inode->i_mode)) {
54269 /* DACs are overridable for directories */
54270- if (inode_capable(inode, CAP_DAC_OVERRIDE))
54271- return 0;
54272 if (!(mask & MAY_WRITE))
54273- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
54274+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
54275+ inode_capable(inode, CAP_DAC_READ_SEARCH))
54276 return 0;
54277+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
54278+ return 0;
54279 return -EACCES;
54280 }
54281 /*
54282+ * Searching includes executable on directories, else just read.
54283+ */
54284+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
54285+ if (mask == MAY_READ)
54286+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
54287+ inode_capable(inode, CAP_DAC_READ_SEARCH))
54288+ return 0;
54289+
54290+ /*
54291 * Read/write DACs are always overridable.
54292 * Executable DACs are overridable when there is
54293 * at least one exec bit set.
54294@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
54295 if (inode_capable(inode, CAP_DAC_OVERRIDE))
54296 return 0;
54297
54298- /*
54299- * Searching includes executable on directories, else just read.
54300- */
54301- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
54302- if (mask == MAY_READ)
54303- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
54304- return 0;
54305-
54306 return -EACCES;
54307 }
54308
54309@@ -824,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
54310 {
54311 struct dentry *dentry = link->dentry;
54312 int error;
54313- char *s;
54314+ const char *s;
54315
54316 BUG_ON(nd->flags & LOOKUP_RCU);
54317
54318@@ -845,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
54319 if (error)
54320 goto out_put_nd_path;
54321
54322+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
54323+ dentry->d_inode, dentry, nd->path.mnt)) {
54324+ error = -EACCES;
54325+ goto out_put_nd_path;
54326+ }
54327+
54328 nd->last_type = LAST_BIND;
54329 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
54330 error = PTR_ERR(*p);
54331@@ -1594,6 +1608,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
54332 break;
54333 res = walk_component(nd, path, &nd->last,
54334 nd->last_type, LOOKUP_FOLLOW);
54335+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
54336+ res = -EACCES;
54337 put_link(nd, &link, cookie);
54338 } while (res > 0);
54339
54340@@ -1692,7 +1708,7 @@ EXPORT_SYMBOL(full_name_hash);
54341 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
54342 {
54343 unsigned long a, b, adata, bdata, mask, hash, len;
54344- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
54345+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
54346
54347 hash = a = 0;
54348 len = -sizeof(unsigned long);
54349@@ -1977,6 +1993,8 @@ static int path_lookupat(int dfd, const char *name,
54350 if (err)
54351 break;
54352 err = lookup_last(nd, &path);
54353+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
54354+ err = -EACCES;
54355 put_link(nd, &link, cookie);
54356 }
54357 }
54358@@ -1984,6 +2002,13 @@ static int path_lookupat(int dfd, const char *name,
54359 if (!err)
54360 err = complete_walk(nd);
54361
54362+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
54363+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
54364+ path_put(&nd->path);
54365+ err = -ENOENT;
54366+ }
54367+ }
54368+
54369 if (!err && nd->flags & LOOKUP_DIRECTORY) {
54370 if (!nd->inode->i_op->lookup) {
54371 path_put(&nd->path);
54372@@ -2011,8 +2036,15 @@ static int filename_lookup(int dfd, struct filename *name,
54373 retval = path_lookupat(dfd, name->name,
54374 flags | LOOKUP_REVAL, nd);
54375
54376- if (likely(!retval))
54377+ if (likely(!retval)) {
54378 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
54379+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
54380+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
54381+ path_put(&nd->path);
54382+ return -ENOENT;
54383+ }
54384+ }
54385+ }
54386 return retval;
54387 }
54388
54389@@ -2390,6 +2422,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
54390 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
54391 return -EPERM;
54392
54393+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
54394+ return -EPERM;
54395+ if (gr_handle_rawio(inode))
54396+ return -EPERM;
54397+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
54398+ return -EACCES;
54399+
54400 return 0;
54401 }
54402
54403@@ -2611,7 +2650,7 @@ looked_up:
54404 * cleared otherwise prior to returning.
54405 */
54406 static int lookup_open(struct nameidata *nd, struct path *path,
54407- struct file *file,
54408+ struct path *link, struct file *file,
54409 const struct open_flags *op,
54410 bool got_write, int *opened)
54411 {
54412@@ -2646,6 +2685,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
54413 /* Negative dentry, just create the file */
54414 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
54415 umode_t mode = op->mode;
54416+
54417+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
54418+ error = -EACCES;
54419+ goto out_dput;
54420+ }
54421+
54422+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
54423+ error = -EACCES;
54424+ goto out_dput;
54425+ }
54426+
54427 if (!IS_POSIXACL(dir->d_inode))
54428 mode &= ~current_umask();
54429 /*
54430@@ -2667,6 +2717,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
54431 nd->flags & LOOKUP_EXCL);
54432 if (error)
54433 goto out_dput;
54434+ else
54435+ gr_handle_create(dentry, nd->path.mnt);
54436 }
54437 out_no_open:
54438 path->dentry = dentry;
54439@@ -2681,7 +2733,7 @@ out_dput:
54440 /*
54441 * Handle the last step of open()
54442 */
54443-static int do_last(struct nameidata *nd, struct path *path,
54444+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
54445 struct file *file, const struct open_flags *op,
54446 int *opened, struct filename *name)
54447 {
54448@@ -2710,16 +2762,32 @@ static int do_last(struct nameidata *nd, struct path *path,
54449 error = complete_walk(nd);
54450 if (error)
54451 return error;
54452+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
54453+ error = -ENOENT;
54454+ goto out;
54455+ }
54456 audit_inode(name, nd->path.dentry, 0);
54457 if (open_flag & O_CREAT) {
54458 error = -EISDIR;
54459 goto out;
54460 }
54461+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
54462+ error = -EACCES;
54463+ goto out;
54464+ }
54465 goto finish_open;
54466 case LAST_BIND:
54467 error = complete_walk(nd);
54468 if (error)
54469 return error;
54470+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
54471+ error = -ENOENT;
54472+ goto out;
54473+ }
54474+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
54475+ error = -EACCES;
54476+ goto out;
54477+ }
54478 audit_inode(name, dir, 0);
54479 goto finish_open;
54480 }
54481@@ -2768,7 +2836,7 @@ retry_lookup:
54482 */
54483 }
54484 mutex_lock(&dir->d_inode->i_mutex);
54485- error = lookup_open(nd, path, file, op, got_write, opened);
54486+ error = lookup_open(nd, path, link, file, op, got_write, opened);
54487 mutex_unlock(&dir->d_inode->i_mutex);
54488
54489 if (error <= 0) {
54490@@ -2792,11 +2860,28 @@ retry_lookup:
54491 goto finish_open_created;
54492 }
54493
54494+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
54495+ error = -ENOENT;
54496+ goto exit_dput;
54497+ }
54498+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
54499+ error = -EACCES;
54500+ goto exit_dput;
54501+ }
54502+
54503 /*
54504 * create/update audit record if it already exists.
54505 */
54506- if (path->dentry->d_inode)
54507+ if (path->dentry->d_inode) {
54508+ /* only check if O_CREAT is specified, all other checks need to go
54509+ into may_open */
54510+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
54511+ error = -EACCES;
54512+ goto exit_dput;
54513+ }
54514+
54515 audit_inode(name, path->dentry, 0);
54516+ }
54517
54518 /*
54519 * If atomic_open() acquired write access it is dropped now due to
54520@@ -2837,6 +2922,11 @@ finish_lookup:
54521 }
54522 }
54523 BUG_ON(inode != path->dentry->d_inode);
54524+ /* if we're resolving a symlink to another symlink */
54525+ if (link && gr_handle_symlink_owner(link, inode)) {
54526+ error = -EACCES;
54527+ goto out;
54528+ }
54529 return 1;
54530 }
54531
54532@@ -2846,7 +2936,6 @@ finish_lookup:
54533 save_parent.dentry = nd->path.dentry;
54534 save_parent.mnt = mntget(path->mnt);
54535 nd->path.dentry = path->dentry;
54536-
54537 }
54538 nd->inode = inode;
54539 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
54540@@ -2855,6 +2944,16 @@ finish_lookup:
54541 path_put(&save_parent);
54542 return error;
54543 }
54544+
54545+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
54546+ error = -ENOENT;
54547+ goto out;
54548+ }
54549+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
54550+ error = -EACCES;
54551+ goto out;
54552+ }
54553+
54554 error = -EISDIR;
54555 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
54556 goto out;
54557@@ -2953,7 +3052,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
54558 if (unlikely(error))
54559 goto out;
54560
54561- error = do_last(nd, &path, file, op, &opened, pathname);
54562+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
54563 while (unlikely(error > 0)) { /* trailing symlink */
54564 struct path link = path;
54565 void *cookie;
54566@@ -2971,7 +3070,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
54567 error = follow_link(&link, nd, &cookie);
54568 if (unlikely(error))
54569 break;
54570- error = do_last(nd, &path, file, op, &opened, pathname);
54571+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
54572 put_link(nd, &link, cookie);
54573 }
54574 out:
54575@@ -3071,8 +3170,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
54576 goto unlock;
54577
54578 error = -EEXIST;
54579- if (dentry->d_inode)
54580+ if (dentry->d_inode) {
54581+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
54582+ error = -ENOENT;
54583+ }
54584 goto fail;
54585+ }
54586 /*
54587 * Special case - lookup gave negative, but... we had foo/bar/
54588 * From the vfs_mknod() POV we just have a negative dentry -
54589@@ -3124,6 +3227,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
54590 }
54591 EXPORT_SYMBOL(user_path_create);
54592
54593+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
54594+{
54595+ struct filename *tmp = getname(pathname);
54596+ struct dentry *res;
54597+ if (IS_ERR(tmp))
54598+ return ERR_CAST(tmp);
54599+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
54600+ if (IS_ERR(res))
54601+ putname(tmp);
54602+ else
54603+ *to = tmp;
54604+ return res;
54605+}
54606+
54607 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
54608 {
54609 int error = may_create(dir, dentry);
54610@@ -3186,6 +3303,17 @@ retry:
54611
54612 if (!IS_POSIXACL(path.dentry->d_inode))
54613 mode &= ~current_umask();
54614+
54615+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
54616+ error = -EPERM;
54617+ goto out;
54618+ }
54619+
54620+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
54621+ error = -EACCES;
54622+ goto out;
54623+ }
54624+
54625 error = security_path_mknod(&path, dentry, mode, dev);
54626 if (error)
54627 goto out;
54628@@ -3202,6 +3330,8 @@ retry:
54629 break;
54630 }
54631 out:
54632+ if (!error)
54633+ gr_handle_create(dentry, path.mnt);
54634 done_path_create(&path, dentry);
54635 if (retry_estale(error, lookup_flags)) {
54636 lookup_flags |= LOOKUP_REVAL;
54637@@ -3254,9 +3384,16 @@ retry:
54638
54639 if (!IS_POSIXACL(path.dentry->d_inode))
54640 mode &= ~current_umask();
54641+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
54642+ error = -EACCES;
54643+ goto out;
54644+ }
54645 error = security_path_mkdir(&path, dentry, mode);
54646 if (!error)
54647 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
54648+ if (!error)
54649+ gr_handle_create(dentry, path.mnt);
54650+out:
54651 done_path_create(&path, dentry);
54652 if (retry_estale(error, lookup_flags)) {
54653 lookup_flags |= LOOKUP_REVAL;
54654@@ -3337,6 +3474,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
54655 struct filename *name;
54656 struct dentry *dentry;
54657 struct nameidata nd;
54658+ ino_t saved_ino = 0;
54659+ dev_t saved_dev = 0;
54660 unsigned int lookup_flags = 0;
54661 retry:
54662 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
54663@@ -3369,10 +3508,21 @@ retry:
54664 error = -ENOENT;
54665 goto exit3;
54666 }
54667+
54668+ saved_ino = dentry->d_inode->i_ino;
54669+ saved_dev = gr_get_dev_from_dentry(dentry);
54670+
54671+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
54672+ error = -EACCES;
54673+ goto exit3;
54674+ }
54675+
54676 error = security_path_rmdir(&nd.path, dentry);
54677 if (error)
54678 goto exit3;
54679 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
54680+ if (!error && (saved_dev || saved_ino))
54681+ gr_handle_delete(saved_ino, saved_dev);
54682 exit3:
54683 dput(dentry);
54684 exit2:
54685@@ -3438,6 +3588,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
54686 struct dentry *dentry;
54687 struct nameidata nd;
54688 struct inode *inode = NULL;
54689+ ino_t saved_ino = 0;
54690+ dev_t saved_dev = 0;
54691 unsigned int lookup_flags = 0;
54692 retry:
54693 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
54694@@ -3464,10 +3616,22 @@ retry:
54695 if (!inode)
54696 goto slashes;
54697 ihold(inode);
54698+
54699+ if (inode->i_nlink <= 1) {
54700+ saved_ino = inode->i_ino;
54701+ saved_dev = gr_get_dev_from_dentry(dentry);
54702+ }
54703+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
54704+ error = -EACCES;
54705+ goto exit2;
54706+ }
54707+
54708 error = security_path_unlink(&nd.path, dentry);
54709 if (error)
54710 goto exit2;
54711 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
54712+ if (!error && (saved_ino || saved_dev))
54713+ gr_handle_delete(saved_ino, saved_dev);
54714 exit2:
54715 dput(dentry);
54716 }
54717@@ -3545,9 +3709,17 @@ retry:
54718 if (IS_ERR(dentry))
54719 goto out_putname;
54720
54721+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
54722+ error = -EACCES;
54723+ goto out;
54724+ }
54725+
54726 error = security_path_symlink(&path, dentry, from->name);
54727 if (!error)
54728 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
54729+ if (!error)
54730+ gr_handle_create(dentry, path.mnt);
54731+out:
54732 done_path_create(&path, dentry);
54733 if (retry_estale(error, lookup_flags)) {
54734 lookup_flags |= LOOKUP_REVAL;
54735@@ -3621,6 +3793,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
54736 {
54737 struct dentry *new_dentry;
54738 struct path old_path, new_path;
54739+ struct filename *to = NULL;
54740 int how = 0;
54741 int error;
54742
54743@@ -3644,7 +3817,7 @@ retry:
54744 if (error)
54745 return error;
54746
54747- new_dentry = user_path_create(newdfd, newname, &new_path,
54748+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
54749 (how & LOOKUP_REVAL));
54750 error = PTR_ERR(new_dentry);
54751 if (IS_ERR(new_dentry))
54752@@ -3656,11 +3829,28 @@ retry:
54753 error = may_linkat(&old_path);
54754 if (unlikely(error))
54755 goto out_dput;
54756+
54757+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
54758+ old_path.dentry->d_inode,
54759+ old_path.dentry->d_inode->i_mode, to)) {
54760+ error = -EACCES;
54761+ goto out_dput;
54762+ }
54763+
54764+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
54765+ old_path.dentry, old_path.mnt, to)) {
54766+ error = -EACCES;
54767+ goto out_dput;
54768+ }
54769+
54770 error = security_path_link(old_path.dentry, &new_path, new_dentry);
54771 if (error)
54772 goto out_dput;
54773 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
54774+ if (!error)
54775+ gr_handle_create(new_dentry, new_path.mnt);
54776 out_dput:
54777+ putname(to);
54778 done_path_create(&new_path, new_dentry);
54779 if (retry_estale(error, how)) {
54780 how |= LOOKUP_REVAL;
54781@@ -3906,12 +4096,21 @@ retry:
54782 if (new_dentry == trap)
54783 goto exit5;
54784
54785+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
54786+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
54787+ to);
54788+ if (error)
54789+ goto exit5;
54790+
54791 error = security_path_rename(&oldnd.path, old_dentry,
54792 &newnd.path, new_dentry);
54793 if (error)
54794 goto exit5;
54795 error = vfs_rename(old_dir->d_inode, old_dentry,
54796 new_dir->d_inode, new_dentry);
54797+ if (!error)
54798+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
54799+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
54800 exit5:
54801 dput(new_dentry);
54802 exit4:
54803@@ -3943,6 +4142,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
54804
54805 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
54806 {
54807+ char tmpbuf[64];
54808+ const char *newlink;
54809 int len;
54810
54811 len = PTR_ERR(link);
54812@@ -3952,7 +4153,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
54813 len = strlen(link);
54814 if (len > (unsigned) buflen)
54815 len = buflen;
54816- if (copy_to_user(buffer, link, len))
54817+
54818+ if (len < sizeof(tmpbuf)) {
54819+ memcpy(tmpbuf, link, len);
54820+ newlink = tmpbuf;
54821+ } else
54822+ newlink = link;
54823+
54824+ if (copy_to_user(buffer, newlink, len))
54825 len = -EFAULT;
54826 out:
54827 return len;
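The namei.c hunks thread the grsecurity policy hooks through every resolution path: gr_acl_handle_hidden_file() turns RBAC-hidden objects into -ENOENT, gr_handle_follow_link() covers the classic /tmp hardlink/symlink races, and the new link argument passed down through do_last() and lookup_open() lets gr_handle_symlink_owner() vet a trailing symlink against the inode it resolves to. That ownership rule is in the spirit of Apache's SymlinksIfOwnerMatch; roughly (a sketch, and the real check is additionally gated on a configured group):

	static int symlink_owner_ok(const struct inode *link,
				    const struct inode *target)
	{
		/* follow only if the link's creator also owns the target */
		return uid_eq(link->i_uid, target->i_uid);
	}

The vfs_readlink() change at the end is related hygiene: short link bodies are copied to a stack buffer before copy_to_user(), so that a sleeping copy works from a private snapshot rather than from the filesystem's own buffer while it can still change.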
54828diff --git a/fs/namespace.c b/fs/namespace.c
54829index 5dd7709..6f64e9c 100644
54830--- a/fs/namespace.c
54831+++ b/fs/namespace.c
54832@@ -1219,6 +1219,9 @@ static int do_umount(struct mount *mnt, int flags)
54833 if (!(sb->s_flags & MS_RDONLY))
54834 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
54835 up_write(&sb->s_umount);
54836+
54837+ gr_log_remount(mnt->mnt_devname, retval);
54838+
54839 return retval;
54840 }
54841
54842@@ -1238,6 +1241,9 @@ static int do_umount(struct mount *mnt, int flags)
54843 br_write_unlock(&vfsmount_lock);
54844 up_write(&namespace_sem);
54845 release_mounts(&umount_list);
54846+
54847+ gr_log_unmount(mnt->mnt_devname, retval);
54848+
54849 return retval;
54850 }
54851
54852@@ -1713,7 +1719,7 @@ static int do_loopback(struct path *path, const char *old_name,
54853
54854 if (IS_ERR(mnt)) {
54855 err = PTR_ERR(mnt);
54856- goto out;
54857+ goto out2;
54858 }
54859
54860 err = graft_tree(mnt, path);
54861@@ -2294,6 +2300,16 @@ long do_mount(const char *dev_name, const char *dir_name,
54862 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
54863 MS_STRICTATIME);
54864
54865+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
54866+ retval = -EPERM;
54867+ goto dput_out;
54868+ }
54869+
54870+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
54871+ retval = -EPERM;
54872+ goto dput_out;
54873+ }
54874+
54875 if (flags & MS_REMOUNT)
54876 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
54877 data_page);
54878@@ -2308,6 +2324,9 @@ long do_mount(const char *dev_name, const char *dir_name,
54879 dev_name, data_page);
54880 dput_out:
54881 path_put(&path);
54882+
54883+ gr_log_mount(dev_name, dir_name, retval);
54884+
54885 return retval;
54886 }
54887
54888@@ -2594,6 +2613,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
54889 if (error)
54890 goto out2;
54891
54892+ if (gr_handle_chroot_pivot()) {
54893+ error = -EPERM;
54894+ goto out2;
54895+ }
54896+
54897 get_fs_root(current->fs, &root);
54898 error = lock_mount(&old);
54899 if (error)
54900@@ -2842,7 +2866,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
54901 !nsown_capable(CAP_SYS_ADMIN))
54902 return -EPERM;
54903
54904- if (fs->users != 1)
54905+ if (atomic_read(&fs->users) != 1)
54906 return -EINVAL;
54907
54908 get_mnt_ns(mnt_ns);
54909diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
54910index 59461c9..b17c57e 100644
54911--- a/fs/nfs/callback_xdr.c
54912+++ b/fs/nfs/callback_xdr.c
54913@@ -51,7 +51,7 @@ struct callback_op {
54914 callback_decode_arg_t decode_args;
54915 callback_encode_res_t encode_res;
54916 long res_maxsize;
54917-};
54918+} __do_const;
54919
54920 static struct callback_op callback_ops[];
54921
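__do_const comes from the constify GCC plugin: structures consisting purely of function pointers are made const automatically, and the tag opts in structs like callback_op that mix data members with the pointers, moving the whole ops table out of writable memory. Assuming the plugin's attribute spelling, the annotation reduces to:

	#ifdef CONSTIFY_PLUGIN
	#define __do_const	__attribute__((do_const))
	#else
	#define __do_const
	#endif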
54922diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
54923index ebeb94c..ff35337 100644
54924--- a/fs/nfs/inode.c
54925+++ b/fs/nfs/inode.c
54926@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
54927 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
54928 }
54929
54930-static atomic_long_t nfs_attr_generation_counter;
54931+static atomic_long_unchecked_t nfs_attr_generation_counter;
54932
54933 static unsigned long nfs_read_attr_generation_counter(void)
54934 {
54935- return atomic_long_read(&nfs_attr_generation_counter);
54936+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
54937 }
54938
54939 unsigned long nfs_inc_attr_generation_counter(void)
54940 {
54941- return atomic_long_inc_return(&nfs_attr_generation_counter);
54942+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
54943 }
54944
54945 void nfs_fattr_init(struct nfs_fattr *fattr)
54946diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
54947index 9d1c5db..1e13db8 100644
54948--- a/fs/nfsd/nfs4proc.c
54949+++ b/fs/nfsd/nfs4proc.c
54950@@ -1097,7 +1097,7 @@ struct nfsd4_operation {
54951 nfsd4op_rsize op_rsize_bop;
54952 stateid_getter op_get_currentstateid;
54953 stateid_setter op_set_currentstateid;
54954-};
54955+} __do_const;
54956
54957 static struct nfsd4_operation nfsd4_ops[];
54958
54959diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
54960index d1dd710..32ac0e8 100644
54961--- a/fs/nfsd/nfs4xdr.c
54962+++ b/fs/nfsd/nfs4xdr.c
54963@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
54964
54965 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
54966
54967-static nfsd4_dec nfsd4_dec_ops[] = {
54968+static const nfsd4_dec nfsd4_dec_ops[] = {
54969 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54970 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54971 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54972@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
54973 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
54974 };
54975
54976-static nfsd4_dec nfsd41_dec_ops[] = {
54977+static const nfsd4_dec nfsd41_dec_ops[] = {
54978 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54979 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54980 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54981@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
54982 };
54983
54984 struct nfsd4_minorversion_ops {
54985- nfsd4_dec *decoders;
54986+ const nfsd4_dec *decoders;
54987 int nops;
54988 };
54989
54990diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
54991index 2cbac34..6dc3889 100644
54992--- a/fs/nfsd/nfscache.c
54993+++ b/fs/nfsd/nfscache.c
54994@@ -264,8 +264,10 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
54995 if (!(rp = rqstp->rq_cacherep) || cache_disabled)
54996 return;
54997
54998- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
54999- len >>= 2;
55000+ if (statp) {
55001+ len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
55002+ len >>= 2;
55003+ }
55004
55005 /* Don't cache excessive amounts of data and XDR failures */
55006 if (!statp || len > (256 >> 2)) {
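The nfscache hunk is a straight correctness fix: the old code derived len from statp before the NULL test just below it, and subtracting pointers when one of them is NULL is undefined behaviour even if the result is never consumed. After the change, len is computed, and later read via the short-circuiting ||, only when statp is non-NULL. The guard-first shape as a standalone function:

	#include <stddef.h>

	/* UBSan (-fsanitize=undefined) flags the unguarded variant when
	 * p is NULL; the subtraction itself is the undefined step */
	static ptrdiff_t words_used(const char *base, const char *p)
	{
		return p ? (p - base) >> 2 : 0;	/* guard first, as the fix does */
	}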
55007diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
55008index 69c6413..c0408d2 100644
55009--- a/fs/nfsd/vfs.c
55010+++ b/fs/nfsd/vfs.c
55011@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
55012 } else {
55013 oldfs = get_fs();
55014 set_fs(KERNEL_DS);
55015- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
55016+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
55017 set_fs(oldfs);
55018 }
55019
55020@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
55021
55022 /* Write the data. */
55023 oldfs = get_fs(); set_fs(KERNEL_DS);
55024- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
55025+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
55026 set_fs(oldfs);
55027 if (host_err < 0)
55028 goto out_nfserr;
55029@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
55030 */
55031
55032 oldfs = get_fs(); set_fs(KERNEL_DS);
55033- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
55034+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
55035 set_fs(oldfs);
55036
55037 if (host_err < 0)
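The __force_user casts annotate the surrounding set_fs(KERNEL_DS) idiom: nfsd hands kernel iovecs to vfs_readv()/vfs_writev(), which are typed for user pointers, after widening the address limit. A bare (__user *) cast would trip sparse and PaX UDEREF kernel/user pointer separation; __force_user records that the mismatch is deliberate and confined to this window:

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* "user" range now reaches kernel memory */
	host_err = vfs_readv(file, (struct iovec __force_user *)vec,
			     vlen, &offset);
	set_fs(old_fs);		/* restore before returning toward userspace */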
55038diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
55039index fea6bd5..8ee9d81 100644
55040--- a/fs/nls/nls_base.c
55041+++ b/fs/nls/nls_base.c
55042@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
55043
55044 int register_nls(struct nls_table * nls)
55045 {
55046- struct nls_table ** tmp = &tables;
55047+ struct nls_table *tmp = tables;
55048
55049 if (nls->next)
55050 return -EBUSY;
55051
55052 spin_lock(&nls_lock);
55053- while (*tmp) {
55054- if (nls == *tmp) {
55055+ while (tmp) {
55056+ if (nls == tmp) {
55057 spin_unlock(&nls_lock);
55058 return -EBUSY;
55059 }
55060- tmp = &(*tmp)->next;
55061+ tmp = tmp->next;
55062 }
55063- nls->next = tables;
55064+ pax_open_kernel();
55065+ *(struct nls_table **)&nls->next = tables;
55066+ pax_close_kernel();
55067 tables = nls;
55068 spin_unlock(&nls_lock);
55069 return 0;
55070@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
55071
55072 int unregister_nls(struct nls_table * nls)
55073 {
55074- struct nls_table ** tmp = &tables;
55075+ struct nls_table * const * tmp = &tables;
55076
55077 spin_lock(&nls_lock);
55078 while (*tmp) {
55079 if (nls == *tmp) {
55080- *tmp = nls->next;
55081+ pax_open_kernel();
55082+ *(struct nls_table **)tmp = nls->next;
55083+ pax_close_kernel();
55084 spin_unlock(&nls_lock);
55085 return 0;
55086 }
55087diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
55088index 7424929..35f6be5 100644
55089--- a/fs/nls/nls_euc-jp.c
55090+++ b/fs/nls/nls_euc-jp.c
55091@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
55092 p_nls = load_nls("cp932");
55093
55094 if (p_nls) {
55095- table.charset2upper = p_nls->charset2upper;
55096- table.charset2lower = p_nls->charset2lower;
55097+ pax_open_kernel();
55098+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
55099+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
55100+ pax_close_kernel();
55101 return register_nls(&table);
55102 }
55103
55104diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
55105index e7bc1d7..06bd4bb 100644
55106--- a/fs/nls/nls_koi8-ru.c
55107+++ b/fs/nls/nls_koi8-ru.c
55108@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
55109 p_nls = load_nls("koi8-u");
55110
55111 if (p_nls) {
55112- table.charset2upper = p_nls->charset2upper;
55113- table.charset2lower = p_nls->charset2lower;
55114+ pax_open_kernel();
55115+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
55116+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
55117+ pax_close_kernel();
55118 return register_nls(&table);
55119 }
55120
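All three nls hunks adapt to a write-protected struct nls_table: the writes to ->next, ->charset2upper and ->charset2lower now happen inside pax_open_kernel() windows with the const cast spelled out. register_nls() also swaps the pointer-to-pointer list walk for a plain traversal, plausibly because the walk no longer needs a writable slot to assign through once the single permitted write moved into the protected window. Both walk styles, side by side:

	struct node { struct node *next; };

	static int on_list_ptrptr(struct node **head, const struct node *n)
	{
		struct node **tmp;

		for (tmp = head; *tmp; tmp = &(*tmp)->next)
			if (*tmp == n)
				return 1;
		return 0;
	}

	static int on_list_plain(const struct node *head, const struct node *n)
	{
		const struct node *t;

		for (t = head; t; t = t->next)
			if (t == n)
				return 1;
		return 0;
	}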
55121diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
55122index 9ff4a5e..deb1f0f 100644
55123--- a/fs/notify/fanotify/fanotify_user.c
55124+++ b/fs/notify/fanotify/fanotify_user.c
55125@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
55126
55127 fd = fanotify_event_metadata.fd;
55128 ret = -EFAULT;
55129- if (copy_to_user(buf, &fanotify_event_metadata,
55130- fanotify_event_metadata.event_len))
55131+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
55132+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
55133 goto out_close_fd;
55134
55135 ret = prepare_for_access_response(group, event, fd);
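The fanotify fix is a bounds check on a length field that flows into copy_to_user(): had event_len ever exceeded sizeof(fanotify_event_metadata), the old code would have copied adjacent kernel stack out to the reader. The general rule, as a standalone helper: never trust a length field beyond the object that carries it.

	#include <string.h>

	static int copy_out(char *dst, size_t dstlen,
			    const void *obj, size_t objsize, size_t claimed)
	{
		if (claimed > objsize || claimed > dstlen)
			return -1;	/* would overread obj or overrun dst */
		memcpy(dst, obj, claimed);
		return 0;
	}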
55136diff --git a/fs/notify/notification.c b/fs/notify/notification.c
55137index 7b51b05..5ea5ef6 100644
55138--- a/fs/notify/notification.c
55139+++ b/fs/notify/notification.c
55140@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
55141 * get set to 0 so it will never get 'freed'
55142 */
55143 static struct fsnotify_event *q_overflow_event;
55144-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
55145+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
55146
55147 /**
55148 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
55149@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
55150 */
55151 u32 fsnotify_get_cookie(void)
55152 {
55153- return atomic_inc_return(&fsnotify_sync_cookie);
55154+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
55155 }
55156 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
55157
55158diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
55159index 99e3610..02c1068 100644
55160--- a/fs/ntfs/dir.c
55161+++ b/fs/ntfs/dir.c
55162@@ -1329,7 +1329,7 @@ find_next_index_buffer:
55163 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
55164 ~(s64)(ndir->itype.index.block_size - 1)));
55165 /* Bounds checks. */
55166- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
55167+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
55168 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
55169 "inode 0x%lx or driver bug.", vdir->i_ino);
55170 goto err_out;
55171diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
55172index 5b2d4f0..c6de396 100644
55173--- a/fs/ntfs/file.c
55174+++ b/fs/ntfs/file.c
55175@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
55176 #endif /* NTFS_RW */
55177 };
55178
55179-const struct file_operations ntfs_empty_file_ops = {};
55180+const struct file_operations ntfs_empty_file_ops __read_only;
55181
55182-const struct inode_operations ntfs_empty_inode_ops = {};
55183+const struct inode_operations ntfs_empty_inode_ops __read_only;
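ntfs_empty_file_ops and ntfs_empty_inode_ops pick up __read_only, which in grsecurity kernels pins an object into a data section that is write-protected once boot finishes, so even these zero-filled ops tables cannot be repointed at runtime. A sketch of the attribute, assuming grsecurity's usual section naming:

	#define __read_only __attribute__((__section__(".data..read_only")))

	/* zero-filled at boot; the whole section is then write-protected */
	const struct file_operations  ntfs_empty_file_ops  __read_only;
	const struct inode_operations ntfs_empty_inode_ops __read_only;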
55184diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
55185index a9f78c7..ed8a381 100644
55186--- a/fs/ocfs2/localalloc.c
55187+++ b/fs/ocfs2/localalloc.c
55188@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
55189 goto bail;
55190 }
55191
55192- atomic_inc(&osb->alloc_stats.moves);
55193+ atomic_inc_unchecked(&osb->alloc_stats.moves);
55194
55195 bail:
55196 if (handle)
55197diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
55198index d355e6e..578d905 100644
55199--- a/fs/ocfs2/ocfs2.h
55200+++ b/fs/ocfs2/ocfs2.h
55201@@ -235,11 +235,11 @@ enum ocfs2_vol_state
55202
55203 struct ocfs2_alloc_stats
55204 {
55205- atomic_t moves;
55206- atomic_t local_data;
55207- atomic_t bitmap_data;
55208- atomic_t bg_allocs;
55209- atomic_t bg_extends;
55210+ atomic_unchecked_t moves;
55211+ atomic_unchecked_t local_data;
55212+ atomic_unchecked_t bitmap_data;
55213+ atomic_unchecked_t bg_allocs;
55214+ atomic_unchecked_t bg_extends;
55215 };
55216
55217 enum ocfs2_local_alloc_state
55218diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
55219index b7e74b5..19c6536 100644
55220--- a/fs/ocfs2/suballoc.c
55221+++ b/fs/ocfs2/suballoc.c
55222@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
55223 mlog_errno(status);
55224 goto bail;
55225 }
55226- atomic_inc(&osb->alloc_stats.bg_extends);
55227+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
55228
55229 /* You should never ask for this much metadata */
55230 BUG_ON(bits_wanted >
55231@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
55232 mlog_errno(status);
55233 goto bail;
55234 }
55235- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
55236+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
55237
55238 *suballoc_loc = res.sr_bg_blkno;
55239 *suballoc_bit_start = res.sr_bit_offset;
55240@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
55241 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
55242 res->sr_bits);
55243
55244- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
55245+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
55246
55247 BUG_ON(res->sr_bits != 1);
55248
55249@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
55250 mlog_errno(status);
55251 goto bail;
55252 }
55253- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
55254+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
55255
55256 BUG_ON(res.sr_bits != 1);
55257
55258@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
55259 cluster_start,
55260 num_clusters);
55261 if (!status)
55262- atomic_inc(&osb->alloc_stats.local_data);
55263+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
55264 } else {
55265 if (min_clusters > (osb->bitmap_cpg - 1)) {
55266 /* The only paths asking for contiguousness
55267@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
55268 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
55269 res.sr_bg_blkno,
55270 res.sr_bit_offset);
55271- atomic_inc(&osb->alloc_stats.bitmap_data);
55272+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
55273 *num_clusters = res.sr_bits;
55274 }
55275 }
55276diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
55277index 0e91ec2..f4b3fc6 100644
55278--- a/fs/ocfs2/super.c
55279+++ b/fs/ocfs2/super.c
55280@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
55281 "%10s => GlobalAllocs: %d LocalAllocs: %d "
55282 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
55283 "Stats",
55284- atomic_read(&osb->alloc_stats.bitmap_data),
55285- atomic_read(&osb->alloc_stats.local_data),
55286- atomic_read(&osb->alloc_stats.bg_allocs),
55287- atomic_read(&osb->alloc_stats.moves),
55288- atomic_read(&osb->alloc_stats.bg_extends));
55289+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
55290+ atomic_read_unchecked(&osb->alloc_stats.local_data),
55291+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
55292+ atomic_read_unchecked(&osb->alloc_stats.moves),
55293+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
55294
55295 out += snprintf(buf + out, len - out,
55296 "%10s => State: %u Descriptor: %llu Size: %u bits "
55297@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
55298 spin_lock_init(&osb->osb_xattr_lock);
55299 ocfs2_init_steal_slots(osb);
55300
55301- atomic_set(&osb->alloc_stats.moves, 0);
55302- atomic_set(&osb->alloc_stats.local_data, 0);
55303- atomic_set(&osb->alloc_stats.bitmap_data, 0);
55304- atomic_set(&osb->alloc_stats.bg_allocs, 0);
55305- atomic_set(&osb->alloc_stats.bg_extends, 0);
55306+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
55307+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
55308+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
55309+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
55310+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
55311
55312 /* Copy the blockcheck stats from the superblock probe */
55313 osb->osb_ecc_stats = *stats;
55314diff --git a/fs/open.c b/fs/open.c
55315index 9b33c0c..2ffcca2 100644
55316--- a/fs/open.c
55317+++ b/fs/open.c
55318@@ -31,6 +31,8 @@
55319 #include <linux/ima.h>
55320 #include <linux/dnotify.h>
55321
55322+#define CREATE_TRACE_POINTS
55323+#include <trace/events/fs.h>
55324 #include "internal.h"
55325
55326 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
55327@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
55328 error = locks_verify_truncate(inode, NULL, length);
55329 if (!error)
55330 error = security_path_truncate(path);
55331+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
55332+ error = -EACCES;
55333 if (!error)
55334 error = do_truncate(path->dentry, length, 0, NULL);
55335
55336@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
55337 error = locks_verify_truncate(inode, f.file, length);
55338 if (!error)
55339 error = security_path_truncate(&f.file->f_path);
55340+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
55341+ error = -EACCES;
55342 if (!error)
55343 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
55344 sb_end_write(inode->i_sb);
55345@@ -373,6 +379,9 @@ retry:
55346 if (__mnt_is_readonly(path.mnt))
55347 res = -EROFS;
55348
55349+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
55350+ res = -EACCES;
55351+
55352 out_path_release:
55353 path_put(&path);
55354 if (retry_estale(res, lookup_flags)) {
55355@@ -404,6 +413,8 @@ retry:
55356 if (error)
55357 goto dput_and_out;
55358
55359+ gr_log_chdir(path.dentry, path.mnt);
55360+
55361 set_fs_pwd(current->fs, &path);
55362
55363 dput_and_out:
55364@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
55365 goto out_putf;
55366
55367 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
55368+
55369+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
55370+ error = -EPERM;
55371+
55372+ if (!error)
55373+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
55374+
55375 if (!error)
55376 set_fs_pwd(current->fs, &f.file->f_path);
55377 out_putf:
55378@@ -462,7 +480,13 @@ retry:
55379 if (error)
55380 goto dput_and_out;
55381
55382+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
55383+ goto dput_and_out;
55384+
55385 set_fs_root(current->fs, &path);
55386+
55387+ gr_handle_chroot_chdir(&path);
55388+
55389 error = 0;
55390 dput_and_out:
55391 path_put(&path);
55392@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
55393 if (error)
55394 return error;
55395 mutex_lock(&inode->i_mutex);
55396+
55397+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
55398+ error = -EACCES;
55399+ goto out_unlock;
55400+ }
55401+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
55402+ error = -EACCES;
55403+ goto out_unlock;
55404+ }
55405+
55406 error = security_path_chmod(path, mode);
55407 if (error)
55408 goto out_unlock;
55409@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
55410 uid = make_kuid(current_user_ns(), user);
55411 gid = make_kgid(current_user_ns(), group);
55412
55413+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
55414+ return -EACCES;
55415+
55416 newattrs.ia_valid = ATTR_CTIME;
55417 if (user != (uid_t) -1) {
55418 if (!uid_valid(uid))
55419@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
55420 } else {
55421 fsnotify_open(f);
55422 fd_install(fd, f);
55423+ trace_do_sys_open(tmp->name, flags, mode);
55424 }
55425 }
55426 putname(tmp);
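
All of the fs/open.c hooks above follow one shape: the grsecurity check runs only after the existing DAC/LSM checks have passed, and a zero return from the gr_acl_* predicate maps to -EACCES. A compilable sketch of that layering, with hypothetical stand-ins for the real check functions.

#include <errno.h>
#include <stdio.h>

static int lsm_check(void)  { return 0; }  /* 0 == allowed, like security_* */
static int rbac_check(void) { return 1; }  /* nonzero == allowed, like gr_acl_* */

static int do_op_with_hooks(void)
{
	int error = lsm_check();
	if (!error && !rbac_check())
		error = -EACCES;	/* RBAC veto, same shape as the hunks */
	if (!error)
		printf("operation permitted\n");
	return error;
}

int main(void) { return do_op_with_hooks() ? 1 : 0; }
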
55427diff --git a/fs/pipe.c b/fs/pipe.c
55428index 8e2e73f..1ef1048 100644
55429--- a/fs/pipe.c
55430+++ b/fs/pipe.c
55431@@ -438,9 +438,9 @@ redo:
55432 }
55433 if (bufs) /* More to do? */
55434 continue;
55435- if (!pipe->writers)
55436+ if (!atomic_read(&pipe->writers))
55437 break;
55438- if (!pipe->waiting_writers) {
55439+ if (!atomic_read(&pipe->waiting_writers)) {
55440 /* syscall merging: Usually we must not sleep
55441 * if O_NONBLOCK is set, or if we got some data.
55442 * But if a writer sleeps in kernel space, then
55443@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
55444 mutex_lock(&inode->i_mutex);
55445 pipe = inode->i_pipe;
55446
55447- if (!pipe->readers) {
55448+ if (!atomic_read(&pipe->readers)) {
55449 send_sig(SIGPIPE, current, 0);
55450 ret = -EPIPE;
55451 goto out;
55452@@ -553,7 +553,7 @@ redo1:
55453 for (;;) {
55454 int bufs;
55455
55456- if (!pipe->readers) {
55457+ if (!atomic_read(&pipe->readers)) {
55458 send_sig(SIGPIPE, current, 0);
55459 if (!ret)
55460 ret = -EPIPE;
55461@@ -644,9 +644,9 @@ redo2:
55462 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
55463 do_wakeup = 0;
55464 }
55465- pipe->waiting_writers++;
55466+ atomic_inc(&pipe->waiting_writers);
55467 pipe_wait(pipe);
55468- pipe->waiting_writers--;
55469+ atomic_dec(&pipe->waiting_writers);
55470 }
55471 out:
55472 mutex_unlock(&inode->i_mutex);
55473@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
55474 mask = 0;
55475 if (filp->f_mode & FMODE_READ) {
55476 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
55477- if (!pipe->writers && filp->f_version != pipe->w_counter)
55478+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
55479 mask |= POLLHUP;
55480 }
55481
55482@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
55483 * Most Unices do not set POLLERR for FIFOs but on Linux they
55484 * behave exactly like pipes for poll().
55485 */
55486- if (!pipe->readers)
55487+ if (!atomic_read(&pipe->readers))
55488 mask |= POLLERR;
55489 }
55490
55491@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
55492
55493 mutex_lock(&inode->i_mutex);
55494 pipe = inode->i_pipe;
55495- pipe->readers -= decr;
55496- pipe->writers -= decw;
55497+ atomic_sub(decr, &pipe->readers);
55498+ atomic_sub(decw, &pipe->writers);
55499
55500- if (!pipe->readers && !pipe->writers) {
55501+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
55502 free_pipe_info(inode);
55503 } else {
55504 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
55505@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
55506
55507 if (inode->i_pipe) {
55508 ret = 0;
55509- inode->i_pipe->readers++;
55510+ atomic_inc(&inode->i_pipe->readers);
55511 }
55512
55513 mutex_unlock(&inode->i_mutex);
55514@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
55515
55516 if (inode->i_pipe) {
55517 ret = 0;
55518- inode->i_pipe->writers++;
55519+ atomic_inc(&inode->i_pipe->writers);
55520 }
55521
55522 mutex_unlock(&inode->i_mutex);
55523@@ -871,9 +871,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
55524 if (inode->i_pipe) {
55525 ret = 0;
55526 if (filp->f_mode & FMODE_READ)
55527- inode->i_pipe->readers++;
55528+ atomic_inc(&inode->i_pipe->readers);
55529 if (filp->f_mode & FMODE_WRITE)
55530- inode->i_pipe->writers++;
55531+ atomic_inc(&inode->i_pipe->writers);
55532 }
55533
55534 mutex_unlock(&inode->i_mutex);
55535@@ -965,7 +965,7 @@ void free_pipe_info(struct inode *inode)
55536 inode->i_pipe = NULL;
55537 }
55538
55539-static struct vfsmount *pipe_mnt __read_mostly;
55540+struct vfsmount *pipe_mnt __read_mostly;
55541
55542 /*
55543 * pipefs_dname() is called from d_path().
55544@@ -995,7 +995,8 @@ static struct inode * get_pipe_inode(void)
55545 goto fail_iput;
55546 inode->i_pipe = pipe;
55547
55548- pipe->readers = pipe->writers = 1;
55549+ atomic_set(&pipe->readers, 1);
55550+ atomic_set(&pipe->writers, 1);
55551 inode->i_fop = &rdwr_pipefifo_fops;
55552
55553 /*
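
The fs/pipe.c conversion turns the readers, writers, and waiting_writers counts into atomics; a user-space sketch of the resulting open/release arithmetic, using C11 atomics rather than the kernel API, with the last-user test deciding when the pipe state may be freed.

#include <stdatomic.h>
#include <stdio.h>

struct pipe_like {
	atomic_int readers;
	atomic_int writers;
};

static void open_reader(struct pipe_like *p)
{
	atomic_fetch_add(&p->readers, 1);
}

static void release(struct pipe_like *p, int decr, int decw)
{
	atomic_fetch_sub(&p->readers, decr);
	atomic_fetch_sub(&p->writers, decw);
	if (!atomic_load(&p->readers) && !atomic_load(&p->writers))
		printf("last user gone: free pipe info\n");
}

int main(void)
{
	struct pipe_like p = { ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(0) };
	open_reader(&p);
	release(&p, 1, 0);
	return 0;
}

Under PaX REFCOUNT these counters also become overflow-trapped, which is the security motivation: an attacker can no longer wrap readers/writers to trigger a premature free_pipe_info().
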
55554diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
55555index 15af622..0e9f4467 100644
55556--- a/fs/proc/Kconfig
55557+++ b/fs/proc/Kconfig
55558@@ -30,12 +30,12 @@ config PROC_FS
55559
55560 config PROC_KCORE
55561 bool "/proc/kcore support" if !ARM
55562- depends on PROC_FS && MMU
55563+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
55564
55565 config PROC_VMCORE
55566 bool "/proc/vmcore support"
55567- depends on PROC_FS && CRASH_DUMP
55568- default y
55569+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
55570+ default n
55571 help
55572 Exports the dump image of crashed kernel in ELF format.
55573
55574@@ -59,8 +59,8 @@ config PROC_SYSCTL
55575 limited in memory.
55576
55577 config PROC_PAGE_MONITOR
55578- default y
55579- depends on PROC_FS && MMU
55580+ default n
55581+ depends on PROC_FS && MMU && !GRKERNSEC
55582 bool "Enable /proc page monitoring" if EXPERT
55583 help
55584 Various /proc files exist to monitor process memory utilization:
55585diff --git a/fs/proc/array.c b/fs/proc/array.c
55586index be3c22f..0df1564 100644
55587--- a/fs/proc/array.c
55588+++ b/fs/proc/array.c
55589@@ -60,6 +60,7 @@
55590 #include <linux/tty.h>
55591 #include <linux/string.h>
55592 #include <linux/mman.h>
55593+#include <linux/grsecurity.h>
55594 #include <linux/proc_fs.h>
55595 #include <linux/ioport.h>
55596 #include <linux/uaccess.h>
55597@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
55598 seq_putc(m, '\n');
55599 }
55600
55601+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55602+static inline void task_pax(struct seq_file *m, struct task_struct *p)
55603+{
55604+ if (p->mm)
55605+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
55606+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
55607+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
55608+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
55609+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
55610+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
55611+ else
55612+ seq_printf(m, "PaX:\t-----\n");
55613+}
55614+#endif
55615+
55616 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55617 struct pid *pid, struct task_struct *task)
55618 {
55619@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55620 task_cpus_allowed(m, task);
55621 cpuset_task_status_allowed(m, task);
55622 task_context_switch_counts(m, task);
55623+
55624+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55625+ task_pax(m, task);
55626+#endif
55627+
55628+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
55629+ task_grsec_rbac(m, task);
55630+#endif
55631+
55632 return 0;
55633 }
55634
55635+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55636+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55637+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55638+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55639+#endif
55640+
55641 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55642 struct pid *pid, struct task_struct *task, int whole)
55643 {
55644@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55645 char tcomm[sizeof(task->comm)];
55646 unsigned long flags;
55647
55648+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55649+ if (current->exec_id != m->exec_id) {
55650+ gr_log_badprocpid("stat");
55651+ return 0;
55652+ }
55653+#endif
55654+
55655 state = *get_task_state(task);
55656 vsize = eip = esp = 0;
55657 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55658@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55659 gtime = task->gtime;
55660 }
55661
55662+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55663+ if (PAX_RAND_FLAGS(mm)) {
55664+ eip = 0;
55665+ esp = 0;
55666+ wchan = 0;
55667+ }
55668+#endif
55669+#ifdef CONFIG_GRKERNSEC_HIDESYM
55670+ wchan = 0;
55671+	eip = 0;
55672+	esp = 0;
55673+#endif
55674+
55675 /* scale priority and nice values from timeslices to -20..20 */
55676 /* to make it look like a "normal" Unix priority/nice value */
55677 priority = task_prio(task);
55678@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55679 seq_put_decimal_ull(m, ' ', vsize);
55680 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
55681 seq_put_decimal_ull(m, ' ', rsslim);
55682+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55683+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
55684+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
55685+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
55686+#else
55687 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
55688 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
55689 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
55690+#endif
55691 seq_put_decimal_ull(m, ' ', esp);
55692 seq_put_decimal_ull(m, ' ', eip);
55693 /* The signal information here is obsolete.
55694@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55695 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
55696 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
55697
55698- if (mm && permitted) {
55699+ if (mm && permitted
55700+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55701+ && !PAX_RAND_FLAGS(mm)
55702+#endif
55703+ ) {
55704 seq_put_decimal_ull(m, ' ', mm->start_data);
55705 seq_put_decimal_ull(m, ' ', mm->end_data);
55706 seq_put_decimal_ull(m, ' ', mm->start_brk);
55707@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55708 struct pid *pid, struct task_struct *task)
55709 {
55710 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
55711- struct mm_struct *mm = get_task_mm(task);
55712+ struct mm_struct *mm;
55713
55714+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55715+ if (current->exec_id != m->exec_id) {
55716+ gr_log_badprocpid("statm");
55717+ return 0;
55718+ }
55719+#endif
55720+ mm = get_task_mm(task);
55721 if (mm) {
55722 size = task_statm(mm, &shared, &text, &data, &resident);
55723 mmput(mm);
55724@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55725 return 0;
55726 }
55727
55728+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55729+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
55730+{
55731+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
55732+}
55733+#endif
55734+
55735 #ifdef CONFIG_CHECKPOINT_RESTORE
55736 static struct pid *
55737 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
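
PAX_RAND_FLAGS, defined identically in array.c, base.c, and task_mmu.c within this patch, gates address disclosure on whether the target mm is randomized and belongs to someone other than the reader. A sketch of the predicate as a plain function; the flag values are illustrative stand-ins for the MF_PAX_* bits, and the OR of the two flag tests is folded into one mask.

#include <stdbool.h>
#include <stddef.h>

#define MF_PAX_RANDMMAP  0x08UL
#define MF_PAX_SEGMEXEC  0x10UL

struct mm_like { unsigned long pax_flags; };

static bool pax_rand_flags(const struct mm_like *mm,
			   const struct mm_like *current_mm)
{
	/* hide addresses of any randomized mm other than the reader's own */
	return mm != NULL && mm != current_mm &&
	       (mm->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
}

int main(void)
{
	struct mm_like mine = { 0 }, other = { MF_PAX_RANDMMAP };
	return pax_rand_flags(&other, &mine) ? 0 : 1; /* 0: would be hidden */
}
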
55738diff --git a/fs/proc/base.c b/fs/proc/base.c
55739index 9b43ff77..0fa9564 100644
55740--- a/fs/proc/base.c
55741+++ b/fs/proc/base.c
55742@@ -111,6 +111,14 @@ struct pid_entry {
55743 union proc_op op;
55744 };
55745
55746+struct getdents_callback {
55747+ struct linux_dirent __user * current_dir;
55748+ struct linux_dirent __user * previous;
55749+ struct file * file;
55750+ int count;
55751+ int error;
55752+};
55753+
55754 #define NOD(NAME, MODE, IOP, FOP, OP) { \
55755 .name = (NAME), \
55756 .len = sizeof(NAME) - 1, \
55757@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
55758 if (!mm->arg_end)
55759 goto out_mm; /* Shh! No looking before we're done */
55760
55761+ if (gr_acl_handle_procpidmem(task))
55762+ goto out_mm;
55763+
55764 len = mm->arg_end - mm->arg_start;
55765
55766 if (len > PAGE_SIZE)
55767@@ -235,12 +246,28 @@ out:
55768 return res;
55769 }
55770
55771+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55772+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55773+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55774+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55775+#endif
55776+
55777 static int proc_pid_auxv(struct task_struct *task, char *buffer)
55778 {
55779 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
55780 int res = PTR_ERR(mm);
55781 if (mm && !IS_ERR(mm)) {
55782 unsigned int nwords = 0;
55783+
55784+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55785+ /* allow if we're currently ptracing this task */
55786+ if (PAX_RAND_FLAGS(mm) &&
55787+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
55788+ mmput(mm);
55789+ return 0;
55790+ }
55791+#endif
55792+
55793 do {
55794 nwords += 2;
55795 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
55796@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
55797 }
55798
55799
55800-#ifdef CONFIG_KALLSYMS
55801+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55802 /*
55803 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
55804 * Returns the resolved symbol. If that fails, simply return the address.
55805@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
55806 mutex_unlock(&task->signal->cred_guard_mutex);
55807 }
55808
55809-#ifdef CONFIG_STACKTRACE
55810+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55811
55812 #define MAX_STACK_TRACE_DEPTH 64
55813
55814@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
55815 return count;
55816 }
55817
55818-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55819+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55820 static int proc_pid_syscall(struct task_struct *task, char *buffer)
55821 {
55822 long nr;
55823@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
55824 /************************************************************************/
55825
55826 /* permission checks */
55827-static int proc_fd_access_allowed(struct inode *inode)
55828+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
55829 {
55830 struct task_struct *task;
55831 int allowed = 0;
55832@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
55833 */
55834 task = get_proc_task(inode);
55835 if (task) {
55836- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55837+ if (log)
55838+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55839+ else
55840+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55841 put_task_struct(task);
55842 }
55843 return allowed;
55844@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
55845 struct task_struct *task,
55846 int hide_pid_min)
55847 {
55848+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55849+ return false;
55850+
55851+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55852+ rcu_read_lock();
55853+ {
55854+ const struct cred *tmpcred = current_cred();
55855+ const struct cred *cred = __task_cred(task);
55856+
55857+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
55858+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55859+ || in_group_p(grsec_proc_gid)
55860+#endif
55861+ ) {
55862+ rcu_read_unlock();
55863+ return true;
55864+ }
55865+ }
55866+ rcu_read_unlock();
55867+
55868+ if (!pid->hide_pid)
55869+ return false;
55870+#endif
55871+
55872 if (pid->hide_pid < hide_pid_min)
55873 return true;
55874 if (in_group_p(pid->pid_gid))
55875 return true;
55876+
55877 return ptrace_may_access(task, PTRACE_MODE_READ);
55878 }
55879
55880@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
55881 put_task_struct(task);
55882
55883 if (!has_perms) {
55884+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55885+ {
55886+#else
55887 if (pid->hide_pid == 2) {
55888+#endif
55889 /*
55890 * Let's make getdents(), stat(), and open()
55891 * consistent with each other. If a process
55892@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55893 if (!task)
55894 return -ESRCH;
55895
55896+ if (gr_acl_handle_procpidmem(task)) {
55897+ put_task_struct(task);
55898+ return -EPERM;
55899+ }
55900+
55901 mm = mm_access(task, mode);
55902 put_task_struct(task);
55903
55904@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55905
55906 file->private_data = mm;
55907
55908+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55909+ file->f_version = current->exec_id;
55910+#endif
55911+
55912 return 0;
55913 }
55914
55915@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55916 ssize_t copied;
55917 char *page;
55918
55919+#ifdef CONFIG_GRKERNSEC
55920+ if (write)
55921+ return -EPERM;
55922+#endif
55923+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55924+ if (file->f_version != current->exec_id) {
55925+ gr_log_badprocpid("mem");
55926+ return 0;
55927+ }
55928+#endif
55929+
55930 if (!mm)
55931 return 0;
55932
55933@@ -722,7 +801,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55934 goto free;
55935
55936 while (count > 0) {
55937- int this_len = min_t(int, count, PAGE_SIZE);
55938+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
55939
55940 if (write && copy_from_user(page, buf, this_len)) {
55941 copied = -EFAULT;
55942@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55943 if (!mm)
55944 return 0;
55945
55946+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55947+ if (file->f_version != current->exec_id) {
55948+ gr_log_badprocpid("environ");
55949+ return 0;
55950+ }
55951+#endif
55952+
55953 page = (char *)__get_free_page(GFP_TEMPORARY);
55954 if (!page)
55955 return -ENOMEM;
55956@@ -823,7 +909,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55957 goto free;
55958 while (count > 0) {
55959 size_t this_len, max_len;
55960- int retval;
55961+ ssize_t retval;
55962
55963 if (src >= (mm->env_end - mm->env_start))
55964 break;
55965@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
55966 int error = -EACCES;
55967
55968 /* Are we allowed to snoop on the tasks file descriptors? */
55969- if (!proc_fd_access_allowed(inode))
55970+ if (!proc_fd_access_allowed(inode, 0))
55971 goto out;
55972
55973 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55974@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
55975 struct path path;
55976
55977 /* Are we allowed to snoop on the tasks file descriptors? */
55978- if (!proc_fd_access_allowed(inode))
55979- goto out;
55980+	/* logging is needed here for learning on chromium to work properly,
55981+	   but we don't want to flood the logs from 'ps', which does a readlink
55982+	   on /proc/fd/2 of every task in the listing, nor do we want 'ps' to
55983+	   learn CAP_SYS_PTRACE, as that isn't necessary for its basic functionality
55984+	 */
55985+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
55986+		if (!proc_fd_access_allowed(inode, 0))
55987+ goto out;
55988+ } else {
55989+		if (!proc_fd_access_allowed(inode, 1))
55990+ goto out;
55991+ }
55992
55993 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55994 if (error)
55995@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
55996 rcu_read_lock();
55997 cred = __task_cred(task);
55998 inode->i_uid = cred->euid;
55999+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
56000+ inode->i_gid = grsec_proc_gid;
56001+#else
56002 inode->i_gid = cred->egid;
56003+#endif
56004 rcu_read_unlock();
56005 }
56006 security_task_to_inode(task, inode);
56007@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
56008 return -ENOENT;
56009 }
56010 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
56011+#ifdef CONFIG_GRKERNSEC_PROC_USER
56012+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
56013+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56014+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
56015+#endif
56016 task_dumpable(task)) {
56017 cred = __task_cred(task);
56018 stat->uid = cred->euid;
56019+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
56020+ stat->gid = grsec_proc_gid;
56021+#else
56022 stat->gid = cred->egid;
56023+#endif
56024 }
56025 }
56026 rcu_read_unlock();
56027@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
56028
56029 if (task) {
56030 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
56031+#ifdef CONFIG_GRKERNSEC_PROC_USER
56032+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
56033+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56034+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
56035+#endif
56036 task_dumpable(task)) {
56037 rcu_read_lock();
56038 cred = __task_cred(task);
56039 inode->i_uid = cred->euid;
56040+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
56041+ inode->i_gid = grsec_proc_gid;
56042+#else
56043 inode->i_gid = cred->egid;
56044+#endif
56045 rcu_read_unlock();
56046 } else {
56047 inode->i_uid = GLOBAL_ROOT_UID;
56048@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
56049 if (!task)
56050 goto out_no_task;
56051
56052+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
56053+ goto out;
56054+
56055 /*
56056 * Yes, it does not scale. And it should not. Don't add
56057 * new entries into /proc/<tgid>/ without very good reasons.
56058@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
56059 if (!task)
56060 goto out_no_task;
56061
56062+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
56063+ goto out;
56064+
56065 ret = 0;
56066 i = filp->f_pos;
56067 switch (i) {
56068@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
56069 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
56070 #endif
56071 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
56072-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
56073+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
56074 INF("syscall", S_IRUGO, proc_pid_syscall),
56075 #endif
56076 INF("cmdline", S_IRUGO, proc_pid_cmdline),
56077@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
56078 #ifdef CONFIG_SECURITY
56079 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
56080 #endif
56081-#ifdef CONFIG_KALLSYMS
56082+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56083 INF("wchan", S_IRUGO, proc_pid_wchan),
56084 #endif
56085-#ifdef CONFIG_STACKTRACE
56086+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56087 ONE("stack", S_IRUGO, proc_pid_stack),
56088 #endif
56089 #ifdef CONFIG_SCHEDSTATS
56090@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
56091 #ifdef CONFIG_HARDWALL
56092 INF("hardwall", S_IRUGO, proc_pid_hardwall),
56093 #endif
56094+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
56095+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
56096+#endif
56097 #ifdef CONFIG_USER_NS
56098 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
56099 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
56100@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
56101 if (!inode)
56102 goto out;
56103
56104+#ifdef CONFIG_GRKERNSEC_PROC_USER
56105+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
56106+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56107+ inode->i_gid = grsec_proc_gid;
56108+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
56109+#else
56110 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
56111+#endif
56112 inode->i_op = &proc_tgid_base_inode_operations;
56113 inode->i_fop = &proc_tgid_base_operations;
56114 inode->i_flags|=S_IMMUTABLE;
56115@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
56116 if (!task)
56117 goto out;
56118
56119+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
56120+ goto out_put_task;
56121+
56122 result = proc_pid_instantiate(dir, dentry, task, NULL);
56123+out_put_task:
56124 put_task_struct(task);
56125 out:
56126 return result;
56127@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
56128 static int fake_filldir(void *buf, const char *name, int namelen,
56129 loff_t offset, u64 ino, unsigned d_type)
56130 {
56131+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
56132+ __buf->error = -EINVAL;
56133 return 0;
56134 }
56135
56136@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
56137 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
56138 #endif
56139 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
56140-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
56141+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
56142 INF("syscall", S_IRUGO, proc_pid_syscall),
56143 #endif
56144 INF("cmdline", S_IRUGO, proc_pid_cmdline),
56145@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
56146 #ifdef CONFIG_SECURITY
56147 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
56148 #endif
56149-#ifdef CONFIG_KALLSYMS
56150+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56151 INF("wchan", S_IRUGO, proc_pid_wchan),
56152 #endif
56153-#ifdef CONFIG_STACKTRACE
56154+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56155 ONE("stack", S_IRUGO, proc_pid_stack),
56156 #endif
56157 #ifdef CONFIG_SCHEDSTATS
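
The proc_fd_access_allowed() change above threads a log flag down to ptrace_may_access() so that bulk readlink traffic (ps walking /proc/*/fd/2) uses the unaudited mode, while genuine follow_link attempts still get logged. A sketch of that two-mode check with hypothetical constants.

#include <stdbool.h>
#include <stdio.h>

#define MODE_READ     0x01
#define MODE_NOAUDIT  0x02

static bool ptrace_may_access_sketch(unsigned mode)
{
	if (!(mode & MODE_NOAUDIT))
		fprintf(stderr, "audit: access check logged\n");
	return true;	/* decision logic elided */
}

static bool fd_access_allowed(bool log)
{
	return ptrace_may_access_sketch(log ? MODE_READ
					    : MODE_READ | MODE_NOAUDIT);
}

int main(void) { return fd_access_allowed(false) ? 0 : 1; }
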
56158diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
56159index 82676e3..5f8518a 100644
56160--- a/fs/proc/cmdline.c
56161+++ b/fs/proc/cmdline.c
56162@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
56163
56164 static int __init proc_cmdline_init(void)
56165 {
56166+#ifdef CONFIG_GRKERNSEC_PROC_ADD
56167+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
56168+#else
56169 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
56170+#endif
56171 return 0;
56172 }
56173 module_init(proc_cmdline_init);
56174diff --git a/fs/proc/devices.c b/fs/proc/devices.c
56175index b143471..bb105e5 100644
56176--- a/fs/proc/devices.c
56177+++ b/fs/proc/devices.c
56178@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
56179
56180 static int __init proc_devices_init(void)
56181 {
56182+#ifdef CONFIG_GRKERNSEC_PROC_ADD
56183+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
56184+#else
56185 proc_create("devices", 0, NULL, &proc_devinfo_operations);
56186+#endif
56187 return 0;
56188 }
56189 module_init(proc_devices_init);
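
cmdline.c and devices.c get the same treatment: under the hardening option the proc entry is created by a restricted helper instead of the world-readable default. A sketch of that compile-time dispatch; proc_create_restricted here is a hypothetical stand-in for proc_create_grsec, and the modes shown are only indicative.

#include <stdio.h>

static void proc_create_plain(const char *name)      { printf("0444 %s\n", name); }
static void proc_create_restricted(const char *name) { printf("0400 %s\n", name); }

/* One creation point, resolved at build time like the #ifdef in the hunks. */
static void create_entry(const char *name, int restricted)
{
	if (restricted)
		proc_create_restricted(name);
	else
		proc_create_plain(name);
}

int main(void)
{
#ifdef CONFIG_GRKERNSEC_PROC_ADD
	create_entry("cmdline", 1);
#else
	create_entry("cmdline", 0);
#endif
	return 0;
}
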
56190diff --git a/fs/proc/fd.c b/fs/proc/fd.c
56191index d7a4a28..0201742 100644
56192--- a/fs/proc/fd.c
56193+++ b/fs/proc/fd.c
56194@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
56195 if (!task)
56196 return -ENOENT;
56197
56198- files = get_files_struct(task);
56199+ if (!gr_acl_handle_procpidmem(task))
56200+ files = get_files_struct(task);
56201 put_task_struct(task);
56202
56203 if (files) {
56204@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
56205 */
56206 int proc_fd_permission(struct inode *inode, int mask)
56207 {
56208+ struct task_struct *task;
56209 int rv = generic_permission(inode, mask);
56210- if (rv == 0)
56211- return 0;
56212+
56213 if (task_pid(current) == proc_pid(inode))
56214 rv = 0;
56215+
56216+ task = get_proc_task(inode);
56217+ if (task == NULL)
56218+ return rv;
56219+
56220+ if (gr_acl_handle_procpidmem(task))
56221+ rv = -EACCES;
56222+
56223+ put_task_struct(task);
56224+
56225 return rv;
56226 }
56227
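
The reworked proc_fd_permission() above changes the control flow as well as adding a hook: generic DAC no longer short-circuits success, self-access still forces success, and the RBAC check gets the final veto. A control-flow sketch with stub predicates, following the same ordering as the hunk.

#include <errno.h>
#include <stdbool.h>

static int  generic_permission_stub(void) { return 0; }
static bool is_self(void)                 { return false; }
static bool rbac_denies_pidmem(void)      { return true; }

static int proc_fd_permission_sketch(void)
{
	int rv = generic_permission_stub();

	if (is_self())
		rv = 0;
	if (rbac_denies_pidmem())
		rv = -EACCES;	/* veto applies even after DAC success */
	return rv;
}

int main(void) { return proc_fd_permission_sketch() == -EACCES ? 0 : 1; }
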
56228diff --git a/fs/proc/inode.c b/fs/proc/inode.c
56229index 0ac1e1b..0497e58 100644
56230--- a/fs/proc/inode.c
56231+++ b/fs/proc/inode.c
56232@@ -21,11 +21,17 @@
56233 #include <linux/seq_file.h>
56234 #include <linux/slab.h>
56235 #include <linux/mount.h>
56236+#include <linux/grsecurity.h>
56237
56238 #include <asm/uaccess.h>
56239
56240 #include "internal.h"
56241
56242+#ifdef CONFIG_PROC_SYSCTL
56243+extern const struct inode_operations proc_sys_inode_operations;
56244+extern const struct inode_operations proc_sys_dir_operations;
56245+#endif
56246+
56247 static void proc_evict_inode(struct inode *inode)
56248 {
56249 struct proc_dir_entry *de;
56250@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
56251 ns = PROC_I(inode)->ns;
56252 if (ns_ops && ns)
56253 ns_ops->put(ns);
56254+
56255+#ifdef CONFIG_PROC_SYSCTL
56256+ if (inode->i_op == &proc_sys_inode_operations ||
56257+ inode->i_op == &proc_sys_dir_operations)
56258+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
56259+#endif
56260+
56261 }
56262
56263 static struct kmem_cache * proc_inode_cachep;
56264@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
56265 if (de->mode) {
56266 inode->i_mode = de->mode;
56267 inode->i_uid = de->uid;
56268+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
56269+ inode->i_gid = grsec_proc_gid;
56270+#else
56271 inode->i_gid = de->gid;
56272+#endif
56273 }
56274 if (de->size)
56275 inode->i_size = de->size;
56276diff --git a/fs/proc/internal.h b/fs/proc/internal.h
56277index 252544c..04395b9 100644
56278--- a/fs/proc/internal.h
56279+++ b/fs/proc/internal.h
56280@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
56281 struct pid *pid, struct task_struct *task);
56282 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
56283 struct pid *pid, struct task_struct *task);
56284+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
56285+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
56286+#endif
56287 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
56288
56289 extern const struct file_operations proc_tid_children_operations;
56290diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
56291index e96d4f1..8b116ed 100644
56292--- a/fs/proc/kcore.c
56293+++ b/fs/proc/kcore.c
56294@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
56295 * the addresses in the elf_phdr on our list.
56296 */
56297 start = kc_offset_to_vaddr(*fpos - elf_buflen);
56298- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
56299+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
56300+ if (tsz > buflen)
56301 tsz = buflen;
56302-
56303+
56304 while (buflen) {
56305 struct kcore_list *m;
56306
56307@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
56308 kfree(elf_buf);
56309 } else {
56310 if (kern_addr_valid(start)) {
56311- unsigned long n;
56312+ char *elf_buf;
56313+ mm_segment_t oldfs;
56314
56315- n = copy_to_user(buffer, (char *)start, tsz);
56316- /*
56317- * We cannot distinguish between fault on source
56318- * and fault on destination. When this happens
56319- * we clear too and hope it will trigger the
56320- * EFAULT again.
56321- */
56322- if (n) {
56323- if (clear_user(buffer + tsz - n,
56324- n))
56325+ elf_buf = kmalloc(tsz, GFP_KERNEL);
56326+ if (!elf_buf)
56327+ return -ENOMEM;
56328+ oldfs = get_fs();
56329+ set_fs(KERNEL_DS);
56330+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
56331+ set_fs(oldfs);
56332+ if (copy_to_user(buffer, elf_buf, tsz)) {
56333+ kfree(elf_buf);
56334 return -EFAULT;
56335+ }
56336 }
56337+ set_fs(oldfs);
56338+ kfree(elf_buf);
56339 } else {
56340 if (clear_user(buffer, tsz))
56341 return -EFAULT;
56342@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
56343
56344 static int open_kcore(struct inode *inode, struct file *filp)
56345 {
56346+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56347+ return -EPERM;
56348+#endif
56349 if (!capable(CAP_SYS_RAWIO))
56350 return -EPERM;
56351 if (kcore_need_update)
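
The read_kcore() rewrite above replaces a direct copy_to_user() from an arbitrary kernel virtual address with a bounce buffer: stage the data into a freshly kmalloc'd object, then copy that out, so the user-copy path only ever touches a known heap allocation. A user-space sketch of the same pattern, with memcpy standing in for the kernel copy primitives and the error codes mirroring the patch.

#include <stdlib.h>
#include <string.h>

static int copy_out_stub(char *dst, const char *src, size_t n)
{
	memcpy(dst, src, n);	/* stands in for copy_to_user */
	return 0;
}

static int read_region(char *user_buf, const char *src, size_t tsz)
{
	char *bounce = malloc(tsz);
	int err = 0;

	if (!bounce)
		return -1;			/* -ENOMEM in the patch */
	memcpy(bounce, src, tsz);		/* staged kernel-side copy */
	if (copy_out_stub(user_buf, bounce, tsz))
		err = -1;			/* -EFAULT in the patch */
	free(bounce);
	return err;
}

int main(void)
{
	char out[8];
	return read_region(out, "kcore!!", 8);
}

This also plays well with the hardened usercopy checks elsewhere in the patch, since the object size of the bounce buffer is known exactly.
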
56352diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
56353index 80e4645..53e5fcf 100644
56354--- a/fs/proc/meminfo.c
56355+++ b/fs/proc/meminfo.c
56356@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
56357 vmi.used >> 10,
56358 vmi.largest_chunk >> 10
56359 #ifdef CONFIG_MEMORY_FAILURE
56360- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
56361+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
56362 #endif
56363 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
56364 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
56365diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
56366index b1822dd..df622cb 100644
56367--- a/fs/proc/nommu.c
56368+++ b/fs/proc/nommu.c
56369@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
56370 if (len < 1)
56371 len = 1;
56372 seq_printf(m, "%*c", len, ' ');
56373- seq_path(m, &file->f_path, "");
56374+ seq_path(m, &file->f_path, "\n\\");
56375 }
56376
56377 seq_putc(m, '\n');
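
Several hunks in this patch widen the seq_path() escape set to "\n\\": seq_path octal-escapes every byte listed in the set, so a file name containing a newline or backslash can no longer forge extra records in line-oriented /proc output. A sketch of that escaping, following the idea rather than the kernel's exact routine.

#include <stdio.h>
#include <string.h>

static void print_escaped(const char *name, const char *esc)
{
	const unsigned char *p;

	for (p = (const unsigned char *)name; *p; p++) {
		if (strchr(esc, *p))
			printf("\\%03o", *p);	/* octal escape, seq_path-style */
		else
			putchar(*p);
	}
	putchar('\n');
}

int main(void)
{
	print_escaped("evil\nname", "\n\\");	/* prints evil\012name */
	return 0;
}
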
56378diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
56379index fe72cd0..21b52ff 100644
56380--- a/fs/proc/proc_net.c
56381+++ b/fs/proc/proc_net.c
56382@@ -23,6 +23,7 @@
56383 #include <linux/nsproxy.h>
56384 #include <net/net_namespace.h>
56385 #include <linux/seq_file.h>
56386+#include <linux/grsecurity.h>
56387
56388 #include "internal.h"
56389
56390@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
56391 struct task_struct *task;
56392 struct nsproxy *ns;
56393 struct net *net = NULL;
56394+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56395+ const struct cred *cred = current_cred();
56396+#endif
56397+
56398+#ifdef CONFIG_GRKERNSEC_PROC_USER
56399+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
56400+ return net;
56401+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56402+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
56403+ return net;
56404+#endif
56405
56406 rcu_read_lock();
56407 task = pid_task(proc_pid(dir), PIDTYPE_PID);
56408diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
56409index 1827d88..43b0279 100644
56410--- a/fs/proc/proc_sysctl.c
56411+++ b/fs/proc/proc_sysctl.c
56412@@ -12,11 +12,15 @@
56413 #include <linux/module.h>
56414 #include "internal.h"
56415
56416+extern int gr_handle_chroot_sysctl(const int op);
56417+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
56418+ const int op);
56419+
56420 static const struct dentry_operations proc_sys_dentry_operations;
56421 static const struct file_operations proc_sys_file_operations;
56422-static const struct inode_operations proc_sys_inode_operations;
56423+const struct inode_operations proc_sys_inode_operations;
56424 static const struct file_operations proc_sys_dir_file_operations;
56425-static const struct inode_operations proc_sys_dir_operations;
56426+const struct inode_operations proc_sys_dir_operations;
56427
56428 void proc_sys_poll_notify(struct ctl_table_poll *poll)
56429 {
56430@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
56431
56432 err = NULL;
56433 d_set_d_op(dentry, &proc_sys_dentry_operations);
56434+
56435+ gr_handle_proc_create(dentry, inode);
56436+
56437 d_add(dentry, inode);
56438
56439 out:
56440@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
56441 struct inode *inode = filp->f_path.dentry->d_inode;
56442 struct ctl_table_header *head = grab_header(inode);
56443 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
56444+ int op = write ? MAY_WRITE : MAY_READ;
56445 ssize_t error;
56446 size_t res;
56447
56448@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
56449 * and won't be until we finish.
56450 */
56451 error = -EPERM;
56452- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
56453+ if (sysctl_perm(head, table, op))
56454 goto out;
56455
56456 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
56457@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
56458 if (!table->proc_handler)
56459 goto out;
56460
56461+#ifdef CONFIG_GRKERNSEC
56462+ error = -EPERM;
56463+ if (gr_handle_chroot_sysctl(op))
56464+ goto out;
56465+ dget(filp->f_path.dentry);
56466+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
56467+ dput(filp->f_path.dentry);
56468+ goto out;
56469+ }
56470+ dput(filp->f_path.dentry);
56471+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
56472+ goto out;
56473+ if (write && !capable(CAP_SYS_ADMIN))
56474+ goto out;
56475+#endif
56476+
56477 /* careful: calling conventions are nasty here */
56478 res = count;
56479 error = table->proc_handler(table, write, buf, &res, ppos);
56480@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
56481 return -ENOMEM;
56482 } else {
56483 d_set_d_op(child, &proc_sys_dentry_operations);
56484+
56485+ gr_handle_proc_create(child, inode);
56486+
56487 d_add(child, inode);
56488 }
56489 } else {
56490@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
56491 if ((*pos)++ < file->f_pos)
56492 return 0;
56493
56494+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
56495+ return 0;
56496+
56497 if (unlikely(S_ISLNK(table->mode)))
56498 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
56499 else
56500@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
56501 if (IS_ERR(head))
56502 return PTR_ERR(head);
56503
56504+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
56505+ return -ENOENT;
56506+
56507 generic_fillattr(inode, stat);
56508 if (table)
56509 stat->mode = (stat->mode & S_IFMT) | table->mode;
56510@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
56511 .llseek = generic_file_llseek,
56512 };
56513
56514-static const struct inode_operations proc_sys_inode_operations = {
56515+const struct inode_operations proc_sys_inode_operations = {
56516 .permission = proc_sys_permission,
56517 .setattr = proc_sys_setattr,
56518 .getattr = proc_sys_getattr,
56519 };
56520
56521-static const struct inode_operations proc_sys_dir_operations = {
56522+const struct inode_operations proc_sys_dir_operations = {
56523 .lookup = proc_sys_lookup,
56524 .permission = proc_sys_permission,
56525 .setattr = proc_sys_setattr,
56526@@ -854,7 +887,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
56527 static struct ctl_dir *new_dir(struct ctl_table_set *set,
56528 const char *name, int namelen)
56529 {
56530- struct ctl_table *table;
56531+ ctl_table_no_const *table;
56532 struct ctl_dir *new;
56533 struct ctl_node *node;
56534 char *new_name;
56535@@ -866,7 +899,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
56536 return NULL;
56537
56538 node = (struct ctl_node *)(new + 1);
56539- table = (struct ctl_table *)(node + 1);
56540+ table = (ctl_table_no_const *)(node + 1);
56541 new_name = (char *)(table + 2);
56542 memcpy(new_name, name, namelen);
56543 new_name[namelen] = '\0';
56544@@ -1035,7 +1068,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
56545 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
56546 struct ctl_table_root *link_root)
56547 {
56548- struct ctl_table *link_table, *entry, *link;
56549+ ctl_table_no_const *link_table, *link;
56550+ struct ctl_table *entry;
56551 struct ctl_table_header *links;
56552 struct ctl_node *node;
56553 char *link_name;
56554@@ -1058,7 +1092,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
56555 return NULL;
56556
56557 node = (struct ctl_node *)(links + 1);
56558- link_table = (struct ctl_table *)(node + nr_entries);
56559+ link_table = (ctl_table_no_const *)(node + nr_entries);
56560 link_name = (char *)&link_table[nr_entries + 1];
56561
56562 for (link = link_table, entry = table; entry->procname; link++, entry++) {
56563@@ -1306,8 +1340,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56564 struct ctl_table_header ***subheader, struct ctl_table_set *set,
56565 struct ctl_table *table)
56566 {
56567- struct ctl_table *ctl_table_arg = NULL;
56568- struct ctl_table *entry, *files;
56569+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
56570+ struct ctl_table *entry;
56571 int nr_files = 0;
56572 int nr_dirs = 0;
56573 int err = -ENOMEM;
56574@@ -1319,10 +1353,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56575 nr_files++;
56576 }
56577
56578- files = table;
56579 /* If there are mixed files and directories we need a new table */
56580 if (nr_dirs && nr_files) {
56581- struct ctl_table *new;
56582+ ctl_table_no_const *new;
56583 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
56584 GFP_KERNEL);
56585 if (!files)
56586@@ -1340,7 +1373,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56587 /* Register everything except a directory full of subdirectories */
56588 if (nr_files || !nr_dirs) {
56589 struct ctl_table_header *header;
56590- header = __register_sysctl_table(set, path, files);
56591+ header = __register_sysctl_table(set, path, files ? files : table);
56592 if (!header) {
56593 kfree(ctl_table_arg);
56594 goto out;
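
The proc_sysctl.c hunks above serve the constification plugin: a ctl_table may now live in read-only memory, so register_leaf_sysctl_tables() only builds a writable (ctl_table_no_const) copy when it actually has to mutate entries, and every consumer takes the copy if it exists, else the original. A sketch of that copy-on-demand shape with stand-in types.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { const char *procname; };

static void register_table(const struct entry *t)
{
	printf("reg %s\n", t->procname);
}

static int register_leaf(const struct entry *table, size_t n, int need_copy)
{
	struct entry *files = NULL;	/* writable copy, built on demand */

	if (need_copy) {
		files = calloc(n, sizeof(*files));
		if (!files)
			return -1;
		memcpy(files, table, n * sizeof(*files));
		/* ...mutate the copy here, never the const original... */
	}
	register_table(files ? files : table);
	free(files);
	return 0;
}

int main(void)
{
	struct entry t[] = { { "kernel" } };
	return register_leaf(t, 1, 0);
}

The "files ? files : table" fallback is exactly why the patch drops the unconditional "files = table;" assignment: the original table must never be handed out through a non-const pointer.
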
56595diff --git a/fs/proc/root.c b/fs/proc/root.c
56596index 9c7fab1..ed1c8e0 100644
56597--- a/fs/proc/root.c
56598+++ b/fs/proc/root.c
56599@@ -180,7 +180,15 @@ void __init proc_root_init(void)
56600 #ifdef CONFIG_PROC_DEVICETREE
56601 proc_device_tree_init();
56602 #endif
56603+#ifdef CONFIG_GRKERNSEC_PROC_ADD
56604+#ifdef CONFIG_GRKERNSEC_PROC_USER
56605+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
56606+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56607+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
56608+#endif
56609+#else
56610 proc_mkdir("bus", NULL);
56611+#endif
56612 proc_sys_init();
56613 }
56614
56615diff --git a/fs/proc/self.c b/fs/proc/self.c
56616index aa5cc3b..c91a5d0 100644
56617--- a/fs/proc/self.c
56618+++ b/fs/proc/self.c
56619@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
56620 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
56621 void *cookie)
56622 {
56623- char *s = nd_get_link(nd);
56624+ const char *s = nd_get_link(nd);
56625 if (!IS_ERR(s))
56626 kfree(s);
56627 }
56628diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
56629index ca5ce7f..02c1cf0 100644
56630--- a/fs/proc/task_mmu.c
56631+++ b/fs/proc/task_mmu.c
56632@@ -11,12 +11,19 @@
56633 #include <linux/rmap.h>
56634 #include <linux/swap.h>
56635 #include <linux/swapops.h>
56636+#include <linux/grsecurity.h>
56637
56638 #include <asm/elf.h>
56639 #include <asm/uaccess.h>
56640 #include <asm/tlbflush.h>
56641 #include "internal.h"
56642
56643+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56644+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
56645+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
56646+ _mm->pax_flags & MF_PAX_SEGMEXEC))
56647+#endif
56648+
56649 void task_mem(struct seq_file *m, struct mm_struct *mm)
56650 {
56651 unsigned long data, text, lib, swap;
56652@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56653 "VmExe:\t%8lu kB\n"
56654 "VmLib:\t%8lu kB\n"
56655 "VmPTE:\t%8lu kB\n"
56656- "VmSwap:\t%8lu kB\n",
56657- hiwater_vm << (PAGE_SHIFT-10),
56658+ "VmSwap:\t%8lu kB\n"
56659+
56660+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56661+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
56662+#endif
56663+
56664+ ,hiwater_vm << (PAGE_SHIFT-10),
56665 total_vm << (PAGE_SHIFT-10),
56666 mm->locked_vm << (PAGE_SHIFT-10),
56667 mm->pinned_vm << (PAGE_SHIFT-10),
56668@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56669 data << (PAGE_SHIFT-10),
56670 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
56671 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
56672- swap << (PAGE_SHIFT-10));
56673+ swap << (PAGE_SHIFT-10)
56674+
56675+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56676+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56677+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
56678+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
56679+#else
56680+ , mm->context.user_cs_base
56681+ , mm->context.user_cs_limit
56682+#endif
56683+#endif
56684+
56685+ );
56686 }
56687
56688 unsigned long task_vsize(struct mm_struct *mm)
56689@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56690 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
56691 }
56692
56693- /* We don't show the stack guard page in /proc/maps */
56694+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56695+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
56696+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
56697+#else
56698 start = vma->vm_start;
56699- if (stack_guard_page_start(vma, start))
56700- start += PAGE_SIZE;
56701 end = vma->vm_end;
56702- if (stack_guard_page_end(vma, end))
56703- end -= PAGE_SIZE;
56704+#endif
56705
56706 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
56707 start,
56708@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56709 flags & VM_WRITE ? 'w' : '-',
56710 flags & VM_EXEC ? 'x' : '-',
56711 flags & VM_MAYSHARE ? 's' : 'p',
56712+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56713+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
56714+#else
56715 pgoff,
56716+#endif
56717 MAJOR(dev), MINOR(dev), ino, &len);
56718
56719 /*
56720@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56721 */
56722 if (file) {
56723 pad_len_spaces(m, len);
56724- seq_path(m, &file->f_path, "\n");
56725+ seq_path(m, &file->f_path, "\n\\");
56726 goto done;
56727 }
56728
56729@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56730 * Thread stack in /proc/PID/task/TID/maps or
56731 * the main process stack.
56732 */
56733- if (!is_pid || (vma->vm_start <= mm->start_stack &&
56734- vma->vm_end >= mm->start_stack)) {
56735+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
56736+ (vma->vm_start <= mm->start_stack &&
56737+ vma->vm_end >= mm->start_stack)) {
56738 name = "[stack]";
56739 } else {
56740 /* Thread stack in /proc/PID/maps */
56741@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
56742 struct proc_maps_private *priv = m->private;
56743 struct task_struct *task = priv->task;
56744
56745+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56746+ if (current->exec_id != m->exec_id) {
56747+ gr_log_badprocpid("maps");
56748+ return 0;
56749+ }
56750+#endif
56751+
56752 show_map_vma(m, vma, is_pid);
56753
56754 if (m->count < m->size) /* vma is copied successfully */
56755@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56756 .private = &mss,
56757 };
56758
56759+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56760+ if (current->exec_id != m->exec_id) {
56761+ gr_log_badprocpid("smaps");
56762+ return 0;
56763+ }
56764+#endif
56765 memset(&mss, 0, sizeof mss);
56766- mss.vma = vma;
56767- /* mmap_sem is held in m_start */
56768- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56769- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56770-
56771+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56772+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
56773+#endif
56774+ mss.vma = vma;
56775+ /* mmap_sem is held in m_start */
56776+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56777+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56779+ }
56780+#endif
56781 show_map_vma(m, vma, is_pid);
56782
56783 seq_printf(m,
56784@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56785 "KernelPageSize: %8lu kB\n"
56786 "MMUPageSize: %8lu kB\n"
56787 "Locked: %8lu kB\n",
56788+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56789+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
56790+#else
56791 (vma->vm_end - vma->vm_start) >> 10,
56792+#endif
56793 mss.resident >> 10,
56794 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
56795 mss.shared_clean >> 10,
56796@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56797 int n;
56798 char buffer[50];
56799
56800+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56801+ if (current->exec_id != m->exec_id) {
56802+ gr_log_badprocpid("numa_maps");
56803+ return 0;
56804+ }
56805+#endif
56806+
56807 if (!mm)
56808 return 0;
56809
56810@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56811 mpol_to_str(buffer, sizeof(buffer), pol);
56812 mpol_cond_put(pol);
56813
56814+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56815+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
56816+#else
56817 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
56818+#endif
56819
56820 if (file) {
56821 seq_printf(m, " file=");
56822- seq_path(m, &file->f_path, "\n\t= ");
56823+ seq_path(m, &file->f_path, "\n\t\\= ");
56824 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
56825 seq_printf(m, " heap");
56826 } else {
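
The exec_id checks repeated through task_mmu.c (and earlier in array.c and base.c) fence seq_file readers across exec: the generation recorded when the file was opened must match the reader's current one, so a descriptor opened before exec of a more privileged image cannot be replayed afterwards. A sketch of that fence; the field names are assumptions mirroring the patch.

#include <stdio.h>

struct seq_ctx  { unsigned long long exec_id; };	/* recorded at open */
struct task_ctx { unsigned long long exec_id; };	/* current generation */

static int show_sketch(const struct seq_ctx *m, const struct task_ctx *cur)
{
	if (cur->exec_id != m->exec_id) {
		fprintf(stderr, "badprocpid: stale reader\n");
		return 0;	/* show nothing, as in the patch */
	}
	printf("map output...\n");
	return 0;
}

int main(void)
{
	struct seq_ctx m = { 1 };
	struct task_ctx cur = { 2 };	/* an exec happened in between */
	return show_sketch(&m, &cur);
}
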
56827diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
56828index 1ccfa53..0848f95 100644
56829--- a/fs/proc/task_nommu.c
56830+++ b/fs/proc/task_nommu.c
56831@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56832 else
56833 bytes += kobjsize(mm);
56834
56835- if (current->fs && current->fs->users > 1)
56836+ if (current->fs && atomic_read(&current->fs->users) > 1)
56837 sbytes += kobjsize(current->fs);
56838 else
56839 bytes += kobjsize(current->fs);
56840@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
56841
56842 if (file) {
56843 pad_len_spaces(m, len);
56844- seq_path(m, &file->f_path, "");
56845+ seq_path(m, &file->f_path, "\n\\");
56846 } else if (mm) {
56847 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
56848
56849diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
56850index b00fcc9..e0c6381 100644
56851--- a/fs/qnx6/qnx6.h
56852+++ b/fs/qnx6/qnx6.h
56853@@ -74,7 +74,7 @@ enum {
56854 BYTESEX_BE,
56855 };
56856
56857-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56858+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56859 {
56860 if (sbi->s_bytesex == BYTESEX_LE)
56861 return le64_to_cpu((__force __le64)n);
56862@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
56863 return (__force __fs64)cpu_to_be64(n);
56864 }
56865
56866-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56867+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56868 {
56869 if (sbi->s_bytesex == BYTESEX_LE)
56870 return le32_to_cpu((__force __le32)n);
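
__intentional_overflow(-1) above is an annotation consumed by the size-overflow GCC plugin, telling it that wrap-around in the marked function is deliberate; on a compiler without the plugin it must expand to nothing. A sketch of how such a marker typically degrades; the SIZE_OVERFLOW_PLUGIN guard and attribute spelling here are assumptions, not the plugin's actual interface.

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)	/* expands to nothing */
#endif

static inline unsigned long long swab_sketch(unsigned long long n)
	__intentional_overflow(-1);

static inline unsigned long long swab_sketch(unsigned long long n)
{
	return n;	/* byte-order conversion elided; wrapping is expected */
}

int main(void) { return (int)swab_sketch(0); }
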
56871diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
56872index 16e8abb..2dcf914 100644
56873--- a/fs/quota/netlink.c
56874+++ b/fs/quota/netlink.c
56875@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
56876 void quota_send_warning(struct kqid qid, dev_t dev,
56877 const char warntype)
56878 {
56879- static atomic_t seq;
56880+ static atomic_unchecked_t seq;
56881 struct sk_buff *skb;
56882 void *msg_head;
56883 int ret;
56884@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
56885 "VFS: Not enough memory to send quota warning.\n");
56886 return;
56887 }
56888- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
56889+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
56890 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
56891 if (!msg_head) {
56892 printk(KERN_ERR
56893diff --git a/fs/readdir.c b/fs/readdir.c
56894index 5e69ef5..e5d9099 100644
56895--- a/fs/readdir.c
56896+++ b/fs/readdir.c
56897@@ -17,6 +17,7 @@
56898 #include <linux/security.h>
56899 #include <linux/syscalls.h>
56900 #include <linux/unistd.h>
56901+#include <linux/namei.h>
56902
56903 #include <asm/uaccess.h>
56904
56905@@ -67,6 +68,7 @@ struct old_linux_dirent {
56906
56907 struct readdir_callback {
56908 struct old_linux_dirent __user * dirent;
56909+ struct file * file;
56910 int result;
56911 };
56912
56913@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
56914 buf->result = -EOVERFLOW;
56915 return -EOVERFLOW;
56916 }
56917+
56918+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56919+ return 0;
56920+
56921 buf->result++;
56922 dirent = buf->dirent;
56923 if (!access_ok(VERIFY_WRITE, dirent,
56924@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
56925
56926 buf.result = 0;
56927 buf.dirent = dirent;
56928+ buf.file = f.file;
56929
56930 error = vfs_readdir(f.file, fillonedir, &buf);
56931 if (buf.result)
56932@@ -139,6 +146,7 @@ struct linux_dirent {
56933 struct getdents_callback {
56934 struct linux_dirent __user * current_dir;
56935 struct linux_dirent __user * previous;
56936+ struct file * file;
56937 int count;
56938 int error;
56939 };
56940@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
56941 buf->error = -EOVERFLOW;
56942 return -EOVERFLOW;
56943 }
56944+
56945+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56946+ return 0;
56947+
56948 dirent = buf->previous;
56949 if (dirent) {
56950 if (__put_user(offset, &dirent->d_off))
56951@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56952 buf.previous = NULL;
56953 buf.count = count;
56954 buf.error = 0;
56955+ buf.file = f.file;
56956
56957 error = vfs_readdir(f.file, filldir, &buf);
56958 if (error >= 0)
56959@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56960 struct getdents_callback64 {
56961 struct linux_dirent64 __user * current_dir;
56962 struct linux_dirent64 __user * previous;
56963+ struct file *file;
56964 int count;
56965 int error;
56966 };
56967@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
56968 buf->error = -EINVAL; /* only used if we fail.. */
56969 if (reclen > buf->count)
56970 return -EINVAL;
56971+
56972+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56973+ return 0;
56974+
56975 dirent = buf->previous;
56976 if (dirent) {
56977 if (__put_user(offset, &dirent->d_off))
56978@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56979
56980 buf.current_dir = dirent;
56981 buf.previous = NULL;
56982+ buf.file = f.file;
56983 buf.count = count;
56984 buf.error = 0;
56985
56986@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56987 error = buf.error;
56988 lastdirent = buf.previous;
56989 if (lastdirent) {
56990- typeof(lastdirent->d_off) d_off = f.file->f_pos;
56991+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
56992 if (__put_user(d_off, &lastdirent->d_off))
56993 error = -EFAULT;
56994 else
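
The fs/readdir.c hunks all extend the callback buffers with the open directory's struct file so the per-entry gr_acl_handle_filldir() filter has context; a filtered entry returns 0, which silently skips it while iteration continues. A user-space sketch of that callback-context pattern with a hypothetical visibility filter.

#include <stdio.h>
#include <string.h>

struct dir_ctx {
	const char *dir_name;	/* stands in for the struct file * member */
	int count;
};

static int entry_visible(const struct dir_ctx *ctx, const char *name)
{
	(void)ctx;
	return strcmp(name, "hidden") != 0;	/* gr_acl_handle_filldir stand-in */
}

static int filldir_sketch(struct dir_ctx *ctx, const char *name)
{
	if (!entry_visible(ctx, name))
		return 0;	/* skip silently, keep iterating */
	printf("%s\n", name);
	ctx->count++;
	return 0;
}

int main(void)
{
	struct dir_ctx ctx = { "/tmp", 0 };
	filldir_sketch(&ctx, "visible");
	filldir_sketch(&ctx, "hidden");
	return ctx.count == 1 ? 0 : 1;
}
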
56995diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
56996index 2b7882b..1c5ef48 100644
56997--- a/fs/reiserfs/do_balan.c
56998+++ b/fs/reiserfs/do_balan.c
56999@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
57000 return;
57001 }
57002
57003- atomic_inc(&(fs_generation(tb->tb_sb)));
57004+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
57005 do_balance_starts(tb);
57006
57007 /* balance leaf returns 0 except if combining L R and S into
57008diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
57009index e60e870..f40ac16 100644
57010--- a/fs/reiserfs/procfs.c
57011+++ b/fs/reiserfs/procfs.c
57012@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
57013 "SMALL_TAILS " : "NO_TAILS ",
57014 replay_only(sb) ? "REPLAY_ONLY " : "",
57015 convert_reiserfs(sb) ? "CONV " : "",
57016- atomic_read(&r->s_generation_counter),
57017+ atomic_read_unchecked(&r->s_generation_counter),
57018 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
57019 SF(s_do_balance), SF(s_unneeded_left_neighbor),
57020 SF(s_good_search_by_key_reada), SF(s_bmaps),
57021diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
57022index 157e474..65a6114 100644
57023--- a/fs/reiserfs/reiserfs.h
57024+++ b/fs/reiserfs/reiserfs.h
57025@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
57026 /* Comment? -Hans */
57027 wait_queue_head_t s_wait;
57028 /* To be obsoleted soon by per buffer seals.. -Hans */
57029- atomic_t s_generation_counter; // increased by one every time the
57030+ atomic_unchecked_t s_generation_counter; // increased by one every time the
57031 // tree gets re-balanced
57032 unsigned long s_properties; /* File system properties. Currently holds
57033 on-disk FS format */
57034@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
57035 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
57036
57037 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
57038-#define get_generation(s) atomic_read (&fs_generation(s))
57039+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
57040 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
57041 #define __fs_changed(gen,s) (gen != get_generation (s))
57042 #define fs_changed(gen,s) \
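The reiserfs generation-counter conversions above (and the pipe and sysfs counters later in this patch) all target the same mechanism: under PaX's REFCOUNT hardening, plain atomic_t operations gain overflow detection, so counters whose wraparound is harmless are moved to a parallel atomic_unchecked_t type whose _unchecked accessors keep the traditional wrapping semantics. A rough sketch of the type split, assuming PaX-style definitions rather than quoting them:

    /* Sketch only: a separate type for deliberately wrapping counters,
     * so the overflow-checked atomic_t can stay the default. */
    typedef struct {
        int counter;
    } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return *(volatile const int *)&v->counter;
    }

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* plain wrapping increment, exempt from REFCOUNT checks */
        __sync_fetch_and_add(&v->counter, 1);
    }

Since __fs_changed() only compares generations for inequality, wraparound of s_generation_counter is benign and the unchecked type is the appropriate choice.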
57043diff --git a/fs/select.c b/fs/select.c
57044index 2ef72d9..f213b17 100644
57045--- a/fs/select.c
57046+++ b/fs/select.c
57047@@ -20,6 +20,7 @@
57048 #include <linux/export.h>
57049 #include <linux/slab.h>
57050 #include <linux/poll.h>
57051+#include <linux/security.h>
57052 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
57053 #include <linux/file.h>
57054 #include <linux/fdtable.h>
57055@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
57056 struct poll_list *walk = head;
57057 unsigned long todo = nfds;
57058
57059+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
57060 if (nfds > rlimit(RLIMIT_NOFILE))
57061 return -EINVAL;
57062
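gr_learn_resource() is called just before the rlimit comparison so that, in RBAC learning mode, the largest nfds a subject legitimately requests can be recorded and later turned into a tight RLIMIT_NOFILE policy. The real hook also receives the task and a greater-than flag; the self-contained stub below only illustrates the high-water-mark idea and is not the grsecurity implementation:

    #include <sys/resource.h>

    #ifndef RLIM_NLIMITS
    #define RLIM_NLIMITS 16   /* portability fallback */
    #endif

    /* Hypothetical learning stub: remember the peak request per resource. */
    static unsigned long learned_max[RLIM_NLIMITS];

    static void learn_resource(int res, unsigned long wanted)
    {
        if (res >= 0 && res < RLIM_NLIMITS && wanted > learned_max[res])
            learned_max[res] = wanted;   /* new high-water mark */
    }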
57063diff --git a/fs/seq_file.c b/fs/seq_file.c
57064index f2bc3df..239d4f6 100644
57065--- a/fs/seq_file.c
57066+++ b/fs/seq_file.c
57067@@ -10,6 +10,7 @@
57068 #include <linux/seq_file.h>
57069 #include <linux/slab.h>
57070 #include <linux/cred.h>
57071+#include <linux/sched.h>
57072
57073 #include <asm/uaccess.h>
57074 #include <asm/page.h>
57075@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
57076 #ifdef CONFIG_USER_NS
57077 p->user_ns = file->f_cred->user_ns;
57078 #endif
57079+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57080+ p->exec_id = current->exec_id;
57081+#endif
57082
57083 /*
57084 * Wrappers around seq_open(e.g. swaps_open) need to be
57085@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
57086 return 0;
57087 }
57088 if (!m->buf) {
57089- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
57090+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
57091 if (!m->buf)
57092 return -ENOMEM;
57093 }
57094@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
57095 Eoverflow:
57096 m->op->stop(m, p);
57097 kfree(m->buf);
57098- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
57099+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
57100 return !m->buf ? -ENOMEM : -EAGAIN;
57101 }
57102
57103@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
57104
57105 /* grab buffer if we didn't have one */
57106 if (!m->buf) {
57107- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
57108+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
57109 if (!m->buf)
57110 goto Enomem;
57111 }
57112@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
57113 goto Fill;
57114 m->op->stop(m, p);
57115 kfree(m->buf);
57116- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
57117+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
57118 if (!m->buf)
57119 goto Enomem;
57120 m->count = 0;
57121@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
57122 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
57123 void *data)
57124 {
57125- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
57126+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
57127 int res = -ENOMEM;
57128
57129 if (op) {
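Two independent hardenings appear in this file. The GFP_USERCOPY flag steers the seq_file buffer into slabs whitelisted for user copies under PAX_USERCOPY, and the exec_id field records which program image opened the seq_file so that readers running a different image can be refused under GRKERNSEC_PROC_MEMMAP. A self-contained sketch of the exec-generation idea, with illustrative names throughout:

    #include <stdint.h>
    #include <errno.h>

    /* Hypothetical exec generation: bumped on every successful execve(). */
    static uint64_t current_exec_id;

    struct seq_ctx {
        uint64_t exec_id;   /* generation of the task that opened the file */
    };

    static int seq_check_opener(const struct seq_ctx *m)
    {
        if (m->exec_id != current_exec_id)
            return -EPERM;  /* fd crossed an exec boundary: refuse */
        return 0;
    }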
57130diff --git a/fs/splice.c b/fs/splice.c
57131index 6909d89..5b2e8f9 100644
57132--- a/fs/splice.c
57133+++ b/fs/splice.c
57134@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
57135 pipe_lock(pipe);
57136
57137 for (;;) {
57138- if (!pipe->readers) {
57139+ if (!atomic_read(&pipe->readers)) {
57140 send_sig(SIGPIPE, current, 0);
57141 if (!ret)
57142 ret = -EPIPE;
57143@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
57144 do_wakeup = 0;
57145 }
57146
57147- pipe->waiting_writers++;
57148+ atomic_inc(&pipe->waiting_writers);
57149 pipe_wait(pipe);
57150- pipe->waiting_writers--;
57151+ atomic_dec(&pipe->waiting_writers);
57152 }
57153
57154 pipe_unlock(pipe);
57155@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
57156 old_fs = get_fs();
57157 set_fs(get_ds());
57158 /* The cast to a user pointer is valid due to the set_fs() */
57159- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
57160+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
57161 set_fs(old_fs);
57162
57163 return res;
57164@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
57165 old_fs = get_fs();
57166 set_fs(get_ds());
57167 /* The cast to a user pointer is valid due to the set_fs() */
57168- res = vfs_write(file, (const char __user *)buf, count, &pos);
57169+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
57170 set_fs(old_fs);
57171
57172 return res;
57173@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
57174 goto err;
57175
57176 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
57177- vec[i].iov_base = (void __user *) page_address(page);
57178+ vec[i].iov_base = (void __force_user *) page_address(page);
57179 vec[i].iov_len = this_len;
57180 spd.pages[i] = page;
57181 spd.nr_pages++;
57182@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
57183 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
57184 {
57185 while (!pipe->nrbufs) {
57186- if (!pipe->writers)
57187+ if (!atomic_read(&pipe->writers))
57188 return 0;
57189
57190- if (!pipe->waiting_writers && sd->num_spliced)
57191+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
57192 return 0;
57193
57194 if (sd->flags & SPLICE_F_NONBLOCK)
57195@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
57196 * out of the pipe right after the splice_to_pipe(). So set
57197 * PIPE_READERS appropriately.
57198 */
57199- pipe->readers = 1;
57200+ atomic_set(&pipe->readers, 1);
57201
57202 current->splice_pipe = pipe;
57203 }
57204@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
57205 ret = -ERESTARTSYS;
57206 break;
57207 }
57208- if (!pipe->writers)
57209+ if (!atomic_read(&pipe->writers))
57210 break;
57211- if (!pipe->waiting_writers) {
57212+ if (!atomic_read(&pipe->waiting_writers)) {
57213 if (flags & SPLICE_F_NONBLOCK) {
57214 ret = -EAGAIN;
57215 break;
57216@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
57217 pipe_lock(pipe);
57218
57219 while (pipe->nrbufs >= pipe->buffers) {
57220- if (!pipe->readers) {
57221+ if (!atomic_read(&pipe->readers)) {
57222 send_sig(SIGPIPE, current, 0);
57223 ret = -EPIPE;
57224 break;
57225@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
57226 ret = -ERESTARTSYS;
57227 break;
57228 }
57229- pipe->waiting_writers++;
57230+ atomic_inc(&pipe->waiting_writers);
57231 pipe_wait(pipe);
57232- pipe->waiting_writers--;
57233+ atomic_dec(&pipe->waiting_writers);
57234 }
57235
57236 pipe_unlock(pipe);
57237@@ -1823,14 +1823,14 @@ retry:
57238 pipe_double_lock(ipipe, opipe);
57239
57240 do {
57241- if (!opipe->readers) {
57242+ if (!atomic_read(&opipe->readers)) {
57243 send_sig(SIGPIPE, current, 0);
57244 if (!ret)
57245 ret = -EPIPE;
57246 break;
57247 }
57248
57249- if (!ipipe->nrbufs && !ipipe->writers)
57250+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
57251 break;
57252
57253 /*
57254@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
57255 pipe_double_lock(ipipe, opipe);
57256
57257 do {
57258- if (!opipe->readers) {
57259+ if (!atomic_read(&opipe->readers)) {
57260 send_sig(SIGPIPE, current, 0);
57261 if (!ret)
57262 ret = -EPIPE;
57263@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
57264 * return EAGAIN if we have the potential of some data in the
57265 * future, otherwise just return 0
57266 */
57267- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
57268+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
57269 ret = -EAGAIN;
57270
57271 pipe_unlock(ipipe);
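kernel_readv() and kernel_write() temporarily widen the address limit with set_fs(get_ds()) so a kernel buffer can travel through an interface that expects __user pointers. With PaX's stricter user/kernel pointer separation the plain __user cast no longer suffices, so __force_user marks the override as deliberate for static checkers. A sketch of the sparse-style annotations involved, assuming conventional __CHECKER__ macros:

    #ifdef __CHECKER__
    # define __user      __attribute__((noderef, address_space(1)))
    # define __force     __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user

    /* Stand-in for an interface that expects a user pointer. */
    static long fake_write(const char __user *buf, unsigned long count)
    {
        (void)buf;
        return (long)count;
    }

    static long write_kernel_buffer(const char *kbuf, unsigned long count)
    {
        /* Only valid while the address limit covers kernel space, as
         * with set_fs(get_ds()) in the hunks above; the annotation
         * keeps the deliberate cast visible to sparse. */
        return fake_write((const char __force_user *)kbuf, count);
    }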
57272diff --git a/fs/stat.c b/fs/stat.c
57273index 14f4545..9b7f55b 100644
57274--- a/fs/stat.c
57275+++ b/fs/stat.c
57276@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
57277 stat->gid = inode->i_gid;
57278 stat->rdev = inode->i_rdev;
57279 stat->size = i_size_read(inode);
57280- stat->atime = inode->i_atime;
57281- stat->mtime = inode->i_mtime;
57282+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
57283+ stat->atime = inode->i_ctime;
57284+ stat->mtime = inode->i_ctime;
57285+ } else {
57286+ stat->atime = inode->i_atime;
57287+ stat->mtime = inode->i_mtime;
57288+ }
57289 stat->ctime = inode->i_ctime;
57290 stat->blksize = (1 << inode->i_blkbits);
57291 stat->blocks = inode->i_blocks;
57292@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
57293 if (retval)
57294 return retval;
57295
57296- if (inode->i_op->getattr)
57297- return inode->i_op->getattr(mnt, dentry, stat);
57298+ if (inode->i_op->getattr) {
57299+ retval = inode->i_op->getattr(mnt, dentry, stat);
57300+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
57301+ stat->atime = stat->ctime;
57302+ stat->mtime = stat->ctime;
57303+ }
57304+ return retval;
57305+ }
57306
57307 generic_fillattr(inode, stat);
57308 return 0;
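Both generic_fillattr() and vfs_getattr() now overwrite atime/mtime with ctime when an unprivileged caller stats a device node, implementing the GRKERNSEC_DEVICE_SIDECHANNEL option described later in this patch. A plausible shape for the predicate, written as a kernel-style sketch and not quoted from the patch:

    /* Sketch: only world-accessible character/block device inodes are
     * masked; everything else keeps real timestamps. */
    static inline int is_sidechannel_device(const struct inode *inode)
    {
    #ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
        umode_t mode = inode->i_mode;
        return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
    #else
        return 0;
    #endif
    }

Masking with ctime rather than zeroing keeps stat() output plausible while denying the timing signal; per the Kconfig help, this defeats tricks such as inferring the administrator's password length from /dev/ptmx access times.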
57309diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
57310index 614b2b5..4d321e6 100644
57311--- a/fs/sysfs/bin.c
57312+++ b/fs/sysfs/bin.c
57313@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
57314 return ret;
57315 }
57316
57317-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
57318- void *buf, int len, int write)
57319+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
57320+ void *buf, size_t len, int write)
57321 {
57322 struct file *file = vma->vm_file;
57323 struct bin_buffer *bb = file->private_data;
57324 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
57325- int ret;
57326+ ssize_t ret;
57327
57328 if (!bb->vm_ops)
57329 return -EINVAL;
57330diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
57331index 1f8c823..ed57cfe 100644
57332--- a/fs/sysfs/dir.c
57333+++ b/fs/sysfs/dir.c
57334@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
57335 *
57336 * Returns 31 bit hash of ns + name (so it fits in an off_t )
57337 */
57338-static unsigned int sysfs_name_hash(const void *ns, const char *name)
57339+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
57340 {
57341 unsigned long hash = init_name_hash();
57342 unsigned int len = strlen(name);
57343@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
57344 struct sysfs_dirent *sd;
57345 int rc;
57346
57347+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
57348+ const char *parent_name = parent_sd->s_name;
57349+
57350+ mode = S_IFDIR | S_IRWXU;
57351+
57352+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
57353+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
57354+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
57355+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
57356+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
57357+#endif
57358+
57359 /* allocate */
57360 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
57361 if (!sd)
57362diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
57363index 602f56d..6853db8 100644
57364--- a/fs/sysfs/file.c
57365+++ b/fs/sysfs/file.c
57366@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
57367
57368 struct sysfs_open_dirent {
57369 atomic_t refcnt;
57370- atomic_t event;
57371+ atomic_unchecked_t event;
57372 wait_queue_head_t poll;
57373 struct list_head buffers; /* goes through sysfs_buffer.list */
57374 };
57375@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
57376 if (!sysfs_get_active(attr_sd))
57377 return -ENODEV;
57378
57379- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
57380+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
57381 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
57382
57383 sysfs_put_active(attr_sd);
57384@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
57385 return -ENOMEM;
57386
57387 atomic_set(&new_od->refcnt, 0);
57388- atomic_set(&new_od->event, 1);
57389+ atomic_set_unchecked(&new_od->event, 1);
57390 init_waitqueue_head(&new_od->poll);
57391 INIT_LIST_HEAD(&new_od->buffers);
57392 goto retry;
57393@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
57394
57395 sysfs_put_active(attr_sd);
57396
57397- if (buffer->event != atomic_read(&od->event))
57398+ if (buffer->event != atomic_read_unchecked(&od->event))
57399 goto trigger;
57400
57401 return DEFAULT_POLLMASK;
57402@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
57403
57404 od = sd->s_attr.open;
57405 if (od) {
57406- atomic_inc(&od->event);
57407+ atomic_inc_unchecked(&od->event);
57408 wake_up_interruptible(&od->poll);
57409 }
57410
57411diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
57412index 3c9eb56..9dea5be 100644
57413--- a/fs/sysfs/symlink.c
57414+++ b/fs/sysfs/symlink.c
57415@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
57416
57417 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
57418 {
57419- char *page = nd_get_link(nd);
57420+ const char *page = nd_get_link(nd);
57421 if (!IS_ERR(page))
57422 free_page((unsigned long)page);
57423 }
57424diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
57425index 69d4889..a810bd4 100644
57426--- a/fs/sysv/sysv.h
57427+++ b/fs/sysv/sysv.h
57428@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
57429 #endif
57430 }
57431
57432-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
57433+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
57434 {
57435 if (sbi->s_bytesex == BYTESEX_PDP)
57436 return PDP_swab((__force __u32)n);
57437diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
57438index e18b988..f1d4ad0f 100644
57439--- a/fs/ubifs/io.c
57440+++ b/fs/ubifs/io.c
57441@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
57442 return err;
57443 }
57444
57445-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
57446+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
57447 {
57448 int err;
57449
57450diff --git a/fs/udf/misc.c b/fs/udf/misc.c
57451index c175b4d..8f36a16 100644
57452--- a/fs/udf/misc.c
57453+++ b/fs/udf/misc.c
57454@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
57455
57456 u8 udf_tag_checksum(const struct tag *t)
57457 {
57458- u8 *data = (u8 *)t;
57459+ const u8 *data = (const u8 *)t;
57460 u8 checksum = 0;
57461 int i;
57462 for (i = 0; i < sizeof(struct tag); ++i)
57463diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
57464index 8d974c4..b82f6ec 100644
57465--- a/fs/ufs/swab.h
57466+++ b/fs/ufs/swab.h
57467@@ -22,7 +22,7 @@ enum {
57468 BYTESEX_BE
57469 };
57470
57471-static inline u64
57472+static inline u64 __intentional_overflow(-1)
57473 fs64_to_cpu(struct super_block *sbp, __fs64 n)
57474 {
57475 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
57476@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
57477 return (__force __fs64)cpu_to_be64(n);
57478 }
57479
57480-static inline u32
57481+static inline u32 __intentional_overflow(-1)
57482 fs32_to_cpu(struct super_block *sbp, __fs32 n)
57483 {
57484 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
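The __intentional_overflow(-1) annotations here and in fs/sysv and fs/ubifs mark functions that the size_overflow GCC plugin must leave alone: byte-order helpers legitimately traverse the full value range, so inserted overflow traps would false-positive. A sketch of the annotation's likely shape, assuming the plugin convention that -1 designates the return value; this is not quoted from the patch:

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif

    /* Wraparound here is by design, so the plugin is told to skip it. */
    static inline unsigned int __intentional_overflow(-1)
    wrap_add(unsigned int a, unsigned int b)
    {
        return a + b;
    }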
57485diff --git a/fs/utimes.c b/fs/utimes.c
57486index f4fb7ec..3fe03c0 100644
57487--- a/fs/utimes.c
57488+++ b/fs/utimes.c
57489@@ -1,6 +1,7 @@
57490 #include <linux/compiler.h>
57491 #include <linux/file.h>
57492 #include <linux/fs.h>
57493+#include <linux/security.h>
57494 #include <linux/linkage.h>
57495 #include <linux/mount.h>
57496 #include <linux/namei.h>
57497@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
57498 goto mnt_drop_write_and_out;
57499 }
57500 }
57501+
57502+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
57503+ error = -EACCES;
57504+ goto mnt_drop_write_and_out;
57505+ }
57506+
57507 mutex_lock(&inode->i_mutex);
57508 error = notify_change(path->dentry, &newattrs);
57509 mutex_unlock(&inode->i_mutex);
57510diff --git a/fs/xattr.c b/fs/xattr.c
57511index 3377dff..4feded6 100644
57512--- a/fs/xattr.c
57513+++ b/fs/xattr.c
57514@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
57515 * Extended attribute SET operations
57516 */
57517 static long
57518-setxattr(struct dentry *d, const char __user *name, const void __user *value,
57519+setxattr(struct path *path, const char __user *name, const void __user *value,
57520 size_t size, int flags)
57521 {
57522 int error;
57523@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
57524 posix_acl_fix_xattr_from_user(kvalue, size);
57525 }
57526
57527- error = vfs_setxattr(d, kname, kvalue, size, flags);
57528+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
57529+ error = -EACCES;
57530+ goto out;
57531+ }
57532+
57533+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
57534 out:
57535 if (vvalue)
57536 vfree(vvalue);
57537@@ -377,7 +382,7 @@ retry:
57538 return error;
57539 error = mnt_want_write(path.mnt);
57540 if (!error) {
57541- error = setxattr(path.dentry, name, value, size, flags);
57542+ error = setxattr(&path, name, value, size, flags);
57543 mnt_drop_write(path.mnt);
57544 }
57545 path_put(&path);
57546@@ -401,7 +406,7 @@ retry:
57547 return error;
57548 error = mnt_want_write(path.mnt);
57549 if (!error) {
57550- error = setxattr(path.dentry, name, value, size, flags);
57551+ error = setxattr(&path, name, value, size, flags);
57552 mnt_drop_write(path.mnt);
57553 }
57554 path_put(&path);
57555@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
57556 const void __user *,value, size_t, size, int, flags)
57557 {
57558 struct fd f = fdget(fd);
57559- struct dentry *dentry;
57560 int error = -EBADF;
57561
57562 if (!f.file)
57563 return error;
57564- dentry = f.file->f_path.dentry;
57565- audit_inode(NULL, dentry, 0);
57566+ audit_inode(NULL, f.file->f_path.dentry, 0);
57567 error = mnt_want_write_file(f.file);
57568 if (!error) {
57569- error = setxattr(dentry, name, value, size, flags);
57570+ error = setxattr(&f.file->f_path, name, value, size, flags);
57571 mnt_drop_write_file(f.file);
57572 }
57573 fdput(f);
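The setxattr() refactor threads a struct path (dentry plus vfsmount) down to the point of the policy check, because gr_acl_handle_setxattr() takes both the dentry and the mount: a bare dentry cannot tell the hook which mount the object was reached through. A self-contained sketch of the pattern, with stand-in types and a hypothetical policy hook:

    #include <errno.h>

    struct dentry;
    struct vfsmount;
    struct path { struct vfsmount *mnt; struct dentry *dentry; };

    /* Hypothetical mount-aware policy hook (stand-in, always allows). */
    static int policy_allows_setxattr(const struct path *path)
    {
        (void)path;
        return 1;
    }

    static long do_setxattr_checked(const struct path *path)
    {
        if (!policy_allows_setxattr(path))
            return -EACCES;
        /* ... proceed with vfs_setxattr(path->dentry, ...) ... */
        return 0;
    }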
57574diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
57575index 9fbea87..6b19972 100644
57576--- a/fs/xattr_acl.c
57577+++ b/fs/xattr_acl.c
57578@@ -76,8 +76,8 @@ struct posix_acl *
57579 posix_acl_from_xattr(struct user_namespace *user_ns,
57580 const void *value, size_t size)
57581 {
57582- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
57583- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
57584+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
57585+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
57586 int count;
57587 struct posix_acl *acl;
57588 struct posix_acl_entry *acl_e;
57589diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
57590index 572a858..12a9b0d 100644
57591--- a/fs/xfs/xfs_bmap.c
57592+++ b/fs/xfs/xfs_bmap.c
57593@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
57594 int nmap,
57595 int ret_nmap);
57596 #else
57597-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
57598+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
57599 #endif /* DEBUG */
57600
57601 STATIC int
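Giving the debug-only macro a do { } while (0) body instead of an empty expansion is standard macro hygiene: the empty version leaves a bare ';' behind each call, which trips -Wempty-body under if statements, while the do/while form is a single real statement that also demands its trailing semicolon. For instance:

    #define validate_empty()                 /* expands to nothing */
    #define validate_safe() do { } while (0)

    int check(int cond)
    {
        if (cond)
            validate_empty();   /* becomes `if (cond) ;` -- GCC's
                                 * -Wempty-body warns about this   */
        if (cond)
            validate_safe();    /* a proper single statement       */
        return 0;
    }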
57602diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
57603index 1b9fc3e..e1bdde0 100644
57604--- a/fs/xfs/xfs_dir2_sf.c
57605+++ b/fs/xfs/xfs_dir2_sf.c
57606@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
57607 }
57608
57609 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
57610- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57611+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
57612+ char name[sfep->namelen];
57613+ memcpy(name, sfep->name, sfep->namelen);
57614+ if (filldir(dirent, name, sfep->namelen,
57615+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
57616+ *offset = off & 0x7fffffff;
57617+ return 0;
57618+ }
57619+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57620 off & 0x7fffffff, ino, DT_UNKNOWN)) {
57621 *offset = off & 0x7fffffff;
57622 return 0;
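When the shortform directory keeps its entries inline inside the in-core inode (if_inline_data), the entry name is bounced through a stack buffer before filldir() copies it toward userland. The likely motivation, hedged since the patch does not say, is PAX_USERCOPY: a copy sourced from the middle of the inode slab object would trip the hardened usercopy checks, while a stack buffer has unambiguous bounds. The generic shape of the bounce, with checked_emit() as a hypothetical stand-in for filldir()'s copy path:

    #include <string.h>

    static int checked_emit(const char *name, size_t namlen)
    {
        (void)name;
        (void)namlen;
        return 0;   /* stand-in for the copy-toward-user path */
    }

    static int emit_inline_name(const char *embedded, size_t namlen)
    {
        char name[namlen];                /* VLA, as in the hunk above */
        memcpy(name, embedded, namlen);   /* stack copy: clear bounds  */
        return checked_emit(name, namlen);
    }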
57623diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
57624index c1c3ef8..0952438 100644
57625--- a/fs/xfs/xfs_ioctl.c
57626+++ b/fs/xfs/xfs_ioctl.c
57627@@ -127,7 +127,7 @@ xfs_find_handle(
57628 }
57629
57630 error = -EFAULT;
57631- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
57632+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
57633 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
57634 goto out_put;
57635
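The added `hsize > sizeof handle` guard makes the copy length provably bounded by the source object instead of trusting a computed size, closing any path by which an oversized hsize could leak adjacent stack memory through copy_to_user(). The generic shape of the guard, with copy_out() standing in for copy_to_user():

    #include <errno.h>
    #include <string.h>

    static int copy_out(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);   /* userspace stand-in for copy_to_user() */
        return 0;
    }

    static int export_object(void *dst, const void *obj,
                             size_t obj_size, size_t requested)
    {
        if (requested > obj_size)   /* bound the copy by the object */
            return -EFAULT;
        return copy_out(dst, obj, requested);
    }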
57636diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
57637index d82efaa..0904a8e 100644
57638--- a/fs/xfs/xfs_iops.c
57639+++ b/fs/xfs/xfs_iops.c
57640@@ -395,7 +395,7 @@ xfs_vn_put_link(
57641 struct nameidata *nd,
57642 void *p)
57643 {
57644- char *s = nd_get_link(nd);
57645+ const char *s = nd_get_link(nd);
57646
57647 if (!IS_ERR(s))
57648 kfree(s);
57649diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
57650new file mode 100644
57651index 0000000..92247e4
57652--- /dev/null
57653+++ b/grsecurity/Kconfig
57654@@ -0,0 +1,1021 @@
57655+#
57656+# grsecurity configuration
57657+#
57658+menu "Memory Protections"
57659+depends on GRKERNSEC
57660+
57661+config GRKERNSEC_KMEM
57662+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
57663+ default y if GRKERNSEC_CONFIG_AUTO
57664+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
57665+ help
57666+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
57667+ be written to or read from to modify or leak the contents of the running
57668+ kernel. /dev/port will also not be allowed to be opened and support
57669+ for /dev/cpu/*/msr will be removed. If you have module
57670+ support disabled, enabling this will close up five ways that are
57671+ currently used to insert malicious code into the running kernel.
57672+
57673+ Even with all these features enabled, we still highly recommend that
57674+ you use the RBAC system, as it is still possible for an attacker to
57675+ modify the running kernel through privileged I/O granted by ioperm/iopl.
57676+
57677+ If you are not using XFree86, you may be able to stop this additional
57678+ case by enabling the 'Disable privileged I/O' option. Though nothing
57679+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
57680+ but only to video memory, which is the only writing we allow in this
57681+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
57682+ not be allowed to mprotect it with PROT_WRITE later.
57683+ Enabling this feature will prevent the "cpupower" and "powertop" tools
57684+ from working.
57685+
57686+ It is highly recommended that you say Y here if you meet all the
57687+ conditions above.
57688+
57689+config GRKERNSEC_VM86
57690+ bool "Restrict VM86 mode"
57691+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57692+ depends on X86_32
57693+
57694+ help
57695+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
57696+ make use of a special execution mode on 32bit x86 processors called
57697+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
57698+ video cards and will still work with this option enabled. The purpose
57699+ of the option is to prevent exploitation of emulation errors in
57700+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
57701+ Nearly all users should be able to enable this option.
57702+
57703+config GRKERNSEC_IO
57704+ bool "Disable privileged I/O"
57705+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57706+ depends on X86
57707+ select RTC_CLASS
57708+ select RTC_INTF_DEV
57709+ select RTC_DRV_CMOS
57710+
57711+ help
57712+ If you say Y here, all ioperm and iopl calls will return an error.
57713+ Ioperm and iopl can be used to modify the running kernel.
57714+ Unfortunately, some programs need this access to operate properly,
57715+ the most notable of which are XFree86 and hwclock. hwclock can be
57716+ remedied by having RTC support in the kernel, so real-time
57717+ clock support is enabled if this option is enabled, to ensure
57718+ that hwclock operates correctly. XFree86 still will not
57719+ operate correctly with this option enabled, so DO NOT CHOOSE Y
57720+ IF YOU USE XFree86. If you use XFree86 and you still want to
57721+ protect your kernel against modification, use the RBAC system.
57722+
57723+config GRKERNSEC_JIT_HARDEN
57724+ bool "Harden BPF JIT against spray attacks"
57725+ default y if GRKERNSEC_CONFIG_AUTO
57726+ depends on BPF_JIT
57727+ help
57728+ If you say Y here, the native code generated by the kernel's Berkeley
57729+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
57730+ attacks that attempt to fit attacker-beneficial instructions in
57731+ 32bit immediate fields of JIT-generated native instructions. The
57732+ attacker will generally aim to cause an unintended instruction sequence
57733+ of JIT-generated native code to execute by jumping into the middle of
57734+ a generated instruction. This feature effectively randomizes the 32bit
57735+ immediate constants present in the generated code to thwart such attacks.
57736+
57737+ If you're using KERNEXEC, it's recommended that you enable this option
57738+ to supplement the hardening of the kernel.
57739+
57740+config GRKERNSEC_RAND_THREADSTACK
57741+ bool "Insert random gaps between thread stacks"
57742+ default y if GRKERNSEC_CONFIG_AUTO
57743+ depends on PAX_RANDMMAP && !PPC
57744+ help
57745+ If you say Y here, a random-sized gap will be enforced between allocated
57746+ thread stacks. Glibc's NPTL and other threading libraries that
57747+ pass MAP_STACK to the kernel for thread stack allocation are supported.
57748+ The implementation currently provides 8 bits of entropy for the gap.
57749+
57750+ Many distributions do not compile threaded remote services with the
57751+ -fstack-check argument to GCC, causing the variable-sized stack-based
57752+ allocator, alloca(), to not probe the stack on allocation. This
57753+ permits an unbounded alloca() to skip over any guard page and potentially
57754+ modify another thread's stack reliably. An enforced random gap
57755+ reduces the reliability of such an attack and increases the chance
57756+ that such a read/write to another thread's stack instead lands in
57757+ an unmapped area, causing a crash and triggering grsecurity's
57758+ anti-bruteforcing logic.
57759+
57760+config GRKERNSEC_PROC_MEMMAP
57761+ bool "Harden ASLR against information leaks and entropy reduction"
57762+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
57763+ depends on PAX_NOEXEC || PAX_ASLR
57764+ help
57765+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
57766+ give no information about the addresses of its mappings if
57767+ PaX features that rely on random addresses are enabled on the task.
57768+ In addition to sanitizing this information and disabling other
57769+ dangerous sources of information, this option causes reads of sensitive
57770+ /proc/<pid> entries to return no data when the file descriptor was opened
57771+ in a different task than the one performing the read. Such attempts are logged.
57772+ This option also limits argv/env strings for suid/sgid binaries
57773+ to 512KB to prevent a complete exhaustion of the stack entropy provided
57774+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
57775+ binaries to prevent alternative mmap layouts from being abused.
57776+
57777+ If you use PaX it is essential that you say Y here as it closes up
57778+ several holes that make full ASLR useless locally.
57779+
57780+config GRKERNSEC_BRUTE
57781+ bool "Deter exploit bruteforcing"
57782+ default y if GRKERNSEC_CONFIG_AUTO
57783+ help
57784+ If you say Y here, attempts to bruteforce exploits against forking
57785+ daemons such as apache or sshd, as well as against suid/sgid binaries,
57786+ will be deterred. When a child of a forking daemon is killed by PaX
57787+ or crashes due to an illegal instruction or other suspicious signal,
57788+ the parent process will be delayed 30 seconds upon every subsequent
57789+ fork until the administrator is able to assess the situation and
57790+ restart the daemon.
57791+ In the suid/sgid case, the attempt is logged, the user has all their
57792+ processes terminated, and they are prevented from executing any further
57793+ processes for 15 minutes.
57794+ It is recommended that you also enable signal logging in the auditing
57795+ section so that logs are generated when a process triggers a suspicious
57796+ signal.
57797+ If the sysctl option is enabled, a sysctl option with name
57798+ "deter_bruteforce" is created.
57799+
57800+
57801+config GRKERNSEC_MODHARDEN
57802+ bool "Harden module auto-loading"
57803+ default y if GRKERNSEC_CONFIG_AUTO
57804+ depends on MODULES
57805+ help
57806+ If you say Y here, module auto-loading in response to use of some
57807+ feature implemented by an unloaded module will be restricted to
57808+ root users. Enabling this option helps defend against attacks
57809+ by unprivileged users who abuse the auto-loading behavior to
57810+ cause a vulnerable module to load that is then exploited.
57811+
57812+ If this option prevents a legitimate use of auto-loading for a
57813+ non-root user, the administrator can execute modprobe manually
57814+ with the exact name of the module mentioned in the alert log.
57815+ Alternatively, the administrator can add the module to the list
57816+ of modules loaded at boot by modifying init scripts.
57817+
57818+ Modification of init scripts will most likely be needed on
57819+ Ubuntu servers with encrypted home directory support enabled,
57820+ as the first non-root user logging in will cause the ecb(aes),
57821+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
57822+
57823+config GRKERNSEC_HIDESYM
57824+ bool "Hide kernel symbols"
57825+ default y if GRKERNSEC_CONFIG_AUTO
57826+ select PAX_USERCOPY_SLABS
57827+ help
57828+ If you say Y here, getting information on loaded modules and
57829+ displaying all kernel symbols through a syscall will be restricted
57830+ to users with CAP_SYS_MODULE. For software compatibility reasons,
57831+ /proc/kallsyms will be restricted to the root user. The RBAC
57832+ system can hide that entry even from root.
57833+
57834+ This option also prevents leaking of kernel addresses through
57835+ several /proc entries.
57836+
57837+ Note that this option is only effective provided the following
57838+ conditions are met:
57839+ 1) The kernel using grsecurity is not precompiled by some distribution
57840+ 2) You have also enabled GRKERNSEC_DMESG
57841+ 3) You are using the RBAC system and hiding other files such as your
57842+ kernel image and System.map. Alternatively, enabling this option
57843+ causes the permissions on /boot, /lib/modules, and the kernel
57844+ source directory to change at compile time to prevent
57845+ reading by non-root users.
57846+ If the above conditions are met, this option will aid in providing a
57847+ useful protection against local kernel exploitation of overflows
57848+ and arbitrary read/write vulnerabilities.
57849+
57850+config GRKERNSEC_KERN_LOCKOUT
57851+ bool "Active kernel exploit response"
57852+ default y if GRKERNSEC_CONFIG_AUTO
57853+ depends on X86 || ARM || PPC || SPARC
57854+ help
57855+ If you say Y here, when a PaX alert is triggered due to suspicious
57856+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
57857+ or an OOPS occurs due to bad memory accesses, instead of just
57858+ terminating the offending process (and potentially allowing
57859+ a subsequent exploit from the same user), we will take one of two
57860+ actions:
57861+ If the user was root, we will panic the system
57862+ If the user was non-root, we will log the attempt, terminate
57863+ all processes owned by the user, then prevent them from creating
57864+ any new processes until the system is restarted
57865+ This deters repeated kernel exploitation/bruteforcing attempts
57866+ and is useful for later forensics.
57867+
57868+endmenu
57869+menu "Role Based Access Control Options"
57870+depends on GRKERNSEC
57871+
57872+config GRKERNSEC_RBAC_DEBUG
57873+ bool
57874+
57875+config GRKERNSEC_NO_RBAC
57876+ bool "Disable RBAC system"
57877+ help
57878+ If you say Y here, the /dev/grsec device will be removed from the kernel,
57879+ preventing the RBAC system from being enabled. You should only say Y
57880+ here if you have no intention of using the RBAC system, so as to prevent
57881+ an attacker with root access from misusing the RBAC system to hide files
57882+ and processes when loadable module support and /dev/[k]mem have been
57883+ locked down.
57884+
57885+config GRKERNSEC_ACL_HIDEKERN
57886+ bool "Hide kernel processes"
57887+ help
57888+ If you say Y here, all kernel threads will be hidden to all
57889+ processes but those whose subject has the "view hidden processes"
57890+ flag.
57891+
57892+config GRKERNSEC_ACL_MAXTRIES
57893+ int "Maximum tries before password lockout"
57894+ default 3
57895+ help
57896+ This option enforces the maximum number of times a user can attempt
57897+ to authorize themselves with the grsecurity RBAC system before being
57898+ denied the ability to attempt authorization again for a specified time.
57899+ The lower the number, the harder it will be to brute-force a password.
57900+
57901+config GRKERNSEC_ACL_TIMEOUT
57902+ int "Time to wait after max password tries, in seconds"
57903+ default 30
57904+ help
57905+ This option specifies the time the user must wait after attempting to
57906+ authorize to the RBAC system with the maximum number of invalid
57907+ passwords. The higher the number, the harder it will be to brute-force
57908+ a password.
57909+
57910+endmenu
57911+menu "Filesystem Protections"
57912+depends on GRKERNSEC
57913+
57914+config GRKERNSEC_PROC
57915+ bool "Proc restrictions"
57916+ default y if GRKERNSEC_CONFIG_AUTO
57917+ help
57918+ If you say Y here, the permissions of the /proc filesystem
57919+ will be altered to enhance system security and privacy. You MUST
57920+ choose either a user only restriction or a user and group restriction.
57921+ Depending upon the option you choose, you can either restrict users to
57922+ see only the processes they themselves run ("restrict to user only"),
57923+ or designate a group that can view all processes and files normally
57924+ restricted to root. NOTE: If you're running identd or
57925+ ntpd as a non-root user, you will have to run it as the group you
57926+ specify here.
57927+
57928+config GRKERNSEC_PROC_USER
57929+ bool "Restrict /proc to user only"
57930+ depends on GRKERNSEC_PROC
57931+ help
57932+ If you say Y here, non-root users will only be able to view their own
57933+ processes, and will be restricted from viewing network-related
57934+ information and kernel symbol and module information.
57935+
57936+config GRKERNSEC_PROC_USERGROUP
57937+ bool "Allow special group"
57938+ default y if GRKERNSEC_CONFIG_AUTO
57939+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
57940+ help
57941+ If you say Y here, you will be able to select a group that will be
57942+ able to view all processes and network-related information. If you've
57943+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
57944+ remain hidden. This option is useful if you want to run identd as
57945+ a non-root user. The group you select may also be chosen at boot time
57946+ via "grsec_proc_gid=" on the kernel commandline.
57947+
57948+config GRKERNSEC_PROC_GID
57949+ int "GID for special group"
57950+ depends on GRKERNSEC_PROC_USERGROUP
57951+ default 1001
57952+
57953+config GRKERNSEC_PROC_ADD
57954+ bool "Additional restrictions"
57955+ default y if GRKERNSEC_CONFIG_AUTO
57956+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
57957+ help
57958+ If you say Y here, additional restrictions will be placed on
57959+ /proc that keep normal users from viewing device information and
57960+ slabinfo information that could be useful for exploits.
57961+
57962+config GRKERNSEC_LINK
57963+ bool "Linking restrictions"
57964+ default y if GRKERNSEC_CONFIG_AUTO
57965+ help
57966+ If you say Y here, /tmp race exploits will be prevented, since users
57967+ will no longer be able to follow symlinks owned by other users in
57968+ world-writable +t directories (e.g. /tmp), unless the owner of the
57969+ symlink is the owner of the directory. Users will also not be
57970+ able to hardlink to files they do not own. If the sysctl option is
57971+ enabled, a sysctl option with name "linking_restrictions" is created.
57972+
57973+config GRKERNSEC_SYMLINKOWN
57974+ bool "Kernel-enforced SymlinksIfOwnerMatch"
57975+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57976+ help
57977+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
57978+ that prevents it from being used as a security feature. As Apache
57979+ verifies the symlink by performing a stat() against the target of
57980+ the symlink before it is followed, an attacker can setup a symlink
57981+ to point to a same-owned file, then replace the symlink with one
57982+ that targets another user's file just after Apache "validates" the
57983+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
57984+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
57985+ will be in place for the group you specify. If the sysctl option
57986+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
57987+ created.
57988+
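A userspace illustration of the stat-then-follow window described above, with hypothetical paths: nothing the checking process does between its stat() and its open() can stop the symlink from being swapped, which is why only a follow-time, kernel-enforced ownership check closes the race:

    #include <sys/stat.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Racy "SymlinksIfOwnerMatch"-style check: unsound by construction. */
    static int racy_open_if_same_owner(const char *link, uid_t owner)
    {
        struct stat st;

        if (stat(link, &st) < 0 || st.st_uid != owner)
            return -1;               /* target owner "validated"...    */
        return open(link, O_RDONLY); /* ...but the link may have been
                                      * swapped in between (TOCTOU)    */
    }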
57989+config GRKERNSEC_SYMLINKOWN_GID
57990+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
57991+ depends on GRKERNSEC_SYMLINKOWN
57992+ default 1006
57993+ help
57994+ Setting this GID determines what group kernel-enforced
57995+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
57996+ is enabled, a sysctl option with name "symlinkown_gid" is created.
57997+
57998+config GRKERNSEC_FIFO
57999+ bool "FIFO restrictions"
58000+ default y if GRKERNSEC_CONFIG_AUTO
58001+ help
58002+ If you say Y here, users will not be able to write to FIFOs they don't
58003+ own in world-writable +t directories (e.g. /tmp), unless the owner of
58004+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
58005+ option is enabled, a sysctl option with name "fifo_restrictions" is
58006+ created.
58007+
58008+config GRKERNSEC_SYSFS_RESTRICT
58009+ bool "Sysfs/debugfs restriction"
58010+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
58011+ depends on SYSFS
58012+ help
58013+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
58014+ any filesystem normally mounted under it (e.g. debugfs) will be
58015+ mostly accessible only by root. These filesystems generally provide access
58016+ to hardware and debug information that isn't appropriate for unprivileged
58017+ users of the system. Sysfs and debugfs have also become a large source
58018+ of new vulnerabilities, ranging from infoleaks to local compromise.
58019+ There has been very little oversight with an eye toward security involved
58020+ in adding new exporters of information to these filesystems, so their
58021+ use is discouraged.
58022+ For reasons of compatibility, a few directories have been whitelisted
58023+ for access by non-root users:
58024+ /sys/fs/selinux
58025+ /sys/fs/fuse
58026+ /sys/devices/system/cpu
58027+
58028+config GRKERNSEC_ROFS
58029+ bool "Runtime read-only mount protection"
58030+ help
58031+ If you say Y here, a sysctl option with name "romount_protect" will
58032+ be created. By setting this option to 1 at runtime, filesystems
58033+ will be protected in the following ways:
58034+ * No new writable mounts will be allowed
58035+ * Existing read-only mounts won't be able to be remounted read/write
58036+ * Write operations will be denied on all block devices
58037+ This option acts independently of grsec_lock: once it is set to 1,
58038+ it cannot be turned off. Therefore, please be mindful of the resulting
58039+ behavior if this option is enabled in an init script on a read-only
58040+ filesystem. This feature is mainly intended for secure embedded systems.
58041+
58042+config GRKERNSEC_DEVICE_SIDECHANNEL
58043+ bool "Eliminate stat/notify-based device sidechannels"
58044+ default y if GRKERNSEC_CONFIG_AUTO
58045+ help
58046+ If you say Y here, timing analyses on block or character
58047+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
58048+ will be thwarted for unprivileged users. If a process without
58049+ CAP_MKNOD stats such a device, the last access and last modify times
58050+ will match the device's create time. No access or modify events
58051+ will be triggered through inotify/dnotify/fanotify for such devices.
58052+ This feature will prevent attacks that may at a minimum
58053+ allow an attacker to determine the administrator's password length.
58054+
58055+config GRKERNSEC_CHROOT
58056+ bool "Chroot jail restrictions"
58057+ default y if GRKERNSEC_CONFIG_AUTO
58058+ help
58059+ If you say Y here, you will be able to choose several options that will
58060+ make breaking out of a chrooted jail much more difficult. If you
58061+ encounter no software incompatibilities with the following options, it
58062+ is recommended that you enable each one.
58063+
58064+config GRKERNSEC_CHROOT_MOUNT
58065+ bool "Deny mounts"
58066+ default y if GRKERNSEC_CONFIG_AUTO
58067+ depends on GRKERNSEC_CHROOT
58068+ help
58069+ If you say Y here, processes inside a chroot will not be able to
58070+ mount or remount filesystems. If the sysctl option is enabled, a
58071+ sysctl option with name "chroot_deny_mount" is created.
58072+
58073+config GRKERNSEC_CHROOT_DOUBLE
58074+ bool "Deny double-chroots"
58075+ default y if GRKERNSEC_CONFIG_AUTO
58076+ depends on GRKERNSEC_CHROOT
58077+ help
58078+ If you say Y here, processes inside a chroot will not be able to chroot
58079+ again outside the chroot. This is a widely used method of breaking
58080+ out of a chroot jail and should not be allowed. If the sysctl
58081+ option is enabled, a sysctl option with name
58082+ "chroot_deny_chroot" is created.
58083+
58084+config GRKERNSEC_CHROOT_PIVOT
58085+ bool "Deny pivot_root in chroot"
58086+ default y if GRKERNSEC_CONFIG_AUTO
58087+ depends on GRKERNSEC_CHROOT
58088+ help
58089+ If you say Y here, processes inside a chroot will not be able to use
58090+ a function called pivot_root() that was introduced in Linux 2.3.41. It
58091+ works similarly to chroot in that it changes the root filesystem. This
58092+ function could be misused in a chrooted process to attempt to break out
58093+ of the chroot, and therefore should not be allowed. If the sysctl
58094+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
58095+ created.
58096+
58097+config GRKERNSEC_CHROOT_CHDIR
58098+ bool "Enforce chdir(\"/\") on all chroots"
58099+ default y if GRKERNSEC_CONFIG_AUTO
58100+ depends on GRKERNSEC_CHROOT
58101+ help
58102+ If you say Y here, the current working directory of all newly-chrooted
58103+ applications will be set to the root directory of the chroot.
58104+ The man page on chroot(2) states:
58105+ Note that this call does not change the current working
58106+ directory, so that `.' can be outside the tree rooted at
58107+ `/'. In particular, the super-user can escape from a
58108+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
58109+
58110+ It is recommended that you say Y here, since it's not known to break
58111+ any software. If the sysctl option is enabled, a sysctl option with
58112+ name "chroot_enforce_chdir" is created.
58113+
58114+config GRKERNSEC_CHROOT_CHMOD
58115+ bool "Deny (f)chmod +s"
58116+ default y if GRKERNSEC_CONFIG_AUTO
58117+ depends on GRKERNSEC_CHROOT
58118+ help
58119+ If you say Y here, processes inside a chroot will not be able to chmod
58120+ or fchmod files to make them have suid or sgid bits. This protects
58121+ against another published method of breaking a chroot. If the sysctl
58122+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
58123+ created.
58124+
58125+config GRKERNSEC_CHROOT_FCHDIR
58126+ bool "Deny fchdir out of chroot"
58127+ default y if GRKERNSEC_CONFIG_AUTO
58128+ depends on GRKERNSEC_CHROOT
58129+ help
58130+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
58131+ to a file descriptor of the chrooting process that points to a directory
58132+ outside the filesystem will be stopped. If the sysctl option
58133+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
58134+
58135+config GRKERNSEC_CHROOT_MKNOD
58136+ bool "Deny mknod"
58137+ default y if GRKERNSEC_CONFIG_AUTO
58138+ depends on GRKERNSEC_CHROOT
58139+ help
58140+ If you say Y here, processes inside a chroot will not be allowed to
58141+ mknod. The problem with using mknod inside a chroot is that it
58142+ would allow an attacker to create a device entry that is the same
58143+ as one on the physical root of your system, which could be anything
58144+ from the console device to a device for your hard drive (which
58145+ they could then use to wipe the drive or steal data). It is recommended
58146+ that you say Y here, unless you run into software incompatibilities.
58147+ If the sysctl option is enabled, a sysctl option with name
58148+ "chroot_deny_mknod" is created.
58149+
58150+config GRKERNSEC_CHROOT_SHMAT
58151+ bool "Deny shmat() out of chroot"
58152+ default y if GRKERNSEC_CONFIG_AUTO
58153+ depends on GRKERNSEC_CHROOT
58154+ help
58155+ If you say Y here, processes inside a chroot will not be able to attach
58156+ to shared memory segments that were created outside of the chroot jail.
58157+ It is recommended that you say Y here. If the sysctl option is enabled,
58158+ a sysctl option with name "chroot_deny_shmat" is created.
58159+
58160+config GRKERNSEC_CHROOT_UNIX
58161+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
58162+ default y if GRKERNSEC_CONFIG_AUTO
58163+ depends on GRKERNSEC_CHROOT
58164+ help
58165+ If you say Y here, processes inside a chroot will not be able to
58166+ connect to abstract (meaning not belonging to a filesystem) Unix
58167+ domain sockets that were bound outside of a chroot. It is recommended
58168+ that you say Y here. If the sysctl option is enabled, a sysctl option
58169+ with name "chroot_deny_unix" is created.
58170+
58171+config GRKERNSEC_CHROOT_FINDTASK
58172+ bool "Protect outside processes"
58173+ default y if GRKERNSEC_CONFIG_AUTO
58174+ depends on GRKERNSEC_CHROOT
58175+ help
58176+ If you say Y here, processes inside a chroot will not be able to
58177+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
58178+ getsid, or view any process outside of the chroot. If the sysctl
58179+ option is enabled, a sysctl option with name "chroot_findtask" is
58180+ created.
58181+
58182+config GRKERNSEC_CHROOT_NICE
58183+ bool "Restrict priority changes"
58184+ default y if GRKERNSEC_CONFIG_AUTO
58185+ depends on GRKERNSEC_CHROOT
58186+ help
58187+ If you say Y here, processes inside a chroot will not be able to raise
58188+ the priority of processes in the chroot, or alter the priority of
58189+ processes outside the chroot. This provides more security than simply
58190+ removing CAP_SYS_NICE from the process' capability set. If the
58191+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
58192+ is created.
58193+
58194+config GRKERNSEC_CHROOT_SYSCTL
58195+ bool "Deny sysctl writes"
58196+ default y if GRKERNSEC_CONFIG_AUTO
58197+ depends on GRKERNSEC_CHROOT
58198+ help
58199+ If you say Y here, an attacker in a chroot will not be able to
58200+ write to sysctl entries, either by sysctl(2) or through a /proc
58201+ interface. It is strongly recommended that you say Y here. If the
58202+ sysctl option is enabled, a sysctl option with name
58203+ "chroot_deny_sysctl" is created.
58204+
58205+config GRKERNSEC_CHROOT_CAPS
58206+ bool "Capability restrictions"
58207+ default y if GRKERNSEC_CONFIG_AUTO
58208+ depends on GRKERNSEC_CHROOT
58209+ help
58210+ If you say Y here, the capabilities on all processes within a
58211+ chroot jail will be lowered to stop module insertion, raw i/o,
58212+ system and net admin tasks, rebooting the system, modifying immutable
58213+ files, modifying IPC owned by another, and changing the system time.
58214+ This is left an option because it can break some apps. Disable this
58215+ if your chrooted apps are having problems performing those kinds of
58216+ tasks. If the sysctl option is enabled, a sysctl option with
58217+ name "chroot_caps" is created.
58218+
58219+endmenu
58220+menu "Kernel Auditing"
58221+depends on GRKERNSEC
58222+
58223+config GRKERNSEC_AUDIT_GROUP
58224+ bool "Single group for auditing"
58225+ help
58226+ If you say Y here, the exec and chdir logging features will only operate
58227+ on a group you specify. This option is recommended if you only want to
58228+ watch certain users instead of having a large amount of logs from the
58229+ entire system. If the sysctl option is enabled, a sysctl option with
58230+ name "audit_group" is created.
58231+
58232+config GRKERNSEC_AUDIT_GID
58233+ int "GID for auditing"
58234+ depends on GRKERNSEC_AUDIT_GROUP
58235+ default 1007
58236+
58237+config GRKERNSEC_EXECLOG
58238+ bool "Exec logging"
58239+ help
58240+ If you say Y here, all execve() calls will be logged (since the
58241+ other exec*() calls are frontends to execve(), all execution
58242+ will be logged). Useful for shell-servers that like to keep track
58243+ of their users. If the sysctl option is enabled, a sysctl option with
58244+ name "exec_logging" is created.
58245+ WARNING: This option when enabled will produce a LOT of logs, especially
58246+ on an active system.
58247+
58248+config GRKERNSEC_RESLOG
58249+ bool "Resource logging"
58250+ default y if GRKERNSEC_CONFIG_AUTO
58251+ help
58252+ If you say Y here, all attempts to overstep resource limits will
58253+ be logged with the resource name, the requested size, and the current
58254+ limit. It is highly recommended that you say Y here. If the sysctl
58255+ option is enabled, a sysctl option with name "resource_logging" is
58256+ created. If the RBAC system is enabled, the sysctl value is ignored.
58257+
58258+config GRKERNSEC_CHROOT_EXECLOG
58259+ bool "Log execs within chroot"
58260+ help
58261+ If you say Y here, all executions inside a chroot jail will be logged
58262+ to syslog. This can cause a large amount of logs if certain
58263+ applications (e.g. djb's daemontools) are installed on the system, and
58264+ is therefore left as an option. If the sysctl option is enabled, a
58265+ sysctl option with name "chroot_execlog" is created.
58266+
58267+config GRKERNSEC_AUDIT_PTRACE
58268+ bool "Ptrace logging"
58269+ help
58270+ If you say Y here, all attempts to attach to a process via ptrace
58271+ will be logged. If the sysctl option is enabled, a sysctl option
58272+ with name "audit_ptrace" is created.
58273+
58274+config GRKERNSEC_AUDIT_CHDIR
58275+ bool "Chdir logging"
58276+ help
58277+ If you say Y here, all chdir() calls will be logged. If the sysctl
58278+ option is enabled, a sysctl option with name "audit_chdir" is created.
58279+
58280+config GRKERNSEC_AUDIT_MOUNT
58281+ bool "(Un)Mount logging"
58282+ help
58283+ If you say Y here, all mounts and unmounts will be logged. If the
58284+ sysctl option is enabled, a sysctl option with name "audit_mount" is
58285+ created.
58286+
58287+config GRKERNSEC_SIGNAL
58288+ bool "Signal logging"
58289+ default y if GRKERNSEC_CONFIG_AUTO
58290+ help
58291+ If you say Y here, certain important signals will be logged, such as
58292+ SIGSEGV, which will as a result inform you when an error in a program
58293+ occurred, which in some cases could indicate an exploit attempt.
58294+ If the sysctl option is enabled, a sysctl option with name
58295+ "signal_logging" is created.
58296+
58297+config GRKERNSEC_FORKFAIL
58298+ bool "Fork failure logging"
58299+ help
58300+ If you say Y here, all failed fork() attempts will be logged.
58301+ This could suggest a fork bomb, or someone attempting to overstep
58302+ their process limit. If the sysctl option is enabled, a sysctl option
58303+ with name "forkfail_logging" is created.
58304+
58305+config GRKERNSEC_TIME
58306+ bool "Time change logging"
58307+ default y if GRKERNSEC_CONFIG_AUTO
58308+ help
58309+ If you say Y here, any changes of the system clock will be logged.
58310+ If the sysctl option is enabled, a sysctl option with name
58311+ "timechange_logging" is created.
58312+
58313+config GRKERNSEC_PROC_IPADDR
58314+ bool "/proc/<pid>/ipaddr support"
58315+ default y if GRKERNSEC_CONFIG_AUTO
58316+ help
58317+ If you say Y here, a new entry will be added to each /proc/<pid>
58318+ directory that contains the IP address of the person using the task.
58319+ The IP is carried across local TCP and AF_UNIX stream sockets.
58320+ This information can be useful for IDS/IPSes to perform remote response
58321+ to a local attack. The entry is readable by only the owner of the
58322+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
58323+ the RBAC system), and thus does not create privacy concerns.
58324+
58325+config GRKERNSEC_RWXMAP_LOG
58326+ bool 'Denied RWX mmap/mprotect logging'
58327+ default y if GRKERNSEC_CONFIG_AUTO
58328+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
58329+ help
58330+ If you say Y here, calls to mmap() and mprotect() with explicit
58331+ usage of PROT_WRITE and PROT_EXEC together will be logged when
58332+ denied by the PAX_MPROTECT feature. If the sysctl option is
58333+ enabled, a sysctl option with name "rwxmap_logging" is created.
58334+
58335+config GRKERNSEC_AUDIT_TEXTREL
58336+ bool 'ELF text relocations logging (READ HELP)'
58337+ depends on PAX_MPROTECT
58338+ help
58339+ If you say Y here, text relocations will be logged with the filename
58340+ of the offending library or binary. The purpose of the feature is
58341+ to help Linux distribution developers get rid of libraries and
58342+ binaries that need text relocations which hinder the future progress
58343+ of PaX. Only Linux distribution developers should say Y here, and
58344+ never on a production machine, as this option creates an information
58345+ leak that could aid an attacker in defeating the randomization of
58346+ a single memory region. If the sysctl option is enabled, a sysctl
58347+ option with name "audit_textrel" is created.
58348+
58349+endmenu
58350+
58351+menu "Executable Protections"
58352+depends on GRKERNSEC
58353+
58354+config GRKERNSEC_DMESG
58355+ bool "Dmesg(8) restriction"
58356+ default y if GRKERNSEC_CONFIG_AUTO
58357+ help
58358+ If you say Y here, non-root users will not be able to use dmesg(8)
58359+ to view the contents of the kernel's circular log buffer.
58360+ The kernel's log buffer often contains kernel addresses and other
58361+ identifying information useful to an attacker in fingerprinting a
58362+ system for a targeted exploit.
58363+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
58364+ created.
58365+
58366+config GRKERNSEC_HARDEN_PTRACE
58367+ bool "Deter ptrace-based process snooping"
58368+ default y if GRKERNSEC_CONFIG_AUTO
58369+ help
58370+ If you say Y here, TTY sniffers and other malicious monitoring
58371+ programs implemented through ptrace will be defeated. If you
58372+ have been using the RBAC system, this option has already been
58373+ enabled for several years for all users, with the ability to make
58374+ fine-grained exceptions.
58375+
58376+ This option only affects the ability of non-root users to ptrace
58377+	  processes that are not a descendant of the ptracing process.
58378+ This means that strace ./binary and gdb ./binary will still work,
58379+ but attaching to arbitrary processes will not. If the sysctl
58380+ option is enabled, a sysctl option with name "harden_ptrace" is
58381+ created.
58382+
58383+config GRKERNSEC_PTRACE_READEXEC
58384+ bool "Require read access to ptrace sensitive binaries"
58385+ default y if GRKERNSEC_CONFIG_AUTO
58386+ help
58387+	  If you say Y here, unprivileged users will not be able to ptrace
58388+	  unreadable binaries.  This option is useful in environments that
58389+	  remove the read bits (e.g. file mode 4711) from suid binaries to
58390+	  prevent infoleaking of their contents.  It adds consistency to the
58391+	  use of that file mode, as the contents of such a binary could
58392+	  otherwise be read out by ptracing it while it runs unprivileged.
58393+
58394+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
58395+ is created.
58396+
58397+config GRKERNSEC_SETXID
58398+ bool "Enforce consistent multithreaded privileges"
58399+ default y if GRKERNSEC_CONFIG_AUTO
58400+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
58401+ help
58402+ If you say Y here, a change from a root uid to a non-root uid
58403+ in a multithreaded application will cause the resulting uids,
58404+ gids, supplementary groups, and capabilities in that thread
58405+ to be propagated to the other threads of the process. In most
58406+ cases this is unnecessary, as glibc will emulate this behavior
58407+ on behalf of the application. Other libcs do not act in the
58408+ same way, allowing the other threads of the process to continue
58409+ running with root privileges. If the sysctl option is enabled,
58410+ a sysctl option with name "consistent_setxid" is created.
58411+
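
An aside for context: the inconsistency this option closes is easy to reproduce from userspace when the raw system call is used instead of glibc's setuid() wrapper (which broadcasts the credential change to every thread via signals). A minimal sketch, assuming it is started as root; the worker thread keeps euid 0 after the main thread drops privileges:

    /* Userspace sketch of the problem GRKERNSEC_SETXID addresses:
     * the raw setuid syscall changes only the calling thread's
     * credentials, so a second thread keeps running as root. */
    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    static void *worker(void *arg)
    {
    	sleep(2);				/* outlive the uid change */
    	printf("worker euid: %d\n", (int)geteuid());	/* still 0 */
    	return NULL;
    }

    int main(void)				/* run as root */
    {
    	pthread_t t;

    	pthread_create(&t, NULL, worker, NULL);
    	syscall(SYS_setuid, 65534);	/* raw syscall: this thread only */
    	printf("main   euid: %d\n", (int)geteuid());
    	pthread_join(t, NULL);
    	return 0;
    }

With this option enabled the kernel performs the propagation itself, so the outcome is consistent even for libcs that do not emulate glibc's behavior.
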
58412+config GRKERNSEC_TPE
58413+ bool "Trusted Path Execution (TPE)"
58414+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
58415+ help
58416+	  If you say Y here, you will be able to choose a GID to add to the
58417+ supplementary groups of users you want to mark as "untrusted."
58418+ These users will not be able to execute any files that are not in
58419+ root-owned directories writable only by root. If the sysctl option
58420+ is enabled, a sysctl option with name "tpe" is created.
58421+
58422+config GRKERNSEC_TPE_ALL
58423+ bool "Partially restrict all non-root users"
58424+ depends on GRKERNSEC_TPE
58425+ help
58426+ If you say Y here, all non-root users will be covered under
58427+ a weaker TPE restriction. This is separate from, and in addition to,
58428+ the main TPE options that you have selected elsewhere. Thus, if a
58429+ "trusted" GID is chosen, this restriction applies to even that GID.
58430+ Under this restriction, all non-root users will only be allowed to
58431+ execute files in directories they own that are not group or
58432+ world-writable, or in directories owned by root and writable only by
58433+ root. If the sysctl option is enabled, a sysctl option with name
58434+ "tpe_restrict_all" is created.
58435+
58436+config GRKERNSEC_TPE_INVERT
58437+ bool "Invert GID option"
58438+ depends on GRKERNSEC_TPE
58439+ help
58440+ If you say Y here, the group you specify in the TPE configuration will
58441+ decide what group TPE restrictions will be *disabled* for. This
58442+ option is useful if you want TPE restrictions to be applied to most
58443+ users on the system. If the sysctl option is enabled, a sysctl option
58444+ with name "tpe_invert" is created. Unlike other sysctl options, this
58445+ entry will default to on for backward-compatibility.
58446+
58447+config GRKERNSEC_TPE_GID
58448+ int
58449+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
58450+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
58451+
58452+config GRKERNSEC_TPE_UNTRUSTED_GID
58453+ int "GID for TPE-untrusted users"
58454+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
58455+ default 1005
58456+ help
58457+ Setting this GID determines what group TPE restrictions will be
58458+ *enabled* for. If the sysctl option is enabled, a sysctl option
58459+ with name "tpe_gid" is created.
58460+
58461+config GRKERNSEC_TPE_TRUSTED_GID
58462+ int "GID for TPE-trusted users"
58463+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
58464+ default 1005
58465+ help
58466+ Setting this GID determines what group TPE restrictions will be
58467+ *disabled* for. If the sysctl option is enabled, a sysctl option
58468+ with name "tpe_gid" is created.
58469+
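
To summarize how the three GID options interact, here is a minimal sketch of the group test they configure; the flag and gid names below are assumed stand-ins for the options' sysctl mirrors, and the real check (grsec_tpe.c) goes on to validate directory ownership and write bits before allowing the exec:

    /* Sketch only, not the actual grsec_tpe.c code. */
    #include <linux/cred.h>
    #include <linux/uidgid.h>

    extern int grsec_enable_tpe;		/* GRKERNSEC_TPE */
    extern int grsec_enable_tpe_invert;	/* GRKERNSEC_TPE_INVERT */
    extern kgid_t grsec_tpe_gid;		/* GRKERNSEC_TPE_GID */

    static int tpe_restricts_current(void)
    {
    	if (!grsec_enable_tpe)
    		return 0;
    	if (grsec_enable_tpe_invert)	/* configured GID is trusted */
    		return !in_group_p(grsec_tpe_gid);
    	return in_group_p(grsec_tpe_gid); /* configured GID is untrusted */
    }

GRKERNSEC_TPE_ALL then layers its weaker path check on top of this result for every non-root user, whether or not the group test matched.
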
58470+endmenu
58471+menu "Network Protections"
58472+depends on GRKERNSEC
58473+
58474+config GRKERNSEC_RANDNET
58475+ bool "Larger entropy pools"
58476+ default y if GRKERNSEC_CONFIG_AUTO
58477+ help
58478+ If you say Y here, the entropy pools used for many features of Linux
58479+ and grsecurity will be doubled in size. Since several grsecurity
58480+ features use additional randomness, it is recommended that you say Y
58481+ here. Saying Y here has a similar effect as modifying
58482+ /proc/sys/kernel/random/poolsize.
58483+
58484+config GRKERNSEC_BLACKHOLE
58485+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
58486+ default y if GRKERNSEC_CONFIG_AUTO
58487+ depends on NET
58488+ help
58489+ If you say Y here, neither TCP resets nor ICMP
58490+ destination-unreachable packets will be sent in response to packets
58491+ sent to ports for which no associated listening process exists.
58492+	  This feature supports both IPv4 and IPv6 and exempts the
58493+ loopback interface from blackholing. Enabling this feature
58494+ makes a host more resilient to DoS attacks and reduces network
58495+ visibility against scanners.
58496+
58497+ The blackhole feature as-implemented is equivalent to the FreeBSD
58498+ blackhole feature, as it prevents RST responses to all packets, not
58499+ just SYNs. Under most application behavior this causes no
58500+ problems, but applications (like haproxy) may not close certain
58501+ connections in a way that cleanly terminates them on the remote
58502+ end, leaving the remote host in LAST_ACK state. Because of this
58503+ side-effect and to prevent intentional LAST_ACK DoSes, this
58504+ feature also adds automatic mitigation against such attacks.
58505+ The mitigation drastically reduces the amount of time a socket
58506+ can spend in LAST_ACK state. If you're using haproxy and not
58507+ all servers it connects to have this option enabled, consider
58508+ disabling this feature on the haproxy host.
58509+
58510+ If the sysctl option is enabled, two sysctl options with names
58511+ "ip_blackhole" and "lastack_retries" will be created.
58512+ While "ip_blackhole" takes the standard zero/non-zero on/off
58513+ toggle, "lastack_retries" uses the same kinds of values as
58514+ "tcp_retries1" and "tcp_retries2". The default value of 4
58515+ prevents a socket from lasting more than 45 seconds in LAST_ACK
58516+ state.
58517+
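
Condensed to its core, the reply-suppression decision described above looks like the following sketch; the enable flag is an assumed stand-in for the "ip_blackhole" sysctl mirror, and the real hooks sit in the IPv4/IPv6 TCP and UDP input paths:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/if.h>

    extern int grsec_enable_blackhole;	/* assumed sysctl mirror */

    /* Sketch: should the RST/ICMP-unreachable reply for traffic to
     * a port with no listener be suppressed? */
    static int blackhole_suppress_reply(const struct sk_buff *skb)
    {
    	if (!grsec_enable_blackhole)
    		return 0;
    	if (skb->dev->flags & IFF_LOOPBACK)	/* loopback is exempt */
    		return 0;
    	return 1;				/* drop silently */
    }
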
58518+config GRKERNSEC_NO_SIMULT_CONNECT
58519+ bool "Disable TCP Simultaneous Connect"
58520+ default y if GRKERNSEC_CONFIG_AUTO
58521+ depends on NET
58522+ help
58523+ If you say Y here, a feature by Willy Tarreau will be enabled that
58524+ removes a weakness in Linux's strict implementation of TCP that
58525+ allows two clients to connect to each other without either entering
58526+ a listening state. The weakness allows an attacker to easily prevent
58527+ a client from connecting to a known server provided the source port
58528+ for the connection is guessed correctly.
58529+
58530+ As the weakness could be used to prevent an antivirus or IPS from
58531+ fetching updates, or prevent an SSL gateway from fetching a CRL,
58532+ it should be eliminated by enabling this option. Though Linux is
58533+ one of few operating systems supporting simultaneous connect, it
58534+ has no legitimate use in practice and is rarely supported by firewalls.
58535+
58536+config GRKERNSEC_SOCKET
58537+ bool "Socket restrictions"
58538+ depends on NET
58539+ help
58540+ If you say Y here, you will be able to choose from several options.
58541+ If you assign a GID on your system and add it to the supplementary
58542+ groups of users you want to restrict socket access to, this patch
58543+ will perform up to three things, based on the option(s) you choose.
58544+
58545+config GRKERNSEC_SOCKET_ALL
58546+ bool "Deny any sockets to group"
58547+ depends on GRKERNSEC_SOCKET
58548+ help
58549+	  If you say Y here, you will be able to choose a GID whose users will
58550+ be unable to connect to other hosts from your machine or run server
58551+ applications from your machine. If the sysctl option is enabled, a
58552+ sysctl option with name "socket_all" is created.
58553+
58554+config GRKERNSEC_SOCKET_ALL_GID
58555+ int "GID to deny all sockets for"
58556+ depends on GRKERNSEC_SOCKET_ALL
58557+ default 1004
58558+ help
58559+ Here you can choose the GID to disable socket access for. Remember to
58560+ add the users you want socket access disabled for to the GID
58561+ specified here. If the sysctl option is enabled, a sysctl option
58562+ with name "socket_all_gid" is created.
58563+
58564+config GRKERNSEC_SOCKET_CLIENT
58565+ bool "Deny client sockets to group"
58566+ depends on GRKERNSEC_SOCKET
58567+ help
58568+	  If you say Y here, you will be able to choose a GID whose users will
58569+ be unable to connect to other hosts from your machine, but will be
58570+ able to run servers. If this option is enabled, all users in the group
58571+ you specify will have to use passive mode when initiating ftp transfers
58572+ from the shell on your machine. If the sysctl option is enabled, a
58573+ sysctl option with name "socket_client" is created.
58574+
58575+config GRKERNSEC_SOCKET_CLIENT_GID
58576+ int "GID to deny client sockets for"
58577+ depends on GRKERNSEC_SOCKET_CLIENT
58578+ default 1003
58579+ help
58580+ Here you can choose the GID to disable client socket access for.
58581+ Remember to add the users you want client socket access disabled for to
58582+ the GID specified here. If the sysctl option is enabled, a sysctl
58583+ option with name "socket_client_gid" is created.
58584+
58585+config GRKERNSEC_SOCKET_SERVER
58586+ bool "Deny server sockets to group"
58587+ depends on GRKERNSEC_SOCKET
58588+ help
58589+	  If you say Y here, you will be able to choose a GID whose users will
58590+ be unable to run server applications from your machine. If the sysctl
58591+ option is enabled, a sysctl option with name "socket_server" is created.
58592+
58593+config GRKERNSEC_SOCKET_SERVER_GID
58594+ int "GID to deny server sockets for"
58595+ depends on GRKERNSEC_SOCKET_SERVER
58596+ default 1002
58597+ help
58598+ Here you can choose the GID to disable server socket access for.
58599+ Remember to add the users you want server socket access disabled for to
58600+ the GID specified here. If the sysctl option is enabled, a sysctl
58601+ option with name "socket_server_gid" is created.
58602+
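
All three socket options reduce to supplementary-group membership tests at socket-creation and connect time. A condensed sketch (the flag and gid names are assumed mirrors of the options above; the real checks live in grsec_sock.c):

    #include <linux/cred.h>
    #include <linux/errno.h>
    #include <linux/uidgid.h>

    extern int grsec_enable_socket_all, grsec_enable_socket_client,
    	   grsec_enable_socket_server;
    extern kgid_t grsec_socket_all_gid, grsec_socket_client_gid,
    	   grsec_socket_server_gid;

    /* Sketch: deny socket use by GID; is_server distinguishes a
     * listening socket from an outbound connect. */
    static int gr_socket_denied(int is_server)
    {
    	if (grsec_enable_socket_all &&
    	    in_group_p(grsec_socket_all_gid))
    		return -EACCES;		/* no sockets at all */
    	if (!is_server && grsec_enable_socket_client &&
    	    in_group_p(grsec_socket_client_gid))
    		return -EACCES;		/* no outbound connects */
    	if (is_server && grsec_enable_socket_server &&
    	    in_group_p(grsec_socket_server_gid))
    		return -EACCES;		/* no servers */
    	return 0;
    }
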
58603+endmenu
58604+menu "Sysctl Support"
58605+depends on GRKERNSEC && SYSCTL
58606+
58607+config GRKERNSEC_SYSCTL
58608+ bool "Sysctl support"
58609+ default y if GRKERNSEC_CONFIG_AUTO
58610+ help
58611+ If you say Y here, you will be able to change the options that
58612+ grsecurity runs with at bootup, without having to recompile your
58613+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
58614+ to enable (1) or disable (0) various features. All the sysctl entries
58615+ are mutable until the "grsec_lock" entry is set to a non-zero value.
58616+ All features enabled in the kernel configuration are disabled at boot
58617+ if you do not say Y to the "Turn on features by default" option.
58618+ All options should be set at startup, and the grsec_lock entry should
58619+ be set to a non-zero value after all the options are set.
58620+ *THIS IS EXTREMELY IMPORTANT*
58621+
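
Each mirror mentioned throughout this file is an ordinary integer entry under /proc/sys/kernel/grsecurity. As an assumed, simplified illustration of what one such entry looks like with the 3.8-era sysctl API (the real table is in grsec_sysctl.c):

    #include <linux/sysctl.h>

    extern int grsec_enable_mount;	/* mirror of "audit_mount" */

    /* Simplified sketch of one entry; writes to every entry are
     * refused once the companion "grsec_lock" entry goes non-zero. */
    static struct ctl_table grsec_table_sketch[] = {
    	{
    		.procname	= "audit_mount",
    		.data		= &grsec_enable_mount,
    		.maxlen		= sizeof(int),
    		.mode		= 0600,
    		.proc_handler	= &proc_dointvec,
    	},
    	{ }
    };

In practice a boot script sets the desired values and then finishes with "echo 1 > /proc/sys/kernel/grsecurity/grsec_lock".
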
58622+config GRKERNSEC_SYSCTL_DISTRO
58623+ bool "Extra sysctl support for distro makers (READ HELP)"
58624+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
58625+ help
58626+ If you say Y here, additional sysctl options will be created
58627+ for features that affect processes running as root. Therefore,
58628+ it is critical when using this option that the grsec_lock entry be
58629+	  enabled after boot. Only distros that ship prebuilt kernel packages
58630+	  with this option enabled and that can ensure grsec_lock is set
58631+	  after boot should use this option.
58632+ *Failure to set grsec_lock after boot makes all grsec features
58633+ this option covers useless*
58634+
58635+ Currently this option creates the following sysctl entries:
58636+ "Disable Privileged I/O": "disable_priv_io"
58637+
58638+config GRKERNSEC_SYSCTL_ON
58639+ bool "Turn on features by default"
58640+ default y if GRKERNSEC_CONFIG_AUTO
58641+ depends on GRKERNSEC_SYSCTL
58642+ help
58643+	  If you say Y here, the features enabled in the kernel configuration
58644+	  will also be enabled at boot time, rather than starting out
58645+	  disabled. It is recommended you say Y here unless
58646+ there is some reason you would want all sysctl-tunable features to
58647+ be disabled by default. As mentioned elsewhere, it is important
58648+ to enable the grsec_lock entry once you have finished modifying
58649+ the sysctl entries.
58650+
58651+endmenu
58652+menu "Logging Options"
58653+depends on GRKERNSEC
58654+
58655+config GRKERNSEC_FLOODTIME
58656+ int "Seconds in between log messages (minimum)"
58657+ default 10
58658+ help
58659+	  This option allows you to enforce the minimum number of seconds between
58660+	  grsecurity log messages. The default should be suitable for most
58661+	  people; however, if you choose to change it, pick a value small enough
58662+ to allow informative logs to be produced, but large enough to
58663+ prevent flooding.
58664+
58665+config GRKERNSEC_FLOODBURST
58666+ int "Number of messages in a burst (maximum)"
58667+ default 6
58668+ help
58669+ This option allows you to choose the maximum number of messages allowed
58670+ within the flood time interval you chose in a separate option. The
58671+	  default should be suitable for most people; however, if you find that
58672+ many of your logs are being interpreted as flooding, you may want to
58673+ raise this value.
58674+
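
Taken together, the two options describe a fixed-window rate limiter: at most FLOODBURST messages per FLOODTIME-second window. A minimal sketch of that check, assuming nothing about the real grsec_log.c implementation beyond the semantics stated above:

    #include <linux/time.h>

    static unsigned long gr_flood_start;	/* window start, in seconds */
    static unsigned int gr_flood_count;		/* messages this window */

    /* Sketch: returns 1 if a log message may be emitted now. */
    static int gr_log_allowed(unsigned int floodtime, unsigned int burst)
    {
    	unsigned long now = get_seconds();

    	if (now - gr_flood_start >= floodtime) {
    		gr_flood_start = now;	/* open a new window */
    		gr_flood_count = 0;
    	}
    	if (gr_flood_count >= burst)
    		return 0;		/* suppressed as flooding */
    	gr_flood_count++;
    	return 1;
    }
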
58675+endmenu
58676diff --git a/grsecurity/Makefile b/grsecurity/Makefile
58677new file mode 100644
58678index 0000000..1b9afa9
58679--- /dev/null
58680+++ b/grsecurity/Makefile
58681@@ -0,0 +1,38 @@
58682+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
58683+# during 2001-2009 it was completely redesigned by Brad Spengler
58684+# into an RBAC system
58685+#
58686+# All code in this directory and various hooks inserted throughout the kernel
58687+# are copyright Brad Spengler - Open Source Security, Inc., and released
58688+# under the GPL v2 or higher
58689+
58690+KBUILD_CFLAGS += -Werror
58691+
58692+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
58693+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
58694+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
58695+
58696+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
58697+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
58698+ gracl_learn.o grsec_log.o
58699+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58700+
58701+ifdef CONFIG_NET
58702+obj-y += grsec_sock.o
58703+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
58704+endif
58705+
58706+ifndef CONFIG_GRKERNSEC
58707+obj-y += grsec_disabled.o
58708+endif
58709+
58710+ifdef CONFIG_GRKERNSEC_HIDESYM
58711+extra-y := grsec_hidesym.o
58712+$(obj)/grsec_hidesym.o:
58713+ @-chmod -f 500 /boot
58714+ @-chmod -f 500 /lib/modules
58715+ @-chmod -f 500 /lib64/modules
58716+ @-chmod -f 500 /lib32/modules
58717+ @-chmod -f 700 .
58718+ @echo ' grsec: protected kernel image paths'
58719+endif
58720diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
58721new file mode 100644
58722index 0000000..b306b36
58723--- /dev/null
58724+++ b/grsecurity/gracl.c
58725@@ -0,0 +1,4071 @@
58726+#include <linux/kernel.h>
58727+#include <linux/module.h>
58728+#include <linux/sched.h>
58729+#include <linux/mm.h>
58730+#include <linux/file.h>
58731+#include <linux/fs.h>
58732+#include <linux/namei.h>
58733+#include <linux/mount.h>
58734+#include <linux/tty.h>
58735+#include <linux/proc_fs.h>
58736+#include <linux/lglock.h>
58737+#include <linux/slab.h>
58738+#include <linux/vmalloc.h>
58739+#include <linux/types.h>
58740+#include <linux/sysctl.h>
58741+#include <linux/netdevice.h>
58742+#include <linux/ptrace.h>
58743+#include <linux/gracl.h>
58744+#include <linux/gralloc.h>
58745+#include <linux/security.h>
58746+#include <linux/grinternal.h>
58747+#include <linux/pid_namespace.h>
58748+#include <linux/stop_machine.h>
58749+#include <linux/fdtable.h>
58750+#include <linux/percpu.h>
58751+#include <linux/lglock.h>
58752+#include <linux/hugetlb.h>
58753+#include <linux/posix-timers.h>
58754+#include "../fs/mount.h"
58755+
58756+#include <asm/uaccess.h>
58757+#include <asm/errno.h>
58758+#include <asm/mman.h>
58759+
58760+extern struct lglock vfsmount_lock;
58761+
58762+static struct acl_role_db acl_role_set;
58763+static struct name_db name_set;
58764+static struct inodev_db inodev_set;
58765+
58766+/* for keeping track of userspace pointers used for subjects, so we
58767+ can share references in the kernel as well
58768+*/
58769+
58770+static struct path real_root;
58771+
58772+static struct acl_subj_map_db subj_map_set;
58773+
58774+static struct acl_role_label *default_role;
58775+
58776+static struct acl_role_label *role_list;
58777+
58778+static u16 acl_sp_role_value;
58779+
58780+extern char *gr_shared_page[4];
58781+static DEFINE_MUTEX(gr_dev_mutex);
58782+DEFINE_RWLOCK(gr_inode_lock);
58783+
58784+struct gr_arg *gr_usermode;
58785+
58786+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58787+
58788+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
58789+extern void gr_clear_learn_entries(void);
58790+
58791+unsigned char *gr_system_salt;
58792+unsigned char *gr_system_sum;
58793+
58794+static struct sprole_pw **acl_special_roles = NULL;
58795+static __u16 num_sprole_pws = 0;
58796+
58797+static struct acl_role_label *kernel_role = NULL;
58798+
58799+static unsigned int gr_auth_attempts = 0;
58800+static unsigned long gr_auth_expires = 0UL;
58801+
58802+#ifdef CONFIG_NET
58803+extern struct vfsmount *sock_mnt;
58804+#endif
58805+
58806+extern struct vfsmount *pipe_mnt;
58807+extern struct vfsmount *shm_mnt;
58808+
58809+#ifdef CONFIG_HUGETLBFS
58810+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58811+#endif
58812+
58813+static struct acl_object_label *fakefs_obj_rw;
58814+static struct acl_object_label *fakefs_obj_rwx;
58815+
58816+extern int gr_init_uidset(void);
58817+extern void gr_free_uidset(void);
58818+extern void gr_remove_uid(uid_t uid);
58819+extern int gr_find_uid(uid_t uid);
58820+
58821+__inline__ int
58822+gr_acl_is_enabled(void)
58823+{
58824+ return (gr_status & GR_READY);
58825+}
58826+
58827+#ifdef CONFIG_BTRFS_FS
58828+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
58829+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
58830+#endif
58831+
58832+static inline dev_t __get_dev(const struct dentry *dentry)
58833+{
58834+#ifdef CONFIG_BTRFS_FS
58835+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
58836+ return get_btrfs_dev_from_inode(dentry->d_inode);
58837+ else
58838+#endif
58839+ return dentry->d_inode->i_sb->s_dev;
58840+}
58841+
58842+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58843+{
58844+ return __get_dev(dentry);
58845+}
58846+
58847+static char gr_task_roletype_to_char(struct task_struct *task)
58848+{
58849+ switch (task->role->roletype &
58850+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
58851+ GR_ROLE_SPECIAL)) {
58852+ case GR_ROLE_DEFAULT:
58853+ return 'D';
58854+ case GR_ROLE_USER:
58855+ return 'U';
58856+ case GR_ROLE_GROUP:
58857+ return 'G';
58858+ case GR_ROLE_SPECIAL:
58859+ return 'S';
58860+ }
58861+
58862+ return 'X';
58863+}
58864+
58865+char gr_roletype_to_char(void)
58866+{
58867+ return gr_task_roletype_to_char(current);
58868+}
58869+
58870+__inline__ int
58871+gr_acl_tpe_check(void)
58872+{
58873+ if (unlikely(!(gr_status & GR_READY)))
58874+ return 0;
58875+ if (current->role->roletype & GR_ROLE_TPE)
58876+ return 1;
58877+ else
58878+ return 0;
58879+}
58880+
58881+int
58882+gr_handle_rawio(const struct inode *inode)
58883+{
58884+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58885+ if (inode && S_ISBLK(inode->i_mode) &&
58886+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
58887+ !capable(CAP_SYS_RAWIO))
58888+ return 1;
58889+#endif
58890+ return 0;
58891+}
58892+
58893+static int
58894+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
58895+{
58896+ if (likely(lena != lenb))
58897+ return 0;
58898+
58899+ return !memcmp(a, b, lena);
58900+}
58901+
58902+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
58903+{
58904+ *buflen -= namelen;
58905+ if (*buflen < 0)
58906+ return -ENAMETOOLONG;
58907+ *buffer -= namelen;
58908+ memcpy(*buffer, str, namelen);
58909+ return 0;
58910+}
58911+
58912+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
58913+{
58914+ return prepend(buffer, buflen, name->name, name->len);
58915+}
58916+
58917+static int prepend_path(const struct path *path, struct path *root,
58918+ char **buffer, int *buflen)
58919+{
58920+ struct dentry *dentry = path->dentry;
58921+ struct vfsmount *vfsmnt = path->mnt;
58922+ struct mount *mnt = real_mount(vfsmnt);
58923+ bool slash = false;
58924+ int error = 0;
58925+
58926+ while (dentry != root->dentry || vfsmnt != root->mnt) {
58927+ struct dentry * parent;
58928+
58929+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
58930+ /* Global root? */
58931+ if (!mnt_has_parent(mnt)) {
58932+ goto out;
58933+ }
58934+ dentry = mnt->mnt_mountpoint;
58935+ mnt = mnt->mnt_parent;
58936+ vfsmnt = &mnt->mnt;
58937+ continue;
58938+ }
58939+ parent = dentry->d_parent;
58940+ prefetch(parent);
58941+ spin_lock(&dentry->d_lock);
58942+ error = prepend_name(buffer, buflen, &dentry->d_name);
58943+ spin_unlock(&dentry->d_lock);
58944+ if (!error)
58945+ error = prepend(buffer, buflen, "/", 1);
58946+ if (error)
58947+ break;
58948+
58949+ slash = true;
58950+ dentry = parent;
58951+ }
58952+
58953+out:
58954+ if (!error && !slash)
58955+ error = prepend(buffer, buflen, "/", 1);
58956+
58957+ return error;
58958+}
58959+
58960+/* this must be called with vfsmount_lock and rename_lock held */
58961+
58962+static char *__our_d_path(const struct path *path, struct path *root,
58963+ char *buf, int buflen)
58964+{
58965+ char *res = buf + buflen;
58966+ int error;
58967+
58968+ prepend(&res, &buflen, "\0", 1);
58969+ error = prepend_path(path, root, &res, &buflen);
58970+ if (error)
58971+ return ERR_PTR(error);
58972+
58973+ return res;
58974+}
58975+
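
Note the technique: prepend() and __our_d_path() build the string right-to-left, starting the cursor at buf + buflen, writing the NUL terminator first, and copying each path component in front of the previous one as the dentry chain is walked leaf-to-root. A self-contained userspace demonstration of the same idiom (nothing here is kernel code):

    #include <stdio.h>
    #include <string.h>

    /* Same contract as the kernel prepend() above: move the cursor
     * back by len and copy, failing if the buffer would overflow. */
    static int prepend_str(char **cursor, int *left, const char *s, int len)
    {
    	*left -= len;
    	if (*left < 0)
    		return -1;	/* the kernel returns -ENAMETOOLONG */
    	*cursor -= len;
    	memcpy(*cursor, s, len);
    	return 0;
    }

    int main(void)
    {
    	char buf[32];
    	char *res = buf + sizeof(buf);
    	int left = sizeof(buf);

    	prepend_str(&res, &left, "\0", 1);	/* terminator first */
    	/* components arrive leaf-to-root: "b", then "a" */
    	prepend_str(&res, &left, "b", 1);
    	prepend_str(&res, &left, "/", 1);
    	prepend_str(&res, &left, "a", 1);
    	prepend_str(&res, &left, "/", 1);
    	printf("%s\n", res);			/* prints /a/b */
    	return 0;
    }
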
58976+static char *
58977+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
58978+{
58979+ char *retval;
58980+
58981+ retval = __our_d_path(path, root, buf, buflen);
58982+ if (unlikely(IS_ERR(retval)))
58983+ retval = strcpy(buf, "<path too long>");
58984+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
58985+ retval[1] = '\0';
58986+
58987+ return retval;
58988+}
58989+
58990+static char *
58991+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58992+ char *buf, int buflen)
58993+{
58994+ struct path path;
58995+ char *res;
58996+
58997+ path.dentry = (struct dentry *)dentry;
58998+ path.mnt = (struct vfsmount *)vfsmnt;
58999+
59000+ /* we can use real_root.dentry, real_root.mnt, because this is only called
59001+ by the RBAC system */
59002+ res = gen_full_path(&path, &real_root, buf, buflen);
59003+
59004+ return res;
59005+}
59006+
59007+static char *
59008+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
59009+ char *buf, int buflen)
59010+{
59011+ char *res;
59012+ struct path path;
59013+ struct path root;
59014+ struct task_struct *reaper = init_pid_ns.child_reaper;
59015+
59016+ path.dentry = (struct dentry *)dentry;
59017+ path.mnt = (struct vfsmount *)vfsmnt;
59018+
59019+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
59020+ get_fs_root(reaper->fs, &root);
59021+
59022+ br_read_lock(&vfsmount_lock);
59023+ write_seqlock(&rename_lock);
59024+ res = gen_full_path(&path, &root, buf, buflen);
59025+ write_sequnlock(&rename_lock);
59026+ br_read_unlock(&vfsmount_lock);
59027+
59028+ path_put(&root);
59029+ return res;
59030+}
59031+
59032+static char *
59033+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
59034+{
59035+ char *ret;
59036+ br_read_lock(&vfsmount_lock);
59037+ write_seqlock(&rename_lock);
59038+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
59039+ PAGE_SIZE);
59040+ write_sequnlock(&rename_lock);
59041+ br_read_unlock(&vfsmount_lock);
59042+ return ret;
59043+}
59044+
59045+static char *
59046+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
59047+{
59048+ char *ret;
59049+ char *buf;
59050+ int buflen;
59051+
59052+ br_read_lock(&vfsmount_lock);
59053+ write_seqlock(&rename_lock);
59054+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59055+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
59056+ buflen = (int)(ret - buf);
59057+ if (buflen >= 5)
59058+ prepend(&ret, &buflen, "/proc", 5);
59059+ else
59060+ ret = strcpy(buf, "<path too long>");
59061+ write_sequnlock(&rename_lock);
59062+ br_read_unlock(&vfsmount_lock);
59063+ return ret;
59064+}
59065+
59066+char *
59067+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
59068+{
59069+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
59070+ PAGE_SIZE);
59071+}
59072+
59073+char *
59074+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
59075+{
59076+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
59077+ PAGE_SIZE);
59078+}
59079+
59080+char *
59081+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
59082+{
59083+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
59084+ PAGE_SIZE);
59085+}
59086+
59087+char *
59088+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
59089+{
59090+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
59091+ PAGE_SIZE);
59092+}
59093+
59094+char *
59095+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
59096+{
59097+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
59098+ PAGE_SIZE);
59099+}
59100+
59101+__inline__ __u32
59102+to_gr_audit(const __u32 reqmode)
59103+{
59104+ /* masks off auditable permission flags, then shifts them to create
59105+ auditing flags, and adds the special case of append auditing if
59106+ we're requesting write */
59107+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
59108+}
59109+
59110+struct acl_subject_label *
59111+lookup_subject_map(const struct acl_subject_label *userp)
59112+{
59113+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
59114+ struct subject_map *match;
59115+
59116+ match = subj_map_set.s_hash[index];
59117+
59118+ while (match && match->user != userp)
59119+ match = match->next;
59120+
59121+ if (match != NULL)
59122+ return match->kernel;
59123+ else
59124+ return NULL;
59125+}
59126+
59127+static void
59128+insert_subj_map_entry(struct subject_map *subjmap)
59129+{
59130+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
59131+ struct subject_map **curr;
59132+
59133+ subjmap->prev = NULL;
59134+
59135+ curr = &subj_map_set.s_hash[index];
59136+ if (*curr != NULL)
59137+ (*curr)->prev = subjmap;
59138+
59139+ subjmap->next = *curr;
59140+ *curr = subjmap;
59141+
59142+ return;
59143+}
59144+
59145+static struct acl_role_label *
59146+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
59147+ const gid_t gid)
59148+{
59149+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
59150+ struct acl_role_label *match;
59151+ struct role_allowed_ip *ipp;
59152+ unsigned int x;
59153+ u32 curr_ip = task->signal->curr_ip;
59154+
59155+ task->signal->saved_ip = curr_ip;
59156+
59157+ match = acl_role_set.r_hash[index];
59158+
59159+ while (match) {
59160+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
59161+ for (x = 0; x < match->domain_child_num; x++) {
59162+ if (match->domain_children[x] == uid)
59163+ goto found;
59164+ }
59165+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
59166+ break;
59167+ match = match->next;
59168+ }
59169+found:
59170+ if (match == NULL) {
59171+ try_group:
59172+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
59173+ match = acl_role_set.r_hash[index];
59174+
59175+ while (match) {
59176+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
59177+ for (x = 0; x < match->domain_child_num; x++) {
59178+ if (match->domain_children[x] == gid)
59179+ goto found2;
59180+ }
59181+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
59182+ break;
59183+ match = match->next;
59184+ }
59185+found2:
59186+ if (match == NULL)
59187+ match = default_role;
59188+ if (match->allowed_ips == NULL)
59189+ return match;
59190+ else {
59191+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
59192+ if (likely
59193+ ((ntohl(curr_ip) & ipp->netmask) ==
59194+ (ntohl(ipp->addr) & ipp->netmask)))
59195+ return match;
59196+ }
59197+ match = default_role;
59198+ }
59199+ } else if (match->allowed_ips == NULL) {
59200+ return match;
59201+ } else {
59202+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
59203+ if (likely
59204+ ((ntohl(curr_ip) & ipp->netmask) ==
59205+ (ntohl(ipp->addr) & ipp->netmask)))
59206+ return match;
59207+ }
59208+ goto try_group;
59209+ }
59210+
59211+ return match;
59212+}
59213+
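
The allowed_ips test above is a plain masked-prefix comparison in host byte order, so a role bound to 192.168.1.0 with netmask 0xffffff00 (a /24) matches any curr_ip in 192.168.1.0-192.168.1.255. A self-contained userspace restatement with hypothetical values:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t addr, ip, mask = 0xffffff00;	/* /24, host order */

    	inet_pton(AF_INET, "192.168.1.0", &addr);	/* network order */
    	inet_pton(AF_INET, "192.168.1.77", &ip);

    	/* same test as lookup_acl_role_label() performs */
    	if ((ntohl(ip) & mask) == (ntohl(addr) & mask))
    		printf("role matches\n");
    	else
    		printf("no match\n");
    	return 0;
    }
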
59214+struct acl_subject_label *
59215+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
59216+ const struct acl_role_label *role)
59217+{
59218+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
59219+ struct acl_subject_label *match;
59220+
59221+ match = role->subj_hash[index];
59222+
59223+ while (match && (match->inode != ino || match->device != dev ||
59224+ (match->mode & GR_DELETED))) {
59225+ match = match->next;
59226+ }
59227+
59228+ if (match && !(match->mode & GR_DELETED))
59229+ return match;
59230+ else
59231+ return NULL;
59232+}
59233+
59234+struct acl_subject_label *
59235+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
59236+ const struct acl_role_label *role)
59237+{
59238+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
59239+ struct acl_subject_label *match;
59240+
59241+ match = role->subj_hash[index];
59242+
59243+ while (match && (match->inode != ino || match->device != dev ||
59244+ !(match->mode & GR_DELETED))) {
59245+ match = match->next;
59246+ }
59247+
59248+ if (match && (match->mode & GR_DELETED))
59249+ return match;
59250+ else
59251+ return NULL;
59252+}
59253+
59254+static struct acl_object_label *
59255+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
59256+ const struct acl_subject_label *subj)
59257+{
59258+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
59259+ struct acl_object_label *match;
59260+
59261+ match = subj->obj_hash[index];
59262+
59263+ while (match && (match->inode != ino || match->device != dev ||
59264+ (match->mode & GR_DELETED))) {
59265+ match = match->next;
59266+ }
59267+
59268+ if (match && !(match->mode & GR_DELETED))
59269+ return match;
59270+ else
59271+ return NULL;
59272+}
59273+
59274+static struct acl_object_label *
59275+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
59276+ const struct acl_subject_label *subj)
59277+{
59278+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
59279+ struct acl_object_label *match;
59280+
59281+ match = subj->obj_hash[index];
59282+
59283+ while (match && (match->inode != ino || match->device != dev ||
59284+ !(match->mode & GR_DELETED))) {
59285+ match = match->next;
59286+ }
59287+
59288+ if (match && (match->mode & GR_DELETED))
59289+ return match;
59290+
59291+ match = subj->obj_hash[index];
59292+
59293+ while (match && (match->inode != ino || match->device != dev ||
59294+ (match->mode & GR_DELETED))) {
59295+ match = match->next;
59296+ }
59297+
59298+ if (match && !(match->mode & GR_DELETED))
59299+ return match;
59300+ else
59301+ return NULL;
59302+}
59303+
59304+static struct name_entry *
59305+lookup_name_entry(const char *name)
59306+{
59307+ unsigned int len = strlen(name);
59308+ unsigned int key = full_name_hash(name, len);
59309+ unsigned int index = key % name_set.n_size;
59310+ struct name_entry *match;
59311+
59312+ match = name_set.n_hash[index];
59313+
59314+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
59315+ match = match->next;
59316+
59317+ return match;
59318+}
59319+
59320+static struct name_entry *
59321+lookup_name_entry_create(const char *name)
59322+{
59323+ unsigned int len = strlen(name);
59324+ unsigned int key = full_name_hash(name, len);
59325+ unsigned int index = key % name_set.n_size;
59326+ struct name_entry *match;
59327+
59328+ match = name_set.n_hash[index];
59329+
59330+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
59331+ !match->deleted))
59332+ match = match->next;
59333+
59334+ if (match && match->deleted)
59335+ return match;
59336+
59337+ match = name_set.n_hash[index];
59338+
59339+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
59340+ match->deleted))
59341+ match = match->next;
59342+
59343+ if (match && !match->deleted)
59344+ return match;
59345+ else
59346+ return NULL;
59347+}
59348+
59349+static struct inodev_entry *
59350+lookup_inodev_entry(const ino_t ino, const dev_t dev)
59351+{
59352+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
59353+ struct inodev_entry *match;
59354+
59355+ match = inodev_set.i_hash[index];
59356+
59357+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
59358+ match = match->next;
59359+
59360+ return match;
59361+}
59362+
59363+static void
59364+insert_inodev_entry(struct inodev_entry *entry)
59365+{
59366+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
59367+ inodev_set.i_size);
59368+ struct inodev_entry **curr;
59369+
59370+ entry->prev = NULL;
59371+
59372+ curr = &inodev_set.i_hash[index];
59373+ if (*curr != NULL)
59374+ (*curr)->prev = entry;
59375+
59376+ entry->next = *curr;
59377+ *curr = entry;
59378+
59379+ return;
59380+}
59381+
59382+static void
59383+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
59384+{
59385+ unsigned int index =
59386+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
59387+ struct acl_role_label **curr;
59388+ struct acl_role_label *tmp, *tmp2;
59389+
59390+ curr = &acl_role_set.r_hash[index];
59391+
59392+ /* simple case, slot is empty, just set it to our role */
59393+ if (*curr == NULL) {
59394+ *curr = role;
59395+ } else {
59396+ /* example:
59397+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
59398+ 2 -> 3
59399+ */
59400+ /* first check to see if we can already be reached via this slot */
59401+ tmp = *curr;
59402+ while (tmp && tmp != role)
59403+ tmp = tmp->next;
59404+ if (tmp == role) {
59405+ /* we don't need to add ourselves to this slot's chain */
59406+ return;
59407+ }
59408+ /* we need to add ourselves to this chain, two cases */
59409+ if (role->next == NULL) {
59410+ /* simple case, append the current chain to our role */
59411+ role->next = *curr;
59412+ *curr = role;
59413+ } else {
59414+ /* 1 -> 2 -> 3 -> 4
59415+ 2 -> 3 -> 4
59416+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
59417+ */
59418+ /* trickier case: walk our role's chain until we find
59419+ the role for the start of the current slot's chain */
59420+ tmp = role;
59421+ tmp2 = *curr;
59422+ while (tmp->next && tmp->next != tmp2)
59423+ tmp = tmp->next;
59424+ if (tmp->next == tmp2) {
59425+ /* from example above, we found 3, so just
59426+ replace this slot's chain with ours */
59427+ *curr = role;
59428+ } else {
59429+ /* we didn't find a subset of our role's chain
59430+ in the current slot's chain, so append their
59431+ chain to ours, and set us as the first role in
59432+ the slot's chain
59433+
59434+ we could fold this case with the case above,
59435+ but making it explicit for clarity
59436+ */
59437+ tmp->next = tmp2;
59438+ *curr = role;
59439+ }
59440+ }
59441+ }
59442+
59443+ return;
59444+}
59445+
59446+static void
59447+insert_acl_role_label(struct acl_role_label *role)
59448+{
59449+ int i;
59450+
59451+ if (role_list == NULL) {
59452+ role_list = role;
59453+ role->prev = NULL;
59454+ } else {
59455+ role->prev = role_list;
59456+ role_list = role;
59457+ }
59458+
59459+ /* used for hash chains */
59460+ role->next = NULL;
59461+
59462+ if (role->roletype & GR_ROLE_DOMAIN) {
59463+ for (i = 0; i < role->domain_child_num; i++)
59464+ __insert_acl_role_label(role, role->domain_children[i]);
59465+ } else
59466+ __insert_acl_role_label(role, role->uidgid);
59467+}
59468+
59469+static int
59470+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
59471+{
59472+ struct name_entry **curr, *nentry;
59473+ struct inodev_entry *ientry;
59474+ unsigned int len = strlen(name);
59475+ unsigned int key = full_name_hash(name, len);
59476+ unsigned int index = key % name_set.n_size;
59477+
59478+ curr = &name_set.n_hash[index];
59479+
59480+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
59481+ curr = &((*curr)->next);
59482+
59483+ if (*curr != NULL)
59484+ return 1;
59485+
59486+ nentry = acl_alloc(sizeof (struct name_entry));
59487+ if (nentry == NULL)
59488+ return 0;
59489+ ientry = acl_alloc(sizeof (struct inodev_entry));
59490+ if (ientry == NULL)
59491+ return 0;
59492+ ientry->nentry = nentry;
59493+
59494+ nentry->key = key;
59495+ nentry->name = name;
59496+ nentry->inode = inode;
59497+ nentry->device = device;
59498+ nentry->len = len;
59499+ nentry->deleted = deleted;
59500+
59501+ nentry->prev = NULL;
59502+ curr = &name_set.n_hash[index];
59503+ if (*curr != NULL)
59504+ (*curr)->prev = nentry;
59505+ nentry->next = *curr;
59506+ *curr = nentry;
59507+
59508+ /* insert us into the table searchable by inode/dev */
59509+ insert_inodev_entry(ientry);
59510+
59511+ return 1;
59512+}
59513+
59514+static void
59515+insert_acl_obj_label(struct acl_object_label *obj,
59516+ struct acl_subject_label *subj)
59517+{
59518+ unsigned int index =
59519+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
59520+ struct acl_object_label **curr;
59521+
59522+
59523+ obj->prev = NULL;
59524+
59525+ curr = &subj->obj_hash[index];
59526+ if (*curr != NULL)
59527+ (*curr)->prev = obj;
59528+
59529+ obj->next = *curr;
59530+ *curr = obj;
59531+
59532+ return;
59533+}
59534+
59535+static void
59536+insert_acl_subj_label(struct acl_subject_label *obj,
59537+ struct acl_role_label *role)
59538+{
59539+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
59540+ struct acl_subject_label **curr;
59541+
59542+ obj->prev = NULL;
59543+
59544+ curr = &role->subj_hash[index];
59545+ if (*curr != NULL)
59546+ (*curr)->prev = obj;
59547+
59548+ obj->next = *curr;
59549+ *curr = obj;
59550+
59551+ return;
59552+}
59553+
59554+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
59555+
59556+static void *
59557+create_table(__u32 * len, int elementsize)
59558+{
59559+ unsigned int table_sizes[] = {
59560+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
59561+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
59562+ 4194301, 8388593, 16777213, 33554393, 67108859
59563+ };
59564+ void *newtable = NULL;
59565+ unsigned int pwr = 0;
59566+
59567+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
59568+ table_sizes[pwr] <= *len)
59569+ pwr++;
59570+
59571+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
59572+ return newtable;
59573+
59574+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
59575+ newtable =
59576+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
59577+ else
59578+ newtable = vmalloc(table_sizes[pwr] * elementsize);
59579+
59580+ *len = table_sizes[pwr];
59581+
59582+ return newtable;
59583+}
59584+
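
The sizes in create_table() are primes just under successive powers of two; picking the first prime strictly greater than the requested count keeps the expected chain length (the load factor lambda from the comment above) at or below 1. A userspace restatement of the selection loop, showing that a request for 1000 entries yields 1021 buckets:

    #include <stdio.h>

    int main(void)
    {
    	static const unsigned int sizes[] = {
    		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191
    	};
    	unsigned int want = 1000, pwr = 0;

    	/* same walk as create_table(): stop at the first size > want */
    	while (pwr < sizeof(sizes) / sizeof(sizes[0]) - 1 &&
    	       sizes[pwr] <= want)
    		pwr++;
    	printf("%u entries -> %u buckets\n", want, sizes[pwr]);
    	return 0;
    }
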
59585+static int
59586+init_variables(const struct gr_arg *arg)
59587+{
59588+ struct task_struct *reaper = init_pid_ns.child_reaper;
59589+ unsigned int stacksize;
59590+
59591+ subj_map_set.s_size = arg->role_db.num_subjects;
59592+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
59593+ name_set.n_size = arg->role_db.num_objects;
59594+ inodev_set.i_size = arg->role_db.num_objects;
59595+
59596+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
59597+ !name_set.n_size || !inodev_set.i_size)
59598+ return 1;
59599+
59600+ if (!gr_init_uidset())
59601+ return 1;
59602+
59603+ /* set up the stack that holds allocation info */
59604+
59605+ stacksize = arg->role_db.num_pointers + 5;
59606+
59607+ if (!acl_alloc_stack_init(stacksize))
59608+ return 1;
59609+
59610+ /* grab reference for the real root dentry and vfsmount */
59611+ get_fs_root(reaper->fs, &real_root);
59612+
59613+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59614+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
59615+#endif
59616+
59617+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
59618+ if (fakefs_obj_rw == NULL)
59619+ return 1;
59620+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
59621+
59622+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
59623+ if (fakefs_obj_rwx == NULL)
59624+ return 1;
59625+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
59626+
59627+ subj_map_set.s_hash =
59628+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
59629+ acl_role_set.r_hash =
59630+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
59631+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
59632+ inodev_set.i_hash =
59633+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
59634+
59635+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
59636+ !name_set.n_hash || !inodev_set.i_hash)
59637+ return 1;
59638+
59639+ memset(subj_map_set.s_hash, 0,
59640+ sizeof(struct subject_map *) * subj_map_set.s_size);
59641+ memset(acl_role_set.r_hash, 0,
59642+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
59643+ memset(name_set.n_hash, 0,
59644+ sizeof (struct name_entry *) * name_set.n_size);
59645+ memset(inodev_set.i_hash, 0,
59646+ sizeof (struct inodev_entry *) * inodev_set.i_size);
59647+
59648+ return 0;
59649+}
59650+
59651+/* free information not needed after startup
59652+ currently contains user->kernel pointer mappings for subjects
59653+*/
59654+
59655+static void
59656+free_init_variables(void)
59657+{
59658+ __u32 i;
59659+
59660+ if (subj_map_set.s_hash) {
59661+ for (i = 0; i < subj_map_set.s_size; i++) {
59662+ if (subj_map_set.s_hash[i]) {
59663+ kfree(subj_map_set.s_hash[i]);
59664+ subj_map_set.s_hash[i] = NULL;
59665+ }
59666+ }
59667+
59668+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
59669+ PAGE_SIZE)
59670+ kfree(subj_map_set.s_hash);
59671+ else
59672+ vfree(subj_map_set.s_hash);
59673+ }
59674+
59675+ return;
59676+}
59677+
59678+static void
59679+free_variables(void)
59680+{
59681+ struct acl_subject_label *s;
59682+ struct acl_role_label *r;
59683+ struct task_struct *task, *task2;
59684+ unsigned int x;
59685+
59686+ gr_clear_learn_entries();
59687+
59688+ read_lock(&tasklist_lock);
59689+ do_each_thread(task2, task) {
59690+ task->acl_sp_role = 0;
59691+ task->acl_role_id = 0;
59692+ task->acl = NULL;
59693+ task->role = NULL;
59694+ } while_each_thread(task2, task);
59695+ read_unlock(&tasklist_lock);
59696+
59697+ /* release the reference to the real root dentry and vfsmount */
59698+ path_put(&real_root);
59699+ memset(&real_root, 0, sizeof(real_root));
59700+
59701+ /* free all object hash tables */
59702+
59703+ FOR_EACH_ROLE_START(r)
59704+ if (r->subj_hash == NULL)
59705+ goto next_role;
59706+ FOR_EACH_SUBJECT_START(r, s, x)
59707+ if (s->obj_hash == NULL)
59708+ break;
59709+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59710+ kfree(s->obj_hash);
59711+ else
59712+ vfree(s->obj_hash);
59713+ FOR_EACH_SUBJECT_END(s, x)
59714+ FOR_EACH_NESTED_SUBJECT_START(r, s)
59715+ if (s->obj_hash == NULL)
59716+ break;
59717+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59718+ kfree(s->obj_hash);
59719+ else
59720+ vfree(s->obj_hash);
59721+ FOR_EACH_NESTED_SUBJECT_END(s)
59722+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
59723+ kfree(r->subj_hash);
59724+ else
59725+ vfree(r->subj_hash);
59726+ r->subj_hash = NULL;
59727+next_role:
59728+ FOR_EACH_ROLE_END(r)
59729+
59730+ acl_free_all();
59731+
59732+ if (acl_role_set.r_hash) {
59733+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
59734+ PAGE_SIZE)
59735+ kfree(acl_role_set.r_hash);
59736+ else
59737+ vfree(acl_role_set.r_hash);
59738+ }
59739+ if (name_set.n_hash) {
59740+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
59741+ PAGE_SIZE)
59742+ kfree(name_set.n_hash);
59743+ else
59744+ vfree(name_set.n_hash);
59745+ }
59746+
59747+ if (inodev_set.i_hash) {
59748+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
59749+ PAGE_SIZE)
59750+ kfree(inodev_set.i_hash);
59751+ else
59752+ vfree(inodev_set.i_hash);
59753+ }
59754+
59755+ gr_free_uidset();
59756+
59757+ memset(&name_set, 0, sizeof (struct name_db));
59758+ memset(&inodev_set, 0, sizeof (struct inodev_db));
59759+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
59760+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
59761+
59762+ default_role = NULL;
59763+ kernel_role = NULL;
59764+ role_list = NULL;
59765+
59766+ return;
59767+}
59768+
59769+static __u32
59770+count_user_objs(struct acl_object_label *userp)
59771+{
59772+ struct acl_object_label o_tmp;
59773+ __u32 num = 0;
59774+
59775+ while (userp) {
59776+ if (copy_from_user(&o_tmp, userp,
59777+ sizeof (struct acl_object_label)))
59778+ break;
59779+
59780+ userp = o_tmp.prev;
59781+ num++;
59782+ }
59783+
59784+ return num;
59785+}
59786+
59787+static struct acl_subject_label *
59788+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
59789+
59790+static int
59791+copy_user_glob(struct acl_object_label *obj)
59792+{
59793+ struct acl_object_label *g_tmp, **guser;
59794+ unsigned int len;
59795+ char *tmp;
59796+
59797+ if (obj->globbed == NULL)
59798+ return 0;
59799+
59800+ guser = &obj->globbed;
59801+ while (*guser) {
59802+ g_tmp = (struct acl_object_label *)
59803+ acl_alloc(sizeof (struct acl_object_label));
59804+ if (g_tmp == NULL)
59805+ return -ENOMEM;
59806+
59807+ if (copy_from_user(g_tmp, *guser,
59808+ sizeof (struct acl_object_label)))
59809+ return -EFAULT;
59810+
59811+ len = strnlen_user(g_tmp->filename, PATH_MAX);
59812+
59813+ if (!len || len >= PATH_MAX)
59814+ return -EINVAL;
59815+
59816+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59817+ return -ENOMEM;
59818+
59819+ if (copy_from_user(tmp, g_tmp->filename, len))
59820+ return -EFAULT;
59821+ tmp[len-1] = '\0';
59822+ g_tmp->filename = tmp;
59823+
59824+ *guser = g_tmp;
59825+ guser = &(g_tmp->next);
59826+ }
59827+
59828+ return 0;
59829+}
59830+
59831+static int
59832+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
59833+ struct acl_role_label *role)
59834+{
59835+ struct acl_object_label *o_tmp;
59836+ unsigned int len;
59837+ int ret;
59838+ char *tmp;
59839+
59840+ while (userp) {
59841+ if ((o_tmp = (struct acl_object_label *)
59842+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
59843+ return -ENOMEM;
59844+
59845+ if (copy_from_user(o_tmp, userp,
59846+ sizeof (struct acl_object_label)))
59847+ return -EFAULT;
59848+
59849+ userp = o_tmp->prev;
59850+
59851+ len = strnlen_user(o_tmp->filename, PATH_MAX);
59852+
59853+ if (!len || len >= PATH_MAX)
59854+ return -EINVAL;
59855+
59856+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59857+ return -ENOMEM;
59858+
59859+ if (copy_from_user(tmp, o_tmp->filename, len))
59860+ return -EFAULT;
59861+ tmp[len-1] = '\0';
59862+ o_tmp->filename = tmp;
59863+
59864+ insert_acl_obj_label(o_tmp, subj);
59865+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
59866+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
59867+ return -ENOMEM;
59868+
59869+ ret = copy_user_glob(o_tmp);
59870+ if (ret)
59871+ return ret;
59872+
59873+ if (o_tmp->nested) {
59874+ int already_copied;
59875+
59876+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
59877+ if (IS_ERR(o_tmp->nested))
59878+ return PTR_ERR(o_tmp->nested);
59879+
59880+ /* insert into nested subject list if we haven't copied this one yet
59881+ to prevent duplicate entries */
59882+ if (!already_copied) {
59883+ o_tmp->nested->next = role->hash->first;
59884+ role->hash->first = o_tmp->nested;
59885+ }
59886+ }
59887+ }
59888+
59889+ return 0;
59890+}
59891+
59892+static __u32
59893+count_user_subjs(struct acl_subject_label *userp)
59894+{
59895+ struct acl_subject_label s_tmp;
59896+ __u32 num = 0;
59897+
59898+ while (userp) {
59899+ if (copy_from_user(&s_tmp, userp,
59900+ sizeof (struct acl_subject_label)))
59901+ break;
59902+
59903+		userp = s_tmp.prev;
59903+		num++;
59904+ }
59905+
59906+ return num;
59907+}
59908+
59909+static int
59910+copy_user_allowedips(struct acl_role_label *rolep)
59911+{
59912+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
59913+
59914+ ruserip = rolep->allowed_ips;
59915+
59916+ while (ruserip) {
59917+ rlast = rtmp;
59918+
59919+ if ((rtmp = (struct role_allowed_ip *)
59920+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
59921+ return -ENOMEM;
59922+
59923+ if (copy_from_user(rtmp, ruserip,
59924+ sizeof (struct role_allowed_ip)))
59925+ return -EFAULT;
59926+
59927+ ruserip = rtmp->prev;
59928+
59929+ if (!rlast) {
59930+ rtmp->prev = NULL;
59931+ rolep->allowed_ips = rtmp;
59932+ } else {
59933+ rlast->next = rtmp;
59934+ rtmp->prev = rlast;
59935+ }
59936+
59937+ if (!ruserip)
59938+ rtmp->next = NULL;
59939+ }
59940+
59941+ return 0;
59942+}
59943+
59944+static int
59945+copy_user_transitions(struct acl_role_label *rolep)
59946+{
59947+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
59948+
59949+ unsigned int len;
59950+ char *tmp;
59951+
59952+ rusertp = rolep->transitions;
59953+
59954+ while (rusertp) {
59955+ rlast = rtmp;
59956+
59957+ if ((rtmp = (struct role_transition *)
59958+ acl_alloc(sizeof (struct role_transition))) == NULL)
59959+ return -ENOMEM;
59960+
59961+ if (copy_from_user(rtmp, rusertp,
59962+ sizeof (struct role_transition)))
59963+ return -EFAULT;
59964+
59965+ rusertp = rtmp->prev;
59966+
59967+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
59968+
59969+ if (!len || len >= GR_SPROLE_LEN)
59970+ return -EINVAL;
59971+
59972+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59973+ return -ENOMEM;
59974+
59975+ if (copy_from_user(tmp, rtmp->rolename, len))
59976+ return -EFAULT;
59977+ tmp[len-1] = '\0';
59978+ rtmp->rolename = tmp;
59979+
59980+ if (!rlast) {
59981+ rtmp->prev = NULL;
59982+ rolep->transitions = rtmp;
59983+ } else {
59984+ rlast->next = rtmp;
59985+ rtmp->prev = rlast;
59986+ }
59987+
59988+ if (!rusertp)
59989+ rtmp->next = NULL;
59990+ }
59991+
59992+ return 0;
59993+}
59994+
59995+static struct acl_subject_label *
59996+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
59997+{
59998+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
59999+ unsigned int len;
60000+ char *tmp;
60001+ __u32 num_objs;
60002+ struct acl_ip_label **i_tmp, *i_utmp2;
60003+ struct gr_hash_struct ghash;
60004+ struct subject_map *subjmap;
60005+ unsigned int i_num;
60006+ int err;
60007+
60008+ if (already_copied != NULL)
60009+ *already_copied = 0;
60010+
60011+ s_tmp = lookup_subject_map(userp);
60012+
60013+ /* we've already copied this subject into the kernel, just return
60014+ the reference to it, and don't copy it over again
60015+ */
60016+ if (s_tmp) {
60017+ if (already_copied != NULL)
60018+ *already_copied = 1;
60019+ return(s_tmp);
60020+ }
60021+
60022+ if ((s_tmp = (struct acl_subject_label *)
60023+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
60024+ return ERR_PTR(-ENOMEM);
60025+
60026+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
60027+ if (subjmap == NULL)
60028+ return ERR_PTR(-ENOMEM);
60029+
60030+ subjmap->user = userp;
60031+ subjmap->kernel = s_tmp;
60032+ insert_subj_map_entry(subjmap);
60033+
60034+ if (copy_from_user(s_tmp, userp,
60035+ sizeof (struct acl_subject_label)))
60036+ return ERR_PTR(-EFAULT);
60037+
60038+ len = strnlen_user(s_tmp->filename, PATH_MAX);
60039+
60040+ if (!len || len >= PATH_MAX)
60041+ return ERR_PTR(-EINVAL);
60042+
60043+ if ((tmp = (char *) acl_alloc(len)) == NULL)
60044+ return ERR_PTR(-ENOMEM);
60045+
60046+ if (copy_from_user(tmp, s_tmp->filename, len))
60047+ return ERR_PTR(-EFAULT);
60048+ tmp[len-1] = '\0';
60049+ s_tmp->filename = tmp;
60050+
60051+ if (!strcmp(s_tmp->filename, "/"))
60052+ role->root_label = s_tmp;
60053+
60054+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
60055+ return ERR_PTR(-EFAULT);
60056+
60057+ /* copy user and group transition tables */
60058+
60059+ if (s_tmp->user_trans_num) {
60060+ uid_t *uidlist;
60061+
60062+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
60063+ if (uidlist == NULL)
60064+ return ERR_PTR(-ENOMEM);
60065+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
60066+ return ERR_PTR(-EFAULT);
60067+
60068+ s_tmp->user_transitions = uidlist;
60069+ }
60070+
60071+ if (s_tmp->group_trans_num) {
60072+ gid_t *gidlist;
60073+
60074+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
60075+ if (gidlist == NULL)
60076+ return ERR_PTR(-ENOMEM);
60077+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
60078+ return ERR_PTR(-EFAULT);
60079+
60080+ s_tmp->group_transitions = gidlist;
60081+ }
60082+
60083+ /* set up object hash table */
60084+ num_objs = count_user_objs(ghash.first);
60085+
60086+ s_tmp->obj_hash_size = num_objs;
60087+ s_tmp->obj_hash =
60088+ (struct acl_object_label **)
60089+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
60090+
60091+ if (!s_tmp->obj_hash)
60092+ return ERR_PTR(-ENOMEM);
60093+
60094+ memset(s_tmp->obj_hash, 0,
60095+ s_tmp->obj_hash_size *
60096+ sizeof (struct acl_object_label *));
60097+
60098+ /* add in objects */
60099+ err = copy_user_objs(ghash.first, s_tmp, role);
60100+
60101+ if (err)
60102+ return ERR_PTR(err);
60103+
60104+ /* set pointer for parent subject */
60105+ if (s_tmp->parent_subject) {
60106+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
60107+
60108+ if (IS_ERR(s_tmp2))
60109+ return s_tmp2;
60110+
60111+ s_tmp->parent_subject = s_tmp2;
60112+ }
60113+
60114+ /* add in ip acls */
60115+
60116+ if (!s_tmp->ip_num) {
60117+ s_tmp->ips = NULL;
60118+ goto insert;
60119+ }
60120+
60121+ i_tmp =
60122+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
60123+ sizeof (struct acl_ip_label *));
60124+
60125+ if (!i_tmp)
60126+ return ERR_PTR(-ENOMEM);
60127+
60128+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
60129+ *(i_tmp + i_num) =
60130+ (struct acl_ip_label *)
60131+ acl_alloc(sizeof (struct acl_ip_label));
60132+ if (!*(i_tmp + i_num))
60133+ return ERR_PTR(-ENOMEM);
60134+
60135+ if (copy_from_user
60136+ (&i_utmp2, s_tmp->ips + i_num,
60137+ sizeof (struct acl_ip_label *)))
60138+ return ERR_PTR(-EFAULT);
60139+
60140+ if (copy_from_user
60141+ (*(i_tmp + i_num), i_utmp2,
60142+ sizeof (struct acl_ip_label)))
60143+ return ERR_PTR(-EFAULT);
60144+
60145+ if ((*(i_tmp + i_num))->iface == NULL)
60146+ continue;
60147+
60148+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
60149+ if (!len || len >= IFNAMSIZ)
60150+ return ERR_PTR(-EINVAL);
60151+ tmp = acl_alloc(len);
60152+ if (tmp == NULL)
60153+ return ERR_PTR(-ENOMEM);
60154+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
60155+ return ERR_PTR(-EFAULT);
60156+ (*(i_tmp + i_num))->iface = tmp;
60157+ }
60158+
60159+ s_tmp->ips = i_tmp;
60160+
60161+insert:
60162+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
60163+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
60164+ return ERR_PTR(-ENOMEM);
60165+
60166+ return s_tmp;
60167+}
60168+
60169+static int
60170+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
60171+{
60172+ struct acl_subject_label s_pre;
60173+ struct acl_subject_label * ret;
60174+ int err;
60175+
60176+ while (userp) {
60177+ if (copy_from_user(&s_pre, userp,
60178+ sizeof (struct acl_subject_label)))
60179+ return -EFAULT;
60180+
60181+ ret = do_copy_user_subj(userp, role, NULL);
60182+
60183+ err = PTR_ERR(ret);
60184+ if (IS_ERR(ret))
60185+ return err;
60186+
60187+ insert_acl_subj_label(ret, role);
60188+
60189+ userp = s_pre.prev;
60190+ }
60191+
60192+ return 0;
60193+}
60194+
60195+static int
60196+copy_user_acl(struct gr_arg *arg)
60197+{
60198+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
60199+ struct acl_subject_label *subj_list;
60200+ struct sprole_pw *sptmp;
60201+ struct gr_hash_struct *ghash;
60202+ uid_t *domainlist;
60203+ unsigned int r_num;
60204+ unsigned int len;
60205+ char *tmp;
60206+ int err = 0;
60207+ __u16 i;
60208+ __u32 num_subjs;
60209+
60210+ /* we need a default and kernel role */
60211+ if (arg->role_db.num_roles < 2)
60212+ return -EINVAL;
60213+
60214+ /* copy special role authentication info from userspace */
60215+
60216+ num_sprole_pws = arg->num_sprole_pws;
60217+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
60218+
60219+ if (!acl_special_roles && num_sprole_pws)
60220+ return -ENOMEM;
60221+
60222+ for (i = 0; i < num_sprole_pws; i++) {
60223+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
60224+ if (!sptmp)
60225+ return -ENOMEM;
60226+ if (copy_from_user(sptmp, arg->sprole_pws + i,
60227+ sizeof (struct sprole_pw)))
60228+ return -EFAULT;
60229+
60230+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
60231+
60232+ if (!len || len >= GR_SPROLE_LEN)
60233+ return -EINVAL;
60234+
60235+ if ((tmp = (char *) acl_alloc(len)) == NULL)
60236+ return -ENOMEM;
60237+
60238+ if (copy_from_user(tmp, sptmp->rolename, len))
60239+ return -EFAULT;
60240+
60241+ tmp[len-1] = '\0';
60242+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60243+ printk(KERN_ALERT "Copying special role %s\n", tmp);
60244+#endif
60245+ sptmp->rolename = tmp;
60246+ acl_special_roles[i] = sptmp;
60247+ }
60248+
60249+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
60250+
60251+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
60252+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
60253+
60254+ if (!r_tmp)
60255+ return -ENOMEM;
60256+
60257+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
60258+ sizeof (struct acl_role_label *)))
60259+ return -EFAULT;
60260+
60261+ if (copy_from_user(r_tmp, r_utmp2,
60262+ sizeof (struct acl_role_label)))
60263+ return -EFAULT;
60264+
60265+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
60266+
60267+	if (!len || len >= GR_SPROLE_LEN)
60268+ return -EINVAL;
60269+
60270+ if ((tmp = (char *) acl_alloc(len)) == NULL)
60271+ return -ENOMEM;
60272+
60273+ if (copy_from_user(tmp, r_tmp->rolename, len))
60274+ return -EFAULT;
60275+
60276+ tmp[len-1] = '\0';
60277+ r_tmp->rolename = tmp;
60278+
60279+ if (!strcmp(r_tmp->rolename, "default")
60280+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
60281+ default_role = r_tmp;
60282+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
60283+ kernel_role = r_tmp;
60284+ }
60285+
60286+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
60287+ return -ENOMEM;
60288+
60289+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
60290+ return -EFAULT;
60291+
60292+ r_tmp->hash = ghash;
60293+
60294+ num_subjs = count_user_subjs(r_tmp->hash->first);
60295+
60296+ r_tmp->subj_hash_size = num_subjs;
60297+ r_tmp->subj_hash =
60298+ (struct acl_subject_label **)
60299+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
60300+
60301+ if (!r_tmp->subj_hash)
60302+ return -ENOMEM;
60303+
60304+ err = copy_user_allowedips(r_tmp);
60305+ if (err)
60306+ return err;
60307+
60308+ /* copy domain info */
60309+ if (r_tmp->domain_children != NULL) {
60310+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
60311+ if (domainlist == NULL)
60312+ return -ENOMEM;
60313+
60314+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
60315+ return -EFAULT;
60316+
60317+ r_tmp->domain_children = domainlist;
60318+ }
60319+
60320+ err = copy_user_transitions(r_tmp);
60321+ if (err)
60322+ return err;
60323+
60324+ memset(r_tmp->subj_hash, 0,
60325+ r_tmp->subj_hash_size *
60326+ sizeof (struct acl_subject_label *));
60327+
60328+ /* acquire the list of subjects, then NULL out
60329+ the list prior to parsing the subjects for this role,
60330+ as during this parsing the list is replaced with a list
60331+ of *nested* subjects for the role
60332+ */
60333+ subj_list = r_tmp->hash->first;
60334+
60335+ /* set nested subject list to null */
60336+ r_tmp->hash->first = NULL;
60337+
60338+ err = copy_user_subjs(subj_list, r_tmp);
60339+
60340+ if (err)
60341+ return err;
60342+
60343+ insert_acl_role_label(r_tmp);
60344+ }
60345+
60346+ if (default_role == NULL || kernel_role == NULL)
60347+ return -EINVAL;
60348+
60349+ return err;
60350+}
60351+
60352+static int
60353+gracl_init(struct gr_arg *args)
60354+{
60355+ int error = 0;
60356+
60357+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
60358+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
60359+
60360+ if (init_variables(args)) {
60361+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
60362+ error = -ENOMEM;
60363+ free_variables();
60364+ goto out;
60365+ }
60366+
60367+ error = copy_user_acl(args);
60368+ free_init_variables();
60369+ if (error) {
60370+ free_variables();
60371+ goto out;
60372+ }
60373+
60374+ if ((error = gr_set_acls(0))) {
60375+ free_variables();
60376+ goto out;
60377+ }
60378+
60379+ pax_open_kernel();
60380+ gr_status |= GR_READY;
60381+ pax_close_kernel();
60382+
60383+ out:
60384+ return error;
60385+}
60386+
60387+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
60388+
60389+static int
60390+glob_match(const char *p, const char *n)
60391+{
60392+ char c;
60393+
60394+ while ((c = *p++) != '\0') {
60395+ switch (c) {
60396+ case '?':
60397+ if (*n == '\0')
60398+ return 1;
60399+ else if (*n == '/')
60400+ return 1;
60401+ break;
60402+ case '\\':
60403+ if (*n != c)
60404+ return 1;
60405+ break;
60406+ case '*':
60407+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
60408+ if (*n == '/')
60409+ return 1;
60410+ else if (c == '?') {
60411+ if (*n == '\0')
60412+ return 1;
60413+ else
60414+ ++n;
60415+ }
60416+ }
60417+ if (c == '\0') {
60418+ return 0;
60419+ } else {
60420+ const char *endp;
60421+
60422+ if ((endp = strchr(n, '/')) == NULL)
60423+ endp = n + strlen(n);
60424+
60425+ if (c == '[') {
60426+ for (--p; n < endp; ++n)
60427+ if (!glob_match(p, n))
60428+ return 0;
60429+ } else if (c == '/') {
60430+ while (*n != '\0' && *n != '/')
60431+ ++n;
60432+ if (*n == '/' && !glob_match(p, n + 1))
60433+ return 0;
60434+ } else {
60435+ for (--p; n < endp; ++n)
60436+ if (*n == c && !glob_match(p, n))
60437+ return 0;
60438+ }
60439+
60440+ return 1;
60441+ }
60442+ case '[':
60443+ {
60444+ int not;
60445+ char cold;
60446+
60447+ if (*n == '\0' || *n == '/')
60448+ return 1;
60449+
60450+ not = (*p == '!' || *p == '^');
60451+ if (not)
60452+ ++p;
60453+
60454+ c = *p++;
60455+ for (;;) {
60456+ unsigned char fn = (unsigned char)*n;
60457+
60458+ if (c == '\0')
60459+ return 1;
60460+ else {
60461+ if (c == fn)
60462+ goto matched;
60463+ cold = c;
60464+ c = *p++;
60465+
60466+ if (c == '-' && *p != ']') {
60467+ unsigned char cend = *p++;
60468+
60469+ if (cend == '\0')
60470+ return 1;
60471+
60472+ if (cold <= fn && fn <= cend)
60473+ goto matched;
60474+
60475+ c = *p++;
60476+ }
60477+ }
60478+
60479+ if (c == ']')
60480+ break;
60481+ }
60482+ if (!not)
60483+ return 1;
60484+ break;
60485+ matched:
60486+ while (c != ']') {
60487+ if (c == '\0')
60488+ return 1;
60489+
60490+ c = *p++;
60491+ }
60492+ if (not)
60493+ return 1;
60494+ }
60495+ break;
60496+ default:
60497+ if (c != *n)
60498+ return 1;
60499+ }
60500+
60501+ ++n;
60502+ }
60503+
60504+ if (*n == '\0')
60505+ return 0;
60506+
60507+ if (*n == '/')
60508+ return 0;
60509+
60510+ return 1;
60511+}
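+
+/* for illustration of the semantics above: '?' and a non-trailing '*' never
+   match across a '/', so glob_match("*.c", "main.c") == 0 (match) while
+   glob_match("*.c", "dir/main.c") == 1 (no match); a pattern that ends in
+   '*' matches any remainder of the string */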
60512+
60513+static struct acl_object_label *
60514+chk_glob_label(struct acl_object_label *globbed,
60515+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
60516+{
60517+ struct acl_object_label *tmp;
60518+
60519+ if (*path == NULL)
60520+ *path = gr_to_filename_nolock(dentry, mnt);
60521+
60522+ tmp = globbed;
60523+
60524+ while (tmp) {
60525+ if (!glob_match(tmp->filename, *path))
60526+ return tmp;
60527+ tmp = tmp->next;
60528+ }
60529+
60530+ return NULL;
60531+}
60532+
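+/* walk the subject's parent_subject chain, so that an object with no label
+   in a nested subject falls back to the labels of the subject it inherits
+   from */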
60533+static struct acl_object_label *
60534+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
60535+ const ino_t curr_ino, const dev_t curr_dev,
60536+ const struct acl_subject_label *subj, char **path, const int checkglob)
60537+{
60538+ struct acl_subject_label *tmpsubj;
60539+ struct acl_object_label *retval;
60540+ struct acl_object_label *retval2;
60541+
60542+ tmpsubj = (struct acl_subject_label *) subj;
60543+ read_lock(&gr_inode_lock);
60544+ do {
60545+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
60546+ if (retval) {
60547+ if (checkglob && retval->globbed) {
60548+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
60549+ if (retval2)
60550+ retval = retval2;
60551+ }
60552+ break;
60553+ }
60554+ } while ((tmpsubj = tmpsubj->parent_subject));
60555+ read_unlock(&gr_inode_lock);
60556+
60557+ return retval;
60558+}
60559+
60560+static __inline__ struct acl_object_label *
60561+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
60562+ struct dentry *curr_dentry,
60563+ const struct acl_subject_label *subj, char **path, const int checkglob)
60564+{
60565+ int newglob = checkglob;
60566+ ino_t inode;
60567+ dev_t device;
60568+
60569+	/* if we aren't checking a subdirectory of the original path yet, don't do glob
60570+	   checking, as we don't want a / * rule to match instead of the / object.
60571+	   don't do this for create lookups that call this function, though, since they
60572+	   look up on the parent and thus need globbing checks on all paths
60573+	*/
60574+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
60575+ newglob = GR_NO_GLOB;
60576+
60577+ spin_lock(&curr_dentry->d_lock);
60578+ inode = curr_dentry->d_inode->i_ino;
60579+ device = __get_dev(curr_dentry);
60580+ spin_unlock(&curr_dentry->d_lock);
60581+
60582+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
60583+}
60584+
60585+#ifdef CONFIG_HUGETLBFS
60586+static inline bool
60587+is_hugetlbfs_mnt(const struct vfsmount *mnt)
60588+{
60589+ int i;
60590+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
60591+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
60592+ return true;
60593+ }
60594+
60595+ return false;
60596+}
60597+#endif
60598+
60599+static struct acl_object_label *
60600+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60601+ const struct acl_subject_label *subj, char *path, const int checkglob)
60602+{
60603+ struct dentry *dentry = (struct dentry *) l_dentry;
60604+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60605+ struct mount *real_mnt = real_mount(mnt);
60606+ struct acl_object_label *retval;
60607+ struct dentry *parent;
60608+
60609+ br_read_lock(&vfsmount_lock);
60610+ write_seqlock(&rename_lock);
60611+
60612+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
60613+#ifdef CONFIG_NET
60614+ mnt == sock_mnt ||
60615+#endif
60616+#ifdef CONFIG_HUGETLBFS
60617+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
60618+#endif
60619+ /* ignore Eric Biederman */
60620+ IS_PRIVATE(l_dentry->d_inode))) {
60621+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
60622+ goto out;
60623+ }
60624+
60625+ for (;;) {
60626+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60627+ break;
60628+
60629+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60630+ if (!mnt_has_parent(real_mnt))
60631+ break;
60632+
60633+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60634+ if (retval != NULL)
60635+ goto out;
60636+
60637+ dentry = real_mnt->mnt_mountpoint;
60638+ real_mnt = real_mnt->mnt_parent;
60639+ mnt = &real_mnt->mnt;
60640+ continue;
60641+ }
60642+
60643+ parent = dentry->d_parent;
60644+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60645+ if (retval != NULL)
60646+ goto out;
60647+
60648+ dentry = parent;
60649+ }
60650+
60651+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60652+
60653+ /* real_root is pinned so we don't have to hold a reference */
60654+ if (retval == NULL)
60655+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
60656+out:
60657+ write_sequnlock(&rename_lock);
60658+ br_read_unlock(&vfsmount_lock);
60659+
60660+ BUG_ON(retval == NULL);
60661+
60662+ return retval;
60663+}
60664+
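+/* the wrappers below pick the globbing mode for the walk: GR_REG_GLOB for
+   normal lookups (glob objects may refine the match once we walk up from
+   the original path), GR_NO_GLOB to skip glob objects entirely, and
+   GR_CREATE_GLOB for create lookups, which start at the parent and thus
+   need glob checks on every path walked */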
60665+static __inline__ struct acl_object_label *
60666+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60667+ const struct acl_subject_label *subj)
60668+{
60669+ char *path = NULL;
60670+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
60671+}
60672+
60673+static __inline__ struct acl_object_label *
60674+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60675+ const struct acl_subject_label *subj)
60676+{
60677+ char *path = NULL;
60678+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
60679+}
60680+
60681+static __inline__ struct acl_object_label *
60682+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60683+ const struct acl_subject_label *subj, char *path)
60684+{
60685+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
60686+}
60687+
60688+static struct acl_subject_label *
60689+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60690+ const struct acl_role_label *role)
60691+{
60692+ struct dentry *dentry = (struct dentry *) l_dentry;
60693+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60694+ struct mount *real_mnt = real_mount(mnt);
60695+ struct acl_subject_label *retval;
60696+ struct dentry *parent;
60697+
60698+ br_read_lock(&vfsmount_lock);
60699+ write_seqlock(&rename_lock);
60700+
60701+ for (;;) {
60702+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60703+ break;
60704+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60705+ if (!mnt_has_parent(real_mnt))
60706+ break;
60707+
60708+ spin_lock(&dentry->d_lock);
60709+ read_lock(&gr_inode_lock);
60710+ retval =
60711+ lookup_acl_subj_label(dentry->d_inode->i_ino,
60712+ __get_dev(dentry), role);
60713+ read_unlock(&gr_inode_lock);
60714+ spin_unlock(&dentry->d_lock);
60715+ if (retval != NULL)
60716+ goto out;
60717+
60718+ dentry = real_mnt->mnt_mountpoint;
60719+ real_mnt = real_mnt->mnt_parent;
60720+ mnt = &real_mnt->mnt;
60721+ continue;
60722+ }
60723+
60724+ spin_lock(&dentry->d_lock);
60725+ read_lock(&gr_inode_lock);
60726+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60727+ __get_dev(dentry), role);
60728+ read_unlock(&gr_inode_lock);
60729+ parent = dentry->d_parent;
60730+ spin_unlock(&dentry->d_lock);
60731+
60732+ if (retval != NULL)
60733+ goto out;
60734+
60735+ dentry = parent;
60736+ }
60737+
60738+ spin_lock(&dentry->d_lock);
60739+ read_lock(&gr_inode_lock);
60740+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60741+ __get_dev(dentry), role);
60742+ read_unlock(&gr_inode_lock);
60743+ spin_unlock(&dentry->d_lock);
60744+
60745+ if (unlikely(retval == NULL)) {
60746+ /* real_root is pinned, we don't need to hold a reference */
60747+ read_lock(&gr_inode_lock);
60748+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
60749+ __get_dev(real_root.dentry), role);
60750+ read_unlock(&gr_inode_lock);
60751+ }
60752+out:
60753+ write_sequnlock(&rename_lock);
60754+ br_read_unlock(&vfsmount_lock);
60755+
60756+ BUG_ON(retval == NULL);
60757+
60758+ return retval;
60759+}
60760+
60761+static void
60762+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
60763+{
60764+ struct task_struct *task = current;
60765+ const struct cred *cred = current_cred();
60766+
60767+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
60768+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60769+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60770+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
60771+
60772+ return;
60773+}
60774+
60775+static void
60776+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
60777+{
60778+ struct task_struct *task = current;
60779+ const struct cred *cred = current_cred();
60780+
60781+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60782+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60783+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60784+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
60785+
60786+ return;
60787+}
60788+
60789+static void
60790+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
60791+{
60792+ struct task_struct *task = current;
60793+ const struct cred *cred = current_cred();
60794+
60795+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60796+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60797+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60798+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
60799+
60800+ return;
60801+}
60802+
60803+__u32
60804+gr_search_file(const struct dentry * dentry, const __u32 mode,
60805+ const struct vfsmount * mnt)
60806+{
60807+ __u32 retval = mode;
60808+ struct acl_subject_label *curracl;
60809+ struct acl_object_label *currobj;
60810+
60811+ if (unlikely(!(gr_status & GR_READY)))
60812+ return (mode & ~GR_AUDITS);
60813+
60814+ curracl = current->acl;
60815+
60816+ currobj = chk_obj_label(dentry, mnt, curracl);
60817+ retval = currobj->mode & mode;
60818+
60819+ /* if we're opening a specified transfer file for writing
60820+ (e.g. /dev/initctl), then transfer our role to init
60821+ */
60822+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
60823+ current->role->roletype & GR_ROLE_PERSIST)) {
60824+ struct task_struct *task = init_pid_ns.child_reaper;
60825+
60826+ if (task->role != current->role) {
60827+ task->acl_sp_role = 0;
60828+ task->acl_role_id = current->acl_role_id;
60829+ task->role = current->role;
60830+ rcu_read_lock();
60831+ read_lock(&grsec_exec_file_lock);
60832+ gr_apply_subject_to_task(task);
60833+ read_unlock(&grsec_exec_file_lock);
60834+ rcu_read_unlock();
60835+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
60836+ }
60837+ }
60838+
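+	/* learn mode: instead of denying, grant the request (minus the
+	   audit/suppress bits) and log it so a policy rule can be generated */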
60839+ if (unlikely
60840+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
60841+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
60842+ __u32 new_mode = mode;
60843+
60844+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60845+
60846+ retval = new_mode;
60847+
60848+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
60849+ new_mode |= GR_INHERIT;
60850+
60851+ if (!(mode & GR_NOLEARN))
60852+ gr_log_learn(dentry, mnt, new_mode);
60853+ }
60854+
60855+ return retval;
60856+}
60857+
60858+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
60859+ const struct dentry *parent,
60860+ const struct vfsmount *mnt)
60861+{
60862+ struct name_entry *match;
60863+ struct acl_object_label *matchpo;
60864+ struct acl_subject_label *curracl;
60865+ char *path;
60866+
60867+ if (unlikely(!(gr_status & GR_READY)))
60868+ return NULL;
60869+
60870+ preempt_disable();
60871+ path = gr_to_filename_rbac(new_dentry, mnt);
60872+ match = lookup_name_entry_create(path);
60873+
60874+ curracl = current->acl;
60875+
60876+ if (match) {
60877+ read_lock(&gr_inode_lock);
60878+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
60879+ read_unlock(&gr_inode_lock);
60880+
60881+ if (matchpo) {
60882+ preempt_enable();
60883+ return matchpo;
60884+ }
60885+ }
60886+
60887+ // lookup parent
60888+
60889+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
60890+
60891+ preempt_enable();
60892+ return matchpo;
60893+}
60894+
60895+__u32
60896+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
60897+ const struct vfsmount * mnt, const __u32 mode)
60898+{
60899+ struct acl_object_label *matchpo;
60900+ __u32 retval;
60901+
60902+ if (unlikely(!(gr_status & GR_READY)))
60903+ return (mode & ~GR_AUDITS);
60904+
60905+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
60906+
60907+ retval = matchpo->mode & mode;
60908+
60909+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
60910+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60911+ __u32 new_mode = mode;
60912+
60913+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60914+
60915+ gr_log_learn(new_dentry, mnt, new_mode);
60916+ return new_mode;
60917+ }
60918+
60919+ return retval;
60920+}
60921+
60922+__u32
60923+gr_check_link(const struct dentry * new_dentry,
60924+ const struct dentry * parent_dentry,
60925+ const struct vfsmount * parent_mnt,
60926+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
60927+{
60928+ struct acl_object_label *obj;
60929+ __u32 oldmode, newmode;
60930+ __u32 needmode;
60931+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
60932+ GR_DELETE | GR_INHERIT;
60933+
60934+ if (unlikely(!(gr_status & GR_READY)))
60935+ return (GR_CREATE | GR_LINK);
60936+
60937+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
60938+ oldmode = obj->mode;
60939+
60940+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
60941+ newmode = obj->mode;
60942+
60943+ needmode = newmode & checkmodes;
60944+
60945+ // old name for hardlink must have at least the permissions of the new name
60946+ if ((oldmode & needmode) != needmode)
60947+ goto bad;
60948+
60949+ // if old name had restrictions/auditing, make sure the new name does as well
60950+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
60951+
60952+ // don't allow hardlinking of suid/sgid/fcapped files without permission
60953+ if (is_privileged_binary(old_dentry))
60954+ needmode |= GR_SETID;
60955+
60956+ if ((newmode & needmode) != needmode)
60957+ goto bad;
60958+
60959+ // enforce minimum permissions
60960+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
60961+ return newmode;
60962+bad:
60963+ needmode = oldmode;
60964+ if (is_privileged_binary(old_dentry))
60965+ needmode |= GR_SETID;
60966+
60967+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
60968+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
60969+ return (GR_CREATE | GR_LINK);
60970+ } else if (newmode & GR_SUPPRESS)
60971+ return GR_SUPPRESS;
60972+ else
60973+ return 0;
60974+}
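+
+/* for instance: if the object for the existing file grants only read while
+   the object matching the new name would grant read+write, the hardlink is
+   refused above, since a link must not widen access to the existing file */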
60975+
60976+int
60977+gr_check_hidden_task(const struct task_struct *task)
60978+{
60979+ if (unlikely(!(gr_status & GR_READY)))
60980+ return 0;
60981+
60982+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
60983+ return 1;
60984+
60985+ return 0;
60986+}
60987+
60988+int
60989+gr_check_protected_task(const struct task_struct *task)
60990+{
60991+ if (unlikely(!(gr_status & GR_READY) || !task))
60992+ return 0;
60993+
60994+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60995+ task->acl != current->acl)
60996+ return 1;
60997+
60998+ return 0;
60999+}
61000+
61001+int
61002+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
61003+{
61004+ struct task_struct *p;
61005+ int ret = 0;
61006+
61007+ if (unlikely(!(gr_status & GR_READY) || !pid))
61008+ return ret;
61009+
61010+ read_lock(&tasklist_lock);
61011+ do_each_pid_task(pid, type, p) {
61012+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
61013+ p->acl != current->acl) {
61014+ ret = 1;
61015+ goto out;
61016+ }
61017+ } while_each_pid_task(pid, type, p);
61018+out:
61019+ read_unlock(&tasklist_lock);
61020+
61021+ return ret;
61022+}
61023+
61024+void
61025+gr_copy_label(struct task_struct *tsk)
61026+{
61027+ tsk->signal->used_accept = 0;
61028+ tsk->acl_sp_role = 0;
61029+ tsk->acl_role_id = current->acl_role_id;
61030+ tsk->acl = current->acl;
61031+ tsk->role = current->role;
61032+ tsk->signal->curr_ip = current->signal->curr_ip;
61033+ tsk->signal->saved_ip = current->signal->saved_ip;
61034+ if (current->exec_file)
61035+ get_file(current->exec_file);
61036+ tsk->exec_file = current->exec_file;
61037+ tsk->is_writable = current->is_writable;
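+	/* curr_ip is recorded per accepted connection; once the child has
+	   inherited it, clear it in the parent so the address isn't attributed
+	   to later, unrelated activity */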
61038+ if (unlikely(current->signal->used_accept)) {
61039+ current->signal->curr_ip = 0;
61040+ current->signal->saved_ip = 0;
61041+ }
61042+
61043+ return;
61044+}
61045+
61046+static void
61047+gr_set_proc_res(struct task_struct *task)
61048+{
61049+ struct acl_subject_label *proc;
61050+ unsigned short i;
61051+
61052+ proc = task->acl;
61053+
61054+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
61055+ return;
61056+
61057+ for (i = 0; i < RLIM_NLIMITS; i++) {
61058+ if (!(proc->resmask & (1U << i)))
61059+ continue;
61060+
61061+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
61062+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
61063+
61064+ if (i == RLIMIT_CPU)
61065+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
61066+ }
61067+
61068+ return;
61069+}
61070+
61071+extern int __gr_process_user_ban(struct user_struct *user);
61072+
61073+int
61074+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
61075+{
61076+ unsigned int i;
61077+ __u16 num;
61078+ uid_t *uidlist;
61079+ uid_t curuid;
61080+ int realok = 0;
61081+ int effectiveok = 0;
61082+ int fsok = 0;
61083+ uid_t globalreal, globaleffective, globalfs;
61084+
61085+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61086+ struct user_struct *user;
61087+
61088+ if (!uid_valid(real))
61089+ goto skipit;
61090+
61091+ /* find user based on global namespace */
61092+
61093+ globalreal = GR_GLOBAL_UID(real);
61094+
61095+ user = find_user(make_kuid(&init_user_ns, globalreal));
61096+ if (user == NULL)
61097+ goto skipit;
61098+
61099+ if (__gr_process_user_ban(user)) {
61100+ /* for find_user */
61101+ free_uid(user);
61102+ return 1;
61103+ }
61104+
61105+ /* for find_user */
61106+ free_uid(user);
61107+
61108+skipit:
61109+#endif
61110+
61111+ if (unlikely(!(gr_status & GR_READY)))
61112+ return 0;
61113+
61114+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
61115+ gr_log_learn_uid_change(real, effective, fs);
61116+
61117+ num = current->acl->user_trans_num;
61118+ uidlist = current->acl->user_transitions;
61119+
61120+ if (uidlist == NULL)
61121+ return 0;
61122+
61123+ if (!uid_valid(real)) {
61124+ realok = 1;
61125+ globalreal = (uid_t)-1;
61126+ } else {
61127+ globalreal = GR_GLOBAL_UID(real);
61128+ }
61129+ if (!uid_valid(effective)) {
61130+ effectiveok = 1;
61131+ globaleffective = (uid_t)-1;
61132+ } else {
61133+ globaleffective = GR_GLOBAL_UID(effective);
61134+ }
61135+ if (!uid_valid(fs)) {
61136+ fsok = 1;
61137+ globalfs = (uid_t)-1;
61138+ } else {
61139+ globalfs = GR_GLOBAL_UID(fs);
61140+ }
61141+
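+	/* user_transitions is either an allow-list (every id being set must
+	   appear in it) or a deny-list (if any id being set appears in it,
+	   the change is refused) */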
61142+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
61143+ for (i = 0; i < num; i++) {
61144+ curuid = uidlist[i];
61145+ if (globalreal == curuid)
61146+ realok = 1;
61147+ if (globaleffective == curuid)
61148+ effectiveok = 1;
61149+ if (globalfs == curuid)
61150+ fsok = 1;
61151+ }
61152+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
61153+ for (i = 0; i < num; i++) {
61154+ curuid = uidlist[i];
61155+ if (globalreal == curuid)
61156+ break;
61157+ if (globaleffective == curuid)
61158+ break;
61159+ if (globalfs == curuid)
61160+ break;
61161+ }
61162+ /* not in deny list */
61163+ if (i == num) {
61164+ realok = 1;
61165+ effectiveok = 1;
61166+ fsok = 1;
61167+ }
61168+ }
61169+
61170+ if (realok && effectiveok && fsok)
61171+ return 0;
61172+ else {
61173+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
61174+ return 1;
61175+ }
61176+}
61177+
61178+int
61179+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
61180+{
61181+ unsigned int i;
61182+ __u16 num;
61183+ gid_t *gidlist;
61184+ gid_t curgid;
61185+ int realok = 0;
61186+ int effectiveok = 0;
61187+ int fsok = 0;
61188+ gid_t globalreal, globaleffective, globalfs;
61189+
61190+ if (unlikely(!(gr_status & GR_READY)))
61191+ return 0;
61192+
61193+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
61194+ gr_log_learn_gid_change(real, effective, fs);
61195+
61196+ num = current->acl->group_trans_num;
61197+ gidlist = current->acl->group_transitions;
61198+
61199+ if (gidlist == NULL)
61200+ return 0;
61201+
61202+ if (!gid_valid(real)) {
61203+ realok = 1;
61204+ globalreal = (gid_t)-1;
61205+ } else {
61206+ globalreal = GR_GLOBAL_GID(real);
61207+ }
61208+ if (!gid_valid(effective)) {
61209+ effectiveok = 1;
61210+ globaleffective = (gid_t)-1;
61211+ } else {
61212+ globaleffective = GR_GLOBAL_GID(effective);
61213+ }
61214+ if (!gid_valid(fs)) {
61215+ fsok = 1;
61216+ globalfs = (gid_t)-1;
61217+ } else {
61218+ globalfs = GR_GLOBAL_GID(fs);
61219+ }
61220+
61221+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
61222+ for (i = 0; i < num; i++) {
61223+ curgid = gidlist[i];
61224+ if (globalreal == curgid)
61225+ realok = 1;
61226+ if (globaleffective == curgid)
61227+ effectiveok = 1;
61228+ if (globalfs == curgid)
61229+ fsok = 1;
61230+ }
61231+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
61232+ for (i = 0; i < num; i++) {
61233+ curgid = gidlist[i];
61234+ if (globalreal == curgid)
61235+ break;
61236+ if (globaleffective == curgid)
61237+ break;
61238+ if (globalfs == curgid)
61239+ break;
61240+ }
61241+ /* not in deny list */
61242+ if (i == num) {
61243+ realok = 1;
61244+ effectiveok = 1;
61245+ fsok = 1;
61246+ }
61247+ }
61248+
61249+ if (realok && effectiveok && fsok)
61250+ return 0;
61251+ else {
61252+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
61253+ return 1;
61254+ }
61255+}
61256+
61257+extern int gr_acl_is_capable(const int cap);
61258+
61259+void
61260+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
61261+{
61262+ struct acl_role_label *role = task->role;
61263+ struct acl_subject_label *subj = NULL;
61264+ struct acl_object_label *obj;
61265+ struct file *filp;
61266+ uid_t uid;
61267+ gid_t gid;
61268+
61269+ if (unlikely(!(gr_status & GR_READY)))
61270+ return;
61271+
61272+ uid = GR_GLOBAL_UID(kuid);
61273+ gid = GR_GLOBAL_GID(kgid);
61274+
61275+ filp = task->exec_file;
61276+
61277+ /* kernel process, we'll give them the kernel role */
61278+ if (unlikely(!filp)) {
61279+ task->role = kernel_role;
61280+ task->acl = kernel_role->root_label;
61281+ return;
61282+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
61283+ role = lookup_acl_role_label(task, uid, gid);
61284+
61285+ /* don't change the role if we're not a privileged process */
61286+ if (role && task->role != role &&
61287+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
61288+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
61289+ return;
61290+
61291+	/* perform subject lookup in the possibly new role; we can use this
61292+	   result below in the case where role == task->role
61293+	*/
61294+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
61295+
61296+	/* if we changed uid/gid but ended up with the same role and are
61297+	   using inheritance, don't lose the inherited subject:
61298+	   if the current subject differs from what a normal lookup
61299+	   would return, we arrived at it via inheritance, so keep
61300+	   that subject
61301+	*/
61302+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
61303+ (subj == task->acl)))
61304+ task->acl = subj;
61305+
61306+ task->role = role;
61307+
61308+ task->is_writable = 0;
61309+
61310+ /* ignore additional mmap checks for processes that are writable
61311+ by the default ACL */
61312+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61313+ if (unlikely(obj->mode & GR_WRITE))
61314+ task->is_writable = 1;
61315+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
61316+ if (unlikely(obj->mode & GR_WRITE))
61317+ task->is_writable = 1;
61318+
61319+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61320+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61321+#endif
61322+
61323+ gr_set_proc_res(task);
61324+
61325+ return;
61326+}
61327+
61328+int
61329+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
61330+ const int unsafe_flags)
61331+{
61332+ struct task_struct *task = current;
61333+ struct acl_subject_label *newacl;
61334+ struct acl_object_label *obj;
61335+ __u32 retmode;
61336+
61337+ if (unlikely(!(gr_status & GR_READY)))
61338+ return 0;
61339+
61340+ newacl = chk_subj_label(dentry, mnt, task->role);
61341+
61342+	/* special handling for the case where we did an strace -f -p <pid> from an
61343+	   admin role, and the traced pid then did an exec
61344+	*/
61345+ rcu_read_lock();
61346+ read_lock(&tasklist_lock);
61347+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
61348+ (task->parent->acl->mode & GR_POVERRIDE))) {
61349+ read_unlock(&tasklist_lock);
61350+ rcu_read_unlock();
61351+ goto skip_check;
61352+ }
61353+ read_unlock(&tasklist_lock);
61354+ rcu_read_unlock();
61355+
61356+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
61357+ !(task->role->roletype & GR_ROLE_GOD) &&
61358+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
61359+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
61360+ if (unsafe_flags & LSM_UNSAFE_SHARE)
61361+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
61362+ else
61363+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
61364+ return -EACCES;
61365+ }
61366+
61367+skip_check:
61368+
61369+ obj = chk_obj_label(dentry, mnt, task->acl);
61370+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
61371+
61372+ if (!(task->acl->mode & GR_INHERITLEARN) &&
61373+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
61374+ if (obj->nested)
61375+ task->acl = obj->nested;
61376+ else
61377+ task->acl = newacl;
61378+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
61379+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
61380+
61381+ task->is_writable = 0;
61382+
61383+ /* ignore additional mmap checks for processes that are writable
61384+ by the default ACL */
61385+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
61386+ if (unlikely(obj->mode & GR_WRITE))
61387+ task->is_writable = 1;
61388+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
61389+ if (unlikely(obj->mode & GR_WRITE))
61390+ task->is_writable = 1;
61391+
61392+ gr_set_proc_res(task);
61393+
61394+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61395+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61396+#endif
61397+ return 0;
61398+}
61399+
61400+/* always called with valid inodev ptr */
61401+static void
61402+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
61403+{
61404+ struct acl_object_label *matchpo;
61405+ struct acl_subject_label *matchps;
61406+ struct acl_subject_label *subj;
61407+ struct acl_role_label *role;
61408+ unsigned int x;
61409+
61410+ FOR_EACH_ROLE_START(role)
61411+ FOR_EACH_SUBJECT_START(role, subj, x)
61412+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
61413+ matchpo->mode |= GR_DELETED;
61414+ FOR_EACH_SUBJECT_END(subj,x)
61415+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
61416+ /* nested subjects aren't in the role's subj_hash table */
61417+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
61418+ matchpo->mode |= GR_DELETED;
61419+ FOR_EACH_NESTED_SUBJECT_END(subj)
61420+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
61421+ matchps->mode |= GR_DELETED;
61422+ FOR_EACH_ROLE_END(role)
61423+
61424+ inodev->nentry->deleted = 1;
61425+
61426+ return;
61427+}
61428+
61429+void
61430+gr_handle_delete(const ino_t ino, const dev_t dev)
61431+{
61432+ struct inodev_entry *inodev;
61433+
61434+ if (unlikely(!(gr_status & GR_READY)))
61435+ return;
61436+
61437+ write_lock(&gr_inode_lock);
61438+ inodev = lookup_inodev_entry(ino, dev);
61439+ if (inodev != NULL)
61440+ do_handle_delete(inodev, ino, dev);
61441+ write_unlock(&gr_inode_lock);
61442+
61443+ return;
61444+}
61445+
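+/* the three update_* helpers below share one pattern: find the entry hashed
+   under the old (inode, device) pair and marked deleted, unlink it from its
+   chain, rewrite it with the new pair, clear the deleted flag, and re-insert
+   it at its new hash position */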
61446+static void
61447+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
61448+ const ino_t newinode, const dev_t newdevice,
61449+ struct acl_subject_label *subj)
61450+{
61451+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
61452+ struct acl_object_label *match;
61453+
61454+ match = subj->obj_hash[index];
61455+
61456+ while (match && (match->inode != oldinode ||
61457+ match->device != olddevice ||
61458+ !(match->mode & GR_DELETED)))
61459+ match = match->next;
61460+
61461+ if (match && (match->inode == oldinode)
61462+ && (match->device == olddevice)
61463+ && (match->mode & GR_DELETED)) {
61464+ if (match->prev == NULL) {
61465+ subj->obj_hash[index] = match->next;
61466+ if (match->next != NULL)
61467+ match->next->prev = NULL;
61468+ } else {
61469+ match->prev->next = match->next;
61470+ if (match->next != NULL)
61471+ match->next->prev = match->prev;
61472+ }
61473+ match->prev = NULL;
61474+ match->next = NULL;
61475+ match->inode = newinode;
61476+ match->device = newdevice;
61477+ match->mode &= ~GR_DELETED;
61478+
61479+ insert_acl_obj_label(match, subj);
61480+ }
61481+
61482+ return;
61483+}
61484+
61485+static void
61486+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
61487+ const ino_t newinode, const dev_t newdevice,
61488+ struct acl_role_label *role)
61489+{
61490+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
61491+ struct acl_subject_label *match;
61492+
61493+ match = role->subj_hash[index];
61494+
61495+ while (match && (match->inode != oldinode ||
61496+ match->device != olddevice ||
61497+ !(match->mode & GR_DELETED)))
61498+ match = match->next;
61499+
61500+ if (match && (match->inode == oldinode)
61501+ && (match->device == olddevice)
61502+ && (match->mode & GR_DELETED)) {
61503+ if (match->prev == NULL) {
61504+ role->subj_hash[index] = match->next;
61505+ if (match->next != NULL)
61506+ match->next->prev = NULL;
61507+ } else {
61508+ match->prev->next = match->next;
61509+ if (match->next != NULL)
61510+ match->next->prev = match->prev;
61511+ }
61512+ match->prev = NULL;
61513+ match->next = NULL;
61514+ match->inode = newinode;
61515+ match->device = newdevice;
61516+ match->mode &= ~GR_DELETED;
61517+
61518+ insert_acl_subj_label(match, role);
61519+ }
61520+
61521+ return;
61522+}
61523+
61524+static void
61525+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
61526+ const ino_t newinode, const dev_t newdevice)
61527+{
61528+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
61529+ struct inodev_entry *match;
61530+
61531+ match = inodev_set.i_hash[index];
61532+
61533+ while (match && (match->nentry->inode != oldinode ||
61534+ match->nentry->device != olddevice || !match->nentry->deleted))
61535+ match = match->next;
61536+
61537+ if (match && (match->nentry->inode == oldinode)
61538+ && (match->nentry->device == olddevice) &&
61539+ match->nentry->deleted) {
61540+ if (match->prev == NULL) {
61541+ inodev_set.i_hash[index] = match->next;
61542+ if (match->next != NULL)
61543+ match->next->prev = NULL;
61544+ } else {
61545+ match->prev->next = match->next;
61546+ if (match->next != NULL)
61547+ match->next->prev = match->prev;
61548+ }
61549+ match->prev = NULL;
61550+ match->next = NULL;
61551+ match->nentry->inode = newinode;
61552+ match->nentry->device = newdevice;
61553+ match->nentry->deleted = 0;
61554+
61555+ insert_inodev_entry(match);
61556+ }
61557+
61558+ return;
61559+}
61560+
61561+static void
61562+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
61563+{
61564+ struct acl_subject_label *subj;
61565+ struct acl_role_label *role;
61566+ unsigned int x;
61567+
61568+ FOR_EACH_ROLE_START(role)
61569+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
61570+
61571+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
61572+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
61573+				subj->inode = ino;
61574+				subj->device = dev;
61575+			}
61576+ /* nested subjects aren't in the role's subj_hash table */
61577+ update_acl_obj_label(matchn->inode, matchn->device,
61578+ ino, dev, subj);
61579+ FOR_EACH_NESTED_SUBJECT_END(subj)
61580+ FOR_EACH_SUBJECT_START(role, subj, x)
61581+ update_acl_obj_label(matchn->inode, matchn->device,
61582+ ino, dev, subj);
61583+ FOR_EACH_SUBJECT_END(subj,x)
61584+ FOR_EACH_ROLE_END(role)
61585+
61586+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
61587+
61588+ return;
61589+}
61590+
61591+static void
61592+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
61593+ const struct vfsmount *mnt)
61594+{
61595+ ino_t ino = dentry->d_inode->i_ino;
61596+ dev_t dev = __get_dev(dentry);
61597+
61598+ __do_handle_create(matchn, ino, dev);
61599+
61600+ return;
61601+}
61602+
61603+void
61604+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61605+{
61606+ struct name_entry *matchn;
61607+
61608+ if (unlikely(!(gr_status & GR_READY)))
61609+ return;
61610+
61611+ preempt_disable();
61612+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
61613+
61614+ if (unlikely((unsigned long)matchn)) {
61615+ write_lock(&gr_inode_lock);
61616+ do_handle_create(matchn, dentry, mnt);
61617+ write_unlock(&gr_inode_lock);
61618+ }
61619+ preempt_enable();
61620+
61621+ return;
61622+}
61623+
61624+void
61625+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61626+{
61627+ struct name_entry *matchn;
61628+
61629+ if (unlikely(!(gr_status & GR_READY)))
61630+ return;
61631+
61632+ preempt_disable();
61633+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
61634+
61635+ if (unlikely((unsigned long)matchn)) {
61636+ write_lock(&gr_inode_lock);
61637+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
61638+ write_unlock(&gr_inode_lock);
61639+ }
61640+ preempt_enable();
61641+
61642+ return;
61643+}
61644+
61645+void
61646+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61647+ struct dentry *old_dentry,
61648+ struct dentry *new_dentry,
61649+ struct vfsmount *mnt, const __u8 replace)
61650+{
61651+ struct name_entry *matchn;
61652+ struct inodev_entry *inodev;
61653+ struct inode *inode = new_dentry->d_inode;
61654+ ino_t old_ino = old_dentry->d_inode->i_ino;
61655+ dev_t old_dev = __get_dev(old_dentry);
61656+
61657+	/* vfs_rename swaps the name and parent link for old_dentry and
61658+	   new_dentry.
61659+	   at this point, old_dentry has the new name, parent link, and inode
61660+	   for the renamed file.
61661+	   if a file is being replaced by a rename, new_dentry has the inode
61662+	   and name for the replaced file
61663+	*/
61664+
61665+ if (unlikely(!(gr_status & GR_READY)))
61666+ return;
61667+
61668+ preempt_disable();
61669+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
61670+
61671+ /* we wouldn't have to check d_inode if it weren't for
61672+ NFS silly-renaming
61673+ */
61674+
61675+ write_lock(&gr_inode_lock);
61676+ if (unlikely(replace && inode)) {
61677+ ino_t new_ino = inode->i_ino;
61678+ dev_t new_dev = __get_dev(new_dentry);
61679+
61680+ inodev = lookup_inodev_entry(new_ino, new_dev);
61681+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
61682+ do_handle_delete(inodev, new_ino, new_dev);
61683+ }
61684+
61685+ inodev = lookup_inodev_entry(old_ino, old_dev);
61686+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
61687+ do_handle_delete(inodev, old_ino, old_dev);
61688+
61689+ if (unlikely((unsigned long)matchn))
61690+ do_handle_create(matchn, old_dentry, mnt);
61691+
61692+ write_unlock(&gr_inode_lock);
61693+ preempt_enable();
61694+
61695+ return;
61696+}
61697+
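+/* returns 0 if the transition to the special role isn't allowed; returns 1
+   with *salt and *sum left NULL for roles needing no password (GR_ROLE_NOPW,
+   or GR_ROLE_PAM for pam-mediated auth), and 1 with *salt/*sum pointing at
+   the stored credentials when a password check is required */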
61698+static int
61699+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
61700+ unsigned char **sum)
61701+{
61702+ struct acl_role_label *r;
61703+ struct role_allowed_ip *ipp;
61704+ struct role_transition *trans;
61705+ unsigned int i;
61706+ int found = 0;
61707+ u32 curr_ip = current->signal->curr_ip;
61708+
61709+ current->signal->saved_ip = curr_ip;
61710+
61711+ /* check transition table */
61712+
61713+ for (trans = current->role->transitions; trans; trans = trans->next) {
61714+ if (!strcmp(rolename, trans->rolename)) {
61715+ found = 1;
61716+ break;
61717+ }
61718+ }
61719+
61720+ if (!found)
61721+ return 0;
61722+
61723+ /* handle special roles that do not require authentication
61724+ and check ip */
61725+
61726+ FOR_EACH_ROLE_START(r)
61727+ if (!strcmp(rolename, r->rolename) &&
61728+ (r->roletype & GR_ROLE_SPECIAL)) {
61729+ found = 0;
61730+ if (r->allowed_ips != NULL) {
61731+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
61732+ if ((ntohl(curr_ip) & ipp->netmask) ==
61733+ (ntohl(ipp->addr) & ipp->netmask))
61734+ found = 1;
61735+ }
61736+ } else
61737+ found = 2;
61738+ if (!found)
61739+ return 0;
61740+
61741+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
61742+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
61743+ *salt = NULL;
61744+ *sum = NULL;
61745+ return 1;
61746+ }
61747+ }
61748+ FOR_EACH_ROLE_END(r)
61749+
61750+ for (i = 0; i < num_sprole_pws; i++) {
61751+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
61752+ *salt = acl_special_roles[i]->salt;
61753+ *sum = acl_special_roles[i]->sum;
61754+ return 1;
61755+ }
61756+ }
61757+
61758+ return 0;
61759+}
61760+
61761+static void
61762+assign_special_role(char *rolename)
61763+{
61764+ struct acl_object_label *obj;
61765+ struct acl_role_label *r;
61766+ struct acl_role_label *assigned = NULL;
61767+ struct task_struct *tsk;
61768+ struct file *filp;
61769+
61770+ FOR_EACH_ROLE_START(r)
61771+ if (!strcmp(rolename, r->rolename) &&
61772+ (r->roletype & GR_ROLE_SPECIAL)) {
61773+ assigned = r;
61774+ break;
61775+ }
61776+ FOR_EACH_ROLE_END(r)
61777+
61778+ if (!assigned)
61779+ return;
61780+
61781+ read_lock(&tasklist_lock);
61782+ read_lock(&grsec_exec_file_lock);
61783+
61784+ tsk = current->real_parent;
61785+ if (tsk == NULL)
61786+ goto out_unlock;
61787+
61788+ filp = tsk->exec_file;
61789+ if (filp == NULL)
61790+ goto out_unlock;
61791+
61792+ tsk->is_writable = 0;
61793+
61794+ tsk->acl_sp_role = 1;
61795+ tsk->acl_role_id = ++acl_sp_role_value;
61796+ tsk->role = assigned;
61797+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
61798+
61799+ /* ignore additional mmap checks for processes that are writable
61800+ by the default ACL */
61801+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61802+ if (unlikely(obj->mode & GR_WRITE))
61803+ tsk->is_writable = 1;
61804+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
61805+ if (unlikely(obj->mode & GR_WRITE))
61806+ tsk->is_writable = 1;
61807+
61808+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61809+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
61810+#endif
61811+
61812+out_unlock:
61813+ read_unlock(&grsec_exec_file_lock);
61814+ read_unlock(&tasklist_lock);
61815+ return;
61816+}
61817+
61818+int gr_check_secure_terminal(struct task_struct *task)
61819+{
61820+ struct task_struct *p, *p2, *p3;
61821+ struct files_struct *files;
61822+ struct fdtable *fdt;
61823+ struct file *our_file = NULL, *file;
61824+ int i;
61825+
61826+ if (task->signal->tty == NULL)
61827+ return 1;
61828+
61829+ files = get_files_struct(task);
61830+ if (files != NULL) {
61831+ rcu_read_lock();
61832+ fdt = files_fdtable(files);
61833+		for (i = 0; i < fdt->max_fds; i++) {
61834+ file = fcheck_files(files, i);
61835+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
61836+ get_file(file);
61837+ our_file = file;
61838+ }
61839+ }
61840+ rcu_read_unlock();
61841+ put_files_struct(files);
61842+ }
61843+
61844+ if (our_file == NULL)
61845+ return 1;
61846+
61847+ read_lock(&tasklist_lock);
61848+ do_each_thread(p2, p) {
61849+ files = get_files_struct(p);
61850+ if (files == NULL ||
61851+ (p->signal && p->signal->tty == task->signal->tty)) {
61852+ if (files != NULL)
61853+ put_files_struct(files);
61854+ continue;
61855+ }
61856+ rcu_read_lock();
61857+ fdt = files_fdtable(files);
61858+		for (i = 0; i < fdt->max_fds; i++) {
61859+ file = fcheck_files(files, i);
61860+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
61861+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
61862+ p3 = task;
61863+ while (task_pid_nr(p3) > 0) {
61864+ if (p3 == p)
61865+ break;
61866+ p3 = p3->real_parent;
61867+ }
61868+ if (p3 == p)
61869+ break;
61870+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
61871+ gr_handle_alertkill(p);
61872+ rcu_read_unlock();
61873+ put_files_struct(files);
61874+ read_unlock(&tasklist_lock);
61875+ fput(our_file);
61876+ return 0;
61877+ }
61878+ }
61879+ rcu_read_unlock();
61880+ put_files_struct(files);
61881+ } while_each_thread(p2, p);
61882+ read_unlock(&tasklist_lock);
61883+
61884+ fput(our_file);
61885+ return 1;
61886+}
61887+
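+/* run via stop_machine() so GR_READY is cleared while no other cpu can be
+   in the middle of an rbac check; pax_open_kernel() temporarily lifts the
+   write protection on gr_status */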
61888+static int gr_rbac_disable(void *unused)
61889+{
61890+ pax_open_kernel();
61891+ gr_status &= ~GR_READY;
61892+ pax_close_kernel();
61893+
61894+ return 0;
61895+}
61896+
61897+ssize_t
61898+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
61899+{
61900+ struct gr_arg_wrapper uwrap;
61901+ unsigned char *sprole_salt = NULL;
61902+ unsigned char *sprole_sum = NULL;
61903+ int error = sizeof (struct gr_arg_wrapper);
61904+ int error2 = 0;
61905+
61906+ mutex_lock(&gr_dev_mutex);
61907+
61908+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
61909+ error = -EPERM;
61910+ goto out;
61911+ }
61912+
61913+ if (count != sizeof (struct gr_arg_wrapper)) {
61914+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
61915+ error = -EINVAL;
61916+ goto out;
61917+ }
61918+
61920+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
61921+ gr_auth_expires = 0;
61922+ gr_auth_attempts = 0;
61923+ }
61924+
61925+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
61926+ error = -EFAULT;
61927+ goto out;
61928+ }
61929+
61930+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
61931+ error = -EINVAL;
61932+ goto out;
61933+ }
61934+
61935+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
61936+ error = -EFAULT;
61937+ goto out;
61938+ }
61939+
61940+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61941+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61942+ time_after(gr_auth_expires, get_seconds())) {
61943+ error = -EBUSY;
61944+ goto out;
61945+ }
61946+
61947+	/* if a non-root user is trying to do anything other than use a special
61948+	   role, do not attempt authentication and do not count it towards the
61949+	   authentication lockout
61950+	*/
61951+
61952+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
61953+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61954+ gr_is_global_nonroot(current_uid())) {
61955+ error = -EPERM;
61956+ goto out;
61957+ }
61958+
61959+ /* ensure pw and special role name are null terminated */
61960+
61961+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
61962+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
61963+
61964+	/* Okay.
61965+	 * We have enough of the argument structure (we have yet to
61966+	 * copy_from_user the tables themselves). Copy the tables
61967+	 * only if we need them, i.e. for loading operations. */
61968+
61969+ switch (gr_usermode->mode) {
61970+ case GR_STATUS:
61971+ if (gr_status & GR_READY) {
61972+ error = 1;
61973+ if (!gr_check_secure_terminal(current))
61974+ error = 3;
61975+ } else
61976+ error = 2;
61977+ goto out;
61978+ case GR_SHUTDOWN:
61979+ if ((gr_status & GR_READY)
61980+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61981+ stop_machine(gr_rbac_disable, NULL, NULL);
61982+ free_variables();
61983+ memset(gr_usermode, 0, sizeof (struct gr_arg));
61984+ memset(gr_system_salt, 0, GR_SALT_LEN);
61985+ memset(gr_system_sum, 0, GR_SHA_LEN);
61986+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
61987+ } else if (gr_status & GR_READY) {
61988+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
61989+ error = -EPERM;
61990+ } else {
61991+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
61992+ error = -EAGAIN;
61993+ }
61994+ break;
61995+ case GR_ENABLE:
61996+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
61997+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
61998+ else {
61999+ if (gr_status & GR_READY)
62000+ error = -EAGAIN;
62001+ else
62002+ error = error2;
62003+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
62004+ }
62005+ break;
62006+ case GR_RELOAD:
62007+ if (!(gr_status & GR_READY)) {
62008+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
62009+ error = -EAGAIN;
62010+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
62011+ stop_machine(gr_rbac_disable, NULL, NULL);
62012+ free_variables();
62013+ error2 = gracl_init(gr_usermode);
62014+ if (!error2)
62015+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
62016+ else {
62017+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
62018+ error = error2;
62019+ }
62020+ } else {
62021+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
62022+ error = -EPERM;
62023+ }
62024+ break;
62025+ case GR_SEGVMOD:
62026+ if (unlikely(!(gr_status & GR_READY))) {
62027+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
62028+ error = -EAGAIN;
62029+ break;
62030+ }
62031+
62032+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
62033+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
62034+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
62035+ struct acl_subject_label *segvacl;
62036+ segvacl =
62037+ lookup_acl_subj_label(gr_usermode->segv_inode,
62038+ gr_usermode->segv_device,
62039+ current->role);
62040+ if (segvacl) {
62041+ segvacl->crashes = 0;
62042+ segvacl->expires = 0;
62043+ }
62044+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
62045+ gr_remove_uid(gr_usermode->segv_uid);
62046+ }
62047+ } else {
62048+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
62049+ error = -EPERM;
62050+ }
62051+ break;
62052+ case GR_SPROLE:
62053+ case GR_SPROLEPAM:
62054+ if (unlikely(!(gr_status & GR_READY))) {
62055+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
62056+ error = -EAGAIN;
62057+ break;
62058+ }
62059+
62060+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
62061+ current->role->expires = 0;
62062+ current->role->auth_attempts = 0;
62063+ }
62064+
62065+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
62066+ time_after(current->role->expires, get_seconds())) {
62067+ error = -EBUSY;
62068+ goto out;
62069+ }
62070+
62071+ if (lookup_special_role_auth
62072+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
62073+ && ((!sprole_salt && !sprole_sum)
62074+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
62075+ char *p = "";
62076+ assign_special_role(gr_usermode->sp_role);
62077+ read_lock(&tasklist_lock);
62078+ if (current->real_parent)
62079+ p = current->real_parent->role->rolename;
62080+ read_unlock(&tasklist_lock);
62081+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
62082+ p, acl_sp_role_value);
62083+ } else {
62084+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
62085+ error = -EPERM;
62086+ if(!(current->role->auth_attempts++))
62087+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
62088+
62089+ goto out;
62090+ }
62091+ break;
62092+ case GR_UNSPROLE:
62093+ if (unlikely(!(gr_status & GR_READY))) {
62094+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
62095+ error = -EAGAIN;
62096+ break;
62097+ }
62098+
62099+ if (current->role->roletype & GR_ROLE_SPECIAL) {
62100+ char *p = "";
62101+ int i = 0;
62102+
62103+ read_lock(&tasklist_lock);
62104+ if (current->real_parent) {
62105+ p = current->real_parent->role->rolename;
62106+ i = current->real_parent->acl_role_id;
62107+ }
62108+ read_unlock(&tasklist_lock);
62109+
62110+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
62111+ gr_set_acls(1);
62112+ } else {
62113+ error = -EPERM;
62114+ goto out;
62115+ }
62116+ break;
62117+ default:
62118+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
62119+ error = -EINVAL;
62120+ break;
62121+ }
62122+
62123+ if (error != -EPERM)
62124+ goto out;
62125+
62126+ if(!(gr_auth_attempts++))
62127+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
62128+
62129+ out:
62130+ mutex_unlock(&gr_dev_mutex);
62131+ return error;
62132+}
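The failed-authentication lockout in the handler above is spread across three places (the expiry reset near the top, the -EBUSY check, and the post-increment at the bottom), so its behavior is easy to misread: only the first failure in a window arms gr_auth_expires, and lockout begins once gr_auth_attempts reaches the configured maximum while that window is still open. Below is a minimal userspace sketch of the same state machine; ACL_MAXTRIES, ACL_TIMEOUT, and the helper names are hypothetical stand-ins, not part of the patch.

#include <stdio.h>
#include <time.h>

#define ACL_MAXTRIES 3   /* stand-in for CONFIG_GRKERNSEC_ACL_MAXTRIES */
#define ACL_TIMEOUT  30  /* stand-in for CONFIG_GRKERNSEC_ACL_TIMEOUT, in seconds */

static unsigned long auth_expires;
static unsigned long auth_attempts;

/* mirrors the expiry reset at the top of the handler; the kernel's
   wraparound-safe time_after_eq() is simplified to a plain compare */
static void auth_window_tick(unsigned long now)
{
	if (auth_expires && now >= auth_expires) {
		auth_expires = 0;
		auth_attempts = 0;
	}
}

/* 0 if an attempt may proceed, -1 (the kernel returns -EBUSY) if locked out */
static int auth_may_attempt(unsigned long now)
{
	return (auth_attempts >= ACL_MAXTRIES && auth_expires > now) ? -1 : 0;
}

/* mirrors the post-increment at the bottom of the handler: only the
   first failure in a window arms the expiry timestamp */
static void auth_record_failure(unsigned long now)
{
	if (!(auth_attempts++))
		auth_expires = now + ACL_TIMEOUT;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);
	int i;

	for (i = 0; i < 5; i++) {
		auth_window_tick(now);
		if (auth_may_attempt(now))
			printf("attempt %d: locked out until %lu\n", i, auth_expires);
		else {
			printf("attempt %d: allowed (simulating a failure)\n", i);
			auth_record_failure(now);
		}
	}
	return 0;
}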
62133+
62134+/* must be called with
62135+ rcu_read_lock();
62136+ read_lock(&tasklist_lock);
62137+ read_lock(&grsec_exec_file_lock);
62138+*/
62139+int gr_apply_subject_to_task(struct task_struct *task)
62140+{
62141+ struct acl_object_label *obj;
62142+ char *tmpname;
62143+ struct acl_subject_label *tmpsubj;
62144+ struct file *filp;
62145+ struct name_entry *nmatch;
62146+
62147+ filp = task->exec_file;
62148+ if (filp == NULL)
62149+ return 0;
62150+
62151+	/* the following applies the correct subject to
62152+	   binaries that were already running when the RBAC
62153+	   system was enabled and that have been replaced or
62154+	   deleted since their execution
62155+	   -----
62156+	   when the RBAC system starts, the inode/dev
62157+	   from exec_file will be one that the RBAC system
62158+	   is unaware of.  It only knows the inode/dev
62159+	   of the file presently on disk, or the absence
62160+	   of it.
62161+	*/
62162+ preempt_disable();
62163+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
62164+
62165+ nmatch = lookup_name_entry(tmpname);
62166+ preempt_enable();
62167+ tmpsubj = NULL;
62168+ if (nmatch) {
62169+ if (nmatch->deleted)
62170+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
62171+ else
62172+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
62173+ if (tmpsubj != NULL)
62174+ task->acl = tmpsubj;
62175+ }
62176+ if (tmpsubj == NULL)
62177+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
62178+ task->role);
62179+ if (task->acl) {
62180+ task->is_writable = 0;
62181+ /* ignore additional mmap checks for processes that are writable
62182+ by the default ACL */
62183+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
62184+ if (unlikely(obj->mode & GR_WRITE))
62185+ task->is_writable = 1;
62186+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
62187+ if (unlikely(obj->mode & GR_WRITE))
62188+ task->is_writable = 1;
62189+
62190+ gr_set_proc_res(task);
62191+
62192+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
62193+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
62194+#endif
62195+ } else {
62196+ return 1;
62197+ }
62198+
62199+ return 0;
62200+}
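gr_apply_subject_to_task() resolves a subject in a fixed order: the deleted-file table when the name entry is flagged deleted, the live table otherwise, and a path-based chk_subj_label() fallback when neither yields a match. A toy sketch of that resolution order follows; the table contents, paths, and helper names are entirely hypothetical.

#include <stdio.h>
#include <string.h>

struct name_entry { const char *name; int deleted; };

static const char *lookup_deleted(const struct name_entry *n) {
	return strcmp(n->name, "/bin/old") == 0 ? "subj_deleted" : NULL;
}
static const char *lookup_live(const struct name_entry *n) {
	return strcmp(n->name, "/bin/sh") == 0 ? "subj_live" : NULL;
}

/* prefer the deleted-file table when flagged, then the live table,
   then the path-based fallback (chk_subj_label() in the patch) */
static const char *resolve_subject(const struct name_entry *n)
{
	const char *subj = NULL;

	if (n)
		subj = n->deleted ? lookup_deleted(n) : lookup_live(n);
	if (!subj)
		subj = "subj_path_fallback";
	return subj;
}

int main(void)
{
	struct name_entry a = { "/bin/old", 1 };
	struct name_entry b = { "/bin/sh", 0 };
	struct name_entry c = { "/bin/unknown", 0 };

	printf("%s %s %s\n", resolve_subject(&a), resolve_subject(&b),
	       resolve_subject(&c)); /* subj_deleted subj_live subj_path_fallback */
	return 0;
}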
62201+
62202+int
62203+gr_set_acls(const int type)
62204+{
62205+ struct task_struct *task, *task2;
62206+ struct acl_role_label *role = current->role;
62207+ __u16 acl_role_id = current->acl_role_id;
62208+ const struct cred *cred;
62209+ int ret;
62210+
62211+ rcu_read_lock();
62212+ read_lock(&tasklist_lock);
62213+ read_lock(&grsec_exec_file_lock);
62214+ do_each_thread(task2, task) {
62215+ /* check to see if we're called from the exit handler,
62216+ if so, only replace ACLs that have inherited the admin
62217+ ACL */
62218+
62219+ if (type && (task->role != role ||
62220+ task->acl_role_id != acl_role_id))
62221+ continue;
62222+
62223+ task->acl_role_id = 0;
62224+ task->acl_sp_role = 0;
62225+
62226+ if (task->exec_file) {
62227+ cred = __task_cred(task);
62228+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
62229+ ret = gr_apply_subject_to_task(task);
62230+ if (ret) {
62231+ read_unlock(&grsec_exec_file_lock);
62232+ read_unlock(&tasklist_lock);
62233+ rcu_read_unlock();
62234+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
62235+ return ret;
62236+ }
62237+ } else {
62238+ // it's a kernel process
62239+ task->role = kernel_role;
62240+ task->acl = kernel_role->root_label;
62241+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
62242+ task->acl->mode &= ~GR_PROCFIND;
62243+#endif
62244+ }
62245+ } while_each_thread(task2, task);
62246+ read_unlock(&grsec_exec_file_lock);
62247+ read_unlock(&tasklist_lock);
62248+ rcu_read_unlock();
62249+
62250+ return 0;
62251+}
62252+
62253+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
62254+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
62255+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
62256+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
62257+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
62258+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
62259+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
62260+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
62261+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
62262+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
62263+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
62264+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
62265+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
62266+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
62267+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
62268+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
62269+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
62270+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
62271+};
62272+
62273+void
62274+gr_learn_resource(const struct task_struct *task,
62275+ const int res, const unsigned long wanted, const int gt)
62276+{
62277+ struct acl_subject_label *acl;
62278+ const struct cred *cred;
62279+
62280+ if (unlikely((gr_status & GR_READY) &&
62281+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
62282+ goto skip_reslog;
62283+
62284+ gr_log_resource(task, res, wanted, gt);
62285+skip_reslog:
62286+
62287+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
62288+ return;
62289+
62290+ acl = task->acl;
62291+
62292+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
62293+ !(acl->resmask & (1U << (unsigned short) res))))
62294+ return;
62295+
62296+ if (wanted >= acl->res[res].rlim_cur) {
62297+ unsigned long res_add;
62298+
62299+ res_add = wanted + res_learn_bumps[res];
62300+
62301+ acl->res[res].rlim_cur = res_add;
62302+
62303+ if (wanted > acl->res[res].rlim_max)
62304+ acl->res[res].rlim_max = res_add;
62305+
62306+ /* only log the subject filename, since resource logging is supported for
62307+ single-subject learning only */
62308+ rcu_read_lock();
62309+ cred = __task_cred(task);
62310+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
62311+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
62312+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
62313+ "", (unsigned long) res, &task->signal->saved_ip);
62314+ rcu_read_unlock();
62315+ }
62316+
62317+ return;
62318+}
62319+EXPORT_SYMBOL(gr_learn_resource);
62320+#endif
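The learning path above raises the soft limit to the observed request plus a per-resource bump, and raises the hard limit to that same value only when the request exceeded it. A worked example of the arithmetic, using a hypothetical bump value in place of the GR_RLIM_*_BUMP constants:

#include <stdio.h>

#define NOFILE_BUMP 32UL  /* stand-in for one res_learn_bumps[] entry */

int main(void)
{
	unsigned long rlim_cur = 1024, rlim_max = 1024;
	unsigned long wanted = 1500;  /* hypothetical observed request */

	if (wanted >= rlim_cur) {
		unsigned long res_add = wanted + NOFILE_BUMP;

		rlim_cur = res_add;           /* soft limit always bumped */
		if (wanted > rlim_max)
			rlim_max = res_add;   /* hard limit bumped past the overshoot */
	}
	printf("learned soft=%lu hard=%lu\n", rlim_cur, rlim_max); /* 1532 1532 */
	return 0;
}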
62321+
62322+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
62323+void
62324+pax_set_initial_flags(struct linux_binprm *bprm)
62325+{
62326+ struct task_struct *task = current;
62327+ struct acl_subject_label *proc;
62328+ unsigned long flags;
62329+
62330+ if (unlikely(!(gr_status & GR_READY)))
62331+ return;
62332+
62333+ flags = pax_get_flags(task);
62334+
62335+ proc = task->acl;
62336+
62337+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
62338+ flags &= ~MF_PAX_PAGEEXEC;
62339+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
62340+ flags &= ~MF_PAX_SEGMEXEC;
62341+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
62342+ flags &= ~MF_PAX_RANDMMAP;
62343+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
62344+ flags &= ~MF_PAX_EMUTRAMP;
62345+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
62346+ flags &= ~MF_PAX_MPROTECT;
62347+
62348+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
62349+ flags |= MF_PAX_PAGEEXEC;
62350+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
62351+ flags |= MF_PAX_SEGMEXEC;
62352+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
62353+ flags |= MF_PAX_RANDMMAP;
62354+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
62355+ flags |= MF_PAX_EMUTRAMP;
62356+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
62357+ flags |= MF_PAX_MPROTECT;
62358+
62359+ pax_set_flags(task, flags);
62360+
62361+ return;
62362+}
62363+#endif
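Note the ordering in pax_set_initial_flags(): all GR_PAX_DISABLE_* bits are applied before any GR_PAX_ENABLE_* bits, so if a subject somehow carries both for the same feature, the enable wins. A compilable sketch of that precedence, with stand-in flag values rather than the kernel's real MF_PAX_* masks:

#include <stdio.h>

#define MF_PAGEEXEC      0x01  /* stand-in for MF_PAX_PAGEEXEC */
#define DISABLE_PAGEEXEC 0x01  /* stand-in for GR_PAX_DISABLE_PAGEEXEC */
#define ENABLE_PAGEEXEC  0x02  /* stand-in for GR_PAX_ENABLE_PAGEEXEC */

int main(void)
{
	unsigned long flags = MF_PAGEEXEC;  /* inherited task flags */
	unsigned long pax_flags = DISABLE_PAGEEXEC | ENABLE_PAGEEXEC;

	if (pax_flags & DISABLE_PAGEEXEC)
		flags &= ~MF_PAGEEXEC;      /* disables applied first */
	if (pax_flags & ENABLE_PAGEEXEC)
		flags |= MF_PAGEEXEC;       /* enables applied last, so they win */

	printf("PAGEEXEC %s\n", (flags & MF_PAGEEXEC) ? "on" : "off"); /* "on" */
	return 0;
}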
62364+
62365+int
62366+gr_handle_proc_ptrace(struct task_struct *task)
62367+{
62368+ struct file *filp;
62369+ struct task_struct *tmp = task;
62370+ struct task_struct *curtemp = current;
62371+ __u32 retmode;
62372+
62373+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
62374+ if (unlikely(!(gr_status & GR_READY)))
62375+ return 0;
62376+#endif
62377+
62378+ read_lock(&tasklist_lock);
62379+ read_lock(&grsec_exec_file_lock);
62380+ filp = task->exec_file;
62381+
62382+ while (task_pid_nr(tmp) > 0) {
62383+ if (tmp == curtemp)
62384+ break;
62385+ tmp = tmp->real_parent;
62386+ }
62387+
62388+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
62389+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
62390+ read_unlock(&grsec_exec_file_lock);
62391+ read_unlock(&tasklist_lock);
62392+ return 1;
62393+ }
62394+
62395+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62396+ if (!(gr_status & GR_READY)) {
62397+ read_unlock(&grsec_exec_file_lock);
62398+ read_unlock(&tasklist_lock);
62399+ return 0;
62400+ }
62401+#endif
62402+
62403+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
62404+ read_unlock(&grsec_exec_file_lock);
62405+ read_unlock(&tasklist_lock);
62406+
62407+ if (retmode & GR_NOPTRACE)
62408+ return 1;
62409+
62410+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
62411+ && (current->acl != task->acl || (current->acl != current->role->root_label
62412+ && task_pid_nr(current) != task_pid_nr(task))))
62413+ return 1;
62414+
62415+ return 0;
62416+}
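Both ptrace handlers use the same ancestry test: walk task->real_parent until a zero pid is reached, succeeding if current is encountered along the way, so (subject policy aside) a task may only trace its own descendants. A self-contained userspace mirror of the walk, with a toy task struct standing in for task_struct:

#include <stdio.h>
#include <stddef.h>

struct task {
	int pid;
	struct task *real_parent;
};

/* returns 1 if "tracer" is "task" itself or one of its ancestors,
   mirroring the real_parent walk in gr_handle_ptrace() */
static int is_ancestor(struct task *task, struct task *tracer)
{
	struct task *tmp = task;

	while (tmp->pid > 0) {
		if (tmp == tracer)
			return 1;
		tmp = tmp->real_parent;
	}
	return 0;
}

int main(void)
{
	struct task init_task = { 0, NULL };
	struct task shell = { 100, &init_task };
	struct task child = { 200, &shell };
	struct task other = { 300, &init_task };

	printf("%d %d\n", is_ancestor(&child, &shell),
	       is_ancestor(&child, &other)); /* 1 0 */
	return 0;
}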
62417+
62418+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
62419+{
62420+ if (unlikely(!(gr_status & GR_READY)))
62421+ return;
62422+
62423+ if (!(current->role->roletype & GR_ROLE_GOD))
62424+ return;
62425+
62426+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
62427+ p->role->rolename, gr_task_roletype_to_char(p),
62428+ p->acl->filename);
62429+}
62430+
62431+int
62432+gr_handle_ptrace(struct task_struct *task, const long request)
62433+{
62434+ struct task_struct *tmp = task;
62435+ struct task_struct *curtemp = current;
62436+ __u32 retmode;
62437+
62438+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
62439+ if (unlikely(!(gr_status & GR_READY)))
62440+ return 0;
62441+#endif
62442+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
62443+ read_lock(&tasklist_lock);
62444+ while (task_pid_nr(tmp) > 0) {
62445+ if (tmp == curtemp)
62446+ break;
62447+ tmp = tmp->real_parent;
62448+ }
62449+
62450+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
62451+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
62452+ read_unlock(&tasklist_lock);
62453+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62454+ return 1;
62455+ }
62456+ read_unlock(&tasklist_lock);
62457+ }
62458+
62459+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62460+ if (!(gr_status & GR_READY))
62461+ return 0;
62462+#endif
62463+
62464+ read_lock(&grsec_exec_file_lock);
62465+ if (unlikely(!task->exec_file)) {
62466+ read_unlock(&grsec_exec_file_lock);
62467+ return 0;
62468+ }
62469+
62470+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
62471+ read_unlock(&grsec_exec_file_lock);
62472+
62473+ if (retmode & GR_NOPTRACE) {
62474+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62475+ return 1;
62476+ }
62477+
62478+ if (retmode & GR_PTRACERD) {
62479+ switch (request) {
62480+ case PTRACE_SEIZE:
62481+ case PTRACE_POKETEXT:
62482+ case PTRACE_POKEDATA:
62483+ case PTRACE_POKEUSR:
62484+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
62485+ case PTRACE_SETREGS:
62486+ case PTRACE_SETFPREGS:
62487+#endif
62488+#ifdef CONFIG_X86
62489+ case PTRACE_SETFPXREGS:
62490+#endif
62491+#ifdef CONFIG_ALTIVEC
62492+ case PTRACE_SETVRREGS:
62493+#endif
62494+ return 1;
62495+ default:
62496+ return 0;
62497+ }
62498+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
62499+ !(current->role->roletype & GR_ROLE_GOD) &&
62500+ (current->acl != task->acl)) {
62501+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62502+ return 1;
62503+ }
62504+
62505+ return 0;
62506+}
62507+
62508+static int is_writable_mmap(const struct file *filp)
62509+{
62510+ struct task_struct *task = current;
62511+ struct acl_object_label *obj, *obj2;
62512+
62513+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
62514+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
62515+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
62516+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
62517+ task->role->root_label);
62518+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
62519+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
62520+ return 1;
62521+ }
62522+ }
62523+ return 0;
62524+}
62525+
62526+int
62527+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
62528+{
62529+ __u32 mode;
62530+
62531+ if (unlikely(!file || !(prot & PROT_EXEC)))
62532+ return 1;
62533+
62534+ if (is_writable_mmap(file))
62535+ return 0;
62536+
62537+ mode =
62538+ gr_search_file(file->f_path.dentry,
62539+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
62540+ file->f_path.mnt);
62541+
62542+ if (!gr_tpe_allow(file))
62543+ return 0;
62544+
62545+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
62546+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62547+ return 0;
62548+ } else if (unlikely(!(mode & GR_EXEC))) {
62549+ return 0;
62550+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
62551+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62552+ return 1;
62553+ }
62554+
62555+ return 1;
62556+}
62557+
62558+int
62559+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62560+{
62561+ __u32 mode;
62562+
62563+ if (unlikely(!file || !(prot & PROT_EXEC)))
62564+ return 1;
62565+
62566+ if (is_writable_mmap(file))
62567+ return 0;
62568+
62569+ mode =
62570+ gr_search_file(file->f_path.dentry,
62571+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
62572+ file->f_path.mnt);
62573+
62574+ if (!gr_tpe_allow(file))
62575+ return 0;
62576+
62577+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
62578+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62579+ return 0;
62580+ } else if (unlikely(!(mode & GR_EXEC))) {
62581+ return 0;
62582+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
62583+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62584+ return 1;
62585+ }
62586+
62587+ return 1;
62588+}
62589+
62590+void
62591+gr_acl_handle_psacct(struct task_struct *task, const long code)
62592+{
62593+ unsigned long runtime;
62594+ unsigned long cputime;
62595+ unsigned int wday, cday;
62596+ __u8 whr, chr;
62597+ __u8 wmin, cmin;
62598+ __u8 wsec, csec;
62599+ struct timespec timeval;
62600+
62601+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
62602+ !(task->acl->mode & GR_PROCACCT)))
62603+ return;
62604+
62605+ do_posix_clock_monotonic_gettime(&timeval);
62606+ runtime = timeval.tv_sec - task->start_time.tv_sec;
62607+ wday = runtime / (3600 * 24);
62608+ runtime -= wday * (3600 * 24);
62609+ whr = runtime / 3600;
62610+ runtime -= whr * 3600;
62611+ wmin = runtime / 60;
62612+ runtime -= wmin * 60;
62613+ wsec = runtime;
62614+
62615+ cputime = (task->utime + task->stime) / HZ;
62616+ cday = cputime / (3600 * 24);
62617+ cputime -= cday * (3600 * 24);
62618+ chr = cputime / 3600;
62619+ cputime -= chr * 3600;
62620+ cmin = cputime / 60;
62621+ cputime -= cmin * 60;
62622+ csec = cputime;
62623+
62624+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
62625+
62626+ return;
62627+}
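The process-accounting hook decomposes both wall-clock runtime and consumed cputime into days/hours/minutes/seconds with the same repeated divide-and-subtract. The arithmetic, extracted into a runnable helper (names are illustrative):

#include <stdio.h>

/* decomposes a second count the way gr_acl_handle_psacct() does for
   both runtime and cputime */
static void split_seconds(unsigned long t, unsigned int *d,
			  unsigned int *h, unsigned int *m, unsigned int *s)
{
	*d = t / (3600 * 24);
	t -= *d * (3600 * 24);
	*h = t / 3600;
	t -= *h * 3600;
	*m = t / 60;
	t -= *m * 60;
	*s = t;
}

int main(void)
{
	unsigned int d, h, m, s;

	split_seconds(90061, &d, &h, &m, &s);  /* 86400 + 3600 + 60 + 1 */
	printf("%ud %uh %um %us\n", d, h, m, s); /* 1d 1h 1m 1s */
	return 0;
}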
62628+
62629+void gr_set_kernel_label(struct task_struct *task)
62630+{
62631+ if (gr_status & GR_READY) {
62632+ task->role = kernel_role;
62633+ task->acl = kernel_role->root_label;
62634+ }
62635+ return;
62636+}
62637+
62638+#ifdef CONFIG_TASKSTATS
62639+int gr_is_taskstats_denied(int pid)
62640+{
62641+ struct task_struct *task;
62642+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62643+ const struct cred *cred;
62644+#endif
62645+ int ret = 0;
62646+
62647+ /* restrict taskstats viewing to un-chrooted root users
62648+ who have the 'view' subject flag if the RBAC system is enabled
62649+ */
62650+
62651+ rcu_read_lock();
62652+ read_lock(&tasklist_lock);
62653+ task = find_task_by_vpid(pid);
62654+ if (task) {
62655+#ifdef CONFIG_GRKERNSEC_CHROOT
62656+ if (proc_is_chrooted(task))
62657+ ret = -EACCES;
62658+#endif
62659+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62660+ cred = __task_cred(task);
62661+#ifdef CONFIG_GRKERNSEC_PROC_USER
62662+ if (gr_is_global_nonroot(cred->uid))
62663+ ret = -EACCES;
62664+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62665+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
62666+ ret = -EACCES;
62667+#endif
62668+#endif
62669+ if (gr_status & GR_READY) {
62670+ if (!(task->acl->mode & GR_VIEW))
62671+ ret = -EACCES;
62672+ }
62673+ } else
62674+ ret = -ENOENT;
62675+
62676+ read_unlock(&tasklist_lock);
62677+ rcu_read_unlock();
62678+
62679+ return ret;
62680+}
62681+#endif
62682+
62683+/* AUXV entries are filled via a descendant of search_binary_handler
62684+ after we've already applied the subject for the target
62685+*/
62686+int gr_acl_enable_at_secure(void)
62687+{
62688+ if (unlikely(!(gr_status & GR_READY)))
62689+ return 0;
62690+
62691+ if (current->acl->mode & GR_ATSECURE)
62692+ return 1;
62693+
62694+ return 0;
62695+}
62696+
62697+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
62698+{
62699+ struct task_struct *task = current;
62700+ struct dentry *dentry = file->f_path.dentry;
62701+ struct vfsmount *mnt = file->f_path.mnt;
62702+ struct acl_object_label *obj, *tmp;
62703+ struct acl_subject_label *subj;
62704+ unsigned int bufsize;
62705+ int is_not_root;
62706+ char *path;
62707+ dev_t dev = __get_dev(dentry);
62708+
62709+ if (unlikely(!(gr_status & GR_READY)))
62710+ return 1;
62711+
62712+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
62713+ return 1;
62714+
62715+ /* ignore Eric Biederman */
62716+ if (IS_PRIVATE(dentry->d_inode))
62717+ return 1;
62718+
62719+ subj = task->acl;
62720+ read_lock(&gr_inode_lock);
62721+ do {
62722+ obj = lookup_acl_obj_label(ino, dev, subj);
62723+ if (obj != NULL) {
62724+ read_unlock(&gr_inode_lock);
62725+ return (obj->mode & GR_FIND) ? 1 : 0;
62726+ }
62727+ } while ((subj = subj->parent_subject));
62728+ read_unlock(&gr_inode_lock);
62729+
62730+	/* this is purely an optimization, since we're looking for an object
62731+	   for the directory we're doing a readdir on.
62732+	   If it's possible for any globbed object to match the entry we're
62733+ filling into the directory, then the object we find here will be
62734+ an anchor point with attached globbed objects
62735+ */
62736+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
62737+ if (obj->globbed == NULL)
62738+ return (obj->mode & GR_FIND) ? 1 : 0;
62739+
62740+ is_not_root = ((obj->filename[0] == '/') &&
62741+ (obj->filename[1] == '\0')) ? 0 : 1;
62742+ bufsize = PAGE_SIZE - namelen - is_not_root;
62743+
62744+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
62745+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
62746+ return 1;
62747+
62748+ preempt_disable();
62749+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62750+ bufsize);
62751+
62752+ bufsize = strlen(path);
62753+
62754+ /* if base is "/", don't append an additional slash */
62755+ if (is_not_root)
62756+ *(path + bufsize) = '/';
62757+ memcpy(path + bufsize + is_not_root, name, namelen);
62758+ *(path + bufsize + namelen + is_not_root) = '\0';
62759+
62760+ tmp = obj->globbed;
62761+ while (tmp) {
62762+ if (!glob_match(tmp->filename, path)) {
62763+ preempt_enable();
62764+ return (tmp->mode & GR_FIND) ? 1 : 0;
62765+ }
62766+ tmp = tmp->next;
62767+ }
62768+ preempt_enable();
62769+ return (obj->mode & GR_FIND) ? 1 : 0;
62770+}
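The (bufsize - 1) > (PAGE_SIZE - 1) test above folds two range checks into one unsigned comparison: bufsize == 0 wraps around to the maximum value, and anything larger than PAGE_SIZE also exceeds PAGE_SIZE - 1. A small demonstration of the idiom; XPAGE_SIZE is a stand-in for the kernel constant.

#include <stdio.h>

#define XPAGE_SIZE 4096U

/* one unsigned compare covers both out-of-range cases, exactly like
   the "check bufsize > PAGE_SIZE || bufsize == 0" comment above */
static int bufsize_invalid(unsigned int bufsize)
{
	return (bufsize - 1) > (XPAGE_SIZE - 1);
}

int main(void)
{
	printf("%d %d %d %d\n",
	       bufsize_invalid(0),               /* 1: wraps to UINT_MAX */
	       bufsize_invalid(1),               /* 0 */
	       bufsize_invalid(XPAGE_SIZE),      /* 0: exactly one page is fine */
	       bufsize_invalid(XPAGE_SIZE + 1)); /* 1 */
	return 0;
}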
62771+
62772+void gr_put_exec_file(struct task_struct *task)
62773+{
62774+ struct file *filp;
62775+
62776+ write_lock(&grsec_exec_file_lock);
62777+ filp = task->exec_file;
62778+ task->exec_file = NULL;
62779+ write_unlock(&grsec_exec_file_lock);
62780+
62781+ if (filp)
62782+ fput(filp);
62783+
62784+ return;
62785+}
62786+
62787+
62788+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
62789+EXPORT_SYMBOL(gr_acl_is_enabled);
62790+#endif
62791+EXPORT_SYMBOL(gr_set_kernel_label);
62792+#ifdef CONFIG_SECURITY
62793+EXPORT_SYMBOL(gr_check_user_change);
62794+EXPORT_SYMBOL(gr_check_group_change);
62795+#endif
62796+
62797diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
62798new file mode 100644
62799index 0000000..34fefda
62800--- /dev/null
62801+++ b/grsecurity/gracl_alloc.c
62802@@ -0,0 +1,105 @@
62803+#include <linux/kernel.h>
62804+#include <linux/mm.h>
62805+#include <linux/slab.h>
62806+#include <linux/vmalloc.h>
62807+#include <linux/gracl.h>
62808+#include <linux/grsecurity.h>
62809+
62810+static unsigned long alloc_stack_next = 1;
62811+static unsigned long alloc_stack_size = 1;
62812+static void **alloc_stack;
62813+
62814+static __inline__ int
62815+alloc_pop(void)
62816+{
62817+ if (alloc_stack_next == 1)
62818+ return 0;
62819+
62820+ kfree(alloc_stack[alloc_stack_next - 2]);
62821+
62822+ alloc_stack_next--;
62823+
62824+ return 1;
62825+}
62826+
62827+static __inline__ int
62828+alloc_push(void *buf)
62829+{
62830+ if (alloc_stack_next >= alloc_stack_size)
62831+ return 1;
62832+
62833+ alloc_stack[alloc_stack_next - 1] = buf;
62834+
62835+ alloc_stack_next++;
62836+
62837+ return 0;
62838+}
62839+
62840+void *
62841+acl_alloc(unsigned long len)
62842+{
62843+ void *ret = NULL;
62844+
62845+ if (!len || len > PAGE_SIZE)
62846+ goto out;
62847+
62848+ ret = kmalloc(len, GFP_KERNEL);
62849+
62850+ if (ret) {
62851+ if (alloc_push(ret)) {
62852+ kfree(ret);
62853+ ret = NULL;
62854+ }
62855+ }
62856+
62857+out:
62858+ return ret;
62859+}
62860+
62861+void *
62862+acl_alloc_num(unsigned long num, unsigned long len)
62863+{
62864+ if (!len || (num > (PAGE_SIZE / len)))
62865+ return NULL;
62866+
62867+ return acl_alloc(num * len);
62868+}
62869+
62870+void
62871+acl_free_all(void)
62872+{
62873+ if (gr_acl_is_enabled() || !alloc_stack)
62874+ return;
62875+
62876+ while (alloc_pop()) ;
62877+
62878+ if (alloc_stack) {
62879+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
62880+ kfree(alloc_stack);
62881+ else
62882+ vfree(alloc_stack);
62883+ }
62884+
62885+ alloc_stack = NULL;
62886+ alloc_stack_size = 1;
62887+ alloc_stack_next = 1;
62888+
62889+ return;
62890+}
62891+
62892+int
62893+acl_alloc_stack_init(unsigned long size)
62894+{
62895+ if ((size * sizeof (void *)) <= PAGE_SIZE)
62896+ alloc_stack =
62897+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
62898+ else
62899+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
62900+
62901+ alloc_stack_size = size;
62902+
62903+ if (!alloc_stack)
62904+ return 0;
62905+ else
62906+ return 1;
62907+}
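gracl_alloc.c implements a simple region-style allocator: every successful acl_alloc() is pushed onto a pointer stack sized up front by acl_alloc_stack_init(), so a failed or completed policy load can be torn down with one sweep in acl_free_all(). A userspace mirror of the pattern, with plain malloc/free in place of the kernel's kmalloc/vmalloc split:

#include <stdio.h>
#include <stdlib.h>

static void **stack;
static unsigned long next = 1, size = 1;

static int stack_init(unsigned long n)
{
	stack = malloc(n * sizeof(void *));
	size = n;
	return stack != NULL;
}

/* like acl_alloc()/alloc_push(): remember every allocation so the
   whole region can be released at once */
static void *tracked_alloc(unsigned long len)
{
	void *p;

	if (next >= size || !(p = malloc(len)))
		return NULL;
	stack[next++ - 1] = p;
	return p;
}

/* like acl_free_all(): pop and free in LIFO order, then drop the stack */
static void free_all(void)
{
	while (next > 1)
		free(stack[--next - 1]);
	free(stack);
	stack = NULL;
	next = size = 1;
}

int main(void)
{
	if (!stack_init(8))
		return 1;
	char *a = tracked_alloc(16);
	char *b = tracked_alloc(32);
	printf("allocated %p %p\n", (void *)a, (void *)b);
	free_all();  /* frees b, then a, then the stack itself */
	return 0;
}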
62908diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
62909new file mode 100644
62910index 0000000..bdd51ea
62911--- /dev/null
62912+++ b/grsecurity/gracl_cap.c
62913@@ -0,0 +1,110 @@
62914+#include <linux/kernel.h>
62915+#include <linux/module.h>
62916+#include <linux/sched.h>
62917+#include <linux/gracl.h>
62918+#include <linux/grsecurity.h>
62919+#include <linux/grinternal.h>
62920+
62921+extern const char *captab_log[];
62922+extern int captab_log_entries;
62923+
62924+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
62925+{
62926+ struct acl_subject_label *curracl;
62927+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62928+ kernel_cap_t cap_audit = __cap_empty_set;
62929+
62930+ if (!gr_acl_is_enabled())
62931+ return 1;
62932+
62933+ curracl = task->acl;
62934+
62935+ cap_drop = curracl->cap_lower;
62936+ cap_mask = curracl->cap_mask;
62937+ cap_audit = curracl->cap_invert_audit;
62938+
62939+ while ((curracl = curracl->parent_subject)) {
62940+ /* if the cap isn't specified in the current computed mask but is specified in the
62941+ current level subject, and is lowered in the current level subject, then add
62942+		   it to the set of dropped capabilities;
62943+ otherwise, add the current level subject's mask to the current computed mask
62944+ */
62945+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62946+ cap_raise(cap_mask, cap);
62947+ if (cap_raised(curracl->cap_lower, cap))
62948+ cap_raise(cap_drop, cap);
62949+ if (cap_raised(curracl->cap_invert_audit, cap))
62950+ cap_raise(cap_audit, cap);
62951+ }
62952+ }
62953+
62954+ if (!cap_raised(cap_drop, cap)) {
62955+ if (cap_raised(cap_audit, cap))
62956+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
62957+ return 1;
62958+ }
62959+
62960+ curracl = task->acl;
62961+
62962+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
62963+ && cap_raised(cred->cap_effective, cap)) {
62964+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
62965+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
62966+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
62967+ gr_to_filename(task->exec_file->f_path.dentry,
62968+ task->exec_file->f_path.mnt) : curracl->filename,
62969+ curracl->filename, 0UL,
62970+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
62971+ return 1;
62972+ }
62973+
62974+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
62975+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
62976+
62977+ return 0;
62978+}
62979+
62980+int
62981+gr_acl_is_capable(const int cap)
62982+{
62983+ return gr_task_acl_is_capable(current, current_cred(), cap);
62984+}
62985+
62986+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
62987+{
62988+ struct acl_subject_label *curracl;
62989+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62990+
62991+ if (!gr_acl_is_enabled())
62992+ return 1;
62993+
62994+ curracl = task->acl;
62995+
62996+ cap_drop = curracl->cap_lower;
62997+ cap_mask = curracl->cap_mask;
62998+
62999+ while ((curracl = curracl->parent_subject)) {
63000+ /* if the cap isn't specified in the current computed mask but is specified in the
63001+ current level subject, and is lowered in the current level subject, then add
63002+		   it to the set of dropped capabilities;
63003+ otherwise, add the current level subject's mask to the current computed mask
63004+ */
63005+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
63006+ cap_raise(cap_mask, cap);
63007+ if (cap_raised(curracl->cap_lower, cap))
63008+ cap_raise(cap_drop, cap);
63009+ }
63010+ }
63011+
63012+ if (!cap_raised(cap_drop, cap))
63013+ return 1;
63014+
63015+ return 0;
63016+}
63017+
63018+int
63019+gr_acl_is_capable_nolog(const int cap)
63020+{
63021+ return gr_task_acl_is_capable_nolog(current, cap);
63022+}
63023+
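The parent_subject walk in both capability checkers implements nearest-ancestor-wins: a level's cap_mask records which capabilities it has an opinion on, and a parent's lower bits are inherited only for capabilities no closer level has already decided. A toy model of that resolution, with simplified single-word masks instead of kernel_cap_t:

#include <stdio.h>
#include <stddef.h>

struct subject {
	unsigned int cap_mask;   /* caps this level says anything about */
	unsigned int cap_lower;  /* caps this level drops */
	struct subject *parent;
};

static int cap_allowed(struct subject *s, unsigned int cap_bit)
{
	unsigned int mask = s->cap_mask, drop = s->cap_lower;

	while ((s = s->parent)) {
		/* only inherit a parent's verdict for caps the child
		   hasn't already decided on */
		if (!(mask & cap_bit) && (s->cap_mask & cap_bit)) {
			mask |= cap_bit;
			if (s->cap_lower & cap_bit)
				drop |= cap_bit;
		}
	}
	return !(drop & cap_bit);
}

int main(void)
{
	unsigned int CAP_X = 0x1;
	struct subject root  = { CAP_X, CAP_X, NULL };  /* root drops CAP_X */
	struct subject quiet = { 0,     0,     &root }; /* silent: inherits the drop */
	struct subject loud  = { CAP_X, 0,     &root }; /* decides itself: overrides */

	printf("%d %d\n", cap_allowed(&quiet, CAP_X),
	       cap_allowed(&loud, CAP_X)); /* 0 1 */
	return 0;
}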
63024diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
63025new file mode 100644
63026index 0000000..a340c17
63027--- /dev/null
63028+++ b/grsecurity/gracl_fs.c
63029@@ -0,0 +1,431 @@
63030+#include <linux/kernel.h>
63031+#include <linux/sched.h>
63032+#include <linux/types.h>
63033+#include <linux/fs.h>
63034+#include <linux/file.h>
63035+#include <linux/stat.h>
63036+#include <linux/grsecurity.h>
63037+#include <linux/grinternal.h>
63038+#include <linux/gracl.h>
63039+
63040+umode_t
63041+gr_acl_umask(void)
63042+{
63043+ if (unlikely(!gr_acl_is_enabled()))
63044+ return 0;
63045+
63046+ return current->role->umask;
63047+}
63048+
63049+__u32
63050+gr_acl_handle_hidden_file(const struct dentry * dentry,
63051+ const struct vfsmount * mnt)
63052+{
63053+ __u32 mode;
63054+
63055+ if (unlikely(!dentry->d_inode))
63056+ return GR_FIND;
63057+
63058+ mode =
63059+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
63060+
63061+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
63062+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
63063+ return mode;
63064+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
63065+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
63066+ return 0;
63067+ } else if (unlikely(!(mode & GR_FIND)))
63068+ return 0;
63069+
63070+ return GR_FIND;
63071+}
63072+
63073+__u32
63074+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63075+ int acc_mode)
63076+{
63077+ __u32 reqmode = GR_FIND;
63078+ __u32 mode;
63079+
63080+ if (unlikely(!dentry->d_inode))
63081+ return reqmode;
63082+
63083+ if (acc_mode & MAY_APPEND)
63084+ reqmode |= GR_APPEND;
63085+ else if (acc_mode & MAY_WRITE)
63086+ reqmode |= GR_WRITE;
63087+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
63088+ reqmode |= GR_READ;
63089+
63090+ mode =
63091+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
63092+ mnt);
63093+
63094+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
63095+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
63096+ reqmode & GR_READ ? " reading" : "",
63097+ reqmode & GR_WRITE ? " writing" : reqmode &
63098+ GR_APPEND ? " appending" : "");
63099+ return reqmode;
63100+ } else
63101+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
63102+ {
63103+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
63104+ reqmode & GR_READ ? " reading" : "",
63105+ reqmode & GR_WRITE ? " writing" : reqmode &
63106+ GR_APPEND ? " appending" : "");
63107+ return 0;
63108+ } else if (unlikely((mode & reqmode) != reqmode))
63109+ return 0;
63110+
63111+ return reqmode;
63112+}
63113+
63114+__u32
63115+gr_acl_handle_creat(const struct dentry * dentry,
63116+ const struct dentry * p_dentry,
63117+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63118+ const int imode)
63119+{
63120+ __u32 reqmode = GR_WRITE | GR_CREATE;
63121+ __u32 mode;
63122+
63123+ if (acc_mode & MAY_APPEND)
63124+ reqmode |= GR_APPEND;
63125+ // if a directory was required or the directory already exists, then
63126+ // don't count this open as a read
63127+ if ((acc_mode & MAY_READ) &&
63128+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
63129+ reqmode |= GR_READ;
63130+ if ((open_flags & O_CREAT) &&
63131+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
63132+ reqmode |= GR_SETID;
63133+
63134+ mode =
63135+ gr_check_create(dentry, p_dentry, p_mnt,
63136+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
63137+
63138+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
63139+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
63140+ reqmode & GR_READ ? " reading" : "",
63141+ reqmode & GR_WRITE ? " writing" : reqmode &
63142+ GR_APPEND ? " appending" : "");
63143+ return reqmode;
63144+ } else
63145+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
63146+ {
63147+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
63148+ reqmode & GR_READ ? " reading" : "",
63149+ reqmode & GR_WRITE ? " writing" : reqmode &
63150+ GR_APPEND ? " appending" : "");
63151+ return 0;
63152+ } else if (unlikely((mode & reqmode) != reqmode))
63153+ return 0;
63154+
63155+ return reqmode;
63156+}
63157+
63158+__u32
63159+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
63160+ const int fmode)
63161+{
63162+ __u32 mode, reqmode = GR_FIND;
63163+
63164+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
63165+ reqmode |= GR_EXEC;
63166+ if (fmode & S_IWOTH)
63167+ reqmode |= GR_WRITE;
63168+ if (fmode & S_IROTH)
63169+ reqmode |= GR_READ;
63170+
63171+ mode =
63172+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
63173+ mnt);
63174+
63175+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
63176+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
63177+ reqmode & GR_READ ? " reading" : "",
63178+ reqmode & GR_WRITE ? " writing" : "",
63179+ reqmode & GR_EXEC ? " executing" : "");
63180+ return reqmode;
63181+ } else
63182+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
63183+ {
63184+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
63185+ reqmode & GR_READ ? " reading" : "",
63186+ reqmode & GR_WRITE ? " writing" : "",
63187+ reqmode & GR_EXEC ? " executing" : "");
63188+ return 0;
63189+ } else if (unlikely((mode & reqmode) != reqmode))
63190+ return 0;
63191+
63192+ return reqmode;
63193+}
63194+
63195+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
63196+{
63197+ __u32 mode;
63198+
63199+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
63200+
63201+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
63202+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
63203+ return mode;
63204+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
63205+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
63206+ return 0;
63207+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
63208+ return 0;
63209+
63210+ return (reqmode);
63211+}
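generic_fs_handler() and the open/creat/access handlers above it all make the same three-way decision: grant and audit when every requested bit is present and an audit bit fired, deny and log when a bit is missing and GR_SUPPRESS is clear, and deny silently otherwise. A compilable sketch of the decision table, using stand-in bit values rather than the real GR_* flags:

#include <stdio.h>

#define XGR_READ     0x01
#define XGR_SUPPRESS 0x02
#define XGR_AUDITS   0x04

/* mirrors the branch structure of generic_fs_handler() */
static unsigned int decide(unsigned int mode, unsigned int reqmode)
{
	if (((mode & reqmode) == reqmode) && (mode & XGR_AUDITS)) {
		printf("grant (audited)\n");
		return reqmode;
	} else if ((mode & reqmode) != reqmode && !(mode & XGR_SUPPRESS)) {
		printf("deny (logged)\n");
		return 0;
	} else if ((mode & reqmode) != reqmode) {
		printf("deny (suppressed)\n");
		return 0;
	}
	printf("grant (silent)\n");
	return reqmode;
}

int main(void)
{
	decide(XGR_READ | XGR_AUDITS, XGR_READ);  /* grant (audited) */
	decide(0, XGR_READ);                      /* deny (logged) */
	decide(XGR_SUPPRESS, XGR_READ);           /* deny (suppressed) */
	decide(XGR_READ, XGR_READ);               /* grant (silent) */
	return 0;
}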
63212+
63213+__u32
63214+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63215+{
63216+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
63217+}
63218+
63219+__u32
63220+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
63221+{
63222+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
63223+}
63224+
63225+__u32
63226+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
63227+{
63228+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
63229+}
63230+
63231+__u32
63232+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
63233+{
63234+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
63235+}
63236+
63237+__u32
63238+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
63239+ umode_t *modeptr)
63240+{
63241+ umode_t mode;
63242+
63243+ *modeptr &= ~gr_acl_umask();
63244+ mode = *modeptr;
63245+
63246+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
63247+ return 1;
63248+
63249+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
63250+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
63251+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
63252+ GR_CHMOD_ACL_MSG);
63253+ } else {
63254+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
63255+ }
63256+}
63257+
63258+__u32
63259+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
63260+{
63261+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
63262+}
63263+
63264+__u32
63265+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
63266+{
63267+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
63268+}
63269+
63270+__u32
63271+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
63272+{
63273+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
63274+}
63275+
63276+__u32
63277+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
63278+{
63279+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
63280+ GR_UNIXCONNECT_ACL_MSG);
63281+}
63282+
63283+/* hardlinks require at minimum create and link permission;
63284+ any additional privilege required is based on the
63285+ privilege of the file being linked to
63286+*/
63287+__u32
63288+gr_acl_handle_link(const struct dentry * new_dentry,
63289+ const struct dentry * parent_dentry,
63290+ const struct vfsmount * parent_mnt,
63291+ const struct dentry * old_dentry,
63292+ const struct vfsmount * old_mnt, const struct filename *to)
63293+{
63294+ __u32 mode;
63295+ __u32 needmode = GR_CREATE | GR_LINK;
63296+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
63297+
63298+ mode =
63299+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
63300+ old_mnt);
63301+
63302+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
63303+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
63304+ return mode;
63305+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
63306+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
63307+ return 0;
63308+ } else if (unlikely((mode & needmode) != needmode))
63309+ return 0;
63310+
63311+ return 1;
63312+}
63313+
63314+__u32
63315+gr_acl_handle_symlink(const struct dentry * new_dentry,
63316+ const struct dentry * parent_dentry,
63317+ const struct vfsmount * parent_mnt, const struct filename *from)
63318+{
63319+ __u32 needmode = GR_WRITE | GR_CREATE;
63320+ __u32 mode;
63321+
63322+ mode =
63323+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
63324+ GR_CREATE | GR_AUDIT_CREATE |
63325+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
63326+
63327+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
63328+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
63329+ return mode;
63330+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
63331+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
63332+ return 0;
63333+ } else if (unlikely((mode & needmode) != needmode))
63334+ return 0;
63335+
63336+ return (GR_WRITE | GR_CREATE);
63337+}
63338+
63339+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
63340+{
63341+ __u32 mode;
63342+
63343+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
63344+
63345+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
63346+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
63347+ return mode;
63348+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
63349+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
63350+ return 0;
63351+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
63352+ return 0;
63353+
63354+ return (reqmode);
63355+}
63356+
63357+__u32
63358+gr_acl_handle_mknod(const struct dentry * new_dentry,
63359+ const struct dentry * parent_dentry,
63360+ const struct vfsmount * parent_mnt,
63361+ const int mode)
63362+{
63363+ __u32 reqmode = GR_WRITE | GR_CREATE;
63364+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
63365+ reqmode |= GR_SETID;
63366+
63367+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
63368+ reqmode, GR_MKNOD_ACL_MSG);
63369+}
63370+
63371+__u32
63372+gr_acl_handle_mkdir(const struct dentry *new_dentry,
63373+ const struct dentry *parent_dentry,
63374+ const struct vfsmount *parent_mnt)
63375+{
63376+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
63377+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
63378+}
63379+
63380+#define RENAME_CHECK_SUCCESS(old, new) \
63381+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
63382+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
63383+
63384+int
63385+gr_acl_handle_rename(struct dentry *new_dentry,
63386+ struct dentry *parent_dentry,
63387+ const struct vfsmount *parent_mnt,
63388+ struct dentry *old_dentry,
63389+ struct inode *old_parent_inode,
63390+ struct vfsmount *old_mnt, const struct filename *newname)
63391+{
63392+ __u32 comp1, comp2;
63393+ int error = 0;
63394+
63395+ if (unlikely(!gr_acl_is_enabled()))
63396+ return 0;
63397+
63398+ if (!new_dentry->d_inode) {
63399+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
63400+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
63401+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
63402+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
63403+ GR_DELETE | GR_AUDIT_DELETE |
63404+ GR_AUDIT_READ | GR_AUDIT_WRITE |
63405+ GR_SUPPRESS, old_mnt);
63406+ } else {
63407+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
63408+ GR_CREATE | GR_DELETE |
63409+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
63410+ GR_AUDIT_READ | GR_AUDIT_WRITE |
63411+ GR_SUPPRESS, parent_mnt);
63412+ comp2 =
63413+ gr_search_file(old_dentry,
63414+ GR_READ | GR_WRITE | GR_AUDIT_READ |
63415+ GR_DELETE | GR_AUDIT_DELETE |
63416+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
63417+ }
63418+
63419+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
63420+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
63421+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
63422+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
63423+ && !(comp2 & GR_SUPPRESS)) {
63424+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
63425+ error = -EACCES;
63426+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
63427+ error = -EACCES;
63428+
63429+ return error;
63430+}
63431+
63432+void
63433+gr_acl_handle_exit(void)
63434+{
63435+ u16 id;
63436+ char *rolename;
63437+
63438+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
63439+ !(current->role->roletype & GR_ROLE_PERSIST))) {
63440+ id = current->acl_role_id;
63441+ rolename = current->role->rolename;
63442+ gr_set_acls(1);
63443+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
63444+ }
63445+
63446+ gr_put_exec_file(current);
63447+ return;
63448+}
63449+
63450+int
63451+gr_acl_handle_procpidmem(const struct task_struct *task)
63452+{
63453+ if (unlikely(!gr_acl_is_enabled()))
63454+ return 0;
63455+
63456+ if (task != current && task->acl->mode & GR_PROTPROCFD)
63457+ return -EACCES;
63458+
63459+ return 0;
63460+}
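The setid test repeated in the creat, chmod, and mknod handlers treats setuid as always privileged, but setgid as privileged only together with group-execute, since setgid without group-execute denotes mandatory file locking rather than privilege. A runnable check of the expression with the standard mode bits:

#include <stdio.h>
#include <sys/stat.h>

/* the shared test: (mode & S_ISUID) ||
   ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) */
static int mode_needs_setid(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d %d %d\n",
	       mode_needs_setid(04755),   /* 1: setuid */
	       mode_needs_setid(02755),   /* 1: setgid + group exec */
	       mode_needs_setid(02644));  /* 0: setgid without group exec */
	return 0;
}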
63461diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
63462new file mode 100644
63463index 0000000..8132048
63464--- /dev/null
63465+++ b/grsecurity/gracl_ip.c
63466@@ -0,0 +1,387 @@
63467+#include <linux/kernel.h>
63468+#include <asm/uaccess.h>
63469+#include <asm/errno.h>
63470+#include <net/sock.h>
63471+#include <linux/file.h>
63472+#include <linux/fs.h>
63473+#include <linux/net.h>
63474+#include <linux/in.h>
63475+#include <linux/skbuff.h>
63476+#include <linux/ip.h>
63477+#include <linux/udp.h>
63478+#include <linux/types.h>
63479+#include <linux/sched.h>
63480+#include <linux/netdevice.h>
63481+#include <linux/inetdevice.h>
63482+#include <linux/gracl.h>
63483+#include <linux/grsecurity.h>
63484+#include <linux/grinternal.h>
63485+
63486+#define GR_BIND 0x01
63487+#define GR_CONNECT 0x02
63488+#define GR_INVERT 0x04
63489+#define GR_BINDOVERRIDE 0x08
63490+#define GR_CONNECTOVERRIDE 0x10
63491+#define GR_SOCK_FAMILY 0x20
63492+
63493+static const char * gr_protocols[IPPROTO_MAX] = {
63494+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
63495+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
63496+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
63497+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
63498+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
63499+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
63500+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
63501+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
63502+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
63503+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
63504+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
63505+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
63506+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
63507+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
63508+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
63509+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
63510+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
63511+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
63512+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
63513+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
63514+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
63515+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
63516+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
63517+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
63518+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
63519+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
63520+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
63521+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
63522+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
63523+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
63524+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
63525+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
63526+ };
63527+
63528+static const char * gr_socktypes[SOCK_MAX] = {
63529+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
63530+ "unknown:7", "unknown:8", "unknown:9", "packet"
63531+ };
63532+
63533+static const char * gr_sockfamilies[AF_MAX+1] = {
63534+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
63535+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
63536+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
63537+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
63538+ };
63539+
63540+const char *
63541+gr_proto_to_name(unsigned char proto)
63542+{
63543+ return gr_protocols[proto];
63544+}
63545+
63546+const char *
63547+gr_socktype_to_name(unsigned char type)
63548+{
63549+ return gr_socktypes[type];
63550+}
63551+
63552+const char *
63553+gr_sockfamily_to_name(unsigned char family)
63554+{
63555+ return gr_sockfamilies[family];
63556+}
63557+
63558+int
63559+gr_search_socket(const int domain, const int type, const int protocol)
63560+{
63561+ struct acl_subject_label *curr;
63562+ const struct cred *cred = current_cred();
63563+
63564+ if (unlikely(!gr_acl_is_enabled()))
63565+ goto exit;
63566+
63567+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
63568+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
63569+ goto exit; // let the kernel handle it
63570+
63571+ curr = current->acl;
63572+
63573+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
63574+ /* the family is allowed, if this is PF_INET allow it only if
63575+ the extra sock type/protocol checks pass */
63576+ if (domain == PF_INET)
63577+ goto inet_check;
63578+ goto exit;
63579+ } else {
63580+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63581+ __u32 fakeip = 0;
63582+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63583+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63584+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63585+ gr_to_filename(current->exec_file->f_path.dentry,
63586+ current->exec_file->f_path.mnt) :
63587+ curr->filename, curr->filename,
63588+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
63589+ &current->signal->saved_ip);
63590+ goto exit;
63591+ }
63592+ goto exit_fail;
63593+ }
63594+
63595+inet_check:
63596+ /* the rest of this checking is for IPv4 only */
63597+ if (!curr->ips)
63598+ goto exit;
63599+
63600+ if ((curr->ip_type & (1U << type)) &&
63601+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
63602+ goto exit;
63603+
63604+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63605+		/* we don't place acls on raw sockets, and sometimes
63606+ dgram/ip sockets are opened for ioctl and not
63607+ bind/connect, so we'll fake a bind learn log */
63608+ if (type == SOCK_RAW || type == SOCK_PACKET) {
63609+ __u32 fakeip = 0;
63610+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63611+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63612+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63613+ gr_to_filename(current->exec_file->f_path.dentry,
63614+ current->exec_file->f_path.mnt) :
63615+ curr->filename, curr->filename,
63616+ &fakeip, 0, type,
63617+ protocol, GR_CONNECT, &current->signal->saved_ip);
63618+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
63619+ __u32 fakeip = 0;
63620+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63621+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63622+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63623+ gr_to_filename(current->exec_file->f_path.dentry,
63624+ current->exec_file->f_path.mnt) :
63625+ curr->filename, curr->filename,
63626+ &fakeip, 0, type,
63627+ protocol, GR_BIND, &current->signal->saved_ip);
63628+ }
63629+ /* we'll log when they use connect or bind */
63630+ goto exit;
63631+ }
63632+
63633+exit_fail:
63634+ if (domain == PF_INET)
63635+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
63636+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
63637+ else
63638+#ifndef CONFIG_IPV6
63639+ if (domain != PF_INET6)
63640+#endif
63641+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
63642+ gr_socktype_to_name(type), protocol);
63643+
63644+ return 0;
63645+exit:
63646+ return 1;
63647+}
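The family and protocol checks above index packed bitmaps: bit n lives in 32-bit word n/32 at position n%32, which is how sock_families[], ip_type, and ip_proto[] are consulted. A minimal illustration of the indexing; the array size and protocol numbers are chosen for the example.

#include <stdio.h>

static void bitmap_set(unsigned int *map, int n)
{
	map[n / 32] |= 1U << (n % 32);
}

static int bitmap_test(const unsigned int *map, int n)
{
	return !!(map[n / 32] & (1U << (n % 32)));
}

int main(void)
{
	unsigned int proto[8] = {0};  /* 256 protocol bits, like ip_proto[] */

	bitmap_set(proto, 6);    /* IPPROTO_TCP */
	bitmap_set(proto, 132);  /* IPPROTO_SCTP, lands in word 4 */
	printf("%d %d %d\n",
	       bitmap_test(proto, 6), bitmap_test(proto, 17),
	       bitmap_test(proto, 132)); /* 1 0 1 */
	return 0;
}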
63648+
63649+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
63650+{
63651+ if ((ip->mode & mode) &&
63652+ (ip_port >= ip->low) &&
63653+ (ip_port <= ip->high) &&
63654+ ((ntohl(ip_addr) & our_netmask) ==
63655+ (ntohl(our_addr) & our_netmask))
63656+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
63657+ && (ip->type & (1U << type))) {
63658+ if (ip->mode & GR_INVERT)
63659+ return 2; // specifically denied
63660+ else
63661+ return 1; // allowed
63662+ }
63663+
63664+ return 0; // not specifically allowed, may continue parsing
63665+}
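check_ip_policy() matches an endpoint against a policy entry by port range plus a masked subnet comparison in host byte order. The address/port core of that test, reduced to a standalone program; the addresses and mask are example values.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* an address matches a policy entry when it falls in the same masked
   subnet and within the entry's port range */
static int addr_port_match(uint32_t ip_be, uint32_t our_be, uint32_t netmask,
			   uint16_t port, uint16_t low, uint16_t high)
{
	return port >= low && port <= high &&
	       (ntohl(ip_be) & netmask) == (ntohl(our_be) & netmask);
}

int main(void)
{
	uint32_t net = inet_addr("192.168.1.0");   /* policy subnet, network order */
	uint32_t in  = inet_addr("192.168.1.42");
	uint32_t out = inet_addr("192.168.2.42");

	printf("%d %d %d\n",
	       addr_port_match(in,  net, 0xffffff00, 80,   1, 1024),  /* 1 */
	       addr_port_match(out, net, 0xffffff00, 80,   1, 1024),  /* 0: subnet */
	       addr_port_match(in,  net, 0xffffff00, 8080, 1, 1024)); /* 0: port */
	return 0;
}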
63666+
63667+static int
63668+gr_search_connectbind(const int full_mode, struct sock *sk,
63669+ struct sockaddr_in *addr, const int type)
63670+{
63671+ char iface[IFNAMSIZ] = {0};
63672+ struct acl_subject_label *curr;
63673+ struct acl_ip_label *ip;
63674+ struct inet_sock *isk;
63675+ struct net_device *dev;
63676+ struct in_device *idev;
63677+ unsigned long i;
63678+ int ret;
63679+ int mode = full_mode & (GR_BIND | GR_CONNECT);
63680+ __u32 ip_addr = 0;
63681+ __u32 our_addr;
63682+ __u32 our_netmask;
63683+ char *p;
63684+ __u16 ip_port = 0;
63685+ const struct cred *cred = current_cred();
63686+
63687+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
63688+ return 0;
63689+
63690+ curr = current->acl;
63691+ isk = inet_sk(sk);
63692+
63693+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
63694+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
63695+ addr->sin_addr.s_addr = curr->inaddr_any_override;
63696+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
63697+ struct sockaddr_in saddr;
63698+ int err;
63699+
63700+ saddr.sin_family = AF_INET;
63701+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
63702+ saddr.sin_port = isk->inet_sport;
63703+
63704+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63705+ if (err)
63706+ return err;
63707+
63708+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63709+ if (err)
63710+ return err;
63711+ }
63712+
63713+ if (!curr->ips)
63714+ return 0;
63715+
63716+ ip_addr = addr->sin_addr.s_addr;
63717+ ip_port = ntohs(addr->sin_port);
63718+
63719+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63720+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63721+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63722+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63723+ gr_to_filename(current->exec_file->f_path.dentry,
63724+ current->exec_file->f_path.mnt) :
63725+ curr->filename, curr->filename,
63726+ &ip_addr, ip_port, type,
63727+ sk->sk_protocol, mode, &current->signal->saved_ip);
63728+ return 0;
63729+ }
63730+
63731+ for (i = 0; i < curr->ip_num; i++) {
63732+ ip = *(curr->ips + i);
63733+ if (ip->iface != NULL) {
63734+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
63735+ p = strchr(iface, ':');
63736+ if (p != NULL)
63737+ *p = '\0';
63738+ dev = dev_get_by_name(sock_net(sk), iface);
63739+ if (dev == NULL)
63740+ continue;
63741+ idev = in_dev_get(dev);
63742+ if (idev == NULL) {
63743+ dev_put(dev);
63744+ continue;
63745+ }
63746+ rcu_read_lock();
63747+ for_ifa(idev) {
63748+ if (!strcmp(ip->iface, ifa->ifa_label)) {
63749+ our_addr = ifa->ifa_address;
63750+ our_netmask = 0xffffffff;
63751+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63752+ if (ret == 1) {
63753+ rcu_read_unlock();
63754+ in_dev_put(idev);
63755+ dev_put(dev);
63756+ return 0;
63757+ } else if (ret == 2) {
63758+ rcu_read_unlock();
63759+ in_dev_put(idev);
63760+ dev_put(dev);
63761+ goto denied;
63762+ }
63763+ }
63764+ } endfor_ifa(idev);
63765+ rcu_read_unlock();
63766+ in_dev_put(idev);
63767+ dev_put(dev);
63768+ } else {
63769+ our_addr = ip->addr;
63770+ our_netmask = ip->netmask;
63771+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63772+ if (ret == 1)
63773+ return 0;
63774+ else if (ret == 2)
63775+ goto denied;
63776+ }
63777+ }
63778+
63779+denied:
63780+ if (mode == GR_BIND)
63781+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63782+ else if (mode == GR_CONNECT)
63783+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63784+
63785+ return -EACCES;
63786+}
63787+
63788+int
63789+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
63790+{
63791+ /* always allow disconnection of dgram sockets with connect */
63792+ if (addr->sin_family == AF_UNSPEC)
63793+ return 0;
63794+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
63795+}
63796+
63797+int
63798+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
63799+{
63800+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
63801+}
63802+
63803+int gr_search_listen(struct socket *sock)
63804+{
63805+ struct sock *sk = sock->sk;
63806+ struct sockaddr_in addr;
63807+
63808+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63809+ addr.sin_port = inet_sk(sk)->inet_sport;
63810+
63811+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63812+}
63813+
63814+int gr_search_accept(struct socket *sock)
63815+{
63816+ struct sock *sk = sock->sk;
63817+ struct sockaddr_in addr;
63818+
63819+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63820+ addr.sin_port = inet_sk(sk)->inet_sport;
63821+
63822+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63823+}
63824+
63825+int
63826+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
63827+{
63828+ if (addr)
63829+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
63830+ else {
63831+ struct sockaddr_in sin;
63832+ const struct inet_sock *inet = inet_sk(sk);
63833+
63834+ sin.sin_addr.s_addr = inet->inet_daddr;
63835+ sin.sin_port = inet->inet_dport;
63836+
63837+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63838+ }
63839+}
63840+
63841+int
63842+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
63843+{
63844+ struct sockaddr_in sin;
63845+
63846+ if (unlikely(skb->len < sizeof (struct udphdr)))
63847+ return 0; // skip this packet
63848+
63849+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
63850+ sin.sin_port = udp_hdr(skb)->source;
63851+
63852+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63853+}
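gr_search_udp_recvmsg() above treats every inbound datagram as if the receiver had connected to its source: datagrams too short to carry a UDP header are skipped, otherwise a sockaddr_in is rebuilt from the IP and UDP headers (both fields still in network byte order; gr_search_connectbind() applies ntohs() to the port) and the normal connect policy runs. A userspace sketch of that extraction (illustrative only):

	#include <stdio.h>
	#include <arpa/inet.h>
	#include <netinet/ip.h>
	#include <netinet/udp.h>

	int main(void)
	{
		/* fake headers standing in for ip_hdr(skb)/udp_hdr(skb) */
		struct iphdr ip = { .protocol = IPPROTO_UDP };
		struct udphdr udp = {0};
		struct sockaddr_in sin;

		ip.saddr = inet_addr("192.0.2.1");
		udp.source = htons(53);

		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ip.saddr;	/* network order, as in the hook */
		sin.sin_port = udp.source;	/* network order; the policy code ntohs()es it */

		printf("%s:%u\n", inet_ntoa(sin.sin_addr), ntohs(sin.sin_port));
		return 0;
	}
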
63854diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
63855new file mode 100644
63856index 0000000..25f54ef
63857--- /dev/null
63858+++ b/grsecurity/gracl_learn.c
63859@@ -0,0 +1,207 @@
63860+#include <linux/kernel.h>
63861+#include <linux/mm.h>
63862+#include <linux/sched.h>
63863+#include <linux/poll.h>
63864+#include <linux/string.h>
63865+#include <linux/file.h>
63866+#include <linux/types.h>
63867+#include <linux/vmalloc.h>
63868+#include <linux/grinternal.h>
63869+
63870+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
63871+ size_t count, loff_t *ppos);
63872+extern int gr_acl_is_enabled(void);
63873+
63874+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
63875+static int gr_learn_attached;
63876+
63877+/* use a 512k buffer */
63878+#define LEARN_BUFFER_SIZE (512 * 1024)
63879+
63880+static DEFINE_SPINLOCK(gr_learn_lock);
63881+static DEFINE_MUTEX(gr_learn_user_mutex);
63882+
63883+/* we need to maintain two buffers, so that the kernel context of grlearn
63884+ uses a mutex around the userspace copying, and the other kernel contexts
63885+ use a spinlock when copying into the buffer, since they cannot sleep
63886+*/
63887+static char *learn_buffer;
63888+static char *learn_buffer_user;
63889+static int learn_buffer_len;
63890+static int learn_buffer_user_len;
63891+
63892+static ssize_t
63893+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
63894+{
63895+ DECLARE_WAITQUEUE(wait, current);
63896+ ssize_t retval = 0;
63897+
63898+ add_wait_queue(&learn_wait, &wait);
63899+ set_current_state(TASK_INTERRUPTIBLE);
63900+ do {
63901+ mutex_lock(&gr_learn_user_mutex);
63902+ spin_lock(&gr_learn_lock);
63903+ if (learn_buffer_len)
63904+ break;
63905+ spin_unlock(&gr_learn_lock);
63906+ mutex_unlock(&gr_learn_user_mutex);
63907+ if (file->f_flags & O_NONBLOCK) {
63908+ retval = -EAGAIN;
63909+ goto out;
63910+ }
63911+ if (signal_pending(current)) {
63912+ retval = -ERESTARTSYS;
63913+ goto out;
63914+ }
63915+
63916+ schedule();
63917+ } while (1);
63918+
63919+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
63920+ learn_buffer_user_len = learn_buffer_len;
63921+ retval = learn_buffer_len;
63922+ learn_buffer_len = 0;
63923+
63924+ spin_unlock(&gr_learn_lock);
63925+
63926+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
63927+ retval = -EFAULT;
63928+
63929+ mutex_unlock(&gr_learn_user_mutex);
63930+out:
63931+ set_current_state(TASK_RUNNING);
63932+ remove_wait_queue(&learn_wait, &wait);
63933+ return retval;
63934+}
63935+
63936+static unsigned int
63937+poll_learn(struct file * file, poll_table * wait)
63938+{
63939+ poll_wait(file, &learn_wait, wait);
63940+
63941+ if (learn_buffer_len)
63942+ return (POLLIN | POLLRDNORM);
63943+
63944+ return 0;
63945+}
63946+
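read_learn()/poll_learn() implement a standard blocking character-device read: sleep on learn_wait until learn_buffer_len is nonzero, snapshot the buffer into learn_buffer_user under the spinlock, then copy_to_user() while holding only the mutex. A minimal sketch of the userspace consumer side, assuming the device node is /dev/grsec (the node this patch registers grsec_fops on):

	#include <stdio.h>
	#include <fcntl.h>
	#include <poll.h>
	#include <unistd.h>

	int main(void)
	{
		static char buf[512 * 1024];	/* matches LEARN_BUFFER_SIZE above */
		struct pollfd pfd;
		int fd = open("/dev/grsec", O_RDONLY);	/* assumed device node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		pfd.fd = fd;
		pfd.events = POLLIN;
		while (poll(&pfd, 1, -1) > 0) {
			ssize_t n = read(fd, buf, sizeof(buf));
			if (n <= 0)
				break;
			fwrite(buf, 1, n, stdout);	/* NUL-separated learn records */
		}
		close(fd);
		return 0;
	}
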
63947+void
63948+gr_clear_learn_entries(void)
63949+{
63950+ char *tmp;
63951+
63952+ mutex_lock(&gr_learn_user_mutex);
63953+ spin_lock(&gr_learn_lock);
63954+ tmp = learn_buffer;
63955+ learn_buffer = NULL;
63956+ spin_unlock(&gr_learn_lock);
63957+ if (tmp)
63958+ vfree(tmp);
63959+ if (learn_buffer_user != NULL) {
63960+ vfree(learn_buffer_user);
63961+ learn_buffer_user = NULL;
63962+ }
63963+ learn_buffer_len = 0;
63964+ mutex_unlock(&gr_learn_user_mutex);
63965+
63966+ return;
63967+}
63968+
63969+void
63970+gr_add_learn_entry(const char *fmt, ...)
63971+{
63972+ va_list args;
63973+ unsigned int len;
63974+
63975+ if (!gr_learn_attached)
63976+ return;
63977+
63978+ spin_lock(&gr_learn_lock);
63979+
63980+ /* leave a gap at the end so we know when it's "full" but don't have to
63981+ compute the exact length of the string we're trying to append
63982+ */
63983+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
63984+ spin_unlock(&gr_learn_lock);
63985+ wake_up_interruptible(&learn_wait);
63986+ return;
63987+ }
63988+ if (learn_buffer == NULL) {
63989+ spin_unlock(&gr_learn_lock);
63990+ return;
63991+ }
63992+
63993+ va_start(args, fmt);
63994+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
63995+ va_end(args);
63996+
63997+ learn_buffer_len += len + 1;
63998+
63999+ spin_unlock(&gr_learn_lock);
64000+ wake_up_interruptible(&learn_wait);
64001+
64002+ return;
64003+}
64004+
64005+static int
64006+open_learn(struct inode *inode, struct file *file)
64007+{
64008+ if (file->f_mode & FMODE_READ && gr_learn_attached)
64009+ return -EBUSY;
64010+ if (file->f_mode & FMODE_READ) {
64011+ int retval = 0;
64012+ mutex_lock(&gr_learn_user_mutex);
64013+ if (learn_buffer == NULL)
64014+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
64015+ if (learn_buffer_user == NULL)
64016+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
64017+ if (learn_buffer == NULL) {
64018+ retval = -ENOMEM;
64019+ goto out_error;
64020+ }
64021+ if (learn_buffer_user == NULL) {
64022+ retval = -ENOMEM;
64023+ goto out_error;
64024+ }
64025+ learn_buffer_len = 0;
64026+ learn_buffer_user_len = 0;
64027+ gr_learn_attached = 1;
64028+out_error:
64029+ mutex_unlock(&gr_learn_user_mutex);
64030+ return retval;
64031+ }
64032+ return 0;
64033+}
64034+
64035+static int
64036+close_learn(struct inode *inode, struct file *file)
64037+{
64038+ if (file->f_mode & FMODE_READ) {
64039+ char *tmp = NULL;
64040+ mutex_lock(&gr_learn_user_mutex);
64041+ spin_lock(&gr_learn_lock);
64042+ tmp = learn_buffer;
64043+ learn_buffer = NULL;
64044+ spin_unlock(&gr_learn_lock);
64045+ if (tmp)
64046+ vfree(tmp);
64047+ if (learn_buffer_user != NULL) {
64048+ vfree(learn_buffer_user);
64049+ learn_buffer_user = NULL;
64050+ }
64051+ learn_buffer_len = 0;
64052+ learn_buffer_user_len = 0;
64053+ gr_learn_attached = 0;
64054+ mutex_unlock(&gr_learn_user_mutex);
64055+ }
64056+
64057+ return 0;
64058+}
64059+
64060+const struct file_operations grsec_fops = {
64061+ .read = read_learn,
64062+ .write = write_grsec_handler,
64063+ .open = open_learn,
64064+ .release = close_learn,
64065+ .poll = poll_learn,
64066+};
64067diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
64068new file mode 100644
64069index 0000000..39645c9
64070--- /dev/null
64071+++ b/grsecurity/gracl_res.c
64072@@ -0,0 +1,68 @@
64073+#include <linux/kernel.h>
64074+#include <linux/sched.h>
64075+#include <linux/gracl.h>
64076+#include <linux/grinternal.h>
64077+
64078+static const char *restab_log[] = {
64079+ [RLIMIT_CPU] = "RLIMIT_CPU",
64080+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
64081+ [RLIMIT_DATA] = "RLIMIT_DATA",
64082+ [RLIMIT_STACK] = "RLIMIT_STACK",
64083+ [RLIMIT_CORE] = "RLIMIT_CORE",
64084+ [RLIMIT_RSS] = "RLIMIT_RSS",
64085+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
64086+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
64087+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
64088+ [RLIMIT_AS] = "RLIMIT_AS",
64089+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
64090+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
64091+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
64092+ [RLIMIT_NICE] = "RLIMIT_NICE",
64093+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
64094+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
64095+ [GR_CRASH_RES] = "RLIMIT_CRASH"
64096+};
64097+
64098+void
64099+gr_log_resource(const struct task_struct *task,
64100+ const int res, const unsigned long wanted, const int gt)
64101+{
64102+ const struct cred *cred;
64103+ unsigned long rlim;
64104+
64105+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
64106+ return;
64107+
64108+	// not-yet-supported resource (no restab_log entry)
64109+ if (unlikely(!restab_log[res]))
64110+ return;
64111+
64112+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
64113+ rlim = task_rlimit_max(task, res);
64114+ else
64115+ rlim = task_rlimit(task, res);
64116+
64117+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
64118+ return;
64119+
64120+ rcu_read_lock();
64121+ cred = __task_cred(task);
64122+
64123+ if (res == RLIMIT_NPROC &&
64124+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
64125+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
64126+ goto out_rcu_unlock;
64127+ else if (res == RLIMIT_MEMLOCK &&
64128+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
64129+ goto out_rcu_unlock;
64130+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
64131+ goto out_rcu_unlock;
64132+ rcu_read_unlock();
64133+
64134+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
64135+
64136+ return;
64137+out_rcu_unlock:
64138+ rcu_read_unlock();
64139+ return;
64140+}
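The gt flag selects whether the requested amount may equal the limit: gr_log_resource() logs when wanted > rlim if gt is set, when wanted >= rlim if it is clear, and never for RLIM_INFINITY. The early-return test above, restated as a standalone predicate (illustrative):

	#include <stdio.h>
	#include <sys/resource.h>

	/* mirrors the early-return test in gr_log_resource(): returns 1 if
	 * the request exceeds the limit and should be logged */
	static int res_exceeded(unsigned long wanted, unsigned long rlim, int gt)
	{
		if (rlim == RLIM_INFINITY)
			return 0;
		return gt ? wanted > rlim : wanted >= rlim;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       res_exceeded(10, 10, 1),			/* 0: gt allows equality */
		       res_exceeded(10, 10, 0),			/* 1 */
		       res_exceeded(5, RLIM_INFINITY, 0));	/* 0 */
		return 0;
	}
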
64141diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
64142new file mode 100644
64143index 0000000..cb1e5ab
64144--- /dev/null
64145+++ b/grsecurity/gracl_segv.c
64146@@ -0,0 +1,303 @@
64147+#include <linux/kernel.h>
64148+#include <linux/mm.h>
64149+#include <asm/uaccess.h>
64150+#include <asm/errno.h>
64151+#include <asm/mman.h>
64152+#include <net/sock.h>
64153+#include <linux/file.h>
64154+#include <linux/fs.h>
64155+#include <linux/net.h>
64156+#include <linux/in.h>
64157+#include <linux/slab.h>
64158+#include <linux/types.h>
64159+#include <linux/sched.h>
64160+#include <linux/timer.h>
64161+#include <linux/gracl.h>
64162+#include <linux/grsecurity.h>
64163+#include <linux/grinternal.h>
64164+
64165+static struct crash_uid *uid_set;
64166+static unsigned short uid_used;
64167+static DEFINE_SPINLOCK(gr_uid_lock);
64168+extern rwlock_t gr_inode_lock;
64169+extern struct acl_subject_label *
64170+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
64171+ struct acl_role_label *role);
64172+
64173+#ifdef CONFIG_BTRFS_FS
64174+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
64175+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
64176+#endif
64177+
64178+static inline dev_t __get_dev(const struct dentry *dentry)
64179+{
64180+#ifdef CONFIG_BTRFS_FS
64181+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
64182+ return get_btrfs_dev_from_inode(dentry->d_inode);
64183+ else
64184+#endif
64185+ return dentry->d_inode->i_sb->s_dev;
64186+}
64187+
64188+int
64189+gr_init_uidset(void)
64190+{
64191+ uid_set =
64192+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
64193+ uid_used = 0;
64194+
64195+ return uid_set ? 1 : 0;
64196+}
64197+
64198+void
64199+gr_free_uidset(void)
64200+{
64201+ if (uid_set)
64202+ kfree(uid_set);
64203+
64204+ return;
64205+}
64206+
64207+int
64208+gr_find_uid(const uid_t uid)
64209+{
64210+ struct crash_uid *tmp = uid_set;
64211+ uid_t buid;
64212+ int low = 0, high = uid_used - 1, mid;
64213+
64214+ while (high >= low) {
64215+ mid = (low + high) >> 1;
64216+ buid = tmp[mid].uid;
64217+ if (buid == uid)
64218+ return mid;
64219+ if (buid > uid)
64220+ high = mid - 1;
64221+ if (buid < uid)
64222+ low = mid + 1;
64223+ }
64224+
64225+ return -1;
64226+}
64227+
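gr_insertsort() below keeps uid_set ordered, so gr_find_uid() is a textbook binary search over the banned-uid table. The same search, as a runnable userspace sketch:

	#include <stdio.h>

	struct crash_uid { unsigned int uid; unsigned long expires; };

	/* same binary search as gr_find_uid(): index on hit, -1 on miss */
	static int find_uid(const struct crash_uid *set, int used, unsigned int uid)
	{
		int low = 0, high = used - 1;

		while (high >= low) {
			int mid = (low + high) / 2;
			if (set[mid].uid == uid)
				return mid;
			if (set[mid].uid > uid)
				high = mid - 1;
			else
				low = mid + 1;
		}
		return -1;
	}

	int main(void)
	{
		struct crash_uid set[] = { {100, 0}, {500, 0}, {1000, 0} };
		printf("%d %d\n", find_uid(set, 3, 500), find_uid(set, 3, 501)); /* 1 -1 */
		return 0;
	}
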
64228+static __inline__ void
64229+gr_insertsort(void)
64230+{
64231+ unsigned short i, j;
64232+ struct crash_uid index;
64233+
64234+ for (i = 1; i < uid_used; i++) {
64235+ index = uid_set[i];
64236+ j = i;
64237+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
64238+ uid_set[j] = uid_set[j - 1];
64239+ j--;
64240+ }
64241+ uid_set[j] = index;
64242+ }
64243+
64244+ return;
64245+}
64246+
64247+static __inline__ void
64248+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
64249+{
64250+ int loc;
64251+ uid_t uid = GR_GLOBAL_UID(kuid);
64252+
64253+ if (uid_used == GR_UIDTABLE_MAX)
64254+ return;
64255+
64256+ loc = gr_find_uid(uid);
64257+
64258+ if (loc >= 0) {
64259+ uid_set[loc].expires = expires;
64260+ return;
64261+ }
64262+
64263+ uid_set[uid_used].uid = uid;
64264+ uid_set[uid_used].expires = expires;
64265+ uid_used++;
64266+
64267+ gr_insertsort();
64268+
64269+ return;
64270+}
64271+
64272+void
64273+gr_remove_uid(const unsigned short loc)
64274+{
64275+ unsigned short i;
64276+
64277+ for (i = loc + 1; i < uid_used; i++)
64278+ uid_set[i - 1] = uid_set[i];
64279+
64280+ uid_used--;
64281+
64282+ return;
64283+}
64284+
64285+int
64286+gr_check_crash_uid(const kuid_t kuid)
64287+{
64288+ int loc;
64289+ int ret = 0;
64290+ uid_t uid;
64291+
64292+ if (unlikely(!gr_acl_is_enabled()))
64293+ return 0;
64294+
64295+ uid = GR_GLOBAL_UID(kuid);
64296+
64297+ spin_lock(&gr_uid_lock);
64298+ loc = gr_find_uid(uid);
64299+
64300+ if (loc < 0)
64301+ goto out_unlock;
64302+
64303+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
64304+ gr_remove_uid(loc);
64305+ else
64306+ ret = 1;
64307+
64308+out_unlock:
64309+ spin_unlock(&gr_uid_lock);
64310+ return ret;
64311+}
64312+
64313+static __inline__ int
64314+proc_is_setxid(const struct cred *cred)
64315+{
64316+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
64317+ !uid_eq(cred->uid, cred->fsuid))
64318+ return 1;
64319+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
64320+ !gid_eq(cred->gid, cred->fsgid))
64321+ return 1;
64322+
64323+ return 0;
64324+}
64325+
64326+extern int gr_fake_force_sig(int sig, struct task_struct *t);
64327+
64328+void
64329+gr_handle_crash(struct task_struct *task, const int sig)
64330+{
64331+ struct acl_subject_label *curr;
64332+ struct task_struct *tsk, *tsk2;
64333+ const struct cred *cred;
64334+ const struct cred *cred2;
64335+
64336+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
64337+ return;
64338+
64339+ if (unlikely(!gr_acl_is_enabled()))
64340+ return;
64341+
64342+ curr = task->acl;
64343+
64344+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
64345+ return;
64346+
64347+ if (time_before_eq(curr->expires, get_seconds())) {
64348+ curr->expires = 0;
64349+ curr->crashes = 0;
64350+ }
64351+
64352+ curr->crashes++;
64353+
64354+ if (!curr->expires)
64355+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
64356+
64357+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
64358+ time_after(curr->expires, get_seconds())) {
64359+ rcu_read_lock();
64360+ cred = __task_cred(task);
64361+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
64362+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
64363+ spin_lock(&gr_uid_lock);
64364+ gr_insert_uid(cred->uid, curr->expires);
64365+ spin_unlock(&gr_uid_lock);
64366+ curr->expires = 0;
64367+ curr->crashes = 0;
64368+ read_lock(&tasklist_lock);
64369+ do_each_thread(tsk2, tsk) {
64370+ cred2 = __task_cred(tsk);
64371+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
64372+ gr_fake_force_sig(SIGKILL, tsk);
64373+ } while_each_thread(tsk2, tsk);
64374+ read_unlock(&tasklist_lock);
64375+ } else {
64376+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
64377+ read_lock(&tasklist_lock);
64378+ read_lock(&grsec_exec_file_lock);
64379+ do_each_thread(tsk2, tsk) {
64380+ if (likely(tsk != task)) {
64381+ // if this thread has the same subject as the one that triggered
64382+ // RES_CRASH and it's the same binary, kill it
64383+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
64384+ gr_fake_force_sig(SIGKILL, tsk);
64385+ }
64386+ } while_each_thread(tsk2, tsk);
64387+ read_unlock(&grsec_exec_file_lock);
64388+ read_unlock(&tasklist_lock);
64389+ }
64390+ rcu_read_unlock();
64391+ }
64392+
64393+ return;
64394+}
64395+
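The RES_CRASH accounting works as a sliding ban window: the first qualifying crash arms expires = now + rlim_max (the window length in seconds), and once crashes reaches rlim_cur inside that window the response fires: for a set*id process of a non-root uid, the uid is banned via gr_insert_uid() and all of that user's tasks are killed; otherwise every task running the same binary under the same subject is killed. A compact model of the counting logic (illustrative; the kernel uses get_seconds() and the subject's res[GR_CRASH_RES]):

	#include <stdio.h>
	#include <time.h>

	/* illustrative model of the RES_CRASH window in gr_handle_crash():
	 * rlim_cur = allowed crashes, rlim_max = window length in seconds */
	struct crash_state { unsigned long crashes, expires; };

	static int crash_triggers(struct crash_state *s, unsigned long now,
				  unsigned long rlim_cur, unsigned long rlim_max)
	{
		if (s->expires && s->expires <= now) {	/* window lapsed: reset */
			s->expires = 0;
			s->crashes = 0;
		}
		s->crashes++;
		if (!s->expires)
			s->expires = now + rlim_max;
		return s->crashes >= rlim_cur && now < s->expires;
	}

	int main(void)
	{
		struct crash_state s = {0, 0};
		unsigned long now = (unsigned long)time(NULL);

		/* 4 crashes allowed per 60s: the 4th one trips the response */
		for (int i = 0; i < 4; i++)
			printf("crash %d -> %d\n", i + 1, crash_triggers(&s, now, 4, 60));
		return 0;
	}
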
64396+int
64397+gr_check_crash_exec(const struct file *filp)
64398+{
64399+ struct acl_subject_label *curr;
64400+
64401+ if (unlikely(!gr_acl_is_enabled()))
64402+ return 0;
64403+
64404+ read_lock(&gr_inode_lock);
64405+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
64406+ __get_dev(filp->f_path.dentry),
64407+ current->role);
64408+ read_unlock(&gr_inode_lock);
64409+
64410+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
64411+ (!curr->crashes && !curr->expires))
64412+ return 0;
64413+
64414+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
64415+ time_after(curr->expires, get_seconds()))
64416+ return 1;
64417+ else if (time_before_eq(curr->expires, get_seconds())) {
64418+ curr->crashes = 0;
64419+ curr->expires = 0;
64420+ }
64421+
64422+ return 0;
64423+}
64424+
64425+void
64426+gr_handle_alertkill(struct task_struct *task)
64427+{
64428+ struct acl_subject_label *curracl;
64429+ __u32 curr_ip;
64430+ struct task_struct *p, *p2;
64431+
64432+ if (unlikely(!gr_acl_is_enabled()))
64433+ return;
64434+
64435+ curracl = task->acl;
64436+ curr_ip = task->signal->curr_ip;
64437+
64438+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
64439+ read_lock(&tasklist_lock);
64440+ do_each_thread(p2, p) {
64441+ if (p->signal->curr_ip == curr_ip)
64442+ gr_fake_force_sig(SIGKILL, p);
64443+ } while_each_thread(p2, p);
64444+ read_unlock(&tasklist_lock);
64445+ } else if (curracl->mode & GR_KILLPROC)
64446+ gr_fake_force_sig(SIGKILL, task);
64447+
64448+ return;
64449+}
64450diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
64451new file mode 100644
64452index 0000000..98011b0
64453--- /dev/null
64454+++ b/grsecurity/gracl_shm.c
64455@@ -0,0 +1,40 @@
64456+#include <linux/kernel.h>
64457+#include <linux/mm.h>
64458+#include <linux/sched.h>
64459+#include <linux/file.h>
64460+#include <linux/ipc.h>
64461+#include <linux/gracl.h>
64462+#include <linux/grsecurity.h>
64463+#include <linux/grinternal.h>
64464+
64465+int
64466+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64467+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64468+{
64469+ struct task_struct *task;
64470+
64471+ if (!gr_acl_is_enabled())
64472+ return 1;
64473+
64474+ rcu_read_lock();
64475+ read_lock(&tasklist_lock);
64476+
64477+ task = find_task_by_vpid(shm_cprid);
64478+
64479+ if (unlikely(!task))
64480+ task = find_task_by_vpid(shm_lapid);
64481+
64482+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
64483+ (task_pid_nr(task) == shm_lapid)) &&
64484+ (task->acl->mode & GR_PROTSHM) &&
64485+ (task->acl != current->acl))) {
64486+ read_unlock(&tasklist_lock);
64487+ rcu_read_unlock();
64488+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
64489+ return 0;
64490+ }
64491+ read_unlock(&tasklist_lock);
64492+ rcu_read_unlock();
64493+
64494+ return 1;
64495+}
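gr_handle_shmat() refuses to attach a segment whose creator runs under a different subject with GR_PROTSHM set; the start-time comparison is a PID-reuse guard, since the task found under shm_cprid can only be the real creator if it started no later than the segment was created. The guard, restated (illustrative):

	#include <stdio.h>

	/* PID-reuse guard from gr_handle_shmat(): the task found by pid can
	 * only be the segment's creator if it began before the segment did */
	static int could_be_creator(unsigned long task_start, unsigned long shm_create)
	{
		return task_start <= shm_create;
	}

	int main(void)
	{
		printf("%d\n", could_be_creator(100, 200)); /* plausible creator */
		printf("%d\n", could_be_creator(300, 200)); /* pid recycled later */
		return 0;
	}
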
64496diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
64497new file mode 100644
64498index 0000000..bc0be01
64499--- /dev/null
64500+++ b/grsecurity/grsec_chdir.c
64501@@ -0,0 +1,19 @@
64502+#include <linux/kernel.h>
64503+#include <linux/sched.h>
64504+#include <linux/fs.h>
64505+#include <linux/file.h>
64506+#include <linux/grsecurity.h>
64507+#include <linux/grinternal.h>
64508+
64509+void
64510+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
64511+{
64512+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64513+ if ((grsec_enable_chdir && grsec_enable_group &&
64514+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
64515+ !grsec_enable_group)) {
64516+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
64517+ }
64518+#endif
64519+ return;
64520+}
64521diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
64522new file mode 100644
64523index 0000000..6d2de57
64524--- /dev/null
64525+++ b/grsecurity/grsec_chroot.c
64526@@ -0,0 +1,357 @@
64527+#include <linux/kernel.h>
64528+#include <linux/module.h>
64529+#include <linux/sched.h>
64530+#include <linux/file.h>
64531+#include <linux/fs.h>
64532+#include <linux/mount.h>
64533+#include <linux/types.h>
64534+#include "../fs/mount.h"
64535+#include <linux/grsecurity.h>
64536+#include <linux/grinternal.h>
64537+
64538+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
64539+{
64540+#ifdef CONFIG_GRKERNSEC
64541+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
64542+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
64543+ task->gr_is_chrooted = 1;
64544+ else
64545+ task->gr_is_chrooted = 0;
64546+
64547+ task->gr_chroot_dentry = path->dentry;
64548+#endif
64549+ return;
64550+}
64551+
64552+void gr_clear_chroot_entries(struct task_struct *task)
64553+{
64554+#ifdef CONFIG_GRKERNSEC
64555+ task->gr_is_chrooted = 0;
64556+ task->gr_chroot_dentry = NULL;
64557+#endif
64558+ return;
64559+}
64560+
64561+int
64562+gr_handle_chroot_unix(const pid_t pid)
64563+{
64564+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64565+ struct task_struct *p;
64566+
64567+ if (unlikely(!grsec_enable_chroot_unix))
64568+ return 1;
64569+
64570+ if (likely(!proc_is_chrooted(current)))
64571+ return 1;
64572+
64573+ rcu_read_lock();
64574+ read_lock(&tasklist_lock);
64575+ p = find_task_by_vpid_unrestricted(pid);
64576+ if (unlikely(p && !have_same_root(current, p))) {
64577+ read_unlock(&tasklist_lock);
64578+ rcu_read_unlock();
64579+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
64580+ return 0;
64581+ }
64582+ read_unlock(&tasklist_lock);
64583+ rcu_read_unlock();
64584+#endif
64585+ return 1;
64586+}
64587+
64588+int
64589+gr_handle_chroot_nice(void)
64590+{
64591+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64592+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
64593+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
64594+ return -EPERM;
64595+ }
64596+#endif
64597+ return 0;
64598+}
64599+
64600+int
64601+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
64602+{
64603+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64604+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
64605+ && proc_is_chrooted(current)) {
64606+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
64607+ return -EACCES;
64608+ }
64609+#endif
64610+ return 0;
64611+}
64612+
64613+int
64614+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
64615+{
64616+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64617+ struct task_struct *p;
64618+ int ret = 0;
64619+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
64620+ return ret;
64621+
64622+ read_lock(&tasklist_lock);
64623+ do_each_pid_task(pid, type, p) {
64624+ if (!have_same_root(current, p)) {
64625+ ret = 1;
64626+ goto out;
64627+ }
64628+ } while_each_pid_task(pid, type, p);
64629+out:
64630+ read_unlock(&tasklist_lock);
64631+ return ret;
64632+#endif
64633+ return 0;
64634+}
64635+
64636+int
64637+gr_pid_is_chrooted(struct task_struct *p)
64638+{
64639+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64640+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
64641+ return 0;
64642+
64643+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
64644+ !have_same_root(current, p)) {
64645+ return 1;
64646+ }
64647+#endif
64648+ return 0;
64649+}
64650+
64651+EXPORT_SYMBOL(gr_pid_is_chrooted);
64652+
64653+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
64654+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
64655+{
64656+ struct path path, currentroot;
64657+ int ret = 0;
64658+
64659+ path.dentry = (struct dentry *)u_dentry;
64660+ path.mnt = (struct vfsmount *)u_mnt;
64661+ get_fs_root(current->fs, &currentroot);
64662+ if (path_is_under(&path, &currentroot))
64663+ ret = 1;
64664+ path_put(&currentroot);
64665+
64666+ return ret;
64667+}
64668+#endif
64669+
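gr_is_outside_chroot() reduces to path_is_under(): the target path must sit beneath the caller's current root, which is what lets the double-chroot and fchdir checks below refuse escapes. A rough userspace analog built on realpath() (illustrative only; the kernel compares dentries, not strings, so this sketch ignores bind mounts and races):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <limits.h>

	/* rough userspace analog of path_is_under(): 1 if path lies under root */
	static int is_under(const char *path, const char *root)
	{
		char rpath[PATH_MAX], rroot[PATH_MAX];
		size_t len;

		if (!realpath(path, rpath) || !realpath(root, rroot))
			return 0;
		len = strlen(rroot);
		return strncmp(rpath, rroot, len) == 0 &&
		       (rpath[len] == '/' || rpath[len] == '\0' || len == 1);
	}

	int main(void)
	{
		printf("%d\n", is_under("/etc/passwd", "/etc"));	/* 1 */
		printf("%d\n", is_under("/etc/passwd", "/usr"));	/* 0 */
		return 0;
	}
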
64670+int
64671+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
64672+{
64673+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64674+ if (!grsec_enable_chroot_fchdir)
64675+ return 1;
64676+
64677+ if (!proc_is_chrooted(current))
64678+ return 1;
64679+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
64680+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
64681+ return 0;
64682+ }
64683+#endif
64684+ return 1;
64685+}
64686+
64687+int
64688+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64689+ const time_t shm_createtime)
64690+{
64691+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64692+ struct task_struct *p;
64693+ time_t starttime;
64694+
64695+ if (unlikely(!grsec_enable_chroot_shmat))
64696+ return 1;
64697+
64698+ if (likely(!proc_is_chrooted(current)))
64699+ return 1;
64700+
64701+ rcu_read_lock();
64702+ read_lock(&tasklist_lock);
64703+
64704+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
64705+ starttime = p->start_time.tv_sec;
64706+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
64707+ if (have_same_root(current, p)) {
64708+ goto allow;
64709+ } else {
64710+ read_unlock(&tasklist_lock);
64711+ rcu_read_unlock();
64712+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64713+ return 0;
64714+ }
64715+ }
64716+		/* creator exited and the PID was reused; fall through to the next check */
64717+ }
64718+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
64719+ if (unlikely(!have_same_root(current, p))) {
64720+ read_unlock(&tasklist_lock);
64721+ rcu_read_unlock();
64722+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64723+ return 0;
64724+ }
64725+ }
64726+
64727+allow:
64728+ read_unlock(&tasklist_lock);
64729+ rcu_read_unlock();
64730+#endif
64731+ return 1;
64732+}
64733+
64734+void
64735+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
64736+{
64737+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64738+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
64739+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
64740+#endif
64741+ return;
64742+}
64743+
64744+int
64745+gr_handle_chroot_mknod(const struct dentry *dentry,
64746+ const struct vfsmount *mnt, const int mode)
64747+{
64748+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64749+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
64750+ proc_is_chrooted(current)) {
64751+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
64752+ return -EPERM;
64753+ }
64754+#endif
64755+ return 0;
64756+}
64757+
64758+int
64759+gr_handle_chroot_mount(const struct dentry *dentry,
64760+ const struct vfsmount *mnt, const char *dev_name)
64761+{
64762+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64763+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
64764+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
64765+ return -EPERM;
64766+ }
64767+#endif
64768+ return 0;
64769+}
64770+
64771+int
64772+gr_handle_chroot_pivot(void)
64773+{
64774+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64775+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
64776+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
64777+ return -EPERM;
64778+ }
64779+#endif
64780+ return 0;
64781+}
64782+
64783+int
64784+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
64785+{
64786+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64787+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
64788+ !gr_is_outside_chroot(dentry, mnt)) {
64789+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
64790+ return -EPERM;
64791+ }
64792+#endif
64793+ return 0;
64794+}
64795+
64796+extern const char *captab_log[];
64797+extern int captab_log_entries;
64798+
64799+int
64800+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64801+{
64802+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64803+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64804+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64805+ if (cap_raised(chroot_caps, cap)) {
64806+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
64807+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
64808+ }
64809+ return 0;
64810+ }
64811+ }
64812+#endif
64813+ return 1;
64814+}
64815+
64816+int
64817+gr_chroot_is_capable(const int cap)
64818+{
64819+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64820+ return gr_task_chroot_is_capable(current, current_cred(), cap);
64821+#endif
64822+ return 1;
64823+}
64824+
64825+int
64826+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
64827+{
64828+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64829+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64830+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64831+ if (cap_raised(chroot_caps, cap)) {
64832+ return 0;
64833+ }
64834+ }
64835+#endif
64836+ return 1;
64837+}
64838+
64839+int
64840+gr_chroot_is_capable_nolog(const int cap)
64841+{
64842+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64843+ return gr_task_chroot_is_capable_nolog(current, cap);
64844+#endif
64845+ return 1;
64846+}
64847+
64848+int
64849+gr_handle_chroot_sysctl(const int op)
64850+{
64851+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64852+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
64853+ proc_is_chrooted(current))
64854+ return -EACCES;
64855+#endif
64856+ return 0;
64857+}
64858+
64859+void
64860+gr_handle_chroot_chdir(struct path *path)
64861+{
64862+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64863+ if (grsec_enable_chroot_chdir)
64864+ set_fs_pwd(current->fs, path);
64865+#endif
64866+ return;
64867+}
64868+
64869+int
64870+gr_handle_chroot_chmod(const struct dentry *dentry,
64871+ const struct vfsmount *mnt, const int mode)
64872+{
64873+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64874+ /* allow chmod +s on directories, but not files */
64875+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
64876+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
64877+ proc_is_chrooted(current)) {
64878+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
64879+ return -EPERM;
64880+ }
64881+#endif
64882+ return 0;
64883+}
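The chmod handler above blocks creating setuid binaries, or setgid-executable ones, inside a chroot; directories are exempt, and setgid without group-execute denotes mandatory locking rather than privilege, so only the (S_ISGID | S_IXGRP) pair is refused. The mode test, restated (illustrative):

	#include <stdio.h>
	#include <sys/stat.h>

	/* mirrors the mode test in gr_handle_chroot_chmod(): deny a chmod that
	 * would make a non-directory setuid, or setgid-and-group-executable */
	static int chmod_denied_in_chroot(mode_t cur, mode_t req)
	{
		if (S_ISDIR(cur))
			return 0;
		return (req & S_ISUID) ||
		       ((req & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
	}

	int main(void)
	{
		printf("%d\n", chmod_denied_in_chroot(S_IFREG | 0755, 04755)); /* 1 */
		printf("%d\n", chmod_denied_in_chroot(S_IFREG | 0644, 02644)); /* 0: setgid, no group exec */
		printf("%d\n", chmod_denied_in_chroot(S_IFDIR | 0755, 02775)); /* 0: directory */
		return 0;
	}
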
64884diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
64885new file mode 100644
64886index 0000000..207d409
64887--- /dev/null
64888+++ b/grsecurity/grsec_disabled.c
64889@@ -0,0 +1,434 @@
64890+#include <linux/kernel.h>
64891+#include <linux/module.h>
64892+#include <linux/sched.h>
64893+#include <linux/file.h>
64894+#include <linux/fs.h>
64895+#include <linux/kdev_t.h>
64896+#include <linux/net.h>
64897+#include <linux/in.h>
64898+#include <linux/ip.h>
64899+#include <linux/skbuff.h>
64900+#include <linux/sysctl.h>
64901+
64902+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64903+void
64904+pax_set_initial_flags(struct linux_binprm *bprm)
64905+{
64906+ return;
64907+}
64908+#endif
64909+
64910+#ifdef CONFIG_SYSCTL
64911+__u32
64912+gr_handle_sysctl(const struct ctl_table * table, const int op)
64913+{
64914+ return 0;
64915+}
64916+#endif
64917+
64918+#ifdef CONFIG_TASKSTATS
64919+int gr_is_taskstats_denied(int pid)
64920+{
64921+ return 0;
64922+}
64923+#endif
64924+
64925+int
64926+gr_acl_is_enabled(void)
64927+{
64928+ return 0;
64929+}
64930+
64931+void
64932+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
64933+{
64934+ return;
64935+}
64936+
64937+int
64938+gr_handle_rawio(const struct inode *inode)
64939+{
64940+ return 0;
64941+}
64942+
64943+void
64944+gr_acl_handle_psacct(struct task_struct *task, const long code)
64945+{
64946+ return;
64947+}
64948+
64949+int
64950+gr_handle_ptrace(struct task_struct *task, const long request)
64951+{
64952+ return 0;
64953+}
64954+
64955+int
64956+gr_handle_proc_ptrace(struct task_struct *task)
64957+{
64958+ return 0;
64959+}
64960+
64961+int
64962+gr_set_acls(const int type)
64963+{
64964+ return 0;
64965+}
64966+
64967+int
64968+gr_check_hidden_task(const struct task_struct *tsk)
64969+{
64970+ return 0;
64971+}
64972+
64973+int
64974+gr_check_protected_task(const struct task_struct *task)
64975+{
64976+ return 0;
64977+}
64978+
64979+int
64980+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64981+{
64982+ return 0;
64983+}
64984+
64985+void
64986+gr_copy_label(struct task_struct *tsk)
64987+{
64988+ return;
64989+}
64990+
64991+void
64992+gr_set_pax_flags(struct task_struct *task)
64993+{
64994+ return;
64995+}
64996+
64997+int
64998+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64999+ const int unsafe_share)
65000+{
65001+ return 0;
65002+}
65003+
65004+void
65005+gr_handle_delete(const ino_t ino, const dev_t dev)
65006+{
65007+ return;
65008+}
65009+
65010+void
65011+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
65012+{
65013+ return;
65014+}
65015+
65016+void
65017+gr_handle_crash(struct task_struct *task, const int sig)
65018+{
65019+ return;
65020+}
65021+
65022+int
65023+gr_check_crash_exec(const struct file *filp)
65024+{
65025+ return 0;
65026+}
65027+
65028+int
65029+gr_check_crash_uid(const kuid_t uid)
65030+{
65031+ return 0;
65032+}
65033+
65034+void
65035+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
65036+ struct dentry *old_dentry,
65037+ struct dentry *new_dentry,
65038+ struct vfsmount *mnt, const __u8 replace)
65039+{
65040+ return;
65041+}
65042+
65043+int
65044+gr_search_socket(const int family, const int type, const int protocol)
65045+{
65046+ return 1;
65047+}
65048+
65049+int
65050+gr_search_connectbind(const int mode, const struct socket *sock,
65051+ const struct sockaddr_in *addr)
65052+{
65053+ return 0;
65054+}
65055+
65056+void
65057+gr_handle_alertkill(struct task_struct *task)
65058+{
65059+ return;
65060+}
65061+
65062+__u32
65063+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
65064+{
65065+ return 1;
65066+}
65067+
65068+__u32
65069+gr_acl_handle_hidden_file(const struct dentry * dentry,
65070+ const struct vfsmount * mnt)
65071+{
65072+ return 1;
65073+}
65074+
65075+__u32
65076+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
65077+ int acc_mode)
65078+{
65079+ return 1;
65080+}
65081+
65082+__u32
65083+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
65084+{
65085+ return 1;
65086+}
65087+
65088+__u32
65089+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
65090+{
65091+ return 1;
65092+}
65093+
65094+int
65095+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
65096+ unsigned int *vm_flags)
65097+{
65098+ return 1;
65099+}
65100+
65101+__u32
65102+gr_acl_handle_truncate(const struct dentry * dentry,
65103+ const struct vfsmount * mnt)
65104+{
65105+ return 1;
65106+}
65107+
65108+__u32
65109+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
65110+{
65111+ return 1;
65112+}
65113+
65114+__u32
65115+gr_acl_handle_access(const struct dentry * dentry,
65116+ const struct vfsmount * mnt, const int fmode)
65117+{
65118+ return 1;
65119+}
65120+
65121+__u32
65122+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
65123+ umode_t *mode)
65124+{
65125+ return 1;
65126+}
65127+
65128+__u32
65129+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
65130+{
65131+ return 1;
65132+}
65133+
65134+__u32
65135+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
65136+{
65137+ return 1;
65138+}
65139+
65140+void
65141+grsecurity_init(void)
65142+{
65143+ return;
65144+}
65145+
65146+umode_t gr_acl_umask(void)
65147+{
65148+ return 0;
65149+}
65150+
65151+__u32
65152+gr_acl_handle_mknod(const struct dentry * new_dentry,
65153+ const struct dentry * parent_dentry,
65154+ const struct vfsmount * parent_mnt,
65155+ const int mode)
65156+{
65157+ return 1;
65158+}
65159+
65160+__u32
65161+gr_acl_handle_mkdir(const struct dentry * new_dentry,
65162+ const struct dentry * parent_dentry,
65163+ const struct vfsmount * parent_mnt)
65164+{
65165+ return 1;
65166+}
65167+
65168+__u32
65169+gr_acl_handle_symlink(const struct dentry * new_dentry,
65170+ const struct dentry * parent_dentry,
65171+ const struct vfsmount * parent_mnt, const struct filename *from)
65172+{
65173+ return 1;
65174+}
65175+
65176+__u32
65177+gr_acl_handle_link(const struct dentry * new_dentry,
65178+ const struct dentry * parent_dentry,
65179+ const struct vfsmount * parent_mnt,
65180+ const struct dentry * old_dentry,
65181+ const struct vfsmount * old_mnt, const struct filename *to)
65182+{
65183+ return 1;
65184+}
65185+
65186+int
65187+gr_acl_handle_rename(const struct dentry *new_dentry,
65188+ const struct dentry *parent_dentry,
65189+ const struct vfsmount *parent_mnt,
65190+ const struct dentry *old_dentry,
65191+ const struct inode *old_parent_inode,
65192+ const struct vfsmount *old_mnt, const struct filename *newname)
65193+{
65194+ return 0;
65195+}
65196+
65197+int
65198+gr_acl_handle_filldir(const struct file *file, const char *name,
65199+ const int namelen, const ino_t ino)
65200+{
65201+ return 1;
65202+}
65203+
65204+int
65205+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65206+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
65207+{
65208+ return 1;
65209+}
65210+
65211+int
65212+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
65213+{
65214+ return 0;
65215+}
65216+
65217+int
65218+gr_search_accept(const struct socket *sock)
65219+{
65220+ return 0;
65221+}
65222+
65223+int
65224+gr_search_listen(const struct socket *sock)
65225+{
65226+ return 0;
65227+}
65228+
65229+int
65230+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
65231+{
65232+ return 0;
65233+}
65234+
65235+__u32
65236+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
65237+{
65238+ return 1;
65239+}
65240+
65241+__u32
65242+gr_acl_handle_creat(const struct dentry * dentry,
65243+ const struct dentry * p_dentry,
65244+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
65245+ const int imode)
65246+{
65247+ return 1;
65248+}
65249+
65250+void
65251+gr_acl_handle_exit(void)
65252+{
65253+ return;
65254+}
65255+
65256+int
65257+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
65258+{
65259+ return 1;
65260+}
65261+
65262+void
65263+gr_set_role_label(const kuid_t uid, const kgid_t gid)
65264+{
65265+ return;
65266+}
65267+
65268+int
65269+gr_acl_handle_procpidmem(const struct task_struct *task)
65270+{
65271+ return 0;
65272+}
65273+
65274+int
65275+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
65276+{
65277+ return 0;
65278+}
65279+
65280+int
65281+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
65282+{
65283+ return 0;
65284+}
65285+
65286+void
65287+gr_set_kernel_label(struct task_struct *task)
65288+{
65289+ return;
65290+}
65291+
65292+int
65293+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
65294+{
65295+ return 0;
65296+}
65297+
65298+int
65299+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
65300+{
65301+ return 0;
65302+}
65303+
65304+int gr_acl_enable_at_secure(void)
65305+{
65306+ return 0;
65307+}
65308+
65309+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
65310+{
65311+ return dentry->d_inode->i_sb->s_dev;
65312+}
65313+
65314+void gr_put_exec_file(struct task_struct *task)
65315+{
65316+ return;
65317+}
65318+
65319+EXPORT_SYMBOL(gr_set_kernel_label);
65320+#ifdef CONFIG_SECURITY
65321+EXPORT_SYMBOL(gr_check_user_change);
65322+EXPORT_SYMBOL(gr_check_group_change);
65323+#endif
65324diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
65325new file mode 100644
65326index 0000000..387032b
65327--- /dev/null
65328+++ b/grsecurity/grsec_exec.c
65329@@ -0,0 +1,187 @@
65330+#include <linux/kernel.h>
65331+#include <linux/sched.h>
65332+#include <linux/file.h>
65333+#include <linux/binfmts.h>
65334+#include <linux/fs.h>
65335+#include <linux/types.h>
65336+#include <linux/grdefs.h>
65337+#include <linux/grsecurity.h>
65338+#include <linux/grinternal.h>
65339+#include <linux/capability.h>
65340+#include <linux/module.h>
65341+#include <linux/compat.h>
65342+
65343+#include <asm/uaccess.h>
65344+
65345+#ifdef CONFIG_GRKERNSEC_EXECLOG
65346+static char gr_exec_arg_buf[132];
65347+static DEFINE_MUTEX(gr_exec_arg_mutex);
65348+#endif
65349+
65350+struct user_arg_ptr {
65351+#ifdef CONFIG_COMPAT
65352+ bool is_compat;
65353+#endif
65354+ union {
65355+ const char __user *const __user *native;
65356+#ifdef CONFIG_COMPAT
65357+ const compat_uptr_t __user *compat;
65358+#endif
65359+ } ptr;
65360+};
65361+
65362+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
65363+
65364+void
65365+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
65366+{
65367+#ifdef CONFIG_GRKERNSEC_EXECLOG
65368+ char *grarg = gr_exec_arg_buf;
65369+ unsigned int i, x, execlen = 0;
65370+ char c;
65371+
65372+ if (!((grsec_enable_execlog && grsec_enable_group &&
65373+ in_group_p(grsec_audit_gid))
65374+ || (grsec_enable_execlog && !grsec_enable_group)))
65375+ return;
65376+
65377+ mutex_lock(&gr_exec_arg_mutex);
65378+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
65379+
65380+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
65381+ const char __user *p;
65382+ unsigned int len;
65383+
65384+ p = get_user_arg_ptr(argv, i);
65385+ if (IS_ERR(p))
65386+ goto log;
65387+
65388+ len = strnlen_user(p, 128 - execlen);
65389+ if (len > 128 - execlen)
65390+ len = 128 - execlen;
65391+ else if (len > 0)
65392+ len--;
65393+ if (copy_from_user(grarg + execlen, p, len))
65394+ goto log;
65395+
65396+ /* rewrite unprintable characters */
65397+ for (x = 0; x < len; x++) {
65398+ c = *(grarg + execlen + x);
65399+ if (c < 32 || c > 126)
65400+ *(grarg + execlen + x) = ' ';
65401+ }
65402+
65403+ execlen += len;
65404+ *(grarg + execlen) = ' ';
65405+ *(grarg + execlen + 1) = '\0';
65406+ execlen++;
65407+ }
65408+
65409+ log:
65410+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
65411+ bprm->file->f_path.mnt, grarg);
65412+ mutex_unlock(&gr_exec_arg_mutex);
65413+#endif
65414+ return;
65415+}
65416+
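gr_handle_exec_args() flattens argv into a single space-separated line of at most 128 bytes, rewriting unprintable bytes to spaces before logging. A userspace sketch of the same flattening (illustrative; the kernel copies each argument with copy_from_user() under a mutex):

	#include <stdio.h>
	#include <string.h>

	#define LOGLEN 128

	/* userspace sketch of the argv flattening in gr_handle_exec_args() */
	static void flatten_args(char *out, int argc, char **argv)
	{
		unsigned int execlen = 0, x;

		out[0] = '\0';
		for (int i = 0; i < argc && execlen < LOGLEN; i++) {
			unsigned int len = strlen(argv[i]);

			if (len > LOGLEN - execlen)
				len = LOGLEN - execlen;
			memcpy(out + execlen, argv[i], len);
			for (x = 0; x < len; x++)	/* rewrite unprintables */
				if (out[execlen + x] < 32 || out[execlen + x] > 126)
					out[execlen + x] = ' ';
			execlen += len;
			out[execlen++] = ' ';
			out[execlen] = '\0';
		}
	}

	int main(int argc, char **argv)
	{
		char line[LOGLEN + 2];

		flatten_args(line, argc, argv);
		printf("exec: %s\n", line);
		return 0;
	}
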
65417+#ifdef CONFIG_GRKERNSEC
65418+extern int gr_acl_is_capable(const int cap);
65419+extern int gr_acl_is_capable_nolog(const int cap);
65420+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
65421+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
65422+extern int gr_chroot_is_capable(const int cap);
65423+extern int gr_chroot_is_capable_nolog(const int cap);
65424+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
65425+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
65426+#endif
65427+
65428+const char *captab_log[] = {
65429+ "CAP_CHOWN",
65430+ "CAP_DAC_OVERRIDE",
65431+ "CAP_DAC_READ_SEARCH",
65432+ "CAP_FOWNER",
65433+ "CAP_FSETID",
65434+ "CAP_KILL",
65435+ "CAP_SETGID",
65436+ "CAP_SETUID",
65437+ "CAP_SETPCAP",
65438+ "CAP_LINUX_IMMUTABLE",
65439+ "CAP_NET_BIND_SERVICE",
65440+ "CAP_NET_BROADCAST",
65441+ "CAP_NET_ADMIN",
65442+ "CAP_NET_RAW",
65443+ "CAP_IPC_LOCK",
65444+ "CAP_IPC_OWNER",
65445+ "CAP_SYS_MODULE",
65446+ "CAP_SYS_RAWIO",
65447+ "CAP_SYS_CHROOT",
65448+ "CAP_SYS_PTRACE",
65449+ "CAP_SYS_PACCT",
65450+ "CAP_SYS_ADMIN",
65451+ "CAP_SYS_BOOT",
65452+ "CAP_SYS_NICE",
65453+ "CAP_SYS_RESOURCE",
65454+ "CAP_SYS_TIME",
65455+ "CAP_SYS_TTY_CONFIG",
65456+ "CAP_MKNOD",
65457+ "CAP_LEASE",
65458+ "CAP_AUDIT_WRITE",
65459+ "CAP_AUDIT_CONTROL",
65460+ "CAP_SETFCAP",
65461+ "CAP_MAC_OVERRIDE",
65462+ "CAP_MAC_ADMIN",
65463+ "CAP_SYSLOG",
65464+ "CAP_WAKE_ALARM"
65465+};
65466+
65467+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
65468+
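captab_log is indexed directly by capability number, which is why every caller guards with cap < captab_log_entries: capabilities newer than the table's 36 entries simply are not logged by name. The idiom, in miniature (illustrative):

	#include <stdio.h>

	static const char *captab_log[] = { "CAP_CHOWN", "CAP_DAC_OVERRIDE" /* ... */ };
	static const int captab_log_entries =
		sizeof(captab_log) / sizeof(captab_log[0]);

	/* safe lookup: the capability number doubles as the table index */
	static const char *cap_name(int cap)
	{
		return (cap >= 0 && cap < captab_log_entries) ? captab_log[cap] : "CAP_?";
	}

	int main(void)
	{
		printf("%s %s\n", cap_name(1), cap_name(99)); /* CAP_DAC_OVERRIDE CAP_? */
		return 0;
	}
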
65469+int gr_is_capable(const int cap)
65470+{
65471+#ifdef CONFIG_GRKERNSEC
65472+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
65473+ return 1;
65474+ return 0;
65475+#else
65476+ return 1;
65477+#endif
65478+}
65479+
65480+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
65481+{
65482+#ifdef CONFIG_GRKERNSEC
65483+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
65484+ return 1;
65485+ return 0;
65486+#else
65487+ return 1;
65488+#endif
65489+}
65490+
65491+int gr_is_capable_nolog(const int cap)
65492+{
65493+#ifdef CONFIG_GRKERNSEC
65494+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
65495+ return 1;
65496+ return 0;
65497+#else
65498+ return 1;
65499+#endif
65500+}
65501+
65502+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
65503+{
65504+#ifdef CONFIG_GRKERNSEC
65505+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
65506+ return 1;
65507+ return 0;
65508+#else
65509+ return 1;
65510+#endif
65511+}
65512+
65513+EXPORT_SYMBOL(gr_is_capable);
65514+EXPORT_SYMBOL(gr_is_capable_nolog);
65515+EXPORT_SYMBOL(gr_task_is_capable);
65516+EXPORT_SYMBOL(gr_task_is_capable_nolog);
65517diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
65518new file mode 100644
65519index 0000000..06cc6ea
65520--- /dev/null
65521+++ b/grsecurity/grsec_fifo.c
65522@@ -0,0 +1,24 @@
65523+#include <linux/kernel.h>
65524+#include <linux/sched.h>
65525+#include <linux/fs.h>
65526+#include <linux/file.h>
65527+#include <linux/grinternal.h>
65528+
65529+int
65530+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
65531+ const struct dentry *dir, const int flag, const int acc_mode)
65532+{
65533+#ifdef CONFIG_GRKERNSEC_FIFO
65534+ const struct cred *cred = current_cred();
65535+
65536+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
65537+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
65538+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
65539+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
65540+ if (!inode_permission(dentry->d_inode, acc_mode))
65541+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
65542+ return -EACCES;
65543+ }
65544+#endif
65545+ return 0;
65546+}
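gr_handle_fifo() defeats FIFO squatting in sticky world-writable directories such as /tmp: opening a FIFO there is denied unless the opener owns it, the directory owner created it, or O_EXCL is set (meaning the caller is creating the FIFO itself). The ownership test, restated (illustrative):

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/stat.h>

	/* mirrors the test in gr_handle_fifo(): deny opening someone else's
	 * FIFO in a sticky directory that user does not own */
	static int fifo_denied(mode_t fifo_mode, uid_t fifo_uid, mode_t dir_mode,
			       uid_t dir_uid, uid_t opener, int o_excl)
	{
		return S_ISFIFO(fifo_mode) && !o_excl && (dir_mode & S_ISVTX) &&
		       fifo_uid != dir_uid && opener != fifo_uid;
	}

	int main(void)
	{
		/* attacker (uid 1000) pre-creates a FIFO in root-owned sticky
		 * /tmp; root (uid 0) tries to open it: denied */
		printf("%d\n", fifo_denied(S_IFIFO | 0666, 1000, S_IFDIR | 01777, 0, 0, 0));
		/* opening your own FIFO is fine */
		printf("%d\n", fifo_denied(S_IFIFO | 0666, 1000, S_IFDIR | 01777, 0, 1000, 0));
		return 0;
	}
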
65547diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
65548new file mode 100644
65549index 0000000..8ca18bf
65550--- /dev/null
65551+++ b/grsecurity/grsec_fork.c
65552@@ -0,0 +1,23 @@
65553+#include <linux/kernel.h>
65554+#include <linux/sched.h>
65555+#include <linux/grsecurity.h>
65556+#include <linux/grinternal.h>
65557+#include <linux/errno.h>
65558+
65559+void
65560+gr_log_forkfail(const int retval)
65561+{
65562+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65563+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
65564+ switch (retval) {
65565+ case -EAGAIN:
65566+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
65567+ break;
65568+ case -ENOMEM:
65569+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
65570+ break;
65571+ }
65572+ }
65573+#endif
65574+ return;
65575+}
65576diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
65577new file mode 100644
65578index 0000000..a862e9f
65579--- /dev/null
65580+++ b/grsecurity/grsec_init.c
65581@@ -0,0 +1,283 @@
65582+#include <linux/kernel.h>
65583+#include <linux/sched.h>
65584+#include <linux/mm.h>
65585+#include <linux/gracl.h>
65586+#include <linux/slab.h>
65587+#include <linux/vmalloc.h>
65588+#include <linux/percpu.h>
65589+#include <linux/module.h>
65590+
65591+int grsec_enable_ptrace_readexec;
65592+int grsec_enable_setxid;
65593+int grsec_enable_symlinkown;
65594+kgid_t grsec_symlinkown_gid;
65595+int grsec_enable_brute;
65596+int grsec_enable_link;
65597+int grsec_enable_dmesg;
65598+int grsec_enable_harden_ptrace;
65599+int grsec_enable_fifo;
65600+int grsec_enable_execlog;
65601+int grsec_enable_signal;
65602+int grsec_enable_forkfail;
65603+int grsec_enable_audit_ptrace;
65604+int grsec_enable_time;
65605+int grsec_enable_audit_textrel;
65606+int grsec_enable_group;
65607+kgid_t grsec_audit_gid;
65608+int grsec_enable_chdir;
65609+int grsec_enable_mount;
65610+int grsec_enable_rofs;
65611+int grsec_enable_chroot_findtask;
65612+int grsec_enable_chroot_mount;
65613+int grsec_enable_chroot_shmat;
65614+int grsec_enable_chroot_fchdir;
65615+int grsec_enable_chroot_double;
65616+int grsec_enable_chroot_pivot;
65617+int grsec_enable_chroot_chdir;
65618+int grsec_enable_chroot_chmod;
65619+int grsec_enable_chroot_mknod;
65620+int grsec_enable_chroot_nice;
65621+int grsec_enable_chroot_execlog;
65622+int grsec_enable_chroot_caps;
65623+int grsec_enable_chroot_sysctl;
65624+int grsec_enable_chroot_unix;
65625+int grsec_enable_tpe;
65626+kgid_t grsec_tpe_gid;
65627+int grsec_enable_blackhole;
65628+#ifdef CONFIG_IPV6_MODULE
65629+EXPORT_SYMBOL(grsec_enable_blackhole);
65630+#endif
65631+int grsec_lastack_retries;
65632+int grsec_enable_tpe_all;
65633+int grsec_enable_tpe_invert;
65634+int grsec_enable_socket_all;
65635+kgid_t grsec_socket_all_gid;
65636+int grsec_enable_socket_client;
65637+kgid_t grsec_socket_client_gid;
65638+int grsec_enable_socket_server;
65639+kgid_t grsec_socket_server_gid;
65640+int grsec_resource_logging;
65641+int grsec_disable_privio;
65642+int grsec_enable_log_rwxmaps;
65643+int grsec_lock;
65644+
65645+DEFINE_SPINLOCK(grsec_alert_lock);
65646+unsigned long grsec_alert_wtime = 0;
65647+unsigned long grsec_alert_fyet = 0;
65648+
65649+DEFINE_SPINLOCK(grsec_audit_lock);
65650+
65651+DEFINE_RWLOCK(grsec_exec_file_lock);
65652+
65653+char *gr_shared_page[4];
65654+
65655+char *gr_alert_log_fmt;
65656+char *gr_audit_log_fmt;
65657+char *gr_alert_log_buf;
65658+char *gr_audit_log_buf;
65659+
65660+extern struct gr_arg *gr_usermode;
65661+extern unsigned char *gr_system_salt;
65662+extern unsigned char *gr_system_sum;
65663+
65664+void __init
65665+grsecurity_init(void)
65666+{
65667+ int j;
65668+ /* create the per-cpu shared pages */
65669+
65670+#ifdef CONFIG_X86
65671+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
65672+#endif
65673+
65674+ for (j = 0; j < 4; j++) {
65675+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
65676+ if (gr_shared_page[j] == NULL) {
65677+ panic("Unable to allocate grsecurity shared page");
65678+ return;
65679+ }
65680+ }
65681+
65682+ /* allocate log buffers */
65683+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
65684+ if (!gr_alert_log_fmt) {
65685+ panic("Unable to allocate grsecurity alert log format buffer");
65686+ return;
65687+ }
65688+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
65689+ if (!gr_audit_log_fmt) {
65690+ panic("Unable to allocate grsecurity audit log format buffer");
65691+ return;
65692+ }
65693+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65694+ if (!gr_alert_log_buf) {
65695+ panic("Unable to allocate grsecurity alert log buffer");
65696+ return;
65697+ }
65698+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65699+ if (!gr_audit_log_buf) {
65700+ panic("Unable to allocate grsecurity audit log buffer");
65701+ return;
65702+ }
65703+
65704+ /* allocate memory for authentication structure */
65705+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
65706+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
65707+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
65708+
65709+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
65710+ panic("Unable to allocate grsecurity authentication structure");
65711+ return;
65712+ }
65713+
65714+
65715+#ifdef CONFIG_GRKERNSEC_IO
65716+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
65717+ grsec_disable_privio = 1;
65718+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65719+ grsec_disable_privio = 1;
65720+#else
65721+ grsec_disable_privio = 0;
65722+#endif
65723+#endif
65724+
65725+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65726+ /* for backward compatibility, tpe_invert always defaults to on if
65727+ enabled in the kernel
65728+ */
65729+ grsec_enable_tpe_invert = 1;
65730+#endif
65731+
65732+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65733+#ifndef CONFIG_GRKERNSEC_SYSCTL
65734+ grsec_lock = 1;
65735+#endif
65736+
65737+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65738+ grsec_enable_audit_textrel = 1;
65739+#endif
65740+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65741+ grsec_enable_log_rwxmaps = 1;
65742+#endif
65743+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65744+ grsec_enable_group = 1;
65745+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
65746+#endif
65747+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65748+ grsec_enable_ptrace_readexec = 1;
65749+#endif
65750+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65751+ grsec_enable_chdir = 1;
65752+#endif
65753+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65754+ grsec_enable_harden_ptrace = 1;
65755+#endif
65756+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65757+ grsec_enable_mount = 1;
65758+#endif
65759+#ifdef CONFIG_GRKERNSEC_LINK
65760+ grsec_enable_link = 1;
65761+#endif
65762+#ifdef CONFIG_GRKERNSEC_BRUTE
65763+ grsec_enable_brute = 1;
65764+#endif
65765+#ifdef CONFIG_GRKERNSEC_DMESG
65766+ grsec_enable_dmesg = 1;
65767+#endif
65768+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65769+ grsec_enable_blackhole = 1;
65770+ grsec_lastack_retries = 4;
65771+#endif
65772+#ifdef CONFIG_GRKERNSEC_FIFO
65773+ grsec_enable_fifo = 1;
65774+#endif
65775+#ifdef CONFIG_GRKERNSEC_EXECLOG
65776+ grsec_enable_execlog = 1;
65777+#endif
65778+#ifdef CONFIG_GRKERNSEC_SETXID
65779+ grsec_enable_setxid = 1;
65780+#endif
65781+#ifdef CONFIG_GRKERNSEC_SIGNAL
65782+ grsec_enable_signal = 1;
65783+#endif
65784+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65785+ grsec_enable_forkfail = 1;
65786+#endif
65787+#ifdef CONFIG_GRKERNSEC_TIME
65788+ grsec_enable_time = 1;
65789+#endif
65790+#ifdef CONFIG_GRKERNSEC_RESLOG
65791+ grsec_resource_logging = 1;
65792+#endif
65793+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65794+ grsec_enable_chroot_findtask = 1;
65795+#endif
65796+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65797+ grsec_enable_chroot_unix = 1;
65798+#endif
65799+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65800+ grsec_enable_chroot_mount = 1;
65801+#endif
65802+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65803+ grsec_enable_chroot_fchdir = 1;
65804+#endif
65805+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65806+ grsec_enable_chroot_shmat = 1;
65807+#endif
65808+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65809+ grsec_enable_audit_ptrace = 1;
65810+#endif
65811+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65812+ grsec_enable_chroot_double = 1;
65813+#endif
65814+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65815+ grsec_enable_chroot_pivot = 1;
65816+#endif
65817+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65818+ grsec_enable_chroot_chdir = 1;
65819+#endif
65820+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65821+ grsec_enable_chroot_chmod = 1;
65822+#endif
65823+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65824+ grsec_enable_chroot_mknod = 1;
65825+#endif
65826+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65827+ grsec_enable_chroot_nice = 1;
65828+#endif
65829+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65830+ grsec_enable_chroot_execlog = 1;
65831+#endif
65832+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65833+ grsec_enable_chroot_caps = 1;
65834+#endif
65835+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65836+ grsec_enable_chroot_sysctl = 1;
65837+#endif
65838+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65839+ grsec_enable_symlinkown = 1;
65840+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
65841+#endif
65842+#ifdef CONFIG_GRKERNSEC_TPE
65843+ grsec_enable_tpe = 1;
65844+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
65845+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65846+ grsec_enable_tpe_all = 1;
65847+#endif
65848+#endif
65849+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65850+ grsec_enable_socket_all = 1;
65851+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
65852+#endif
65853+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65854+ grsec_enable_socket_client = 1;
65855+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
65856+#endif
65857+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65858+ grsec_enable_socket_server = 1;
65859+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
65860+#endif
65861+#endif
65862+
65863+ return;
65864+}
65865diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
65866new file mode 100644
65867index 0000000..5e05e20
65868--- /dev/null
65869+++ b/grsecurity/grsec_link.c
65870@@ -0,0 +1,58 @@
65871+#include <linux/kernel.h>
65872+#include <linux/sched.h>
65873+#include <linux/fs.h>
65874+#include <linux/file.h>
65875+#include <linux/grinternal.h>
65876+
65877+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
65878+{
65879+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65880+ const struct inode *link_inode = link->dentry->d_inode;
65881+
65882+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
65883+ /* ignore root-owned links, e.g. /proc/self */
65884+ gr_is_global_nonroot(link_inode->i_uid) && target &&
65885+ !uid_eq(link_inode->i_uid, target->i_uid)) {
65886+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
65887+ return 1;
65888+ }
65889+#endif
65890+ return 0;
65891+}
65892+
65893+int
65894+gr_handle_follow_link(const struct inode *parent,
65895+ const struct inode *inode,
65896+ const struct dentry *dentry, const struct vfsmount *mnt)
65897+{
65898+#ifdef CONFIG_GRKERNSEC_LINK
65899+ const struct cred *cred = current_cred();
65900+
65901+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
65902+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
65903+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
65904+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
65905+ return -EACCES;
65906+ }
65907+#endif
65908+ return 0;
65909+}
65910+
65911+int
65912+gr_handle_hardlink(const struct dentry *dentry,
65913+ const struct vfsmount *mnt,
65914+ struct inode *inode, const int mode, const struct filename *to)
65915+{
65916+#ifdef CONFIG_GRKERNSEC_LINK
65917+ const struct cred *cred = current_cred();
65918+
65919+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
65920+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
65921+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
65922+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
65923+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
65924+ return -EPERM;
65925+ }
65926+#endif
65927+ return 0;
65928+}
65929diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
65930new file mode 100644
65931index 0000000..7c06085
65932--- /dev/null
65933+++ b/grsecurity/grsec_log.c
65934@@ -0,0 +1,326 @@
65935+#include <linux/kernel.h>
65936+#include <linux/sched.h>
65937+#include <linux/file.h>
65938+#include <linux/tty.h>
65939+#include <linux/fs.h>
65940+#include <linux/grinternal.h>
65941+
65942+#ifdef CONFIG_TREE_PREEMPT_RCU
65943+#define DISABLE_PREEMPT() preempt_disable()
65944+#define ENABLE_PREEMPT() preempt_enable()
65945+#else
65946+#define DISABLE_PREEMPT()
65947+#define ENABLE_PREEMPT()
65948+#endif
65949+
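+/*
+ * the helpers below format into shared per-type buffers, so the whole
+ * sequence runs under one set of locks: tasklist_lock and
+ * grsec_exec_file_lock keep ->real_parent and ->exec_file stable while
+ * paths are resolved, and the alert/audit spinlocks each serialize
+ * writers of the corresponding shared buffer
+ */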
65950+#define BEGIN_LOCKS(x) \
65951+ DISABLE_PREEMPT(); \
65952+ rcu_read_lock(); \
65953+ read_lock(&tasklist_lock); \
65954+ read_lock(&grsec_exec_file_lock); \
65955+ if (x != GR_DO_AUDIT) \
65956+ spin_lock(&grsec_alert_lock); \
65957+ else \
65958+ spin_lock(&grsec_audit_lock)
65959+
65960+#define END_LOCKS(x) \
65961+ if (x != GR_DO_AUDIT) \
65962+ spin_unlock(&grsec_alert_lock); \
65963+ else \
65964+ spin_unlock(&grsec_audit_lock); \
65965+ read_unlock(&grsec_exec_file_lock); \
65966+ read_unlock(&tasklist_lock); \
65967+ rcu_read_unlock(); \
65968+ ENABLE_PREEMPT(); \
65969+ if (x == GR_DONT_AUDIT) \
65970+ gr_handle_alertkill(current)
65971+
65972+enum {
65973+ FLOODING,
65974+ NO_FLOODING
65975+};
65976+
65977+extern char *gr_alert_log_fmt;
65978+extern char *gr_audit_log_fmt;
65979+extern char *gr_alert_log_buf;
65980+extern char *gr_audit_log_buf;
65981+
65982+static int gr_log_start(int audit)
65983+{
65984+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
65985+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
65986+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65987+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
65988+ unsigned long curr_secs = get_seconds();
65989+
65990+ if (audit == GR_DO_AUDIT)
65991+ goto set_fmt;
65992+
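+	/* fixed-window rate limiter: up to FLOODBURST alerts are logged per
+	 * FLOODTIME-second window; the first alert past the burst emits a
+	 * one-time notice, and everything else is dropped until a new
+	 * window starts
+	 */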
65993+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
65994+ grsec_alert_wtime = curr_secs;
65995+ grsec_alert_fyet = 0;
65996+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
65997+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
65998+ grsec_alert_fyet++;
65999+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
66000+ grsec_alert_wtime = curr_secs;
66001+ grsec_alert_fyet++;
66002+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
66003+ return FLOODING;
66004+ }
66005+ else return FLOODING;
66006+
66007+set_fmt:
66008+#endif
66009+ memset(buf, 0, PAGE_SIZE);
66010+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
66011+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
66012+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
66013+ } else if (current->signal->curr_ip) {
66014+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
66015+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
66016+ } else if (gr_acl_is_enabled()) {
66017+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
66018+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
66019+ } else {
66020+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
66021+ strcpy(buf, fmt);
66022+ }
66023+
66024+ return NO_FLOODING;
66025+}
66026+
66027+static void gr_log_middle(int audit, const char *msg, va_list ap)
66028+ __attribute__ ((format (printf, 2, 0)));
66029+
66030+static void gr_log_middle(int audit, const char *msg, va_list ap)
66031+{
66032+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
66033+ unsigned int len = strlen(buf);
66034+
66035+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
66036+
66037+ return;
66038+}
66039+
66040+static void gr_log_middle_varargs(int audit, const char *msg, ...)
66041+ __attribute__ ((format (printf, 2, 3)));
66042+
66043+static void gr_log_middle_varargs(int audit, const char *msg, ...)
66044+{
66045+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
66046+ unsigned int len = strlen(buf);
66047+ va_list ap;
66048+
66049+ va_start(ap, msg);
66050+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
66051+ va_end(ap);
66052+
66053+ return;
66054+}
66055+
66056+static void gr_log_end(int audit, int append_default)
66057+{
66058+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
66059+ if (append_default) {
66060+ struct task_struct *task = current;
66061+ struct task_struct *parent = task->real_parent;
66062+ const struct cred *cred = __task_cred(task);
66063+ const struct cred *pcred = __task_cred(parent);
66064+ unsigned int len = strlen(buf);
66065+
66066+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
66067+ }
66068+
66069+ printk("%s\n", buf);
66070+
66071+ return;
66072+}
66073+
66074+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
66075+{
66076+ int logtype;
66077+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
66078+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
66079+ void *voidptr = NULL;
66080+ int num1 = 0, num2 = 0;
66081+ unsigned long ulong1 = 0, ulong2 = 0;
66082+ struct dentry *dentry = NULL;
66083+ struct vfsmount *mnt = NULL;
66084+ struct file *file = NULL;
66085+ struct task_struct *task = NULL;
66086+ const struct cred *cred, *pcred;
66087+ va_list ap;
66088+
66089+ BEGIN_LOCKS(audit);
66090+ logtype = gr_log_start(audit);
66091+ if (logtype == FLOODING) {
66092+ END_LOCKS(audit);
66093+ return;
66094+ }
66095+ va_start(ap, argtypes);
66096+ switch (argtypes) {
66097+ case GR_TTYSNIFF:
66098+ task = va_arg(ap, struct task_struct *);
66099+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
66100+ break;
66101+ case GR_SYSCTL_HIDDEN:
66102+ str1 = va_arg(ap, char *);
66103+ gr_log_middle_varargs(audit, msg, result, str1);
66104+ break;
66105+ case GR_RBAC:
66106+ dentry = va_arg(ap, struct dentry *);
66107+ mnt = va_arg(ap, struct vfsmount *);
66108+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
66109+ break;
66110+ case GR_RBAC_STR:
66111+ dentry = va_arg(ap, struct dentry *);
66112+ mnt = va_arg(ap, struct vfsmount *);
66113+ str1 = va_arg(ap, char *);
66114+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
66115+ break;
66116+ case GR_STR_RBAC:
66117+ str1 = va_arg(ap, char *);
66118+ dentry = va_arg(ap, struct dentry *);
66119+ mnt = va_arg(ap, struct vfsmount *);
66120+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
66121+ break;
66122+ case GR_RBAC_MODE2:
66123+ dentry = va_arg(ap, struct dentry *);
66124+ mnt = va_arg(ap, struct vfsmount *);
66125+ str1 = va_arg(ap, char *);
66126+ str2 = va_arg(ap, char *);
66127+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
66128+ break;
66129+ case GR_RBAC_MODE3:
66130+ dentry = va_arg(ap, struct dentry *);
66131+ mnt = va_arg(ap, struct vfsmount *);
66132+ str1 = va_arg(ap, char *);
66133+ str2 = va_arg(ap, char *);
66134+ str3 = va_arg(ap, char *);
66135+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
66136+ break;
66137+ case GR_FILENAME:
66138+ dentry = va_arg(ap, struct dentry *);
66139+ mnt = va_arg(ap, struct vfsmount *);
66140+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
66141+ break;
66142+ case GR_STR_FILENAME:
66143+ str1 = va_arg(ap, char *);
66144+ dentry = va_arg(ap, struct dentry *);
66145+ mnt = va_arg(ap, struct vfsmount *);
66146+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
66147+ break;
66148+ case GR_FILENAME_STR:
66149+ dentry = va_arg(ap, struct dentry *);
66150+ mnt = va_arg(ap, struct vfsmount *);
66151+ str1 = va_arg(ap, char *);
66152+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
66153+ break;
66154+ case GR_FILENAME_TWO_INT:
66155+ dentry = va_arg(ap, struct dentry *);
66156+ mnt = va_arg(ap, struct vfsmount *);
66157+ num1 = va_arg(ap, int);
66158+ num2 = va_arg(ap, int);
66159+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
66160+ break;
66161+ case GR_FILENAME_TWO_INT_STR:
66162+ dentry = va_arg(ap, struct dentry *);
66163+ mnt = va_arg(ap, struct vfsmount *);
66164+ num1 = va_arg(ap, int);
66165+ num2 = va_arg(ap, int);
66166+ str1 = va_arg(ap, char *);
66167+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
66168+ break;
66169+ case GR_TEXTREL:
66170+ file = va_arg(ap, struct file *);
66171+ ulong1 = va_arg(ap, unsigned long);
66172+ ulong2 = va_arg(ap, unsigned long);
66173+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
66174+ break;
66175+ case GR_PTRACE:
66176+ task = va_arg(ap, struct task_struct *);
66177+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
66178+ break;
66179+ case GR_RESOURCE:
66180+ task = va_arg(ap, struct task_struct *);
66181+ cred = __task_cred(task);
66182+ pcred = __task_cred(task->real_parent);
66183+ ulong1 = va_arg(ap, unsigned long);
66184+ str1 = va_arg(ap, char *);
66185+ ulong2 = va_arg(ap, unsigned long);
66186+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
66187+ break;
66188+ case GR_CAP:
66189+ task = va_arg(ap, struct task_struct *);
66190+ cred = __task_cred(task);
66191+ pcred = __task_cred(task->real_parent);
66192+ str1 = va_arg(ap, char *);
66193+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
66194+ break;
66195+ case GR_SIG:
66196+ str1 = va_arg(ap, char *);
66197+ voidptr = va_arg(ap, void *);
66198+ gr_log_middle_varargs(audit, msg, str1, voidptr);
66199+ break;
66200+ case GR_SIG2:
66201+ task = va_arg(ap, struct task_struct *);
66202+ cred = __task_cred(task);
66203+ pcred = __task_cred(task->real_parent);
66204+ num1 = va_arg(ap, int);
66205+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
66206+ break;
66207+ case GR_CRASH1:
66208+ task = va_arg(ap, struct task_struct *);
66209+ cred = __task_cred(task);
66210+ pcred = __task_cred(task->real_parent);
66211+ ulong1 = va_arg(ap, unsigned long);
66212+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
66213+ break;
66214+ case GR_CRASH2:
66215+ task = va_arg(ap, struct task_struct *);
66216+ cred = __task_cred(task);
66217+ pcred = __task_cred(task->real_parent);
66218+ ulong1 = va_arg(ap, unsigned long);
66219+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
66220+ break;
66221+ case GR_RWXMAP:
66222+ file = va_arg(ap, struct file *);
66223+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
66224+ break;
66225+ case GR_PSACCT:
66226+ {
66227+ unsigned int wday, cday;
66228+ __u8 whr, chr;
66229+ __u8 wmin, cmin;
66230+ __u8 wsec, csec;
66231+ char cur_tty[64] = { 0 };
66232+ char parent_tty[64] = { 0 };
66233+
66234+ task = va_arg(ap, struct task_struct *);
66235+ wday = va_arg(ap, unsigned int);
66236+ cday = va_arg(ap, unsigned int);
66237+ whr = va_arg(ap, int);
66238+ chr = va_arg(ap, int);
66239+ wmin = va_arg(ap, int);
66240+ cmin = va_arg(ap, int);
66241+ wsec = va_arg(ap, int);
66242+ csec = va_arg(ap, int);
66243+ ulong1 = va_arg(ap, unsigned long);
66244+ cred = __task_cred(task);
66245+ pcred = __task_cred(task->real_parent);
66246+
66247+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
66248+ }
66249+ break;
66250+ default:
66251+ gr_log_middle(audit, msg, ap);
66252+ }
66253+ va_end(ap);
66254+ // these don't need DEFAULTSECARGS printed on the end
66255+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
66256+ gr_log_end(audit, 0);
66257+ else
66258+ gr_log_end(audit, 1);
66259+ END_LOCKS(audit);
66260+}
66261diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
66262new file mode 100644
66263index 0000000..f536303
66264--- /dev/null
66265+++ b/grsecurity/grsec_mem.c
66266@@ -0,0 +1,40 @@
66267+#include <linux/kernel.h>
66268+#include <linux/sched.h>
66269+#include <linux/mm.h>
66270+#include <linux/mman.h>
66271+#include <linux/grinternal.h>
66272+
66273+void
66274+gr_handle_ioperm(void)
66275+{
66276+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
66277+ return;
66278+}
66279+
66280+void
66281+gr_handle_iopl(void)
66282+{
66283+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
66284+ return;
66285+}
66286+
66287+void
66288+gr_handle_mem_readwrite(u64 from, u64 to)
66289+{
66290+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
66291+ return;
66292+}
66293+
66294+void
66295+gr_handle_vm86(void)
66296+{
66297+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
66298+ return;
66299+}
66300+
66301+void
66302+gr_log_badprocpid(const char *entry)
66303+{
66304+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
66305+ return;
66306+}
66307diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
66308new file mode 100644
66309index 0000000..2131422
66310--- /dev/null
66311+++ b/grsecurity/grsec_mount.c
66312@@ -0,0 +1,62 @@
66313+#include <linux/kernel.h>
66314+#include <linux/sched.h>
66315+#include <linux/mount.h>
66316+#include <linux/grsecurity.h>
66317+#include <linux/grinternal.h>
66318+
66319+void
66320+gr_log_remount(const char *devname, const int retval)
66321+{
66322+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66323+ if (grsec_enable_mount && (retval >= 0))
66324+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
66325+#endif
66326+ return;
66327+}
66328+
66329+void
66330+gr_log_unmount(const char *devname, const int retval)
66331+{
66332+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66333+ if (grsec_enable_mount && (retval >= 0))
66334+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
66335+#endif
66336+ return;
66337+}
66338+
66339+void
66340+gr_log_mount(const char *from, const char *to, const int retval)
66341+{
66342+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66343+ if (grsec_enable_mount && (retval >= 0))
66344+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
66345+#endif
66346+ return;
66347+}
66348+
66349+int
66350+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
66351+{
66352+#ifdef CONFIG_GRKERNSEC_ROFS
66353+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
66354+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
66355+ return -EPERM;
66356+ } else
66357+ return 0;
66358+#endif
66359+ return 0;
66360+}
66361+
66362+int
66363+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
66364+{
66365+#ifdef CONFIG_GRKERNSEC_ROFS
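+	/* while romount_protect is active, also refuse writes to block
+	 * device nodes so a read-only mount can't be bypassed through the
+	 * underlying device
+	 */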
66366+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
66367+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
66368+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
66369+ return -EPERM;
66370+ } else
66371+ return 0;
66372+#endif
66373+ return 0;
66374+}
66375diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
66376new file mode 100644
66377index 0000000..a3b12a0
66378--- /dev/null
66379+++ b/grsecurity/grsec_pax.c
66380@@ -0,0 +1,36 @@
66381+#include <linux/kernel.h>
66382+#include <linux/sched.h>
66383+#include <linux/mm.h>
66384+#include <linux/file.h>
66385+#include <linux/grinternal.h>
66386+#include <linux/grsecurity.h>
66387+
66388+void
66389+gr_log_textrel(struct vm_area_struct * vma)
66390+{
66391+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66392+ if (grsec_enable_audit_textrel)
66393+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
66394+#endif
66395+ return;
66396+}
66397+
66398+void
66399+gr_log_rwxmmap(struct file *file)
66400+{
66401+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66402+ if (grsec_enable_log_rwxmaps)
66403+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
66404+#endif
66405+ return;
66406+}
66407+
66408+void
66409+gr_log_rwxmprotect(struct file *file)
66410+{
66411+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66412+ if (grsec_enable_log_rwxmaps)
66413+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
66414+#endif
66415+ return;
66416+}
66417diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
66418new file mode 100644
66419index 0000000..f7f29aa
66420--- /dev/null
66421+++ b/grsecurity/grsec_ptrace.c
66422@@ -0,0 +1,30 @@
66423+#include <linux/kernel.h>
66424+#include <linux/sched.h>
66425+#include <linux/grinternal.h>
66426+#include <linux/security.h>
66427+
66428+void
66429+gr_audit_ptrace(struct task_struct *task)
66430+{
66431+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66432+ if (grsec_enable_audit_ptrace)
66433+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
66434+#endif
66435+ return;
66436+}
66437+
66438+int
66439+gr_ptrace_readexec(struct file *file, int unsafe_flags)
66440+{
66441+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66442+ const struct dentry *dentry = file->f_path.dentry;
66443+ const struct vfsmount *mnt = file->f_path.mnt;
66444+
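+	/* an unsafe (ptraced) exec of a binary the tracer can't open for
+	 * reading would let it dump an otherwise unreadable executable, so
+	 * deny it
+	 */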
66445+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
66446+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
66447+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
66448+ return -EACCES;
66449+ }
66450+#endif
66451+ return 0;
66452+}
66453diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
66454new file mode 100644
66455index 0000000..e09715a
66456--- /dev/null
66457+++ b/grsecurity/grsec_sig.c
66458@@ -0,0 +1,222 @@
66459+#include <linux/kernel.h>
66460+#include <linux/sched.h>
66461+#include <linux/delay.h>
66462+#include <linux/grsecurity.h>
66463+#include <linux/grinternal.h>
66464+#include <linux/hardirq.h>
66465+
66466+char *signames[] = {
66467+ [SIGSEGV] = "Segmentation fault",
66468+ [SIGILL] = "Illegal instruction",
66469+ [SIGABRT] = "Abort",
66470+ [SIGBUS] = "Invalid alignment/Bus error"
66471+};
66472+
66473+void
66474+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
66475+{
66476+#ifdef CONFIG_GRKERNSEC_SIGNAL
66477+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
66478+ (sig == SIGABRT) || (sig == SIGBUS))) {
66479+ if (task_pid_nr(t) == task_pid_nr(current)) {
66480+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
66481+ } else {
66482+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
66483+ }
66484+ }
66485+#endif
66486+ return;
66487+}
66488+
66489+int
66490+gr_handle_signal(const struct task_struct *p, const int sig)
66491+{
66492+#ifdef CONFIG_GRKERNSEC
66493+ /* ignore the 0 signal for protected task checks */
66494+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
66495+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
66496+ return -EPERM;
66497+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
66498+ return -EPERM;
66499+ }
66500+#endif
66501+ return 0;
66502+}
66503+
66504+#ifdef CONFIG_GRKERNSEC
66505+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
66506+
66507+int gr_fake_force_sig(int sig, struct task_struct *t)
66508+{
66509+	unsigned long flags;
66510+ int ret, blocked, ignored;
66511+ struct k_sigaction *action;
66512+
66513+ spin_lock_irqsave(&t->sighand->siglock, flags);
66514+ action = &t->sighand->action[sig-1];
66515+ ignored = action->sa.sa_handler == SIG_IGN;
66516+ blocked = sigismember(&t->blocked, sig);
66517+ if (blocked || ignored) {
66518+ action->sa.sa_handler = SIG_DFL;
66519+ if (blocked) {
66520+ sigdelset(&t->blocked, sig);
66521+ recalc_sigpending_and_wake(t);
66522+ }
66523+ }
66524+ if (action->sa.sa_handler == SIG_DFL)
66525+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
66526+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
66527+
66528+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
66529+
66530+ return ret;
66531+}
66532+#endif
66533+
66534+#ifdef CONFIG_GRKERNSEC_BRUTE
66535+#define GR_USER_BAN_TIME (15 * 60)
66536+#define GR_DAEMON_BRUTE_TIME (30 * 60)
66537+
66538+static int __get_dumpable(unsigned long mm_flags)
66539+{
66540+ int ret;
66541+
66542+ ret = mm_flags & MMF_DUMPABLE_MASK;
66543+ return (ret >= 2) ? 2 : ret;
66544+}
66545+#endif
66546+
66547+void gr_handle_brute_attach(unsigned long mm_flags)
66548+{
66549+#ifdef CONFIG_GRKERNSEC_BRUTE
66550+ struct task_struct *p = current;
66551+ kuid_t uid = GLOBAL_ROOT_UID;
66552+ int daemon = 0;
66553+
66554+ if (!grsec_enable_brute)
66555+ return;
66556+
66557+ rcu_read_lock();
66558+ read_lock(&tasklist_lock);
66559+ read_lock(&grsec_exec_file_lock);
66560+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
66561+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
66562+ p->real_parent->brute = 1;
66563+ daemon = 1;
66564+ } else {
66565+ const struct cred *cred = __task_cred(p), *cred2;
66566+ struct task_struct *tsk, *tsk2;
66567+
66568+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
66569+ struct user_struct *user;
66570+
66571+ uid = cred->uid;
66572+
66573+			/* the reference find_user() takes below is put in __gr_process_user_ban() once the ban expires */
66574+ user = find_user(uid);
66575+ if (user == NULL)
66576+ goto unlock;
66577+ user->banned = 1;
66578+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
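+			/* ~0UL is reserved for permanent bans */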
66579+ if (user->ban_expires == ~0UL)
66580+ user->ban_expires--;
66581+
66582+ do_each_thread(tsk2, tsk) {
66583+ cred2 = __task_cred(tsk);
66584+ if (tsk != p && uid_eq(cred2->uid, uid))
66585+ gr_fake_force_sig(SIGKILL, tsk);
66586+ } while_each_thread(tsk2, tsk);
66587+ }
66588+ }
66589+unlock:
66590+ read_unlock(&grsec_exec_file_lock);
66591+ read_unlock(&tasklist_lock);
66592+ rcu_read_unlock();
66593+
66594+ if (gr_is_global_nonroot(uid))
66595+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
66596+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
66597+ else if (daemon)
66598+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
66599+
66600+#endif
66601+ return;
66602+}
66603+
66604+void gr_handle_brute_check(void)
66605+{
66606+#ifdef CONFIG_GRKERNSEC_BRUTE
66607+ struct task_struct *p = current;
66608+
66609+ if (unlikely(p->brute)) {
66610+ if (!grsec_enable_brute)
66611+ p->brute = 0;
66612+ else if (time_before(get_seconds(), p->brute_expires))
66613+ msleep(30 * 1000);
66614+ }
66615+#endif
66616+ return;
66617+}
66618+
66619+void gr_handle_kernel_exploit(void)
66620+{
66621+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66622+ const struct cred *cred;
66623+ struct task_struct *tsk, *tsk2;
66624+ struct user_struct *user;
66625+ kuid_t uid;
66626+
66627+ if (in_irq() || in_serving_softirq() || in_nmi())
66628+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
66629+
66630+ uid = current_uid();
66631+
66632+ if (gr_is_global_root(uid))
66633+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
66634+ else {
66635+		/* kill all of this user's processes, hold a reference to
66636+		 * their user_struct, and prevent them from creating another
66637+		 * process until system reset
66638+		 */
66639+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
66640+ GR_GLOBAL_UID(uid));
66641+ /* we intentionally leak this ref */
66642+ user = get_uid(current->cred->user);
66643+ if (user) {
66644+ user->banned = 1;
66645+ user->ban_expires = ~0UL;
66646+ }
66647+
66648+ read_lock(&tasklist_lock);
66649+ do_each_thread(tsk2, tsk) {
66650+ cred = __task_cred(tsk);
66651+ if (uid_eq(cred->uid, uid))
66652+ gr_fake_force_sig(SIGKILL, tsk);
66653+ } while_each_thread(tsk2, tsk);
66654+ read_unlock(&tasklist_lock);
66655+ }
66656+#endif
66657+}
66658+
66659+int __gr_process_user_ban(struct user_struct *user)
66660+{
66661+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66662+ if (unlikely(user->banned)) {
66663+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
66664+ user->banned = 0;
66665+ user->ban_expires = 0;
66666+ free_uid(user);
66667+ } else
66668+ return -EPERM;
66669+ }
66670+#endif
66671+ return 0;
66672+}
66673+
66674+int gr_process_user_ban(void)
66675+{
66676+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66677+ return __gr_process_user_ban(current->cred->user);
66678+#endif
66679+ return 0;
66680+}
66681diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
66682new file mode 100644
66683index 0000000..4030d57
66684--- /dev/null
66685+++ b/grsecurity/grsec_sock.c
66686@@ -0,0 +1,244 @@
66687+#include <linux/kernel.h>
66688+#include <linux/module.h>
66689+#include <linux/sched.h>
66690+#include <linux/file.h>
66691+#include <linux/net.h>
66692+#include <linux/in.h>
66693+#include <linux/ip.h>
66694+#include <net/sock.h>
66695+#include <net/inet_sock.h>
66696+#include <linux/grsecurity.h>
66697+#include <linux/grinternal.h>
66698+#include <linux/gracl.h>
66699+
66700+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
66701+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
66702+
66703+EXPORT_SYMBOL(gr_search_udp_recvmsg);
66704+EXPORT_SYMBOL(gr_search_udp_sendmsg);
66705+
66706+#ifdef CONFIG_UNIX_MODULE
66707+EXPORT_SYMBOL(gr_acl_handle_unix);
66708+EXPORT_SYMBOL(gr_acl_handle_mknod);
66709+EXPORT_SYMBOL(gr_handle_chroot_unix);
66710+EXPORT_SYMBOL(gr_handle_create);
66711+#endif
66712+
66713+#ifdef CONFIG_GRKERNSEC
66714+#define gr_conn_table_size 32749
66715+struct conn_table_entry {
66716+ struct conn_table_entry *next;
66717+ struct signal_struct *sig;
66718+};
66719+
66720+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
66721+DEFINE_SPINLOCK(gr_conn_table_lock);
66722+
66723+extern const char * gr_socktype_to_name(unsigned char type);
66724+extern const char * gr_proto_to_name(unsigned char proto);
66725+extern const char * gr_sockfamily_to_name(unsigned char family);
66726+
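+/* hash the connection 4-tuple into the table; the table size 32749 is
+ * prime, so the modulo spreads addresses and ports fairly evenly
+ */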
66727+static __inline__ int
66728+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
66729+{
66730+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
66731+}
66732+
66733+static __inline__ int
66734+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
66735+ __u16 sport, __u16 dport)
66736+{
66737+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
66738+ sig->gr_sport == sport && sig->gr_dport == dport))
66739+ return 1;
66740+ else
66741+ return 0;
66742+}
66743+
66744+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
66745+{
66746+ struct conn_table_entry **match;
66747+ unsigned int index;
66748+
66749+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66750+ sig->gr_sport, sig->gr_dport,
66751+ gr_conn_table_size);
66752+
66753+ newent->sig = sig;
66754+
66755+ match = &gr_conn_table[index];
66756+ newent->next = *match;
66757+ *match = newent;
66758+
66759+ return;
66760+}
66761+
66762+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
66763+{
66764+ struct conn_table_entry *match, *last = NULL;
66765+ unsigned int index;
66766+
66767+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66768+ sig->gr_sport, sig->gr_dport,
66769+ gr_conn_table_size);
66770+
66771+ match = gr_conn_table[index];
66772+ while (match && !conn_match(match->sig,
66773+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
66774+ sig->gr_dport)) {
66775+ last = match;
66776+ match = match->next;
66777+ }
66778+
66779+ if (match) {
66780+ if (last)
66781+ last->next = match->next;
66782+ else
66783+			gr_conn_table[index] = match->next;
66784+ kfree(match);
66785+ }
66786+
66787+ return;
66788+}
66789+
66790+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
66791+ __u16 sport, __u16 dport)
66792+{
66793+ struct conn_table_entry *match;
66794+ unsigned int index;
66795+
66796+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
66797+
66798+ match = gr_conn_table[index];
66799+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
66800+ match = match->next;
66801+
66802+ if (match)
66803+ return match->sig;
66804+ else
66805+ return NULL;
66806+}
66807+
66808+#endif
66809+
66810+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
66811+{
66812+#ifdef CONFIG_GRKERNSEC
66813+ struct signal_struct *sig = task->signal;
66814+ struct conn_table_entry *newent;
66815+
66816+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
66817+ if (newent == NULL)
66818+ return;
66819+ /* no bh lock needed since we are called with bh disabled */
66820+ spin_lock(&gr_conn_table_lock);
66821+ gr_del_task_from_ip_table_nolock(sig);
66822+ sig->gr_saddr = inet->inet_rcv_saddr;
66823+ sig->gr_daddr = inet->inet_daddr;
66824+ sig->gr_sport = inet->inet_sport;
66825+ sig->gr_dport = inet->inet_dport;
66826+ gr_add_to_task_ip_table_nolock(sig, newent);
66827+ spin_unlock(&gr_conn_table_lock);
66828+#endif
66829+ return;
66830+}
66831+
66832+void gr_del_task_from_ip_table(struct task_struct *task)
66833+{
66834+#ifdef CONFIG_GRKERNSEC
66835+ spin_lock_bh(&gr_conn_table_lock);
66836+ gr_del_task_from_ip_table_nolock(task->signal);
66837+ spin_unlock_bh(&gr_conn_table_lock);
66838+#endif
66839+ return;
66840+}
66841+
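+/* called on accept(): record the peer's address in ->signal->curr_ip so
+ * later log lines can carry a "From <ip>:" prefix; if the peer is a
+ * local task found in the connection table, inherit its recorded
+ * curr_ip instead
+ */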
66842+void
66843+gr_attach_curr_ip(const struct sock *sk)
66844+{
66845+#ifdef CONFIG_GRKERNSEC
66846+ struct signal_struct *p, *set;
66847+ const struct inet_sock *inet = inet_sk(sk);
66848+
66849+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
66850+ return;
66851+
66852+ set = current->signal;
66853+
66854+ spin_lock_bh(&gr_conn_table_lock);
66855+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
66856+ inet->inet_dport, inet->inet_sport);
66857+ if (unlikely(p != NULL)) {
66858+ set->curr_ip = p->curr_ip;
66859+ set->used_accept = 1;
66860+ gr_del_task_from_ip_table_nolock(p);
66861+ spin_unlock_bh(&gr_conn_table_lock);
66862+ return;
66863+ }
66864+ spin_unlock_bh(&gr_conn_table_lock);
66865+
66866+ set->curr_ip = inet->inet_daddr;
66867+ set->used_accept = 1;
66868+#endif
66869+ return;
66870+}
66871+
66872+int
66873+gr_handle_sock_all(const int family, const int type, const int protocol)
66874+{
66875+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66876+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
66877+ (family != AF_UNIX)) {
66878+ if (family == AF_INET)
66879+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
66880+ else
66881+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
66882+ return -EACCES;
66883+ }
66884+#endif
66885+ return 0;
66886+}
66887+
66888+int
66889+gr_handle_sock_server(const struct sockaddr *sck)
66890+{
66891+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66892+ if (grsec_enable_socket_server &&
66893+ in_group_p(grsec_socket_server_gid) &&
66894+ sck && (sck->sa_family != AF_UNIX) &&
66895+ (sck->sa_family != AF_LOCAL)) {
66896+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66897+ return -EACCES;
66898+ }
66899+#endif
66900+ return 0;
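+	/* don't follow a symlink sitting in a world-writable sticky
+	 * directory (e.g. /tmp) unless the link is owned by the directory
+	 * owner or by the process following it
+	 */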
66901+}
66902+
66903+int
66904+gr_handle_sock_server_other(const struct sock *sck)
66905+{
66906+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66907+ if (grsec_enable_socket_server &&
66908+ in_group_p(grsec_socket_server_gid) &&
66909+ sck && (sck->sk_family != AF_UNIX) &&
66910+ (sck->sk_family != AF_LOCAL)) {
66911+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66912+ return -EACCES;
66913+ }
66914+#endif
66915+ return 0;
66916+}
66917+
66918+int
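+	/* an unprivileged user may only hardlink regular files he owns or
+	 * can already read and write; privileged (suid/sgid) binaries are
+	 * always off-limits
+	 */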
66919+gr_handle_sock_client(const struct sockaddr *sck)
66920+{
66921+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66922+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
66923+ sck && (sck->sa_family != AF_UNIX) &&
66924+ (sck->sa_family != AF_LOCAL)) {
66925+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
66926+ return -EACCES;
66927+ }
66928+#endif
66929+ return 0;
66930+}
66931diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
66932new file mode 100644
66933index 0000000..f55ef0f
66934--- /dev/null
66935+++ b/grsecurity/grsec_sysctl.c
66936@@ -0,0 +1,469 @@
66937+#include <linux/kernel.h>
66938+#include <linux/sched.h>
66939+#include <linux/sysctl.h>
66940+#include <linux/grsecurity.h>
66941+#include <linux/grinternal.h>
66942+
66943+int
66944+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
66945+{
66946+#ifdef CONFIG_GRKERNSEC_SYSCTL
66947+ if (dirname == NULL || name == NULL)
66948+ return 0;
66949+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
66950+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
66951+ return -EACCES;
66952+ }
66953+#endif
66954+ return 0;
66955+}
66956+
66957+#ifdef CONFIG_GRKERNSEC_ROFS
66958+static int __maybe_unused one = 1;
66959+#endif
66960+
66961+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
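+/* every knob below is root-only (mode 0600); once grsec_lock is set to
+ * a nonzero value, gr_handle_sysctl_mod() above rejects further writes
+ * to any entry under the grsecurity directory
+ */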
66962+struct ctl_table grsecurity_table[] = {
66963+#ifdef CONFIG_GRKERNSEC_SYSCTL
66964+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
66965+#ifdef CONFIG_GRKERNSEC_IO
66966+ {
66967+ .procname = "disable_priv_io",
66968+ .data = &grsec_disable_privio,
66969+ .maxlen = sizeof(int),
66970+ .mode = 0600,
66971+ .proc_handler = &proc_dointvec,
66972+ },
66973+#endif
66974+#endif
66975+#ifdef CONFIG_GRKERNSEC_LINK
66976+ {
66977+ .procname = "linking_restrictions",
66978+ .data = &grsec_enable_link,
66979+ .maxlen = sizeof(int),
66980+ .mode = 0600,
66981+ .proc_handler = &proc_dointvec,
66982+ },
66983+#endif
66984+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
66985+ {
66986+ .procname = "enforce_symlinksifowner",
66987+ .data = &grsec_enable_symlinkown,
66988+ .maxlen = sizeof(int),
66989+ .mode = 0600,
66990+ .proc_handler = &proc_dointvec,
66991+ },
66992+ {
66993+ .procname = "symlinkown_gid",
66994+ .data = &grsec_symlinkown_gid,
66995+ .maxlen = sizeof(int),
66996+ .mode = 0600,
66997+ .proc_handler = &proc_dointvec,
66998+ },
66999+#endif
67000+#ifdef CONFIG_GRKERNSEC_BRUTE
67001+ {
67002+ .procname = "deter_bruteforce",
67003+ .data = &grsec_enable_brute,
67004+ .maxlen = sizeof(int),
67005+ .mode = 0600,
67006+ .proc_handler = &proc_dointvec,
67007+ },
67008+#endif
67009+#ifdef CONFIG_GRKERNSEC_FIFO
67010+ {
67011+ .procname = "fifo_restrictions",
67012+ .data = &grsec_enable_fifo,
67013+ .maxlen = sizeof(int),
67014+ .mode = 0600,
67015+ .proc_handler = &proc_dointvec,
67016+ },
67017+#endif
67018+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
67019+ {
67020+ .procname = "ptrace_readexec",
67021+ .data = &grsec_enable_ptrace_readexec,
67022+ .maxlen = sizeof(int),
67023+ .mode = 0600,
67024+ .proc_handler = &proc_dointvec,
67025+ },
67026+#endif
67027+#ifdef CONFIG_GRKERNSEC_SETXID
67028+ {
67029+ .procname = "consistent_setxid",
67030+ .data = &grsec_enable_setxid,
67031+ .maxlen = sizeof(int),
67032+ .mode = 0600,
67033+ .proc_handler = &proc_dointvec,
67034+ },
67035+#endif
67036+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67037+ {
67038+ .procname = "ip_blackhole",
67039+ .data = &grsec_enable_blackhole,
67040+ .maxlen = sizeof(int),
67041+ .mode = 0600,
67042+ .proc_handler = &proc_dointvec,
67043+ },
67044+ {
67045+ .procname = "lastack_retries",
67046+ .data = &grsec_lastack_retries,
67047+ .maxlen = sizeof(int),
67048+ .mode = 0600,
67049+ .proc_handler = &proc_dointvec,
67050+ },
67051+#endif
67052+#ifdef CONFIG_GRKERNSEC_EXECLOG
67053+ {
67054+ .procname = "exec_logging",
67055+ .data = &grsec_enable_execlog,
67056+ .maxlen = sizeof(int),
67057+ .mode = 0600,
67058+ .proc_handler = &proc_dointvec,
67059+ },
67060+#endif
67061+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
67062+ {
67063+ .procname = "rwxmap_logging",
67064+ .data = &grsec_enable_log_rwxmaps,
67065+ .maxlen = sizeof(int),
67066+ .mode = 0600,
67067+ .proc_handler = &proc_dointvec,
67068+ },
67069+#endif
67070+#ifdef CONFIG_GRKERNSEC_SIGNAL
67071+ {
67072+ .procname = "signal_logging",
67073+ .data = &grsec_enable_signal,
67074+ .maxlen = sizeof(int),
67075+ .mode = 0600,
67076+ .proc_handler = &proc_dointvec,
67077+ },
67078+#endif
67079+#ifdef CONFIG_GRKERNSEC_FORKFAIL
67080+ {
67081+ .procname = "forkfail_logging",
67082+ .data = &grsec_enable_forkfail,
67083+ .maxlen = sizeof(int),
67084+ .mode = 0600,
67085+ .proc_handler = &proc_dointvec,
67086+ },
67087+#endif
67088+#ifdef CONFIG_GRKERNSEC_TIME
67089+ {
67090+ .procname = "timechange_logging",
67091+ .data = &grsec_enable_time,
67092+ .maxlen = sizeof(int),
67093+ .mode = 0600,
67094+ .proc_handler = &proc_dointvec,
67095+ },
67096+#endif
67097+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
67098+ {
67099+ .procname = "chroot_deny_shmat",
67100+ .data = &grsec_enable_chroot_shmat,
67101+ .maxlen = sizeof(int),
67102+ .mode = 0600,
67103+ .proc_handler = &proc_dointvec,
67104+ },
67105+#endif
67106+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
67107+ {
67108+ .procname = "chroot_deny_unix",
67109+ .data = &grsec_enable_chroot_unix,
67110+ .maxlen = sizeof(int),
67111+ .mode = 0600,
67112+ .proc_handler = &proc_dointvec,
67113+ },
67114+#endif
67115+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
67116+ {
67117+ .procname = "chroot_deny_mount",
67118+ .data = &grsec_enable_chroot_mount,
67119+ .maxlen = sizeof(int),
67120+ .mode = 0600,
67121+ .proc_handler = &proc_dointvec,
67122+ },
67123+#endif
67124+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
67125+ {
67126+ .procname = "chroot_deny_fchdir",
67127+ .data = &grsec_enable_chroot_fchdir,
67128+ .maxlen = sizeof(int),
67129+ .mode = 0600,
67130+ .proc_handler = &proc_dointvec,
67131+ },
67132+#endif
67133+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
67134+ {
67135+ .procname = "chroot_deny_chroot",
67136+ .data = &grsec_enable_chroot_double,
67137+ .maxlen = sizeof(int),
67138+ .mode = 0600,
67139+ .proc_handler = &proc_dointvec,
67140+ },
67141+#endif
67142+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
67143+ {
67144+ .procname = "chroot_deny_pivot",
67145+ .data = &grsec_enable_chroot_pivot,
67146+ .maxlen = sizeof(int),
67147+ .mode = 0600,
67148+ .proc_handler = &proc_dointvec,
67149+ },
67150+#endif
67151+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
67152+ {
67153+ .procname = "chroot_enforce_chdir",
67154+ .data = &grsec_enable_chroot_chdir,
67155+ .maxlen = sizeof(int),
67156+ .mode = 0600,
67157+ .proc_handler = &proc_dointvec,
67158+ },
67159+#endif
67160+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
67161+ {
67162+ .procname = "chroot_deny_chmod",
67163+ .data = &grsec_enable_chroot_chmod,
67164+ .maxlen = sizeof(int),
67165+ .mode = 0600,
67166+ .proc_handler = &proc_dointvec,
67167+ },
67168+#endif
67169+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
67170+ {
67171+ .procname = "chroot_deny_mknod",
67172+ .data = &grsec_enable_chroot_mknod,
67173+ .maxlen = sizeof(int),
67174+ .mode = 0600,
67175+ .proc_handler = &proc_dointvec,
67176+ },
67177+#endif
67178+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
67179+ {
67180+ .procname = "chroot_restrict_nice",
67181+ .data = &grsec_enable_chroot_nice,
67182+ .maxlen = sizeof(int),
67183+ .mode = 0600,
67184+ .proc_handler = &proc_dointvec,
67185+ },
67186+#endif
67187+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
67188+ {
67189+ .procname = "chroot_execlog",
67190+ .data = &grsec_enable_chroot_execlog,
67191+ .maxlen = sizeof(int),
67192+ .mode = 0600,
67193+ .proc_handler = &proc_dointvec,
67194+ },
67195+#endif
67196+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67197+ {
67198+ .procname = "chroot_caps",
67199+ .data = &grsec_enable_chroot_caps,
67200+ .maxlen = sizeof(int),
67201+ .mode = 0600,
67202+ .proc_handler = &proc_dointvec,
67203+ },
67204+#endif
67205+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
67206+ {
67207+ .procname = "chroot_deny_sysctl",
67208+ .data = &grsec_enable_chroot_sysctl,
67209+ .maxlen = sizeof(int),
67210+ .mode = 0600,
67211+ .proc_handler = &proc_dointvec,
67212+ },
67213+#endif
67214+#ifdef CONFIG_GRKERNSEC_TPE
67215+ {
67216+ .procname = "tpe",
67217+ .data = &grsec_enable_tpe,
67218+ .maxlen = sizeof(int),
67219+ .mode = 0600,
67220+ .proc_handler = &proc_dointvec,
67221+ },
67222+ {
67223+ .procname = "tpe_gid",
67224+ .data = &grsec_tpe_gid,
67225+ .maxlen = sizeof(int),
67226+ .mode = 0600,
67227+ .proc_handler = &proc_dointvec,
67228+ },
67229+#endif
67230+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
67231+ {
67232+ .procname = "tpe_invert",
67233+ .data = &grsec_enable_tpe_invert,
67234+ .maxlen = sizeof(int),
67235+ .mode = 0600,
67236+ .proc_handler = &proc_dointvec,
67237+ },
67238+#endif
67239+#ifdef CONFIG_GRKERNSEC_TPE_ALL
67240+ {
67241+ .procname = "tpe_restrict_all",
67242+ .data = &grsec_enable_tpe_all,
67243+ .maxlen = sizeof(int),
67244+ .mode = 0600,
67245+ .proc_handler = &proc_dointvec,
67246+ },
67247+#endif
67248+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
67249+ {
67250+ .procname = "socket_all",
67251+ .data = &grsec_enable_socket_all,
67252+ .maxlen = sizeof(int),
67253+ .mode = 0600,
67254+ .proc_handler = &proc_dointvec,
67255+ },
67256+ {
67257+ .procname = "socket_all_gid",
67258+ .data = &grsec_socket_all_gid,
67259+ .maxlen = sizeof(int),
67260+ .mode = 0600,
67261+ .proc_handler = &proc_dointvec,
67262+ },
67263+#endif
67264+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
67265+ {
67266+ .procname = "socket_client",
67267+ .data = &grsec_enable_socket_client,
67268+ .maxlen = sizeof(int),
67269+ .mode = 0600,
67270+ .proc_handler = &proc_dointvec,
67271+ },
67272+ {
67273+ .procname = "socket_client_gid",
67274+ .data = &grsec_socket_client_gid,
67275+ .maxlen = sizeof(int),
67276+ .mode = 0600,
67277+ .proc_handler = &proc_dointvec,
67278+ },
67279+#endif
67280+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
67281+ {
67282+ .procname = "socket_server",
67283+ .data = &grsec_enable_socket_server,
67284+ .maxlen = sizeof(int),
67285+ .mode = 0600,
67286+ .proc_handler = &proc_dointvec,
67287+ },
67288+ {
67289+ .procname = "socket_server_gid",
67290+ .data = &grsec_socket_server_gid,
67291+ .maxlen = sizeof(int),
67292+ .mode = 0600,
67293+ .proc_handler = &proc_dointvec,
67294+ },
67295+#endif
67296+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
67297+ {
67298+ .procname = "audit_group",
67299+ .data = &grsec_enable_group,
67300+ .maxlen = sizeof(int),
67301+ .mode = 0600,
67302+ .proc_handler = &proc_dointvec,
67303+ },
67304+ {
67305+ .procname = "audit_gid",
67306+ .data = &grsec_audit_gid,
67307+ .maxlen = sizeof(int),
67308+ .mode = 0600,
67309+ .proc_handler = &proc_dointvec,
67310+ },
67311+#endif
67312+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
67313+ {
67314+ .procname = "audit_chdir",
67315+ .data = &grsec_enable_chdir,
67316+ .maxlen = sizeof(int),
67317+ .mode = 0600,
67318+ .proc_handler = &proc_dointvec,
67319+ },
67320+#endif
67321+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
67322+ {
67323+ .procname = "audit_mount",
67324+ .data = &grsec_enable_mount,
67325+ .maxlen = sizeof(int),
67326+ .mode = 0600,
67327+ .proc_handler = &proc_dointvec,
67328+ },
67329+#endif
67330+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
67331+ {
67332+ .procname = "audit_textrel",
67333+ .data = &grsec_enable_audit_textrel,
67334+ .maxlen = sizeof(int),
67335+ .mode = 0600,
67336+ .proc_handler = &proc_dointvec,
67337+ },
67338+#endif
67339+#ifdef CONFIG_GRKERNSEC_DMESG
67340+ {
67341+ .procname = "dmesg",
67342+ .data = &grsec_enable_dmesg,
67343+ .maxlen = sizeof(int),
67344+ .mode = 0600,
67345+ .proc_handler = &proc_dointvec,
67346+ },
67347+#endif
67348+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67349+ {
67350+ .procname = "chroot_findtask",
67351+ .data = &grsec_enable_chroot_findtask,
67352+ .maxlen = sizeof(int),
67353+ .mode = 0600,
67354+ .proc_handler = &proc_dointvec,
67355+ },
67356+#endif
67357+#ifdef CONFIG_GRKERNSEC_RESLOG
67358+ {
67359+ .procname = "resource_logging",
67360+ .data = &grsec_resource_logging,
67361+ .maxlen = sizeof(int),
67362+ .mode = 0600,
67363+ .proc_handler = &proc_dointvec,
67364+ },
67365+#endif
67366+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
67367+ {
67368+ .procname = "audit_ptrace",
67369+ .data = &grsec_enable_audit_ptrace,
67370+ .maxlen = sizeof(int),
67371+ .mode = 0600,
67372+ .proc_handler = &proc_dointvec,
67373+ },
67374+#endif
67375+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
67376+ {
67377+ .procname = "harden_ptrace",
67378+ .data = &grsec_enable_harden_ptrace,
67379+ .maxlen = sizeof(int),
67380+ .mode = 0600,
67381+ .proc_handler = &proc_dointvec,
67382+ },
67383+#endif
67384+ {
67385+ .procname = "grsec_lock",
67386+ .data = &grsec_lock,
67387+ .maxlen = sizeof(int),
67388+ .mode = 0600,
67389+ .proc_handler = &proc_dointvec,
67390+ },
67391+#endif
67392+#ifdef CONFIG_GRKERNSEC_ROFS
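+	/* extra1 == extra2 == &one clamps writes to the value 1, so
+	 * romount_protect can be switched on at runtime but never back off
+	 */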
67393+ {
67394+ .procname = "romount_protect",
67395+ .data = &grsec_enable_rofs,
67396+ .maxlen = sizeof(int),
67397+ .mode = 0600,
67398+ .proc_handler = &proc_dointvec_minmax,
67399+ .extra1 = &one,
67400+ .extra2 = &one,
67401+ },
67402+#endif
67403+ { }
67404+};
67405+#endif
67406diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
67407new file mode 100644
67408index 0000000..0dc13c3
67409--- /dev/null
67410+++ b/grsecurity/grsec_time.c
67411@@ -0,0 +1,16 @@
67412+#include <linux/kernel.h>
67413+#include <linux/sched.h>
67414+#include <linux/grinternal.h>
67415+#include <linux/module.h>
67416+
67417+void
67418+gr_log_timechange(void)
67419+{
67420+#ifdef CONFIG_GRKERNSEC_TIME
67421+ if (grsec_enable_time)
67422+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
67423+#endif
67424+ return;
67425+}
67426+
67427+EXPORT_SYMBOL(gr_log_timechange);
67428diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
67429new file mode 100644
67430index 0000000..ee57dcf
67431--- /dev/null
67432+++ b/grsecurity/grsec_tpe.c
67433@@ -0,0 +1,73 @@
67434+#include <linux/kernel.h>
67435+#include <linux/sched.h>
67436+#include <linux/file.h>
67437+#include <linux/fs.h>
67438+#include <linux/grinternal.h>
67439+
67440+extern int gr_acl_tpe_check(void);
67441+
67442+int
67443+gr_tpe_allow(const struct file *file)
67444+{
67445+#ifdef CONFIG_GRKERNSEC
67446+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
67447+ const struct cred *cred = current_cred();
67448+ char *msg = NULL;
67449+ char *msg2 = NULL;
67450+
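+	// note: "inode" above is the containing directory's inode, not the
+	// file's; TPE denies execution when an affected user runs a binary
+	// out of an untrusted (non-root-owned or group/world-writable)
+	// directory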
67451+ // never restrict root
67452+ if (gr_is_global_root(cred->uid))
67453+ return 1;
67454+
67455+ if (grsec_enable_tpe) {
67456+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
67457+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
67458+ msg = "not being in trusted group";
67459+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
67460+ msg = "being in untrusted group";
67461+#else
67462+ if (in_group_p(grsec_tpe_gid))
67463+ msg = "being in untrusted group";
67464+#endif
67465+ }
67466+ if (!msg && gr_acl_tpe_check())
67467+ msg = "being in untrusted role";
67468+
67469+ // not in any affected group/role
67470+ if (!msg)
67471+ goto next_check;
67472+
67473+ if (gr_is_global_nonroot(inode->i_uid))
67474+ msg2 = "file in non-root-owned directory";
67475+ else if (inode->i_mode & S_IWOTH)
67476+ msg2 = "file in world-writable directory";
67477+ else if (inode->i_mode & S_IWGRP)
67478+ msg2 = "file in group-writable directory";
67479+
67480+ if (msg && msg2) {
67481+ char fullmsg[70] = {0};
67482+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
67483+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
67484+ return 0;
67485+ }
67486+ msg = NULL;
67487+next_check:
67488+#ifdef CONFIG_GRKERNSEC_TPE_ALL
67489+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
67490+ return 1;
67491+
67492+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
67493+ msg = "directory not owned by user";
67494+ else if (inode->i_mode & S_IWOTH)
67495+ msg = "file in world-writable directory";
67496+ else if (inode->i_mode & S_IWGRP)
67497+ msg = "file in group-writable directory";
67498+
67499+ if (msg) {
67500+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
67501+ return 0;
67502+ }
67503+#endif
67504+#endif
67505+ return 1;
67506+}
67507diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
67508new file mode 100644
67509index 0000000..9f7b1ac
67510--- /dev/null
67511+++ b/grsecurity/grsum.c
67512@@ -0,0 +1,61 @@
67513+#include <linux/err.h>
67514+#include <linux/kernel.h>
67515+#include <linux/sched.h>
67516+#include <linux/mm.h>
67517+#include <linux/scatterlist.h>
67518+#include <linux/crypto.h>
67519+#include <linux/gracl.h>
67520+
67521+
67522+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
67523+#error "crypto and sha256 must be built into the kernel"
67524+#endif
67525+
67526+int
67527+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
67528+{
67529+ char *p;
67530+ struct crypto_hash *tfm;
67531+ struct hash_desc desc;
67532+ struct scatterlist sg;
67533+ unsigned char temp_sum[GR_SHA_LEN];
67534+ volatile int retval = 0;
67535+ volatile int dummy = 0;
67536+ unsigned int i;
67537+
67538+ sg_init_table(&sg, 1);
67539+
67540+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
67541+ if (IS_ERR(tfm)) {
67542+ /* should never happen, since sha256 should be built in */
67543+ return 1;
67544+ }
67545+
67546+ desc.tfm = tfm;
67547+ desc.flags = 0;
67548+
67549+ crypto_hash_init(&desc);
67550+
67551+ p = salt;
67552+ sg_set_buf(&sg, p, GR_SALT_LEN);
67553+ crypto_hash_update(&desc, &sg, sg.length);
67554+
67555+ p = entry->pw;
67556+ sg_set_buf(&sg, p, strlen(p));
67557+
67558+ crypto_hash_update(&desc, &sg, sg.length);
67559+
67560+ crypto_hash_final(&desc, temp_sum);
67561+
67562+ memset(entry->pw, 0, GR_PW_LEN);
67563+
67564+ for (i = 0; i < GR_SHA_LEN; i++)
67565+ if (sum[i] != temp_sum[i])
67566+ retval = 1;
67567+ else
67568+ dummy = 1; /* waste a cycle */
67569+
67570+ crypto_free_hash(tfm);
67571+
67572+ return retval;
67573+}
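
The comparison loop above is written for constant time: it walks all GR_SHA_LEN bytes with no early exit, and the volatile retval/dummy pair keeps the compiler from collapsing the two balanced branches, so a wrong password costs the same as a correct one and timing reveals nothing about how many leading bytes matched. The same property is usually expressed branch-free; a standalone sketch in plain C, analogous to what the kernel later standardized as crypto_memneq():

    #include <stddef.h>

    /* Returns nonzero iff the buffers differ; runtime depends only on n,
     * never on where the first mismatch sits. */
    static int ct_memneq(const unsigned char *a, const unsigned char *b,
                         size_t n)
    {
            unsigned char diff = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    diff |= a[i] ^ b[i];  /* accumulate mismatched bits */
            return diff != 0;
    }
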
67574diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
67575index 77ff547..181834f 100644
67576--- a/include/asm-generic/4level-fixup.h
67577+++ b/include/asm-generic/4level-fixup.h
67578@@ -13,8 +13,10 @@
67579 #define pmd_alloc(mm, pud, address) \
67580 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
67581 NULL: pmd_offset(pud, address))
67582+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
67583
67584 #define pud_alloc(mm, pgd, address) (pgd)
67585+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
67586 #define pud_offset(pgd, start) (pgd)
67587 #define pud_none(pud) 0
67588 #define pud_bad(pud) 0
67589diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
67590index b7babf0..04ad282 100644
67591--- a/include/asm-generic/atomic-long.h
67592+++ b/include/asm-generic/atomic-long.h
67593@@ -22,6 +22,12 @@
67594
67595 typedef atomic64_t atomic_long_t;
67596
67597+#ifdef CONFIG_PAX_REFCOUNT
67598+typedef atomic64_unchecked_t atomic_long_unchecked_t;
67599+#else
67600+typedef atomic64_t atomic_long_unchecked_t;
67601+#endif
67602+
67603 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
67604
67605 static inline long atomic_long_read(atomic_long_t *l)
67606@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67607 return (long)atomic64_read(v);
67608 }
67609
67610+#ifdef CONFIG_PAX_REFCOUNT
67611+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67612+{
67613+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67614+
67615+ return (long)atomic64_read_unchecked(v);
67616+}
67617+#endif
67618+
67619 static inline void atomic_long_set(atomic_long_t *l, long i)
67620 {
67621 atomic64_t *v = (atomic64_t *)l;
67622@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67623 atomic64_set(v, i);
67624 }
67625
67626+#ifdef CONFIG_PAX_REFCOUNT
67627+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67628+{
67629+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67630+
67631+ atomic64_set_unchecked(v, i);
67632+}
67633+#endif
67634+
67635 static inline void atomic_long_inc(atomic_long_t *l)
67636 {
67637 atomic64_t *v = (atomic64_t *)l;
67638@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67639 atomic64_inc(v);
67640 }
67641
67642+#ifdef CONFIG_PAX_REFCOUNT
67643+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67644+{
67645+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67646+
67647+ atomic64_inc_unchecked(v);
67648+}
67649+#endif
67650+
67651 static inline void atomic_long_dec(atomic_long_t *l)
67652 {
67653 atomic64_t *v = (atomic64_t *)l;
67654@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67655 atomic64_dec(v);
67656 }
67657
67658+#ifdef CONFIG_PAX_REFCOUNT
67659+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67660+{
67661+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67662+
67663+ atomic64_dec_unchecked(v);
67664+}
67665+#endif
67666+
67667 static inline void atomic_long_add(long i, atomic_long_t *l)
67668 {
67669 atomic64_t *v = (atomic64_t *)l;
67670@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67671 atomic64_add(i, v);
67672 }
67673
67674+#ifdef CONFIG_PAX_REFCOUNT
67675+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67676+{
67677+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67678+
67679+ atomic64_add_unchecked(i, v);
67680+}
67681+#endif
67682+
67683 static inline void atomic_long_sub(long i, atomic_long_t *l)
67684 {
67685 atomic64_t *v = (atomic64_t *)l;
67686@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67687 atomic64_sub(i, v);
67688 }
67689
67690+#ifdef CONFIG_PAX_REFCOUNT
67691+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67692+{
67693+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67694+
67695+ atomic64_sub_unchecked(i, v);
67696+}
67697+#endif
67698+
67699 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67700 {
67701 atomic64_t *v = (atomic64_t *)l;
67702@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67703 return (long)atomic64_add_return(i, v);
67704 }
67705
67706+#ifdef CONFIG_PAX_REFCOUNT
67707+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67708+{
67709+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67710+
67711+ return (long)atomic64_add_return_unchecked(i, v);
67712+}
67713+#endif
67714+
67715 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67716 {
67717 atomic64_t *v = (atomic64_t *)l;
67718@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67719 return (long)atomic64_inc_return(v);
67720 }
67721
67722+#ifdef CONFIG_PAX_REFCOUNT
67723+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67724+{
67725+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67726+
67727+ return (long)atomic64_inc_return_unchecked(v);
67728+}
67729+#endif
67730+
67731 static inline long atomic_long_dec_return(atomic_long_t *l)
67732 {
67733 atomic64_t *v = (atomic64_t *)l;
67734@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67735
67736 typedef atomic_t atomic_long_t;
67737
67738+#ifdef CONFIG_PAX_REFCOUNT
67739+typedef atomic_unchecked_t atomic_long_unchecked_t;
67740+#else
67741+typedef atomic_t atomic_long_unchecked_t;
67742+#endif
67743+
67744 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
67745 static inline long atomic_long_read(atomic_long_t *l)
67746 {
67747@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67748 return (long)atomic_read(v);
67749 }
67750
67751+#ifdef CONFIG_PAX_REFCOUNT
67752+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67753+{
67754+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67755+
67756+ return (long)atomic_read_unchecked(v);
67757+}
67758+#endif
67759+
67760 static inline void atomic_long_set(atomic_long_t *l, long i)
67761 {
67762 atomic_t *v = (atomic_t *)l;
67763@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67764 atomic_set(v, i);
67765 }
67766
67767+#ifdef CONFIG_PAX_REFCOUNT
67768+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67769+{
67770+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67771+
67772+ atomic_set_unchecked(v, i);
67773+}
67774+#endif
67775+
67776 static inline void atomic_long_inc(atomic_long_t *l)
67777 {
67778 atomic_t *v = (atomic_t *)l;
67779@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67780 atomic_inc(v);
67781 }
67782
67783+#ifdef CONFIG_PAX_REFCOUNT
67784+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67785+{
67786+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67787+
67788+ atomic_inc_unchecked(v);
67789+}
67790+#endif
67791+
67792 static inline void atomic_long_dec(atomic_long_t *l)
67793 {
67794 atomic_t *v = (atomic_t *)l;
67795@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67796 atomic_dec(v);
67797 }
67798
67799+#ifdef CONFIG_PAX_REFCOUNT
67800+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67801+{
67802+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67803+
67804+ atomic_dec_unchecked(v);
67805+}
67806+#endif
67807+
67808 static inline void atomic_long_add(long i, atomic_long_t *l)
67809 {
67810 atomic_t *v = (atomic_t *)l;
67811@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67812 atomic_add(i, v);
67813 }
67814
67815+#ifdef CONFIG_PAX_REFCOUNT
67816+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67817+{
67818+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67819+
67820+ atomic_add_unchecked(i, v);
67821+}
67822+#endif
67823+
67824 static inline void atomic_long_sub(long i, atomic_long_t *l)
67825 {
67826 atomic_t *v = (atomic_t *)l;
67827@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67828 atomic_sub(i, v);
67829 }
67830
67831+#ifdef CONFIG_PAX_REFCOUNT
67832+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67833+{
67834+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67835+
67836+ atomic_sub_unchecked(i, v);
67837+}
67838+#endif
67839+
67840 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67841 {
67842 atomic_t *v = (atomic_t *)l;
67843@@ -218,6 +356,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67844 return (long)atomic_add_return(i, v);
67845 }
67846
67847+#ifdef CONFIG_PAX_REFCOUNT
67848+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67849+{
67850+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67851+
67852+ return (long)atomic_add_return_unchecked(i, v);
67853+}
67855+#endif
67856+
67857 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67858 {
67859 atomic_t *v = (atomic_t *)l;
67860@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67861 return (long)atomic_inc_return(v);
67862 }
67863
67864+#ifdef CONFIG_PAX_REFCOUNT
67865+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67866+{
67867+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67868+
67869+ return (long)atomic_inc_return_unchecked(v);
67870+}
67871+#endif
67872+
67873 static inline long atomic_long_dec_return(atomic_long_t *l)
67874 {
67875 atomic_t *v = (atomic_t *)l;
67876@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67877
67878 #endif /* BITS_PER_LONG == 64 */
67879
67880+#ifdef CONFIG_PAX_REFCOUNT
67881+static inline void pax_refcount_needs_these_functions(void)
67882+{
67883+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
67884+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
67885+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
67886+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
67887+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
67888+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
67889+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
67890+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
67891+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
67892+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
67893+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
67894+#ifdef CONFIG_X86
67895+ atomic_clear_mask_unchecked(0, NULL);
67896+ atomic_set_mask_unchecked(0, NULL);
67897+#endif
67898+
67899+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
67900+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
67901+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
67902+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
67903+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
67904+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
67905+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
67906+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
67907+}
67908+#else
67909+#define atomic_read_unchecked(v) atomic_read(v)
67910+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
67911+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
67912+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
67913+#define atomic_inc_unchecked(v) atomic_inc(v)
67914+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
67915+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
67916+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
67917+#define atomic_dec_unchecked(v) atomic_dec(v)
67918+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
67919+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
67920+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
67921+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
67922+
67923+#define atomic_long_read_unchecked(v) atomic_long_read(v)
67924+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
67925+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
67926+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
67927+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
67928+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
67929+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
67930+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
67931+#endif
67932+
67933 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
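
The shape of this whole header change: every atomic_long operation gains an _unchecked twin. Under CONFIG_PAX_REFCOUNT the ordinary operations trap on overflow, defeating reference-count-overflow exploits, so counters where wraparound is harmless must opt out via the unchecked type; without the option the twins collapse to the plain operations, as the #else block above shows. A usage sketch:

    #include <linux/atomic.h>

    /* A refcount must never wrap: keep the checked type and ops. */
    static atomic_long_t obj_refs = ATOMIC_LONG_INIT(1);

    /* A free-running statistic may wrap: use the unchecked twin. */
    static atomic_long_unchecked_t rx_bytes = ATOMIC_LONG_INIT(0);

    static void account(long n)
    {
            atomic_long_inc(&obj_refs);              /* overflow traps   */
            atomic_long_add_unchecked(n, &rx_bytes); /* wrap is harmless */
    }
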
67934diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
67935index 1ced641..c896ee8 100644
67936--- a/include/asm-generic/atomic.h
67937+++ b/include/asm-generic/atomic.h
67938@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
67939 * Atomically clears the bits set in @mask from @v
67940 */
67941 #ifndef atomic_clear_mask
67942-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
67943+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
67944 {
67945 unsigned long flags;
67946
67947diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
67948index b18ce4f..2ee2843 100644
67949--- a/include/asm-generic/atomic64.h
67950+++ b/include/asm-generic/atomic64.h
67951@@ -16,6 +16,8 @@ typedef struct {
67952 long long counter;
67953 } atomic64_t;
67954
67955+typedef atomic64_t atomic64_unchecked_t;
67956+
67957 #define ATOMIC64_INIT(i) { (i) }
67958
67959 extern long long atomic64_read(const atomic64_t *v);
67960@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
67961 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
67962 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
67963
67964+#define atomic64_read_unchecked(v) atomic64_read(v)
67965+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
67966+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
67967+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
67968+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
67969+#define atomic64_inc_unchecked(v) atomic64_inc(v)
67970+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
67971+#define atomic64_dec_unchecked(v) atomic64_dec(v)
67972+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
67973+
67974 #endif /* _ASM_GENERIC_ATOMIC64_H */
67975diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
67976index 1bfcfe5..e04c5c9 100644
67977--- a/include/asm-generic/cache.h
67978+++ b/include/asm-generic/cache.h
67979@@ -6,7 +6,7 @@
67980 * cache lines need to provide their own cache.h.
67981 */
67982
67983-#define L1_CACHE_SHIFT 5
67984-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
67985+#define L1_CACHE_SHIFT 5UL
67986+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
67987
67988 #endif /* __ASM_GENERIC_CACHE_H */
67989diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
67990index 0d68a1e..b74a761 100644
67991--- a/include/asm-generic/emergency-restart.h
67992+++ b/include/asm-generic/emergency-restart.h
67993@@ -1,7 +1,7 @@
67994 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
67995 #define _ASM_GENERIC_EMERGENCY_RESTART_H
67996
67997-static inline void machine_emergency_restart(void)
67998+static inline __noreturn void machine_emergency_restart(void)
67999 {
68000 machine_restart(NULL);
68001 }
68002diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
68003index 90f99c7..00ce236 100644
68004--- a/include/asm-generic/kmap_types.h
68005+++ b/include/asm-generic/kmap_types.h
68006@@ -2,9 +2,9 @@
68007 #define _ASM_GENERIC_KMAP_TYPES_H
68008
68009 #ifdef __WITH_KM_FENCE
68010-# define KM_TYPE_NR 41
68011+# define KM_TYPE_NR 42
68012 #else
68013-# define KM_TYPE_NR 20
68014+# define KM_TYPE_NR 21
68015 #endif
68016
68017 #endif
68018diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
68019index 9ceb03b..62b0b8f 100644
68020--- a/include/asm-generic/local.h
68021+++ b/include/asm-generic/local.h
68022@@ -23,24 +23,37 @@ typedef struct
68023 atomic_long_t a;
68024 } local_t;
68025
68026+typedef struct {
68027+ atomic_long_unchecked_t a;
68028+} local_unchecked_t;
68029+
68030 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
68031
68032 #define local_read(l) atomic_long_read(&(l)->a)
68033+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
68034 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
68035+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
68036 #define local_inc(l) atomic_long_inc(&(l)->a)
68037+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
68038 #define local_dec(l) atomic_long_dec(&(l)->a)
68039+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
68040 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
68041+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
68042 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
68043+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
68044
68045 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
68046 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
68047 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
68048 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
68049 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
68050+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
68051 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
68052 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
68053+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
68054
68055 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
68056+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
68057 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
68058 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
68059 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
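
The per-cpu local_t wrapper gets the same checked/unchecked split, plus two gap fills: the generic header previously lacked local_dec_return() entirely, and local_cmpxchg_unchecked() simply maps to the checked cmpxchg, since a compare-and-swap stores a caller-supplied value and has no overflow to detect. A sketch of the unchecked-counter pattern this enables (other hunks apply it to ring-buffer bookkeeping and the like):

    #include <asm/local.h>

    static local_unchecked_t events;       /* per-cpu event tally */

    static void on_event(void)
    {
            local_inc_unchecked(&events);  /* wraparound acceptable */
    }
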
68060diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
68061index 725612b..9cc513a 100644
68062--- a/include/asm-generic/pgtable-nopmd.h
68063+++ b/include/asm-generic/pgtable-nopmd.h
68064@@ -1,14 +1,19 @@
68065 #ifndef _PGTABLE_NOPMD_H
68066 #define _PGTABLE_NOPMD_H
68067
68068-#ifndef __ASSEMBLY__
68069-
68070 #include <asm-generic/pgtable-nopud.h>
68071
68072-struct mm_struct;
68073-
68074 #define __PAGETABLE_PMD_FOLDED
68075
68076+#define PMD_SHIFT PUD_SHIFT
68077+#define PTRS_PER_PMD 1
68078+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
68079+#define PMD_MASK (~(PMD_SIZE-1))
68080+
68081+#ifndef __ASSEMBLY__
68082+
68083+struct mm_struct;
68084+
68085 /*
68086 * Having the pmd type consist of a pud gets the size right, and allows
68087 * us to conceptually access the pud entry that this pmd is folded into
68088@@ -16,11 +21,6 @@ struct mm_struct;
68089 */
68090 typedef struct { pud_t pud; } pmd_t;
68091
68092-#define PMD_SHIFT PUD_SHIFT
68093-#define PTRS_PER_PMD 1
68094-#define PMD_SIZE (1UL << PMD_SHIFT)
68095-#define PMD_MASK (~(PMD_SIZE-1))
68096-
68097 /*
68098 * The "pud_xxx()" functions here are trivial for a folded two-level
68099 * setup: the pmd is never bad, and a pmd always exists (as it's folded
68100diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
68101index 810431d..0ec4804f 100644
68102--- a/include/asm-generic/pgtable-nopud.h
68103+++ b/include/asm-generic/pgtable-nopud.h
68104@@ -1,10 +1,15 @@
68105 #ifndef _PGTABLE_NOPUD_H
68106 #define _PGTABLE_NOPUD_H
68107
68108-#ifndef __ASSEMBLY__
68109-
68110 #define __PAGETABLE_PUD_FOLDED
68111
68112+#define PUD_SHIFT PGDIR_SHIFT
68113+#define PTRS_PER_PUD 1
68114+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
68115+#define PUD_MASK (~(PUD_SIZE-1))
68116+
68117+#ifndef __ASSEMBLY__
68118+
68119 /*
68120 * Having the pud type consist of a pgd gets the size right, and allows
68121 * us to conceptually access the pgd entry that this pud is folded into
68122@@ -12,11 +17,6 @@
68123 */
68124 typedef struct { pgd_t pgd; } pud_t;
68125
68126-#define PUD_SHIFT PGDIR_SHIFT
68127-#define PTRS_PER_PUD 1
68128-#define PUD_SIZE (1UL << PUD_SHIFT)
68129-#define PUD_MASK (~(PUD_SIZE-1))
68130-
68131 /*
68132 * The "pgd_xxx()" functions here are trivial for a folded two-level
68133 * setup: the pud is never bad, and a pud always exists (as it's folded
68134@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
68135 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
68136
68137 #define pgd_populate(mm, pgd, pud) do { } while (0)
68138+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
68139 /*
68140 * (puds are folded into pgds so this doesn't get actually called,
68141 * but the define is needed for a generic inline function.)
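
Both folded-page-table headers get the same two-part treatment: the PMD_*/PUD_* constants move in front of the #ifndef __ASSEMBLY__ guard so .S files can use them, and the 1UL literal becomes _AC(1,UL), because an assembler rejects C integer suffixes. _AC() from <uapi/linux/const.h> exists exactly for this:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X             /* assembler sees a bare 1 */
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)    /* C sees (1UL) */
    #endif

    /* So PUD_SIZE, defined as (_AC(1,UL) << PUD_SHIFT), preprocesses to
     * (1 << PUD_SHIFT) in assembly and ((1UL) << PUD_SHIFT) in C. */
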
68142diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
68143index 5cf680a..4b74d62 100644
68144--- a/include/asm-generic/pgtable.h
68145+++ b/include/asm-generic/pgtable.h
68146@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
68147 }
68148 #endif /* CONFIG_NUMA_BALANCING */
68149
68150+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
68151+static inline unsigned long pax_open_kernel(void) { return 0; }
68152+#endif
68153+
68154+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
68155+static inline unsigned long pax_close_kernel(void) { return 0; }
68156+#endif
68157+
68158 #endif /* CONFIG_MMU */
68159
68160 #endif /* !__ASSEMBLY__ */
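
pax_open_kernel()/pax_close_kernel() are the write gates for data that the KERNEXEC and constify features keep read-only: architectures with support override them (on x86 they briefly clear the CR0.WP bit, roughly speaking), and the no-op stubs above let shared code call them unconditionally. A usage sketch; the table name is illustrative:

    #include <asm/pgtable.h>

    extern unsigned long jump_table[16];   /* lives in read-only data */

    static void patch_entry(unsigned int i, unsigned long target)
    {
            pax_open_kernel();
            jump_table[i] = target;  /* write is legal inside the window */
            pax_close_kernel();
    }
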
68161diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
68162index d1ea7ce..b1ebf2a 100644
68163--- a/include/asm-generic/vmlinux.lds.h
68164+++ b/include/asm-generic/vmlinux.lds.h
68165@@ -218,6 +218,7 @@
68166 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
68167 VMLINUX_SYMBOL(__start_rodata) = .; \
68168 *(.rodata) *(.rodata.*) \
68169+ *(.data..read_only) \
68170 *(__vermagic) /* Kernel version magic */ \
68171 . = ALIGN(8); \
68172 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
68173@@ -725,17 +726,18 @@
68174 * section in the linker script will go there too. @phdr should have
68175 * a leading colon.
68176 *
68177- * Note that this macros defines __per_cpu_load as an absolute symbol.
67678+ * Note that this macro defines per_cpu_load as an absolute symbol.
68179 * If there is no need to put the percpu section at a predetermined
68180 * address, use PERCPU_SECTION.
68181 */
68182 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
68183- VMLINUX_SYMBOL(__per_cpu_load) = .; \
68184- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
68185+ per_cpu_load = .; \
68186+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
68187 - LOAD_OFFSET) { \
68188+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
68189 PERCPU_INPUT(cacheline) \
68190 } phdr \
68191- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
68192+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
68193
68194 /**
68195 * PERCPU_SECTION - define output section for percpu area, simple version
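
Two separate hardenings sit in this linker-script hunk. First, the new *(.data..read_only) input pattern folds anything marked for that section into the rodata output section, which the kernel write-protects; the __read_only marker added to <linux/cache.h> below is the usual way variables get there (the generic fallback merely aliases __read_mostly; the PaX architecture headers define it approximately as sketched here):

    /* Approximate definition, for illustration only; the generic
     * fallback added by this patch is just __read_mostly. */
    #define __read_only __attribute__((__section__(".data..read_only")))

    static int feature_enabled __read_only = 1;  /* writable only between
                                                    pax_open_kernel() and
                                                    pax_close_kernel() */

Second, the hunk re-derives __per_cpu_load from a non-absolute per_cpu_load marker instead of defining it absolute, as the rewritten comment notes.
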
68196diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
68197index 418d270..bfd2794 100644
68198--- a/include/crypto/algapi.h
68199+++ b/include/crypto/algapi.h
68200@@ -34,7 +34,7 @@ struct crypto_type {
68201 unsigned int maskclear;
68202 unsigned int maskset;
68203 unsigned int tfmsize;
68204-};
68205+} __do_const;
68206
68207 struct crypto_instance {
68208 struct crypto_alg alg;
68209diff --git a/include/drm/drmP.h b/include/drm/drmP.h
68210index fad21c9..ab858bc 100644
68211--- a/include/drm/drmP.h
68212+++ b/include/drm/drmP.h
68213@@ -72,6 +72,7 @@
68214 #include <linux/workqueue.h>
68215 #include <linux/poll.h>
68216 #include <asm/pgalloc.h>
68217+#include <asm/local.h>
68218 #include <drm/drm.h>
68219 #include <drm/drm_sarea.h>
68220
68221@@ -293,10 +294,12 @@ do { \
68222 * \param cmd command.
68223 * \param arg argument.
68224 */
68225-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
68226+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
68227+ struct drm_file *file_priv);
68228+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
68229 struct drm_file *file_priv);
68230
68231-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
68232+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
68233 unsigned long arg);
68234
68235 #define DRM_IOCTL_NR(n) _IOC_NR(n)
68236@@ -311,9 +314,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
68237 struct drm_ioctl_desc {
68238 unsigned int cmd;
68239 int flags;
68240- drm_ioctl_t *func;
68241+ drm_ioctl_t func;
68242 unsigned int cmd_drv;
68243-};
68244+} __do_const;
68245
68246 /**
68247 * Creates a driver or general drm_ioctl_desc array entry for the given
68248@@ -995,7 +998,7 @@ struct drm_info_list {
68249 int (*show)(struct seq_file*, void*); /** show callback */
68250 u32 driver_features; /**< Required driver features for this entry */
68251 void *data;
68252-};
68253+} __do_const;
68254
68255 /**
68256 * debugfs node structure. This structure represents a debugfs file.
68257@@ -1068,7 +1071,7 @@ struct drm_device {
68258
68259 /** \name Usage Counters */
68260 /*@{ */
68261- int open_count; /**< Outstanding files open */
68262+ local_t open_count; /**< Outstanding files open */
68263 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
68264 atomic_t vma_count; /**< Outstanding vma areas open */
68265 int buf_use; /**< Buffers in use -- cannot alloc */
68266@@ -1079,7 +1082,7 @@ struct drm_device {
68267 /*@{ */
68268 unsigned long counters;
68269 enum drm_stat_type types[15];
68270- atomic_t counts[15];
68271+ atomic_unchecked_t counts[15];
68272 /*@} */
68273
68274 struct list_head filelist;
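
The drm changes combine several patterns seen throughout the patch: ioctl handler pointers become const (with drm_ioctl_no_const_t kept for the rare table built at runtime), descriptor structs gain __do_const so whole tables constify into rodata, the open_count int becomes a local_t so concurrent opens and closes cannot race it, and the per-device stats array becomes unchecked since it only feeds accounting. The const-pointer idea in isolation, as plain C:

    /* Sketch: const applies to the pointer member, so an initialized
     * table of these can live in read-only memory and a kernel write
     * primitive cannot redirect the handlers. */
    struct ioctl_desc {
            unsigned int cmd;
            int (* const func)(void *ctx);  /* pointer itself is const */
    };

    static int do_reset(void *ctx) { (void)ctx; return 0; }

    static const struct ioctl_desc table[] = {
            { 0x01, do_reset },             /* fixed at build time;
                                               table[0].func(NULL) would
                                               dispatch cmd 0x01 */
    };
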
68275diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
68276index f43d556..94d9343 100644
68277--- a/include/drm/drm_crtc_helper.h
68278+++ b/include/drm/drm_crtc_helper.h
68279@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
68280 struct drm_connector *connector);
68281 /* disable encoder when not in use - more explicit than dpms off */
68282 void (*disable)(struct drm_encoder *encoder);
68283-};
68284+} __no_const;
68285
68286 /**
68287 * drm_connector_helper_funcs - helper operations for connectors
68288diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
68289index 72dcbe8..8db58d7 100644
68290--- a/include/drm/ttm/ttm_memory.h
68291+++ b/include/drm/ttm/ttm_memory.h
68292@@ -48,7 +48,7 @@
68293
68294 struct ttm_mem_shrink {
68295 int (*do_shrink) (struct ttm_mem_shrink *);
68296-};
68297+} __no_const;
68298
68299 /**
68300 * struct ttm_mem_global - Global memory accounting structure.
68301diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
68302index 4b840e8..155d235 100644
68303--- a/include/keys/asymmetric-subtype.h
68304+++ b/include/keys/asymmetric-subtype.h
68305@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
68306 /* Verify the signature on a key of this subtype (optional) */
68307 int (*verify_signature)(const struct key *key,
68308 const struct public_key_signature *sig);
68309-};
68310+} __do_const;
68311
68312 /**
68313 * asymmetric_key_subtype - Get the subtype from an asymmetric key
68314diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
68315index c1da539..1dcec55 100644
68316--- a/include/linux/atmdev.h
68317+++ b/include/linux/atmdev.h
68318@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
68319 #endif
68320
68321 struct k_atm_aal_stats {
68322-#define __HANDLE_ITEM(i) atomic_t i
68323+#define __HANDLE_ITEM(i) atomic_unchecked_t i
68324 __AAL_STAT_ITEMS
68325 #undef __HANDLE_ITEM
68326 };
68327@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
68328 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
68329 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
68330 struct module *owner;
68331-};
68332+} __do_const;
68333
68334 struct atmphy_ops {
68335 int (*start)(struct atm_dev *dev);
68336diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
68337index 0530b98..96a8ac0 100644
68338--- a/include/linux/binfmts.h
68339+++ b/include/linux/binfmts.h
68340@@ -73,8 +73,9 @@ struct linux_binfmt {
68341 int (*load_binary)(struct linux_binprm *);
68342 int (*load_shlib)(struct file *);
68343 int (*core_dump)(struct coredump_params *cprm);
68344+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
68345 unsigned long min_coredump; /* minimal dump size */
68346-};
68347+} __do_const;
68348
68349 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
68350
68351diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
68352index f94bc83..62b9cfe 100644
68353--- a/include/linux/blkdev.h
68354+++ b/include/linux/blkdev.h
68355@@ -1498,7 +1498,7 @@ struct block_device_operations {
68356 /* this callback is with swap_lock and sometimes page table lock held */
68357 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
68358 struct module *owner;
68359-};
68360+} __do_const;
68361
68362 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
68363 unsigned long);
68364diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
68365index 7c2e030..b72475d 100644
68366--- a/include/linux/blktrace_api.h
68367+++ b/include/linux/blktrace_api.h
68368@@ -23,7 +23,7 @@ struct blk_trace {
68369 struct dentry *dir;
68370 struct dentry *dropped_file;
68371 struct dentry *msg_file;
68372- atomic_t dropped;
68373+ atomic_unchecked_t dropped;
68374 };
68375
68376 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
68377diff --git a/include/linux/cache.h b/include/linux/cache.h
68378index 4c57065..4307975 100644
68379--- a/include/linux/cache.h
68380+++ b/include/linux/cache.h
68381@@ -16,6 +16,10 @@
68382 #define __read_mostly
68383 #endif
68384
68385+#ifndef __read_only
68386+#define __read_only __read_mostly
68387+#endif
68388+
68389 #ifndef ____cacheline_aligned
68390 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
68391 #endif
68392diff --git a/include/linux/capability.h b/include/linux/capability.h
68393index d9a4f7f4..19f77d6 100644
68394--- a/include/linux/capability.h
68395+++ b/include/linux/capability.h
68396@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
68397 extern bool nsown_capable(int cap);
68398 extern bool inode_capable(const struct inode *inode, int cap);
68399 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
68400+extern bool capable_nolog(int cap);
68401+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
68402+extern bool inode_capable_nolog(const struct inode *inode, int cap);
68403
68404 /* audit system wants to get cap info from files as well */
68405 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
68406
68407+extern int is_privileged_binary(const struct dentry *dentry);
68408+
68409 #endif /* !_LINUX_CAPABILITY_H */
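
The _nolog variants behave like capable()/ns_capable()/inode_capable() except that a denial produces no grsecurity audit message, for callers that merely probe and for which failure is the expected, uninteresting outcome; is_privileged_binary() reports whether a dentry refers to a setuid/setgid or similarly privileged executable, which other hunks consult when extra caution is warranted. A hedged caller sketch:

    #include <linux/capability.h>

    /* Sketch: probe a capability without spamming the audit log when
     * the answer is no. */
    static bool may_do_rawio_quietly(void)
    {
            return capable_nolog(CAP_SYS_RAWIO);
    }
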
68410diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
68411index 8609d57..86e4d79 100644
68412--- a/include/linux/cdrom.h
68413+++ b/include/linux/cdrom.h
68414@@ -87,7 +87,6 @@ struct cdrom_device_ops {
68415
68416 /* driver specifications */
68417 const int capability; /* capability flags */
68418- int n_minors; /* number of active minor devices */
68419 /* handle uniform packets for scsi type devices (scsi,atapi) */
68420 int (*generic_packet) (struct cdrom_device_info *,
68421 struct packet_command *);
68422diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
68423index 42e55de..1cd0e66 100644
68424--- a/include/linux/cleancache.h
68425+++ b/include/linux/cleancache.h
68426@@ -31,7 +31,7 @@ struct cleancache_ops {
68427 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
68428 void (*invalidate_inode)(int, struct cleancache_filekey);
68429 void (*invalidate_fs)(int);
68430-};
68431+} __no_const;
68432
68433 extern struct cleancache_ops
68434 cleancache_register_ops(struct cleancache_ops *ops);
68435diff --git a/include/linux/compat.h b/include/linux/compat.h
68436index dec7e2d..45db13f 100644
68437--- a/include/linux/compat.h
68438+++ b/include/linux/compat.h
68439@@ -311,14 +311,14 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
68440 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
68441 int version, void __user *uptr);
68442 long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
68443- void __user *uptr);
68444+ void __user *uptr) __intentional_overflow(0);
68445 #else
68446 long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
68447 long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
68448 compat_ssize_t msgsz, int msgflg);
68449 long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
68450 compat_ssize_t msgsz, long msgtyp, int msgflg);
68451-long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
68452+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
68453 #endif
68454 long compat_sys_msgctl(int first, int second, void __user *uptr);
68455 long compat_sys_shmctl(int first, int second, void __user *uptr);
68456@@ -414,7 +414,7 @@ extern int compat_ptrace_request(struct task_struct *child,
68457 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
68458 compat_ulong_t addr, compat_ulong_t data);
68459 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68460- compat_long_t addr, compat_long_t data);
68461+ compat_ulong_t addr, compat_ulong_t data);
68462
68463 /*
68464 * epoll (fs/eventpoll.c) compat bits follow ...
68465diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
68466index 662fd1b..e801992 100644
68467--- a/include/linux/compiler-gcc4.h
68468+++ b/include/linux/compiler-gcc4.h
68469@@ -34,6 +34,21 @@
68470 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
68471
68472 #if __GNUC_MINOR__ >= 5
68473+
68474+#ifdef CONSTIFY_PLUGIN
68475+#define __no_const __attribute__((no_const))
68476+#define __do_const __attribute__((do_const))
68477+#endif
68478+
68479+#ifdef SIZE_OVERFLOW_PLUGIN
68480+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
68481+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
68482+#endif
68483+
68484+#ifdef LATENT_ENTROPY_PLUGIN
68485+#define __latent_entropy __attribute__((latent_entropy))
68486+#endif
68487+
68488 /*
68489 * Mark a position in code as unreachable. This can be used to
68490 * suppress control flow warnings after asm blocks that transfer
68491@@ -49,6 +64,11 @@
68492 #define __noclone __attribute__((__noclone__))
68493
68494 #endif
68495+
68496+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
68497+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
68498+#define __bos0(ptr) __bos((ptr), 0)
68499+#define __bos1(ptr) __bos((ptr), 1)
68500 #endif
68501
68502 #if __GNUC_MINOR__ >= 6
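
This is where the GCC-plugin vocabulary enters the tree: __do_const/__no_const steer the constify plugin, __size_overflow/__intentional_overflow the size-overflow plugin, and __latent_entropy the entropy-gathering plugin, with empty fallbacks added to <linux/compiler.h> below so non-plugin builds still compile. Independently, __alloc_size and the __bos helpers wrap standard GCC machinery for compile-time object-size tracking; a standalone illustration of what they buy:

    #include <stdlib.h>

    /* Annotating the allocator lets GCC propagate the allocation size
     * into __builtin_object_size(), the basis for fortified bounds
     * checks on memcpy() and friends. */
    void *my_alloc(size_t n) __attribute__((alloc_size(1)));
    void *my_alloc(size_t n) { return malloc(n); }

    size_t demo(void)
    {
            char *p = my_alloc(16);
            /* With optimization enabled this folds to 16, not (size_t)-1. */
            return __builtin_object_size(p, 0);
    }
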
68503diff --git a/include/linux/compiler.h b/include/linux/compiler.h
68504index dd852b7..1ad5fba 100644
68505--- a/include/linux/compiler.h
68506+++ b/include/linux/compiler.h
68507@@ -5,11 +5,14 @@
68508
68509 #ifdef __CHECKER__
68510 # define __user __attribute__((noderef, address_space(1)))
68511+# define __force_user __force __user
68512 # define __kernel __attribute__((address_space(0)))
68513+# define __force_kernel __force __kernel
68514 # define __safe __attribute__((safe))
68515 # define __force __attribute__((force))
68516 # define __nocast __attribute__((nocast))
68517 # define __iomem __attribute__((noderef, address_space(2)))
68518+# define __force_iomem __force __iomem
68519 # define __must_hold(x) __attribute__((context(x,1,1)))
68520 # define __acquires(x) __attribute__((context(x,0,1)))
68521 # define __releases(x) __attribute__((context(x,1,0)))
68522@@ -17,20 +20,37 @@
68523 # define __release(x) __context__(x,-1)
68524 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
68525 # define __percpu __attribute__((noderef, address_space(3)))
68526+# define __force_percpu __force __percpu
68527 #ifdef CONFIG_SPARSE_RCU_POINTER
68528 # define __rcu __attribute__((noderef, address_space(4)))
68529+# define __force_rcu __force __rcu
68530 #else
68531 # define __rcu
68532+# define __force_rcu
68533 #endif
68534 extern void __chk_user_ptr(const volatile void __user *);
68535 extern void __chk_io_ptr(const volatile void __iomem *);
68536 #else
68537-# define __user
68538-# define __kernel
68539+# ifdef CHECKER_PLUGIN
68540+//# define __user
68541+//# define __force_user
68542+//# define __kernel
68543+//# define __force_kernel
68544+# else
68545+# ifdef STRUCTLEAK_PLUGIN
68546+# define __user __attribute__((user))
68547+# else
68548+# define __user
68549+# endif
68550+# define __force_user
68551+# define __kernel
68552+# define __force_kernel
68553+# endif
68554 # define __safe
68555 # define __force
68556 # define __nocast
68557 # define __iomem
68558+# define __force_iomem
68559 # define __chk_user_ptr(x) (void)0
68560 # define __chk_io_ptr(x) (void)0
68561 # define __builtin_warning(x, y...) (1)
68562@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
68563 # define __release(x) (void)0
68564 # define __cond_lock(x,c) (c)
68565 # define __percpu
68566+# define __force_percpu
68567 # define __rcu
68568+# define __force_rcu
68569 #endif
68570
68571 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
68572@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68573 # define __attribute_const__ /* unimplemented */
68574 #endif
68575
68576+#ifndef __no_const
68577+# define __no_const
68578+#endif
68579+
68580+#ifndef __do_const
68581+# define __do_const
68582+#endif
68583+
68584+#ifndef __size_overflow
68585+# define __size_overflow(...)
68586+#endif
68587+
68588+#ifndef __intentional_overflow
68589+# define __intentional_overflow(...)
68590+#endif
68591+
68592+#ifndef __latent_entropy
68593+# define __latent_entropy
68594+#endif
68595+
68596 /*
68597 * Tell gcc if a function is cold. The compiler will assume any path
68598 * directly leading to the call is unlikely.
68599@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68600 #define __cold
68601 #endif
68602
68603+#ifndef __alloc_size
68604+#define __alloc_size(...)
68605+#endif
68606+
68607+#ifndef __bos
68608+#define __bos(ptr, arg)
68609+#endif
68610+
68611+#ifndef __bos0
68612+#define __bos0(ptr)
68613+#endif
68614+
68615+#ifndef __bos1
68616+#define __bos1(ptr)
68617+#endif
68618+
68619 /* Simple shorthand for a section definition */
68620 #ifndef __section
68621 # define __section(S) __attribute__ ((__section__(#S)))
68622@@ -323,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68623 * use is to mediate communication between process-level code and irq/NMI
68624 * handlers, all running on the same CPU.
68625 */
68626-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
68627+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
68628+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
68629
68630 #endif /* __LINUX_COMPILER_H */
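
The ACCESS_ONCE() change is a small but effective tripwire: adding const to the cast means the plain macro can now only read, and any write through it fails to compile until converted to the new ACCESS_ONCE_RW(), making every lockless store explicit and greppable. Illustration:

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    static int flag;

    static void demo(void)
    {
            int v = ACCESS_ONCE(flag);     /* reads are unchanged     */
            ACCESS_ONCE_RW(flag) = v + 1;  /* writes need the RW form */
            /* ACCESS_ONCE(flag) = 0;  <- would now fail to compile:
             *                            assignment to const         */
    }
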
68631diff --git a/include/linux/completion.h b/include/linux/completion.h
68632index 51494e6..0fd1b61 100644
68633--- a/include/linux/completion.h
68634+++ b/include/linux/completion.h
68635@@ -78,13 +78,13 @@ static inline void init_completion(struct completion *x)
68636
68637 extern void wait_for_completion(struct completion *);
68638 extern int wait_for_completion_interruptible(struct completion *x);
68639-extern int wait_for_completion_killable(struct completion *x);
68640+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
68641 extern unsigned long wait_for_completion_timeout(struct completion *x,
68642 unsigned long timeout);
68643 extern long wait_for_completion_interruptible_timeout(
68644- struct completion *x, unsigned long timeout);
68645+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68646 extern long wait_for_completion_killable_timeout(
68647- struct completion *x, unsigned long timeout);
68648+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68649 extern bool try_wait_for_completion(struct completion *x);
68650 extern bool completion_done(struct completion *x);
68651
68652diff --git a/include/linux/configfs.h b/include/linux/configfs.h
68653index 34025df..d94bbbc 100644
68654--- a/include/linux/configfs.h
68655+++ b/include/linux/configfs.h
68656@@ -125,7 +125,7 @@ struct configfs_attribute {
68657 const char *ca_name;
68658 struct module *ca_owner;
68659 umode_t ca_mode;
68660-};
68661+} __do_const;
68662
68663 /*
68664 * Users often need to create attribute structures for their configurable
68665diff --git a/include/linux/cpu.h b/include/linux/cpu.h
68666index ce7a074..01ab8ac 100644
68667--- a/include/linux/cpu.h
68668+++ b/include/linux/cpu.h
68669@@ -115,7 +115,7 @@ enum {
68670 /* Need to know about CPUs going up/down? */
68671 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
68672 #define cpu_notifier(fn, pri) { \
68673- static struct notifier_block fn##_nb __cpuinitdata = \
68674+ static struct notifier_block fn##_nb = \
68675 { .notifier_call = fn, .priority = pri }; \
68676 register_cpu_notifier(&fn##_nb); \
68677 }
68678diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
68679index a55b88e..fba90c5 100644
68680--- a/include/linux/cpufreq.h
68681+++ b/include/linux/cpufreq.h
68682@@ -240,7 +240,7 @@ struct cpufreq_driver {
68683 int (*suspend) (struct cpufreq_policy *policy);
68684 int (*resume) (struct cpufreq_policy *policy);
68685 struct freq_attr **attr;
68686-};
68687+} __do_const;
68688
68689 /* flags */
68690
68691@@ -299,6 +299,7 @@ struct global_attr {
68692 ssize_t (*store)(struct kobject *a, struct attribute *b,
68693 const char *c, size_t count);
68694 };
68695+typedef struct global_attr __no_const global_attr_no_const;
68696
68697 #define define_one_global_ro(_name) \
68698 static struct global_attr _name = \
68699diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
68700index 24cd1037..20a63aae 100644
68701--- a/include/linux/cpuidle.h
68702+++ b/include/linux/cpuidle.h
68703@@ -54,7 +54,8 @@ struct cpuidle_state {
68704 int index);
68705
68706 int (*enter_dead) (struct cpuidle_device *dev, int index);
68707-};
68708+} __do_const;
68709+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
68710
68711 /* Idle State Flags */
68712 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
68713@@ -216,7 +217,7 @@ struct cpuidle_governor {
68714 void (*reflect) (struct cpuidle_device *dev, int index);
68715
68716 struct module *owner;
68717-};
68718+} __do_const;
68719
68720 #ifdef CONFIG_CPU_IDLE
68721
68722diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
68723index 0325602..5e9feff 100644
68724--- a/include/linux/cpumask.h
68725+++ b/include/linux/cpumask.h
68726@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68727 }
68728
68729 /* Valid inputs for n are -1 and 0. */
68730-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68731+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68732 {
68733 return n+1;
68734 }
68735
68736-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68737+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68738 {
68739 return n+1;
68740 }
68741
68742-static inline unsigned int cpumask_next_and(int n,
68743+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
68744 const struct cpumask *srcp,
68745 const struct cpumask *andp)
68746 {
68747@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68748 *
68749 * Returns >= nr_cpu_ids if no further cpus set.
68750 */
68751-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68752+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68753 {
68754 /* -1 is a legal arg here. */
68755 if (n != -1)
68756@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68757 *
68758 * Returns >= nr_cpu_ids if no further cpus unset.
68759 */
68760-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68761+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68762 {
68763 /* -1 is a legal arg here. */
68764 if (n != -1)
68765@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68766 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
68767 }
68768
68769-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
68770+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
68771 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
68772
68773 /**
68774diff --git a/include/linux/cred.h b/include/linux/cred.h
68775index 04421e8..6bce4ef 100644
68776--- a/include/linux/cred.h
68777+++ b/include/linux/cred.h
68778@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
68779 static inline void validate_process_creds(void)
68780 {
68781 }
68782+static inline void validate_task_creds(struct task_struct *task)
68783+{
68784+}
68785 #endif
68786
68787 /**
68788diff --git a/include/linux/crypto.h b/include/linux/crypto.h
68789index b92eadf..b4ecdc1 100644
68790--- a/include/linux/crypto.h
68791+++ b/include/linux/crypto.h
68792@@ -373,7 +373,7 @@ struct cipher_tfm {
68793 const u8 *key, unsigned int keylen);
68794 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68795 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68796-};
68797+} __no_const;
68798
68799 struct hash_tfm {
68800 int (*init)(struct hash_desc *desc);
68801@@ -394,13 +394,13 @@ struct compress_tfm {
68802 int (*cot_decompress)(struct crypto_tfm *tfm,
68803 const u8 *src, unsigned int slen,
68804 u8 *dst, unsigned int *dlen);
68805-};
68806+} __no_const;
68807
68808 struct rng_tfm {
68809 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
68810 unsigned int dlen);
68811 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
68812-};
68813+} __no_const;
68814
68815 #define crt_ablkcipher crt_u.ablkcipher
68816 #define crt_aead crt_u.aead
68817diff --git a/include/linux/ctype.h b/include/linux/ctype.h
68818index 8acfe31..6ffccd63 100644
68819--- a/include/linux/ctype.h
68820+++ b/include/linux/ctype.h
68821@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
68822 * Fast implementation of tolower() for internal usage. Do not use in your
68823 * code.
68824 */
68825-static inline char _tolower(const char c)
68826+static inline unsigned char _tolower(const unsigned char c)
68827 {
68828 return c | 0x20;
68829 }
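
The _tolower() signedness fix matters on the many platforms where plain char is signed: a byte with the high bit set arrives as a negative value, and the old version handed a negative result straight back. A userspace demonstration of the difference:

    #include <stdio.h>

    static char old_tolower(const char c)                   { return c | 0x20; }
    static unsigned char new_tolower(const unsigned char c) { return c | 0x20; }

    int main(void)
    {
            char byte = (char)0xC0;  /* e.g. first byte of UTF-8 'A-grave' */

            /* On signed-char platforms the old form prints -32;
             * the new form always prints 224 (0xE0). */
            printf("old: %d new: %u\n",
                   (int)old_tolower(byte), (unsigned)new_tolower(byte));
            return 0;
    }
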
68830diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
68831index 7925bf0..d5143d2 100644
68832--- a/include/linux/decompress/mm.h
68833+++ b/include/linux/decompress/mm.h
68834@@ -77,7 +77,7 @@ static void free(void *where)
68835 * warnings when not needed (indeed large_malloc / large_free are not
68836 * needed by inflate */
68837
68838-#define malloc(a) kmalloc(a, GFP_KERNEL)
68839+#define malloc(a) kmalloc((a), GFP_KERNEL)
68840 #define free(a) kfree(a)
68841
68842 #define large_malloc(a) vmalloc(a)
68843diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
68844index e83ef39..33e0eb3 100644
68845--- a/include/linux/devfreq.h
68846+++ b/include/linux/devfreq.h
68847@@ -114,7 +114,7 @@ struct devfreq_governor {
68848 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
68849 int (*event_handler)(struct devfreq *devfreq,
68850 unsigned int event, void *data);
68851-};
68852+} __do_const;
68853
68854 /**
68855 * struct devfreq - Device devfreq structure
68856diff --git a/include/linux/device.h b/include/linux/device.h
68857index 43dcda9..7a1fb65 100644
68858--- a/include/linux/device.h
68859+++ b/include/linux/device.h
68860@@ -294,7 +294,7 @@ struct subsys_interface {
68861 struct list_head node;
68862 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
68863 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
68864-};
68865+} __do_const;
68866
68867 int subsys_interface_register(struct subsys_interface *sif);
68868 void subsys_interface_unregister(struct subsys_interface *sif);
68869@@ -474,7 +474,7 @@ struct device_type {
68870 void (*release)(struct device *dev);
68871
68872 const struct dev_pm_ops *pm;
68873-};
68874+} __do_const;
68875
68876 /* interface for exporting device attributes */
68877 struct device_attribute {
68878@@ -484,11 +484,12 @@ struct device_attribute {
68879 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
68880 const char *buf, size_t count);
68881 };
68882+typedef struct device_attribute __no_const device_attribute_no_const;
68883
68884 struct dev_ext_attribute {
68885 struct device_attribute attr;
68886 void *var;
68887-};
68888+} __do_const;
68889
68890 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
68891 char *buf);
68892diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
68893index 94af418..b1ca7a2 100644
68894--- a/include/linux/dma-mapping.h
68895+++ b/include/linux/dma-mapping.h
68896@@ -54,7 +54,7 @@ struct dma_map_ops {
68897 u64 (*get_required_mask)(struct device *dev);
68898 #endif
68899 int is_phys;
68900-};
68901+} __do_const;
68902
68903 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
68904
68905diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
68906index d3201e4..8281e63 100644
68907--- a/include/linux/dmaengine.h
68908+++ b/include/linux/dmaengine.h
68909@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
68910 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
68911 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
68912
68913-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68914+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68915 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
68916-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68917+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68918 struct dma_pinned_list *pinned_list, struct page *page,
68919 unsigned int offset, size_t len);
68920
68921diff --git a/include/linux/efi.h b/include/linux/efi.h
68922index 7a9498a..155713d 100644
68923--- a/include/linux/efi.h
68924+++ b/include/linux/efi.h
68925@@ -733,6 +733,7 @@ struct efivar_operations {
68926 efi_set_variable_t *set_variable;
68927 efi_query_variable_info_t *query_variable_info;
68928 };
68929+typedef struct efivar_operations __no_const efivar_operations_no_const;
68930
68931 struct efivars {
68932 /*
68933diff --git a/include/linux/elf.h b/include/linux/elf.h
68934index 8c9048e..16a4665 100644
68935--- a/include/linux/elf.h
68936+++ b/include/linux/elf.h
68937@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
68938 #define elf_note elf32_note
68939 #define elf_addr_t Elf32_Off
68940 #define Elf_Half Elf32_Half
68941+#define elf_dyn Elf32_Dyn
68942
68943 #else
68944
68945@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
68946 #define elf_note elf64_note
68947 #define elf_addr_t Elf64_Off
68948 #define Elf_Half Elf64_Half
68949+#define elf_dyn Elf64_Dyn
68950
68951 #endif
68952
68953diff --git a/include/linux/err.h b/include/linux/err.h
68954index f2edce2..cc2082c 100644
68955--- a/include/linux/err.h
68956+++ b/include/linux/err.h
68957@@ -19,12 +19,12 @@
68958
68959 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
68960
68961-static inline void * __must_check ERR_PTR(long error)
68962+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
68963 {
68964 return (void *) error;
68965 }
68966
68967-static inline long __must_check PTR_ERR(const void *ptr)
68968+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
68969 {
68970 return (long) ptr;
68971 }
68972diff --git a/include/linux/extcon.h b/include/linux/extcon.h
68973index fcb51c8..bdafcf6 100644
68974--- a/include/linux/extcon.h
68975+++ b/include/linux/extcon.h
68976@@ -134,7 +134,7 @@ struct extcon_dev {
68977 /* /sys/class/extcon/.../mutually_exclusive/... */
68978 struct attribute_group attr_g_muex;
68979 struct attribute **attrs_muex;
68980- struct device_attribute *d_attrs_muex;
68981+ device_attribute_no_const *d_attrs_muex;
68982 };
68983
68984 /**
68985diff --git a/include/linux/fb.h b/include/linux/fb.h
68986index c7a9571..02eeffe 100644
68987--- a/include/linux/fb.h
68988+++ b/include/linux/fb.h
68989@@ -302,7 +302,7 @@ struct fb_ops {
68990 /* called at KDB enter and leave time to prepare the console */
68991 int (*fb_debug_enter)(struct fb_info *info);
68992 int (*fb_debug_leave)(struct fb_info *info);
68993-};
68994+} __do_const;
68995
68996 #ifdef CONFIG_FB_TILEBLITTING
68997 #define FB_TILE_CURSOR_NONE 0
68998diff --git a/include/linux/filter.h b/include/linux/filter.h
68999index c45eabc..baa0be5 100644
69000--- a/include/linux/filter.h
69001+++ b/include/linux/filter.h
69002@@ -20,6 +20,7 @@ struct compat_sock_fprog {
69003
69004 struct sk_buff;
69005 struct sock;
69006+struct bpf_jit_work;
69007
69008 struct sk_filter
69009 {
69010@@ -27,6 +28,9 @@ struct sk_filter
69011 unsigned int len; /* Number of filter blocks */
69012 unsigned int (*bpf_func)(const struct sk_buff *skb,
69013 const struct sock_filter *filter);
69014+#ifdef CONFIG_BPF_JIT
69015+ struct bpf_jit_work *work;
69016+#endif
69017 struct rcu_head rcu;
69018 struct sock_filter insns[0];
69019 };
69020diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
69021index 3044254..9767f41 100644
69022--- a/include/linux/frontswap.h
69023+++ b/include/linux/frontswap.h
69024@@ -11,7 +11,7 @@ struct frontswap_ops {
69025 int (*load)(unsigned, pgoff_t, struct page *);
69026 void (*invalidate_page)(unsigned, pgoff_t);
69027 void (*invalidate_area)(unsigned);
69028-};
69029+} __no_const;
69030
69031 extern bool frontswap_enabled;
69032 extern struct frontswap_ops
69033diff --git a/include/linux/fs.h b/include/linux/fs.h
69034index 7617ee0..b575199 100644
69035--- a/include/linux/fs.h
69036+++ b/include/linux/fs.h
69037@@ -1541,7 +1541,8 @@ struct file_operations {
69038 long (*fallocate)(struct file *file, int mode, loff_t offset,
69039 loff_t len);
69040 int (*show_fdinfo)(struct seq_file *m, struct file *f);
69041-};
69042+} __do_const;
69043+typedef struct file_operations __no_const file_operations_no_const;
69044
69045 struct inode_operations {
69046 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
69047@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
69048 inode->i_flags |= S_NOSEC;
69049 }
69050
69051+static inline bool is_sidechannel_device(const struct inode *inode)
69052+{
69053+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
69054+ umode_t mode = inode->i_mode;
69055+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
69056+#else
69057+ return false;
69058+#endif
69059+}
69060+
69061 #endif /* _LINUX_FS_H */
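
is_sidechannel_device() matches device nodes that other users may read or write, e.g. a 0666 tty; GRKERNSEC_DEVICE_SIDECHANNEL uses it (see the fsnotify.h hunks below) to keep accesses to such shared devices from being observable by other users. Concretely, what the predicate evaluates:

#include <linux/stat.h>
#include <linux/types.h>

static bool demo_sidechannel(void)
{
	umode_t mode = S_IFCHR | 0666;	/* world-read/writable char device */

	return (S_ISCHR(mode) || S_ISBLK(mode)) &&
	       (mode & (S_IROTH | S_IWOTH));	/* true: side-channel candidate */
}
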
69062diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
69063index 324f931..f292b65 100644
69064--- a/include/linux/fs_struct.h
69065+++ b/include/linux/fs_struct.h
69066@@ -6,7 +6,7 @@
69067 #include <linux/seqlock.h>
69068
69069 struct fs_struct {
69070- int users;
69071+ atomic_t users;
69072 spinlock_t lock;
69073 seqcount_t seq;
69074 int umask;
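
Making fs_struct.users an atomic_t lets references be taken and dropped without holding fs->lock, and places the counter under the PaX refcount overflow checks. The call-site change (applied in the kernel/fork.c and fs/fs_struct.c hunks of this patch) is mechanical; in outline:

/* before: counter only mutated under the spinlock */
spin_lock(&fs->lock);
fs->users++;
spin_unlock(&fs->lock);

/* after: lock-free and overflow-checked */
atomic_inc(&fs->users);
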
69075diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
69076index 5dfa0aa..6acf322 100644
69077--- a/include/linux/fscache-cache.h
69078+++ b/include/linux/fscache-cache.h
69079@@ -112,7 +112,7 @@ struct fscache_operation {
69080 fscache_operation_release_t release;
69081 };
69082
69083-extern atomic_t fscache_op_debug_id;
69084+extern atomic_unchecked_t fscache_op_debug_id;
69085 extern void fscache_op_work_func(struct work_struct *work);
69086
69087 extern void fscache_enqueue_operation(struct fscache_operation *);
69088@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
69089 INIT_WORK(&op->work, fscache_op_work_func);
69090 atomic_set(&op->usage, 1);
69091 op->state = FSCACHE_OP_ST_INITIALISED;
69092- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
69093+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
69094 op->processor = processor;
69095 op->release = release;
69096 INIT_LIST_HEAD(&op->pend_link);
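
fscache_op_debug_id only hands out debug IDs, so wraparound is harmless; under PAX_REFCOUNT a plain atomic_t would trap on overflow, hence the _unchecked variant. The distinction in a nutshell:

#include <linux/atomic.h>

/* atomic_unchecked_t and the *_unchecked() helpers exist once this
 * patch is applied */
static atomic_t refs = ATOMIC_INIT(1);		/* refcount: overflow must trap */
static atomic_unchecked_t ids = ATOMIC_INIT(0);	/* ID counter: free to wrap */

static void demo(void)
{
	atomic_inc(&refs);				/* checked by PAX_REFCOUNT */
	(void)atomic_inc_return_unchecked(&ids);	/* explicitly exempt */
}
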
69097diff --git a/include/linux/fscache.h b/include/linux/fscache.h
69098index 7a08623..4c07b0f 100644
69099--- a/include/linux/fscache.h
69100+++ b/include/linux/fscache.h
69101@@ -152,7 +152,7 @@ struct fscache_cookie_def {
69102 * - this is mandatory for any object that may have data
69103 */
69104 void (*now_uncached)(void *cookie_netfs_data);
69105-};
69106+} __do_const;
69107
69108 /*
69109 * fscache cached network filesystem type
69110diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
69111index 0fbfb46..508eb0d 100644
69112--- a/include/linux/fsnotify.h
69113+++ b/include/linux/fsnotify.h
69114@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
69115 struct inode *inode = path->dentry->d_inode;
69116 __u32 mask = FS_ACCESS;
69117
69118+ if (is_sidechannel_device(inode))
69119+ return;
69120+
69121 if (S_ISDIR(inode->i_mode))
69122 mask |= FS_ISDIR;
69123
69124@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
69125 struct inode *inode = path->dentry->d_inode;
69126 __u32 mask = FS_MODIFY;
69127
69128+ if (is_sidechannel_device(inode))
69129+ return;
69130+
69131 if (S_ISDIR(inode->i_mode))
69132 mask |= FS_ISDIR;
69133
69134@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
69135 */
69136 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
69137 {
69138- return kstrdup(name, GFP_KERNEL);
69139+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
69140 }
69141
69142 /*
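
These early returns close an fsnotify side channel: without them, any user could watch a world-accessible device node and observe every other user's activity on it. A userspace sketch of the observation being prevented:

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init();

	/* /dev/ptmx is a typical 0666 character device */
	inotify_add_watch(fd, "/dev/ptmx", IN_ACCESS | IN_MODIFY);
	while (read(fd, buf, sizeof(buf)) > 0)
		puts("observed activity");	/* no longer fires with the hunk above */
	return 0;
}
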
69143diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
69144index a3d4895..ddd2a50 100644
69145--- a/include/linux/ftrace_event.h
69146+++ b/include/linux/ftrace_event.h
69147@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
69148 extern int trace_add_event_call(struct ftrace_event_call *call);
69149 extern void trace_remove_event_call(struct ftrace_event_call *call);
69150
69151-#define is_signed_type(type) (((type)(-1)) < 0)
69152+#define is_signed_type(type) (((type)(-1)) < (type)1)
69153
69154 int trace_set_clr_event(const char *system, const char *event, int set);
69155
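
The is_signed_type() change keeps the result identical for every integer type while avoiding the "comparison is always false" warning that comparing an unsigned value against 0 provokes. A quick userspace check of the new form:

#include <assert.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
	assert(is_signed_type(int));		/* (int)-1 < 1 */
	assert(!is_signed_type(unsigned int));	/* UINT_MAX < 1u is false */
	assert(!is_signed_type(_Bool));		/* (_Bool)-1 == 1; 1 < 1 is false */
	return 0;
}
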
69156diff --git a/include/linux/genhd.h b/include/linux/genhd.h
69157index 79b8bba..86b539e 100644
69158--- a/include/linux/genhd.h
69159+++ b/include/linux/genhd.h
69160@@ -194,7 +194,7 @@ struct gendisk {
69161 struct kobject *slave_dir;
69162
69163 struct timer_rand_state *random;
69164- atomic_t sync_io; /* RAID */
69165+ atomic_unchecked_t sync_io; /* RAID */
69166 struct disk_events *ev;
69167 #ifdef CONFIG_BLK_DEV_INTEGRITY
69168 struct blk_integrity *integrity;
69169diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
69170index 023bc34..b02b46a 100644
69171--- a/include/linux/genl_magic_func.h
69172+++ b/include/linux/genl_magic_func.h
69173@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
69174 },
69175
69176 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
69177-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
69178+static struct genl_ops ZZZ_genl_ops[] = {
69179 #include GENL_MAGIC_INCLUDE_FILE
69180 };
69181
69182diff --git a/include/linux/gfp.h b/include/linux/gfp.h
69183index 0f615eb..5c3832f 100644
69184--- a/include/linux/gfp.h
69185+++ b/include/linux/gfp.h
69186@@ -35,6 +35,13 @@ struct vm_area_struct;
69187 #define ___GFP_NO_KSWAPD 0x400000u
69188 #define ___GFP_OTHER_NODE 0x800000u
69189 #define ___GFP_WRITE 0x1000000u
69190+
69191+#ifdef CONFIG_PAX_USERCOPY_SLABS
69192+#define ___GFP_USERCOPY 0x2000000u
69193+#else
69194+#define ___GFP_USERCOPY 0
69195+#endif
69196+
69197 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
69198
69199 /*
69200@@ -92,6 +99,7 @@ struct vm_area_struct;
69201 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
69202 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
69203 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
69204+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
69205
69206 /*
69207 * This may seem redundant, but it's a way of annotating false positives vs.
69208@@ -99,7 +107,7 @@ struct vm_area_struct;
69209 */
69210 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
69211
69212-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
69213+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
69214 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
69215
69216 /* This equals 0, but use constants in case they ever change */
69217@@ -153,6 +161,8 @@ struct vm_area_struct;
69218 /* 4GB DMA on some platforms */
69219 #define GFP_DMA32 __GFP_DMA32
69220
69221+#define GFP_USERCOPY __GFP_USERCOPY
69222+
69223 /* Convert GFP flags to their corresponding migrate type */
69224 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
69225 {
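
___GFP_USERCOPY marks allocations whose contents are expected to cross the user/kernel boundary; with PAX_USERCOPY_SLABS they are segregated into dedicated slab caches so usercopy bounds checks stay exact and kernel-internal objects never share a slab with user-facing buffers. Intended call-site shape (illustrative):

#include <linux/slab.h>

static void *alloc_for_user_io(size_t len)
{
	/* lands in a usercopy slab when PAX_USERCOPY_SLABS=y; since the
	 * flag is defined as 0 otherwise, behavior is unchanged without it */
	return kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
}
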
69226diff --git a/include/linux/gracl.h b/include/linux/gracl.h
69227new file mode 100644
69228index 0000000..ebe6d72
69229--- /dev/null
69230+++ b/include/linux/gracl.h
69231@@ -0,0 +1,319 @@
69232+#ifndef GR_ACL_H
69233+#define GR_ACL_H
69234+
69235+#include <linux/grdefs.h>
69236+#include <linux/resource.h>
69237+#include <linux/capability.h>
69238+#include <linux/dcache.h>
69239+#include <asm/resource.h>
69240+
69241+/* Major status information */
69242+
69243+#define GR_VERSION "grsecurity 2.9.1"
69244+#define GRSECURITY_VERSION 0x2901
69245+
69246+enum {
69247+ GR_SHUTDOWN = 0,
69248+ GR_ENABLE = 1,
69249+ GR_SPROLE = 2,
69250+ GR_RELOAD = 3,
69251+ GR_SEGVMOD = 4,
69252+ GR_STATUS = 5,
69253+ GR_UNSPROLE = 6,
69254+ GR_PASSSET = 7,
69255+ GR_SPROLEPAM = 8,
69256+};
69257+
69258+/* Password setup definitions
69259+ * kernel/grhash.c */
69260+enum {
69261+ GR_PW_LEN = 128,
69262+ GR_SALT_LEN = 16,
69263+ GR_SHA_LEN = 32,
69264+};
69265+
69266+enum {
69267+ GR_SPROLE_LEN = 64,
69268+};
69269+
69270+enum {
69271+ GR_NO_GLOB = 0,
69272+ GR_REG_GLOB,
69273+ GR_CREATE_GLOB
69274+};
69275+
69276+#define GR_NLIMITS 32
69277+
69278+/* Begin Data Structures */
69279+
69280+struct sprole_pw {
69281+ unsigned char *rolename;
69282+ unsigned char salt[GR_SALT_LEN];
69283+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
69284+};
69285+
69286+struct name_entry {
69287+ __u32 key;
69288+ ino_t inode;
69289+ dev_t device;
69290+ char *name;
69291+ __u16 len;
69292+ __u8 deleted;
69293+ struct name_entry *prev;
69294+ struct name_entry *next;
69295+};
69296+
69297+struct inodev_entry {
69298+ struct name_entry *nentry;
69299+ struct inodev_entry *prev;
69300+ struct inodev_entry *next;
69301+};
69302+
69303+struct acl_role_db {
69304+ struct acl_role_label **r_hash;
69305+ __u32 r_size;
69306+};
69307+
69308+struct inodev_db {
69309+ struct inodev_entry **i_hash;
69310+ __u32 i_size;
69311+};
69312+
69313+struct name_db {
69314+ struct name_entry **n_hash;
69315+ __u32 n_size;
69316+};
69317+
69318+struct crash_uid {
69319+ uid_t uid;
69320+ unsigned long expires;
69321+};
69322+
69323+struct gr_hash_struct {
69324+ void **table;
69325+ void **nametable;
69326+ void *first;
69327+ __u32 table_size;
69328+ __u32 used_size;
69329+ int type;
69330+};
69331+
69332+/* Userspace Grsecurity ACL data structures */
69333+
69334+struct acl_subject_label {
69335+ char *filename;
69336+ ino_t inode;
69337+ dev_t device;
69338+ __u32 mode;
69339+ kernel_cap_t cap_mask;
69340+ kernel_cap_t cap_lower;
69341+ kernel_cap_t cap_invert_audit;
69342+
69343+ struct rlimit res[GR_NLIMITS];
69344+ __u32 resmask;
69345+
69346+ __u8 user_trans_type;
69347+ __u8 group_trans_type;
69348+ uid_t *user_transitions;
69349+ gid_t *group_transitions;
69350+ __u16 user_trans_num;
69351+ __u16 group_trans_num;
69352+
69353+ __u32 sock_families[2];
69354+ __u32 ip_proto[8];
69355+ __u32 ip_type;
69356+ struct acl_ip_label **ips;
69357+ __u32 ip_num;
69358+ __u32 inaddr_any_override;
69359+
69360+ __u32 crashes;
69361+ unsigned long expires;
69362+
69363+ struct acl_subject_label *parent_subject;
69364+ struct gr_hash_struct *hash;
69365+ struct acl_subject_label *prev;
69366+ struct acl_subject_label *next;
69367+
69368+ struct acl_object_label **obj_hash;
69369+ __u32 obj_hash_size;
69370+ __u16 pax_flags;
69371+};
69372+
69373+struct role_allowed_ip {
69374+ __u32 addr;
69375+ __u32 netmask;
69376+
69377+ struct role_allowed_ip *prev;
69378+ struct role_allowed_ip *next;
69379+};
69380+
69381+struct role_transition {
69382+ char *rolename;
69383+
69384+ struct role_transition *prev;
69385+ struct role_transition *next;
69386+};
69387+
69388+struct acl_role_label {
69389+ char *rolename;
69390+ uid_t uidgid;
69391+ __u16 roletype;
69392+
69393+ __u16 auth_attempts;
69394+ unsigned long expires;
69395+
69396+ struct acl_subject_label *root_label;
69397+ struct gr_hash_struct *hash;
69398+
69399+ struct acl_role_label *prev;
69400+ struct acl_role_label *next;
69401+
69402+ struct role_transition *transitions;
69403+ struct role_allowed_ip *allowed_ips;
69404+ uid_t *domain_children;
69405+ __u16 domain_child_num;
69406+
69407+ umode_t umask;
69408+
69409+ struct acl_subject_label **subj_hash;
69410+ __u32 subj_hash_size;
69411+};
69412+
69413+struct user_acl_role_db {
69414+ struct acl_role_label **r_table;
69415+ __u32 num_pointers; /* Number of allocations to track */
69416+ __u32 num_roles; /* Number of roles */
69417+ __u32 num_domain_children; /* Number of domain children */
69418+ __u32 num_subjects; /* Number of subjects */
69419+ __u32 num_objects; /* Number of objects */
69420+};
69421+
69422+struct acl_object_label {
69423+ char *filename;
69424+ ino_t inode;
69425+ dev_t device;
69426+ __u32 mode;
69427+
69428+ struct acl_subject_label *nested;
69429+ struct acl_object_label *globbed;
69430+
69431+ /* next two structures not used */
69432+
69433+ struct acl_object_label *prev;
69434+ struct acl_object_label *next;
69435+};
69436+
69437+struct acl_ip_label {
69438+ char *iface;
69439+ __u32 addr;
69440+ __u32 netmask;
69441+ __u16 low, high;
69442+ __u8 mode;
69443+ __u32 type;
69444+ __u32 proto[8];
69445+
69446+ /* next two structures not used */
69447+
69448+ struct acl_ip_label *prev;
69449+ struct acl_ip_label *next;
69450+};
69451+
69452+struct gr_arg {
69453+ struct user_acl_role_db role_db;
69454+ unsigned char pw[GR_PW_LEN];
69455+ unsigned char salt[GR_SALT_LEN];
69456+ unsigned char sum[GR_SHA_LEN];
69457+ unsigned char sp_role[GR_SPROLE_LEN];
69458+ struct sprole_pw *sprole_pws;
69459+ dev_t segv_device;
69460+ ino_t segv_inode;
69461+ uid_t segv_uid;
69462+ __u16 num_sprole_pws;
69463+ __u16 mode;
69464+};
69465+
69466+struct gr_arg_wrapper {
69467+ struct gr_arg *arg;
69468+ __u32 version;
69469+ __u32 size;
69470+};
69471+
69472+struct subject_map {
69473+ struct acl_subject_label *user;
69474+ struct acl_subject_label *kernel;
69475+ struct subject_map *prev;
69476+ struct subject_map *next;
69477+};
69478+
69479+struct acl_subj_map_db {
69480+ struct subject_map **s_hash;
69481+ __u32 s_size;
69482+};
69483+
69484+/* End Data Structures Section */
69485+
69486+/* Hash functions generated by empirical testing by Brad Spengler
69487+ Makes good use of the low bits of the inode. Generally 0-1 times
69488+ in loop for successful match. 0-3 for unsuccessful match.
69489+ Shift/add algorithm with modulus of table size and an XOR*/
69490+
69491+static __inline__ unsigned int
69492+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
69493+{
69494+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
69495+}
69496+
69497+static __inline__ unsigned int
69498+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
69499+{
69500+ return ((const unsigned long)userp % sz);
69501+}
69502+
69503+static __inline__ unsigned int
69504+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
69505+{
69506+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
69507+}
69508+
69509+static __inline__ unsigned int
69510+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
69511+{
69512+ return full_name_hash((const unsigned char *)name, len) % sz;
69513+}
69514+
69515+#define FOR_EACH_ROLE_START(role) \
69516+ role = role_list; \
69517+ while (role) {
69518+
69519+#define FOR_EACH_ROLE_END(role) \
69520+ role = role->prev; \
69521+ }
69522+
69523+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
69524+ subj = NULL; \
69525+ iter = 0; \
69526+ while (iter < role->subj_hash_size) { \
69527+ if (subj == NULL) \
69528+ subj = role->subj_hash[iter]; \
69529+ if (subj == NULL) { \
69530+ iter++; \
69531+ continue; \
69532+ }
69533+
69534+#define FOR_EACH_SUBJECT_END(subj,iter) \
69535+ subj = subj->next; \
69536+ if (subj == NULL) \
69537+ iter++; \
69538+ }
69539+
69540+
69541+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
69542+ subj = role->hash->first; \
69543+ while (subj != NULL) {
69544+
69545+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
69546+ subj = subj->next; \
69547+ }
69548+
69549+#endif
69550+
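
The inline hash functions feed the *_db tables declared earlier in the file, and the FOR_EACH_SUBJECT macros show that bucket chains link through ->next. A lookup against a role's subject hash therefore runs roughly as below (a sketch; the real walkers live in the grsecurity/gracl.c file this patch adds):

static struct acl_subject_label *
lookup_subject(const struct acl_role_label *role, ino_t ino, dev_t dev)
{
	unsigned int idx = gr_fhash(ino, dev, role->subj_hash_size);
	struct acl_subject_label *s = role->subj_hash[idx];

	/* per the comment above, matches sit 0-1 links deep on average */
	while (s && (s->inode != ino || s->device != dev))
		s = s->next;
	return s;
}
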
69551diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
69552new file mode 100644
69553index 0000000..323ecf2
69554--- /dev/null
69555+++ b/include/linux/gralloc.h
69556@@ -0,0 +1,9 @@
69557+#ifndef __GRALLOC_H
69558+#define __GRALLOC_H
69559+
69560+void acl_free_all(void);
69561+int acl_alloc_stack_init(unsigned long size);
69562+void *acl_alloc(unsigned long len);
69563+void *acl_alloc_num(unsigned long num, unsigned long len);
69564+
69565+#endif
69566diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
69567new file mode 100644
69568index 0000000..be66033
69569--- /dev/null
69570+++ b/include/linux/grdefs.h
69571@@ -0,0 +1,140 @@
69572+#ifndef GRDEFS_H
69573+#define GRDEFS_H
69574+
69575+/* Begin grsecurity status declarations */
69576+
69577+enum {
69578+ GR_READY = 0x01,
69579+ GR_STATUS_INIT = 0x00 // disabled state
69580+};
69581+
69582+/* Begin ACL declarations */
69583+
69584+/* Role flags */
69585+
69586+enum {
69587+ GR_ROLE_USER = 0x0001,
69588+ GR_ROLE_GROUP = 0x0002,
69589+ GR_ROLE_DEFAULT = 0x0004,
69590+ GR_ROLE_SPECIAL = 0x0008,
69591+ GR_ROLE_AUTH = 0x0010,
69592+ GR_ROLE_NOPW = 0x0020,
69593+ GR_ROLE_GOD = 0x0040,
69594+ GR_ROLE_LEARN = 0x0080,
69595+ GR_ROLE_TPE = 0x0100,
69596+ GR_ROLE_DOMAIN = 0x0200,
69597+ GR_ROLE_PAM = 0x0400,
69598+ GR_ROLE_PERSIST = 0x0800
69599+};
69600+
69601+/* ACL Subject and Object mode flags */
69602+enum {
69603+ GR_DELETED = 0x80000000
69604+};
69605+
69606+/* ACL Object-only mode flags */
69607+enum {
69608+ GR_READ = 0x00000001,
69609+ GR_APPEND = 0x00000002,
69610+ GR_WRITE = 0x00000004,
69611+ GR_EXEC = 0x00000008,
69612+ GR_FIND = 0x00000010,
69613+ GR_INHERIT = 0x00000020,
69614+ GR_SETID = 0x00000040,
69615+ GR_CREATE = 0x00000080,
69616+ GR_DELETE = 0x00000100,
69617+ GR_LINK = 0x00000200,
69618+ GR_AUDIT_READ = 0x00000400,
69619+ GR_AUDIT_APPEND = 0x00000800,
69620+ GR_AUDIT_WRITE = 0x00001000,
69621+ GR_AUDIT_EXEC = 0x00002000,
69622+ GR_AUDIT_FIND = 0x00004000,
69623+ GR_AUDIT_INHERIT= 0x00008000,
69624+ GR_AUDIT_SETID = 0x00010000,
69625+ GR_AUDIT_CREATE = 0x00020000,
69626+ GR_AUDIT_DELETE = 0x00040000,
69627+ GR_AUDIT_LINK = 0x00080000,
69628+ GR_PTRACERD = 0x00100000,
69629+ GR_NOPTRACE = 0x00200000,
69630+ GR_SUPPRESS = 0x00400000,
69631+ GR_NOLEARN = 0x00800000,
69632+ GR_INIT_TRANSFER= 0x01000000
69633+};
69634+
69635+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
69636+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
69637+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
69638+
69639+/* ACL subject-only mode flags */
69640+enum {
69641+ GR_KILL = 0x00000001,
69642+ GR_VIEW = 0x00000002,
69643+ GR_PROTECTED = 0x00000004,
69644+ GR_LEARN = 0x00000008,
69645+ GR_OVERRIDE = 0x00000010,
69646+ /* just a placeholder, this mode is only used in userspace */
69647+ GR_DUMMY = 0x00000020,
69648+ GR_PROTSHM = 0x00000040,
69649+ GR_KILLPROC = 0x00000080,
69650+ GR_KILLIPPROC = 0x00000100,
69651+ /* just a placeholder, this mode is only used in userspace */
69652+ GR_NOTROJAN = 0x00000200,
69653+ GR_PROTPROCFD = 0x00000400,
69654+ GR_PROCACCT = 0x00000800,
69655+ GR_RELAXPTRACE = 0x00001000,
69656+ //GR_NESTED = 0x00002000,
69657+ GR_INHERITLEARN = 0x00004000,
69658+ GR_PROCFIND = 0x00008000,
69659+ GR_POVERRIDE = 0x00010000,
69660+ GR_KERNELAUTH = 0x00020000,
69661+ GR_ATSECURE = 0x00040000,
69662+ GR_SHMEXEC = 0x00080000
69663+};
69664+
69665+enum {
69666+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
69667+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
69668+ GR_PAX_ENABLE_MPROTECT = 0x0004,
69669+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
69670+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
69671+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
69672+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
69673+ GR_PAX_DISABLE_MPROTECT = 0x0400,
69674+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
69675+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
69676+};
69677+
69678+enum {
69679+ GR_ID_USER = 0x01,
69680+ GR_ID_GROUP = 0x02,
69681+};
69682+
69683+enum {
69684+ GR_ID_ALLOW = 0x01,
69685+ GR_ID_DENY = 0x02,
69686+};
69687+
69688+#define GR_CRASH_RES 31
69689+#define GR_UIDTABLE_MAX 500
69690+
69691+/* begin resource learning section */
69692+enum {
69693+ GR_RLIM_CPU_BUMP = 60,
69694+ GR_RLIM_FSIZE_BUMP = 50000,
69695+ GR_RLIM_DATA_BUMP = 10000,
69696+ GR_RLIM_STACK_BUMP = 1000,
69697+ GR_RLIM_CORE_BUMP = 10000,
69698+ GR_RLIM_RSS_BUMP = 500000,
69699+ GR_RLIM_NPROC_BUMP = 1,
69700+ GR_RLIM_NOFILE_BUMP = 5,
69701+ GR_RLIM_MEMLOCK_BUMP = 50000,
69702+ GR_RLIM_AS_BUMP = 500000,
69703+ GR_RLIM_LOCKS_BUMP = 2,
69704+ GR_RLIM_SIGPENDING_BUMP = 5,
69705+ GR_RLIM_MSGQUEUE_BUMP = 10000,
69706+ GR_RLIM_NICE_BUMP = 1,
69707+ GR_RLIM_RTPRIO_BUMP = 1,
69708+ GR_RLIM_RTTIME_BUMP = 1000000
69709+};
69710+
69711+#endif
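
Object modes combine base permissions with audit bits, and each audit bit sits exactly ten positions above its base flag (GR_AUDIT_READ == GR_READ << 10, and so on), which is what lets GR_AUDITS and to_gr_audit() (declared in grinternal.h below) translate requested access into audit decisions with plain mask arithmetic:

static int demo_audited(void)
{
	/* a read/write object whose reads are audited */
	__u32 mode = GR_READ | GR_WRITE | GR_AUDIT_READ;

	return (mode & GR_AUDITS) != 0;	/* 1: some access to it is audited */
}
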
69712diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
69713new file mode 100644
69714index 0000000..9bb6662
69715--- /dev/null
69716+++ b/include/linux/grinternal.h
69717@@ -0,0 +1,215 @@
69718+#ifndef __GRINTERNAL_H
69719+#define __GRINTERNAL_H
69720+
69721+#ifdef CONFIG_GRKERNSEC
69722+
69723+#include <linux/fs.h>
69724+#include <linux/mnt_namespace.h>
69725+#include <linux/nsproxy.h>
69726+#include <linux/gracl.h>
69727+#include <linux/grdefs.h>
69728+#include <linux/grmsg.h>
69729+
69730+void gr_add_learn_entry(const char *fmt, ...)
69731+ __attribute__ ((format (printf, 1, 2)));
69732+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
69733+ const struct vfsmount *mnt);
69734+__u32 gr_check_create(const struct dentry *new_dentry,
69735+ const struct dentry *parent,
69736+ const struct vfsmount *mnt, const __u32 mode);
69737+int gr_check_protected_task(const struct task_struct *task);
69738+__u32 to_gr_audit(const __u32 reqmode);
69739+int gr_set_acls(const int type);
69740+int gr_apply_subject_to_task(struct task_struct *task);
69741+int gr_acl_is_enabled(void);
69742+char gr_roletype_to_char(void);
69743+
69744+void gr_handle_alertkill(struct task_struct *task);
69745+char *gr_to_filename(const struct dentry *dentry,
69746+ const struct vfsmount *mnt);
69747+char *gr_to_filename1(const struct dentry *dentry,
69748+ const struct vfsmount *mnt);
69749+char *gr_to_filename2(const struct dentry *dentry,
69750+ const struct vfsmount *mnt);
69751+char *gr_to_filename3(const struct dentry *dentry,
69752+ const struct vfsmount *mnt);
69753+
69754+extern int grsec_enable_ptrace_readexec;
69755+extern int grsec_enable_harden_ptrace;
69756+extern int grsec_enable_link;
69757+extern int grsec_enable_fifo;
69758+extern int grsec_enable_execve;
69759+extern int grsec_enable_shm;
69760+extern int grsec_enable_execlog;
69761+extern int grsec_enable_signal;
69762+extern int grsec_enable_audit_ptrace;
69763+extern int grsec_enable_forkfail;
69764+extern int grsec_enable_time;
69765+extern int grsec_enable_rofs;
69766+extern int grsec_enable_chroot_shmat;
69767+extern int grsec_enable_chroot_mount;
69768+extern int grsec_enable_chroot_double;
69769+extern int grsec_enable_chroot_pivot;
69770+extern int grsec_enable_chroot_chdir;
69771+extern int grsec_enable_chroot_chmod;
69772+extern int grsec_enable_chroot_mknod;
69773+extern int grsec_enable_chroot_fchdir;
69774+extern int grsec_enable_chroot_nice;
69775+extern int grsec_enable_chroot_execlog;
69776+extern int grsec_enable_chroot_caps;
69777+extern int grsec_enable_chroot_sysctl;
69778+extern int grsec_enable_chroot_unix;
69779+extern int grsec_enable_symlinkown;
69780+extern kgid_t grsec_symlinkown_gid;
69781+extern int grsec_enable_tpe;
69782+extern kgid_t grsec_tpe_gid;
69783+extern int grsec_enable_tpe_all;
69784+extern int grsec_enable_tpe_invert;
69785+extern int grsec_enable_socket_all;
69786+extern kgid_t grsec_socket_all_gid;
69787+extern int grsec_enable_socket_client;
69788+extern kgid_t grsec_socket_client_gid;
69789+extern int grsec_enable_socket_server;
69790+extern kgid_t grsec_socket_server_gid;
69791+extern kgid_t grsec_audit_gid;
69792+extern int grsec_enable_group;
69793+extern int grsec_enable_audit_textrel;
69794+extern int grsec_enable_log_rwxmaps;
69795+extern int grsec_enable_mount;
69796+extern int grsec_enable_chdir;
69797+extern int grsec_resource_logging;
69798+extern int grsec_enable_blackhole;
69799+extern int grsec_lastack_retries;
69800+extern int grsec_enable_brute;
69801+extern int grsec_lock;
69802+
69803+extern spinlock_t grsec_alert_lock;
69804+extern unsigned long grsec_alert_wtime;
69805+extern unsigned long grsec_alert_fyet;
69806+
69807+extern spinlock_t grsec_audit_lock;
69808+
69809+extern rwlock_t grsec_exec_file_lock;
69810+
69811+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
69812+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
69813+ (tsk)->exec_file->f_vfsmnt) : "/")
69814+
69815+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
69816+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
69817+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
69818+
69819+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
69820+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
69821+ (tsk)->exec_file->f_vfsmnt) : "/")
69822+
69823+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
69824+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
69825+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
69826+
69827+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
69828+
69829+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
69830+
69831+#define GR_CHROOT_CAPS {{ \
69832+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
69833+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
69834+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
69835+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
69836+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
69837+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
69838+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
69839+
69840+#define security_learn(normal_msg,args...) \
69841+({ \
69842+ read_lock(&grsec_exec_file_lock); \
69843+ gr_add_learn_entry(normal_msg "\n", ## args); \
69844+ read_unlock(&grsec_exec_file_lock); \
69845+})
69846+
69847+enum {
69848+ GR_DO_AUDIT,
69849+ GR_DONT_AUDIT,
69850+ /* used for non-audit messages that we shouldn't kill the task on */
69851+ GR_DONT_AUDIT_GOOD
69852+};
69853+
69854+enum {
69855+ GR_TTYSNIFF,
69856+ GR_RBAC,
69857+ GR_RBAC_STR,
69858+ GR_STR_RBAC,
69859+ GR_RBAC_MODE2,
69860+ GR_RBAC_MODE3,
69861+ GR_FILENAME,
69862+ GR_SYSCTL_HIDDEN,
69863+ GR_NOARGS,
69864+ GR_ONE_INT,
69865+ GR_ONE_INT_TWO_STR,
69866+ GR_ONE_STR,
69867+ GR_STR_INT,
69868+ GR_TWO_STR_INT,
69869+ GR_TWO_INT,
69870+ GR_TWO_U64,
69871+ GR_THREE_INT,
69872+ GR_FIVE_INT_TWO_STR,
69873+ GR_TWO_STR,
69874+ GR_THREE_STR,
69875+ GR_FOUR_STR,
69876+ GR_STR_FILENAME,
69877+ GR_FILENAME_STR,
69878+ GR_FILENAME_TWO_INT,
69879+ GR_FILENAME_TWO_INT_STR,
69880+ GR_TEXTREL,
69881+ GR_PTRACE,
69882+ GR_RESOURCE,
69883+ GR_CAP,
69884+ GR_SIG,
69885+ GR_SIG2,
69886+ GR_CRASH1,
69887+ GR_CRASH2,
69888+ GR_PSACCT,
69889+ GR_RWXMAP
69890+};
69891+
69892+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
69893+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
69894+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
69895+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
69896+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
69897+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
69898+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
69899+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
69900+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
69901+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
69902+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
69903+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
69904+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
69905+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
69906+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
69907+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
69908+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
69909+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
69910+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
69911+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
69912+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
69913+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
69914+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
69915+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
69916+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
69917+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
69918+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
69919+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
69920+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
69921+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
69922+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
69923+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
69924+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
69925+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
69926+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
69927+
69928+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
69929+
69930+#endif
69931+
69932+#endif
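
Every gr_log_* helper funnels into gr_log_varargs(), with the enum above telling the logger how to interpret the trailing arguments and a grmsg.h string (next hunk) supplying the format. For example, a resource-limit event would be emitted as:

/* expands to gr_log_varargs(GR_DONT_AUDIT, GR_RESOURCE_MSG, GR_RESOURCE,
 *                           task, wanted, "RLIMIT_NPROC", limit) */
gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG,
		      task, wanted, "RLIMIT_NPROC", limit);
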
69933diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
69934new file mode 100644
69935index 0000000..2bd4c8d
69936--- /dev/null
69937+++ b/include/linux/grmsg.h
69938@@ -0,0 +1,111 @@
69939+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
69940+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
69941+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
69942+#define GR_STOPMOD_MSG "denied modification of module state by "
69943+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
69944+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
69945+#define GR_IOPERM_MSG "denied use of ioperm() by "
69946+#define GR_IOPL_MSG "denied use of iopl() by "
69947+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
69948+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
69949+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
69950+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
69951+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
69952+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
69953+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
69954+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
69955+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
69956+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
69957+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
69958+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
69959+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
69960+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
69961+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
69962+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
69963+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
69964+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
69965+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
69966+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
69967+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
69968+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
69969+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
69970+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
69971+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
69972+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
69973+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
69974+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
69975+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
69976+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
69977+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
69978+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
69979+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
69980+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
69981+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
69982+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
69983+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
69984+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
69985+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
69986+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
69987+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
69988+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
69989+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
69990+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
69991+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
69992+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
69993+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
69994+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
69995+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
69996+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
69997+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
69998+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
69999+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
70000+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
70001+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
70002+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
70003+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
70004+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
70005+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
70006+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
70007+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
70008+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
70009+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
70010+#define GR_FAILFORK_MSG "failed fork with errno %s by "
70011+#define GR_NICE_CHROOT_MSG "denied priority change by "
70012+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
70013+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
70014+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
70015+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
70016+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
70017+#define GR_TIME_MSG "time set by "
70018+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
70019+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
70020+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
70021+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
70022+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
70023+#define GR_BIND_MSG "denied bind() by "
70024+#define GR_CONNECT_MSG "denied connect() by "
70025+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
70026+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
70027+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
70028+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
70029+#define GR_CAP_ACL_MSG "use of %s denied for "
70030+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
70031+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
70032+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
70033+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
70034+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
70035+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
70036+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
70037+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
70038+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
70039+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
70040+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
70041+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
70042+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
70043+#define GR_VM86_MSG "denied use of vm86 by "
70044+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
70045+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
70046+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
70047+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
70048+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
70049+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
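
Note the convention: most of these strings end in "by " because the logger appends a DEFAULTSECMSG-style descriptor of the acting task and its parent to every line. A denied-symlink event, for instance, would reach the log looking roughly like this (path, names, and IDs illustrative):

grsec: not following symlink /tmp/exploit owned by 1000.1000 by /bin/cat[cat:4242] uid/euid:1000/1000 gid/egid:1000/1000, parent /bin/bash[bash:4240] uid/euid:1000/1000 gid/egid:1000/1000
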
70050diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
70051new file mode 100644
70052index 0000000..8da63a4
70053--- /dev/null
70054+++ b/include/linux/grsecurity.h
70055@@ -0,0 +1,242 @@
70056+#ifndef GR_SECURITY_H
70057+#define GR_SECURITY_H
70058+#include <linux/fs.h>
70059+#include <linux/fs_struct.h>
70060+#include <linux/binfmts.h>
70061+#include <linux/gracl.h>
70062+
70063+/* notify of brain-dead configs */
70064+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70065+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
70066+#endif
70067+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
70068+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
70069+#endif
70070+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
70071+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
70072+#endif
70073+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
70074+#error "CONFIG_PAX enabled, but no PaX options are enabled."
70075+#endif
70076+
70077+void gr_handle_brute_attach(unsigned long mm_flags);
70078+void gr_handle_brute_check(void);
70079+void gr_handle_kernel_exploit(void);
70080+int gr_process_user_ban(void);
70081+
70082+char gr_roletype_to_char(void);
70083+
70084+int gr_acl_enable_at_secure(void);
70085+
70086+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
70087+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
70088+
70089+void gr_del_task_from_ip_table(struct task_struct *p);
70090+
70091+int gr_pid_is_chrooted(struct task_struct *p);
70092+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
70093+int gr_handle_chroot_nice(void);
70094+int gr_handle_chroot_sysctl(const int op);
70095+int gr_handle_chroot_setpriority(struct task_struct *p,
70096+ const int niceval);
70097+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
70098+int gr_handle_chroot_chroot(const struct dentry *dentry,
70099+ const struct vfsmount *mnt);
70100+void gr_handle_chroot_chdir(struct path *path);
70101+int gr_handle_chroot_chmod(const struct dentry *dentry,
70102+ const struct vfsmount *mnt, const int mode);
70103+int gr_handle_chroot_mknod(const struct dentry *dentry,
70104+ const struct vfsmount *mnt, const int mode);
70105+int gr_handle_chroot_mount(const struct dentry *dentry,
70106+ const struct vfsmount *mnt,
70107+ const char *dev_name);
70108+int gr_handle_chroot_pivot(void);
70109+int gr_handle_chroot_unix(const pid_t pid);
70110+
70111+int gr_handle_rawio(const struct inode *inode);
70112+
70113+void gr_handle_ioperm(void);
70114+void gr_handle_iopl(void);
70115+
70116+umode_t gr_acl_umask(void);
70117+
70118+int gr_tpe_allow(const struct file *file);
70119+
70120+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
70121+void gr_clear_chroot_entries(struct task_struct *task);
70122+
70123+void gr_log_forkfail(const int retval);
70124+void gr_log_timechange(void);
70125+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
70126+void gr_log_chdir(const struct dentry *dentry,
70127+ const struct vfsmount *mnt);
70128+void gr_log_chroot_exec(const struct dentry *dentry,
70129+ const struct vfsmount *mnt);
70130+void gr_log_remount(const char *devname, const int retval);
70131+void gr_log_unmount(const char *devname, const int retval);
70132+void gr_log_mount(const char *from, const char *to, const int retval);
70133+void gr_log_textrel(struct vm_area_struct *vma);
70134+void gr_log_rwxmmap(struct file *file);
70135+void gr_log_rwxmprotect(struct file *file);
70136+
70137+int gr_handle_follow_link(const struct inode *parent,
70138+ const struct inode *inode,
70139+ const struct dentry *dentry,
70140+ const struct vfsmount *mnt);
70141+int gr_handle_fifo(const struct dentry *dentry,
70142+ const struct vfsmount *mnt,
70143+ const struct dentry *dir, const int flag,
70144+ const int acc_mode);
70145+int gr_handle_hardlink(const struct dentry *dentry,
70146+ const struct vfsmount *mnt,
70147+ struct inode *inode,
70148+ const int mode, const struct filename *to);
70149+
70150+int gr_is_capable(const int cap);
70151+int gr_is_capable_nolog(const int cap);
70152+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
70153+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
70154+
70155+void gr_copy_label(struct task_struct *tsk);
70156+void gr_handle_crash(struct task_struct *task, const int sig);
70157+int gr_handle_signal(const struct task_struct *p, const int sig);
70158+int gr_check_crash_uid(const kuid_t uid);
70159+int gr_check_protected_task(const struct task_struct *task);
70160+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
70161+int gr_acl_handle_mmap(const struct file *file,
70162+ const unsigned long prot);
70163+int gr_acl_handle_mprotect(const struct file *file,
70164+ const unsigned long prot);
70165+int gr_check_hidden_task(const struct task_struct *tsk);
70166+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
70167+ const struct vfsmount *mnt);
70168+__u32 gr_acl_handle_utime(const struct dentry *dentry,
70169+ const struct vfsmount *mnt);
70170+__u32 gr_acl_handle_access(const struct dentry *dentry,
70171+ const struct vfsmount *mnt, const int fmode);
70172+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
70173+ const struct vfsmount *mnt, umode_t *mode);
70174+__u32 gr_acl_handle_chown(const struct dentry *dentry,
70175+ const struct vfsmount *mnt);
70176+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
70177+ const struct vfsmount *mnt);
70178+int gr_handle_ptrace(struct task_struct *task, const long request);
70179+int gr_handle_proc_ptrace(struct task_struct *task);
70180+__u32 gr_acl_handle_execve(const struct dentry *dentry,
70181+ const struct vfsmount *mnt);
70182+int gr_check_crash_exec(const struct file *filp);
70183+int gr_acl_is_enabled(void);
70184+void gr_set_kernel_label(struct task_struct *task);
70185+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
70186+ const kgid_t gid);
70187+int gr_set_proc_label(const struct dentry *dentry,
70188+ const struct vfsmount *mnt,
70189+ const int unsafe_flags);
70190+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
70191+ const struct vfsmount *mnt);
70192+__u32 gr_acl_handle_open(const struct dentry *dentry,
70193+ const struct vfsmount *mnt, int acc_mode);
70194+__u32 gr_acl_handle_creat(const struct dentry *dentry,
70195+ const struct dentry *p_dentry,
70196+ const struct vfsmount *p_mnt,
70197+ int open_flags, int acc_mode, const int imode);
70198+void gr_handle_create(const struct dentry *dentry,
70199+ const struct vfsmount *mnt);
70200+void gr_handle_proc_create(const struct dentry *dentry,
70201+ const struct inode *inode);
70202+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
70203+ const struct dentry *parent_dentry,
70204+ const struct vfsmount *parent_mnt,
70205+ const int mode);
70206+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
70207+ const struct dentry *parent_dentry,
70208+ const struct vfsmount *parent_mnt);
70209+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
70210+ const struct vfsmount *mnt);
70211+void gr_handle_delete(const ino_t ino, const dev_t dev);
70212+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
70213+ const struct vfsmount *mnt);
70214+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
70215+ const struct dentry *parent_dentry,
70216+ const struct vfsmount *parent_mnt,
70217+ const struct filename *from);
70218+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
70219+ const struct dentry *parent_dentry,
70220+ const struct vfsmount *parent_mnt,
70221+ const struct dentry *old_dentry,
70222+ const struct vfsmount *old_mnt, const struct filename *to);
70223+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
70224+int gr_acl_handle_rename(struct dentry *new_dentry,
70225+ struct dentry *parent_dentry,
70226+ const struct vfsmount *parent_mnt,
70227+ struct dentry *old_dentry,
70228+ struct inode *old_parent_inode,
70229+ struct vfsmount *old_mnt, const struct filename *newname);
70230+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
70231+ struct dentry *old_dentry,
70232+ struct dentry *new_dentry,
70233+ struct vfsmount *mnt, const __u8 replace);
70234+__u32 gr_check_link(const struct dentry *new_dentry,
70235+ const struct dentry *parent_dentry,
70236+ const struct vfsmount *parent_mnt,
70237+ const struct dentry *old_dentry,
70238+ const struct vfsmount *old_mnt);
70239+int gr_acl_handle_filldir(const struct file *file, const char *name,
70240+ const unsigned int namelen, const ino_t ino);
70241+
70242+__u32 gr_acl_handle_unix(const struct dentry *dentry,
70243+ const struct vfsmount *mnt);
70244+void gr_acl_handle_exit(void);
70245+void gr_acl_handle_psacct(struct task_struct *task, const long code);
70246+int gr_acl_handle_procpidmem(const struct task_struct *task);
70247+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
70248+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
70249+void gr_audit_ptrace(struct task_struct *task);
70250+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
70251+void gr_put_exec_file(struct task_struct *task);
70252+
70253+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
70254+
70255+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
70256+extern void gr_learn_resource(const struct task_struct *task, const int res,
70257+ const unsigned long wanted, const int gt);
70258+#else
70259+static inline void gr_learn_resource(const struct task_struct *task, const int res,
70260+ const unsigned long wanted, const int gt)
70261+{
70262+}
70263+#endif
70264+
70265+#ifdef CONFIG_GRKERNSEC_RESLOG
70266+extern void gr_log_resource(const struct task_struct *task, const int res,
70267+ const unsigned long wanted, const int gt);
70268+#else
70269+static inline void gr_log_resource(const struct task_struct *task, const int res,
70270+ const unsigned long wanted, const int gt)
70271+{
70272+}
70273+#endif
70274+
70275+#ifdef CONFIG_GRKERNSEC
70276+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
70277+void gr_handle_vm86(void);
70278+void gr_handle_mem_readwrite(u64 from, u64 to);
70279+
70280+void gr_log_badprocpid(const char *entry);
70281+
70282+extern int grsec_enable_dmesg;
70283+extern int grsec_disable_privio;
70284+
70285+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70286+extern kgid_t grsec_proc_gid;
70287+#endif
70288+
70289+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
70290+extern int grsec_enable_chroot_findtask;
70291+#endif
70292+#ifdef CONFIG_GRKERNSEC_SETXID
70293+extern int grsec_enable_setxid;
70294+#endif
70295+#endif
70296+
70297+#endif
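
All of these hooks follow the shape shown by gr_learn_resource()/gr_log_resource() just above: a real implementation when the matching CONFIG_GRKERNSEC option is on, an empty inline stub otherwise, so the call sites this patch scatters through the core kernel stay unconditional, e.g. (argument names illustrative):

/* compiles away entirely when resource learning/logging is disabled */
gr_learn_resource(current, RLIMIT_NOFILE, nr_fds, 1);
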
70298diff --git a/include/linux/grsock.h b/include/linux/grsock.h
70299new file mode 100644
70300index 0000000..e7ffaaf
70301--- /dev/null
70302+++ b/include/linux/grsock.h
70303@@ -0,0 +1,19 @@
70304+#ifndef __GRSOCK_H
70305+#define __GRSOCK_H
70306+
70307+extern void gr_attach_curr_ip(const struct sock *sk);
70308+extern int gr_handle_sock_all(const int family, const int type,
70309+ const int protocol);
70310+extern int gr_handle_sock_server(const struct sockaddr *sck);
70311+extern int gr_handle_sock_server_other(const struct sock *sck);
70312+extern int gr_handle_sock_client(const struct sockaddr *sck);
70313+extern int gr_search_connect(struct socket * sock,
70314+ struct sockaddr_in * addr);
70315+extern int gr_search_bind(struct socket * sock,
70316+ struct sockaddr_in * addr);
70317+extern int gr_search_listen(struct socket * sock);
70318+extern int gr_search_accept(struct socket * sock);
70319+extern int gr_search_socket(const int domain, const int type,
70320+ const int protocol);
70321+
70322+#endif
70323diff --git a/include/linux/highmem.h b/include/linux/highmem.h
70324index ef788b5..ac41b7b 100644
70325--- a/include/linux/highmem.h
70326+++ b/include/linux/highmem.h
70327@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
70328 kunmap_atomic(kaddr);
70329 }
70330
70331+static inline void sanitize_highpage(struct page *page)
70332+{
70333+ void *kaddr;
70334+ unsigned long flags;
70335+
70336+ local_irq_save(flags);
70337+ kaddr = kmap_atomic(page);
70338+ clear_page(kaddr);
70339+ kunmap_atomic(kaddr);
70340+ local_irq_restore(flags);
70341+}
70342+
70343 static inline void zero_user_segments(struct page *page,
70344 unsigned start1, unsigned end1,
70345 unsigned start2, unsigned end2)
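
sanitize_highpage() backs PAX_MEMORY_SANITIZE: pages are scrubbed on their way back to the allocator so freed data cannot leak out later, and the irq-save bracket makes the kmap_atomic use safe from free paths running with interrupts in arbitrary states. The page-allocator hunks of this patch invoke it roughly like:

#ifdef CONFIG_PAX_MEMORY_SANITIZE
	unsigned long i;

	for (i = 0; i < (1UL << order); i++)
		sanitize_highpage(page + i);	/* zero each page at free time */
#endif
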
70346diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
70347index 1c7b89a..7f52502 100644
70348--- a/include/linux/hwmon-sysfs.h
70349+++ b/include/linux/hwmon-sysfs.h
70350@@ -25,7 +25,8 @@
70351 struct sensor_device_attribute{
70352 struct device_attribute dev_attr;
70353 int index;
70354-};
70355+} __do_const;
70356+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
70357 #define to_sensor_dev_attr(_dev_attr) \
70358 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
70359
70360@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
70361 struct device_attribute dev_attr;
70362 u8 index;
70363 u8 nr;
70364-};
70365+} __do_const;
70366 #define to_sensor_dev_attr_2(_dev_attr) \
70367 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
70368
70369diff --git a/include/linux/i2c.h b/include/linux/i2c.h
70370index d0c4db7..61b3577 100644
70371--- a/include/linux/i2c.h
70372+++ b/include/linux/i2c.h
70373@@ -369,6 +369,7 @@ struct i2c_algorithm {
70374 /* To determine what the adapter supports */
70375 u32 (*functionality) (struct i2c_adapter *);
70376 };
70377+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
70378
70379 /*
70380 * i2c_adapter is the structure used to identify a physical i2c bus along
70381diff --git a/include/linux/i2o.h b/include/linux/i2o.h
70382index d23c3c2..eb63c81 100644
70383--- a/include/linux/i2o.h
70384+++ b/include/linux/i2o.h
70385@@ -565,7 +565,7 @@ struct i2o_controller {
70386 struct i2o_device *exec; /* Executive */
70387 #if BITS_PER_LONG == 64
70388 spinlock_t context_list_lock; /* lock for context_list */
70389- atomic_t context_list_counter; /* needed for unique contexts */
70390+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
70391 struct list_head context_list; /* list of context id's
70392 and pointers */
70393 #endif
70394diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
70395index aff7ad8..3942bbd 100644
70396--- a/include/linux/if_pppox.h
70397+++ b/include/linux/if_pppox.h
70398@@ -76,7 +76,7 @@ struct pppox_proto {
70399 int (*ioctl)(struct socket *sock, unsigned int cmd,
70400 unsigned long arg);
70401 struct module *owner;
70402-};
70403+} __do_const;
70404
70405 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
70406 extern void unregister_pppox_proto(int proto_num);
70407diff --git a/include/linux/init.h b/include/linux/init.h
70408index 10ed4f4..8e8490d 100644
70409--- a/include/linux/init.h
70410+++ b/include/linux/init.h
70411@@ -39,9 +39,36 @@
70412 * Also note, that this data cannot be "const".
70413 */
70414
70415+#ifdef MODULE
70416+#define add_init_latent_entropy
70417+#define add_devinit_latent_entropy
70418+#define add_cpuinit_latent_entropy
70419+#define add_meminit_latent_entropy
70420+#else
70421+#define add_init_latent_entropy __latent_entropy
70422+
70423+#ifdef CONFIG_HOTPLUG
70424+#define add_devinit_latent_entropy
70425+#else
70426+#define add_devinit_latent_entropy __latent_entropy
70427+#endif
70428+
70429+#ifdef CONFIG_HOTPLUG_CPU
70430+#define add_cpuinit_latent_entropy
70431+#else
70432+#define add_cpuinit_latent_entropy __latent_entropy
70433+#endif
70434+
70435+#ifdef CONFIG_MEMORY_HOTPLUG
70436+#define add_meminit_latent_entropy
70437+#else
70438+#define add_meminit_latent_entropy __latent_entropy
70439+#endif
70440+#endif
70441+
70442 /* These are for everybody (although not all archs will actually
70443 discard it in modules) */
70444-#define __init __section(.init.text) __cold notrace
70445+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
70446 #define __initdata __section(.init.data)
70447 #define __initconst __constsection(.init.rodata)
70448 #define __exitdata __section(.exit.data)
70449@@ -94,7 +121,7 @@
70450 #define __exit __section(.exit.text) __exitused __cold notrace
70451
70452 /* Used for HOTPLUG_CPU */
70453-#define __cpuinit __section(.cpuinit.text) __cold notrace
70454+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
70455 #define __cpuinitdata __section(.cpuinit.data)
70456 #define __cpuinitconst __constsection(.cpuinit.rodata)
70457 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
70458@@ -102,7 +129,7 @@
70459 #define __cpuexitconst __constsection(.cpuexit.rodata)
70460
70461 /* Used for MEMORY_HOTPLUG */
70462-#define __meminit __section(.meminit.text) __cold notrace
70463+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
70464 #define __meminitdata __section(.meminit.data)
70465 #define __meminitconst __constsection(.meminit.rodata)
70466 #define __memexit __section(.memexit.text) __exitused __cold notrace
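The hunk above wires the PaX latent_entropy plugin into the init section markers: for built-in code, __init/__cpuinit/__meminit pick up a __latent_entropy attribute unless the matching hotplug option keeps that section live. A minimal sketch of the effect, assuming __latent_entropy is supplied by the plugin (and expands to nothing when the plugin is disabled):

        /* Illustrative only: with the latent_entropy plugin enabled, this
         * boot-time-only function is instrumented to mix its local state
         * into the kernel entropy pool as a side effect of running. */
        static int __init example_probe(void)  /* __init now implies __latent_entropy */
        {
                return 0;
        }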
70467diff --git a/include/linux/init_task.h b/include/linux/init_task.h
70468index 6d087c5..401cab8 100644
70469--- a/include/linux/init_task.h
70470+++ b/include/linux/init_task.h
70471@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
70472
70473 #define INIT_TASK_COMM "swapper"
70474
70475+#ifdef CONFIG_X86
70476+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
70477+#else
70478+#define INIT_TASK_THREAD_INFO
70479+#endif
70480+
70481 /*
70482 * INIT_TASK is used to set up the first task table, touch at
70483 * your own risk!. Base=0, limit=0x1fffff (=2MB)
70484@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
70485 RCU_POINTER_INITIALIZER(cred, &init_cred), \
70486 .comm = INIT_TASK_COMM, \
70487 .thread = INIT_THREAD, \
70488+ INIT_TASK_THREAD_INFO \
70489 .fs = &init_fs, \
70490 .files = &init_files, \
70491 .signal = &init_signals, \
70492diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
70493index 5fa5afe..ac55b25 100644
70494--- a/include/linux/interrupt.h
70495+++ b/include/linux/interrupt.h
70496@@ -430,7 +430,7 @@ enum
70497 /* map softirq index to softirq name. update 'softirq_to_name' in
70498 * kernel/softirq.c when adding a new softirq.
70499 */
70500-extern char *softirq_to_name[NR_SOFTIRQS];
70501+extern const char * const softirq_to_name[NR_SOFTIRQS];
70502
70503 /* softirq mask and active fields moved to irq_cpustat_t in
70504 * asm/hardirq.h to get better cache usage. KAO
70505@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
70506
70507 struct softirq_action
70508 {
70509- void (*action)(struct softirq_action *);
70510-};
70511+ void (*action)(void);
70512+} __no_const;
70513
70514 asmlinkage void do_softirq(void);
70515 asmlinkage void __do_softirq(void);
70516-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
70517+extern void open_softirq(int nr, void (*action)(void));
70518 extern void softirq_init(void);
70519 extern void __raise_softirq_irqoff(unsigned int nr);
70520
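Since no in-tree handler used the softirq_action argument, the patch drops it and marks the table __no_const. A handler registered under the patched API would look like this sketch (TASKLET_SOFTIRQ is reused purely for illustration; names are hypothetical):

        static void example_softirq_action(void)        /* note: no argument */
        {
                /* drain this CPU's pending work */
        }

        static int __init example_softirq_init(void)
        {
                open_softirq(TASKLET_SOFTIRQ, example_softirq_action);
                return 0;
        }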
70521diff --git a/include/linux/iommu.h b/include/linux/iommu.h
70522index f3b99e1..9b73cee 100644
70523--- a/include/linux/iommu.h
70524+++ b/include/linux/iommu.h
70525@@ -101,7 +101,7 @@ struct iommu_ops {
70526 int (*domain_set_attr)(struct iommu_domain *domain,
70527 enum iommu_attr attr, void *data);
70528 unsigned long pgsize_bitmap;
70529-};
70530+} __do_const;
70531
70532 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
70533 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
70534diff --git a/include/linux/ioport.h b/include/linux/ioport.h
70535index 85ac9b9b..e5759ab 100644
70536--- a/include/linux/ioport.h
70537+++ b/include/linux/ioport.h
70538@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
70539 int adjust_resource(struct resource *res, resource_size_t start,
70540 resource_size_t size);
70541 resource_size_t resource_alignment(struct resource *res);
70542-static inline resource_size_t resource_size(const struct resource *res)
70543+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
70544 {
70545 return res->end - res->start + 1;
70546 }
70547diff --git a/include/linux/irq.h b/include/linux/irq.h
70548index fdf2c4a..5332486 100644
70549--- a/include/linux/irq.h
70550+++ b/include/linux/irq.h
70551@@ -328,7 +328,8 @@ struct irq_chip {
70552 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
70553
70554 unsigned long flags;
70555-};
70556+} __do_const;
70557+typedef struct irq_chip __no_const irq_chip_no_const;
70558
70559 /*
70560 * irq_chip specific flags
70561diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
70562index 6883e19..06992b1 100644
70563--- a/include/linux/kallsyms.h
70564+++ b/include/linux/kallsyms.h
70565@@ -15,7 +15,8 @@
70566
70567 struct module;
70568
70569-#ifdef CONFIG_KALLSYMS
70570+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
70571+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
70572 /* Lookup the address for a symbol. Returns 0 if not found. */
70573 unsigned long kallsyms_lookup_name(const char *name);
70574
70575@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
70576 /* Stupid that this does nothing, but I didn't create this mess. */
70577 #define __print_symbol(fmt, addr)
70578 #endif /*CONFIG_KALLSYMS*/
70579+#else /* when included by kallsyms.c, vsnprintf.c, or
70580+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
70581+extern void __print_symbol(const char *fmt, unsigned long address);
70582+extern int sprint_backtrace(char *buffer, unsigned long address);
70583+extern int sprint_symbol(char *buffer, unsigned long address);
70584+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
70585+const char *kallsyms_lookup(unsigned long addr,
70586+ unsigned long *symbolsize,
70587+ unsigned long *offset,
70588+ char **modname, char *namebuf);
70589+#endif
70590
70591 /* This macro allows us to keep printk typechecking */
70592 static __printf(1, 2)
70593diff --git a/include/linux/key-type.h b/include/linux/key-type.h
70594index 518a53a..5e28358 100644
70595--- a/include/linux/key-type.h
70596+++ b/include/linux/key-type.h
70597@@ -125,7 +125,7 @@ struct key_type {
70598 /* internal fields */
70599 struct list_head link; /* link in types list */
70600 struct lock_class_key lock_class; /* key->sem lock class */
70601-};
70602+} __do_const;
70603
70604 extern struct key_type key_type_keyring;
70605
70606diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
70607index 4dff0c6..1ca9b72 100644
70608--- a/include/linux/kgdb.h
70609+++ b/include/linux/kgdb.h
70610@@ -53,7 +53,7 @@ extern int kgdb_connected;
70611 extern int kgdb_io_module_registered;
70612
70613 extern atomic_t kgdb_setting_breakpoint;
70614-extern atomic_t kgdb_cpu_doing_single_step;
70615+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
70616
70617 extern struct task_struct *kgdb_usethread;
70618 extern struct task_struct *kgdb_contthread;
70619@@ -255,7 +255,7 @@ struct kgdb_arch {
70620 void (*correct_hw_break)(void);
70621
70622 void (*enable_nmi)(bool on);
70623-};
70624+} __do_const;
70625
70626 /**
70627 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
70628@@ -280,7 +280,7 @@ struct kgdb_io {
70629 void (*pre_exception) (void);
70630 void (*post_exception) (void);
70631 int is_console;
70632-};
70633+} __do_const;
70634
70635 extern struct kgdb_arch arch_kgdb_ops;
70636
70637diff --git a/include/linux/kmod.h b/include/linux/kmod.h
70638index 5398d58..5883a34 100644
70639--- a/include/linux/kmod.h
70640+++ b/include/linux/kmod.h
70641@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
70642 * usually useless though. */
70643 extern __printf(2, 3)
70644 int __request_module(bool wait, const char *name, ...);
70645+extern __printf(3, 4)
70646+int ___request_module(bool wait, char *param_name, const char *name, ...);
70647 #define request_module(mod...) __request_module(true, mod)
70648 #define request_module_nowait(mod...) __request_module(false, mod)
70649 #define try_then_request_module(x, mod...) \
70650diff --git a/include/linux/kobject.h b/include/linux/kobject.h
70651index 939b112..ed6ed51 100644
70652--- a/include/linux/kobject.h
70653+++ b/include/linux/kobject.h
70654@@ -111,7 +111,7 @@ struct kobj_type {
70655 struct attribute **default_attrs;
70656 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
70657 const void *(*namespace)(struct kobject *kobj);
70658-};
70659+} __do_const;
70660
70661 struct kobj_uevent_env {
70662 char *envp[UEVENT_NUM_ENVP];
70663@@ -134,6 +134,7 @@ struct kobj_attribute {
70664 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
70665 const char *buf, size_t count);
70666 };
70667+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
70668
70669 extern const struct sysfs_ops kobj_sysfs_ops;
70670
70671diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
70672index f66b065..c2c29b4 100644
70673--- a/include/linux/kobject_ns.h
70674+++ b/include/linux/kobject_ns.h
70675@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
70676 const void *(*netlink_ns)(struct sock *sk);
70677 const void *(*initial_ns)(void);
70678 void (*drop_ns)(void *);
70679-};
70680+} __do_const;
70681
70682 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
70683 int kobj_ns_type_registered(enum kobj_ns_type type);
70684diff --git a/include/linux/kref.h b/include/linux/kref.h
70685index 4972e6e..de4d19b 100644
70686--- a/include/linux/kref.h
70687+++ b/include/linux/kref.h
70688@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
70689 static inline int kref_sub(struct kref *kref, unsigned int count,
70690 void (*release)(struct kref *kref))
70691 {
70692- WARN_ON(release == NULL);
70693+ BUG_ON(release == NULL);
70694
70695 if (atomic_sub_and_test((int) count, &kref->refcount)) {
70696 release(kref);
70697diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
70698index ffdf8b7..1f91d0e 100644
70699--- a/include/linux/kvm_host.h
70700+++ b/include/linux/kvm_host.h
70701@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
70702 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
70703 void vcpu_put(struct kvm_vcpu *vcpu);
70704
70705-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70706+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70707 struct module *module);
70708 void kvm_exit(void);
70709
70710@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
70711 struct kvm_guest_debug *dbg);
70712 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
70713
70714-int kvm_arch_init(void *opaque);
70715+int kvm_arch_init(const void *opaque);
70716 void kvm_arch_exit(void);
70717
70718 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
70719diff --git a/include/linux/libata.h b/include/linux/libata.h
70720index 0621bca..24d6851 100644
70721--- a/include/linux/libata.h
70722+++ b/include/linux/libata.h
70723@@ -916,7 +916,7 @@ struct ata_port_operations {
70724 * fields must be pointers.
70725 */
70726 const struct ata_port_operations *inherits;
70727-};
70728+} __do_const;
70729
70730 struct ata_port_info {
70731 unsigned long flags;
70732diff --git a/include/linux/list.h b/include/linux/list.h
70733index cc6d2aa..c10ee83 100644
70734--- a/include/linux/list.h
70735+++ b/include/linux/list.h
70736@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
70737 extern void list_del(struct list_head *entry);
70738 #endif
70739
70740+extern void __pax_list_add(struct list_head *new,
70741+ struct list_head *prev,
70742+ struct list_head *next);
70743+static inline void pax_list_add(struct list_head *new, struct list_head *head)
70744+{
70745+ __pax_list_add(new, head, head->next);
70746+}
70747+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
70748+{
70749+ __pax_list_add(new, head->prev, head);
70750+}
70751+extern void pax_list_del(struct list_head *entry);
70752+
70753 /**
70754 * list_replace - replace old entry by new one
70755 * @old : the element to be replaced
70756@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
70757 INIT_LIST_HEAD(entry);
70758 }
70759
70760+extern void pax_list_del_init(struct list_head *entry);
70761+
70762 /**
70763 * list_move - delete from one list and add as another's head
70764 * @list: the entry to move
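The pax_list_* helpers just added exist for lists whose nodes live in memory that KERNEXEC/constify has made read-only; the out-of-line __pax_list_add()/pax_list_del() (implemented elsewhere in this patch) are assumed to open a temporary write window around the pointer updates. Sketch of the intended use, with illustrative names:

        static LIST_HEAD(example_list);

        /* node embedded in a structure that is otherwise write-protected */
        void example_register(struct list_head *node)
        {
                pax_list_add_tail(node, &example_list);
        }

        void example_unregister(struct list_head *node)
        {
                pax_list_del(node);
        }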
70765diff --git a/include/linux/math64.h b/include/linux/math64.h
70766index b8ba855..0148090 100644
70767--- a/include/linux/math64.h
70768+++ b/include/linux/math64.h
70769@@ -14,7 +14,7 @@
70770 * This is commonly provided by 32bit archs to provide an optimized 64bit
70771 * divide.
70772 */
70773-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70774+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70775 {
70776 *remainder = dividend % divisor;
70777 return dividend / divisor;
70778@@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
70779 #define div64_long(x,y) div_s64((x),(y))
70780
70781 #ifndef div_u64_rem
70782-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70783+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70784 {
70785 *remainder = do_div(dividend, divisor);
70786 return dividend;
70787@@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
70788 * divide.
70789 */
70790 #ifndef div_u64
70791-static inline u64 div_u64(u64 dividend, u32 divisor)
70792+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
70793 {
70794 u32 remainder;
70795 return div_u64_rem(dividend, divisor, &remainder);
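__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin: it marks functions whose arithmetic may wrap by design, so the plugin neither instruments nor reports them. Assuming the attribute is available, a user-side sketch:

        /* wraparound is the defined behaviour here; keep the checker quiet */
        static inline u32 __intentional_overflow(-1) seq_advance(u32 seq, u32 delta)
        {
                return seq + delta;     /* may wrap past U32_MAX intentionally */
        }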
70796diff --git a/include/linux/mm.h b/include/linux/mm.h
70797index 9568b90..6cc79f9 100644
70798--- a/include/linux/mm.h
70799+++ b/include/linux/mm.h
70800@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
70801 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
70802 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
70803 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
70804+
70805+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70806+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
70807+#endif
70808+
70809 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
70810
70811 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
70812@@ -200,8 +205,8 @@ struct vm_operations_struct {
70813 /* called by access_process_vm when get_user_pages() fails, typically
70814 * for use by special VMAs that can switch between memory and hardware
70815 */
70816- int (*access)(struct vm_area_struct *vma, unsigned long addr,
70817- void *buf, int len, int write);
70818+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
70819+ void *buf, size_t len, int write);
70820 #ifdef CONFIG_NUMA
70821 /*
70822 * set_policy() op must add a reference to any non-NULL @new mempolicy
70823@@ -231,6 +236,7 @@ struct vm_operations_struct {
70824 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
70825 unsigned long size, pgoff_t pgoff);
70826 };
70827+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
70828
70829 struct mmu_gather;
70830 struct inode;
70831@@ -995,8 +1001,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
70832 unsigned long *pfn);
70833 int follow_phys(struct vm_area_struct *vma, unsigned long address,
70834 unsigned int flags, unsigned long *prot, resource_size_t *phys);
70835-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70836- void *buf, int len, int write);
70837+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70838+ void *buf, size_t len, int write);
70839
70840 static inline void unmap_shared_mapping_range(struct address_space *mapping,
70841 loff_t const holebegin, loff_t const holelen)
70842@@ -1035,10 +1041,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
70843 }
70844 #endif
70845
70846-extern int make_pages_present(unsigned long addr, unsigned long end);
70847-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
70848-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
70849- void *buf, int len, int write);
70850+extern ssize_t make_pages_present(unsigned long addr, unsigned long end);
70851+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
70852+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
70853+ void *buf, size_t len, int write);
70854
70855 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70856 unsigned long start, int len, unsigned int foll_flags,
70857@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
70858 int set_page_dirty_lock(struct page *page);
70859 int clear_page_dirty_for_io(struct page *page);
70860
70861-/* Is the vma a continuation of the stack vma above it? */
70862-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
70863-{
70864- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
70865-}
70866-
70867-static inline int stack_guard_page_start(struct vm_area_struct *vma,
70868- unsigned long addr)
70869-{
70870- return (vma->vm_flags & VM_GROWSDOWN) &&
70871- (vma->vm_start == addr) &&
70872- !vma_growsdown(vma->vm_prev, addr);
70873-}
70874-
70875-/* Is the vma a continuation of the stack vma below it? */
70876-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
70877-{
70878- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
70879-}
70880-
70881-static inline int stack_guard_page_end(struct vm_area_struct *vma,
70882- unsigned long addr)
70883-{
70884- return (vma->vm_flags & VM_GROWSUP) &&
70885- (vma->vm_end == addr) &&
70886- !vma_growsup(vma->vm_next, addr);
70887-}
70888-
70889 extern pid_t
70890 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
70891
70892@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
70893 }
70894 #endif
70895
70896+#ifdef CONFIG_MMU
70897+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
70898+#else
70899+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70900+{
70901+ return __pgprot(0);
70902+}
70903+#endif
70904+
70905 int vma_wants_writenotify(struct vm_area_struct *vma);
70906
70907 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
70908@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
70909 {
70910 return 0;
70911 }
70912+
70913+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
70914+ unsigned long address)
70915+{
70916+ return 0;
70917+}
70918 #else
70919 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70920+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70921 #endif
70922
70923 #ifdef __PAGETABLE_PMD_FOLDED
70924@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
70925 {
70926 return 0;
70927 }
70928+
70929+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
70930+ unsigned long address)
70931+{
70932+ return 0;
70933+}
70934 #else
70935 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
70936+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
70937 #endif
70938
70939 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
70940@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
70941 NULL: pud_offset(pgd, address);
70942 }
70943
70944+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70945+{
70946+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
70947+ NULL: pud_offset(pgd, address);
70948+}
70949+
70950 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70951 {
70952 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
70953 NULL: pmd_offset(pud, address);
70954 }
70955+
70956+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70957+{
70958+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
70959+ NULL: pmd_offset(pud, address);
70960+}
70961 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
70962
70963 #if USE_SPLIT_PTLOCKS
70964@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
70965 unsigned long, unsigned long,
70966 unsigned long, unsigned long);
70967 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
70968+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
70969
70970 /* These take the mm semaphore themselves */
70971 extern unsigned long vm_brk(unsigned long, unsigned long);
70972@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
70973 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
70974 struct vm_area_struct **pprev);
70975
70976+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
70977+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
70978+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
70979+
70980 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
70981 NULL if none. Assume start_addr < end_addr. */
70982 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
70983@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
70984 return vma;
70985 }
70986
70987-#ifdef CONFIG_MMU
70988-pgprot_t vm_get_page_prot(unsigned long vm_flags);
70989-#else
70990-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
70991-{
70992- return __pgprot(0);
70993-}
70994-#endif
70995-
70996 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
70997 unsigned long change_prot_numa(struct vm_area_struct *vma,
70998 unsigned long start, unsigned long end);
70999@@ -1651,6 +1660,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
71000 static inline void vm_stat_account(struct mm_struct *mm,
71001 unsigned long flags, struct file *file, long pages)
71002 {
71003+
71004+#ifdef CONFIG_PAX_RANDMMAP
71005+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
71006+#endif
71007+
71008 mm->total_vm += pages;
71009 }
71010 #endif /* CONFIG_PROC_FS */
71011@@ -1723,7 +1737,7 @@ extern int unpoison_memory(unsigned long pfn);
71012 extern int sysctl_memory_failure_early_kill;
71013 extern int sysctl_memory_failure_recovery;
71014 extern void shake_page(struct page *p, int access);
71015-extern atomic_long_t mce_bad_pages;
71016+extern atomic_long_unchecked_t mce_bad_pages;
71017 extern int soft_offline_page(struct page *page, int flags);
71018
71019 extern void dump_page(struct page *page);
71020@@ -1754,5 +1768,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
71021 static inline bool page_is_guard(struct page *page) { return false; }
71022 #endif /* CONFIG_DEBUG_PAGEALLOC */
71023
71024+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71025+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
71026+#else
71027+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
71028+#endif
71029+
71030 #endif /* __KERNEL__ */
71031 #endif /* _LINUX_MM_H */
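The int→ssize_t/size_t widening above matters on 64-bit kernels, where an int length silently truncates large requests; with the patched prototypes a full size_t flows through. Sketch of a caller (names illustrative):

        /* read len bytes of another task's memory; len no longer truncates */
        static ssize_t example_peek(struct task_struct *tsk, unsigned long addr,
                                    void *buf, size_t len)
        {
                return access_process_vm(tsk, addr, buf, len, 0);
        }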
71032diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
71033index f8f5162..3aaf20f 100644
71034--- a/include/linux/mm_types.h
71035+++ b/include/linux/mm_types.h
71036@@ -288,6 +288,8 @@ struct vm_area_struct {
71037 #ifdef CONFIG_NUMA
71038 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
71039 #endif
71040+
71041+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
71042 };
71043
71044 struct core_thread {
71045@@ -436,6 +438,24 @@ struct mm_struct {
71046 int first_nid;
71047 #endif
71048 struct uprobes_state uprobes_state;
71049+
71050+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71051+ unsigned long pax_flags;
71052+#endif
71053+
71054+#ifdef CONFIG_PAX_DLRESOLVE
71055+ unsigned long call_dl_resolve;
71056+#endif
71057+
71058+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
71059+ unsigned long call_syscall;
71060+#endif
71061+
71062+#ifdef CONFIG_PAX_ASLR
71063+ unsigned long delta_mmap; /* randomized offset */
71064+ unsigned long delta_stack; /* randomized offset */
71065+#endif
71066+
71067 };
71068
71069 /* first nid will either be a valid NID or one of these values */
71070diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
71071index c5d5278..f0b68c8 100644
71072--- a/include/linux/mmiotrace.h
71073+++ b/include/linux/mmiotrace.h
71074@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
71075 /* Called from ioremap.c */
71076 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
71077 void __iomem *addr);
71078-extern void mmiotrace_iounmap(volatile void __iomem *addr);
71079+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
71080
71081 /* For anyone to insert markers. Remember trailing newline. */
71082 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
71083@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
71084 {
71085 }
71086
71087-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
71088+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
71089 {
71090 }
71091
71092diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
71093index 73b64a3..6562925 100644
71094--- a/include/linux/mmzone.h
71095+++ b/include/linux/mmzone.h
71096@@ -412,7 +412,7 @@ struct zone {
71097 unsigned long flags; /* zone flags, see below */
71098
71099 /* Zone statistics */
71100- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71101+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71102
71103 /*
71104 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
71105diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
71106index fed3def..c933f99 100644
71107--- a/include/linux/mod_devicetable.h
71108+++ b/include/linux/mod_devicetable.h
71109@@ -12,7 +12,7 @@
71110 typedef unsigned long kernel_ulong_t;
71111 #endif
71112
71113-#define PCI_ANY_ID (~0)
71114+#define PCI_ANY_ID ((__u16)~0)
71115
71116 struct pci_device_id {
71117 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
71118@@ -139,7 +139,7 @@ struct usb_device_id {
71119 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
71120 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
71121
71122-#define HID_ANY_ID (~0)
71123+#define HID_ANY_ID (~0U)
71124 #define HID_BUS_ANY 0xffff
71125 #define HID_GROUP_ANY 0x0000
71126
71127@@ -498,7 +498,7 @@ struct dmi_system_id {
71128 const char *ident;
71129 struct dmi_strmatch matches[4];
71130 void *driver_data;
71131-};
71132+} __do_const;
71133 /*
71134 * struct dmi_device_id appears during expansion of
71135 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
71136diff --git a/include/linux/module.h b/include/linux/module.h
71137index 1375ee3..ced8177 100644
71138--- a/include/linux/module.h
71139+++ b/include/linux/module.h
71140@@ -17,9 +17,11 @@
71141 #include <linux/moduleparam.h>
71142 #include <linux/tracepoint.h>
71143 #include <linux/export.h>
71144+#include <linux/fs.h>
71145
71146 #include <linux/percpu.h>
71147 #include <asm/module.h>
71148+#include <asm/pgtable.h>
71149
71150 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
71151 #define MODULE_SIG_STRING "~Module signature appended~\n"
71152@@ -54,12 +56,13 @@ struct module_attribute {
71153 int (*test)(struct module *);
71154 void (*free)(struct module *);
71155 };
71156+typedef struct module_attribute __no_const module_attribute_no_const;
71157
71158 struct module_version_attribute {
71159 struct module_attribute mattr;
71160 const char *module_name;
71161 const char *version;
71162-} __attribute__ ((__aligned__(sizeof(void *))));
71163+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
71164
71165 extern ssize_t __modver_version_show(struct module_attribute *,
71166 struct module_kobject *, char *);
71167@@ -232,7 +235,7 @@ struct module
71168
71169 /* Sysfs stuff. */
71170 struct module_kobject mkobj;
71171- struct module_attribute *modinfo_attrs;
71172+ module_attribute_no_const *modinfo_attrs;
71173 const char *version;
71174 const char *srcversion;
71175 struct kobject *holders_dir;
71176@@ -281,19 +284,16 @@ struct module
71177 int (*init)(void);
71178
71179 /* If this is non-NULL, vfree after init() returns */
71180- void *module_init;
71181+ void *module_init_rx, *module_init_rw;
71182
71183 /* Here is the actual code + data, vfree'd on unload. */
71184- void *module_core;
71185+ void *module_core_rx, *module_core_rw;
71186
71187 /* Here are the sizes of the init and core sections */
71188- unsigned int init_size, core_size;
71189+ unsigned int init_size_rw, core_size_rw;
71190
71191 /* The size of the executable code in each section. */
71192- unsigned int init_text_size, core_text_size;
71193-
71194- /* Size of RO sections of the module (text+rodata) */
71195- unsigned int init_ro_size, core_ro_size;
71196+ unsigned int init_size_rx, core_size_rx;
71197
71198 /* Arch-specific module values */
71199 struct mod_arch_specific arch;
71200@@ -349,6 +349,10 @@ struct module
71201 #ifdef CONFIG_EVENT_TRACING
71202 struct ftrace_event_call **trace_events;
71203 unsigned int num_trace_events;
71204+ struct file_operations trace_id;
71205+ struct file_operations trace_enable;
71206+ struct file_operations trace_format;
71207+ struct file_operations trace_filter;
71208 #endif
71209 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
71210 unsigned int num_ftrace_callsites;
71211@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
71212 bool is_module_percpu_address(unsigned long addr);
71213 bool is_module_text_address(unsigned long addr);
71214
71215+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
71216+{
71217+
71218+#ifdef CONFIG_PAX_KERNEXEC
71219+ if (ktla_ktva(addr) >= (unsigned long)start &&
71220+ ktla_ktva(addr) < (unsigned long)start + size)
71221+ return 1;
71222+#endif
71223+
71224+ return ((void *)addr >= start && (void *)addr < start + size);
71225+}
71226+
71227+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
71228+{
71229+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
71230+}
71231+
71232+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
71233+{
71234+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
71235+}
71236+
71237+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
71238+{
71239+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
71240+}
71241+
71242+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
71243+{
71244+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
71245+}
71246+
71247 static inline int within_module_core(unsigned long addr, struct module *mod)
71248 {
71249- return (unsigned long)mod->module_core <= addr &&
71250- addr < (unsigned long)mod->module_core + mod->core_size;
71251+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
71252 }
71253
71254 static inline int within_module_init(unsigned long addr, struct module *mod)
71255 {
71256- return (unsigned long)mod->module_init <= addr &&
71257- addr < (unsigned long)mod->module_init + mod->init_size;
71258+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
71259 }
71260
71261 /* Search for module by name: must hold module_mutex. */
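With the core/init regions split into rx (code) and rw (data) halves, address classification composes the per-region helpers added above; for example, a text-only check would be (sketch):

        static bool example_is_module_text(unsigned long addr, struct module *mod)
        {
                return within_module_core_rx(addr, mod) ||
                       within_module_init_rx(addr, mod);
        }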
71262diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
71263index 560ca53..ef621ef 100644
71264--- a/include/linux/moduleloader.h
71265+++ b/include/linux/moduleloader.h
71266@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
71267 sections. Returns NULL on failure. */
71268 void *module_alloc(unsigned long size);
71269
71270+#ifdef CONFIG_PAX_KERNEXEC
71271+void *module_alloc_exec(unsigned long size);
71272+#else
71273+#define module_alloc_exec(x) module_alloc(x)
71274+#endif
71275+
71276 /* Free memory returned from module_alloc. */
71277 void module_free(struct module *mod, void *module_region);
71278
71279+#ifdef CONFIG_PAX_KERNEXEC
71280+void module_free_exec(struct module *mod, void *module_region);
71281+#else
71282+#define module_free_exec(x, y) module_free((x), (y))
71283+#endif
71284+
71285 /*
71286 * Apply the given relocation to the (simplified) ELF. Return -error
71287 * or 0.
71288@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
71289 unsigned int relsec,
71290 struct module *me)
71291 {
71292+#ifdef CONFIG_MODULES
71293 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
71294+#endif
71295 return -ENOEXEC;
71296 }
71297 #endif
71298@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
71299 unsigned int relsec,
71300 struct module *me)
71301 {
71302+#ifdef CONFIG_MODULES
71303 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
71304+#endif
71305 return -ENOEXEC;
71306 }
71307 #endif
71308diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
71309index 137b419..fe663ec 100644
71310--- a/include/linux/moduleparam.h
71311+++ b/include/linux/moduleparam.h
71312@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
71313 * @len is usually just sizeof(string).
71314 */
71315 #define module_param_string(name, string, len, perm) \
71316- static const struct kparam_string __param_string_##name \
71317+ static const struct kparam_string __param_string_##name __used \
71318 = { len, string }; \
71319 __module_param_call(MODULE_PARAM_PREFIX, name, \
71320 &param_ops_string, \
71321@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
71322 */
71323 #define module_param_array_named(name, array, type, nump, perm) \
71324 param_check_##type(name, &(array)[0]); \
71325- static const struct kparam_array __param_arr_##name \
71326+ static const struct kparam_array __param_arr_##name __used \
71327 = { .max = ARRAY_SIZE(array), .num = nump, \
71328 .ops = &param_ops_##type, \
71329 .elemsize = sizeof(array[0]), .elem = array }; \
71330diff --git a/include/linux/namei.h b/include/linux/namei.h
71331index 5a5ff57..5ae5070 100644
71332--- a/include/linux/namei.h
71333+++ b/include/linux/namei.h
71334@@ -19,7 +19,7 @@ struct nameidata {
71335 unsigned seq;
71336 int last_type;
71337 unsigned depth;
71338- char *saved_names[MAX_NESTED_LINKS + 1];
71339+ const char *saved_names[MAX_NESTED_LINKS + 1];
71340 };
71341
71342 /*
71343@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
71344
71345 extern void nd_jump_link(struct nameidata *nd, struct path *path);
71346
71347-static inline void nd_set_link(struct nameidata *nd, char *path)
71348+static inline void nd_set_link(struct nameidata *nd, const char *path)
71349 {
71350 nd->saved_names[nd->depth] = path;
71351 }
71352
71353-static inline char *nd_get_link(struct nameidata *nd)
71354+static inline const char *nd_get_link(const struct nameidata *nd)
71355 {
71356 return nd->saved_names[nd->depth];
71357 }
71358diff --git a/include/linux/net.h b/include/linux/net.h
71359index aa16731..514b875 100644
71360--- a/include/linux/net.h
71361+++ b/include/linux/net.h
71362@@ -183,7 +183,7 @@ struct net_proto_family {
71363 int (*create)(struct net *net, struct socket *sock,
71364 int protocol, int kern);
71365 struct module *owner;
71366-};
71367+} __do_const;
71368
71369 struct iovec;
71370 struct kvec;
71371diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
71372index 9ef07d0..130a5d9 100644
71373--- a/include/linux/netdevice.h
71374+++ b/include/linux/netdevice.h
71375@@ -1012,6 +1012,7 @@ struct net_device_ops {
71376 u32 pid, u32 seq,
71377 struct net_device *dev);
71378 };
71379+typedef struct net_device_ops __no_const net_device_ops_no_const;
71380
71381 /*
71382 * The DEVICE structure.
71383@@ -1078,7 +1079,7 @@ struct net_device {
71384 int iflink;
71385
71386 struct net_device_stats stats;
71387- atomic_long_t rx_dropped; /* dropped packets by core network
71388+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
71389 * Do not use this in drivers.
71390 */
71391
71392diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
71393index ee14284..bc65d63 100644
71394--- a/include/linux/netfilter.h
71395+++ b/include/linux/netfilter.h
71396@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
71397 #endif
71398 /* Use the module struct to lock set/get code in place */
71399 struct module *owner;
71400-};
71401+} __do_const;
71402
71403 /* Function to register/unregister hook points. */
71404 int nf_register_hook(struct nf_hook_ops *reg);
71405diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
71406index 7958e84..ed74d7a 100644
71407--- a/include/linux/netfilter/ipset/ip_set.h
71408+++ b/include/linux/netfilter/ipset/ip_set.h
71409@@ -98,7 +98,7 @@ struct ip_set_type_variant {
71410 /* Return true if "b" set is the same as "a"
71411 * according to the create set parameters */
71412 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
71413-};
71414+} __do_const;
71415
71416 /* The core set type structure */
71417 struct ip_set_type {
71418diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
71419index 4966dde..7d8ce06 100644
71420--- a/include/linux/netfilter/nfnetlink.h
71421+++ b/include/linux/netfilter/nfnetlink.h
71422@@ -16,7 +16,7 @@ struct nfnl_callback {
71423 const struct nlattr * const cda[]);
71424 const struct nla_policy *policy; /* netlink attribute policy */
71425 const u_int16_t attr_count; /* number of nlattr's */
71426-};
71427+} __do_const;
71428
71429 struct nfnetlink_subsystem {
71430 const char *name;
71431diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
71432new file mode 100644
71433index 0000000..33f4af8
71434--- /dev/null
71435+++ b/include/linux/netfilter/xt_gradm.h
71436@@ -0,0 +1,9 @@
71437+#ifndef _LINUX_NETFILTER_XT_GRADM_H
71438+#define _LINUX_NETFILTER_XT_GRADM_H 1
71439+
71440+struct xt_gradm_mtinfo {
71441+ __u16 flags;
71442+ __u16 invflags;
71443+};
71444+
71445+#endif
71446diff --git a/include/linux/nls.h b/include/linux/nls.h
71447index 5dc635f..35f5e11 100644
71448--- a/include/linux/nls.h
71449+++ b/include/linux/nls.h
71450@@ -31,7 +31,7 @@ struct nls_table {
71451 const unsigned char *charset2upper;
71452 struct module *owner;
71453 struct nls_table *next;
71454-};
71455+} __do_const;
71456
71457 /* this value hold the maximum octet of charset */
71458 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
71459diff --git a/include/linux/notifier.h b/include/linux/notifier.h
71460index d65746e..62e72c2 100644
71461--- a/include/linux/notifier.h
71462+++ b/include/linux/notifier.h
71463@@ -51,7 +51,8 @@ struct notifier_block {
71464 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
71465 struct notifier_block __rcu *next;
71466 int priority;
71467-};
71468+} __do_const;
71469+typedef struct notifier_block __no_const notifier_block_no_const;
71470
71471 struct atomic_notifier_head {
71472 spinlock_t lock;
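This is the recurring constify pattern throughout the patch: the function-pointer structure itself becomes write-protected (__do_const), while the _no_const typedef is the escape hatch for the rare instance that must be filled in at runtime. Sketch, assuming a runtime-chosen callback:

        static notifier_block_no_const example_nb;      /* writable instance */

        static int example_setup(int (*cb)(struct notifier_block *,
                                           unsigned long, void *))
        {
                example_nb.notifier_call = cb;  /* would fault on a __do_const object */
                return register_reboot_notifier(&example_nb);
        }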
71473diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
71474index a4c5624..79d6d88 100644
71475--- a/include/linux/oprofile.h
71476+++ b/include/linux/oprofile.h
71477@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
71478 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
71479 char const * name, ulong * val);
71480
71481-/** Create a file for read-only access to an atomic_t. */
71482+/** Create a file for read-only access to an atomic_unchecked_t. */
71483 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
71484- char const * name, atomic_t * val);
71485+ char const * name, atomic_unchecked_t * val);
71486
71487 /** create a directory */
71488 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
71489diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
71490index 45fc162..01a4068 100644
71491--- a/include/linux/pci_hotplug.h
71492+++ b/include/linux/pci_hotplug.h
71493@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
71494 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
71495 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
71496 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
71497-};
71498+} __do_const;
71499+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
71500
71501 /**
71502 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
71503diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
71504index a280650..2b67b91 100644
71505--- a/include/linux/perf_event.h
71506+++ b/include/linux/perf_event.h
71507@@ -328,8 +328,8 @@ struct perf_event {
71508
71509 enum perf_event_active_state state;
71510 unsigned int attach_state;
71511- local64_t count;
71512- atomic64_t child_count;
71513+ local64_t count; /* PaX: fix it one day */
71514+ atomic64_unchecked_t child_count;
71515
71516 /*
71517 * These are the total time in nanoseconds that the event
71518@@ -380,8 +380,8 @@ struct perf_event {
71519 * These accumulate total time (in nanoseconds) that children
71520 * events have been enabled and running, respectively.
71521 */
71522- atomic64_t child_total_time_enabled;
71523- atomic64_t child_total_time_running;
71524+ atomic64_unchecked_t child_total_time_enabled;
71525+ atomic64_unchecked_t child_total_time_running;
71526
71527 /*
71528 * Protect attach/detach and child_list:
71529@@ -807,7 +807,7 @@ static inline void perf_restore_debug_store(void) { }
71530 */
71531 #define perf_cpu_notifier(fn) \
71532 do { \
71533- static struct notifier_block fn##_nb __cpuinitdata = \
71534+ static struct notifier_block fn##_nb = \
71535 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
71536 unsigned long cpu = smp_processor_id(); \
71537 unsigned long flags; \
71538diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
71539index ad1a427..6419649 100644
71540--- a/include/linux/pipe_fs_i.h
71541+++ b/include/linux/pipe_fs_i.h
71542@@ -45,9 +45,9 @@ struct pipe_buffer {
71543 struct pipe_inode_info {
71544 wait_queue_head_t wait;
71545 unsigned int nrbufs, curbuf, buffers;
71546- unsigned int readers;
71547- unsigned int writers;
71548- unsigned int waiting_writers;
71549+ atomic_t readers;
71550+ atomic_t writers;
71551+ atomic_t waiting_writers;
71552 unsigned int r_counter;
71553 unsigned int w_counter;
71554 struct page *tmp_page;
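Converting the pipe reader/writer counts to atomic_t hardens them against refcount races; call sites elsewhere in the patch switch to atomic ops accordingly. Illustrative accessors under the patched layout:

        static void example_pipe_attach_reader(struct pipe_inode_info *pipe)
        {
                atomic_inc(&pipe->readers);
        }

        static bool example_pipe_has_writers(struct pipe_inode_info *pipe)
        {
                return atomic_read(&pipe->writers) > 0;
        }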
71555diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
71556index 5f28cae..3d23723 100644
71557--- a/include/linux/platform_data/usb-ehci-s5p.h
71558+++ b/include/linux/platform_data/usb-ehci-s5p.h
71559@@ -14,7 +14,7 @@
71560 struct s5p_ehci_platdata {
71561 int (*phy_init)(struct platform_device *pdev, int type);
71562 int (*phy_exit)(struct platform_device *pdev, int type);
71563-};
71564+} __no_const;
71565
71566 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
71567
71568diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
71569index c256c59..8ea94c7 100644
71570--- a/include/linux/platform_data/usb-exynos.h
71571+++ b/include/linux/platform_data/usb-exynos.h
71572@@ -14,7 +14,7 @@
71573 struct exynos4_ohci_platdata {
71574 int (*phy_init)(struct platform_device *pdev, int type);
71575 int (*phy_exit)(struct platform_device *pdev, int type);
71576-};
71577+} __no_const;
71578
71579 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
71580
71581diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
71582index 7c1d252..c5c773e 100644
71583--- a/include/linux/pm_domain.h
71584+++ b/include/linux/pm_domain.h
71585@@ -48,7 +48,7 @@ struct gpd_dev_ops {
71586
71587 struct gpd_cpu_data {
71588 unsigned int saved_exit_latency;
71589- struct cpuidle_state *idle_state;
71590+ cpuidle_state_no_const *idle_state;
71591 };
71592
71593 struct generic_pm_domain {
71594diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
71595index f271860..6b3bec5 100644
71596--- a/include/linux/pm_runtime.h
71597+++ b/include/linux/pm_runtime.h
71598@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
71599
71600 static inline void pm_runtime_mark_last_busy(struct device *dev)
71601 {
71602- ACCESS_ONCE(dev->power.last_busy) = jiffies;
71603+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
71604 }
71605
71606 #else /* !CONFIG_PM_RUNTIME */
71607diff --git a/include/linux/pnp.h b/include/linux/pnp.h
71608index 195aafc..49a7bc2 100644
71609--- a/include/linux/pnp.h
71610+++ b/include/linux/pnp.h
71611@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
71612 struct pnp_fixup {
71613 char id[7];
71614 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
71615-};
71616+} __do_const;
71617
71618 /* config parameters */
71619 #define PNP_CONFIG_NORMAL 0x0001
71620diff --git a/include/linux/poison.h b/include/linux/poison.h
71621index 2110a81..13a11bb 100644
71622--- a/include/linux/poison.h
71623+++ b/include/linux/poison.h
71624@@ -19,8 +19,8 @@
71625 * under normal circumstances, used to verify that nobody uses
71626 * non-initialized list entries.
71627 */
71628-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
71629-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
71630+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
71631+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
71632
71633 /********** include/linux/timer.h **********/
71634 /*
71635diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
71636index c0f44c2..1572583 100644
71637--- a/include/linux/power/smartreflex.h
71638+++ b/include/linux/power/smartreflex.h
71639@@ -238,7 +238,7 @@ struct omap_sr_class_data {
71640 int (*notify)(struct omap_sr *sr, u32 status);
71641 u8 notify_flags;
71642 u8 class_type;
71643-};
71644+} __do_const;
71645
71646 /**
71647 * struct omap_sr_nvalue_table - Smartreflex n-target value info
71648diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
71649index 4ea1d37..80f4b33 100644
71650--- a/include/linux/ppp-comp.h
71651+++ b/include/linux/ppp-comp.h
71652@@ -84,7 +84,7 @@ struct compressor {
71653 struct module *owner;
71654 /* Extra skb space needed by the compressor algorithm */
71655 unsigned int comp_extra;
71656-};
71657+} __do_const;
71658
71659 /*
71660 * The return value from decompress routine is the length of the
71661diff --git a/include/linux/printk.h b/include/linux/printk.h
71662index 9afc01e..92c32e8 100644
71663--- a/include/linux/printk.h
71664+++ b/include/linux/printk.h
71665@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
71666 extern int printk_needs_cpu(int cpu);
71667 extern void printk_tick(void);
71668
71669+extern int kptr_restrict;
71670+
71671 #ifdef CONFIG_PRINTK
71672 asmlinkage __printf(5, 0)
71673 int vprintk_emit(int facility, int level,
71674@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
71675
71676 extern int printk_delay_msec;
71677 extern int dmesg_restrict;
71678-extern int kptr_restrict;
71679
71680 void log_buf_kexec_setup(void);
71681 void __init setup_log_buf(int early);
71682diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
71683index 32676b3..e46f2c0 100644
71684--- a/include/linux/proc_fs.h
71685+++ b/include/linux/proc_fs.h
71686@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
71687 return proc_create_data(name, mode, parent, proc_fops, NULL);
71688 }
71689
71690+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
71691+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
71692+{
71693+#ifdef CONFIG_GRKERNSEC_PROC_USER
71694+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
71695+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71696+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
71697+#else
71698+ return proc_create_data(name, mode, parent, proc_fops, NULL);
71699+#endif
71700+}
71701+
71702 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
71703 umode_t mode, struct proc_dir_entry *base,
71704 read_proc_t *read_proc, void * data)
71705@@ -268,7 +280,7 @@ struct proc_ns_operations {
71706 void (*put)(void *ns);
71707 int (*install)(struct nsproxy *nsproxy, void *ns);
71708 unsigned int (*inum)(void *ns);
71709-};
71710+} __do_const;
71711 extern const struct proc_ns_operations netns_operations;
71712 extern const struct proc_ns_operations utsns_operations;
71713 extern const struct proc_ns_operations ipcns_operations;
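proc_create_grsec() clamps the requested mode according to the GRKERNSEC_PROC_* policy before falling through to proc_create_data(); usage mirrors proc_create(). Sketch, where example_fops is a hypothetical file_operations:

        static int __init example_proc_init(void)
        {
                if (!proc_create_grsec("example", S_IRUGO, NULL, &example_fops))
                        return -ENOMEM;
                return 0;
        }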
71714diff --git a/include/linux/random.h b/include/linux/random.h
71715index d984608..d6f0042 100644
71716--- a/include/linux/random.h
71717+++ b/include/linux/random.h
71718@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
71719 u32 prandom_u32_state(struct rnd_state *);
71720 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
71721
71722+static inline unsigned long pax_get_random_long(void)
71723+{
71724+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
71725+}
71726+
71727 /*
71728 * Handle minimum values for seeds
71729 */
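pax_get_random_long() splices two prandom_u32() draws into a full long on 64-bit targets (the high half vanishes on 32-bit). A typical ASLR-style consumer, purely illustrative:

        static unsigned long example_random_offset(unsigned long mask)
        {
                return (pax_get_random_long() & mask) << PAGE_SHIFT;
        }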
71730diff --git a/include/linux/rculist.h b/include/linux/rculist.h
71731index c92dd28..08f4eab 100644
71732--- a/include/linux/rculist.h
71733+++ b/include/linux/rculist.h
71734@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
71735 struct list_head *prev, struct list_head *next);
71736 #endif
71737
71738+extern void __pax_list_add_rcu(struct list_head *new,
71739+ struct list_head *prev, struct list_head *next);
71740+
71741 /**
71742 * list_add_rcu - add a new entry to rcu-protected list
71743 * @new: new entry to be added
71744@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
71745 __list_add_rcu(new, head, head->next);
71746 }
71747
71748+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
71749+{
71750+ __pax_list_add_rcu(new, head, head->next);
71751+}
71752+
71753 /**
71754 * list_add_tail_rcu - add a new entry to rcu-protected list
71755 * @new: new entry to be added
71756@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
71757 __list_add_rcu(new, head->prev, head);
71758 }
71759
71760+static inline void pax_list_add_tail_rcu(struct list_head *new,
71761+ struct list_head *head)
71762+{
71763+ __pax_list_add_rcu(new, head->prev, head);
71764+}
71765+
71766 /**
71767 * list_del_rcu - deletes entry from list without re-initialization
71768 * @entry: the element to delete from the list.
71769@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
71770 entry->prev = LIST_POISON2;
71771 }
71772
71773+extern void pax_list_del_rcu(struct list_head *entry);
71774+
71775 /**
71776 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
71777 * @n: the element to delete from the hash list.
71778diff --git a/include/linux/reboot.h b/include/linux/reboot.h
71779index 23b3630..e1bc12b 100644
71780--- a/include/linux/reboot.h
71781+++ b/include/linux/reboot.h
71782@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
71783 * Architecture-specific implementations of sys_reboot commands.
71784 */
71785
71786-extern void machine_restart(char *cmd);
71787-extern void machine_halt(void);
71788-extern void machine_power_off(void);
71789+extern void machine_restart(char *cmd) __noreturn;
71790+extern void machine_halt(void) __noreturn;
71791+extern void machine_power_off(void) __noreturn;
71792
71793 extern void machine_shutdown(void);
71794 struct pt_regs;
71795@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
71796 */
71797
71798 extern void kernel_restart_prepare(char *cmd);
71799-extern void kernel_restart(char *cmd);
71800-extern void kernel_halt(void);
71801-extern void kernel_power_off(void);
71802+extern void kernel_restart(char *cmd) __noreturn;
71803+extern void kernel_halt(void) __noreturn;
71804+extern void kernel_power_off(void) __noreturn;
71805
71806 extern int C_A_D; /* for sysctl */
71807 void ctrl_alt_del(void);
71808@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
71809 * Emergency restart, callable from an interrupt handler.
71810 */
71811
71812-extern void emergency_restart(void);
71813+extern void emergency_restart(void) __noreturn;
71814 #include <asm/emergency-restart.h>
71815
71816 #endif /* _LINUX_REBOOT_H */
71817diff --git a/include/linux/regset.h b/include/linux/regset.h
71818index 8e0c9fe..ac4d221 100644
71819--- a/include/linux/regset.h
71820+++ b/include/linux/regset.h
71821@@ -161,7 +161,8 @@ struct user_regset {
71822 unsigned int align;
71823 unsigned int bias;
71824 unsigned int core_note_type;
71825-};
71826+} __do_const;
71827+typedef struct user_regset __no_const user_regset_no_const;
71828
71829 /**
71830 * struct user_regset_view - available regsets
71831diff --git a/include/linux/relay.h b/include/linux/relay.h
71832index 91cacc3..b55ff74 100644
71833--- a/include/linux/relay.h
71834+++ b/include/linux/relay.h
71835@@ -160,7 +160,7 @@ struct rchan_callbacks
71836 * The callback should return 0 if successful, negative if not.
71837 */
71838 int (*remove_buf_file)(struct dentry *dentry);
71839-};
71840+} __no_const;
71841
71842 /*
71843 * CONFIG_RELAY kernel API, kernel/relay.c
71844diff --git a/include/linux/rio.h b/include/linux/rio.h
71845index a3e7842..d973ca6 100644
71846--- a/include/linux/rio.h
71847+++ b/include/linux/rio.h
71848@@ -339,7 +339,7 @@ struct rio_ops {
71849 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
71850 u64 rstart, u32 size, u32 flags);
71851 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
71852-};
71853+} __no_const;
71854
71855 #define RIO_RESOURCE_MEM 0x00000100
71856 #define RIO_RESOURCE_DOORBELL 0x00000200
71857diff --git a/include/linux/rmap.h b/include/linux/rmap.h
71858index c20635c..2f5def4 100644
71859--- a/include/linux/rmap.h
71860+++ b/include/linux/rmap.h
71861@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
71862 void anon_vma_init(void); /* create anon_vma_cachep */
71863 int anon_vma_prepare(struct vm_area_struct *);
71864 void unlink_anon_vmas(struct vm_area_struct *);
71865-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
71866-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
71867+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
71868+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
71869
71870 static inline void anon_vma_merge(struct vm_area_struct *vma,
71871 struct vm_area_struct *next)
71872diff --git a/include/linux/sched.h b/include/linux/sched.h
71873index 7e49270..835d8d9 100644
71874--- a/include/linux/sched.h
71875+++ b/include/linux/sched.h
71876@@ -61,6 +61,7 @@ struct bio_list;
71877 struct fs_struct;
71878 struct perf_event_context;
71879 struct blk_plug;
71880+struct linux_binprm;
71881
71882 /*
71883 * List of flags we want to share for kernel threads,
71884@@ -328,7 +329,7 @@ extern char __sched_text_start[], __sched_text_end[];
71885 extern int in_sched_functions(unsigned long addr);
71886
71887 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
71888-extern signed long schedule_timeout(signed long timeout);
71889+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
71890 extern signed long schedule_timeout_interruptible(signed long timeout);
71891 extern signed long schedule_timeout_killable(signed long timeout);
71892 extern signed long schedule_timeout_uninterruptible(signed long timeout);
71893@@ -355,10 +356,23 @@ struct user_namespace;
71894 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
71895
71896 extern int sysctl_max_map_count;
71897+extern unsigned long sysctl_heap_stack_gap;
71898
71899 #include <linux/aio.h>
71900
71901 #ifdef CONFIG_MMU
71902+
71903+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
71904+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
71905+#else
71906+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
71907+{
71908+ return 0;
71909+}
71910+#endif
71911+
71912+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
71913+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
71914 extern void arch_pick_mmap_layout(struct mm_struct *mm);
71915 extern unsigned long
71916 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
71917@@ -640,6 +654,17 @@ struct signal_struct {
71918 #ifdef CONFIG_TASKSTATS
71919 struct taskstats *stats;
71920 #endif
71921+
71922+#ifdef CONFIG_GRKERNSEC
71923+ u32 curr_ip;
71924+ u32 saved_ip;
71925+ u32 gr_saddr;
71926+ u32 gr_daddr;
71927+ u16 gr_sport;
71928+ u16 gr_dport;
71929+ u8 used_accept:1;
71930+#endif
71931+
71932 #ifdef CONFIG_AUDIT
71933 unsigned audit_tty;
71934 struct tty_audit_buf *tty_audit_buf;
71935@@ -718,6 +743,11 @@ struct user_struct {
71936 struct key *session_keyring; /* UID's default session keyring */
71937 #endif
71938
71939+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
71940+ unsigned int banned;
71941+ unsigned long ban_expires;
71942+#endif
71943+
71944 /* Hash table maintenance information */
71945 struct hlist_node uidhash_node;
71946 kuid_t uid;
71947@@ -1117,7 +1147,7 @@ struct sched_class {
71948 #ifdef CONFIG_FAIR_GROUP_SCHED
71949 void (*task_move_group) (struct task_struct *p, int on_rq);
71950 #endif
71951-};
71952+} __do_const;
71953
71954 struct load_weight {
71955 unsigned long weight, inv_weight;
71956@@ -1361,8 +1391,8 @@ struct task_struct {
71957 struct list_head thread_group;
71958
71959 struct completion *vfork_done; /* for vfork() */
71960- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
71961- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71962+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
71963+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71964
71965 cputime_t utime, stime, utimescaled, stimescaled;
71966 cputime_t gtime;
71967@@ -1378,11 +1408,6 @@ struct task_struct {
71968 struct task_cputime cputime_expires;
71969 struct list_head cpu_timers[3];
71970
71971-/* process credentials */
71972- const struct cred __rcu *real_cred; /* objective and real subjective task
71973- * credentials (COW) */
71974- const struct cred __rcu *cred; /* effective (overridable) subjective task
71975- * credentials (COW) */
71976 char comm[TASK_COMM_LEN]; /* executable name excluding path
71977 - access with [gs]et_task_comm (which lock
71978 it with task_lock())
71979@@ -1399,6 +1424,10 @@ struct task_struct {
71980 #endif
71981 /* CPU-specific state of this task */
71982 struct thread_struct thread;
71983+/* thread_info moved to task_struct */
71984+#ifdef CONFIG_X86
71985+ struct thread_info tinfo;
71986+#endif
71987 /* filesystem information */
71988 struct fs_struct *fs;
71989 /* open file information */
71990@@ -1472,6 +1501,10 @@ struct task_struct {
71991 gfp_t lockdep_reclaim_gfp;
71992 #endif
71993
71994+/* process credentials */
71995+ const struct cred __rcu *real_cred; /* objective and real subjective task
71996+ * credentials (COW) */
71997+
71998 /* journalling filesystem info */
71999 void *journal_info;
72000
72001@@ -1510,6 +1543,10 @@ struct task_struct {
72002 /* cg_list protected by css_set_lock and tsk->alloc_lock */
72003 struct list_head cg_list;
72004 #endif
72005+
72006+ const struct cred __rcu *cred; /* effective (overridable) subjective task
72007+ * credentials (COW) */
72008+
72009 #ifdef CONFIG_FUTEX
72010 struct robust_list_head __user *robust_list;
72011 #ifdef CONFIG_COMPAT
72012@@ -1606,8 +1643,74 @@ struct task_struct {
72013 #ifdef CONFIG_UPROBES
72014 struct uprobe_task *utask;
72015 #endif
72016+
72017+#ifdef CONFIG_GRKERNSEC
72018+ /* grsecurity */
72019+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72020+ u64 exec_id;
72021+#endif
72022+#ifdef CONFIG_GRKERNSEC_SETXID
72023+ const struct cred *delayed_cred;
72024+#endif
72025+ struct dentry *gr_chroot_dentry;
72026+ struct acl_subject_label *acl;
72027+ struct acl_role_label *role;
72028+ struct file *exec_file;
72029+ unsigned long brute_expires;
72030+ u16 acl_role_id;
72031+ /* is this the task that authenticated to the special role */
72032+ u8 acl_sp_role;
72033+ u8 is_writable;
72034+ u8 brute;
72035+ u8 gr_is_chrooted;
72036+#endif
72037+
72038 };
72039
72040+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
72041+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
72042+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
72043+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
72044+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
72045+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
72046+
72047+#ifdef CONFIG_PAX_SOFTMODE
72048+extern int pax_softmode;
72049+#endif
72050+
72051+extern int pax_check_flags(unsigned long *);
72052+
72053+/* if tsk != current then task_lock must be held on it */
72054+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
72055+static inline unsigned long pax_get_flags(struct task_struct *tsk)
72056+{
72057+ if (likely(tsk->mm))
72058+ return tsk->mm->pax_flags;
72059+ else
72060+ return 0UL;
72061+}
72062+
72063+/* if tsk != current then task_lock must be held on it */
72064+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
72065+{
72066+ if (likely(tsk->mm)) {
72067+ tsk->mm->pax_flags = flags;
72068+ return 0;
72069+ }
72070+ return -EINVAL;
72071+}
72072+#endif
72073+
72074+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
72075+extern void pax_set_initial_flags(struct linux_binprm *bprm);
72076+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
72077+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
72078+#endif
72079+
72080+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
72081+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
72082+extern void pax_report_refcount_overflow(struct pt_regs *regs);
72083+
72084 /* Future-safe accessor for struct task_struct's cpus_allowed. */
72085 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
72086
72087@@ -1697,7 +1800,7 @@ struct pid_namespace;
72088 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
72089 struct pid_namespace *ns);
72090
72091-static inline pid_t task_pid_nr(struct task_struct *tsk)
72092+static inline pid_t task_pid_nr(const struct task_struct *tsk)
72093 {
72094 return tsk->pid;
72095 }
72096@@ -2156,7 +2259,9 @@ void yield(void);
72097 extern struct exec_domain default_exec_domain;
72098
72099 union thread_union {
72100+#ifndef CONFIG_X86
72101 struct thread_info thread_info;
72102+#endif
72103 unsigned long stack[THREAD_SIZE/sizeof(long)];
72104 };
72105
72106@@ -2189,6 +2294,7 @@ extern struct pid_namespace init_pid_ns;
72107 */
72108
72109 extern struct task_struct *find_task_by_vpid(pid_t nr);
72110+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
72111 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
72112 struct pid_namespace *ns);
72113
72114@@ -2345,7 +2451,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
72115 extern void exit_itimers(struct signal_struct *);
72116 extern void flush_itimer_signals(void);
72117
72118-extern void do_group_exit(int);
72119+extern __noreturn void do_group_exit(int);
72120
72121 extern int allow_signal(int);
72122 extern int disallow_signal(int);
72123@@ -2546,9 +2652,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
72124
72125 #endif
72126
72127-static inline int object_is_on_stack(void *obj)
72128+static inline int object_starts_on_stack(void *obj)
72129 {
72130- void *stack = task_stack_page(current);
72131+ const void *stack = task_stack_page(current);
72132
72133 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
72134 }
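
The final sched.h hunk renames object_is_on_stack() to object_starts_on_stack(), which is honest about its semantics: the predicate only tests whether the object's first byte lies inside the current task's stack, not whether the whole object fits there. A small userspace model of the range test; THREAD_SIZE and the stack base are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL      /* stand-in for the arch's real value */

static bool object_starts_on_stack(const void *obj, const void *stack)
{
        uintptr_t o = (uintptr_t)obj, s = (uintptr_t)stack;

        return o >= s && o < s + THREAD_SIZE;   /* start address only */
}

static int global_obj;

int main(void)
{
        char fake_stack[THREAD_SIZE];

        printf("%d\n", object_starts_on_stack(&fake_stack[16], fake_stack)); /* 1 */
        printf("%d\n", object_starts_on_stack(&global_obj, fake_stack));     /* 0 */
        return 0;
}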
72135diff --git a/include/linux/security.h b/include/linux/security.h
72136index eee7478..290f7ba 100644
72137--- a/include/linux/security.h
72138+++ b/include/linux/security.h
72139@@ -26,6 +26,7 @@
72140 #include <linux/capability.h>
72141 #include <linux/slab.h>
72142 #include <linux/err.h>
72143+#include <linux/grsecurity.h>
72144
72145 struct linux_binprm;
72146 struct cred;
72147diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
72148index 68a04a3..866e6a1 100644
72149--- a/include/linux/seq_file.h
72150+++ b/include/linux/seq_file.h
72151@@ -26,6 +26,9 @@ struct seq_file {
72152 struct mutex lock;
72153 const struct seq_operations *op;
72154 int poll_event;
72155+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72156+ u64 exec_id;
72157+#endif
72158 #ifdef CONFIG_USER_NS
72159 struct user_namespace *user_ns;
72160 #endif
72161@@ -38,6 +41,7 @@ struct seq_operations {
72162 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
72163 int (*show) (struct seq_file *m, void *v);
72164 };
72165+typedef struct seq_operations __no_const seq_operations_no_const;
72166
72167 #define SEQ_SKIP 1
72168
72169diff --git a/include/linux/shm.h b/include/linux/shm.h
72170index 429c199..4d42e38 100644
72171--- a/include/linux/shm.h
72172+++ b/include/linux/shm.h
72173@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
72174
72175 /* The task created the shm object. NULL if the task is dead. */
72176 struct task_struct *shm_creator;
72177+#ifdef CONFIG_GRKERNSEC
72178+ time_t shm_createtime;
72179+ pid_t shm_lapid;
72180+#endif
72181 };
72182
72183 /* shm_mode upper byte flags */
72184diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
72185index 98399e2..7c74c41 100644
72186--- a/include/linux/skbuff.h
72187+++ b/include/linux/skbuff.h
72188@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
72189 extern struct sk_buff *__alloc_skb(unsigned int size,
72190 gfp_t priority, int flags, int node);
72191 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
72192-static inline struct sk_buff *alloc_skb(unsigned int size,
72193+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
72194 gfp_t priority)
72195 {
72196 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
72197@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
72198 */
72199 static inline int skb_queue_empty(const struct sk_buff_head *list)
72200 {
72201- return list->next == (struct sk_buff *)list;
72202+ return list->next == (const struct sk_buff *)list;
72203 }
72204
72205 /**
72206@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
72207 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
72208 const struct sk_buff *skb)
72209 {
72210- return skb->next == (struct sk_buff *)list;
72211+ return skb->next == (const struct sk_buff *)list;
72212 }
72213
72214 /**
72215@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
72216 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
72217 const struct sk_buff *skb)
72218 {
72219- return skb->prev == (struct sk_buff *)list;
72220+ return skb->prev == (const struct sk_buff *)list;
72221 }
72222
72223 /**
72224@@ -1727,7 +1727,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
72225 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
72226 */
72227 #ifndef NET_SKB_PAD
72228-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
72229+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
72230 #endif
72231
72232 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
72233@@ -2305,7 +2305,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
72234 int noblock, int *err);
72235 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
72236 struct poll_table_struct *wait);
72237-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
72238+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
72239 int offset, struct iovec *to,
72240 int size);
72241 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
72242@@ -2595,6 +2595,9 @@ static inline void nf_reset(struct sk_buff *skb)
72243 nf_bridge_put(skb->nf_bridge);
72244 skb->nf_bridge = NULL;
72245 #endif
72246+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
72247+ skb->nf_trace = 0;
72248+#endif
72249 }
72250
72251 /* Note: This doesn't put any conntrack and bridge info in dst. */
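
The NET_SKB_PAD change above swaps the bare 32 for _AC(32,UL) because the kernel's max() insists both operands share a type, and on some configurations L1_CACHE_BYTES expands to an unsigned long constant. A cut-down version of that compile-time type check; the real macro lives in linux/kernel.h:

#include <stdio.h>

#define max(x, y) ({                            \
        typeof(x) _mx = (x);                    \
        typeof(y) _my = (y);                    \
        (void)(&_mx == &_my); /* warns if the types differ */ \
        _mx > _my ? _mx : _my; })

int main(void)
{
        unsigned long cache_bytes = 64;

        /* max(32, cache_bytes) would trip the pointer type check above;
         * 32UL, which is what _AC(32,UL) expands to, matches the operand */
        printf("%lu\n", max(32UL, cache_bytes));
        return 0;
}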
72252diff --git a/include/linux/slab.h b/include/linux/slab.h
72253index 5d168d7..720bff3 100644
72254--- a/include/linux/slab.h
72255+++ b/include/linux/slab.h
72256@@ -12,13 +12,20 @@
72257 #include <linux/gfp.h>
72258 #include <linux/types.h>
72259 #include <linux/workqueue.h>
72260-
72261+#include <linux/err.h>
72262
72263 /*
72264 * Flags to pass to kmem_cache_create().
72265 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
72266 */
72267 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
72268+
72269+#ifdef CONFIG_PAX_USERCOPY_SLABS
72270+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
72271+#else
72272+#define SLAB_USERCOPY 0x00000000UL
72273+#endif
72274+
72275 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
72276 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
72277 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
72278@@ -89,10 +96,13 @@
72279 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
72280 * Both make kfree a no-op.
72281 */
72282-#define ZERO_SIZE_PTR ((void *)16)
72283+#define ZERO_SIZE_PTR \
72284+({ \
72285+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
72286+ (void *)(-MAX_ERRNO-1L); \
72287+})
72288
72289-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
72290- (unsigned long)ZERO_SIZE_PTR)
72291+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
72292
72293 /*
72294 * Common fields provided in kmem_cache by all slab allocators
72295@@ -112,7 +122,7 @@ struct kmem_cache {
72296 unsigned int align; /* Alignment as calculated */
72297 unsigned long flags; /* Active flags on the slab */
72298 const char *name; /* Slab name for sysfs */
72299- int refcount; /* Use counter */
72300+ atomic_t refcount; /* Use counter */
72301 void (*ctor)(void *); /* Called on object slot creation */
72302 struct list_head list; /* List of all slab caches on the system */
72303 };
72304@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
72305 void kfree(const void *);
72306 void kzfree(const void *);
72307 size_t ksize(const void *);
72308+const char *check_heap_object(const void *ptr, unsigned long n);
72309+bool is_usercopy_object(const void *ptr);
72310
72311 /*
72312 * Allocator specific definitions. These are mainly used to establish optimized
72313@@ -311,6 +323,7 @@ size_t ksize(const void *);
72314 * for general use, and so are not documented here. For a full list of
72315 * potential flags, always refer to linux/gfp.h.
72316 */
72317+
72318 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
72319 {
72320 if (size != 0 && n > SIZE_MAX / size)
72321@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
72322 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
72323 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
72324 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
72325-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
72326+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
72327 #define kmalloc_track_caller(size, flags) \
72328 __kmalloc_track_caller(size, flags, _RET_IP_)
72329 #else
72330@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
72331 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
72332 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
72333 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
72334-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
72335+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
72336 #define kmalloc_node_track_caller(size, flags, node) \
72337 __kmalloc_node_track_caller(size, flags, node, \
72338 _RET_IP_)
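
The slab.h hunk moves ZERO_SIZE_PTR from address 16 to just below the ERR_PTR range at the top of the address space, so a single unsigned comparison classifies NULL, the sentinel, and error pointers alike as non-objects. A userspace rendering of the patched arithmetic; MAX_ERRNO mirrors the kernel's 4095 and the BUILD_BUG_ON from the original is omitted:

#include <stdio.h>

#define MAX_ERRNO       4095L
#define ZERO_SIZE_PTR   ((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
        int obj;

        printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));     /* 1 */
        printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1 */
        printf("ERR_PTR(-22)  -> %d\n", ZERO_OR_NULL_PTR((void *)-22L));  /* 1 */
        printf("real object   -> %d\n", ZERO_OR_NULL_PTR(&obj));          /* 0 */
        return 0;
}

The relocation presumably keeps offsets added to the sentinel inside the unmapped top of the address space rather than in low memory, where userland mappings were historically abusable.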
72339diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
72340index 8bb6e0e..8eb0dbe 100644
72341--- a/include/linux/slab_def.h
72342+++ b/include/linux/slab_def.h
72343@@ -52,7 +52,7 @@ struct kmem_cache {
72344 /* 4) cache creation/removal */
72345 const char *name;
72346 struct list_head list;
72347- int refcount;
72348+ atomic_t refcount;
72349 int object_size;
72350 int align;
72351
72352@@ -68,10 +68,10 @@ struct kmem_cache {
72353 unsigned long node_allocs;
72354 unsigned long node_frees;
72355 unsigned long node_overflow;
72356- atomic_t allochit;
72357- atomic_t allocmiss;
72358- atomic_t freehit;
72359- atomic_t freemiss;
72360+ atomic_unchecked_t allochit;
72361+ atomic_unchecked_t allocmiss;
72362+ atomic_unchecked_t freehit;
72363+ atomic_unchecked_t freemiss;
72364
72365 /*
72366 * If debugging is enabled, then the allocator can add additional
72367@@ -111,11 +111,16 @@ struct cache_sizes {
72368 #ifdef CONFIG_ZONE_DMA
72369 struct kmem_cache *cs_dmacachep;
72370 #endif
72371+
72372+#ifdef CONFIG_PAX_USERCOPY_SLABS
72373+ struct kmem_cache *cs_usercopycachep;
72374+#endif
72375+
72376 };
72377 extern struct cache_sizes malloc_sizes[];
72378
72379 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
72380-void *__kmalloc(size_t size, gfp_t flags);
72381+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
72382
72383 #ifdef CONFIG_TRACING
72384 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
72385@@ -152,6 +157,13 @@ found:
72386 cachep = malloc_sizes[i].cs_dmacachep;
72387 else
72388 #endif
72389+
72390+#ifdef CONFIG_PAX_USERCOPY_SLABS
72391+ if (flags & GFP_USERCOPY)
72392+ cachep = malloc_sizes[i].cs_usercopycachep;
72393+ else
72394+#endif
72395+
72396 cachep = malloc_sizes[i].cs_cachep;
72397
72398 ret = kmem_cache_alloc_trace(cachep, flags, size);
72399@@ -162,7 +174,7 @@ found:
72400 }
72401
72402 #ifdef CONFIG_NUMA
72403-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
72404+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72405 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
72406
72407 #ifdef CONFIG_TRACING
72408@@ -205,6 +217,13 @@ found:
72409 cachep = malloc_sizes[i].cs_dmacachep;
72410 else
72411 #endif
72412+
72413+#ifdef CONFIG_PAX_USERCOPY_SLABS
72414+ if (flags & GFP_USERCOPY)
72415+ cachep = malloc_sizes[i].cs_usercopycachep;
72416+ else
72417+#endif
72418+
72419 cachep = malloc_sizes[i].cs_cachep;
72420
72421 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
72422diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
72423index f28e14a..7831211 100644
72424--- a/include/linux/slob_def.h
72425+++ b/include/linux/slob_def.h
72426@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
72427 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
72428 }
72429
72430-void *__kmalloc_node(size_t size, gfp_t flags, int node);
72431+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72432
72433 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
72434 {
72435@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72436 return __kmalloc_node(size, flags, NUMA_NO_NODE);
72437 }
72438
72439-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
72440+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
72441 {
72442 return kmalloc(size, flags);
72443 }
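
The __size_overflow(1) markers sprinkled over the allocator prototypes above and below feed grsecurity's size_overflow GCC plugin, which instruments the arithmetic flowing into the tagged size parameter and traps on wraparound. Without the plugin, the same idea is the explicit guard kmalloc_array() already uses; a userspace sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;            /* n * size would wrap around */
        return malloc(n * size);
}

int main(void)
{
        void *ok = alloc_array(16, sizeof(long));       /* fits */
        void *bad = alloc_array(SIZE_MAX, 2);           /* rejected */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}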
72444diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
72445index 9db4825..ed42fb5 100644
72446--- a/include/linux/slub_def.h
72447+++ b/include/linux/slub_def.h
72448@@ -91,7 +91,7 @@ struct kmem_cache {
72449 struct kmem_cache_order_objects max;
72450 struct kmem_cache_order_objects min;
72451 gfp_t allocflags; /* gfp flags to use on each alloc */
72452- int refcount; /* Refcount for slab cache destroy */
72453+ atomic_t refcount; /* Refcount for slab cache destroy */
72454 void (*ctor)(void *);
72455 int inuse; /* Offset to metadata */
72456 int align; /* Alignment */
72457@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
72458 * Sorry that the following has to be that ugly but some versions of GCC
72459 * have trouble with constant propagation and loops.
72460 */
72461-static __always_inline int kmalloc_index(size_t size)
72462+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
72463 {
72464 if (!size)
72465 return 0;
72466@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
72467 }
72468
72469 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
72470-void *__kmalloc(size_t size, gfp_t flags);
72471+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
72472
72473 static __always_inline void *
72474 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
72475@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
72476 }
72477 #endif
72478
72479-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
72480+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
72481 {
72482 unsigned int order = get_order(size);
72483 return kmalloc_order_trace(size, flags, order);
72484@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72485 }
72486
72487 #ifdef CONFIG_NUMA
72488-void *__kmalloc_node(size_t size, gfp_t flags, int node);
72489+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72490 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
72491
72492 #ifdef CONFIG_TRACING
72493diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
72494index e8d702e..0a56eb4 100644
72495--- a/include/linux/sock_diag.h
72496+++ b/include/linux/sock_diag.h
72497@@ -10,7 +10,7 @@ struct sock;
72498 struct sock_diag_handler {
72499 __u8 family;
72500 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
72501-};
72502+} __do_const;
72503
72504 int sock_diag_register(const struct sock_diag_handler *h);
72505 void sock_diag_unregister(const struct sock_diag_handler *h);
72506diff --git a/include/linux/sonet.h b/include/linux/sonet.h
72507index 680f9a3..f13aeb0 100644
72508--- a/include/linux/sonet.h
72509+++ b/include/linux/sonet.h
72510@@ -7,7 +7,7 @@
72511 #include <uapi/linux/sonet.h>
72512
72513 struct k_sonet_stats {
72514-#define __HANDLE_ITEM(i) atomic_t i
72515+#define __HANDLE_ITEM(i) atomic_unchecked_t i
72516 __SONET_ITEMS
72517 #undef __HANDLE_ITEM
72518 };
72519diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
72520index 34206b8..3db7f1c 100644
72521--- a/include/linux/sunrpc/clnt.h
72522+++ b/include/linux/sunrpc/clnt.h
72523@@ -96,7 +96,7 @@ struct rpc_procinfo {
72524 unsigned int p_timer; /* Which RTT timer to use */
72525 u32 p_statidx; /* Which procedure to account */
72526 const char * p_name; /* name of procedure */
72527-};
72528+} __do_const;
72529
72530 #ifdef __KERNEL__
72531
72532@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
72533 {
72534 switch (sap->sa_family) {
72535 case AF_INET:
72536- return ntohs(((struct sockaddr_in *)sap)->sin_port);
72537+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
72538 case AF_INET6:
72539- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
72540+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
72541 }
72542 return 0;
72543 }
72544@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
72545 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
72546 const struct sockaddr *src)
72547 {
72548- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
72549+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
72550 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
72551
72552 dsin->sin_family = ssin->sin_family;
72553@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
72554 if (sa->sa_family != AF_INET6)
72555 return 0;
72556
72557- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
72558+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
72559 }
72560
72561 #endif /* __KERNEL__ */
72562diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
72563index 676ddf5..4c519a1 100644
72564--- a/include/linux/sunrpc/svc.h
72565+++ b/include/linux/sunrpc/svc.h
72566@@ -410,7 +410,7 @@ struct svc_procedure {
72567 unsigned int pc_count; /* call count */
72568 unsigned int pc_cachetype; /* cache info (NFS) */
72569 unsigned int pc_xdrressize; /* maximum size of XDR reply */
72570-};
72571+} __do_const;
72572
72573 /*
72574 * Function prototypes.
72575diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
72576index 0b8e3e6..33e0a01 100644
72577--- a/include/linux/sunrpc/svc_rdma.h
72578+++ b/include/linux/sunrpc/svc_rdma.h
72579@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
72580 extern unsigned int svcrdma_max_requests;
72581 extern unsigned int svcrdma_max_req_size;
72582
72583-extern atomic_t rdma_stat_recv;
72584-extern atomic_t rdma_stat_read;
72585-extern atomic_t rdma_stat_write;
72586-extern atomic_t rdma_stat_sq_starve;
72587-extern atomic_t rdma_stat_rq_starve;
72588-extern atomic_t rdma_stat_rq_poll;
72589-extern atomic_t rdma_stat_rq_prod;
72590-extern atomic_t rdma_stat_sq_poll;
72591-extern atomic_t rdma_stat_sq_prod;
72592+extern atomic_unchecked_t rdma_stat_recv;
72593+extern atomic_unchecked_t rdma_stat_read;
72594+extern atomic_unchecked_t rdma_stat_write;
72595+extern atomic_unchecked_t rdma_stat_sq_starve;
72596+extern atomic_unchecked_t rdma_stat_rq_starve;
72597+extern atomic_unchecked_t rdma_stat_rq_poll;
72598+extern atomic_unchecked_t rdma_stat_rq_prod;
72599+extern atomic_unchecked_t rdma_stat_sq_poll;
72600+extern atomic_unchecked_t rdma_stat_sq_prod;
72601
72602 #define RPCRDMA_VERSION 1
72603
72604diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
72605index dd74084a..7f509d5 100644
72606--- a/include/linux/sunrpc/svcauth.h
72607+++ b/include/linux/sunrpc/svcauth.h
72608@@ -109,7 +109,7 @@ struct auth_ops {
72609 int (*release)(struct svc_rqst *rq);
72610 void (*domain_release)(struct auth_domain *);
72611 int (*set_client)(struct svc_rqst *rq);
72612-};
72613+} __do_const;
72614
72615 #define SVC_GARBAGE 1
72616 #define SVC_SYSERR 2
72617diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
72618index 071d62c..4ccc7ac 100644
72619--- a/include/linux/swiotlb.h
72620+++ b/include/linux/swiotlb.h
72621@@ -59,7 +59,8 @@ extern void
72622
72623 extern void
72624 swiotlb_free_coherent(struct device *hwdev, size_t size,
72625- void *vaddr, dma_addr_t dma_handle);
72626+ void *vaddr, dma_addr_t dma_handle,
72627+ struct dma_attrs *attrs);
72628
72629 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
72630 unsigned long offset, size_t size,
72631diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
72632index 45e2db2..1635156a 100644
72633--- a/include/linux/syscalls.h
72634+++ b/include/linux/syscalls.h
72635@@ -615,7 +615,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
72636 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
72637 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
72638 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
72639- struct sockaddr __user *, int);
72640+ struct sockaddr __user *, int) __intentional_overflow(0);
72641 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
72642 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
72643 unsigned int vlen, unsigned flags);
72644diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
72645index 27b3b0b..e093dd9 100644
72646--- a/include/linux/syscore_ops.h
72647+++ b/include/linux/syscore_ops.h
72648@@ -16,7 +16,7 @@ struct syscore_ops {
72649 int (*suspend)(void);
72650 void (*resume)(void);
72651 void (*shutdown)(void);
72652-};
72653+} __do_const;
72654
72655 extern void register_syscore_ops(struct syscore_ops *ops);
72656 extern void unregister_syscore_ops(struct syscore_ops *ops);
72657diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
72658index 14a8ff2..af52bad 100644
72659--- a/include/linux/sysctl.h
72660+++ b/include/linux/sysctl.h
72661@@ -34,13 +34,13 @@ struct ctl_table_root;
72662 struct ctl_table_header;
72663 struct ctl_dir;
72664
72665-typedef struct ctl_table ctl_table;
72666-
72667 typedef int proc_handler (struct ctl_table *ctl, int write,
72668 void __user *buffer, size_t *lenp, loff_t *ppos);
72669
72670 extern int proc_dostring(struct ctl_table *, int,
72671 void __user *, size_t *, loff_t *);
72672+extern int proc_dostring_modpriv(struct ctl_table *, int,
72673+ void __user *, size_t *, loff_t *);
72674 extern int proc_dointvec(struct ctl_table *, int,
72675 void __user *, size_t *, loff_t *);
72676 extern int proc_dointvec_minmax(struct ctl_table *, int,
72677@@ -115,7 +115,9 @@ struct ctl_table
72678 struct ctl_table_poll *poll;
72679 void *extra1;
72680 void *extra2;
72681-};
72682+} __do_const;
72683+typedef struct ctl_table __no_const ctl_table_no_const;
72684+typedef struct ctl_table ctl_table;
72685
72686 struct ctl_node {
72687 struct rb_node node;
72688diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
72689index 381f06d..dc16cc7 100644
72690--- a/include/linux/sysfs.h
72691+++ b/include/linux/sysfs.h
72692@@ -31,7 +31,8 @@ struct attribute {
72693 struct lock_class_key *key;
72694 struct lock_class_key skey;
72695 #endif
72696-};
72697+} __do_const;
72698+typedef struct attribute __no_const attribute_no_const;
72699
72700 /**
72701 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
72702@@ -59,8 +60,8 @@ struct attribute_group {
72703 umode_t (*is_visible)(struct kobject *,
72704 struct attribute *, int);
72705 struct attribute **attrs;
72706-};
72707-
72708+} __do_const;
72709+typedef struct attribute_group __no_const attribute_group_no_const;
72710
72711
72712 /**
72713@@ -107,7 +108,8 @@ struct bin_attribute {
72714 char *, loff_t, size_t);
72715 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
72716 struct vm_area_struct *vma);
72717-};
72718+} __do_const;
72719+typedef struct bin_attribute __no_const bin_attribute_no_const;
72720
72721 /**
72722 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
72723diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
72724index 7faf933..9b85a0c 100644
72725--- a/include/linux/sysrq.h
72726+++ b/include/linux/sysrq.h
72727@@ -16,6 +16,7 @@
72728
72729 #include <linux/errno.h>
72730 #include <linux/types.h>
72731+#include <linux/compiler.h>
72732
72733 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
72734 #define SYSRQ_DEFAULT_ENABLE 1
72735@@ -36,7 +37,7 @@ struct sysrq_key_op {
72736 char *help_msg;
72737 char *action_msg;
72738 int enable_mask;
72739-};
72740+} __do_const;
72741
72742 #ifdef CONFIG_MAGIC_SYSRQ
72743
72744diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
72745index e7e0473..7989295 100644
72746--- a/include/linux/thread_info.h
72747+++ b/include/linux/thread_info.h
72748@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
72749 #error "no set_restore_sigmask() provided and default one won't work"
72750 #endif
72751
72752+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
72753+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
72754+{
72755+#ifndef CONFIG_PAX_USERCOPY_DEBUG
72756+ if (!__builtin_constant_p(n))
72757+#endif
72758+ __check_object_size(ptr, n, to_user);
72759+}
72760+
72761 #endif /* __KERNEL__ */
72762
72763 #endif /* _LINUX_THREAD_INFO_H */
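
check_object_size() added above is the PAX_USERCOPY entry point: copies with a compile-time-constant length are validated statically, so only variable-length user copies pay for the runtime bounds check, and CONFIG_PAX_USERCOPY_DEBUG compiles the gate out so constants are checked too. A userspace model, written as a macro so the __builtin_constant_p() folding is visible even without optimization; the real helper is a static inline:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
        /* stand-in for the kernel's heap/stack bounds validation */
        printf("runtime check: %p, %lu bytes, to_user=%d\n", ptr, n, (int)to_user);
}

#define check_object_size(ptr, n, to_user) do {                 \
        if (!__builtin_constant_p(n))                           \
                __check_object_size((ptr), (n), (to_user));     \
} while (0)

int main(void)
{
        char buf[64];
        unsigned long n = strlen("variable length");

        check_object_size(buf, sizeof(buf), true);      /* constant: skipped */
        check_object_size(buf, n, true);                /* checked at runtime */
        return 0;
}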
72764diff --git a/include/linux/tty.h b/include/linux/tty.h
72765index 8db1b56..c16a040 100644
72766--- a/include/linux/tty.h
72767+++ b/include/linux/tty.h
72768@@ -194,7 +194,7 @@ struct tty_port {
72769 const struct tty_port_operations *ops; /* Port operations */
72770 spinlock_t lock; /* Lock protecting tty field */
72771 int blocked_open; /* Waiting to open */
72772- int count; /* Usage count */
72773+ atomic_t count; /* Usage count */
72774 wait_queue_head_t open_wait; /* Open waiters */
72775 wait_queue_head_t close_wait; /* Close waiters */
72776 wait_queue_head_t delta_msr_wait; /* Modem status change */
72777@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
72778 struct tty_struct *tty, struct file *filp);
72779 static inline int tty_port_users(struct tty_port *port)
72780 {
72781- return port->count + port->blocked_open;
72782+ return atomic_read(&port->count) + port->blocked_open;
72783 }
72784
72785 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
72786diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
72787index dd976cf..e272742 100644
72788--- a/include/linux/tty_driver.h
72789+++ b/include/linux/tty_driver.h
72790@@ -284,7 +284,7 @@ struct tty_operations {
72791 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
72792 #endif
72793 const struct file_operations *proc_fops;
72794-};
72795+} __do_const;
72796
72797 struct tty_driver {
72798 int magic; /* magic number for this structure */
72799diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
72800index fb79dd8d..07d4773 100644
72801--- a/include/linux/tty_ldisc.h
72802+++ b/include/linux/tty_ldisc.h
72803@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
72804
72805 struct module *owner;
72806
72807- int refcount;
72808+ atomic_t refcount;
72809 };
72810
72811 struct tty_ldisc {
72812diff --git a/include/linux/types.h b/include/linux/types.h
72813index 4d118ba..c3ee9bf 100644
72814--- a/include/linux/types.h
72815+++ b/include/linux/types.h
72816@@ -176,10 +176,26 @@ typedef struct {
72817 int counter;
72818 } atomic_t;
72819
72820+#ifdef CONFIG_PAX_REFCOUNT
72821+typedef struct {
72822+ int counter;
72823+} atomic_unchecked_t;
72824+#else
72825+typedef atomic_t atomic_unchecked_t;
72826+#endif
72827+
72828 #ifdef CONFIG_64BIT
72829 typedef struct {
72830 long counter;
72831 } atomic64_t;
72832+
72833+#ifdef CONFIG_PAX_REFCOUNT
72834+typedef struct {
72835+ long counter;
72836+} atomic64_unchecked_t;
72837+#else
72838+typedef atomic64_t atomic64_unchecked_t;
72839+#endif
72840 #endif
72841
72842 struct list_head {
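
The types.h hunk defines atomic_unchecked_t, the escape hatch for CONFIG_PAX_REFCOUNT: checked atomic_t operations detect reference-count overflow in arch asm (pax_report_refcount_overflow(), declared in the sched.h hunk above, is the reporting path), while *_unchecked operations keep plain wrapping semantics for statistics and ID counters, which is why so many hunks in this patch convert counters to the unchecked type. A rough userspace model; the after-the-fact detection here only approximates PaX's asm-level check:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
        int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);

        if (old == INT_MAX) {           /* refcount just overflowed */
                fprintf(stderr, "refcount overflow detected\n");
                abort();                /* the kernel would kill the task */
        }
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);   /* may wrap */
}

int main(void)
{
        atomic_t ref = { 1 };
        atomic_unchecked_t stat = { INT_MAX };

        atomic_inc(&ref);               /* checked path, no overflow */
        atomic_inc_unchecked(&stat);    /* wraps silently, by design */
        printf("ref=%d stat=%d\n", ref.counter, stat.counter);
        return 0;
}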
72843diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
72844index 5ca0951..ab496a5 100644
72845--- a/include/linux/uaccess.h
72846+++ b/include/linux/uaccess.h
72847@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
72848 long ret; \
72849 mm_segment_t old_fs = get_fs(); \
72850 \
72851- set_fs(KERNEL_DS); \
72852 pagefault_disable(); \
72853- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
72854- pagefault_enable(); \
72855+ set_fs(KERNEL_DS); \
72856+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
72857 set_fs(old_fs); \
72858+ pagefault_enable(); \
72859 ret; \
72860 })
72861
72862diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
72863index 8e522cbc..aa8572d 100644
72864--- a/include/linux/uidgid.h
72865+++ b/include/linux/uidgid.h
72866@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
72867
72868 #endif /* CONFIG_USER_NS */
72869
72870+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
72871+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
72872+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
72873+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
72874+
72875 #endif /* _LINUX_UIDGID_H */
72876diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
72877index 99c1b4d..562e6f3 100644
72878--- a/include/linux/unaligned/access_ok.h
72879+++ b/include/linux/unaligned/access_ok.h
72880@@ -4,34 +4,34 @@
72881 #include <linux/kernel.h>
72882 #include <asm/byteorder.h>
72883
72884-static inline u16 get_unaligned_le16(const void *p)
72885+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
72886 {
72887- return le16_to_cpup((__le16 *)p);
72888+ return le16_to_cpup((const __le16 *)p);
72889 }
72890
72891-static inline u32 get_unaligned_le32(const void *p)
72892+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
72893 {
72894- return le32_to_cpup((__le32 *)p);
72895+ return le32_to_cpup((const __le32 *)p);
72896 }
72897
72898-static inline u64 get_unaligned_le64(const void *p)
72899+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
72900 {
72901- return le64_to_cpup((__le64 *)p);
72902+ return le64_to_cpup((const __le64 *)p);
72903 }
72904
72905-static inline u16 get_unaligned_be16(const void *p)
72906+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
72907 {
72908- return be16_to_cpup((__be16 *)p);
72909+ return be16_to_cpup((const __be16 *)p);
72910 }
72911
72912-static inline u32 get_unaligned_be32(const void *p)
72913+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
72914 {
72915- return be32_to_cpup((__be32 *)p);
72916+ return be32_to_cpup((const __be32 *)p);
72917 }
72918
72919-static inline u64 get_unaligned_be64(const void *p)
72920+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
72921 {
72922- return be64_to_cpup((__be64 *)p);
72923+ return be64_to_cpup((const __be64 *)p);
72924 }
72925
72926 static inline void put_unaligned_le16(u16 val, void *p)
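
__intentional_overflow(-1) on the unaligned getters above exempts them wholesale from the size_overflow plugin, since byte reassembly like this wraps by design; the added const casts are plain const-correctness. A portable userspace version of the LE16 getter for reference:

#include <stdint.h>
#include <stdio.h>

static inline uint16_t get_unaligned_le16(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)(b[0] | (b[1] << 8));  /* no alignment assumed */
}

int main(void)
{
        unsigned char buf[] = { 0x34, 0x12, 0xff };

        printf("0x%04x\n", get_unaligned_le16(buf));            /* 0x1234 */
        printf("0x%04x\n", get_unaligned_le16(buf + 1));        /* 0xff12 */
        return 0;
}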
72927diff --git a/include/linux/usb.h b/include/linux/usb.h
72928index 4d22d0f..8d0e8f8 100644
72929--- a/include/linux/usb.h
72930+++ b/include/linux/usb.h
72931@@ -554,7 +554,7 @@ struct usb_device {
72932 int maxchild;
72933
72934 u32 quirks;
72935- atomic_t urbnum;
72936+ atomic_unchecked_t urbnum;
72937
72938 unsigned long active_duration;
72939
72940@@ -1604,7 +1604,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
72941
72942 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
72943 __u8 request, __u8 requesttype, __u16 value, __u16 index,
72944- void *data, __u16 size, int timeout);
72945+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
72946 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
72947 void *data, int len, int *actual_length, int timeout);
72948 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
72949diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
72950index c5d36c6..108f4f9 100644
72951--- a/include/linux/usb/renesas_usbhs.h
72952+++ b/include/linux/usb/renesas_usbhs.h
72953@@ -39,7 +39,7 @@ enum {
72954 */
72955 struct renesas_usbhs_driver_callback {
72956 int (*notify_hotplug)(struct platform_device *pdev);
72957-};
72958+} __no_const;
72959
72960 /*
72961 * callback functions for platform
72962diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
72963index 5209cfe..b6b215f 100644
72964--- a/include/linux/user_namespace.h
72965+++ b/include/linux/user_namespace.h
72966@@ -21,7 +21,7 @@ struct user_namespace {
72967 struct uid_gid_map uid_map;
72968 struct uid_gid_map gid_map;
72969 struct uid_gid_map projid_map;
72970- struct kref kref;
72971+ atomic_t count;
72972 struct user_namespace *parent;
72973 kuid_t owner;
72974 kgid_t group;
72975@@ -37,18 +37,18 @@ extern struct user_namespace init_user_ns;
72976 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
72977 {
72978 if (ns)
72979- kref_get(&ns->kref);
72980+ atomic_inc(&ns->count);
72981 return ns;
72982 }
72983
72984 extern int create_user_ns(struct cred *new);
72985 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
72986-extern void free_user_ns(struct kref *kref);
72987+extern void free_user_ns(struct user_namespace *ns);
72988
72989 static inline void put_user_ns(struct user_namespace *ns)
72990 {
72991- if (ns)
72992- kref_put(&ns->kref, free_user_ns);
72993+ if (ns && atomic_dec_and_test(&ns->count))
72994+ free_user_ns(ns);
72995 }
72996
72997 struct seq_operations;
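
This hunk drops struct kref in favor of a bare atomic_t, so put_user_ns() frees on the 1-to-0 transition directly instead of going through kref_put()'s release callback. A userspace model of the resulting get/put pair; the struct body is trimmed and the atomics use GCC builtins:

#include <stdio.h>
#include <stdlib.h>

struct user_namespace {
        int count;                      /* atomic_t in the kernel */
        struct user_namespace *parent;
};

static struct user_namespace *get_user_ns(struct user_namespace *ns)
{
        if (ns)
                __atomic_add_fetch(&ns->count, 1, __ATOMIC_RELAXED);
        return ns;
}

static void put_user_ns(struct user_namespace *ns)
{
        if (ns && __atomic_sub_fetch(&ns->count, 1, __ATOMIC_ACQ_REL) == 0) {
                printf("freeing ns %p\n", (void *)ns);
                free(ns);               /* free_user_ns() in the kernel */
        }
}

int main(void)
{
        struct user_namespace *ns = calloc(1, sizeof(*ns));

        ns->count = 1;
        get_user_ns(ns);
        put_user_ns(ns);                /* count 2 -> 1, kept */
        put_user_ns(ns);                /* count 1 -> 0, freed */
        return 0;
}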
72998diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
72999index 6f8fbcf..8259001 100644
73000--- a/include/linux/vermagic.h
73001+++ b/include/linux/vermagic.h
73002@@ -25,9 +25,35 @@
73003 #define MODULE_ARCH_VERMAGIC ""
73004 #endif
73005
73006+#ifdef CONFIG_PAX_REFCOUNT
73007+#define MODULE_PAX_REFCOUNT "REFCOUNT "
73008+#else
73009+#define MODULE_PAX_REFCOUNT ""
73010+#endif
73011+
73012+#ifdef CONSTIFY_PLUGIN
73013+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
73014+#else
73015+#define MODULE_CONSTIFY_PLUGIN ""
73016+#endif
73017+
73018+#ifdef STACKLEAK_PLUGIN
73019+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
73020+#else
73021+#define MODULE_STACKLEAK_PLUGIN ""
73022+#endif
73023+
73024+#ifdef CONFIG_GRKERNSEC
73025+#define MODULE_GRSEC "GRSEC "
73026+#else
73027+#define MODULE_GRSEC ""
73028+#endif
73029+
73030 #define VERMAGIC_STRING \
73031 UTS_RELEASE " " \
73032 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
73033 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
73034- MODULE_ARCH_VERMAGIC
73035+ MODULE_ARCH_VERMAGIC \
73036+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
73037+ MODULE_GRSEC
73038
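
Extending VERMAGIC_STRING with REFCOUNT / CONSTIFY_PLUGIN / STACKLEAK_PLUGIN / GRSEC tags makes module loading fail fast on feature mismatch: the loader's vermagic check is essentially a string comparison, so a module built without these options simply stops matching a kernel built with them. A cut-down illustration, with the release string and tags as examples:

#include <stdio.h>
#include <string.h>

#define UTS_RELEASE             "3.8.10"
#define MODULE_PAX_REFCOUNT     "REFCOUNT "
#define MODULE_GRSEC            "GRSEC "
#define VERMAGIC_STRING         UTS_RELEASE " " MODULE_PAX_REFCOUNT MODULE_GRSEC

int main(void)
{
        const char *kernel_magic = VERMAGIC_STRING;
        const char *module_magic = "3.8.10 ";   /* built without the features */

        printf("kernel: \"%s\"\n", kernel_magic);
        printf("match:  %d\n", strcmp(kernel_magic, module_magic) == 0);  /* 0 */
        return 0;
}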
73039diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
73040index 6071e91..ca6a489 100644
73041--- a/include/linux/vmalloc.h
73042+++ b/include/linux/vmalloc.h
73043@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
73044 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
73045 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
73046 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
73047+
73048+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73049+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
73050+#endif
73051+
73052 /* bits [20..32] reserved for arch specific ioremap internals */
73053
73054 /*
73055@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
73056 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
73057 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
73058 unsigned long start, unsigned long end, gfp_t gfp_mask,
73059- pgprot_t prot, int node, const void *caller);
73060+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
73061 extern void vfree(const void *addr);
73062
73063 extern void *vmap(struct page **pages, unsigned int count,
73064@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
73065 extern void free_vm_area(struct vm_struct *area);
73066
73067 /* for /dev/kmem */
73068-extern long vread(char *buf, char *addr, unsigned long count);
73069-extern long vwrite(char *buf, char *addr, unsigned long count);
73070+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
73071+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
73072
73073 /*
73074 * Internals. Don't use..
73075diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
73076index a13291f..af51fa3 100644
73077--- a/include/linux/vmstat.h
73078+++ b/include/linux/vmstat.h
73079@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
73080 /*
73081 * Zone based page accounting with per cpu differentials.
73082 */
73083-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73084+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73085
73086 static inline void zone_page_state_add(long x, struct zone *zone,
73087 enum zone_stat_item item)
73088 {
73089- atomic_long_add(x, &zone->vm_stat[item]);
73090- atomic_long_add(x, &vm_stat[item]);
73091+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
73092+ atomic_long_add_unchecked(x, &vm_stat[item]);
73093 }
73094
73095 static inline unsigned long global_page_state(enum zone_stat_item item)
73096 {
73097- long x = atomic_long_read(&vm_stat[item]);
73098+ long x = atomic_long_read_unchecked(&vm_stat[item]);
73099 #ifdef CONFIG_SMP
73100 if (x < 0)
73101 x = 0;
73102@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
73103 static inline unsigned long zone_page_state(struct zone *zone,
73104 enum zone_stat_item item)
73105 {
73106- long x = atomic_long_read(&zone->vm_stat[item]);
73107+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
73108 #ifdef CONFIG_SMP
73109 if (x < 0)
73110 x = 0;
73111@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
73112 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
73113 enum zone_stat_item item)
73114 {
73115- long x = atomic_long_read(&zone->vm_stat[item]);
73116+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
73117
73118 #ifdef CONFIG_SMP
73119 int cpu;
73120@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
73121
73122 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
73123 {
73124- atomic_long_inc(&zone->vm_stat[item]);
73125- atomic_long_inc(&vm_stat[item]);
73126+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
73127+ atomic_long_inc_unchecked(&vm_stat[item]);
73128 }
73129
73130 static inline void __inc_zone_page_state(struct page *page,
73131@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
73132
73133 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
73134 {
73135- atomic_long_dec(&zone->vm_stat[item]);
73136- atomic_long_dec(&vm_stat[item]);
73137+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
73138+ atomic_long_dec_unchecked(&vm_stat[item]);
73139 }
73140
73141 static inline void __dec_zone_page_state(struct page *page,
73142diff --git a/include/linux/xattr.h b/include/linux/xattr.h
73143index fdbafc6..b7ffd47 100644
73144--- a/include/linux/xattr.h
73145+++ b/include/linux/xattr.h
73146@@ -28,7 +28,7 @@ struct xattr_handler {
73147 size_t size, int handler_flags);
73148 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
73149 size_t size, int flags, int handler_flags);
73150-};
73151+} __do_const;
73152
73153 struct xattr {
73154 char *name;
73155diff --git a/include/linux/zlib.h b/include/linux/zlib.h
73156index 9c5a6b4..09c9438 100644
73157--- a/include/linux/zlib.h
73158+++ b/include/linux/zlib.h
73159@@ -31,6 +31,7 @@
73160 #define _ZLIB_H
73161
73162 #include <linux/zconf.h>
73163+#include <linux/compiler.h>
73164
73165 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
73166 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
73167@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
73168
73169 /* basic functions */
73170
73171-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
73172+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
73173 /*
73174 Returns the number of bytes that needs to be allocated for a per-
73175 stream workspace with the specified parameters. A pointer to this
73176diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
73177index 95d1c91..6798cca 100644
73178--- a/include/media/v4l2-dev.h
73179+++ b/include/media/v4l2-dev.h
73180@@ -76,7 +76,7 @@ struct v4l2_file_operations {
73181 int (*mmap) (struct file *, struct vm_area_struct *);
73182 int (*open) (struct file *);
73183 int (*release) (struct file *);
73184-};
73185+} __do_const;
73186
73187 /*
73188 * Newer version of video_device, handled by videodev2.c
73189diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
73190index 4118ad1..cb7e25f 100644
73191--- a/include/media/v4l2-ioctl.h
73192+++ b/include/media/v4l2-ioctl.h
73193@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
73194 bool valid_prio, int cmd, void *arg);
73195 };
73196
73197-
73198 /* v4l debugging and diagnostics */
73199
73200 /* Debug bitmask flags to be used on V4L2 */
73201diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
73202index adcbb20..62c2559 100644
73203--- a/include/net/9p/transport.h
73204+++ b/include/net/9p/transport.h
73205@@ -57,7 +57,7 @@ struct p9_trans_module {
73206 int (*cancel) (struct p9_client *, struct p9_req_t *req);
73207 int (*zc_request)(struct p9_client *, struct p9_req_t *,
73208 char *, char *, int , int, int, int);
73209-};
73210+} __do_const;
73211
73212 void v9fs_register_trans(struct p9_trans_module *m);
73213 void v9fs_unregister_trans(struct p9_trans_module *m);
73214diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
73215index 7588ef4..e62d35f 100644
73216--- a/include/net/bluetooth/l2cap.h
73217+++ b/include/net/bluetooth/l2cap.h
73218@@ -552,7 +552,7 @@ struct l2cap_ops {
73219 void (*defer) (struct l2cap_chan *chan);
73220 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
73221 unsigned long len, int nb);
73222-};
73223+} __do_const;
73224
73225 struct l2cap_conn {
73226 struct hci_conn *hcon;
73227diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
73228index 9e5425b..8136ffc 100644
73229--- a/include/net/caif/cfctrl.h
73230+++ b/include/net/caif/cfctrl.h
73231@@ -52,7 +52,7 @@ struct cfctrl_rsp {
73232 void (*radioset_rsp)(void);
73233 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
73234 struct cflayer *client_layer);
73235-};
73236+} __no_const;
73237
73238 /* Link Setup Parameters for CAIF-Links. */
73239 struct cfctrl_link_param {
73240@@ -101,8 +101,8 @@ struct cfctrl_request_info {
73241 struct cfctrl {
73242 struct cfsrvl serv;
73243 struct cfctrl_rsp res;
73244- atomic_t req_seq_no;
73245- atomic_t rsp_seq_no;
73246+ atomic_unchecked_t req_seq_no;
73247+ atomic_unchecked_t rsp_seq_no;
73248 struct list_head list;
73249 /* Protects from simultaneous access to first_req list */
73250 spinlock_t info_list_lock;
73251diff --git a/include/net/flow.h b/include/net/flow.h
73252index 628e11b..4c475df 100644
73253--- a/include/net/flow.h
73254+++ b/include/net/flow.h
73255@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
73256
73257 extern void flow_cache_flush(void);
73258 extern void flow_cache_flush_deferred(void);
73259-extern atomic_t flow_cache_genid;
73260+extern atomic_unchecked_t flow_cache_genid;
73261
73262 #endif
73263diff --git a/include/net/genetlink.h b/include/net/genetlink.h
73264index bdfbe68..4402ebe 100644
73265--- a/include/net/genetlink.h
73266+++ b/include/net/genetlink.h
73267@@ -118,7 +118,7 @@ struct genl_ops {
73268 struct netlink_callback *cb);
73269 int (*done)(struct netlink_callback *cb);
73270 struct list_head ops_list;
73271-};
73272+} __do_const;
73273
73274 extern int genl_register_family(struct genl_family *family);
73275 extern int genl_register_family_with_ops(struct genl_family *family,
73276diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
73277index e5062c9..48a9a4b 100644
73278--- a/include/net/gro_cells.h
73279+++ b/include/net/gro_cells.h
73280@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
73281 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
73282
73283 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
73284- atomic_long_inc(&dev->rx_dropped);
73285+ atomic_long_inc_unchecked(&dev->rx_dropped);
73286 kfree_skb(skb);
73287 return;
73288 }
73289@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
73290 int i;
73291
73292 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
73293- gcells->cells = kcalloc(sizeof(struct gro_cell),
73294- gcells->gro_cells_mask + 1,
73295+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
73296+ sizeof(struct gro_cell),
73297 GFP_KERNEL);
73298 if (!gcells->cells)
73299 return -ENOMEM;
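
Besides the unchecked counter, the gro_cells.h hunk fixes a transposed kcalloc() call: the declared contract is kcalloc(n, size), and the byte total is identical either way, so the fix is about honoring the (count, element size) convention that readers and checking tools key off. Userspace demo with calloc(), which shares the (nmemb, size) contract:

#include <stdio.h>
#include <stdlib.h>

struct gro_cell { char pad[64]; };      /* stand-in for the real struct */

int main(void)
{
        unsigned long mask = 7;         /* plays the role of gro_cells_mask */

        /* correct order: count first, element size second */
        struct gro_cell *cells = calloc(mask + 1, sizeof(struct gro_cell));

        if (!cells)
                return 1;               /* -ENOMEM in the kernel */
        printf("allocated %lu cells\n", mask + 1);
        free(cells);
        return 0;
}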
73300diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
73301index 1832927..ce39aea 100644
73302--- a/include/net/inet_connection_sock.h
73303+++ b/include/net/inet_connection_sock.h
73304@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
73305 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
73306 int (*bind_conflict)(const struct sock *sk,
73307 const struct inet_bind_bucket *tb, bool relax);
73308-};
73309+} __do_const;
73310
73311 /** inet_connection_sock - INET connection oriented sock
73312 *
73313diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
73314index 53f464d..ba76aaa 100644
73315--- a/include/net/inetpeer.h
73316+++ b/include/net/inetpeer.h
73317@@ -47,8 +47,8 @@ struct inet_peer {
73318 */
73319 union {
73320 struct {
73321- atomic_t rid; /* Frag reception counter */
73322- atomic_t ip_id_count; /* IP ID for the next packet */
73323+ atomic_unchecked_t rid; /* Frag reception counter */
73324+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
73325 };
73326 struct rcu_head rcu;
73327 struct inet_peer *gc_next;
73328@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
73329 more++;
73330 inet_peer_refcheck(p);
73331 do {
73332- old = atomic_read(&p->ip_id_count);
73333+ old = atomic_read_unchecked(&p->ip_id_count);
73334 new = old + more;
73335 if (!new)
73336 new = 1;
73337- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
73338+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
73339 return new;
73340 }
73341
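
inet_getid() is the textbook case for the unchecked atomics above: the IP ID is supposed to wrap, it just may never yield 0. A userspace rendering of the same compare-and-swap loop using GCC builtins; the initial counter value is picked to show the wrap:

#include <stdio.h>

static int ip_id_count = -2;            /* near wrap, for the demo */

static int inet_getid(int more)
{
        int old, new;

        more++;
        do {
                old = __atomic_load_n(&ip_id_count, __ATOMIC_RELAXED);
                new = old + more;
                if (!new)
                        new = 1;        /* skip the reserved 0 */
        } while (!__atomic_compare_exchange_n(&ip_id_count, &old, new, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return new;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("id=%d\n", inet_getid(0));       /* -1, 1, 2, 3 */
        return 0;
}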
73342diff --git a/include/net/ip.h b/include/net/ip.h
73343index a68f838..74518ab 100644
73344--- a/include/net/ip.h
73345+++ b/include/net/ip.h
73346@@ -202,7 +202,7 @@ extern struct local_ports {
73347 } sysctl_local_ports;
73348 extern void inet_get_local_port_range(int *low, int *high);
73349
73350-extern unsigned long *sysctl_local_reserved_ports;
73351+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
73352 static inline int inet_is_reserved_local_port(int port)
73353 {
73354 return test_bit(port, sysctl_local_reserved_ports);
73355diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
73356index e49db91..76a81de 100644
73357--- a/include/net/ip_fib.h
73358+++ b/include/net/ip_fib.h
73359@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
73360
73361 #define FIB_RES_SADDR(net, res) \
73362 ((FIB_RES_NH(res).nh_saddr_genid == \
73363- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
73364+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
73365 FIB_RES_NH(res).nh_saddr : \
73366 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
73367 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
73368diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
73369index 68c69d5..bdab192 100644
73370--- a/include/net/ip_vs.h
73371+++ b/include/net/ip_vs.h
73372@@ -599,7 +599,7 @@ struct ip_vs_conn {
73373 struct ip_vs_conn *control; /* Master control connection */
73374 atomic_t n_control; /* Number of controlled ones */
73375 struct ip_vs_dest *dest; /* real server */
73376- atomic_t in_pkts; /* incoming packet counter */
73377+ atomic_unchecked_t in_pkts; /* incoming packet counter */
73378
73379 /* packet transmitter for different forwarding methods. If it
73380 mangles the packet, it must return NF_DROP or better NF_STOLEN,
73381@@ -737,7 +737,7 @@ struct ip_vs_dest {
73382 __be16 port; /* port number of the server */
73383 union nf_inet_addr addr; /* IP address of the server */
73384 volatile unsigned int flags; /* dest status flags */
73385- atomic_t conn_flags; /* flags to copy to conn */
73386+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
73387 atomic_t weight; /* server weight */
73388
73389 atomic_t refcnt; /* reference counter */
73390@@ -980,11 +980,11 @@ struct netns_ipvs {
73391 /* ip_vs_lblc */
73392 int sysctl_lblc_expiration;
73393 struct ctl_table_header *lblc_ctl_header;
73394- struct ctl_table *lblc_ctl_table;
73395+ ctl_table_no_const *lblc_ctl_table;
73396 /* ip_vs_lblcr */
73397 int sysctl_lblcr_expiration;
73398 struct ctl_table_header *lblcr_ctl_header;
73399- struct ctl_table *lblcr_ctl_table;
73400+ ctl_table_no_const *lblcr_ctl_table;
73401 /* ip_vs_est */
73402 struct list_head est_list; /* estimator list */
73403 spinlock_t est_lock;
73404diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
73405index 80ffde3..968b0f4 100644
73406--- a/include/net/irda/ircomm_tty.h
73407+++ b/include/net/irda/ircomm_tty.h
73408@@ -35,6 +35,7 @@
73409 #include <linux/termios.h>
73410 #include <linux/timer.h>
73411 #include <linux/tty.h> /* struct tty_struct */
73412+#include <asm/local.h>
73413
73414 #include <net/irda/irias_object.h>
73415 #include <net/irda/ircomm_core.h>
73416diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
73417index cc7c197..9f2da2a 100644
73418--- a/include/net/iucv/af_iucv.h
73419+++ b/include/net/iucv/af_iucv.h
73420@@ -141,7 +141,7 @@ struct iucv_sock {
73421 struct iucv_sock_list {
73422 struct hlist_head head;
73423 rwlock_t lock;
73424- atomic_t autobind_name;
73425+ atomic_unchecked_t autobind_name;
73426 };
73427
73428 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
73429diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
73430index df83f69..9b640b8 100644
73431--- a/include/net/llc_c_ac.h
73432+++ b/include/net/llc_c_ac.h
73433@@ -87,7 +87,7 @@
73434 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
73435 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
73436
73437-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
73438+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
73439
73440 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
73441 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
73442diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
73443index 6ca3113..f8026dd 100644
73444--- a/include/net/llc_c_ev.h
73445+++ b/include/net/llc_c_ev.h
73446@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
73447 return (struct llc_conn_state_ev *)skb->cb;
73448 }
73449
73450-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
73451-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
73452+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
73453+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
73454
73455 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
73456 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
73457diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
73458index 0e79cfb..f46db31 100644
73459--- a/include/net/llc_c_st.h
73460+++ b/include/net/llc_c_st.h
73461@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
73462 u8 next_state;
73463 llc_conn_ev_qfyr_t *ev_qualifiers;
73464 llc_conn_action_t *ev_actions;
73465-};
73466+} __do_const;
73467
73468 struct llc_conn_state {
73469 u8 current_state;
73470diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
73471index 37a3bbd..55a4241 100644
73472--- a/include/net/llc_s_ac.h
73473+++ b/include/net/llc_s_ac.h
73474@@ -23,7 +23,7 @@
73475 #define SAP_ACT_TEST_IND 9
73476
73477 /* All action functions must look like this */
73478-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
73479+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
73480
73481 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
73482 struct sk_buff *skb);
73483diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
73484index 567c681..cd73ac0 100644
73485--- a/include/net/llc_s_st.h
73486+++ b/include/net/llc_s_st.h
73487@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
73488 llc_sap_ev_t ev;
73489 u8 next_state;
73490 llc_sap_action_t *ev_actions;
73491-};
73492+} __do_const;
73493
73494 struct llc_sap_state {
73495 u8 curr_state;
73496diff --git a/include/net/mac80211.h b/include/net/mac80211.h
73497index ee50c5e..1bc3b1a 100644
73498--- a/include/net/mac80211.h
73499+++ b/include/net/mac80211.h
73500@@ -3996,7 +3996,7 @@ struct rate_control_ops {
73501 void (*add_sta_debugfs)(void *priv, void *priv_sta,
73502 struct dentry *dir);
73503 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
73504-};
73505+} __do_const;
73506
73507 static inline int rate_supported(struct ieee80211_sta *sta,
73508 enum ieee80211_band band,
73509diff --git a/include/net/neighbour.h b/include/net/neighbour.h
73510index 0dab173..1b76af0 100644
73511--- a/include/net/neighbour.h
73512+++ b/include/net/neighbour.h
73513@@ -123,7 +123,7 @@ struct neigh_ops {
73514 void (*error_report)(struct neighbour *, struct sk_buff *);
73515 int (*output)(struct neighbour *, struct sk_buff *);
73516 int (*connected_output)(struct neighbour *, struct sk_buff *);
73517-};
73518+} __do_const;
73519
73520 struct pneigh_entry {
73521 struct pneigh_entry *next;
73522diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
73523index de644bc..dfbcc4c 100644
73524--- a/include/net/net_namespace.h
73525+++ b/include/net/net_namespace.h
73526@@ -115,7 +115,7 @@ struct net {
73527 #endif
73528 struct netns_ipvs *ipvs;
73529 struct sock *diag_nlsk;
73530- atomic_t rt_genid;
73531+ atomic_unchecked_t rt_genid;
73532 };
73533
73534 /*
73535@@ -272,7 +272,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
73536 #define __net_init __init
73537 #define __net_exit __exit_refok
73538 #define __net_initdata __initdata
73539+#ifdef CONSTIFY_PLUGIN
73540 #define __net_initconst __initconst
73541+#else
73542+#define __net_initconst __initdata
73543+#endif
73544 #endif
73545
73546 struct pernet_operations {
73547@@ -282,7 +286,7 @@ struct pernet_operations {
73548 void (*exit_batch)(struct list_head *net_exit_list);
73549 int *id;
73550 size_t size;
73551-};
73552+} __do_const;
73553
73554 /*
73555 * Use these carefully. If you implement a network device and it
73556@@ -330,12 +334,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
73557
73558 static inline int rt_genid(struct net *net)
73559 {
73560- return atomic_read(&net->rt_genid);
73561+ return atomic_read_unchecked(&net->rt_genid);
73562 }
73563
73564 static inline void rt_genid_bump(struct net *net)
73565 {
73566- atomic_inc(&net->rt_genid);
73567+ atomic_inc_unchecked(&net->rt_genid);
73568 }
73569
73570 #endif /* __NET_NET_NAMESPACE_H */
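
rt_genid is a generation counter: rt_genid_bump() invalidates every cached route in a single increment, and each cached entry compares its snapshot against the live value. Overflow is harmless (a wrap merely forces one extra revalidation), which is why the counter can move to the unchecked type. Conceptual sketch with hypothetical names:

    /* hypothetical cache entry carrying a genid snapshot */
    struct cached_route_sketch {
        int genid_snapshot;
        /* ... cached destination data ... */
    };

    static int rt_genid_counter;   /* stands in for net->rt_genid */

    static int entry_still_valid(const struct cached_route_sketch *e)
    {
        return e->genid_snapshot == rt_genid_counter; /* stale after a bump */
    }

    static void flush_all_cached_routes(void)
    {
        rt_genid_counter++;        /* whole cache is now logically stale */
    }
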
73571diff --git a/include/net/netdma.h b/include/net/netdma.h
73572index 8ba8ce2..99b7fff 100644
73573--- a/include/net/netdma.h
73574+++ b/include/net/netdma.h
73575@@ -24,7 +24,7 @@
73576 #include <linux/dmaengine.h>
73577 #include <linux/skbuff.h>
73578
73579-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73580+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73581 struct sk_buff *skb, int offset, struct iovec *to,
73582 size_t len, struct dma_pinned_list *pinned_list);
73583
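
__intentional_overflow(3,5) tells the size_overflow gcc plugin that arithmetic feeding parameters 3 and 5 (offset and len) of this function may wrap without triggering the plugin's kill-on-overflow instrumentation. A sketch of how such a marker is typically plumbed — a hypothetical reconstruction of the compiler glue, not text from this patch:

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)   /* no plugin: compiles away */
    #endif

    /* parameter 2 of this hypothetical helper may legitimately wrap */
    int __intentional_overflow(2) advance_offset(int base, int delta);
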
73584diff --git a/include/net/netlink.h b/include/net/netlink.h
73585index 9690b0f..87aded7 100644
73586--- a/include/net/netlink.h
73587+++ b/include/net/netlink.h
73588@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
73589 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
73590 {
73591 if (mark)
73592- skb_trim(skb, (unsigned char *) mark - skb->data);
73593+ skb_trim(skb, (const unsigned char *) mark - skb->data);
73594 }
73595
73596 /**
73597diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
73598index 923cb20..deae816 100644
73599--- a/include/net/netns/conntrack.h
73600+++ b/include/net/netns/conntrack.h
73601@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
73602 struct nf_proto_net {
73603 #ifdef CONFIG_SYSCTL
73604 struct ctl_table_header *ctl_table_header;
73605- struct ctl_table *ctl_table;
73606+ ctl_table_no_const *ctl_table;
73607 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
73608 struct ctl_table_header *ctl_compat_header;
73609- struct ctl_table *ctl_compat_table;
73610+ ctl_table_no_const *ctl_compat_table;
73611 #endif
73612 #endif
73613 unsigned int users;
73614@@ -58,7 +58,7 @@ struct nf_ip_net {
73615 struct nf_icmp_net icmpv6;
73616 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
73617 struct ctl_table_header *ctl_table_header;
73618- struct ctl_table *ctl_table;
73619+ ctl_table_no_const *ctl_table;
73620 #endif
73621 };
73622
73623diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
73624index 2ae2b83..dbdc85e 100644
73625--- a/include/net/netns/ipv4.h
73626+++ b/include/net/netns/ipv4.h
73627@@ -64,7 +64,7 @@ struct netns_ipv4 {
73628 kgid_t sysctl_ping_group_range[2];
73629 long sysctl_tcp_mem[3];
73630
73631- atomic_t dev_addr_genid;
73632+ atomic_unchecked_t dev_addr_genid;
73633
73634 #ifdef CONFIG_IP_MROUTE
73635 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
73636diff --git a/include/net/protocol.h b/include/net/protocol.h
73637index 047c047..b9dad15 100644
73638--- a/include/net/protocol.h
73639+++ b/include/net/protocol.h
73640@@ -44,7 +44,7 @@ struct net_protocol {
73641 void (*err_handler)(struct sk_buff *skb, u32 info);
73642 unsigned int no_policy:1,
73643 netns_ok:1;
73644-};
73645+} __do_const;
73646
73647 #if IS_ENABLED(CONFIG_IPV6)
73648 struct inet6_protocol {
73649@@ -57,7 +57,7 @@ struct inet6_protocol {
73650 u8 type, u8 code, int offset,
73651 __be32 info);
73652 unsigned int flags; /* INET6_PROTO_xxx */
73653-};
73654+} __do_const;
73655
73656 #define INET6_PROTO_NOPOLICY 0x1
73657 #define INET6_PROTO_FINAL 0x2
73658diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
73659index 5a15fab..d799ea7 100644
73660--- a/include/net/rtnetlink.h
73661+++ b/include/net/rtnetlink.h
73662@@ -81,7 +81,7 @@ struct rtnl_link_ops {
73663 const struct net_device *dev);
73664 unsigned int (*get_num_tx_queues)(void);
73665 unsigned int (*get_num_rx_queues)(void);
73666-};
73667+} __do_const;
73668
73669 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
73670 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
73671diff --git a/include/net/scm.h b/include/net/scm.h
73672index 975cca0..b117081 100644
73673--- a/include/net/scm.h
73674+++ b/include/net/scm.h
73675@@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
73676 scm->pid = get_pid(pid);
73677 scm->cred = cred ? get_cred(cred) : NULL;
73678 scm->creds.pid = pid_vnr(pid);
73679- scm->creds.uid = cred ? cred->euid : INVALID_UID;
73680- scm->creds.gid = cred ? cred->egid : INVALID_GID;
73681+ scm->creds.uid = cred ? cred->uid : INVALID_UID;
73682+ scm->creds.gid = cred ? cred->gid : INVALID_GID;
73683 }
73684
73685 static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
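
Note the semantic change here: SCM_CREDENTIALS peers now receive the sender's real uid/gid rather than the effective ids, so a setuid sender no longer presents its elevated identity over a unix socket. A userspace sketch of the receiving side (standard recvmsg() cmsg parsing, nothing patched):

    #define _GNU_SOURCE
    #include <sys/socket.h>
    #include <string.h>

    static int read_peer_creds(struct msghdr *msg, struct ucred *out)
    {
        struct cmsghdr *c;
        for (c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c)) {
            if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_CREDENTIALS) {
                memcpy(out, CMSG_DATA(c), sizeof(*out));
                return 0;      /* out->uid is cred->uid under this patch */
            }
        }
        return -1;
    }
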
73686diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
73687index 7fdf298..197e9f7 100644
73688--- a/include/net/sctp/sctp.h
73689+++ b/include/net/sctp/sctp.h
73690@@ -330,9 +330,9 @@ do { \
73691
73692 #else /* SCTP_DEBUG */
73693
73694-#define SCTP_DEBUG_PRINTK(whatever...)
73695-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
73696-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
73697+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
73698+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
73699+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
73700 #define SCTP_ENABLE_DEBUG
73701 #define SCTP_DISABLE_DEBUG
73702 #define SCTP_ASSERT(expr, str, func)
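
Defining the disabled debug macros as do { } while (0) instead of empty makes every invocation a complete statement that still demands its trailing semicolon, so if/else chains parse identically whether SCTP_DEBUG is on or off, and -Wempty-body stays quiet. Minimal illustration, independent of SCTP:

    #define LOG_EMPTY(fmt, ...)                    /* expands to nothing  */
    #define LOG_STMT(fmt, ...)  do { } while (0)   /* a single statement  */

    static void example(int cond, int *state)
    {
        if (cond)
            LOG_STMT("enabled path"); /* one statement; ';' required */
        else
            *state = 0;
    }
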
73703diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
73704index 2a82d13..62a31c2 100644
73705--- a/include/net/sctp/sm.h
73706+++ b/include/net/sctp/sm.h
73707@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
73708 typedef struct {
73709 sctp_state_fn_t *fn;
73710 const char *name;
73711-} sctp_sm_table_entry_t;
73712+} __do_const sctp_sm_table_entry_t;
73713
73714 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
73715 * currently in use.
73716@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
73717 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
73718
73719 /* Extern declarations for major data structures. */
73720-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73721+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73722
73723
73724 /* Get the size of a DATA chunk payload. */
73725diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
73726index fdeb85a..1329d95 100644
73727--- a/include/net/sctp/structs.h
73728+++ b/include/net/sctp/structs.h
73729@@ -517,7 +517,7 @@ struct sctp_pf {
73730 struct sctp_association *asoc);
73731 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
73732 struct sctp_af *af;
73733-};
73734+} __do_const;
73735
73736
73737 /* Structure to track chunk fragments that have been acked, but peer
73738diff --git a/include/net/sock.h b/include/net/sock.h
73739index 25afaa0..8bb0070 100644
73740--- a/include/net/sock.h
73741+++ b/include/net/sock.h
73742@@ -322,7 +322,7 @@ struct sock {
73743 #ifdef CONFIG_RPS
73744 __u32 sk_rxhash;
73745 #endif
73746- atomic_t sk_drops;
73747+ atomic_unchecked_t sk_drops;
73748 int sk_rcvbuf;
73749
73750 struct sk_filter __rcu *sk_filter;
73751@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
73752 }
73753
73754 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
73755- char __user *from, char *to,
73756+ char __user *from, unsigned char *to,
73757 int copy, int offset)
73758 {
73759 if (skb->ip_summed == CHECKSUM_NONE) {
73760@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
73761 }
73762 }
73763
73764-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73765+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73766
73767 /**
73768 * sk_page_frag - return an appropriate page_frag
73769diff --git a/include/net/tcp.h b/include/net/tcp.h
73770index aed42c7..43890c6 100644
73771--- a/include/net/tcp.h
73772+++ b/include/net/tcp.h
73773@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
73774 extern void tcp_xmit_retransmit_queue(struct sock *);
73775 extern void tcp_simple_retransmit(struct sock *);
73776 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
73777-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73778+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73779
73780 extern void tcp_send_probe0(struct sock *);
73781 extern void tcp_send_partial(struct sock *);
73782@@ -701,8 +701,8 @@ struct tcp_skb_cb {
73783 struct inet6_skb_parm h6;
73784 #endif
73785 } header; /* For incoming frames */
73786- __u32 seq; /* Starting sequence number */
73787- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
73788+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
73789+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
73790 __u32 when; /* used to compute rtt's */
73791 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
73792
73793@@ -716,7 +716,7 @@ struct tcp_skb_cb {
73794
73795 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
73796 /* 1 byte hole */
73797- __u32 ack_seq; /* Sequence number ACK'd */
73798+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
73799 };
73800
73801 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
73802diff --git a/include/net/xfrm.h b/include/net/xfrm.h
73803index 63445ed..d6fc34f 100644
73804--- a/include/net/xfrm.h
73805+++ b/include/net/xfrm.h
73806@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
73807 struct net_device *dev,
73808 const struct flowi *fl);
73809 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
73810-};
73811+} __do_const;
73812
73813 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
73814 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
73815@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
73816 struct sk_buff *skb);
73817 int (*transport_finish)(struct sk_buff *skb,
73818 int async);
73819-};
73820+} __do_const;
73821
73822 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
73823 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
73824@@ -423,7 +423,7 @@ struct xfrm_mode {
73825 struct module *owner;
73826 unsigned int encap;
73827 int flags;
73828-};
73829+} __do_const;
73830
73831 /* Flags for xfrm_mode. */
73832 enum {
73833@@ -514,7 +514,7 @@ struct xfrm_policy {
73834 struct timer_list timer;
73835
73836 struct flow_cache_object flo;
73837- atomic_t genid;
73838+ atomic_unchecked_t genid;
73839 u32 priority;
73840 u32 index;
73841 struct xfrm_mark mark;
73842diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
73843index 1a046b1..ee0bef0 100644
73844--- a/include/rdma/iw_cm.h
73845+++ b/include/rdma/iw_cm.h
73846@@ -122,7 +122,7 @@ struct iw_cm_verbs {
73847 int backlog);
73848
73849 int (*destroy_listen)(struct iw_cm_id *cm_id);
73850-};
73851+} __no_const;
73852
73853 /**
73854 * iw_create_cm_id - Create an IW CM identifier.
73855diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
73856index 399162b..b337f1a 100644
73857--- a/include/scsi/libfc.h
73858+++ b/include/scsi/libfc.h
73859@@ -762,6 +762,7 @@ struct libfc_function_template {
73860 */
73861 void (*disc_stop_final) (struct fc_lport *);
73862 };
73863+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
73864
73865 /**
73866 * struct fc_disc - Discovery context
73867@@ -866,7 +867,7 @@ struct fc_lport {
73868 struct fc_vport *vport;
73869
73870 /* Operational Information */
73871- struct libfc_function_template tt;
73872+ libfc_function_template_no_const tt;
73873 u8 link_up;
73874 u8 qfull;
73875 enum fc_lport_state state;
73876diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
73877index e65c62e..aa2e5a2 100644
73878--- a/include/scsi/scsi_device.h
73879+++ b/include/scsi/scsi_device.h
73880@@ -170,9 +170,9 @@ struct scsi_device {
73881 unsigned int max_device_blocked; /* what device_blocked counts down from */
73882 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
73883
73884- atomic_t iorequest_cnt;
73885- atomic_t iodone_cnt;
73886- atomic_t ioerr_cnt;
73887+ atomic_unchecked_t iorequest_cnt;
73888+ atomic_unchecked_t iodone_cnt;
73889+ atomic_unchecked_t ioerr_cnt;
73890
73891 struct device sdev_gendev,
73892 sdev_dev;
73893diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
73894index b797e8f..8e2c3aa 100644
73895--- a/include/scsi/scsi_transport_fc.h
73896+++ b/include/scsi/scsi_transport_fc.h
73897@@ -751,7 +751,8 @@ struct fc_function_template {
73898 unsigned long show_host_system_hostname:1;
73899
73900 unsigned long disable_target_scan:1;
73901-};
73902+} __do_const;
73903+typedef struct fc_function_template __no_const fc_function_template_no_const;
73904
73905
73906 /**
73907diff --git a/include/sound/soc.h b/include/sound/soc.h
73908index bc56738..a4be132 100644
73909--- a/include/sound/soc.h
73910+++ b/include/sound/soc.h
73911@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
73912 /* probe ordering - for components with runtime dependencies */
73913 int probe_order;
73914 int remove_order;
73915-};
73916+} __do_const;
73917
73918 /* SoC platform interface */
73919 struct snd_soc_platform_driver {
73920@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
73921 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
73922 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
73923 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
73924-};
73925+} __do_const;
73926
73927 struct snd_soc_platform {
73928 const char *name;
73929diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
73930index 663e34a..91b306a 100644
73931--- a/include/target/target_core_base.h
73932+++ b/include/target/target_core_base.h
73933@@ -654,7 +654,7 @@ struct se_device {
73934 spinlock_t stats_lock;
73935 /* Active commands on this virtual SE device */
73936 atomic_t simple_cmds;
73937- atomic_t dev_ordered_id;
73938+ atomic_unchecked_t dev_ordered_id;
73939 atomic_t dev_ordered_sync;
73940 atomic_t dev_qf_count;
73941 int export_count;
73942diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
73943new file mode 100644
73944index 0000000..fb634b7
73945--- /dev/null
73946+++ b/include/trace/events/fs.h
73947@@ -0,0 +1,53 @@
73948+#undef TRACE_SYSTEM
73949+#define TRACE_SYSTEM fs
73950+
73951+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
73952+#define _TRACE_FS_H
73953+
73954+#include <linux/fs.h>
73955+#include <linux/tracepoint.h>
73956+
73957+TRACE_EVENT(do_sys_open,
73958+
73959+ TP_PROTO(const char *filename, int flags, int mode),
73960+
73961+ TP_ARGS(filename, flags, mode),
73962+
73963+ TP_STRUCT__entry(
73964+ __string( filename, filename )
73965+ __field( int, flags )
73966+ __field( int, mode )
73967+ ),
73968+
73969+ TP_fast_assign(
73970+ __assign_str(filename, filename);
73971+ __entry->flags = flags;
73972+ __entry->mode = mode;
73973+ ),
73974+
73975+ TP_printk("\"%s\" %x %o",
73976+ __get_str(filename), __entry->flags, __entry->mode)
73977+);
73978+
73979+TRACE_EVENT(open_exec,
73980+
73981+ TP_PROTO(const char *filename),
73982+
73983+ TP_ARGS(filename),
73984+
73985+ TP_STRUCT__entry(
73986+ __string( filename, filename )
73987+ ),
73988+
73989+ TP_fast_assign(
73990+ __assign_str(filename, filename);
73991+ ),
73992+
73993+ TP_printk("\"%s\"",
73994+ __get_str(filename))
73995+);
73996+
73997+#endif /* _TRACE_FS_H */
73998+
73999+/* This part must be outside protection */
74000+#include <trace/define_trace.h>
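
The new header generates trace_do_sys_open() and trace_open_exec() hooks. A hypothetical call site, mirroring how mainline consumes such events (CREATE_TRACE_POINTS must be defined in exactly one translation unit before the include):

    #define CREATE_TRACE_POINTS
    #include <trace/events/fs.h>

    static long do_sys_open_sketch(const char *filename, int flags, int mode)
    {
        trace_do_sys_open(filename, flags, mode); /* emitted by TRACE_EVENT */
        /* ... actual open path would follow ... */
        return 0;
    }
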
74001diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
74002index 1c09820..7f5ec79 100644
74003--- a/include/trace/events/irq.h
74004+++ b/include/trace/events/irq.h
74005@@ -36,7 +36,7 @@ struct softirq_action;
74006 */
74007 TRACE_EVENT(irq_handler_entry,
74008
74009- TP_PROTO(int irq, struct irqaction *action),
74010+ TP_PROTO(int irq, const struct irqaction *action),
74011
74012 TP_ARGS(irq, action),
74013
74014@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
74015 */
74016 TRACE_EVENT(irq_handler_exit,
74017
74018- TP_PROTO(int irq, struct irqaction *action, int ret),
74019+ TP_PROTO(int irq, const struct irqaction *action, int ret),
74020
74021 TP_ARGS(irq, action, ret),
74022
74023diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
74024index 7caf44c..23c6f27 100644
74025--- a/include/uapi/linux/a.out.h
74026+++ b/include/uapi/linux/a.out.h
74027@@ -39,6 +39,14 @@ enum machine_type {
74028 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
74029 };
74030
74031+/* Constants for the N_FLAGS field */
74032+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
74033+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
74034+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
74035+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
74036+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
74037+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
74038+
74039 #if !defined (N_MAGIC)
74040 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
74041 #endif
74042diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
74043index d876736..ccce5c0 100644
74044--- a/include/uapi/linux/byteorder/little_endian.h
74045+++ b/include/uapi/linux/byteorder/little_endian.h
74046@@ -42,51 +42,51 @@
74047
74048 static inline __le64 __cpu_to_le64p(const __u64 *p)
74049 {
74050- return (__force __le64)*p;
74051+ return (__force const __le64)*p;
74052 }
74053-static inline __u64 __le64_to_cpup(const __le64 *p)
74054+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
74055 {
74056- return (__force __u64)*p;
74057+ return (__force const __u64)*p;
74058 }
74059 static inline __le32 __cpu_to_le32p(const __u32 *p)
74060 {
74061- return (__force __le32)*p;
74062+ return (__force const __le32)*p;
74063 }
74064 static inline __u32 __le32_to_cpup(const __le32 *p)
74065 {
74066- return (__force __u32)*p;
74067+ return (__force const __u32)*p;
74068 }
74069 static inline __le16 __cpu_to_le16p(const __u16 *p)
74070 {
74071- return (__force __le16)*p;
74072+ return (__force const __le16)*p;
74073 }
74074 static inline __u16 __le16_to_cpup(const __le16 *p)
74075 {
74076- return (__force __u16)*p;
74077+ return (__force const __u16)*p;
74078 }
74079 static inline __be64 __cpu_to_be64p(const __u64 *p)
74080 {
74081- return (__force __be64)__swab64p(p);
74082+ return (__force const __be64)__swab64p(p);
74083 }
74084 static inline __u64 __be64_to_cpup(const __be64 *p)
74085 {
74086- return __swab64p((__u64 *)p);
74087+ return __swab64p((const __u64 *)p);
74088 }
74089 static inline __be32 __cpu_to_be32p(const __u32 *p)
74090 {
74091- return (__force __be32)__swab32p(p);
74092+ return (__force const __be32)__swab32p(p);
74093 }
74094-static inline __u32 __be32_to_cpup(const __be32 *p)
74095+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
74096 {
74097- return __swab32p((__u32 *)p);
74098+ return __swab32p((const __u32 *)p);
74099 }
74100 static inline __be16 __cpu_to_be16p(const __u16 *p)
74101 {
74102- return (__force __be16)__swab16p(p);
74103+ return (__force const __be16)__swab16p(p);
74104 }
74105 static inline __u16 __be16_to_cpup(const __be16 *p)
74106 {
74107- return __swab16p((__u16 *)p);
74108+ return __swab16p((const __u16 *)p);
74109 }
74110 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
74111 #define __le64_to_cpus(x) do { (void)(x); } while (0)
74112diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
74113index 126a817..d522bd1 100644
74114--- a/include/uapi/linux/elf.h
74115+++ b/include/uapi/linux/elf.h
74116@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
74117 #define PT_GNU_EH_FRAME 0x6474e550
74118
74119 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
74120+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
74121+
74122+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
74123+
74124+/* Constants for the e_flags field */
74125+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
74126+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
74127+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
74128+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
74129+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
74130+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
74131
74132 /*
74133 * Extended Numbering
74134@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
74135 #define DT_DEBUG 21
74136 #define DT_TEXTREL 22
74137 #define DT_JMPREL 23
74138+#define DT_FLAGS 30
74139+ #define DF_TEXTREL 0x00000004
74140 #define DT_ENCODING 32
74141 #define OLD_DT_LOOS 0x60000000
74142 #define DT_LOOS 0x6000000d
74143@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
74144 #define PF_W 0x2
74145 #define PF_X 0x1
74146
74147+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
74148+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
74149+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
74150+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
74151+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
74152+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
74153+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
74154+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
74155+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
74156+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
74157+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
74158+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
74159+
74160 typedef struct elf32_phdr{
74161 Elf32_Word p_type;
74162 Elf32_Off p_offset;
74163@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
74164 #define EI_OSABI 7
74165 #define EI_PAD 8
74166
74167+#define EI_PAX 14
74168+
74169 #define ELFMAG0 0x7f /* EI_MAG */
74170 #define ELFMAG1 'E'
74171 #define ELFMAG2 'L'
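
The PF_* additions come in enable/disable pairs so a binary can be marked explicitly either way, with neither bit set meaning "system default". Hypothetical userspace check of the PT_PAX_FLAGS program header defined above:

    #include <elf.h>
    #include <stdio.h>

    #define PT_PAX_FLAGS_SKETCH 0x65041580U  /* PT_LOOS + 0x5041580 */
    #define PF_MPROTECT_BIT     (1U << 8)
    #define PF_NOMPROTECT_BIT   (1U << 9)

    static void report_mprotect(const Elf64_Phdr *ph)
    {
        if (ph->p_type != PT_PAX_FLAGS_SKETCH)
            return;
        if (ph->p_flags & PF_NOMPROTECT_BIT)
            printf("MPROTECT explicitly disabled\n");
        else if (ph->p_flags & PF_MPROTECT_BIT)
            printf("MPROTECT explicitly enabled\n");
        else
            printf("MPROTECT left to system default\n");
    }
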
74172diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
74173index aa169c4..6a2771d 100644
74174--- a/include/uapi/linux/personality.h
74175+++ b/include/uapi/linux/personality.h
74176@@ -30,6 +30,7 @@ enum {
74177 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
74178 ADDR_NO_RANDOMIZE | \
74179 ADDR_COMPAT_LAYOUT | \
74180+ ADDR_LIMIT_3GB | \
74181 MMAP_PAGE_ZERO)
74182
74183 /*
74184diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
74185index 7530e74..e714828 100644
74186--- a/include/uapi/linux/screen_info.h
74187+++ b/include/uapi/linux/screen_info.h
74188@@ -43,7 +43,8 @@ struct screen_info {
74189 __u16 pages; /* 0x32 */
74190 __u16 vesa_attributes; /* 0x34 */
74191 __u32 capabilities; /* 0x36 */
74192- __u8 _reserved[6]; /* 0x3a */
74193+ __u16 vesapm_size; /* 0x3a */
74194+ __u8 _reserved[4]; /* 0x3c */
74195 } __attribute__((packed));
74196
74197 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
74198diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
74199index 0e011eb..82681b1 100644
74200--- a/include/uapi/linux/swab.h
74201+++ b/include/uapi/linux/swab.h
74202@@ -43,7 +43,7 @@
74203 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
74204 */
74205
74206-static inline __attribute_const__ __u16 __fswab16(__u16 val)
74207+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
74208 {
74209 #ifdef __HAVE_BUILTIN_BSWAP16__
74210 return __builtin_bswap16(val);
74211@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
74212 #endif
74213 }
74214
74215-static inline __attribute_const__ __u32 __fswab32(__u32 val)
74216+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
74217 {
74218 #ifdef __HAVE_BUILTIN_BSWAP32__
74219 return __builtin_bswap32(val);
74220@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
74221 #endif
74222 }
74223
74224-static inline __attribute_const__ __u64 __fswab64(__u64 val)
74225+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
74226 {
74227 #ifdef __HAVE_BUILTIN_BSWAP64__
74228 return __builtin_bswap64(val);
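
The patch only annotates these helpers with __intentional_overflow(-1) (the result may wrap freely); the swap logic is untouched. For reference, the open-coded path behind the #else branches looks like this 32-bit sketch:

    #include <stdint.h>

    static inline uint32_t swab32_fallback(uint32_t x)
    {
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
    }
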
74229diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
74230index 6d67213..8dab561 100644
74231--- a/include/uapi/linux/sysctl.h
74232+++ b/include/uapi/linux/sysctl.h
74233@@ -155,7 +155,11 @@ enum
74234 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
74235 };
74236
74237-
74238+#ifdef CONFIG_PAX_SOFTMODE
74239+enum {
74240+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
74241+};
74242+#endif
74243
74244 /* CTL_VM names: */
74245 enum
74246diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
74247index 26607bd..588b65f 100644
74248--- a/include/uapi/linux/xattr.h
74249+++ b/include/uapi/linux/xattr.h
74250@@ -60,5 +60,9 @@
74251 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
74252 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
74253
74254+/* User namespace */
74255+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
74256+#define XATTR_PAX_FLAGS_SUFFIX "flags"
74257+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
74258
74259 #endif /* _UAPI_LINUX_XATTR_H */
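
The names above compose to the xattr key "user.pax.flags", which standard xattr tooling can manage. A hypothetical C equivalent of marking a binary (the flag-letter encoding in the value is an assumption of the sketch, not defined by this header):

    #include <sys/xattr.h>
    #include <string.h>

    static int mark_pax_flags(const char *path, const char *flags)
    {
        /* flags string encoding is assumed here, e.g. per-feature letters */
        return setxattr(path, "user.pax.flags", flags, strlen(flags), 0);
    }
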
74260diff --git a/include/video/udlfb.h b/include/video/udlfb.h
74261index f9466fa..f4e2b81 100644
74262--- a/include/video/udlfb.h
74263+++ b/include/video/udlfb.h
74264@@ -53,10 +53,10 @@ struct dlfb_data {
74265 u32 pseudo_palette[256];
74266 int blank_mode; /*one of FB_BLANK_ */
74267 /* blit-only rendering path metrics, exposed through sysfs */
74268- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
74269- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
74270- atomic_t bytes_sent; /* to usb, after compression including overhead */
74271- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
74272+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
74273+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
74274+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
74275+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
74276 };
74277
74278 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
74279diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
74280index 0993a22..32ba2fe 100644
74281--- a/include/video/uvesafb.h
74282+++ b/include/video/uvesafb.h
74283@@ -177,6 +177,7 @@ struct uvesafb_par {
74284 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
74285 u8 pmi_setpal; /* PMI for palette changes */
74286 u16 *pmi_base; /* protected mode interface location */
74287+ u8 *pmi_code; /* protected mode code location */
74288 void *pmi_start;
74289 void *pmi_pal;
74290 u8 *vbe_state_orig; /*
74291diff --git a/init/Kconfig b/init/Kconfig
74292index be8b7f5..1eeca9b 100644
74293--- a/init/Kconfig
74294+++ b/init/Kconfig
74295@@ -990,6 +990,7 @@ endif # CGROUPS
74296
74297 config CHECKPOINT_RESTORE
74298 bool "Checkpoint/restore support" if EXPERT
74299+ depends on !GRKERNSEC
74300 default n
74301 help
74302 Enables additional kernel features in a sake of checkpoint/restore.
74303@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
74304
74305 config COMPAT_BRK
74306 bool "Disable heap randomization"
74307- default y
74308+ default n
74309 help
74310 Randomizing heap placement makes heap exploits harder, but it
74311 also breaks ancient binaries (including anything libc5 based).
74312@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
74313 config STOP_MACHINE
74314 bool
74315 default y
74316- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
74317+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
74318 help
74319 Need stop_machine() primitive.
74320
74321diff --git a/init/Makefile b/init/Makefile
74322index 7bc47ee..6da2dc7 100644
74323--- a/init/Makefile
74324+++ b/init/Makefile
74325@@ -2,6 +2,9 @@
74326 # Makefile for the linux kernel.
74327 #
74328
74329+ccflags-y := $(GCC_PLUGINS_CFLAGS)
74330+asflags-y := $(GCC_PLUGINS_AFLAGS)
74331+
74332 obj-y := main.o version.o mounts.o
74333 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
74334 obj-y += noinitramfs.o
74335diff --git a/init/do_mounts.c b/init/do_mounts.c
74336index 1d1b634..a1c810f 100644
74337--- a/init/do_mounts.c
74338+++ b/init/do_mounts.c
74339@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
74340 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
74341 {
74342 struct super_block *s;
74343- int err = sys_mount(name, "/root", fs, flags, data);
74344+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
74345 if (err)
74346 return err;
74347
74348- sys_chdir("/root");
74349+ sys_chdir((const char __force_user *)"/root");
74350 s = current->fs->pwd.dentry->d_sb;
74351 ROOT_DEV = s->s_dev;
74352 printk(KERN_INFO
74353@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
74354 va_start(args, fmt);
74355 vsprintf(buf, fmt, args);
74356 va_end(args);
74357- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
74358+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
74359 if (fd >= 0) {
74360 sys_ioctl(fd, FDEJECT, 0);
74361 sys_close(fd);
74362 }
74363 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
74364- fd = sys_open("/dev/console", O_RDWR, 0);
74365+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
74366 if (fd >= 0) {
74367 sys_ioctl(fd, TCGETS, (long)&termios);
74368 termios.c_lflag &= ~ICANON;
74369 sys_ioctl(fd, TCSETSF, (long)&termios);
74370- sys_read(fd, &c, 1);
74371+ sys_read(fd, (char __user *)&c, 1);
74372 termios.c_lflag |= ICANON;
74373 sys_ioctl(fd, TCSETSF, (long)&termios);
74374 sys_close(fd);
74375@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
74376 mount_root();
74377 out:
74378 devtmpfs_mount("dev");
74379- sys_mount(".", "/", NULL, MS_MOVE, NULL);
74380- sys_chroot(".");
74381+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
74382+ sys_chroot((const char __force_user *)".");
74383 }
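
These casts follow from grsecurity's stricter user/kernel pointer split: syscall bodies take __user pointers and UDEREF enforces the separation at runtime, so early-boot code passing kernel-held strings must cast explicitly, leaving an auditable marker. A sketch of the annotation mechanics (sparse-only attributes, hypothetical macro names; compiles to nothing under plain gcc):

    #ifdef __CHECKER__
    # define __user_sk  __attribute__((noderef, address_space(1)))
    # define __force_sk __attribute__((force))
    #else
    # define __user_sk
    # define __force_sk
    #endif

    static long sys_unlink_sketch(const char __user_sk *pathname)
    {
        (void)pathname;    /* stub: real code would copy from user space */
        return 0;
    }

    static void early_cleanup(void)
    {
        /* kernel string deliberately crossing into a __user parameter */
        sys_unlink_sketch((__force_sk const char __user_sk *)"/initrd.image");
    }
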
74384diff --git a/init/do_mounts.h b/init/do_mounts.h
74385index f5b978a..69dbfe8 100644
74386--- a/init/do_mounts.h
74387+++ b/init/do_mounts.h
74388@@ -15,15 +15,15 @@ extern int root_mountflags;
74389
74390 static inline int create_dev(char *name, dev_t dev)
74391 {
74392- sys_unlink(name);
74393- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
74394+ sys_unlink((char __force_user *)name);
74395+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
74396 }
74397
74398 #if BITS_PER_LONG == 32
74399 static inline u32 bstat(char *name)
74400 {
74401 struct stat64 stat;
74402- if (sys_stat64(name, &stat) != 0)
74403+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
74404 return 0;
74405 if (!S_ISBLK(stat.st_mode))
74406 return 0;
74407@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
74408 static inline u32 bstat(char *name)
74409 {
74410 struct stat stat;
74411- if (sys_newstat(name, &stat) != 0)
74412+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
74413 return 0;
74414 if (!S_ISBLK(stat.st_mode))
74415 return 0;
74416diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
74417index f9acf71..1e19144 100644
74418--- a/init/do_mounts_initrd.c
74419+++ b/init/do_mounts_initrd.c
74420@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
74421 create_dev("/dev/root.old", Root_RAM0);
74422 /* mount initrd on rootfs' /root */
74423 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
74424- sys_mkdir("/old", 0700);
74425- sys_chdir("/old");
74426+ sys_mkdir((const char __force_user *)"/old", 0700);
74427+ sys_chdir((const char __force_user *)"/old");
74428
74429 /*
74430 * In case that a resume from disk is carried out by linuxrc or one of
74431@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
74432 current->flags &= ~PF_FREEZER_SKIP;
74433
74434 /* move initrd to rootfs' /old */
74435- sys_mount("..", ".", NULL, MS_MOVE, NULL);
74436+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
74437 /* switch root and cwd back to / of rootfs */
74438- sys_chroot("..");
74439+ sys_chroot((const char __force_user *)"..");
74440
74441 if (new_decode_dev(real_root_dev) == Root_RAM0) {
74442- sys_chdir("/old");
74443+ sys_chdir((const char __force_user *)"/old");
74444 return;
74445 }
74446
74447- sys_chdir("/");
74448+ sys_chdir((const char __force_user *)"/");
74449 ROOT_DEV = new_decode_dev(real_root_dev);
74450 mount_root();
74451
74452 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
74453- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
74454+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
74455 if (!error)
74456 printk("okay\n");
74457 else {
74458- int fd = sys_open("/dev/root.old", O_RDWR, 0);
74459+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
74460 if (error == -ENOENT)
74461 printk("/initrd does not exist. Ignored.\n");
74462 else
74463 printk("failed\n");
74464 printk(KERN_NOTICE "Unmounting old root\n");
74465- sys_umount("/old", MNT_DETACH);
74466+ sys_umount((char __force_user *)"/old", MNT_DETACH);
74467 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
74468 if (fd < 0) {
74469 error = fd;
74470@@ -120,11 +120,11 @@ int __init initrd_load(void)
74471 * mounted in the normal path.
74472 */
74473 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
74474- sys_unlink("/initrd.image");
74475+ sys_unlink((const char __force_user *)"/initrd.image");
74476 handle_initrd();
74477 return 1;
74478 }
74479 }
74480- sys_unlink("/initrd.image");
74481+ sys_unlink((const char __force_user *)"/initrd.image");
74482 return 0;
74483 }
74484diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
74485index 8cb6db5..d729f50 100644
74486--- a/init/do_mounts_md.c
74487+++ b/init/do_mounts_md.c
74488@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
74489 partitioned ? "_d" : "", minor,
74490 md_setup_args[ent].device_names);
74491
74492- fd = sys_open(name, 0, 0);
74493+ fd = sys_open((char __force_user *)name, 0, 0);
74494 if (fd < 0) {
74495 printk(KERN_ERR "md: open failed - cannot start "
74496 "array %s\n", name);
74497@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
74498 * array without it
74499 */
74500 sys_close(fd);
74501- fd = sys_open(name, 0, 0);
74502+ fd = sys_open((char __force_user *)name, 0, 0);
74503 sys_ioctl(fd, BLKRRPART, 0);
74504 }
74505 sys_close(fd);
74506@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
74507
74508 wait_for_device_probe();
74509
74510- fd = sys_open("/dev/md0", 0, 0);
74511+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
74512 if (fd >= 0) {
74513 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
74514 sys_close(fd);
74515diff --git a/init/init_task.c b/init/init_task.c
74516index 8b2f399..f0797c9 100644
74517--- a/init/init_task.c
74518+++ b/init/init_task.c
74519@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
74520 * Initial thread structure. Alignment of this is handled by a special
74521 * linker map entry.
74522 */
74523+#ifdef CONFIG_X86
74524+union thread_union init_thread_union __init_task_data;
74525+#else
74526 union thread_union init_thread_union __init_task_data =
74527 { INIT_THREAD_INFO(init_task) };
74528+#endif
74529diff --git a/init/initramfs.c b/init/initramfs.c
74530index 84c6bf1..8899338 100644
74531--- a/init/initramfs.c
74532+++ b/init/initramfs.c
74533@@ -84,7 +84,7 @@ static void __init free_hash(void)
74534 }
74535 }
74536
74537-static long __init do_utime(char *filename, time_t mtime)
74538+static long __init do_utime(char __force_user *filename, time_t mtime)
74539 {
74540 struct timespec t[2];
74541
74542@@ -119,7 +119,7 @@ static void __init dir_utime(void)
74543 struct dir_entry *de, *tmp;
74544 list_for_each_entry_safe(de, tmp, &dir_list, list) {
74545 list_del(&de->list);
74546- do_utime(de->name, de->mtime);
74547+ do_utime((char __force_user *)de->name, de->mtime);
74548 kfree(de->name);
74549 kfree(de);
74550 }
74551@@ -281,7 +281,7 @@ static int __init maybe_link(void)
74552 if (nlink >= 2) {
74553 char *old = find_link(major, minor, ino, mode, collected);
74554 if (old)
74555- return (sys_link(old, collected) < 0) ? -1 : 1;
74556+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
74557 }
74558 return 0;
74559 }
74560@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
74561 {
74562 struct stat st;
74563
74564- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
74565+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
74566 if (S_ISDIR(st.st_mode))
74567- sys_rmdir(path);
74568+ sys_rmdir((char __force_user *)path);
74569 else
74570- sys_unlink(path);
74571+ sys_unlink((char __force_user *)path);
74572 }
74573 }
74574
74575@@ -315,7 +315,7 @@ static int __init do_name(void)
74576 int openflags = O_WRONLY|O_CREAT;
74577 if (ml != 1)
74578 openflags |= O_TRUNC;
74579- wfd = sys_open(collected, openflags, mode);
74580+ wfd = sys_open((char __force_user *)collected, openflags, mode);
74581
74582 if (wfd >= 0) {
74583 sys_fchown(wfd, uid, gid);
74584@@ -327,17 +327,17 @@ static int __init do_name(void)
74585 }
74586 }
74587 } else if (S_ISDIR(mode)) {
74588- sys_mkdir(collected, mode);
74589- sys_chown(collected, uid, gid);
74590- sys_chmod(collected, mode);
74591+ sys_mkdir((char __force_user *)collected, mode);
74592+ sys_chown((char __force_user *)collected, uid, gid);
74593+ sys_chmod((char __force_user *)collected, mode);
74594 dir_add(collected, mtime);
74595 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
74596 S_ISFIFO(mode) || S_ISSOCK(mode)) {
74597 if (maybe_link() == 0) {
74598- sys_mknod(collected, mode, rdev);
74599- sys_chown(collected, uid, gid);
74600- sys_chmod(collected, mode);
74601- do_utime(collected, mtime);
74602+ sys_mknod((char __force_user *)collected, mode, rdev);
74603+ sys_chown((char __force_user *)collected, uid, gid);
74604+ sys_chmod((char __force_user *)collected, mode);
74605+ do_utime((char __force_user *)collected, mtime);
74606 }
74607 }
74608 return 0;
74609@@ -346,15 +346,15 @@ static int __init do_name(void)
74610 static int __init do_copy(void)
74611 {
74612 if (count >= body_len) {
74613- sys_write(wfd, victim, body_len);
74614+ sys_write(wfd, (char __force_user *)victim, body_len);
74615 sys_close(wfd);
74616- do_utime(vcollected, mtime);
74617+ do_utime((char __force_user *)vcollected, mtime);
74618 kfree(vcollected);
74619 eat(body_len);
74620 state = SkipIt;
74621 return 0;
74622 } else {
74623- sys_write(wfd, victim, count);
74624+ sys_write(wfd, (char __force_user *)victim, count);
74625 body_len -= count;
74626 eat(count);
74627 return 1;
74628@@ -365,9 +365,9 @@ static int __init do_symlink(void)
74629 {
74630 collected[N_ALIGN(name_len) + body_len] = '\0';
74631 clean_path(collected, 0);
74632- sys_symlink(collected + N_ALIGN(name_len), collected);
74633- sys_lchown(collected, uid, gid);
74634- do_utime(collected, mtime);
74635+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
74636+ sys_lchown((char __force_user *)collected, uid, gid);
74637+ do_utime((char __force_user *)collected, mtime);
74638 state = SkipIt;
74639 next_state = Reset;
74640 return 0;
74641diff --git a/init/main.c b/init/main.c
74642index cee4b5c..360e10a 100644
74643--- a/init/main.c
74644+++ b/init/main.c
74645@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
74646 extern void tc_init(void);
74647 #endif
74648
74649+extern void grsecurity_init(void);
74650+
74651 /*
74652 * Debug helper: via this flag we know that we are in 'early bootup code'
74653 * where only the boot processor is running with IRQ disabled. This means
74654@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
74655
74656 __setup("reset_devices", set_reset_devices);
74657
74658+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74659+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
74660+static int __init setup_grsec_proc_gid(char *str)
74661+{
74662+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
74663+ return 1;
74664+}
74665+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
74666+#endif
74667+
74668+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
74669+extern char pax_enter_kernel_user[];
74670+extern char pax_exit_kernel_user[];
74671+extern pgdval_t clone_pgd_mask;
74672+#endif
74673+
74674+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
74675+static int __init setup_pax_nouderef(char *str)
74676+{
74677+#ifdef CONFIG_X86_32
74678+ unsigned int cpu;
74679+ struct desc_struct *gdt;
74680+
74681+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
74682+ gdt = get_cpu_gdt_table(cpu);
74683+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
74684+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
74685+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
74686+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
74687+ }
74688+ loadsegment(ds, __KERNEL_DS);
74689+ loadsegment(es, __KERNEL_DS);
74690+ loadsegment(ss, __KERNEL_DS);
74691+#else
74692+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
74693+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
74694+ clone_pgd_mask = ~(pgdval_t)0UL;
74695+#endif
74696+
74697+ return 0;
74698+}
74699+early_param("pax_nouderef", setup_pax_nouderef);
74700+#endif
74701+
74702+#ifdef CONFIG_PAX_SOFTMODE
74703+int pax_softmode;
74704+
74705+static int __init setup_pax_softmode(char *str)
74706+{
74707+ get_option(&str, &pax_softmode);
74708+ return 1;
74709+}
74710+__setup("pax_softmode=", setup_pax_softmode);
74711+#endif
74712+
74713 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
74714 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
74715 static const char *panic_later, *panic_param;
74716@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
74717 {
74718 int count = preempt_count();
74719 int ret;
74720+ const char *msg1 = "", *msg2 = "";
74721
74722 if (initcall_debug)
74723 ret = do_one_initcall_debug(fn);
74724@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
74725 sprintf(msgbuf, "error code %d ", ret);
74726
74727 if (preempt_count() != count) {
74728- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
74729+ msg1 = " preemption imbalance";
74730 preempt_count() = count;
74731 }
74732 if (irqs_disabled()) {
74733- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
74734+ msg2 = " disabled interrupts";
74735 local_irq_enable();
74736 }
74737- if (msgbuf[0]) {
74738- printk("initcall %pF returned with %s\n", fn, msgbuf);
74739+ if (msgbuf[0] || *msg1 || *msg2) {
74740+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
74741 }
74742
74743 return ret;
74744@@ -755,8 +813,14 @@ static void __init do_initcall_level(int level)
74745 level, level,
74746 &repair_env_string);
74747
74748- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
74749+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
74750 do_one_initcall(*fn);
74751+
74752+#ifdef LATENT_ENTROPY_PLUGIN
74753+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74754+#endif
74755+
74756+ }
74757 }
74758
74759 static void __init do_initcalls(void)
74760@@ -790,8 +854,14 @@ static void __init do_pre_smp_initcalls(void)
74761 {
74762 initcall_t *fn;
74763
74764- for (fn = __initcall_start; fn < __initcall0_start; fn++)
74765+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
74766 do_one_initcall(*fn);
74767+
74768+#ifdef LATENT_ENTROPY_PLUGIN
74769+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74770+#endif
74771+
74772+ }
74773 }
74774
74775 static int run_init_process(const char *init_filename)
74776@@ -877,7 +947,7 @@ static noinline void __init kernel_init_freeable(void)
74777 do_basic_setup();
74778
74779 /* Open the /dev/console on the rootfs, this should never fail */
74780- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
74781+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
74782 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
74783
74784 (void) sys_dup(0);
74785@@ -890,11 +960,13 @@ static noinline void __init kernel_init_freeable(void)
74786 if (!ramdisk_execute_command)
74787 ramdisk_execute_command = "/init";
74788
74789- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
74790+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
74791 ramdisk_execute_command = NULL;
74792 prepare_namespace();
74793 }
74794
74795+ grsecurity_init();
74796+
74797 /*
74798 * Ok, we have completed the initial bootup, and
74799 * we're essentially up and running. Get rid of the
74800diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
74801index 130dfec..cc88451 100644
74802--- a/ipc/ipc_sysctl.c
74803+++ b/ipc/ipc_sysctl.c
74804@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
74805 static int proc_ipc_dointvec(ctl_table *table, int write,
74806 void __user *buffer, size_t *lenp, loff_t *ppos)
74807 {
74808- struct ctl_table ipc_table;
74809+ ctl_table_no_const ipc_table;
74810
74811 memcpy(&ipc_table, table, sizeof(ipc_table));
74812 ipc_table.data = get_ipc(table);
74813@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
74814 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
74815 void __user *buffer, size_t *lenp, loff_t *ppos)
74816 {
74817- struct ctl_table ipc_table;
74818+ ctl_table_no_const ipc_table;
74819
74820 memcpy(&ipc_table, table, sizeof(ipc_table));
74821 ipc_table.data = get_ipc(table);
74822@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
74823 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74824 void __user *buffer, size_t *lenp, loff_t *ppos)
74825 {
74826- struct ctl_table ipc_table;
74827+ ctl_table_no_const ipc_table;
74828 size_t lenp_bef = *lenp;
74829 int rc;
74830
74831@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74832 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
74833 void __user *buffer, size_t *lenp, loff_t *ppos)
74834 {
74835- struct ctl_table ipc_table;
74836+ ctl_table_no_const ipc_table;
74837 memcpy(&ipc_table, table, sizeof(ipc_table));
74838 ipc_table.data = get_ipc(table);
74839
74840@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
74841 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
74842 void __user *buffer, size_t *lenp, loff_t *ppos)
74843 {
74844- struct ctl_table ipc_table;
74845+ ctl_table_no_const ipc_table;
74846 size_t lenp_bef = *lenp;
74847 int oldval;
74848 int rc;
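
ctl_table_no_const is the deliberate escape hatch from constification: once the plugin makes struct ctl_table const by default, handlers like these — which copy a template and then patch .data to the per-namespace value — need a writable alias for the stack copy. Generic sketch of the pattern with hypothetical names:

    struct table_sketch { void *data; int mode; };
    typedef struct table_sketch table_sketch_no_const;  /* writable alias */

    static const struct table_sketch template = { 0, 0644 };

    static void handler_sketch(void *per_ns_value)
    {
        table_sketch_no_const local = template;  /* copy the const template */
        local.data = per_ns_value;               /* patch the writable copy */
        /* ... hand &local to the generic proc handler ... */
    }
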
74849diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
74850index 383d638..943fdbb 100644
74851--- a/ipc/mq_sysctl.c
74852+++ b/ipc/mq_sysctl.c
74853@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
74854 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
74855 void __user *buffer, size_t *lenp, loff_t *ppos)
74856 {
74857- struct ctl_table mq_table;
74858+ ctl_table_no_const mq_table;
74859 memcpy(&mq_table, table, sizeof(mq_table));
74860 mq_table.data = get_mq(table);
74861
74862diff --git a/ipc/mqueue.c b/ipc/mqueue.c
74863index f3f40dc..ffe5a3a 100644
74864--- a/ipc/mqueue.c
74865+++ b/ipc/mqueue.c
74866@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
74867 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
74868 info->attr.mq_msgsize);
74869
74870+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
74871 spin_lock(&mq_lock);
74872 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
74873 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
74874diff --git a/ipc/msg.c b/ipc/msg.c
74875index fede1d0..9778e0f8 100644
74876--- a/ipc/msg.c
74877+++ b/ipc/msg.c
74878@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
74879 return security_msg_queue_associate(msq, msgflg);
74880 }
74881
74882+static struct ipc_ops msg_ops = {
74883+ .getnew = newque,
74884+ .associate = msg_security,
74885+ .more_checks = NULL
74886+};
74887+
74888 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
74889 {
74890 struct ipc_namespace *ns;
74891- struct ipc_ops msg_ops;
74892 struct ipc_params msg_params;
74893
74894 ns = current->nsproxy->ipc_ns;
74895
74896- msg_ops.getnew = newque;
74897- msg_ops.associate = msg_security;
74898- msg_ops.more_checks = NULL;
74899-
74900 msg_params.key = key;
74901 msg_params.flg = msgflg;
74902
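
The msg/sem/shm hunks share one transformation: an ipc_ops table assembled on the stack at every syscall entry becomes a single file-scope object, which costs nothing per call and, under the constify plugin, can be placed in read-only memory. Generic sketch of the resulting shape:

    struct ops_sketch {
        int (*getnew)(void);
        int (*associate)(int flg);
        int (*more_checks)(int extra);
    };

    /* after the patch: one static instance, initialized once, constifiable */
    static struct ops_sketch msg_ops_sketch = {
        .getnew      = 0,
        .associate   = 0,
        .more_checks = 0,   /* NULL in the msg case, as above */
    };
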
74903diff --git a/ipc/sem.c b/ipc/sem.c
74904index 58d31f1..cce7a55 100644
74905--- a/ipc/sem.c
74906+++ b/ipc/sem.c
74907@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
74908 return 0;
74909 }
74910
74911+static struct ipc_ops sem_ops = {
74912+ .getnew = newary,
74913+ .associate = sem_security,
74914+ .more_checks = sem_more_checks
74915+};
74916+
74917 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74918 {
74919 struct ipc_namespace *ns;
74920- struct ipc_ops sem_ops;
74921 struct ipc_params sem_params;
74922
74923 ns = current->nsproxy->ipc_ns;
74924@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74925 if (nsems < 0 || nsems > ns->sc_semmsl)
74926 return -EINVAL;
74927
74928- sem_ops.getnew = newary;
74929- sem_ops.associate = sem_security;
74930- sem_ops.more_checks = sem_more_checks;
74931-
74932 sem_params.key = key;
74933 sem_params.flg = semflg;
74934 sem_params.u.nsems = nsems;
74935diff --git a/ipc/shm.c b/ipc/shm.c
74936index 4fa6d8f..55cff14 100644
74937--- a/ipc/shm.c
74938+++ b/ipc/shm.c
74939@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
74940 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
74941 #endif
74942
74943+#ifdef CONFIG_GRKERNSEC
74944+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74945+ const time_t shm_createtime, const kuid_t cuid,
74946+ const int shmid);
74947+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74948+ const time_t shm_createtime);
74949+#endif
74950+
74951 void shm_init_ns(struct ipc_namespace *ns)
74952 {
74953 ns->shm_ctlmax = SHMMAX;
74954@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
74955 shp->shm_lprid = 0;
74956 shp->shm_atim = shp->shm_dtim = 0;
74957 shp->shm_ctim = get_seconds();
74958+#ifdef CONFIG_GRKERNSEC
74959+ {
74960+ struct timespec timeval;
74961+ do_posix_clock_monotonic_gettime(&timeval);
74962+
74963+ shp->shm_createtime = timeval.tv_sec;
74964+ }
74965+#endif
74966 shp->shm_segsz = size;
74967 shp->shm_nattch = 0;
74968 shp->shm_file = file;
74969@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
74970 return 0;
74971 }
74972
74973+static struct ipc_ops shm_ops = {
74974+ .getnew = newseg,
74975+ .associate = shm_security,
74976+ .more_checks = shm_more_checks
74977+};
74978+
74979 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
74980 {
74981 struct ipc_namespace *ns;
74982- struct ipc_ops shm_ops;
74983 struct ipc_params shm_params;
74984
74985 ns = current->nsproxy->ipc_ns;
74986
74987- shm_ops.getnew = newseg;
74988- shm_ops.associate = shm_security;
74989- shm_ops.more_checks = shm_more_checks;
74990-
74991 shm_params.key = key;
74992 shm_params.flg = shmflg;
74993 shm_params.u.size = size;
74994@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74995 f_mode = FMODE_READ | FMODE_WRITE;
74996 }
74997 if (shmflg & SHM_EXEC) {
74998+
74999+#ifdef CONFIG_PAX_MPROTECT
75000+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
75001+ goto out;
75002+#endif
75003+
75004 prot |= PROT_EXEC;
75005 acc_mode |= S_IXUGO;
75006 }
75007@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
75008 if (err)
75009 goto out_unlock;
75010
75011+#ifdef CONFIG_GRKERNSEC
75012+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
75013+ shp->shm_perm.cuid, shmid) ||
75014+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
75015+ err = -EACCES;
75016+ goto out_unlock;
75017+ }
75018+#endif
75019+
75020 path = shp->shm_file->f_path;
75021 path_get(&path);
75022 shp->shm_nattch++;
75023+#ifdef CONFIG_GRKERNSEC
75024+ shp->shm_lapid = current->pid;
75025+#endif
75026 size = i_size_read(path.dentry->d_inode);
75027 shm_unlock(shp);
75028
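
The shm.c hunks stamp each new segment with a creation time (shm_createtime), record the last attaching PID (shm_lapid), veto PROT_EXEC attaches when PAX_MPROTECT is active on the caller, and gate do_shmat() through two gr_* policy hooks. The stamp is taken with do_posix_clock_monotonic_gettime() rather than wall-clock time, which matters because it feeds later policy comparisons and must not jump when the clock is set. A user-space sketch of taking such a stamp, using the equivalent clock_gettime() interface:

#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;

    /* CLOCK_MONOTONIC never goes backwards, so a creation stamp
     * taken here stays comparable even if the wall clock changes. */
    if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
        perror("clock_gettime");
        return 1;
    }
    printf("created at %lld.%09ld (monotonic)\n",
           (long long)ts.tv_sec, ts.tv_nsec);
    return 0;
}
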
75029diff --git a/kernel/acct.c b/kernel/acct.c
75030index 051e071..15e0920 100644
75031--- a/kernel/acct.c
75032+++ b/kernel/acct.c
75033@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
75034 */
75035 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
75036 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
75037- file->f_op->write(file, (char *)&ac,
75038+ file->f_op->write(file, (char __force_user *)&ac,
75039 sizeof(acct_t), &file->f_pos);
75040 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
75041 set_fs(fs);
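
The __force_user cast here, and throughout the kernel/compat.c hunks that follow, is a sparse annotation rather than a runtime change: the code widens the address limit with set_fs(KERNEL_DS) and then passes a kernel buffer to an API typed __user, so the cast records that the address-space mismatch is intentional. Under PaX's stricter user/kernel separation the bare (char *) cast the stock kernel used would be flagged. A compile-time sketch of how these annotations behave (the macros roughly mirror the kernel's compiler.h and expand to nothing outside sparse):

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

#include <string.h>

static char kbuf[16];

/* An API that normally takes a userland pointer. */
static void write_bytes(const char __user *p, unsigned long n)
{
    (void)p; (void)n;   /* a real kernel would copy_from_user() here */
}

int main(void)
{
    /* Without __force, sparse warns: incorrect type in argument
     * (different address spaces).  With it, the cast is accepted. */
    write_bytes((const char __force __user *)kbuf, sizeof(kbuf));
    memset(kbuf, 0, sizeof(kbuf));
    return 0;
}
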
75042diff --git a/kernel/audit.c b/kernel/audit.c
75043index d596e53..dbef3c3 100644
75044--- a/kernel/audit.c
75045+++ b/kernel/audit.c
75046@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
75047 3) suppressed due to audit_rate_limit
75048 4) suppressed due to audit_backlog_limit
75049 */
75050-static atomic_t audit_lost = ATOMIC_INIT(0);
75051+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
75052
75053 /* The netlink socket. */
75054 static struct sock *audit_sock;
75055@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
75056 unsigned long now;
75057 int print;
75058
75059- atomic_inc(&audit_lost);
75060+ atomic_inc_unchecked(&audit_lost);
75061
75062 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
75063
75064@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
75065 printk(KERN_WARNING
75066 "audit: audit_lost=%d audit_rate_limit=%d "
75067 "audit_backlog_limit=%d\n",
75068- atomic_read(&audit_lost),
75069+ atomic_read_unchecked(&audit_lost),
75070 audit_rate_limit,
75071 audit_backlog_limit);
75072 audit_panic(message);
75073@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
75074 status_set.pid = audit_pid;
75075 status_set.rate_limit = audit_rate_limit;
75076 status_set.backlog_limit = audit_backlog_limit;
75077- status_set.lost = atomic_read(&audit_lost);
75078+ status_set.lost = atomic_read_unchecked(&audit_lost);
75079 status_set.backlog = skb_queue_len(&audit_skb_queue);
75080 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
75081 &status_set, sizeof(status_set));
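
audit_lost is a pure statistics counter, so the audit.c hunks convert it to atomic_unchecked_t. Under PaX REFCOUNT, ordinary atomic_t operations trap on signed overflow to stop reference-count overflow exploits; counters that may legitimately wrap have to opt out through the *_unchecked variants, which is the pattern repeated in auditsc.c, debug_core.c, and events/core.c below. A user-space sketch of the checked/unchecked split using the GCC/Clang overflow builtins:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* "Checked" increment: trap on signed overflow, the way a
 * REFCOUNT-instrumented atomic_inc() does for refcounts. */
static int inc_checked(int *v)
{
    int out;
    if (__builtin_add_overflow(*v, 1, &out))
        abort();                  /* the refcount would have wrapped */
    return *v = out;
}

/* "Unchecked" increment: wraparound is fine for a stats counter,
 * and unsigned wraparound is fully defined. */
static unsigned inc_unchecked(unsigned *v)
{
    return ++*v;
}

int main(void)
{
    unsigned stat = UINT_MAX;
    printf("%u\n", inc_unchecked(&stat));   /* wraps to 0, harmless */

    int ref = INT_MAX;
    inc_checked(&ref);                      /* aborts by design */
    return 0;
}
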
75082diff --git a/kernel/auditsc.c b/kernel/auditsc.c
75083index a371f85..da826c1 100644
75084--- a/kernel/auditsc.c
75085+++ b/kernel/auditsc.c
75086@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
75087 }
75088
75089 /* global counter which is incremented every time something logs in */
75090-static atomic_t session_id = ATOMIC_INIT(0);
75091+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
75092
75093 /**
75094 * audit_set_loginuid - set current task's audit_context loginuid
75095@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
75096 return -EPERM;
75097 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
75098
75099- sessionid = atomic_inc_return(&session_id);
75100+ sessionid = atomic_inc_return_unchecked(&session_id);
75101 if (context && context->in_syscall) {
75102 struct audit_buffer *ab;
75103
75104diff --git a/kernel/capability.c b/kernel/capability.c
75105index f6c2ce5..982c0f9 100644
75106--- a/kernel/capability.c
75107+++ b/kernel/capability.c
75108@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
75109 * before modification is attempted and the application
75110 * fails.
75111 */
75112+ if (tocopy > ARRAY_SIZE(kdata))
75113+ return -EFAULT;
75114+
75115 if (copy_to_user(dataptr, kdata, tocopy
75116 * sizeof(struct __user_cap_data_struct))) {
75117 return -EFAULT;
75118@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
75119 int ret;
75120
75121 rcu_read_lock();
75122- ret = security_capable(__task_cred(t), ns, cap);
75123+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
75124+ gr_task_is_capable(t, __task_cred(t), cap);
75125 rcu_read_unlock();
75126
75127- return (ret == 0);
75128+ return ret;
75129 }
75130
75131 /**
75132@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
75133 int ret;
75134
75135 rcu_read_lock();
75136- ret = security_capable_noaudit(__task_cred(t), ns, cap);
75137+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
75138 rcu_read_unlock();
75139
75140- return (ret == 0);
75141+ return ret;
75142 }
75143
75144 /**
75145@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
75146 BUG();
75147 }
75148
75149- if (security_capable(current_cred(), ns, cap) == 0) {
75150+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
75151 current->flags |= PF_SUPERPRIV;
75152 return true;
75153 }
75154@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
75155 }
75156 EXPORT_SYMBOL(ns_capable);
75157
75158+bool ns_capable_nolog(struct user_namespace *ns, int cap)
75159+{
75160+ if (unlikely(!cap_valid(cap))) {
75161+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
75162+ BUG();
75163+ }
75164+
75165+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
75166+ current->flags |= PF_SUPERPRIV;
75167+ return true;
75168+ }
75169+ return false;
75170+}
75171+EXPORT_SYMBOL(ns_capable_nolog);
75172+
75173 /**
75174 * file_ns_capable - Determine if the file's opener had a capability in effect
75175 * @file: The file we want to check
75176@@ -432,6 +451,12 @@ bool capable(int cap)
75177 }
75178 EXPORT_SYMBOL(capable);
75179
75180+bool capable_nolog(int cap)
75181+{
75182+ return ns_capable_nolog(&init_user_ns, cap);
75183+}
75184+EXPORT_SYMBOL(capable_nolog);
75185+
75186 /**
75187 * nsown_capable - Check superior capability to one's own user_ns
75188 * @cap: The capability in question
75189@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
75190
75191 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
75192 }
75193+
75194+bool inode_capable_nolog(const struct inode *inode, int cap)
75195+{
75196+ struct user_namespace *ns = current_user_ns();
75197+
75198+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
75199+}
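
Besides layering gr_* policy predicates onto security_capable() and adding the *_nolog variants (so grsecurity's own internal capability queries do not spam the audit log), the capget hunk adds a defensive bounds check: tocopy is validated against ARRAY_SIZE(kdata) before the multiply-and-copy to user space. A sketch of that pattern:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

/* Copy `tocopy` elements out of a fixed kernel-side array.
 * Rejecting tocopy > ARRAY_SIZE(kdata) up front means the copy can
 * never read past the array, even if a future caller miscomputes
 * tocopy.  The kernel returns -EFAULT at this point. */
static int copy_caps(struct cap_data *dst, unsigned tocopy)
{
    struct cap_data kdata[2] = { { 1, 2, 3 }, { 4, 5, 6 } };

    if (tocopy > ARRAY_SIZE(kdata))
        return -1;

    memcpy(dst, kdata, tocopy * sizeof(kdata[0]));
    return 0;
}

int main(void)
{
    struct cap_data out[2];
    printf("%d\n", copy_caps(out, 2));   /* 0  */
    printf("%d\n", copy_caps(out, 9));   /* -1 */
    return 0;
}
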
75200diff --git a/kernel/cgroup.c b/kernel/cgroup.c
75201index 1e23664..570a83d 100644
75202--- a/kernel/cgroup.c
75203+++ b/kernel/cgroup.c
75204@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
75205 struct css_set *cg = link->cg;
75206 struct task_struct *task;
75207 int count = 0;
75208- seq_printf(seq, "css_set %p\n", cg);
75209+ seq_printf(seq, "css_set %pK\n", cg);
75210 list_for_each_entry(task, &cg->tasks, cg_list) {
75211 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
75212 seq_puts(seq, " ...\n");
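
The cgroup.c one-liner swaps %p for %pK, which on this kernel prints zeros to unprivileged readers subject to the kptr_restrict sysctl; the same substitution recurs in the kprobes and kallsyms hunks further down, closing kernel-address infoleaks from debug files. A toy analog of the redaction decision:

#include <stdio.h>
#include <stdbool.h>

/* Print a pointer only to a privileged reader; otherwise print a
 * constant, the way %pK behaves with kptr_restrict=1. */
static void print_ptr(const void *p, bool privileged)
{
    if (privileged)
        printf("%p\n", p);
    else
        printf("0x0\n");
}

int main(void)
{
    int obj;
    print_ptr(&obj, true);
    print_ptr(&obj, false);
    return 0;
}
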
75213diff --git a/kernel/compat.c b/kernel/compat.c
75214index 36700e9..73d770c 100644
75215--- a/kernel/compat.c
75216+++ b/kernel/compat.c
75217@@ -13,6 +13,7 @@
75218
75219 #include <linux/linkage.h>
75220 #include <linux/compat.h>
75221+#include <linux/module.h>
75222 #include <linux/errno.h>
75223 #include <linux/time.h>
75224 #include <linux/signal.h>
75225@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
75226 mm_segment_t oldfs;
75227 long ret;
75228
75229- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
75230+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
75231 oldfs = get_fs();
75232 set_fs(KERNEL_DS);
75233 ret = hrtimer_nanosleep_restart(restart);
75234@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
75235 oldfs = get_fs();
75236 set_fs(KERNEL_DS);
75237 ret = hrtimer_nanosleep(&tu,
75238- rmtp ? (struct timespec __user *)&rmt : NULL,
75239+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
75240 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
75241 set_fs(oldfs);
75242
75243@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
75244 mm_segment_t old_fs = get_fs();
75245
75246 set_fs(KERNEL_DS);
75247- ret = sys_sigpending((old_sigset_t __user *) &s);
75248+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
75249 set_fs(old_fs);
75250 if (ret == 0)
75251 ret = put_user(s, set);
75252@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
75253 mm_segment_t old_fs = get_fs();
75254
75255 set_fs(KERNEL_DS);
75256- ret = sys_old_getrlimit(resource, &r);
75257+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
75258 set_fs(old_fs);
75259
75260 if (!ret) {
75261@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
75262 mm_segment_t old_fs = get_fs();
75263
75264 set_fs(KERNEL_DS);
75265- ret = sys_getrusage(who, (struct rusage __user *) &r);
75266+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
75267 set_fs(old_fs);
75268
75269 if (ret)
75270@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
75271 set_fs (KERNEL_DS);
75272 ret = sys_wait4(pid,
75273 (stat_addr ?
75274- (unsigned int __user *) &status : NULL),
75275- options, (struct rusage __user *) &r);
75276+ (unsigned int __force_user *) &status : NULL),
75277+ options, (struct rusage __force_user *) &r);
75278 set_fs (old_fs);
75279
75280 if (ret > 0) {
75281@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
75282 memset(&info, 0, sizeof(info));
75283
75284 set_fs(KERNEL_DS);
75285- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
75286- uru ? (struct rusage __user *)&ru : NULL);
75287+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
75288+ uru ? (struct rusage __force_user *)&ru : NULL);
75289 set_fs(old_fs);
75290
75291 if ((ret < 0) || (info.si_signo == 0))
75292@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
75293 oldfs = get_fs();
75294 set_fs(KERNEL_DS);
75295 err = sys_timer_settime(timer_id, flags,
75296- (struct itimerspec __user *) &newts,
75297- (struct itimerspec __user *) &oldts);
75298+ (struct itimerspec __force_user *) &newts,
75299+ (struct itimerspec __force_user *) &oldts);
75300 set_fs(oldfs);
75301 if (!err && old && put_compat_itimerspec(old, &oldts))
75302 return -EFAULT;
75303@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
75304 oldfs = get_fs();
75305 set_fs(KERNEL_DS);
75306 err = sys_timer_gettime(timer_id,
75307- (struct itimerspec __user *) &ts);
75308+ (struct itimerspec __force_user *) &ts);
75309 set_fs(oldfs);
75310 if (!err && put_compat_itimerspec(setting, &ts))
75311 return -EFAULT;
75312@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
75313 oldfs = get_fs();
75314 set_fs(KERNEL_DS);
75315 err = sys_clock_settime(which_clock,
75316- (struct timespec __user *) &ts);
75317+ (struct timespec __force_user *) &ts);
75318 set_fs(oldfs);
75319 return err;
75320 }
75321@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
75322 oldfs = get_fs();
75323 set_fs(KERNEL_DS);
75324 err = sys_clock_gettime(which_clock,
75325- (struct timespec __user *) &ts);
75326+ (struct timespec __force_user *) &ts);
75327 set_fs(oldfs);
75328 if (!err && put_compat_timespec(&ts, tp))
75329 return -EFAULT;
75330@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
75331
75332 oldfs = get_fs();
75333 set_fs(KERNEL_DS);
75334- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
75335+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
75336 set_fs(oldfs);
75337
75338 err = compat_put_timex(utp, &txc);
75339@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
75340 oldfs = get_fs();
75341 set_fs(KERNEL_DS);
75342 err = sys_clock_getres(which_clock,
75343- (struct timespec __user *) &ts);
75344+ (struct timespec __force_user *) &ts);
75345 set_fs(oldfs);
75346 if (!err && tp && put_compat_timespec(&ts, tp))
75347 return -EFAULT;
75348@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
75349 long err;
75350 mm_segment_t oldfs;
75351 struct timespec tu;
75352- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
75353+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
75354
75355- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
75356+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
75357 oldfs = get_fs();
75358 set_fs(KERNEL_DS);
75359 err = clock_nanosleep_restart(restart);
75360@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
75361 oldfs = get_fs();
75362 set_fs(KERNEL_DS);
75363 err = sys_clock_nanosleep(which_clock, flags,
75364- (struct timespec __user *) &in,
75365- (struct timespec __user *) &out);
75366+ (struct timespec __force_user *) &in,
75367+ (struct timespec __force_user *) &out);
75368 set_fs(oldfs);
75369
75370 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
75371diff --git a/kernel/configs.c b/kernel/configs.c
75372index 42e8fa0..9e7406b 100644
75373--- a/kernel/configs.c
75374+++ b/kernel/configs.c
75375@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
75376 struct proc_dir_entry *entry;
75377
75378 /* create the current config file */
75379+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
75380+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
75381+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
75382+ &ikconfig_file_ops);
75383+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75384+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
75385+ &ikconfig_file_ops);
75386+#endif
75387+#else
75388 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
75389 &ikconfig_file_ops);
75390+#endif
75391+
75392 if (!entry)
75393 return -ENOMEM;
75394
75395diff --git a/kernel/cred.c b/kernel/cred.c
75396index e0573a4..3874e41 100644
75397--- a/kernel/cred.c
75398+++ b/kernel/cred.c
75399@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
75400 validate_creds(cred);
75401 alter_cred_subscribers(cred, -1);
75402 put_cred(cred);
75403+
75404+#ifdef CONFIG_GRKERNSEC_SETXID
75405+ cred = (struct cred *) tsk->delayed_cred;
75406+ if (cred != NULL) {
75407+ tsk->delayed_cred = NULL;
75408+ validate_creds(cred);
75409+ alter_cred_subscribers(cred, -1);
75410+ put_cred(cred);
75411+ }
75412+#endif
75413 }
75414
75415 /**
75416@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
75417 * Always returns 0 thus allowing this function to be tail-called at the end
75418 * of, say, sys_setgid().
75419 */
75420-int commit_creds(struct cred *new)
75421+static int __commit_creds(struct cred *new)
75422 {
75423 struct task_struct *task = current;
75424 const struct cred *old = task->real_cred;
75425@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
75426
75427 get_cred(new); /* we will require a ref for the subj creds too */
75428
75429+ gr_set_role_label(task, new->uid, new->gid);
75430+
75431 /* dumpability changes */
75432 if (!uid_eq(old->euid, new->euid) ||
75433 !gid_eq(old->egid, new->egid) ||
75434@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
75435 put_cred(old);
75436 return 0;
75437 }
75438+#ifdef CONFIG_GRKERNSEC_SETXID
75439+extern int set_user(struct cred *new);
75440+
75441+void gr_delayed_cred_worker(void)
75442+{
75443+ const struct cred *new = current->delayed_cred;
75444+ struct cred *ncred;
75445+
75446+ current->delayed_cred = NULL;
75447+
75448+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
75449+ // from doing get_cred on it when queueing this
75450+ put_cred(new);
75451+ return;
75452+ } else if (new == NULL)
75453+ return;
75454+
75455+ ncred = prepare_creds();
75456+ if (!ncred)
75457+ goto die;
75458+ // uids
75459+ ncred->uid = new->uid;
75460+ ncred->euid = new->euid;
75461+ ncred->suid = new->suid;
75462+ ncred->fsuid = new->fsuid;
75463+ // gids
75464+ ncred->gid = new->gid;
75465+ ncred->egid = new->egid;
75466+ ncred->sgid = new->sgid;
75467+ ncred->fsgid = new->fsgid;
75468+ // groups
75469+ if (set_groups(ncred, new->group_info) < 0) {
75470+ abort_creds(ncred);
75471+ goto die;
75472+ }
75473+ // caps
75474+ ncred->securebits = new->securebits;
75475+ ncred->cap_inheritable = new->cap_inheritable;
75476+ ncred->cap_permitted = new->cap_permitted;
75477+ ncred->cap_effective = new->cap_effective;
75478+ ncred->cap_bset = new->cap_bset;
75479+
75480+ if (set_user(ncred)) {
75481+ abort_creds(ncred);
75482+ goto die;
75483+ }
75484+
75485+ // from doing get_cred on it when queueing this
75486+ put_cred(new);
75487+
75488+ __commit_creds(ncred);
75489+ return;
75490+die:
75491+ // from doing get_cred on it when queueing this
75492+ put_cred(new);
75493+ do_group_exit(SIGKILL);
75494+}
75495+#endif
75496+
75497+int commit_creds(struct cred *new)
75498+{
75499+#ifdef CONFIG_GRKERNSEC_SETXID
75500+ int ret;
75501+ int schedule_it = 0;
75502+ struct task_struct *t;
75503+
75504+ /* we won't get called with tasklist_lock held for writing
75505+ and interrupts disabled as the cred struct in that case is
75506+ init_cred
75507+ */
75508+ if (grsec_enable_setxid && !current_is_single_threaded() &&
75509+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
75510+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
75511+ schedule_it = 1;
75512+ }
75513+ ret = __commit_creds(new);
75514+ if (schedule_it) {
75515+ rcu_read_lock();
75516+ read_lock(&tasklist_lock);
75517+ for (t = next_thread(current); t != current;
75518+ t = next_thread(t)) {
75519+ if (t->delayed_cred == NULL) {
75520+ t->delayed_cred = get_cred(new);
75521+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
75522+ set_tsk_need_resched(t);
75523+ }
75524+ }
75525+ read_unlock(&tasklist_lock);
75526+ rcu_read_unlock();
75527+ }
75528+ return ret;
75529+#else
75530+ return __commit_creds(new);
75531+#endif
75532+}
75533+
75534 EXPORT_SYMBOL(commit_creds);
75535
75536 /**
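
The SETXID machinery above wraps commit_creds(): when a root, multithreaded task drops to a non-root uid, every sibling thread gets the new cred queued on delayed_cred, is flagged with TIF_GRSEC_SETXID, and is forced to reschedule; gr_delayed_cred_worker() then applies the downgrade from a safe context, and exit_creds() drops any still-queued reference. This closes the window in which one thread lowers privileges while its siblings keep running with the old credentials. A pthread sketch of the "flag the siblings, apply at a safe point" shape, with hypothetical names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* One pending "credential" shared by all threads; each worker adopts
 * it at its next checkpoint, the analog of the TIF_GRSEC_SETXID test
 * on the way back to user mode. */
static _Atomic int pending_uid = -1;
static _Thread_local int my_uid = 0;

static void checkpoint(void)
{
    int uid = atomic_load(&pending_uid);
    if (uid >= 0 && my_uid != uid) {
        my_uid = uid;                    /* adopt the downgrade */
        printf("thread %lu: now uid %d\n",
               (unsigned long)pthread_self(), my_uid);
    }
}

static void *worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 5; i++) {
        usleep(1000);
        checkpoint();                    /* a "safe point" */
    }
    return NULL;
}

int main(void)
{
    pthread_t t[2];
    for (int i = 0; i < 2; i++)
        pthread_create(&t[i], NULL, worker, NULL);

    usleep(2000);
    atomic_store(&pending_uid, 1000);    /* the commit_creds() analog */

    for (int i = 0; i < 2; i++)
        pthread_join(t[i], NULL);
    return 0;
}
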
75537diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
75538index 9a61738..c5c8f3a 100644
75539--- a/kernel/debug/debug_core.c
75540+++ b/kernel/debug/debug_core.c
75541@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
75542 */
75543 static atomic_t masters_in_kgdb;
75544 static atomic_t slaves_in_kgdb;
75545-static atomic_t kgdb_break_tasklet_var;
75546+static atomic_unchecked_t kgdb_break_tasklet_var;
75547 atomic_t kgdb_setting_breakpoint;
75548
75549 struct task_struct *kgdb_usethread;
75550@@ -132,7 +132,7 @@ int kgdb_single_step;
75551 static pid_t kgdb_sstep_pid;
75552
75553 /* to keep track of the CPU which is doing the single stepping*/
75554-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75555+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75556
75557 /*
75558 * If you are debugging a problem where roundup (the collection of
75559@@ -540,7 +540,7 @@ return_normal:
75560 * kernel will only try for the value of sstep_tries before
75561 * giving up and continuing on.
75562 */
75563- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
75564+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
75565 (kgdb_info[cpu].task &&
75566 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
75567 atomic_set(&kgdb_active, -1);
75568@@ -634,8 +634,8 @@ cpu_master_loop:
75569 }
75570
75571 kgdb_restore:
75572- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
75573- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
75574+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
75575+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
75576 if (kgdb_info[sstep_cpu].task)
75577 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
75578 else
75579@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
75580 static void kgdb_tasklet_bpt(unsigned long ing)
75581 {
75582 kgdb_breakpoint();
75583- atomic_set(&kgdb_break_tasklet_var, 0);
75584+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
75585 }
75586
75587 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
75588
75589 void kgdb_schedule_breakpoint(void)
75590 {
75591- if (atomic_read(&kgdb_break_tasklet_var) ||
75592+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
75593 atomic_read(&kgdb_active) != -1 ||
75594 atomic_read(&kgdb_setting_breakpoint))
75595 return;
75596- atomic_inc(&kgdb_break_tasklet_var);
75597+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
75598 tasklet_schedule(&kgdb_tasklet_breakpoint);
75599 }
75600 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
75601diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
75602index 8875254..7cf4928 100644
75603--- a/kernel/debug/kdb/kdb_main.c
75604+++ b/kernel/debug/kdb/kdb_main.c
75605@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
75606 continue;
75607
75608 kdb_printf("%-20s%8u 0x%p ", mod->name,
75609- mod->core_size, (void *)mod);
75610+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
75611 #ifdef CONFIG_MODULE_UNLOAD
75612 kdb_printf("%4ld ", module_refcount(mod));
75613 #endif
75614@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
75615 kdb_printf(" (Loading)");
75616 else
75617 kdb_printf(" (Live)");
75618- kdb_printf(" 0x%p", mod->module_core);
75619+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
75620
75621 #ifdef CONFIG_MODULE_UNLOAD
75622 {
75623diff --git a/kernel/events/core.c b/kernel/events/core.c
75624index 0600d3b..742ab1b 100644
75625--- a/kernel/events/core.c
75626+++ b/kernel/events/core.c
75627@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
75628 return 0;
75629 }
75630
75631-static atomic64_t perf_event_id;
75632+static atomic64_unchecked_t perf_event_id;
75633
75634 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
75635 enum event_type_t event_type);
75636@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
75637
75638 static inline u64 perf_event_count(struct perf_event *event)
75639 {
75640- return local64_read(&event->count) + atomic64_read(&event->child_count);
75641+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
75642 }
75643
75644 static u64 perf_event_read(struct perf_event *event)
75645@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
75646 mutex_lock(&event->child_mutex);
75647 total += perf_event_read(event);
75648 *enabled += event->total_time_enabled +
75649- atomic64_read(&event->child_total_time_enabled);
75650+ atomic64_read_unchecked(&event->child_total_time_enabled);
75651 *running += event->total_time_running +
75652- atomic64_read(&event->child_total_time_running);
75653+ atomic64_read_unchecked(&event->child_total_time_running);
75654
75655 list_for_each_entry(child, &event->child_list, child_list) {
75656 total += perf_event_read(child);
75657@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
75658 userpg->offset -= local64_read(&event->hw.prev_count);
75659
75660 userpg->time_enabled = enabled +
75661- atomic64_read(&event->child_total_time_enabled);
75662+ atomic64_read_unchecked(&event->child_total_time_enabled);
75663
75664 userpg->time_running = running +
75665- atomic64_read(&event->child_total_time_running);
75666+ atomic64_read_unchecked(&event->child_total_time_running);
75667
75668 arch_perf_update_userpage(userpg, now);
75669
75670@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
75671 values[n++] = perf_event_count(event);
75672 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
75673 values[n++] = enabled +
75674- atomic64_read(&event->child_total_time_enabled);
75675+ atomic64_read_unchecked(&event->child_total_time_enabled);
75676 }
75677 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
75678 values[n++] = running +
75679- atomic64_read(&event->child_total_time_running);
75680+ atomic64_read_unchecked(&event->child_total_time_running);
75681 }
75682 if (read_format & PERF_FORMAT_ID)
75683 values[n++] = primary_event_id(event);
75684@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
75685 * need to add enough zero bytes after the string to handle
75686 * the 64bit alignment we do later.
75687 */
75688- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
75689+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
75690 if (!buf) {
75691 name = strncpy(tmp, "//enomem", sizeof(tmp));
75692 goto got_name;
75693 }
75694- name = d_path(&file->f_path, buf, PATH_MAX);
75695+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
75696 if (IS_ERR(name)) {
75697 name = strncpy(tmp, "//toolong", sizeof(tmp));
75698 goto got_name;
75699@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
75700 event->parent = parent_event;
75701
75702 event->ns = get_pid_ns(task_active_pid_ns(current));
75703- event->id = atomic64_inc_return(&perf_event_id);
75704+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
75705
75706 event->state = PERF_EVENT_STATE_INACTIVE;
75707
75708@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
75709 /*
75710 * Add back the child's count to the parent's count:
75711 */
75712- atomic64_add(child_val, &parent_event->child_count);
75713- atomic64_add(child_event->total_time_enabled,
75714+ atomic64_add_unchecked(child_val, &parent_event->child_count);
75715+ atomic64_add_unchecked(child_event->total_time_enabled,
75716 &parent_event->child_total_time_enabled);
75717- atomic64_add(child_event->total_time_running,
75718+ atomic64_add_unchecked(child_event->total_time_running,
75719 &parent_event->child_total_time_running);
75720
75721 /*
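
The perf_event_mmap_event() hunk shrinks the allocation to exactly PATH_MAX and instead carves the u64 of slack out of the length handed to d_path(). The apparent intent, per the surrounding comment, is the invariant that at least sizeof(u64) zeroed bytes follow the string's NUL, so padding the record out to an 8-byte boundary later only ever reads zeroed memory inside the allocation. A sketch of keeping that invariant:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define BUF_MAX 64

int main(void)
{
    /* Zeroed buffer; the string may use at most
     * BUF_MAX - sizeof(uint64_t) bytes of it. */
    char *buf = calloc(1, BUF_MAX);
    if (!buf)
        return 1;

    snprintf(buf, BUF_MAX - sizeof(uint64_t), "/usr/bin/example");

    size_t len    = strlen(buf) + 1;     /* include the NUL */
    size_t padded = (len + sizeof(uint64_t) - 1)
                    & ~(sizeof(uint64_t) - 1);

    printf("len=%zu padded=%zu in-bounds=%d\n",
           len, padded, padded <= BUF_MAX);
    free(buf);
    return 0;
}
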
75722diff --git a/kernel/exit.c b/kernel/exit.c
75723index b4df219..f13c02d 100644
75724--- a/kernel/exit.c
75725+++ b/kernel/exit.c
75726@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
75727 struct task_struct *leader;
75728 int zap_leader;
75729 repeat:
75730+#ifdef CONFIG_NET
75731+ gr_del_task_from_ip_table(p);
75732+#endif
75733+
75734 /* don't need to get the RCU readlock here - the process is dead and
75735 * can't be modifying its own credentials. But shut RCU-lockdep up */
75736 rcu_read_lock();
75737@@ -338,7 +342,7 @@ int allow_signal(int sig)
75738 * know it'll be handled, so that they don't get converted to
75739 * SIGKILL or just silently dropped.
75740 */
75741- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
75742+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
75743 recalc_sigpending();
75744 spin_unlock_irq(&current->sighand->siglock);
75745 return 0;
75746@@ -708,6 +712,8 @@ void do_exit(long code)
75747 struct task_struct *tsk = current;
75748 int group_dead;
75749
75750+ set_fs(USER_DS);
75751+
75752 profile_task_exit(tsk);
75753
75754 WARN_ON(blk_needs_flush_plug(tsk));
75755@@ -724,7 +730,6 @@ void do_exit(long code)
75756 * mm_release()->clear_child_tid() from writing to a user-controlled
75757 * kernel address.
75758 */
75759- set_fs(USER_DS);
75760
75761 ptrace_event(PTRACE_EVENT_EXIT, code);
75762
75763@@ -783,6 +788,9 @@ void do_exit(long code)
75764 tsk->exit_code = code;
75765 taskstats_exit(tsk, group_dead);
75766
75767+ gr_acl_handle_psacct(tsk, code);
75768+ gr_acl_handle_exit();
75769+
75770 exit_mm(tsk);
75771
75772 if (group_dead)
75773@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
75774 * Take down every thread in the group. This is called by fatal signals
75775 * as well as by sys_exit_group (below).
75776 */
75777-void
75778+__noreturn void
75779 do_group_exit(int exit_code)
75780 {
75781 struct signal_struct *sig = current->signal;
75782diff --git a/kernel/fork.c b/kernel/fork.c
75783index 5630e52..0cee608 100644
75784--- a/kernel/fork.c
75785+++ b/kernel/fork.c
75786@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
75787 *stackend = STACK_END_MAGIC; /* for overflow detection */
75788
75789 #ifdef CONFIG_CC_STACKPROTECTOR
75790- tsk->stack_canary = get_random_int();
75791+ tsk->stack_canary = pax_get_random_long();
75792 #endif
75793
75794 /*
75795@@ -344,13 +344,81 @@ free_tsk:
75796 }
75797
75798 #ifdef CONFIG_MMU
75799+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
75800+{
75801+ struct vm_area_struct *tmp;
75802+ unsigned long charge;
75803+ struct mempolicy *pol;
75804+ struct file *file;
75805+
75806+ charge = 0;
75807+ if (mpnt->vm_flags & VM_ACCOUNT) {
75808+ unsigned long len = vma_pages(mpnt);
75809+
75810+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75811+ goto fail_nomem;
75812+ charge = len;
75813+ }
75814+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75815+ if (!tmp)
75816+ goto fail_nomem;
75817+ *tmp = *mpnt;
75818+ tmp->vm_mm = mm;
75819+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
75820+ pol = mpol_dup(vma_policy(mpnt));
75821+ if (IS_ERR(pol))
75822+ goto fail_nomem_policy;
75823+ vma_set_policy(tmp, pol);
75824+ if (anon_vma_fork(tmp, mpnt))
75825+ goto fail_nomem_anon_vma_fork;
75826+ tmp->vm_flags &= ~VM_LOCKED;
75827+ tmp->vm_next = tmp->vm_prev = NULL;
75828+ tmp->vm_mirror = NULL;
75829+ file = tmp->vm_file;
75830+ if (file) {
75831+ struct inode *inode = file->f_path.dentry->d_inode;
75832+ struct address_space *mapping = file->f_mapping;
75833+
75834+ get_file(file);
75835+ if (tmp->vm_flags & VM_DENYWRITE)
75836+ atomic_dec(&inode->i_writecount);
75837+ mutex_lock(&mapping->i_mmap_mutex);
75838+ if (tmp->vm_flags & VM_SHARED)
75839+ mapping->i_mmap_writable++;
75840+ flush_dcache_mmap_lock(mapping);
75841+ /* insert tmp into the share list, just after mpnt */
75842+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75843+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
75844+ else
75845+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
75846+ flush_dcache_mmap_unlock(mapping);
75847+ mutex_unlock(&mapping->i_mmap_mutex);
75848+ }
75849+
75850+ /*
75851+ * Clear hugetlb-related page reserves for children. This only
75852+ * affects MAP_PRIVATE mappings. Faults generated by the child
75853+ * are not guaranteed to succeed, even if read-only
75854+ */
75855+ if (is_vm_hugetlb_page(tmp))
75856+ reset_vma_resv_huge_pages(tmp);
75857+
75858+ return tmp;
75859+
75860+fail_nomem_anon_vma_fork:
75861+ mpol_put(pol);
75862+fail_nomem_policy:
75863+ kmem_cache_free(vm_area_cachep, tmp);
75864+fail_nomem:
75865+ vm_unacct_memory(charge);
75866+ return NULL;
75867+}
75868+
75869 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75870 {
75871 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
75872 struct rb_node **rb_link, *rb_parent;
75873 int retval;
75874- unsigned long charge;
75875- struct mempolicy *pol;
75876
75877 uprobe_start_dup_mmap();
75878 down_write(&oldmm->mmap_sem);
75879@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75880 mm->locked_vm = 0;
75881 mm->mmap = NULL;
75882 mm->mmap_cache = NULL;
75883- mm->free_area_cache = oldmm->mmap_base;
75884- mm->cached_hole_size = ~0UL;
75885+ mm->free_area_cache = oldmm->free_area_cache;
75886+ mm->cached_hole_size = oldmm->cached_hole_size;
75887 mm->map_count = 0;
75888 cpumask_clear(mm_cpumask(mm));
75889 mm->mm_rb = RB_ROOT;
75890@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75891
75892 prev = NULL;
75893 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
75894- struct file *file;
75895-
75896 if (mpnt->vm_flags & VM_DONTCOPY) {
75897 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
75898 -vma_pages(mpnt));
75899 continue;
75900 }
75901- charge = 0;
75902- if (mpnt->vm_flags & VM_ACCOUNT) {
75903- unsigned long len = vma_pages(mpnt);
75904-
75905- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75906- goto fail_nomem;
75907- charge = len;
75908- }
75909- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75910- if (!tmp)
75911- goto fail_nomem;
75912- *tmp = *mpnt;
75913- INIT_LIST_HEAD(&tmp->anon_vma_chain);
75914- pol = mpol_dup(vma_policy(mpnt));
75915- retval = PTR_ERR(pol);
75916- if (IS_ERR(pol))
75917- goto fail_nomem_policy;
75918- vma_set_policy(tmp, pol);
75919- tmp->vm_mm = mm;
75920- if (anon_vma_fork(tmp, mpnt))
75921- goto fail_nomem_anon_vma_fork;
75922- tmp->vm_flags &= ~VM_LOCKED;
75923- tmp->vm_next = tmp->vm_prev = NULL;
75924- file = tmp->vm_file;
75925- if (file) {
75926- struct inode *inode = file->f_path.dentry->d_inode;
75927- struct address_space *mapping = file->f_mapping;
75928-
75929- get_file(file);
75930- if (tmp->vm_flags & VM_DENYWRITE)
75931- atomic_dec(&inode->i_writecount);
75932- mutex_lock(&mapping->i_mmap_mutex);
75933- if (tmp->vm_flags & VM_SHARED)
75934- mapping->i_mmap_writable++;
75935- flush_dcache_mmap_lock(mapping);
75936- /* insert tmp into the share list, just after mpnt */
75937- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75938- vma_nonlinear_insert(tmp,
75939- &mapping->i_mmap_nonlinear);
75940- else
75941- vma_interval_tree_insert_after(tmp, mpnt,
75942- &mapping->i_mmap);
75943- flush_dcache_mmap_unlock(mapping);
75944- mutex_unlock(&mapping->i_mmap_mutex);
75945+ tmp = dup_vma(mm, oldmm, mpnt);
75946+ if (!tmp) {
75947+ retval = -ENOMEM;
75948+ goto out;
75949 }
75950
75951 /*
75952@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75953 if (retval)
75954 goto out;
75955 }
75956+
75957+#ifdef CONFIG_PAX_SEGMEXEC
75958+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
75959+ struct vm_area_struct *mpnt_m;
75960+
75961+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
75962+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
75963+
75964+ if (!mpnt->vm_mirror)
75965+ continue;
75966+
75967+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
75968+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
75969+ mpnt->vm_mirror = mpnt_m;
75970+ } else {
75971+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
75972+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
75973+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
75974+ mpnt->vm_mirror->vm_mirror = mpnt;
75975+ }
75976+ }
75977+ BUG_ON(mpnt_m);
75978+ }
75979+#endif
75980+
75981 /* a new mm has just been created */
75982 arch_dup_mmap(oldmm, mm);
75983 retval = 0;
75984@@ -472,14 +523,6 @@ out:
75985 up_write(&oldmm->mmap_sem);
75986 uprobe_end_dup_mmap();
75987 return retval;
75988-fail_nomem_anon_vma_fork:
75989- mpol_put(pol);
75990-fail_nomem_policy:
75991- kmem_cache_free(vm_area_cachep, tmp);
75992-fail_nomem:
75993- retval = -ENOMEM;
75994- vm_unacct_memory(charge);
75995- goto out;
75996 }
75997
75998 static inline int mm_alloc_pgd(struct mm_struct *mm)
75999@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
76000 return ERR_PTR(err);
76001
76002 mm = get_task_mm(task);
76003- if (mm && mm != current->mm &&
76004- !ptrace_may_access(task, mode)) {
76005+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
76006+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
76007 mmput(mm);
76008 mm = ERR_PTR(-EACCES);
76009 }
76010@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
76011 spin_unlock(&fs->lock);
76012 return -EAGAIN;
76013 }
76014- fs->users++;
76015+ atomic_inc(&fs->users);
76016 spin_unlock(&fs->lock);
76017 return 0;
76018 }
76019 tsk->fs = copy_fs_struct(fs);
76020 if (!tsk->fs)
76021 return -ENOMEM;
76022+ /* Carry through gr_chroot_dentry and is_chrooted instead
76023+ of recomputing it here. Already copied when the task struct
76024+ is duplicated. This allows pivot_root to not be treated as
76025+ a chroot
76026+ */
76027+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
76028+
76029 return 0;
76030 }
76031
76032@@ -1196,6 +1246,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
76033 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
76034 #endif
76035 retval = -EAGAIN;
76036+
76037+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
76038+
76039 if (atomic_read(&p->real_cred->user->processes) >=
76040 task_rlimit(p, RLIMIT_NPROC)) {
76041 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
76042@@ -1435,6 +1488,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
76043 goto bad_fork_free_pid;
76044 }
76045
76046+ /* synchronizes with gr_set_acls()
76047+ we need to call this past the point of no return for fork()
76048+ */
76049+ gr_copy_label(p);
76050+
76051 if (clone_flags & CLONE_THREAD) {
76052 current->signal->nr_threads++;
76053 atomic_inc(&current->signal->live);
76054@@ -1518,6 +1576,8 @@ bad_fork_cleanup_count:
76055 bad_fork_free:
76056 free_task(p);
76057 fork_out:
76058+ gr_log_forkfail(retval);
76059+
76060 return ERR_PTR(retval);
76061 }
76062
76063@@ -1568,6 +1628,23 @@ long do_fork(unsigned long clone_flags,
76064 return -EINVAL;
76065 }
76066
76067+#ifdef CONFIG_GRKERNSEC
76068+ if (clone_flags & CLONE_NEWUSER) {
76069+ /*
76070+ * This doesn't really inspire confidence:
76071+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
76072+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
76073+ * Increases kernel attack surface in areas developers
76074+ * previously cared little about ("low importance due
76075+ * to requiring "root" capability")
76076+ * To be removed when this code receives *proper* review
76077+ */
76078+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
76079+ !capable(CAP_SETGID))
76080+ return -EPERM;
76081+ }
76082+#endif
76083+
76084 /*
76085 * Determine whether and which event to report to ptracer. When
76086 * called from kernel_thread or CLONE_UNTRACED is explicitly
76087@@ -1602,6 +1679,8 @@ long do_fork(unsigned long clone_flags,
76088 if (clone_flags & CLONE_PARENT_SETTID)
76089 put_user(nr, parent_tidptr);
76090
76091+ gr_handle_brute_check();
76092+
76093 if (clone_flags & CLONE_VFORK) {
76094 p->vfork_done = &vfork;
76095 init_completion(&vfork);
76096@@ -1755,7 +1834,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
76097 return 0;
76098
76099 /* don't need lock here; in the worst case we'll do useless copy */
76100- if (fs->users == 1)
76101+ if (atomic_read(&fs->users) == 1)
76102 return 0;
76103
76104 *new_fsp = copy_fs_struct(fs);
76105@@ -1869,7 +1948,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
76106 fs = current->fs;
76107 spin_lock(&fs->lock);
76108 current->fs = new_fs;
76109- if (--fs->users)
76110+ gr_set_chroot_entries(current, &current->fs->root);
76111+ if (atomic_dec_return(&fs->users))
76112 new_fs = NULL;
76113 else
76114 new_fs = fs;
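
Two independent changes land in fork.c: the dup_mmap() loop body is extracted wholesale into a dup_vma() helper that returns NULL on failure (near-identical logic, plus clearing the SEGMEXEC vm_mirror link), and fs_struct.users becomes an atomic_t so the reference count can be read and adjusted without holding fs->lock; the bare --fs->users in unshare turns into atomic_dec_return(). A C11 sketch of that second conversion:

#include <stdatomic.h>
#include <stdio.h>

struct fs_struct {
    atomic_int users;     /* was: int users, guarded by fs->lock */
};

/* Drop one reference and report how many remain; the kernel's
 * atomic_dec_return() is fetch_sub(1) - 1 in C11 terms. */
static int drop_ref(struct fs_struct *fs)
{
    return atomic_fetch_sub(&fs->users, 1) - 1;
}

int main(void)
{
    struct fs_struct fs;
    atomic_init(&fs.users, 2);

    printf("%d\n", drop_ref(&fs));   /* 1: still shared */
    printf("%d\n", drop_ref(&fs));   /* 0: last user, safe to free */
    return 0;
}
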
76115diff --git a/kernel/futex.c b/kernel/futex.c
76116index 8879430..31696f1 100644
76117--- a/kernel/futex.c
76118+++ b/kernel/futex.c
76119@@ -54,6 +54,7 @@
76120 #include <linux/mount.h>
76121 #include <linux/pagemap.h>
76122 #include <linux/syscalls.h>
76123+#include <linux/ptrace.h>
76124 #include <linux/signal.h>
76125 #include <linux/export.h>
76126 #include <linux/magic.h>
76127@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
76128 struct page *page, *page_head;
76129 int err, ro = 0;
76130
76131+#ifdef CONFIG_PAX_SEGMEXEC
76132+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
76133+ return -EFAULT;
76134+#endif
76135+
76136 /*
76137 * The futex address must be "naturally" aligned.
76138 */
76139@@ -2731,6 +2737,7 @@ static int __init futex_init(void)
76140 {
76141 u32 curval;
76142 int i;
76143+ mm_segment_t oldfs;
76144
76145 /*
76146 * This will fail and we want it. Some arch implementations do
76147@@ -2742,8 +2749,11 @@ static int __init futex_init(void)
76148 * implementation, the non-functional ones will return
76149 * -ENOSYS.
76150 */
76151+ oldfs = get_fs();
76152+ set_fs(USER_DS);
76153 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
76154 futex_cmpxchg_enabled = 1;
76155+ set_fs(oldfs);
76156
76157 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
76158 plist_head_init(&futex_queues[i].chain);
76159diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
76160index a9642d5..51eb98c 100644
76161--- a/kernel/futex_compat.c
76162+++ b/kernel/futex_compat.c
76163@@ -31,7 +31,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
76164 return 0;
76165 }
76166
76167-static void __user *futex_uaddr(struct robust_list __user *entry,
76168+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
76169 compat_long_t futex_offset)
76170 {
76171 compat_uptr_t base = ptr_to_compat(entry);
76172diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
76173index 9b22d03..6295b62 100644
76174--- a/kernel/gcov/base.c
76175+++ b/kernel/gcov/base.c
76176@@ -102,11 +102,6 @@ void gcov_enable_events(void)
76177 }
76178
76179 #ifdef CONFIG_MODULES
76180-static inline int within(void *addr, void *start, unsigned long size)
76181-{
76182- return ((addr >= start) && (addr < start + size));
76183-}
76184-
76185 /* Update list and generate events when modules are unloaded. */
76186 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
76187 void *data)
76188@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
76189 prev = NULL;
76190 /* Remove entries located in module from linked list. */
76191 for (info = gcov_info_head; info; info = info->next) {
76192- if (within(info, mod->module_core, mod->core_size)) {
76193+ if (within_module_core_rw((unsigned long)info, mod)) {
76194 if (prev)
76195 prev->next = info->next;
76196 else
76197diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
76198index e4cee8d..f31f503 100644
76199--- a/kernel/hrtimer.c
76200+++ b/kernel/hrtimer.c
76201@@ -1408,7 +1408,7 @@ void hrtimer_peek_ahead_timers(void)
76202 local_irq_restore(flags);
76203 }
76204
76205-static void run_hrtimer_softirq(struct softirq_action *h)
76206+static void run_hrtimer_softirq(void)
76207 {
76208 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
76209
76210@@ -1750,7 +1750,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
76211 return NOTIFY_OK;
76212 }
76213
76214-static struct notifier_block __cpuinitdata hrtimers_nb = {
76215+static struct notifier_block hrtimers_nb = {
76216 .notifier_call = hrtimer_cpu_notify,
76217 };
76218
76219diff --git a/kernel/jump_label.c b/kernel/jump_label.c
76220index 60f48fa..7f3a770 100644
76221--- a/kernel/jump_label.c
76222+++ b/kernel/jump_label.c
76223@@ -13,6 +13,7 @@
76224 #include <linux/sort.h>
76225 #include <linux/err.h>
76226 #include <linux/static_key.h>
76227+#include <linux/mm.h>
76228
76229 #ifdef HAVE_JUMP_LABEL
76230
76231@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
76232
76233 size = (((unsigned long)stop - (unsigned long)start)
76234 / sizeof(struct jump_entry));
76235+ pax_open_kernel();
76236 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
76237+ pax_close_kernel();
76238 }
76239
76240 static void jump_label_update(struct static_key *key, int enable);
76241@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
76242 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
76243 struct jump_entry *iter;
76244
76245+ pax_open_kernel();
76246 for (iter = iter_start; iter < iter_stop; iter++) {
76247 if (within_module_init(iter->code, mod))
76248 iter->code = 0;
76249 }
76250+ pax_close_kernel();
76251 }
76252
76253 static int
76254diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
76255index 2169fee..706ccca 100644
76256--- a/kernel/kallsyms.c
76257+++ b/kernel/kallsyms.c
76258@@ -11,6 +11,9 @@
76259 * Changed the compression method from stem compression to "table lookup"
76260 * compression (see scripts/kallsyms.c for a more complete description)
76261 */
76262+#ifdef CONFIG_GRKERNSEC_HIDESYM
76263+#define __INCLUDED_BY_HIDESYM 1
76264+#endif
76265 #include <linux/kallsyms.h>
76266 #include <linux/module.h>
76267 #include <linux/init.h>
76268@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
76269
76270 static inline int is_kernel_inittext(unsigned long addr)
76271 {
76272+ if (system_state != SYSTEM_BOOTING)
76273+ return 0;
76274+
76275 if (addr >= (unsigned long)_sinittext
76276 && addr <= (unsigned long)_einittext)
76277 return 1;
76278 return 0;
76279 }
76280
76281+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76282+#ifdef CONFIG_MODULES
76283+static inline int is_module_text(unsigned long addr)
76284+{
76285+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
76286+ return 1;
76287+
76288+ addr = ktla_ktva(addr);
76289+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
76290+}
76291+#else
76292+static inline int is_module_text(unsigned long addr)
76293+{
76294+ return 0;
76295+}
76296+#endif
76297+#endif
76298+
76299 static inline int is_kernel_text(unsigned long addr)
76300 {
76301 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
76302@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
76303
76304 static inline int is_kernel(unsigned long addr)
76305 {
76306+
76307+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76308+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
76309+ return 1;
76310+
76311+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
76312+#else
76313 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
76314+#endif
76315+
76316 return 1;
76317 return in_gate_area_no_mm(addr);
76318 }
76319
76320 static int is_ksym_addr(unsigned long addr)
76321 {
76322+
76323+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76324+ if (is_module_text(addr))
76325+ return 0;
76326+#endif
76327+
76328 if (all_var)
76329 return is_kernel(addr);
76330
76331@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
76332
76333 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
76334 {
76335- iter->name[0] = '\0';
76336 iter->nameoff = get_symbol_offset(new_pos);
76337 iter->pos = new_pos;
76338 }
76339@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
76340 {
76341 struct kallsym_iter *iter = m->private;
76342
76343+#ifdef CONFIG_GRKERNSEC_HIDESYM
76344+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
76345+ return 0;
76346+#endif
76347+
76348 /* Some debugging symbols have no name. Ignore them. */
76349 if (!iter->name[0])
76350 return 0;
76351@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
76352 */
76353 type = iter->exported ? toupper(iter->type) :
76354 tolower(iter->type);
76355+
76356 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
76357 type, iter->name, iter->module_name);
76358 } else
76359@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
76360 struct kallsym_iter *iter;
76361 int ret;
76362
76363- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
76364+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
76365 if (!iter)
76366 return -ENOMEM;
76367 reset_iter(iter, 0);
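
The kallsyms hunk pairs two changes: reset_iter() no longer clears iter->name, so kallsyms_open() must hand out pre-zeroed memory, hence kmalloc() becomes kzalloc(); combined with the HIDESYM early return in s_show(), this keeps both stale heap bytes and symbol addresses away from unprivileged readers. A sketch of why a zeroing allocator lets the per-reset clearing go away:

#include <stdio.h>
#include <stdlib.h>

struct iter {
    char name[32];
    long pos;
};

int main(void)
{
    /* malloc() would leave name[] holding whatever was on the heap,
     * so any consumer trusting name[0] == '\0' as "no symbol yet"
     * could expose stale bytes.  calloc() (kzalloc in the patch)
     * establishes the invariant at allocation time. */
    struct iter *it = calloc(1, sizeof(*it));
    if (!it)
        return 1;

    printf("name empty: %d\n", it->name[0] == '\0');   /* always 1 */
    free(it);
    return 0;
}
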
76368diff --git a/kernel/kcmp.c b/kernel/kcmp.c
76369index e30ac0f..3528cac 100644
76370--- a/kernel/kcmp.c
76371+++ b/kernel/kcmp.c
76372@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
76373 struct task_struct *task1, *task2;
76374 int ret;
76375
76376+#ifdef CONFIG_GRKERNSEC
76377+ return -ENOSYS;
76378+#endif
76379+
76380 rcu_read_lock();
76381
76382 /*
76383diff --git a/kernel/kexec.c b/kernel/kexec.c
76384index 5e4bd78..00c5b91 100644
76385--- a/kernel/kexec.c
76386+++ b/kernel/kexec.c
76387@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
76388 unsigned long flags)
76389 {
76390 struct compat_kexec_segment in;
76391- struct kexec_segment out, __user *ksegments;
76392+ struct kexec_segment out;
76393+ struct kexec_segment __user *ksegments;
76394 unsigned long i, result;
76395
76396 /* Don't allow clients that don't understand the native
76397diff --git a/kernel/kmod.c b/kernel/kmod.c
76398index 0023a87..9c0c068 100644
76399--- a/kernel/kmod.c
76400+++ b/kernel/kmod.c
76401@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
76402 kfree(info->argv);
76403 }
76404
76405-static int call_modprobe(char *module_name, int wait)
76406+static int call_modprobe(char *module_name, char *module_param, int wait)
76407 {
76408 static char *envp[] = {
76409 "HOME=/",
76410@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
76411 NULL
76412 };
76413
76414- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
76415+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
76416 if (!argv)
76417 goto out;
76418
76419@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
76420 argv[1] = "-q";
76421 argv[2] = "--";
76422 argv[3] = module_name; /* check free_modprobe_argv() */
76423- argv[4] = NULL;
76424+ argv[4] = module_param;
76425+ argv[5] = NULL;
76426
76427 return call_usermodehelper_fns(modprobe_path, argv, envp,
76428 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
76429@@ -120,9 +121,8 @@ out:
76430 * If module auto-loading support is disabled then this function
76431 * becomes a no-operation.
76432 */
76433-int __request_module(bool wait, const char *fmt, ...)
76434+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
76435 {
76436- va_list args;
76437 char module_name[MODULE_NAME_LEN];
76438 unsigned int max_modprobes;
76439 int ret;
76440@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
76441 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
76442 static int kmod_loop_msg;
76443
76444- va_start(args, fmt);
76445- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
76446- va_end(args);
76447+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
76448 if (ret >= MODULE_NAME_LEN)
76449 return -ENAMETOOLONG;
76450
76451@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
76452 if (ret)
76453 return ret;
76454
76455+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76456+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76457+ /* hack to workaround consolekit/udisks stupidity */
76458+ read_lock(&tasklist_lock);
76459+ if (!strcmp(current->comm, "mount") &&
76460+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
76461+ read_unlock(&tasklist_lock);
76462+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
76463+ return -EPERM;
76464+ }
76465+ read_unlock(&tasklist_lock);
76466+ }
76467+#endif
76468+
76469 /* If modprobe needs a service that is in a module, we get a recursive
76470 * loop. Limit the number of running kmod threads to max_threads/2 or
76471 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
76472@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
76473
76474 trace_module_request(module_name, wait, _RET_IP_);
76475
76476- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76477+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76478
76479 atomic_dec(&kmod_concurrent);
76480 return ret;
76481 }
76482+
76483+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
76484+{
76485+ va_list args;
76486+ int ret;
76487+
76488+ va_start(args, fmt);
76489+ ret = ____request_module(wait, module_param, fmt, args);
76490+ va_end(args);
76491+
76492+ return ret;
76493+}
76494+
76495+int __request_module(bool wait, const char *fmt, ...)
76496+{
76497+ va_list args;
76498+ int ret;
76499+
76500+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76501+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76502+ char module_param[MODULE_NAME_LEN];
76503+
76504+ memset(module_param, 0, sizeof(module_param));
76505+
76506+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
76507+
76508+ va_start(args, fmt);
76509+ ret = ____request_module(wait, module_param, fmt, args);
76510+ va_end(args);
76511+
76512+ return ret;
76513+ }
76514+#endif
76515+
76516+ va_start(args, fmt);
76517+ ret = ____request_module(wait, NULL, fmt, args);
76518+ va_end(args);
76519+
76520+ return ret;
76521+}
76522+
76523 EXPORT_SYMBOL(__request_module);
76524 #endif /* CONFIG_MODULES */
76525
76526@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
76527 *
76528 * Thus the __user pointer cast is valid here.
76529 */
76530- sys_wait4(pid, (int __user *)&ret, 0, NULL);
76531+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
76532
76533 /*
76534 * If ret is 0, either ____call_usermodehelper failed and the
76535@@ -635,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
76536 static int proc_cap_handler(struct ctl_table *table, int write,
76537 void __user *buffer, size_t *lenp, loff_t *ppos)
76538 {
76539- struct ctl_table t;
76540+ ctl_table_no_const t;
76541 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
76542 kernel_cap_t new_cap;
76543 int err, i;
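
To let MODHARDEN tag auto-loaded module requests with the requesting UID, the kmod.c hunks perform the classic v-variant refactor: varargs __request_module() is split into a va_list core (____request_module()) plus thin wrappers, one of which (___request_module()) takes an extra parameter that ends up as an additional modprobe argv slot. A sketch of the same refactor applied to a printf-like function:

#include <stdarg.h>
#include <stdio.h>

/* The core takes va_list, so any number of wrappers can add behavior
 * (here an optional prefix, standing in for the grsec_modharden_
 * parameter) without duplicating the formatting logic. */
static int vlog(const char *prefix, const char *fmt, va_list ap)
{
    if (prefix)
        fputs(prefix, stdout);
    return vprintf(fmt, ap);
}

static int log_plain(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    int ret = vlog(NULL, fmt, ap);
    va_end(ap);
    return ret;
}

static int log_tagged(const char *prefix, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    int ret = vlog(prefix, fmt, ap);
    va_end(ap);
    return ret;
}

int main(void)
{
    log_plain("loading %s\n", "fs-ext4");
    log_tagged("[uid 1000] ", "loading %s\n", "fs-ext4");
    return 0;
}
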
76544diff --git a/kernel/kprobes.c b/kernel/kprobes.c
76545index 098f396..fe85ff1 100644
76546--- a/kernel/kprobes.c
76547+++ b/kernel/kprobes.c
76548@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
76549 * kernel image and loaded module images reside. This is required
76550 * so x86_64 can correctly handle the %rip-relative fixups.
76551 */
76552- kip->insns = module_alloc(PAGE_SIZE);
76553+ kip->insns = module_alloc_exec(PAGE_SIZE);
76554 if (!kip->insns) {
76555 kfree(kip);
76556 return NULL;
76557@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
76558 */
76559 if (!list_is_singular(&kip->list)) {
76560 list_del(&kip->list);
76561- module_free(NULL, kip->insns);
76562+ module_free_exec(NULL, kip->insns);
76563 kfree(kip);
76564 }
76565 return 1;
76566@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
76567 {
76568 int i, err = 0;
76569 unsigned long offset = 0, size = 0;
76570- char *modname, namebuf[128];
76571+ char *modname, namebuf[KSYM_NAME_LEN];
76572 const char *symbol_name;
76573 void *addr;
76574 struct kprobe_blackpoint *kb;
76575@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
76576 kprobe_type = "k";
76577
76578 if (sym)
76579- seq_printf(pi, "%p %s %s+0x%x %s ",
76580+ seq_printf(pi, "%pK %s %s+0x%x %s ",
76581 p->addr, kprobe_type, sym, offset,
76582 (modname ? modname : " "));
76583 else
76584- seq_printf(pi, "%p %s %p ",
76585+ seq_printf(pi, "%pK %s %pK ",
76586 p->addr, kprobe_type, p->addr);
76587
76588 if (!pp)
76589@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
76590 const char *sym = NULL;
76591 unsigned int i = *(loff_t *) v;
76592 unsigned long offset = 0;
76593- char *modname, namebuf[128];
76594+ char *modname, namebuf[KSYM_NAME_LEN];
76595
76596 head = &kprobe_table[i];
76597 preempt_disable();
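
The kprobes.c hunks above make two small hardening tweaks: the debugfs listings print probe addresses with %pK instead of %p, and namebuf is sized with KSYM_NAME_LEN rather than a magic 128. %pK is the kernel's restricted pointer format: depending on the kptr_restrict sysctl it shows the real address only to privileged readers and zeroes it otherwise. A rough userspace analogue of that decision (the real logic lives in lib/vsprintf.c and also considers CAP_SYSLOG; kptr_restrict=1 semantics assumed):

#include <stdio.h>
#include <stdbool.h>

static void print_kptr(const void *p, bool privileged)
{
    if (privileged)
        printf("%p\n", p);
    else
        printf("%p\n", (void *)0);  /* unprivileged readers see zeros */
}

int main(void)
{
    int probe;
    print_kptr(&probe, false);  /* masked */
    print_kptr(&probe, true);   /* real address */
    return 0;
}
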
76598diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
76599index 6ada93c..dce7d5d 100644
76600--- a/kernel/ksysfs.c
76601+++ b/kernel/ksysfs.c
76602@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
76603 {
76604 if (count+1 > UEVENT_HELPER_PATH_LEN)
76605 return -ENOENT;
76606+ if (!capable(CAP_SYS_ADMIN))
76607+ return -EPERM;
76608 memcpy(uevent_helper, buf, count);
76609 uevent_helper[count] = '\0';
76610 if (count && uevent_helper[count-1] == '\n')
76611@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
76612 return count;
76613 }
76614
76615-static struct bin_attribute notes_attr = {
76616+static bin_attribute_no_const notes_attr __read_only = {
76617 .attr = {
76618 .name = "notes",
76619 .mode = S_IRUGO,
76620diff --git a/kernel/lockdep.c b/kernel/lockdep.c
76621index 7981e5b..7f2105c 100644
76622--- a/kernel/lockdep.c
76623+++ b/kernel/lockdep.c
76624@@ -590,6 +590,10 @@ static int static_obj(void *obj)
76625 end = (unsigned long) &_end,
76626 addr = (unsigned long) obj;
76627
76628+#ifdef CONFIG_PAX_KERNEXEC
76629+ start = ktla_ktva(start);
76630+#endif
76631+
76632 /*
76633 * static variable?
76634 */
76635@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
76636 if (!static_obj(lock->key)) {
76637 debug_locks_off();
76638 printk("INFO: trying to register non-static key.\n");
76639+ printk("lock:%pS key:%pS.\n", lock, lock->key);
76640 printk("the code is fine but needs lockdep annotation.\n");
76641 printk("turning off the locking correctness validator.\n");
76642 dump_stack();
76643@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
76644 if (!class)
76645 return 0;
76646 }
76647- atomic_inc((atomic_t *)&class->ops);
76648+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
76649 if (very_verbose(class)) {
76650 printk("\nacquire class [%p] %s", class->key, class->name);
76651 if (class->name_version > 1)
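
static_obj() decides whether a lock key lives in static kernel data by checking its address against the _stext.._end range. Under PaX KERNEXEC on i386 the kernel text is remapped, so the hunk above first translates the lower bound with ktla_ktva(). A sketch of that bounds check with an assumed constant offset (the real ktla_ktva() is arch-specific; the offset and addresses below are illustrative only):

#include <stdio.h>

#define KERNEXEC_TEXT_OFFSET 0x10000000UL  /* hypothetical, for illustration */

static unsigned long ktla_ktva(unsigned long addr)
{
    /* kernel text linear address -> kernel text virtual alias */
    return addr + KERNEXEC_TEXT_OFFSET;
}

static int static_obj(unsigned long start, unsigned long end, unsigned long addr)
{
    start = ktla_ktva(start);  /* as in the hunk above */
    return addr >= start && addr < end;
}

int main(void)
{
    unsigned long stext = 0xc1000000UL, kend = 0xd2000000UL;  /* made up */
    printf("static: %d\n", static_obj(stext, kend, 0xd1234567UL));
    return 0;
}
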
76652diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
76653index b2c71c5..7b88d63 100644
76654--- a/kernel/lockdep_proc.c
76655+++ b/kernel/lockdep_proc.c
76656@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
76657 return 0;
76658 }
76659
76660- seq_printf(m, "%p", class->key);
76661+ seq_printf(m, "%pK", class->key);
76662 #ifdef CONFIG_DEBUG_LOCKDEP
76663 seq_printf(m, " OPS:%8ld", class->ops);
76664 #endif
76665@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
76666
76667 list_for_each_entry(entry, &class->locks_after, entry) {
76668 if (entry->distance == 1) {
76669- seq_printf(m, " -> [%p] ", entry->class->key);
76670+ seq_printf(m, " -> [%pK] ", entry->class->key);
76671 print_name(m, entry->class);
76672 seq_puts(m, "\n");
76673 }
76674@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
76675 if (!class->key)
76676 continue;
76677
76678- seq_printf(m, "[%p] ", class->key);
76679+ seq_printf(m, "[%pK] ", class->key);
76680 print_name(m, class);
76681 seq_puts(m, "\n");
76682 }
76683@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76684 if (!i)
76685 seq_line(m, '-', 40-namelen, namelen);
76686
76687- snprintf(ip, sizeof(ip), "[<%p>]",
76688+ snprintf(ip, sizeof(ip), "[<%pK>]",
76689 (void *)class->contention_point[i]);
76690 seq_printf(m, "%40s %14lu %29s %pS\n",
76691 name, stats->contention_point[i],
76692@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76693 if (!i)
76694 seq_line(m, '-', 40-namelen, namelen);
76695
76696- snprintf(ip, sizeof(ip), "[<%p>]",
76697+ snprintf(ip, sizeof(ip), "[<%pK>]",
76698 (void *)class->contending_point[i]);
76699 seq_printf(m, "%40s %14lu %29s %pS\n",
76700 name, stats->contending_point[i],
76701diff --git a/kernel/module.c b/kernel/module.c
76702index eab0827..f488603 100644
76703--- a/kernel/module.c
76704+++ b/kernel/module.c
76705@@ -61,6 +61,7 @@
76706 #include <linux/pfn.h>
76707 #include <linux/bsearch.h>
76708 #include <linux/fips.h>
76709+#include <linux/grsecurity.h>
76710 #include <uapi/linux/module.h>
76711 #include "module-internal.h"
76712
76713@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
76714
76715 /* Bounds of module allocation, for speeding __module_address.
76716 * Protected by module_mutex. */
76717-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
76718+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
76719+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
76720
76721 int register_module_notifier(struct notifier_block * nb)
76722 {
76723@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76724 return true;
76725
76726 list_for_each_entry_rcu(mod, &modules, list) {
76727- struct symsearch arr[] = {
76728+ struct symsearch modarr[] = {
76729 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
76730 NOT_GPL_ONLY, false },
76731 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
76732@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76733 if (mod->state == MODULE_STATE_UNFORMED)
76734 continue;
76735
76736- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
76737+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
76738 return true;
76739 }
76740 return false;
76741@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
76742 static int percpu_modalloc(struct module *mod,
76743 unsigned long size, unsigned long align)
76744 {
76745- if (align > PAGE_SIZE) {
76746+ if (align-1 >= PAGE_SIZE) {
76747 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
76748 mod->name, align, PAGE_SIZE);
76749 align = PAGE_SIZE;
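
The percpu_modalloc() hunk above swaps "align > PAGE_SIZE" for the unsigned test "align-1 >= PAGE_SIZE", which additionally rejects align == 0: the subtraction wraps to ULONG_MAX, so a zero alignment is clamped to PAGE_SIZE just like an oversized one. A quick worked check:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long aligns[] = { 0, 1, 64, 4096, 8192 };

    for (int i = 0; i < 5; i++) {
        unsigned long align = aligns[i];
        printf("align=%5lu -> %s\n", align,
               (align - 1 >= PAGE_SIZE) ? "clamped" : "accepted");
    }
    return 0;
}
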
76750@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
76751 static ssize_t show_coresize(struct module_attribute *mattr,
76752 struct module_kobject *mk, char *buffer)
76753 {
76754- return sprintf(buffer, "%u\n", mk->mod->core_size);
76755+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
76756 }
76757
76758 static struct module_attribute modinfo_coresize =
76759@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
76760 static ssize_t show_initsize(struct module_attribute *mattr,
76761 struct module_kobject *mk, char *buffer)
76762 {
76763- return sprintf(buffer, "%u\n", mk->mod->init_size);
76764+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
76765 }
76766
76767 static struct module_attribute modinfo_initsize =
76768@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
76769 */
76770 #ifdef CONFIG_SYSFS
76771
76772-#ifdef CONFIG_KALLSYMS
76773+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
76774 static inline bool sect_empty(const Elf_Shdr *sect)
76775 {
76776 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
76777@@ -1451,7 +1453,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
76778 {
76779 unsigned int notes, loaded, i;
76780 struct module_notes_attrs *notes_attrs;
76781- struct bin_attribute *nattr;
76782+ bin_attribute_no_const *nattr;
76783
76784 /* failed to create section attributes, so can't create notes */
76785 if (!mod->sect_attrs)
76786@@ -1563,7 +1565,7 @@ static void del_usage_links(struct module *mod)
76787 static int module_add_modinfo_attrs(struct module *mod)
76788 {
76789 struct module_attribute *attr;
76790- struct module_attribute *temp_attr;
76791+ module_attribute_no_const *temp_attr;
76792 int error = 0;
76793 int i;
76794
76795@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
76796
76797 static void unset_module_core_ro_nx(struct module *mod)
76798 {
76799- set_page_attributes(mod->module_core + mod->core_text_size,
76800- mod->module_core + mod->core_size,
76801+ set_page_attributes(mod->module_core_rw,
76802+ mod->module_core_rw + mod->core_size_rw,
76803 set_memory_x);
76804- set_page_attributes(mod->module_core,
76805- mod->module_core + mod->core_ro_size,
76806+ set_page_attributes(mod->module_core_rx,
76807+ mod->module_core_rx + mod->core_size_rx,
76808 set_memory_rw);
76809 }
76810
76811 static void unset_module_init_ro_nx(struct module *mod)
76812 {
76813- set_page_attributes(mod->module_init + mod->init_text_size,
76814- mod->module_init + mod->init_size,
76815+ set_page_attributes(mod->module_init_rw,
76816+ mod->module_init_rw + mod->init_size_rw,
76817 set_memory_x);
76818- set_page_attributes(mod->module_init,
76819- mod->module_init + mod->init_ro_size,
76820+ set_page_attributes(mod->module_init_rx,
76821+ mod->module_init_rx + mod->init_size_rx,
76822 set_memory_rw);
76823 }
76824
76825@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
76826 list_for_each_entry_rcu(mod, &modules, list) {
76827 if (mod->state == MODULE_STATE_UNFORMED)
76828 continue;
76829- if ((mod->module_core) && (mod->core_text_size)) {
76830- set_page_attributes(mod->module_core,
76831- mod->module_core + mod->core_text_size,
76832+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76833+ set_page_attributes(mod->module_core_rx,
76834+ mod->module_core_rx + mod->core_size_rx,
76835 set_memory_rw);
76836 }
76837- if ((mod->module_init) && (mod->init_text_size)) {
76838- set_page_attributes(mod->module_init,
76839- mod->module_init + mod->init_text_size,
76840+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76841+ set_page_attributes(mod->module_init_rx,
76842+ mod->module_init_rx + mod->init_size_rx,
76843 set_memory_rw);
76844 }
76845 }
76846@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
76847 list_for_each_entry_rcu(mod, &modules, list) {
76848 if (mod->state == MODULE_STATE_UNFORMED)
76849 continue;
76850- if ((mod->module_core) && (mod->core_text_size)) {
76851- set_page_attributes(mod->module_core,
76852- mod->module_core + mod->core_text_size,
76853+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76854+ set_page_attributes(mod->module_core_rx,
76855+ mod->module_core_rx + mod->core_size_rx,
76856 set_memory_ro);
76857 }
76858- if ((mod->module_init) && (mod->init_text_size)) {
76859- set_page_attributes(mod->module_init,
76860- mod->module_init + mod->init_text_size,
76861+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76862+ set_page_attributes(mod->module_init_rx,
76863+ mod->module_init_rx + mod->init_size_rx,
76864 set_memory_ro);
76865 }
76866 }
76867@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
76868
76869 /* This may be NULL, but that's OK */
76870 unset_module_init_ro_nx(mod);
76871- module_free(mod, mod->module_init);
76872+ module_free(mod, mod->module_init_rw);
76873+ module_free_exec(mod, mod->module_init_rx);
76874 kfree(mod->args);
76875 percpu_modfree(mod);
76876
76877 /* Free lock-classes: */
76878- lockdep_free_key_range(mod->module_core, mod->core_size);
76879+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
76880+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
76881
76882 /* Finally, free the core (containing the module structure) */
76883 unset_module_core_ro_nx(mod);
76884- module_free(mod, mod->module_core);
76885+ module_free_exec(mod, mod->module_core_rx);
76886+ module_free(mod, mod->module_core_rw);
76887
76888 #ifdef CONFIG_MPU
76889 update_protections(current->mm);
76890@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76891 int ret = 0;
76892 const struct kernel_symbol *ksym;
76893
76894+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76895+ int is_fs_load = 0;
76896+ int register_filesystem_found = 0;
76897+ char *p;
76898+
76899+ p = strstr(mod->args, "grsec_modharden_fs");
76900+ if (p) {
76901+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
76902+ /* copy \0 as well */
76903+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
76904+ is_fs_load = 1;
76905+ }
76906+#endif
76907+
76908 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
76909 const char *name = info->strtab + sym[i].st_name;
76910
76911+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76912+ /* it's a real shame this will never get ripped and copied
76913+ upstream! ;(
76914+ */
76915+ if (is_fs_load && !strcmp(name, "register_filesystem"))
76916+ register_filesystem_found = 1;
76917+#endif
76918+
76919 switch (sym[i].st_shndx) {
76920 case SHN_COMMON:
76921 /* We compiled with -fno-common. These are not
76922@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76923 ksym = resolve_symbol_wait(mod, info, name);
76924 /* Ok if resolved. */
76925 if (ksym && !IS_ERR(ksym)) {
76926+ pax_open_kernel();
76927 sym[i].st_value = ksym->value;
76928+ pax_close_kernel();
76929 break;
76930 }
76931
76932@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76933 secbase = (unsigned long)mod_percpu(mod);
76934 else
76935 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
76936+ pax_open_kernel();
76937 sym[i].st_value += secbase;
76938+ pax_close_kernel();
76939 break;
76940 }
76941 }
76942
76943+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76944+ if (is_fs_load && !register_filesystem_found) {
76945+ printk(KERN_ALERT "grsec: denied attempt to load non-fs module %.64s through mount\n", mod->name);
76946+ ret = -EPERM;
76947+ }
76948+#endif
76949+
76950 return ret;
76951 }
76952
76953@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
76954 || s->sh_entsize != ~0UL
76955 || strstarts(sname, ".init"))
76956 continue;
76957- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
76958+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76959+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
76960+ else
76961+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
76962 pr_debug("\t%s\n", sname);
76963 }
76964- switch (m) {
76965- case 0: /* executable */
76966- mod->core_size = debug_align(mod->core_size);
76967- mod->core_text_size = mod->core_size;
76968- break;
76969- case 1: /* RO: text and ro-data */
76970- mod->core_size = debug_align(mod->core_size);
76971- mod->core_ro_size = mod->core_size;
76972- break;
76973- case 3: /* whole core */
76974- mod->core_size = debug_align(mod->core_size);
76975- break;
76976- }
76977 }
76978
76979 pr_debug("Init section allocation order:\n");
76980@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
76981 || s->sh_entsize != ~0UL
76982 || !strstarts(sname, ".init"))
76983 continue;
76984- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
76985- | INIT_OFFSET_MASK);
76986+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76987+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
76988+ else
76989+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
76990+ s->sh_entsize |= INIT_OFFSET_MASK;
76991 pr_debug("\t%s\n", sname);
76992 }
76993- switch (m) {
76994- case 0: /* executable */
76995- mod->init_size = debug_align(mod->init_size);
76996- mod->init_text_size = mod->init_size;
76997- break;
76998- case 1: /* RO: text and ro-data */
76999- mod->init_size = debug_align(mod->init_size);
77000- mod->init_ro_size = mod->init_size;
77001- break;
77002- case 3: /* whole init */
77003- mod->init_size = debug_align(mod->init_size);
77004- break;
77005- }
77006 }
77007 }
77008
77009@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
77010
77011 /* Put symbol section at end of init part of module. */
77012 symsect->sh_flags |= SHF_ALLOC;
77013- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
77014+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
77015 info->index.sym) | INIT_OFFSET_MASK;
77016 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
77017
77018@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
77019 }
77020
77021 /* Append room for core symbols at end of core part. */
77022- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
77023- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
77024- mod->core_size += strtab_size;
77025+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
77026+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
77027+ mod->core_size_rx += strtab_size;
77028
77029 /* Put string table section at end of init part of module. */
77030 strsect->sh_flags |= SHF_ALLOC;
77031- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
77032+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
77033 info->index.str) | INIT_OFFSET_MASK;
77034 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
77035 }
77036@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
77037 /* Make sure we get permanent strtab: don't use info->strtab. */
77038 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
77039
77040+ pax_open_kernel();
77041+
77042 /* Set types up while we still have access to sections. */
77043 for (i = 0; i < mod->num_symtab; i++)
77044 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
77045
77046- mod->core_symtab = dst = mod->module_core + info->symoffs;
77047- mod->core_strtab = s = mod->module_core + info->stroffs;
77048+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
77049+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
77050 src = mod->symtab;
77051 for (ndst = i = 0; i < mod->num_symtab; i++) {
77052 if (i == 0 ||
77053@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
77054 }
77055 }
77056 mod->core_num_syms = ndst;
77057+
77058+ pax_close_kernel();
77059 }
77060 #else
77061 static inline void layout_symtab(struct module *mod, struct load_info *info)
77062@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
77063 return vmalloc_exec(size);
77064 }
77065
77066-static void *module_alloc_update_bounds(unsigned long size)
77067+static void *module_alloc_update_bounds_rw(unsigned long size)
77068 {
77069 void *ret = module_alloc(size);
77070
77071 if (ret) {
77072 mutex_lock(&module_mutex);
77073 /* Update module bounds. */
77074- if ((unsigned long)ret < module_addr_min)
77075- module_addr_min = (unsigned long)ret;
77076- if ((unsigned long)ret + size > module_addr_max)
77077- module_addr_max = (unsigned long)ret + size;
77078+ if ((unsigned long)ret < module_addr_min_rw)
77079+ module_addr_min_rw = (unsigned long)ret;
77080+ if ((unsigned long)ret + size > module_addr_max_rw)
77081+ module_addr_max_rw = (unsigned long)ret + size;
77082+ mutex_unlock(&module_mutex);
77083+ }
77084+ return ret;
77085+}
77086+
77087+static void *module_alloc_update_bounds_rx(unsigned long size)
77088+{
77089+ void *ret = module_alloc_exec(size);
77090+
77091+ if (ret) {
77092+ mutex_lock(&module_mutex);
77093+ /* Update module bounds. */
77094+ if ((unsigned long)ret < module_addr_min_rx)
77095+ module_addr_min_rx = (unsigned long)ret;
77096+ if ((unsigned long)ret + size > module_addr_max_rx)
77097+ module_addr_max_rx = (unsigned long)ret + size;
77098 mutex_unlock(&module_mutex);
77099 }
77100 return ret;
77101@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
77102 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
77103 {
77104 const char *modmagic = get_modinfo(info, "vermagic");
77105+ const char *license = get_modinfo(info, "license");
77106 int err;
77107
77108+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
77109+ if (!license || !license_is_gpl_compatible(license))
77110+ return -ENOEXEC;
77111+#endif
77112+
77113 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
77114 modmagic = NULL;
77115
77116@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
77117 }
77118
77119 /* Set up license info based on the info section */
77120- set_license(mod, get_modinfo(info, "license"));
77121+ set_license(mod, license);
77122
77123 return 0;
77124 }
77125@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
77126 void *ptr;
77127
77128 /* Do the allocs. */
77129- ptr = module_alloc_update_bounds(mod->core_size);
77130+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
77131 /*
77132 * The pointer to this block is stored in the module structure
77133 * which is inside the block. Just mark it as not being a
77134@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
77135 if (!ptr)
77136 return -ENOMEM;
77137
77138- memset(ptr, 0, mod->core_size);
77139- mod->module_core = ptr;
77140+ memset(ptr, 0, mod->core_size_rw);
77141+ mod->module_core_rw = ptr;
77142
77143- if (mod->init_size) {
77144- ptr = module_alloc_update_bounds(mod->init_size);
77145+ if (mod->init_size_rw) {
77146+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
77147 /*
77148 * The pointer to this block is stored in the module structure
77149 * which is inside the block. This block doesn't need to be
77150@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
77151 */
77152 kmemleak_ignore(ptr);
77153 if (!ptr) {
77154- module_free(mod, mod->module_core);
77155+ module_free(mod, mod->module_core_rw);
77156 return -ENOMEM;
77157 }
77158- memset(ptr, 0, mod->init_size);
77159- mod->module_init = ptr;
77160+ memset(ptr, 0, mod->init_size_rw);
77161+ mod->module_init_rw = ptr;
77162 } else
77163- mod->module_init = NULL;
77164+ mod->module_init_rw = NULL;
77165+
77166+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
77167+ kmemleak_not_leak(ptr);
77168+ if (!ptr) {
77169+ if (mod->module_init_rw)
77170+ module_free(mod, mod->module_init_rw);
77171+ module_free(mod, mod->module_core_rw);
77172+ return -ENOMEM;
77173+ }
77174+
77175+ pax_open_kernel();
77176+ memset(ptr, 0, mod->core_size_rx);
77177+ pax_close_kernel();
77178+ mod->module_core_rx = ptr;
77179+
77180+ if (mod->init_size_rx) {
77181+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
77182+ kmemleak_ignore(ptr);
77183+ if (!ptr && mod->init_size_rx) {
77184+ module_free_exec(mod, mod->module_core_rx);
77185+ if (mod->module_init_rw)
77186+ module_free(mod, mod->module_init_rw);
77187+ module_free(mod, mod->module_core_rw);
77188+ return -ENOMEM;
77189+ }
77190+
77191+ pax_open_kernel();
77192+ memset(ptr, 0, mod->init_size_rx);
77193+ pax_close_kernel();
77194+ mod->module_init_rx = ptr;
77195+ } else
77196+ mod->module_init_rx = NULL;
77197
77198 /* Transfer each section which specifies SHF_ALLOC */
77199 pr_debug("final section addresses:\n");
77200@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
77201 if (!(shdr->sh_flags & SHF_ALLOC))
77202 continue;
77203
77204- if (shdr->sh_entsize & INIT_OFFSET_MASK)
77205- dest = mod->module_init
77206- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77207- else
77208- dest = mod->module_core + shdr->sh_entsize;
77209+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
77210+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
77211+ dest = mod->module_init_rw
77212+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77213+ else
77214+ dest = mod->module_init_rx
77215+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77216+ } else {
77217+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
77218+ dest = mod->module_core_rw + shdr->sh_entsize;
77219+ else
77220+ dest = mod->module_core_rx + shdr->sh_entsize;
77221+ }
77222+
77223+ if (shdr->sh_type != SHT_NOBITS) {
77224+
77225+#ifdef CONFIG_PAX_KERNEXEC
77226+#ifdef CONFIG_X86_64
77227+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
77228+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
77229+#endif
77230+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
77231+ pax_open_kernel();
77232+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
77233+ pax_close_kernel();
77234+ } else
77235+#endif
77236
77237- if (shdr->sh_type != SHT_NOBITS)
77238 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
77239+ }
77240 /* Update sh_addr to point to copy in image. */
77241- shdr->sh_addr = (unsigned long)dest;
77242+
77243+#ifdef CONFIG_PAX_KERNEXEC
77244+ if (shdr->sh_flags & SHF_EXECINSTR)
77245+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
77246+ else
77247+#endif
77248+
77249+ shdr->sh_addr = (unsigned long)dest;
77250 pr_debug("\t0x%lx %s\n",
77251 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
77252 }
77253@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
77254 * Do it before processing of module parameters, so the module
77255 * can provide parameter accessor functions of its own.
77256 */
77257- if (mod->module_init)
77258- flush_icache_range((unsigned long)mod->module_init,
77259- (unsigned long)mod->module_init
77260- + mod->init_size);
77261- flush_icache_range((unsigned long)mod->module_core,
77262- (unsigned long)mod->module_core + mod->core_size);
77263+ if (mod->module_init_rx)
77264+ flush_icache_range((unsigned long)mod->module_init_rx,
77265+ (unsigned long)mod->module_init_rx
77266+ + mod->init_size_rx);
77267+ flush_icache_range((unsigned long)mod->module_core_rx,
77268+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
77269
77270 set_fs(old_fs);
77271 }
77272@@ -2983,8 +3088,10 @@ out:
77273 static void module_deallocate(struct module *mod, struct load_info *info)
77274 {
77275 percpu_modfree(mod);
77276- module_free(mod, mod->module_init);
77277- module_free(mod, mod->module_core);
77278+ module_free_exec(mod, mod->module_init_rx);
77279+ module_free_exec(mod, mod->module_core_rx);
77280+ module_free(mod, mod->module_init_rw);
77281+ module_free(mod, mod->module_core_rw);
77282 }
77283
77284 int __weak module_finalize(const Elf_Ehdr *hdr,
77285@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
77286 static int post_relocation(struct module *mod, const struct load_info *info)
77287 {
77288 /* Sort exception table now relocations are done. */
77289+ pax_open_kernel();
77290 sort_extable(mod->extable, mod->extable + mod->num_exentries);
77291+ pax_close_kernel();
77292
77293 /* Copy relocated percpu area over. */
77294 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
77295@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
77296 MODULE_STATE_COMING, mod);
77297
77298 /* Set RO and NX regions for core */
77299- set_section_ro_nx(mod->module_core,
77300- mod->core_text_size,
77301- mod->core_ro_size,
77302- mod->core_size);
77303+ set_section_ro_nx(mod->module_core_rx,
77304+ mod->core_size_rx,
77305+ mod->core_size_rx,
77306+ mod->core_size_rx);
77307
77308 /* Set RO and NX regions for init */
77309- set_section_ro_nx(mod->module_init,
77310- mod->init_text_size,
77311- mod->init_ro_size,
77312- mod->init_size);
77313+ set_section_ro_nx(mod->module_init_rx,
77314+ mod->init_size_rx,
77315+ mod->init_size_rx,
77316+ mod->init_size_rx);
77317
77318 do_mod_ctors(mod);
77319 /* Start the module */
77320@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
77321 mod->strtab = mod->core_strtab;
77322 #endif
77323 unset_module_init_ro_nx(mod);
77324- module_free(mod, mod->module_init);
77325- mod->module_init = NULL;
77326- mod->init_size = 0;
77327- mod->init_ro_size = 0;
77328- mod->init_text_size = 0;
77329+ module_free(mod, mod->module_init_rw);
77330+ module_free_exec(mod, mod->module_init_rx);
77331+ mod->module_init_rw = NULL;
77332+ mod->module_init_rx = NULL;
77333+ mod->init_size_rw = 0;
77334+ mod->init_size_rx = 0;
77335 mutex_unlock(&module_mutex);
77336 wake_up_all(&module_wq);
77337
77338@@ -3209,9 +3319,38 @@ again:
77339 if (err)
77340 goto free_unload;
77341
77342+ /* Now copy in args */
77343+ mod->args = strndup_user(uargs, ~0UL >> 1);
77344+ if (IS_ERR(mod->args)) {
77345+ err = PTR_ERR(mod->args);
77346+ goto free_unload;
77347+ }
77348+
77349 /* Set up MODINFO_ATTR fields */
77350 setup_modinfo(mod, info);
77351
77352+#ifdef CONFIG_GRKERNSEC_MODHARDEN
77353+ {
77354+ char *p, *p2;
77355+
77356+ if (strstr(mod->args, "grsec_modharden_netdev")) {
77357+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
77358+ err = -EPERM;
77359+ goto free_modinfo;
77360+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
77361+ p += sizeof("grsec_modharden_normal") - 1;
77362+ p2 = strstr(p, "_");
77363+ if (p2) {
77364+ *p2 = '\0';
77365+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
77366+ *p2 = '_';
77367+ }
77368+ err = -EPERM;
77369+ goto free_modinfo;
77370+ }
77371+ }
77372+#endif
77373+
77374 /* Fix up syms, so that st_value is a pointer to location. */
77375 err = simplify_symbols(mod, info);
77376 if (err < 0)
77377@@ -3227,13 +3366,6 @@ again:
77378
77379 flush_module_icache(mod);
77380
77381- /* Now copy in args */
77382- mod->args = strndup_user(uargs, ~0UL >> 1);
77383- if (IS_ERR(mod->args)) {
77384- err = PTR_ERR(mod->args);
77385- goto free_arch_cleanup;
77386- }
77387-
77388 dynamic_debug_setup(info->debug, info->num_debug);
77389
77390 mutex_lock(&module_mutex);
77391@@ -3278,11 +3410,10 @@ again:
77392 mutex_unlock(&module_mutex);
77393 dynamic_debug_remove(info->debug);
77394 synchronize_sched();
77395- kfree(mod->args);
77396- free_arch_cleanup:
77397 module_arch_cleanup(mod);
77398 free_modinfo:
77399 free_modinfo(mod);
77400+ kfree(mod->args);
77401 free_unload:
77402 module_unload_free(mod);
77403 unlink_mod:
77404@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
77405 unsigned long nextval;
77406
77407 /* At worse, next value is at end of module */
77408- if (within_module_init(addr, mod))
77409- nextval = (unsigned long)mod->module_init+mod->init_text_size;
77410+ if (within_module_init_rx(addr, mod))
77411+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
77412+ else if (within_module_init_rw(addr, mod))
77413+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
77414+ else if (within_module_core_rx(addr, mod))
77415+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
77416+ else if (within_module_core_rw(addr, mod))
77417+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
77418 else
77419- nextval = (unsigned long)mod->module_core+mod->core_text_size;
77420+ return NULL;
77421
77422 /* Scan for closest preceding symbol, and next symbol. (ELF
77423 starts real symbols at 1). */
77424@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
77425 return 0;
77426
77427 seq_printf(m, "%s %u",
77428- mod->name, mod->init_size + mod->core_size);
77429+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
77430 print_unload_info(m, mod);
77431
77432 /* Informative for users. */
77433@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
77434 mod->state == MODULE_STATE_COMING ? "Loading":
77435 "Live");
77436 /* Used by oprofile and other similar tools. */
77437- seq_printf(m, " 0x%pK", mod->module_core);
77438+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
77439
77440 /* Taints info */
77441 if (mod->taints)
77442@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
77443
77444 static int __init proc_modules_init(void)
77445 {
77446+#ifndef CONFIG_GRKERNSEC_HIDESYM
77447+#ifdef CONFIG_GRKERNSEC_PROC_USER
77448+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77449+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77450+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
77451+#else
77452 proc_create("modules", 0, NULL, &proc_modules_operations);
77453+#endif
77454+#else
77455+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77456+#endif
77457 return 0;
77458 }
77459 module_init(proc_modules_init);
77460@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
77461 {
77462 struct module *mod;
77463
77464- if (addr < module_addr_min || addr > module_addr_max)
77465+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
77466+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
77467 return NULL;
77468
77469 list_for_each_entry_rcu(mod, &modules, list) {
77470 if (mod->state == MODULE_STATE_UNFORMED)
77471 continue;
77472- if (within_module_core(addr, mod)
77473- || within_module_init(addr, mod))
77474+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
77475 return mod;
77476 }
77477 return NULL;
77478@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
77479 */
77480 struct module *__module_text_address(unsigned long addr)
77481 {
77482- struct module *mod = __module_address(addr);
77483+ struct module *mod;
77484+
77485+#ifdef CONFIG_X86_32
77486+ addr = ktla_ktva(addr);
77487+#endif
77488+
77489+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
77490+ return NULL;
77491+
77492+ mod = __module_address(addr);
77493+
77494 if (mod) {
77495 /* Make sure it's within the text section. */
77496- if (!within(addr, mod->module_init, mod->init_text_size)
77497- && !within(addr, mod->module_core, mod->core_text_size))
77498+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
77499 mod = NULL;
77500 }
77501 return mod;
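
The module.c diff above is the heart of the RW/RX split: the single module_core/module_init allocation becomes a pair (module_core_rw/module_core_rx and their init counterparts), and every ELF section is routed by one predicate, writable or non-allocated sections into the RW mapping and everything else (text plus rodata) into the RX mapping, which is then kept read-only and executable while RW stays non-executable. A standalone sketch of the classifier that layout_sections() and move_module() apply:

#include <stdio.h>

#define SHF_WRITE     0x1UL
#define SHF_ALLOC     0x2UL
#define SHF_EXECINSTR 0x4UL

static const char *classify(unsigned long sh_flags)
{
    /* the predicate repeated throughout the diff above */
    if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
        return "rw";
    return "rx";
}

int main(void)
{
    printf(".text    -> %s\n", classify(SHF_ALLOC | SHF_EXECINSTR));
    printf(".rodata  -> %s\n", classify(SHF_ALLOC));
    printf(".data    -> %s\n", classify(SHF_ALLOC | SHF_WRITE));
    printf(".comment -> %s\n", classify(0));
    return 0;
}

Because rodata now lives in the RX region rather than a temporarily writable core, the later writes during relocation (simplify_symbols, add_kallsyms, sort_extable) have to be bracketed with pax_open_kernel()/pax_close_kernel(), which is exactly what the hunks above add.
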
77502diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
77503index 7e3443f..b2a1e6b 100644
77504--- a/kernel/mutex-debug.c
77505+++ b/kernel/mutex-debug.c
77506@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
77507 }
77508
77509 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77510- struct thread_info *ti)
77511+ struct task_struct *task)
77512 {
77513 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
77514
77515 /* Mark the current thread as blocked on the lock: */
77516- ti->task->blocked_on = waiter;
77517+ task->blocked_on = waiter;
77518 }
77519
77520 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77521- struct thread_info *ti)
77522+ struct task_struct *task)
77523 {
77524 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
77525- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
77526- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
77527- ti->task->blocked_on = NULL;
77528+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
77529+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
77530+ task->blocked_on = NULL;
77531
77532 list_del_init(&waiter->list);
77533 waiter->task = NULL;
77534diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
77535index 0799fd3..d06ae3b 100644
77536--- a/kernel/mutex-debug.h
77537+++ b/kernel/mutex-debug.h
77538@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
77539 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
77540 extern void debug_mutex_add_waiter(struct mutex *lock,
77541 struct mutex_waiter *waiter,
77542- struct thread_info *ti);
77543+ struct task_struct *task);
77544 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77545- struct thread_info *ti);
77546+ struct task_struct *task);
77547 extern void debug_mutex_unlock(struct mutex *lock);
77548 extern void debug_mutex_init(struct mutex *lock, const char *name,
77549 struct lock_class_key *key);
77550diff --git a/kernel/mutex.c b/kernel/mutex.c
77551index a307cc9..27fd2e9 100644
77552--- a/kernel/mutex.c
77553+++ b/kernel/mutex.c
77554@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
77555 spin_lock_mutex(&lock->wait_lock, flags);
77556
77557 debug_mutex_lock_common(lock, &waiter);
77558- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
77559+ debug_mutex_add_waiter(lock, &waiter, task);
77560
77561 /* add waiting tasks to the end of the waitqueue (FIFO): */
77562 list_add_tail(&waiter.list, &lock->wait_list);
77563@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
77564 * TASK_UNINTERRUPTIBLE case.)
77565 */
77566 if (unlikely(signal_pending_state(state, task))) {
77567- mutex_remove_waiter(lock, &waiter,
77568- task_thread_info(task));
77569+ mutex_remove_waiter(lock, &waiter, task);
77570 mutex_release(&lock->dep_map, 1, ip);
77571 spin_unlock_mutex(&lock->wait_lock, flags);
77572
77573@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
77574 done:
77575 lock_acquired(&lock->dep_map, ip);
77576 /* got the lock - rejoice! */
77577- mutex_remove_waiter(lock, &waiter, current_thread_info());
77578+ mutex_remove_waiter(lock, &waiter, task);
77579 mutex_set_owner(lock);
77580
77581 /* set it to 0 if there are no waiters left: */
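
The three mutex files above make a single interface change: the debug waiter hooks take the task_struct directly instead of its thread_info, removing the task_thread_info(task) / ti->task round-trip. A toy before/after with dummy types (not kernel code):

#include <stdio.h>

struct task_struct { const char *comm; void *blocked_on; };
struct thread_info { struct task_struct *task; };

/* old shape: caller wraps the task, callee unwraps it again */
static void add_waiter_old(struct thread_info *ti, void *waiter)
{
    ti->task->blocked_on = waiter;
}

/* new shape: pass the task itself */
static void add_waiter_new(struct task_struct *task, void *waiter)
{
    task->blocked_on = waiter;
}

int main(void)
{
    struct task_struct t = { "demo", 0 };
    struct thread_info ti = { &t };
    int w;

    add_waiter_old(&ti, &w);
    add_waiter_new(&t, &w);
    printf("%s blocked_on=%p\n", t.comm, t.blocked_on);
    return 0;
}
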
77582diff --git a/kernel/notifier.c b/kernel/notifier.c
77583index 2d5cc4c..d9ea600 100644
77584--- a/kernel/notifier.c
77585+++ b/kernel/notifier.c
77586@@ -5,6 +5,7 @@
77587 #include <linux/rcupdate.h>
77588 #include <linux/vmalloc.h>
77589 #include <linux/reboot.h>
77590+#include <linux/mm.h>
77591
77592 /*
77593 * Notifier list for kernel code which wants to be called
77594@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
77595 while ((*nl) != NULL) {
77596 if (n->priority > (*nl)->priority)
77597 break;
77598- nl = &((*nl)->next);
77599+ nl = (struct notifier_block **)&((*nl)->next);
77600 }
77601- n->next = *nl;
77602+ pax_open_kernel();
77603+ *(const void **)&n->next = *nl;
77604 rcu_assign_pointer(*nl, n);
77605+ pax_close_kernel();
77606 return 0;
77607 }
77608
77609@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
77610 return 0;
77611 if (n->priority > (*nl)->priority)
77612 break;
77613- nl = &((*nl)->next);
77614+ nl = (struct notifier_block **)&((*nl)->next);
77615 }
77616- n->next = *nl;
77617+ pax_open_kernel();
77618+ *(const void **)&n->next = *nl;
77619 rcu_assign_pointer(*nl, n);
77620+ pax_close_kernel();
77621 return 0;
77622 }
77623
77624@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
77625 {
77626 while ((*nl) != NULL) {
77627 if ((*nl) == n) {
77628+ pax_open_kernel();
77629 rcu_assign_pointer(*nl, n->next);
77630+ pax_close_kernel();
77631 return 0;
77632 }
77633- nl = &((*nl)->next);
77634+ nl = (struct notifier_block **)&((*nl)->next);
77635 }
77636 return -ENOENT;
77637 }
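
The notifier.c hunks let notifier chains live in read-only memory: the next-pointer updates are funnelled through casts and bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection on kernel data. The list logic itself, a priority-ordered singly linked insert, is unchanged; a userspace sketch with the protection brackets reduced to comments:

#include <stdio.h>

struct notifier_block {
    int (*notifier_call)(void);
    struct notifier_block *next;
    int priority;
};

static int chain_register(struct notifier_block **nl, struct notifier_block *n)
{
    while (*nl != NULL) {
        if (n->priority > (*nl)->priority)
            break;
        nl = &(*nl)->next;
    }
    /* pax_open_kernel(): make the target writable */
    n->next = *nl;
    *nl = n;        /* rcu_assign_pointer() in the kernel */
    /* pax_close_kernel(): restore read-only */
    return 0;
}

int main(void)
{
    struct notifier_block *head = NULL;
    struct notifier_block a = { NULL, NULL, 1 }, b = { NULL, NULL, 5 };

    chain_register(&head, &a);
    chain_register(&head, &b);
    for (struct notifier_block *p = head; p; p = p->next)
        printf("prio %d\n", p->priority);   /* 5, then 1 */
    return 0;
}
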
77638diff --git a/kernel/panic.c b/kernel/panic.c
77639index e1b2822..5edc1d9 100644
77640--- a/kernel/panic.c
77641+++ b/kernel/panic.c
77642@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
77643 const char *board;
77644
77645 printk(KERN_WARNING "------------[ cut here ]------------\n");
77646- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
77647+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
77648 board = dmi_get_system_info(DMI_PRODUCT_NAME);
77649 if (board)
77650 printk(KERN_WARNING "Hardware name: %s\n", board);
77651@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
77652 */
77653 void __stack_chk_fail(void)
77654 {
77655- panic("stack-protector: Kernel stack is corrupted in: %p\n",
77656+ dump_stack();
77657+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
77658 __builtin_return_address(0));
77659 }
77660 EXPORT_SYMBOL(__stack_chk_fail);
77661diff --git a/kernel/pid.c b/kernel/pid.c
77662index f2c6a68..4922d97 100644
77663--- a/kernel/pid.c
77664+++ b/kernel/pid.c
77665@@ -33,6 +33,7 @@
77666 #include <linux/rculist.h>
77667 #include <linux/bootmem.h>
77668 #include <linux/hash.h>
77669+#include <linux/security.h>
77670 #include <linux/pid_namespace.h>
77671 #include <linux/init_task.h>
77672 #include <linux/syscalls.h>
77673@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
77674
77675 int pid_max = PID_MAX_DEFAULT;
77676
77677-#define RESERVED_PIDS 300
77678+#define RESERVED_PIDS 500
77679
77680 int pid_max_min = RESERVED_PIDS + 1;
77681 int pid_max_max = PID_MAX_LIMIT;
77682@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
77683 */
77684 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
77685 {
77686+ struct task_struct *task;
77687+
77688 rcu_lockdep_assert(rcu_read_lock_held(),
77689 "find_task_by_pid_ns() needs rcu_read_lock()"
77690 " protection");
77691- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77692+
77693+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77694+
77695+ if (gr_pid_is_chrooted(task))
77696+ return NULL;
77697+
77698+ return task;
77699 }
77700
77701 struct task_struct *find_task_by_vpid(pid_t vnr)
77702@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
77703 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
77704 }
77705
77706+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
77707+{
77708+ rcu_lockdep_assert(rcu_read_lock_held(),
77709+ "find_task_by_pid_ns() needs rcu_read_lock()"
77710+ " protection");
77711+ "find_task_by_vpid_unrestricted() needs rcu_read_lock()"
77712+}
77713+
77714 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
77715 {
77716 struct pid *pid;
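
In pid.c above, find_task_by_pid_ns() now filters its result through gr_pid_is_chrooted(), so a chrooted process cannot look up (and therefore signal, ptrace, or otherwise reference) tasks outside its chroot; find_task_by_vpid_unrestricted() is added for the few internal callers that must bypass the filter. A stub-level sketch of the filtering shape (gr_pid_is_chrooted is modeled as a trivial flag check here):

#include <stdio.h>
#include <stdbool.h>

struct task_struct { int pid; bool outside_callers_chroot; };

static bool gr_pid_is_chrooted(struct task_struct *task)    /* stub */
{
    return task && task->outside_callers_chroot;
}

static struct task_struct *find_task_filtered(struct task_struct *task)
{
    if (gr_pid_is_chrooted(task))
        return NULL;    /* hidden from the chrooted caller */
    return task;
}

int main(void)
{
    struct task_struct inside = { 42, false }, outside = { 1, true };

    printf("pid 42 -> %s\n", find_task_filtered(&inside) ? "visible" : "hidden");
    printf("pid  1 -> %s\n", find_task_filtered(&outside) ? "visible" : "hidden");
    return 0;
}
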
77717diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
77718index bea15bd..789f3d0 100644
77719--- a/kernel/pid_namespace.c
77720+++ b/kernel/pid_namespace.c
77721@@ -249,7 +249,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
77722 void __user *buffer, size_t *lenp, loff_t *ppos)
77723 {
77724 struct pid_namespace *pid_ns = task_active_pid_ns(current);
77725- struct ctl_table tmp = *table;
77726+ ctl_table_no_const tmp = *table;
77727
77728 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
77729 return -EPERM;
77730diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
77731index 942ca27..111e609 100644
77732--- a/kernel/posix-cpu-timers.c
77733+++ b/kernel/posix-cpu-timers.c
77734@@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
77735
77736 static __init int init_posix_cpu_timers(void)
77737 {
77738- struct k_clock process = {
77739+ static struct k_clock process = {
77740 .clock_getres = process_cpu_clock_getres,
77741 .clock_get = process_cpu_clock_get,
77742 .timer_create = process_cpu_timer_create,
77743 .nsleep = process_cpu_nsleep,
77744 .nsleep_restart = process_cpu_nsleep_restart,
77745 };
77746- struct k_clock thread = {
77747+ static struct k_clock thread = {
77748 .clock_getres = thread_cpu_clock_getres,
77749 .clock_get = thread_cpu_clock_get,
77750 .timer_create = thread_cpu_timer_create,
77751diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
77752index e885be1..380fe76 100644
77753--- a/kernel/posix-timers.c
77754+++ b/kernel/posix-timers.c
77755@@ -43,6 +43,7 @@
77756 #include <linux/idr.h>
77757 #include <linux/posix-clock.h>
77758 #include <linux/posix-timers.h>
77759+#include <linux/grsecurity.h>
77760 #include <linux/syscalls.h>
77761 #include <linux/wait.h>
77762 #include <linux/workqueue.h>
77763@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
77764 * which we beg off on and pass to do_sys_settimeofday().
77765 */
77766
77767-static struct k_clock posix_clocks[MAX_CLOCKS];
77768+static struct k_clock *posix_clocks[MAX_CLOCKS];
77769
77770 /*
77771 * These ones are defined below.
77772@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
77773 */
77774 static __init int init_posix_timers(void)
77775 {
77776- struct k_clock clock_realtime = {
77777+ static struct k_clock clock_realtime = {
77778 .clock_getres = hrtimer_get_res,
77779 .clock_get = posix_clock_realtime_get,
77780 .clock_set = posix_clock_realtime_set,
77781@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
77782 .timer_get = common_timer_get,
77783 .timer_del = common_timer_del,
77784 };
77785- struct k_clock clock_monotonic = {
77786+ static struct k_clock clock_monotonic = {
77787 .clock_getres = hrtimer_get_res,
77788 .clock_get = posix_ktime_get_ts,
77789 .nsleep = common_nsleep,
77790@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
77791 .timer_get = common_timer_get,
77792 .timer_del = common_timer_del,
77793 };
77794- struct k_clock clock_monotonic_raw = {
77795+ static struct k_clock clock_monotonic_raw = {
77796 .clock_getres = hrtimer_get_res,
77797 .clock_get = posix_get_monotonic_raw,
77798 };
77799- struct k_clock clock_realtime_coarse = {
77800+ static struct k_clock clock_realtime_coarse = {
77801 .clock_getres = posix_get_coarse_res,
77802 .clock_get = posix_get_realtime_coarse,
77803 };
77804- struct k_clock clock_monotonic_coarse = {
77805+ static struct k_clock clock_monotonic_coarse = {
77806 .clock_getres = posix_get_coarse_res,
77807 .clock_get = posix_get_monotonic_coarse,
77808 };
77809- struct k_clock clock_boottime = {
77810+ static struct k_clock clock_boottime = {
77811 .clock_getres = hrtimer_get_res,
77812 .clock_get = posix_get_boottime,
77813 .nsleep = common_nsleep,
77814@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
77815 return;
77816 }
77817
77818- posix_clocks[clock_id] = *new_clock;
77819+ posix_clocks[clock_id] = new_clock;
77820 }
77821 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
77822
77823@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
77824 return (id & CLOCKFD_MASK) == CLOCKFD ?
77825 &clock_posix_dynamic : &clock_posix_cpu;
77826
77827- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
77828+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
77829 return NULL;
77830- return &posix_clocks[id];
77831+ return posix_clocks[id];
77832 }
77833
77834 static int common_timer_create(struct k_itimer *new_timer)
77835@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
77836 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
77837 return -EFAULT;
77838
77839+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
77840+    have their clock_set fptr set to a nosettime dummy function.
77841+    CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
77842+    call common_clock_set, which calls do_sys_settimeofday, which
77843+    we hook.
77844+ */
77845+
77846 return kc->clock_set(which_clock, &new_tp);
77847 }
77848
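
The posix-timers.c change converts posix_clocks[] from an array of struct copies into an array of pointers to static struct k_clock instances: registration stores a pointer instead of copying into writable kernel data, and clockid_to_kclock() gains a NULL test for slots that were never registered. A compact model of the before/after:

#include <stdio.h>

struct k_clock { int (*clock_getres)(void); };

static int dummy_getres(void) { return 0; }

#define MAX_CLOCKS 16
static struct k_clock *posix_clocks[MAX_CLOCKS];

static void register_clock(int id, struct k_clock *kc)
{
    posix_clocks[id] = kc;  /* previously: posix_clocks[id] = *kc; */
}

static struct k_clock *clockid_to_kclock(int id)
{
    if (id < 0 || id >= MAX_CLOCKS || !posix_clocks[id] ||
        !posix_clocks[id]->clock_getres)
        return NULL;
    return posix_clocks[id];
}

int main(void)
{
    static struct k_clock clock_realtime = { dummy_getres };

    register_clock(0, &clock_realtime);
    printf("clock 0: %s\n", clockid_to_kclock(0) ? "usable" : "missing");
    printf("clock 5: %s\n", clockid_to_kclock(5) ? "usable" : "missing");
    return 0;
}

The static storage on the k_clock locals is what the init_posix_timers() hunks above add as well: once only a pointer is kept, a plain automatic would dangle as soon as the init function returned.
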
77849diff --git a/kernel/power/process.c b/kernel/power/process.c
77850index d5a258b..4271191 100644
77851--- a/kernel/power/process.c
77852+++ b/kernel/power/process.c
77853@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
77854 u64 elapsed_csecs64;
77855 unsigned int elapsed_csecs;
77856 bool wakeup = false;
77857+ bool timedout = false;
77858
77859 do_gettimeofday(&start);
77860
77861@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
77862
77863 while (true) {
77864 todo = 0;
77865+ if (time_after(jiffies, end_time))
77866+ timedout = true;
77867 read_lock(&tasklist_lock);
77868 do_each_thread(g, p) {
77869 if (p == current || !freeze_task(p))
77870 continue;
77871
77872- if (!freezer_should_skip(p))
77873+ if (!freezer_should_skip(p)) {
77874 todo++;
77875+ if (timedout) {
77876+ printk(KERN_ERR "Task refusing to freeze:\n");
77877+ sched_show_task(p);
77878+ }
77879+ }
77880 } while_each_thread(g, p);
77881 read_unlock(&tasklist_lock);
77882
77883@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
77884 todo += wq_busy;
77885 }
77886
77887- if (!todo || time_after(jiffies, end_time))
77888+ if (!todo || timedout)
77889 break;
77890
77891 if (pm_wakeup_pending()) {
77892diff --git a/kernel/printk.c b/kernel/printk.c
77893index 267ce78..2487112 100644
77894--- a/kernel/printk.c
77895+++ b/kernel/printk.c
77896@@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
77897 return ret;
77898 }
77899
77900+static int check_syslog_permissions(int type, bool from_file);
77901+
77902 static int devkmsg_open(struct inode *inode, struct file *file)
77903 {
77904 struct devkmsg_user *user;
77905 int err;
77906
77907+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
77908+ if (err)
77909+ return err;
77910+
77911 /* write-only does not need any file context */
77912 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
77913 return 0;
77914@@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
77915 if (dmesg_restrict)
77916 return 1;
77917 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
77918- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
77919+ return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
77920 }
77921
77922 static int check_syslog_permissions(int type, bool from_file)
77923@@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
77924 if (from_file && type != SYSLOG_ACTION_OPEN)
77925 return 0;
77926
77927+#ifdef CONFIG_GRKERNSEC_DMESG
77928+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
77929+ return -EPERM;
77930+#endif
77931+
77932 if (syslog_action_restricted(type)) {
77933 if (capable(CAP_SYSLOG))
77934 return 0;
77935diff --git a/kernel/profile.c b/kernel/profile.c
77936index 1f39181..86093471 100644
77937--- a/kernel/profile.c
77938+++ b/kernel/profile.c
77939@@ -40,7 +40,7 @@ struct profile_hit {
77940 /* Oprofile timer tick hook */
77941 static int (*timer_hook)(struct pt_regs *) __read_mostly;
77942
77943-static atomic_t *prof_buffer;
77944+static atomic_unchecked_t *prof_buffer;
77945 static unsigned long prof_len, prof_shift;
77946
77947 int prof_on __read_mostly;
77948@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
77949 hits[i].pc = 0;
77950 continue;
77951 }
77952- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77953+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77954 hits[i].hits = hits[i].pc = 0;
77955 }
77956 }
77957@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77958 * Add the current hit(s) and flush the write-queue out
77959 * to the global buffer:
77960 */
77961- atomic_add(nr_hits, &prof_buffer[pc]);
77962+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
77963 for (i = 0; i < NR_PROFILE_HIT; ++i) {
77964- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77965+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77966 hits[i].pc = hits[i].hits = 0;
77967 }
77968 out:
77969@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77970 {
77971 unsigned long pc;
77972 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
77973- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77974+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77975 }
77976 #endif /* !CONFIG_SMP */
77977
77978@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
77979 return -EFAULT;
77980 buf++; p++; count--; read++;
77981 }
77982- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
77983+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
77984 if (copy_to_user(buf, (void *)pnt, count))
77985 return -EFAULT;
77986 read += count;
77987@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
77988 }
77989 #endif
77990 profile_discard_flip_buffers();
77991- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
77992+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
77993 return count;
77994 }
77995
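
profile.c's hit counters move from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT scheme, ordinary atomic_t arithmetic is instrumented to trap on overflow so reference counts cannot wrap; counters that are pure statistics and may legitimately wrap, like these profiling buckets, opt out through the _unchecked variants. A userspace model of the distinction (unsigned arithmetic stands in for the real trap-on-overflow instrumentation):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { unsigned int counter; } atomic_t;            /* checked model */
typedef struct { unsigned int counter; } atomic_unchecked_t;  /* wrap allowed */

static void atomic_add(unsigned int i, atomic_t *v)
{
    if (v->counter > UINT_MAX - i) {    /* stand-in for the PaX overflow trap */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    v->counter += i;
}

static void atomic_add_unchecked(unsigned int i, atomic_unchecked_t *v)
{
    v->counter += i;    /* statistics counter: wrapping is acceptable */
}

int main(void)
{
    atomic_t ref = { 0 };
    atomic_unchecked_t hits = { UINT_MAX };

    atomic_add(1, &ref);
    atomic_add_unchecked(1, &hits);     /* wraps to 0, by design */
    printf("ref=%u hits=%u\n", ref.counter, hits.counter);
    return 0;
}
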
77996diff --git a/kernel/ptrace.c b/kernel/ptrace.c
77997index 6cbeaae..cfe7ff0 100644
77998--- a/kernel/ptrace.c
77999+++ b/kernel/ptrace.c
78000@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
78001 if (seize)
78002 flags |= PT_SEIZED;
78003 rcu_read_lock();
78004- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
78005+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
78006 flags |= PT_PTRACE_CAP;
78007 rcu_read_unlock();
78008 task->ptrace = flags;
78009@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
78010 break;
78011 return -EIO;
78012 }
78013- if (copy_to_user(dst, buf, retval))
78014+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
78015 return -EFAULT;
78016 copied += retval;
78017 src += retval;
78018@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
78019 bool seized = child->ptrace & PT_SEIZED;
78020 int ret = -EIO;
78021 siginfo_t siginfo, *si;
78022- void __user *datavp = (void __user *) data;
78023+ void __user *datavp = (__force void __user *) data;
78024 unsigned long __user *datalp = datavp;
78025 unsigned long flags;
78026
78027@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
78028 goto out;
78029 }
78030
78031+ if (gr_handle_ptrace(child, request)) {
78032+ ret = -EPERM;
78033+ goto out_put_task_struct;
78034+ }
78035+
78036 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
78037 ret = ptrace_attach(child, request, addr, data);
78038 /*
78039 * Some architectures need to do book-keeping after
78040 * a ptrace attach.
78041 */
78042- if (!ret)
78043+ if (!ret) {
78044 arch_ptrace_attach(child);
78045+ gr_audit_ptrace(child);
78046+ }
78047 goto out_put_task_struct;
78048 }
78049
78050@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
78051 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
78052 if (copied != sizeof(tmp))
78053 return -EIO;
78054- return put_user(tmp, (unsigned long __user *)data);
78055+ return put_user(tmp, (__force unsigned long __user *)data);
78056 }
78057
78058 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
78059@@ -1051,7 +1058,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
78060 }
78061
78062 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
78063- compat_long_t addr, compat_long_t data)
78064+ compat_ulong_t addr, compat_ulong_t data)
78065 {
78066 struct task_struct *child;
78067 long ret;
78068@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
78069 goto out;
78070 }
78071
78072+ if (gr_handle_ptrace(child, request)) {
78073+ ret = -EPERM;
78074+ goto out_put_task_struct;
78075+ }
78076+
78077 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
78078 ret = ptrace_attach(child, request, addr, data);
78079 /*
78080 * Some architectures need to do book-keeping after
78081 * a ptrace attach.
78082 */
78083- if (!ret)
78084+ if (!ret) {
78085 arch_ptrace_attach(child);
78086+ gr_audit_ptrace(child);
78087+ }
78088 goto out_put_task_struct;
78089 }
78090
78091diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
78092index e7dce58..ad0d7b7 100644
78093--- a/kernel/rcutiny.c
78094+++ b/kernel/rcutiny.c
78095@@ -46,7 +46,7 @@
78096 struct rcu_ctrlblk;
78097 static void invoke_rcu_callbacks(void);
78098 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
78099-static void rcu_process_callbacks(struct softirq_action *unused);
78100+static void rcu_process_callbacks(void);
78101 static void __call_rcu(struct rcu_head *head,
78102 void (*func)(struct rcu_head *rcu),
78103 struct rcu_ctrlblk *rcp);
78104@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
78105 rcu_is_callbacks_kthread()));
78106 }
78107
78108-static void rcu_process_callbacks(struct softirq_action *unused)
78109+static void rcu_process_callbacks(void)
78110 {
78111 __rcu_process_callbacks(&rcu_sched_ctrlblk);
78112 __rcu_process_callbacks(&rcu_bh_ctrlblk);
78113diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
78114index f85016a..91cb03b 100644
78115--- a/kernel/rcutiny_plugin.h
78116+++ b/kernel/rcutiny_plugin.h
78117@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
78118 have_rcu_kthread_work = morework;
78119 local_irq_restore(flags);
78120 if (work)
78121- rcu_process_callbacks(NULL);
78122+ rcu_process_callbacks();
78123 schedule_timeout_interruptible(1); /* Leave CPU for others. */
78124 }
78125
78126diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
78127index 31dea01..ad91ffb 100644
78128--- a/kernel/rcutorture.c
78129+++ b/kernel/rcutorture.c
78130@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
78131 { 0 };
78132 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
78133 { 0 };
78134-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
78135-static atomic_t n_rcu_torture_alloc;
78136-static atomic_t n_rcu_torture_alloc_fail;
78137-static atomic_t n_rcu_torture_free;
78138-static atomic_t n_rcu_torture_mberror;
78139-static atomic_t n_rcu_torture_error;
78140+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
78141+static atomic_unchecked_t n_rcu_torture_alloc;
78142+static atomic_unchecked_t n_rcu_torture_alloc_fail;
78143+static atomic_unchecked_t n_rcu_torture_free;
78144+static atomic_unchecked_t n_rcu_torture_mberror;
78145+static atomic_unchecked_t n_rcu_torture_error;
78146 static long n_rcu_torture_barrier_error;
78147 static long n_rcu_torture_boost_ktrerror;
78148 static long n_rcu_torture_boost_rterror;
78149@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
78150
78151 spin_lock_bh(&rcu_torture_lock);
78152 if (list_empty(&rcu_torture_freelist)) {
78153- atomic_inc(&n_rcu_torture_alloc_fail);
78154+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
78155 spin_unlock_bh(&rcu_torture_lock);
78156 return NULL;
78157 }
78158- atomic_inc(&n_rcu_torture_alloc);
78159+ atomic_inc_unchecked(&n_rcu_torture_alloc);
78160 p = rcu_torture_freelist.next;
78161 list_del_init(p);
78162 spin_unlock_bh(&rcu_torture_lock);
78163@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
78164 static void
78165 rcu_torture_free(struct rcu_torture *p)
78166 {
78167- atomic_inc(&n_rcu_torture_free);
78168+ atomic_inc_unchecked(&n_rcu_torture_free);
78169 spin_lock_bh(&rcu_torture_lock);
78170 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
78171 spin_unlock_bh(&rcu_torture_lock);
78172@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
78173 i = rp->rtort_pipe_count;
78174 if (i > RCU_TORTURE_PIPE_LEN)
78175 i = RCU_TORTURE_PIPE_LEN;
78176- atomic_inc(&rcu_torture_wcount[i]);
78177+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78178 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
78179 rp->rtort_mbtest = 0;
78180 rcu_torture_free(rp);
78181@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
78182 i = rp->rtort_pipe_count;
78183 if (i > RCU_TORTURE_PIPE_LEN)
78184 i = RCU_TORTURE_PIPE_LEN;
78185- atomic_inc(&rcu_torture_wcount[i]);
78186+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78187 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
78188 rp->rtort_mbtest = 0;
78189 list_del(&rp->rtort_free);
78190@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
78191 i = old_rp->rtort_pipe_count;
78192 if (i > RCU_TORTURE_PIPE_LEN)
78193 i = RCU_TORTURE_PIPE_LEN;
78194- atomic_inc(&rcu_torture_wcount[i]);
78195+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78196 old_rp->rtort_pipe_count++;
78197 cur_ops->deferred_free(old_rp);
78198 }
78199@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
78200 }
78201 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
78202 if (p->rtort_mbtest == 0)
78203- atomic_inc(&n_rcu_torture_mberror);
78204+ atomic_inc_unchecked(&n_rcu_torture_mberror);
78205 spin_lock(&rand_lock);
78206 cur_ops->read_delay(&rand);
78207 n_rcu_torture_timers++;
78208@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
78209 }
78210 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
78211 if (p->rtort_mbtest == 0)
78212- atomic_inc(&n_rcu_torture_mberror);
78213+ atomic_inc_unchecked(&n_rcu_torture_mberror);
78214 cur_ops->read_delay(&rand);
78215 preempt_disable();
78216 pipe_count = p->rtort_pipe_count;
78217@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
78218 rcu_torture_current,
78219 rcu_torture_current_version,
78220 list_empty(&rcu_torture_freelist),
78221- atomic_read(&n_rcu_torture_alloc),
78222- atomic_read(&n_rcu_torture_alloc_fail),
78223- atomic_read(&n_rcu_torture_free));
78224+ atomic_read_unchecked(&n_rcu_torture_alloc),
78225+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
78226+ atomic_read_unchecked(&n_rcu_torture_free));
78227 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
78228- atomic_read(&n_rcu_torture_mberror),
78229+ atomic_read_unchecked(&n_rcu_torture_mberror),
78230 n_rcu_torture_boost_ktrerror,
78231 n_rcu_torture_boost_rterror);
78232 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
78233@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
78234 n_barrier_attempts,
78235 n_rcu_torture_barrier_error);
78236 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
78237- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
78238+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
78239 n_rcu_torture_barrier_error != 0 ||
78240 n_rcu_torture_boost_ktrerror != 0 ||
78241 n_rcu_torture_boost_rterror != 0 ||
78242 n_rcu_torture_boost_failure != 0 ||
78243 i > 1) {
78244 cnt += sprintf(&page[cnt], "!!! ");
78245- atomic_inc(&n_rcu_torture_error);
78246+ atomic_inc_unchecked(&n_rcu_torture_error);
78247 WARN_ON_ONCE(1);
78248 }
78249 cnt += sprintf(&page[cnt], "Reader Pipe: ");
78250@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
78251 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
78252 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
78253 cnt += sprintf(&page[cnt], " %d",
78254- atomic_read(&rcu_torture_wcount[i]));
78255+ atomic_read_unchecked(&rcu_torture_wcount[i]));
78256 }
78257 cnt += sprintf(&page[cnt], "\n");
78258 if (cur_ops->stats)
78259@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
78260
78261 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
78262
78263- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
78264+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
78265 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
78266 else if (n_online_successes != n_online_attempts ||
78267 n_offline_successes != n_offline_attempts)
78268@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
78269
78270 rcu_torture_current = NULL;
78271 rcu_torture_current_version = 0;
78272- atomic_set(&n_rcu_torture_alloc, 0);
78273- atomic_set(&n_rcu_torture_alloc_fail, 0);
78274- atomic_set(&n_rcu_torture_free, 0);
78275- atomic_set(&n_rcu_torture_mberror, 0);
78276- atomic_set(&n_rcu_torture_error, 0);
78277+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
78278+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
78279+ atomic_set_unchecked(&n_rcu_torture_free, 0);
78280+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
78281+ atomic_set_unchecked(&n_rcu_torture_error, 0);
78282 n_rcu_torture_barrier_error = 0;
78283 n_rcu_torture_boost_ktrerror = 0;
78284 n_rcu_torture_boost_rterror = 0;
78285 n_rcu_torture_boost_failure = 0;
78286 n_rcu_torture_boosts = 0;
78287 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
78288- atomic_set(&rcu_torture_wcount[i], 0);
78289+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
78290 for_each_possible_cpu(cpu) {
78291 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
78292 per_cpu(rcu_torture_count, cpu)[i] = 0;
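
All of the rcutorture counters converted above are pure statistics, which is exactly the case PaX's REFCOUNT hardening carves out: atomic_t is instrumented to trap on overflow (a wrapped reference count becomes a use-after-free), and counters where wraparound is harmless are moved to atomic_unchecked_t so they do not trip the check. A standalone C11 sketch of the distinction (the saturating helper is an illustration of the policy, not the PaX implementation, which traps inside the increment itself):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int  refcount = 1;   /* wrap here would be a security bug */
static atomic_uint stat_events;    /* wrap here is harmless */

/* "Checked" increment: saturate rather than overflow. */
static int ref_get_checked(void)
{
    int old = atomic_load(&refcount);
    do {
        if (old == INT_MAX)
            return -1;
    } while (!atomic_compare_exchange_weak(&refcount, &old, old + 1));
    return 0;
}

int main(void)
{
    atomic_fetch_add(&stat_events, 1);      /* "unchecked": plain add */
    printf("ref ok=%d, events=%u\n", ref_get_checked(),
           atomic_load(&stat_events));
    return 0;
}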
78293diff --git a/kernel/rcutree.c b/kernel/rcutree.c
78294index e441b77..dd54f17 100644
78295--- a/kernel/rcutree.c
78296+++ b/kernel/rcutree.c
78297@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
78298 rcu_prepare_for_idle(smp_processor_id());
78299 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
78300 smp_mb__before_atomic_inc(); /* See above. */
78301- atomic_inc(&rdtp->dynticks);
78302+ atomic_inc_unchecked(&rdtp->dynticks);
78303 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
78304- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
78305+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
78306
78307 /*
78308 * It is illegal to enter an extended quiescent state while
78309@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
78310 int user)
78311 {
78312 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
78313- atomic_inc(&rdtp->dynticks);
78314+ atomic_inc_unchecked(&rdtp->dynticks);
78315 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
78316 smp_mb__after_atomic_inc(); /* See above. */
78317- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
78318+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78319 rcu_cleanup_after_idle(smp_processor_id());
78320 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
78321 if (!user && !is_idle_task(current)) {
78322@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
78323 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
78324
78325 if (rdtp->dynticks_nmi_nesting == 0 &&
78326- (atomic_read(&rdtp->dynticks) & 0x1))
78327+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
78328 return;
78329 rdtp->dynticks_nmi_nesting++;
78330 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
78331- atomic_inc(&rdtp->dynticks);
78332+ atomic_inc_unchecked(&rdtp->dynticks);
78333 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
78334 smp_mb__after_atomic_inc(); /* See above. */
78335- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
78336+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78337 }
78338
78339 /**
78340@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
78341 return;
78342 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
78343 smp_mb__before_atomic_inc(); /* See above. */
78344- atomic_inc(&rdtp->dynticks);
78345+ atomic_inc_unchecked(&rdtp->dynticks);
78346 smp_mb__after_atomic_inc(); /* Force delay to next write. */
78347- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
78348+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
78349 }
78350
78351 /**
78352@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
78353 int ret;
78354
78355 preempt_disable();
78356- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78357+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78358 preempt_enable();
78359 return ret;
78360 }
78361@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
78362 */
78363 static int dyntick_save_progress_counter(struct rcu_data *rdp)
78364 {
78365- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
78366+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78367 return (rdp->dynticks_snap & 0x1) == 0;
78368 }
78369
78370@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
78371 unsigned int curr;
78372 unsigned int snap;
78373
78374- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
78375+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78376 snap = (unsigned int)rdp->dynticks_snap;
78377
78378 /*
78379@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
78380 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
78381 */
78382 if (till_stall_check < 3) {
78383- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
78384+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
78385 till_stall_check = 3;
78386 } else if (till_stall_check > 300) {
78387- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
78388+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
78389 till_stall_check = 300;
78390 }
78391 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
78392@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
78393 rsp->qlen += rdp->qlen;
78394 rdp->n_cbs_orphaned += rdp->qlen;
78395 rdp->qlen_lazy = 0;
78396- ACCESS_ONCE(rdp->qlen) = 0;
78397+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78398 }
78399
78400 /*
78401@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
78402 }
78403 smp_mb(); /* List handling before counting for rcu_barrier(). */
78404 rdp->qlen_lazy -= count_lazy;
78405- ACCESS_ONCE(rdp->qlen) -= count;
78406+ ACCESS_ONCE_RW(rdp->qlen) -= count;
78407 rdp->n_cbs_invoked += count;
78408
78409 /* Reinstate batch limit if we have worked down the excess. */
78410@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
78411 /*
78412 * Do RCU core processing for the current CPU.
78413 */
78414-static void rcu_process_callbacks(struct softirq_action *unused)
78415+static void rcu_process_callbacks(void)
78416 {
78417 struct rcu_state *rsp;
78418
78419@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
78420 local_irq_restore(flags);
78421 return;
78422 }
78423- ACCESS_ONCE(rdp->qlen)++;
78424+ ACCESS_ONCE_RW(rdp->qlen)++;
78425 if (lazy)
78426 rdp->qlen_lazy++;
78427 else
78428@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
78429 * counter wrap on a 32-bit system. Quite a few more CPUs would of
78430 * course be required on a 64-bit system.
78431 */
78432- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
78433+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
78434 (ulong)atomic_long_read(&rsp->expedited_done) +
78435 ULONG_MAX / 8)) {
78436 synchronize_sched();
78437- atomic_long_inc(&rsp->expedited_wrap);
78438+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
78439 return;
78440 }
78441
78442@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
78443 * Take a ticket. Note that atomic_inc_return() implies a
78444 * full memory barrier.
78445 */
78446- snap = atomic_long_inc_return(&rsp->expedited_start);
78447+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
78448 firstsnap = snap;
78449 get_online_cpus();
78450 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
78451@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
78452 synchronize_sched_expedited_cpu_stop,
78453 NULL) == -EAGAIN) {
78454 put_online_cpus();
78455- atomic_long_inc(&rsp->expedited_tryfail);
78456+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
78457
78458 /* Check to see if someone else did our work for us. */
78459 s = atomic_long_read(&rsp->expedited_done);
78460 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78461 /* ensure test happens before caller kfree */
78462 smp_mb__before_atomic_inc(); /* ^^^ */
78463- atomic_long_inc(&rsp->expedited_workdone1);
78464+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
78465 return;
78466 }
78467
78468@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
78469 udelay(trycount * num_online_cpus());
78470 } else {
78471 wait_rcu_gp(call_rcu_sched);
78472- atomic_long_inc(&rsp->expedited_normal);
78473+ atomic_long_inc_unchecked(&rsp->expedited_normal);
78474 return;
78475 }
78476
78477@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
78478 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78479 /* ensure test happens before caller kfree */
78480 smp_mb__before_atomic_inc(); /* ^^^ */
78481- atomic_long_inc(&rsp->expedited_workdone2);
78482+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
78483 return;
78484 }
78485
78486@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
78487 * period works for us.
78488 */
78489 get_online_cpus();
78490- snap = atomic_long_read(&rsp->expedited_start);
78491+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
78492 smp_mb(); /* ensure read is before try_stop_cpus(). */
78493 }
78494- atomic_long_inc(&rsp->expedited_stoppedcpus);
78495+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
78496
78497 /*
78498 * Everyone up to our most recent fetch is covered by our grace
78499@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
78500 * than we did already did their update.
78501 */
78502 do {
78503- atomic_long_inc(&rsp->expedited_done_tries);
78504+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
78505 s = atomic_long_read(&rsp->expedited_done);
78506 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
78507 /* ensure test happens before caller kfree */
78508 smp_mb__before_atomic_inc(); /* ^^^ */
78509- atomic_long_inc(&rsp->expedited_done_lost);
78510+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
78511 break;
78512 }
78513 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
78514- atomic_long_inc(&rsp->expedited_done_exit);
78515+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
78516
78517 put_online_cpus();
78518 }
78519@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78520 * ACCESS_ONCE() to prevent the compiler from speculating
78521 * the increment to precede the early-exit check.
78522 */
78523- ACCESS_ONCE(rsp->n_barrier_done)++;
78524+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78525 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
78526 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
78527 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
78528@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78529
78530 /* Increment ->n_barrier_done to prevent duplicate work. */
78531 smp_mb(); /* Keep increment after above mechanism. */
78532- ACCESS_ONCE(rsp->n_barrier_done)++;
78533+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78534 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
78535 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
78536 smp_mb(); /* Keep increment before caller's subsequent code. */
78537@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
78538 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
78539 init_callback_list(rdp);
78540 rdp->qlen_lazy = 0;
78541- ACCESS_ONCE(rdp->qlen) = 0;
78542+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78543 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
78544 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
78545- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
78546+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
78547 #ifdef CONFIG_RCU_USER_QS
78548 WARN_ON_ONCE(rdp->dynticks->in_user);
78549 #endif
78550@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
78551 rdp->blimit = blimit;
78552 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
78553 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
78554- atomic_set(&rdp->dynticks->dynticks,
78555- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
78556+ atomic_set_unchecked(&rdp->dynticks->dynticks,
78557+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
78558 rcu_prepare_for_idle_init(cpu);
78559 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
78560
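
The rdtp->dynticks counter touched throughout this file encodes state in its parity: it is incremented on every transition into or out of an extended quiescent state, so an even value means the CPU is idle and an odd value means it is not, which is what all the & 0x1 assertions above verify. The atomic_add_return(0, ...) calls are full-barrier reads used to snapshot it. A toy model of the protocol:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long dynticks = 1;    /* odd: CPU currently non-idle */

static void eqs_enter(void) { atomic_fetch_add(&dynticks, 1); } /* -> even */
static void eqs_exit(void)  { atomic_fetch_add(&dynticks, 1); } /* -> odd  */

/* A grace-period poller compares a snapshot with the current value:
 * an even snapshot, or any change since, proves a quiescent state. */
static bool passed_quiescent_state(long snap, long curr)
{
    return (snap & 0x1) == 0 || curr != snap;
}

int main(void)
{
    long snap = atomic_load(&dynticks);     /* odd: not yet quiescent */
    eqs_enter();                            /* CPU goes idle */
    printf("quiescent=%d\n",
           passed_quiescent_state(snap, atomic_load(&dynticks)));
    eqs_exit();
    return 0;
}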
78561diff --git a/kernel/rcutree.h b/kernel/rcutree.h
78562index 4b69291..704c92e 100644
78563--- a/kernel/rcutree.h
78564+++ b/kernel/rcutree.h
78565@@ -86,7 +86,7 @@ struct rcu_dynticks {
78566 long long dynticks_nesting; /* Track irq/process nesting level. */
78567 /* Process level is worth LLONG_MAX/2. */
78568 int dynticks_nmi_nesting; /* Track NMI nesting level. */
78569- atomic_t dynticks; /* Even value for idle, else odd. */
78570+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
78571 #ifdef CONFIG_RCU_FAST_NO_HZ
78572 int dyntick_drain; /* Prepare-for-idle state variable. */
78573 unsigned long dyntick_holdoff;
78574@@ -423,17 +423,17 @@ struct rcu_state {
78575 /* _rcu_barrier(). */
78576 /* End of fields guarded by barrier_mutex. */
78577
78578- atomic_long_t expedited_start; /* Starting ticket. */
78579- atomic_long_t expedited_done; /* Done ticket. */
78580- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
78581- atomic_long_t expedited_tryfail; /* # acquisition failures. */
78582- atomic_long_t expedited_workdone1; /* # done by others #1. */
78583- atomic_long_t expedited_workdone2; /* # done by others #2. */
78584- atomic_long_t expedited_normal; /* # fallbacks to normal. */
78585- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
78586- atomic_long_t expedited_done_tries; /* # tries to update _done. */
78587- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
78588- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
78589+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
78590+ atomic_long_t expedited_done; /* Done ticket. */
78591+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
78592+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
78593+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
78594+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
78595+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
78596+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
78597+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
78598+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
78599+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
78600
78601 unsigned long jiffies_force_qs; /* Time at which to invoke */
78602 /* force_quiescent_state(). */
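
Note which field stays checked: expedited_done still drives correctness (its value is compared against tickets), while the renamed _unchecked fields are statistics only. The ticket comparisons in rcutree.c use the kernel's ULONG_CMP_GE(), which subtracts first so the test survives counter wraparound:

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a >= b" for free-running unsigned counters, as defined
 * in the kernel's rcupdate.h. */
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
    unsigned long start = ULONG_MAX - 1;  /* ticket taken just before wrap */
    unsigned long done  = 2;              /* counter has since wrapped     */
    printf("done >= start: %d\n", (int)ULONG_CMP_GE(done, start)); /* 1 */
    printf("start >= done: %d\n", (int)ULONG_CMP_GE(start, done)); /* 0 */
    return 0;
}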
78603diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
78604index c1cc7e1..f62e436 100644
78605--- a/kernel/rcutree_plugin.h
78606+++ b/kernel/rcutree_plugin.h
78607@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
78608
78609 /* Clean up and exit. */
78610 smp_mb(); /* ensure expedited GP seen before counter increment. */
78611- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
78612+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
78613 unlock_mb_ret:
78614 mutex_unlock(&sync_rcu_preempt_exp_mutex);
78615 mb_ret:
78616@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
78617 free_cpumask_var(cm);
78618 }
78619
78620-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
78621+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
78622 .store = &rcu_cpu_kthread_task,
78623 .thread_should_run = rcu_cpu_kthread_should_run,
78624 .thread_fn = rcu_cpu_kthread,
78625@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
78626 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
78627 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
78628 cpu, ticks_value, ticks_title,
78629- atomic_read(&rdtp->dynticks) & 0xfff,
78630+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
78631 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
78632 fast_no_hz);
78633 }
78634@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
78635
78636 /* Enqueue the callback on the nocb list and update counts. */
78637 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
78638- ACCESS_ONCE(*old_rhpp) = rhp;
78639+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
78640 atomic_long_add(rhcount, &rdp->nocb_q_count);
78641 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
78642
78643@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
78644 * Extract queued callbacks, update counts, and wait
78645 * for a grace period to elapse.
78646 */
78647- ACCESS_ONCE(rdp->nocb_head) = NULL;
78648+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
78649 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
78650 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
78651 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
78652- ACCESS_ONCE(rdp->nocb_p_count) += c;
78653- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
78654+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
78655+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
78656 wait_rcu_gp(rdp->rsp->call_remote);
78657
78658 /* Each pass through the following loop invokes a callback. */
78659@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
78660 list = next;
78661 }
78662 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
78663- ACCESS_ONCE(rdp->nocb_p_count) -= c;
78664- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
78665+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
78666+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
78667 rdp->n_nocbs_invoked += c;
78668 }
78669 return 0;
78670@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
78671 rdp = per_cpu_ptr(rsp->rda, cpu);
78672 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
78673 BUG_ON(IS_ERR(t));
78674- ACCESS_ONCE(rdp->nocb_kthread) = t;
78675+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
78676 }
78677 }
78678
78679diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
78680index 0d095dc..1985b19 100644
78681--- a/kernel/rcutree_trace.c
78682+++ b/kernel/rcutree_trace.c
78683@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
78684 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
78685 rdp->passed_quiesce, rdp->qs_pending);
78686 seq_printf(m, " dt=%d/%llx/%d df=%lu",
78687- atomic_read(&rdp->dynticks->dynticks),
78688+ atomic_read_unchecked(&rdp->dynticks->dynticks),
78689 rdp->dynticks->dynticks_nesting,
78690 rdp->dynticks->dynticks_nmi_nesting,
78691 rdp->dynticks_fqs);
78692@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
78693 struct rcu_state *rsp = (struct rcu_state *)m->private;
78694
78695 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
78696- atomic_long_read(&rsp->expedited_start),
78697+ atomic_long_read_unchecked(&rsp->expedited_start),
78698 atomic_long_read(&rsp->expedited_done),
78699- atomic_long_read(&rsp->expedited_wrap),
78700- atomic_long_read(&rsp->expedited_tryfail),
78701- atomic_long_read(&rsp->expedited_workdone1),
78702- atomic_long_read(&rsp->expedited_workdone2),
78703- atomic_long_read(&rsp->expedited_normal),
78704- atomic_long_read(&rsp->expedited_stoppedcpus),
78705- atomic_long_read(&rsp->expedited_done_tries),
78706- atomic_long_read(&rsp->expedited_done_lost),
78707- atomic_long_read(&rsp->expedited_done_exit));
78708+ atomic_long_read_unchecked(&rsp->expedited_wrap),
78709+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
78710+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
78711+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
78712+ atomic_long_read_unchecked(&rsp->expedited_normal),
78713+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
78714+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
78715+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
78716+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
78717 return 0;
78718 }
78719
78720diff --git a/kernel/resource.c b/kernel/resource.c
78721index 73f35d4..4684fc4 100644
78722--- a/kernel/resource.c
78723+++ b/kernel/resource.c
78724@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
78725
78726 static int __init ioresources_init(void)
78727 {
78728+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78729+#ifdef CONFIG_GRKERNSEC_PROC_USER
78730+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
78731+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
78732+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78733+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
78734+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
78735+#endif
78736+#else
78737 proc_create("ioports", 0, NULL, &proc_ioports_operations);
78738 proc_create("iomem", 0, NULL, &proc_iomem_operations);
78739+#endif
78740 return 0;
78741 }
78742 __initcall(ioresources_init);
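
The ioresources_init() hunk hides the machine's physical address layout from unprivileged users: with CONFIG_GRKERNSEC_PROC_USER the files become mode 0400 (root only), with CONFIG_GRKERNSEC_PROC_USERGROUP mode 0440 (root plus a configured group), instead of the stock world-readable entries (proc_create() mode 0 defaults to 0444). The effect is easy to confirm from userspace on a kernel built this way:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    const char *files[] = { "/proc/ioports", "/proc/iomem" };
    struct stat st;

    for (int i = 0; i < 2; i++)
        if (stat(files[i], &st) == 0)   /* expect 0400/0440 when hardened */
            printf("%-13s mode %04o\n", files[i],
                   (unsigned)st.st_mode & 07777);
    return 0;
}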
78743diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
78744index 98ec494..4241d6d 100644
78745--- a/kernel/rtmutex-tester.c
78746+++ b/kernel/rtmutex-tester.c
78747@@ -20,7 +20,7 @@
78748 #define MAX_RT_TEST_MUTEXES 8
78749
78750 static spinlock_t rttest_lock;
78751-static atomic_t rttest_event;
78752+static atomic_unchecked_t rttest_event;
78753
78754 struct test_thread_data {
78755 int opcode;
78756@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78757
78758 case RTTEST_LOCKCONT:
78759 td->mutexes[td->opdata] = 1;
78760- td->event = atomic_add_return(1, &rttest_event);
78761+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78762 return 0;
78763
78764 case RTTEST_RESET:
78765@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78766 return 0;
78767
78768 case RTTEST_RESETEVENT:
78769- atomic_set(&rttest_event, 0);
78770+ atomic_set_unchecked(&rttest_event, 0);
78771 return 0;
78772
78773 default:
78774@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78775 return ret;
78776
78777 td->mutexes[id] = 1;
78778- td->event = atomic_add_return(1, &rttest_event);
78779+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78780 rt_mutex_lock(&mutexes[id]);
78781- td->event = atomic_add_return(1, &rttest_event);
78782+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78783 td->mutexes[id] = 4;
78784 return 0;
78785
78786@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78787 return ret;
78788
78789 td->mutexes[id] = 1;
78790- td->event = atomic_add_return(1, &rttest_event);
78791+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78792 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
78793- td->event = atomic_add_return(1, &rttest_event);
78794+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78795 td->mutexes[id] = ret ? 0 : 4;
78796 return ret ? -EINTR : 0;
78797
78798@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78799 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
78800 return ret;
78801
78802- td->event = atomic_add_return(1, &rttest_event);
78803+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78804 rt_mutex_unlock(&mutexes[id]);
78805- td->event = atomic_add_return(1, &rttest_event);
78806+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78807 td->mutexes[id] = 0;
78808 return 0;
78809
78810@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78811 break;
78812
78813 td->mutexes[dat] = 2;
78814- td->event = atomic_add_return(1, &rttest_event);
78815+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78816 break;
78817
78818 default:
78819@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78820 return;
78821
78822 td->mutexes[dat] = 3;
78823- td->event = atomic_add_return(1, &rttest_event);
78824+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78825 break;
78826
78827 case RTTEST_LOCKNOWAIT:
78828@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78829 return;
78830
78831 td->mutexes[dat] = 1;
78832- td->event = atomic_add_return(1, &rttest_event);
78833+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78834 return;
78835
78836 default:
78837diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
78838index 0984a21..939f183 100644
78839--- a/kernel/sched/auto_group.c
78840+++ b/kernel/sched/auto_group.c
78841@@ -11,7 +11,7 @@
78842
78843 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
78844 static struct autogroup autogroup_default;
78845-static atomic_t autogroup_seq_nr;
78846+static atomic_unchecked_t autogroup_seq_nr;
78847
78848 void __init autogroup_init(struct task_struct *init_task)
78849 {
78850@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
78851
78852 kref_init(&ag->kref);
78853 init_rwsem(&ag->lock);
78854- ag->id = atomic_inc_return(&autogroup_seq_nr);
78855+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
78856 ag->tg = tg;
78857 #ifdef CONFIG_RT_GROUP_SCHED
78858 /*
78859diff --git a/kernel/sched/core.c b/kernel/sched/core.c
78860index 5e2f7c3..4002d41 100644
78861--- a/kernel/sched/core.c
78862+++ b/kernel/sched/core.c
78863@@ -3369,7 +3369,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
78864 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78865 * positive (at least 1, or number of jiffies left till timeout) if completed.
78866 */
78867-long __sched
78868+long __sched __intentional_overflow(-1)
78869 wait_for_completion_interruptible_timeout(struct completion *x,
78870 unsigned long timeout)
78871 {
78872@@ -3386,7 +3386,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
78873 *
78874 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
78875 */
78876-int __sched wait_for_completion_killable(struct completion *x)
78877+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
78878 {
78879 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
78880 if (t == -ERESTARTSYS)
78881@@ -3407,7 +3407,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
78882 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78883 * positive (at least 1, or number of jiffies left till timeout) if completed.
78884 */
78885-long __sched
78886+long __sched __intentional_overflow(-1)
78887 wait_for_completion_killable_timeout(struct completion *x,
78888 unsigned long timeout)
78889 {
78890@@ -3633,6 +3633,8 @@ int can_nice(const struct task_struct *p, const int nice)
78891 /* convert nice value [19,-20] to rlimit style value [1,40] */
78892 int nice_rlim = 20 - nice;
78893
78894+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
78895+
78896 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
78897 capable(CAP_SYS_NICE));
78898 }
78899@@ -3666,7 +3668,8 @@ SYSCALL_DEFINE1(nice, int, increment)
78900 if (nice > 19)
78901 nice = 19;
78902
78903- if (increment < 0 && !can_nice(current, nice))
78904+ if (increment < 0 && (!can_nice(current, nice) ||
78905+ gr_handle_chroot_nice()))
78906 return -EPERM;
78907
78908 retval = security_task_setnice(current, nice);
78909@@ -3820,6 +3823,7 @@ recheck:
78910 unsigned long rlim_rtprio =
78911 task_rlimit(p, RLIMIT_RTPRIO);
78912
78913+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
78914 /* can't set/change the rt policy */
78915 if (policy != p->policy && !rlim_rtprio)
78916 return -EPERM;
78917@@ -4903,7 +4907,7 @@ static void migrate_tasks(unsigned int dead_cpu)
78918
78919 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
78920
78921-static struct ctl_table sd_ctl_dir[] = {
78922+static ctl_table_no_const sd_ctl_dir[] __read_only = {
78923 {
78924 .procname = "sched_domain",
78925 .mode = 0555,
78926@@ -4920,17 +4924,17 @@ static struct ctl_table sd_ctl_root[] = {
78927 {}
78928 };
78929
78930-static struct ctl_table *sd_alloc_ctl_entry(int n)
78931+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
78932 {
78933- struct ctl_table *entry =
78934+ ctl_table_no_const *entry =
78935 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
78936
78937 return entry;
78938 }
78939
78940-static void sd_free_ctl_entry(struct ctl_table **tablep)
78941+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
78942 {
78943- struct ctl_table *entry;
78944+ ctl_table_no_const *entry;
78945
78946 /*
78947 * In the intermediate directories, both the child directory and
78948@@ -4938,22 +4942,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
78949 * will always be set. In the lowest directory the names are
78950 * static strings and all have proc handlers.
78951 */
78952- for (entry = *tablep; entry->mode; entry++) {
78953- if (entry->child)
78954- sd_free_ctl_entry(&entry->child);
78955+ for (entry = tablep; entry->mode; entry++) {
78956+ if (entry->child) {
78957+ sd_free_ctl_entry(entry->child);
78958+ pax_open_kernel();
78959+ entry->child = NULL;
78960+ pax_close_kernel();
78961+ }
78962 if (entry->proc_handler == NULL)
78963 kfree(entry->procname);
78964 }
78965
78966- kfree(*tablep);
78967- *tablep = NULL;
78968+ kfree(tablep);
78969 }
78970
78971 static int min_load_idx = 0;
78972 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
78973
78974 static void
78975-set_table_entry(struct ctl_table *entry,
78976+set_table_entry(ctl_table_no_const *entry,
78977 const char *procname, void *data, int maxlen,
78978 umode_t mode, proc_handler *proc_handler,
78979 bool load_idx)
78980@@ -4973,7 +4980,7 @@ set_table_entry(struct ctl_table *entry,
78981 static struct ctl_table *
78982 sd_alloc_ctl_domain_table(struct sched_domain *sd)
78983 {
78984- struct ctl_table *table = sd_alloc_ctl_entry(13);
78985+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
78986
78987 if (table == NULL)
78988 return NULL;
78989@@ -5008,9 +5015,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
78990 return table;
78991 }
78992
78993-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
78994+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
78995 {
78996- struct ctl_table *entry, *table;
78997+ ctl_table_no_const *entry, *table;
78998 struct sched_domain *sd;
78999 int domain_num = 0, i;
79000 char buf[32];
79001@@ -5037,11 +5044,13 @@ static struct ctl_table_header *sd_sysctl_header;
79002 static void register_sched_domain_sysctl(void)
79003 {
79004 int i, cpu_num = num_possible_cpus();
79005- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
79006+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
79007 char buf[32];
79008
79009 WARN_ON(sd_ctl_dir[0].child);
79010+ pax_open_kernel();
79011 sd_ctl_dir[0].child = entry;
79012+ pax_close_kernel();
79013
79014 if (entry == NULL)
79015 return;
79016@@ -5064,8 +5073,12 @@ static void unregister_sched_domain_sysctl(void)
79017 if (sd_sysctl_header)
79018 unregister_sysctl_table(sd_sysctl_header);
79019 sd_sysctl_header = NULL;
79020- if (sd_ctl_dir[0].child)
79021- sd_free_ctl_entry(&sd_ctl_dir[0].child);
79022+ if (sd_ctl_dir[0].child) {
79023+ sd_free_ctl_entry(sd_ctl_dir[0].child);
79024+ pax_open_kernel();
79025+ sd_ctl_dir[0].child = NULL;
79026+ pax_close_kernel();
79027+ }
79028 }
79029 #else
79030 static void register_sched_domain_sysctl(void)
79031@@ -5164,7 +5177,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
79032 * happens before everything else. This has to be lower priority than
79033 * the notifier in the perf_event subsystem, though.
79034 */
79035-static struct notifier_block __cpuinitdata migration_notifier = {
79036+static struct notifier_block migration_notifier = {
79037 .notifier_call = migration_call,
79038 .priority = CPU_PRI_MIGRATION,
79039 };
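
Two hardening idioms meet in the sched/core.c hunks: the sysctl tables become ctl_table_no_const objects in write-protected (__read_only) memory, and the few places that must legitimately patch them — installing or clearing sd_ctl_dir[0].child — are bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection. (The __intentional_overflow(-1) annotations earlier in the file serve a different plugin: they tell the size-overflow gcc pass that these timeout returns may legitimately go negative.) A userspace analogy for the open/close pairing, using mprotect() — an illustration of the idea, not the kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Keep a "table" read-only; open a short write window only around the
 * one sanctioned update, then close it again. */
static void set_entry(char *table, const char *val)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    void *base  = (void *)((size_t)table & ~(page - 1));

    mprotect(base, page, PROT_READ | PROT_WRITE);   /* pax_open_kernel()  */
    strcpy(table, val);
    mprotect(base, page, PROT_READ);                /* pax_close_kernel() */
}

int main(void)
{
    char *table = mmap(NULL, 4096, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;
    set_entry(table, "patched");   /* writing directly here would SIGSEGV */
    puts(table);
    return 0;
}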
79040diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
79041index 81fa536..6ccf96a 100644
79042--- a/kernel/sched/fair.c
79043+++ b/kernel/sched/fair.c
79044@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
79045
79046 static void reset_ptenuma_scan(struct task_struct *p)
79047 {
79048- ACCESS_ONCE(p->mm->numa_scan_seq)++;
79049+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
79050 p->mm->numa_scan_offset = 0;
79051 }
79052
79053@@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
79054 */
79055 static int select_idle_sibling(struct task_struct *p, int target)
79056 {
79057- int cpu = smp_processor_id();
79058- int prev_cpu = task_cpu(p);
79059 struct sched_domain *sd;
79060 struct sched_group *sg;
79061- int i;
79062+ int i = task_cpu(p);
79063
79064- /*
79065- * If the task is going to be woken-up on this cpu and if it is
79066- * already idle, then it is the right target.
79067- */
79068- if (target == cpu && idle_cpu(cpu))
79069- return cpu;
79070+ if (idle_cpu(target))
79071+ return target;
79072
79073 /*
79074- * If the task is going to be woken-up on the cpu where it previously
79075- * ran and if it is currently idle, then it the right target.
79076+ * If the previous cpu is cache affine and idle, don't be stupid.
79077 */
79078- if (target == prev_cpu && idle_cpu(prev_cpu))
79079- return prev_cpu;
79080+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
79081+ return i;
79082
79083 /*
79084 * Otherwise, iterate the domains and find an elegible idle cpu.
79085@@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
79086 goto next;
79087
79088 for_each_cpu(i, sched_group_cpus(sg)) {
79089- if (!idle_cpu(i))
79090+ if (i == target || !idle_cpu(i))
79091 goto next;
79092 }
79093
79094@@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
79095 * run_rebalance_domains is triggered when needed from the scheduler tick.
79096 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
79097 */
79098-static void run_rebalance_domains(struct softirq_action *h)
79099+static void run_rebalance_domains(void)
79100 {
79101 int this_cpu = smp_processor_id();
79102 struct rq *this_rq = cpu_rq(this_cpu);
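
The select_idle_sibling() rewrite above is a backported upstream scheduler fix: instead of two special cases keyed off which CPU the wakeup happens to run on, it checks the target first, falls back to the previous CPU only when it shares a cache with the target and is idle, and the domain scan now skips the target it already rejected. Restated as standalone C with stubbed topology predicates (the stubs are illustrative assumptions):

#include <stdbool.h>
#include <stdio.h>

/* Stub topology: CPUs 0-3 share one cache; CPU 2 happens to be idle. */
static bool idle_cpu(int cpu)              { return cpu == 2; }
static bool cpus_share_cache(int a, int b) { return a / 4 == b / 4; }

static int select_idle_sibling(int prev, int target)
{
    if (idle_cpu(target))
        return target;             /* wake-up target is already idle */

    /* If the previous cpu is cache affine and idle, don't be stupid. */
    if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
        return prev;

    return target;                 /* domain scan for an idle CPU elided */
}

int main(void)
{
    printf("prev=2 target=1 -> cpu %d\n", select_idle_sibling(2, 1)); /* 2 */
    printf("prev=5 target=1 -> cpu %d\n", select_idle_sibling(5, 1)); /* 1 */
    return 0;
}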
79103diff --git a/kernel/signal.c b/kernel/signal.c
79104index 50e425c..92c8f65 100644
79105--- a/kernel/signal.c
79106+++ b/kernel/signal.c
79107@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
79108
79109 int print_fatal_signals __read_mostly;
79110
79111-static void __user *sig_handler(struct task_struct *t, int sig)
79112+static __sighandler_t sig_handler(struct task_struct *t, int sig)
79113 {
79114 return t->sighand->action[sig - 1].sa.sa_handler;
79115 }
79116
79117-static int sig_handler_ignored(void __user *handler, int sig)
79118+static int sig_handler_ignored(__sighandler_t handler, int sig)
79119 {
79120 /* Is it explicitly or implicitly ignored? */
79121 return handler == SIG_IGN ||
79122@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
79123
79124 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
79125 {
79126- void __user *handler;
79127+ __sighandler_t handler;
79128
79129 handler = sig_handler(t, sig);
79130
79131@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
79132 atomic_inc(&user->sigpending);
79133 rcu_read_unlock();
79134
79135+ if (!override_rlimit)
79136+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
79137+
79138 if (override_rlimit ||
79139 atomic_read(&user->sigpending) <=
79140 task_rlimit(t, RLIMIT_SIGPENDING)) {
79141@@ -495,7 +498,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
79142
79143 int unhandled_signal(struct task_struct *tsk, int sig)
79144 {
79145- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
79146+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
79147 if (is_global_init(tsk))
79148 return 1;
79149 if (handler != SIG_IGN && handler != SIG_DFL)
79150@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
79151 }
79152 }
79153
79154+ /* allow glibc communication via tgkill to other threads in our
79155+ thread group */
79156+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
79157+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
79158+ && gr_handle_signal(t, sig))
79159+ return -EPERM;
79160+
79161 return security_task_kill(t, info, sig, 0);
79162 }
79163
79164@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
79165 return send_signal(sig, info, p, 1);
79166 }
79167
79168-static int
79169+int
79170 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79171 {
79172 return send_signal(sig, info, t, 0);
79173@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79174 unsigned long int flags;
79175 int ret, blocked, ignored;
79176 struct k_sigaction *action;
79177+ int is_unhandled = 0;
79178
79179 spin_lock_irqsave(&t->sighand->siglock, flags);
79180 action = &t->sighand->action[sig-1];
79181@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79182 }
79183 if (action->sa.sa_handler == SIG_DFL)
79184 t->signal->flags &= ~SIGNAL_UNKILLABLE;
79185+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
79186+ is_unhandled = 1;
79187 ret = specific_send_sig_info(sig, info, t);
79188 spin_unlock_irqrestore(&t->sighand->siglock, flags);
79189
79190+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
79191+ normal operation */
79192+ if (is_unhandled) {
79193+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
79194+ gr_handle_crash(t, sig);
79195+ }
79196+
79197 return ret;
79198 }
79199
79200@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
79201 ret = check_kill_permission(sig, info, p);
79202 rcu_read_unlock();
79203
79204- if (!ret && sig)
79205+ if (!ret && sig) {
79206 ret = do_send_sig_info(sig, info, p, true);
79207+ if (!ret)
79208+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
79209+ }
79210
79211 return ret;
79212 }
79213@@ -2855,7 +2878,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
79214 int error = -ESRCH;
79215
79216 rcu_read_lock();
79217- p = find_task_by_vpid(pid);
79218+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79219+ /* allow glibc communication via tgkill to other threads in our
79220+ thread group */
79221+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
79222+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
79223+ p = find_task_by_vpid_unrestricted(pid);
79224+ else
79225+#endif
79226+ p = find_task_by_vpid(pid);
79227 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
79228 error = check_kill_permission(sig, info, p);
79229 /*
79230@@ -3138,8 +3169,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
79231 }
79232 seg = get_fs();
79233 set_fs(KERNEL_DS);
79234- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
79235- (stack_t __force __user *) &uoss,
79236+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
79237+ (stack_t __force_user *) &uoss,
79238 compat_user_stack_pointer());
79239 set_fs(seg);
79240 if (ret >= 0 && uoss_ptr) {
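
The two carve-outs added to signal.c exist for one reason: glibc implements setuid() and friends by broadcasting a reserved real-time signal (kernel-internal SIGRTMIN+1) to every thread in the process via tgkill(), and grsecurity's signal and chroot-findtask restrictions must not break that. The whitelisted shape — SI_TKILL, the reserved signal, sender and target in the same thread group — can be exercised from userspace, with the caveat that glibc remaps SIGRTMIN upward, so the number below is an ordinary RT signal rather than the reserved one:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    pid_t tgid = getpid();
    pid_t tid  = (pid_t)syscall(SYS_gettid);

    signal(SIGRTMIN + 1, SIG_IGN);     /* survive delivery for the demo */
    if (syscall(SYS_tgkill, tgid, tid, SIGRTMIN + 1) == 0)
        puts("thread-directed signal within own thread group: ok");
    return 0;
}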
79241diff --git a/kernel/smp.c b/kernel/smp.c
79242index 69f38bd..77bbf12 100644
79243--- a/kernel/smp.c
79244+++ b/kernel/smp.c
79245@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
79246 return NOTIFY_OK;
79247 }
79248
79249-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
79250+static struct notifier_block hotplug_cfd_notifier = {
79251 .notifier_call = hotplug_cfd,
79252 };
79253
79254diff --git a/kernel/smpboot.c b/kernel/smpboot.c
79255index d6c5fc0..530560c 100644
79256--- a/kernel/smpboot.c
79257+++ b/kernel/smpboot.c
79258@@ -275,7 +275,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
79259 }
79260 smpboot_unpark_thread(plug_thread, cpu);
79261 }
79262- list_add(&plug_thread->list, &hotplug_threads);
79263+ pax_list_add(&plug_thread->list, &hotplug_threads);
79264 out:
79265 mutex_unlock(&smpboot_threads_lock);
79266 return ret;
79267@@ -292,7 +292,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
79268 {
79269 get_online_cpus();
79270 mutex_lock(&smpboot_threads_lock);
79271- list_del(&plug_thread->list);
79272+ pax_list_del(&plug_thread->list);
79273 smpboot_destroy_threads(plug_thread);
79274 mutex_unlock(&smpboot_threads_lock);
79275 put_online_cpus();
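
pax_list_add()/pax_list_del() are hardened list primitives: before touching any pointers they verify that the neighbouring nodes still point at each other, turning a corrupted (or attacker-groomed) list into an immediate, loud failure rather than a write primitive. A sketch of the check, in the spirit of the kernel's own __list_add_valid() debugging helper:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Insert 'new' between prev and next only if the list is still sane. */
static int checked_list_add(struct list_head *new,
                            struct list_head *prev, struct list_head *next)
{
    if (next->prev != prev || prev->next != next) {
        fprintf(stderr, "list corruption detected\n");
        return -1;                 /* the kernel variant BUG()s instead */
    }
    next->prev = new;
    new->next  = next;
    new->prev  = prev;
    prev->next = new;
    return 0;
}

int main(void)
{
    struct list_head head = { &head, &head }, a, b;
    printf("first add:  %d\n", checked_list_add(&a, &head, head.next));
    head.next = &head;             /* simulate corruption: bypass node a */
    printf("second add: %d\n", checked_list_add(&b, &head, head.next));
    return 0;
}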
79276diff --git a/kernel/softirq.c b/kernel/softirq.c
79277index ed567ba..e71dabf 100644
79278--- a/kernel/softirq.c
79279+++ b/kernel/softirq.c
79280@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
79281 EXPORT_SYMBOL(irq_stat);
79282 #endif
79283
79284-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
79285+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
79286
79287 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
79288
79289-char *softirq_to_name[NR_SOFTIRQS] = {
79290+const char * const softirq_to_name[NR_SOFTIRQS] = {
79291 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
79292 "TASKLET", "SCHED", "HRTIMER", "RCU"
79293 };
79294@@ -244,7 +244,7 @@ restart:
79295 kstat_incr_softirqs_this_cpu(vec_nr);
79296
79297 trace_softirq_entry(vec_nr);
79298- h->action(h);
79299+ h->action();
79300 trace_softirq_exit(vec_nr);
79301 if (unlikely(prev_count != preempt_count())) {
79302 printk(KERN_ERR "huh, entered softirq %u %s %p"
79303@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
79304 or_softirq_pending(1UL << nr);
79305 }
79306
79307-void open_softirq(int nr, void (*action)(struct softirq_action *))
79308+void __init open_softirq(int nr, void (*action)(void))
79309 {
79310 softirq_vec[nr].action = action;
79311 }
79312@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
79313
79314 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
79315
79316-static void tasklet_action(struct softirq_action *a)
79317+static void tasklet_action(void)
79318 {
79319 struct tasklet_struct *list;
79320
79321@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
79322 }
79323 }
79324
79325-static void tasklet_hi_action(struct softirq_action *a)
79326+static void tasklet_hi_action(void)
79327 {
79328 struct tasklet_struct *list;
79329
79330@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
79331 return NOTIFY_OK;
79332 }
79333
79334-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
79335+static struct notifier_block remote_softirq_cpu_notifier = {
79336 .notifier_call = remote_softirq_cpu_notify,
79337 };
79338
79339@@ -835,11 +835,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
79340 return NOTIFY_OK;
79341 }
79342
79343-static struct notifier_block __cpuinitdata cpu_nfb = {
79344+static struct notifier_block cpu_nfb = {
79345 .notifier_call = cpu_callback
79346 };
79347
79348-static struct smp_hotplug_thread softirq_threads = {
79349+static struct smp_hotplug_thread softirq_threads __read_only = {
79350 .store = &ksoftirqd,
79351 .thread_should_run = ksoftirqd_should_run,
79352 .thread_fn = run_ksoftirqd,
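
The softirq changes are a small attack-surface reduction: the handler table softirq_vec moves into read-only memory, open_softirq() becomes __init so registrations cannot happen after boot, and the handlers lose their never-used struct softirq_action * parameter, so a hijacked table entry cannot be invoked with a useful argument. The shape of the result, as plain C:

#include <stdio.h>

typedef void (*softirq_fn)(void);

static void timer_action(void)  { puts("TIMER");  }
static void net_rx_action(void) { puts("NET_RX"); }

/* const dispatch table of no-argument handlers: nothing to overwrite
 * at runtime, nothing attacker-controlled passed at the call site. */
static softirq_fn const softirq_vec[] = { timer_action, net_rx_action };

int main(void)
{
    for (unsigned i = 0; i < sizeof(softirq_vec) / sizeof(softirq_vec[0]); i++)
        softirq_vec[i]();          /* the h->action() call above */
    return 0;
}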
79353diff --git a/kernel/srcu.c b/kernel/srcu.c
79354index 2b85982..d52ab26 100644
79355--- a/kernel/srcu.c
79356+++ b/kernel/srcu.c
79357@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
79358 preempt_disable();
79359 idx = rcu_dereference_index_check(sp->completed,
79360 rcu_read_lock_sched_held()) & 0x1;
79361- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
79362+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
79363 smp_mb(); /* B */ /* Avoid leaking the critical section. */
79364- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79365+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79366 preempt_enable();
79367 return idx;
79368 }
79369@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
79370 {
79371 preempt_disable();
79372 smp_mb(); /* C */ /* Avoid leaking the critical section. */
79373- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
79374+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
79375 preempt_enable();
79376 }
79377 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
79378diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
79379index 2f194e9..2c05ea9 100644
79380--- a/kernel/stop_machine.c
79381+++ b/kernel/stop_machine.c
79382@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
79383 * cpu notifiers. It currently shares the same priority as sched
79384 * migration_notifier.
79385 */
79386-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
79387+static struct notifier_block cpu_stop_cpu_notifier = {
79388 .notifier_call = cpu_stop_cpu_callback,
79389 .priority = 10,
79390 };
79391diff --git a/kernel/sys.c b/kernel/sys.c
79392index 47f1d1b..04c769e 100644
79393--- a/kernel/sys.c
79394+++ b/kernel/sys.c
79395@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
79396 error = -EACCES;
79397 goto out;
79398 }
79399+
79400+ if (gr_handle_chroot_setpriority(p, niceval)) {
79401+ error = -EACCES;
79402+ goto out;
79403+ }
79404+
79405 no_nice = security_task_setnice(p, niceval);
79406 if (no_nice) {
79407 error = no_nice;
79408@@ -596,6 +602,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
79409 goto error;
79410 }
79411
79412+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
79413+ goto error;
79414+
79415 if (rgid != (gid_t) -1 ||
79416 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
79417 new->sgid = new->egid;
79418@@ -631,6 +640,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
79419 old = current_cred();
79420
79421 retval = -EPERM;
79422+
79423+ if (gr_check_group_change(kgid, kgid, kgid))
79424+ goto error;
79425+
79426 if (nsown_capable(CAP_SETGID))
79427 new->gid = new->egid = new->sgid = new->fsgid = kgid;
79428 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
79429@@ -648,7 +661,7 @@ error:
79430 /*
79431 * change the user struct in a credentials set to match the new UID
79432 */
79433-static int set_user(struct cred *new)
79434+int set_user(struct cred *new)
79435 {
79436 struct user_struct *new_user;
79437
79438@@ -728,6 +741,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
79439 goto error;
79440 }
79441
79442+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
79443+ goto error;
79444+
79445 if (!uid_eq(new->uid, old->uid)) {
79446 retval = set_user(new);
79447 if (retval < 0)
79448@@ -778,6 +794,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
79449 old = current_cred();
79450
79451 retval = -EPERM;
79452+
79453+ if (gr_check_crash_uid(kuid))
79454+ goto error;
79455+ if (gr_check_user_change(kuid, kuid, kuid))
79456+ goto error;
79457+
79458 if (nsown_capable(CAP_SETUID)) {
79459 new->suid = new->uid = kuid;
79460 if (!uid_eq(kuid, old->uid)) {
79461@@ -847,6 +869,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
79462 goto error;
79463 }
79464
79465+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
79466+ goto error;
79467+
79468 if (ruid != (uid_t) -1) {
79469 new->uid = kruid;
79470 if (!uid_eq(kruid, old->uid)) {
79471@@ -929,6 +954,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
79472 goto error;
79473 }
79474
79475+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
79476+ goto error;
79477+
79478 if (rgid != (gid_t) -1)
79479 new->gid = krgid;
79480 if (egid != (gid_t) -1)
79481@@ -990,12 +1018,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
79482 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
79483 nsown_capable(CAP_SETUID)) {
79484 if (!uid_eq(kuid, old->fsuid)) {
79485+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
79486+ goto error;
79487+
79488 new->fsuid = kuid;
79489 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
79490 goto change_okay;
79491 }
79492 }
79493
79494+error:
79495 abort_creds(new);
79496 return old_fsuid;
79497
79498@@ -1028,12 +1060,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
79499 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
79500 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
79501 nsown_capable(CAP_SETGID)) {
79502+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
79503+ goto error;
79504+
79505 if (!gid_eq(kgid, old->fsgid)) {
79506 new->fsgid = kgid;
79507 goto change_okay;
79508 }
79509 }
79510
79511+error:
79512 abort_creds(new);
79513 return old_fsgid;
79514
79515@@ -1341,19 +1377,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
79516 return -EFAULT;
79517
79518 down_read(&uts_sem);
79519- error = __copy_to_user(&name->sysname, &utsname()->sysname,
79520+ error = __copy_to_user(name->sysname, &utsname()->sysname,
79521 __OLD_UTS_LEN);
79522 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
79523- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
79524+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
79525 __OLD_UTS_LEN);
79526 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
79527- error |= __copy_to_user(&name->release, &utsname()->release,
79528+ error |= __copy_to_user(name->release, &utsname()->release,
79529 __OLD_UTS_LEN);
79530 error |= __put_user(0, name->release + __OLD_UTS_LEN);
79531- error |= __copy_to_user(&name->version, &utsname()->version,
79532+ error |= __copy_to_user(name->version, &utsname()->version,
79533 __OLD_UTS_LEN);
79534 error |= __put_user(0, name->version + __OLD_UTS_LEN);
79535- error |= __copy_to_user(&name->machine, &utsname()->machine,
79536+ error |= __copy_to_user(name->machine, &utsname()->machine,
79537 __OLD_UTS_LEN);
79538 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
79539 up_read(&uts_sem);
79540@@ -2027,7 +2063,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
79541 error = get_dumpable(me->mm);
79542 break;
79543 case PR_SET_DUMPABLE:
79544- if (arg2 < 0 || arg2 > 1) {
79545+ if (arg2 > 1) {
79546 error = -EINVAL;
79547 break;
79548 }
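
The kernel/sys.c hunks above follow one pattern: a gr_check_user_change()/gr_check_group_change() (or gr_check_crash_uid()) gate is inserted before each credential change commits, and setfsuid()/setfsgid() gain an error: label so a denied change falls through to abort_creds(). The remaining hunks are independent cleanups: olduname() loses spurious & operators on array fields, and PR_SET_DUMPABLE drops a tautological arg2 < 0 test on an unsigned argument. A minimal user-space sketch of the validate-then-commit shape, with every identifier hypothetical:

    #include <stdio.h>

    struct cred { unsigned uid, euid, fsuid; };

    /* stand-in for gr_check_user_change(): nonzero means "deny" */
    static int check_user_change(unsigned fsuid)
    {
        return fsuid == 0;            /* toy policy: never switch to root */
    }

    /* validate-then-commit, modelled on the patched sys_setfsuid() */
    static unsigned set_fsuid(struct cred *cred, unsigned kuid)
    {
        unsigned old_fsuid = cred->fsuid;
        struct cred new = *cred;      /* prepare_creds() stand-in */

        if (kuid != old_fsuid) {
            if (check_user_change(kuid))
                goto error;           /* denied: fall through to abort */
            new.fsuid = kuid;
            *cred = new;              /* commit_creds() stand-in */
            return old_fsuid;
        }
    error:
        /* abort_creds() stand-in: the prepared copy is discarded */
        return old_fsuid;
    }

    int main(void)
    {
        struct cred c = { 1000, 1000, 1000 };
        set_fsuid(&c, 0);             /* denied, fsuid stays 1000 */
        set_fsuid(&c, 1001);          /* allowed */
        printf("fsuid = %u\n", c.fsuid);
        return 0;
    }
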
79549diff --git a/kernel/sysctl.c b/kernel/sysctl.c
79550index c88878d..e4fa5d1 100644
79551--- a/kernel/sysctl.c
79552+++ b/kernel/sysctl.c
79553@@ -92,7 +92,6 @@
79554
79555
79556 #if defined(CONFIG_SYSCTL)
79557-
79558 /* External variables not in a header file. */
79559 extern int sysctl_overcommit_memory;
79560 extern int sysctl_overcommit_ratio;
79561@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
79562 void __user *buffer, size_t *lenp, loff_t *ppos);
79563 #endif
79564
79565-#ifdef CONFIG_PRINTK
79566 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79567 void __user *buffer, size_t *lenp, loff_t *ppos);
79568-#endif
79569
79570 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
79571 void __user *buffer, size_t *lenp, loff_t *ppos);
79572@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
79573
79574 #endif
79575
79576+extern struct ctl_table grsecurity_table[];
79577+
79578 static struct ctl_table kern_table[];
79579 static struct ctl_table vm_table[];
79580 static struct ctl_table fs_table[];
79581@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
79582 int sysctl_legacy_va_layout;
79583 #endif
79584
79585+#ifdef CONFIG_PAX_SOFTMODE
79586+static ctl_table pax_table[] = {
79587+ {
79588+ .procname = "softmode",
79589+ .data = &pax_softmode,
79590+ .maxlen = sizeof(unsigned int),
79591+ .mode = 0600,
79592+ .proc_handler = &proc_dointvec,
79593+ },
79594+
79595+ { }
79596+};
79597+#endif
79598+
79599 /* The default sysctl tables: */
79600
79601 static struct ctl_table sysctl_base_table[] = {
79602@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
79603 #endif
79604
79605 static struct ctl_table kern_table[] = {
79606+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
79607+ {
79608+ .procname = "grsecurity",
79609+ .mode = 0500,
79610+ .child = grsecurity_table,
79611+ },
79612+#endif
79613+
79614+#ifdef CONFIG_PAX_SOFTMODE
79615+ {
79616+ .procname = "pax",
79617+ .mode = 0500,
79618+ .child = pax_table,
79619+ },
79620+#endif
79621+
79622 {
79623 .procname = "sched_child_runs_first",
79624 .data = &sysctl_sched_child_runs_first,
79625@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
79626 .data = &modprobe_path,
79627 .maxlen = KMOD_PATH_LEN,
79628 .mode = 0644,
79629- .proc_handler = proc_dostring,
79630+ .proc_handler = proc_dostring_modpriv,
79631 },
79632 {
79633 .procname = "modules_disabled",
79634@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
79635 .extra1 = &zero,
79636 .extra2 = &one,
79637 },
79638+#endif
79639 {
79640 .procname = "kptr_restrict",
79641 .data = &kptr_restrict,
79642 .maxlen = sizeof(int),
79643 .mode = 0644,
79644 .proc_handler = proc_dointvec_minmax_sysadmin,
79645+#ifdef CONFIG_GRKERNSEC_HIDESYM
79646+ .extra1 = &two,
79647+#else
79648 .extra1 = &zero,
79649+#endif
79650 .extra2 = &two,
79651 },
79652-#endif
79653 {
79654 .procname = "ngroups_max",
79655 .data = &ngroups_max,
79656@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
79657 .proc_handler = proc_dointvec_minmax,
79658 .extra1 = &zero,
79659 },
79660+ {
79661+ .procname = "heap_stack_gap",
79662+ .data = &sysctl_heap_stack_gap,
79663+ .maxlen = sizeof(sysctl_heap_stack_gap),
79664+ .mode = 0644,
79665+ .proc_handler = proc_doulongvec_minmax,
79666+ },
79667 #else
79668 {
79669 .procname = "nr_trim_pages",
79670@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
79671 buffer, lenp, ppos);
79672 }
79673
79674+int proc_dostring_modpriv(struct ctl_table *table, int write,
79675+ void __user *buffer, size_t *lenp, loff_t *ppos)
79676+{
79677+ if (write && !capable(CAP_SYS_MODULE))
79678+ return -EPERM;
79679+
79680+ return _proc_do_string(table->data, table->maxlen, write,
79681+ buffer, lenp, ppos);
79682+}
79683+
79684 static size_t proc_skip_spaces(char **buf)
79685 {
79686 size_t ret;
79687@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
79688 len = strlen(tmp);
79689 if (len > *size)
79690 len = *size;
79691+ if (len > sizeof(tmp))
79692+ len = sizeof(tmp);
79693 if (copy_to_user(*buf, tmp, len))
79694 return -EFAULT;
79695 *size -= len;
79696@@ -1985,7 +2037,7 @@ int proc_dointvec(struct ctl_table *table, int write,
79697 static int proc_taint(struct ctl_table *table, int write,
79698 void __user *buffer, size_t *lenp, loff_t *ppos)
79699 {
79700- struct ctl_table t;
79701+ ctl_table_no_const t;
79702 unsigned long tmptaint = get_taint();
79703 int err;
79704
79705@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
79706 return err;
79707 }
79708
79709-#ifdef CONFIG_PRINTK
79710 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79711 void __user *buffer, size_t *lenp, loff_t *ppos)
79712 {
79713@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79714
79715 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
79716 }
79717-#endif
79718
79719 struct do_proc_dointvec_minmax_conv_param {
79720 int *min;
79721@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
79722 *i = val;
79723 } else {
79724 val = convdiv * (*i) / convmul;
79725- if (!first)
79726+ if (!first) {
79727 err = proc_put_char(&buffer, &left, '\t');
79728+ if (err)
79729+ break;
79730+ }
79731 err = proc_put_long(&buffer, &left, val, false);
79732 if (err)
79733 break;
79734@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
79735 return -ENOSYS;
79736 }
79737
79738+int proc_dostring_modpriv(struct ctl_table *table, int write,
79739+ void __user *buffer, size_t *lenp, loff_t *ppos)
79740+{
79741+ return -ENOSYS;
79742+}
79743+
79744 int proc_dointvec(struct ctl_table *table, int write,
79745 void __user *buffer, size_t *lenp, loff_t *ppos)
79746 {
79747@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
79748 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
79749 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
79750 EXPORT_SYMBOL(proc_dostring);
79751+EXPORT_SYMBOL(proc_dostring_modpriv);
79752 EXPORT_SYMBOL(proc_doulongvec_minmax);
79753 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
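
proc_dostring_modpriv(), added above and wired to kernel.modprobe, is proc_dostring() with a write gate: writes now require CAP_SYS_MODULE instead of relying on file mode alone; a -ENOSYS stub covers the no-proc-sysctl build, and the EXPORT_SYMBOL keeps modular users linking. The same hunk set also clamps proc_put_long()'s len to sizeof(tmp) before copy_to_user(). A compilable toy of the gated-write-handler shape (capable() replaced by a flag; names hypothetical):

    #include <errno.h>
    #include <stdio.h>

    static int has_cap_sys_module;     /* capable(CAP_SYS_MODULE) stand-in */
    static char modprobe_path[64] = "/sbin/modprobe";

    /* write-gated string handler, shaped like proc_dostring_modpriv() */
    static int dostring_modpriv(int write, const char *buf)
    {
        if (write && !has_cap_sys_module)
            return -EPERM;
        if (write)
            snprintf(modprobe_path, sizeof(modprobe_path), "%s", buf);
        return 0;
    }

    int main(void)
    {
        printf("unprivileged: %d\n", dostring_modpriv(1, "/tmp/x"));
        has_cap_sys_module = 1;
        printf("privileged:   %d\n", dostring_modpriv(1, "/tmp/x"));
        printf("path: %s\n", modprobe_path);
        return 0;
    }
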
79754diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
79755index 0ddf3a0..a199f50 100644
79756--- a/kernel/sysctl_binary.c
79757+++ b/kernel/sysctl_binary.c
79758@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
79759 int i;
79760
79761 set_fs(KERNEL_DS);
79762- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
79763+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
79764 set_fs(old_fs);
79765 if (result < 0)
79766 goto out_kfree;
79767@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
79768 }
79769
79770 set_fs(KERNEL_DS);
79771- result = vfs_write(file, buffer, str - buffer, &pos);
79772+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
79773 set_fs(old_fs);
79774 if (result < 0)
79775 goto out_kfree;
79776@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
79777 int i;
79778
79779 set_fs(KERNEL_DS);
79780- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
79781+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
79782 set_fs(old_fs);
79783 if (result < 0)
79784 goto out_kfree;
79785@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
79786 }
79787
79788 set_fs(KERNEL_DS);
79789- result = vfs_write(file, buffer, str - buffer, &pos);
79790+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
79791 set_fs(old_fs);
79792 if (result < 0)
79793 goto out_kfree;
79794@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
79795 int i;
79796
79797 set_fs(KERNEL_DS);
79798- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
79799+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
79800 set_fs(old_fs);
79801 if (result < 0)
79802 goto out;
79803@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
79804 __le16 dnaddr;
79805
79806 set_fs(KERNEL_DS);
79807- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
79808+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
79809 set_fs(old_fs);
79810 if (result < 0)
79811 goto out;
79812@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
79813 le16_to_cpu(dnaddr) & 0x3ff);
79814
79815 set_fs(KERNEL_DS);
79816- result = vfs_write(file, buf, len, &pos);
79817+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
79818 set_fs(old_fs);
79819 if (result < 0)
79820 goto out;
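
The sysctl_binary.c hunks change annotations only: these call sites run under set_fs(KERNEL_DS), so passing a kernel buffer where vfs_read()/vfs_write() expect a __user pointer is intentional, and the __force_user cast records that for sparse. Roughly how such address-space annotations are declared (active only under sparse's __CHECKER__; a plain compile ignores them):

    #ifdef __CHECKER__            /* defined when sparse analyses the file */
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* takes a user-space pointer, like the vfs_read() buffer argument */
    static long fake_read(char __user *buf, long len) { (void)buf; return len; }

    int main(void)
    {
        char kbuf[16];
        /* without __force, sparse warns about the address-space crossing;
         * with it, the cast is declared deliberate */
        return (int)fake_read((char __force __user *)kbuf, sizeof(kbuf));
    }
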
79821diff --git a/kernel/taskstats.c b/kernel/taskstats.c
79822index 145bb4d..b2aa969 100644
79823--- a/kernel/taskstats.c
79824+++ b/kernel/taskstats.c
79825@@ -28,9 +28,12 @@
79826 #include <linux/fs.h>
79827 #include <linux/file.h>
79828 #include <linux/pid_namespace.h>
79829+#include <linux/grsecurity.h>
79830 #include <net/genetlink.h>
79831 #include <linux/atomic.h>
79832
79833+extern int gr_is_taskstats_denied(int pid);
79834+
79835 /*
79836 * Maximum length of a cpumask that can be specified in
79837 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
79838@@ -570,6 +573,9 @@ err:
79839
79840 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
79841 {
79842+ if (gr_is_taskstats_denied(current->pid))
79843+ return -EACCES;
79844+
79845 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
79846 return cmd_attr_register_cpumask(info);
79847 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
79848diff --git a/kernel/time.c b/kernel/time.c
79849index d226c6a..2f0d217 100644
79850--- a/kernel/time.c
79851+++ b/kernel/time.c
79852@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
79853 return error;
79854
79855 if (tz) {
79856+ /* we log in do_settimeofday called below, so don't log twice
79857+ */
79858+ if (!tv)
79859+ gr_log_timechange();
79860+
79861 sys_tz = *tz;
79862 update_vsyscall_tz();
79863 if (firsttime) {
79864@@ -493,7 +498,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
79865 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
79866 * value to a scaled second value.
79867 */
79868-unsigned long
79869+unsigned long __intentional_overflow(-1)
79870 timespec_to_jiffies(const struct timespec *value)
79871 {
79872 unsigned long sec = value->tv_sec;
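
__intentional_overflow(-1), applied above to timespec_to_jiffies() (and later to schedule_timeout() and the div64 helpers), marks functions whose arithmetic is allowed to wrap so the PaX size_overflow plugin does not instrument them. A sketch of how such a marker is typically defined; the attribute spelling here is an assumption, and without the plugin the macro simply disappears:

    #include <stdio.h>

    /* assumption: with the size_overflow GCC plugin this becomes a real
     * attribute; without it the macro vanishes and the code is unchanged */
    #if defined(SIZE_OVERFLOW_PLUGIN)
    # define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif

    static unsigned long __intentional_overflow(-1)
    jiffies_like_conversion(long sec)
    {
        return (unsigned long)sec * 100UL;  /* may wrap: deliberately unchecked */
    }

    int main(void)
    {
        printf("%lu\n", jiffies_like_conversion(1L << 60));
        return 0;
    }
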
79873diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
79874index f11d83b..d016d91 100644
79875--- a/kernel/time/alarmtimer.c
79876+++ b/kernel/time/alarmtimer.c
79877@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
79878 struct platform_device *pdev;
79879 int error = 0;
79880 int i;
79881- struct k_clock alarm_clock = {
79882+ static struct k_clock alarm_clock = {
79883 .clock_getres = alarm_clock_getres,
79884 .clock_get = alarm_clock_get,
79885 .timer_create = alarm_timer_create,
79886diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
79887index a13987a..36cd791 100644
79888--- a/kernel/time/tick-broadcast.c
79889+++ b/kernel/time/tick-broadcast.c
79890@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
79891 * then clear the broadcast bit.
79892 */
79893 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
79894- int cpu = smp_processor_id();
79895+ cpu = smp_processor_id();
79896
79897 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
79898 tick_broadcast_clear_oneshot(cpu);
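
The tick-broadcast change is a shadowing fix: cpu is already a parameter of tick_device_uses_broadcast(), and the removed inner declaration created a second cpu that hid it, so the assignment now reuses the parameter. The bug class in miniature:

    #include <stdio.h>

    int main(void)
    {
        int cpu = 3;
        {
            int cpu = 7;   /* shadows the outer cpu; -Wshadow warns here */
            (void)cpu;
        }
        /* the outer cpu was never updated by the inner block */
        printf("cpu = %d\n", cpu);   /* prints 3 */
        return 0;
    }
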
79899diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
79900index cbc6acb..3a77191 100644
79901--- a/kernel/time/timekeeping.c
79902+++ b/kernel/time/timekeeping.c
79903@@ -15,6 +15,7 @@
79904 #include <linux/init.h>
79905 #include <linux/mm.h>
79906 #include <linux/sched.h>
79907+#include <linux/grsecurity.h>
79908 #include <linux/syscore_ops.h>
79909 #include <linux/clocksource.h>
79910 #include <linux/jiffies.h>
79911@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
79912 if (!timespec_valid_strict(tv))
79913 return -EINVAL;
79914
79915+ gr_log_timechange();
79916+
79917 write_seqlock_irqsave(&tk->lock, flags);
79918
79919 timekeeping_forward_now(tk);
79920diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
79921index af5a7e9..715611a 100644
79922--- a/kernel/time/timer_list.c
79923+++ b/kernel/time/timer_list.c
79924@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
79925
79926 static void print_name_offset(struct seq_file *m, void *sym)
79927 {
79928+#ifdef CONFIG_GRKERNSEC_HIDESYM
79929+ SEQ_printf(m, "<%p>", NULL);
79930+#else
79931 char symname[KSYM_NAME_LEN];
79932
79933 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
79934 SEQ_printf(m, "<%pK>", sym);
79935 else
79936 SEQ_printf(m, "%s", symname);
79937+#endif
79938 }
79939
79940 static void
79941@@ -112,7 +116,11 @@ next_one:
79942 static void
79943 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
79944 {
79945+#ifdef CONFIG_GRKERNSEC_HIDESYM
79946+ SEQ_printf(m, " .base: %p\n", NULL);
79947+#else
79948 SEQ_printf(m, " .base: %pK\n", base);
79949+#endif
79950 SEQ_printf(m, " .index: %d\n",
79951 base->index);
79952 SEQ_printf(m, " .resolution: %Lu nsecs\n",
79953@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
79954 {
79955 struct proc_dir_entry *pe;
79956
79957+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79958+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
79959+#else
79960 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
79961+#endif
79962 if (!pe)
79963 return -ENOMEM;
79964 return 0;
79965diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
79966index 0b537f2..40d6c20 100644
79967--- a/kernel/time/timer_stats.c
79968+++ b/kernel/time/timer_stats.c
79969@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
79970 static unsigned long nr_entries;
79971 static struct entry entries[MAX_ENTRIES];
79972
79973-static atomic_t overflow_count;
79974+static atomic_unchecked_t overflow_count;
79975
79976 /*
79977 * The entries are in a hash-table, for fast lookup:
79978@@ -140,7 +140,7 @@ static void reset_entries(void)
79979 nr_entries = 0;
79980 memset(entries, 0, sizeof(entries));
79981 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
79982- atomic_set(&overflow_count, 0);
79983+ atomic_set_unchecked(&overflow_count, 0);
79984 }
79985
79986 static struct entry *alloc_entry(void)
79987@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79988 if (likely(entry))
79989 entry->count++;
79990 else
79991- atomic_inc(&overflow_count);
79992+ atomic_inc_unchecked(&overflow_count);
79993
79994 out_unlock:
79995 raw_spin_unlock_irqrestore(lock, flags);
79996@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79997
79998 static void print_name_offset(struct seq_file *m, unsigned long addr)
79999 {
80000+#ifdef CONFIG_GRKERNSEC_HIDESYM
80001+ seq_printf(m, "<%p>", NULL);
80002+#else
80003 char symname[KSYM_NAME_LEN];
80004
80005 if (lookup_symbol_name(addr, symname) < 0)
80006- seq_printf(m, "<%p>", (void *)addr);
80007+ seq_printf(m, "<%pK>", (void *)addr);
80008 else
80009 seq_printf(m, "%s", symname);
80010+#endif
80011 }
80012
80013 static int tstats_show(struct seq_file *m, void *v)
80014@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
80015
80016 seq_puts(m, "Timer Stats Version: v0.2\n");
80017 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
80018- if (atomic_read(&overflow_count))
80019+ if (atomic_read_unchecked(&overflow_count))
80020 seq_printf(m, "Overflow: %d entries\n",
80021- atomic_read(&overflow_count));
80022+ atomic_read_unchecked(&overflow_count));
80023
80024 for (i = 0; i < nr_entries; i++) {
80025 entry = entries + i;
80026@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
80027 {
80028 struct proc_dir_entry *pe;
80029
80030+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80031+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
80032+#else
80033 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
80034+#endif
80035 if (!pe)
80036 return -ENOMEM;
80037 return 0;
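
timer_list.c and timer_stats.c show the two recurring GRKERNSEC_HIDESYM moves: print a constant NULL placeholder (or %pK, which the kernel can zero for unprivileged readers depending on kptr_restrict) instead of a raw kernel address, and tighten the /proc file modes from 0444/0644 to 0400/0600 under GRKERNSEC_PROC_ADD. The compile-time redaction pattern, reduced to a toy:

    #include <stdio.h>

    /* #define HIDESYM 1   -- flip on to redact */

    static void print_name_offset(const void *sym)
    {
    #ifdef HIDESYM
        /* leak nothing: a constant placeholder instead of the address */
        printf("<%p>", (void *)0);
    #else
        printf("<%p>", sym);
    #endif
    }

    int main(void)
    {
        int x;
        print_name_offset(&x);
        putchar('\n');
        return 0;
    }
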
80038diff --git a/kernel/timer.c b/kernel/timer.c
80039index 367d008..5dee98f 100644
80040--- a/kernel/timer.c
80041+++ b/kernel/timer.c
80042@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
80043 /*
80044 * This function runs timers and the timer-tq in bottom half context.
80045 */
80046-static void run_timer_softirq(struct softirq_action *h)
80047+static void run_timer_softirq(void)
80048 {
80049 struct tvec_base *base = __this_cpu_read(tvec_bases);
80050
80051@@ -1481,7 +1481,7 @@ static void process_timeout(unsigned long __data)
80052 *
80053 * In all cases the return value is guaranteed to be non-negative.
80054 */
80055-signed long __sched schedule_timeout(signed long timeout)
80056+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
80057 {
80058 struct timer_list timer;
80059 unsigned long expire;
80060@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
80061 return NOTIFY_OK;
80062 }
80063
80064-static struct notifier_block __cpuinitdata timers_nb = {
80065+static struct notifier_block timers_nb = {
80066 .notifier_call = timer_cpu_notify,
80067 };
80068
80069diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
80070index c0bd030..62a1927 100644
80071--- a/kernel/trace/blktrace.c
80072+++ b/kernel/trace/blktrace.c
80073@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
80074 struct blk_trace *bt = filp->private_data;
80075 char buf[16];
80076
80077- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
80078+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
80079
80080 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
80081 }
80082@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
80083 return 1;
80084
80085 bt = buf->chan->private_data;
80086- atomic_inc(&bt->dropped);
80087+ atomic_inc_unchecked(&bt->dropped);
80088 return 0;
80089 }
80090
80091@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
80092
80093 bt->dir = dir;
80094 bt->dev = dev;
80095- atomic_set(&bt->dropped, 0);
80096+ atomic_set_unchecked(&bt->dropped, 0);
80097
80098 ret = -EIO;
80099 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
80100diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
80101index 35cc3a8..2a47da3 100644
80102--- a/kernel/trace/ftrace.c
80103+++ b/kernel/trace/ftrace.c
80104@@ -1886,12 +1886,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
80105 if (unlikely(ftrace_disabled))
80106 return 0;
80107
80108+ ret = ftrace_arch_code_modify_prepare();
80109+ FTRACE_WARN_ON(ret);
80110+ if (ret)
80111+ return 0;
80112+
80113 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
80114+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
80115 if (ret) {
80116 ftrace_bug(ret, ip);
80117- return 0;
80118 }
80119- return 1;
80120+ return ret ? 0 : 1;
80121 }
80122
80123 /*
80124@@ -2964,7 +2969,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
80125
80126 int
80127 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
80128- void *data)
80129+ void *data)
80130 {
80131 struct ftrace_func_probe *entry;
80132 struct ftrace_page *pg;
80133@@ -3831,8 +3836,10 @@ static int ftrace_process_locs(struct module *mod,
80134 if (!count)
80135 return 0;
80136
80137+ pax_open_kernel();
80138 sort(start, count, sizeof(*start),
80139 ftrace_cmp_ips, ftrace_swap_ips);
80140+ pax_close_kernel();
80141
80142 start_pg = ftrace_allocate_pages(count);
80143 if (!start_pg)
80144@@ -4554,8 +4561,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
80145 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
80146
80147 static int ftrace_graph_active;
80148-static struct notifier_block ftrace_suspend_notifier;
80149-
80150 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
80151 {
80152 return 0;
80153@@ -4699,6 +4704,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
80154 return NOTIFY_DONE;
80155 }
80156
80157+static struct notifier_block ftrace_suspend_notifier = {
80158+ .notifier_call = ftrace_suspend_notifier_call
80159+};
80160+
80161 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
80162 trace_func_graph_ent_t entryfunc)
80163 {
80164@@ -4712,7 +4721,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
80165 goto out;
80166 }
80167
80168- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
80169 register_pm_notifier(&ftrace_suspend_notifier);
80170
80171 ftrace_graph_active++;
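
In ftrace.c the suspend notifier moves from a runtime assignment of .notifier_call to a full static initializer, so the structure is never written after build time; alarmtimer.c's static struct k_clock above has the same flavor, taking the object out of the init function's stack frame. The ftrace_code_disable() hunk additionally brackets ftrace_make_nop() with ftrace_arch_code_modify_prepare()/post_process(). The two initialization shapes side by side:

    #include <stdio.h>

    struct notifier_block { int (*notifier_call)(int); };

    static int my_call(int v) { return v + 1; }

    /* after: compile-time initialization; the object can live in .rodata */
    static const struct notifier_block nb_static = {
        .notifier_call = my_call,
    };

    int main(void)
    {
        /* before: the field is filled in at runtime, so the struct
         * must remain writable */
        struct notifier_block nb_runtime;
        nb_runtime.notifier_call = my_call;

        printf("%d %d\n", nb_runtime.notifier_call(1),
               nb_static.notifier_call(2));
        return 0;
    }
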
80172diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
80173index ce8514f..8233573 100644
80174--- a/kernel/trace/ring_buffer.c
80175+++ b/kernel/trace/ring_buffer.c
80176@@ -346,9 +346,9 @@ struct buffer_data_page {
80177 */
80178 struct buffer_page {
80179 struct list_head list; /* list of buffer pages */
80180- local_t write; /* index for next write */
80181+ local_unchecked_t write; /* index for next write */
80182 unsigned read; /* index for next read */
80183- local_t entries; /* entries on this page */
80184+ local_unchecked_t entries; /* entries on this page */
80185 unsigned long real_end; /* real end of data */
80186 struct buffer_data_page *page; /* Actual data page */
80187 };
80188@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
80189 unsigned long last_overrun;
80190 local_t entries_bytes;
80191 local_t entries;
80192- local_t overrun;
80193- local_t commit_overrun;
80194+ local_unchecked_t overrun;
80195+ local_unchecked_t commit_overrun;
80196 local_t dropped_events;
80197 local_t committing;
80198 local_t commits;
80199@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
80200 *
80201 * We add a counter to the write field to denote this.
80202 */
80203- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
80204- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
80205+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
80206+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
80207
80208 /*
80209 * Just make sure we have seen our old_write and synchronize
80210@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
80211 * cmpxchg to only update if an interrupt did not already
80212 * do it for us. If the cmpxchg fails, we don't care.
80213 */
80214- (void)local_cmpxchg(&next_page->write, old_write, val);
80215- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
80216+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
80217+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
80218
80219 /*
80220 * No need to worry about races with clearing out the commit.
80221@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
80222
80223 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
80224 {
80225- return local_read(&bpage->entries) & RB_WRITE_MASK;
80226+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
80227 }
80228
80229 static inline unsigned long rb_page_write(struct buffer_page *bpage)
80230 {
80231- return local_read(&bpage->write) & RB_WRITE_MASK;
80232+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
80233 }
80234
80235 static int
80236@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
80237 * bytes consumed in ring buffer from here.
80238 * Increment overrun to account for the lost events.
80239 */
80240- local_add(page_entries, &cpu_buffer->overrun);
80241+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
80242 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
80243 }
80244
80245@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
80246 * it is our responsibility to update
80247 * the counters.
80248 */
80249- local_add(entries, &cpu_buffer->overrun);
80250+ local_add_unchecked(entries, &cpu_buffer->overrun);
80251 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
80252
80253 /*
80254@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80255 if (tail == BUF_PAGE_SIZE)
80256 tail_page->real_end = 0;
80257
80258- local_sub(length, &tail_page->write);
80259+ local_sub_unchecked(length, &tail_page->write);
80260 return;
80261 }
80262
80263@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80264 rb_event_set_padding(event);
80265
80266 /* Set the write back to the previous setting */
80267- local_sub(length, &tail_page->write);
80268+ local_sub_unchecked(length, &tail_page->write);
80269 return;
80270 }
80271
80272@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80273
80274 /* Set write to end of buffer */
80275 length = (tail + length) - BUF_PAGE_SIZE;
80276- local_sub(length, &tail_page->write);
80277+ local_sub_unchecked(length, &tail_page->write);
80278 }
80279
80280 /*
80281@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
80282 * about it.
80283 */
80284 if (unlikely(next_page == commit_page)) {
80285- local_inc(&cpu_buffer->commit_overrun);
80286+ local_inc_unchecked(&cpu_buffer->commit_overrun);
80287 goto out_reset;
80288 }
80289
80290@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
80291 cpu_buffer->tail_page) &&
80292 (cpu_buffer->commit_page ==
80293 cpu_buffer->reader_page))) {
80294- local_inc(&cpu_buffer->commit_overrun);
80295+ local_inc_unchecked(&cpu_buffer->commit_overrun);
80296 goto out_reset;
80297 }
80298 }
80299@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
80300 length += RB_LEN_TIME_EXTEND;
80301
80302 tail_page = cpu_buffer->tail_page;
80303- write = local_add_return(length, &tail_page->write);
80304+ write = local_add_return_unchecked(length, &tail_page->write);
80305
80306 /* set write to only the index of the write */
80307 write &= RB_WRITE_MASK;
80308@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
80309 kmemcheck_annotate_bitfield(event, bitfield);
80310 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
80311
80312- local_inc(&tail_page->entries);
80313+ local_inc_unchecked(&tail_page->entries);
80314
80315 /*
80316 * If this is the first commit on the page, then update
80317@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
80318
80319 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
80320 unsigned long write_mask =
80321- local_read(&bpage->write) & ~RB_WRITE_MASK;
80322+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
80323 unsigned long event_length = rb_event_length(event);
80324 /*
80325 * This is on the tail page. It is possible that
80326@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
80327 */
80328 old_index += write_mask;
80329 new_index += write_mask;
80330- index = local_cmpxchg(&bpage->write, old_index, new_index);
80331+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
80332 if (index == old_index) {
80333 /* update counters */
80334 local_sub(event_length, &cpu_buffer->entries_bytes);
80335@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
80336
80337 /* Do the likely case first */
80338 if (likely(bpage->page == (void *)addr)) {
80339- local_dec(&bpage->entries);
80340+ local_dec_unchecked(&bpage->entries);
80341 return;
80342 }
80343
80344@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
80345 start = bpage;
80346 do {
80347 if (bpage->page == (void *)addr) {
80348- local_dec(&bpage->entries);
80349+ local_dec_unchecked(&bpage->entries);
80350 return;
80351 }
80352 rb_inc_page(cpu_buffer, &bpage);
80353@@ -2926,7 +2926,7 @@ static inline unsigned long
80354 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
80355 {
80356 return local_read(&cpu_buffer->entries) -
80357- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
80358+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
80359 }
80360
80361 /**
80362@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
80363 return 0;
80364
80365 cpu_buffer = buffer->buffers[cpu];
80366- ret = local_read(&cpu_buffer->overrun);
80367+ ret = local_read_unchecked(&cpu_buffer->overrun);
80368
80369 return ret;
80370 }
80371@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
80372 return 0;
80373
80374 cpu_buffer = buffer->buffers[cpu];
80375- ret = local_read(&cpu_buffer->commit_overrun);
80376+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
80377
80378 return ret;
80379 }
80380@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
80381 /* if you care about this being correct, lock the buffer */
80382 for_each_buffer_cpu(buffer, cpu) {
80383 cpu_buffer = buffer->buffers[cpu];
80384- overruns += local_read(&cpu_buffer->overrun);
80385+ overruns += local_read_unchecked(&cpu_buffer->overrun);
80386 }
80387
80388 return overruns;
80389@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
80390 /*
80391 * Reset the reader page to size zero.
80392 */
80393- local_set(&cpu_buffer->reader_page->write, 0);
80394- local_set(&cpu_buffer->reader_page->entries, 0);
80395+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
80396+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
80397 local_set(&cpu_buffer->reader_page->page->commit, 0);
80398 cpu_buffer->reader_page->real_end = 0;
80399
80400@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
80401 * want to compare with the last_overrun.
80402 */
80403 smp_mb();
80404- overwrite = local_read(&(cpu_buffer->overrun));
80405+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
80406
80407 /*
80408 * Here's the tricky part.
80409@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80410
80411 cpu_buffer->head_page
80412 = list_entry(cpu_buffer->pages, struct buffer_page, list);
80413- local_set(&cpu_buffer->head_page->write, 0);
80414- local_set(&cpu_buffer->head_page->entries, 0);
80415+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
80416+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
80417 local_set(&cpu_buffer->head_page->page->commit, 0);
80418
80419 cpu_buffer->head_page->read = 0;
80420@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80421
80422 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
80423 INIT_LIST_HEAD(&cpu_buffer->new_pages);
80424- local_set(&cpu_buffer->reader_page->write, 0);
80425- local_set(&cpu_buffer->reader_page->entries, 0);
80426+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
80427+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
80428 local_set(&cpu_buffer->reader_page->page->commit, 0);
80429 cpu_buffer->reader_page->read = 0;
80430
80431 local_set(&cpu_buffer->entries_bytes, 0);
80432- local_set(&cpu_buffer->overrun, 0);
80433- local_set(&cpu_buffer->commit_overrun, 0);
80434+ local_set_unchecked(&cpu_buffer->overrun, 0);
80435+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
80436 local_set(&cpu_buffer->dropped_events, 0);
80437 local_set(&cpu_buffer->entries, 0);
80438 local_set(&cpu_buffer->committing, 0);
80439@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
80440 rb_init_page(bpage);
80441 bpage = reader->page;
80442 reader->page = *data_page;
80443- local_set(&reader->write, 0);
80444- local_set(&reader->entries, 0);
80445+ local_set_unchecked(&reader->write, 0);
80446+ local_set_unchecked(&reader->entries, 0);
80447 reader->read = 0;
80448 *data_page = bpage;
80449
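
The ring_buffer.c conversions (and the blktrace/mmiotrace dropped counters) to local_unchecked_t/atomic_unchecked_t are opt-outs from PaX REFCOUNT, which instruments the ordinary atomic types to trap on overflow; counters that are pure statistics or carry flag bits may legitimately wrap, so they switch to the unchecked variants. The distinction, sketched with GCC builtins rather than the PaX implementation:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* "checked": trap on overflow, like a REFCOUNT-instrumented atomic_t */
    static int inc_checked(int v)
    {
        int r;
        if (__builtin_add_overflow(v, 1, &r)) {
            fprintf(stderr, "refcount overflow trapped\n");
            abort();
        }
        return r;
    }

    /* "unchecked": wrapping is fine for pure statistics */
    static unsigned inc_unchecked(unsigned v) { return v + 1u; }

    int main(void)
    {
        printf("%u\n", inc_unchecked(UINT_MAX));  /* wraps to 0, by design */
        printf("%d\n", inc_checked(INT_MAX));     /* aborts */
        return 0;
    }
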
80450diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
80451index fe1d581..ea543f1b 100644
80452--- a/kernel/trace/trace.c
80453+++ b/kernel/trace/trace.c
80454@@ -2845,7 +2845,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
80455 return 0;
80456 }
80457
80458-int set_tracer_flag(unsigned int mask, int enabled)
80459+int set_tracer_flag(unsigned long mask, int enabled)
80460 {
80461 /* do nothing if flag is already set */
80462 if (!!(trace_flags & mask) == !!enabled)
80463@@ -4494,10 +4494,9 @@ static const struct file_operations tracing_dyn_info_fops = {
80464 };
80465 #endif
80466
80467-static struct dentry *d_tracer;
80468-
80469 struct dentry *tracing_init_dentry(void)
80470 {
80471+ static struct dentry *d_tracer;
80472 static int once;
80473
80474 if (d_tracer)
80475@@ -4517,10 +4516,9 @@ struct dentry *tracing_init_dentry(void)
80476 return d_tracer;
80477 }
80478
80479-static struct dentry *d_percpu;
80480-
80481 struct dentry *tracing_dentry_percpu(void)
80482 {
80483+ static struct dentry *d_percpu;
80484 static int once;
80485 struct dentry *d_tracer;
80486
80487diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
80488index 23f1d2c..6ca7a9b 100644
80489--- a/kernel/trace/trace.h
80490+++ b/kernel/trace/trace.h
80491@@ -840,7 +840,7 @@ extern const char *__stop___trace_bprintk_fmt[];
80492 void trace_printk_init_buffers(void);
80493 void trace_printk_start_comm(void);
80494 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
80495-int set_tracer_flag(unsigned int mask, int enabled);
80496+int set_tracer_flag(unsigned long mask, int enabled);
80497
80498 #undef FTRACE_ENTRY
80499 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
80500diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
80501index 880073d..42db7c3 100644
80502--- a/kernel/trace/trace_events.c
80503+++ b/kernel/trace/trace_events.c
80504@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
80505 struct ftrace_module_file_ops {
80506 struct list_head list;
80507 struct module *mod;
80508- struct file_operations id;
80509- struct file_operations enable;
80510- struct file_operations format;
80511- struct file_operations filter;
80512 };
80513
80514 static struct ftrace_module_file_ops *
80515@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
80516
80517 file_ops->mod = mod;
80518
80519- file_ops->id = ftrace_event_id_fops;
80520- file_ops->id.owner = mod;
80521-
80522- file_ops->enable = ftrace_enable_fops;
80523- file_ops->enable.owner = mod;
80524-
80525- file_ops->filter = ftrace_event_filter_fops;
80526- file_ops->filter.owner = mod;
80527-
80528- file_ops->format = ftrace_event_format_fops;
80529- file_ops->format.owner = mod;
80530+ pax_open_kernel();
80531+ mod->trace_id.owner = mod;
80532+ mod->trace_enable.owner = mod;
80533+ mod->trace_filter.owner = mod;
80534+ mod->trace_format.owner = mod;
80535+ pax_close_kernel();
80536
80537 list_add(&file_ops->list, &ftrace_module_file_list);
80538
80539@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
80540
80541 for_each_event(call, start, end) {
80542 __trace_add_event_call(*call, mod,
80543- &file_ops->id, &file_ops->enable,
80544- &file_ops->filter, &file_ops->format);
80545+ &mod->trace_id, &mod->trace_enable,
80546+ &mod->trace_filter, &mod->trace_format);
80547 }
80548 }
80549
80550diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
80551index fd3c8aa..5f324a6 100644
80552--- a/kernel/trace/trace_mmiotrace.c
80553+++ b/kernel/trace/trace_mmiotrace.c
80554@@ -24,7 +24,7 @@ struct header_iter {
80555 static struct trace_array *mmio_trace_array;
80556 static bool overrun_detected;
80557 static unsigned long prev_overruns;
80558-static atomic_t dropped_count;
80559+static atomic_unchecked_t dropped_count;
80560
80561 static void mmio_reset_data(struct trace_array *tr)
80562 {
80563@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
80564
80565 static unsigned long count_overruns(struct trace_iterator *iter)
80566 {
80567- unsigned long cnt = atomic_xchg(&dropped_count, 0);
80568+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
80569 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
80570
80571 if (over > prev_overruns)
80572@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
80573 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
80574 sizeof(*entry), 0, pc);
80575 if (!event) {
80576- atomic_inc(&dropped_count);
80577+ atomic_inc_unchecked(&dropped_count);
80578 return;
80579 }
80580 entry = ring_buffer_event_data(event);
80581@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
80582 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
80583 sizeof(*entry), 0, pc);
80584 if (!event) {
80585- atomic_inc(&dropped_count);
80586+ atomic_inc_unchecked(&dropped_count);
80587 return;
80588 }
80589 entry = ring_buffer_event_data(event);
80590diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
80591index 194d796..76edb8f 100644
80592--- a/kernel/trace/trace_output.c
80593+++ b/kernel/trace/trace_output.c
80594@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
80595
80596 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
80597 if (!IS_ERR(p)) {
80598- p = mangle_path(s->buffer + s->len, p, "\n");
80599+ p = mangle_path(s->buffer + s->len, p, "\n\\");
80600 if (p) {
80601 s->len = p - s->buffer;
80602 return 1;
80603@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
80604 goto out;
80605 }
80606
80607+ pax_open_kernel();
80608 if (event->funcs->trace == NULL)
80609- event->funcs->trace = trace_nop_print;
80610+ *(void **)&event->funcs->trace = trace_nop_print;
80611 if (event->funcs->raw == NULL)
80612- event->funcs->raw = trace_nop_print;
80613+ *(void **)&event->funcs->raw = trace_nop_print;
80614 if (event->funcs->hex == NULL)
80615- event->funcs->hex = trace_nop_print;
80616+ *(void **)&event->funcs->hex = trace_nop_print;
80617 if (event->funcs->binary == NULL)
80618- event->funcs->binary = trace_nop_print;
80619+ *(void **)&event->funcs->binary = trace_nop_print;
80620+ pax_close_kernel();
80621
80622 key = event->type & (EVENT_HASHSIZE - 1);
80623
80624diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
80625index 83a8b5b..0bf39a9 100644
80626--- a/kernel/trace/trace_stack.c
80627+++ b/kernel/trace/trace_stack.c
80628@@ -52,7 +52,7 @@ static inline void check_stack(void)
80629 return;
80630
80631 /* we do not handle interrupt stacks yet */
80632- if (!object_is_on_stack(&this_size))
80633+ if (!object_starts_on_stack(&this_size))
80634 return;
80635
80636 local_irq_save(flags);
80637diff --git a/kernel/user.c b/kernel/user.c
80638index 7f6ff2b..1ac8f18 100644
80639--- a/kernel/user.c
80640+++ b/kernel/user.c
80641@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
80642 .count = 4294967295U,
80643 },
80644 },
80645- .kref = {
80646- .refcount = ATOMIC_INIT(3),
80647- },
80648+ .count = ATOMIC_INIT(3),
80649 .owner = GLOBAL_ROOT_UID,
80650 .group = GLOBAL_ROOT_GID,
80651 .proc_inum = PROC_USER_INIT_INO,
80652diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
80653index f359dc7..ddc606a 100644
80654--- a/kernel/user_namespace.c
80655+++ b/kernel/user_namespace.c
80656@@ -89,7 +89,7 @@ int create_user_ns(struct cred *new)
80657 return ret;
80658 }
80659
80660- kref_init(&ns->kref);
80661+ atomic_set(&ns->count, 1);
80662 /* Leave the new->user_ns reference with the new user namespace. */
80663 ns->parent = parent_ns;
80664 ns->owner = owner;
80665@@ -117,15 +117,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
80666 return create_user_ns(cred);
80667 }
80668
80669-void free_user_ns(struct kref *kref)
80670+void free_user_ns(struct user_namespace *ns)
80671 {
80672- struct user_namespace *parent, *ns =
80673- container_of(kref, struct user_namespace, kref);
80674+ struct user_namespace *parent;
80675
80676- parent = ns->parent;
80677- proc_free_inum(ns->proc_inum);
80678- kmem_cache_free(user_ns_cachep, ns);
80679- put_user_ns(parent);
80680+ do {
80681+ parent = ns->parent;
80682+ proc_free_inum(ns->proc_inum);
80683+ kmem_cache_free(user_ns_cachep, ns);
80684+ ns = parent;
80685+ } while (atomic_dec_and_test(&parent->count));
80686 }
80687 EXPORT_SYMBOL(free_user_ns);
80688
80689@@ -819,7 +820,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
80690 if (atomic_read(&current->mm->mm_users) > 1)
80691 return -EINVAL;
80692
80693- if (current->fs->users != 1)
80694+ if (atomic_read(&current->fs->users) != 1)
80695 return -EINVAL;
80696
80697 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
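
The free_user_ns() rewrite drops the kref release callback in favor of an explicit loop: free the namespace, drop one reference on its parent, and continue up the chain only while that was the last reference, so deep parent chains are torn down iteratively rather than by recursive puts. The same shape in user-space C (reference counting reduced to a plain int):

    #include <stdio.h>
    #include <stdlib.h>

    struct ns {
        int count;              /* stands in for atomic_t count */
        struct ns *parent;
    };

    static void free_ns(struct ns *ns)
    {
        struct ns *parent;

        do {
            parent = ns->parent;
            printf("freeing ns %p\n", (void *)ns);
            free(ns);
            ns = parent;
        } while (ns && --ns->count == 0); /* stop at a live parent */
    }

    int main(void)
    {
        struct ns *root = calloc(1, sizeof(*root));
        struct ns *child = calloc(1, sizeof(*child));
        root->count = 1;        /* one reference, held by child */
        child->count = 1;
        child->parent = root;
        free_ns(child);         /* frees child, then root */
        return 0;
    }
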
80698diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
80699index 63da38c..639904e 100644
80700--- a/kernel/utsname_sysctl.c
80701+++ b/kernel/utsname_sysctl.c
80702@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
80703 static int proc_do_uts_string(ctl_table *table, int write,
80704 void __user *buffer, size_t *lenp, loff_t *ppos)
80705 {
80706- struct ctl_table uts_table;
80707+ ctl_table_no_const uts_table;
80708 int r;
80709 memcpy(&uts_table, table, sizeof(uts_table));
80710 uts_table.data = get_uts(table, write);
80711diff --git a/kernel/watchdog.c b/kernel/watchdog.c
80712index 75a2ab3..5961da7 100644
80713--- a/kernel/watchdog.c
80714+++ b/kernel/watchdog.c
80715@@ -527,7 +527,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
80716 }
80717 #endif /* CONFIG_SYSCTL */
80718
80719-static struct smp_hotplug_thread watchdog_threads = {
80720+static struct smp_hotplug_thread watchdog_threads __read_only = {
80721 .store = &softlockup_watchdog,
80722 .thread_should_run = watchdog_should_run,
80723 .thread_fn = watchdog,
80724diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
80725index 67604e5..fe94fb1 100644
80726--- a/lib/Kconfig.debug
80727+++ b/lib/Kconfig.debug
80728@@ -550,7 +550,7 @@ config DEBUG_MUTEXES
80729
80730 config DEBUG_LOCK_ALLOC
80731 bool "Lock debugging: detect incorrect freeing of live locks"
80732- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80733+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80734 select DEBUG_SPINLOCK
80735 select DEBUG_MUTEXES
80736 select LOCKDEP
80737@@ -564,7 +564,7 @@ config DEBUG_LOCK_ALLOC
80738
80739 config PROVE_LOCKING
80740 bool "Lock debugging: prove locking correctness"
80741- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80742+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80743 select LOCKDEP
80744 select DEBUG_SPINLOCK
80745 select DEBUG_MUTEXES
80746@@ -670,7 +670,7 @@ config LOCKDEP
80747
80748 config LOCK_STAT
80749 bool "Lock usage statistics"
80750- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80751+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80752 select LOCKDEP
80753 select DEBUG_SPINLOCK
80754 select DEBUG_MUTEXES
80755@@ -1278,6 +1278,7 @@ config LATENCYTOP
80756 depends on DEBUG_KERNEL
80757 depends on STACKTRACE_SUPPORT
80758 depends on PROC_FS
80759+ depends on !GRKERNSEC_HIDESYM
80760 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
80761 select KALLSYMS
80762 select KALLSYMS_ALL
80763@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
80764
80765 config PROVIDE_OHCI1394_DMA_INIT
80766 bool "Remote debugging over FireWire early on boot"
80767- depends on PCI && X86
80768+ depends on PCI && X86 && !GRKERNSEC
80769 help
80770 If you want to debug problems which hang or crash the kernel early
80771 on boot and the crashing machine has a FireWire port, you can use
80772@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
80773
80774 config FIREWIRE_OHCI_REMOTE_DMA
80775 bool "Remote debugging over FireWire with firewire-ohci"
80776- depends on FIREWIRE_OHCI
80777+ depends on FIREWIRE_OHCI && !GRKERNSEC
80778 help
80779 This option lets you use the FireWire bus for remote debugging
80780 with help of the firewire-ohci driver. It enables unfiltered
80781diff --git a/lib/Makefile b/lib/Makefile
80782index 02ed6c0..bd243da 100644
80783--- a/lib/Makefile
80784+++ b/lib/Makefile
80785@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
80786
80787 obj-$(CONFIG_BTREE) += btree.o
80788 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
80789-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
80790+obj-y += list_debug.o
80791 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
80792
80793 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
80794diff --git a/lib/bitmap.c b/lib/bitmap.c
80795index 06f7e4f..f3cf2b0 100644
80796--- a/lib/bitmap.c
80797+++ b/lib/bitmap.c
80798@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
80799 {
80800 int c, old_c, totaldigits, ndigits, nchunks, nbits;
80801 u32 chunk;
80802- const char __user __force *ubuf = (const char __user __force *)buf;
80803+ const char __user *ubuf = (const char __force_user *)buf;
80804
80805 bitmap_zero(maskp, nmaskbits);
80806
80807@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
80808 {
80809 if (!access_ok(VERIFY_READ, ubuf, ulen))
80810 return -EFAULT;
80811- return __bitmap_parse((const char __force *)ubuf,
80812+ return __bitmap_parse((const char __force_kernel *)ubuf,
80813 ulen, 1, maskp, nmaskbits);
80814
80815 }
80816@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
80817 {
80818 unsigned a, b;
80819 int c, old_c, totaldigits;
80820- const char __user __force *ubuf = (const char __user __force *)buf;
80821+ const char __user *ubuf = (const char __force_user *)buf;
80822 int exp_digit, in_range;
80823
80824 totaldigits = c = 0;
80825@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
80826 {
80827 if (!access_ok(VERIFY_READ, ubuf, ulen))
80828 return -EFAULT;
80829- return __bitmap_parselist((const char __force *)ubuf,
80830+ return __bitmap_parselist((const char __force_kernel *)ubuf,
80831 ulen, 1, maskp, nmaskbits);
80832 }
80833 EXPORT_SYMBOL(bitmap_parselist_user);
80834diff --git a/lib/bug.c b/lib/bug.c
80835index d0cdf14..4d07bd2 100644
80836--- a/lib/bug.c
80837+++ b/lib/bug.c
80838@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
80839 return BUG_TRAP_TYPE_NONE;
80840
80841 bug = find_bug(bugaddr);
80842+ if (!bug)
80843+ return BUG_TRAP_TYPE_NONE;
80844
80845 file = NULL;
80846 line = 0;
80847diff --git a/lib/debugobjects.c b/lib/debugobjects.c
80848index d11808c..dc2d6f8 100644
80849--- a/lib/debugobjects.c
80850+++ b/lib/debugobjects.c
80851@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
80852 if (limit > 4)
80853 return;
80854
80855- is_on_stack = object_is_on_stack(addr);
80856+ is_on_stack = object_starts_on_stack(addr);
80857 if (is_on_stack == onstack)
80858 return;
80859
80860diff --git a/lib/devres.c b/lib/devres.c
80861index 80b9c76..9e32279 100644
80862--- a/lib/devres.c
80863+++ b/lib/devres.c
80864@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
80865 void devm_iounmap(struct device *dev, void __iomem *addr)
80866 {
80867 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
80868- (void *)addr));
80869+ (void __force *)addr));
80870 iounmap(addr);
80871 }
80872 EXPORT_SYMBOL(devm_iounmap);
80873@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
80874 {
80875 ioport_unmap(addr);
80876 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
80877- devm_ioport_map_match, (void *)addr));
80878+ devm_ioport_map_match, (void __force *)addr));
80879 }
80880 EXPORT_SYMBOL(devm_ioport_unmap);
80881
80882diff --git a/lib/div64.c b/lib/div64.c
80883index a163b6c..9618fa5 100644
80884--- a/lib/div64.c
80885+++ b/lib/div64.c
80886@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
80887 EXPORT_SYMBOL(__div64_32);
80888
80889 #ifndef div_s64_rem
80890-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80891+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80892 {
80893 u64 quotient;
80894
80895@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
80896 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
80897 */
80898 #ifndef div64_u64
80899-u64 div64_u64(u64 dividend, u64 divisor)
80900+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
80901 {
80902 u32 high = divisor >> 32;
80903 u64 quot;
80904diff --git a/lib/dma-debug.c b/lib/dma-debug.c
80905index 5e396ac..58d5de1 100644
80906--- a/lib/dma-debug.c
80907+++ b/lib/dma-debug.c
80908@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
80909
80910 void dma_debug_add_bus(struct bus_type *bus)
80911 {
80912- struct notifier_block *nb;
80913+ notifier_block_no_const *nb;
80914
80915 if (global_disable)
80916 return;
80917@@ -942,7 +942,7 @@ out:
80918
80919 static void check_for_stack(struct device *dev, void *addr)
80920 {
80921- if (object_is_on_stack(addr))
80922+ if (object_starts_on_stack(addr))
80923 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
80924 "stack [addr=%p]\n", addr);
80925 }
80926diff --git a/lib/inflate.c b/lib/inflate.c
80927index 013a761..c28f3fc 100644
80928--- a/lib/inflate.c
80929+++ b/lib/inflate.c
80930@@ -269,7 +269,7 @@ static void free(void *where)
80931 malloc_ptr = free_mem_ptr;
80932 }
80933 #else
80934-#define malloc(a) kmalloc(a, GFP_KERNEL)
80935+#define malloc(a) kmalloc((a), GFP_KERNEL)
80936 #define free(a) kfree(a)
80937 #endif
80938
80939diff --git a/lib/ioremap.c b/lib/ioremap.c
80940index 0c9216c..863bd89 100644
80941--- a/lib/ioremap.c
80942+++ b/lib/ioremap.c
80943@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
80944 unsigned long next;
80945
80946 phys_addr -= addr;
80947- pmd = pmd_alloc(&init_mm, pud, addr);
80948+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
80949 if (!pmd)
80950 return -ENOMEM;
80951 do {
80952@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
80953 unsigned long next;
80954
80955 phys_addr -= addr;
80956- pud = pud_alloc(&init_mm, pgd, addr);
80957+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
80958 if (!pud)
80959 return -ENOMEM;
80960 do {
80961diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
80962index bd2bea9..6b3c95e 100644
80963--- a/lib/is_single_threaded.c
80964+++ b/lib/is_single_threaded.c
80965@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
80966 struct task_struct *p, *t;
80967 bool ret;
80968
80969+ if (!mm)
80970+ return true;
80971+
80972 if (atomic_read(&task->signal->live) != 1)
80973 return false;
80974
80975diff --git a/lib/kobject.c b/lib/kobject.c
80976index a654866..a4fd13d 100644
80977--- a/lib/kobject.c
80978+++ b/lib/kobject.c
80979@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
80980
80981
80982 static DEFINE_SPINLOCK(kobj_ns_type_lock);
80983-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
80984+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
80985
80986-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80987+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80988 {
80989 enum kobj_ns_type type = ops->type;
80990 int error;
80991diff --git a/lib/list_debug.c b/lib/list_debug.c
80992index c24c2f7..06e070b 100644
80993--- a/lib/list_debug.c
80994+++ b/lib/list_debug.c
80995@@ -11,7 +11,9 @@
80996 #include <linux/bug.h>
80997 #include <linux/kernel.h>
80998 #include <linux/rculist.h>
80999+#include <linux/mm.h>
81000
81001+#ifdef CONFIG_DEBUG_LIST
81002 /*
81003 * Insert a new entry between two known consecutive entries.
81004 *
81005@@ -19,21 +21,32 @@
81006 * the prev/next entries already!
81007 */
81008
81009-void __list_add(struct list_head *new,
81010- struct list_head *prev,
81011- struct list_head *next)
81012+static bool __list_add_debug(struct list_head *new,
81013+ struct list_head *prev,
81014+ struct list_head *next)
81015 {
81016- WARN(next->prev != prev,
81017+ if (WARN(next->prev != prev,
81018 "list_add corruption. next->prev should be "
81019 "prev (%p), but was %p. (next=%p).\n",
81020- prev, next->prev, next);
81021- WARN(prev->next != next,
81022+ prev, next->prev, next) ||
81023+ WARN(prev->next != next,
81024 "list_add corruption. prev->next should be "
81025 "next (%p), but was %p. (prev=%p).\n",
81026- next, prev->next, prev);
81027- WARN(new == prev || new == next,
81028- "list_add double add: new=%p, prev=%p, next=%p.\n",
81029- new, prev, next);
81030+ next, prev->next, prev) ||
81031+ WARN(new == prev || new == next,
81032+ "list_add double add: new=%p, prev=%p, next=%p.\n",
81033+ new, prev, next))
81034+ return false;
81035+ return true;
81036+}
81037+
81038+void __list_add(struct list_head *new,
81039+ struct list_head *prev,
81040+ struct list_head *next)
81041+{
81042+ if (!__list_add_debug(new, prev, next))
81043+ return;
81044+
81045 next->prev = new;
81046 new->next = next;
81047 new->prev = prev;
81048@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
81049 }
81050 EXPORT_SYMBOL(__list_add);
81051
81052-void __list_del_entry(struct list_head *entry)
81053+static bool __list_del_entry_debug(struct list_head *entry)
81054 {
81055 struct list_head *prev, *next;
81056
81057@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
81058 WARN(next->prev != entry,
81059 "list_del corruption. next->prev should be %p, "
81060 "but was %p\n", entry, next->prev))
81061+ return false;
81062+ return true;
81063+}
81064+
81065+void __list_del_entry(struct list_head *entry)
81066+{
81067+ if (!__list_del_entry_debug(entry))
81068 return;
81069
81070- __list_del(prev, next);
81071+ __list_del(entry->prev, entry->next);
81072 }
81073 EXPORT_SYMBOL(__list_del_entry);
81074
81075@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
81076 void __list_add_rcu(struct list_head *new,
81077 struct list_head *prev, struct list_head *next)
81078 {
81079- WARN(next->prev != prev,
81080- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
81081- prev, next->prev, next);
81082- WARN(prev->next != next,
81083- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
81084- next, prev->next, prev);
81085+ if (!__list_add_debug(new, prev, next))
81086+ return;
81087+
81088 new->next = next;
81089 new->prev = prev;
81090 rcu_assign_pointer(list_next_rcu(prev), new);
81091 next->prev = new;
81092 }
81093 EXPORT_SYMBOL(__list_add_rcu);
81094+#endif
81095+
81096+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
81097+{
81098+#ifdef CONFIG_DEBUG_LIST
81099+ if (!__list_add_debug(new, prev, next))
81100+ return;
81101+#endif
81102+
81103+ pax_open_kernel();
81104+ next->prev = new;
81105+ new->next = next;
81106+ new->prev = prev;
81107+ prev->next = new;
81108+ pax_close_kernel();
81109+}
81110+EXPORT_SYMBOL(__pax_list_add);
81111+
81112+void pax_list_del(struct list_head *entry)
81113+{
81114+#ifdef CONFIG_DEBUG_LIST
81115+ if (!__list_del_entry_debug(entry))
81116+ return;
81117+#endif
81118+
81119+ pax_open_kernel();
81120+ __list_del(entry->prev, entry->next);
81121+ entry->next = LIST_POISON1;
81122+ entry->prev = LIST_POISON2;
81123+ pax_close_kernel();
81124+}
81125+EXPORT_SYMBOL(pax_list_del);
81126+
81127+void pax_list_del_init(struct list_head *entry)
81128+{
81129+ pax_open_kernel();
81130+ __list_del(entry->prev, entry->next);
81131+ INIT_LIST_HEAD(entry);
81132+ pax_close_kernel();
81133+}
81134+EXPORT_SYMBOL(pax_list_del_init);
81135+
81136+void __pax_list_add_rcu(struct list_head *new,
81137+ struct list_head *prev, struct list_head *next)
81138+{
81139+#ifdef CONFIG_DEBUG_LIST
81140+ if (!__list_add_debug(new, prev, next))
81141+ return;
81142+#endif
81143+
81144+ pax_open_kernel();
81145+ new->next = next;
81146+ new->prev = prev;
81147+ rcu_assign_pointer(list_next_rcu(prev), new);
81148+ next->prev = new;
81149+ pax_close_kernel();
81150+}
81151+EXPORT_SYMBOL(__pax_list_add_rcu);
81152+
81153+void pax_list_del_rcu(struct list_head *entry)
81154+{
81155+#ifdef CONFIG_DEBUG_LIST
81156+ if (!__list_del_entry_debug(entry))
81157+ return;
81158+#endif
81159+
81160+ pax_open_kernel();
81161+ __list_del(entry->prev, entry->next);
81162+ entry->next = LIST_POISON1;
81163+ entry->prev = LIST_POISON2;
81164+ pax_close_kernel();
81165+}
81166+EXPORT_SYMBOL(pax_list_del_rcu);
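
[editor's note: the hunks above fold lib/list_debug.c's inline WARN() chains into boolean helpers (__list_add_debug(), __list_del_entry_debug()) that both the stock functions and the new pax_list_* variants share, so every insertion and deletion path runs the same corruption checks. A minimal userspace sketch of that refactor follows; warn() is a hypothetical stand-in for the kernel's WARN() macro, not a real API.]

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* hypothetical stand-in for the kernel's WARN(): report and return the condition */
static bool warn(bool cond, const char *msg)
{
	if (cond)
		fprintf(stderr, "warning: %s\n", msg);
	return cond;
}

/* all sanity checks in one predicate, shared by every insertion variant */
static bool list_add_ok(struct list_head *new, struct list_head *prev,
			struct list_head *next)
{
	if (warn(next->prev != prev, "list_add corruption: next->prev != prev") ||
	    warn(prev->next != next, "list_add corruption: prev->next != next") ||
	    warn(new == prev || new == next, "list_add double add"))
		return false;
	return true;
}

static void list_add_between(struct list_head *new, struct list_head *prev,
			     struct list_head *next)
{
	if (!list_add_ok(new, prev, next))
		return;		/* refuse to link into a corrupted list */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node;

	list_add_between(&node, &head, head.next);	/* links cleanly */
	list_add_between(&node, &head, head.next);	/* caught: double add */
	return 0;
}
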
81167diff --git a/lib/radix-tree.c b/lib/radix-tree.c
81168index e796429..6e38f9f 100644
81169--- a/lib/radix-tree.c
81170+++ b/lib/radix-tree.c
81171@@ -92,7 +92,7 @@ struct radix_tree_preload {
81172 int nr;
81173 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
81174 };
81175-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
81176+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
81177
81178 static inline void *ptr_to_indirect(void *ptr)
81179 {
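
[editor's note: the radix-tree hunk drops the explicit = { 0, } initializer from the per-CPU preload struct. C already zero-initializes objects with static storage duration, and omitting the initializer lets the variable sit in a bss-style section instead of the initialized-data image. A plain-static illustration, assuming ordinary statics; DEFINE_PER_CPU itself works differently.]

#include <assert.h>

struct preload { int nr; void *nodes[4]; };

static struct preload a;		/* zeroed by the C standard, lands in .bss */
static struct preload b = { 0, };	/* same value, but historically placed in .data */

int main(void)
{
	assert(a.nr == 0 && a.nodes[0] == 0);
	assert(b.nr == 0 && b.nodes[0] == 0);
	return 0;
}
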
81180diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
81181index bb2b201..46abaf9 100644
81182--- a/lib/strncpy_from_user.c
81183+++ b/lib/strncpy_from_user.c
81184@@ -21,7 +21,7 @@
81185 */
81186 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
81187 {
81188- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81189+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81190 long res = 0;
81191
81192 /*
81193diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
81194index a28df52..3d55877 100644
81195--- a/lib/strnlen_user.c
81196+++ b/lib/strnlen_user.c
81197@@ -26,7 +26,7 @@
81198 */
81199 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
81200 {
81201- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81202+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81203 long align, res = 0;
81204 unsigned long c;
81205
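
[editor's note: the strncpy_from_user and strnlen_user hunks make the same change: WORD_AT_A_TIME_CONSTANTS is known at compile time, so declaring the local static const materializes one copy in read-only data instead of rebuilding the struct on the stack on every call. A sketch with a made-up constants struct; the bit trick shown is the standard has-zero-byte test that word-at-a-time string code relies on.]

#include <stdint.h>
#include <stdio.h>

/* made-up equivalent of struct word_at_a_time */
struct word_constants { uint64_t low_bits, high_bits; };
#define WORD_CONSTANTS_INIT { 0x0101010101010101ULL, 0x8080808080808080ULL }

static uint64_t has_zero_byte(uint64_t word)
{
	/* one shared read-only copy instead of a per-call stack temporary */
	static const struct word_constants c = WORD_CONSTANTS_INIT;

	return (word - c.low_bits) & ~word & c.high_bits;
}

int main(void)
{
	printf("%d\n", has_zero_byte(0x1122330011223344ULL) != 0);	/* 1 */
	printf("%d\n", has_zero_byte(0x1122334455667788ULL) != 0);	/* 0 */
	return 0;
}
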
81206diff --git a/lib/swiotlb.c b/lib/swiotlb.c
81207index 196b069..358f342 100644
81208--- a/lib/swiotlb.c
81209+++ b/lib/swiotlb.c
81210@@ -642,7 +642,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
81211
81212 void
81213 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
81214- dma_addr_t dev_addr)
81215+ dma_addr_t dev_addr, struct dma_attrs *attrs)
81216 {
81217 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
81218
81219diff --git a/lib/vsprintf.c b/lib/vsprintf.c
81220index fab33a9..3b5fe68 100644
81221--- a/lib/vsprintf.c
81222+++ b/lib/vsprintf.c
81223@@ -16,6 +16,9 @@
81224 * - scnprintf and vscnprintf
81225 */
81226
81227+#ifdef CONFIG_GRKERNSEC_HIDESYM
81228+#define __INCLUDED_BY_HIDESYM 1
81229+#endif
81230 #include <stdarg.h>
81231 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
81232 #include <linux/types.h>
81233@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
81234 char sym[KSYM_SYMBOL_LEN];
81235 if (ext == 'B')
81236 sprint_backtrace(sym, value);
81237- else if (ext != 'f' && ext != 's')
81238+ else if (ext != 'f' && ext != 's' && ext != 'a')
81239 sprint_symbol(sym, value);
81240 else
81241 sprint_symbol_no_offset(sym, value);
81242@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
81243 return number(buf, end, *(const netdev_features_t *)addr, spec);
81244 }
81245
81246+#ifdef CONFIG_GRKERNSEC_HIDESYM
81247+int kptr_restrict __read_mostly = 2;
81248+#else
81249 int kptr_restrict __read_mostly;
81250+#endif
81251
81252 /*
81253 * Show a '%p' thing. A kernel extension is that the '%p' is followed
81254@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
81255 * - 'S' For symbolic direct pointers with offset
81256 * - 's' For symbolic direct pointers without offset
81257 * - 'B' For backtraced symbolic direct pointers with offset
81258+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
81259+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
81260 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
81261 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
81262 * - 'M' For a 6-byte MAC address, it prints the address in the
81263@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81264
81265 if (!ptr && *fmt != 'K') {
81266 /*
81267- * Print (null) with the same width as a pointer so it makes
81268+ * Print (nil) with the same width as a pointer so it makes
81269 * tabular output look nice.
81270 */
81271 if (spec.field_width == -1)
81272 spec.field_width = default_width;
81273- return string(buf, end, "(null)", spec);
81274+ return string(buf, end, "(nil)", spec);
81275 }
81276
81277 switch (*fmt) {
81278@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81279 /* Fallthrough */
81280 case 'S':
81281 case 's':
81282+#ifdef CONFIG_GRKERNSEC_HIDESYM
81283+ break;
81284+#else
81285+ return symbol_string(buf, end, ptr, spec, *fmt);
81286+#endif
81287+ case 'A':
81288+ case 'a':
81289 case 'B':
81290 return symbol_string(buf, end, ptr, spec, *fmt);
81291 case 'R':
81292@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81293 va_end(va);
81294 return buf;
81295 }
81296+ case 'P':
81297+ break;
81298 case 'K':
81299 /*
81300 * %pK cannot be used in IRQ context because its test
81301@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81302 }
81303 break;
81304 }
81305+
81306+#ifdef CONFIG_GRKERNSEC_HIDESYM
81307+	/* 'P' = pointers approved for copying to userland,
81308+	   as in the /proc/kallsyms case: we make it display nothing
81309+	   for non-root users and the real contents for root users.
81310+	   'K' pointers are also ignored, since we force their NULLing
81311+	   for non-root users above.
81312+	*/
81313+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
81314+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
81315+ dump_stack();
81316+ ptr = NULL;
81317+ }
81318+#endif
81319+
81320 spec.flags |= SMALL;
81321 if (spec.field_width == -1) {
81322 spec.field_width = default_width;
81323@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
81324 typeof(type) value; \
81325 if (sizeof(type) == 8) { \
81326 args = PTR_ALIGN(args, sizeof(u32)); \
81327- *(u32 *)&value = *(u32 *)args; \
81328- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
81329+ *(u32 *)&value = *(const u32 *)args; \
81330+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
81331 } else { \
81332 args = PTR_ALIGN(args, sizeof(type)); \
81333- value = *(typeof(type) *)args; \
81334+ value = *(const typeof(type) *)args; \
81335 } \
81336 args += sizeof(type); \
81337 value; \
81338@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
81339 case FORMAT_TYPE_STR: {
81340 const char *str_arg = args;
81341 args += strlen(str_arg) + 1;
81342- str = string(str, end, (char *)str_arg, spec);
81343+ str = string(str, end, str_arg, spec);
81344 break;
81345 }
81346
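
[editor's note: the vsprintf changes default kptr_restrict to 2 under GRKERNSEC_HIDESYM, add 'a'/'A' as HIDESYM-approved symbol specifiers plus a pass-through 'P', and NULL any kernel pointer about to be formatted into a userland-visible buffer. A loose userspace sketch of the masking policy; ptr_restrict and print_ptr() are hypothetical names, not the kernel interface.]

#include <stdbool.h>
#include <stdio.h>

/* hypothetical knob mirroring kptr_restrict: 0 = show, 1 = privileged only, 2 = always hide */
static int ptr_restrict = 2;

static void print_ptr(const void *p, bool privileged)
{
	if (ptr_restrict == 0 || (ptr_restrict == 1 && privileged))
		printf("%p\n", p);
	else
		printf("0000000000000000\n");	/* masked, as %pK does for non-root */
}

int main(void)
{
	int x;

	print_ptr(&x, false);	/* masked under the default policy */
	ptr_restrict = 0;
	print_ptr(&x, true);	/* real address once policy allows it */
	return 0;
}
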
81347diff --git a/localversion-grsec b/localversion-grsec
81348new file mode 100644
81349index 0000000..7cd6065
81350--- /dev/null
81351+++ b/localversion-grsec
81352@@ -0,0 +1 @@
81353+-grsec
81354diff --git a/mm/Kconfig b/mm/Kconfig
81355index 278e3ab..87c384d 100644
81356--- a/mm/Kconfig
81357+++ b/mm/Kconfig
81358@@ -286,10 +286,10 @@ config KSM
81359 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
81360
81361 config DEFAULT_MMAP_MIN_ADDR
81362- int "Low address space to protect from user allocation"
81363+ int "Low address space to protect from user allocation"
81364 depends on MMU
81365- default 4096
81366- help
81367+ default 65536
81368+ help
81369 This is the portion of low virtual memory which should be protected
81370 from userspace allocation. Keeping a user from writing to low pages
81371 can help reduce the impact of kernel NULL pointer bugs.
81372@@ -320,7 +320,7 @@ config MEMORY_FAILURE
81373
81374 config HWPOISON_INJECT
81375 tristate "HWPoison pages injector"
81376- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
81377+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
81378 select PROC_PAGE_MONITOR
81379
81380 config NOMMU_INITIAL_TRIM_EXCESS
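
[editor's note: raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 widens the unmappable region at the bottom of the address space, so a kernel NULL (or small-offset) dereference cannot land in attacker-mapped memory. A quick probe of the effective limit; on any configuration with a nonzero vm.mmap_min_addr, and without CAP_SYS_RAWIO, the fixed low mapping should fail.]

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* try to map the first page of the address space */
	void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("low mapping refused: %s (mmap_min_addr in effect)\n",
		       strerror(errno));
	else
		printf("mapped page zero at %p: mmap_min_addr must be 0\n", p);
	return 0;
}
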
81381diff --git a/mm/filemap.c b/mm/filemap.c
81382index 83efee7..3f99381 100644
81383--- a/mm/filemap.c
81384+++ b/mm/filemap.c
81385@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
81386 struct address_space *mapping = file->f_mapping;
81387
81388 if (!mapping->a_ops->readpage)
81389- return -ENOEXEC;
81390+ return -ENODEV;
81391 file_accessed(file);
81392 vma->vm_ops = &generic_file_vm_ops;
81393 return 0;
81394@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
81395 *pos = i_size_read(inode);
81396
81397 if (limit != RLIM_INFINITY) {
81398+		gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
81399 if (*pos >= limit) {
81400 send_sig(SIGXFSZ, current, 0);
81401 return -EFBIG;
81402diff --git a/mm/fremap.c b/mm/fremap.c
81403index a0aaf0e..20325c3 100644
81404--- a/mm/fremap.c
81405+++ b/mm/fremap.c
81406@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
81407 retry:
81408 vma = find_vma(mm, start);
81409
81410+#ifdef CONFIG_PAX_SEGMEXEC
81411+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
81412+ goto out;
81413+#endif
81414+
81415 /*
81416 * Make sure the vma is shared, that it supports prefaulting,
81417 * and that the remapped range is valid and fully within
81418diff --git a/mm/highmem.c b/mm/highmem.c
81419index b32b70c..e512eb0 100644
81420--- a/mm/highmem.c
81421+++ b/mm/highmem.c
81422@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
81423 * So no dangers, even with speculative execution.
81424 */
81425 page = pte_page(pkmap_page_table[i]);
81426+ pax_open_kernel();
81427 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
81428-
81429+ pax_close_kernel();
81430 set_page_address(page, NULL);
81431 need_flush = 1;
81432 }
81433@@ -198,9 +199,11 @@ start:
81434 }
81435 }
81436 vaddr = PKMAP_ADDR(last_pkmap_nr);
81437+
81438+ pax_open_kernel();
81439 set_pte_at(&init_mm, vaddr,
81440 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
81441-
81442+ pax_close_kernel();
81443 pkmap_count[last_pkmap_nr] = 1;
81444 set_page_address(page, (void *)vaddr);
81445
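
[editor's note: the highmem hunks bracket pkmap page-table writes with pax_open_kernel()/pax_close_kernel(), which under PaX temporarily lift the write protection on otherwise read-only kernel data. A loose userspace analogy of the open-write-close discipline using mprotect(); the real kernel primitive works differently, for example by toggling CR0.WP.]

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void open_rw(void *page)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void close_rw(void *page) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
	/* a normally read-only "table", like pkmap_page_table under KERNEXEC */
	char *table = mmap(NULL, 4096, PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;

	open_rw(table);			/* pax_open_kernel() analogue */
	strcpy(table, "updated entry");
	close_rw(table);		/* pax_close_kernel() analogue */

	puts(table);			/* reading stays permitted */
	return 0;
}
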
81446diff --git a/mm/hugetlb.c b/mm/hugetlb.c
81447index 88eb939..0bd9e7d 100644
81448--- a/mm/hugetlb.c
81449+++ b/mm/hugetlb.c
81450@@ -2008,15 +2008,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
81451 struct hstate *h = &default_hstate;
81452 unsigned long tmp;
81453 int ret;
81454+ ctl_table_no_const hugetlb_table;
81455
81456 tmp = h->max_huge_pages;
81457
81458 if (write && h->order >= MAX_ORDER)
81459 return -EINVAL;
81460
81461- table->data = &tmp;
81462- table->maxlen = sizeof(unsigned long);
81463- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81464+ hugetlb_table = *table;
81465+ hugetlb_table.data = &tmp;
81466+ hugetlb_table.maxlen = sizeof(unsigned long);
81467+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81468 if (ret)
81469 goto out;
81470
81471@@ -2073,15 +2075,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
81472 struct hstate *h = &default_hstate;
81473 unsigned long tmp;
81474 int ret;
81475+ ctl_table_no_const hugetlb_table;
81476
81477 tmp = h->nr_overcommit_huge_pages;
81478
81479 if (write && h->order >= MAX_ORDER)
81480 return -EINVAL;
81481
81482- table->data = &tmp;
81483- table->maxlen = sizeof(unsigned long);
81484- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81485+ hugetlb_table = *table;
81486+ hugetlb_table.data = &tmp;
81487+ hugetlb_table.maxlen = sizeof(unsigned long);
81488+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81489 if (ret)
81490 goto out;
81491
81492@@ -2515,6 +2519,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
81493 return 1;
81494 }
81495
81496+#ifdef CONFIG_PAX_SEGMEXEC
81497+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
81498+{
81499+ struct mm_struct *mm = vma->vm_mm;
81500+ struct vm_area_struct *vma_m;
81501+ unsigned long address_m;
81502+ pte_t *ptep_m;
81503+
81504+ vma_m = pax_find_mirror_vma(vma);
81505+ if (!vma_m)
81506+ return;
81507+
81508+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81509+ address_m = address + SEGMEXEC_TASK_SIZE;
81510+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
81511+ get_page(page_m);
81512+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
81513+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
81514+}
81515+#endif
81516+
81517 /*
81518 * Hugetlb_cow() should be called with page lock of the original hugepage held.
81519 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
81520@@ -2633,6 +2658,11 @@ retry_avoidcopy:
81521 make_huge_pte(vma, new_page, 1));
81522 page_remove_rmap(old_page);
81523 hugepage_add_new_anon_rmap(new_page, vma, address);
81524+
81525+#ifdef CONFIG_PAX_SEGMEXEC
81526+ pax_mirror_huge_pte(vma, address, new_page);
81527+#endif
81528+
81529 /* Make the old page be freed below */
81530 new_page = old_page;
81531 }
81532@@ -2792,6 +2822,10 @@ retry:
81533 && (vma->vm_flags & VM_SHARED)));
81534 set_huge_pte_at(mm, address, ptep, new_pte);
81535
81536+#ifdef CONFIG_PAX_SEGMEXEC
81537+ pax_mirror_huge_pte(vma, address, page);
81538+#endif
81539+
81540 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
81541 /* Optimization, do the COW without a second fault */
81542 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
81543@@ -2821,6 +2855,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81544 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
81545 struct hstate *h = hstate_vma(vma);
81546
81547+#ifdef CONFIG_PAX_SEGMEXEC
81548+ struct vm_area_struct *vma_m;
81549+#endif
81550+
81551 address &= huge_page_mask(h);
81552
81553 ptep = huge_pte_offset(mm, address);
81554@@ -2834,6 +2872,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81555 VM_FAULT_SET_HINDEX(hstate_index(h));
81556 }
81557
81558+#ifdef CONFIG_PAX_SEGMEXEC
81559+ vma_m = pax_find_mirror_vma(vma);
81560+ if (vma_m) {
81561+ unsigned long address_m;
81562+
81563+ if (vma->vm_start > vma_m->vm_start) {
81564+ address_m = address;
81565+ address -= SEGMEXEC_TASK_SIZE;
81566+ vma = vma_m;
81567+ h = hstate_vma(vma);
81568+ } else
81569+ address_m = address + SEGMEXEC_TASK_SIZE;
81570+
81571+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
81572+ return VM_FAULT_OOM;
81573+ address_m &= HPAGE_MASK;
81574+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
81575+ }
81576+#endif
81577+
81578 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
81579 if (!ptep)
81580 return VM_FAULT_OOM;
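
[editor's note: the hugetlb sysctl hunks stop mutating the shared ctl_table in place: they copy it into a local ctl_table_no_const, point the copy's ->data at a stack temporary, and hand the copy to proc_doulongvec_minmax(). The same copy-then-modify pattern in plain C; struct ctl_entry and both handler names are illustrative, not kernel types.]

#include <stdio.h>

/* illustrative miniature of struct ctl_table */
struct ctl_entry {
	const char *name;
	void *data;
	unsigned long maxlen;
};

/* stand-in for proc_doulongvec_minmax(): only reads through e->data */
static int do_ulong_handler(struct ctl_entry *e)
{
	return printf("%s = %lu\n", e->name, *(unsigned long *)e->data) < 0;
}

static int hugepages_handler(const struct ctl_entry *table)
{
	unsigned long tmp = 128;		/* snapshot of h->max_huge_pages */
	struct ctl_entry local = *table;	/* the shared table stays untouched */

	local.data = &tmp;
	local.maxlen = sizeof(tmp);
	return do_ulong_handler(&local);
}

int main(void)
{
	struct ctl_entry t = { "nr_hugepages", 0, 0 };

	return hugepages_handler(&t);
}
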
81581diff --git a/mm/internal.h b/mm/internal.h
81582index 9ba2110..eaf0674 100644
81583--- a/mm/internal.h
81584+++ b/mm/internal.h
81585@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
81586 * in mm/page_alloc.c
81587 */
81588 extern void __free_pages_bootmem(struct page *page, unsigned int order);
81589+extern void free_compound_page(struct page *page);
81590 extern void prep_compound_page(struct page *page, unsigned long order);
81591 #ifdef CONFIG_MEMORY_FAILURE
81592 extern bool is_free_buddy_page(struct page *page);
81593diff --git a/mm/kmemleak.c b/mm/kmemleak.c
81594index 752a705..6c3102e 100644
81595--- a/mm/kmemleak.c
81596+++ b/mm/kmemleak.c
81597@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
81598
81599 for (i = 0; i < object->trace_len; i++) {
81600 void *ptr = (void *)object->trace[i];
81601- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
81602+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
81603 }
81604 }
81605
81606@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
81607 return -ENOMEM;
81608 }
81609
81610- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
81611+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
81612 &kmemleak_fops);
81613 if (!dentry)
81614 pr_warning("Failed to create the debugfs kmemleak file\n");
81615diff --git a/mm/maccess.c b/mm/maccess.c
81616index d53adf9..03a24bf 100644
81617--- a/mm/maccess.c
81618+++ b/mm/maccess.c
81619@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
81620 set_fs(KERNEL_DS);
81621 pagefault_disable();
81622 ret = __copy_from_user_inatomic(dst,
81623- (__force const void __user *)src, size);
81624+ (const void __force_user *)src, size);
81625 pagefault_enable();
81626 set_fs(old_fs);
81627
81628@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
81629
81630 set_fs(KERNEL_DS);
81631 pagefault_disable();
81632- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
81633+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
81634 pagefault_enable();
81635 set_fs(old_fs);
81636
81637diff --git a/mm/madvise.c b/mm/madvise.c
81638index 03dfa5c..b032917 100644
81639--- a/mm/madvise.c
81640+++ b/mm/madvise.c
81641@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
81642 pgoff_t pgoff;
81643 unsigned long new_flags = vma->vm_flags;
81644
81645+#ifdef CONFIG_PAX_SEGMEXEC
81646+ struct vm_area_struct *vma_m;
81647+#endif
81648+
81649 switch (behavior) {
81650 case MADV_NORMAL:
81651 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
81652@@ -123,6 +127,13 @@ success:
81653 /*
81654 * vm_flags is protected by the mmap_sem held in write mode.
81655 */
81656+
81657+#ifdef CONFIG_PAX_SEGMEXEC
81658+ vma_m = pax_find_mirror_vma(vma);
81659+ if (vma_m)
81660+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
81661+#endif
81662+
81663 vma->vm_flags = new_flags;
81664
81665 out:
81666@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81667 struct vm_area_struct ** prev,
81668 unsigned long start, unsigned long end)
81669 {
81670+
81671+#ifdef CONFIG_PAX_SEGMEXEC
81672+ struct vm_area_struct *vma_m;
81673+#endif
81674+
81675 *prev = vma;
81676 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
81677 return -EINVAL;
81678@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81679 zap_page_range(vma, start, end - start, &details);
81680 } else
81681 zap_page_range(vma, start, end - start, NULL);
81682+
81683+#ifdef CONFIG_PAX_SEGMEXEC
81684+ vma_m = pax_find_mirror_vma(vma);
81685+ if (vma_m) {
81686+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
81687+ struct zap_details details = {
81688+ .nonlinear_vma = vma_m,
81689+ .last_index = ULONG_MAX,
81690+ };
81691+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
81692+ } else
81693+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
81694+ }
81695+#endif
81696+
81697 return 0;
81698 }
81699
81700@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
81701 if (end < start)
81702 goto out;
81703
81704+#ifdef CONFIG_PAX_SEGMEXEC
81705+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
81706+ if (end > SEGMEXEC_TASK_SIZE)
81707+ goto out;
81708+ } else
81709+#endif
81710+
81711+ if (end > TASK_SIZE)
81712+ goto out;
81713+
81714 error = 0;
81715 if (end == start)
81716 goto out;
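
[editor's note: the final madvise hunk rejects any request whose end lies beyond the usable address space (SEGMEXEC_TASK_SIZE when the mirror is active, TASK_SIZE otherwise) before any VMA work happens. A standalone sketch of that bounds check; the limit values are illustrative, not the real constants.]

#include <errno.h>
#include <stdbool.h>

#define TASK_LIMIT          0x7ffffffff000UL	/* illustrative TASK_SIZE */
#define SEGMEXEC_TASK_LIMIT (TASK_LIMIT / 2)	/* illustrative split-mapping limit */

static int check_madvise_range(unsigned long start, unsigned long end,
			       bool segmexec)
{
	unsigned long limit = segmexec ? SEGMEXEC_TASK_LIMIT : TASK_LIMIT;

	if (end < start)	/* inverted or wrapped range */
		return -EINVAL;
	if (end > limit)	/* reaches past the usable address space */
		return -EINVAL;
	return 0;
}

int main(void)
{
	return check_madvise_range(0x10000, 0x20000, false);	/* 0: in range */
}
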
81717diff --git a/mm/memory-failure.c b/mm/memory-failure.c
81718index c6e4dd3..1f41988 100644
81719--- a/mm/memory-failure.c
81720+++ b/mm/memory-failure.c
81721@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
81722
81723 int sysctl_memory_failure_recovery __read_mostly = 1;
81724
81725-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
81726+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
81727
81728 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
81729
81730@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
81731 pfn, t->comm, t->pid);
81732 si.si_signo = SIGBUS;
81733 si.si_errno = 0;
81734- si.si_addr = (void *)addr;
81735+ si.si_addr = (void __user *)addr;
81736 #ifdef __ARCH_SI_TRAPNO
81737 si.si_trapno = trapno;
81738 #endif
81739@@ -760,7 +760,7 @@ static struct page_state {
81740 unsigned long res;
81741 char *msg;
81742 int (*action)(struct page *p, unsigned long pfn);
81743-} error_states[] = {
81744+} __do_const error_states[] = {
81745 { reserved, reserved, "reserved kernel", me_kernel },
81746 /*
81747 * free pages are specially detected outside this table:
81748@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81749 }
81750
81751 nr_pages = 1 << compound_trans_order(hpage);
81752- atomic_long_add(nr_pages, &mce_bad_pages);
81753+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
81754
81755 /*
81756 * We need/can do nothing about count=0 pages.
81757@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81758 if (!PageHWPoison(hpage)
81759 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
81760 || (p != hpage && TestSetPageHWPoison(hpage))) {
81761- atomic_long_sub(nr_pages, &mce_bad_pages);
81762+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81763 return 0;
81764 }
81765 set_page_hwpoison_huge_page(hpage);
81766@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81767 }
81768 if (hwpoison_filter(p)) {
81769 if (TestClearPageHWPoison(p))
81770- atomic_long_sub(nr_pages, &mce_bad_pages);
81771+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81772 unlock_page(hpage);
81773 put_page(hpage);
81774 return 0;
81775@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
81776 return 0;
81777 }
81778 if (TestClearPageHWPoison(p))
81779- atomic_long_sub(nr_pages, &mce_bad_pages);
81780+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81781 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
81782 return 0;
81783 }
81784@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
81785 */
81786 if (TestClearPageHWPoison(page)) {
81787 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
81788- atomic_long_sub(nr_pages, &mce_bad_pages);
81789+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81790 freeit = 1;
81791 if (PageHuge(page))
81792 clear_page_hwpoison_huge_page(page);
81793@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
81794 }
81795 done:
81796 if (!PageHWPoison(hpage))
81797- atomic_long_add(1 << compound_trans_order(hpage),
81798+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81799 &mce_bad_pages);
81800 set_page_hwpoison_huge_page(hpage);
81801 dequeue_hwpoisoned_huge_page(hpage);
81802@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
81803 return ret;
81804
81805 done:
81806- atomic_long_add(1, &mce_bad_pages);
81807+ atomic_long_add_unchecked(1, &mce_bad_pages);
81808 SetPageHWPoison(page);
81809 /* keep elevated page count for bad page */
81810 return ret;
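
[editor's note: throughout memory-failure.c, mce_bad_pages becomes atomic_long_unchecked_t: under PaX's overflow-hardened atomics a plain atomic_long_t traps on wraparound, and the _unchecked variant opts pure statistics counters out of that trap. A sketch of the distinction with hypothetical helpers; __builtin_add_overflow is the GCC/Clang builtin.]

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* checked add: aborts on overflow, the behaviour wanted for refcounts */
static long checked_add(long *v, long n)
{
	long out;

	if (__builtin_add_overflow(*v, n, &out)) {
		fprintf(stderr, "counter overflow detected\n");
		abort();
	}
	return *v = out;
}

/* unchecked add: no overflow trap, acceptable for a pure statistic */
static unsigned long unchecked_add(unsigned long *v, unsigned long n)
{
	return *v += n;		/* a stat like mce_bad_pages is only reported */
}

int main(void)
{
	long refs = LONG_MAX - 1;
	unsigned long bad_pages = 0;

	unchecked_add(&bad_pages, 512);
	printf("bad pages: %lu\n", bad_pages);
	checked_add(&refs, 2);	/* trips the check and aborts, by design */
	return 0;
}
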
81811diff --git a/mm/memory.c b/mm/memory.c
81812index 32a495a..8042dce 100644
81813--- a/mm/memory.c
81814+++ b/mm/memory.c
81815@@ -434,6 +434,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81816 free_pte_range(tlb, pmd, addr);
81817 } while (pmd++, addr = next, addr != end);
81818
81819+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
81820 start &= PUD_MASK;
81821 if (start < floor)
81822 return;
81823@@ -448,6 +449,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81824 pmd = pmd_offset(pud, start);
81825 pud_clear(pud);
81826 pmd_free_tlb(tlb, pmd, start);
81827+#endif
81828+
81829 }
81830
81831 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81832@@ -467,6 +470,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81833 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
81834 } while (pud++, addr = next, addr != end);
81835
81836+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
81837 start &= PGDIR_MASK;
81838 if (start < floor)
81839 return;
81840@@ -481,6 +485,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81841 pud = pud_offset(pgd, start);
81842 pgd_clear(pgd);
81843 pud_free_tlb(tlb, pud, start);
81844+#endif
81845+
81846 }
81847
81848 /*
81849@@ -1619,12 +1625,6 @@ no_page_table:
81850 return page;
81851 }
81852
81853-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
81854-{
81855- return stack_guard_page_start(vma, addr) ||
81856- stack_guard_page_end(vma, addr+PAGE_SIZE);
81857-}
81858-
81859 /**
81860 * __get_user_pages() - pin user pages in memory
81861 * @tsk: task_struct of target task
81862@@ -1710,10 +1710,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81863
81864 i = 0;
81865
81866- do {
81867+ while (nr_pages) {
81868 struct vm_area_struct *vma;
81869
81870- vma = find_extend_vma(mm, start);
81871+ vma = find_vma(mm, start);
81872 if (!vma && in_gate_area(mm, start)) {
81873 unsigned long pg = start & PAGE_MASK;
81874 pgd_t *pgd;
81875@@ -1761,7 +1761,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81876 goto next_page;
81877 }
81878
81879- if (!vma ||
81880+ if (!vma || start < vma->vm_start ||
81881 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
81882 !(vm_flags & vma->vm_flags))
81883 return i ? : -EFAULT;
81884@@ -1788,11 +1788,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81885 int ret;
81886 unsigned int fault_flags = 0;
81887
81888- /* For mlock, just skip the stack guard page. */
81889- if (foll_flags & FOLL_MLOCK) {
81890- if (stack_guard_page(vma, start))
81891- goto next_page;
81892- }
81893 if (foll_flags & FOLL_WRITE)
81894 fault_flags |= FAULT_FLAG_WRITE;
81895 if (nonblocking)
81896@@ -1866,7 +1861,7 @@ next_page:
81897 start += PAGE_SIZE;
81898 nr_pages--;
81899 } while (nr_pages && start < vma->vm_end);
81900- } while (nr_pages);
81901+ }
81902 return i;
81903 }
81904 EXPORT_SYMBOL(__get_user_pages);
81905@@ -2073,6 +2068,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
81906 page_add_file_rmap(page);
81907 set_pte_at(mm, addr, pte, mk_pte(page, prot));
81908
81909+#ifdef CONFIG_PAX_SEGMEXEC
81910+ pax_mirror_file_pte(vma, addr, page, ptl);
81911+#endif
81912+
81913 retval = 0;
81914 pte_unmap_unlock(pte, ptl);
81915 return retval;
81916@@ -2117,9 +2116,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
81917 if (!page_count(page))
81918 return -EINVAL;
81919 if (!(vma->vm_flags & VM_MIXEDMAP)) {
81920+
81921+#ifdef CONFIG_PAX_SEGMEXEC
81922+ struct vm_area_struct *vma_m;
81923+#endif
81924+
81925 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
81926 BUG_ON(vma->vm_flags & VM_PFNMAP);
81927 vma->vm_flags |= VM_MIXEDMAP;
81928+
81929+#ifdef CONFIG_PAX_SEGMEXEC
81930+ vma_m = pax_find_mirror_vma(vma);
81931+ if (vma_m)
81932+ vma_m->vm_flags |= VM_MIXEDMAP;
81933+#endif
81934+
81935 }
81936 return insert_page(vma, addr, page, vma->vm_page_prot);
81937 }
81938@@ -2202,6 +2213,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
81939 unsigned long pfn)
81940 {
81941 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
81942+ BUG_ON(vma->vm_mirror);
81943
81944 if (addr < vma->vm_start || addr >= vma->vm_end)
81945 return -EFAULT;
81946@@ -2449,7 +2461,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
81947
81948 BUG_ON(pud_huge(*pud));
81949
81950- pmd = pmd_alloc(mm, pud, addr);
81951+ pmd = (mm == &init_mm) ?
81952+ pmd_alloc_kernel(mm, pud, addr) :
81953+ pmd_alloc(mm, pud, addr);
81954 if (!pmd)
81955 return -ENOMEM;
81956 do {
81957@@ -2469,7 +2483,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
81958 unsigned long next;
81959 int err;
81960
81961- pud = pud_alloc(mm, pgd, addr);
81962+ pud = (mm == &init_mm) ?
81963+ pud_alloc_kernel(mm, pgd, addr) :
81964+ pud_alloc(mm, pgd, addr);
81965 if (!pud)
81966 return -ENOMEM;
81967 do {
81968@@ -2557,6 +2573,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
81969 copy_user_highpage(dst, src, va, vma);
81970 }
81971
81972+#ifdef CONFIG_PAX_SEGMEXEC
81973+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
81974+{
81975+ struct mm_struct *mm = vma->vm_mm;
81976+ spinlock_t *ptl;
81977+ pte_t *pte, entry;
81978+
81979+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
81980+ entry = *pte;
81981+ if (!pte_present(entry)) {
81982+ if (!pte_none(entry)) {
81983+ BUG_ON(pte_file(entry));
81984+ free_swap_and_cache(pte_to_swp_entry(entry));
81985+ pte_clear_not_present_full(mm, address, pte, 0);
81986+ }
81987+ } else {
81988+ struct page *page;
81989+
81990+ flush_cache_page(vma, address, pte_pfn(entry));
81991+ entry = ptep_clear_flush(vma, address, pte);
81992+ BUG_ON(pte_dirty(entry));
81993+ page = vm_normal_page(vma, address, entry);
81994+ if (page) {
81995+ update_hiwater_rss(mm);
81996+ if (PageAnon(page))
81997+ dec_mm_counter_fast(mm, MM_ANONPAGES);
81998+ else
81999+ dec_mm_counter_fast(mm, MM_FILEPAGES);
82000+ page_remove_rmap(page);
82001+ page_cache_release(page);
82002+ }
82003+ }
82004+ pte_unmap_unlock(pte, ptl);
82005+}
82006+
82007+/* PaX: if the vma is mirrored, synchronize the mirror's PTE.
82008+ *
82009+ * The ptl of the lower mapped page is held on entry and is not released on exit
82010+ * or inside, to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc.).
82011+ */
82012+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
82013+{
82014+ struct mm_struct *mm = vma->vm_mm;
82015+ unsigned long address_m;
82016+ spinlock_t *ptl_m;
82017+ struct vm_area_struct *vma_m;
82018+ pmd_t *pmd_m;
82019+ pte_t *pte_m, entry_m;
82020+
82021+ BUG_ON(!page_m || !PageAnon(page_m));
82022+
82023+ vma_m = pax_find_mirror_vma(vma);
82024+ if (!vma_m)
82025+ return;
82026+
82027+ BUG_ON(!PageLocked(page_m));
82028+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
82029+ address_m = address + SEGMEXEC_TASK_SIZE;
82030+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
82031+ pte_m = pte_offset_map(pmd_m, address_m);
82032+ ptl_m = pte_lockptr(mm, pmd_m);
82033+ if (ptl != ptl_m) {
82034+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
82035+ if (!pte_none(*pte_m))
82036+ goto out;
82037+ }
82038+
82039+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
82040+ page_cache_get(page_m);
82041+ page_add_anon_rmap(page_m, vma_m, address_m);
82042+ inc_mm_counter_fast(mm, MM_ANONPAGES);
82043+ set_pte_at(mm, address_m, pte_m, entry_m);
82044+ update_mmu_cache(vma_m, address_m, entry_m);
82045+out:
82046+ if (ptl != ptl_m)
82047+ spin_unlock(ptl_m);
82048+ pte_unmap(pte_m);
82049+ unlock_page(page_m);
82050+}
82051+
82052+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
82053+{
82054+ struct mm_struct *mm = vma->vm_mm;
82055+ unsigned long address_m;
82056+ spinlock_t *ptl_m;
82057+ struct vm_area_struct *vma_m;
82058+ pmd_t *pmd_m;
82059+ pte_t *pte_m, entry_m;
82060+
82061+ BUG_ON(!page_m || PageAnon(page_m));
82062+
82063+ vma_m = pax_find_mirror_vma(vma);
82064+ if (!vma_m)
82065+ return;
82066+
82067+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
82068+ address_m = address + SEGMEXEC_TASK_SIZE;
82069+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
82070+ pte_m = pte_offset_map(pmd_m, address_m);
82071+ ptl_m = pte_lockptr(mm, pmd_m);
82072+ if (ptl != ptl_m) {
82073+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
82074+ if (!pte_none(*pte_m))
82075+ goto out;
82076+ }
82077+
82078+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
82079+ page_cache_get(page_m);
82080+ page_add_file_rmap(page_m);
82081+ inc_mm_counter_fast(mm, MM_FILEPAGES);
82082+ set_pte_at(mm, address_m, pte_m, entry_m);
82083+ update_mmu_cache(vma_m, address_m, entry_m);
82084+out:
82085+ if (ptl != ptl_m)
82086+ spin_unlock(ptl_m);
82087+ pte_unmap(pte_m);
82088+}
82089+
82090+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
82091+{
82092+ struct mm_struct *mm = vma->vm_mm;
82093+ unsigned long address_m;
82094+ spinlock_t *ptl_m;
82095+ struct vm_area_struct *vma_m;
82096+ pmd_t *pmd_m;
82097+ pte_t *pte_m, entry_m;
82098+
82099+ vma_m = pax_find_mirror_vma(vma);
82100+ if (!vma_m)
82101+ return;
82102+
82103+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
82104+ address_m = address + SEGMEXEC_TASK_SIZE;
82105+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
82106+ pte_m = pte_offset_map(pmd_m, address_m);
82107+ ptl_m = pte_lockptr(mm, pmd_m);
82108+ if (ptl != ptl_m) {
82109+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
82110+ if (!pte_none(*pte_m))
82111+ goto out;
82112+ }
82113+
82114+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
82115+ set_pte_at(mm, address_m, pte_m, entry_m);
82116+out:
82117+ if (ptl != ptl_m)
82118+ spin_unlock(ptl_m);
82119+ pte_unmap(pte_m);
82120+}
82121+
82122+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
82123+{
82124+ struct page *page_m;
82125+ pte_t entry;
82126+
82127+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
82128+ goto out;
82129+
82130+ entry = *pte;
82131+ page_m = vm_normal_page(vma, address, entry);
82132+ if (!page_m)
82133+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
82134+ else if (PageAnon(page_m)) {
82135+ if (pax_find_mirror_vma(vma)) {
82136+ pte_unmap_unlock(pte, ptl);
82137+ lock_page(page_m);
82138+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
82139+ if (pte_same(entry, *pte))
82140+ pax_mirror_anon_pte(vma, address, page_m, ptl);
82141+ else
82142+ unlock_page(page_m);
82143+ }
82144+ } else
82145+ pax_mirror_file_pte(vma, address, page_m, ptl);
82146+
82147+out:
82148+ pte_unmap_unlock(pte, ptl);
82149+}
82150+#endif
82151+
82152 /*
82153 * This routine handles present pages, when users try to write
82154 * to a shared page. It is done by copying the page to a new address
82155@@ -2773,6 +2969,12 @@ gotten:
82156 */
82157 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
82158 if (likely(pte_same(*page_table, orig_pte))) {
82159+
82160+#ifdef CONFIG_PAX_SEGMEXEC
82161+ if (pax_find_mirror_vma(vma))
82162+ BUG_ON(!trylock_page(new_page));
82163+#endif
82164+
82165 if (old_page) {
82166 if (!PageAnon(old_page)) {
82167 dec_mm_counter_fast(mm, MM_FILEPAGES);
82168@@ -2824,6 +3026,10 @@ gotten:
82169 page_remove_rmap(old_page);
82170 }
82171
82172+#ifdef CONFIG_PAX_SEGMEXEC
82173+ pax_mirror_anon_pte(vma, address, new_page, ptl);
82174+#endif
82175+
82176 /* Free the old page.. */
82177 new_page = old_page;
82178 ret |= VM_FAULT_WRITE;
82179@@ -3099,6 +3305,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
82180 swap_free(entry);
82181 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
82182 try_to_free_swap(page);
82183+
82184+#ifdef CONFIG_PAX_SEGMEXEC
82185+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
82186+#endif
82187+
82188 unlock_page(page);
82189 if (swapcache) {
82190 /*
82191@@ -3122,6 +3333,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
82192
82193 /* No need to invalidate - it was non-present before */
82194 update_mmu_cache(vma, address, page_table);
82195+
82196+#ifdef CONFIG_PAX_SEGMEXEC
82197+ pax_mirror_anon_pte(vma, address, page, ptl);
82198+#endif
82199+
82200 unlock:
82201 pte_unmap_unlock(page_table, ptl);
82202 out:
82203@@ -3141,40 +3357,6 @@ out_release:
82204 }
82205
82206 /*
82207- * This is like a special single-page "expand_{down|up}wards()",
82208- * except we must first make sure that 'address{-|+}PAGE_SIZE'
82209- * doesn't hit another vma.
82210- */
82211-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
82212-{
82213- address &= PAGE_MASK;
82214- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
82215- struct vm_area_struct *prev = vma->vm_prev;
82216-
82217- /*
82218- * Is there a mapping abutting this one below?
82219- *
82220- * That's only ok if it's the same stack mapping
82221- * that has gotten split..
82222- */
82223- if (prev && prev->vm_end == address)
82224- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
82225-
82226- expand_downwards(vma, address - PAGE_SIZE);
82227- }
82228- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
82229- struct vm_area_struct *next = vma->vm_next;
82230-
82231- /* As VM_GROWSDOWN but s/below/above/ */
82232- if (next && next->vm_start == address + PAGE_SIZE)
82233- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
82234-
82235- expand_upwards(vma, address + PAGE_SIZE);
82236- }
82237- return 0;
82238-}
82239-
82240-/*
82241 * We enter with non-exclusive mmap_sem (to exclude vma changes,
82242 * but allow concurrent faults), and pte mapped but not yet locked.
82243 * We return with mmap_sem still held, but pte unmapped and unlocked.
82244@@ -3183,27 +3365,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
82245 unsigned long address, pte_t *page_table, pmd_t *pmd,
82246 unsigned int flags)
82247 {
82248- struct page *page;
82249+ struct page *page = NULL;
82250 spinlock_t *ptl;
82251 pte_t entry;
82252
82253- pte_unmap(page_table);
82254-
82255- /* Check if we need to add a guard page to the stack */
82256- if (check_stack_guard_page(vma, address) < 0)
82257- return VM_FAULT_SIGBUS;
82258-
82259- /* Use the zero-page for reads */
82260 if (!(flags & FAULT_FLAG_WRITE)) {
82261 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
82262 vma->vm_page_prot));
82263- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
82264+ ptl = pte_lockptr(mm, pmd);
82265+ spin_lock(ptl);
82266 if (!pte_none(*page_table))
82267 goto unlock;
82268 goto setpte;
82269 }
82270
82271 /* Allocate our own private page. */
82272+ pte_unmap(page_table);
82273+
82274 if (unlikely(anon_vma_prepare(vma)))
82275 goto oom;
82276 page = alloc_zeroed_user_highpage_movable(vma, address);
82277@@ -3222,6 +3400,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
82278 if (!pte_none(*page_table))
82279 goto release;
82280
82281+#ifdef CONFIG_PAX_SEGMEXEC
82282+ if (pax_find_mirror_vma(vma))
82283+ BUG_ON(!trylock_page(page));
82284+#endif
82285+
82286 inc_mm_counter_fast(mm, MM_ANONPAGES);
82287 page_add_new_anon_rmap(page, vma, address);
82288 setpte:
82289@@ -3229,6 +3412,12 @@ setpte:
82290
82291 /* No need to invalidate - it was non-present before */
82292 update_mmu_cache(vma, address, page_table);
82293+
82294+#ifdef CONFIG_PAX_SEGMEXEC
82295+ if (page)
82296+ pax_mirror_anon_pte(vma, address, page, ptl);
82297+#endif
82298+
82299 unlock:
82300 pte_unmap_unlock(page_table, ptl);
82301 return 0;
82302@@ -3372,6 +3561,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82303 */
82304 /* Only go through if we didn't race with anybody else... */
82305 if (likely(pte_same(*page_table, orig_pte))) {
82306+
82307+#ifdef CONFIG_PAX_SEGMEXEC
82308+ if (anon && pax_find_mirror_vma(vma))
82309+ BUG_ON(!trylock_page(page));
82310+#endif
82311+
82312 flush_icache_page(vma, page);
82313 entry = mk_pte(page, vma->vm_page_prot);
82314 if (flags & FAULT_FLAG_WRITE)
82315@@ -3391,6 +3586,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82316
82317 /* no need to invalidate: a not-present page won't be cached */
82318 update_mmu_cache(vma, address, page_table);
82319+
82320+#ifdef CONFIG_PAX_SEGMEXEC
82321+ if (anon)
82322+ pax_mirror_anon_pte(vma, address, page, ptl);
82323+ else
82324+ pax_mirror_file_pte(vma, address, page, ptl);
82325+#endif
82326+
82327 } else {
82328 if (cow_page)
82329 mem_cgroup_uncharge_page(cow_page);
82330@@ -3712,6 +3915,12 @@ int handle_pte_fault(struct mm_struct *mm,
82331 if (flags & FAULT_FLAG_WRITE)
82332 flush_tlb_fix_spurious_fault(vma, address);
82333 }
82334+
82335+#ifdef CONFIG_PAX_SEGMEXEC
82336+ pax_mirror_pte(vma, address, pte, pmd, ptl);
82337+ return 0;
82338+#endif
82339+
82340 unlock:
82341 pte_unmap_unlock(pte, ptl);
82342 return 0;
82343@@ -3728,6 +3937,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82344 pmd_t *pmd;
82345 pte_t *pte;
82346
82347+#ifdef CONFIG_PAX_SEGMEXEC
82348+ struct vm_area_struct *vma_m;
82349+#endif
82350+
82351 __set_current_state(TASK_RUNNING);
82352
82353 count_vm_event(PGFAULT);
82354@@ -3739,6 +3952,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82355 if (unlikely(is_vm_hugetlb_page(vma)))
82356 return hugetlb_fault(mm, vma, address, flags);
82357
82358+#ifdef CONFIG_PAX_SEGMEXEC
82359+ vma_m = pax_find_mirror_vma(vma);
82360+ if (vma_m) {
82361+ unsigned long address_m;
82362+ pgd_t *pgd_m;
82363+ pud_t *pud_m;
82364+ pmd_t *pmd_m;
82365+
82366+ if (vma->vm_start > vma_m->vm_start) {
82367+ address_m = address;
82368+ address -= SEGMEXEC_TASK_SIZE;
82369+ vma = vma_m;
82370+ } else
82371+ address_m = address + SEGMEXEC_TASK_SIZE;
82372+
82373+ pgd_m = pgd_offset(mm, address_m);
82374+ pud_m = pud_alloc(mm, pgd_m, address_m);
82375+ if (!pud_m)
82376+ return VM_FAULT_OOM;
82377+ pmd_m = pmd_alloc(mm, pud_m, address_m);
82378+ if (!pmd_m)
82379+ return VM_FAULT_OOM;
82380+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
82381+ return VM_FAULT_OOM;
82382+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
82383+ }
82384+#endif
82385+
82386 retry:
82387 pgd = pgd_offset(mm, address);
82388 pud = pud_alloc(mm, pgd, address);
82389@@ -3837,6 +4078,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
82390 spin_unlock(&mm->page_table_lock);
82391 return 0;
82392 }
82393+
82394+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
82395+{
82396+ pud_t *new = pud_alloc_one(mm, address);
82397+ if (!new)
82398+ return -ENOMEM;
82399+
82400+ smp_wmb(); /* See comment in __pte_alloc */
82401+
82402+ spin_lock(&mm->page_table_lock);
82403+ if (pgd_present(*pgd)) /* Another has populated it */
82404+ pud_free(mm, new);
82405+ else
82406+ pgd_populate_kernel(mm, pgd, new);
82407+ spin_unlock(&mm->page_table_lock);
82408+ return 0;
82409+}
82410 #endif /* __PAGETABLE_PUD_FOLDED */
82411
82412 #ifndef __PAGETABLE_PMD_FOLDED
82413@@ -3867,11 +4125,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
82414 spin_unlock(&mm->page_table_lock);
82415 return 0;
82416 }
82417+
82418+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
82419+{
82420+ pmd_t *new = pmd_alloc_one(mm, address);
82421+ if (!new)
82422+ return -ENOMEM;
82423+
82424+ smp_wmb(); /* See comment in __pte_alloc */
82425+
82426+ spin_lock(&mm->page_table_lock);
82427+#ifndef __ARCH_HAS_4LEVEL_HACK
82428+ if (pud_present(*pud)) /* Another has populated it */
82429+ pmd_free(mm, new);
82430+ else
82431+ pud_populate_kernel(mm, pud, new);
82432+#else
82433+ if (pgd_present(*pud)) /* Another has populated it */
82434+ pmd_free(mm, new);
82435+ else
82436+ pgd_populate_kernel(mm, pud, new);
82437+#endif /* __ARCH_HAS_4LEVEL_HACK */
82438+ spin_unlock(&mm->page_table_lock);
82439+ return 0;
82440+}
82441 #endif /* __PAGETABLE_PMD_FOLDED */
82442
82443-int make_pages_present(unsigned long addr, unsigned long end)
82444+ssize_t make_pages_present(unsigned long addr, unsigned long end)
82445 {
82446- int ret, len, write;
82447+ ssize_t ret, len, write;
82448 struct vm_area_struct * vma;
82449
82450 vma = find_vma(current->mm, addr);
82451@@ -3904,7 +4186,7 @@ static int __init gate_vma_init(void)
82452 gate_vma.vm_start = FIXADDR_USER_START;
82453 gate_vma.vm_end = FIXADDR_USER_END;
82454 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
82455- gate_vma.vm_page_prot = __P101;
82456+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
82457
82458 return 0;
82459 }
82460@@ -4038,8 +4320,8 @@ out:
82461 return ret;
82462 }
82463
82464-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82465- void *buf, int len, int write)
82466+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82467+ void *buf, size_t len, int write)
82468 {
82469 resource_size_t phys_addr;
82470 unsigned long prot = 0;
82471@@ -4064,8 +4346,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82472 * Access another process' address space as given in mm. If non-NULL, use the
82473 * given task for page fault accounting.
82474 */
82475-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82476- unsigned long addr, void *buf, int len, int write)
82477+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82478+ unsigned long addr, void *buf, size_t len, int write)
82479 {
82480 struct vm_area_struct *vma;
82481 void *old_buf = buf;
82482@@ -4073,7 +4355,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82483 down_read(&mm->mmap_sem);
82484 /* ignore errors, just check how much was successfully transferred */
82485 while (len) {
82486- int bytes, ret, offset;
82487+ ssize_t bytes, ret, offset;
82488 void *maddr;
82489 struct page *page = NULL;
82490
82491@@ -4132,8 +4414,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82492 *
82493 * The caller must hold a reference on @mm.
82494 */
82495-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82496- void *buf, int len, int write)
82497+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
82498+ void *buf, size_t len, int write)
82499 {
82500 return __access_remote_vm(NULL, mm, addr, buf, len, write);
82501 }
82502@@ -4143,11 +4425,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82503 * Source/target buffer must be kernel space,
82504 * Do not walk the page table directly, use get_user_pages
82505 */
82506-int access_process_vm(struct task_struct *tsk, unsigned long addr,
82507- void *buf, int len, int write)
82508+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
82509+ void *buf, size_t len, int write)
82510 {
82511 struct mm_struct *mm;
82512- int ret;
82513+ ssize_t ret;
82514
82515 mm = get_task_mm(tsk);
82516 if (!mm)
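
[editor's note: a recurring change in the memory.c hunks widens int to ssize_t for the byte counts of generic_access_phys(), __access_remote_vm(), access_remote_vm() and access_process_vm(), so a length above INT_MAX can neither truncate nor go negative. A compact illustration of the truncation the widening avoids, assuming an LP64 target.]

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t request = 5UL * 1024 * 1024 * 1024;	/* a 5 GiB access length */

	int bad = (int)request;		/* truncates on LP64: 1 GiB remains */
	ssize_t good = (ssize_t)request;

	printf("as int:     %d bytes\n", bad);
	printf("as ssize_t: %zd bytes\n", good);
	return 0;
}
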
82517diff --git a/mm/mempolicy.c b/mm/mempolicy.c
82518index 3df6d12..a11056a 100644
82519--- a/mm/mempolicy.c
82520+++ b/mm/mempolicy.c
82521@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82522 unsigned long vmstart;
82523 unsigned long vmend;
82524
82525+#ifdef CONFIG_PAX_SEGMEXEC
82526+ struct vm_area_struct *vma_m;
82527+#endif
82528+
82529 vma = find_vma(mm, start);
82530 if (!vma || vma->vm_start > start)
82531 return -EFAULT;
82532@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82533 if (err)
82534 goto out;
82535 }
82536+
82537 err = vma_replace_policy(vma, new_pol);
82538 if (err)
82539 goto out;
82540+
82541+#ifdef CONFIG_PAX_SEGMEXEC
82542+ vma_m = pax_find_mirror_vma(vma);
82543+ if (vma_m) {
82544+ err = vma_replace_policy(vma_m, new_pol);
82545+ if (err)
82546+ goto out;
82547+ }
82548+#endif
82549+
82550 }
82551
82552 out:
82553@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
82554
82555 if (end < start)
82556 return -EINVAL;
82557+
82558+#ifdef CONFIG_PAX_SEGMEXEC
82559+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82560+ if (end > SEGMEXEC_TASK_SIZE)
82561+ return -EINVAL;
82562+ } else
82563+#endif
82564+
82565+ if (end > TASK_SIZE)
82566+ return -EINVAL;
82567+
82568 if (end == start)
82569 return 0;
82570
82571@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82572 */
82573 tcred = __task_cred(task);
82574 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82575- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82576- !capable(CAP_SYS_NICE)) {
82577+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82578 rcu_read_unlock();
82579 err = -EPERM;
82580 goto out_put;
82581@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82582 goto out;
82583 }
82584
82585+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
82586+ if (mm != current->mm &&
82587+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
82588+ mmput(mm);
82589+ err = -EPERM;
82590+ goto out;
82591+ }
82592+#endif
82593+
82594 err = do_migrate_pages(mm, old, new,
82595 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
82596
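
[editor's note: the mempolicy credential hunk (mirrored in mm/migrate.c just below) tightens migrate_pages(): it drops the clause that let any caller sharing the target's real uid pass, leaving euid-vs-suid, euid-vs-uid, uid-vs-suid, or CAP_SYS_NICE. A sketch of the resulting predicate; may_migrate() and struct creds are illustrative names, not the kernel's.]

#include <stdbool.h>
#include <sys/types.h>

struct creds { uid_t uid, euid, suid; };

/* may "caller" migrate "target"'s pages? the post-patch predicate;
 * privileged stands in for capable(CAP_SYS_NICE) */
static bool may_migrate(const struct creds *caller, const struct creds *target,
			bool privileged)
{
	return caller->euid == target->suid || caller->euid == target->uid ||
	       caller->uid == target->suid || privileged;
}

int main(void)
{
	struct creds self = { 1000, 1000, 1000 };

	return may_migrate(&self, &self, false) ? 0 : 1;	/* 0: allowed */
}
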
82597diff --git a/mm/migrate.c b/mm/migrate.c
82598index 2fd8b4a..d70358f 100644
82599--- a/mm/migrate.c
82600+++ b/mm/migrate.c
82601@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
82602 */
82603 tcred = __task_cred(task);
82604 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82605- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82606- !capable(CAP_SYS_NICE)) {
82607+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82608 rcu_read_unlock();
82609 err = -EPERM;
82610 goto out;
82611diff --git a/mm/mlock.c b/mm/mlock.c
82612index c9bd528..da8d069 100644
82613--- a/mm/mlock.c
82614+++ b/mm/mlock.c
82615@@ -13,6 +13,7 @@
82616 #include <linux/pagemap.h>
82617 #include <linux/mempolicy.h>
82618 #include <linux/syscalls.h>
82619+#include <linux/security.h>
82620 #include <linux/sched.h>
82621 #include <linux/export.h>
82622 #include <linux/rmap.h>
82623@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
82624 {
82625 unsigned long nstart, end, tmp;
82626 struct vm_area_struct * vma, * prev;
82627- int error;
82628+ int error = 0;
82629
82630 VM_BUG_ON(start & ~PAGE_MASK);
82631 VM_BUG_ON(len != PAGE_ALIGN(len));
82632@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
82633 return -EINVAL;
82634 if (end == start)
82635 return 0;
82636+ if (end > TASK_SIZE)
82637+ return -EINVAL;
82638+
82639 vma = find_vma(current->mm, start);
82640 if (!vma || vma->vm_start > start)
82641 return -ENOMEM;
82642@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
82643 for (nstart = start ; ; ) {
82644 vm_flags_t newflags;
82645
82646+#ifdef CONFIG_PAX_SEGMEXEC
82647+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82648+ break;
82649+#endif
82650+
82651 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
82652
82653 newflags = vma->vm_flags | VM_LOCKED;
82654@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
82655 lock_limit >>= PAGE_SHIFT;
82656
82657 /* check against resource limits */
82658+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
82659 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
82660 error = do_mlock(start, len, 1);
82661 up_write(&current->mm->mmap_sem);
82662@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
82663 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
82664 vm_flags_t newflags;
82665
82666+#ifdef CONFIG_PAX_SEGMEXEC
82667+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82668+ break;
82669+#endif
82670+
82671+ BUG_ON(vma->vm_end > TASK_SIZE);
82672 newflags = vma->vm_flags | VM_LOCKED;
82673 if (!(flags & MCL_CURRENT))
82674 newflags &= ~VM_LOCKED;
82675@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
82676 lock_limit >>= PAGE_SHIFT;
82677
82678 ret = -ENOMEM;
82679+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
82680 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
82681 capable(CAP_IPC_LOCK))
82682 ret = do_mlockall(flags);
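
[editor's note: the mlock hunks feed the projected locked size to gr_learn_resource(), grsecurity's resource-learning hook, ahead of the usual limit test: current locked bytes plus the new request against RLIMIT_MEMLOCK, waived for CAP_IPC_LOCK holders. A userspace sketch of that check; the kernel compares page counts while this compares bytes, and privileged stands in for the capability test.]

#include <stdbool.h>
#include <sys/resource.h>

/* would locking "len" more bytes stay within RLIMIT_MEMLOCK? */
static bool mlock_within_limit(unsigned long locked_bytes, unsigned long len,
			       bool privileged)
{
	struct rlimit rl;

	if (privileged)		/* CAP_IPC_LOCK bypasses the limit */
		return true;
	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
		return false;
	return rl.rlim_cur == RLIM_INFINITY || locked_bytes + len <= rl.rlim_cur;
}

int main(void)
{
	return mlock_within_limit(0, 4096, false) ? 0 : 1;
}
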
82683diff --git a/mm/mmap.c b/mm/mmap.c
82684index 90db251..04240d1 100644
82685--- a/mm/mmap.c
82686+++ b/mm/mmap.c
82687@@ -32,6 +32,7 @@
82688 #include <linux/khugepaged.h>
82689 #include <linux/uprobes.h>
82690 #include <linux/rbtree_augmented.h>
82691+#include <linux/random.h>
82692
82693 #include <asm/uaccess.h>
82694 #include <asm/cacheflush.h>
82695@@ -48,6 +49,16 @@
82696 #define arch_rebalance_pgtables(addr, len) (addr)
82697 #endif
82698
82699+static inline void verify_mm_writelocked(struct mm_struct *mm)
82700+{
82701+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
82702+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82703+ up_read(&mm->mmap_sem);
82704+ BUG();
82705+ }
82706+#endif
82707+}
82708+
82709 static void unmap_region(struct mm_struct *mm,
82710 struct vm_area_struct *vma, struct vm_area_struct *prev,
82711 unsigned long start, unsigned long end);
82712@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
82713 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
82714 *
82715 */
82716-pgprot_t protection_map[16] = {
82717+pgprot_t protection_map[16] __read_only = {
82718 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
82719 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
82720 };
82721
82722-pgprot_t vm_get_page_prot(unsigned long vm_flags)
82723+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
82724 {
82725- return __pgprot(pgprot_val(protection_map[vm_flags &
82726+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
82727 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
82728 pgprot_val(arch_vm_get_page_prot(vm_flags)));
82729+
82730+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82731+ if (!(__supported_pte_mask & _PAGE_NX) &&
82732+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
82733+ (vm_flags & (VM_READ | VM_WRITE)))
82734+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
82735+#endif
82736+
82737+ return prot;
82738 }
82739 EXPORT_SYMBOL(vm_get_page_prot);
82740
82741 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
82742 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
82743 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
82744+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
82745 /*
82746 * Make sure vm_committed_as in one cacheline and not cacheline shared with
82747 * other variables. It can be updated by several CPUs frequently.
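
[editor's note: the hunk above marks protection_map[] __read_only and teaches vm_get_page_prot() to strip execute rights when PaX's PAGEEXEC must emulate NX on hardware without it. The underlying mechanism is a table-driven lookup: four vm_flags bits index sixteen precomputed protections. A toy version with illustrative bit values; the NX-emulation special case is omitted.]

#include <stdio.h>

#define P_READ	0x1
#define P_WRITE	0x2
#define P_EXEC	0x4

#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

/* toy stand-in for protection_map[16]; real kernels differ per-arch and
 * give the VM_SHARED half of the table its own copy-on-write semantics */
static const int prot_map[16] = {
	[VM_READ]			= P_READ,
	[VM_READ | VM_WRITE]		= P_READ | P_WRITE,
	[VM_READ | VM_EXEC]		= P_READ | P_EXEC,
	[VM_READ | VM_WRITE | VM_EXEC]	= P_READ | P_WRITE | P_EXEC,
	/* unlisted entries default to 0, i.e. PROT_NONE */
};

static int get_page_prot(unsigned vm_flags)
{
	return prot_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	printf("%x\n", get_page_prot(VM_READ | VM_WRITE));	/* 3 */
	return 0;
}
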
82748@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
82749 struct vm_area_struct *next = vma->vm_next;
82750
82751 might_sleep();
82752+ BUG_ON(vma->vm_mirror);
82753 if (vma->vm_ops && vma->vm_ops->close)
82754 vma->vm_ops->close(vma);
82755 if (vma->vm_file)
82756@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
82757 * not page aligned -Ram Gupta
82758 */
82759 rlim = rlimit(RLIMIT_DATA);
82760+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
82761 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
82762 (mm->end_data - mm->start_data) > rlim)
82763 goto out;
82764@@ -888,6 +911,12 @@ static int
82765 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
82766 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82767 {
82768+
82769+#ifdef CONFIG_PAX_SEGMEXEC
82770+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
82771+ return 0;
82772+#endif
82773+
82774 if (is_mergeable_vma(vma, file, vm_flags) &&
82775 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82776 if (vma->vm_pgoff == vm_pgoff)
82777@@ -907,6 +936,12 @@ static int
82778 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82779 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82780 {
82781+
82782+#ifdef CONFIG_PAX_SEGMEXEC
82783+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
82784+ return 0;
82785+#endif
82786+
82787 if (is_mergeable_vma(vma, file, vm_flags) &&
82788 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82789 pgoff_t vm_pglen;
82790@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82791 struct vm_area_struct *vma_merge(struct mm_struct *mm,
82792 struct vm_area_struct *prev, unsigned long addr,
82793 unsigned long end, unsigned long vm_flags,
82794- struct anon_vma *anon_vma, struct file *file,
82795+ struct anon_vma *anon_vma, struct file *file,
82796 pgoff_t pgoff, struct mempolicy *policy)
82797 {
82798 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
82799 struct vm_area_struct *area, *next;
82800 int err;
82801
82802+#ifdef CONFIG_PAX_SEGMEXEC
82803+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
82804+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
82805+
82806+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
82807+#endif
82808+
82809 /*
82810 * We later require that vma->vm_flags == vm_flags,
82811 * so this tests vma->vm_flags & VM_SPECIAL, too.
82812@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82813 if (next && next->vm_end == end) /* cases 6, 7, 8 */
82814 next = next->vm_next;
82815
82816+#ifdef CONFIG_PAX_SEGMEXEC
82817+ if (prev)
82818+ prev_m = pax_find_mirror_vma(prev);
82819+ if (area)
82820+ area_m = pax_find_mirror_vma(area);
82821+ if (next)
82822+ next_m = pax_find_mirror_vma(next);
82823+#endif
82824+
82825 /*
82826 * Can it merge with the predecessor?
82827 */
82828@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82829 /* cases 1, 6 */
82830 err = vma_adjust(prev, prev->vm_start,
82831 next->vm_end, prev->vm_pgoff, NULL);
82832- } else /* cases 2, 5, 7 */
82833+
82834+#ifdef CONFIG_PAX_SEGMEXEC
82835+ if (!err && prev_m)
82836+ err = vma_adjust(prev_m, prev_m->vm_start,
82837+ next_m->vm_end, prev_m->vm_pgoff, NULL);
82838+#endif
82839+
82840+ } else { /* cases 2, 5, 7 */
82841 err = vma_adjust(prev, prev->vm_start,
82842 end, prev->vm_pgoff, NULL);
82843+
82844+#ifdef CONFIG_PAX_SEGMEXEC
82845+ if (!err && prev_m)
82846+ err = vma_adjust(prev_m, prev_m->vm_start,
82847+ end_m, prev_m->vm_pgoff, NULL);
82848+#endif
82849+
82850+ }
82851 if (err)
82852 return NULL;
82853 khugepaged_enter_vma_merge(prev);
82854@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82855 mpol_equal(policy, vma_policy(next)) &&
82856 can_vma_merge_before(next, vm_flags,
82857 anon_vma, file, pgoff+pglen)) {
82858- if (prev && addr < prev->vm_end) /* case 4 */
82859+ if (prev && addr < prev->vm_end) { /* case 4 */
82860 err = vma_adjust(prev, prev->vm_start,
82861 addr, prev->vm_pgoff, NULL);
82862- else /* cases 3, 8 */
82863+
82864+#ifdef CONFIG_PAX_SEGMEXEC
82865+ if (!err && prev_m)
82866+ err = vma_adjust(prev_m, prev_m->vm_start,
82867+ addr_m, prev_m->vm_pgoff, NULL);
82868+#endif
82869+
82870+ } else { /* cases 3, 8 */
82871 err = vma_adjust(area, addr, next->vm_end,
82872 next->vm_pgoff - pglen, NULL);
82873+
82874+#ifdef CONFIG_PAX_SEGMEXEC
82875+ if (!err && area_m)
82876+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
82877+ next_m->vm_pgoff - pglen, NULL);
82878+#endif
82879+
82880+ }
82881 if (err)
82882 return NULL;
82883 khugepaged_enter_vma_merge(area);
82884@@ -1120,8 +1201,10 @@ none:
82885 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82886 struct file *file, long pages)
82887 {
82888- const unsigned long stack_flags
82889- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
82890+
82891+#ifdef CONFIG_PAX_RANDMMAP
82892+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82893+#endif
82894
82895 mm->total_vm += pages;
82896
82897@@ -1129,7 +1212,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82898 mm->shared_vm += pages;
82899 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
82900 mm->exec_vm += pages;
82901- } else if (flags & stack_flags)
82902+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
82903 mm->stack_vm += pages;
82904 }
82905 #endif /* CONFIG_PROC_FS */
82906@@ -1165,7 +1248,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82907 * (the exception is when the underlying filesystem is noexec
82908 * mounted, in which case we don't add PROT_EXEC.)
82909 */
82910- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
82911+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
82912 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
82913 prot |= PROT_EXEC;
82914
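
With the change above, a process running under the legacy READ_IMPLIES_EXEC personality gets PROT_EXEC added for write-only requests too, not just readable ones, closing a PROT_WRITE-only gap. A user-space model of the widened rule, with the noexec-mount test reduced to a boolean parameter:

#include <stdbool.h>
#include <sys/mman.h>

/* returns the effective protection after the implied-exec rule */
static unsigned long apply_read_implies_exec(unsigned long prot,
					     bool read_implies_exec,
					     bool noexec_mount)
{
	/* patched rule: PROT_WRITE now triggers the promotion as well */
	if ((prot & (PROT_READ | PROT_WRITE)) && read_implies_exec && !noexec_mount)
		prot |= PROT_EXEC;
	return prot;
}
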
82915@@ -1191,7 +1274,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82916 /* Obtain the address to map to. we verify (or select) it and ensure
82917 * that it represents a valid section of the address space.
82918 */
82919- addr = get_unmapped_area(file, addr, len, pgoff, flags);
82920+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
82921 if (addr & ~PAGE_MASK)
82922 return addr;
82923
82924@@ -1202,6 +1285,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82925 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
82926 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
82927
82928+#ifdef CONFIG_PAX_MPROTECT
82929+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82930+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82931+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
82932+ gr_log_rwxmmap(file);
82933+
82934+#ifdef CONFIG_PAX_EMUPLT
82935+ vm_flags &= ~VM_EXEC;
82936+#else
82937+ return -EPERM;
82938+#endif
82939+
82940+ }
82941+
82942+ if (!(vm_flags & VM_EXEC))
82943+ vm_flags &= ~VM_MAYEXEC;
82944+#else
82945+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82946+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82947+#endif
82948+ else
82949+ vm_flags &= ~VM_MAYWRITE;
82950+ }
82951+#endif
82952+
82953+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82954+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
82955+ vm_flags &= ~VM_PAGEEXEC;
82956+#endif
82957+
82958 if (flags & MAP_LOCKED)
82959 if (!can_do_mlock())
82960 return -EPERM;
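
The MPROTECT hunk above enforces W^X at map time: a request that is both writable and executable is logged and then either demoted (when PLT emulation is enabled) or rejected with -EPERM, and the corresponding VM_MAY* bit is cleared so a later mprotect() cannot restore the dropped permission. A sketch of the non-COMPAT branch as a standalone predicate; the flag values follow the usual kernel encoding:

#include <stdbool.h>

#define VM_WRITE    0x2UL
#define VM_EXEC     0x4UL
#define VM_MAYWRITE 0x20UL
#define VM_MAYEXEC  0x40UL

/* returns false where the kernel would return -EPERM */
static bool apply_mprotect_policy(unsigned long *vm_flags, bool emuplt)
{
	if ((*vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
		if (!emuplt)
			return false;		/* writable+executable: reject */
		*vm_flags &= ~VM_EXEC;		/* EMUPLT: demote to non-exec */
	}
	if (!(*vm_flags & VM_EXEC))
		*vm_flags &= ~VM_MAYEXEC;	/* bar future PROT_EXEC */
	else
		*vm_flags &= ~VM_MAYWRITE;	/* bar future PROT_WRITE */
	return true;
}
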
82961@@ -1213,6 +1326,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82962 locked += mm->locked_vm;
82963 lock_limit = rlimit(RLIMIT_MEMLOCK);
82964 lock_limit >>= PAGE_SHIFT;
82965+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82966 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
82967 return -EAGAIN;
82968 }
82969@@ -1279,6 +1393,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82970 }
82971 }
82972
82973+ if (!gr_acl_handle_mmap(file, prot))
82974+ return -EACCES;
82975+
82976 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
82977 }
82978
82979@@ -1356,7 +1473,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
82980 vm_flags_t vm_flags = vma->vm_flags;
82981
82982 /* If it was private or non-writable, the write bit is already clear */
82983- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
82984+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
82985 return 0;
82986
82987 /* The backer wishes to know when pages are first written to? */
82988@@ -1405,16 +1522,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82989 unsigned long charged = 0;
82990 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
82991
82992+#ifdef CONFIG_PAX_SEGMEXEC
82993+ struct vm_area_struct *vma_m = NULL;
82994+#endif
82995+
82996+ /*
82997+ * mm->mmap_sem is required to protect against another thread
82998+ * changing the mappings in case we sleep.
82999+ */
83000+ verify_mm_writelocked(mm);
83001+
83002 /* Clear old maps */
83003 error = -ENOMEM;
83004-munmap_back:
83005 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
83006 if (do_munmap(mm, addr, len))
83007 return -ENOMEM;
83008- goto munmap_back;
83009+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
83010 }
83011
83012 /* Check against address space limit. */
83013+
83014+#ifdef CONFIG_PAX_RANDMMAP
83015+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83016+#endif
83017+
83018 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
83019 return -ENOMEM;
83020
83021@@ -1460,6 +1591,16 @@ munmap_back:
83022 goto unacct_error;
83023 }
83024
83025+#ifdef CONFIG_PAX_SEGMEXEC
83026+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
83027+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83028+ if (!vma_m) {
83029+ error = -ENOMEM;
83030+ goto free_vma;
83031+ }
83032+ }
83033+#endif
83034+
83035 vma->vm_mm = mm;
83036 vma->vm_start = addr;
83037 vma->vm_end = addr + len;
83038@@ -1484,6 +1625,13 @@ munmap_back:
83039 if (error)
83040 goto unmap_and_free_vma;
83041
83042+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83043+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
83044+ vma->vm_flags |= VM_PAGEEXEC;
83045+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
83046+ }
83047+#endif
83048+
83049 /* Can addr have changed??
83050 *
83051 * Answer: Yes, several device drivers can do it in their
83052@@ -1522,6 +1670,11 @@ munmap_back:
83053 vma_link(mm, vma, prev, rb_link, rb_parent);
83054 file = vma->vm_file;
83055
83056+#ifdef CONFIG_PAX_SEGMEXEC
83057+ if (vma_m)
83058+ BUG_ON(pax_mirror_vma(vma_m, vma));
83059+#endif
83060+
83061 /* Once vma denies write, undo our temporary denial count */
83062 if (correct_wcount)
83063 atomic_inc(&inode->i_writecount);
83064@@ -1529,6 +1682,7 @@ out:
83065 perf_event_mmap(vma);
83066
83067 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
83068+ track_exec_limit(mm, addr, addr + len, vm_flags);
83069 if (vm_flags & VM_LOCKED) {
83070 if (!mlock_vma_pages_range(vma, addr, addr + len))
83071 mm->locked_vm += (len >> PAGE_SHIFT);
83072@@ -1550,6 +1704,12 @@ unmap_and_free_vma:
83073 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
83074 charged = 0;
83075 free_vma:
83076+
83077+#ifdef CONFIG_PAX_SEGMEXEC
83078+ if (vma_m)
83079+ kmem_cache_free(vm_area_cachep, vma_m);
83080+#endif
83081+
83082 kmem_cache_free(vm_area_cachep, vma);
83083 unacct_error:
83084 if (charged)
83085@@ -1557,6 +1717,62 @@ unacct_error:
83086 return error;
83087 }
83088
83089+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
83090+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
83091+{
83092+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
83093+ return (random32() & 0xFF) << PAGE_SHIFT;
83094+
83095+ return 0;
83096+}
83097+#endif
83098+
83099+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
83100+{
83101+ if (!vma) {
83102+#ifdef CONFIG_STACK_GROWSUP
83103+ if (addr > sysctl_heap_stack_gap)
83104+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
83105+ else
83106+ vma = find_vma(current->mm, 0);
83107+ if (vma && (vma->vm_flags & VM_GROWSUP))
83108+ return false;
83109+#endif
83110+ return true;
83111+ }
83112+
83113+ if (addr + len > vma->vm_start)
83114+ return false;
83115+
83116+ if (vma->vm_flags & VM_GROWSDOWN)
83117+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
83118+#ifdef CONFIG_STACK_GROWSUP
83119+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
83120+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
83121+#endif
83122+ else if (offset)
83123+ return offset <= vma->vm_start - addr - len;
83124+
83125+ return true;
83126+}
83127+
83128+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
83129+{
83130+ if (vma->vm_start < len)
83131+ return -ENOMEM;
83132+
83133+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
83134+ if (offset <= vma->vm_start - len)
83135+ return vma->vm_start - len - offset;
83136+ else
83137+ return -ENOMEM;
83138+ }
83139+
83140+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
83141+ return vma->vm_start - len - sysctl_heap_stack_gap;
83142+ return -ENOMEM;
83143+}
83144+
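
check_heap_stack_gap() above refuses candidate mappings that would come closer than sysctl_heap_stack_gap (64 KiB by default) to a growing stack, gr_rand_threadstack_offset() adds up to 255 pages of random slack for thread stacks, and skip_heap_stack_gap() backs a search off below a vma while preserving the gap. The core VM_GROWSDOWN test, modelled with plain integers in place of vmas:

#include <stdbool.h>

static unsigned long sysctl_heap_stack_gap = 64 * 1024;

/* candidate mapping [addr, addr + len) below a stack starting at
 * stack_start and growing down: require the configured gap between them */
static bool gap_ok_below_stack(unsigned long addr, unsigned long len,
			       unsigned long stack_start)
{
	if (addr + len > stack_start)	/* would overlap the stack itself */
		return false;
	return sysctl_heap_stack_gap <= stack_start - addr - len;
}

With the default gap, a mapping ending 4 KiB below the stack is refused while one ending 64 KiB below it is allowed.
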
83145 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
83146 {
83147 /*
83148@@ -1776,6 +1992,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
83149 struct mm_struct *mm = current->mm;
83150 struct vm_area_struct *vma;
83151 struct vm_unmapped_area_info info;
83152+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
83153
83154 if (len > TASK_SIZE)
83155 return -ENOMEM;
83156@@ -1783,17 +2000,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
83157 if (flags & MAP_FIXED)
83158 return addr;
83159
83160+#ifdef CONFIG_PAX_RANDMMAP
83161+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
83162+#endif
83163+
83164 if (addr) {
83165 addr = PAGE_ALIGN(addr);
83166 vma = find_vma(mm, addr);
83167- if (TASK_SIZE - len >= addr &&
83168- (!vma || addr + len <= vma->vm_start))
83169+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
83170 return addr;
83171 }
83172
83173 info.flags = 0;
83174 info.length = len;
83175 info.low_limit = TASK_UNMAPPED_BASE;
83176+
83177+#ifdef CONFIG_PAX_RANDMMAP
83178+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83179+ info.low_limit += mm->delta_mmap;
83180+#endif
83181+
83182 info.high_limit = TASK_SIZE;
83183 info.align_mask = 0;
83184 return vm_unmapped_area(&info);
83185@@ -1802,10 +2028,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
83186
83187 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
83188 {
83189+
83190+#ifdef CONFIG_PAX_SEGMEXEC
83191+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
83192+ return;
83193+#endif
83194+
83195 /*
83196 * Is this a new hole at the lowest possible address?
83197 */
83198- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
83199+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
83200 mm->free_area_cache = addr;
83201 }
83202
83203@@ -1823,6 +2055,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83204 struct mm_struct *mm = current->mm;
83205 unsigned long addr = addr0;
83206 struct vm_unmapped_area_info info;
83207+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
83208
83209 /* requested length too big for entire address space */
83210 if (len > TASK_SIZE)
83211@@ -1831,12 +2064,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83212 if (flags & MAP_FIXED)
83213 return addr;
83214
83215+#ifdef CONFIG_PAX_RANDMMAP
83216+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
83217+#endif
83218+
83219 /* requesting a specific address */
83220 if (addr) {
83221 addr = PAGE_ALIGN(addr);
83222 vma = find_vma(mm, addr);
83223- if (TASK_SIZE - len >= addr &&
83224- (!vma || addr + len <= vma->vm_start))
83225+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
83226 return addr;
83227 }
83228
83229@@ -1857,6 +2093,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83230 VM_BUG_ON(addr != -ENOMEM);
83231 info.flags = 0;
83232 info.low_limit = TASK_UNMAPPED_BASE;
83233+
83234+#ifdef CONFIG_PAX_RANDMMAP
83235+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83236+ info.low_limit += mm->delta_mmap;
83237+#endif
83238+
83239 info.high_limit = TASK_SIZE;
83240 addr = vm_unmapped_area(&info);
83241 }
83242@@ -1867,6 +2109,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83243
83244 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
83245 {
83246+
83247+#ifdef CONFIG_PAX_SEGMEXEC
83248+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
83249+ return;
83250+#endif
83251+
83252 /*
83253 * Is this a new hole at the highest possible address?
83254 */
83255@@ -1874,8 +2122,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
83256 mm->free_area_cache = addr;
83257
83258 /* don't allow allocations above current base */
83259- if (mm->free_area_cache > mm->mmap_base)
83260+ if (mm->free_area_cache > mm->mmap_base) {
83261 mm->free_area_cache = mm->mmap_base;
83262+ mm->cached_hole_size = ~0UL;
83263+ }
83264 }
83265
83266 unsigned long
83267@@ -1974,6 +2224,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
83268 return vma;
83269 }
83270
83271+#ifdef CONFIG_PAX_SEGMEXEC
83272+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
83273+{
83274+ struct vm_area_struct *vma_m;
83275+
83276+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
83277+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
83278+ BUG_ON(vma->vm_mirror);
83279+ return NULL;
83280+ }
83281+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
83282+ vma_m = vma->vm_mirror;
83283+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
83284+ BUG_ON(vma->vm_file != vma_m->vm_file);
83285+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
83286+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
83287+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
83288+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
83289+ return vma_m;
83290+}
83291+#endif
83292+
83293 /*
83294 * Verify that the stack growth is acceptable and
83295 * update accounting. This is shared with both the
83296@@ -1990,6 +2262,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83297 return -ENOMEM;
83298
83299 /* Stack limit test */
83300+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
83301 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
83302 return -ENOMEM;
83303
83304@@ -2000,6 +2273,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83305 locked = mm->locked_vm + grow;
83306 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
83307 limit >>= PAGE_SHIFT;
83308+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
83309 if (locked > limit && !capable(CAP_IPC_LOCK))
83310 return -ENOMEM;
83311 }
83312@@ -2029,37 +2303,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83313 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
83314 * vma is the last one with address > vma->vm_end. Have to extend vma.
83315 */
83316+#ifndef CONFIG_IA64
83317+static
83318+#endif
83319 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
83320 {
83321 int error;
83322+ bool locknext;
83323
83324 if (!(vma->vm_flags & VM_GROWSUP))
83325 return -EFAULT;
83326
83327+ /* Also guard against wrapping around to address 0. */
83328+ if (address < PAGE_ALIGN(address+1))
83329+ address = PAGE_ALIGN(address+1);
83330+ else
83331+ return -ENOMEM;
83332+
83333 /*
83334 * We must make sure the anon_vma is allocated
83335 * so that the anon_vma locking is not a noop.
83336 */
83337 if (unlikely(anon_vma_prepare(vma)))
83338 return -ENOMEM;
83339+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
83340+ if (locknext && anon_vma_prepare(vma->vm_next))
83341+ return -ENOMEM;
83342 vma_lock_anon_vma(vma);
83343+ if (locknext)
83344+ vma_lock_anon_vma(vma->vm_next);
83345
83346 /*
83347 * vma->vm_start/vm_end cannot change under us because the caller
83348 * is required to hold the mmap_sem in read mode. We need the
83349- * anon_vma lock to serialize against concurrent expand_stacks.
83350- * Also guard against wrapping around to address 0.
83351+ * anon_vma locks to serialize against concurrent expand_stacks
83352+ * and expand_upwards.
83353 */
83354- if (address < PAGE_ALIGN(address+4))
83355- address = PAGE_ALIGN(address+4);
83356- else {
83357- vma_unlock_anon_vma(vma);
83358- return -ENOMEM;
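
With PAX_MEMORY_SANITIZE the free path above scrubs every page of an order-N block (2^order pages, walked from the top index down); the allocation path later skips the now-redundant __GFP_ZERO memset. A user-space analogue, with memset() to zero standing in for sanitize_highpage():

#include <string.h>

#define PAGE_SIZE 4096UL

static void sanitize_block(unsigned char *base, unsigned int order)
{
	unsigned long index = 1UL << order;	/* pages in the block */

	for (; index; --index)
		memset(base + (index - 1) * PAGE_SIZE, 0, PAGE_SIZE);
}
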
83359- }
83360 error = 0;
83361
83362 /* Somebody else might have raced and expanded it already */
83363- if (address > vma->vm_end) {
83364+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
83365+ error = -ENOMEM;
83366+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
83367 unsigned long size, grow;
83368
83369 size = address - vma->vm_start;
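
The reworked expand_upwards() above moves the wrap-around guard ahead of any locking: if PAGE_ALIGN(address + 1) produces a value no larger than address, the stack would grow past the top of the address space. That guard in isolation, assuming 4 KiB pages:

#include <stdbool.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* stores the aligned growth target, or fails if aligning wraps past 0 */
static bool stack_growth_target(unsigned long address, unsigned long *target)
{
	unsigned long aligned = PAGE_ALIGN(address + 1);

	if (address >= aligned)		/* wrapped around the address space */
		return false;
	*target = aligned;
	return true;
}
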
83370@@ -2094,6 +2379,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
83371 }
83372 }
83373 }
83374+ if (locknext)
83375+ vma_unlock_anon_vma(vma->vm_next);
83376 vma_unlock_anon_vma(vma);
83377 khugepaged_enter_vma_merge(vma);
83378 validate_mm(vma->vm_mm);
83379@@ -2108,6 +2395,8 @@ int expand_downwards(struct vm_area_struct *vma,
83380 unsigned long address)
83381 {
83382 int error;
83383+ bool lockprev = false;
83384+ struct vm_area_struct *prev;
83385
83386 /*
83387 * We must make sure the anon_vma is allocated
83388@@ -2121,6 +2410,15 @@ int expand_downwards(struct vm_area_struct *vma,
83389 if (error)
83390 return error;
83391
83392+ prev = vma->vm_prev;
83393+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
83394+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
83395+#endif
83396+ if (lockprev && anon_vma_prepare(prev))
83397+ return -ENOMEM;
83398+ if (lockprev)
83399+ vma_lock_anon_vma(prev);
83400+
83401 vma_lock_anon_vma(vma);
83402
83403 /*
83404@@ -2130,9 +2428,17 @@ int expand_downwards(struct vm_area_struct *vma,
83405 */
83406
83407 /* Somebody else might have raced and expanded it already */
83408- if (address < vma->vm_start) {
83409+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
83410+ error = -ENOMEM;
83411+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
83412 unsigned long size, grow;
83413
83414+#ifdef CONFIG_PAX_SEGMEXEC
83415+ struct vm_area_struct *vma_m;
83416+
83417+ vma_m = pax_find_mirror_vma(vma);
83418+#endif
83419+
83420 size = vma->vm_end - address;
83421 grow = (vma->vm_start - address) >> PAGE_SHIFT;
83422
83423@@ -2157,6 +2463,18 @@ int expand_downwards(struct vm_area_struct *vma,
83424 vma->vm_pgoff -= grow;
83425 anon_vma_interval_tree_post_update_vma(vma);
83426 vma_gap_update(vma);
83427+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
83428+
83429+#ifdef CONFIG_PAX_SEGMEXEC
83430+ if (vma_m) {
83431+ anon_vma_interval_tree_pre_update_vma(vma_m);
83432+ vma_m->vm_start -= grow << PAGE_SHIFT;
83433+ vma_m->vm_pgoff -= grow;
83434+ anon_vma_interval_tree_post_update_vma(vma_m);
83435+ vma_gap_update(vma_m);
83436+ }
83437+#endif
83438+
83439 spin_unlock(&vma->vm_mm->page_table_lock);
83440
83441 perf_event_mmap(vma);
83442@@ -2263,6 +2581,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
83443 do {
83444 long nrpages = vma_pages(vma);
83445
83446+#ifdef CONFIG_PAX_SEGMEXEC
83447+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
83448+ vma = remove_vma(vma);
83449+ continue;
83450+ }
83451+#endif
83452+
83453 if (vma->vm_flags & VM_ACCOUNT)
83454 nr_accounted += nrpages;
83455 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
83456@@ -2308,6 +2633,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
83457 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
83458 vma->vm_prev = NULL;
83459 do {
83460+
83461+#ifdef CONFIG_PAX_SEGMEXEC
83462+ if (vma->vm_mirror) {
83463+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
83464+ vma->vm_mirror->vm_mirror = NULL;
83465+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
83466+ vma->vm_mirror = NULL;
83467+ }
83468+#endif
83469+
83470 vma_rb_erase(vma, &mm->mm_rb);
83471 mm->map_count--;
83472 tail_vma = vma;
83473@@ -2339,14 +2674,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83474 struct vm_area_struct *new;
83475 int err = -ENOMEM;
83476
83477+#ifdef CONFIG_PAX_SEGMEXEC
83478+ struct vm_area_struct *vma_m, *new_m = NULL;
83479+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
83480+#endif
83481+
83482 if (is_vm_hugetlb_page(vma) && (addr &
83483 ~(huge_page_mask(hstate_vma(vma)))))
83484 return -EINVAL;
83485
83486+#ifdef CONFIG_PAX_SEGMEXEC
83487+ vma_m = pax_find_mirror_vma(vma);
83488+#endif
83489+
83490 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83491 if (!new)
83492 goto out_err;
83493
83494+#ifdef CONFIG_PAX_SEGMEXEC
83495+ if (vma_m) {
83496+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83497+ if (!new_m) {
83498+ kmem_cache_free(vm_area_cachep, new);
83499+ goto out_err;
83500+ }
83501+ }
83502+#endif
83503+
83504 /* most fields are the same, copy all, and then fixup */
83505 *new = *vma;
83506
83507@@ -2359,6 +2713,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83508 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
83509 }
83510
83511+#ifdef CONFIG_PAX_SEGMEXEC
83512+ if (vma_m) {
83513+ *new_m = *vma_m;
83514+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
83515+ new_m->vm_mirror = new;
83516+ new->vm_mirror = new_m;
83517+
83518+ if (new_below)
83519+ new_m->vm_end = addr_m;
83520+ else {
83521+ new_m->vm_start = addr_m;
83522+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
83523+ }
83524+ }
83525+#endif
83526+
83527 pol = mpol_dup(vma_policy(vma));
83528 if (IS_ERR(pol)) {
83529 err = PTR_ERR(pol);
83530@@ -2381,6 +2751,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83531 else
83532 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
83533
83534+#ifdef CONFIG_PAX_SEGMEXEC
83535+ if (!err && vma_m) {
83536+ if (anon_vma_clone(new_m, vma_m))
83537+ goto out_free_mpol;
83538+
83539+ mpol_get(pol);
83540+ vma_set_policy(new_m, pol);
83541+
83542+ if (new_m->vm_file)
83543+ get_file(new_m->vm_file);
83544+
83545+ if (new_m->vm_ops && new_m->vm_ops->open)
83546+ new_m->vm_ops->open(new_m);
83547+
83548+ if (new_below)
83549+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
83550+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
83551+ else
83552+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
83553+
83554+ if (err) {
83555+ if (new_m->vm_ops && new_m->vm_ops->close)
83556+ new_m->vm_ops->close(new_m);
83557+ if (new_m->vm_file)
83558+ fput(new_m->vm_file);
83559+ mpol_put(pol);
83560+ }
83561+ }
83562+#endif
83563+
83564 /* Success. */
83565 if (!err)
83566 return 0;
83567@@ -2390,10 +2790,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83568 new->vm_ops->close(new);
83569 if (new->vm_file)
83570 fput(new->vm_file);
83571- unlink_anon_vmas(new);
83572 out_free_mpol:
83573 mpol_put(pol);
83574 out_free_vma:
83575+
83576+#ifdef CONFIG_PAX_SEGMEXEC
83577+ if (new_m) {
83578+ unlink_anon_vmas(new_m);
83579+ kmem_cache_free(vm_area_cachep, new_m);
83580+ }
83581+#endif
83582+
83583+ unlink_anon_vmas(new);
83584 kmem_cache_free(vm_area_cachep, new);
83585 out_err:
83586 return err;
83587@@ -2406,6 +2814,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83588 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83589 unsigned long addr, int new_below)
83590 {
83591+
83592+#ifdef CONFIG_PAX_SEGMEXEC
83593+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
83594+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
83595+ if (mm->map_count >= sysctl_max_map_count-1)
83596+ return -ENOMEM;
83597+ } else
83598+#endif
83599+
83600 if (mm->map_count >= sysctl_max_map_count)
83601 return -ENOMEM;
83602
83603@@ -2417,11 +2834,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83604 * work. This now handles partial unmappings.
83605 * Jeremy Fitzhardinge <jeremy@goop.org>
83606 */
83607+#ifdef CONFIG_PAX_SEGMEXEC
83608 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83609 {
83610+ int ret = __do_munmap(mm, start, len);
83611+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
83612+ return ret;
83613+
83614+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
83615+}
83616+
83617+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83618+#else
83619+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83620+#endif
83621+{
83622 unsigned long end;
83623 struct vm_area_struct *vma, *prev, *last;
83624
83625+ /*
83626+ * mm->mmap_sem is required to protect against another thread
83627+ * changing the mappings in case we sleep.
83628+ */
83629+ verify_mm_writelocked(mm);
83630+
83631 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
83632 return -EINVAL;
83633
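
Under SEGMEXEC, do_munmap() above becomes a thin wrapper that unmaps twice: the requested range, then its mirror shifted up by SEGMEXEC_TASK_SIZE, so the executable shadow never outlives the data mapping. A self-contained sketch of the wrapper's shape; the constant is illustrative (half of a 3 GiB TASK_SIZE on x86-32) and a printing stub stands in for the real primitive:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed: half of a 3 GiB TASK_SIZE */

/* stub standing in for the kernel's __do_munmap() */
static int __do_munmap(unsigned long start, unsigned long len)
{
	printf("unmap [%#lx, %#lx)\n", start, start + len);
	return 0;
}

static int do_munmap_sketch(unsigned long start, unsigned long len, int segmexec)
{
	int ret = __do_munmap(start, len);

	if (ret || !segmexec)
		return ret;
	/* drop the mirrored executable range as well */
	return __do_munmap(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
	return do_munmap_sketch(0x08048000UL, 0x2000UL, 1);
}
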
83634@@ -2496,6 +2932,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83635 /* Fix up all other VM information */
83636 remove_vma_list(mm, vma);
83637
83638+ track_exec_limit(mm, start, end, 0UL);
83639+
83640 return 0;
83641 }
83642
83643@@ -2504,6 +2942,13 @@ int vm_munmap(unsigned long start, size_t len)
83644 int ret;
83645 struct mm_struct *mm = current->mm;
83646
83647+
83648+#ifdef CONFIG_PAX_SEGMEXEC
83649+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
83650+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
83651+ return -EINVAL;
83652+#endif
83653+
83654 down_write(&mm->mmap_sem);
83655 ret = do_munmap(mm, start, len);
83656 up_write(&mm->mmap_sem);
83657@@ -2517,16 +2962,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
83658 return vm_munmap(addr, len);
83659 }
83660
83661-static inline void verify_mm_writelocked(struct mm_struct *mm)
83662-{
83663-#ifdef CONFIG_DEBUG_VM
83664- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
83665- WARN_ON(1);
83666- up_read(&mm->mmap_sem);
83667- }
83668-#endif
83669-}
83670-
83671 /*
83672 * this is really a simplified "do_mmap". it only handles
83673 * anonymous maps. eventually we may be able to do some
83674@@ -2540,6 +2975,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83675 struct rb_node ** rb_link, * rb_parent;
83676 pgoff_t pgoff = addr >> PAGE_SHIFT;
83677 int error;
83678+ unsigned long charged;
83679
83680 len = PAGE_ALIGN(len);
83681 if (!len)
83682@@ -2547,16 +2983,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83683
83684 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
83685
83686+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
83687+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
83688+ flags &= ~VM_EXEC;
83689+
83690+#ifdef CONFIG_PAX_MPROTECT
83691+ if (mm->pax_flags & MF_PAX_MPROTECT)
83692+ flags &= ~VM_MAYEXEC;
83693+#endif
83694+
83695+ }
83696+#endif
83697+
83698 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
83699 if (error & ~PAGE_MASK)
83700 return error;
83701
83702+ charged = len >> PAGE_SHIFT;
83703+
83704 /*
83705 * mlock MCL_FUTURE?
83706 */
83707 if (mm->def_flags & VM_LOCKED) {
83708 unsigned long locked, lock_limit;
83709- locked = len >> PAGE_SHIFT;
83710+ locked = charged;
83711 locked += mm->locked_vm;
83712 lock_limit = rlimit(RLIMIT_MEMLOCK);
83713 lock_limit >>= PAGE_SHIFT;
83714@@ -2573,21 +3023,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83715 /*
83716 * Clear old maps. this also does some error checking for us
83717 */
83718- munmap_back:
83719 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
83720 if (do_munmap(mm, addr, len))
83721 return -ENOMEM;
83722- goto munmap_back;
83723+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
83724 }
83725
83726 /* Check against address space limits *after* clearing old maps... */
83727- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
83728+ if (!may_expand_vm(mm, charged))
83729 return -ENOMEM;
83730
83731 if (mm->map_count > sysctl_max_map_count)
83732 return -ENOMEM;
83733
83734- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
83735+ if (security_vm_enough_memory_mm(mm, charged))
83736 return -ENOMEM;
83737
83738 /* Can we just expand an old private anonymous mapping? */
83739@@ -2601,7 +3050,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83740 */
83741 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83742 if (!vma) {
83743- vm_unacct_memory(len >> PAGE_SHIFT);
83744+ vm_unacct_memory(charged);
83745 return -ENOMEM;
83746 }
83747
83748@@ -2615,11 +3064,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83749 vma_link(mm, vma, prev, rb_link, rb_parent);
83750 out:
83751 perf_event_mmap(vma);
83752- mm->total_vm += len >> PAGE_SHIFT;
83753+ mm->total_vm += charged;
83754 if (flags & VM_LOCKED) {
83755 if (!mlock_vma_pages_range(vma, addr, addr + len))
83756- mm->locked_vm += (len >> PAGE_SHIFT);
83757+ mm->locked_vm += charged;
83758 }
83759+ track_exec_limit(mm, addr, addr + len, flags);
83760 return addr;
83761 }
83762
83763@@ -2677,6 +3127,7 @@ void exit_mmap(struct mm_struct *mm)
83764 while (vma) {
83765 if (vma->vm_flags & VM_ACCOUNT)
83766 nr_accounted += vma_pages(vma);
83767+ vma->vm_mirror = NULL;
83768 vma = remove_vma(vma);
83769 }
83770 vm_unacct_memory(nr_accounted);
83771@@ -2693,6 +3144,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83772 struct vm_area_struct *prev;
83773 struct rb_node **rb_link, *rb_parent;
83774
83775+#ifdef CONFIG_PAX_SEGMEXEC
83776+ struct vm_area_struct *vma_m = NULL;
83777+#endif
83778+
83779+ if (security_mmap_addr(vma->vm_start))
83780+ return -EPERM;
83781+
83782 /*
83783 * The vm_pgoff of a purely anonymous vma should be irrelevant
83784 * until its first write fault, when page's anon_vma and index
83785@@ -2716,7 +3174,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83786 security_vm_enough_memory_mm(mm, vma_pages(vma)))
83787 return -ENOMEM;
83788
83789+#ifdef CONFIG_PAX_SEGMEXEC
83790+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
83791+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83792+ if (!vma_m)
83793+ return -ENOMEM;
83794+ }
83795+#endif
83796+
83797 vma_link(mm, vma, prev, rb_link, rb_parent);
83798+
83799+#ifdef CONFIG_PAX_SEGMEXEC
83800+ if (vma_m)
83801+ BUG_ON(pax_mirror_vma(vma_m, vma));
83802+#endif
83803+
83804 return 0;
83805 }
83806
83807@@ -2736,6 +3208,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83808 struct mempolicy *pol;
83809 bool faulted_in_anon_vma = true;
83810
83811+ BUG_ON(vma->vm_mirror);
83812+
83813 /*
83814 * If anonymous vma has not yet been faulted, update new pgoff
83815 * to match new location, to increase its chance of merging.
83816@@ -2802,6 +3276,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83817 return NULL;
83818 }
83819
83820+#ifdef CONFIG_PAX_SEGMEXEC
83821+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
83822+{
83823+ struct vm_area_struct *prev_m;
83824+ struct rb_node **rb_link_m, *rb_parent_m;
83825+ struct mempolicy *pol_m;
83826+
83827+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
83828+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
83829+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
83830+ *vma_m = *vma;
83831+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
83832+ if (anon_vma_clone(vma_m, vma))
83833+ return -ENOMEM;
83834+ pol_m = vma_policy(vma_m);
83835+ mpol_get(pol_m);
83836+ vma_set_policy(vma_m, pol_m);
83837+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
83838+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
83839+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
83840+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
83841+ if (vma_m->vm_file)
83842+ get_file(vma_m->vm_file);
83843+ if (vma_m->vm_ops && vma_m->vm_ops->open)
83844+ vma_m->vm_ops->open(vma_m);
83845+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
83846+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
83847+ vma_m->vm_mirror = vma;
83848+ vma->vm_mirror = vma_m;
83849+ return 0;
83850+}
83851+#endif
83852+
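
pax_mirror_vma() above clones an executable vma into the upper SEGMEXEC half: same file and offset, both ends shifted by SEGMEXEC_TASK_SIZE, with the write, accounting and mlock bits masked so the mirror is execute-only. The address arithmetic and the straddle invariant that pax_find_mirror_vma() enforces with BUG_ON(), restated in user space (the constant is again illustrative):

#include <assert.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed split point */

struct vma_range { unsigned long start, end; };

static struct vma_range mirror_of(struct vma_range v)
{
	struct vma_range m = {
		.start = v.start + SEGMEXEC_TASK_SIZE,
		.end   = v.end   + SEGMEXEC_TASK_SIZE,
	};

	/* a vma may not straddle the split point */
	assert(!(v.start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < v.end));
	/* the mirror covers exactly the same number of bytes */
	assert(m.end - m.start == v.end - v.start);
	return m;
}
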
83853 /*
83854 * Return true if the calling process may expand its vm space by the passed
83855 * number of pages
83856@@ -2813,6 +3320,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
83857
83858 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
83859
83860+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
83861 if (cur + npages > lim)
83862 return 0;
83863 return 1;
83864@@ -2883,6 +3391,22 @@ int install_special_mapping(struct mm_struct *mm,
83865 vma->vm_start = addr;
83866 vma->vm_end = addr + len;
83867
83868+#ifdef CONFIG_PAX_MPROTECT
83869+ if (mm->pax_flags & MF_PAX_MPROTECT) {
83870+#ifndef CONFIG_PAX_MPROTECT_COMPAT
83871+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
83872+ return -EPERM;
83873+ if (!(vm_flags & VM_EXEC))
83874+ vm_flags &= ~VM_MAYEXEC;
83875+#else
83876+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
83877+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
83878+#endif
83879+ else
83880+ vm_flags &= ~VM_MAYWRITE;
83881+ }
83882+#endif
83883+
83884 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
83885 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
83886
83887diff --git a/mm/mprotect.c b/mm/mprotect.c
83888index 94722a4..9837984 100644
83889--- a/mm/mprotect.c
83890+++ b/mm/mprotect.c
83891@@ -23,10 +23,17 @@
83892 #include <linux/mmu_notifier.h>
83893 #include <linux/migrate.h>
83894 #include <linux/perf_event.h>
83895+
83896+#ifdef CONFIG_PAX_MPROTECT
83897+#include <linux/elf.h>
83898+#include <linux/binfmts.h>
83899+#endif
83900+
83901 #include <asm/uaccess.h>
83902 #include <asm/pgtable.h>
83903 #include <asm/cacheflush.h>
83904 #include <asm/tlbflush.h>
83905+#include <asm/mmu_context.h>
83906
83907 #ifndef pgprot_modify
83908 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
83909@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
83910 return pages;
83911 }
83912
83913+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83914+/* called while holding the mmap semaphore for writing, except during stack expansion */
83915+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
83916+{
83917+ unsigned long oldlimit, newlimit = 0UL;
83918+
83919+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
83920+ return;
83921+
83922+ spin_lock(&mm->page_table_lock);
83923+ oldlimit = mm->context.user_cs_limit;
83924+ if ((prot & VM_EXEC) && oldlimit < end)
83925+ /* USER_CS limit moved up */
83926+ newlimit = end;
83927+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
83928+ /* USER_CS limit moved down */
83929+ newlimit = start;
83930+
83931+ if (newlimit) {
83932+ mm->context.user_cs_limit = newlimit;
83933+
83934+#ifdef CONFIG_SMP
83935+ wmb();
83936+ cpus_clear(mm->context.cpu_user_cs_mask);
83937+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
83938+#endif
83939+
83940+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
83941+ }
83942+ spin_unlock(&mm->page_table_lock);
83943+ if (newlimit == end) {
83944+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
83945+
83946+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
83947+ if (is_vm_hugetlb_page(vma))
83948+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
83949+ else
83950+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
83951+ }
83952+}
83953+#endif
83954+
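
track_exec_limit() above maintains a single watermark, mm->context.user_cs_limit: it rises to end when an executable range appears above it, and falls to start when a range straddling it loses VM_EXEC; everything else leaves it untouched. The decision logic in isolation:

#include <stdbool.h>

/* new code-segment limit after a protection change on [start, end);
 * exec says whether the range ends up executable */
static unsigned long update_exec_limit(unsigned long oldlimit,
				       unsigned long start, unsigned long end,
				       bool exec)
{
	if (exec && oldlimit < end)
		return end;	/* limit moves up over the new exec range */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;	/* limit drops below the de-exec'd range */
	return oldlimit;
}
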
83955 int
83956 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83957 unsigned long start, unsigned long end, unsigned long newflags)
83958@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83959 int error;
83960 int dirty_accountable = 0;
83961
83962+#ifdef CONFIG_PAX_SEGMEXEC
83963+ struct vm_area_struct *vma_m = NULL;
83964+ unsigned long start_m, end_m;
83965+
83966+ start_m = start + SEGMEXEC_TASK_SIZE;
83967+ end_m = end + SEGMEXEC_TASK_SIZE;
83968+#endif
83969+
83970 if (newflags == oldflags) {
83971 *pprev = vma;
83972 return 0;
83973 }
83974
83975+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
83976+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
83977+
83978+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
83979+ return -ENOMEM;
83980+
83981+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
83982+ return -ENOMEM;
83983+ }
83984+
83985 /*
83986 * If we make a private mapping writable we increase our commit;
83987 * but (without finer accounting) cannot reduce our commit if we
83988@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83989 }
83990 }
83991
83992+#ifdef CONFIG_PAX_SEGMEXEC
83993+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
83994+ if (start != vma->vm_start) {
83995+ error = split_vma(mm, vma, start, 1);
83996+ if (error)
83997+ goto fail;
83998+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
83999+ *pprev = (*pprev)->vm_next;
84000+ }
84001+
84002+ if (end != vma->vm_end) {
84003+ error = split_vma(mm, vma, end, 0);
84004+ if (error)
84005+ goto fail;
84006+ }
84007+
84008+ if (pax_find_mirror_vma(vma)) {
84009+ error = __do_munmap(mm, start_m, end_m - start_m);
84010+ if (error)
84011+ goto fail;
84012+ } else {
84013+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
84014+ if (!vma_m) {
84015+ error = -ENOMEM;
84016+ goto fail;
84017+ }
84018+ vma->vm_flags = newflags;
84019+ error = pax_mirror_vma(vma_m, vma);
84020+ if (error) {
84021+ vma->vm_flags = oldflags;
84022+ goto fail;
84023+ }
84024+ }
84025+ }
84026+#endif
84027+
84028 /*
84029 * First try to merge with previous and/or next vma.
84030 */
84031@@ -296,9 +399,21 @@ success:
84032 * vm_flags and vm_page_prot are protected by the mmap_sem
84033 * held in write mode.
84034 */
84035+
84036+#ifdef CONFIG_PAX_SEGMEXEC
84037+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
84038+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
84039+#endif
84040+
84041 vma->vm_flags = newflags;
84042+
84043+#ifdef CONFIG_PAX_MPROTECT
84044+ if (mm->binfmt && mm->binfmt->handle_mprotect)
84045+ mm->binfmt->handle_mprotect(vma, newflags);
84046+#endif
84047+
84048 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
84049- vm_get_page_prot(newflags));
84050+ vm_get_page_prot(vma->vm_flags));
84051
84052 if (vma_wants_writenotify(vma)) {
84053 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
84054@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84055 end = start + len;
84056 if (end <= start)
84057 return -ENOMEM;
84058+
84059+#ifdef CONFIG_PAX_SEGMEXEC
84060+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
84061+ if (end > SEGMEXEC_TASK_SIZE)
84062+ return -EINVAL;
84063+ } else
84064+#endif
84065+
84066+ if (end > TASK_SIZE)
84067+ return -EINVAL;
84068+
84069 if (!arch_validate_prot(prot))
84070 return -EINVAL;
84071
84072@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84073 /*
84074 * Does the application expect PROT_READ to imply PROT_EXEC:
84075 */
84076- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
84077+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
84078 prot |= PROT_EXEC;
84079
84080 vm_flags = calc_vm_prot_bits(prot);
84081@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84082 if (start > vma->vm_start)
84083 prev = vma;
84084
84085+#ifdef CONFIG_PAX_MPROTECT
84086+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
84087+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
84088+#endif
84089+
84090 for (nstart = start ; ; ) {
84091 unsigned long newflags;
84092
84093@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84094
84095 /* newflags >> 4 shift VM_MAY% in place of VM_% */
84096 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
84097+ if (prot & (PROT_WRITE | PROT_EXEC))
84098+ gr_log_rwxmprotect(vma->vm_file);
84099+
84100+ error = -EACCES;
84101+ goto out;
84102+ }
84103+
84104+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
84105 error = -EACCES;
84106 goto out;
84107 }
84108@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84109 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
84110 if (error)
84111 goto out;
84112+
84113+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
84114+
84115 nstart = tmp;
84116
84117 if (nstart < prev->vm_end)
84118diff --git a/mm/mremap.c b/mm/mremap.c
84119index e1031e1..1f2a0a1 100644
84120--- a/mm/mremap.c
84121+++ b/mm/mremap.c
84122@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
84123 continue;
84124 pte = ptep_get_and_clear(mm, old_addr, old_pte);
84125 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
84126+
84127+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84128+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
84129+ pte = pte_exprotect(pte);
84130+#endif
84131+
84132 set_pte_at(mm, new_addr, new_pte, pte);
84133 }
84134
84135@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
84136 if (is_vm_hugetlb_page(vma))
84137 goto Einval;
84138
84139+#ifdef CONFIG_PAX_SEGMEXEC
84140+ if (pax_find_mirror_vma(vma))
84141+ goto Einval;
84142+#endif
84143+
84144 /* We can't remap across vm area boundaries */
84145 if (old_len > vma->vm_end - addr)
84146 goto Efault;
84147@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
84148 unsigned long ret = -EINVAL;
84149 unsigned long charged = 0;
84150 unsigned long map_flags;
84151+ unsigned long pax_task_size = TASK_SIZE;
84152
84153 if (new_addr & ~PAGE_MASK)
84154 goto out;
84155
84156- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
84157+#ifdef CONFIG_PAX_SEGMEXEC
84158+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
84159+ pax_task_size = SEGMEXEC_TASK_SIZE;
84160+#endif
84161+
84162+ pax_task_size -= PAGE_SIZE;
84163+
84164+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
84165 goto out;
84166
84167 /* Check if the location we're moving into overlaps the
84168 * old location at all, and fail if it does.
84169 */
84170- if ((new_addr <= addr) && (new_addr+new_len) > addr)
84171- goto out;
84172-
84173- if ((addr <= new_addr) && (addr+old_len) > new_addr)
84174+ if (addr + old_len > new_addr && new_addr + new_len > addr)
84175 goto out;
84176
84177 ret = do_munmap(mm, new_addr, new_len);
84178@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84179 struct vm_area_struct *vma;
84180 unsigned long ret = -EINVAL;
84181 unsigned long charged = 0;
84182+ unsigned long pax_task_size = TASK_SIZE;
84183
84184 down_write(&current->mm->mmap_sem);
84185
84186@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84187 if (!new_len)
84188 goto out;
84189
84190+#ifdef CONFIG_PAX_SEGMEXEC
84191+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
84192+ pax_task_size = SEGMEXEC_TASK_SIZE;
84193+#endif
84194+
84195+ pax_task_size -= PAGE_SIZE;
84196+
84197+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
84198+ old_len > pax_task_size || addr > pax_task_size-old_len)
84199+ goto out;
84200+
84201 if (flags & MREMAP_FIXED) {
84202 if (flags & MREMAP_MAYMOVE)
84203 ret = mremap_to(addr, old_len, new_addr, new_len);
84204@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84205 addr + new_len);
84206 }
84207 ret = addr;
84208+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
84209 goto out;
84210 }
84211 }
84212@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84213 goto out;
84214 }
84215
84216+ map_flags = vma->vm_flags;
84217 ret = move_vma(vma, addr, old_len, new_len, new_addr);
84218+ if (!(ret & ~PAGE_MASK)) {
84219+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
84220+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
84221+ }
84222 }
84223 out:
84224 if (ret & ~PAGE_MASK)
84225diff --git a/mm/nommu.c b/mm/nommu.c
84226index bbe1f3f..b2601ea 100644
84227--- a/mm/nommu.c
84228+++ b/mm/nommu.c
84229@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
84230 int sysctl_overcommit_ratio = 50; /* default is 50% */
84231 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
84232 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
84233-int heap_stack_gap = 0;
84234
84235 atomic_long_t mmap_pages_allocated;
84236
84237@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
84238 EXPORT_SYMBOL(find_vma);
84239
84240 /*
84241- * find a VMA
84242- * - we don't extend stack VMAs under NOMMU conditions
84243- */
84244-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
84245-{
84246- return find_vma(mm, addr);
84247-}
84248-
84249-/*
84250 * expand a stack to a given address
84251 * - not supported under NOMMU conditions
84252 */
84253@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
84254
84255 /* most fields are the same, copy all, and then fixup */
84256 *new = *vma;
84257+ INIT_LIST_HEAD(&new->anon_vma_chain);
84258 *region = *vma->vm_region;
84259 new->vm_region = region;
84260
84261@@ -1975,8 +1966,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
84262 }
84263 EXPORT_SYMBOL(generic_file_remap_pages);
84264
84265-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84266- unsigned long addr, void *buf, int len, int write)
84267+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84268+ unsigned long addr, void *buf, size_t len, int write)
84269 {
84270 struct vm_area_struct *vma;
84271
84272@@ -2017,8 +2008,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84273 *
84274 * The caller must hold a reference on @mm.
84275 */
84276-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84277- void *buf, int len, int write)
84278+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84279+ void *buf, size_t len, int write)
84280 {
84281 return __access_remote_vm(NULL, mm, addr, buf, len, write);
84282 }
84283@@ -2027,7 +2018,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84284 * Access another process' address space.
84285 * - source/target buffer must be kernel space
84286 */
84287-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
84288+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
84289 {
84290 struct mm_struct *mm;
84291
84292diff --git a/mm/page-writeback.c b/mm/page-writeback.c
84293index 0713bfb..b95bb87 100644
84294--- a/mm/page-writeback.c
84295+++ b/mm/page-writeback.c
84296@@ -655,7 +655,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
84297 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
84298 * - the bdi dirty thresh drops quickly due to change of JBOD workload
84299 */
84300-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
84301+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
84302 unsigned long thresh,
84303 unsigned long bg_thresh,
84304 unsigned long dirty,
84305@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
84306 }
84307 }
84308
84309-static struct notifier_block __cpuinitdata ratelimit_nb = {
84310+static struct notifier_block ratelimit_nb = {
84311 .notifier_call = ratelimit_handler,
84312 .next = NULL,
84313 };
84314diff --git a/mm/page_alloc.c b/mm/page_alloc.c
84315index 6a83cd3..3ab04ef 100644
84316--- a/mm/page_alloc.c
84317+++ b/mm/page_alloc.c
84318@@ -58,6 +58,7 @@
84319 #include <linux/prefetch.h>
84320 #include <linux/migrate.h>
84321 #include <linux/page-debug-flags.h>
84322+#include <linux/random.h>
84323
84324 #include <asm/tlbflush.h>
84325 #include <asm/div64.h>
84326@@ -338,7 +339,7 @@ out:
84327 * This usage means that zero-order pages may not be compound.
84328 */
84329
84330-static void free_compound_page(struct page *page)
84331+void free_compound_page(struct page *page)
84332 {
84333 __free_pages_ok(page, compound_order(page));
84334 }
84335@@ -693,6 +694,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
84336 int i;
84337 int bad = 0;
84338
84339+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84340+ unsigned long index = 1UL << order;
84341+#endif
84342+
84343 trace_mm_page_free(page, order);
84344 kmemcheck_free_shadow(page, order);
84345
84346@@ -708,6 +713,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
84347 debug_check_no_obj_freed(page_address(page),
84348 PAGE_SIZE << order);
84349 }
84350+
84351+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84352+ for (; index; --index)
84353+ sanitize_highpage(page + index - 1);
84354+#endif
84355+
84356 arch_free_page(page, order);
84357 kernel_map_pages(page, 1 << order, 0);
84358
84359@@ -730,6 +741,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
84360 local_irq_restore(flags);
84361 }
84362
84363+#ifdef CONFIG_PAX_LATENT_ENTROPY
84364+bool __meminitdata extra_latent_entropy;
84365+
84366+static int __init setup_pax_extra_latent_entropy(char *str)
84367+{
84368+ extra_latent_entropy = true;
84369+ return 0;
84370+}
84371+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
84372+
84373+volatile u64 latent_entropy;
84374+#endif
84375+
84376 /*
84377 * Read access to zone->managed_pages is safe because it's unsigned long,
84378 * but we still need to serialize writers. Currently all callers of
84379@@ -752,6 +776,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
84380 set_page_count(p, 0);
84381 }
84382
84383+#ifdef CONFIG_PAX_LATENT_ENTROPY
84384+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
84385+ u64 hash = 0;
84386+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
84387+ const u64 *data = lowmem_page_address(page);
84388+
84389+ for (index = 0; index < end; index++)
84390+ hash ^= hash + data[index];
84391+ latent_entropy ^= hash;
84392+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84393+ }
84394+#endif
84395+
84396 page_zone(page)->managed_pages += 1 << order;
84397 set_page_refcounted(page);
84398 __free_pages(page, order);
84399@@ -861,8 +898,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
84400 arch_alloc_page(page, order);
84401 kernel_map_pages(page, 1 << order, 1);
84402
84403+#ifndef CONFIG_PAX_MEMORY_SANITIZE
84404 if (gfp_flags & __GFP_ZERO)
84405 prep_zero_page(page, order, gfp_flags);
84406+#endif
84407
84408 if (order && (gfp_flags & __GFP_COMP))
84409 prep_compound_page(page, order);
84410@@ -3752,7 +3791,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
84411 unsigned long pfn;
84412
84413 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
84414+#ifdef CONFIG_X86_32
84415+ /* pfn_valid_within() caused boot failures in VMware 8 on
84416+ 32-bit vanilla kernels, so keep the full pfn_valid() check */
84417+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
84418+#else
84419 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
84420+#endif
84421 return 1;
84422 }
84423 return 0;
84424diff --git a/mm/percpu.c b/mm/percpu.c
84425index 8c8e08f..73a5cda 100644
84426--- a/mm/percpu.c
84427+++ b/mm/percpu.c
84428@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
84429 static unsigned int pcpu_high_unit_cpu __read_mostly;
84430
84431 /* the address of the first chunk which starts with the kernel static area */
84432-void *pcpu_base_addr __read_mostly;
84433+void *pcpu_base_addr __read_only;
84434 EXPORT_SYMBOL_GPL(pcpu_base_addr);
84435
84436 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
84437diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
84438index fd26d04..0cea1b0 100644
84439--- a/mm/process_vm_access.c
84440+++ b/mm/process_vm_access.c
84441@@ -13,6 +13,7 @@
84442 #include <linux/uio.h>
84443 #include <linux/sched.h>
84444 #include <linux/highmem.h>
84445+#include <linux/security.h>
84446 #include <linux/ptrace.h>
84447 #include <linux/slab.h>
84448 #include <linux/syscalls.h>
84449@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84450 size_t iov_l_curr_offset = 0;
84451 ssize_t iov_len;
84452
84453+ return -ENOSYS; // PaX: until properly audited
84454+
84455 /*
84456 * Work out how many pages of struct pages we're going to need
84457 * when eventually calling get_user_pages
84458 */
84459 for (i = 0; i < riovcnt; i++) {
84460 iov_len = rvec[i].iov_len;
84461- if (iov_len > 0) {
84462- nr_pages_iov = ((unsigned long)rvec[i].iov_base
84463- + iov_len)
84464- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
84465- / PAGE_SIZE + 1;
84466- nr_pages = max(nr_pages, nr_pages_iov);
84467- }
84468+ if (iov_len <= 0)
84469+ continue;
84470+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
84471+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
84472+ nr_pages = max(nr_pages, nr_pages_iov);
84473 }
84474
84475 if (nr_pages == 0)
84476@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84477 goto free_proc_pages;
84478 }
84479
84480+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
84481+ rc = -EPERM;
84482+ goto put_task_struct;
84483+ }
84484+
84485 mm = mm_access(task, PTRACE_MODE_ATTACH);
84486 if (!mm || IS_ERR(mm)) {
84487 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
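
The restructured loop above computes how many page frames one iovec can touch: the page index of the last byte minus the page index of the first byte, plus one. A standalone sketch of that arithmetic (function name invented):

#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Upper bound on pages spanned by iov_len bytes at iov_base. */
static unsigned long pages_spanned(unsigned long iov_base, size_t iov_len)
{
        if (iov_len == 0)
                return 0;
        return (iov_base + iov_len) / PAGE_SIZE - iov_base / PAGE_SIZE + 1;
}

Note the formula over-counts by one when the buffer ends exactly on a page boundary, which is harmless here since it only sizes a scratch array.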
84488diff --git a/mm/rmap.c b/mm/rmap.c
84489index 2c78f8c..9e9c624 100644
84490--- a/mm/rmap.c
84491+++ b/mm/rmap.c
84492@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84493 struct anon_vma *anon_vma = vma->anon_vma;
84494 struct anon_vma_chain *avc;
84495
84496+#ifdef CONFIG_PAX_SEGMEXEC
84497+ struct anon_vma_chain *avc_m = NULL;
84498+#endif
84499+
84500 might_sleep();
84501 if (unlikely(!anon_vma)) {
84502 struct mm_struct *mm = vma->vm_mm;
84503@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84504 if (!avc)
84505 goto out_enomem;
84506
84507+#ifdef CONFIG_PAX_SEGMEXEC
84508+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
84509+ if (!avc_m)
84510+ goto out_enomem_free_avc;
84511+#endif
84512+
84513 anon_vma = find_mergeable_anon_vma(vma);
84514 allocated = NULL;
84515 if (!anon_vma) {
84516@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84517 /* page_table_lock to protect against threads */
84518 spin_lock(&mm->page_table_lock);
84519 if (likely(!vma->anon_vma)) {
84520+
84521+#ifdef CONFIG_PAX_SEGMEXEC
84522+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
84523+
84524+ if (vma_m) {
84525+ BUG_ON(vma_m->anon_vma);
84526+ vma_m->anon_vma = anon_vma;
84527+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
84528+ avc_m = NULL;
84529+ }
84530+#endif
84531+
84532 vma->anon_vma = anon_vma;
84533 anon_vma_chain_link(vma, avc, anon_vma);
84534 allocated = NULL;
84535@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84536
84537 if (unlikely(allocated))
84538 put_anon_vma(allocated);
84539+
84540+#ifdef CONFIG_PAX_SEGMEXEC
84541+ if (unlikely(avc_m))
84542+ anon_vma_chain_free(avc_m);
84543+#endif
84544+
84545 if (unlikely(avc))
84546 anon_vma_chain_free(avc);
84547 }
84548 return 0;
84549
84550 out_enomem_free_avc:
84551+
84552+#ifdef CONFIG_PAX_SEGMEXEC
84553+ if (avc_m)
84554+ anon_vma_chain_free(avc_m);
84555+#endif
84556+
84557 anon_vma_chain_free(avc);
84558 out_enomem:
84559 return -ENOMEM;
84560@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
84561 * Attach the anon_vmas from src to dst.
84562 * Returns 0 on success, -ENOMEM on failure.
84563 */
84564-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84565+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
84566 {
84567 struct anon_vma_chain *avc, *pavc;
84568 struct anon_vma *root = NULL;
84569@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84570 * the corresponding VMA in the parent process is attached to.
84571 * Returns 0 on success, non-zero on failure.
84572 */
84573-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
84574+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
84575 {
84576 struct anon_vma_chain *avc;
84577 struct anon_vma *anon_vma;
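
The SEGMEXEC changes above follow a standard locking pattern: allocate everything that might be needed (here a second anon_vma_chain for the mirror VMA) before taking the non-sleeping page_table_lock, consume it under the lock only if the mirror exists, and free whatever went unused afterwards. A generic user-space sketch of the pattern, with all names invented:

#include <stdlib.h>
#include <pthread.h>

struct chain { struct chain *next; };

/* Pre-allocate outside the lock (allocation may sleep), link under
 * the lock, and release the spare only after dropping the lock. */
static int link_with_optional_mirror(pthread_mutex_t *lock,
                                     struct chain **list, int want_mirror)
{
        struct chain *avc = malloc(sizeof(*avc));
        struct chain *avc_m = want_mirror ? malloc(sizeof(*avc_m)) : NULL;

        if (!avc || (want_mirror && !avc_m)) {
                free(avc_m);
                free(avc);
                return -1;
        }

        pthread_mutex_lock(lock);
        avc->next = *list;
        *list = avc;
        if (avc_m) {
                avc_m->next = *list;
                *list = avc_m;
                avc_m = NULL;   /* consumed under the lock */
        }
        pthread_mutex_unlock(lock);

        free(avc_m);            /* no-op if consumed */
        return 0;
}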
84578diff --git a/mm/shmem.c b/mm/shmem.c
84579index efd0b3a..994b702 100644
84580--- a/mm/shmem.c
84581+++ b/mm/shmem.c
84582@@ -31,7 +31,7 @@
84583 #include <linux/export.h>
84584 #include <linux/swap.h>
84585
84586-static struct vfsmount *shm_mnt;
84587+struct vfsmount *shm_mnt;
84588
84589 #ifdef CONFIG_SHMEM
84590 /*
84591@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
84592 #define BOGO_DIRENT_SIZE 20
84593
84594 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
84595-#define SHORT_SYMLINK_LEN 128
84596+#define SHORT_SYMLINK_LEN 64
84597
84598 /*
84599 * shmem_fallocate and shmem_writepage communicate via inode->i_private
84600@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
84601 static int shmem_xattr_validate(const char *name)
84602 {
84603 struct { const char *prefix; size_t len; } arr[] = {
84604+
84605+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84606+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
84607+#endif
84608+
84609 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
84610 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
84611 };
84612@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
84613 if (err)
84614 return err;
84615
84616+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84617+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
84618+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
84619+ return -EOPNOTSUPP;
84620+ if (size > 8)
84621+ return -EINVAL;
84622+ }
84623+#endif
84624+
84625 return simple_xattr_set(&info->xattrs, name, value, size, flags);
84626 }
84627
84628@@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
84629 int err = -ENOMEM;
84630
84631 /* Round up to L1_CACHE_BYTES to resist false sharing */
84632- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
84633- L1_CACHE_BYTES), GFP_KERNEL);
84634+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
84635 if (!sbinfo)
84636 return -ENOMEM;
84637
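
The shmem_setxattr() hunk admits exactly one user.* attribute, the PaX flags marker, and caps its value at 8 bytes so arbitrary user xattrs cannot be smuggled onto tmpfs. A user-space sketch of the same gate; the constants are assumed to mirror XATTR_USER_PREFIX and XATTR_NAME_PAX_FLAGS:

#include <string.h>
#include <errno.h>

#define USER_PREFIX     "user."
#define USER_PREFIX_LEN (sizeof(USER_PREFIX) - 1)
#define PAX_FLAGS_NAME  "user.pax.flags"

/* Allow only "user.pax.flags" among user.* names, value <= 8 bytes. */
static int validate_user_xattr(const char *name, size_t size)
{
        if (strncmp(name, USER_PREFIX, USER_PREFIX_LEN) == 0) {
                if (strcmp(name, PAX_FLAGS_NAME) != 0)
                        return -EOPNOTSUPP;
                if (size > 8)
                        return -EINVAL;
        }
        return 0;
}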
84638diff --git a/mm/slab.c b/mm/slab.c
84639index e7667a3..a48e73b 100644
84640--- a/mm/slab.c
84641+++ b/mm/slab.c
84642@@ -306,7 +306,7 @@ struct kmem_list3 {
84643 * Need this for bootstrapping a per node allocator.
84644 */
84645 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
84646-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
84647+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
84648 #define CACHE_CACHE 0
84649 #define SIZE_AC MAX_NUMNODES
84650 #define SIZE_L3 (2 * MAX_NUMNODES)
84651@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
84652 if ((x)->max_freeable < i) \
84653 (x)->max_freeable = i; \
84654 } while (0)
84655-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
84656-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
84657-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
84658-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
84659+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
84660+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
84661+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
84662+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
84663 #else
84664 #define STATS_INC_ACTIVE(x) do { } while (0)
84665 #define STATS_DEC_ACTIVE(x) do { } while (0)
84666@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
84667 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
84668 */
84669 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
84670- const struct slab *slab, void *obj)
84671+ const struct slab *slab, const void *obj)
84672 {
84673 u32 offset = (obj - slab->s_mem);
84674 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
84675@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
84676 struct cache_names {
84677 char *name;
84678 char *name_dma;
84679+ char *name_usercopy;
84680 };
84681
84682 static struct cache_names __initdata cache_names[] = {
84683-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
84684+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
84685 #include <linux/kmalloc_sizes.h>
84686- {NULL,}
84687+ {NULL}
84688 #undef CACHE
84689 };
84690
84691@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
84692 if (unlikely(gfpflags & GFP_DMA))
84693 return csizep->cs_dmacachep;
84694 #endif
84695+
84696+#ifdef CONFIG_PAX_USERCOPY_SLABS
84697+ if (unlikely(gfpflags & GFP_USERCOPY))
84698+ return csizep->cs_usercopycachep;
84699+#endif
84700+
84701 return csizep->cs_cachep;
84702 }
84703
84704@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
84705 return notifier_from_errno(err);
84706 }
84707
84708-static struct notifier_block __cpuinitdata cpucache_notifier = {
84709+static struct notifier_block cpucache_notifier = {
84710 &cpuup_callback, NULL, 0
84711 };
84712
84713@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
84714 */
84715
84716 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
84717- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
84718+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84719
84720 if (INDEX_AC != INDEX_L3)
84721 sizes[INDEX_L3].cs_cachep =
84722 create_kmalloc_cache(names[INDEX_L3].name,
84723- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
84724+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84725
84726 slab_early_init = 0;
84727
84728@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
84729 */
84730 if (!sizes->cs_cachep)
84731 sizes->cs_cachep = create_kmalloc_cache(names->name,
84732- sizes->cs_size, ARCH_KMALLOC_FLAGS);
84733+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84734
84735 #ifdef CONFIG_ZONE_DMA
84736 sizes->cs_dmacachep = create_kmalloc_cache(
84737 names->name_dma, sizes->cs_size,
84738 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
84739 #endif
84740+
84741+#ifdef CONFIG_PAX_USERCOPY_SLABS
84742+ sizes->cs_usercopycachep = create_kmalloc_cache(
84743+ names->name_usercopy, sizes->cs_size,
84744+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84745+#endif
84746+
84747 sizes++;
84748 names++;
84749 }
84750@@ -3924,6 +3938,7 @@ void kfree(const void *objp)
84751
84752 if (unlikely(ZERO_OR_NULL_PTR(objp)))
84753 return;
84754+ VM_BUG_ON(!virt_addr_valid(objp));
84755 local_irq_save(flags);
84756 kfree_debugcheck(objp);
84757 c = virt_to_cache(objp);
84758@@ -4365,10 +4380,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
84759 }
84760 /* cpu stats */
84761 {
84762- unsigned long allochit = atomic_read(&cachep->allochit);
84763- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
84764- unsigned long freehit = atomic_read(&cachep->freehit);
84765- unsigned long freemiss = atomic_read(&cachep->freemiss);
84766+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
84767+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
84768+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
84769+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
84770
84771 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
84772 allochit, allocmiss, freehit, freemiss);
84773@@ -4600,13 +4615,71 @@ static const struct file_operations proc_slabstats_operations = {
84774 static int __init slab_proc_init(void)
84775 {
84776 #ifdef CONFIG_DEBUG_SLAB_LEAK
84777- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
84778+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
84779 #endif
84780 return 0;
84781 }
84782 module_init(slab_proc_init);
84783 #endif
84784
84785+bool is_usercopy_object(const void *ptr)
84786+{
84787+ struct page *page;
84788+ struct kmem_cache *cachep;
84789+
84790+ if (ZERO_OR_NULL_PTR(ptr))
84791+ return false;
84792+
84793+ if (!slab_is_available())
84794+ return false;
84795+
84796+ if (!virt_addr_valid(ptr))
84797+ return false;
84798+
84799+ page = virt_to_head_page(ptr);
84800+
84801+ if (!PageSlab(page))
84802+ return false;
84803+
84804+ cachep = page->slab_cache;
84805+ return cachep->flags & SLAB_USERCOPY;
84806+}
84807+
84808+#ifdef CONFIG_PAX_USERCOPY
84809+const char *check_heap_object(const void *ptr, unsigned long n)
84810+{
84811+ struct page *page;
84812+ struct kmem_cache *cachep;
84813+ struct slab *slabp;
84814+ unsigned int objnr;
84815+ unsigned long offset;
84816+
84817+ if (ZERO_OR_NULL_PTR(ptr))
84818+ return "<null>";
84819+
84820+ if (!virt_addr_valid(ptr))
84821+ return NULL;
84822+
84823+ page = virt_to_head_page(ptr);
84824+
84825+ if (!PageSlab(page))
84826+ return NULL;
84827+
84828+ cachep = page->slab_cache;
84829+ if (!(cachep->flags & SLAB_USERCOPY))
84830+ return cachep->name;
84831+
84832+ slabp = page->slab_page;
84833+ objnr = obj_to_index(cachep, slabp, ptr);
84834+ BUG_ON(objnr >= cachep->num);
84835+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
84836+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
84837+ return NULL;
84838+
84839+ return cachep->name;
84840+}
84841+#endif
84842+
84843 /**
84844 * ksize - get the actual amount of memory allocated for a given object
84845 * @objp: Pointer to the object
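
check_heap_object() above validates a proposed usercopy window against the slab object containing it; the final test is arranged as n <= object_size - offset so that offset + n can never overflow. A standalone sketch of just the bounds check (names invented):

#include <stdbool.h>
#include <stddef.h>

/* A copy of n bytes at object-relative offset stays inside an object
 * of obj_size bytes; written to avoid overflow in offset + n. */
static bool copy_within_object(size_t offset, size_t n, size_t obj_size)
{
        return offset <= obj_size && n <= obj_size - offset;
}

is_usercopy_object() complements it by answering only whether the containing cache was created with SLAB_USERCOPY at all.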
84846diff --git a/mm/slab.h b/mm/slab.h
84847index 34a98d6..73633d1 100644
84848--- a/mm/slab.h
84849+++ b/mm/slab.h
84850@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84851
84852 /* Legal flag mask for kmem_cache_create(), for various configurations */
84853 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
84854- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
84855+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
84856
84857 #if defined(CONFIG_DEBUG_SLAB)
84858 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
84859@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
84860 return s;
84861
84862 page = virt_to_head_page(x);
84863+
84864+ BUG_ON(!PageSlab(page));
84865+
84866 cachep = page->slab_cache;
84867 if (slab_equal_or_root(cachep, s))
84868 return cachep;
84869diff --git a/mm/slab_common.c b/mm/slab_common.c
84870index 3f3cd97..93b0236 100644
84871--- a/mm/slab_common.c
84872+++ b/mm/slab_common.c
84873@@ -22,7 +22,7 @@
84874
84875 #include "slab.h"
84876
84877-enum slab_state slab_state;
84878+enum slab_state slab_state __read_only;
84879 LIST_HEAD(slab_caches);
84880 DEFINE_MUTEX(slab_mutex);
84881 struct kmem_cache *kmem_cache;
84882@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
84883
84884 err = __kmem_cache_create(s, flags);
84885 if (!err) {
84886- s->refcount = 1;
84887+ atomic_set(&s->refcount, 1);
84888 list_add(&s->list, &slab_caches);
84889 memcg_cache_list_add(memcg, s);
84890 } else {
84891@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
84892
84893 get_online_cpus();
84894 mutex_lock(&slab_mutex);
84895- s->refcount--;
84896- if (!s->refcount) {
84897+ if (atomic_dec_and_test(&s->refcount)) {
84898 list_del(&s->list);
84899
84900 if (!__kmem_cache_shutdown(s)) {
84901@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
84902 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
84903 name, size, err);
84904
84905- s->refcount = -1; /* Exempt from merging for now */
84906+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
84907 }
84908
84909 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84910@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84911
84912 create_boot_cache(s, name, size, flags);
84913 list_add(&s->list, &slab_caches);
84914- s->refcount = 1;
84915+ atomic_set(&s->refcount, 1);
84916 return s;
84917 }
84918
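
Because refcount becomes a real atomic above, teardown can key off a single atomic decrement-and-test instead of a decrement followed by a read under slab_mutex. A minimal sketch of that idiom using C11 atomics rather than the kernel's atomic_t (names invented):

#include <stdatomic.h>
#include <stdbool.h>

struct cache { atomic_int refcount; };

/* Returns true when the caller dropped the last reference and
 * should tear the cache down. */
static bool cache_put(struct cache *c)
{
        return atomic_fetch_sub(&c->refcount, 1) == 1;
}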
84919diff --git a/mm/slob.c b/mm/slob.c
84920index a99fdf7..6ee34ec 100644
84921--- a/mm/slob.c
84922+++ b/mm/slob.c
84923@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
84924 /*
84925 * Return the size of a slob block.
84926 */
84927-static slobidx_t slob_units(slob_t *s)
84928+static slobidx_t slob_units(const slob_t *s)
84929 {
84930 if (s->units > 0)
84931 return s->units;
84932@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
84933 /*
84934 * Return the next free slob block pointer after this one.
84935 */
84936-static slob_t *slob_next(slob_t *s)
84937+static slob_t *slob_next(const slob_t *s)
84938 {
84939 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
84940 slobidx_t next;
84941@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
84942 /*
84943 * Returns true if s is the last free block in its page.
84944 */
84945-static int slob_last(slob_t *s)
84946+static int slob_last(const slob_t *s)
84947 {
84948 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
84949 }
84950
84951-static void *slob_new_pages(gfp_t gfp, int order, int node)
84952+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
84953 {
84954- void *page;
84955+ struct page *page;
84956
84957 #ifdef CONFIG_NUMA
84958 if (node != NUMA_NO_NODE)
84959@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
84960 if (!page)
84961 return NULL;
84962
84963- return page_address(page);
84964+ __SetPageSlab(page);
84965+ return page;
84966 }
84967
84968-static void slob_free_pages(void *b, int order)
84969+static void slob_free_pages(struct page *sp, int order)
84970 {
84971 if (current->reclaim_state)
84972 current->reclaim_state->reclaimed_slab += 1 << order;
84973- free_pages((unsigned long)b, order);
84974+ __ClearPageSlab(sp);
84975+ reset_page_mapcount(sp);
84976+ sp->private = 0;
84977+ __free_pages(sp, order);
84978 }
84979
84980 /*
84981@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
84982
84983 /* Not enough space: must allocate a new page */
84984 if (!b) {
84985- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84986- if (!b)
84987+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84988+ if (!sp)
84989 return NULL;
84990- sp = virt_to_page(b);
84991- __SetPageSlab(sp);
84992+ b = page_address(sp);
84993
84994 spin_lock_irqsave(&slob_lock, flags);
84995 sp->units = SLOB_UNITS(PAGE_SIZE);
84996 sp->freelist = b;
84997+ sp->private = 0;
84998 INIT_LIST_HEAD(&sp->list);
84999 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
85000 set_slob_page_free(sp, slob_list);
85001@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
85002 if (slob_page_free(sp))
85003 clear_slob_page_free(sp);
85004 spin_unlock_irqrestore(&slob_lock, flags);
85005- __ClearPageSlab(sp);
85006- reset_page_mapcount(sp);
85007- slob_free_pages(b, 0);
85008+ slob_free_pages(sp, 0);
85009 return;
85010 }
85011
85012@@ -424,11 +426,10 @@ out:
85013 */
85014
85015 static __always_inline void *
85016-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
85017+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
85018 {
85019- unsigned int *m;
85020- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85021- void *ret;
85022+ slob_t *m;
85023+ void *ret = NULL;
85024
85025 gfp &= gfp_allowed_mask;
85026
85027@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
85028
85029 if (!m)
85030 return NULL;
85031- *m = size;
85032+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
85033+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
85034+ m[0].units = size;
85035+ m[1].units = align;
85036 ret = (void *)m + align;
85037
85038 trace_kmalloc_node(caller, ret,
85039 size, size + align, gfp, node);
85040 } else {
85041 unsigned int order = get_order(size);
85042+ struct page *page;
85043
85044 if (likely(order))
85045 gfp |= __GFP_COMP;
85046- ret = slob_new_pages(gfp, order, node);
85047+ page = slob_new_pages(gfp, order, node);
85048+ if (page) {
85049+ ret = page_address(page);
85050+ page->private = size;
85051+ }
85052
85053 trace_kmalloc_node(caller, ret,
85054 size, PAGE_SIZE << order, gfp, node);
85055 }
85056
85057- kmemleak_alloc(ret, size, 1, gfp);
85058+ return ret;
85059+}
85060+
85061+static __always_inline void *
85062+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
85063+{
85064+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85065+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
85066+
85067+ if (!ZERO_OR_NULL_PTR(ret))
85068+ kmemleak_alloc(ret, size, 1, gfp);
85069 return ret;
85070 }
85071
85072@@ -493,34 +512,112 @@ void kfree(const void *block)
85073 return;
85074 kmemleak_free(block);
85075
85076+ VM_BUG_ON(!virt_addr_valid(block));
85077 sp = virt_to_page(block);
85078- if (PageSlab(sp)) {
85079+ VM_BUG_ON(!PageSlab(sp));
85080+ if (!sp->private) {
85081 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85082- unsigned int *m = (unsigned int *)(block - align);
85083- slob_free(m, *m + align);
85084- } else
85085+ slob_t *m = (slob_t *)(block - align);
85086+ slob_free(m, m[0].units + align);
85087+ } else {
85088+ __ClearPageSlab(sp);
85089+ reset_page_mapcount(sp);
85090+ sp->private = 0;
85091 __free_pages(sp, compound_order(sp));
85092+ }
85093 }
85094 EXPORT_SYMBOL(kfree);
85095
85096+bool is_usercopy_object(const void *ptr)
85097+{
85098+ if (!slab_is_available())
85099+ return false;
85100+
85101+ // PAX: TODO
85102+
85103+ return false;
85104+}
85105+
85106+#ifdef CONFIG_PAX_USERCOPY
85107+const char *check_heap_object(const void *ptr, unsigned long n)
85108+{
85109+ struct page *page;
85110+ const slob_t *free;
85111+ const void *base;
85112+ unsigned long flags;
85113+
85114+ if (ZERO_OR_NULL_PTR(ptr))
85115+ return "<null>";
85116+
85117+ if (!virt_addr_valid(ptr))
85118+ return NULL;
85119+
85120+ page = virt_to_head_page(ptr);
85121+ if (!PageSlab(page))
85122+ return NULL;
85123+
85124+ if (page->private) {
85125+ base = page;
85126+ if (base <= ptr && n <= page->private - (ptr - base))
85127+ return NULL;
85128+ return "<slob>";
85129+ }
85130+
85131+	/* double walk: scan the allocated runs between free-list entries to find the chunk containing ptr */
85132+ spin_lock_irqsave(&slob_lock, flags);
85133+ base = (void *)((unsigned long)ptr & PAGE_MASK);
85134+ free = page->freelist;
85135+
85136+ while (!slob_last(free) && (void *)free <= ptr) {
85137+ base = free + slob_units(free);
85138+ free = slob_next(free);
85139+ }
85140+
85141+ while (base < (void *)free) {
85142+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
85143+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
85144+ int offset;
85145+
85146+ if (ptr < base + align)
85147+ break;
85148+
85149+ offset = ptr - base - align;
85150+ if (offset >= m) {
85151+ base += size;
85152+ continue;
85153+ }
85154+
85155+ if (n > m - offset)
85156+ break;
85157+
85158+ spin_unlock_irqrestore(&slob_lock, flags);
85159+ return NULL;
85160+ }
85161+
85162+ spin_unlock_irqrestore(&slob_lock, flags);
85163+ return "<slob>";
85164+}
85165+#endif
85166+
85167 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
85168 size_t ksize(const void *block)
85169 {
85170 struct page *sp;
85171 int align;
85172- unsigned int *m;
85173+ slob_t *m;
85174
85175 BUG_ON(!block);
85176 if (unlikely(block == ZERO_SIZE_PTR))
85177 return 0;
85178
85179 sp = virt_to_page(block);
85180- if (unlikely(!PageSlab(sp)))
85181- return PAGE_SIZE << compound_order(sp);
85182+ VM_BUG_ON(!PageSlab(sp));
85183+ if (sp->private)
85184+ return sp->private;
85185
85186 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85187- m = (unsigned int *)(block - align);
85188- return SLOB_UNITS(*m) * SLOB_UNIT;
85189+ m = (slob_t *)(block - align);
85190+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
85191 }
85192 EXPORT_SYMBOL(ksize);
85193
85194@@ -536,23 +633,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
85195
85196 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
85197 {
85198- void *b;
85199+ void *b = NULL;
85200
85201 flags &= gfp_allowed_mask;
85202
85203 lockdep_trace_alloc(flags);
85204
85205+#ifdef CONFIG_PAX_USERCOPY_SLABS
85206+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
85207+#else
85208 if (c->size < PAGE_SIZE) {
85209 b = slob_alloc(c->size, flags, c->align, node);
85210 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
85211 SLOB_UNITS(c->size) * SLOB_UNIT,
85212 flags, node);
85213 } else {
85214- b = slob_new_pages(flags, get_order(c->size), node);
85215+ struct page *sp;
85216+
85217+ sp = slob_new_pages(flags, get_order(c->size), node);
85218+ if (sp) {
85219+ b = page_address(sp);
85220+ sp->private = c->size;
85221+ }
85222 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
85223 PAGE_SIZE << get_order(c->size),
85224 flags, node);
85225 }
85226+#endif
85227
85228 if (c->ctor)
85229 c->ctor(b);
85230@@ -564,10 +671,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
85231
85232 static void __kmem_cache_free(void *b, int size)
85233 {
85234- if (size < PAGE_SIZE)
85235+ struct page *sp;
85236+
85237+ sp = virt_to_page(b);
85238+ BUG_ON(!PageSlab(sp));
85239+ if (!sp->private)
85240 slob_free(b, size);
85241 else
85242- slob_free_pages(b, get_order(size));
85243+ slob_free_pages(sp, get_order(size));
85244 }
85245
85246 static void kmem_rcu_free(struct rcu_head *head)
85247@@ -580,17 +691,31 @@ static void kmem_rcu_free(struct rcu_head *head)
85248
85249 void kmem_cache_free(struct kmem_cache *c, void *b)
85250 {
85251+ int size = c->size;
85252+
85253+#ifdef CONFIG_PAX_USERCOPY_SLABS
85254+ if (size + c->align < PAGE_SIZE) {
85255+ size += c->align;
85256+ b -= c->align;
85257+ }
85258+#endif
85259+
85260 kmemleak_free_recursive(b, c->flags);
85261 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
85262 struct slob_rcu *slob_rcu;
85263- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
85264- slob_rcu->size = c->size;
85265+ slob_rcu = b + (size - sizeof(struct slob_rcu));
85266+ slob_rcu->size = size;
85267 call_rcu(&slob_rcu->head, kmem_rcu_free);
85268 } else {
85269- __kmem_cache_free(b, c->size);
85270+ __kmem_cache_free(b, size);
85271 }
85272
85273+#ifdef CONFIG_PAX_USERCOPY_SLABS
85274+ trace_kfree(_RET_IP_, b);
85275+#else
85276 trace_kmem_cache_free(_RET_IP_, b);
85277+#endif
85278+
85279 }
85280 EXPORT_SYMBOL(kmem_cache_free);
85281
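
With these slob changes, every small allocation is preceded by a two-unit header recording its size and its alignment padding (m[0].units and m[1].units), which is what lets kfree(), ksize() and the usercopy checker recover both values later. A sketch of reading such a header back; the struct here is a stand-in, not the real slob_t:

#include <stdint.h>
#include <stddef.h>

struct unit { int32_t units; };         /* stand-in for slob_t */

struct alloc_info { size_t size; size_t align; };

/* block points at the payload; the two-unit header sits align bytes
 * before it, with header[0] = size and header[1] = alignment. */
static struct alloc_info read_slob_header(const void *block, size_t align)
{
        const struct unit *m = (const struct unit *)((const char *)block - align);
        struct alloc_info info = { (size_t)m[0].units, (size_t)m[1].units };

        return info;
}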
85282diff --git a/mm/slub.c b/mm/slub.c
85283index ba2ca53..991c4f7 100644
85284--- a/mm/slub.c
85285+++ b/mm/slub.c
85286@@ -197,7 +197,7 @@ struct track {
85287
85288 enum track_item { TRACK_ALLOC, TRACK_FREE };
85289
85290-#ifdef CONFIG_SYSFS
85291+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85292 static int sysfs_slab_add(struct kmem_cache *);
85293 static int sysfs_slab_alias(struct kmem_cache *, const char *);
85294 static void sysfs_slab_remove(struct kmem_cache *);
85295@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
85296 if (!t->addr)
85297 return;
85298
85299- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
85300+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
85301 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
85302 #ifdef CONFIG_STACKTRACE
85303 {
85304@@ -2653,7 +2653,7 @@ static int slub_min_objects;
85305 * Merge control. If this is set then no merging of slab caches will occur.
85306 * (Could be removed. This was introduced to pacify the merge skeptics.)
85307 */
85308-static int slub_nomerge;
85309+static int slub_nomerge = 1;
85310
85311 /*
85312 * Calculate the order of allocation given an slab object size.
85313@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
85314 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
85315 #endif
85316
85317+#ifdef CONFIG_PAX_USERCOPY_SLABS
85318+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
85319+#endif
85320+
85321 static int __init setup_slub_min_order(char *str)
85322 {
85323 get_option(&str, &slub_min_order);
85324@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
85325 return kmalloc_dma_caches[index];
85326
85327 #endif
85328+
85329+#ifdef CONFIG_PAX_USERCOPY_SLABS
85330+ if (flags & SLAB_USERCOPY)
85331+ return kmalloc_usercopy_caches[index];
85332+
85333+#endif
85334+
85335 return kmalloc_caches[index];
85336 }
85337
85338@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
85339 EXPORT_SYMBOL(__kmalloc_node);
85340 #endif
85341
85342+bool is_usercopy_object(const void *ptr)
85343+{
85344+ struct page *page;
85345+ struct kmem_cache *s;
85346+
85347+ if (ZERO_OR_NULL_PTR(ptr))
85348+ return false;
85349+
85350+ if (!slab_is_available())
85351+ return false;
85352+
85353+ if (!virt_addr_valid(ptr))
85354+ return false;
85355+
85356+ page = virt_to_head_page(ptr);
85357+
85358+ if (!PageSlab(page))
85359+ return false;
85360+
85361+ s = page->slab_cache;
85362+ return s->flags & SLAB_USERCOPY;
85363+}
85364+
85365+#ifdef CONFIG_PAX_USERCOPY
85366+const char *check_heap_object(const void *ptr, unsigned long n)
85367+{
85368+ struct page *page;
85369+ struct kmem_cache *s;
85370+ unsigned long offset;
85371+
85372+ if (ZERO_OR_NULL_PTR(ptr))
85373+ return "<null>";
85374+
85375+ if (!virt_addr_valid(ptr))
85376+ return NULL;
85377+
85378+ page = virt_to_head_page(ptr);
85379+
85380+ if (!PageSlab(page))
85381+ return NULL;
85382+
85383+ s = page->slab_cache;
85384+ if (!(s->flags & SLAB_USERCOPY))
85385+ return s->name;
85386+
85387+ offset = (ptr - page_address(page)) % s->size;
85388+ if (offset <= s->object_size && n <= s->object_size - offset)
85389+ return NULL;
85390+
85391+ return s->name;
85392+}
85393+#endif
85394+
85395 size_t ksize(const void *object)
85396 {
85397 struct page *page;
85398@@ -3404,6 +3468,7 @@ void kfree(const void *x)
85399 if (unlikely(ZERO_OR_NULL_PTR(x)))
85400 return;
85401
85402+ VM_BUG_ON(!virt_addr_valid(x));
85403 page = virt_to_head_page(x);
85404 if (unlikely(!PageSlab(page))) {
85405 BUG_ON(!PageCompound(page));
85406@@ -3712,17 +3777,17 @@ void __init kmem_cache_init(void)
85407
85408 /* Caches that are not of the two-to-the-power-of size */
85409 if (KMALLOC_MIN_SIZE <= 32) {
85410- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
85411+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
85412 caches++;
85413 }
85414
85415 if (KMALLOC_MIN_SIZE <= 64) {
85416- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
85417+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
85418 caches++;
85419 }
85420
85421 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
85422- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
85423+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
85424 caches++;
85425 }
85426
85427@@ -3764,6 +3829,22 @@ void __init kmem_cache_init(void)
85428 }
85429 }
85430 #endif
85431+
85432+#ifdef CONFIG_PAX_USERCOPY_SLABS
85433+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
85434+ struct kmem_cache *s = kmalloc_caches[i];
85435+
85436+ if (s && s->size) {
85437+ char *name = kasprintf(GFP_NOWAIT,
85438+ "usercopy-kmalloc-%d", s->object_size);
85439+
85440+ BUG_ON(!name);
85441+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
85442+ s->object_size, SLAB_USERCOPY);
85443+ }
85444+ }
85445+#endif
85446+
85447 printk(KERN_INFO
85448 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
85449 " CPUs=%d, Nodes=%d\n",
85450@@ -3790,7 +3871,7 @@ static int slab_unmergeable(struct kmem_cache *s)
85451 /*
85452 * We may have set a slab to be unmergeable during bootstrap.
85453 */
85454- if (s->refcount < 0)
85455+ if (atomic_read(&s->refcount) < 0)
85456 return 1;
85457
85458 return 0;
85459@@ -3848,7 +3929,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85460
85461 s = find_mergeable(memcg, size, align, flags, name, ctor);
85462 if (s) {
85463- s->refcount++;
85464+ atomic_inc(&s->refcount);
85465 /*
85466 * Adjust the object sizes so that we clear
85467 * the complete object on kzalloc.
85468@@ -3857,7 +3938,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85469 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
85470
85471 if (sysfs_slab_alias(s, name)) {
85472- s->refcount--;
85473+ atomic_dec(&s->refcount);
85474 s = NULL;
85475 }
85476 }
85477@@ -3919,7 +4000,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
85478 return NOTIFY_OK;
85479 }
85480
85481-static struct notifier_block __cpuinitdata slab_notifier = {
85482+static struct notifier_block slab_notifier = {
85483 .notifier_call = slab_cpuup_callback
85484 };
85485
85486@@ -3977,7 +4058,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
85487 }
85488 #endif
85489
85490-#ifdef CONFIG_SYSFS
85491+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85492 static int count_inuse(struct page *page)
85493 {
85494 return page->inuse;
85495@@ -4364,12 +4445,12 @@ static void resiliency_test(void)
85496 validate_slab_cache(kmalloc_caches[9]);
85497 }
85498 #else
85499-#ifdef CONFIG_SYSFS
85500+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85501 static void resiliency_test(void) {};
85502 #endif
85503 #endif
85504
85505-#ifdef CONFIG_SYSFS
85506+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85507 enum slab_stat_type {
85508 SL_ALL, /* All slabs */
85509 SL_PARTIAL, /* Only partially allocated slabs */
85510@@ -4613,7 +4694,7 @@ SLAB_ATTR_RO(ctor);
85511
85512 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
85513 {
85514- return sprintf(buf, "%d\n", s->refcount - 1);
85515+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
85516 }
85517 SLAB_ATTR_RO(aliases);
85518
85519@@ -5266,6 +5347,7 @@ static char *create_unique_id(struct kmem_cache *s)
85520 return name;
85521 }
85522
85523+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85524 static int sysfs_slab_add(struct kmem_cache *s)
85525 {
85526 int err;
85527@@ -5323,6 +5405,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
85528 kobject_del(&s->kobj);
85529 kobject_put(&s->kobj);
85530 }
85531+#endif
85532
85533 /*
85534 * Need to buffer aliases during bootup until sysfs becomes
85535@@ -5336,6 +5419,7 @@ struct saved_alias {
85536
85537 static struct saved_alias *alias_list;
85538
85539+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85540 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85541 {
85542 struct saved_alias *al;
85543@@ -5358,6 +5442,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85544 alias_list = al;
85545 return 0;
85546 }
85547+#endif
85548
85549 static int __init slab_sysfs_init(void)
85550 {
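
The SLUB flavour of check_heap_object() recovers the object-relative offset with a modulo, since SLUB lays objects out at a fixed s->size stride from page_address(), then applies the same overflow-safe window test as the SLAB version. A sketch with invented names:

#include <stdbool.h>
#include <stddef.h>

/* Objects are laid out every stride bytes from base; reject a copy of
 * n bytes unless it stays within the usable obj_size of one object. */
static bool slub_copy_ok(const char *base, const char *ptr,
                         size_t n, size_t stride, size_t obj_size)
{
        size_t offset = (size_t)(ptr - base) % stride;

        return offset <= obj_size && n <= obj_size - offset;
}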
85551diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
85552index 1b7e22a..3fcd4f3 100644
85553--- a/mm/sparse-vmemmap.c
85554+++ b/mm/sparse-vmemmap.c
85555@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
85556 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85557 if (!p)
85558 return NULL;
85559- pud_populate(&init_mm, pud, p);
85560+ pud_populate_kernel(&init_mm, pud, p);
85561 }
85562 return pud;
85563 }
85564@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
85565 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85566 if (!p)
85567 return NULL;
85568- pgd_populate(&init_mm, pgd, p);
85569+ pgd_populate_kernel(&init_mm, pgd, p);
85570 }
85571 return pgd;
85572 }
85573diff --git a/mm/sparse.c b/mm/sparse.c
85574index 6b5fb76..db0c190 100644
85575--- a/mm/sparse.c
85576+++ b/mm/sparse.c
85577@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
85578
85579 for (i = 0; i < PAGES_PER_SECTION; i++) {
85580 if (PageHWPoison(&memmap[i])) {
85581- atomic_long_sub(1, &mce_bad_pages);
85582+ atomic_long_sub_unchecked(1, &mce_bad_pages);
85583 ClearPageHWPoison(&memmap[i]);
85584 }
85585 }
85586diff --git a/mm/swap.c b/mm/swap.c
85587index 6310dc2..3662b3f 100644
85588--- a/mm/swap.c
85589+++ b/mm/swap.c
85590@@ -30,6 +30,7 @@
85591 #include <linux/backing-dev.h>
85592 #include <linux/memcontrol.h>
85593 #include <linux/gfp.h>
85594+#include <linux/hugetlb.h>
85595
85596 #include "internal.h"
85597
85598@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
85599
85600 __page_cache_release(page);
85601 dtor = get_compound_page_dtor(page);
85602+ if (!PageHuge(page))
85603+ BUG_ON(dtor != free_compound_page);
85604 (*dtor)(page);
85605 }
85606
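
__put_compound_page() above refuses to jump through the stored destructor unless, for non-huge pages, it is exactly free_compound_page — a cheap sanity check against a corrupted function pointer. A sketch of the idiom with invented names:

#include <assert.h>

typedef void (*dtor_fn)(void *);

static void expected_dtor(void *obj) { (void)obj; }

/* Call an object's destructor, but abort if the stored pointer is not
 * the one this path is allowed to reach. */
static void checked_destroy(void *obj, dtor_fn dtor, int must_be_expected)
{
        if (must_be_expected)
                assert(dtor == expected_dtor);
        dtor(obj);
}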
85607diff --git a/mm/swapfile.c b/mm/swapfile.c
85608index e97a0e5..b50e796 100644
85609--- a/mm/swapfile.c
85610+++ b/mm/swapfile.c
85611@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
85612
85613 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
85614 /* Activity counter to indicate that a swapon or swapoff has occurred */
85615-static atomic_t proc_poll_event = ATOMIC_INIT(0);
85616+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
85617
85618 static inline unsigned char swap_count(unsigned char ent)
85619 {
85620@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
85621 }
85622 filp_close(swap_file, NULL);
85623 err = 0;
85624- atomic_inc(&proc_poll_event);
85625+ atomic_inc_unchecked(&proc_poll_event);
85626 wake_up_interruptible(&proc_poll_wait);
85627
85628 out_dput:
85629@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
85630
85631 poll_wait(file, &proc_poll_wait, wait);
85632
85633- if (seq->poll_event != atomic_read(&proc_poll_event)) {
85634- seq->poll_event = atomic_read(&proc_poll_event);
85635+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
85636+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85637 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
85638 }
85639
85640@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
85641 return ret;
85642
85643 seq = file->private_data;
85644- seq->poll_event = atomic_read(&proc_poll_event);
85645+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85646 return 0;
85647 }
85648
85649@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
85650 (frontswap_map) ? "FS" : "");
85651
85652 mutex_unlock(&swapon_mutex);
85653- atomic_inc(&proc_poll_event);
85654+ atomic_inc_unchecked(&proc_poll_event);
85655 wake_up_interruptible(&proc_poll_wait);
85656
85657 if (S_ISREG(inode->i_mode))
85658diff --git a/mm/util.c b/mm/util.c
85659index c55e26b..3f913a9 100644
85660--- a/mm/util.c
85661+++ b/mm/util.c
85662@@ -292,6 +292,12 @@ done:
85663 void arch_pick_mmap_layout(struct mm_struct *mm)
85664 {
85665 mm->mmap_base = TASK_UNMAPPED_BASE;
85666+
85667+#ifdef CONFIG_PAX_RANDMMAP
85668+ if (mm->pax_flags & MF_PAX_RANDMMAP)
85669+ mm->mmap_base += mm->delta_mmap;
85670+#endif
85671+
85672 mm->get_unmapped_area = arch_get_unmapped_area;
85673 mm->unmap_area = arch_unmap_area;
85674 }
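
arch_pick_mmap_layout() above slides the mmap base by a per-process random delta when RANDMMAP is active; delta_mmap itself is chosen at exec time. A toy sketch of the idea — the constants and entropy width here are invented, the real values are architecture-specific:

#include <stdlib.h>

#define TASK_UNMAPPED_BASE 0x40000000UL
#define PAGE_SHIFT 12

/* Randomize the mmap base by a page-granular slide. */
static unsigned long pick_mmap_base(int randomize)
{
        unsigned long base = TASK_UNMAPPED_BASE;

        if (randomize)
                base += ((unsigned long)(rand() & 0xffff)) << PAGE_SHIFT;
        return base;
}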
85675diff --git a/mm/vmalloc.c b/mm/vmalloc.c
85676index 5123a16..f234a48 100644
85677--- a/mm/vmalloc.c
85678+++ b/mm/vmalloc.c
85679@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
85680
85681 pte = pte_offset_kernel(pmd, addr);
85682 do {
85683- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85684- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85685+
85686+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85687+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
85688+ BUG_ON(!pte_exec(*pte));
85689+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
85690+ continue;
85691+ }
85692+#endif
85693+
85694+ {
85695+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85696+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85697+ }
85698 } while (pte++, addr += PAGE_SIZE, addr != end);
85699 }
85700
85701@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
85702 pte = pte_alloc_kernel(pmd, addr);
85703 if (!pte)
85704 return -ENOMEM;
85705+
85706+ pax_open_kernel();
85707 do {
85708 struct page *page = pages[*nr];
85709
85710- if (WARN_ON(!pte_none(*pte)))
85711+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85712+ if (pgprot_val(prot) & _PAGE_NX)
85713+#endif
85714+
85715+ if (!pte_none(*pte)) {
85716+ pax_close_kernel();
85717+ WARN_ON(1);
85718 return -EBUSY;
85719- if (WARN_ON(!page))
85720+ }
85721+ if (!page) {
85722+ pax_close_kernel();
85723+ WARN_ON(1);
85724 return -ENOMEM;
85725+ }
85726 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
85727 (*nr)++;
85728 } while (pte++, addr += PAGE_SIZE, addr != end);
85729+ pax_close_kernel();
85730 return 0;
85731 }
85732
85733@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
85734 pmd_t *pmd;
85735 unsigned long next;
85736
85737- pmd = pmd_alloc(&init_mm, pud, addr);
85738+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85739 if (!pmd)
85740 return -ENOMEM;
85741 do {
85742@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
85743 pud_t *pud;
85744 unsigned long next;
85745
85746- pud = pud_alloc(&init_mm, pgd, addr);
85747+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85748 if (!pud)
85749 return -ENOMEM;
85750 do {
85751@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
85752 * and fall back on vmalloc() if that fails. Others
85753 * just put it in the vmalloc space.
85754 */
85755-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
85756+#ifdef CONFIG_MODULES
85757+#ifdef MODULES_VADDR
85758 unsigned long addr = (unsigned long)x;
85759 if (addr >= MODULES_VADDR && addr < MODULES_END)
85760 return 1;
85761 #endif
85762+
85763+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85764+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
85765+ return 1;
85766+#endif
85767+
85768+#endif
85769+
85770 return is_vmalloc_addr(x);
85771 }
85772
85773@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
85774
85775 if (!pgd_none(*pgd)) {
85776 pud_t *pud = pud_offset(pgd, addr);
85777+#ifdef CONFIG_X86
85778+ if (!pud_large(*pud))
85779+#endif
85780 if (!pud_none(*pud)) {
85781 pmd_t *pmd = pmd_offset(pud, addr);
85782+#ifdef CONFIG_X86
85783+ if (!pmd_large(*pmd))
85784+#endif
85785 if (!pmd_none(*pmd)) {
85786 pte_t *ptep, pte;
85787
85788@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
85789 * Allocate a region of KVA of the specified size and alignment, within the
85790 * vstart and vend.
85791 */
85792-static struct vmap_area *alloc_vmap_area(unsigned long size,
85793+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
85794 unsigned long align,
85795 unsigned long vstart, unsigned long vend,
85796 int node, gfp_t gfp_mask)
85797@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
85798 struct vm_struct *area;
85799
85800 BUG_ON(in_interrupt());
85801+
85802+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85803+ if (flags & VM_KERNEXEC) {
85804+ if (start != VMALLOC_START || end != VMALLOC_END)
85805+ return NULL;
85806+ start = (unsigned long)MODULES_EXEC_VADDR;
85807+ end = (unsigned long)MODULES_EXEC_END;
85808+ }
85809+#endif
85810+
85811 if (flags & VM_IOREMAP) {
85812 int bit = fls(size);
85813
85814@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
85815 if (count > totalram_pages)
85816 return NULL;
85817
85818+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85819+ if (!(pgprot_val(prot) & _PAGE_NX))
85820+ flags |= VM_KERNEXEC;
85821+#endif
85822+
85823 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
85824 __builtin_return_address(0));
85825 if (!area)
85826@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
85827 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
85828 goto fail;
85829
85830+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85831+ if (!(pgprot_val(prot) & _PAGE_NX))
85832+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
85833+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
85834+ else
85835+#endif
85836+
85837 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
85838 start, end, node, gfp_mask, caller);
85839 if (!area)
85840@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
85841 * For tight control over page level allocator and protection flags
85842 * use __vmalloc() instead.
85843 */
85844-
85845 void *vmalloc_exec(unsigned long size)
85846 {
85847- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
85848+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
85849 -1, __builtin_return_address(0));
85850 }
85851
85852@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
85853 unsigned long uaddr = vma->vm_start;
85854 unsigned long usize = vma->vm_end - vma->vm_start;
85855
85856+ BUG_ON(vma->vm_mirror);
85857+
85858 if ((PAGE_SIZE-1) & (unsigned long)addr)
85859 return -EINVAL;
85860
85861@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
85862 v->addr, v->addr + v->size, v->size);
85863
85864 if (v->caller)
85865+#ifdef CONFIG_GRKERNSEC_HIDESYM
85866+ seq_printf(m, " %pK", v->caller);
85867+#else
85868 seq_printf(m, " %pS", v->caller);
85869+#endif
85870
85871 if (v->nr_pages)
85872 seq_printf(m, " pages=%d", v->nr_pages);
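
is_vmalloc_or_module_addr() above gains a second executable-module window under KERNEXEC on x86-32; the test itself is two half-open range checks. A sketch with invented example bounds:

#include <stdbool.h>

#define MODULES_VADDR      0xf8000000UL
#define MODULES_END        0xff000000UL
#define MODULES_EXEC_VADDR 0xf0000000UL
#define MODULES_EXEC_END   0xf8000000UL

static bool in_module_space(unsigned long addr)
{
        if (addr >= MODULES_VADDR && addr < MODULES_END)
                return true;
        /* KERNEXEC keeps executable module text in its own window */
        return addr >= MODULES_EXEC_VADDR && addr < MODULES_EXEC_END;
}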
85873diff --git a/mm/vmstat.c b/mm/vmstat.c
85874index 9800306..76b4b27 100644
85875--- a/mm/vmstat.c
85876+++ b/mm/vmstat.c
85877@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
85878 *
85879 * vm_stat contains the global counters
85880 */
85881-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85882+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85883 EXPORT_SYMBOL(vm_stat);
85884
85885 #ifdef CONFIG_SMP
85886@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
85887 v = p->vm_stat_diff[i];
85888 p->vm_stat_diff[i] = 0;
85889 local_irq_restore(flags);
85890- atomic_long_add(v, &zone->vm_stat[i]);
85891+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85892 global_diff[i] += v;
85893 #ifdef CONFIG_NUMA
85894 /* 3 seconds idle till flush */
85895@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
85896
85897 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
85898 if (global_diff[i])
85899- atomic_long_add(global_diff[i], &vm_stat[i]);
85900+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
85901 }
85902
85903 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
85904@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
85905 if (pset->vm_stat_diff[i]) {
85906 int v = pset->vm_stat_diff[i];
85907 pset->vm_stat_diff[i] = 0;
85908- atomic_long_add(v, &zone->vm_stat[i]);
85909- atomic_long_add(v, &vm_stat[i]);
85910+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85911+ atomic_long_add_unchecked(v, &vm_stat[i]);
85912 }
85913 }
85914 #endif
85915@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
85916 return NOTIFY_OK;
85917 }
85918
85919-static struct notifier_block __cpuinitdata vmstat_notifier =
85920+static struct notifier_block vmstat_notifier =
85921 { &vmstat_cpuup_callback, NULL, 0 };
85922 #endif
85923
85924@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
85925 start_cpu_timer(cpu);
85926 #endif
85927 #ifdef CONFIG_PROC_FS
85928- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
85929- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
85930- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
85931- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
85932+ {
85933+ mode_t gr_mode = S_IRUGO;
85934+#ifdef CONFIG_GRKERNSEC_PROC_ADD
85935+ gr_mode = S_IRUSR;
85936+#endif
85937+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
85938+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
85939+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
85940+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
85941+#else
85942+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
85943+#endif
85944+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
85945+ }
85946 #endif
85947 return 0;
85948 }
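
setup_vmstat() now picks the /proc entry mode once and reuses it: world-readable by default, root-only under GRKERNSEC_PROC_ADD, and optionally group-readable for vmstat under GRKERNSEC_PROC_USERGROUP. A sketch of that mode selection using plain POSIX constants:

#include <sys/stat.h>

#ifndef S_IRUGO
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#endif

/* Pick a /proc file mode: root-only when restricted, optionally
 * letting the configured group read as well. */
static mode_t proc_mode(int restrict_to_root, int allow_group)
{
        mode_t mode = restrict_to_root ? S_IRUSR : S_IRUGO;

        if (allow_group)
                mode |= S_IRGRP;
        return mode;
}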
85949diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
85950index acc74ad..be02639 100644
85951--- a/net/8021q/vlan.c
85952+++ b/net/8021q/vlan.c
85953@@ -108,6 +108,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
85954 if (vlan_id)
85955 vlan_vid_del(real_dev, vlan_id);
85956
85957+ /* Take it out of our own structures, but be sure to interlock with
85958+ * HW accelerating devices or SW vlan input packet processing if
85959+ * VLAN is not 0 (leave it there for 802.1p).
85960+ */
85961+ if (vlan_id)
85962+ vlan_vid_del(real_dev, vlan_id);
85963+
85964 /* Get rid of the vlan's reference to real_dev */
85965 dev_put(real_dev);
85966 }
85967@@ -485,7 +492,7 @@ out:
85968 return NOTIFY_DONE;
85969 }
85970
85971-static struct notifier_block vlan_notifier_block __read_mostly = {
85972+static struct notifier_block vlan_notifier_block = {
85973 .notifier_call = vlan_device_event,
85974 };
85975
85976@@ -560,8 +567,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
85977 err = -EPERM;
85978 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
85979 break;
85980- if ((args.u.name_type >= 0) &&
85981- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
85982+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
85983 struct vlan_net *vn;
85984
85985 vn = net_generic(net, vlan_net_id);
85986diff --git a/net/9p/mod.c b/net/9p/mod.c
85987index 6ab36ae..6f1841b 100644
85988--- a/net/9p/mod.c
85989+++ b/net/9p/mod.c
85990@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
85991 void v9fs_register_trans(struct p9_trans_module *m)
85992 {
85993 spin_lock(&v9fs_trans_lock);
85994- list_add_tail(&m->list, &v9fs_trans_list);
85995+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
85996 spin_unlock(&v9fs_trans_lock);
85997 }
85998 EXPORT_SYMBOL(v9fs_register_trans);
85999@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
86000 void v9fs_unregister_trans(struct p9_trans_module *m)
86001 {
86002 spin_lock(&v9fs_trans_lock);
86003- list_del_init(&m->list);
86004+ pax_list_del_init((struct list_head *)&m->list);
86005 spin_unlock(&v9fs_trans_lock);
86006 }
86007 EXPORT_SYMBOL(v9fs_unregister_trans);
86008diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
86009index 02efb25..41541a9 100644
86010--- a/net/9p/trans_fd.c
86011+++ b/net/9p/trans_fd.c
86012@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
86013 oldfs = get_fs();
86014 set_fs(get_ds());
86015 /* The cast to a user pointer is valid due to the set_fs() */
86016- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
86017+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
86018 set_fs(oldfs);
86019
86020 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
86021diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
86022index 876fbe8..8bbea9f 100644
86023--- a/net/atm/atm_misc.c
86024+++ b/net/atm/atm_misc.c
86025@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
86026 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
86027 return 1;
86028 atm_return(vcc, truesize);
86029- atomic_inc(&vcc->stats->rx_drop);
86030+ atomic_inc_unchecked(&vcc->stats->rx_drop);
86031 return 0;
86032 }
86033 EXPORT_SYMBOL(atm_charge);
86034@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
86035 }
86036 }
86037 atm_return(vcc, guess);
86038- atomic_inc(&vcc->stats->rx_drop);
86039+ atomic_inc_unchecked(&vcc->stats->rx_drop);
86040 return NULL;
86041 }
86042 EXPORT_SYMBOL(atm_alloc_charge);
86043@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
86044
86045 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
86046 {
86047-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
86048+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
86049 __SONET_ITEMS
86050 #undef __HANDLE_ITEM
86051 }
86052@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
86053
86054 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
86055 {
86056-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
86057+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
86058 __SONET_ITEMS
86059 #undef __HANDLE_ITEM
86060 }
86061diff --git a/net/atm/lec.h b/net/atm/lec.h
86062index a86aff9..3a0d6f6 100644
86063--- a/net/atm/lec.h
86064+++ b/net/atm/lec.h
86065@@ -48,7 +48,7 @@ struct lane2_ops {
86066 const u8 *tlvs, u32 sizeoftlvs);
86067 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
86068 const u8 *tlvs, u32 sizeoftlvs);
86069-};
86070+} __no_const;
86071
86072 /*
86073 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
86074diff --git a/net/atm/proc.c b/net/atm/proc.c
86075index 0d020de..011c7bb 100644
86076--- a/net/atm/proc.c
86077+++ b/net/atm/proc.c
86078@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
86079 const struct k_atm_aal_stats *stats)
86080 {
86081 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
86082- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
86083- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
86084- atomic_read(&stats->rx_drop));
86085+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
86086+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
86087+ atomic_read_unchecked(&stats->rx_drop));
86088 }
86089
86090 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
86091diff --git a/net/atm/resources.c b/net/atm/resources.c
86092index 0447d5d..3cf4728 100644
86093--- a/net/atm/resources.c
86094+++ b/net/atm/resources.c
86095@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
86096 static void copy_aal_stats(struct k_atm_aal_stats *from,
86097 struct atm_aal_stats *to)
86098 {
86099-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
86100+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
86101 __AAL_STAT_ITEMS
86102 #undef __HANDLE_ITEM
86103 }
86104@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
86105 static void subtract_aal_stats(struct k_atm_aal_stats *from,
86106 struct atm_aal_stats *to)
86107 {
86108-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
86109+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
86110 __AAL_STAT_ITEMS
86111 #undef __HANDLE_ITEM
86112 }
86113diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
86114index d5744b7..506bae3 100644
86115--- a/net/ax25/sysctl_net_ax25.c
86116+++ b/net/ax25/sysctl_net_ax25.c
86117@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
86118 {
86119 char path[sizeof("net/ax25/") + IFNAMSIZ];
86120 int k;
86121- struct ctl_table *table;
86122+ ctl_table_no_const *table;
86123
86124 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
86125 if (!table)
86126diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
86127index 1ee94d0..14beea2 100644
86128--- a/net/batman-adv/bat_iv_ogm.c
86129+++ b/net/batman-adv/bat_iv_ogm.c
86130@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
86131
86132 /* randomize initial seqno to avoid collision */
86133 get_random_bytes(&random_seqno, sizeof(random_seqno));
86134- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
86135+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
86136
86137 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
86138 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
86139@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
86140 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
86141
86142 /* change sequence number to network order */
86143- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
86144+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
86145 batadv_ogm_packet->seqno = htonl(seqno);
86146- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
86147+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
86148
86149 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
86150 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
86151@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
86152 return;
86153
86154 /* could be changed by schedule_own_packet() */
86155- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
86156+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
86157
86158 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
86159 has_directlink_flag = 1;
86160diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
86161index f1d37cd..4190879 100644
86162--- a/net/batman-adv/hard-interface.c
86163+++ b/net/batman-adv/hard-interface.c
86164@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
86165 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
86166 dev_add_pack(&hard_iface->batman_adv_ptype);
86167
86168- atomic_set(&hard_iface->frag_seqno, 1);
86169+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
86170 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
86171 hard_iface->net_dev->name);
86172
86173@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
86174 /* This can't be called via a bat_priv callback because
86175 * we have no bat_priv yet.
86176 */
86177- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
86178+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
86179 hard_iface->bat_iv.ogm_buff = NULL;
86180
86181 return hard_iface;
86182diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
86183index 6b548fd..fc32c8d 100644
86184--- a/net/batman-adv/soft-interface.c
86185+++ b/net/batman-adv/soft-interface.c
86186@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
86187 primary_if->net_dev->dev_addr, ETH_ALEN);
86188
86189 /* set broadcast sequence number */
86190- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
86191+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
86192 bcast_packet->seqno = htonl(seqno);
86193
86194 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
86195@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
86196 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
86197
86198 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
86199- atomic_set(&bat_priv->bcast_seqno, 1);
86200+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
86201 atomic_set(&bat_priv->tt.vn, 0);
86202 atomic_set(&bat_priv->tt.local_changes, 0);
86203 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
86204diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
86205index ae9ac9a..11e0fe7 100644
86206--- a/net/batman-adv/types.h
86207+++ b/net/batman-adv/types.h
86208@@ -48,7 +48,7 @@
86209 struct batadv_hard_iface_bat_iv {
86210 unsigned char *ogm_buff;
86211 int ogm_buff_len;
86212- atomic_t ogm_seqno;
86213+ atomic_unchecked_t ogm_seqno;
86214 };
86215
86216 struct batadv_hard_iface {
86217@@ -56,7 +56,7 @@ struct batadv_hard_iface {
86218 int16_t if_num;
86219 char if_status;
86220 struct net_device *net_dev;
86221- atomic_t frag_seqno;
86222+ atomic_unchecked_t frag_seqno;
86223 struct kobject *hardif_obj;
86224 atomic_t refcount;
86225 struct packet_type batman_adv_ptype;
86226@@ -284,7 +284,7 @@ struct batadv_priv {
86227 atomic_t orig_interval; /* uint */
86228 atomic_t hop_penalty; /* uint */
86229 atomic_t log_level; /* uint */
86230- atomic_t bcast_seqno;
86231+ atomic_unchecked_t bcast_seqno;
86232 atomic_t bcast_queue_left;
86233 atomic_t batman_queue_left;
86234 char num_ifaces;
86235diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
86236index 10aff49..ea8e021 100644
86237--- a/net/batman-adv/unicast.c
86238+++ b/net/batman-adv/unicast.c
86239@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
86240 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
86241 frag2->flags = large_tail;
86242
86243- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
86244+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
86245 frag1->seqno = htons(seqno - 1);
86246 frag2->seqno = htons(seqno);
86247
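
The batman-adv ogm_seqno, frag_seqno and bcast_seqno counters converted above are on-wire sequence numbers that wrap by design, which is exactly the behaviour a REFCOUNT overflow trap would punish; hence the move to unchecked atomics. A small standalone illustration (plain C, not from the patch) of why wraparound must stay legal for such counters:

#include <stdint.h>
#include <assert.h>

int main(void)
{
        /* serial-number comparison: b is "newer" than a across the wrap */
        uint16_t a = 65535, b = 1;
        int16_t delta = (int16_t)(b - a);   /* wraps to 2 */

        assert(delta > 0);  /* b follows a; trapping the wrap would
                             * break the protocol, not protect it */
        return 0;
}
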
86248diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
86249index 07f0739..3c42e34 100644
86250--- a/net/bluetooth/hci_sock.c
86251+++ b/net/bluetooth/hci_sock.c
86252@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
86253 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
86254 }
86255
86256- len = min_t(unsigned int, len, sizeof(uf));
86257+ len = min((size_t)len, sizeof(uf));
86258 if (copy_from_user(&uf, optval, len)) {
86259 err = -EFAULT;
86260 break;
86261diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
86262index 22e6583..426e2f3 100644
86263--- a/net/bluetooth/l2cap_core.c
86264+++ b/net/bluetooth/l2cap_core.c
86265@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
86266 break;
86267
86268 case L2CAP_CONF_RFC:
86269- if (olen == sizeof(rfc))
86270- memcpy(&rfc, (void *)val, olen);
86271+ if (olen != sizeof(rfc))
86272+ break;
86273+
86274+ memcpy(&rfc, (void *)val, olen);
86275
86276 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
86277 rfc.mode != chan->mode)
86278diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
86279index 1bcfb84..dad9f98 100644
86280--- a/net/bluetooth/l2cap_sock.c
86281+++ b/net/bluetooth/l2cap_sock.c
86282@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
86283 struct sock *sk = sock->sk;
86284 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
86285 struct l2cap_options opts;
86286- int len, err = 0;
86287+ int err = 0;
86288+ size_t len = optlen;
86289 u32 opt;
86290
86291 BT_DBG("sk %p", sk);
86292@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
86293 opts.max_tx = chan->max_tx;
86294 opts.txwin_size = chan->tx_win;
86295
86296- len = min_t(unsigned int, sizeof(opts), optlen);
86297+ len = min(sizeof(opts), len);
86298 if (copy_from_user((char *) &opts, optval, len)) {
86299 err = -EFAULT;
86300 break;
86301@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86302 struct bt_security sec;
86303 struct bt_power pwr;
86304 struct l2cap_conn *conn;
86305- int len, err = 0;
86306+ int err = 0;
86307+ size_t len = optlen;
86308 u32 opt;
86309
86310 BT_DBG("sk %p", sk);
86311@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86312
86313 sec.level = BT_SECURITY_LOW;
86314
86315- len = min_t(unsigned int, sizeof(sec), optlen);
86316+ len = min(sizeof(sec), len);
86317 if (copy_from_user((char *) &sec, optval, len)) {
86318 err = -EFAULT;
86319 break;
86320@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86321
86322 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
86323
86324- len = min_t(unsigned int, sizeof(pwr), optlen);
86325+ len = min(sizeof(pwr), len);
86326 if (copy_from_user((char *) &pwr, optval, len)) {
86327 err = -EFAULT;
86328 break;
86329diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
86330index ce3f665..2c7d08f 100644
86331--- a/net/bluetooth/rfcomm/sock.c
86332+++ b/net/bluetooth/rfcomm/sock.c
86333@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
86334 struct sock *sk = sock->sk;
86335 struct bt_security sec;
86336 int err = 0;
86337- size_t len;
86338+ size_t len = optlen;
86339 u32 opt;
86340
86341 BT_DBG("sk %p", sk);
86342@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
86343
86344 sec.level = BT_SECURITY_LOW;
86345
86346- len = min_t(unsigned int, sizeof(sec), optlen);
86347+ len = min(sizeof(sec), len);
86348 if (copy_from_user((char *) &sec, optval, len)) {
86349 err = -EFAULT;
86350 break;
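
The min_t-to-min rewrites in the Bluetooth setsockopt paths above trade a forced cast for a type-checked comparison: min_t(unsigned int, ...) silently converts both operands, which can paper over a signed length sneaking in, while min() refuses mismatched operand types at build time once len is carried as size_t end to end. A self-contained model of the kernel's typecheck trick, assuming the stock macro's shape:

#include <stddef.h>
#include <stdio.h>

/* minimal model of the kernel's min(): the dead pointer comparison
 * triggers -Wcompare-distinct-pointer-types when the types differ */
#define min(a, b) ({                    \
        __typeof__(a) _a = (a);         \
        __typeof__(b) _b = (b);         \
        (void)(&_a == &_b);             \
        _a < _b ? _a : _b; })

int main(void)
{
        int optlen = 7;                 /* user-supplied, may be negative */
        size_t len = optlen;            /* carry it unsigned from the start */

        len = min(sizeof(long), len);   /* size_t vs size_t: clean */
        /* min(sizeof(long), optlen);      size_t vs int: build warning */
        printf("%zu\n", len);
        return 0;
}
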
86351diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
86352index bd6fd0f..6492cba 100644
86353--- a/net/bluetooth/rfcomm/tty.c
86354+++ b/net/bluetooth/rfcomm/tty.c
86355@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
86356 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
86357
86358 spin_lock_irqsave(&dev->port.lock, flags);
86359- if (dev->port.count > 0) {
86360+ if (atomic_read(&dev->port.count) > 0) {
86361 spin_unlock_irqrestore(&dev->port.lock, flags);
86362 return;
86363 }
86364@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
86365 return -ENODEV;
86366
86367 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
86368- dev->channel, dev->port.count);
86369+ dev->channel, atomic_read(&dev->port.count));
86370
86371 spin_lock_irqsave(&dev->port.lock, flags);
86372- if (++dev->port.count > 1) {
86373+ if (atomic_inc_return(&dev->port.count) > 1) {
86374 spin_unlock_irqrestore(&dev->port.lock, flags);
86375 return 0;
86376 }
86377@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
86378 return;
86379
86380 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
86381- dev->port.count);
86382+ atomic_read(&dev->port.count));
86383
86384 spin_lock_irqsave(&dev->port.lock, flags);
86385- if (!--dev->port.count) {
86386+ if (!atomic_dec_return(&dev->port.count)) {
86387 spin_unlock_irqrestore(&dev->port.lock, flags);
86388 if (dev->tty_dev->parent)
86389 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
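
Converting dev->port.count to an atomic_t lets the open/close bookkeeping above swap the ++/-- idioms for atomic_inc_return()/atomic_dec_return(), so the counter stays coherent even on paths that look at it without taking the port lock. The pattern in isolation, sketched:

static atomic_t count = ATOMIC_INIT(0);

static void open_path(void)
{
        if (atomic_inc_return(&count) > 1)      /* was: if (++count > 1) */
                return;                         /* not the first opener */
        /* first open: bring the device up */
}

static void close_path(void)
{
        if (!atomic_dec_return(&count)) {       /* was: if (!--count) */
                /* last close: tear the device down */
        }
}
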
86390diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
86391index d9576e6..85f4f4e 100644
86392--- a/net/bridge/br_fdb.c
86393+++ b/net/bridge/br_fdb.c
86394@@ -386,7 +386,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
86395 return 0;
86396 br_warn(br, "adding interface %s with same address "
86397 "as a received packet\n",
86398- source->dev->name);
86399+ source ? source->dev->name : br->dev->name);
86400 fdb_delete(br, fdb);
86401 }
86402
86403diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
86404index 5fe2ff3..121d696 100644
86405--- a/net/bridge/netfilter/ebtables.c
86406+++ b/net/bridge/netfilter/ebtables.c
86407@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86408 tmp.valid_hooks = t->table->valid_hooks;
86409 }
86410 mutex_unlock(&ebt_mutex);
86411- if (copy_to_user(user, &tmp, *len) != 0){
86412+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
86413 BUGPRINT("c2u Didn't work\n");
86414 ret = -EFAULT;
86415 break;
86416@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86417 goto out;
86418 tmp.valid_hooks = t->valid_hooks;
86419
86420- if (copy_to_user(user, &tmp, *len) != 0) {
86421+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86422 ret = -EFAULT;
86423 break;
86424 }
86425@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86426 tmp.entries_size = t->table->entries_size;
86427 tmp.valid_hooks = t->table->valid_hooks;
86428
86429- if (copy_to_user(user, &tmp, *len) != 0) {
86430+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86431 ret = -EFAULT;
86432 break;
86433 }
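
The three ebtables hunks above all close the same hole: tmp is a fixed-size kernel object, but *len is caller-controlled, so copying *len bytes out of &tmp could read past the object and leak adjacent kernel memory. Bounding the length first is the whole fix; the int-vs-size_t comparison also rejects negative lengths, since *len promotes to a huge unsigned value. Reduced to a sketch inside a hypothetical helper:

static int copy_info_out(void __user *user, const int *len)
{
        struct ebt_replace tmp = { };           /* fixed-size kernel object */

        if (*len > sizeof(tmp) ||               /* reject out-of-object lengths */
            copy_to_user(user, &tmp, *len))     /* only then copy out */
                return -EFAULT;
        return 0;
}
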
86434diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
86435index a376ec1..1fbd6be 100644
86436--- a/net/caif/cfctrl.c
86437+++ b/net/caif/cfctrl.c
86438@@ -10,6 +10,7 @@
86439 #include <linux/spinlock.h>
86440 #include <linux/slab.h>
86441 #include <linux/pkt_sched.h>
86442+#include <linux/sched.h>
86443 #include <net/caif/caif_layer.h>
86444 #include <net/caif/cfpkt.h>
86445 #include <net/caif/cfctrl.h>
86446@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
86447 memset(&dev_info, 0, sizeof(dev_info));
86448 dev_info.id = 0xff;
86449 cfsrvl_init(&this->serv, 0, &dev_info, false);
86450- atomic_set(&this->req_seq_no, 1);
86451- atomic_set(&this->rsp_seq_no, 1);
86452+ atomic_set_unchecked(&this->req_seq_no, 1);
86453+ atomic_set_unchecked(&this->rsp_seq_no, 1);
86454 this->serv.layer.receive = cfctrl_recv;
86455 sprintf(this->serv.layer.name, "ctrl");
86456 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
86457@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
86458 struct cfctrl_request_info *req)
86459 {
86460 spin_lock_bh(&ctrl->info_list_lock);
86461- atomic_inc(&ctrl->req_seq_no);
86462- req->sequence_no = atomic_read(&ctrl->req_seq_no);
86463+ atomic_inc_unchecked(&ctrl->req_seq_no);
86464+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
86465 list_add_tail(&req->list, &ctrl->list);
86466 spin_unlock_bh(&ctrl->info_list_lock);
86467 }
86468@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
86469 if (p != first)
86470 pr_warn("Requests are not received in order\n");
86471
86472- atomic_set(&ctrl->rsp_seq_no,
86473+ atomic_set_unchecked(&ctrl->rsp_seq_no,
86474 p->sequence_no);
86475 list_del(&p->list);
86476 goto out;
86477diff --git a/net/can/af_can.c b/net/can/af_can.c
86478index ddac1ee..3ee0a78 100644
86479--- a/net/can/af_can.c
86480+++ b/net/can/af_can.c
86481@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
86482 };
86483
86484 /* notifier block for netdevice event */
86485-static struct notifier_block can_netdev_notifier __read_mostly = {
86486+static struct notifier_block can_netdev_notifier = {
86487 .notifier_call = can_notifier,
86488 };
86489
86490diff --git a/net/can/gw.c b/net/can/gw.c
86491index 28e7bdc..d42c4cd 100644
86492--- a/net/can/gw.c
86493+++ b/net/can/gw.c
86494@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
86495 MODULE_ALIAS("can-gw");
86496
86497 static HLIST_HEAD(cgw_list);
86498-static struct notifier_block notifier;
86499
86500 static struct kmem_cache *cgw_cache __read_mostly;
86501
86502@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
86503 return err;
86504 }
86505
86506+static struct notifier_block notifier = {
86507+ .notifier_call = cgw_notifier
86508+};
86509+
86510 static __init int cgw_module_init(void)
86511 {
86512 printk(banner);
86513@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
86514 return -ENOMEM;
86515
86516 /* set notifier */
86517- notifier.notifier_call = cgw_notifier;
86518 register_netdevice_notifier(&notifier);
86519
86520 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
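
Moving the .notifier_call assignment out of cgw_module_init() and into a static initializer means the notifier_block is fully formed at compile time and never needs a runtime store, the same property that lets such objects be constified or placed in read-only data elsewhere in this patch. The shape of the change, reduced:

static int cgw_notifier(struct notifier_block *nb,
                        unsigned long event, void *ptr);

static struct notifier_block notifier = {
        .notifier_call = cgw_notifier,  /* was assigned inside module_init() */
};

static __init int cgw_module_init(void)
{
        register_netdevice_notifier(&notifier); /* no writes to it beforehand */
        return 0;
}
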
86521diff --git a/net/compat.c b/net/compat.c
86522index 79ae884..17c5c09 100644
86523--- a/net/compat.c
86524+++ b/net/compat.c
86525@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
86526 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
86527 __get_user(kmsg->msg_flags, &umsg->msg_flags))
86528 return -EFAULT;
86529- kmsg->msg_name = compat_ptr(tmp1);
86530- kmsg->msg_iov = compat_ptr(tmp2);
86531- kmsg->msg_control = compat_ptr(tmp3);
86532+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
86533+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
86534+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
86535 return 0;
86536 }
86537
86538@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86539
86540 if (kern_msg->msg_namelen) {
86541 if (mode == VERIFY_READ) {
86542- int err = move_addr_to_kernel(kern_msg->msg_name,
86543+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
86544 kern_msg->msg_namelen,
86545 kern_address);
86546 if (err < 0)
86547@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86548 kern_msg->msg_name = NULL;
86549
86550 tot_len = iov_from_user_compat_to_kern(kern_iov,
86551- (struct compat_iovec __user *)kern_msg->msg_iov,
86552+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
86553 kern_msg->msg_iovlen);
86554 if (tot_len >= 0)
86555 kern_msg->msg_iov = kern_iov;
86556@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86557
86558 #define CMSG_COMPAT_FIRSTHDR(msg) \
86559 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
86560- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
86561+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
86562 (struct compat_cmsghdr __user *)NULL)
86563
86564 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
86565 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
86566 (ucmlen) <= (unsigned long) \
86567 ((mhdr)->msg_controllen - \
86568- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
86569+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
86570
86571 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
86572 struct compat_cmsghdr __user *cmsg, int cmsg_len)
86573 {
86574 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
86575- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
86576+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
86577 msg->msg_controllen)
86578 return NULL;
86579 return (struct compat_cmsghdr __user *)ptr;
86580@@ -219,7 +219,7 @@ Efault:
86581
86582 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
86583 {
86584- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86585+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86586 struct compat_cmsghdr cmhdr;
86587 struct compat_timeval ctv;
86588 struct compat_timespec cts[3];
86589@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
86590
86591 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
86592 {
86593- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86594+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86595 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
86596 int fdnum = scm->fp->count;
86597 struct file **fp = scm->fp->fp;
86598@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
86599 return -EFAULT;
86600 old_fs = get_fs();
86601 set_fs(KERNEL_DS);
86602- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
86603+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
86604 set_fs(old_fs);
86605
86606 return err;
86607@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
86608 len = sizeof(ktime);
86609 old_fs = get_fs();
86610 set_fs(KERNEL_DS);
86611- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
86612+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
86613 set_fs(old_fs);
86614
86615 if (!err) {
86616@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86617 case MCAST_JOIN_GROUP:
86618 case MCAST_LEAVE_GROUP:
86619 {
86620- struct compat_group_req __user *gr32 = (void *)optval;
86621+ struct compat_group_req __user *gr32 = (void __user *)optval;
86622 struct group_req __user *kgr =
86623 compat_alloc_user_space(sizeof(struct group_req));
86624 u32 interface;
86625@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86626 case MCAST_BLOCK_SOURCE:
86627 case MCAST_UNBLOCK_SOURCE:
86628 {
86629- struct compat_group_source_req __user *gsr32 = (void *)optval;
86630+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
86631 struct group_source_req __user *kgsr = compat_alloc_user_space(
86632 sizeof(struct group_source_req));
86633 u32 interface;
86634@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86635 }
86636 case MCAST_MSFILTER:
86637 {
86638- struct compat_group_filter __user *gf32 = (void *)optval;
86639+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86640 struct group_filter __user *kgf;
86641 u32 interface, fmode, numsrc;
86642
86643@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
86644 char __user *optval, int __user *optlen,
86645 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
86646 {
86647- struct compat_group_filter __user *gf32 = (void *)optval;
86648+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86649 struct group_filter __user *kgf;
86650 int __user *koptlen;
86651 u32 interface, fmode, numsrc;
86652@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
86653
86654 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
86655 return -EINVAL;
86656- if (copy_from_user(a, args, nas[call]))
86657+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
86658 return -EFAULT;
86659 a0 = a[0];
86660 a1 = a[1];
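
Most of the net/compat.c churn above is sparse hygiene: __force_user and __force_kernel mark the spots where a pointer deliberately crosses between the kernel and user address spaces, so the checker accepts the sanctioned cast while still flagging accidental ones; the final socketcall hunk is a separate bound check of nas[call] against the on-stack argument array, in the same spirit as the ebtables fix earlier. A sketch of the annotation machinery, assuming compiler.h-style definitions (the combined __force_* forms are an assumption here):

#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __kernel __attribute__((address_space(0)))
# define __force  __attribute__((force))
#else
# define __user
# define __kernel
# define __force
#endif
#define __force_user    __force __user      /* sanctioned cast to user space */
#define __force_kernel  __force __kernel    /* sanctioned cast to kernel space */
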
86661diff --git a/net/core/datagram.c b/net/core/datagram.c
86662index 368f9c3..f82d4a3 100644
86663--- a/net/core/datagram.c
86664+++ b/net/core/datagram.c
86665@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
86666 }
86667
86668 kfree_skb(skb);
86669- atomic_inc(&sk->sk_drops);
86670+ atomic_inc_unchecked(&sk->sk_drops);
86671 sk_mem_reclaim_partial(sk);
86672
86673 return err;
86674diff --git a/net/core/dev.c b/net/core/dev.c
86675index 5d9c43d..b471558 100644
86676--- a/net/core/dev.c
86677+++ b/net/core/dev.c
86678@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
86679 if (no_module && capable(CAP_NET_ADMIN))
86680 no_module = request_module("netdev-%s", name);
86681 if (no_module && capable(CAP_SYS_MODULE)) {
86682+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86683+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
86684+#else
86685 if (!request_module("%s", name))
86686 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
86687 name);
86688+#endif
86689 }
86690 }
86691 EXPORT_SYMBOL(dev_load);
86692@@ -1714,7 +1718,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86693 {
86694 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
86695 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
86696- atomic_long_inc(&dev->rx_dropped);
86697+ atomic_long_inc_unchecked(&dev->rx_dropped);
86698 kfree_skb(skb);
86699 return NET_RX_DROP;
86700 }
86701@@ -1724,7 +1728,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86702 nf_reset(skb);
86703
86704 if (unlikely(!is_skb_forwardable(dev, skb))) {
86705- atomic_long_inc(&dev->rx_dropped);
86706+ atomic_long_inc_unchecked(&dev->rx_dropped);
86707 kfree_skb(skb);
86708 return NET_RX_DROP;
86709 }
86710@@ -2179,7 +2183,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
86711
86712 struct dev_gso_cb {
86713 void (*destructor)(struct sk_buff *skb);
86714-};
86715+} __no_const;
86716
86717 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
86718
86719@@ -3052,7 +3056,7 @@ enqueue:
86720
86721 local_irq_restore(flags);
86722
86723- atomic_long_inc(&skb->dev->rx_dropped);
86724+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86725 kfree_skb(skb);
86726 return NET_RX_DROP;
86727 }
86728@@ -3124,7 +3128,7 @@ int netif_rx_ni(struct sk_buff *skb)
86729 }
86730 EXPORT_SYMBOL(netif_rx_ni);
86731
86732-static void net_tx_action(struct softirq_action *h)
86733+static void net_tx_action(void)
86734 {
86735 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86736
86737@@ -3462,7 +3466,7 @@ ncls:
86738 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
86739 } else {
86740 drop:
86741- atomic_long_inc(&skb->dev->rx_dropped);
86742+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86743 kfree_skb(skb);
86744 /* Jamal, now you will not able to escape explaining
86745 * me how you were going to use this. :-)
86746@@ -4045,7 +4049,7 @@ void netif_napi_del(struct napi_struct *napi)
86747 }
86748 EXPORT_SYMBOL(netif_napi_del);
86749
86750-static void net_rx_action(struct softirq_action *h)
86751+static void net_rx_action(void)
86752 {
86753 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86754 unsigned long time_limit = jiffies + 2;
86755@@ -4529,8 +4533,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
86756 else
86757 seq_printf(seq, "%04x", ntohs(pt->type));
86758
86759+#ifdef CONFIG_GRKERNSEC_HIDESYM
86760+ seq_printf(seq, " %-8s %p\n",
86761+ pt->dev ? pt->dev->name : "", NULL);
86762+#else
86763 seq_printf(seq, " %-8s %pF\n",
86764 pt->dev ? pt->dev->name : "", pt->func);
86765+#endif
86766 }
86767
86768 return 0;
86769@@ -6102,7 +6111,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
86770 } else {
86771 netdev_stats_to_stats64(storage, &dev->stats);
86772 }
86773- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
86774+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
86775 return storage;
86776 }
86777 EXPORT_SYMBOL(dev_get_stats);
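
Two things above are worth a note: the GRKERNSEC_HIDESYM hunk keeps /proc/net/ptype from printing real packet-handler addresses, and net_tx_action()/net_rx_action() drop their never-used softirq_action argument to match a tightened handler signature used elsewhere in the patch. The HIDESYM policy, factored into a hypothetical helper (not in the patch):

static const void *hide_ptr(const void *p)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
        return NULL;    /* never leak a kernel address to userspace */
#else
        return p;
#endif
}

/* usage sketch: seq_printf(seq, " %-8s %p\n", name, hide_ptr(pt->func)); */
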
86778diff --git a/net/core/flow.c b/net/core/flow.c
86779index 3bad824..2071a55 100644
86780--- a/net/core/flow.c
86781+++ b/net/core/flow.c
86782@@ -61,7 +61,7 @@ struct flow_cache {
86783 struct timer_list rnd_timer;
86784 };
86785
86786-atomic_t flow_cache_genid = ATOMIC_INIT(0);
86787+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
86788 EXPORT_SYMBOL(flow_cache_genid);
86789 static struct flow_cache flow_cache_global;
86790 static struct kmem_cache *flow_cachep __read_mostly;
86791@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
86792
86793 static int flow_entry_valid(struct flow_cache_entry *fle)
86794 {
86795- if (atomic_read(&flow_cache_genid) != fle->genid)
86796+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
86797 return 0;
86798 if (fle->object && !fle->object->ops->check(fle->object))
86799 return 0;
86800@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
86801 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
86802 fcp->hash_count++;
86803 }
86804- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
86805+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
86806 flo = fle->object;
86807 if (!flo)
86808 goto ret_object;
86809@@ -280,7 +280,7 @@ nocache:
86810 }
86811 flo = resolver(net, key, family, dir, flo, ctx);
86812 if (fle) {
86813- fle->genid = atomic_read(&flow_cache_genid);
86814+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
86815 if (!IS_ERR(flo))
86816 fle->object = flo;
86817 else
86818diff --git a/net/core/iovec.c b/net/core/iovec.c
86819index 7e7aeb0..2a998cb 100644
86820--- a/net/core/iovec.c
86821+++ b/net/core/iovec.c
86822@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86823 if (m->msg_namelen) {
86824 if (mode == VERIFY_READ) {
86825 void __user *namep;
86826- namep = (void __user __force *) m->msg_name;
86827+ namep = (void __force_user *) m->msg_name;
86828 err = move_addr_to_kernel(namep, m->msg_namelen,
86829 address);
86830 if (err < 0)
86831@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86832 }
86833
86834 size = m->msg_iovlen * sizeof(struct iovec);
86835- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
86836+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
86837 return -EFAULT;
86838
86839 m->msg_iov = iov;
86840diff --git a/net/core/neighbour.c b/net/core/neighbour.c
86841index c815f28..e6403f2 100644
86842--- a/net/core/neighbour.c
86843+++ b/net/core/neighbour.c
86844@@ -2776,7 +2776,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
86845 size_t *lenp, loff_t *ppos)
86846 {
86847 int size, ret;
86848- ctl_table tmp = *ctl;
86849+ ctl_table_no_const tmp = *ctl;
86850
86851 tmp.extra1 = &zero;
86852 tmp.extra2 = &unres_qlen_max;
86853diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
86854index 28c5f5a..7edf2e2 100644
86855--- a/net/core/net-sysfs.c
86856+++ b/net/core/net-sysfs.c
86857@@ -1455,7 +1455,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
86858 }
86859 EXPORT_SYMBOL(netdev_class_remove_file);
86860
86861-int netdev_kobject_init(void)
86862+int __init netdev_kobject_init(void)
86863 {
86864 kobj_ns_type_register(&net_ns_type_operations);
86865 return class_register(&net_class);
86866diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
86867index 8acce01..2e306bb 100644
86868--- a/net/core/net_namespace.c
86869+++ b/net/core/net_namespace.c
86870@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
86871 int error;
86872 LIST_HEAD(net_exit_list);
86873
86874- list_add_tail(&ops->list, list);
86875+ pax_list_add_tail((struct list_head *)&ops->list, list);
86876 if (ops->init || (ops->id && ops->size)) {
86877 for_each_net(net) {
86878 error = ops_init(ops, net);
86879@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
86880
86881 out_undo:
86882 /* If I have an error cleanup all namespaces I initialized */
86883- list_del(&ops->list);
86884+ pax_list_del((struct list_head *)&ops->list);
86885 ops_exit_list(ops, &net_exit_list);
86886 ops_free_list(ops, &net_exit_list);
86887 return error;
86888@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
86889 struct net *net;
86890 LIST_HEAD(net_exit_list);
86891
86892- list_del(&ops->list);
86893+ pax_list_del((struct list_head *)&ops->list);
86894 for_each_net(net)
86895 list_add_tail(&net->exit_list, &net_exit_list);
86896 ops_exit_list(ops, &net_exit_list);
86897@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
86898 mutex_lock(&net_mutex);
86899 error = register_pernet_operations(&pernet_list, ops);
86900 if (!error && (first_device == &pernet_list))
86901- first_device = &ops->list;
86902+ first_device = (struct list_head *)&ops->list;
86903 mutex_unlock(&net_mutex);
86904 return error;
86905 }
86906diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
86907index 6212ec9..5ee16b2 100644
86908--- a/net/core/rtnetlink.c
86909+++ b/net/core/rtnetlink.c
86910@@ -58,7 +58,7 @@ struct rtnl_link {
86911 rtnl_doit_func doit;
86912 rtnl_dumpit_func dumpit;
86913 rtnl_calcit_func calcit;
86914-};
86915+} __no_const;
86916
86917 static DEFINE_MUTEX(rtnl_mutex);
86918
86919@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
86920 if (rtnl_link_ops_get(ops->kind))
86921 return -EEXIST;
86922
86923- if (!ops->dellink)
86924- ops->dellink = unregister_netdevice_queue;
86925+ if (!ops->dellink) {
86926+ pax_open_kernel();
86927+ *(void **)&ops->dellink = unregister_netdevice_queue;
86928+ pax_close_kernel();
86929+ }
86930
86931- list_add_tail(&ops->list, &link_ops);
86932+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
86933 return 0;
86934 }
86935 EXPORT_SYMBOL_GPL(__rtnl_link_register);
86936@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
86937 for_each_net(net) {
86938 __rtnl_kill_links(net, ops);
86939 }
86940- list_del(&ops->list);
86941+ pax_list_del((struct list_head *)&ops->list);
86942 }
86943 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
86944
86945@@ -1068,7 +1071,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
86946 rcu_read_lock();
86947 cb->seq = net->dev_base_seq;
86948
86949- if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
86950+ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
86951 ifla_policy) >= 0) {
86952
86953 if (tb[IFLA_EXT_MASK])
86954@@ -1924,7 +1927,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
86955 u32 ext_filter_mask = 0;
86956 u16 min_ifinfo_dump_size = 0;
86957
86958- if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
86959+ if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
86960 ifla_policy) >= 0) {
86961 if (tb[IFLA_EXT_MASK])
86962 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
86963diff --git a/net/core/scm.c b/net/core/scm.c
86964index 2dc6cda..2159524 100644
86965--- a/net/core/scm.c
86966+++ b/net/core/scm.c
86967@@ -226,7 +226,7 @@ EXPORT_SYMBOL(__scm_send);
86968 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86969 {
86970 struct cmsghdr __user *cm
86971- = (__force struct cmsghdr __user *)msg->msg_control;
86972+ = (struct cmsghdr __force_user *)msg->msg_control;
86973 struct cmsghdr cmhdr;
86974 int cmlen = CMSG_LEN(len);
86975 int err;
86976@@ -249,7 +249,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86977 err = -EFAULT;
86978 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
86979 goto out;
86980- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
86981+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
86982 goto out;
86983 cmlen = CMSG_SPACE(len);
86984 if (msg->msg_controllen < cmlen)
86985@@ -265,7 +265,7 @@ EXPORT_SYMBOL(put_cmsg);
86986 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86987 {
86988 struct cmsghdr __user *cm
86989- = (__force struct cmsghdr __user*)msg->msg_control;
86990+ = (struct cmsghdr __force_user *)msg->msg_control;
86991
86992 int fdmax = 0;
86993 int fdnum = scm->fp->count;
86994@@ -285,7 +285,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86995 if (fdnum < fdmax)
86996 fdmax = fdnum;
86997
86998- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
86999+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
87000 i++, cmfptr++)
87001 {
87002 struct socket *sock;
87003diff --git a/net/core/sock.c b/net/core/sock.c
87004index bc131d4..029e378 100644
87005--- a/net/core/sock.c
87006+++ b/net/core/sock.c
87007@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87008 struct sk_buff_head *list = &sk->sk_receive_queue;
87009
87010 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
87011- atomic_inc(&sk->sk_drops);
87012+ atomic_inc_unchecked(&sk->sk_drops);
87013 trace_sock_rcvqueue_full(sk, skb);
87014 return -ENOMEM;
87015 }
87016@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87017 return err;
87018
87019 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
87020- atomic_inc(&sk->sk_drops);
87021+ atomic_inc_unchecked(&sk->sk_drops);
87022 return -ENOBUFS;
87023 }
87024
87025@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87026 skb_dst_force(skb);
87027
87028 spin_lock_irqsave(&list->lock, flags);
87029- skb->dropcount = atomic_read(&sk->sk_drops);
87030+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
87031 __skb_queue_tail(list, skb);
87032 spin_unlock_irqrestore(&list->lock, flags);
87033
87034@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
87035 skb->dev = NULL;
87036
87037 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
87038- atomic_inc(&sk->sk_drops);
87039+ atomic_inc_unchecked(&sk->sk_drops);
87040 goto discard_and_relse;
87041 }
87042 if (nested)
87043@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
87044 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
87045 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
87046 bh_unlock_sock(sk);
87047- atomic_inc(&sk->sk_drops);
87048+ atomic_inc_unchecked(&sk->sk_drops);
87049 goto discard_and_relse;
87050 }
87051
87052@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
87053 struct timeval tm;
87054 } v;
87055
87056- int lv = sizeof(int);
87057- int len;
87058+ unsigned int lv = sizeof(int);
87059+ unsigned int len;
87060
87061 if (get_user(len, optlen))
87062 return -EFAULT;
87063- if (len < 0)
87064+ if (len > INT_MAX)
87065 return -EINVAL;
87066
87067 memset(&v, 0, sizeof(v));
87068@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
87069
87070 case SO_PEERNAME:
87071 {
87072- char address[128];
87073+ char address[_K_SS_MAXSIZE];
87074
87075 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
87076 return -ENOTCONN;
87077- if (lv < len)
87078+ if (lv < len || sizeof address < len)
87079 return -EINVAL;
87080 if (copy_to_user(optval, address, len))
87081 return -EFAULT;
87082@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
87083
87084 if (len > lv)
87085 len = lv;
87086- if (copy_to_user(optval, &v, len))
87087+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
87088 return -EFAULT;
87089 lenout:
87090 if (put_user(len, optlen))
87091@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
87092 */
87093 smp_wmb();
87094 atomic_set(&sk->sk_refcnt, 1);
87095- atomic_set(&sk->sk_drops, 0);
87096+ atomic_set_unchecked(&sk->sk_drops, 0);
87097 }
87098 EXPORT_SYMBOL(sock_init_data);
87099
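
The sock_getsockopt() changes above are one coherent hardening: len and lv become unsigned so a length that was a negative int shows up as > INT_MAX and is rejected once at the top; the SO_PEERNAME stack buffer grows to _K_SS_MAXSIZE and gains an explicit sizeof bound; and the final copy out of the value union is clamped to sizeof(v). The validation spine, sketched with a stand-in union:

static int getsockopt_spine(char __user *optval, int __user *optlen)
{
        union { int val; char buf[64]; } v = { };  /* stand-in for the real union */
        unsigned int len;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len > INT_MAX)              /* a negative int in disguise */
                return -EINVAL;

        /* option-specific code fills v and may shrink len */

        if (len > sizeof(v) ||          /* never copy past the object */
            copy_to_user(optval, &v, len))
                return -EFAULT;
        return put_user(len, optlen);
}
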
87100diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
87101index 750f44f..922399c 100644
87102--- a/net/core/sock_diag.c
87103+++ b/net/core/sock_diag.c
87104@@ -9,26 +9,33 @@
87105 #include <linux/inet_diag.h>
87106 #include <linux/sock_diag.h>
87107
87108-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
87109+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
87110 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
87111 static DEFINE_MUTEX(sock_diag_table_mutex);
87112
87113 int sock_diag_check_cookie(void *sk, __u32 *cookie)
87114 {
87115+#ifndef CONFIG_GRKERNSEC_HIDESYM
87116 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
87117 cookie[1] != INET_DIAG_NOCOOKIE) &&
87118 ((u32)(unsigned long)sk != cookie[0] ||
87119 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
87120 return -ESTALE;
87121 else
87122+#endif
87123 return 0;
87124 }
87125 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
87126
87127 void sock_diag_save_cookie(void *sk, __u32 *cookie)
87128 {
87129+#ifdef CONFIG_GRKERNSEC_HIDESYM
87130+ cookie[0] = 0;
87131+ cookie[1] = 0;
87132+#else
87133 cookie[0] = (u32)(unsigned long)sk;
87134 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
87135+#endif
87136 }
87137 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
87138
87139@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
87140 mutex_lock(&sock_diag_table_mutex);
87141 if (sock_diag_handlers[hndl->family])
87142 err = -EBUSY;
87143- else
87144+ else {
87145+ pax_open_kernel();
87146 sock_diag_handlers[hndl->family] = hndl;
87147+ pax_close_kernel();
87148+ }
87149 mutex_unlock(&sock_diag_table_mutex);
87150
87151 return err;
87152@@ -92,26 +102,13 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
87153
87154 mutex_lock(&sock_diag_table_mutex);
87155 BUG_ON(sock_diag_handlers[family] != hnld);
87156+ pax_open_kernel();
87157 sock_diag_handlers[family] = NULL;
87158+ pax_close_kernel();
87159 mutex_unlock(&sock_diag_table_mutex);
87160 }
87161 EXPORT_SYMBOL_GPL(sock_diag_unregister);
87162
87163-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
87164-{
87165- if (sock_diag_handlers[family] == NULL)
87166- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
87167- NETLINK_SOCK_DIAG, family);
87168-
87169- mutex_lock(&sock_diag_table_mutex);
87170- return sock_diag_handlers[family];
87171-}
87172-
87173-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
87174-{
87175- mutex_unlock(&sock_diag_table_mutex);
87176-}
87177-
87178 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
87179 {
87180 int err;
87181@@ -124,12 +121,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
87182 if (req->sdiag_family >= AF_MAX)
87183 return -EINVAL;
87184
87185- hndl = sock_diag_lock_handler(req->sdiag_family);
87186+ if (sock_diag_handlers[req->sdiag_family] == NULL)
87187+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
87188+ NETLINK_SOCK_DIAG, req->sdiag_family);
87189+
87190+ mutex_lock(&sock_diag_table_mutex);
87191+ hndl = sock_diag_handlers[req->sdiag_family];
87192 if (hndl == NULL)
87193 err = -ENOENT;
87194 else
87195 err = hndl->dump(skb, nlh);
87196- sock_diag_unlock_handler(hndl);
87197+ mutex_unlock(&sock_diag_table_mutex);
87198
87199 return err;
87200 }
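
sock_diag_handlers[] gains __read_only, so past early boot the array sits on write-protected pages; the pax_open_kernel()/pax_close_kernel() pairs bracket the only sanctioned writers, both already serialized by the table mutex. A sketch of the idea, assuming the helpers briefly lift kernel write protection (on x86, by toggling CR0.WP):

static const struct sock_diag_handler *handlers[AF_MAX] __read_only;

static void set_handler(int family, const struct sock_diag_handler *h)
{
        pax_open_kernel();      /* assumed: temporarily allow the store */
        handlers[family] = h;   /* sole writer, under sock_diag_table_mutex */
        pax_close_kernel();     /* re-arm write protection */
}
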
87201diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
87202index d1b0804..98cf5f7 100644
87203--- a/net/core/sysctl_net_core.c
87204+++ b/net/core/sysctl_net_core.c
87205@@ -26,7 +26,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
87206 {
87207 unsigned int orig_size, size;
87208 int ret, i;
87209- ctl_table tmp = {
87210+ ctl_table_no_const tmp = {
87211 .data = &size,
87212 .maxlen = sizeof(size),
87213 .mode = table->mode
87214@@ -205,13 +205,12 @@ static struct ctl_table netns_core_table[] = {
87215
87216 static __net_init int sysctl_core_net_init(struct net *net)
87217 {
87218- struct ctl_table *tbl;
87219+ ctl_table_no_const *tbl = NULL;
87220
87221 net->core.sysctl_somaxconn = SOMAXCONN;
87222
87223- tbl = netns_core_table;
87224 if (!net_eq(net, &init_net)) {
87225- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
87226+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
87227 if (tbl == NULL)
87228 goto err_dup;
87229
87230@@ -221,17 +220,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
87231 if (net->user_ns != &init_user_ns) {
87232 tbl[0].procname = NULL;
87233 }
87234- }
87235-
87236- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
87237+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
87238+ } else
87239+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
87240 if (net->core.sysctl_hdr == NULL)
87241 goto err_reg;
87242
87243 return 0;
87244
87245 err_reg:
87246- if (tbl != netns_core_table)
87247- kfree(tbl);
87248+ kfree(tbl);
87249 err_dup:
87250 return -ENOMEM;
87251 }
87252@@ -246,7 +244,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
87253 kfree(tbl);
87254 }
87255
87256-static __net_initdata struct pernet_operations sysctl_core_ops = {
87257+static __net_initconst struct pernet_operations sysctl_core_ops = {
87258 .init = sysctl_core_net_init,
87259 .exit = sysctl_core_net_exit,
87260 };
87261diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
87262index 307c322..78a4c6f 100644
87263--- a/net/decnet/af_decnet.c
87264+++ b/net/decnet/af_decnet.c
87265@@ -468,6 +468,7 @@ static struct proto dn_proto = {
87266 .sysctl_rmem = sysctl_decnet_rmem,
87267 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
87268 .obj_size = sizeof(struct dn_sock),
87269+ .slab_flags = SLAB_USERCOPY,
87270 };
87271
87272 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
87273diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
87274index a55eecc..dd8428c 100644
87275--- a/net/decnet/sysctl_net_decnet.c
87276+++ b/net/decnet/sysctl_net_decnet.c
87277@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
87278
87279 if (len > *lenp) len = *lenp;
87280
87281- if (copy_to_user(buffer, addr, len))
87282+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
87283 return -EFAULT;
87284
87285 *lenp = len;
87286@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
87287
87288 if (len > *lenp) len = *lenp;
87289
87290- if (copy_to_user(buffer, devname, len))
87291+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
87292 return -EFAULT;
87293
87294 *lenp = len;
87295diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
87296index fcf104e..95552d4 100644
87297--- a/net/ipv4/af_inet.c
87298+++ b/net/ipv4/af_inet.c
87299@@ -1717,13 +1717,9 @@ static int __init inet_init(void)
87300
87301 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
87302
87303- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
87304- if (!sysctl_local_reserved_ports)
87305- goto out;
87306-
87307 rc = proto_register(&tcp_prot, 1);
87308 if (rc)
87309- goto out_free_reserved_ports;
87310+ goto out;
87311
87312 rc = proto_register(&udp_prot, 1);
87313 if (rc)
87314@@ -1832,8 +1828,6 @@ out_unregister_udp_proto:
87315 proto_unregister(&udp_prot);
87316 out_unregister_tcp_proto:
87317 proto_unregister(&tcp_prot);
87318-out_free_reserved_ports:
87319- kfree(sysctl_local_reserved_ports);
87320 goto out;
87321 }
87322
87323diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
87324index a69b4e4..dbccba5 100644
87325--- a/net/ipv4/ah4.c
87326+++ b/net/ipv4/ah4.c
87327@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
87328 return;
87329
87330 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87331- atomic_inc(&flow_cache_genid);
87332+ atomic_inc_unchecked(&flow_cache_genid);
87333 rt_genid_bump(net);
87334
87335 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
87336diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
87337index a8e4f26..25e5f40 100644
87338--- a/net/ipv4/devinet.c
87339+++ b/net/ipv4/devinet.c
87340@@ -1763,7 +1763,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
87341 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
87342 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
87343
87344-static struct devinet_sysctl_table {
87345+static const struct devinet_sysctl_table {
87346 struct ctl_table_header *sysctl_header;
87347 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
87348 } devinet_sysctl = {
87349@@ -1881,7 +1881,7 @@ static __net_init int devinet_init_net(struct net *net)
87350 int err;
87351 struct ipv4_devconf *all, *dflt;
87352 #ifdef CONFIG_SYSCTL
87353- struct ctl_table *tbl = ctl_forward_entry;
87354+ ctl_table_no_const *tbl = NULL;
87355 struct ctl_table_header *forw_hdr;
87356 #endif
87357
87358@@ -1899,7 +1899,7 @@ static __net_init int devinet_init_net(struct net *net)
87359 goto err_alloc_dflt;
87360
87361 #ifdef CONFIG_SYSCTL
87362- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
87363+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
87364 if (tbl == NULL)
87365 goto err_alloc_ctl;
87366
87367@@ -1919,7 +1919,10 @@ static __net_init int devinet_init_net(struct net *net)
87368 goto err_reg_dflt;
87369
87370 err = -ENOMEM;
87371- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87372+ if (!net_eq(net, &init_net))
87373+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87374+ else
87375+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
87376 if (forw_hdr == NULL)
87377 goto err_reg_ctl;
87378 net->ipv4.forw_hdr = forw_hdr;
87379@@ -1935,8 +1938,7 @@ err_reg_ctl:
87380 err_reg_dflt:
87381 __devinet_sysctl_unregister(all);
87382 err_reg_all:
87383- if (tbl != ctl_forward_entry)
87384- kfree(tbl);
87385+ kfree(tbl);
87386 err_alloc_ctl:
87387 #endif
87388 if (dflt != &ipv4_devconf_dflt)
87389diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
87390index 3b4f0cd..a6ba66e 100644
87391--- a/net/ipv4/esp4.c
87392+++ b/net/ipv4/esp4.c
87393@@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
87394
87395 /* skb is pure payload to encrypt */
87396
87397- err = -ENOMEM;
87398-
87399 esp = x->data;
87400 aead = esp->aead;
87401 alen = crypto_aead_authsize(aead);
87402@@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
87403 }
87404
87405 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
87406- if (!tmp)
87407+ if (!tmp) {
87408+ err = -ENOMEM;
87409 goto error;
87410+ }
87411
87412 seqhi = esp_tmp_seqhi(tmp);
87413 iv = esp_tmp_iv(aead, tmp, seqhilen);
87414@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
87415 return;
87416
87417 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87418- atomic_inc(&flow_cache_genid);
87419+ atomic_inc_unchecked(&flow_cache_genid);
87420 rt_genid_bump(net);
87421
87422 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
87423diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
87424index 5cd75e2..f57ef39 100644
87425--- a/net/ipv4/fib_frontend.c
87426+++ b/net/ipv4/fib_frontend.c
87427@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
87428 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87429 fib_sync_up(dev);
87430 #endif
87431- atomic_inc(&net->ipv4.dev_addr_genid);
87432+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87433 rt_cache_flush(dev_net(dev));
87434 break;
87435 case NETDEV_DOWN:
87436 fib_del_ifaddr(ifa, NULL);
87437- atomic_inc(&net->ipv4.dev_addr_genid);
87438+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87439 if (ifa->ifa_dev->ifa_list == NULL) {
87440 /* Last address was deleted from this interface.
87441 * Disable IP.
87442@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
87443 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87444 fib_sync_up(dev);
87445 #endif
87446- atomic_inc(&net->ipv4.dev_addr_genid);
87447+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87448 rt_cache_flush(net);
87449 break;
87450 case NETDEV_DOWN:
87451diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
87452index 4797a80..2bd54e9 100644
87453--- a/net/ipv4/fib_semantics.c
87454+++ b/net/ipv4/fib_semantics.c
87455@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
87456 nh->nh_saddr = inet_select_addr(nh->nh_dev,
87457 nh->nh_gw,
87458 nh->nh_parent->fib_scope);
87459- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
87460+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
87461
87462 return nh->nh_saddr;
87463 }
87464diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
87465index d0670f0..744ac80 100644
87466--- a/net/ipv4/inet_connection_sock.c
87467+++ b/net/ipv4/inet_connection_sock.c
87468@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
87469 .range = { 32768, 61000 },
87470 };
87471
87472-unsigned long *sysctl_local_reserved_ports;
87473+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
87474 EXPORT_SYMBOL(sysctl_local_reserved_ports);
87475
87476 void inet_get_local_port_range(int *low, int *high)
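
This pairs with the af_inet.c hunk earlier: rather than kzalloc'ing the reserved-ports bitmap at boot (and unwinding it on every init error path), the bitmap becomes a static array, 65536 ports at one bit each, i.e. 8 KiB, which also removes a writable pointer that an attacker could redirect. Size arithmetic plus a hypothetical accessor:

/* 65536 bits / 8 = 8192 bytes, carved into unsigned longs */
unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];

/* hypothetical helper, not in the patch */
static inline bool port_is_reserved(unsigned int port)
{
        return test_bit(port, sysctl_local_reserved_ports);
}
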
87477diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
87478index fa3ae81..0dbe6b8 100644
87479--- a/net/ipv4/inet_hashtables.c
87480+++ b/net/ipv4/inet_hashtables.c
87481@@ -18,12 +18,15 @@
87482 #include <linux/sched.h>
87483 #include <linux/slab.h>
87484 #include <linux/wait.h>
87485+#include <linux/security.h>
87486
87487 #include <net/inet_connection_sock.h>
87488 #include <net/inet_hashtables.h>
87489 #include <net/secure_seq.h>
87490 #include <net/ip.h>
87491
87492+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
87493+
87494 /*
87495 * Allocate and initialize a new local port bind bucket.
87496 * The bindhash mutex for snum's hash chain must be held here.
87497@@ -540,6 +543,8 @@ ok:
87498 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
87499 spin_unlock(&head->lock);
87500
87501+ gr_update_task_in_ip_table(current, inet_sk(sk));
87502+
87503 if (tw) {
87504 inet_twsk_deschedule(tw, death_row);
87505 while (twrefcnt) {
87506diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
87507index 000e3d2..5472da3 100644
87508--- a/net/ipv4/inetpeer.c
87509+++ b/net/ipv4/inetpeer.c
87510@@ -503,8 +503,8 @@ relookup:
87511 if (p) {
87512 p->daddr = *daddr;
87513 atomic_set(&p->refcnt, 1);
87514- atomic_set(&p->rid, 0);
87515- atomic_set(&p->ip_id_count,
87516+ atomic_set_unchecked(&p->rid, 0);
87517+ atomic_set_unchecked(&p->ip_id_count,
87518 (daddr->family == AF_INET) ?
87519 secure_ip_id(daddr->addr.a4) :
87520 secure_ipv6_id(daddr->addr.a6));
87521diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
87522index a8fc332..4ca4ca65 100644
87523--- a/net/ipv4/ip_fragment.c
87524+++ b/net/ipv4/ip_fragment.c
87525@@ -319,7 +319,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
87526 return 0;
87527
87528 start = qp->rid;
87529- end = atomic_inc_return(&peer->rid);
87530+ end = atomic_inc_return_unchecked(&peer->rid);
87531 qp->rid = end;
87532
87533 rc = qp->q.fragments && (end - start) > max;
87534@@ -786,12 +786,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
87535
87536 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87537 {
87538- struct ctl_table *table;
87539+ ctl_table_no_const *table = NULL;
87540 struct ctl_table_header *hdr;
87541
87542- table = ip4_frags_ns_ctl_table;
87543 if (!net_eq(net, &init_net)) {
87544- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87545+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87546 if (table == NULL)
87547 goto err_alloc;
87548
87549@@ -802,9 +801,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87550 /* Don't export sysctls to unprivileged users */
87551 if (net->user_ns != &init_user_ns)
87552 table[0].procname = NULL;
87553- }
87554+ hdr = register_net_sysctl(net, "net/ipv4", table);
87555+ } else
87556+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
87557
87558- hdr = register_net_sysctl(net, "net/ipv4", table);
87559 if (hdr == NULL)
87560 goto err_reg;
87561
87562@@ -812,8 +812,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87563 return 0;
87564
87565 err_reg:
87566- if (!net_eq(net, &init_net))
87567- kfree(table);
87568+ kfree(table);
87569 err_alloc:
87570 return -ENOMEM;
87571 }
87572diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
87573index a85062b..2958a9b 100644
87574--- a/net/ipv4/ip_gre.c
87575+++ b/net/ipv4/ip_gre.c
87576@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
87577 module_param(log_ecn_error, bool, 0644);
87578 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87579
87580-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
87581+static struct rtnl_link_ops ipgre_link_ops;
87582 static int ipgre_tunnel_init(struct net_device *dev);
87583 static void ipgre_tunnel_setup(struct net_device *dev);
87584 static int ipgre_tunnel_bind_dev(struct net_device *dev);
87585@@ -1753,7 +1753,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
87586 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
87587 };
87588
87589-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87590+static struct rtnl_link_ops ipgre_link_ops = {
87591 .kind = "gre",
87592 .maxtype = IFLA_GRE_MAX,
87593 .policy = ipgre_policy,
87594@@ -1766,7 +1766,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87595 .fill_info = ipgre_fill_info,
87596 };
87597
87598-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
87599+static struct rtnl_link_ops ipgre_tap_ops = {
87600 .kind = "gretap",
87601 .maxtype = IFLA_GRE_MAX,
87602 .policy = ipgre_policy,
87603diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
87604index d9c4f11..02b82dbc 100644
87605--- a/net/ipv4/ip_sockglue.c
87606+++ b/net/ipv4/ip_sockglue.c
87607@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87608 len = min_t(unsigned int, len, opt->optlen);
87609 if (put_user(len, optlen))
87610 return -EFAULT;
87611- if (copy_to_user(optval, opt->__data, len))
87612+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
87613+ copy_to_user(optval, opt->__data, len))
87614 return -EFAULT;
87615 return 0;
87616 }
87617@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87618 if (sk->sk_type != SOCK_STREAM)
87619 return -ENOPROTOOPT;
87620
87621- msg.msg_control = optval;
87622+ msg.msg_control = (void __force_kernel *)optval;
87623 msg.msg_controllen = len;
87624 msg.msg_flags = flags;
87625
87626diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
87627index c3a4233..1412161 100644
87628--- a/net/ipv4/ip_vti.c
87629+++ b/net/ipv4/ip_vti.c
87630@@ -47,7 +47,7 @@
87631 #define HASH_SIZE 16
87632 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
87633
87634-static struct rtnl_link_ops vti_link_ops __read_mostly;
87635+static struct rtnl_link_ops vti_link_ops;
87636
87637 static int vti_net_id __read_mostly;
87638 struct vti_net {
87639@@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
87640 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
87641 };
87642
87643-static struct rtnl_link_ops vti_link_ops __read_mostly = {
87644+static struct rtnl_link_ops vti_link_ops = {
87645 .kind = "vti",
87646 .maxtype = IFLA_VTI_MAX,
87647 .policy = vti_policy,
87648diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
87649index 9a46dae..5f793a0 100644
87650--- a/net/ipv4/ipcomp.c
87651+++ b/net/ipv4/ipcomp.c
87652@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
87653 return;
87654
87655 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87656- atomic_inc(&flow_cache_genid);
87657+ atomic_inc_unchecked(&flow_cache_genid);
87658 rt_genid_bump(net);
87659
87660 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
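
The atomic_inc_unchecked() conversion above recurs throughout this section: under the patch's reference-count hardening, ordinary atomic_t operations gain overflow detection, so counters whose wraparound is deliberate and harmless (generation counters such as flow_cache_genid, or drop statistics such as sk_drops below) move to a parallel atomic_unchecked_t API that skips the check. A rough sketch of the split, using a generic compiler builtin as a stand-in for the per-arch asm the patch actually provides:

	/* Illustration only; the real patch defines these per architecture. */
	typedef struct {
		int counter;
	} atomic_unchecked_t;		/* same layout as atomic_t */

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return (*(volatile const int *)&v->counter);
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* no overflow trap: wrapping past INT_MAX is acceptable here */
		__sync_fetch_and_add(&v->counter, 1);
	}

	static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
	{
		return __sync_add_and_fetch(&v->counter, 1);
	}
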
87661diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
87662index a2e50ae..e152b7c 100644
87663--- a/net/ipv4/ipconfig.c
87664+++ b/net/ipv4/ipconfig.c
87665@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
87666
87667 mm_segment_t oldfs = get_fs();
87668 set_fs(get_ds());
87669- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87670+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87671 set_fs(oldfs);
87672 return res;
87673 }
87674@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
87675
87676 mm_segment_t oldfs = get_fs();
87677 set_fs(get_ds());
87678- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87679+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87680 set_fs(oldfs);
87681 return res;
87682 }
87683@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
87684
87685 mm_segment_t oldfs = get_fs();
87686 set_fs(get_ds());
87687- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
87688+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
87689 set_fs(oldfs);
87690 return res;
87691 }
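
These three hunks share one idea: the calls run under set_fs(get_ds()), so the pointer really is a kernel address being passed through a __user-typed parameter, and the patch's stricter user/kernel pointer checking wants that stated explicitly with __force_user rather than a bare __user cast. The shape of the pattern, mirroring ic_dev_ioctl() above:

	/* Mirrors the annotated hunks above; __force_user is defined by this
	 * patch and degrades to a plain (__force __user) cast otherwise.
	 */
	static int __init example_kernel_ioctl(unsigned int cmd, struct ifreq *arg)
	{
		int res;
		mm_segment_t oldfs = get_fs();

		set_fs(get_ds());	/* uaccess now accepts kernel addresses */
		res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *)arg);
		set_fs(oldfs);
		return res;
	}
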
87692diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
87693index 191fc24..1b3b804 100644
87694--- a/net/ipv4/ipip.c
87695+++ b/net/ipv4/ipip.c
87696@@ -138,7 +138,7 @@ struct ipip_net {
87697 static int ipip_tunnel_init(struct net_device *dev);
87698 static void ipip_tunnel_setup(struct net_device *dev);
87699 static void ipip_dev_free(struct net_device *dev);
87700-static struct rtnl_link_ops ipip_link_ops __read_mostly;
87701+static struct rtnl_link_ops ipip_link_ops;
87702
87703 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
87704 struct rtnl_link_stats64 *tot)
87705@@ -972,7 +972,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
87706 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
87707 };
87708
87709-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
87710+static struct rtnl_link_ops ipip_link_ops = {
87711 .kind = "ipip",
87712 .maxtype = IFLA_IPTUN_MAX,
87713 .policy = ipip_policy,
87714diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
87715index 3ea4127..849297b 100644
87716--- a/net/ipv4/netfilter/arp_tables.c
87717+++ b/net/ipv4/netfilter/arp_tables.c
87718@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
87719 #endif
87720
87721 static int get_info(struct net *net, void __user *user,
87722- const int *len, int compat)
87723+ int len, int compat)
87724 {
87725 char name[XT_TABLE_MAXNAMELEN];
87726 struct xt_table *t;
87727 int ret;
87728
87729- if (*len != sizeof(struct arpt_getinfo)) {
87730- duprintf("length %u != %Zu\n", *len,
87731+ if (len != sizeof(struct arpt_getinfo)) {
87732+ duprintf("length %u != %Zu\n", len,
87733 sizeof(struct arpt_getinfo));
87734 return -EINVAL;
87735 }
87736@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
87737 info.size = private->size;
87738 strcpy(info.name, name);
87739
87740- if (copy_to_user(user, &info, *len) != 0)
87741+ if (copy_to_user(user, &info, len) != 0)
87742 ret = -EFAULT;
87743 else
87744 ret = 0;
87745@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
87746
87747 switch (cmd) {
87748 case ARPT_SO_GET_INFO:
87749- ret = get_info(sock_net(sk), user, len, 1);
87750+ ret = get_info(sock_net(sk), user, *len, 1);
87751 break;
87752 case ARPT_SO_GET_ENTRIES:
87753 ret = compat_get_entries(sock_net(sk), user, len);
87754@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
87755
87756 switch (cmd) {
87757 case ARPT_SO_GET_INFO:
87758- ret = get_info(sock_net(sk), user, len, 0);
87759+ ret = get_info(sock_net(sk), user, *len, 0);
87760 break;
87761
87762 case ARPT_SO_GET_ENTRIES:
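
The get_info() change above (repeated for ip_tables and ip6_tables later in this section) turns the length parameter from a pointer into a plain value: each caller now dereferences *len exactly once, so the value compared against sizeof(struct arpt_getinfo) is provably the same value later handed to copy_to_user(). Reduced to its essentials, with an invented info structure standing in:

	/* Sketch of the check-then-use pairing; struct example_info is
	 * invented for illustration.
	 */
	static int get_info_by_value(void __user *user, int len)
	{
		struct example_info {
			char name[XT_TABLE_MAXNAMELEN];
			unsigned int size;
		} info = { };

		if (len != sizeof(info))		/* checked once ... */
			return -EINVAL;
		if (copy_to_user(user, &info, len))	/* ... used with the same value */
			return -EFAULT;
		return 0;
	}
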
87763diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
87764index 17c5e06..1b91206 100644
87765--- a/net/ipv4/netfilter/ip_tables.c
87766+++ b/net/ipv4/netfilter/ip_tables.c
87767@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
87768 #endif
87769
87770 static int get_info(struct net *net, void __user *user,
87771- const int *len, int compat)
87772+ int len, int compat)
87773 {
87774 char name[XT_TABLE_MAXNAMELEN];
87775 struct xt_table *t;
87776 int ret;
87777
87778- if (*len != sizeof(struct ipt_getinfo)) {
87779- duprintf("length %u != %zu\n", *len,
87780+ if (len != sizeof(struct ipt_getinfo)) {
87781+ duprintf("length %u != %zu\n", len,
87782 sizeof(struct ipt_getinfo));
87783 return -EINVAL;
87784 }
87785@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
87786 info.size = private->size;
87787 strcpy(info.name, name);
87788
87789- if (copy_to_user(user, &info, *len) != 0)
87790+ if (copy_to_user(user, &info, len) != 0)
87791 ret = -EFAULT;
87792 else
87793 ret = 0;
87794@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87795
87796 switch (cmd) {
87797 case IPT_SO_GET_INFO:
87798- ret = get_info(sock_net(sk), user, len, 1);
87799+ ret = get_info(sock_net(sk), user, *len, 1);
87800 break;
87801 case IPT_SO_GET_ENTRIES:
87802 ret = compat_get_entries(sock_net(sk), user, len);
87803@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87804
87805 switch (cmd) {
87806 case IPT_SO_GET_INFO:
87807- ret = get_info(sock_net(sk), user, len, 0);
87808+ ret = get_info(sock_net(sk), user, *len, 0);
87809 break;
87810
87811 case IPT_SO_GET_ENTRIES:
87812diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
87813index dc454cc..5bb917f 100644
87814--- a/net/ipv4/ping.c
87815+++ b/net/ipv4/ping.c
87816@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
87817 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87818 0, sock_i_ino(sp),
87819 atomic_read(&sp->sk_refcnt), sp,
87820- atomic_read(&sp->sk_drops), len);
87821+ atomic_read_unchecked(&sp->sk_drops), len);
87822 }
87823
87824 static int ping_seq_show(struct seq_file *seq, void *v)
87825diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
87826index 6f08991..55867ad 100644
87827--- a/net/ipv4/raw.c
87828+++ b/net/ipv4/raw.c
87829@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
87830 int raw_rcv(struct sock *sk, struct sk_buff *skb)
87831 {
87832 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
87833- atomic_inc(&sk->sk_drops);
87834+ atomic_inc_unchecked(&sk->sk_drops);
87835 kfree_skb(skb);
87836 return NET_RX_DROP;
87837 }
87838@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
87839
87840 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
87841 {
87842+ struct icmp_filter filter;
87843+
87844 if (optlen > sizeof(struct icmp_filter))
87845 optlen = sizeof(struct icmp_filter);
87846- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
87847+ if (copy_from_user(&filter, optval, optlen))
87848 return -EFAULT;
87849+ raw_sk(sk)->filter = filter;
87850 return 0;
87851 }
87852
87853 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
87854 {
87855 int len, ret = -EFAULT;
87856+ struct icmp_filter filter;
87857
87858 if (get_user(len, optlen))
87859 goto out;
87860@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
87861 if (len > sizeof(struct icmp_filter))
87862 len = sizeof(struct icmp_filter);
87863 ret = -EFAULT;
87864- if (put_user(len, optlen) ||
87865- copy_to_user(optval, &raw_sk(sk)->filter, len))
87866+ filter = raw_sk(sk)->filter;
87867+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
87868 goto out;
87869 ret = 0;
87870 out: return ret;
87871@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87872 0, 0L, 0,
87873 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87874 0, sock_i_ino(sp),
87875- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87876+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87877 }
87878
87879 static int raw_seq_show(struct seq_file *seq, void *v)
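
Both filter hunks above use a two-part hardening that shows up again in tcp_probe.c, rc80211_pid_debugfs.c, and the IPv6 raw socket code later in this section: copies to and from userspace go through a fixed-size stack bounce buffer instead of touching the live socket structure directly, and the user-supplied length is re-checked against the buffer size immediately before copy_to_user(). A condensed sketch of the read side:

	/* Condensed from raw_geticmpfilter() above; error handling trimmed. */
	static int get_filter(struct sock *sk, char __user *optval, int len)
	{
		struct icmp_filter filter;

		if (len > sizeof(filter))
			len = sizeof(filter);
		filter = raw_sk(sk)->filter;	/* snapshot, not a live pointer */
		/* redundant after the clamp, but keeps the bound visible at
		 * the copy site itself
		 */
		if (len > sizeof(filter) || copy_to_user(optval, &filter, len))
			return -EFAULT;
		return 0;
	}
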
87880diff --git a/net/ipv4/route.c b/net/ipv4/route.c
87881index a0fcc47..32e2c89 100644
87882--- a/net/ipv4/route.c
87883+++ b/net/ipv4/route.c
87884@@ -2552,34 +2552,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
87885 .maxlen = sizeof(int),
87886 .mode = 0200,
87887 .proc_handler = ipv4_sysctl_rtcache_flush,
87888+ .extra1 = &init_net,
87889 },
87890 { },
87891 };
87892
87893 static __net_init int sysctl_route_net_init(struct net *net)
87894 {
87895- struct ctl_table *tbl;
87896+ ctl_table_no_const *tbl = NULL;
87897
87898- tbl = ipv4_route_flush_table;
87899 if (!net_eq(net, &init_net)) {
87900- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87901+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87902 if (tbl == NULL)
87903 goto err_dup;
87904
87905 /* Don't export sysctls to unprivileged users */
87906 if (net->user_ns != &init_user_ns)
87907 tbl[0].procname = NULL;
87908- }
87909- tbl[0].extra1 = net;
87910+ tbl[0].extra1 = net;
87911+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87912+ } else
87913+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
87914
87915- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87916 if (net->ipv4.route_hdr == NULL)
87917 goto err_reg;
87918 return 0;
87919
87920 err_reg:
87921- if (tbl != ipv4_route_flush_table)
87922- kfree(tbl);
87923+ kfree(tbl);
87924 err_dup:
87925 return -ENOMEM;
87926 }
87927@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
87928
87929 static __net_init int rt_genid_init(struct net *net)
87930 {
87931- atomic_set(&net->rt_genid, 0);
87932+ atomic_set_unchecked(&net->rt_genid, 0);
87933 get_random_bytes(&net->ipv4.dev_addr_genid,
87934 sizeof(net->ipv4.dev_addr_genid));
87935 return 0;
87936diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
87937index b236ef04..f962f19 100644
87938--- a/net/ipv4/syncookies.c
87939+++ b/net/ipv4/syncookies.c
87940@@ -348,8 +348,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
87941 * hasn't changed since we received the original syn, but I see
87942 * no easy way to do this.
87943 */
87944- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
87945- RT_SCOPE_UNIVERSE, IPPROTO_TCP,
87946+ flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
87947+ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
87948 inet_sk_flowi_flags(sk),
87949 (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
87950 ireq->loc_addr, th->source, th->dest);
87951diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
87952index d84400b..62e066e 100644
87953--- a/net/ipv4/sysctl_net_ipv4.c
87954+++ b/net/ipv4/sysctl_net_ipv4.c
87955@@ -54,7 +54,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
87956 {
87957 int ret;
87958 int range[2];
87959- ctl_table tmp = {
87960+ ctl_table_no_const tmp = {
87961 .data = &range,
87962 .maxlen = sizeof(range),
87963 .mode = table->mode,
87964@@ -107,7 +107,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
87965 int ret;
87966 gid_t urange[2];
87967 kgid_t low, high;
87968- ctl_table tmp = {
87969+ ctl_table_no_const tmp = {
87970 .data = &urange,
87971 .maxlen = sizeof(urange),
87972 .mode = table->mode,
87973@@ -138,7 +138,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
87974 void __user *buffer, size_t *lenp, loff_t *ppos)
87975 {
87976 char val[TCP_CA_NAME_MAX];
87977- ctl_table tbl = {
87978+ ctl_table_no_const tbl = {
87979 .data = val,
87980 .maxlen = TCP_CA_NAME_MAX,
87981 };
87982@@ -157,7 +157,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
87983 void __user *buffer, size_t *lenp,
87984 loff_t *ppos)
87985 {
87986- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
87987+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
87988 int ret;
87989
87990 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
87991@@ -174,7 +174,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
87992 void __user *buffer, size_t *lenp,
87993 loff_t *ppos)
87994 {
87995- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
87996+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
87997 int ret;
87998
87999 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
88000@@ -200,15 +200,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
88001 struct mem_cgroup *memcg;
88002 #endif
88003
88004- ctl_table tmp = {
88005+ ctl_table_no_const tmp = {
88006 .data = &vec,
88007 .maxlen = sizeof(vec),
88008 .mode = ctl->mode,
88009 };
88010
88011 if (!write) {
88012- ctl->data = &net->ipv4.sysctl_tcp_mem;
88013- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
88014+ ctl_table_no_const tcp_mem = *ctl;
88015+
88016+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
88017+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
88018 }
88019
88020 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
88021@@ -235,7 +237,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
88022 int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
88023 size_t *lenp, loff_t *ppos)
88024 {
88025- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
88026+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
88027 struct tcp_fastopen_context *ctxt;
88028 int ret;
88029 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
88030@@ -476,7 +478,7 @@ static struct ctl_table ipv4_table[] = {
88031 },
88032 {
88033 .procname = "ip_local_reserved_ports",
88034- .data = NULL, /* initialized in sysctl_ipv4_init */
88035+ .data = sysctl_local_reserved_ports,
88036 .maxlen = 65536,
88037 .mode = 0644,
88038 .proc_handler = proc_do_large_bitmap,
88039@@ -860,11 +862,10 @@ static struct ctl_table ipv4_net_table[] = {
88040
88041 static __net_init int ipv4_sysctl_init_net(struct net *net)
88042 {
88043- struct ctl_table *table;
88044+ ctl_table_no_const *table = NULL;
88045
88046- table = ipv4_net_table;
88047 if (!net_eq(net, &init_net)) {
88048- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
88049+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
88050 if (table == NULL)
88051 goto err_alloc;
88052
88053@@ -897,15 +898,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
88054
88055 tcp_init_mem(net);
88056
88057- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
88058+ if (!net_eq(net, &init_net))
88059+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
88060+ else
88061+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
88062 if (net->ipv4.ipv4_hdr == NULL)
88063 goto err_reg;
88064
88065 return 0;
88066
88067 err_reg:
88068- if (!net_eq(net, &init_net))
88069- kfree(table);
88070+ kfree(table);
88071 err_alloc:
88072 return -ENOMEM;
88073 }
88074@@ -927,16 +930,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
88075 static __init int sysctl_ipv4_init(void)
88076 {
88077 struct ctl_table_header *hdr;
88078- struct ctl_table *i;
88079-
88080- for (i = ipv4_table; i->procname; i++) {
88081- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
88082- i->data = sysctl_local_reserved_ports;
88083- break;
88084- }
88085- }
88086- if (!i->procname)
88087- return -EINVAL;
88088
88089 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
88090 if (hdr == NULL)
88091diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
88092index 9841a71..ef60409 100644
88093--- a/net/ipv4/tcp_input.c
88094+++ b/net/ipv4/tcp_input.c
88095@@ -4730,7 +4730,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
88096 * simplifies code)
88097 */
88098 static void
88099-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
88100+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
88101 struct sk_buff *head, struct sk_buff *tail,
88102 u32 start, u32 end)
88103 {
88104@@ -5847,6 +5847,7 @@ discard:
88105 tcp_paws_reject(&tp->rx_opt, 0))
88106 goto discard_and_undo;
88107
88108+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
88109 if (th->syn) {
88110 /* We see SYN without ACK. It is attempt of
88111 * simultaneous connect with crossed SYNs.
88112@@ -5897,6 +5898,7 @@ discard:
88113 goto discard;
88114 #endif
88115 }
88116+#endif
88117 /* "fifth, if neither of the SYN or RST bits is set then
88118 * drop the segment and return."
88119 */
88120@@ -5941,7 +5943,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
88121 goto discard;
88122
88123 if (th->syn) {
88124- if (th->fin)
88125+ if (th->fin || th->urg || th->psh)
88126 goto discard;
88127 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
88128 return 1;
88129diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
88130index d9130a9..00328ff 100644
88131--- a/net/ipv4/tcp_ipv4.c
88132+++ b/net/ipv4/tcp_ipv4.c
88133@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
88134 EXPORT_SYMBOL(sysctl_tcp_low_latency);
88135
88136
88137+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88138+extern int grsec_enable_blackhole;
88139+#endif
88140+
88141 #ifdef CONFIG_TCP_MD5SIG
88142 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
88143 __be32 daddr, __be32 saddr, const struct tcphdr *th);
88144@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
88145 return 0;
88146
88147 reset:
88148+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88149+ if (!grsec_enable_blackhole)
88150+#endif
88151 tcp_v4_send_reset(rsk, skb);
88152 discard:
88153 kfree_skb(skb);
88154@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
88155 TCP_SKB_CB(skb)->sacked = 0;
88156
88157 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
88158- if (!sk)
88159+ if (!sk) {
88160+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88161+ ret = 1;
88162+#endif
88163 goto no_tcp_socket;
88164-
88165+ }
88166 process:
88167- if (sk->sk_state == TCP_TIME_WAIT)
88168+ if (sk->sk_state == TCP_TIME_WAIT) {
88169+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88170+ ret = 2;
88171+#endif
88172 goto do_time_wait;
88173+ }
88174
88175 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
88176 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
88177@@ -2050,6 +2064,10 @@ no_tcp_socket:
88178 bad_packet:
88179 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
88180 } else {
88181+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88182+ if (!grsec_enable_blackhole || (ret == 1 &&
88183+ (skb->dev->flags & IFF_LOOPBACK)))
88184+#endif
88185 tcp_v4_send_reset(NULL, skb);
88186 }
88187
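
The tcp_v4_rcv() changes above thread a little state through the error paths: ret is set to 1 when no socket was found and 2 on the TIME_WAIT path, and the final RST is suppressed whenever the blackhole sysctl is on, except that the no-socket case still sends a RST on loopback so local tools keep working. The same wiring is repeated for IPv6 in tcp_ipv6.c later in this section. The decision, factored out for clarity:

	/* Factored restatement of the #ifdef'd logic above;
	 * grsec_enable_blackhole is the sysctl this patch adds elsewhere.
	 */
	static bool blackhole_suppresses_reset(int ret, const struct sk_buff *skb)
	{
		if (!grsec_enable_blackhole)
			return false;		/* feature off: send the RST */
		if (ret == 1 && (skb->dev->flags & IFF_LOOPBACK))
			return false;		/* no-socket case on loopback */
		return true;			/* otherwise drop silently */
	}
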
88188diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
88189index f35f2df..ccb5ca6 100644
88190--- a/net/ipv4/tcp_minisocks.c
88191+++ b/net/ipv4/tcp_minisocks.c
88192@@ -27,6 +27,10 @@
88193 #include <net/inet_common.h>
88194 #include <net/xfrm.h>
88195
88196+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88197+extern int grsec_enable_blackhole;
88198+#endif
88199+
88200 int sysctl_tcp_syncookies __read_mostly = 1;
88201 EXPORT_SYMBOL(sysctl_tcp_syncookies);
88202
88203@@ -742,7 +746,10 @@ embryonic_reset:
88204 * avoid becoming vulnerable to outside attack aiming at
88205 * resetting legit local connections.
88206 */
88207- req->rsk_ops->send_reset(sk, skb);
88208+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88209+ if (!grsec_enable_blackhole)
88210+#endif
88211+ req->rsk_ops->send_reset(sk, skb);
88212 } else if (fastopen) { /* received a valid RST pkt */
88213 reqsk_fastopen_remove(sk, req, true);
88214 tcp_reset(sk);
88215diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
88216index 17d659e..a9f50ee 100644
88217--- a/net/ipv4/tcp_output.c
88218+++ b/net/ipv4/tcp_output.c
88219@@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
88220 */
88221 TCP_SKB_CB(skb)->when = tcp_time_stamp;
88222
88223- /* make sure skb->data is aligned on arches that require it */
88224- if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
88225+ /* make sure skb->data is aligned on arches that require it
88226+ * and check if ack-trimming & collapsing extended the headroom
88227+ * beyond what csum_start can cover.
88228+ */
88229+ if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
88230+ skb_headroom(skb) >= 0xFFFF)) {
88231 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
88232 GFP_ATOMIC);
88233 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
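
The extra condition above exists because skb->csum_start is a __u16 offset measured from skb->head; once ACK trimming and collapsing have inflated the headroom to 0xFFFF bytes or more, that offset can no longer reach the checksum start, so the skb must be copied into a freshly sized buffer just like the misaligned case. The predicate in isolation:

	/* The two reallocation triggers from the hunk above, side by side. */
	static bool must_copy_before_retransmit(const struct sk_buff *skb)
	{
		/* some arches need skb->data 4-byte aligned for this path */
		bool misaligned = NET_IP_ALIGN &&
				  ((unsigned long)skb->data & 3);
		/* csum_start is a __u16 from skb->head, so headroom of
		 * 0xFFFF or more overflows it
		 */
		bool csum_overflow = skb_headroom(skb) >= 0xFFFF;

		return misaligned || csum_overflow;
	}
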
88234diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
88235index 4526fe6..1a34e43 100644
88236--- a/net/ipv4/tcp_probe.c
88237+++ b/net/ipv4/tcp_probe.c
88238@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
88239 if (cnt + width >= len)
88240 break;
88241
88242- if (copy_to_user(buf + cnt, tbuf, width))
88243+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
88244 return -EFAULT;
88245 cnt += width;
88246 }
88247diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
88248index b78aac3..e18230b 100644
88249--- a/net/ipv4/tcp_timer.c
88250+++ b/net/ipv4/tcp_timer.c
88251@@ -22,6 +22,10 @@
88252 #include <linux/gfp.h>
88253 #include <net/tcp.h>
88254
88255+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88256+extern int grsec_lastack_retries;
88257+#endif
88258+
88259 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
88260 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
88261 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
88262@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
88263 }
88264 }
88265
88266+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88267+ if ((sk->sk_state == TCP_LAST_ACK) &&
88268+ (grsec_lastack_retries > 0) &&
88269+ (grsec_lastack_retries < retry_until))
88270+ retry_until = grsec_lastack_retries;
88271+#endif
88272+
88273 if (retransmits_timed_out(sk, retry_until,
88274 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
88275 /* Has it gone just too far? */
88276diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
88277index 1f4d405..3524677 100644
88278--- a/net/ipv4/udp.c
88279+++ b/net/ipv4/udp.c
88280@@ -87,6 +87,7 @@
88281 #include <linux/types.h>
88282 #include <linux/fcntl.h>
88283 #include <linux/module.h>
88284+#include <linux/security.h>
88285 #include <linux/socket.h>
88286 #include <linux/sockios.h>
88287 #include <linux/igmp.h>
88288@@ -111,6 +112,10 @@
88289 #include <trace/events/skb.h>
88290 #include "udp_impl.h"
88291
88292+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88293+extern int grsec_enable_blackhole;
88294+#endif
88295+
88296 struct udp_table udp_table __read_mostly;
88297 EXPORT_SYMBOL(udp_table);
88298
88299@@ -569,6 +574,9 @@ found:
88300 return s;
88301 }
88302
88303+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
88304+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
88305+
88306 /*
88307 * This routine is called by the ICMP module when it gets some
88308 * sort of error condition. If err < 0 then the socket should
88309@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
88310 dport = usin->sin_port;
88311 if (dport == 0)
88312 return -EINVAL;
88313+
88314+ err = gr_search_udp_sendmsg(sk, usin);
88315+ if (err)
88316+ return err;
88317 } else {
88318 if (sk->sk_state != TCP_ESTABLISHED)
88319 return -EDESTADDRREQ;
88320+
88321+ err = gr_search_udp_sendmsg(sk, NULL);
88322+ if (err)
88323+ return err;
88324+
88325 daddr = inet->inet_daddr;
88326 dport = inet->inet_dport;
88327 /* Open fast path for connected socket.
88328@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
88329 udp_lib_checksum_complete(skb)) {
88330 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88331 IS_UDPLITE(sk));
88332- atomic_inc(&sk->sk_drops);
88333+ atomic_inc_unchecked(&sk->sk_drops);
88334 __skb_unlink(skb, rcvq);
88335 __skb_queue_tail(&list_kill, skb);
88336 }
88337@@ -1194,6 +1211,10 @@ try_again:
88338 if (!skb)
88339 goto out;
88340
88341+ err = gr_search_udp_recvmsg(sk, skb);
88342+ if (err)
88343+ goto out_free;
88344+
88345 ulen = skb->len - sizeof(struct udphdr);
88346 copied = len;
88347 if (copied > ulen)
88348@@ -1227,7 +1248,7 @@ try_again:
88349 if (unlikely(err)) {
88350 trace_kfree_skb(skb, udp_recvmsg);
88351 if (!peeked) {
88352- atomic_inc(&sk->sk_drops);
88353+ atomic_inc_unchecked(&sk->sk_drops);
88354 UDP_INC_STATS_USER(sock_net(sk),
88355 UDP_MIB_INERRORS, is_udplite);
88356 }
88357@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
88358
88359 drop:
88360 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
88361- atomic_inc(&sk->sk_drops);
88362+ atomic_inc_unchecked(&sk->sk_drops);
88363 kfree_skb(skb);
88364 return -1;
88365 }
88366@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
88367 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
88368
88369 if (!skb1) {
88370- atomic_inc(&sk->sk_drops);
88371+ atomic_inc_unchecked(&sk->sk_drops);
88372 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
88373 IS_UDPLITE(sk));
88374 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88375@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
88376 goto csum_error;
88377
88378 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
88379+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88380+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
88381+#endif
88382 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
88383
88384 /*
88385@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
88386 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
88387 0, sock_i_ino(sp),
88388 atomic_read(&sp->sk_refcnt), sp,
88389- atomic_read(&sp->sk_drops), len);
88390+ atomic_read_unchecked(&sp->sk_drops), len);
88391 }
88392
88393 int udp4_seq_show(struct seq_file *seq, void *v)
88394diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
88395index a36d17e..96d099f 100644
88396--- a/net/ipv6/addrconf.c
88397+++ b/net/ipv6/addrconf.c
88398@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
88399 p.iph.ihl = 5;
88400 p.iph.protocol = IPPROTO_IPV6;
88401 p.iph.ttl = 64;
88402- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
88403+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
88404
88405 if (ops->ndo_do_ioctl) {
88406 mm_segment_t oldfs = get_fs();
88407@@ -4388,7 +4388,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
88408 int *valp = ctl->data;
88409 int val = *valp;
88410 loff_t pos = *ppos;
88411- ctl_table lctl;
88412+ ctl_table_no_const lctl;
88413 int ret;
88414
88415 /*
88416@@ -4470,7 +4470,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
88417 int *valp = ctl->data;
88418 int val = *valp;
88419 loff_t pos = *ppos;
88420- ctl_table lctl;
88421+ ctl_table_no_const lctl;
88422 int ret;
88423
88424 /*
88425diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
88426index fff5bdd..15194fb 100644
88427--- a/net/ipv6/icmp.c
88428+++ b/net/ipv6/icmp.c
88429@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
88430
88431 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
88432 {
88433- struct ctl_table *table;
88434+ ctl_table_no_const *table;
88435
88436 table = kmemdup(ipv6_icmp_table_template,
88437 sizeof(ipv6_icmp_table_template),
88438diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
88439index 131dd09..f7ed64f 100644
88440--- a/net/ipv6/ip6_gre.c
88441+++ b/net/ipv6/ip6_gre.c
88442@@ -73,7 +73,7 @@ struct ip6gre_net {
88443 struct net_device *fb_tunnel_dev;
88444 };
88445
88446-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
88447+static struct rtnl_link_ops ip6gre_link_ops;
88448 static int ip6gre_tunnel_init(struct net_device *dev);
88449 static void ip6gre_tunnel_setup(struct net_device *dev);
88450 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
88451@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
88452 }
88453
88454
88455-static struct inet6_protocol ip6gre_protocol __read_mostly = {
88456+static struct inet6_protocol ip6gre_protocol = {
88457 .handler = ip6gre_rcv,
88458 .err_handler = ip6gre_err,
88459 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
88460@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
88461 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
88462 };
88463
88464-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88465+static struct rtnl_link_ops ip6gre_link_ops = {
88466 .kind = "ip6gre",
88467 .maxtype = IFLA_GRE_MAX,
88468 .policy = ip6gre_policy,
88469@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88470 .fill_info = ip6gre_fill_info,
88471 };
88472
88473-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
88474+static struct rtnl_link_ops ip6gre_tap_ops = {
88475 .kind = "ip6gretap",
88476 .maxtype = IFLA_GRE_MAX,
88477 .policy = ip6gre_policy,
88478diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
88479index a14f28b..b4b8956 100644
88480--- a/net/ipv6/ip6_tunnel.c
88481+++ b/net/ipv6/ip6_tunnel.c
88482@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
88483
88484 static int ip6_tnl_dev_init(struct net_device *dev);
88485 static void ip6_tnl_dev_setup(struct net_device *dev);
88486-static struct rtnl_link_ops ip6_link_ops __read_mostly;
88487+static struct rtnl_link_ops ip6_link_ops;
88488
88489 static int ip6_tnl_net_id __read_mostly;
88490 struct ip6_tnl_net {
88491@@ -1686,7 +1686,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
88492 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
88493 };
88494
88495-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
88496+static struct rtnl_link_ops ip6_link_ops = {
88497 .kind = "ip6tnl",
88498 .maxtype = IFLA_IPTUN_MAX,
88499 .policy = ip6_tnl_policy,
88500diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
88501index d1e2e8e..51c19ae 100644
88502--- a/net/ipv6/ipv6_sockglue.c
88503+++ b/net/ipv6/ipv6_sockglue.c
88504@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
88505 if (sk->sk_type != SOCK_STREAM)
88506 return -ENOPROTOOPT;
88507
88508- msg.msg_control = optval;
88509+ msg.msg_control = (void __force_kernel *)optval;
88510 msg.msg_controllen = len;
88511 msg.msg_flags = flags;
88512
88513diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
88514index 125a90d..2a11f36 100644
88515--- a/net/ipv6/netfilter/ip6_tables.c
88516+++ b/net/ipv6/netfilter/ip6_tables.c
88517@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
88518 #endif
88519
88520 static int get_info(struct net *net, void __user *user,
88521- const int *len, int compat)
88522+ int len, int compat)
88523 {
88524 char name[XT_TABLE_MAXNAMELEN];
88525 struct xt_table *t;
88526 int ret;
88527
88528- if (*len != sizeof(struct ip6t_getinfo)) {
88529- duprintf("length %u != %zu\n", *len,
88530+ if (len != sizeof(struct ip6t_getinfo)) {
88531+ duprintf("length %u != %zu\n", len,
88532 sizeof(struct ip6t_getinfo));
88533 return -EINVAL;
88534 }
88535@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
88536 info.size = private->size;
88537 strcpy(info.name, name);
88538
88539- if (copy_to_user(user, &info, *len) != 0)
88540+ if (copy_to_user(user, &info, len) != 0)
88541 ret = -EFAULT;
88542 else
88543 ret = 0;
88544@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88545
88546 switch (cmd) {
88547 case IP6T_SO_GET_INFO:
88548- ret = get_info(sock_net(sk), user, len, 1);
88549+ ret = get_info(sock_net(sk), user, *len, 1);
88550 break;
88551 case IP6T_SO_GET_ENTRIES:
88552 ret = compat_get_entries(sock_net(sk), user, len);
88553@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88554
88555 switch (cmd) {
88556 case IP6T_SO_GET_INFO:
88557- ret = get_info(sock_net(sk), user, len, 0);
88558+ ret = get_info(sock_net(sk), user, *len, 0);
88559 break;
88560
88561 case IP6T_SO_GET_ENTRIES:
88562diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
88563index 83acc14..0ea43c7 100644
88564--- a/net/ipv6/netfilter/ip6t_NPT.c
88565+++ b/net/ipv6/netfilter/ip6t_NPT.c
88566@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
88567 if (pfx_len - i >= 32)
88568 mask = 0;
88569 else
88570- mask = htonl(~((1 << (pfx_len - i)) - 1));
88571+ mask = htonl((1 << (i - pfx_len + 32)) - 1);
88572
88573 idx = i / 32;
88574 addr->s6_addr32[idx] &= mask;
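
The corrected mask keeps the host bits of the partially covered word instead of the prefix bits. With r = pfx_len - i prefix bits left in the word, the old expression ~((1 << r) - 1) preserves the top r bits, exactly the part the translation is about to overwrite, while (1 << (i - pfx_len + 32)) - 1 equals (1 << (32 - r)) - 1 and preserves the low 32 - r bits, so the OR with the new prefix lands on cleared space. Worked through as a standalone userspace check for pfx_len = 48 and the second word:

	/* Standalone userspace demonstration of the mask fix; the kernel
	 * code additionally byte-swaps the result with htonl().
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int pfx_len = 48, i = 32;	/* second 32-bit word */
		unsigned int r = pfx_len - i;		/* 16 prefix bits remain */

		unsigned int old_mask = ~((1u << r) - 1);		/* keeps prefix bits */
		unsigned int new_mask = (1u << (i - pfx_len + 32)) - 1;	/* keeps host bits */

		printf("old %08x new %08x\n", old_mask, new_mask);
		return 0;	/* prints: old ffff0000 new 0000ffff */
	}
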
88575diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
88576index 2f3a018..8bca195 100644
88577--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
88578+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
88579@@ -89,12 +89,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
88580
88581 static int nf_ct_frag6_sysctl_register(struct net *net)
88582 {
88583- struct ctl_table *table;
88584+ ctl_table_no_const *table = NULL;
88585 struct ctl_table_header *hdr;
88586
88587- table = nf_ct_frag6_sysctl_table;
88588 if (!net_eq(net, &init_net)) {
88589- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
88590+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
88591 GFP_KERNEL);
88592 if (table == NULL)
88593 goto err_alloc;
88594@@ -102,9 +101,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88595 table[0].data = &net->ipv6.frags.high_thresh;
88596 table[1].data = &net->ipv6.frags.low_thresh;
88597 table[2].data = &net->ipv6.frags.timeout;
88598- }
88599-
88600- hdr = register_net_sysctl(net, "net/netfilter", table);
88601+ hdr = register_net_sysctl(net, "net/netfilter", table);
88602+ } else
88603+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
88604 if (hdr == NULL)
88605 goto err_reg;
88606
88607@@ -112,8 +111,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88608 return 0;
88609
88610 err_reg:
88611- if (!net_eq(net, &init_net))
88612- kfree(table);
88613+ kfree(table);
88614 err_alloc:
88615 return -ENOMEM;
88616 }
88617diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
88618index 70fa814..d70c28c 100644
88619--- a/net/ipv6/raw.c
88620+++ b/net/ipv6/raw.c
88621@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
88622 {
88623 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
88624 skb_checksum_complete(skb)) {
88625- atomic_inc(&sk->sk_drops);
88626+ atomic_inc_unchecked(&sk->sk_drops);
88627 kfree_skb(skb);
88628 return NET_RX_DROP;
88629 }
88630@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88631 struct raw6_sock *rp = raw6_sk(sk);
88632
88633 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
88634- atomic_inc(&sk->sk_drops);
88635+ atomic_inc_unchecked(&sk->sk_drops);
88636 kfree_skb(skb);
88637 return NET_RX_DROP;
88638 }
88639@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88640
88641 if (inet->hdrincl) {
88642 if (skb_checksum_complete(skb)) {
88643- atomic_inc(&sk->sk_drops);
88644+ atomic_inc_unchecked(&sk->sk_drops);
88645 kfree_skb(skb);
88646 return NET_RX_DROP;
88647 }
88648@@ -604,7 +604,7 @@ out:
88649 return err;
88650 }
88651
88652-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
88653+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
88654 struct flowi6 *fl6, struct dst_entry **dstp,
88655 unsigned int flags)
88656 {
88657@@ -916,12 +916,15 @@ do_confirm:
88658 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
88659 char __user *optval, int optlen)
88660 {
88661+ struct icmp6_filter filter;
88662+
88663 switch (optname) {
88664 case ICMPV6_FILTER:
88665 if (optlen > sizeof(struct icmp6_filter))
88666 optlen = sizeof(struct icmp6_filter);
88667- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
88668+ if (copy_from_user(&filter, optval, optlen))
88669 return -EFAULT;
88670+ raw6_sk(sk)->filter = filter;
88671 return 0;
88672 default:
88673 return -ENOPROTOOPT;
88674@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88675 char __user *optval, int __user *optlen)
88676 {
88677 int len;
88678+ struct icmp6_filter filter;
88679
88680 switch (optname) {
88681 case ICMPV6_FILTER:
88682@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88683 len = sizeof(struct icmp6_filter);
88684 if (put_user(len, optlen))
88685 return -EFAULT;
88686- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
88687+ filter = raw6_sk(sk)->filter;
88688+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
88689 return -EFAULT;
88690 return 0;
88691 default:
88692@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
88693 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
88694 0,
88695 sock_i_ino(sp),
88696- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
88697+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
88698 }
88699
88700 static int raw6_seq_show(struct seq_file *seq, void *v)
88701diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
88702index d9ba8a2..f3f9e14 100644
88703--- a/net/ipv6/reassembly.c
88704+++ b/net/ipv6/reassembly.c
88705@@ -608,12 +608,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
88706
88707 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88708 {
88709- struct ctl_table *table;
88710+ ctl_table_no_const *table = NULL;
88711 struct ctl_table_header *hdr;
88712
88713- table = ip6_frags_ns_ctl_table;
88714 if (!net_eq(net, &init_net)) {
88715- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
88716+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
88717 if (table == NULL)
88718 goto err_alloc;
88719
88720@@ -624,9 +623,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88721 /* Don't export sysctls to unprivileged users */
88722 if (net->user_ns != &init_user_ns)
88723 table[0].procname = NULL;
88724- }
88725+ hdr = register_net_sysctl(net, "net/ipv6", table);
88726+ } else
88727+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
88728
88729- hdr = register_net_sysctl(net, "net/ipv6", table);
88730 if (hdr == NULL)
88731 goto err_reg;
88732
88733@@ -634,8 +634,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88734 return 0;
88735
88736 err_reg:
88737- if (!net_eq(net, &init_net))
88738- kfree(table);
88739+ kfree(table);
88740 err_alloc:
88741 return -ENOMEM;
88742 }
88743diff --git a/net/ipv6/route.c b/net/ipv6/route.c
88744index 5845613..3af8fc7 100644
88745--- a/net/ipv6/route.c
88746+++ b/net/ipv6/route.c
88747@@ -2966,7 +2966,7 @@ ctl_table ipv6_route_table_template[] = {
88748
88749 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
88750 {
88751- struct ctl_table *table;
88752+ ctl_table_no_const *table;
88753
88754 table = kmemdup(ipv6_route_table_template,
88755 sizeof(ipv6_route_table_template),
88756diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
88757index cfba99b..20ca511 100644
88758--- a/net/ipv6/sit.c
88759+++ b/net/ipv6/sit.c
88760@@ -72,7 +72,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
88761 static int ipip6_tunnel_init(struct net_device *dev);
88762 static void ipip6_tunnel_setup(struct net_device *dev);
88763 static void ipip6_dev_free(struct net_device *dev);
88764-static struct rtnl_link_ops sit_link_ops __read_mostly;
88765+static struct rtnl_link_ops sit_link_ops;
88766
88767 static int sit_net_id __read_mostly;
88768 struct sit_net {
88769@@ -1463,7 +1463,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
88770 #endif
88771 };
88772
88773-static struct rtnl_link_ops sit_link_ops __read_mostly = {
88774+static struct rtnl_link_ops sit_link_ops = {
88775 .kind = "sit",
88776 .maxtype = IFLA_IPTUN_MAX,
88777 .policy = ipip6_policy,
88778diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
88779index e85c48b..b8268d3 100644
88780--- a/net/ipv6/sysctl_net_ipv6.c
88781+++ b/net/ipv6/sysctl_net_ipv6.c
88782@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
88783
88784 static int __net_init ipv6_sysctl_net_init(struct net *net)
88785 {
88786- struct ctl_table *ipv6_table;
88787+ ctl_table_no_const *ipv6_table;
88788 struct ctl_table *ipv6_route_table;
88789 struct ctl_table *ipv6_icmp_table;
88790 int err;
88791diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
88792index 8d19346..f122ba5 100644
88793--- a/net/ipv6/tcp_ipv6.c
88794+++ b/net/ipv6/tcp_ipv6.c
88795@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
88796 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
88797 }
88798
88799+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88800+extern int grsec_enable_blackhole;
88801+#endif
88802+
88803 static void tcp_v6_hash(struct sock *sk)
88804 {
88805 if (sk->sk_state != TCP_CLOSE) {
88806@@ -386,6 +390,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
88807
88808 if (dst)
88809 dst->ops->redirect(dst, sk, skb);
88810+ goto out;
88811 }
88812
88813 if (type == ICMPV6_PKT_TOOBIG) {
88814@@ -1440,6 +1445,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
88815 return 0;
88816
88817 reset:
88818+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88819+ if (!grsec_enable_blackhole)
88820+#endif
88821 tcp_v6_send_reset(sk, skb);
88822 discard:
88823 if (opt_skb)
88824@@ -1521,12 +1529,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
88825 TCP_SKB_CB(skb)->sacked = 0;
88826
88827 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
88828- if (!sk)
88829+ if (!sk) {
88830+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88831+ ret = 1;
88832+#endif
88833 goto no_tcp_socket;
88834+ }
88835
88836 process:
88837- if (sk->sk_state == TCP_TIME_WAIT)
88838+ if (sk->sk_state == TCP_TIME_WAIT) {
88839+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88840+ ret = 2;
88841+#endif
88842 goto do_time_wait;
88843+ }
88844
88845 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
88846 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
88847@@ -1575,6 +1591,10 @@ no_tcp_socket:
88848 bad_packet:
88849 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
88850 } else {
88851+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88852+ if (!grsec_enable_blackhole || (ret == 1 &&
88853+ (skb->dev->flags & IFF_LOOPBACK)))
88854+#endif
88855 tcp_v6_send_reset(NULL, skb);
88856 }
88857
88858diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
88859index fb08329..2d6919e 100644
88860--- a/net/ipv6/udp.c
88861+++ b/net/ipv6/udp.c
88862@@ -51,6 +51,10 @@
88863 #include <trace/events/skb.h>
88864 #include "udp_impl.h"
88865
88866+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88867+extern int grsec_enable_blackhole;
88868+#endif
88869+
88870 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
88871 {
88872 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
88873@@ -395,7 +399,7 @@ try_again:
88874 if (unlikely(err)) {
88875 trace_kfree_skb(skb, udpv6_recvmsg);
88876 if (!peeked) {
88877- atomic_inc(&sk->sk_drops);
88878+ atomic_inc_unchecked(&sk->sk_drops);
88879 if (is_udp4)
88880 UDP_INC_STATS_USER(sock_net(sk),
88881 UDP_MIB_INERRORS,
88882@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
88883 return rc;
88884 drop:
88885 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
88886- atomic_inc(&sk->sk_drops);
88887+ atomic_inc_unchecked(&sk->sk_drops);
88888 kfree_skb(skb);
88889 return -1;
88890 }
88891@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
88892 if (likely(skb1 == NULL))
88893 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
88894 if (!skb1) {
88895- atomic_inc(&sk->sk_drops);
88896+ atomic_inc_unchecked(&sk->sk_drops);
88897 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
88898 IS_UDPLITE(sk));
88899 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88900@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
88901 goto discard;
88902
88903 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
88904+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88905+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
88906+#endif
88907 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
88908
88909 kfree_skb(skb);
88910@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
88911 0,
88912 sock_i_ino(sp),
88913 atomic_read(&sp->sk_refcnt), sp,
88914- atomic_read(&sp->sk_drops));
88915+ atomic_read_unchecked(&sp->sk_drops));
88916 }
88917
88918 int udp6_seq_show(struct seq_file *seq, void *v)
88919diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
88920index a68c88c..d55b0c5 100644
88921--- a/net/irda/ircomm/ircomm_tty.c
88922+++ b/net/irda/ircomm/ircomm_tty.c
88923@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88924 add_wait_queue(&port->open_wait, &wait);
88925
88926 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
88927- __FILE__, __LINE__, tty->driver->name, port->count);
88928+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88929
88930 spin_lock_irqsave(&port->lock, flags);
88931 if (!tty_hung_up_p(filp)) {
88932 extra_count = 1;
88933- port->count--;
88934+ atomic_dec(&port->count);
88935 }
88936 spin_unlock_irqrestore(&port->lock, flags);
88937 port->blocked_open++;
88938@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88939 }
88940
88941 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
88942- __FILE__, __LINE__, tty->driver->name, port->count);
88943+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88944
88945 schedule();
88946 }
88947@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88948 if (extra_count) {
88949 /* ++ is not atomic, so this should be protected - Jean II */
88950 spin_lock_irqsave(&port->lock, flags);
88951- port->count++;
88952+ atomic_inc(&port->count);
88953 spin_unlock_irqrestore(&port->lock, flags);
88954 }
88955 port->blocked_open--;
88956
88957 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
88958- __FILE__, __LINE__, tty->driver->name, port->count);
88959+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88960
88961 if (!retval)
88962 port->flags |= ASYNC_NORMAL_ACTIVE;
88963@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
88964
88965 /* ++ is not atomic, so this should be protected - Jean II */
88966 spin_lock_irqsave(&self->port.lock, flags);
88967- self->port.count++;
88968+ atomic_inc(&self->port.count);
88969 spin_unlock_irqrestore(&self->port.lock, flags);
88970 tty_port_tty_set(&self->port, tty);
88971
88972 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
88973- self->line, self->port.count);
88974+ self->line, atomic_read(&self->port.count));
88975
88976 /* Not really used by us, but lets do it anyway */
88977 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
88978@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
88979 tty_kref_put(port->tty);
88980 }
88981 port->tty = NULL;
88982- port->count = 0;
88983+ atomic_set(&port->count, 0);
88984 spin_unlock_irqrestore(&port->lock, flags);
88985
88986 wake_up_interruptible(&port->open_wait);
88987@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
88988 seq_putc(m, '\n');
88989
88990 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
88991- seq_printf(m, "Open count: %d\n", self->port.count);
88992+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
88993 seq_printf(m, "Max data size: %d\n", self->max_data_size);
88994 seq_printf(m, "Max header size: %d\n", self->max_header_size);
88995
88996diff --git a/net/irda/iriap.c b/net/irda/iriap.c
88997index e71e85b..29340a9 100644
88998--- a/net/irda/iriap.c
88999+++ b/net/irda/iriap.c
89000@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
89001 /* case CS_ISO_8859_9: */
89002 /* case CS_UNICODE: */
89003 default:
89004- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
89005- __func__, ias_charset_types[charset]);
89006+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
89007+ __func__, charset,
89008+ charset < ARRAY_SIZE(ias_charset_types) ?
89009+ ias_charset_types[charset] :
89010+ "(unknown)");
89011
89012 /* Aborting, close connection! */
89013 iriap_disconnect_request(self);
89014diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
89015index cd6f7a9..e63fe89 100644
89016--- a/net/iucv/af_iucv.c
89017+++ b/net/iucv/af_iucv.c
89018@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
89019
89020 write_lock_bh(&iucv_sk_list.lock);
89021
89022- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
89023+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
89024 while (__iucv_get_sock_by_name(name)) {
89025 sprintf(name, "%08x",
89026- atomic_inc_return(&iucv_sk_list.autobind_name));
89027+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
89028 }
89029
89030 write_unlock_bh(&iucv_sk_list.lock);
89031diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
89032index df08250..02021fe 100644
89033--- a/net/iucv/iucv.c
89034+++ b/net/iucv/iucv.c
89035@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
89036 return NOTIFY_OK;
89037 }
89038
89039-static struct notifier_block __refdata iucv_cpu_notifier = {
89040+static struct notifier_block iucv_cpu_notifier = {
89041 .notifier_call = iucv_cpu_notify,
89042 };
89043
89044diff --git a/net/key/af_key.c b/net/key/af_key.c
89045index 5b426a6..970032b 100644
89046--- a/net/key/af_key.c
89047+++ b/net/key/af_key.c
89048@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
89049 static u32 get_acqseq(void)
89050 {
89051 u32 res;
89052- static atomic_t acqseq;
89053+ static atomic_unchecked_t acqseq;
89054
89055 do {
89056- res = atomic_inc_return(&acqseq);
89057+ res = atomic_inc_return_unchecked(&acqseq);
89058 } while (!res);
89059 return res;
89060 }
89061diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
89062index 49c48c6..9e72ff4 100644
89063--- a/net/mac80211/cfg.c
89064+++ b/net/mac80211/cfg.c
89065@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
89066 ret = ieee80211_vif_use_channel(sdata, chandef,
89067 IEEE80211_CHANCTX_EXCLUSIVE);
89068 }
89069- } else if (local->open_count == local->monitors) {
89070+ } else if (local_read(&local->open_count) == local->monitors) {
89071 local->_oper_channel = chandef->chan;
89072 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
89073 ieee80211_hw_config(local, 0);
89074@@ -2718,7 +2718,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
89075 else
89076 local->probe_req_reg--;
89077
89078- if (!local->open_count)
89079+ if (!local_read(&local->open_count))
89080 break;
89081
89082 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
89083diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
89084index 55d8f89..bec0c2b 100644
89085--- a/net/mac80211/ieee80211_i.h
89086+++ b/net/mac80211/ieee80211_i.h
89087@@ -28,6 +28,7 @@
89088 #include <net/ieee80211_radiotap.h>
89089 #include <net/cfg80211.h>
89090 #include <net/mac80211.h>
89091+#include <asm/local.h>
89092 #include "key.h"
89093 #include "sta_info.h"
89094 #include "debug.h"
89095@@ -910,7 +911,7 @@ struct ieee80211_local {
89096 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
89097 spinlock_t queue_stop_reason_lock;
89098
89099- int open_count;
89100+ local_t open_count;
89101 int monitors, cooked_mntrs;
89102 /* number of interfaces with corresponding FIF_ flags */
89103 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
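
Every local->open_count access in mac80211 is converted in the hunks that follow: the counter becomes a local_t (hence the new asm/local.h include above) manipulated only through local_read(), local_inc(), and local_dec(), giving atomic read-modify-write updates on the counter without taking a lock. The API in miniature, with invented helpers:

	/* Minimal illustration of the local_t API the following hunks use. */
	#include <asm/local.h>

	static local_t open_count = LOCAL_INIT(0);

	static void open_one(void)
	{
		local_inc(&open_count);
		if (local_read(&open_count) == 1) {
			/* first opener: bring the hardware up, as in
			 * ieee80211_do_open() below
			 */
		}
	}

	static void close_one(void)
	{
		local_dec(&open_count);
		if (local_read(&open_count) == 0) {
			/* last closer: stop the device */
		}
	}
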
89104diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
89105index 8be854e..ad72a69 100644
89106--- a/net/mac80211/iface.c
89107+++ b/net/mac80211/iface.c
89108@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89109 break;
89110 }
89111
89112- if (local->open_count == 0) {
89113+ if (local_read(&local->open_count) == 0) {
89114 res = drv_start(local);
89115 if (res)
89116 goto err_del_bss;
89117@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89118 break;
89119 }
89120
89121- if (local->monitors == 0 && local->open_count == 0) {
89122+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
89123 res = ieee80211_add_virtual_monitor(local);
89124 if (res)
89125 goto err_stop;
89126@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89127 mutex_unlock(&local->mtx);
89128
89129 if (coming_up)
89130- local->open_count++;
89131+ local_inc(&local->open_count);
89132
89133 if (hw_reconf_flags)
89134 ieee80211_hw_config(local, hw_reconf_flags);
89135@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89136 err_del_interface:
89137 drv_remove_interface(local, sdata);
89138 err_stop:
89139- if (!local->open_count)
89140+ if (!local_read(&local->open_count))
89141 drv_stop(local);
89142 err_del_bss:
89143 sdata->bss = NULL;
89144@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89145 }
89146
89147 if (going_down)
89148- local->open_count--;
89149+ local_dec(&local->open_count);
89150
89151 switch (sdata->vif.type) {
89152 case NL80211_IFTYPE_AP_VLAN:
89153@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89154
89155 ieee80211_recalc_ps(local, -1);
89156
89157- if (local->open_count == 0) {
89158+ if (local_read(&local->open_count) == 0) {
89159 if (local->ops->napi_poll)
89160 napi_disable(&local->napi);
89161 ieee80211_clear_tx_pending(local);
89162@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89163 }
89164 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
89165
89166- if (local->monitors == local->open_count && local->monitors > 0)
89167+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
89168 ieee80211_add_virtual_monitor(local);
89169 }
89170
89171diff --git a/net/mac80211/main.c b/net/mac80211/main.c
89172index 1b087ff..bf600e9 100644
89173--- a/net/mac80211/main.c
89174+++ b/net/mac80211/main.c
89175@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
89176 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
89177 IEEE80211_CONF_CHANGE_POWER);
89178
89179- if (changed && local->open_count) {
89180+ if (changed && local_read(&local->open_count)) {
89181 ret = drv_config(local, changed);
89182 /*
89183 * Goal:
89184diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
89185index 79a48f3..5e185c9 100644
89186--- a/net/mac80211/pm.c
89187+++ b/net/mac80211/pm.c
89188@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89189 struct sta_info *sta;
89190 struct ieee80211_chanctx *ctx;
89191
89192- if (!local->open_count)
89193+ if (!local_read(&local->open_count))
89194 goto suspend;
89195
89196 ieee80211_scan_cancel(local);
89197@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89198 cancel_work_sync(&local->dynamic_ps_enable_work);
89199 del_timer_sync(&local->dynamic_ps_timer);
89200
89201- local->wowlan = wowlan && local->open_count;
89202+ local->wowlan = wowlan && local_read(&local->open_count);
89203 if (local->wowlan) {
89204 int err = drv_suspend(local, wowlan);
89205 if (err < 0) {
89206@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89207 mutex_unlock(&local->chanctx_mtx);
89208
89209 /* stop hardware - this must stop RX */
89210- if (local->open_count)
89211+ if (local_read(&local->open_count))
89212 ieee80211_stop_device(local);
89213
89214 suspend:
89215diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
89216index dd88381..eef4dd6 100644
89217--- a/net/mac80211/rate.c
89218+++ b/net/mac80211/rate.c
89219@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
89220
89221 ASSERT_RTNL();
89222
89223- if (local->open_count)
89224+ if (local_read(&local->open_count))
89225 return -EBUSY;
89226
89227 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
89228diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
89229index c97a065..ff61928 100644
89230--- a/net/mac80211/rc80211_pid_debugfs.c
89231+++ b/net/mac80211/rc80211_pid_debugfs.c
89232@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
89233
89234 spin_unlock_irqrestore(&events->lock, status);
89235
89236- if (copy_to_user(buf, pb, p))
89237+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
89238 return -EFAULT;
89239
89240 return p;
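The rc80211_pid_debugfs.c change is a classic copy_to_user() hardening: before copying the staging buffer out, verify that the computed length cannot exceed the buffer itself. A condensed sketch of the pattern (function and buffer names here are illustrative, not the driver's):

#include <linux/uaccess.h>	/* copy_to_user() */

static ssize_t dump_events(char __user *buf, size_t len)
{
	char pb[128];		/* kernel-side staging buffer */
	unsigned int p = 0;

	/* ... p advances as event records are formatted into pb ... */

	/* belt and braces: if the length bookkeeping ever went wrong,
	 * fail instead of leaking adjacent stack memory to userspace */
	if (p > sizeof(pb) || copy_to_user(buf, pb, p))
		return -EFAULT;
	return p;
}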
89241diff --git a/net/mac80211/util.c b/net/mac80211/util.c
89242index f11e8c5..08d0013 100644
89243--- a/net/mac80211/util.c
89244+++ b/net/mac80211/util.c
89245@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
89246 }
89247 #endif
89248 /* everything else happens only if HW was up & running */
89249- if (!local->open_count)
89250+ if (!local_read(&local->open_count))
89251 goto wake_up;
89252
89253 /*
89254diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
89255index 49e96df..63a51c3 100644
89256--- a/net/netfilter/Kconfig
89257+++ b/net/netfilter/Kconfig
89258@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
89259
89260 To compile it as a module, choose M here. If unsure, say N.
89261
89262+config NETFILTER_XT_MATCH_GRADM
89263+ tristate '"gradm" match support'
89264+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
89265+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
89266+ ---help---
89267+	  The gradm match allows matching on whether the grsecurity RBAC
89268+	  system is enabled.  It is useful when iptables rules are applied
89269+	  early on bootup to prevent connections to the machine (except
89270+	  from a trusted host) while the RBAC system is disabled.
89271+
89272 config NETFILTER_XT_MATCH_HASHLIMIT
89273 tristate '"hashlimit" match support'
89274 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
89275diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
89276index 3259697..54d5393 100644
89277--- a/net/netfilter/Makefile
89278+++ b/net/netfilter/Makefile
89279@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
89280 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
89281 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
89282 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
89283+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
89284 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
89285 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
89286 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
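The Kconfig and Makefile hunks wire the new xt_gradm match into the build; the module source itself begins at the end of this section. For orientation, this is roughly the shape of an xtables match module on a 3.8-era kernel. Everything named "example" here is a placeholder, and the real xt_gradm's match routine tests whether the RBAC system is enabled rather than returning true unconditionally:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

/* Illustrative skeleton only, not the contents of xt_gradm.c. */
static bool example_mt(const struct sk_buff *skb,
		       struct xt_action_param *par)
{
	return true;	/* xt_gradm would test the RBAC state here */
}

static struct xt_match example_mt_reg __read_mostly = {
	.name     = "example",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,
	.match    = example_mt,
	.me       = THIS_MODULE,
};

static int __init example_mt_init(void)
{
	return xt_register_match(&example_mt_reg);
}

static void __exit example_mt_exit(void)
{
	xt_unregister_match(&example_mt_reg);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
MODULE_LICENSE("GPL");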
89287diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
89288index 6d6d8f2..a676749 100644
89289--- a/net/netfilter/ipset/ip_set_core.c
89290+++ b/net/netfilter/ipset/ip_set_core.c
89291@@ -1800,7 +1800,7 @@ done:
89292 return ret;
89293 }
89294
89295-static struct nf_sockopt_ops so_set __read_mostly = {
89296+static struct nf_sockopt_ops so_set = {
89297 .pf = PF_INET,
89298 .get_optmin = SO_IP_SET,
89299 .get_optmax = SO_IP_SET + 1,
89300diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
89301index 30e764a..c3b6a9d 100644
89302--- a/net/netfilter/ipvs/ip_vs_conn.c
89303+++ b/net/netfilter/ipvs/ip_vs_conn.c
89304@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
89305 /* Increase the refcnt counter of the dest */
89306 atomic_inc(&dest->refcnt);
89307
89308- conn_flags = atomic_read(&dest->conn_flags);
89309+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
89310 if (cp->protocol != IPPROTO_UDP)
89311 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
89312 flags = cp->flags;
89313@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
89314 atomic_set(&cp->refcnt, 1);
89315
89316 atomic_set(&cp->n_control, 0);
89317- atomic_set(&cp->in_pkts, 0);
89318+ atomic_set_unchecked(&cp->in_pkts, 0);
89319
89320 atomic_inc(&ipvs->conn_count);
89321 if (flags & IP_VS_CONN_F_NO_CPORT)
89322@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
89323
89324 /* Don't drop the entry if its number of incoming packets is not
89325 located in [0, 8] */
89326- i = atomic_read(&cp->in_pkts);
89327+ i = atomic_read_unchecked(&cp->in_pkts);
89328 if (i > 8 || i < 0) return 0;
89329
89330 if (!todrop_rate[i]) return 0;
89331diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
89332index 47edf5a..235b07d 100644
89333--- a/net/netfilter/ipvs/ip_vs_core.c
89334+++ b/net/netfilter/ipvs/ip_vs_core.c
89335@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
89336 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
89337 /* do not touch skb anymore */
89338
89339- atomic_inc(&cp->in_pkts);
89340+ atomic_inc_unchecked(&cp->in_pkts);
89341 ip_vs_conn_put(cp);
89342 return ret;
89343 }
89344@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
89345 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
89346 pkts = sysctl_sync_threshold(ipvs);
89347 else
89348- pkts = atomic_add_return(1, &cp->in_pkts);
89349+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89350
89351 if (ipvs->sync_state & IP_VS_STATE_MASTER)
89352 ip_vs_sync_conn(net, cp, pkts);
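The in_pkts and conn_flags conversions in these IPVS hunks (and the global_seq counter in nfnetlink_log.c further down) move wrap-tolerant statistics counters from atomic_t to atomic_unchecked_t, the escape hatch for PaX's REFCOUNT overflow detection: only true reference counts stay on the checked type, so a packet counter that legitimately wraps cannot trigger a false positive. When PAX_REFCOUNT is not configured, the unchecked ops collapse into the ordinary ones, roughly like this sketch (the patch's actual definitions live in the arch atomic headers):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
#endif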
89353diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
89354index ec664cb..7f34a77 100644
89355--- a/net/netfilter/ipvs/ip_vs_ctl.c
89356+++ b/net/netfilter/ipvs/ip_vs_ctl.c
89357@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
89358 ip_vs_rs_hash(ipvs, dest);
89359 write_unlock_bh(&ipvs->rs_lock);
89360 }
89361- atomic_set(&dest->conn_flags, conn_flags);
89362+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
89363
89364 /* bind the service */
89365 if (!dest->svc) {
89366@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
89367 * align with netns init in ip_vs_control_net_init()
89368 */
89369
89370-static struct ctl_table vs_vars[] = {
89371+static ctl_table_no_const vs_vars[] __read_only = {
89372 {
89373 .procname = "amemthresh",
89374 .maxlen = sizeof(int),
89375@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89376 " %-7s %-6d %-10d %-10d\n",
89377 &dest->addr.in6,
89378 ntohs(dest->port),
89379- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89380+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89381 atomic_read(&dest->weight),
89382 atomic_read(&dest->activeconns),
89383 atomic_read(&dest->inactconns));
89384@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89385 "%-7s %-6d %-10d %-10d\n",
89386 ntohl(dest->addr.ip),
89387 ntohs(dest->port),
89388- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89389+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89390 atomic_read(&dest->weight),
89391 atomic_read(&dest->activeconns),
89392 atomic_read(&dest->inactconns));
89393@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
89394
89395 entry.addr = dest->addr.ip;
89396 entry.port = dest->port;
89397- entry.conn_flags = atomic_read(&dest->conn_flags);
89398+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
89399 entry.weight = atomic_read(&dest->weight);
89400 entry.u_threshold = dest->u_threshold;
89401 entry.l_threshold = dest->l_threshold;
89402@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
89403 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
89404 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
89405 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
89406- (atomic_read(&dest->conn_flags) &
89407+ (atomic_read_unchecked(&dest->conn_flags) &
89408 IP_VS_CONN_F_FWD_MASK)) ||
89409 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
89410 atomic_read(&dest->weight)) ||
89411@@ -3688,7 +3688,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
89412 {
89413 int idx;
89414 struct netns_ipvs *ipvs = net_ipvs(net);
89415- struct ctl_table *tbl;
89416+ ctl_table_no_const *tbl;
89417
89418 atomic_set(&ipvs->dropentry, 0);
89419 spin_lock_init(&ipvs->dropentry_lock);
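From ip_vs_ctl.c onwards a second recurring conversion appears: sysctl table templates are declared ctl_table_no_const and marked __read_only, and the copy that actually gets registered per netns is duplicated into writable memory with kmemdup(). The same pattern repeats in the lblc/lblcr schedulers and the nf_conntrack_* files below. A minimal sketch, with a stand-in table (the field target and the commented register call are illustrative):

static int per_net_thresh;	/* stand-in for a per-netns field */

static ctl_table_no_const example_template[] __read_only = {
	{
		.procname	= "thresh",
		.data		= &per_net_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int example_init_sysctl(struct net *net)
{
	ctl_table_no_const *table;	/* same layout, minus const */

	table = kmemdup(example_template, sizeof(example_template),
			GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	/* retarget .data at this netns' copy, then register:
	 * register_net_sysctl(net, "net/example", table); */
	return 0;
}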
89420diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
89421index fdd89b9..bd96aa9 100644
89422--- a/net/netfilter/ipvs/ip_vs_lblc.c
89423+++ b/net/netfilter/ipvs/ip_vs_lblc.c
89424@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
89425 * IPVS LBLC sysctl table
89426 */
89427 #ifdef CONFIG_SYSCTL
89428-static ctl_table vs_vars_table[] = {
89429+static ctl_table_no_const vs_vars_table[] __read_only = {
89430 {
89431 .procname = "lblc_expiration",
89432 .data = NULL,
89433diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
89434index c03b6a3..8ce3681 100644
89435--- a/net/netfilter/ipvs/ip_vs_lblcr.c
89436+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
89437@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
89438 * IPVS LBLCR sysctl table
89439 */
89440
89441-static ctl_table vs_vars_table[] = {
89442+static ctl_table_no_const vs_vars_table[] __read_only = {
89443 {
89444 .procname = "lblcr_expiration",
89445 .data = NULL,
89446diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
89447index 44fd10c..2a163b3 100644
89448--- a/net/netfilter/ipvs/ip_vs_sync.c
89449+++ b/net/netfilter/ipvs/ip_vs_sync.c
89450@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
89451 cp = cp->control;
89452 if (cp) {
89453 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89454- pkts = atomic_add_return(1, &cp->in_pkts);
89455+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89456 else
89457 pkts = sysctl_sync_threshold(ipvs);
89458 ip_vs_sync_conn(net, cp->control, pkts);
89459@@ -758,7 +758,7 @@ control:
89460 if (!cp)
89461 return;
89462 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89463- pkts = atomic_add_return(1, &cp->in_pkts);
89464+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89465 else
89466 pkts = sysctl_sync_threshold(ipvs);
89467 goto sloop;
89468@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
89469
89470 if (opt)
89471 memcpy(&cp->in_seq, opt, sizeof(*opt));
89472- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89473+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89474 cp->state = state;
89475 cp->old_state = cp->state;
89476 /*
89477diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
89478index ee6b7a9..f9a89f6 100644
89479--- a/net/netfilter/ipvs/ip_vs_xmit.c
89480+++ b/net/netfilter/ipvs/ip_vs_xmit.c
89481@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
89482 else
89483 rc = NF_ACCEPT;
89484 /* do not touch skb anymore */
89485- atomic_inc(&cp->in_pkts);
89486+ atomic_inc_unchecked(&cp->in_pkts);
89487 goto out;
89488 }
89489
89490@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
89491 else
89492 rc = NF_ACCEPT;
89493 /* do not touch skb anymore */
89494- atomic_inc(&cp->in_pkts);
89495+ atomic_inc_unchecked(&cp->in_pkts);
89496 goto out;
89497 }
89498
89499diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
89500index 7df424e..a527b02 100644
89501--- a/net/netfilter/nf_conntrack_acct.c
89502+++ b/net/netfilter/nf_conntrack_acct.c
89503@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
89504 #ifdef CONFIG_SYSCTL
89505 static int nf_conntrack_acct_init_sysctl(struct net *net)
89506 {
89507- struct ctl_table *table;
89508+ ctl_table_no_const *table;
89509
89510 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
89511 GFP_KERNEL);
89512diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
89513index e4a0c4f..c263f28 100644
89514--- a/net/netfilter/nf_conntrack_core.c
89515+++ b/net/netfilter/nf_conntrack_core.c
89516@@ -1529,6 +1529,10 @@ err_extend:
89517 #define DYING_NULLS_VAL ((1<<30)+1)
89518 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
89519
89520+#ifdef CONFIG_GRKERNSEC_HIDESYM
89521+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
89522+#endif
89523+
89524 static int nf_conntrack_init_net(struct net *net)
89525 {
89526 int ret;
89527@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
89528 goto err_stat;
89529 }
89530
89531+#ifdef CONFIG_GRKERNSEC_HIDESYM
89532+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
89533+#else
89534 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
89535+#endif
89536 if (!net->ct.slabname) {
89537 ret = -ENOMEM;
89538 goto err_slabname;
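The nf_conntrack_core.c hunk closes an infoleak under GRKERNSEC_HIDESYM: the per-netns conntrack cache was named after the kernel address of its struct net ("nf_conntrack_%p"), and slab cache names are visible through /proc/slabinfo. The patch substitutes a monotonically increasing counter. A condensed sketch of the same logic (the cast keeps the %08lx format honest for the int-returning unchecked op):

static atomic_unchecked_t cache_id = ATOMIC_INIT(0);

static char *make_slabname(struct net *net)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	/* counter instead of pointer: nothing useful leaks via slabinfo */
	return kasprintf(GFP_KERNEL, "nf_conntrack_%08lx",
			 (unsigned long)atomic_inc_return_unchecked(&cache_id));
#else
	return kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
#endif
}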
89539diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
89540index faa978f..1afb18f 100644
89541--- a/net/netfilter/nf_conntrack_ecache.c
89542+++ b/net/netfilter/nf_conntrack_ecache.c
89543@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
89544 #ifdef CONFIG_SYSCTL
89545 static int nf_conntrack_event_init_sysctl(struct net *net)
89546 {
89547- struct ctl_table *table;
89548+ ctl_table_no_const *table;
89549
89550 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
89551 GFP_KERNEL);
89552diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
89553index 884f2b3..d53b33a 100644
89554--- a/net/netfilter/nf_conntrack_helper.c
89555+++ b/net/netfilter/nf_conntrack_helper.c
89556@@ -55,7 +55,7 @@ static struct ctl_table helper_sysctl_table[] = {
89557
89558 static int nf_conntrack_helper_init_sysctl(struct net *net)
89559 {
89560- struct ctl_table *table;
89561+ ctl_table_no_const *table;
89562
89563 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
89564 GFP_KERNEL);
89565diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
89566index 51e928d..72a413a 100644
89567--- a/net/netfilter/nf_conntrack_proto.c
89568+++ b/net/netfilter/nf_conntrack_proto.c
89569@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
89570
89571 static void
89572 nf_ct_unregister_sysctl(struct ctl_table_header **header,
89573- struct ctl_table **table,
89574+ ctl_table_no_const **table,
89575 unsigned int users)
89576 {
89577 if (users > 0)
89578diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
89579index e7185c6..4ad6c9c 100644
89580--- a/net/netfilter/nf_conntrack_standalone.c
89581+++ b/net/netfilter/nf_conntrack_standalone.c
89582@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
89583
89584 static int nf_conntrack_standalone_init_sysctl(struct net *net)
89585 {
89586- struct ctl_table *table;
89587+ ctl_table_no_const *table;
89588
89589 if (net_eq(net, &init_net)) {
89590 nf_ct_netfilter_header =
89591diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
89592index 7ea8026..bc9512d 100644
89593--- a/net/netfilter/nf_conntrack_timestamp.c
89594+++ b/net/netfilter/nf_conntrack_timestamp.c
89595@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
89596 #ifdef CONFIG_SYSCTL
89597 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
89598 {
89599- struct ctl_table *table;
89600+ ctl_table_no_const *table;
89601
89602 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
89603 GFP_KERNEL);
89604diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
89605index 9e31269..bc4c1b7 100644
89606--- a/net/netfilter/nf_log.c
89607+++ b/net/netfilter/nf_log.c
89608@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
89609
89610 #ifdef CONFIG_SYSCTL
89611 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
89612-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
89613+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
89614 static struct ctl_table_header *nf_log_dir_header;
89615
89616 static int nf_log_proc_dostring(ctl_table *table, int write,
89617@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
89618 rcu_assign_pointer(nf_loggers[tindex], logger);
89619 mutex_unlock(&nf_log_mutex);
89620 } else {
89621+ ctl_table_no_const nf_log_table = *table;
89622+
89623 mutex_lock(&nf_log_mutex);
89624 logger = rcu_dereference_protected(nf_loggers[tindex],
89625 lockdep_is_held(&nf_log_mutex));
89626 if (!logger)
89627- table->data = "NONE";
89628+ nf_log_table.data = "NONE";
89629 else
89630- table->data = logger->name;
89631- r = proc_dostring(table, write, buffer, lenp, ppos);
89632+ nf_log_table.data = logger->name;
89633+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
89634 mutex_unlock(&nf_log_mutex);
89635 }
89636
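Because nf_log_sysctl_table is now __read_only, nf_log_proc_dostring() can no longer retarget table->data at the current logger name in place; the hunk instead takes a writable on-stack copy of the entry, edits the copy, and hands that to proc_dostring(). The pattern in isolation (a sketch; the handler name is illustrative):

static int example_dostring(ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const tmp = *table;	/* writable stack copy */

	tmp.data = "NONE";	/* runtime-chosen string goes here */
	return proc_dostring(&tmp, write, buffer, lenp, ppos);
}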
89637diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
89638index f042ae5..30ea486 100644
89639--- a/net/netfilter/nf_sockopt.c
89640+++ b/net/netfilter/nf_sockopt.c
89641@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
89642 }
89643 }
89644
89645- list_add(&reg->list, &nf_sockopts);
89646+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
89647 out:
89648 mutex_unlock(&nf_sockopt_mutex);
89649 return ret;
89650@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
89651 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
89652 {
89653 mutex_lock(&nf_sockopt_mutex);
89654- list_del(&reg->list);
89655+ pax_list_del((struct list_head *)&reg->list);
89656 mutex_unlock(&nf_sockopt_mutex);
89657 }
89658 EXPORT_SYMBOL(nf_unregister_sockopt);
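With structure constification, a registered nf_sockopt_ops lives in read-only memory, so its embedded list_head cannot be linked with plain list_add()/list_del(); the pax_list_* helpers perform the identical pointer surgery inside the PaX open/close-kernel write window. Conceptually it looks like this sketch of the idea only (the real helpers also carry the debug-list sanity checks):

static void pax_list_add_sketch(struct list_head *new,
				struct list_head *head)
{
	pax_open_kernel();	/* temporarily make r/o kernel data writable */
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
	pax_close_kernel();	/* restore the read-only protection */
}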
89659diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
89660index 589d686..dc3fd5d 100644
89661--- a/net/netfilter/nfnetlink_acct.c
89662+++ b/net/netfilter/nfnetlink_acct.c
89663@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
89664 return -EINVAL;
89665
89666 acct_name = nla_data(tb[NFACCT_NAME]);
89667+ if (strlen(acct_name) == 0)
89668+ return -EINVAL;
89669
89670 list_for_each_entry(nfacct, &nfnl_acct_list, head) {
89671 if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
89672diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
89673index 92fd8ec..3f6ea4b 100644
89674--- a/net/netfilter/nfnetlink_log.c
89675+++ b/net/netfilter/nfnetlink_log.c
89676@@ -72,7 +72,7 @@ struct nfulnl_instance {
89677 };
89678
89679 static DEFINE_SPINLOCK(instances_lock);
89680-static atomic_t global_seq;
89681+static atomic_unchecked_t global_seq;
89682
89683 #define INSTANCE_BUCKETS 16
89684 static struct hlist_head instance_table[INSTANCE_BUCKETS];
89685@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
89686 /* global sequence number */
89687 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
89688 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
89689- htonl(atomic_inc_return(&global_seq))))
89690+ htonl(atomic_inc_return_unchecked(&global_seq))))
89691 goto nla_put_failure;
89692
89693 if (data_len) {
89694diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
89695index 3158d87..39006c9 100644
89696--- a/net/netfilter/nfnetlink_queue_core.c
89697+++ b/net/netfilter/nfnetlink_queue_core.c
89698@@ -1064,8 +1064,10 @@ static int __init nfnetlink_queue_init(void)
89699
89700 #ifdef CONFIG_PROC_FS
89701 if (!proc_create("nfnetlink_queue", 0440,
89702- proc_net_netfilter, &nfqnl_file_ops))
89703+ proc_net_netfilter, &nfqnl_file_ops)) {
89704+ status = -ENOMEM;
89705 goto cleanup_subsys;
89706+ }
89707 #endif
89708
89709 register_netdevice_notifier(&nfqnl_dev_notifier);
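The nfnetlink_queue_core.c change is a plain error-handling fix: when proc_create() failed, the cleanup label was reached with whatever value status held from the earlier, successful call, so init could report success while half torn down. A condensed sketch of the corrected shape, with the ops structures assumed rather than shown in full:

static struct pernet_operations example_net_ops;	/* assumed elsewhere */
static const struct file_operations example_file_ops;	/* assumed elsewhere */

static int __init example_init(void)
{
	int status = register_pernet_subsys(&example_net_ops);

	if (status < 0)
		return status;

	if (!proc_create("example", 0440, proc_net_netfilter,
			 &example_file_ops)) {
		status = -ENOMEM;	/* was: goto with stale status */
		goto cleanup_subsys;
	}
	return 0;

cleanup_subsys:
	unregister_pernet_subsys(&example_net_ops);
	return status;
}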
89710diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
89711new file mode 100644
89712index 0000000..c566332
89713--- /dev/null
89714+++ b/net/netfilter/xt_gradm.c
89715@@ -0,0 +1,51 @@
89716+/*
89717+ * gradm match for netfilter
89718