1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 74c25c8..deadba2 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -186,6 +205,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -195,6 +216,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -204,7 +226,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -214,8 +239,11 @@ series
176 setup
177 setup.bin
178 setup.elf
179+size_overflow_hash.h
180 sImage
181+slabinfo
182 sm_tbl*
183+sortextable
184 split-include
185 syscalltab.h
186 tables.c
187@@ -225,6 +253,7 @@ tftpboot.img
188 timeconst.h
189 times.h*
190 trix_boot.h
191+user_constants.h
192 utsrelease.h*
193 vdso-syms.lds
194 vdso.lds
195@@ -236,13 +265,17 @@ vdso32.lds
196 vdso32.so.dbg
197 vdso64.lds
198 vdso64.so.dbg
199+vdsox32.lds
200+vdsox32-syms.lds
201 version.h*
202 vmImage
203 vmlinux
204 vmlinux-*
205 vmlinux.aout
206 vmlinux.bin.all
207+vmlinux.bin.bz2
208 vmlinux.lds
209+vmlinux.relocs
210 vmlinuz
211 voffset.h
212 vsyscall.lds
213@@ -250,9 +283,11 @@ vsyscall_32.lds
214 wanxlfw.inc
215 uImage
216 unifdef
217+utsrelease.h
218 wakeup.bin
219 wakeup.elf
220 wakeup.lds
221 zImage*
222 zconf.hash.c
223+zconf.lex.c
224 zoffset.h
225diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
226index 9776f06..18b1856 100644
227--- a/Documentation/kernel-parameters.txt
228+++ b/Documentation/kernel-parameters.txt
229@@ -905,6 +905,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
230 gpt [EFI] Forces disk with valid GPT signature but
231 invalid Protective MBR to be treated as GPT.
232
233+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
234+ ignore grsecurity's /proc restrictions
235+
236 hashdist= [KNL,NUMA] Large hashes allocated during boot
237 are distributed across NUMA nodes. Defaults on
238 for 64-bit NUMA, off otherwise.
239@@ -2082,6 +2085,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
240 the specified number of seconds. This is to be used if
241 your oopses keep scrolling off the screen.
242
243+	pax_nouderef	[X86] Disables UDEREF. Most likely needed under certain
244+ virtualization environments that don't cope well with the
245+ expand down segment used by UDEREF on X86-32 or the frequent
246+ page table updates on X86-64.
247+
248+	pax_softmode=	0/1 to disable/enable PaX softmode at boot time.
249+
250 pcbit= [HW,ISDN]
251
252 pcd. [PARIDE]
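
For reference (not part of the patch): these are plain kernel command-line options, e.g. "grsec_proc_gid=1001 pax_softmode=1" appended to the boot loader's kernel line, where 1001 is an arbitrary example GID matching the group configured via GRKERNSEC_PROC_USERGROUP. The actual parsers live elsewhere in the patch; a minimal sketch of the conventional __setup() pattern such a parameter would use (names mirror the parameter, not the patch's handler):

	#include <linux/init.h>
	#include <linux/kernel.h>

	static int pax_softmode;

	static int __init setup_pax_softmode(char *str)
	{
		get_option(&str, &pax_softmode);	/* parse the 0/1 value */
		return 1;				/* parameter handled */
	}
	__setup("pax_softmode=", setup_pax_softmode);
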
253diff --git a/Makefile b/Makefile
254index 5634228..a3278fc 100644
255--- a/Makefile
256+++ b/Makefile
257@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
258
259 HOSTCC = gcc
260 HOSTCXX = g++
261-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
262-HOSTCXXFLAGS = -O2
263+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
263+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
265+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
266
267 # Decide whether to build built-in, modular, or both.
268 # Normally, just do built-in.
269@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
270 # Rules shared between *config targets and build targets
271
272 # Basic helpers built in scripts/
273-PHONY += scripts_basic
274-scripts_basic:
275+PHONY += scripts_basic gcc-plugins
276+scripts_basic: gcc-plugins
277 $(Q)$(MAKE) $(build)=scripts/basic
278 $(Q)rm -f .tmp_quiet_recordmcount
279
280@@ -575,6 +576,64 @@ else
281 KBUILD_CFLAGS += -O2
282 endif
283
284+ifndef DISABLE_PAX_PLUGINS
285+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
286+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
287+else
288+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
289+endif
290+ifneq ($(PLUGINCC),)
291+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
292+ifndef CONFIG_UML
293+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
294+endif
295+endif
296+ifdef CONFIG_PAX_MEMORY_STACKLEAK
297+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
298+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
299+endif
300+ifdef CONFIG_KALLOCSTAT_PLUGIN
301+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
302+endif
303+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
304+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
305+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
306+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
307+endif
308+ifdef CONFIG_CHECKER_PLUGIN
309+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
310+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
311+endif
312+endif
313+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
314+ifdef CONFIG_PAX_SIZE_OVERFLOW
315+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
316+endif
317+ifdef CONFIG_PAX_LATENT_ENTROPY
318+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
319+endif
320+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
321+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
322+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
323+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
324+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
325+ifeq ($(KBUILD_EXTMOD),)
326+gcc-plugins:
327+ $(Q)$(MAKE) $(build)=tools/gcc
328+else
329+gcc-plugins: ;
330+endif
331+else
332+gcc-plugins:
333+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
334+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
335+else
336+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
337+endif
338+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
339+endif
340+endif
341+
342 include $(srctree)/arch/$(SRCARCH)/Makefile
343
344 ifdef CONFIG_READABLE_ASM
345@@ -731,7 +790,7 @@ export mod_sign_cmd
346
347
348 ifeq ($(KBUILD_EXTMOD),)
349-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
350+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
351
352 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
353 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
354@@ -778,6 +837,8 @@ endif
355
356 # The actual objects are generated when descending,
357 # make sure no implicit rule kicks in
358+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
359+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
360 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
361
362 # Handle descending into subdirectories listed in $(vmlinux-dirs)
363@@ -787,7 +848,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
364 # Error messages still appears in the original language
365
366 PHONY += $(vmlinux-dirs)
367-$(vmlinux-dirs): prepare scripts
368+$(vmlinux-dirs): gcc-plugins prepare scripts
369 $(Q)$(MAKE) $(build)=$@
370
371 # Store (new) KERNELRELASE string in include/config/kernel.release
372@@ -831,6 +892,7 @@ prepare0: archprepare FORCE
373 $(Q)$(MAKE) $(build)=.
374
375 # All the preparing..
376+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
377 prepare: prepare0
378
379 # Generate some files
380@@ -938,6 +1000,8 @@ all: modules
381 # using awk while concatenating to the final file.
382
383 PHONY += modules
384+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
385+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
386 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
387 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
388 @$(kecho) ' Building modules, stage 2.';
389@@ -953,7 +1017,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
390
391 # Target to prepare building external modules
392 PHONY += modules_prepare
393-modules_prepare: prepare scripts
394+modules_prepare: gcc-plugins prepare scripts
395
396 # Target to install modules
397 PHONY += modules_install
398@@ -1013,7 +1077,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
399 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
400 signing_key.priv signing_key.x509 x509.genkey \
401 extra_certificates signing_key.x509.keyid \
402- signing_key.x509.signer
403+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
404
405 # clean - Delete most, but leave enough to build external modules
406 #
407@@ -1053,6 +1117,7 @@ distclean: mrproper
408 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
409 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
410 -o -name '.*.rej' \
411+ -o -name '.*.rej' -o -name '*.so' \
412 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
413 -type f -print | xargs rm -f
414
415@@ -1213,6 +1278,8 @@ PHONY += $(module-dirs) modules
416 $(module-dirs): crmodverdir $(objtree)/Module.symvers
417 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
418
419+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
420+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
421 modules: $(module-dirs)
422 @$(kecho) ' Building modules, stage 2.';
423 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
424@@ -1349,17 +1416,21 @@ else
425 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
426 endif
427
428-%.s: %.c prepare scripts FORCE
429+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
430+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
431+%.s: %.c gcc-plugins prepare scripts FORCE
432 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
433 %.i: %.c prepare scripts FORCE
434 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
435-%.o: %.c prepare scripts FORCE
436+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
437+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
438+%.o: %.c gcc-plugins prepare scripts FORCE
439 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
440 %.lst: %.c prepare scripts FORCE
441 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
442-%.s: %.S prepare scripts FORCE
443+%.s: %.S gcc-plugins prepare scripts FORCE
444 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
445-%.o: %.S prepare scripts FORCE
446+%.o: %.S gcc-plugins prepare scripts FORCE
447 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
448 %.symtypes: %.c prepare scripts FORCE
449 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
450@@ -1369,11 +1440,15 @@ endif
451 $(cmd_crmodverdir)
452 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
453 $(build)=$(build-dir)
454-%/: prepare scripts FORCE
455+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
456+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
457+%/: gcc-plugins prepare scripts FORCE
458 $(cmd_crmodverdir)
459 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
460 $(build)=$(build-dir)
461-%.ko: prepare scripts FORCE
462+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
463+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
464+%.ko: gcc-plugins prepare scripts FORCE
465 $(cmd_crmodverdir)
466 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
467 $(build)=$(build-dir) $(@:.ko=.o)
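
Usage note on the plugin machinery above (illustrative, not from the patch): the instrumentation is opt-out rather than opt-in, so on a toolchain without gcc plugin headers the build can still proceed with

	make DISABLE_PAX_PLUGINS=y

at the cost of the constify/stackleak/kernexec/size_overflow passes, exactly as the $(error ...) text in the hunk states; DISABLE_PAX_CONSTIFY_PLUGIN likewise disables only the constify pass.
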
468diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
469index c2cbe4f..f7264b4 100644
470--- a/arch/alpha/include/asm/atomic.h
471+++ b/arch/alpha/include/asm/atomic.h
472@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
473 #define atomic_dec(v) atomic_sub(1,(v))
474 #define atomic64_dec(v) atomic64_sub(1,(v))
475
476+#define atomic64_read_unchecked(v) atomic64_read(v)
477+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
478+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
479+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
480+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
481+#define atomic64_inc_unchecked(v) atomic64_inc(v)
482+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
483+#define atomic64_dec_unchecked(v) atomic64_dec(v)
484+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
485+
486 #define smp_mb__before_atomic_dec() smp_mb()
487 #define smp_mb__after_atomic_dec() smp_mb()
488 #define smp_mb__before_atomic_inc() smp_mb()
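
Why the *_unchecked aliases exist: under PAX_REFCOUNT the regular atomics trap on signed overflow (see the ARM implementation later in this patch), so counters that may legitimately wrap need a separate atomic64_unchecked_t type; alpha carries no REFCOUNT instrumentation, hence the straight aliases above. A hedged usage sketch (the surrounding code is invented; the type is typedef'd to atomic64_t elsewhere in the patch):

	static atomic64_t inode_refs;		/* overflow would be a bug: checked */
	static atomic64_unchecked_t rx_bytes;	/* expected to wrap: unchecked */

	static void on_rx(long len)
	{
		atomic64_inc(&inode_refs);		/* traps under PAX_REFCOUNT */
		atomic64_add_unchecked(len, &rx_bytes);	/* plain add, may wrap */
	}
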
489diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
490index ad368a9..fbe0f25 100644
491--- a/arch/alpha/include/asm/cache.h
492+++ b/arch/alpha/include/asm/cache.h
493@@ -4,19 +4,19 @@
494 #ifndef __ARCH_ALPHA_CACHE_H
495 #define __ARCH_ALPHA_CACHE_H
496
497+#include <linux/const.h>
498
499 /* Bytes per L1 (data) cache line. */
500 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
501-# define L1_CACHE_BYTES 64
502 # define L1_CACHE_SHIFT 6
503 #else
504 /* Both EV4 and EV5 are write-through, read-allocate,
505 direct-mapped, physical.
506 */
507-# define L1_CACHE_BYTES 32
508 # define L1_CACHE_SHIFT 5
509 #endif
510
511+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
512 #define SMP_CACHE_BYTES L1_CACHE_BYTES
513
514 #endif
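
The point of deriving L1_CACHE_BYTES from the shift: _AC() from linux/const.h expands its suffix only in C, so the same constant works in both C and assembly sources. Illustrative expansion (not code from the patch):

	/* _AC(1,UL) -> 1UL when compiled as C, plain 1 when assembled */
	#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)	/* 64 when SHIFT == 6 */
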
515diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
516index 968d999..d36b2df 100644
517--- a/arch/alpha/include/asm/elf.h
518+++ b/arch/alpha/include/asm/elf.h
519@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
520
521 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
522
523+#ifdef CONFIG_PAX_ASLR
524+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
525+
526+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
527+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
528+#endif
529+
530 /* $0 is set by ld.so to a pointer to a function which might be
531 registered using atexit. This provides a mean for the dynamic
532 linker to call DT_FINI functions for shared libraries that have
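
Back-of-the-envelope on the deltas above (assuming the usual PaX convention that each *_LEN is a count of random bits applied at page granularity, and alpha's 8 KB pages): a 64-bit personality gets 2^28 pages ≈ 2 TB of mmap-base randomization and 2^19 pages ≈ 4 GB of stack randomization, while ADDR_LIMIT_32BIT tasks get 2^14 pages ≈ 128 MB for both.

	/* illustrative only — restates the convention, not code from the patch */
	#define PAX_RAND_SPAN(bits)	((1UL << (bits)) << PAGE_SHIFT)
	/* PAX_RAND_SPAN(28) == 1UL << 41 on alpha (PAGE_SHIFT == 13) */
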
533diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
534index bc2a0da..8ad11ee 100644
535--- a/arch/alpha/include/asm/pgalloc.h
536+++ b/arch/alpha/include/asm/pgalloc.h
537@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
538 pgd_set(pgd, pmd);
539 }
540
541+static inline void
542+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
543+{
544+ pgd_populate(mm, pgd, pmd);
545+}
546+
547 extern pgd_t *pgd_alloc(struct mm_struct *mm);
548
549 static inline void
550diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
551index 81a4342..348b927 100644
552--- a/arch/alpha/include/asm/pgtable.h
553+++ b/arch/alpha/include/asm/pgtable.h
554@@ -102,6 +102,17 @@ struct vm_area_struct;
555 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
556 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
557 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
558+
559+#ifdef CONFIG_PAX_PAGEEXEC
560+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
561+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
562+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
563+#else
564+# define PAGE_SHARED_NOEXEC PAGE_SHARED
565+# define PAGE_COPY_NOEXEC PAGE_COPY
566+# define PAGE_READONLY_NOEXEC PAGE_READONLY
567+#endif
568+
569 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
570
571 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
572diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
573index 2fd00b7..cfd5069 100644
574--- a/arch/alpha/kernel/module.c
575+++ b/arch/alpha/kernel/module.c
576@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
577
578 /* The small sections were sorted to the end of the segment.
579 The following should definitely cover them. */
580- gp = (u64)me->module_core + me->core_size - 0x8000;
581+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
582 got = sechdrs[me->arch.gotsecindex].sh_addr;
583
584 for (i = 0; i < n; i++) {
585diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
586index 14db93e..47bed62 100644
587--- a/arch/alpha/kernel/osf_sys.c
588+++ b/arch/alpha/kernel/osf_sys.c
589@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
590 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
591
592 static unsigned long
593-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
594- unsigned long limit)
595+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
596+ unsigned long limit, unsigned long flags)
597 {
598 struct vm_area_struct *vma = find_vma(current->mm, addr);
599-
600+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
601 while (1) {
602 /* At this point: (!vma || addr < vma->vm_end). */
603 if (limit - len < addr)
604 return -ENOMEM;
605- if (!vma || addr + len <= vma->vm_start)
606+ if (check_heap_stack_gap(vma, addr, len, offset))
607 return addr;
608 addr = vma->vm_end;
609 vma = vma->vm_next;
610@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
611 merely specific addresses, but regions of memory -- perhaps
612 this feature should be incorporated into all ports? */
613
614+#ifdef CONFIG_PAX_RANDMMAP
615+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
616+#endif
617+
618 if (addr) {
619- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
620+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
621 if (addr != (unsigned long) -ENOMEM)
622 return addr;
623 }
624
625 /* Next, try allocating at TASK_UNMAPPED_BASE. */
626- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
627- len, limit);
628+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
629+
630 if (addr != (unsigned long) -ENOMEM)
631 return addr;
632
633 /* Finally, try allocating in low memory. */
634- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
635+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
636
637 return addr;
638 }
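
The rewritten walker above replaces the open-coded "!vma || addr + len <= vma->vm_start" fit test with check_heap_stack_gap(), which additionally keeps a randomized, sysctl-controlled gap in front of growing stacks. The helper is provided by grsecurity elsewhere in the patch; a minimal sketch of its shape under that assumption:

	/* sketch, not the patch's implementation */
	static bool check_heap_stack_gap(const struct vm_area_struct *vma,
					 unsigned long addr, unsigned long len,
					 unsigned long offset)
	{
		if (!vma)
			return true;			/* no vma above: fits */
		if (addr + len > vma->vm_start)
			return false;			/* overlaps the next vma */
		if (vma->vm_flags & VM_GROWSDOWN)	/* keep a gap below stacks */
			return addr + len + sysctl_heap_stack_gap + offset <= vma->vm_start;
		return true;
	}
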
639diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
640index 0c4132d..88f0d53 100644
641--- a/arch/alpha/mm/fault.c
642+++ b/arch/alpha/mm/fault.c
643@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
644 __reload_thread(pcb);
645 }
646
647+#ifdef CONFIG_PAX_PAGEEXEC
648+/*
649+ * PaX: decide what to do with offenders (regs->pc = fault address)
650+ *
651+ * returns 1 when task should be killed
652+ * 2 when patched PLT trampoline was detected
653+ * 3 when unpatched PLT trampoline was detected
654+ */
655+static int pax_handle_fetch_fault(struct pt_regs *regs)
656+{
657+
658+#ifdef CONFIG_PAX_EMUPLT
659+ int err;
660+
661+ do { /* PaX: patched PLT emulation #1 */
662+ unsigned int ldah, ldq, jmp;
663+
664+ err = get_user(ldah, (unsigned int *)regs->pc);
665+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
666+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
667+
668+ if (err)
669+ break;
670+
671+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
672+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
673+ jmp == 0x6BFB0000U)
674+ {
675+ unsigned long r27, addr;
676+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
677+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
678+
679+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
680+ err = get_user(r27, (unsigned long *)addr);
681+ if (err)
682+ break;
683+
684+ regs->r27 = r27;
685+ regs->pc = r27;
686+ return 2;
687+ }
688+ } while (0);
689+
690+ do { /* PaX: patched PLT emulation #2 */
691+ unsigned int ldah, lda, br;
692+
693+ err = get_user(ldah, (unsigned int *)regs->pc);
694+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
695+ err |= get_user(br, (unsigned int *)(regs->pc+8));
696+
697+ if (err)
698+ break;
699+
700+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
701+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
702+ (br & 0xFFE00000U) == 0xC3E00000U)
703+ {
704+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
705+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
706+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
707+
708+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
709+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
710+ return 2;
711+ }
712+ } while (0);
713+
714+ do { /* PaX: unpatched PLT emulation */
715+ unsigned int br;
716+
717+ err = get_user(br, (unsigned int *)regs->pc);
718+
719+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
720+ unsigned int br2, ldq, nop, jmp;
721+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
722+
723+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
724+ err = get_user(br2, (unsigned int *)addr);
725+ err |= get_user(ldq, (unsigned int *)(addr+4));
726+ err |= get_user(nop, (unsigned int *)(addr+8));
727+ err |= get_user(jmp, (unsigned int *)(addr+12));
728+ err |= get_user(resolver, (unsigned long *)(addr+16));
729+
730+ if (err)
731+ break;
732+
733+ if (br2 == 0xC3600000U &&
734+ ldq == 0xA77B000CU &&
735+ nop == 0x47FF041FU &&
736+ jmp == 0x6B7B0000U)
737+ {
738+ regs->r28 = regs->pc+4;
739+ regs->r27 = addr+16;
740+ regs->pc = resolver;
741+ return 3;
742+ }
743+ }
744+ } while (0);
745+#endif
746+
747+ return 1;
748+}
749+
750+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
751+{
752+ unsigned long i;
753+
754+ printk(KERN_ERR "PAX: bytes at PC: ");
755+ for (i = 0; i < 5; i++) {
756+ unsigned int c;
757+ if (get_user(c, (unsigned int *)pc+i))
758+ printk(KERN_CONT "???????? ");
759+ else
760+ printk(KERN_CONT "%08x ", c);
761+ }
762+ printk("\n");
763+}
764+#endif
765
766 /*
767 * This routine handles page faults. It determines the address,
768@@ -133,8 +251,29 @@ retry:
769 good_area:
770 si_code = SEGV_ACCERR;
771 if (cause < 0) {
772- if (!(vma->vm_flags & VM_EXEC))
773+ if (!(vma->vm_flags & VM_EXEC)) {
774+
775+#ifdef CONFIG_PAX_PAGEEXEC
776+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
777+ goto bad_area;
778+
779+ up_read(&mm->mmap_sem);
780+ switch (pax_handle_fetch_fault(regs)) {
781+
782+#ifdef CONFIG_PAX_EMUPLT
783+ case 2:
784+ case 3:
785+ return;
786+#endif
787+
788+ }
789+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
790+ do_group_exit(SIGKILL);
791+#else
792 goto bad_area;
793+#endif
794+
795+ }
796 } else if (!cause) {
797 /* Allow reads even for write-only mappings */
798 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
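
For readers decoding the magic constants in pax_handle_fetch_fault() above: each mask isolates the alpha opcode/register fields of the three-instruction PLT stub, and the return codes (1 = kill, 2 = patched PLT emulated, 3 = unpatched PLT routed to the resolver) drive the switch in the fault handler. An equivalent standalone restatement of the first matcher, with the decoded mnemonics:

	/* illustrative re-statement of "patched PLT emulation #1" */
	static int is_patched_plt1(unsigned int ldah, unsigned int ldq, unsigned int jmp)
	{
		return (ldah & 0xFFFF0000U) == 0x277B0000U &&	/* ldah $27, hi($27) */
		       (ldq  & 0xFFFF0000U) == 0xA77B0000U &&	/* ldq  $27, lo($27) */
		       jmp == 0x6BFB0000U;			/* jmp  $31, ($27)   */
	}
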
799diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
800index c79f61f..9ac0642 100644
801--- a/arch/arm/include/asm/atomic.h
802+++ b/arch/arm/include/asm/atomic.h
803@@ -17,17 +17,35 @@
804 #include <asm/barrier.h>
805 #include <asm/cmpxchg.h>
806
807+#ifdef CONFIG_GENERIC_ATOMIC64
808+#include <asm-generic/atomic64.h>
809+#endif
810+
811 #define ATOMIC_INIT(i) { (i) }
812
813 #ifdef __KERNEL__
814
815+#define _ASM_EXTABLE(from, to) \
816+" .pushsection __ex_table,\"a\"\n"\
817+" .align 3\n" \
818+" .long " #from ", " #to"\n" \
819+" .popsection"
820+
821 /*
822 * On ARM, ordinary assignment (str instruction) doesn't clear the local
823 * strex/ldrex monitor on some implementations. The reason we can use it for
824 * atomic_set() is the clrex or dummy strex done on every exception return.
825 */
826 #define atomic_read(v) (*(volatile int *)&(v)->counter)
827+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
828+{
829+ return v->counter;
830+}
831 #define atomic_set(v,i) (((v)->counter) = (i))
832+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
833+{
834+ v->counter = i;
835+}
836
837 #if __LINUX_ARM_ARCH__ >= 6
838
839@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
840 int result;
841
842 __asm__ __volatile__("@ atomic_add\n"
843+"1: ldrex %1, [%3]\n"
844+" adds %0, %1, %4\n"
845+
846+#ifdef CONFIG_PAX_REFCOUNT
847+" bvc 3f\n"
848+"2: bkpt 0xf103\n"
849+"3:\n"
850+#endif
851+
852+" strex %1, %0, [%3]\n"
853+" teq %1, #0\n"
854+" bne 1b"
855+
856+#ifdef CONFIG_PAX_REFCOUNT
857+"\n4:\n"
858+ _ASM_EXTABLE(2b, 4b)
859+#endif
860+
861+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
862+ : "r" (&v->counter), "Ir" (i)
863+ : "cc");
864+}
865+
866+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
867+{
868+ unsigned long tmp;
869+ int result;
870+
871+ __asm__ __volatile__("@ atomic_add_unchecked\n"
872 "1: ldrex %0, [%3]\n"
873 " add %0, %0, %4\n"
874 " strex %1, %0, [%3]\n"
875@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
876 smp_mb();
877
878 __asm__ __volatile__("@ atomic_add_return\n"
879+"1: ldrex %1, [%3]\n"
880+" adds %0, %1, %4\n"
881+
882+#ifdef CONFIG_PAX_REFCOUNT
883+" bvc 3f\n"
884+" mov %0, %1\n"
885+"2: bkpt 0xf103\n"
886+"3:\n"
887+#endif
888+
889+" strex %1, %0, [%3]\n"
890+" teq %1, #0\n"
891+" bne 1b"
892+
893+#ifdef CONFIG_PAX_REFCOUNT
894+"\n4:\n"
895+ _ASM_EXTABLE(2b, 4b)
896+#endif
897+
898+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
899+ : "r" (&v->counter), "Ir" (i)
900+ : "cc");
901+
902+ smp_mb();
903+
904+ return result;
905+}
906+
907+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
908+{
909+ unsigned long tmp;
910+ int result;
911+
912+ smp_mb();
913+
914+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
915 "1: ldrex %0, [%3]\n"
916 " add %0, %0, %4\n"
917 " strex %1, %0, [%3]\n"
918@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
919 int result;
920
921 __asm__ __volatile__("@ atomic_sub\n"
922+"1: ldrex %1, [%3]\n"
923+" subs %0, %1, %4\n"
924+
925+#ifdef CONFIG_PAX_REFCOUNT
926+" bvc 3f\n"
927+"2: bkpt 0xf103\n"
928+"3:\n"
929+#endif
930+
931+" strex %1, %0, [%3]\n"
932+" teq %1, #0\n"
933+" bne 1b"
934+
935+#ifdef CONFIG_PAX_REFCOUNT
936+"\n4:\n"
937+ _ASM_EXTABLE(2b, 4b)
938+#endif
939+
940+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
941+ : "r" (&v->counter), "Ir" (i)
942+ : "cc");
943+}
944+
945+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
946+{
947+ unsigned long tmp;
948+ int result;
949+
950+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
951 "1: ldrex %0, [%3]\n"
952 " sub %0, %0, %4\n"
953 " strex %1, %0, [%3]\n"
954@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
955 smp_mb();
956
957 __asm__ __volatile__("@ atomic_sub_return\n"
958-"1: ldrex %0, [%3]\n"
959-" sub %0, %0, %4\n"
960+"1: ldrex %1, [%3]\n"
961+" subs %0, %1, %4\n"
962+
963+#ifdef CONFIG_PAX_REFCOUNT
964+" bvc 3f\n"
965+" mov %0, %1\n"
966+"2: bkpt 0xf103\n"
967+"3:\n"
968+#endif
969+
970 " strex %1, %0, [%3]\n"
971 " teq %1, #0\n"
972 " bne 1b"
973+
974+#ifdef CONFIG_PAX_REFCOUNT
975+"\n4:\n"
976+ _ASM_EXTABLE(2b, 4b)
977+#endif
978+
979 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
980 : "r" (&v->counter), "Ir" (i)
981 : "cc");
982@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
983 return oldval;
984 }
985
986+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
987+{
988+ unsigned long oldval, res;
989+
990+ smp_mb();
991+
992+ do {
993+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
994+ "ldrex %1, [%3]\n"
995+ "mov %0, #0\n"
996+ "teq %1, %4\n"
997+ "strexeq %0, %5, [%3]\n"
998+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
999+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1000+ : "cc");
1001+ } while (res);
1002+
1003+ smp_mb();
1004+
1005+ return oldval;
1006+}
1007+
1008 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1009 {
1010 unsigned long tmp, tmp2;
1011@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1012
1013 return val;
1014 }
1015+
1016+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1017+{
1018+ return atomic_add_return(i, v);
1019+}
1020+
1021 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1022+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1023+{
1024+ (void) atomic_add_return(i, v);
1025+}
1026
1027 static inline int atomic_sub_return(int i, atomic_t *v)
1028 {
1029@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1030 return val;
1031 }
1032 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1033+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1034+{
1035+ (void) atomic_sub_return(i, v);
1036+}
1037
1038 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1039 {
1040@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1041 return ret;
1042 }
1043
1044+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1045+{
1046+ return atomic_cmpxchg(v, old, new);
1047+}
1048+
1049 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1050 {
1051 unsigned long flags;
1052@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1053 #endif /* __LINUX_ARM_ARCH__ */
1054
1055 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1056+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1057+{
1058+ return xchg(&v->counter, new);
1059+}
1060
1061 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1062 {
1063@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1064 }
1065
1066 #define atomic_inc(v) atomic_add(1, v)
1067+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1068+{
1069+ atomic_add_unchecked(1, v);
1070+}
1071 #define atomic_dec(v) atomic_sub(1, v)
1072+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1073+{
1074+ atomic_sub_unchecked(1, v);
1075+}
1076
1077 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1078+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1079+{
1080+ return atomic_add_return_unchecked(1, v) == 0;
1081+}
1082 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1083 #define atomic_inc_return(v) (atomic_add_return(1, v))
1084+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1085+{
1086+ return atomic_add_return_unchecked(1, v);
1087+}
1088 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1089 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1090
1091@@ -241,6 +428,14 @@ typedef struct {
1092 u64 __aligned(8) counter;
1093 } atomic64_t;
1094
1095+#ifdef CONFIG_PAX_REFCOUNT
1096+typedef struct {
1097+ u64 __aligned(8) counter;
1098+} atomic64_unchecked_t;
1099+#else
1100+typedef atomic64_t atomic64_unchecked_t;
1101+#endif
1102+
1103 #define ATOMIC64_INIT(i) { (i) }
1104
1105 static inline u64 atomic64_read(const atomic64_t *v)
1106@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1107 return result;
1108 }
1109
1110+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1111+{
1112+ u64 result;
1113+
1114+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1115+" ldrexd %0, %H0, [%1]"
1116+ : "=&r" (result)
1117+ : "r" (&v->counter), "Qo" (v->counter)
1118+ );
1119+
1120+ return result;
1121+}
1122+
1123 static inline void atomic64_set(atomic64_t *v, u64 i)
1124 {
1125 u64 tmp;
1126@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1127 : "cc");
1128 }
1129
1130+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1131+{
1132+ u64 tmp;
1133+
1134+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1135+"1: ldrexd %0, %H0, [%2]\n"
1136+" strexd %0, %3, %H3, [%2]\n"
1137+" teq %0, #0\n"
1138+" bne 1b"
1139+ : "=&r" (tmp), "=Qo" (v->counter)
1140+ : "r" (&v->counter), "r" (i)
1141+ : "cc");
1142+}
1143+
1144 static inline void atomic64_add(u64 i, atomic64_t *v)
1145 {
1146 u64 result;
1147@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1148 __asm__ __volatile__("@ atomic64_add\n"
1149 "1: ldrexd %0, %H0, [%3]\n"
1150 " adds %0, %0, %4\n"
1151+" adcs %H0, %H0, %H4\n"
1152+
1153+#ifdef CONFIG_PAX_REFCOUNT
1154+" bvc 3f\n"
1155+"2: bkpt 0xf103\n"
1156+"3:\n"
1157+#endif
1158+
1159+" strexd %1, %0, %H0, [%3]\n"
1160+" teq %1, #0\n"
1161+" bne 1b"
1162+
1163+#ifdef CONFIG_PAX_REFCOUNT
1164+"\n4:\n"
1165+ _ASM_EXTABLE(2b, 4b)
1166+#endif
1167+
1168+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1169+ : "r" (&v->counter), "r" (i)
1170+ : "cc");
1171+}
1172+
1173+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1174+{
1175+ u64 result;
1176+ unsigned long tmp;
1177+
1178+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1179+"1: ldrexd %0, %H0, [%3]\n"
1180+" adds %0, %0, %4\n"
1181 " adc %H0, %H0, %H4\n"
1182 " strexd %1, %0, %H0, [%3]\n"
1183 " teq %1, #0\n"
1184@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1185
1186 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1187 {
1188- u64 result;
1189- unsigned long tmp;
1190+ u64 result, tmp;
1191
1192 smp_mb();
1193
1194 __asm__ __volatile__("@ atomic64_add_return\n"
1195+"1: ldrexd %1, %H1, [%3]\n"
1196+" adds %0, %1, %4\n"
1197+" adcs %H0, %H1, %H4\n"
1198+
1199+#ifdef CONFIG_PAX_REFCOUNT
1200+" bvc 3f\n"
1201+" mov %0, %1\n"
1202+" mov %H0, %H1\n"
1203+"2: bkpt 0xf103\n"
1204+"3:\n"
1205+#endif
1206+
1207+" strexd %1, %0, %H0, [%3]\n"
1208+" teq %1, #0\n"
1209+" bne 1b"
1210+
1211+#ifdef CONFIG_PAX_REFCOUNT
1212+"\n4:\n"
1213+ _ASM_EXTABLE(2b, 4b)
1214+#endif
1215+
1216+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1217+ : "r" (&v->counter), "r" (i)
1218+ : "cc");
1219+
1220+ smp_mb();
1221+
1222+ return result;
1223+}
1224+
1225+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1226+{
1227+ u64 result;
1228+ unsigned long tmp;
1229+
1230+ smp_mb();
1231+
1232+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1233 "1: ldrexd %0, %H0, [%3]\n"
1234 " adds %0, %0, %4\n"
1235 " adc %H0, %H0, %H4\n"
1236@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1237 __asm__ __volatile__("@ atomic64_sub\n"
1238 "1: ldrexd %0, %H0, [%3]\n"
1239 " subs %0, %0, %4\n"
1240+" sbcs %H0, %H0, %H4\n"
1241+
1242+#ifdef CONFIG_PAX_REFCOUNT
1243+" bvc 3f\n"
1244+"2: bkpt 0xf103\n"
1245+"3:\n"
1246+#endif
1247+
1248+" strexd %1, %0, %H0, [%3]\n"
1249+" teq %1, #0\n"
1250+" bne 1b"
1251+
1252+#ifdef CONFIG_PAX_REFCOUNT
1253+"\n4:\n"
1254+ _ASM_EXTABLE(2b, 4b)
1255+#endif
1256+
1257+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1258+ : "r" (&v->counter), "r" (i)
1259+ : "cc");
1260+}
1261+
1262+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1263+{
1264+ u64 result;
1265+ unsigned long tmp;
1266+
1267+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1268+"1: ldrexd %0, %H0, [%3]\n"
1269+" subs %0, %0, %4\n"
1270 " sbc %H0, %H0, %H4\n"
1271 " strexd %1, %0, %H0, [%3]\n"
1272 " teq %1, #0\n"
1273@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1274
1275 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1276 {
1277- u64 result;
1278- unsigned long tmp;
1279+ u64 result, tmp;
1280
1281 smp_mb();
1282
1283 __asm__ __volatile__("@ atomic64_sub_return\n"
1284-"1: ldrexd %0, %H0, [%3]\n"
1285-" subs %0, %0, %4\n"
1286-" sbc %H0, %H0, %H4\n"
1287+"1: ldrexd %1, %H1, [%3]\n"
1288+" subs %0, %1, %4\n"
1289+" sbcs %H0, %H1, %H4\n"
1290+
1291+#ifdef CONFIG_PAX_REFCOUNT
1292+" bvc 3f\n"
1293+" mov %0, %1\n"
1294+" mov %H0, %H1\n"
1295+"2: bkpt 0xf103\n"
1296+"3:\n"
1297+#endif
1298+
1299 " strexd %1, %0, %H0, [%3]\n"
1300 " teq %1, #0\n"
1301 " bne 1b"
1302+
1303+#ifdef CONFIG_PAX_REFCOUNT
1304+"\n4:\n"
1305+ _ASM_EXTABLE(2b, 4b)
1306+#endif
1307+
1308 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1309 : "r" (&v->counter), "r" (i)
1310 : "cc");
1311@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1312 return oldval;
1313 }
1314
1315+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1316+{
1317+ u64 oldval;
1318+ unsigned long res;
1319+
1320+ smp_mb();
1321+
1322+ do {
1323+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1324+ "ldrexd %1, %H1, [%3]\n"
1325+ "mov %0, #0\n"
1326+ "teq %1, %4\n"
1327+ "teqeq %H1, %H4\n"
1328+ "strexdeq %0, %5, %H5, [%3]"
1329+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1330+ : "r" (&ptr->counter), "r" (old), "r" (new)
1331+ : "cc");
1332+ } while (res);
1333+
1334+ smp_mb();
1335+
1336+ return oldval;
1337+}
1338+
1339 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1340 {
1341 u64 result;
1342@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1343
1344 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1345 {
1346- u64 result;
1347- unsigned long tmp;
1348+ u64 result, tmp;
1349
1350 smp_mb();
1351
1352 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1353-"1: ldrexd %0, %H0, [%3]\n"
1354-" subs %0, %0, #1\n"
1355-" sbc %H0, %H0, #0\n"
1356+"1: ldrexd %1, %H1, [%3]\n"
1357+" subs %0, %1, #1\n"
1358+" sbcs %H0, %H1, #0\n"
1359+
1360+#ifdef CONFIG_PAX_REFCOUNT
1361+" bvc 3f\n"
1362+" mov %0, %1\n"
1363+" mov %H0, %H1\n"
1364+"2: bkpt 0xf103\n"
1365+"3:\n"
1366+#endif
1367+
1368 " teq %H0, #0\n"
1369-" bmi 2f\n"
1370+" bmi 4f\n"
1371 " strexd %1, %0, %H0, [%3]\n"
1372 " teq %1, #0\n"
1373 " bne 1b\n"
1374-"2:"
1375+"4:\n"
1376+
1377+#ifdef CONFIG_PAX_REFCOUNT
1378+ _ASM_EXTABLE(2b, 4b)
1379+#endif
1380+
1381 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1382 : "r" (&v->counter)
1383 : "cc");
1384@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1385 " teq %0, %5\n"
1386 " teqeq %H0, %H5\n"
1387 " moveq %1, #0\n"
1388-" beq 2f\n"
1389+" beq 4f\n"
1390 " adds %0, %0, %6\n"
1391-" adc %H0, %H0, %H6\n"
1392+" adcs %H0, %H0, %H6\n"
1393+
1394+#ifdef CONFIG_PAX_REFCOUNT
1395+" bvc 3f\n"
1396+"2: bkpt 0xf103\n"
1397+"3:\n"
1398+#endif
1399+
1400 " strexd %2, %0, %H0, [%4]\n"
1401 " teq %2, #0\n"
1402 " bne 1b\n"
1403-"2:"
1404+"4:\n"
1405+
1406+#ifdef CONFIG_PAX_REFCOUNT
1407+ _ASM_EXTABLE(2b, 4b)
1408+#endif
1409+
1410 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1411 : "r" (&v->counter), "r" (u), "r" (a)
1412 : "cc");
1413@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1414
1415 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1416 #define atomic64_inc(v) atomic64_add(1LL, (v))
1417+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1418 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1419+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1420 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1421 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1422 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1423+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1424 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1425 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1426 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
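
A note on the pattern repeated through this file: the checked variants use the flag-setting forms (adds/adcs, subs/sbcs) so that signed overflow raises the V flag; "bvc 3f" skips the trap on the common path, otherwise "bkpt 0xf103" raises an exception whose _ASM_EXTABLE entry (defined at the top of the hunk) lands at the local label 4, leaving the counter unmodified. In C terms the checked/unchecked split is used like this (hedged example; the surrounding code is invented):

	static atomic_t files_open;		/* refcount semantics: overflow traps */
	static atomic_unchecked_t open_calls;	/* statistic: wraparound is harmless */

	static void on_open(void)
	{
		atomic_inc(&files_open);		/* bkpt 0xf103 on signed overflow */
		atomic_inc_unchecked(&open_calls);	/* plain add, never traps */
	}
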
1427diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1428index 75fe66b..ba3dee4 100644
1429--- a/arch/arm/include/asm/cache.h
1430+++ b/arch/arm/include/asm/cache.h
1431@@ -4,8 +4,10 @@
1432 #ifndef __ASMARM_CACHE_H
1433 #define __ASMARM_CACHE_H
1434
1435+#include <linux/const.h>
1436+
1437 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1438-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1439+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1440
1441 /*
1442 * Memory returned by kmalloc() may be used for DMA, so we must make
1443@@ -24,5 +26,6 @@
1444 #endif
1445
1446 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1447+#define __read_only __attribute__ ((__section__(".data..read_only")))
1448
1449 #endif
1450diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1451index e1489c5..d418304 100644
1452--- a/arch/arm/include/asm/cacheflush.h
1453+++ b/arch/arm/include/asm/cacheflush.h
1454@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1455 void (*dma_unmap_area)(const void *, size_t, int);
1456
1457 void (*dma_flush_range)(const void *, const void *);
1458-};
1459+} __no_const;
1460
1461 /*
1462 * Select the calling method
1463diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1464index 7eb18c1..e38b6d2 100644
1465--- a/arch/arm/include/asm/cmpxchg.h
1466+++ b/arch/arm/include/asm/cmpxchg.h
1467@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1468
1469 #define xchg(ptr,x) \
1470 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1471+#define xchg_unchecked(ptr,x) \
1472+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1473
1474 #include <asm-generic/cmpxchg-local.h>
1475
1476diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1477index ab98fdd..6b19938 100644
1478--- a/arch/arm/include/asm/delay.h
1479+++ b/arch/arm/include/asm/delay.h
1480@@ -24,9 +24,9 @@ extern struct arm_delay_ops {
1481 void (*delay)(unsigned long);
1482 void (*const_udelay)(unsigned long);
1483 void (*udelay)(unsigned long);
1484-} arm_delay_ops;
1485+} *arm_delay_ops;
1486
1487-#define __delay(n) arm_delay_ops.delay(n)
1488+#define __delay(n) arm_delay_ops->delay(n)
1489
1490 /*
1491 * This function intentionally does not exist; if you see references to
1492@@ -47,8 +47,8 @@ extern void __bad_udelay(void);
1493 * first constant multiplications gets optimized away if the delay is
1494 * a constant)
1495 */
1496-#define __udelay(n) arm_delay_ops.udelay(n)
1497-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1498+#define __udelay(n) arm_delay_ops->udelay(n)
1499+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1500
1501 #define udelay(n) \
1502 (__builtin_constant_p(n) ? \
1503diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1504index 38050b1..9d90e8b 100644
1505--- a/arch/arm/include/asm/elf.h
1506+++ b/arch/arm/include/asm/elf.h
1507@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1508 the loader. We need to make sure that it is out of the way of the program
1509 that it will "exec", and that there is sufficient room for the brk. */
1510
1511-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1512+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1513+
1514+#ifdef CONFIG_PAX_ASLR
1515+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1516+
1517+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1518+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1519+#endif
1520
1521 /* When the program starts, a1 contains a pointer to a function to be
1522 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1523@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1524 extern void elf_set_personality(const struct elf32_hdr *);
1525 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1526
1527-struct mm_struct;
1528-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1529-#define arch_randomize_brk arch_randomize_brk
1530-
1531 #endif
1532diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1533index 83eb2f7..ed77159 100644
1534--- a/arch/arm/include/asm/kmap_types.h
1535+++ b/arch/arm/include/asm/kmap_types.h
1536@@ -4,6 +4,6 @@
1537 /*
1538 * This is the "bare minimum". AIO seems to require this.
1539 */
1540-#define KM_TYPE_NR 16
1541+#define KM_TYPE_NR 17
1542
1543 #endif
1544diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1545index 9e614a1..3302cca 100644
1546--- a/arch/arm/include/asm/mach/dma.h
1547+++ b/arch/arm/include/asm/mach/dma.h
1548@@ -22,7 +22,7 @@ struct dma_ops {
1549 int (*residue)(unsigned int, dma_t *); /* optional */
1550 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1551 const char *type;
1552-};
1553+} __do_const;
1554
1555 struct dma_struct {
1556 void *addr; /* single DMA address */
1557diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1558index 195ac2f..2272f0d 100644
1559--- a/arch/arm/include/asm/mach/map.h
1560+++ b/arch/arm/include/asm/mach/map.h
1561@@ -34,6 +34,9 @@ struct map_desc {
1562 #define MT_MEMORY_ITCM 13
1563 #define MT_MEMORY_SO 14
1564 #define MT_MEMORY_DMA_READY 15
1565+#define MT_MEMORY_R 16
1566+#define MT_MEMORY_RW 17
1567+#define MT_MEMORY_RX 18
1568
1569 #ifdef CONFIG_MMU
1570 extern void iotable_init(struct map_desc *, int);
1571diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1572index 53426c6..c7baff3 100644
1573--- a/arch/arm/include/asm/outercache.h
1574+++ b/arch/arm/include/asm/outercache.h
1575@@ -35,7 +35,7 @@ struct outer_cache_fns {
1576 #endif
1577 void (*set_debug)(unsigned long);
1578 void (*resume)(void);
1579-};
1580+} __no_const;
1581
1582 #ifdef CONFIG_OUTER_CACHE
1583
1584diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1585index 812a494..71fc0b6 100644
1586--- a/arch/arm/include/asm/page.h
1587+++ b/arch/arm/include/asm/page.h
1588@@ -114,7 +114,7 @@ struct cpu_user_fns {
1589 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1590 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1591 unsigned long vaddr, struct vm_area_struct *vma);
1592-};
1593+} __no_const;
1594
1595 #ifdef MULTI_USER
1596 extern struct cpu_user_fns cpu_user;
1597diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1598index 943504f..84d0f84 100644
1599--- a/arch/arm/include/asm/pgalloc.h
1600+++ b/arch/arm/include/asm/pgalloc.h
1601@@ -17,6 +17,7 @@
1602 #include <asm/processor.h>
1603 #include <asm/cacheflush.h>
1604 #include <asm/tlbflush.h>
1605+#include <asm/system_info.h>
1606
1607 #define check_pgt_cache() do { } while (0)
1608
1609@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1610 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1611 }
1612
1613+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1614+{
1615+ pud_populate(mm, pud, pmd);
1616+}
1617+
1618 #else /* !CONFIG_ARM_LPAE */
1619
1620 /*
1621@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1622 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1623 #define pmd_free(mm, pmd) do { } while (0)
1624 #define pud_populate(mm,pmd,pte) BUG()
1625+#define pud_populate_kernel(mm,pmd,pte) BUG()
1626
1627 #endif /* CONFIG_ARM_LPAE */
1628
1629@@ -126,6 +133,16 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1630 __free_page(pte);
1631 }
1632
1633+static inline void __pmd_update(pmd_t *pmdp, pmdval_t prot)
1634+{
1635+ pmdval_t pmdval = pmd_val(*pmdp) | prot;
1636+ pmdp[0] = __pmd(pmdval);
1637+#ifndef CONFIG_ARM_LPAE
1638+ pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
1639+#endif
1640+ flush_pmd_entry(pmdp);
1641+}
1642+
1643 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1644 pmdval_t prot)
1645 {
1646@@ -155,7 +172,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1647 static inline void
1648 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1649 {
1650- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1651+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1652 }
1653 #define pmd_pgtable(pmd) pmd_page(pmd)
1654
1655diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1656index 5cfba15..d437dc2 100644
1657--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1658+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1659@@ -20,12 +20,15 @@
1660 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1661 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1662 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1663+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* PXN */
1664 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1665 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1666 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1667+
1668 /*
1669 * - section
1670 */
1671+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0)
1672 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1673 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1674 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1675@@ -37,6 +40,7 @@
1676 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1677 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1678 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1679+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 0))
1680
1681 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1682 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1683diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1684index 2317a71..1897391 100644
1685--- a/arch/arm/include/asm/pgtable-2level.h
1686+++ b/arch/arm/include/asm/pgtable-2level.h
1687@@ -123,6 +123,7 @@
1688 #define L_PTE_USER (_AT(pteval_t, 1) << 8)
1689 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1690 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1691+#define L_PTE_PXN (_AT(pteval_t, 1) << 11) /* v7*/
1692
1693 /*
1694 * These are the memory types, defined to be compatible with
1695diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1696index d795282..d82ff13 100644
1697--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1698+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1699@@ -32,6 +32,7 @@
1700 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1701 #define PMD_BIT4 (_AT(pmdval_t, 0))
1702 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1703+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1704
1705 /*
1706 * - section
1707@@ -41,9 +42,11 @@
1708 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1709 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1710 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1711+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1712 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1713 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1714 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1715+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 1) << 7)
1716 #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
1717
1718 /*
1719@@ -66,6 +69,7 @@
1720 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1721 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1722 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1723+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1724 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1725
1726 /*
1727diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1728index b249035..4ab204b 100644
1729--- a/arch/arm/include/asm/pgtable-3level.h
1730+++ b/arch/arm/include/asm/pgtable-3level.h
1731@@ -73,6 +73,7 @@
1732 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1733 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1734 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1735+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1736 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1737 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1738 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1739@@ -80,6 +81,7 @@
1740 /*
1741 * To be used in assembly code with the upper page attributes.
1742 */
1743+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1744 #define L_PTE_XN_HIGH (1 << (54 - 32))
1745 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1746
1747diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1748index 08c1231..1031bb4 100644
1749--- a/arch/arm/include/asm/pgtable.h
1750+++ b/arch/arm/include/asm/pgtable.h
1751@@ -30,6 +30,9 @@
1752 #include <asm/pgtable-2level.h>
1753 #endif
1754
1755+#define ktla_ktva(addr) (addr)
1756+#define ktva_ktla(addr) (addr)
1757+
1758 /*
1759 * Just any arbitrary offset to the start of the vmalloc VM area: the
1760 * current 8MB value just means that there will be a 8MB "hole" after the
1761@@ -45,6 +48,9 @@
1762 #define LIBRARY_TEXT_START 0x0c000000
1763
1764 #ifndef __ASSEMBLY__
1765+extern pteval_t __supported_pte_mask;
1766+extern pmdval_t __supported_pmd_mask;
1767+
1768 extern void __pte_error(const char *file, int line, pte_t);
1769 extern void __pmd_error(const char *file, int line, pmd_t);
1770 extern void __pgd_error(const char *file, int line, pgd_t);
1771@@ -53,6 +59,17 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1772 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1773 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1774
1775+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1776+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1777+
1778+#ifdef CONFIG_PAX_KERNEXEC
1779+static inline unsigned long pax_open_kernel(void) { return 0; /* TODO */ }
1780+static inline unsigned long pax_close_kernel(void) { return 0; /* TODO */ }
1781+#else
1782+static inline unsigned long pax_open_kernel(void) { return 0; }
1783+static inline unsigned long pax_close_kernel(void) { return 0; }
1784+#endif
1785+
1786 /*
1787 * This is the lowest virtual address we can permit any user space
1788 * mapping to be mapped at. This is particularly important for
1789@@ -63,8 +80,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1790 /*
1791 * The pgprot_* and protection_map entries will be fixed up in runtime
1792 * to include the cachable and bufferable bits based on memory policy,
1793- * as well as any architecture dependent bits like global/ASID and SMP
1794- * shared mapping bits.
1795+ * as well as any architecture dependent bits like global/ASID, PXN,
1796+ * and SMP shared mapping bits.
1797 */
1798 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
1799
1800@@ -242,7 +259,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
1801
1802 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1803 {
1804- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
1805+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | __supported_pte_mask;
1806 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
1807 return pte;
1808 }
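The pte_modify() change above folds the boot-time-detected capability mask into the modify mask, so protection updates carry the PXN bit only on CPUs that actually implement it. A self-contained sketch of the masking (names simplified, not the kernel's definitions):

        #include <stdint.h>

        typedef uint64_t pteval_t;
        #define L_PTE_XN        ((pteval_t)1 << 54)
        #define L_PTE_PXN       ((pteval_t)1 << 53)

        static pteval_t supported_pte_mask;     /* 0, or L_PTE_PXN once detected at boot */

        static pteval_t pte_modify_sketch(pteval_t pte, pteval_t newprot)
        {
                const pteval_t mask = L_PTE_XN | supported_pte_mask;
                /* keep everything outside the mask, take the rest from newprot */
                return (pte & ~mask) | (newprot & mask);
        }
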
1809diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
1810index f3628fb..a0672dd 100644
1811--- a/arch/arm/include/asm/proc-fns.h
1812+++ b/arch/arm/include/asm/proc-fns.h
1813@@ -75,7 +75,7 @@ extern struct processor {
1814 unsigned int suspend_size;
1815 void (*do_suspend)(void *);
1816 void (*do_resume)(void *);
1817-} processor;
1818+} __do_const processor;
1819
1820 #ifndef MULTI_CPU
1821 extern void cpu_proc_init(void);
1822diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
1823index 2e3be16..4dc90fc 100644
1824--- a/arch/arm/include/asm/smp.h
1825+++ b/arch/arm/include/asm/smp.h
1826@@ -106,7 +106,7 @@ struct smp_operations {
1827 int (*cpu_disable)(unsigned int cpu);
1828 #endif
1829 #endif
1830-};
1831+} __no_const;
1832
1833 /*
1834 * set platform specific SMP operations
1835diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1836index 8477b4c..801a6a9 100644
1837--- a/arch/arm/include/asm/thread_info.h
1838+++ b/arch/arm/include/asm/thread_info.h
1839@@ -151,6 +151,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1840 #define TIF_SYSCALL_TRACE 8
1841 #define TIF_SYSCALL_AUDIT 9
1842 #define TIF_SYSCALL_TRACEPOINT 10
1843+
1844+/* within 8 bits of TIF_SYSCALL_TRACE
1845+ * to meet flexible second operand requirements
1846+ */
1847+#define TIF_GRSEC_SETXID 11
1848+
1849 #define TIF_USING_IWMMXT 17
1850 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1851 #define TIF_RESTORE_SIGMASK 20
1852@@ -165,9 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1853 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
1854 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1855 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1856+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1857
1858 /* Checks for any syscall work in entry-common.S */
1859-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
1860+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | \
1861+ _TIF_GRSEC_SETXID)
1862
1863 /*
1864 * Change these and you break ASM code in entry-common.S
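"Flexible second operand" refers to ARM data-processing immediates: an 8-bit value rotated right by an even amount. Keeping TIF_GRSEC_SETXID within 8 bits of the other trace flags keeps masks like the extended _TIF_SYSCALL_WORK encodable in a single tst/bic. A standalone checker illustrating the constraint:

        #include <stdbool.h>
        #include <stdint.h>

        /* true if v can be encoded as an ARM immediate (8-bit value, even rotation) */
        static bool arm_imm_ok(uint32_t v)
        {
                for (unsigned int rot = 0; rot < 32; rot += 2) {
                        uint32_t r = rot ? (v << rot) | (v >> (32 - rot)) : v;
                        if (r <= 0xffu)
                                return true;
                }
                return false;
        }
        /* arm_imm_ok(0x0f00) is true (flags at bits 8..11);
           arm_imm_ok(0x80f00) is false - a flag parked at bit 19 would break it */

Presumably the head.S pgtbl change further down, which splits one add into a mov/sub/add sequence, dodges the same immediate-encoding limits.
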
1865diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1866index 7e1f760..f2c37b1 100644
1867--- a/arch/arm/include/asm/uaccess.h
1868+++ b/arch/arm/include/asm/uaccess.h
1869@@ -418,8 +418,23 @@ do { \
1870
1871
1872 #ifdef CONFIG_MMU
1873-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1874-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1875+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1876+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1877+
1878+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1879+{
1880+ check_object_size(to, n, false);
1881+
1882+ return ___copy_from_user(to, from, n);
1883+}
1884+
1885+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1886+{
1887+ check_object_size(from, n, true);
1888+
1889+ return ___copy_to_user(to, from, n);
1890+}
1891+
1892 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1893 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1894 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1895@@ -431,6 +446,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
1896
1897 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1898 {
1899+ if ((long)n < 0)
1900+ return n;
1901+
1902 if (access_ok(VERIFY_READ, from, n))
1903 n = __copy_from_user(to, from, n);
1904 else /* security hole - plug it */
1905@@ -440,6 +458,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1906
1907 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1908 {
1909+ if ((long)n < 0)
1910+ return n;
1911+
1912 if (access_ok(VERIFY_WRITE, to, n))
1913 n = __copy_to_user(to, from, n);
1914 return n;
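The (long)n < 0 guard in both copy helpers rejects enormous "lengths" produced by signedness bugs (for instance a negative error code flowing into a size_t) before access_ok() or the zero-fill fixup path can operate on gigabytes. A trivial demonstration of the class of value it catches:

        #include <stdio.h>

        int main(void)
        {
                unsigned long n = (unsigned long)-14;   /* e.g. -EFAULT misused as a length */
                if ((long)n < 0)
                        printf("rejected: would have been a %lu-byte copy\n", n);
                return 0;
        }
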
1915diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
1916index 96ee092..37f1844 100644
1917--- a/arch/arm/include/uapi/asm/ptrace.h
1918+++ b/arch/arm/include/uapi/asm/ptrace.h
1919@@ -73,7 +73,7 @@
1920 * ARMv7 groups of PSR bits
1921 */
1922 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
1923-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
1924+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
1925 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
1926 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
1927
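The ISET mask fix is plain CPSR arithmetic: the ISA-state bits are J (bit 24) and T (bit 5), so the mask must be (1 << 24) | (1 << 5) == 0x01000020; the old value 0x01000010 pointed at bit 4, which is not an ISA-state bit.
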
1928diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1929index 60d3b73..9168db0 100644
1930--- a/arch/arm/kernel/armksyms.c
1931+++ b/arch/arm/kernel/armksyms.c
1932@@ -89,8 +89,8 @@ EXPORT_SYMBOL(__memzero);
1933 #ifdef CONFIG_MMU
1934 EXPORT_SYMBOL(copy_page);
1935
1936-EXPORT_SYMBOL(__copy_from_user);
1937-EXPORT_SYMBOL(__copy_to_user);
1938+EXPORT_SYMBOL(___copy_from_user);
1939+EXPORT_SYMBOL(___copy_to_user);
1940 EXPORT_SYMBOL(__clear_user);
1941
1942 EXPORT_SYMBOL(__get_user_1);
1943diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
1944index 486a15a..d95523a 100644
1945--- a/arch/arm/kernel/head.S
1946+++ b/arch/arm/kernel/head.S
1947@@ -52,7 +52,9 @@
1948 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
1949
1950 .macro pgtbl, rd, phys
1951- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
1952+ mov \rd, #TEXT_OFFSET
1953+ sub \rd, #PG_DIR_SIZE
1954+ add \rd, \rd, \phys
1955 .endm
1956
1957 /*
1958diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
1959index 1e9be5d..03edbc2 100644
1960--- a/arch/arm/kernel/module.c
1961+++ b/arch/arm/kernel/module.c
1962@@ -37,12 +37,37 @@
1963 #endif
1964
1965 #ifdef CONFIG_MMU
1966-void *module_alloc(unsigned long size)
1967+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
1968 {
1969+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
1970+ return NULL;
1971 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
1972- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
1973+ GFP_KERNEL, prot, -1,
1974 __builtin_return_address(0));
1975 }
1976+
1977+void *module_alloc(unsigned long size)
1978+{
1979+
1980+#ifdef CONFIG_PAX_KERNEXEC
1981+ return __module_alloc(size, PAGE_KERNEL);
1982+#else
1983+ return __module_alloc(size, PAGE_KERNEL_EXEC);
1984+#endif
1985+
1986+}
1987+
1988+#ifdef CONFIG_PAX_KERNEXEC
1989+void module_free_exec(struct module *mod, void *module_region)
1990+{
1991+ module_free(mod, module_region);
1992+}
1993+
1994+void *module_alloc_exec(unsigned long size)
1995+{
1996+ return __module_alloc(size, PAGE_KERNEL_EXEC);
1997+}
1998+#endif
1999 #endif
2000
2001 int
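Under KERNEXEC the default module_alloc() now hands back PAGE_KERNEL (writable, non-executable) memory, while module_alloc_exec()/module_free_exec() serve the executable side; the loader core, patched elsewhere in this file, places code and data separately (the module_core_rx/module_init_rx fields used in the ia64 hunk further down are part of that split). A conceptual caller sketch, not the actual loader:

        static int layout_sketch(unsigned long text_size, unsigned long data_size)
        {
                void *rw = module_alloc(data_size);             /* PAGE_KERNEL: RW, never executed */
                void *rx = module_alloc_exec(text_size);        /* PAGE_KERNEL_EXEC: code lands here */

                if (!rw || !rx)
                        return -ENOMEM;
                /* the (patched) loader then copies .text into rx and all
                   writable sections into rw */
                return 0;
        }
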
2002diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2003index 90084a6..a8b26bc 100644
2004--- a/arch/arm/kernel/process.c
2005+++ b/arch/arm/kernel/process.c
2006@@ -28,7 +28,6 @@
2007 #include <linux/tick.h>
2008 #include <linux/utsname.h>
2009 #include <linux/uaccess.h>
2010-#include <linux/random.h>
2011 #include <linux/hw_breakpoint.h>
2012 #include <linux/cpuidle.h>
2013 #include <linux/leds.h>
2014@@ -256,9 +255,10 @@ void machine_power_off(void)
2015 machine_shutdown();
2016 if (pm_power_off)
2017 pm_power_off();
2018+ BUG();
2019 }
2020
2021-void machine_restart(char *cmd)
2022+__noreturn void machine_restart(char *cmd)
2023 {
2024 machine_shutdown();
2025
2026@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2027 init_utsname()->release,
2028 (int)strcspn(init_utsname()->version, " "),
2029 init_utsname()->version);
2030- print_symbol("PC is at %s\n", instruction_pointer(regs));
2031- print_symbol("LR is at %s\n", regs->ARM_lr);
2032+ printk("PC is at %pA\n", instruction_pointer(regs));
2033+ printk("LR is at %pA\n", regs->ARM_lr);
2034 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2035 "sp : %08lx ip : %08lx fp : %08lx\n",
2036 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2037@@ -451,12 +451,6 @@ unsigned long get_wchan(struct task_struct *p)
2038 return 0;
2039 }
2040
2041-unsigned long arch_randomize_brk(struct mm_struct *mm)
2042-{
2043- unsigned long range_end = mm->brk + 0x02000000;
2044- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2045-}
2046-
2047 #ifdef CONFIG_MMU
2048 /*
2049 * The vectors page is always readable from user space for the
2050diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2051index 739db3a..7f4a272 100644
2052--- a/arch/arm/kernel/ptrace.c
2053+++ b/arch/arm/kernel/ptrace.c
2054@@ -916,6 +916,10 @@ enum ptrace_syscall_dir {
2055 PTRACE_SYSCALL_EXIT,
2056 };
2057
2058+#ifdef CONFIG_GRKERNSEC_SETXID
2059+extern void gr_delayed_cred_worker(void);
2060+#endif
2061+
2062 static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
2063 enum ptrace_syscall_dir dir)
2064 {
2065@@ -923,6 +927,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
2066
2067 current_thread_info()->syscall = scno;
2068
2069+#ifdef CONFIG_GRKERNSEC_SETXID
2070+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2071+ gr_delayed_cred_worker();
2072+#endif
2073+
2074 if (!test_thread_flag(TIF_SYSCALL_TRACE))
2075 return scno;
2076
2077diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2078index da1d1aa..ef9bc58 100644
2079--- a/arch/arm/kernel/setup.c
2080+++ b/arch/arm/kernel/setup.c
2081@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2082 unsigned int elf_hwcap __read_mostly;
2083 EXPORT_SYMBOL(elf_hwcap);
2084
2085+pteval_t __supported_pte_mask __read_only;
2086+pmdval_t __supported_pmd_mask __read_only;
2087
2088 #ifdef MULTI_CPU
2089-struct processor processor __read_mostly;
2090+struct processor processor;
2091 #endif
2092 #ifdef MULTI_TLB
2093-struct cpu_tlb_fns cpu_tlb __read_mostly;
2094+struct cpu_tlb_fns cpu_tlb __read_only;
2095 #endif
2096 #ifdef MULTI_USER
2097-struct cpu_user_fns cpu_user __read_mostly;
2098+struct cpu_user_fns cpu_user __read_only;
2099 #endif
2100 #ifdef MULTI_CACHE
2101-struct cpu_cache_fns cpu_cache __read_mostly;
2102+struct cpu_cache_fns cpu_cache __read_only;
2103 #endif
2104 #ifdef CONFIG_OUTER_CACHE
2105-struct outer_cache_fns outer_cache __read_mostly;
2106+struct outer_cache_fns outer_cache __read_only;
2107 EXPORT_SYMBOL(outer_cache);
2108 #endif
2109
2110@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
2111 asm("mrc p15, 0, %0, c0, c1, 4"
2112 : "=r" (mmfr0));
2113 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2114- (mmfr0 & 0x000000f0) >= 0x00000030)
2115+ (mmfr0 & 0x000000f0) >= 0x00000030) {
2116 cpu_arch = CPU_ARCH_ARMv7;
2117- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2118+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2119+ __supported_pte_mask |= L_PTE_PXN;
2120+ __supported_pmd_mask |= PMD_PXNTABLE;
2121+ }
2122+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2123 (mmfr0 & 0x000000f0) == 0x00000020)
2124 cpu_arch = CPU_ARCH_ARMv6;
2125 else
2126@@ -455,7 +461,7 @@ static void __init setup_processor(void)
2127 __cpu_architecture = __get_cpu_architecture();
2128
2129 #ifdef MULTI_CPU
2130- processor = *list->proc;
2131+ memcpy((void *)&processor, list->proc, sizeof processor);
2132 #endif
2133 #ifdef MULTI_TLB
2134 cpu_tlb = *list->tlb;
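The added MMFR0 check keys PXN support off the VMSA field, ID_MMFR0[3:0]; per the ARM ARM, values 4 and 5 there indicate VMSAv7 implementations with the PXN bit (5 additionally implying the long-descriptor format). A sketch of the decode, with read_mmfr0() standing in for the mrc above:

        #include <stdbool.h>

        static unsigned int read_mmfr0(void)
        {
                /* on ARM: asm("mrc p15, 0, %0, c0, c1, 4" : "=r"(v)); stubbed here */
                return 0x00000005;      /* pretend: VMSAv7 + PXN + long descriptors */
        }

        static bool cpu_has_pxn(void)
        {
                unsigned int vmsa = read_mmfr0() & 0xf; /* ID_MMFR0.VMSA */
                return vmsa == 4 || vmsa == 5;
        }
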
2135diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2136index fbc8b26..000ded0 100644
2137--- a/arch/arm/kernel/smp.c
2138+++ b/arch/arm/kernel/smp.c
2139@@ -70,7 +70,7 @@ enum ipi_msg_type {
2140
2141 static DECLARE_COMPLETION(cpu_running);
2142
2143-static struct smp_operations smp_ops;
2144+static struct smp_operations smp_ops __read_only;
2145
2146 void __init smp_set_ops(struct smp_operations *ops)
2147 {
2148diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2149index b0179b8..7713948 100644
2150--- a/arch/arm/kernel/traps.c
2151+++ b/arch/arm/kernel/traps.c
2152@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
2153 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
2154 {
2155 #ifdef CONFIG_KALLSYMS
2156- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
2157+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
2158 #else
2159 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
2160 #endif
2161@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2162 static int die_owner = -1;
2163 static unsigned int die_nest_count;
2164
2165+extern void gr_handle_kernel_exploit(void);
2166+
2167 static unsigned long oops_begin(void)
2168 {
2169 int cpu;
2170@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
2171 panic("Fatal exception in interrupt");
2172 if (panic_on_oops)
2173 panic("Fatal exception");
2174+
2175+ gr_handle_kernel_exploit();
2176+
2177 if (signr)
2178 do_exit(signr);
2179 }
2180diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
2181index 36ff15b..75d9e9d 100644
2182--- a/arch/arm/kernel/vmlinux.lds.S
2183+++ b/arch/arm/kernel/vmlinux.lds.S
2184@@ -8,7 +8,11 @@
2185 #include <asm/thread_info.h>
2186 #include <asm/memory.h>
2187 #include <asm/page.h>
2188-
2189+
2190+#ifdef CONFIG_PAX_KERNEXEC
2191+#include <asm/pgtable.h>
2192+#endif
2193+
2194 #define PROC_INFO \
2195 . = ALIGN(4); \
2196 VMLINUX_SYMBOL(__proc_info_begin) = .; \
2197@@ -90,6 +94,11 @@ SECTIONS
2198 _text = .;
2199 HEAD_TEXT
2200 }
2201+
2202+#ifdef CONFIG_PAX_KERNEXEC
2203+ . = ALIGN(1<<SECTION_SHIFT);
2204+#endif
2205+
2206 .text : { /* Real text segment */
2207 _stext = .; /* Text and read-only data */
2208 __exception_text_start = .;
2209@@ -133,6 +142,10 @@ SECTIONS
2210
2211 _etext = .; /* End of text and rodata section */
2212
2213+#ifdef CONFIG_PAX_KERNEXEC
2214+ . = ALIGN(1<<SECTION_SHIFT);
2215+#endif
2216+
2217 #ifndef CONFIG_XIP_KERNEL
2218 . = ALIGN(PAGE_SIZE);
2219 __init_begin = .;
2220@@ -192,6 +205,11 @@ SECTIONS
2221 . = PAGE_OFFSET + TEXT_OFFSET;
2222 #else
2223 __init_end = .;
2224+
2225+#ifdef CONFIG_PAX_KERNEXEC
2226+ . = ALIGN(1<<SECTION_SHIFT);
2227+#endif
2228+
2229 . = ALIGN(THREAD_SIZE);
2230 __data_loc = .;
2231 #endif
2232diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
2233index 66a477a..bee61d3 100644
2234--- a/arch/arm/lib/copy_from_user.S
2235+++ b/arch/arm/lib/copy_from_user.S
2236@@ -16,7 +16,7 @@
2237 /*
2238 * Prototype:
2239 *
2240- * size_t __copy_from_user(void *to, const void *from, size_t n)
2241+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
2242 *
2243 * Purpose:
2244 *
2245@@ -84,11 +84,11 @@
2246
2247 .text
2248
2249-ENTRY(__copy_from_user)
2250+ENTRY(___copy_from_user)
2251
2252 #include "copy_template.S"
2253
2254-ENDPROC(__copy_from_user)
2255+ENDPROC(___copy_from_user)
2256
2257 .pushsection .fixup,"ax"
2258 .align 0
2259diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
2260index 6ee2f67..d1cce76 100644
2261--- a/arch/arm/lib/copy_page.S
2262+++ b/arch/arm/lib/copy_page.S
2263@@ -10,6 +10,7 @@
2264 * ASM optimised string functions
2265 */
2266 #include <linux/linkage.h>
2267+#include <linux/const.h>
2268 #include <asm/assembler.h>
2269 #include <asm/asm-offsets.h>
2270 #include <asm/cache.h>
2271diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
2272index d066df6..df28194 100644
2273--- a/arch/arm/lib/copy_to_user.S
2274+++ b/arch/arm/lib/copy_to_user.S
2275@@ -16,7 +16,7 @@
2276 /*
2277 * Prototype:
2278 *
2279- * size_t __copy_to_user(void *to, const void *from, size_t n)
2280+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
2281 *
2282 * Purpose:
2283 *
2284@@ -88,11 +88,11 @@
2285 .text
2286
2287 ENTRY(__copy_to_user_std)
2288-WEAK(__copy_to_user)
2289+WEAK(___copy_to_user)
2290
2291 #include "copy_template.S"
2292
2293-ENDPROC(__copy_to_user)
2294+ENDPROC(___copy_to_user)
2295 ENDPROC(__copy_to_user_std)
2296
2297 .pushsection .fixup,"ax"
2298diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
2299index 0dc5385..45833ef 100644
2300--- a/arch/arm/lib/delay.c
2301+++ b/arch/arm/lib/delay.c
2302@@ -28,12 +28,14 @@
2303 /*
2304 * Default to the loop-based delay implementation.
2305 */
2306-struct arm_delay_ops arm_delay_ops = {
2307+static struct arm_delay_ops arm_loop_delay_ops = {
2308 .delay = __loop_delay,
2309 .const_udelay = __loop_const_udelay,
2310 .udelay = __loop_udelay,
2311 };
2312
2313+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
2314+
2315 static const struct delay_timer *delay_timer;
2316 static bool delay_calibrated;
2317
2318@@ -67,6 +69,12 @@ static void __timer_udelay(unsigned long usecs)
2319 __timer_const_udelay(usecs * UDELAY_MULT);
2320 }
2321
2322+static struct arm_delay_ops arm_timer_delay_ops = {
2323+ .delay = __timer_delay,
2324+ .const_udelay = __timer_const_udelay,
2325+ .udelay = __timer_udelay,
2326+};
2327+
2328 void __init register_current_timer_delay(const struct delay_timer *timer)
2329 {
2330 if (!delay_calibrated) {
2331@@ -74,9 +82,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
2332 delay_timer = timer;
2333 lpj_fine = timer->freq / HZ;
2334 loops_per_jiffy = lpj_fine;
2335- arm_delay_ops.delay = __timer_delay;
2336- arm_delay_ops.const_udelay = __timer_const_udelay;
2337- arm_delay_ops.udelay = __timer_udelay;
2338+ arm_delay_ops = &arm_timer_delay_ops;
2339 delay_calibrated = true;
2340 } else {
2341 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
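The delay.c rework is a recurring hardening pattern in this patch: instead of one writable ops struct whose fields get patched at runtime (an attractive target for anyone who can write kernel memory), two fully-initialized static tables exist and only a single pointer, which can live in a write-protected section, is swapped. The Kirkwood clock hunk below applies the same idea. A minimal generic sketch:

        struct ops {
                void (*run)(void);
        };

        static void run_loop(void)  { /* calibrated busy loop */ }
        static void run_timer(void) { /* timer-backed delay  */ }

        static const struct ops loop_ops  = { .run = run_loop };
        static const struct ops timer_ops = { .run = run_timer };

        /* the only mutable word; __read_only pins it after init in the patch */
        static const struct ops *cur_ops = &loop_ops;

        void switch_to_timer(void) { cur_ops = &timer_ops; }
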
2342diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
2343index 025f742..8432b08 100644
2344--- a/arch/arm/lib/uaccess_with_memcpy.c
2345+++ b/arch/arm/lib/uaccess_with_memcpy.c
2346@@ -104,7 +104,7 @@ out:
2347 }
2348
2349 unsigned long
2350-__copy_to_user(void __user *to, const void *from, unsigned long n)
2351+___copy_to_user(void __user *to, const void *from, unsigned long n)
2352 {
2353 /*
2354 * This test is stubbed out of the main function above to keep
2355diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
2356index 2c6c218..2b87c2d 100644
2357--- a/arch/arm/mach-kirkwood/common.c
2358+++ b/arch/arm/mach-kirkwood/common.c
2359@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
2360 clk_gate_ops.disable(hw);
2361 }
2362
2363-static struct clk_ops clk_gate_fn_ops;
2364+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
2365+{
2366+ return clk_gate_ops.is_enabled(hw);
2367+}
2368+
2369+static struct clk_ops clk_gate_fn_ops = {
2370+ .enable = clk_gate_fn_enable,
2371+ .disable = clk_gate_fn_disable,
2372+ .is_enabled = clk_gate_fn_is_enabled,
2373+};
2374
2375 static struct clk __init *clk_register_gate_fn(struct device *dev,
2376 const char *name,
2377@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
2378 gate_fn->fn_en = fn_en;
2379 gate_fn->fn_dis = fn_dis;
2380
2381- /* ops is the gate ops, but with our enable/disable functions */
2382- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
2383- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
2384- clk_gate_fn_ops = clk_gate_ops;
2385- clk_gate_fn_ops.enable = clk_gate_fn_enable;
2386- clk_gate_fn_ops.disable = clk_gate_fn_disable;
2387- }
2388-
2389 clk = clk_register(dev, &gate_fn->gate.hw);
2390
2391 if (IS_ERR(clk))
2392diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
2393index d95f727..12f10dd 100644
2394--- a/arch/arm/mach-omap2/board-n8x0.c
2395+++ b/arch/arm/mach-omap2/board-n8x0.c
2396@@ -589,7 +589,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
2397 }
2398 #endif
2399
2400-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
2401+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
2402 .late_init = n8x0_menelaus_late_init,
2403 };
2404
2405diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
2406index 87cc6d0..fd4f248 100644
2407--- a/arch/arm/mach-omap2/omap_hwmod.c
2408+++ b/arch/arm/mach-omap2/omap_hwmod.c
2409@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
2410 int (*is_hardreset_asserted)(struct omap_hwmod *oh,
2411 struct omap_hwmod_rst_info *ohri);
2412 int (*init_clkdm)(struct omap_hwmod *oh);
2413-};
2414+} __no_const;
2415
2416 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
2417-static struct omap_hwmod_soc_ops soc_ops;
2418+static struct omap_hwmod_soc_ops soc_ops __read_only;
2419
2420 /* omap_hwmod_list contains all registered struct omap_hwmods */
2421 static LIST_HEAD(omap_hwmod_list);
2422diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
2423index 5dbf13f..9be36fd 100644
2424--- a/arch/arm/mm/fault.c
2425+++ b/arch/arm/mm/fault.c
2426@@ -25,6 +25,7 @@
2427 #include <asm/system_misc.h>
2428 #include <asm/system_info.h>
2429 #include <asm/tlbflush.h>
2430+#include <asm/sections.h>
2431
2432 #include "fault.h"
2433
2434@@ -138,6 +139,19 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
2435 if (fixup_exception(regs))
2436 return;
2437
2438+#ifdef CONFIG_PAX_KERNEXEC
2439+ if (fsr & FSR_WRITE) {
2440+ if (((unsigned long)_stext <= addr && addr < init_mm.end_code) || (MODULES_VADDR <= addr && addr < MODULES_END)) {
2441+ if (current->signal->curr_ip)
2442+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
2443+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
2444+ else
2445+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
2446+ current->comm, task_pid_nr(current), current_uid(), current_euid());
2447+ }
2448+ }
2449+#endif
2450+
2451 /*
2452 * No handler, we'll have to terminate things with extreme prejudice.
2453 */
2454@@ -174,6 +188,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
2455 }
2456 #endif
2457
2458+#ifdef CONFIG_PAX_PAGEEXEC
2459+ if (fsr & FSR_LNX_PF) {
2460+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
2461+ do_group_exit(SIGKILL);
2462+ }
2463+#endif
2464+
2465 tsk->thread.address = addr;
2466 tsk->thread.error_code = fsr;
2467 tsk->thread.trap_no = 14;
2468@@ -398,6 +419,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
2469 }
2470 #endif /* CONFIG_MMU */
2471
2472+#ifdef CONFIG_PAX_PAGEEXEC
2473+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2474+{
2475+ long i;
2476+
2477+ printk(KERN_ERR "PAX: bytes at PC: ");
2478+ for (i = 0; i < 20; i++) {
2479+ unsigned char c;
2480+ if (get_user(c, (__force unsigned char __user *)pc+i))
2481+ printk(KERN_CONT "?? ");
2482+ else
2483+ printk(KERN_CONT "%02x ", c);
2484+ }
2485+ printk("\n");
2486+
2487+ printk(KERN_ERR "PAX: bytes at SP-4: ");
2488+ for (i = -1; i < 20; i++) {
2489+ unsigned long c;
2490+ if (get_user(c, (__force unsigned long __user *)sp+i))
2491+ printk(KERN_CONT "???????? ");
2492+ else
2493+ printk(KERN_CONT "%08lx ", c);
2494+ }
2495+ printk("\n");
2496+}
2497+#endif
2498+
2499 /*
2500 * First Level Translation Fault Handler
2501 *
2502@@ -575,12 +623,41 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
2503 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
2504 struct siginfo info;
2505
2506+#ifdef CONFIG_PAX_KERNEXEC
2507+ if (!user_mode(regs) && is_xn_fault(ifsr)) {
2508+ if (current->signal->curr_ip)
2509+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
2510+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid(),
2511+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
2512+ else
2513+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
2514+ current->comm, task_pid_nr(current), current_uid(), current_euid(),
2515+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
2516+ goto die;
2517+ }
2518+#endif
2519+
2520+#ifdef CONFIG_PAX_REFCOUNT
2521+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
2522+ unsigned int bkpt;
2523+
2524+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
2525+ current->thread.error_code = ifsr;
2526+ current->thread.trap_no = 0;
2527+ pax_report_refcount_overflow(regs);
2528+ fixup_exception(regs);
2529+ return;
2530+ }
2531+ }
2532+#endif
2533+
2534 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
2535 return;
2536
2537 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
2538 inf->name, ifsr, addr);
2539
2540+die:
2541 info.si_signo = inf->sig;
2542 info.si_errno = 0;
2543 info.si_code = inf->code;
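The magic constant in the REFCOUNT path decodes as an ARM BKPT instruction (condition AL, bits[27:20] = 0x12, bits[7:4] = 0x7), apparently the trap PaX plants at overflow sites; the handler verifies the faulting word really is that breakpoint before reporting. The decode arithmetic as a standalone check:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t insn = 0xe12f1073;
                assert(((insn >> 20) & 0xff) == 0x12);  /* BKPT opcode bits */
                assert(((insn >> 4) & 0xf) == 0x7);
                uint16_t imm = ((insn >> 4) & 0xfff0) | (insn & 0xf);
                assert(imm == 0xf103);                  /* the 16-bit comment field */
                return 0;
        }
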
2544diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
2545index cf08bdf..f1a0383 100644
2546--- a/arch/arm/mm/fault.h
2547+++ b/arch/arm/mm/fault.h
2548@@ -3,6 +3,7 @@
2549
2550 /*
2551 * Fault status register encodings. We steal bit 31 for our own purposes.
2552+ * Set when the FSR value is from an instruction fault.
2553 */
2554 #define FSR_LNX_PF (1 << 31)
2555 #define FSR_WRITE (1 << 11)
2556@@ -22,6 +23,12 @@ static inline int fsr_fs(unsigned int fsr)
2557 }
2558 #endif
2559
2560+/* valid for LPAE and !LPAE */
2561+static inline int is_xn_fault(unsigned int fsr)
2562+{
2563+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
2564+}
2565+
2566 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
2567 unsigned long search_exception_table(unsigned long addr);
2568
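The mask test accepts exactly the four status codes 0x0c-0x0f (bits 0-1 are don't-cares once fsr & 0x3c == 0xc); under LPAE these are the level-0..3 permission faults, and in the short-descriptor case the set covers the faults an instruction-side abort reports for translation/permission problems, which appears to be why the comment claims validity for both formats. A quick enumeration:

        #include <stdio.h>

        int main(void)
        {
                for (unsigned int fs = 0; fs < 0x40; fs++)
                        if ((fs & 0x3c) == 0xc)
                                printf("%#x ", fs);     /* prints: 0xc 0xd 0xe 0xf */
                printf("\n");
                return 0;
        }
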
2569diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
2570index ad722f1..46b670e 100644
2571--- a/arch/arm/mm/init.c
2572+++ b/arch/arm/mm/init.c
2573@@ -734,9 +734,43 @@ void __init mem_init(void)
2574
2575 void free_initmem(void)
2576 {
2577+
2578+#ifdef CONFIG_PAX_KERNEXEC
2579+ unsigned long addr;
2580+ pgd_t *pgd;
2581+ pud_t *pud;
2582+ pmd_t *pmd;
2583+#endif
2584+
2585 #ifdef CONFIG_HAVE_TCM
2586 extern char __tcm_start, __tcm_end;
2587+#endif
2588
2589+#ifdef CONFIG_PAX_KERNEXEC
2590+ /* make page tables, etc. before .text NX */
2591+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += PMD_SIZE) {
2592+ pgd = pgd_offset_k(addr);
2593+ pud = pud_offset(pgd, addr);
2594+ pmd = pmd_offset(pud, addr);
2595+ __pmd_update(pmd, PMD_SECT_XN);
2596+ }
2597+ /* make init NX */
2598+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += PMD_SIZE) {
2599+ pgd = pgd_offset_k(addr);
2600+ pud = pud_offset(pgd, addr);
2601+ pmd = pmd_offset(pud, addr);
2602+ __pmd_update(pmd, PMD_SECT_XN);
2603+ }
2604+ /* make kernel code/rodata read-only */
2605+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += PMD_SIZE) {
2606+ pgd = pgd_offset_k(addr);
2607+ pud = pud_offset(pgd, addr);
2608+ pmd = pmd_offset(pud, addr);
2609+ __pmd_update(pmd, PMD_SECT_AP_RDONLY);
2610+ }
2611+#endif
2612+
2613+#ifdef CONFIG_HAVE_TCM
2614 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
2615 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
2616 __phys_to_pfn(__pa(&__tcm_end)),
2617diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
2618index ce8cb19..061aa14 100644
2619--- a/arch/arm/mm/mmap.c
2620+++ b/arch/arm/mm/mmap.c
2621@@ -72,6 +72,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2622 unsigned long start_addr;
2623 int do_align = 0;
2624 int aliasing = cache_is_vipt_aliasing();
2625+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
2626
2627 /*
2628 * We only need to do colour alignment if either the I or D
2629@@ -93,6 +94,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2630 if (len > TASK_SIZE)
2631 return -ENOMEM;
2632
2633+#ifdef CONFIG_PAX_RANDMMAP
2634+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
2635+#endif
2636+
2637 if (addr) {
2638 if (do_align)
2639 addr = COLOUR_ALIGN(addr, pgoff);
2640@@ -100,15 +105,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
2641 addr = PAGE_ALIGN(addr);
2642
2643 vma = find_vma(mm, addr);
2644- if (TASK_SIZE - len >= addr &&
2645- (!vma || addr + len <= vma->vm_start))
2646+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
2647 return addr;
2648 }
2649 if (len > mm->cached_hole_size) {
2650- start_addr = addr = mm->free_area_cache;
2651+ start_addr = addr = mm->free_area_cache;
2652 } else {
2653- start_addr = addr = mm->mmap_base;
2654- mm->cached_hole_size = 0;
2655+ start_addr = addr = mm->mmap_base;
2656+ mm->cached_hole_size = 0;
2657 }
2658
2659 full_search:
2660@@ -124,14 +128,14 @@ full_search:
2661 * Start a new search - just in case we missed
2662 * some holes.
2663 */
2664- if (start_addr != TASK_UNMAPPED_BASE) {
2665- start_addr = addr = TASK_UNMAPPED_BASE;
2666+ if (start_addr != mm->mmap_base) {
2667+ start_addr = addr = mm->mmap_base;
2668 mm->cached_hole_size = 0;
2669 goto full_search;
2670 }
2671 return -ENOMEM;
2672 }
2673- if (!vma || addr + len <= vma->vm_start) {
2674+ if (check_heap_stack_gap(vma, addr, len, offset)) {
2675 /*
2676 * Remember the place where we stopped the search:
2677 */
2678@@ -156,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2679 unsigned long addr = addr0;
2680 int do_align = 0;
2681 int aliasing = cache_is_vipt_aliasing();
2682+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
2683
2684 /*
2685 * We only need to do colour alignment if either the I or D
2686@@ -175,6 +180,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2687 return addr;
2688 }
2689
2690+#ifdef CONFIG_PAX_RANDMMAP
2691+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
2692+#endif
2693+
2694 /* requesting a specific address */
2695 if (addr) {
2696 if (do_align)
2697@@ -182,8 +191,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2698 else
2699 addr = PAGE_ALIGN(addr);
2700 vma = find_vma(mm, addr);
2701- if (TASK_SIZE - len >= addr &&
2702- (!vma || addr + len <= vma->vm_start))
2703+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
2704 return addr;
2705 }
2706
2707@@ -203,7 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2708 /* make sure it can fit in the remaining address space */
2709 if (addr > len) {
2710 vma = find_vma(mm, addr-len);
2711- if (!vma || addr <= vma->vm_start)
2712+ if (check_heap_stack_gap(vma, addr - len, len, offset))
2713 /* remember the address as a hint for next time */
2714 return (mm->free_area_cache = addr-len);
2715 }
2716@@ -212,17 +220,17 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2717 goto bottomup;
2718
2719 addr = mm->mmap_base - len;
2720- if (do_align)
2721- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2722
2723 do {
2724+ if (do_align)
2725+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2726 /*
2727 * Lookup failure means no vma is above this address,
2728 * else if new region fits below vma->vm_start,
2729 * return with success:
2730 */
2731 vma = find_vma(mm, addr);
2732- if (!vma || addr+len <= vma->vm_start)
2733+ if (check_heap_stack_gap(vma, addr, len, offset))
2734 /* remember the address as a hint for next time */
2735 return (mm->free_area_cache = addr);
2736
2737@@ -231,10 +239,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2738 mm->cached_hole_size = vma->vm_start - addr;
2739
2740 /* try just below the current vma->vm_start */
2741- addr = vma->vm_start - len;
2742- if (do_align)
2743- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2744- } while (len < vma->vm_start);
2745+ addr = skip_heap_stack_gap(vma, len, offset);
2746+ } while (!IS_ERR_VALUE(addr));
2747
2748 bottomup:
2749 /*
2750@@ -259,6 +265,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2751 {
2752 unsigned long random_factor = 0UL;
2753
2754+#ifdef CONFIG_PAX_RANDMMAP
2755+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2756+#endif
2757+
2758 /* 8 bits of randomness in 20 address space bits */
2759 if ((current->flags & PF_RANDOMIZE) &&
2760 !(current->personality & ADDR_NO_RANDOMIZE))
2761@@ -266,10 +276,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2762
2763 if (mmap_is_legacy()) {
2764 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2765+
2766+#ifdef CONFIG_PAX_RANDMMAP
2767+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2768+ mm->mmap_base += mm->delta_mmap;
2769+#endif
2770+
2771 mm->get_unmapped_area = arch_get_unmapped_area;
2772 mm->unmap_area = arch_unmap_area;
2773 } else {
2774 mm->mmap_base = mmap_base(random_factor);
2775+
2776+#ifdef CONFIG_PAX_RANDMMAP
2777+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2778+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2779+#endif
2780+
2781 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2782 mm->unmap_area = arch_unmap_area_topdown;
2783 }
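A reading note on the RANDMMAP guards above: the bare if (!(mm->pax_flags & MF_PAX_RANDMMAP)) with no braces takes the entire following if (addr) block as its body, so randomized tasks skip the "honour the caller's hint" path altogether (a MAP_FIXED request has already returned earlier). After preprocessing, the top-down variant reads:

        if (!(mm->pax_flags & MF_PAX_RANDMMAP))
                /* only non-randomized mms get to use a non-MAP_FIXED hint */
                if (addr) {
                        if (do_align)
                                addr = COLOUR_ALIGN(addr, pgoff);
                        else
                                addr = PAGE_ALIGN(addr);
                        vma = find_vma(mm, addr);
                        if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                                return addr;
                }
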
2784diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
2785index 99b47b9..579b667 100644
2786--- a/arch/arm/mm/mmu.c
2787+++ b/arch/arm/mm/mmu.c
2788@@ -227,16 +227,16 @@ static struct mem_type mem_types[] = {
2789 [MT_UNCACHED] = {
2790 .prot_pte = PROT_PTE_DEVICE,
2791 .prot_l1 = PMD_TYPE_TABLE,
2792- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2793+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_XN,
2794 .domain = DOMAIN_IO,
2795 },
2796 [MT_CACHECLEAN] = {
2797- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2798+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2799 .domain = DOMAIN_KERNEL,
2800 },
2801 #ifndef CONFIG_ARM_LPAE
2802 [MT_MINICLEAN] = {
2803- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
2804+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE | PMD_SECT_AP_RDONLY,
2805 .domain = DOMAIN_KERNEL,
2806 },
2807 #endif
2808@@ -258,8 +258,26 @@ static struct mem_type mem_types[] = {
2809 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
2810 .domain = DOMAIN_KERNEL,
2811 },
2812+ [MT_MEMORY_R] = {
2813+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY | L_PTE_XN,
2814+ .prot_l1 = PMD_TYPE_TABLE,
2815+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY | PMD_SECT_XN,
2816+ .domain = DOMAIN_KERNEL,
2817+ },
2818+ [MT_MEMORY_RW] = {
2819+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN,
2820+ .prot_l1 = PMD_TYPE_TABLE,
2821+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
2822+ .domain = DOMAIN_KERNEL,
2823+ },
2824+ [MT_MEMORY_RX] = {
2825+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY,
2826+ .prot_l1 = PMD_TYPE_TABLE,
2827+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2828+ .domain = DOMAIN_KERNEL,
2829+ },
2830 [MT_ROM] = {
2831- .prot_sect = PMD_TYPE_SECT,
2832+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
2833 .domain = DOMAIN_KERNEL,
2834 },
2835 [MT_MEMORY_NONCACHED] = {
2836@@ -273,7 +291,7 @@ static struct mem_type mem_types[] = {
2837 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
2838 L_PTE_XN,
2839 .prot_l1 = PMD_TYPE_TABLE,
2840- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
2841+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
2842 .domain = DOMAIN_KERNEL,
2843 },
2844 [MT_MEMORY_ITCM] = {
2845@@ -432,6 +450,8 @@ static void __init build_mem_type_table(void)
2846 * from SVC mode and no access from userspace.
2847 */
2848 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2849+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2850+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2851 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2852 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
2853 #endif
2854@@ -450,6 +470,12 @@ static void __init build_mem_type_table(void)
2855 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
2856 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
2857 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
2858+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
2859+ mem_types[MT_MEMORY_R].prot_pte |= L_PTE_SHARED;
2860+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
2861+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
2862+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
2863+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
2864 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
2865 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
2866 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
2867@@ -487,6 +513,8 @@ static void __init build_mem_type_table(void)
2868 vecs_pgprot |= PTE_EXT_AF;
2869 #endif
2870
2871+ user_pgprot |= __supported_pte_mask;
2872+
2873 for (i = 0; i < 16; i++) {
2874 pteval_t v = pgprot_val(protection_map[i]);
2875 protection_map[i] = __pgprot(v | user_pgprot);
2876@@ -503,6 +531,12 @@ static void __init build_mem_type_table(void)
2877 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
2878 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
2879 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
2880+ mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
2881+ mem_types[MT_MEMORY_R].prot_pte |= kern_pgprot;
2882+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
2883+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
2884+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
2885+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
2886 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
2887 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
2888 mem_types[MT_ROM].prot_sect |= cp->pmd;
2889@@ -1198,7 +1232,41 @@ static void __init map_lowmem(void)
2890 map.pfn = __phys_to_pfn(start);
2891 map.virtual = __phys_to_virt(start);
2892 map.length = end - start;
2893+
2894+#ifdef CONFIG_PAX_KERNEXEC
2895+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
2896+ struct map_desc kernel;
2897+ struct map_desc initmap;
2898+
2899+ /* when freeing initmem we will make this RW */
2900+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
2901+ initmap.virtual = (unsigned long)__init_begin;
2902+ initmap.length = _sdata - __init_begin;
2903+ initmap.type = MT_MEMORY;
2904+ create_mapping(&initmap);
2905+
2906+ /* when freeing initmem we will make this RX */
2907+ kernel.pfn = __phys_to_pfn(__pa(_stext));
2908+ kernel.virtual = (unsigned long)_stext;
2909+ kernel.length = __init_begin - _stext;
2910+ kernel.type = MT_MEMORY;
2911+ create_mapping(&kernel);
2912+
2913+ if (map.virtual < (unsigned long)_stext) {
2914+ map.length = (unsigned long)_stext - map.virtual;
2915+ map.type = MT_MEMORY;
2916+ create_mapping(&map);
2917+ }
2918+
2919+ map.pfn = __phys_to_pfn(__pa(_sdata));
2920+ map.virtual = (unsigned long)_sdata;
2921+ map.length = end - __pa(_sdata);
2922+ }
2923+
2924+ map.type = MT_MEMORY_RW;
2925+#else
2926 map.type = MT_MEMORY;
2927+#endif
2928
2929 create_mapping(&map);
2930 }
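Taken together with the free_initmem() walk in the init.c hunk above, the map_lowmem() carve-up yields this section-level layout once init memory is released (a summary, assuming the usual _stext < __init_begin < _sdata link order):

        PAGE_OFFSET  .. _stext         MT_MEMORY    -> made XN in free_initmem (page tables etc.)
        _stext       .. __init_begin   MT_MEMORY    -> made read-only (text + rodata stay RX)
        __init_begin .. _sdata         MT_MEMORY    -> made XN, then freed as init memory
        _sdata       .. end            MT_MEMORY_RW (non-executable data from the start)
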
2931diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2932index ec63e4a..62aa5f1d 100644
2933--- a/arch/arm/plat-orion/include/plat/addr-map.h
2934+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2935@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2936 value in bridge_virt_base */
2937 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2938 const int win);
2939-};
2940+} __no_const;
2941
2942 /*
2943 * Information needed to setup one address mapping.
2944diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2945index f5144cd..71f6d1f 100644
2946--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2947+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2948@@ -47,7 +47,7 @@ struct samsung_dma_ops {
2949 int (*started)(unsigned ch);
2950 int (*flush)(unsigned ch);
2951 int (*stop)(unsigned ch);
2952-};
2953+} __no_const;
2954
2955 extern void *samsung_dmadev_get_ops(void);
2956 extern void *s3c_dma_get_ops(void);
2957diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2958index c3a58a1..78fbf54 100644
2959--- a/arch/avr32/include/asm/cache.h
2960+++ b/arch/avr32/include/asm/cache.h
2961@@ -1,8 +1,10 @@
2962 #ifndef __ASM_AVR32_CACHE_H
2963 #define __ASM_AVR32_CACHE_H
2964
2965+#include <linux/const.h>
2966+
2967 #define L1_CACHE_SHIFT 5
2968-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2969+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2970
2971 /*
2972 * Memory returned by kmalloc() may be used for DMA, so we must make
2973diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2974index e2c3287..6c4f98c 100644
2975--- a/arch/avr32/include/asm/elf.h
2976+++ b/arch/avr32/include/asm/elf.h
2977@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2978 the loader. We need to make sure that it is out of the way of the program
2979 that it will "exec", and that there is sufficient room for the brk. */
2980
2981-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2982+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2983
2984+#ifdef CONFIG_PAX_ASLR
2985+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2986+
2987+#define PAX_DELTA_MMAP_LEN 15
2988+#define PAX_DELTA_STACK_LEN 15
2989+#endif
2990
2991 /* This yields a mask that user programs can use to figure out what
2992 instruction set this CPU supports. This could be done in user space,
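Assuming PaX consumes these constants the usual way (that many random bits applied at page granularity), 15 bits with 4 KiB pages gives

        2^15 pages x 4 KiB/page = 32768 x 4096 B = 134217728 B = 128 MiB

of jitter for both the mmap and stack bases on avr32.
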
2993diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2994index 479330b..53717a8 100644
2995--- a/arch/avr32/include/asm/kmap_types.h
2996+++ b/arch/avr32/include/asm/kmap_types.h
2997@@ -2,9 +2,9 @@
2998 #define __ASM_AVR32_KMAP_TYPES_H
2999
3000 #ifdef CONFIG_DEBUG_HIGHMEM
3001-# define KM_TYPE_NR 29
3002+# define KM_TYPE_NR 30
3003 #else
3004-# define KM_TYPE_NR 14
3005+# define KM_TYPE_NR 15
3006 #endif
3007
3008 #endif /* __ASM_AVR32_KMAP_TYPES_H */
3009diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
3010index b2f2d2d..d1c85cb 100644
3011--- a/arch/avr32/mm/fault.c
3012+++ b/arch/avr32/mm/fault.c
3013@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
3014
3015 int exception_trace = 1;
3016
3017+#ifdef CONFIG_PAX_PAGEEXEC
3018+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3019+{
3020+ unsigned long i;
3021+
3022+ printk(KERN_ERR "PAX: bytes at PC: ");
3023+ for (i = 0; i < 20; i++) {
3024+ unsigned char c;
3025+ if (get_user(c, (unsigned char *)pc+i))
3026+ printk(KERN_CONT "???????? ");
3026+ printk(KERN_CONT "?? ");
3027+ else
3028+ printk(KERN_CONT "%02x ", c);
3029+ }
3030+ printk("\n");
3031+}
3032+#endif
3033+
3034 /*
3035 * This routine handles page faults. It determines the address and the
3036 * problem, and then passes it off to one of the appropriate routines.
3037@@ -174,6 +191,16 @@ bad_area:
3038 up_read(&mm->mmap_sem);
3039
3040 if (user_mode(regs)) {
3041+
3042+#ifdef CONFIG_PAX_PAGEEXEC
3043+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3044+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
3045+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
3046+ do_group_exit(SIGKILL);
3047+ }
3048+ }
3049+#endif
3050+
3051 if (exception_trace && printk_ratelimit())
3052 printk("%s%s[%d]: segfault at %08lx pc %08lx "
3053 "sp %08lx ecr %lu\n",
3054diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
3055index 568885a..f8008df 100644
3056--- a/arch/blackfin/include/asm/cache.h
3057+++ b/arch/blackfin/include/asm/cache.h
3058@@ -7,6 +7,7 @@
3059 #ifndef __ARCH_BLACKFIN_CACHE_H
3060 #define __ARCH_BLACKFIN_CACHE_H
3061
3062+#include <linux/const.h>
3063 #include <linux/linkage.h> /* for asmlinkage */
3064
3065 /*
3066@@ -14,7 +15,7 @@
3067 * Blackfin loads 32 bytes for cache
3068 */
3069 #define L1_CACHE_SHIFT 5
3070-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3071+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3072 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3073
3074 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
3075diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
3076index aea2718..3639a60 100644
3077--- a/arch/cris/include/arch-v10/arch/cache.h
3078+++ b/arch/cris/include/arch-v10/arch/cache.h
3079@@ -1,8 +1,9 @@
3080 #ifndef _ASM_ARCH_CACHE_H
3081 #define _ASM_ARCH_CACHE_H
3082
3083+#include <linux/const.h>
3084 /* Etrax 100LX have 32-byte cache-lines. */
3085-#define L1_CACHE_BYTES 32
3086 #define L1_CACHE_SHIFT 5
3087+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3088
3089 #endif /* _ASM_ARCH_CACHE_H */
3090diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
3091index 7caf25d..ee65ac5 100644
3092--- a/arch/cris/include/arch-v32/arch/cache.h
3093+++ b/arch/cris/include/arch-v32/arch/cache.h
3094@@ -1,11 +1,12 @@
3095 #ifndef _ASM_CRIS_ARCH_CACHE_H
3096 #define _ASM_CRIS_ARCH_CACHE_H
3097
3098+#include <linux/const.h>
3099 #include <arch/hwregs/dma.h>
3100
3101 /* A cache-line is 32 bytes. */
3102-#define L1_CACHE_BYTES 32
3103 #define L1_CACHE_SHIFT 5
3104+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3105
3106 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
3107
3108diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
3109index b86329d..6709906 100644
3110--- a/arch/frv/include/asm/atomic.h
3111+++ b/arch/frv/include/asm/atomic.h
3112@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
3113 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
3114 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
3115
3116+#define atomic64_read_unchecked(v) atomic64_read(v)
3117+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3118+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3119+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3120+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3121+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3122+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3123+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3124+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3125+
3126 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
3127 {
3128 int c, old;
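frv (like ia64 just below) has no overflow-instrumented atomics, so the *_unchecked variants simply alias the plain ones; on instrumented architectures the plain ops trap on overflow while the unchecked ones are reserved for counters that may legitimately wrap. A conceptual, self-contained sketch of the distinction (not frv's implementation):

        struct counter { int v; };

        /* checked: roughly what instrumented arches do, minus the atomicity */
        static inline void add_checked(int i, struct counter *c)
        {
                if (__builtin_add_overflow(c->v, i, &c->v))
                        __builtin_trap();       /* PaX reports a refcount overflow here */
        }

        /* unchecked: plain wrapping add, fine for statistics counters */
        static inline void add_unchecked(int i, struct counter *c)
        {
                c->v += i;
        }
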
3129diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
3130index 2797163..c2a401d 100644
3131--- a/arch/frv/include/asm/cache.h
3132+++ b/arch/frv/include/asm/cache.h
3133@@ -12,10 +12,11 @@
3134 #ifndef __ASM_CACHE_H
3135 #define __ASM_CACHE_H
3136
3137+#include <linux/const.h>
3138
3139 /* bytes per L1 cache line */
3140 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
3141-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3143
3144 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
3145 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
3146diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
3147index 43901f2..0d8b865 100644
3148--- a/arch/frv/include/asm/kmap_types.h
3149+++ b/arch/frv/include/asm/kmap_types.h
3150@@ -2,6 +2,6 @@
3151 #ifndef _ASM_KMAP_TYPES_H
3152 #define _ASM_KMAP_TYPES_H
3153
3154-#define KM_TYPE_NR 17
3155+#define KM_TYPE_NR 18
3156
3157 #endif
3158diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
3159index 385fd30..3aaf4fe 100644
3160--- a/arch/frv/mm/elf-fdpic.c
3161+++ b/arch/frv/mm/elf-fdpic.c
3162@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3163 {
3164 struct vm_area_struct *vma;
3165 unsigned long limit;
3166+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
3167
3168 if (len > TASK_SIZE)
3169 return -ENOMEM;
3170@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3171 if (addr) {
3172 addr = PAGE_ALIGN(addr);
3173 vma = find_vma(current->mm, addr);
3174- if (TASK_SIZE - len >= addr &&
3175- (!vma || addr + len <= vma->vm_start))
3176+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3177 goto success;
3178 }
3179
3180@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3181 for (; vma; vma = vma->vm_next) {
3182 if (addr > limit)
3183 break;
3184- if (addr + len <= vma->vm_start)
3185+ if (check_heap_stack_gap(vma, addr, len, offset))
3186 goto success;
3187 addr = vma->vm_end;
3188 }
3189@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3190 for (; vma; vma = vma->vm_next) {
3191 if (addr > limit)
3192 break;
3193- if (addr + len <= vma->vm_start)
3194+ if (check_heap_stack_gap(vma, addr, len, offset))
3195 goto success;
3196 addr = vma->vm_end;
3197 }
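As in the ARM mmap.c hunk earlier, check_heap_stack_gap() plus the offset from gr_rand_threadstack_offset() replaces the bare "!vma || addr + len <= vma->vm_start" fit test in every arch_get_unmapped_area this patch touches. The helper itself lives in the common mm changes, not in this section; a guess at its semantics, for orientation only:

        /* illustrative only - the real helper also consults the heap_stack_gap sysctl */
        static bool fits_below(const struct vm_area_struct *vma, unsigned long addr,
                               unsigned long len, unsigned long offset)
        {
                if (!vma)
                        return true;
                if (addr + len > vma->vm_start)
                        return false;
                /* keep an extra guard gap under a downward-growing (stack) mapping */
                if (vma->vm_flags & VM_GROWSDOWN)
                        return vma->vm_start - (addr + len) >= offset;
                return true;
        }
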
3198diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
3199index f4ca594..adc72fd6 100644
3200--- a/arch/hexagon/include/asm/cache.h
3201+++ b/arch/hexagon/include/asm/cache.h
3202@@ -21,9 +21,11 @@
3203 #ifndef __ASM_CACHE_H
3204 #define __ASM_CACHE_H
3205
3206+#include <linux/const.h>
3207+
3208 /* Bytes per L1 cache line */
3209-#define L1_CACHE_SHIFT (5)
3210-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3211+#define L1_CACHE_SHIFT 5
3212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3213
3214 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
3215 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
3216diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
3217index 6e6fe18..a6ae668 100644
3218--- a/arch/ia64/include/asm/atomic.h
3219+++ b/arch/ia64/include/asm/atomic.h
3220@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
3221 #define atomic64_inc(v) atomic64_add(1, (v))
3222 #define atomic64_dec(v) atomic64_sub(1, (v))
3223
3224+#define atomic64_read_unchecked(v) atomic64_read(v)
3225+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3226+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3227+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3228+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3229+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3230+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3231+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3232+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3233+
3234 /* Atomic operations are already serializing */
3235 #define smp_mb__before_atomic_dec() barrier()
3236 #define smp_mb__after_atomic_dec() barrier()
3237diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
3238index 988254a..e1ee885 100644
3239--- a/arch/ia64/include/asm/cache.h
3240+++ b/arch/ia64/include/asm/cache.h
3241@@ -1,6 +1,7 @@
3242 #ifndef _ASM_IA64_CACHE_H
3243 #define _ASM_IA64_CACHE_H
3244
3245+#include <linux/const.h>
3246
3247 /*
3248 * Copyright (C) 1998-2000 Hewlett-Packard Co
3249@@ -9,7 +10,7 @@
3250
3251 /* Bytes per L1 (data) cache line. */
3252 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
3253-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3254+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3255
3256 #ifdef CONFIG_SMP
3257 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
3258diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
3259index b5298eb..67c6e62 100644
3260--- a/arch/ia64/include/asm/elf.h
3261+++ b/arch/ia64/include/asm/elf.h
3262@@ -42,6 +42,13 @@
3263 */
3264 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
3265
3266+#ifdef CONFIG_PAX_ASLR
3267+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
3268+
3269+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
3270+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
3271+#endif
3272+
3273 #define PT_IA_64_UNWIND 0x70000001
3274
3275 /* IA-64 relocations: */
3276diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
3277index 96a8d92..617a1cf 100644
3278--- a/arch/ia64/include/asm/pgalloc.h
3279+++ b/arch/ia64/include/asm/pgalloc.h
3280@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3281 pgd_val(*pgd_entry) = __pa(pud);
3282 }
3283
3284+static inline void
3285+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
3286+{
3287+ pgd_populate(mm, pgd_entry, pud);
3288+}
3289+
3290 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3291 {
3292 return quicklist_alloc(0, GFP_KERNEL, NULL);
3293@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3294 pud_val(*pud_entry) = __pa(pmd);
3295 }
3296
3297+static inline void
3298+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
3299+{
3300+ pud_populate(mm, pud_entry, pmd);
3301+}
3302+
3303 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
3304 {
3305 return quicklist_alloc(0, GFP_KERNEL, NULL);
3306diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
3307index 815810c..d60bd4c 100644
3308--- a/arch/ia64/include/asm/pgtable.h
3309+++ b/arch/ia64/include/asm/pgtable.h
3310@@ -12,7 +12,7 @@
3311 * David Mosberger-Tang <davidm@hpl.hp.com>
3312 */
3313
3314-
3315+#include <linux/const.h>
3316 #include <asm/mman.h>
3317 #include <asm/page.h>
3318 #include <asm/processor.h>
3319@@ -142,6 +142,17 @@
3320 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3321 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3322 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
3323+
3324+#ifdef CONFIG_PAX_PAGEEXEC
3325+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
3326+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3327+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
3328+#else
3329+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3330+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3331+# define PAGE_COPY_NOEXEC PAGE_COPY
3332+#endif
3333+
3334 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
3335 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
3336 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
3337diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
3338index 54ff557..70c88b7 100644
3339--- a/arch/ia64/include/asm/spinlock.h
3340+++ b/arch/ia64/include/asm/spinlock.h
3341@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
3342 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
3343
3344 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
3345- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
3346+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
3347 }
3348
3349 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
3350diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
3351index 449c8c0..50cdf87 100644
3352--- a/arch/ia64/include/asm/uaccess.h
3353+++ b/arch/ia64/include/asm/uaccess.h
3354@@ -42,6 +42,8 @@
3355 #include <asm/pgtable.h>
3356 #include <asm/io.h>
3357
3358+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3359+
3360 /*
3361 * For historical reasons, the following macros are grossly misnamed:
3362 */
3363@@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
3364 static inline unsigned long
3365 __copy_to_user (void __user *to, const void *from, unsigned long count)
3366 {
3367+ if (count > INT_MAX)
3368+ return count;
3369+
3370+ if (!__builtin_constant_p(count))
3371+ check_object_size(from, count, true);
3372+
3373 return __copy_user(to, (__force void __user *) from, count);
3374 }
3375
3376 static inline unsigned long
3377 __copy_from_user (void *to, const void __user *from, unsigned long count)
3378 {
3379+ if (count > INT_MAX)
3380+ return count;
3381+
3382+ if (!__builtin_constant_p(count))
3383+ check_object_size(to, count, false);
3384+
3385 return __copy_user((__force void __user *) to, from, count);
3386 }
3387
3388@@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3389 ({ \
3390 void __user *__cu_to = (to); \
3391 const void *__cu_from = (from); \
3392- long __cu_len = (n); \
3393+ unsigned long __cu_len = (n); \
3394 \
3395- if (__access_ok(__cu_to, __cu_len, get_fs())) \
3396+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
3397+ if (!__builtin_constant_p(n)) \
3398+ check_object_size(__cu_from, __cu_len, true); \
3399 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
3400+ } \
3401 __cu_len; \
3402 })
3403
3404@@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
3405 ({ \
3406 void *__cu_to = (to); \
3407 const void __user *__cu_from = (from); \
3408- long __cu_len = (n); \
3409+ unsigned long __cu_len = (n); \
3410 \
3411 __chk_user_ptr(__cu_from); \
3412- if (__access_ok(__cu_from, __cu_len, get_fs())) \
3413+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
3414+ if (!__builtin_constant_p(n)) \
3415+ check_object_size(__cu_to, __cu_len, false); \
3416 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
3417+ } \
3418 __cu_len; \
3419 })
3420
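
These uaccess changes wire up the PAX_USERCOPY checks for ia64: counts above
INT_MAX are rejected outright, and for non-constant counts check_object_size()
verifies that the kernel-side buffer really spans the requested copy (constant
counts can be validated at compile time, hence the __builtin_constant_p()
guard). A rough userland model of the bounds test, with explicit object bounds
standing in for the slab and stack metadata the real helper consults:

#include <stdio.h>

/* Hypothetical stand-in for check_object_size(): does [ptr, ptr+n)
 * stay inside the object [obj, obj+objsize)? */
static int object_spans_copy(const void *obj, size_t objsize,
                             const void *ptr, size_t n)
{
    const char *base = obj, *p = ptr;
    size_t off;

    if (p < base)
        return 0;
    off = (size_t)(p - base);
    return off <= objsize && n <= objsize - off;
}

int main(void)
{
    char buf[16];
    printf("%d\n", object_spans_copy(buf, sizeof buf, buf + 4, 12)); /* 1: fits */
    printf("%d\n", object_spans_copy(buf, sizeof buf, buf + 4, 13)); /* 0: overruns */
    return 0;
}
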
3421diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
3422index 24603be..948052d 100644
3423--- a/arch/ia64/kernel/module.c
3424+++ b/arch/ia64/kernel/module.c
3425@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
3426 void
3427 module_free (struct module *mod, void *module_region)
3428 {
3429- if (mod && mod->arch.init_unw_table &&
3430- module_region == mod->module_init) {
3431+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
3432 unw_remove_unwind_table(mod->arch.init_unw_table);
3433 mod->arch.init_unw_table = NULL;
3434 }
3435@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
3436 }
3437
3438 static inline int
3439+in_init_rx (const struct module *mod, uint64_t addr)
3440+{
3441+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
3442+}
3443+
3444+static inline int
3445+in_init_rw (const struct module *mod, uint64_t addr)
3446+{
3447+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
3448+}
3449+
3450+static inline int
3451 in_init (const struct module *mod, uint64_t addr)
3452 {
3453- return addr - (uint64_t) mod->module_init < mod->init_size;
3454+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
3455+}
3456+
3457+static inline int
3458+in_core_rx (const struct module *mod, uint64_t addr)
3459+{
3460+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
3461+}
3462+
3463+static inline int
3464+in_core_rw (const struct module *mod, uint64_t addr)
3465+{
3466+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
3467 }
3468
3469 static inline int
3470 in_core (const struct module *mod, uint64_t addr)
3471 {
3472- return addr - (uint64_t) mod->module_core < mod->core_size;
3473+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
3474 }
3475
3476 static inline int
3477@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
3478 break;
3479
3480 case RV_BDREL:
3481- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
3482+ if (in_init_rx(mod, val))
3483+ val -= (uint64_t) mod->module_init_rx;
3484+ else if (in_init_rw(mod, val))
3485+ val -= (uint64_t) mod->module_init_rw;
3486+ else if (in_core_rx(mod, val))
3487+ val -= (uint64_t) mod->module_core_rx;
3488+ else if (in_core_rw(mod, val))
3489+ val -= (uint64_t) mod->module_core_rw;
3490 break;
3491
3492 case RV_LTV:
3493@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
3494 * addresses have been selected...
3495 */
3496 uint64_t gp;
3497- if (mod->core_size > MAX_LTOFF)
3498+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
3499 /*
3500 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
3501 * at the end of the module.
3502 */
3503- gp = mod->core_size - MAX_LTOFF / 2;
3504+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
3505 else
3506- gp = mod->core_size / 2;
3507- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
3508+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
3509+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
3510 mod->arch.gp = gp;
3511 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
3512 }
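
The module.c rework follows from the patch's RX/RW split of module memory:
struct module's single module_core/module_init regions become an executable,
read-only pair (module_core_rx/module_init_rx) plus a writable, non-executable
pair (_rw), so address classification and the GOT/gp placement must name the
correct half. All of the helpers above use the same single-comparison range
test, modelled here:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout mirroring the rx/rw split used above. */
struct mod_layout {
    uintptr_t core_rx, core_rw;
    size_t core_size_rx, core_size_rw;
};

/* The unsigned subtraction folds "addr >= base && addr < base + size"
 * into one comparison, exactly as in_init_rx()/in_core_rx() do. */
static int in_range(uintptr_t addr, uintptr_t base, size_t size)
{
    return addr - base < size;
}

int main(void)
{
    struct mod_layout m = { 0x1000, 0x9000, 0x2000, 0x1000 };
    uintptr_t addr = 0x1844;

    if (in_range(addr, m.core_rx, m.core_size_rx))
        puts("addr lies in the RX (code) half");
    else if (in_range(addr, m.core_rw, m.core_size_rw))
        puts("addr lies in the RW (data) half");
    else
        puts("addr is outside this module");
    return 0;
}
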
3513diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
3514index d9439ef..d0cac6b 100644
3515--- a/arch/ia64/kernel/sys_ia64.c
3516+++ b/arch/ia64/kernel/sys_ia64.c
3517@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3518 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
3519 struct mm_struct *mm = current->mm;
3520 struct vm_area_struct *vma;
3521+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3522
3523 if (len > RGN_MAP_LIMIT)
3524 return -ENOMEM;
3525@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3526 if (REGION_NUMBER(addr) == RGN_HPAGE)
3527 addr = 0;
3528 #endif
3529+
3530+#ifdef CONFIG_PAX_RANDMMAP
3531+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3532+ addr = mm->free_area_cache;
3533+ else
3534+#endif
3535+
3536 if (!addr)
3537 addr = mm->free_area_cache;
3538
3539@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
3540 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
3541 /* At this point: (!vma || addr < vma->vm_end). */
3542 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
3543- if (start_addr != TASK_UNMAPPED_BASE) {
3544+ if (start_addr != mm->mmap_base) {
3545 /* Start a new search --- just in case we missed some holes. */
3546- addr = TASK_UNMAPPED_BASE;
3547+ addr = mm->mmap_base;
3548 goto full_search;
3549 }
3550 return -ENOMEM;
3551 }
3552- if (!vma || addr + len <= vma->vm_start) {
3553+ if (check_heap_stack_gap(vma, addr, len, offset)) {
3554 /* Remember the address where we stopped this search: */
3555 mm->free_area_cache = addr + len;
3556 return addr;
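
check_heap_stack_gap() replaces the bare "!vma || addr + len <= vma->vm_start"
test wherever a candidate mapping is probed: besides requiring the range to
end before the next VMA, it keeps a guard gap (plus the per-mapping random
offset computed by gr_rand_threadstack_offset()) in front of stack-like VMAs
so heap mappings cannot be placed flush against a stack. A hedged sketch of
the semantics, with a hard-coded gap standing in for the tunable one the real
helper uses:

#include <stdbool.h>

struct vma { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN 0x100UL

static bool check_gap(const struct vma *next, unsigned long addr,
                      unsigned long len, unsigned long rand_offset)
{
    unsigned long gap = 0;

    if (!next)                         /* nothing above: always fits */
        return true;
    if (next->vm_flags & VM_GROWSDOWN) /* guard band before a stack  */
        gap = 0x10000UL + rand_offset;
    return addr + len + gap <= next->vm_start;
}

int main(void)
{
    struct vma stack = { 0x7f0000UL, 0x800000UL, VM_GROWSDOWN };
    return check_gap(&stack, 0x700000UL, 0x1000UL, 0UL) ? 0 : 1;
}
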
3557diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
3558index c64460b..4d250a6 100644
3559--- a/arch/ia64/kernel/topology.c
3560+++ b/arch/ia64/kernel/topology.c
3561@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
3562 return NOTIFY_OK;
3563 }
3564
3565-static struct notifier_block __cpuinitdata cache_cpu_notifier =
3566+static struct notifier_block cache_cpu_notifier =
3567 {
3568 .notifier_call = cache_cpu_callback
3569 };
3570diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
3571index 0ccb28f..8992469 100644
3572--- a/arch/ia64/kernel/vmlinux.lds.S
3573+++ b/arch/ia64/kernel/vmlinux.lds.S
3574@@ -198,7 +198,7 @@ SECTIONS {
3575 /* Per-cpu data: */
3576 . = ALIGN(PERCPU_PAGE_SIZE);
3577 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
3578- __phys_per_cpu_start = __per_cpu_load;
3579+ __phys_per_cpu_start = per_cpu_load;
3580 /*
3581 * ensure percpu data fits
3582 * into percpu page size
3583diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
3584index 6cf0341..d352594 100644
3585--- a/arch/ia64/mm/fault.c
3586+++ b/arch/ia64/mm/fault.c
3587@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
3588 return pte_present(pte);
3589 }
3590
3591+#ifdef CONFIG_PAX_PAGEEXEC
3592+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3593+{
3594+ unsigned long i;
3595+
3596+ printk(KERN_ERR "PAX: bytes at PC: ");
3597+ for (i = 0; i < 8; i++) {
3598+ unsigned int c;
3599+ if (get_user(c, (unsigned int *)pc+i))
3600+ printk(KERN_CONT "???????? ");
3601+ else
3602+ printk(KERN_CONT "%08x ", c);
3603+ }
3604+ printk("\n");
3605+}
3606+#endif
3607+
3608 # define VM_READ_BIT 0
3609 # define VM_WRITE_BIT 1
3610 # define VM_EXEC_BIT 2
3611@@ -149,8 +166,21 @@ retry:
3612 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
3613 goto bad_area;
3614
3615- if ((vma->vm_flags & mask) != mask)
3616+ if ((vma->vm_flags & mask) != mask) {
3617+
3618+#ifdef CONFIG_PAX_PAGEEXEC
3619+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
3620+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
3621+ goto bad_area;
3622+
3623+ up_read(&mm->mmap_sem);
3624+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
3625+ do_group_exit(SIGKILL);
3626+ }
3627+#endif
3628+
3629 goto bad_area;
3630+ }
3631
3632 /*
3633 * If for any reason at all we couldn't handle the fault, make
3634diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
3635index 5ca674b..127c3cb 100644
3636--- a/arch/ia64/mm/hugetlbpage.c
3637+++ b/arch/ia64/mm/hugetlbpage.c
3638@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
3639 unsigned long pgoff, unsigned long flags)
3640 {
3641 struct vm_area_struct *vmm;
3642+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
3643
3644 if (len > RGN_MAP_LIMIT)
3645 return -ENOMEM;
3646@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
3647 /* At this point: (!vmm || addr < vmm->vm_end). */
3648 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
3649 return -ENOMEM;
3650- if (!vmm || (addr + len) <= vmm->vm_start)
3651+ if (check_heap_stack_gap(vmm, addr, len, offset))
3652 return addr;
3653 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
3654 }
3655diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
3656index 082e383..fb7be80 100644
3657--- a/arch/ia64/mm/init.c
3658+++ b/arch/ia64/mm/init.c
3659@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
3660 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
3661 vma->vm_end = vma->vm_start + PAGE_SIZE;
3662 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
3663+
3664+#ifdef CONFIG_PAX_PAGEEXEC
3665+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
3666+ vma->vm_flags &= ~VM_EXEC;
3667+
3668+#ifdef CONFIG_PAX_MPROTECT
3669+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
3670+ vma->vm_flags &= ~VM_MAYEXEC;
3671+#endif
3672+
3673+ }
3674+#endif
3675+
3676 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3677 down_write(&current->mm->mmap_sem);
3678 if (insert_vm_struct(current->mm, vma)) {
3679diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
3680index 40b3ee9..8c2c112 100644
3681--- a/arch/m32r/include/asm/cache.h
3682+++ b/arch/m32r/include/asm/cache.h
3683@@ -1,8 +1,10 @@
3684 #ifndef _ASM_M32R_CACHE_H
3685 #define _ASM_M32R_CACHE_H
3686
3687+#include <linux/const.h>
3688+
3689 /* L1 cache line size */
3690 #define L1_CACHE_SHIFT 4
3691-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3692+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3693
3694 #endif /* _ASM_M32R_CACHE_H */
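
This cache.h change recurs below for m68k, microblaze, mips, mn10300,
openrisc, parisc and powerpc: (1 << L1_CACHE_SHIFT) is a plain signed int,
whereas _AC(1,UL) from <linux/const.h> yields an unsigned long in C while
remaining usable from assembly, so cache-line arithmetic no longer mixes
signed and unsigned types. The macro itself is short (abridged from the
kernel header):

#ifdef __ASSEMBLY__
#define _AC(X,Y)  X          /* assemblers know no UL suffix: drop it */
#else
#define __AC(X,Y) (X##Y)     /* paste the suffix on in C ...          */
#define _AC(X,Y)  __AC(X,Y)  /* ... after expanding the arguments     */
#endif

/* So in C, L1_CACHE_BYTES becomes (1UL << 4), an unsigned long. */
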
3695diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
3696index 82abd15..d95ae5d 100644
3697--- a/arch/m32r/lib/usercopy.c
3698+++ b/arch/m32r/lib/usercopy.c
3699@@ -14,6 +14,9 @@
3700 unsigned long
3701 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3702 {
3703+ if ((long)n < 0)
3704+ return n;
3705+
3706 prefetch(from);
3707 if (access_ok(VERIFY_WRITE, to, n))
3708 __copy_user(to,from,n);
3709@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
3710 unsigned long
3711 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
3712 {
3713+ if ((long)n < 0)
3714+ return n;
3715+
3716 prefetchw(to);
3717 if (access_ok(VERIFY_READ, from, n))
3718 __copy_user_zeroing(to,from,n);
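
The m32r copy helpers gain the same sanity check several architectures get in
this patch: a count with the sign bit set, typically the result of a size_t
underflow in the caller, is refused before access_ok() ever runs, and, per
the copy_*_user() contract, the whole count is returned as the number of
bytes not copied. Modelled standalone:

#include <stdio.h>

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
    (void)to; (void)from;
    if ((long)n < 0)
        return n;   /* refuse: report everything as uncopied */
    /* ... access_ok() and __copy_user() would run here ... */
    return 0;       /* model: everything copied */
}

int main(void)
{
    char dst[8];
    printf("%lu\n", guarded_copy(dst, "hi", 3));                 /* 0 */
    printf("%lu\n", guarded_copy(dst, "hi", (unsigned long)-3)); /* huge: refused */
    return 0;
}
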
3719diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
3720index 0395c51..5f26031 100644
3721--- a/arch/m68k/include/asm/cache.h
3722+++ b/arch/m68k/include/asm/cache.h
3723@@ -4,9 +4,11 @@
3724 #ifndef __ARCH_M68K_CACHE_H
3725 #define __ARCH_M68K_CACHE_H
3726
3727+#include <linux/const.h>
3728+
3729 /* bytes per L1 cache line */
3730 #define L1_CACHE_SHIFT 4
3731-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
3732+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3733
3734 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
3735
3736diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
3737index 4efe96a..60e8699 100644
3738--- a/arch/microblaze/include/asm/cache.h
3739+++ b/arch/microblaze/include/asm/cache.h
3740@@ -13,11 +13,12 @@
3741 #ifndef _ASM_MICROBLAZE_CACHE_H
3742 #define _ASM_MICROBLAZE_CACHE_H
3743
3744+#include <linux/const.h>
3745 #include <asm/registers.h>
3746
3747 #define L1_CACHE_SHIFT 5
3748 /* word-granular cache in microblaze */
3749-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3750+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3751
3752 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3753
3754diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
3755index 01cc6ba..bcb7a5d 100644
3756--- a/arch/mips/include/asm/atomic.h
3757+++ b/arch/mips/include/asm/atomic.h
3758@@ -21,6 +21,10 @@
3759 #include <asm/cmpxchg.h>
3760 #include <asm/war.h>
3761
3762+#ifdef CONFIG_GENERIC_ATOMIC64
3763+#include <asm-generic/atomic64.h>
3764+#endif
3765+
3766 #define ATOMIC_INIT(i) { (i) }
3767
3768 /*
3769@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3770 */
3771 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
3772
3773+#define atomic64_read_unchecked(v) atomic64_read(v)
3774+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3775+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3776+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3777+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3778+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3779+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3780+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3781+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3782+
3783 #endif /* CONFIG_64BIT */
3784
3785 /*
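
The *_unchecked aliases exist because the patch's REFCOUNT hardening makes the
regular atomic operations detect signed overflow; call sites that wrap by
design use the _unchecked names instead. Where atomic64 is not instrumented,
as on MIPS here (and on parisc and powerpc below), the unchecked names simply
map to the plain operations. A conceptual model of the two behaviours, with an
abort() standing in for the kernel's actual overflow handling:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { long long counter; } atomic64_t;

/* Unchecked flavour: plain arithmetic, may wrap (the kernel does this
 * in asm, where the wrap-around is well defined). */
static long long atomic64_add_return_unchecked(long long i, atomic64_t *v)
{
    return v->counter += i;
}

/* Checked flavour: refuse to overflow. */
static long long atomic64_add_return(long long i, atomic64_t *v)
{
    if (i > 0 && v->counter > LLONG_MAX - i) {
        fprintf(stderr, "refcount overflow\n");
        abort();
    }
    return v->counter += i;
}

int main(void)
{
    atomic64_t v = { LLONG_MAX - 1 };
    atomic64_add_return_unchecked(1, &v); /* fine: reaches LLONG_MAX  */
    atomic64_add_return(1, &v);           /* trips the overflow check */
    return 0;
}
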
3786diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
3787index b4db69f..8f3b093 100644
3788--- a/arch/mips/include/asm/cache.h
3789+++ b/arch/mips/include/asm/cache.h
3790@@ -9,10 +9,11 @@
3791 #ifndef _ASM_CACHE_H
3792 #define _ASM_CACHE_H
3793
3794+#include <linux/const.h>
3795 #include <kmalloc.h>
3796
3797 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
3798-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3799+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3800
3801 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
3802 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3803diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
3804index 455c0ac..ad65fbe 100644
3805--- a/arch/mips/include/asm/elf.h
3806+++ b/arch/mips/include/asm/elf.h
3807@@ -372,13 +372,16 @@ extern const char *__elf_platform;
3808 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
3809 #endif
3810
3811+#ifdef CONFIG_PAX_ASLR
3812+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3813+
3814+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3815+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3816+#endif
3817+
3818 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3819 struct linux_binprm;
3820 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3821 int uses_interp);
3822
3823-struct mm_struct;
3824-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3825-#define arch_randomize_brk arch_randomize_brk
3826-
3827 #endif /* _ASM_ELF_H */
3828diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
3829index c1f6afa..38cc6e9 100644
3830--- a/arch/mips/include/asm/exec.h
3831+++ b/arch/mips/include/asm/exec.h
3832@@ -12,6 +12,6 @@
3833 #ifndef _ASM_EXEC_H
3834 #define _ASM_EXEC_H
3835
3836-extern unsigned long arch_align_stack(unsigned long sp);
3837+#define arch_align_stack(x) ((x) & ~0xfUL)
3838
3839 #endif /* _ASM_EXEC_H */
3840diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
3841index da9bd7d..91aa7ab 100644
3842--- a/arch/mips/include/asm/page.h
3843+++ b/arch/mips/include/asm/page.h
3844@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
3845 #ifdef CONFIG_CPU_MIPS32
3846 typedef struct { unsigned long pte_low, pte_high; } pte_t;
3847 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
3848- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
3849+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
3850 #else
3851 typedef struct { unsigned long long pte; } pte_t;
3852 #define pte_val(x) ((x).pte)
3853diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
3854index 881d18b..cea38bc 100644
3855--- a/arch/mips/include/asm/pgalloc.h
3856+++ b/arch/mips/include/asm/pgalloc.h
3857@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3858 {
3859 set_pud(pud, __pud((unsigned long)pmd));
3860 }
3861+
3862+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3863+{
3864+ pud_populate(mm, pud, pmd);
3865+}
3866 #endif
3867
3868 /*
3869diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
3870index 18806a5..141ffcf 100644
3871--- a/arch/mips/include/asm/thread_info.h
3872+++ b/arch/mips/include/asm/thread_info.h
3873@@ -110,6 +110,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
3874 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
3875 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
3876 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
3877+/* li takes a 32bit immediate */
3878+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
3879 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
3880
3881 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3882@@ -125,15 +127,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
3883 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
3884 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
3885 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
3886+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3887+
3888+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3889
3890 /* work to do in syscall_trace_leave() */
3891-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
3892+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
3893
3894 /* work to do on interrupt/exception return */
3895 #define _TIF_WORK_MASK \
3896 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
3897 /* work to do on any return to u-space */
3898-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
3899+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
3900
3901 #endif /* __KERNEL__ */
3902
3903diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
3904index 9fdd8bc..4bd7f1a 100644
3905--- a/arch/mips/kernel/binfmt_elfn32.c
3906+++ b/arch/mips/kernel/binfmt_elfn32.c
3907@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3908 #undef ELF_ET_DYN_BASE
3909 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3910
3911+#ifdef CONFIG_PAX_ASLR
3912+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3913+
3914+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3915+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3916+#endif
3917+
3918 #include <asm/processor.h>
3919 #include <linux/module.h>
3920 #include <linux/elfcore.h>
3921diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
3922index ff44823..97f8906 100644
3923--- a/arch/mips/kernel/binfmt_elfo32.c
3924+++ b/arch/mips/kernel/binfmt_elfo32.c
3925@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
3926 #undef ELF_ET_DYN_BASE
3927 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
3928
3929+#ifdef CONFIG_PAX_ASLR
3930+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
3931+
3932+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3933+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
3934+#endif
3935+
3936 #include <asm/processor.h>
3937
3938 /*
3939diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
3940index 69b17a9..9db82f9 100644
3941--- a/arch/mips/kernel/process.c
3942+++ b/arch/mips/kernel/process.c
3943@@ -478,15 +478,3 @@ unsigned long get_wchan(struct task_struct *task)
3944 out:
3945 return pc;
3946 }
3947-
3948-/*
3949- * Don't forget that the stack pointer must be aligned on a 8 bytes
3950- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
3951- */
3952-unsigned long arch_align_stack(unsigned long sp)
3953-{
3954- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3955- sp -= get_random_int() & ~PAGE_MASK;
3956-
3957- return sp & ALMASK;
3958-}
3959diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3960index 4812c6d..2069554 100644
3961--- a/arch/mips/kernel/ptrace.c
3962+++ b/arch/mips/kernel/ptrace.c
3963@@ -528,6 +528,10 @@ static inline int audit_arch(void)
3964 return arch;
3965 }
3966
3967+#ifdef CONFIG_GRKERNSEC_SETXID
3968+extern void gr_delayed_cred_worker(void);
3969+#endif
3970+
3971 /*
3972 * Notification of system call entry/exit
3973 * - triggered by current->work.syscall_trace
3974@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3975 /* do the secure computing check first */
3976 secure_computing_strict(regs->regs[2]);
3977
3978+#ifdef CONFIG_GRKERNSEC_SETXID
3979+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3980+ gr_delayed_cred_worker();
3981+#endif
3982+
3983 if (!(current->ptrace & PT_PTRACED))
3984 goto out;
3985
3986diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3987index 374f66e..1c882a0 100644
3988--- a/arch/mips/kernel/scall32-o32.S
3989+++ b/arch/mips/kernel/scall32-o32.S
3990@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3991
3992 stack_done:
3993 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3994- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3995+ li t1, _TIF_SYSCALL_WORK
3996 and t0, t1
3997 bnez t0, syscall_trace_entry # -> yes
3998
3999diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
4000index 169de6a..f594a89 100644
4001--- a/arch/mips/kernel/scall64-64.S
4002+++ b/arch/mips/kernel/scall64-64.S
4003@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
4004
4005 sd a3, PT_R26(sp) # save a3 for syscall restarting
4006
4007- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
4008+ li t1, _TIF_SYSCALL_WORK
4009 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
4010 and t0, t1, t0
4011 bnez t0, syscall_trace_entry
4012diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
4013index 86ec03f..1235baf 100644
4014--- a/arch/mips/kernel/scall64-n32.S
4015+++ b/arch/mips/kernel/scall64-n32.S
4016@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
4017
4018 sd a3, PT_R26(sp) # save a3 for syscall restarting
4019
4020- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
4021+ li t1, _TIF_SYSCALL_WORK
4022 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
4023 and t0, t1, t0
4024 bnez t0, n32_syscall_trace_entry
4025diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
4026index 53c2d72..3734584 100644
4027--- a/arch/mips/kernel/scall64-o32.S
4028+++ b/arch/mips/kernel/scall64-o32.S
4029@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
4030 PTR 4b, bad_stack
4031 .previous
4032
4033- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
4034+ li t1, _TIF_SYSCALL_WORK
4035 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
4036 and t0, t1, t0
4037 bnez t0, trace_a_syscall
4038diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
4039index ddcec1e..c7f983e 100644
4040--- a/arch/mips/mm/fault.c
4041+++ b/arch/mips/mm/fault.c
4042@@ -27,6 +27,23 @@
4043 #include <asm/highmem.h> /* For VMALLOC_END */
4044 #include <linux/kdebug.h>
4045
4046+#ifdef CONFIG_PAX_PAGEEXEC
4047+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4048+{
4049+ unsigned long i;
4050+
4051+ printk(KERN_ERR "PAX: bytes at PC: ");
4052+ for (i = 0; i < 5; i++) {
4053+ unsigned int c;
4054+ if (get_user(c, (unsigned int *)pc+i))
4055+ printk(KERN_CONT "???????? ");
4056+ else
4057+ printk(KERN_CONT "%08x ", c);
4058+ }
4059+ printk("\n");
4060+}
4061+#endif
4062+
4063 /*
4064 * This routine handles page faults. It determines the address,
4065 * and the problem, and then passes it off to one of the appropriate
4066diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
4067index 302d779..6459dc0 100644
4068--- a/arch/mips/mm/mmap.c
4069+++ b/arch/mips/mm/mmap.c
4070@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4071 struct vm_area_struct *vma;
4072 unsigned long addr = addr0;
4073 int do_color_align;
4074+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4075
4076 if (unlikely(len > TASK_SIZE))
4077 return -ENOMEM;
4078@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4079 do_color_align = 1;
4080
4081 /* requesting a specific address */
4082+
4083+#ifdef CONFIG_PAX_RANDMMAP
4084+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
4085+#endif
4086+
4087 if (addr) {
4088 if (do_color_align)
4089 addr = COLOUR_ALIGN(addr, pgoff);
4090@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4091 addr = PAGE_ALIGN(addr);
4092
4093 vma = find_vma(mm, addr);
4094- if (TASK_SIZE - len >= addr &&
4095- (!vma || addr + len <= vma->vm_start))
4096+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4097 return addr;
4098 }
4099
4100@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4101 /* At this point: (!vma || addr < vma->vm_end). */
4102 if (TASK_SIZE - len < addr)
4103 return -ENOMEM;
4104- if (!vma || addr + len <= vma->vm_start)
4105+ if (check_heap_stack_gap(vma, addr, len, offset))
4106 return addr;
4107 addr = vma->vm_end;
4108 if (do_color_align)
4109@@ -145,7 +150,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4110 /* make sure it can fit in the remaining address space */
4111 if (likely(addr > len)) {
4112 vma = find_vma(mm, addr - len);
4113- if (!vma || addr <= vma->vm_start) {
4114+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
4115 /* cache the address as a hint for next time */
4116 return mm->free_area_cache = addr - len;
4117 }
4118@@ -155,17 +160,17 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4119 goto bottomup;
4120
4121 addr = mm->mmap_base - len;
4122- if (do_color_align)
4123- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4124
4125 do {
4126+ if (do_color_align)
4127+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4128 /*
4129 * Lookup failure means no vma is above this address,
4130 * else if new region fits below vma->vm_start,
4131 * return with success:
4132 */
4133 vma = find_vma(mm, addr);
4134- if (likely(!vma || addr + len <= vma->vm_start)) {
4135+ if (check_heap_stack_gap(vma, addr, len, offset)) {
4136 /* cache the address as a hint for next time */
4137 return mm->free_area_cache = addr;
4138 }
4139@@ -175,10 +180,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
4140 mm->cached_hole_size = vma->vm_start - addr;
4141
4142 /* try just below the current vma->vm_start */
4143- addr = vma->vm_start - len;
4144- if (do_color_align)
4145- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4146- } while (likely(len < vma->vm_start));
4147+ addr = skip_heap_stack_gap(vma, len, offset);
4148+ } while (!IS_ERR_VALUE(addr));
4149
4150 bottomup:
4151 /*
4152@@ -223,6 +226,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4153 {
4154 unsigned long random_factor = 0UL;
4155
4156+#ifdef CONFIG_PAX_RANDMMAP
4157+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
4158+#endif
4159+
4160 if (current->flags & PF_RANDOMIZE) {
4161 random_factor = get_random_int();
4162 random_factor = random_factor << PAGE_SHIFT;
4163@@ -234,38 +241,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4164
4165 if (mmap_is_legacy()) {
4166 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4167+
4168+#ifdef CONFIG_PAX_RANDMMAP
4169+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4170+ mm->mmap_base += mm->delta_mmap;
4171+#endif
4172+
4173 mm->get_unmapped_area = arch_get_unmapped_area;
4174 mm->unmap_area = arch_unmap_area;
4175 } else {
4176 mm->mmap_base = mmap_base(random_factor);
4177+
4178+#ifdef CONFIG_PAX_RANDMMAP
4179+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4180+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4181+#endif
4182+
4183 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4184 mm->unmap_area = arch_unmap_area_topdown;
4185 }
4186 }
4187-
4188-static inline unsigned long brk_rnd(void)
4189-{
4190- unsigned long rnd = get_random_int();
4191-
4192- rnd = rnd << PAGE_SHIFT;
4193- /* 8MB for 32bit, 256MB for 64bit */
4194- if (TASK_IS_32BIT_ADDR)
4195- rnd = rnd & 0x7ffffful;
4196- else
4197- rnd = rnd & 0xffffffful;
4198-
4199- return rnd;
4200-}
4201-
4202-unsigned long arch_randomize_brk(struct mm_struct *mm)
4203-{
4204- unsigned long base = mm->brk;
4205- unsigned long ret;
4206-
4207- ret = PAGE_ALIGN(base + brk_rnd());
4208-
4209- if (ret < mm->brk)
4210- return mm->brk;
4211-
4212- return ret;
4213-}
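
The top-down half of arch_get_unmapped_area_common() is restructured around
the same guard-gap logic: colour alignment moves inside the loop so every
retry is re-aligned, and the next candidate comes from skip_heap_stack_gap()
rather than from the loop stepping to vma->vm_start - len itself. The helper
is defined in the common mm code of this patch; the sketch below only assumes
its contract, namely that it yields the next lower gap-adjusted candidate or
an errno-style value, which IS_ERR_VALUE() catches to end the search:

#include <stdio.h>

#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-4095)

struct vma { unsigned long vm_start, vm_end; };

/* Hypothetical skip function: next candidate of `len` bytes below `vma`,
 * leaving `gap` bytes free, or -ENOMEM once no room remains. */
static unsigned long skip_gap(const struct vma *vma, unsigned long len,
                              unsigned long gap)
{
    if (vma->vm_start < len + gap)
        return (unsigned long)-12;  /* -ENOMEM */
    return vma->vm_start - len - gap;
}

int main(void)
{
    struct vma v = { 0x5000UL, 0x6000UL };
    unsigned long addr = skip_gap(&v, 0x1000UL, 0x1000UL);

    if (IS_ERR_VALUE(addr))
        puts("search exhausted");
    else
        printf("next candidate: %#lx\n", addr);
    return 0;
}
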
4214diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
4215index 967d144..db12197 100644
4216--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
4217+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
4218@@ -11,12 +11,14 @@
4219 #ifndef _ASM_PROC_CACHE_H
4220 #define _ASM_PROC_CACHE_H
4221
4222+#include <linux/const.h>
4223+
4224 /* L1 cache */
4225
4226 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
4227 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
4228-#define L1_CACHE_BYTES 16 /* bytes per entry */
4229 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
4230+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
4231 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
4232
4233 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
4234diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4235index bcb5df2..84fabd2 100644
4236--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4237+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
4238@@ -16,13 +16,15 @@
4239 #ifndef _ASM_PROC_CACHE_H
4240 #define _ASM_PROC_CACHE_H
4241
4242+#include <linux/const.h>
4243+
4244 /*
4245 * L1 cache
4246 */
4247 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
4248 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
4249-#define L1_CACHE_BYTES 32 /* bytes per entry */
4250 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
4251+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
4252 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
4253
4254 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
4255diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
4256index 4ce7a01..449202a 100644
4257--- a/arch/openrisc/include/asm/cache.h
4258+++ b/arch/openrisc/include/asm/cache.h
4259@@ -19,11 +19,13 @@
4260 #ifndef __ASM_OPENRISC_CACHE_H
4261 #define __ASM_OPENRISC_CACHE_H
4262
4263+#include <linux/const.h>
4264+
4265 /* FIXME: How can we replace these with values from the CPU...
4266 * they shouldn't be hard-coded!
4267 */
4268
4269-#define L1_CACHE_BYTES 16
4270 #define L1_CACHE_SHIFT 4
4271+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4272
4273 #endif /* __ASM_OPENRISC_CACHE_H */
4274diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
4275index af9cf30..2aae9b2 100644
4276--- a/arch/parisc/include/asm/atomic.h
4277+++ b/arch/parisc/include/asm/atomic.h
4278@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
4279
4280 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4281
4282+#define atomic64_read_unchecked(v) atomic64_read(v)
4283+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4284+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4285+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4286+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4287+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4288+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4289+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4290+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4291+
4292 #endif /* !CONFIG_64BIT */
4293
4294
4295diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
4296index 47f11c7..3420df2 100644
4297--- a/arch/parisc/include/asm/cache.h
4298+++ b/arch/parisc/include/asm/cache.h
4299@@ -5,6 +5,7 @@
4300 #ifndef __ARCH_PARISC_CACHE_H
4301 #define __ARCH_PARISC_CACHE_H
4302
4303+#include <linux/const.h>
4304
4305 /*
4306 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
4307@@ -15,13 +16,13 @@
4308 * just ruin performance.
4309 */
4310 #ifdef CONFIG_PA20
4311-#define L1_CACHE_BYTES 64
4312 #define L1_CACHE_SHIFT 6
4313 #else
4314-#define L1_CACHE_BYTES 32
4315 #define L1_CACHE_SHIFT 5
4316 #endif
4317
4318+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4319+
4320 #ifndef __ASSEMBLY__
4321
4322 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4323diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
4324index 19f6cb1..6c78cf2 100644
4325--- a/arch/parisc/include/asm/elf.h
4326+++ b/arch/parisc/include/asm/elf.h
4327@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
4328
4329 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
4330
4331+#ifdef CONFIG_PAX_ASLR
4332+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4333+
4334+#define PAX_DELTA_MMAP_LEN 16
4335+#define PAX_DELTA_STACK_LEN 16
4336+#endif
4337+
4338 /* This yields a mask that user programs can use to figure out what
4339 instruction set this CPU supports. This could be done in user space,
4340 but it's not easy, and we've already done it here. */
4341diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
4342index fc987a1..6e068ef 100644
4343--- a/arch/parisc/include/asm/pgalloc.h
4344+++ b/arch/parisc/include/asm/pgalloc.h
4345@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
4346 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
4347 }
4348
4349+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
4350+{
4351+ pgd_populate(mm, pgd, pmd);
4352+}
4353+
4354 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
4355 {
4356 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
4357@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
4358 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
4359 #define pmd_free(mm, x) do { } while (0)
4360 #define pgd_populate(mm, pmd, pte) BUG()
4361+#define pgd_populate_kernel(mm, pmd, pte) BUG()
4362
4363 #endif
4364
4365diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
4366index ee99f23..802b0a1 100644
4367--- a/arch/parisc/include/asm/pgtable.h
4368+++ b/arch/parisc/include/asm/pgtable.h
4369@@ -212,6 +212,17 @@ struct vm_area_struct;
4370 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
4371 #define PAGE_COPY PAGE_EXECREAD
4372 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
4373+
4374+#ifdef CONFIG_PAX_PAGEEXEC
4375+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
4376+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4377+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
4378+#else
4379+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4380+# define PAGE_COPY_NOEXEC PAGE_COPY
4381+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4382+#endif
4383+
4384 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
4385 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
4386 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
4387diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
4388index 4ba2c93..f5e3974 100644
4389--- a/arch/parisc/include/asm/uaccess.h
4390+++ b/arch/parisc/include/asm/uaccess.h
4391@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
4392 const void __user *from,
4393 unsigned long n)
4394 {
4395- int sz = __compiletime_object_size(to);
4396+ size_t sz = __compiletime_object_size(to);
4397 int ret = -EFAULT;
4398
4399- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
4400+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
4401 ret = __copy_from_user(to, from, n);
4402 else
4403 copy_from_user_overflow();
4404diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
4405index 5e34ccf..672bc9c 100644
4406--- a/arch/parisc/kernel/module.c
4407+++ b/arch/parisc/kernel/module.c
4408@@ -98,16 +98,38 @@
4409
4410 /* three functions to determine where in the module core
4411 * or init pieces the location is */
4412+static inline int in_init_rx(struct module *me, void *loc)
4413+{
4414+ return (loc >= me->module_init_rx &&
4415+ loc < (me->module_init_rx + me->init_size_rx));
4416+}
4417+
4418+static inline int in_init_rw(struct module *me, void *loc)
4419+{
4420+ return (loc >= me->module_init_rw &&
4421+ loc < (me->module_init_rw + me->init_size_rw));
4422+}
4423+
4424 static inline int in_init(struct module *me, void *loc)
4425 {
4426- return (loc >= me->module_init &&
4427- loc <= (me->module_init + me->init_size));
4428+ return in_init_rx(me, loc) || in_init_rw(me, loc);
4429+}
4430+
4431+static inline int in_core_rx(struct module *me, void *loc)
4432+{
4433+ return (loc >= me->module_core_rx &&
4434+ loc < (me->module_core_rx + me->core_size_rx));
4435+}
4436+
4437+static inline int in_core_rw(struct module *me, void *loc)
4438+{
4439+ return (loc >= me->module_core_rw &&
4440+ loc < (me->module_core_rw + me->core_size_rw));
4441 }
4442
4443 static inline int in_core(struct module *me, void *loc)
4444 {
4445- return (loc >= me->module_core &&
4446- loc <= (me->module_core + me->core_size));
4447+ return in_core_rx(me, loc) || in_core_rw(me, loc);
4448 }
4449
4450 static inline int in_local(struct module *me, void *loc)
4451@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
4452 }
4453
4454 /* align things a bit */
4455- me->core_size = ALIGN(me->core_size, 16);
4456- me->arch.got_offset = me->core_size;
4457- me->core_size += gots * sizeof(struct got_entry);
4458+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
4459+ me->arch.got_offset = me->core_size_rw;
4460+ me->core_size_rw += gots * sizeof(struct got_entry);
4461
4462- me->core_size = ALIGN(me->core_size, 16);
4463- me->arch.fdesc_offset = me->core_size;
4464- me->core_size += fdescs * sizeof(Elf_Fdesc);
4465+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
4466+ me->arch.fdesc_offset = me->core_size_rw;
4467+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
4468
4469 me->arch.got_max = gots;
4470 me->arch.fdesc_max = fdescs;
4471@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4472
4473 BUG_ON(value == 0);
4474
4475- got = me->module_core + me->arch.got_offset;
4476+ got = me->module_core_rw + me->arch.got_offset;
4477 for (i = 0; got[i].addr; i++)
4478 if (got[i].addr == value)
4479 goto out;
4480@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
4481 #ifdef CONFIG_64BIT
4482 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4483 {
4484- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
4485+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
4486
4487 if (!value) {
4488 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
4489@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
4490
4491 /* Create new one */
4492 fdesc->addr = value;
4493- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4494+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4495 return (Elf_Addr)fdesc;
4496 }
4497 #endif /* CONFIG_64BIT */
4498@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
4499
4500 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
4501 end = table + sechdrs[me->arch.unwind_section].sh_size;
4502- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
4503+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
4504
4505 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
4506 me->arch.unwind_section, table, end, gp);
4507diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
4508index f76c108..92bad82 100644
4509--- a/arch/parisc/kernel/sys_parisc.c
4510+++ b/arch/parisc/kernel/sys_parisc.c
4511@@ -33,9 +33,11 @@
4512 #include <linux/utsname.h>
4513 #include <linux/personality.h>
4514
4515-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
4516+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
4517+ unsigned long flags)
4518 {
4519 struct vm_area_struct *vma;
4520+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4521
4522 addr = PAGE_ALIGN(addr);
4523
4524@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
4525 /* At this point: (!vma || addr < vma->vm_end). */
4526 if (TASK_SIZE - len < addr)
4527 return -ENOMEM;
4528- if (!vma || addr + len <= vma->vm_start)
4529+ if (check_heap_stack_gap(vma, addr, len, offset))
4530 return addr;
4531 addr = vma->vm_end;
4532 }
4533@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
4534 return offset & 0x3FF000;
4535 }
4536
4537-static unsigned long get_shared_area(struct address_space *mapping,
4538- unsigned long addr, unsigned long len, unsigned long pgoff)
4539+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
4540+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
4541 {
4542 struct vm_area_struct *vma;
4543 int offset = mapping ? get_offset(mapping) : 0;
4544+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4545
4546 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
4547
4548@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
4549 /* At this point: (!vma || addr < vma->vm_end). */
4550 if (TASK_SIZE - len < addr)
4551 return -ENOMEM;
4552- if (!vma || addr + len <= vma->vm_start)
4553+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
4554 return addr;
4555 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
4556 if (addr < vma->vm_end) /* handle wraparound */
4557@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4558 if (flags & MAP_FIXED)
4559 return addr;
4560 if (!addr)
4561- addr = TASK_UNMAPPED_BASE;
4562+ addr = current->mm->mmap_base;
4563
4564 if (filp) {
4565- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
4566+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
4567 } else if(flags & MAP_SHARED) {
4568- addr = get_shared_area(NULL, addr, len, pgoff);
4569+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
4570 } else {
4571- addr = get_unshared_area(addr, len);
4572+ addr = get_unshared_area(filp, addr, len, flags);
4573 }
4574 return addr;
4575 }
4576diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
4577index 45ba99f..8e22c33 100644
4578--- a/arch/parisc/kernel/traps.c
4579+++ b/arch/parisc/kernel/traps.c
4580@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
4581
4582 down_read(&current->mm->mmap_sem);
4583 vma = find_vma(current->mm,regs->iaoq[0]);
4584- if (vma && (regs->iaoq[0] >= vma->vm_start)
4585- && (vma->vm_flags & VM_EXEC)) {
4586-
4587+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
4588 fault_address = regs->iaoq[0];
4589 fault_space = regs->iasq[0];
4590
4591diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
4592index 18162ce..94de376 100644
4593--- a/arch/parisc/mm/fault.c
4594+++ b/arch/parisc/mm/fault.c
4595@@ -15,6 +15,7 @@
4596 #include <linux/sched.h>
4597 #include <linux/interrupt.h>
4598 #include <linux/module.h>
4599+#include <linux/unistd.h>
4600
4601 #include <asm/uaccess.h>
4602 #include <asm/traps.h>
4603@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
4604 static unsigned long
4605 parisc_acctyp(unsigned long code, unsigned int inst)
4606 {
4607- if (code == 6 || code == 16)
4608+ if (code == 6 || code == 7 || code == 16)
4609 return VM_EXEC;
4610
4611 switch (inst & 0xf0000000) {
4612@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
4613 }
4614 #endif
4615
4616+#ifdef CONFIG_PAX_PAGEEXEC
4617+/*
4618+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
4619+ *
4620+ * returns 1 when task should be killed
4621+ * 2 when rt_sigreturn trampoline was detected
4622+ * 3 when unpatched PLT trampoline was detected
4623+ */
4624+static int pax_handle_fetch_fault(struct pt_regs *regs)
4625+{
4626+
4627+#ifdef CONFIG_PAX_EMUPLT
4628+ int err;
4629+
4630+ do { /* PaX: unpatched PLT emulation */
4631+ unsigned int bl, depwi;
4632+
4633+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
4634+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
4635+
4636+ if (err)
4637+ break;
4638+
4639+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
4640+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
4641+
4642+ err = get_user(ldw, (unsigned int *)addr);
4643+ err |= get_user(bv, (unsigned int *)(addr+4));
4644+ err |= get_user(ldw2, (unsigned int *)(addr+8));
4645+
4646+ if (err)
4647+ break;
4648+
4649+ if (ldw == 0x0E801096U &&
4650+ bv == 0xEAC0C000U &&
4651+ ldw2 == 0x0E881095U)
4652+ {
4653+ unsigned int resolver, map;
4654+
4655+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
4656+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
4657+ if (err)
4658+ break;
4659+
4660+ regs->gr[20] = instruction_pointer(regs)+8;
4661+ regs->gr[21] = map;
4662+ regs->gr[22] = resolver;
4663+ regs->iaoq[0] = resolver | 3UL;
4664+ regs->iaoq[1] = regs->iaoq[0] + 4;
4665+ return 3;
4666+ }
4667+ }
4668+ } while (0);
4669+#endif
4670+
4671+#ifdef CONFIG_PAX_EMUTRAMP
4672+
4673+#ifndef CONFIG_PAX_EMUSIGRT
4674+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
4675+ return 1;
4676+#endif
4677+
4678+ do { /* PaX: rt_sigreturn emulation */
4679+ unsigned int ldi1, ldi2, bel, nop;
4680+
4681+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
4682+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
4683+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
4684+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
4685+
4686+ if (err)
4687+ break;
4688+
4689+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
4690+ ldi2 == 0x3414015AU &&
4691+ bel == 0xE4008200U &&
4692+ nop == 0x08000240U)
4693+ {
4694+ regs->gr[25] = (ldi1 & 2) >> 1;
4695+ regs->gr[20] = __NR_rt_sigreturn;
4696+ regs->gr[31] = regs->iaoq[1] + 16;
4697+ regs->sr[0] = regs->iasq[1];
4698+ regs->iaoq[0] = 0x100UL;
4699+ regs->iaoq[1] = regs->iaoq[0] + 4;
4700+ regs->iasq[0] = regs->sr[2];
4701+ regs->iasq[1] = regs->sr[2];
4702+ return 2;
4703+ }
4704+ } while (0);
4705+#endif
4706+
4707+ return 1;
4708+}
4709+
4710+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4711+{
4712+ unsigned long i;
4713+
4714+ printk(KERN_ERR "PAX: bytes at PC: ");
4715+ for (i = 0; i < 5; i++) {
4716+ unsigned int c;
4717+ if (get_user(c, (unsigned int *)pc+i))
4718+ printk(KERN_CONT "???????? ");
4719+ else
4720+ printk(KERN_CONT "%08x ", c);
4721+ }
4722+ printk("\n");
4723+}
4724+#endif
4725+
4726 int fixup_exception(struct pt_regs *regs)
4727 {
4728 const struct exception_table_entry *fix;
4729@@ -192,8 +303,33 @@ good_area:
4730
4731 acc_type = parisc_acctyp(code,regs->iir);
4732
4733- if ((vma->vm_flags & acc_type) != acc_type)
4734+ if ((vma->vm_flags & acc_type) != acc_type) {
4735+
4736+#ifdef CONFIG_PAX_PAGEEXEC
4737+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
4738+ (address & ~3UL) == instruction_pointer(regs))
4739+ {
4740+ up_read(&mm->mmap_sem);
4741+ switch (pax_handle_fetch_fault(regs)) {
4742+
4743+#ifdef CONFIG_PAX_EMUPLT
4744+ case 3:
4745+ return;
4746+#endif
4747+
4748+#ifdef CONFIG_PAX_EMUTRAMP
4749+ case 2:
4750+ return;
4751+#endif
4752+
4753+ }
4754+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
4755+ do_group_exit(SIGKILL);
4756+ }
4757+#endif
4758+
4759 goto bad_area;
4760+ }
4761
4762 /*
4763 * If for any reason at all we couldn't handle the fault, make
4764diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
4765index e3b1d41..8e81edf 100644
4766--- a/arch/powerpc/include/asm/atomic.h
4767+++ b/arch/powerpc/include/asm/atomic.h
4768@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
4769 return t1;
4770 }
4771
4772+#define atomic64_read_unchecked(v) atomic64_read(v)
4773+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4774+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4775+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4776+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4777+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4778+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4779+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4780+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4781+
4782 #endif /* __powerpc64__ */
4783
4784 #endif /* __KERNEL__ */
4785diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
4786index 9e495c9..b6878e5 100644
4787--- a/arch/powerpc/include/asm/cache.h
4788+++ b/arch/powerpc/include/asm/cache.h
4789@@ -3,6 +3,7 @@
4790
4791 #ifdef __KERNEL__
4792
4793+#include <linux/const.h>
4794
4795 /* bytes per L1 cache line */
4796 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
4797@@ -22,7 +23,7 @@
4798 #define L1_CACHE_SHIFT 7
4799 #endif
4800
4801-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4802+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4803
4804 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4805
4806diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
4807index 6abf0a1..459d0f1 100644
4808--- a/arch/powerpc/include/asm/elf.h
4809+++ b/arch/powerpc/include/asm/elf.h
4810@@ -28,8 +28,19 @@
4811 the loader. We need to make sure that it is out of the way of the program
4812 that it will "exec", and that there is sufficient room for the brk. */
4813
4814-extern unsigned long randomize_et_dyn(unsigned long base);
4815-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
4816+#define ELF_ET_DYN_BASE (0x20000000)
4817+
4818+#ifdef CONFIG_PAX_ASLR
4819+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
4820+
4821+#ifdef __powerpc64__
4822+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
4823+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
4824+#else
4825+#define PAX_DELTA_MMAP_LEN 15
4826+#define PAX_DELTA_STACK_LEN 15
4827+#endif
4828+#endif
4829
4830 /*
4831 * Our registers are always unsigned longs, whether we're a 32 bit
4832@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
4833 (0x7ff >> (PAGE_SHIFT - 12)) : \
4834 (0x3ffff >> (PAGE_SHIFT - 12)))
4835
4836-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4837-#define arch_randomize_brk arch_randomize_brk
4838-
4839-
4840 #ifdef CONFIG_SPU_BASE
4841 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
4842 #define NT_SPU 1
4843diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
4844index 8196e9c..d83a9f3 100644
4845--- a/arch/powerpc/include/asm/exec.h
4846+++ b/arch/powerpc/include/asm/exec.h
4847@@ -4,6 +4,6 @@
4848 #ifndef _ASM_POWERPC_EXEC_H
4849 #define _ASM_POWERPC_EXEC_H
4850
4851-extern unsigned long arch_align_stack(unsigned long sp);
4852+#define arch_align_stack(x) ((x) & ~0xfUL)
4853
4854 #endif /* _ASM_POWERPC_EXEC_H */
4855diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
4856index 5acabbd..7ea14fa 100644
4857--- a/arch/powerpc/include/asm/kmap_types.h
4858+++ b/arch/powerpc/include/asm/kmap_types.h
4859@@ -10,7 +10,7 @@
4860 * 2 of the License, or (at your option) any later version.
4861 */
4862
4863-#define KM_TYPE_NR 16
4864+#define KM_TYPE_NR 17
4865
4866 #endif /* __KERNEL__ */
4867 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
4868diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
4869index 8565c25..2865190 100644
4870--- a/arch/powerpc/include/asm/mman.h
4871+++ b/arch/powerpc/include/asm/mman.h
4872@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
4873 }
4874 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
4875
4876-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
4877+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
4878 {
4879 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
4880 }
4881diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
4882index f072e97..b436dee 100644
4883--- a/arch/powerpc/include/asm/page.h
4884+++ b/arch/powerpc/include/asm/page.h
4885@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
4886 * and needs to be executable. This means the whole heap ends
4887 * up being executable.
4888 */
4889-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4890- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4891+#define VM_DATA_DEFAULT_FLAGS32 \
4892+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4893+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4894
4895 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4896 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4897@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
4898 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
4899 #endif
4900
4901+#define ktla_ktva(addr) (addr)
4902+#define ktva_ktla(addr) (addr)
4903+
4904 /*
4905 * Use the top bit of the higher-level page table entries to indicate whether
4906 * the entries we point to contain hugepages. This works because we know that
4907diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
4908index cd915d6..c10cee8 100644
4909--- a/arch/powerpc/include/asm/page_64.h
4910+++ b/arch/powerpc/include/asm/page_64.h
4911@@ -154,15 +154,18 @@ do { \
4912 * stack by default, so in the absence of a PT_GNU_STACK program header
4913 * we turn execute permission off.
4914 */
4915-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
4916- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4917+#define VM_STACK_DEFAULT_FLAGS32 \
4918+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
4919+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4920
4921 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
4922 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
4923
4924+#ifndef CONFIG_PAX_PAGEEXEC
4925 #define VM_STACK_DEFAULT_FLAGS \
4926 (is_32bit_task() ? \
4927 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
4928+#endif
4929
4930 #include <asm-generic/getorder.h>
4931
4932diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
4933index 292725c..f87ae14 100644
4934--- a/arch/powerpc/include/asm/pgalloc-64.h
4935+++ b/arch/powerpc/include/asm/pgalloc-64.h
4936@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
4937 #ifndef CONFIG_PPC_64K_PAGES
4938
4939 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
4940+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
4941
4942 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4943 {
4944@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4945 pud_set(pud, (unsigned long)pmd);
4946 }
4947
4948+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4949+{
4950+ pud_populate(mm, pud, pmd);
4951+}
4952+
4953 #define pmd_populate(mm, pmd, pte_page) \
4954 pmd_populate_kernel(mm, pmd, page_address(pte_page))
4955 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
4956@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4957 #else /* CONFIG_PPC_64K_PAGES */
4958
4959 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
4960+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
4961
4962 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
4963 pte_t *pte)
4964diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
4965index a9cbd3b..3b67efa 100644
4966--- a/arch/powerpc/include/asm/pgtable.h
4967+++ b/arch/powerpc/include/asm/pgtable.h
4968@@ -2,6 +2,7 @@
4969 #define _ASM_POWERPC_PGTABLE_H
4970 #ifdef __KERNEL__
4971
4972+#include <linux/const.h>
4973 #ifndef __ASSEMBLY__
4974 #include <asm/processor.h> /* For TASK_SIZE */
4975 #include <asm/mmu.h>
4976diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
4977index 4aad413..85d86bf 100644
4978--- a/arch/powerpc/include/asm/pte-hash32.h
4979+++ b/arch/powerpc/include/asm/pte-hash32.h
4980@@ -21,6 +21,7 @@
4981 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
4982 #define _PAGE_USER 0x004 /* usermode access allowed */
4983 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
4984+#define _PAGE_EXEC _PAGE_GUARDED
4985 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
4986 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
4987 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
4988diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
4989index d24c141..b60696e 100644
4990--- a/arch/powerpc/include/asm/reg.h
4991+++ b/arch/powerpc/include/asm/reg.h
4992@@ -215,6 +215,7 @@
4993 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
4994 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
4995 #define DSISR_NOHPTE 0x40000000 /* no translation found */
4996+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
4997 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
4998 #define DSISR_ISSTORE 0x02000000 /* access was a store */
4999 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
5000diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
5001index 406b7b9..af63426 100644
5002--- a/arch/powerpc/include/asm/thread_info.h
5003+++ b/arch/powerpc/include/asm/thread_info.h
5004@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
5005 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
5006 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
5007 #define TIF_SINGLESTEP 8 /* singlestepping active */
5008-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
5009 #define TIF_SECCOMP 10 /* secure computing */
5010 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
5011 #define TIF_NOERROR 12 /* Force successful syscall return */
5012@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
5013 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
5014 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
5015 for stack store? */
5016+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
5017+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
5018+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
5019
5020 /* as above, but as bit values */
5021 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5022@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
5023 #define _TIF_UPROBE (1<<TIF_UPROBE)
5024 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5025 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
5026+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5027 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
5028- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
5029+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
5030+ _TIF_GRSEC_SETXID)
5031
5032 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
5033 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
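
TIF_MEMDIE moves from bit 9 to bit 17 so that bit 9 can hold TIF_GRSEC_SETXID: every flag in _TIF_SYSCALL_T_OR_A is tested on the syscall path with PowerPC's `andi.`, whose immediate is an unsigned 16-bit field, so the combined mask must stay below bit 16 (the comment added in the hunk says exactly this). A compile-time restatement of the constraint, with the bit numbers copied from the header and everything else assumed for the sketch:

/* sketch: the mask handed to `andi.` must fit in 16 bits */
#define TIF_SYSCALL_TRACE	0
#define TIF_SYSCALL_AUDIT	7
#define TIF_GRSEC_SETXID	9
#define TIF_SECCOMP		10
#define TIF_SYSCALL_TRACEPOINT	15

#define _TIF_SYSCALL_T_OR_A	((1 << TIF_SYSCALL_TRACE) | \
				 (1 << TIF_SYSCALL_AUDIT) | \
				 (1 << TIF_SECCOMP) | \
				 (1 << TIF_SYSCALL_TRACEPOINT) | \
				 (1 << TIF_GRSEC_SETXID))

_Static_assert((_TIF_SYSCALL_T_OR_A & ~0xffff) == 0,
	       "mask must be encodable as an andi. immediate");
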
5034diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
5035index 4db4959..335e00c 100644
5036--- a/arch/powerpc/include/asm/uaccess.h
5037+++ b/arch/powerpc/include/asm/uaccess.h
5038@@ -13,6 +13,8 @@
5039 #define VERIFY_READ 0
5040 #define VERIFY_WRITE 1
5041
5042+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5043+
5044 /*
5045 * The fs value determines whether argument validity checking should be
5046 * performed or not. If get_fs() == USER_DS, checking is performed, with
5047@@ -318,52 +320,6 @@ do { \
5048 extern unsigned long __copy_tofrom_user(void __user *to,
5049 const void __user *from, unsigned long size);
5050
5051-#ifndef __powerpc64__
5052-
5053-static inline unsigned long copy_from_user(void *to,
5054- const void __user *from, unsigned long n)
5055-{
5056- unsigned long over;
5057-
5058- if (access_ok(VERIFY_READ, from, n))
5059- return __copy_tofrom_user((__force void __user *)to, from, n);
5060- if ((unsigned long)from < TASK_SIZE) {
5061- over = (unsigned long)from + n - TASK_SIZE;
5062- return __copy_tofrom_user((__force void __user *)to, from,
5063- n - over) + over;
5064- }
5065- return n;
5066-}
5067-
5068-static inline unsigned long copy_to_user(void __user *to,
5069- const void *from, unsigned long n)
5070-{
5071- unsigned long over;
5072-
5073- if (access_ok(VERIFY_WRITE, to, n))
5074- return __copy_tofrom_user(to, (__force void __user *)from, n);
5075- if ((unsigned long)to < TASK_SIZE) {
5076- over = (unsigned long)to + n - TASK_SIZE;
5077- return __copy_tofrom_user(to, (__force void __user *)from,
5078- n - over) + over;
5079- }
5080- return n;
5081-}
5082-
5083-#else /* __powerpc64__ */
5084-
5085-#define __copy_in_user(to, from, size) \
5086- __copy_tofrom_user((to), (from), (size))
5087-
5088-extern unsigned long copy_from_user(void *to, const void __user *from,
5089- unsigned long n);
5090-extern unsigned long copy_to_user(void __user *to, const void *from,
5091- unsigned long n);
5092-extern unsigned long copy_in_user(void __user *to, const void __user *from,
5093- unsigned long n);
5094-
5095-#endif /* __powerpc64__ */
5096-
5097 static inline unsigned long __copy_from_user_inatomic(void *to,
5098 const void __user *from, unsigned long n)
5099 {
5100@@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
5101 if (ret == 0)
5102 return 0;
5103 }
5104+
5105+ if (!__builtin_constant_p(n))
5106+ check_object_size(to, n, false);
5107+
5108 return __copy_tofrom_user((__force void __user *)to, from, n);
5109 }
5110
5111@@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
5112 if (ret == 0)
5113 return 0;
5114 }
5115+
5116+ if (!__builtin_constant_p(n))
5117+ check_object_size(from, n, true);
5118+
5119 return __copy_tofrom_user(to, (__force const void __user *)from, n);
5120 }
5121
5122@@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
5123 return __copy_to_user_inatomic(to, from, size);
5124 }
5125
5126+#ifndef __powerpc64__
5127+
5128+static inline unsigned long __must_check copy_from_user(void *to,
5129+ const void __user *from, unsigned long n)
5130+{
5131+ unsigned long over;
5132+
5133+ if ((long)n < 0)
5134+ return n;
5135+
5136+ if (access_ok(VERIFY_READ, from, n)) {
5137+ if (!__builtin_constant_p(n))
5138+ check_object_size(to, n, false);
5139+ return __copy_tofrom_user((__force void __user *)to, from, n);
5140+ }
5141+ if ((unsigned long)from < TASK_SIZE) {
5142+ over = (unsigned long)from + n - TASK_SIZE;
5143+ if (!__builtin_constant_p(n - over))
5144+ check_object_size(to, n - over, false);
5145+ return __copy_tofrom_user((__force void __user *)to, from,
5146+ n - over) + over;
5147+ }
5148+ return n;
5149+}
5150+
5151+static inline unsigned long __must_check copy_to_user(void __user *to,
5152+ const void *from, unsigned long n)
5153+{
5154+ unsigned long over;
5155+
5156+ if ((long)n < 0)
5157+ return n;
5158+
5159+ if (access_ok(VERIFY_WRITE, to, n)) {
5160+ if (!__builtin_constant_p(n))
5161+ check_object_size(from, n, true);
5162+ return __copy_tofrom_user(to, (__force void __user *)from, n);
5163+ }
5164+ if ((unsigned long)to < TASK_SIZE) {
5165+ over = (unsigned long)to + n - TASK_SIZE;
5166+		if (!__builtin_constant_p(n - over))
5167+ check_object_size(from, n - over, true);
5168+ return __copy_tofrom_user(to, (__force void __user *)from,
5169+ n - over) + over;
5170+ }
5171+ return n;
5172+}
5173+
5174+#else /* __powerpc64__ */
5175+
5176+#define __copy_in_user(to, from, size) \
5177+ __copy_tofrom_user((to), (from), (size))
5178+
5179+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
5180+{
5181+ if ((long)n < 0 || n > INT_MAX)
5182+ return n;
5183+
5184+ if (!__builtin_constant_p(n))
5185+ check_object_size(to, n, false);
5186+
5187+ if (likely(access_ok(VERIFY_READ, from, n)))
5188+ n = __copy_from_user(to, from, n);
5189+ else
5190+ memset(to, 0, n);
5191+ return n;
5192+}
5193+
5194+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
5195+{
5196+ if ((long)n < 0 || n > INT_MAX)
5197+ return n;
5198+
5199+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
5200+ if (!__builtin_constant_p(n))
5201+ check_object_size(from, n, true);
5202+ n = __copy_to_user(to, from, n);
5203+ }
5204+ return n;
5205+}
5206+
5207+extern unsigned long copy_in_user(void __user *to, const void __user *from,
5208+ unsigned long n);
5209+
5210+#endif /* __powerpc64__ */
5211+
5212 extern unsigned long __clear_user(void __user *addr, unsigned long size);
5213
5214 static inline unsigned long clear_user(void __user *addr, unsigned long size)
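
The rewritten copy_from_user()/copy_to_user() reject oversized lengths before touching memory: a signed length that went negative in a caller becomes a huge unsigned count, and `(long)n < 0` (plus `n > INT_MAX` on 64-bit) catches it in one comparison; check_object_size(), declared at the top of this hunk, then bounds-checks runtime-sized copies against the kernel object (the PAX_USERCOPY feature). A userspace demonstration of the sign trick, with hypothetical names:

#include <stdio.h>

/* sketch of the guard above: a negative signed length shows up as an
 * enormous unsigned long and is rejected before any copying happens */
static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* report the whole length as uncopied */
	return 0;		/* pretend the copy succeeded */
}

int main(void)
{
	int user_len = -1;	/* e.g. an error code misused as a length */

	printf("left uncopied: %lu\n", guarded_copy((unsigned long)user_len));
	return 0;
}
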
5215diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
5216index 4684e33..acc4d19e 100644
5217--- a/arch/powerpc/kernel/exceptions-64e.S
5218+++ b/arch/powerpc/kernel/exceptions-64e.S
5219@@ -715,6 +715,7 @@ storage_fault_common:
5220 std r14,_DAR(r1)
5221 std r15,_DSISR(r1)
5222 addi r3,r1,STACK_FRAME_OVERHEAD
5223+ bl .save_nvgprs
5224 mr r4,r14
5225 mr r5,r15
5226 ld r14,PACA_EXGEN+EX_R14(r13)
5227@@ -723,8 +724,7 @@ storage_fault_common:
5228 cmpdi r3,0
5229 bne- 1f
5230 b .ret_from_except_lite
5231-1: bl .save_nvgprs
5232- mr r5,r3
5233+1: mr r5,r3
5234 addi r3,r1,STACK_FRAME_OVERHEAD
5235 ld r4,_DAR(r1)
5236 bl .bad_page_fault
5237diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
5238index 10b658a..e542888 100644
5239--- a/arch/powerpc/kernel/exceptions-64s.S
5240+++ b/arch/powerpc/kernel/exceptions-64s.S
5241@@ -1013,10 +1013,10 @@ handle_page_fault:
5242 11: ld r4,_DAR(r1)
5243 ld r5,_DSISR(r1)
5244 addi r3,r1,STACK_FRAME_OVERHEAD
5245+ bl .save_nvgprs
5246 bl .do_page_fault
5247 cmpdi r3,0
5248 beq+ 12f
5249- bl .save_nvgprs
5250 mr r5,r3
5251 addi r3,r1,STACK_FRAME_OVERHEAD
5252 lwz r4,_DAR(r1)
5253diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
5254index 2e3200c..72095ce 100644
5255--- a/arch/powerpc/kernel/module_32.c
5256+++ b/arch/powerpc/kernel/module_32.c
5257@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
5258 me->arch.core_plt_section = i;
5259 }
5260 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
5261- printk("Module doesn't contain .plt or .init.plt sections.\n");
5262+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
5263 return -ENOEXEC;
5264 }
5265
5266@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
5267
5268 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
5269 /* Init, or core PLT? */
5270- if (location >= mod->module_core
5271- && location < mod->module_core + mod->core_size)
5272+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
5273+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
5274 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
5275- else
5276+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
5277+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
5278 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
5279+ else {
5280+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
5281+ return ~0UL;
5282+ }
5283
5284 /* Find this entry, or if that fails, the next avail. entry */
5285 while (entry->jump[0]) {
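
With the module core split into read-execute (module_core_rx, and likewise for the init area) and read-write (..._rw) regions, do_plt_call() must classify `location` against four ranges instead of one, and it now fails loudly when the address falls in none of them. The repeated test is equivalent to this hypothetical helper:

#include <stdbool.h>

/* hypothetical helper matching the four range tests above */
static bool within_region(const char *loc, const char *base, unsigned long size)
{
	return base != NULL && loc >= base && loc < base + size;
}
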
5286diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
5287index ba48233..16ac31d 100644
5288--- a/arch/powerpc/kernel/process.c
5289+++ b/arch/powerpc/kernel/process.c
5290@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
5291 	 * Lookup NIP late so we have the best chance of getting the
5292 * above info out without failing
5293 */
5294- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
5295- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
5296+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
5297+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
5298 #endif
5299 show_stack(current, (unsigned long *) regs->gpr[1]);
5300 if (!user_mode(regs))
5301@@ -1175,10 +1175,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
5302 newsp = stack[0];
5303 ip = stack[STACK_FRAME_LR_SAVE];
5304 if (!firstframe || ip != lr) {
5305- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
5306+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
5307 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5308 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
5309- printk(" (%pS)",
5310+ printk(" (%pA)",
5311 (void *)current->ret_stack[curr_frame].ret);
5312 curr_frame--;
5313 }
5314@@ -1198,7 +1198,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
5315 struct pt_regs *regs = (struct pt_regs *)
5316 (sp + STACK_FRAME_OVERHEAD);
5317 lr = regs->link;
5318- printk("--- Exception: %lx at %pS\n LR = %pS\n",
5319+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
5320 regs->trap, (void *)regs->nip, (void *)lr);
5321 firstframe = 1;
5322 }
5323@@ -1240,58 +1240,3 @@ void __ppc64_runlatch_off(void)
5324 mtspr(SPRN_CTRLT, ctrl);
5325 }
5326 #endif /* CONFIG_PPC64 */
5327-
5328-unsigned long arch_align_stack(unsigned long sp)
5329-{
5330- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5331- sp -= get_random_int() & ~PAGE_MASK;
5332- return sp & ~0xf;
5333-}
5334-
5335-static inline unsigned long brk_rnd(void)
5336-{
5337- unsigned long rnd = 0;
5338-
5339- /* 8MB for 32bit, 1GB for 64bit */
5340- if (is_32bit_task())
5341- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
5342- else
5343- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
5344-
5345- return rnd << PAGE_SHIFT;
5346-}
5347-
5348-unsigned long arch_randomize_brk(struct mm_struct *mm)
5349-{
5350- unsigned long base = mm->brk;
5351- unsigned long ret;
5352-
5353-#ifdef CONFIG_PPC_STD_MMU_64
5354- /*
5355- * If we are using 1TB segments and we are allowed to randomise
5356- * the heap, we can put it above 1TB so it is backed by a 1TB
5357- * segment. Otherwise the heap will be in the bottom 1TB
5358- * which always uses 256MB segments and this may result in a
5359- * performance penalty.
5360- */
5361- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
5362- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
5363-#endif
5364-
5365- ret = PAGE_ALIGN(base + brk_rnd());
5366-
5367- if (ret < mm->brk)
5368- return mm->brk;
5369-
5370- return ret;
5371-}
5372-
5373-unsigned long randomize_et_dyn(unsigned long base)
5374-{
5375- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
5376-
5377- if (ret < base)
5378- return base;
5379-
5380- return ret;
5381-}
5382diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
5383index 79d8e56..38ffcbb 100644
5384--- a/arch/powerpc/kernel/ptrace.c
5385+++ b/arch/powerpc/kernel/ptrace.c
5386@@ -1663,6 +1663,10 @@ long arch_ptrace(struct task_struct *child, long request,
5387 return ret;
5388 }
5389
5390+#ifdef CONFIG_GRKERNSEC_SETXID
5391+extern void gr_delayed_cred_worker(void);
5392+#endif
5393+
5394 /*
5395 * We must return the syscall number to actually look up in the table.
5396 * This can be -1L to skip running any syscall at all.
5397@@ -1673,6 +1677,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
5398
5399 secure_computing_strict(regs->gpr[0]);
5400
5401+#ifdef CONFIG_GRKERNSEC_SETXID
5402+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5403+ gr_delayed_cred_worker();
5404+#endif
5405+
5406 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
5407 tracehook_report_syscall_entry(regs))
5408 /*
5409@@ -1707,6 +1716,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
5410 {
5411 int step;
5412
5413+#ifdef CONFIG_GRKERNSEC_SETXID
5414+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5415+ gr_delayed_cred_worker();
5416+#endif
5417+
5418 audit_syscall_exit(regs);
5419
5420 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5421diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
5422index 804e323..79181c1 100644
5423--- a/arch/powerpc/kernel/signal_32.c
5424+++ b/arch/powerpc/kernel/signal_32.c
5425@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
5426 /* Save user registers on the stack */
5427 frame = &rt_sf->uc.uc_mcontext;
5428 addr = frame;
5429- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
5430+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5431 if (save_user_regs(regs, frame, 0, 1))
5432 goto badframe;
5433 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
5434diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
5435index d183f87..1867f1a 100644
5436--- a/arch/powerpc/kernel/signal_64.c
5437+++ b/arch/powerpc/kernel/signal_64.c
5438@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
5439 current->thread.fpscr.val = 0;
5440
5441 /* Set up to return from userspace. */
5442- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
5443+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
5444 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
5445 } else {
5446 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
5447diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
5448index 3251840..3f7c77a 100644
5449--- a/arch/powerpc/kernel/traps.c
5450+++ b/arch/powerpc/kernel/traps.c
5451@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
5452 return flags;
5453 }
5454
5455+extern void gr_handle_kernel_exploit(void);
5456+
5457 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5458 int signr)
5459 {
5460@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5461 panic("Fatal exception in interrupt");
5462 if (panic_on_oops)
5463 panic("Fatal exception");
5464+
5465+ gr_handle_kernel_exploit();
5466+
5467 do_exit(signr);
5468 }
5469
5470diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
5471index 1b2076f..835e4be 100644
5472--- a/arch/powerpc/kernel/vdso.c
5473+++ b/arch/powerpc/kernel/vdso.c
5474@@ -34,6 +34,7 @@
5475 #include <asm/firmware.h>
5476 #include <asm/vdso.h>
5477 #include <asm/vdso_datapage.h>
5478+#include <asm/mman.h>
5479
5480 #include "setup.h"
5481
5482@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5483 vdso_base = VDSO32_MBASE;
5484 #endif
5485
5486- current->mm->context.vdso_base = 0;
5487+ current->mm->context.vdso_base = ~0UL;
5488
5489 /* vDSO has a problem and was disabled, just don't "enable" it for the
5490 * process
5491@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
5492 vdso_base = get_unmapped_area(NULL, vdso_base,
5493 (vdso_pages << PAGE_SHIFT) +
5494 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
5495- 0, 0);
5496+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
5497 if (IS_ERR_VALUE(vdso_base)) {
5498 rc = vdso_base;
5499 goto fail_mmapsem;
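
Here and in the two signal paths above, the "vDSO not mapped" marker changes from 0 to ~0UL: once the vDSO's placement comes from get_unmapped_area() under randomized bases, 0 is no longer a safe out-of-band value, so the patch reserves all-ones instead (and passes MAP_PRIVATE | MAP_EXECUTABLE so the area is requested as an executable private mapping). A sketch of the sentinel convention, names assumed:

/* sketch: sentinel choice when 0 may be a legitimate mapping address */
#define VDSO_NOT_MAPPED	(~0UL)

static unsigned long vdso_base = VDSO_NOT_MAPPED;	/* set at exec time */

static int have_vdso(void)
{
	return vdso_base != VDSO_NOT_MAPPED;
}
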
5500diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
5501index 5eea6f3..5d10396 100644
5502--- a/arch/powerpc/lib/usercopy_64.c
5503+++ b/arch/powerpc/lib/usercopy_64.c
5504@@ -9,22 +9,6 @@
5505 #include <linux/module.h>
5506 #include <asm/uaccess.h>
5507
5508-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5509-{
5510- if (likely(access_ok(VERIFY_READ, from, n)))
5511- n = __copy_from_user(to, from, n);
5512- else
5513- memset(to, 0, n);
5514- return n;
5515-}
5516-
5517-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5518-{
5519- if (likely(access_ok(VERIFY_WRITE, to, n)))
5520- n = __copy_to_user(to, from, n);
5521- return n;
5522-}
5523-
5524 unsigned long copy_in_user(void __user *to, const void __user *from,
5525 unsigned long n)
5526 {
5527@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
5528 return n;
5529 }
5530
5531-EXPORT_SYMBOL(copy_from_user);
5532-EXPORT_SYMBOL(copy_to_user);
5533 EXPORT_SYMBOL(copy_in_user);
5534
5535diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
5536index 0a6b283..7674925 100644
5537--- a/arch/powerpc/mm/fault.c
5538+++ b/arch/powerpc/mm/fault.c
5539@@ -32,6 +32,10 @@
5540 #include <linux/perf_event.h>
5541 #include <linux/magic.h>
5542 #include <linux/ratelimit.h>
5543+#include <linux/slab.h>
5544+#include <linux/pagemap.h>
5545+#include <linux/compiler.h>
5546+#include <linux/unistd.h>
5547
5548 #include <asm/firmware.h>
5549 #include <asm/page.h>
5550@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
5551 }
5552 #endif
5553
5554+#ifdef CONFIG_PAX_PAGEEXEC
5555+/*
5556+ * PaX: decide what to do with offenders (regs->nip = fault address)
5557+ *
5558+ * returns 1 when task should be killed
5559+ */
5560+static int pax_handle_fetch_fault(struct pt_regs *regs)
5561+{
5562+ return 1;
5563+}
5564+
5565+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5566+{
5567+ unsigned long i;
5568+
5569+ printk(KERN_ERR "PAX: bytes at PC: ");
5570+ for (i = 0; i < 5; i++) {
5571+ unsigned int c;
5572+ if (get_user(c, (unsigned int __user *)pc+i))
5573+ printk(KERN_CONT "???????? ");
5574+ else
5575+ printk(KERN_CONT "%08x ", c);
5576+ }
5577+ printk("\n");
5578+}
5579+#endif
5580+
5581 /*
5582 * Check whether the instruction at regs->nip is a store using
5583 * an update addressing form which will update r1.
5584@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
5585 * indicate errors in DSISR but can validly be set in SRR1.
5586 */
5587 if (trap == 0x400)
5588- error_code &= 0x48200000;
5589+ error_code &= 0x58200000;
5590 else
5591 is_write = error_code & DSISR_ISSTORE;
5592 #else
5593@@ -367,7 +398,7 @@ good_area:
5594 * "undefined". Of those that can be set, this is the only
5595 * one which seems bad.
5596 */
5597- if (error_code & 0x10000000)
5598+ if (error_code & DSISR_GUARDED)
5599 /* Guarded storage error. */
5600 goto bad_area;
5601 #endif /* CONFIG_8xx */
5602@@ -382,7 +413,7 @@ good_area:
5603 * processors use the same I/D cache coherency mechanism
5604 * as embedded.
5605 */
5606- if (error_code & DSISR_PROTFAULT)
5607+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
5608 goto bad_area;
5609 #endif /* CONFIG_PPC_STD_MMU */
5610
5611@@ -465,6 +496,23 @@ bad_area:
5612 bad_area_nosemaphore:
5613 /* User mode accesses cause a SIGSEGV */
5614 if (user_mode(regs)) {
5615+
5616+#ifdef CONFIG_PAX_PAGEEXEC
5617+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5618+#ifdef CONFIG_PPC_STD_MMU
5619+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
5620+#else
5621+ if (is_exec && regs->nip == address) {
5622+#endif
5623+			switch (pax_handle_fetch_fault(regs)) { /* no emulation cases on powerpc */
5624+			}
5625+
5626+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
5627+ do_group_exit(SIGKILL);
5628+ }
5629+ }
5630+#endif
5631+
5632 _exception(SIGSEGV, regs, code, address);
5633 return 0;
5634 }
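
This fault.c hunk is where PAGEEXEC enforcement lands for powerpc: an instruction fetch from a non-executable page shows up as DSISR_PROTFAULT/DSISR_GUARDED on hash MMUs (the _PAGE_EXEC = _PAGE_GUARDED aliasing from pte-hash32.h earlier), pax_report_insns() dumps the bytes at the faulting PC, and the task is killed. A hedged userspace probe: on a PAGEEXEC-enforcing kernel the indirect call below should be killed rather than return.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned int insn = 0x4e800020;	/* ppc32 "blr": a plain return */

	/* writable but deliberately not executable */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memcpy(p, &insn, sizeof(insn));
	__builtin___clear_cache((char *)p, (char *)p + sizeof(insn));

	((void (*)(void))p)();	/* instruction fetch from a non-exec page */
	puts("returned: the fetch was allowed");
	return 0;
}
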
5635diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
5636index 67a42ed..c16ef80 100644
5637--- a/arch/powerpc/mm/mmap_64.c
5638+++ b/arch/powerpc/mm/mmap_64.c
5639@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
5640 {
5641 unsigned long rnd = 0;
5642
5643+#ifdef CONFIG_PAX_RANDMMAP
5644+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5645+#endif
5646+
5647 if (current->flags & PF_RANDOMIZE) {
5648 /* 8MB for 32bit, 1GB for 64bit */
5649 if (is_32bit_task())
5650@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5651 */
5652 if (mmap_is_legacy()) {
5653 mm->mmap_base = TASK_UNMAPPED_BASE;
5654+
5655+#ifdef CONFIG_PAX_RANDMMAP
5656+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5657+ mm->mmap_base += mm->delta_mmap;
5658+#endif
5659+
5660 mm->get_unmapped_area = arch_get_unmapped_area;
5661 mm->unmap_area = arch_unmap_area;
5662 } else {
5663 mm->mmap_base = mmap_base();
5664+
5665+#ifdef CONFIG_PAX_RANDMMAP
5666+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5667+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5668+#endif
5669+
5670 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5671 mm->unmap_area = arch_unmap_area_topdown;
5672 }
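
With MF_PAX_RANDMMAP set, randomization moves out of mmap_rnd() (the new #ifdef deliberately bypasses the PF_RANDOMIZE path) and into fixed per-exec offsets, mm->delta_mmap and mm->delta_stack, applied to mmap_base here. The deltas themselves are chosen by the ELF loader from the PAX_DELTA_*_LEN bit widths declared in the elf.h hunks; roughly, and hedged since that loader code is outside this excerpt:

/* sketch: a page-aligned delta built from delta_len random bits */
static unsigned long pax_delta(unsigned long random, int delta_len, int page_shift)
{
	return (random & ((1UL << delta_len) - 1)) << page_shift;
}
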
5673diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
5674index 5829d2a..af84242 100644
5675--- a/arch/powerpc/mm/slice.c
5676+++ b/arch/powerpc/mm/slice.c
5677@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
5678 if ((mm->task_size - len) < addr)
5679 return 0;
5680 vma = find_vma(mm, addr);
5681- return (!vma || (addr + len) <= vma->vm_start);
5682+ return check_heap_stack_gap(vma, addr, len, 0);
5683 }
5684
5685 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
5686@@ -272,7 +272,7 @@ full_search:
5687 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
5688 continue;
5689 }
5690- if (!vma || addr + len <= vma->vm_start) {
5691+ if (check_heap_stack_gap(vma, addr, len, 0)) {
5692 /*
5693 * Remember the place where we stopped the search:
5694 */
5695@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5696 }
5697 }
5698
5699- addr = mm->mmap_base;
5700- while (addr > len) {
5701+ if (mm->mmap_base < len)
5702+ addr = -ENOMEM;
5703+ else
5704+ addr = mm->mmap_base - len;
5705+
5706+ while (!IS_ERR_VALUE(addr)) {
5707 /* Go down by chunk size */
5708- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
5709+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
5710
5711 /* Check for hit with different page size */
5712 mask = slice_range_to_mask(addr, len);
5713@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5714 * return with success:
5715 */
5716 vma = find_vma(mm, addr);
5717- if (!vma || (addr + len) <= vma->vm_start) {
5718+ if (check_heap_stack_gap(vma, addr, len, 0)) {
5719 /* remember the address as a hint for next time */
5720 if (use_cache)
5721 mm->free_area_cache = addr;
5722@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
5723 mm->cached_hole_size = vma->vm_start - addr;
5724
5725 /* try just below the current vma->vm_start */
5726- addr = vma->vm_start;
5727+ addr = skip_heap_stack_gap(vma, len, 0);
5728 }
5729
5730 /*
5731@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
5732 if (fixed && addr > (mm->task_size - len))
5733 return -EINVAL;
5734
5735+#ifdef CONFIG_PAX_RANDMMAP
5736+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
5737+ addr = 0;
5738+#endif
5739+
5740 /* If hint, make sure it matches our alignment restrictions */
5741 if (!fixed && addr) {
5742 addr = _ALIGN_UP(addr, 1ul << pshift);
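
Each open-coded `!vma || addr + len <= vma->vm_start` test in the slice allocator becomes check_heap_stack_gap(), which also refuses candidates that would run right up against a downward-growing stack. The real definition lives in common mm code outside this excerpt; a simplified sketch of its contract (the sysctl name is grsecurity's, and the handling of `offset` is reduced here to a plain widening of the gap):

/* sketch: [addr, addr+len) must fit below vma, keeping a gap under stacks */
static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long offset)
{
	if (!vma)
		return 1;		/* nothing above the candidate */
	if (addr + len > vma->vm_start)
		return 0;		/* overlaps the next mapping */
	if (vma->vm_flags & VM_GROWSDOWN)
		return vma->vm_start - (addr + len) >= sysctl_heap_stack_gap + offset;
	return 1;
}
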
5743diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
5744index c797832..ce575c8 100644
5745--- a/arch/s390/include/asm/atomic.h
5746+++ b/arch/s390/include/asm/atomic.h
5747@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
5748 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
5749 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5750
5751+#define atomic64_read_unchecked(v) atomic64_read(v)
5752+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5753+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5754+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5755+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5756+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5757+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5758+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5759+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5760+
5761 #define smp_mb__before_atomic_dec() smp_mb()
5762 #define smp_mb__after_atomic_dec() smp_mb()
5763 #define smp_mb__before_atomic_inc() smp_mb()
5764diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
5765index 4d7ccac..d03d0ad 100644
5766--- a/arch/s390/include/asm/cache.h
5767+++ b/arch/s390/include/asm/cache.h
5768@@ -9,8 +9,10 @@
5769 #ifndef __ARCH_S390_CACHE_H
5770 #define __ARCH_S390_CACHE_H
5771
5772-#define L1_CACHE_BYTES 256
5773+#include <linux/const.h>
5774+
5775 #define L1_CACHE_SHIFT 8
5776+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5777 #define NET_SKB_PAD 32
5778
5779 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
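
The same cache.h rewrite recurs below for score, sh and sparc: deriving L1_CACHE_BYTES from the shift with _AC(1,UL) keeps one authoritative constant, stays usable from assembly (where a UL suffix is a syntax error), and forces unsigned long arithmetic in C so ALIGN()-style expressions cannot overflow int. The machinery, essentially as <linux/const.h> defines it:

/* token-pastes the UL suffix in C, drops it in assembly */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	8
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)	/* 256UL */
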
5780diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
5781index 178ff96..8c93bd1 100644
5782--- a/arch/s390/include/asm/elf.h
5783+++ b/arch/s390/include/asm/elf.h
5784@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
5785 the loader. We need to make sure that it is out of the way of the program
5786 that it will "exec", and that there is sufficient room for the brk. */
5787
5788-extern unsigned long randomize_et_dyn(unsigned long base);
5789-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
5790+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
5791+
5792+#ifdef CONFIG_PAX_ASLR
5793+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
5794+
5795+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5796+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
5797+#endif
5798
5799 /* This yields a mask that user programs can use to figure out what
5800 instruction set this CPU supports. */
5801@@ -210,9 +216,6 @@ struct linux_binprm;
5802 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5803 int arch_setup_additional_pages(struct linux_binprm *, int);
5804
5805-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5806-#define arch_randomize_brk arch_randomize_brk
5807-
5808 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
5809
5810 #endif
5811diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
5812index c4a93d6..4d2a9b4 100644
5813--- a/arch/s390/include/asm/exec.h
5814+++ b/arch/s390/include/asm/exec.h
5815@@ -7,6 +7,6 @@
5816 #ifndef __ASM_EXEC_H
5817 #define __ASM_EXEC_H
5818
5819-extern unsigned long arch_align_stack(unsigned long sp);
5820+#define arch_align_stack(x) ((x) & ~0xfUL)
5821
5822 #endif /* __ASM_EXEC_H */
5823diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
5824index 34268df..ea97318 100644
5825--- a/arch/s390/include/asm/uaccess.h
5826+++ b/arch/s390/include/asm/uaccess.h
5827@@ -252,6 +252,10 @@ static inline unsigned long __must_check
5828 copy_to_user(void __user *to, const void *from, unsigned long n)
5829 {
5830 might_fault();
5831+
5832+ if ((long)n < 0)
5833+ return n;
5834+
5835 if (access_ok(VERIFY_WRITE, to, n))
5836 n = __copy_to_user(to, from, n);
5837 return n;
5838@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
5839 static inline unsigned long __must_check
5840 __copy_from_user(void *to, const void __user *from, unsigned long n)
5841 {
5842+ if ((long)n < 0)
5843+ return n;
5844+
5845 if (__builtin_constant_p(n) && (n <= 256))
5846 return uaccess.copy_from_user_small(n, from, to);
5847 else
5848@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
5849 static inline unsigned long __must_check
5850 copy_from_user(void *to, const void __user *from, unsigned long n)
5851 {
5852- unsigned int sz = __compiletime_object_size(to);
5853+ size_t sz = __compiletime_object_size(to);
5854
5855 might_fault();
5856- if (unlikely(sz != -1 && sz < n)) {
5857+
5858+ if ((long)n < 0)
5859+ return n;
5860+
5861+ if (unlikely(sz != (size_t)-1 && sz < n)) {
5862 copy_from_user_overflow();
5863 return n;
5864 }
5865diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
5866index 4610dea..cf0af21 100644
5867--- a/arch/s390/kernel/module.c
5868+++ b/arch/s390/kernel/module.c
5869@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
5870
5871 /* Increase core size by size of got & plt and set start
5872 offsets for got and plt. */
5873- me->core_size = ALIGN(me->core_size, 4);
5874- me->arch.got_offset = me->core_size;
5875- me->core_size += me->arch.got_size;
5876- me->arch.plt_offset = me->core_size;
5877- me->core_size += me->arch.plt_size;
5878+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
5879+ me->arch.got_offset = me->core_size_rw;
5880+ me->core_size_rw += me->arch.got_size;
5881+ me->arch.plt_offset = me->core_size_rx;
5882+ me->core_size_rx += me->arch.plt_size;
5883 return 0;
5884 }
5885
5886@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5887 if (info->got_initialized == 0) {
5888 Elf_Addr *gotent;
5889
5890- gotent = me->module_core + me->arch.got_offset +
5891+ gotent = me->module_core_rw + me->arch.got_offset +
5892 info->got_offset;
5893 *gotent = val;
5894 info->got_initialized = 1;
5895@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5896 else if (r_type == R_390_GOTENT ||
5897 r_type == R_390_GOTPLTENT)
5898 *(unsigned int *) loc =
5899- (val + (Elf_Addr) me->module_core - loc) >> 1;
5900+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
5901 else if (r_type == R_390_GOT64 ||
5902 r_type == R_390_GOTPLT64)
5903 *(unsigned long *) loc = val;
5904@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5905 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
5906 if (info->plt_initialized == 0) {
5907 unsigned int *ip;
5908- ip = me->module_core + me->arch.plt_offset +
5909+ ip = me->module_core_rx + me->arch.plt_offset +
5910 info->plt_offset;
5911 #ifndef CONFIG_64BIT
5912 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
5913@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5914 val - loc + 0xffffUL < 0x1ffffeUL) ||
5915 (r_type == R_390_PLT32DBL &&
5916 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
5917- val = (Elf_Addr) me->module_core +
5918+ val = (Elf_Addr) me->module_core_rx +
5919 me->arch.plt_offset +
5920 info->plt_offset;
5921 val += rela->r_addend - loc;
5922@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5923 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
5924 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
5925 val = val + rela->r_addend -
5926- ((Elf_Addr) me->module_core + me->arch.got_offset);
5927+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
5928 if (r_type == R_390_GOTOFF16)
5929 *(unsigned short *) loc = val;
5930 else if (r_type == R_390_GOTOFF32)
5931@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
5932 break;
5933 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
5934 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
5935- val = (Elf_Addr) me->module_core + me->arch.got_offset +
5936+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
5937 rela->r_addend - loc;
5938 if (r_type == R_390_GOTPC)
5939 *(unsigned int *) loc = val;
5940diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
5941index cd31ad4..201c5a3 100644
5942--- a/arch/s390/kernel/process.c
5943+++ b/arch/s390/kernel/process.c
5944@@ -283,39 +283,3 @@ unsigned long get_wchan(struct task_struct *p)
5945 }
5946 return 0;
5947 }
5948-
5949-unsigned long arch_align_stack(unsigned long sp)
5950-{
5951- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5952- sp -= get_random_int() & ~PAGE_MASK;
5953- return sp & ~0xf;
5954-}
5955-
5956-static inline unsigned long brk_rnd(void)
5957-{
5958- /* 8MB for 32bit, 1GB for 64bit */
5959- if (is_32bit_task())
5960- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
5961- else
5962- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
5963-}
5964-
5965-unsigned long arch_randomize_brk(struct mm_struct *mm)
5966-{
5967- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
5968-
5969- if (ret < mm->brk)
5970- return mm->brk;
5971- return ret;
5972-}
5973-
5974-unsigned long randomize_et_dyn(unsigned long base)
5975-{
5976- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
5977-
5978- if (!(current->flags & PF_RANDOMIZE))
5979- return base;
5980- if (ret < base)
5981- return base;
5982- return ret;
5983-}
5984diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5985index c59a5ef..3fae59c 100644
5986--- a/arch/s390/mm/mmap.c
5987+++ b/arch/s390/mm/mmap.c
5988@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5989 */
5990 if (mmap_is_legacy()) {
5991 mm->mmap_base = TASK_UNMAPPED_BASE;
5992+
5993+#ifdef CONFIG_PAX_RANDMMAP
5994+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5995+ mm->mmap_base += mm->delta_mmap;
5996+#endif
5997+
5998 mm->get_unmapped_area = arch_get_unmapped_area;
5999 mm->unmap_area = arch_unmap_area;
6000 } else {
6001 mm->mmap_base = mmap_base();
6002+
6003+#ifdef CONFIG_PAX_RANDMMAP
6004+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6005+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6006+#endif
6007+
6008 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6009 mm->unmap_area = arch_unmap_area_topdown;
6010 }
6011@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6012 */
6013 if (mmap_is_legacy()) {
6014 mm->mmap_base = TASK_UNMAPPED_BASE;
6015+
6016+#ifdef CONFIG_PAX_RANDMMAP
6017+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6018+ mm->mmap_base += mm->delta_mmap;
6019+#endif
6020+
6021 mm->get_unmapped_area = s390_get_unmapped_area;
6022 mm->unmap_area = arch_unmap_area;
6023 } else {
6024 mm->mmap_base = mmap_base();
6025+
6026+#ifdef CONFIG_PAX_RANDMMAP
6027+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6028+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6029+#endif
6030+
6031 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
6032 mm->unmap_area = arch_unmap_area_topdown;
6033 }
6034diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
6035index ae3d59f..f65f075 100644
6036--- a/arch/score/include/asm/cache.h
6037+++ b/arch/score/include/asm/cache.h
6038@@ -1,7 +1,9 @@
6039 #ifndef _ASM_SCORE_CACHE_H
6040 #define _ASM_SCORE_CACHE_H
6041
6042+#include <linux/const.h>
6043+
6044 #define L1_CACHE_SHIFT 4
6045-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6046+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6047
6048 #endif /* _ASM_SCORE_CACHE_H */
6049diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
6050index f9f3cd5..58ff438 100644
6051--- a/arch/score/include/asm/exec.h
6052+++ b/arch/score/include/asm/exec.h
6053@@ -1,6 +1,6 @@
6054 #ifndef _ASM_SCORE_EXEC_H
6055 #define _ASM_SCORE_EXEC_H
6056
6057-extern unsigned long arch_align_stack(unsigned long sp);
6058+#define arch_align_stack(x) (x)
6059
6060 #endif /* _ASM_SCORE_EXEC_H */
6061diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
6062index 637970c..0b6556b 100644
6063--- a/arch/score/kernel/process.c
6064+++ b/arch/score/kernel/process.c
6065@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
6066
6067 return task_pt_regs(task)->cp0_epc;
6068 }
6069-
6070-unsigned long arch_align_stack(unsigned long sp)
6071-{
6072- return sp;
6073-}
6074diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
6075index ef9e555..331bd29 100644
6076--- a/arch/sh/include/asm/cache.h
6077+++ b/arch/sh/include/asm/cache.h
6078@@ -9,10 +9,11 @@
6079 #define __ASM_SH_CACHE_H
6080 #ifdef __KERNEL__
6081
6082+#include <linux/const.h>
6083 #include <linux/init.h>
6084 #include <cpu/cache.h>
6085
6086-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6087+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6088
6089 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6090
6091diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
6092index afeb710..e8366ef 100644
6093--- a/arch/sh/mm/mmap.c
6094+++ b/arch/sh/mm/mmap.c
6095@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6096 struct vm_area_struct *vma;
6097 unsigned long start_addr;
6098 int do_colour_align;
6099+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6100
6101 if (flags & MAP_FIXED) {
6102 /* We do not accept a shared mapping if it would violate
6103@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6104 addr = PAGE_ALIGN(addr);
6105
6106 vma = find_vma(mm, addr);
6107- if (TASK_SIZE - len >= addr &&
6108- (!vma || addr + len <= vma->vm_start))
6109+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6110 return addr;
6111 }
6112
6113@@ -106,7 +106,7 @@ full_search:
6114 }
6115 return -ENOMEM;
6116 }
6117- if (likely(!vma || addr + len <= vma->vm_start)) {
6118+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6119 /*
6120 * Remember the place where we stopped the search:
6121 */
6122@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6123 struct mm_struct *mm = current->mm;
6124 unsigned long addr = addr0;
6125 int do_colour_align;
6126+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6127
6128 if (flags & MAP_FIXED) {
6129 /* We do not accept a shared mapping if it would violate
6130@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6131 addr = PAGE_ALIGN(addr);
6132
6133 vma = find_vma(mm, addr);
6134- if (TASK_SIZE - len >= addr &&
6135- (!vma || addr + len <= vma->vm_start))
6136+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6137 return addr;
6138 }
6139
6140@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6141 /* make sure it can fit in the remaining address space */
6142 if (likely(addr > len)) {
6143 vma = find_vma(mm, addr-len);
6144- if (!vma || addr <= vma->vm_start) {
6145+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
6146 /* remember the address as a hint for next time */
6147 return (mm->free_area_cache = addr-len);
6148 }
6149@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6150 if (unlikely(mm->mmap_base < len))
6151 goto bottomup;
6152
6153- addr = mm->mmap_base-len;
6154- if (do_colour_align)
6155- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6156+ addr = mm->mmap_base - len;
6157
6158 do {
6159+ if (do_colour_align)
6160+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6161 /*
6162 * Lookup failure means no vma is above this address,
6163 * else if new region fits below vma->vm_start,
6164 * return with success:
6165 */
6166 vma = find_vma(mm, addr);
6167- if (likely(!vma || addr+len <= vma->vm_start)) {
6168+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
6169 /* remember the address as a hint for next time */
6170 return (mm->free_area_cache = addr);
6171 }
6172@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6173 mm->cached_hole_size = vma->vm_start - addr;
6174
6175 /* try just below the current vma->vm_start */
6176- addr = vma->vm_start-len;
6177- if (do_colour_align)
6178- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6179- } while (likely(len < vma->vm_start));
6180+ addr = skip_heap_stack_gap(vma, len, offset);
6181+ } while (!IS_ERR_VALUE(addr));
6182
6183 bottomup:
6184 /*
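
The topdown walk above no longer recomputes `vma->vm_start - len` inline; skip_heap_stack_gap() hands back the next candidate strictly below the blocking vma, or an errno folded into the address, which is why the loop condition becomes !IS_ERR_VALUE(addr). A hedged sketch of that helper's shape (its definition is not part of this excerpt):

/* sketch: next candidate below vma, or -ENOMEM folded into the address */
static unsigned long skip_heap_stack_gap_sketch(const struct vm_area_struct *vma,
						unsigned long len,
						unsigned long offset)
{
	unsigned long gap = 0;

	if (vma->vm_flags & VM_GROWSDOWN)	/* keep clear of stacks */
		gap = sysctl_heap_stack_gap + offset;
	if (vma->vm_start < len + gap)
		return -ENOMEM;			/* caught by IS_ERR_VALUE() */
	return vma->vm_start - len - gap;
}
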
6185diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
6186index be56a24..443328f 100644
6187--- a/arch/sparc/include/asm/atomic_64.h
6188+++ b/arch/sparc/include/asm/atomic_64.h
6189@@ -14,18 +14,40 @@
6190 #define ATOMIC64_INIT(i) { (i) }
6191
6192 #define atomic_read(v) (*(volatile int *)&(v)->counter)
6193+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6194+{
6195+ return v->counter;
6196+}
6197 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
6198+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6199+{
6200+ return v->counter;
6201+}
6202
6203 #define atomic_set(v, i) (((v)->counter) = i)
6204+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6205+{
6206+ v->counter = i;
6207+}
6208 #define atomic64_set(v, i) (((v)->counter) = i)
6209+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6210+{
6211+ v->counter = i;
6212+}
6213
6214 extern void atomic_add(int, atomic_t *);
6215+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
6216 extern void atomic64_add(long, atomic64_t *);
6217+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
6218 extern void atomic_sub(int, atomic_t *);
6219+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
6220 extern void atomic64_sub(long, atomic64_t *);
6221+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
6222
6223 extern int atomic_add_ret(int, atomic_t *);
6224+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
6225 extern long atomic64_add_ret(long, atomic64_t *);
6226+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
6227 extern int atomic_sub_ret(int, atomic_t *);
6228 extern long atomic64_sub_ret(long, atomic64_t *);
6229
6230@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6231 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
6232
6233 #define atomic_inc_return(v) atomic_add_ret(1, v)
6234+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6235+{
6236+ return atomic_add_ret_unchecked(1, v);
6237+}
6238 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
6239+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6240+{
6241+ return atomic64_add_ret_unchecked(1, v);
6242+}
6243
6244 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
6245 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
6246
6247 #define atomic_add_return(i, v) atomic_add_ret(i, v)
6248+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6249+{
6250+ return atomic_add_ret_unchecked(i, v);
6251+}
6252 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
6253+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6254+{
6255+ return atomic64_add_ret_unchecked(i, v);
6256+}
6257
6258 /*
6259 * atomic_inc_and_test - increment and test
6260@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6261 * other cases.
6262 */
6263 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6264+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6265+{
6266+ return atomic_inc_return_unchecked(v) == 0;
6267+}
6268 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6269
6270 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
6271@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
6272 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
6273
6274 #define atomic_inc(v) atomic_add(1, v)
6275+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6276+{
6277+ atomic_add_unchecked(1, v);
6278+}
6279 #define atomic64_inc(v) atomic64_add(1, v)
6280+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6281+{
6282+ atomic64_add_unchecked(1, v);
6283+}
6284
6285 #define atomic_dec(v) atomic_sub(1, v)
6286+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6287+{
6288+ atomic_sub_unchecked(1, v);
6289+}
6290 #define atomic64_dec(v) atomic64_sub(1, v)
6291+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6292+{
6293+ atomic64_sub_unchecked(1, v);
6294+}
6295
6296 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
6297 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
6298
6299 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6300+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6301+{
6302+ return cmpxchg(&v->counter, old, new);
6303+}
6304 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
6305+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6306+{
6307+ return xchg(&v->counter, new);
6308+}
6309
6310 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6311 {
6312- int c, old;
6313+ int c, old, new;
6314 c = atomic_read(v);
6315 for (;;) {
6316- if (unlikely(c == (u)))
6317+ if (unlikely(c == u))
6318 break;
6319- old = atomic_cmpxchg((v), c, c + (a));
6320+
6321+ asm volatile("addcc %2, %0, %0\n"
6322+
6323+#ifdef CONFIG_PAX_REFCOUNT
6324+ "tvs %%icc, 6\n"
6325+#endif
6326+
6327+ : "=r" (new)
6328+ : "0" (c), "ir" (a)
6329+ : "cc");
6330+
6331+ old = atomic_cmpxchg(v, c, new);
6332 if (likely(old == c))
6333 break;
6334 c = old;
6335@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
6336 #define atomic64_cmpxchg(v, o, n) \
6337 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6338 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
6339+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6340+{
6341+ return xchg(&v->counter, new);
6342+}
6343
6344 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6345 {
6346- long c, old;
6347+ long c, old, new;
6348 c = atomic64_read(v);
6349 for (;;) {
6350- if (unlikely(c == (u)))
6351+ if (unlikely(c == u))
6352 break;
6353- old = atomic64_cmpxchg((v), c, c + (a));
6354+
6355+ asm volatile("addcc %2, %0, %0\n"
6356+
6357+#ifdef CONFIG_PAX_REFCOUNT
6358+ "tvs %%xcc, 6\n"
6359+#endif
6360+
6361+ : "=r" (new)
6362+ : "0" (c), "ir" (a)
6363+ : "cc");
6364+
6365+ old = atomic64_cmpxchg(v, c, new);
6366 if (likely(old == c))
6367 break;
6368 c = old;
6369 }
6370- return c != (u);
6371+ return c != u;
6372 }
6373
6374 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
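
The sparc64 atomic rework is the PAX_REFCOUNT pattern: the default operations switch to `addcc`/`subcc` so the condition codes are set, and under CONFIG_PAX_REFCOUNT a `tvs %icc, 6` (trap on overflow) fires the instant a counter wraps, while the new *_unchecked variants keep the old wrapping semantics for counters where overflow is harmless (statistics and the like). In portable C the checked behaviour corresponds roughly to:

#include <stdlib.h>

/* sketch of what `addcc ...; tvs %icc, 6` enforces, in portable C */
static int checked_add(int counter, int delta)
{
	int result;

	if (__builtin_add_overflow(counter, delta, &result))
		abort();	/* the kernel takes a trap and reports the overflow */
	return result;
}
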
6375diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
6376index 5bb6991..5c2132e 100644
6377--- a/arch/sparc/include/asm/cache.h
6378+++ b/arch/sparc/include/asm/cache.h
6379@@ -7,10 +7,12 @@
6380 #ifndef _SPARC_CACHE_H
6381 #define _SPARC_CACHE_H
6382
6383+#include <linux/const.h>
6384+
6385 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
6386
6387 #define L1_CACHE_SHIFT 5
6388-#define L1_CACHE_BYTES 32
6389+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6390
6391 #ifdef CONFIG_SPARC32
6392 #define SMP_CACHE_BYTES_SHIFT 5
6393diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
6394index ac74a2c..a9e58af 100644
6395--- a/arch/sparc/include/asm/elf_32.h
6396+++ b/arch/sparc/include/asm/elf_32.h
6397@@ -114,6 +114,13 @@ typedef struct {
6398
6399 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
6400
6401+#ifdef CONFIG_PAX_ASLR
6402+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6403+
6404+#define PAX_DELTA_MMAP_LEN 16
6405+#define PAX_DELTA_STACK_LEN 16
6406+#endif
6407+
6408 /* This yields a mask that user programs can use to figure out what
6409 instruction set this cpu supports. This can NOT be done in userspace
6410 on Sparc. */
6411diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
6412index 370ca1e..d4f4a98 100644
6413--- a/arch/sparc/include/asm/elf_64.h
6414+++ b/arch/sparc/include/asm/elf_64.h
6415@@ -189,6 +189,13 @@ typedef struct {
6416 #define ELF_ET_DYN_BASE 0x0000010000000000UL
6417 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
6418
6419+#ifdef CONFIG_PAX_ASLR
6420+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
6421+
6422+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
6423+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
6424+#endif
6425+
6426 extern unsigned long sparc64_elf_hwcap;
6427 #define ELF_HWCAP sparc64_elf_hwcap
6428
6429diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
6430index 9b1c36d..209298b 100644
6431--- a/arch/sparc/include/asm/pgalloc_32.h
6432+++ b/arch/sparc/include/asm/pgalloc_32.h
6433@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
6434 }
6435
6436 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
6437+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
6438
6439 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
6440 unsigned long address)
6441diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
6442index bcfe063..b333142 100644
6443--- a/arch/sparc/include/asm/pgalloc_64.h
6444+++ b/arch/sparc/include/asm/pgalloc_64.h
6445@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6446 }
6447
6448 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
6449+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
6450
6451 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
6452 {
6453diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
6454index 6fc1348..390c50a 100644
6455--- a/arch/sparc/include/asm/pgtable_32.h
6456+++ b/arch/sparc/include/asm/pgtable_32.h
6457@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
6458 #define PAGE_SHARED SRMMU_PAGE_SHARED
6459 #define PAGE_COPY SRMMU_PAGE_COPY
6460 #define PAGE_READONLY SRMMU_PAGE_RDONLY
6461+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
6462+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
6463+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
6464 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
6465
6466 /* Top-level page directory - dummy used by init-mm.
6467@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
6468
6469 /* xwr */
6470 #define __P000 PAGE_NONE
6471-#define __P001 PAGE_READONLY
6472-#define __P010 PAGE_COPY
6473-#define __P011 PAGE_COPY
6474+#define __P001 PAGE_READONLY_NOEXEC
6475+#define __P010 PAGE_COPY_NOEXEC
6476+#define __P011 PAGE_COPY_NOEXEC
6477 #define __P100 PAGE_READONLY
6478 #define __P101 PAGE_READONLY
6479 #define __P110 PAGE_COPY
6480 #define __P111 PAGE_COPY
6481
6482 #define __S000 PAGE_NONE
6483-#define __S001 PAGE_READONLY
6484-#define __S010 PAGE_SHARED
6485-#define __S011 PAGE_SHARED
6486+#define __S001 PAGE_READONLY_NOEXEC
6487+#define __S010 PAGE_SHARED_NOEXEC
6488+#define __S011 PAGE_SHARED_NOEXEC
6489 #define __S100 PAGE_READONLY
6490 #define __S101 PAGE_READONLY
6491 #define __S110 PAGE_SHARED
6492diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
6493index 79da178..c2eede8 100644
6494--- a/arch/sparc/include/asm/pgtsrmmu.h
6495+++ b/arch/sparc/include/asm/pgtsrmmu.h
6496@@ -115,6 +115,11 @@
6497 SRMMU_EXEC | SRMMU_REF)
6498 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
6499 SRMMU_EXEC | SRMMU_REF)
6500+
6501+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
6502+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6503+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
6504+
6505 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
6506 SRMMU_DIRTY | SRMMU_REF)
6507
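The three SRMMU_PAGE_*_NOEXEC protections differ from their originals only in dropping SRMMU_EXEC, and the pgtable_32.h hunk above rewires the __P/__S tables so that any mapping created without PROT_EXEC picks one of them. A small runnable sketch of how the xwr index selects an entry (table contents transcribed from the patched __Pxxx definitions; the printout itself is illustrative):

#include <stdio.h>

/* patched __P000..__P111, indexed by the xwr bits of the mmap prot */
static const char *prot_private[8] = {
        "PAGE_NONE",                /* --- */
        "PAGE_READONLY_NOEXEC",     /* --r */
        "PAGE_COPY_NOEXEC",         /* -w- */
        "PAGE_COPY_NOEXEC",         /* -wr */
        "PAGE_READONLY",            /* x-- */
        "PAGE_READONLY",            /* x-r */
        "PAGE_COPY",                /* xw- */
        "PAGE_COPY",                /* xwr */
};

int main(void)
{
        unsigned int xwr;

        for (xwr = 0; xwr < 8; xwr++)
                printf("__P%u%u%u -> %s\n", (xwr >> 2) & 1,
                       (xwr >> 1) & 1, xwr & 1, prot_private[xwr]);
        return 0;
}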
6508diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
6509index 9689176..63c18ea 100644
6510--- a/arch/sparc/include/asm/spinlock_64.h
6511+++ b/arch/sparc/include/asm/spinlock_64.h
6512@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
6513
6514 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
6515
6516-static void inline arch_read_lock(arch_rwlock_t *lock)
6517+static inline void arch_read_lock(arch_rwlock_t *lock)
6518 {
6519 unsigned long tmp1, tmp2;
6520
6521 __asm__ __volatile__ (
6522 "1: ldsw [%2], %0\n"
6523 " brlz,pn %0, 2f\n"
6524-"4: add %0, 1, %1\n"
6525+"4: addcc %0, 1, %1\n"
6526+
6527+#ifdef CONFIG_PAX_REFCOUNT
6528+" tvs %%icc, 6\n"
6529+#endif
6530+
6531 " cas [%2], %0, %1\n"
6532 " cmp %0, %1\n"
6533 " bne,pn %%icc, 1b\n"
6534@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
6535 " .previous"
6536 : "=&r" (tmp1), "=&r" (tmp2)
6537 : "r" (lock)
6538- : "memory");
6539+ : "memory", "cc");
6540 }
6541
6542-static int inline arch_read_trylock(arch_rwlock_t *lock)
6543+static inline int arch_read_trylock(arch_rwlock_t *lock)
6544 {
6545 int tmp1, tmp2;
6546
6547@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6548 "1: ldsw [%2], %0\n"
6549 " brlz,a,pn %0, 2f\n"
6550 " mov 0, %0\n"
6551-" add %0, 1, %1\n"
6552+" addcc %0, 1, %1\n"
6553+
6554+#ifdef CONFIG_PAX_REFCOUNT
6555+" tvs %%icc, 6\n"
6556+#endif
6557+
6558 " cas [%2], %0, %1\n"
6559 " cmp %0, %1\n"
6560 " bne,pn %%icc, 1b\n"
6561@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
6562 return tmp1;
6563 }
6564
6565-static void inline arch_read_unlock(arch_rwlock_t *lock)
6566+static inline void arch_read_unlock(arch_rwlock_t *lock)
6567 {
6568 unsigned long tmp1, tmp2;
6569
6570 __asm__ __volatile__(
6571 "1: lduw [%2], %0\n"
6572-" sub %0, 1, %1\n"
6573+" subcc %0, 1, %1\n"
6574+
6575+#ifdef CONFIG_PAX_REFCOUNT
6576+" tvs %%icc, 6\n"
6577+#endif
6578+
6579 " cas [%2], %0, %1\n"
6580 " cmp %0, %1\n"
6581 " bne,pn %%xcc, 1b\n"
6582@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
6583 : "memory");
6584 }
6585
6586-static void inline arch_write_lock(arch_rwlock_t *lock)
6587+static inline void arch_write_lock(arch_rwlock_t *lock)
6588 {
6589 unsigned long mask, tmp1, tmp2;
6590
6591@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
6592 : "memory");
6593 }
6594
6595-static void inline arch_write_unlock(arch_rwlock_t *lock)
6596+static inline void arch_write_unlock(arch_rwlock_t *lock)
6597 {
6598 __asm__ __volatile__(
6599 " stw %%g0, [%0]"
6600@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
6601 : "memory");
6602 }
6603
6604-static int inline arch_write_trylock(arch_rwlock_t *lock)
6605+static inline int arch_write_trylock(arch_rwlock_t *lock)
6606 {
6607 unsigned long mask, tmp1, tmp2, result;
6608
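The substance of the rwlock changes is the add -> addcc / sub -> subcc switch plus the conditional "tvs %icc, 6": the cc-setting variants update the integer condition codes, and tvs (trap on overflow set) raises software trap 6 if the reader count just wrapped, which is also why "cc" joins the clobber list. A portable C model of the same idea, with abort() standing in for the trap and the cas retry loop elided:

#include <stdio.h>
#include <stdlib.h>

/* models 'addcc %0, 1, %1' followed by 'tvs %icc, 6' */
static int reader_count_inc(int *count)
{
        int new;

        if (__builtin_add_overflow(*count, 1, &new))
                abort();        /* kernel: trap 6, handled in traps_64.c below */
        return *count = new;
}

int main(void)
{
        int readers = 0;

        printf("%d\n", reader_count_inc(&readers));
        return 0;
}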
6609diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
6610index 25849ae..924c54b 100644
6611--- a/arch/sparc/include/asm/thread_info_32.h
6612+++ b/arch/sparc/include/asm/thread_info_32.h
6613@@ -49,6 +49,8 @@ struct thread_info {
6614 unsigned long w_saved;
6615
6616 struct restart_block restart_block;
6617+
6618+ unsigned long lowest_stack;
6619 };
6620
6621 /*
6622diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
6623index a3fe4dc..cae132a 100644
6624--- a/arch/sparc/include/asm/thread_info_64.h
6625+++ b/arch/sparc/include/asm/thread_info_64.h
6626@@ -63,6 +63,8 @@ struct thread_info {
6627 struct pt_regs *kern_una_regs;
6628 unsigned int kern_una_insn;
6629
6630+ unsigned long lowest_stack;
6631+
6632 unsigned long fpregs[0] __attribute__ ((aligned(64)));
6633 };
6634
6635@@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
6636 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
6637 /* flag bit 6 is available */
6638 #define TIF_32BIT 7 /* 32-bit binary */
6639-/* flag bit 8 is available */
6640+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
6641 #define TIF_SECCOMP 9 /* secure computing */
6642 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
6643 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
6644+
6645 /* NOTE: Thread flags >= 12 should be ones we have no interest
6646 * in using in assembly, else we can't use the mask as
6647 * an immediate value in instructions such as andcc.
6648@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
6649 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
6650 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6651 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
6652+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6653
6654 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
6655 _TIF_DO_NOTIFY_RESUME_MASK | \
6656 _TIF_NEED_RESCHED)
6657 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
6658
6659+#define _TIF_WORK_SYSCALL \
6660+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
6661+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6662+
6663+
6664 /*
6665 * Thread-synchronous status.
6666 *
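Folding the trace-related flags plus the new TIF_GRSEC_SETXID into _TIF_WORK_SYSCALL lets the assembly entry path (the syscalls.S hunks below) test everything with a single andcc against an immediate; all five bits stay below 12, per the comment above, so the mask fits SPARC's 13-bit immediate field. A compile-and-run sketch of the mask test, where only the TIF_SYSCALL_TRACE value is assumed (it is not visible in this hunk):

#include <stdio.h>

#define TIF_SYSCALL_TRACE       0   /* assumed; not shown in this hunk */
#define TIF_GRSEC_SETXID        8
#define TIF_SECCOMP             9
#define TIF_SYSCALL_AUDIT      10
#define TIF_SYSCALL_TRACEPOINT 11

#define _TIF_WORK_SYSCALL \
        ((1UL << TIF_SYSCALL_TRACE) | (1UL << TIF_SECCOMP) | \
         (1UL << TIF_SYSCALL_AUDIT) | (1UL << TIF_SYSCALL_TRACEPOINT) | \
         (1UL << TIF_GRSEC_SETXID))

int main(void)
{
        unsigned long flags = 1UL << TIF_GRSEC_SETXID;

        printf("mask %#lx fits a 13-bit immediate: %s\n", _TIF_WORK_SYSCALL,
               _TIF_WORK_SYSCALL < (1UL << 12) ? "yes" : "no");
        if (flags & _TIF_WORK_SYSCALL)
                puts("slow path: syscall_trace_enter()");
        return 0;
}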
6667diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
6668index 0167d26..9acd8ed 100644
6669--- a/arch/sparc/include/asm/uaccess.h
6670+++ b/arch/sparc/include/asm/uaccess.h
6671@@ -1,5 +1,13 @@
6672 #ifndef ___ASM_SPARC_UACCESS_H
6673 #define ___ASM_SPARC_UACCESS_H
6674+
6675+#ifdef __KERNEL__
6676+#ifndef __ASSEMBLY__
6677+#include <linux/types.h>
6678+extern void check_object_size(const void *ptr, unsigned long n, bool to);
6679+#endif
6680+#endif
6681+
6682 #if defined(__sparc__) && defined(__arch64__)
6683 #include <asm/uaccess_64.h>
6684 #else
6685diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
6686index 53a28dd..50c38c3 100644
6687--- a/arch/sparc/include/asm/uaccess_32.h
6688+++ b/arch/sparc/include/asm/uaccess_32.h
6689@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
6690
6691 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6692 {
6693- if (n && __access_ok((unsigned long) to, n))
6694+ if ((long)n < 0)
6695+ return n;
6696+
6697+ if (n && __access_ok((unsigned long) to, n)) {
6698+ if (!__builtin_constant_p(n))
6699+ check_object_size(from, n, true);
6700 return __copy_user(to, (__force void __user *) from, n);
6701- else
6702+ } else
6703 return n;
6704 }
6705
6706 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
6707 {
6708+ if ((long)n < 0)
6709+ return n;
6710+
6711+ if (!__builtin_constant_p(n))
6712+ check_object_size(from, n, true);
6713+
6714 return __copy_user(to, (__force void __user *) from, n);
6715 }
6716
6717 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6718 {
6719- if (n && __access_ok((unsigned long) from, n))
6720+ if ((long)n < 0)
6721+ return n;
6722+
6723+ if (n && __access_ok((unsigned long) from, n)) {
6724+ if (!__builtin_constant_p(n))
6725+ check_object_size(to, n, false);
6726 return __copy_user((__force void __user *) to, from, n);
6727- else
6728+ } else
6729 return n;
6730 }
6731
6732 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
6733 {
6734+ if ((long)n < 0)
6735+ return n;
6736+
6737 return __copy_user((__force void __user *) to, from, n);
6738 }
6739
6740diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
6741index 73083e1..2bc62a6 100644
6742--- a/arch/sparc/include/asm/uaccess_64.h
6743+++ b/arch/sparc/include/asm/uaccess_64.h
6744@@ -10,6 +10,7 @@
6745 #include <linux/compiler.h>
6746 #include <linux/string.h>
6747 #include <linux/thread_info.h>
6748+#include <linux/kernel.h>
6749 #include <asm/asi.h>
6750 #include <asm/spitfire.h>
6751 #include <asm-generic/uaccess-unaligned.h>
6752@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
6753 static inline unsigned long __must_check
6754 copy_from_user(void *to, const void __user *from, unsigned long size)
6755 {
6756- unsigned long ret = ___copy_from_user(to, from, size);
6757+ unsigned long ret;
6758
6759+ if ((long)size < 0 || size > INT_MAX)
6760+ return size;
6761+
6762+ if (!__builtin_constant_p(size))
6763+ check_object_size(to, size, false);
6764+
6765+ ret = ___copy_from_user(to, from, size);
6766 if (unlikely(ret))
6767 ret = copy_from_user_fixup(to, from, size);
6768
6769@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
6770 static inline unsigned long __must_check
6771 copy_to_user(void __user *to, const void *from, unsigned long size)
6772 {
6773- unsigned long ret = ___copy_to_user(to, from, size);
6774+ unsigned long ret;
6775
6776+ if ((long)size < 0 || size > INT_MAX)
6777+ return size;
6778+
6779+ if (!__builtin_constant_p(size))
6780+ check_object_size(from, size, true);
6781+
6782+ ret = ___copy_to_user(to, from, size);
6783 if (unlikely(ret))
6784 ret = copy_to_user_fixup(to, from, size);
6785 return ret;
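Both uaccess headers gain the same two defenses: a sign check that catches a negative length widened to unsigned long (the 64-bit version additionally caps at INT_MAX), and a call to check_object_size(), the PAX_USERCOPY hook declared in uaccess.h above, skipped for compile-time-constant sizes. A userspace model with stand-ins (the stub below only prints; the real hook verifies the copy stays inside one slab or stack object, and memcpy() replaces __copy_user()):

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the PAX_USERCOPY hook */
static void check_object_size_stub(const void *ptr, unsigned long n, int to_user)
{
        printf("checking %lu-byte %s copy\n",
               n, to_user ? "to-user" : "from-user");
        (void)ptr;
}

static unsigned long copy_from_user_sketch(void *to, const void *from,
                                           unsigned long n)
{
        if ((long)n < 0 || n > INT_MAX)   /* negative int widened, or absurd */
                return n;                 /* convention: "n bytes not copied" */

        if (!__builtin_constant_p(n))
                check_object_size_stub(to, n, 0);

        memcpy(to, from, n);              /* models __copy_user() */
        return 0;
}

int main(void)
{
        char src[8] = "grsec", dst[8];

        printf("%lu left\n", copy_from_user_sketch(dst, src, sizeof(src)));
        printf("%lu left\n", copy_from_user_sketch(dst, src, (unsigned long)-4));
        return 0;
}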
6786diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
6787index 6cf591b..b49e65a 100644
6788--- a/arch/sparc/kernel/Makefile
6789+++ b/arch/sparc/kernel/Makefile
6790@@ -3,7 +3,7 @@
6791 #
6792
6793 asflags-y := -ansi
6794-ccflags-y := -Werror
6795+#ccflags-y := -Werror
6796
6797 extra-y := head_$(BITS).o
6798
6799diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6800index 487bffb..955a925 100644
6801--- a/arch/sparc/kernel/process_32.c
6802+++ b/arch/sparc/kernel/process_32.c
6803@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
6804
6805 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6806 r->psr, r->pc, r->npc, r->y, print_tainted());
6807- printk("PC: <%pS>\n", (void *) r->pc);
6808+ printk("PC: <%pA>\n", (void *) r->pc);
6809 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6810 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6811 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6812 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6813 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6814 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6815- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6816+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6817
6818 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6819 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6820@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6821 rw = (struct reg_window32 *) fp;
6822 pc = rw->ins[7];
6823 printk("[%08lx : ", pc);
6824- printk("%pS ] ", (void *) pc);
6825+ printk("%pA ] ", (void *) pc);
6826 fp = rw->ins[6];
6827 } while (++count < 16);
6828 printk("\n");
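Here and in the process_64.c, traps, and fault hunks that follow, every %pS in oops/backtrace output becomes %pA, a printk extension this patch adds to lib/vsprintf.c (outside this excerpt) so that trusted crash paths stay symbolized even when GRKERNSEC_HIDESYM censors ordinary pointer formats. A toy dispatcher showing the shape of such a format hook; the exact censoring policy modeled here is an approximation, not a transcription of the patch:

#include <stdio.h>

/* toy %p extension dispatch: under HIDESYM, plain %pS output is
 * withheld while the %pA variant still prints the symbol */
static void print_ptr(char ext, const char *sym, int hidesym)
{
        if (ext == 'S' && hidesym)
                puts("<(censored)>");
        else
                printf("<%s>\n", sym);
}

int main(void)
{
        print_ptr('S', "show_regs+0x40/0x100", 1);
        print_ptr('A', "show_regs+0x40/0x100", 1);
        return 0;
}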
6829diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6830index c6e0c29..052832b 100644
6831--- a/arch/sparc/kernel/process_64.c
6832+++ b/arch/sparc/kernel/process_64.c
6833@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
6834 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6835 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6836 if (regs->tstate & TSTATE_PRIV)
6837- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6838+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6839 }
6840
6841 void show_regs(struct pt_regs *regs)
6842 {
6843 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6844 regs->tpc, regs->tnpc, regs->y, print_tainted());
6845- printk("TPC: <%pS>\n", (void *) regs->tpc);
6846+ printk("TPC: <%pA>\n", (void *) regs->tpc);
6847 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6848 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6849 regs->u_regs[3]);
6850@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
6851 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6852 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6853 regs->u_regs[15]);
6854- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6855+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6856 show_regwindow(regs);
6857 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
6858 }
6859@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
6860 ((tp && tp->task) ? tp->task->pid : -1));
6861
6862 if (gp->tstate & TSTATE_PRIV) {
6863- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6864+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6865 (void *) gp->tpc,
6866 (void *) gp->o7,
6867 (void *) gp->i7,
6868diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
6869index 7ff45e4..a58f271 100644
6870--- a/arch/sparc/kernel/ptrace_64.c
6871+++ b/arch/sparc/kernel/ptrace_64.c
6872@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
6873 return ret;
6874 }
6875
6876+#ifdef CONFIG_GRKERNSEC_SETXID
6877+extern void gr_delayed_cred_worker(void);
6878+#endif
6879+
6880 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6881 {
6882 int ret = 0;
6883@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6884 /* do the secure computing check first */
6885 secure_computing_strict(regs->u_regs[UREG_G1]);
6886
6887+#ifdef CONFIG_GRKERNSEC_SETXID
6888+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6889+ gr_delayed_cred_worker();
6890+#endif
6891+
6892 if (test_thread_flag(TIF_SYSCALL_TRACE))
6893 ret = tracehook_report_syscall_entry(regs);
6894
6895@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
6896
6897 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
6898 {
6899+#ifdef CONFIG_GRKERNSEC_SETXID
6900+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6901+ gr_delayed_cred_worker();
6902+#endif
6903+
6904 audit_syscall_exit(regs);
6905
6906 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
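TIF_GRSEC_SETXID closes a window in multithreaded setuid(): the thread that changes credentials flags its siblings, and each sibling applies the update via gr_delayed_cred_worker() (defined in the grsecurity core, outside this excerpt) at its next syscall entry or exit, before any traced work runs; the test-and-clear keeps it one-shot. A single-threaded model of the flag's life cycle, with the helper reduced to a print:

#include <stdbool.h>
#include <stdio.h>

static bool tif_grsec_setxid;   /* a per-thread flag in the real kernel */

static void gr_delayed_cred_worker_sketch(void)
{
        puts("applying credentials queued by another thread's setuid()");
}

static void syscall_trace_enter_sketch(void)
{
        /* test-and-clear: consume the pending update exactly once */
        if (tif_grsec_setxid) {
                tif_grsec_setxid = false;
                gr_delayed_cred_worker_sketch();
        }
}

int main(void)
{
        tif_grsec_setxid = true;        /* set by the thread changing uid */
        syscall_trace_enter_sketch();
        syscall_trace_enter_sketch();   /* second entry: nothing to do */
        return 0;
}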
6907diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6908index 0c9b31b..55a8ba6 100644
6909--- a/arch/sparc/kernel/sys_sparc_32.c
6910+++ b/arch/sparc/kernel/sys_sparc_32.c
6911@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
6912 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6913 {
6914 struct vm_area_struct * vmm;
6915+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6916
6917 if (flags & MAP_FIXED) {
6918 /* We do not accept a shared mapping if it would violate
6919@@ -54,7 +55,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6920 if (len > TASK_SIZE - PAGE_SIZE)
6921 return -ENOMEM;
6922 if (!addr)
6923- addr = TASK_UNMAPPED_BASE;
6924+ addr = current->mm->mmap_base;
6925
6926 if (flags & MAP_SHARED)
6927 addr = COLOUR_ALIGN(addr);
6928@@ -65,7 +66,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6929 /* At this point: (!vmm || addr < vmm->vm_end). */
6930 if (TASK_SIZE - PAGE_SIZE - len < addr)
6931 return -ENOMEM;
6932- if (!vmm || addr + len <= vmm->vm_start)
6933+ if (check_heap_stack_gap(vmm, addr, len, offset))
6934 return addr;
6935 addr = vmm->vm_end;
6936 if (flags & MAP_SHARED)
6937diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6938index 878ef3d..f100719 100644
6939--- a/arch/sparc/kernel/sys_sparc_64.c
6940+++ b/arch/sparc/kernel/sys_sparc_64.c
6941@@ -102,12 +102,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6942 unsigned long task_size = TASK_SIZE;
6943 unsigned long start_addr;
6944 int do_color_align;
6945+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6946
6947 if (flags & MAP_FIXED) {
6948 /* We do not accept a shared mapping if it would violate
6949 * cache aliasing constraints.
6950 */
6951- if ((flags & MAP_SHARED) &&
6952+ if ((filp || (flags & MAP_SHARED)) &&
6953 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6954 return -EINVAL;
6955 return addr;
6956@@ -122,6 +123,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6957 if (filp || (flags & MAP_SHARED))
6958 do_color_align = 1;
6959
6960+#ifdef CONFIG_PAX_RANDMMAP
6961+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6962+#endif
6963+
6964 if (addr) {
6965 if (do_color_align)
6966 addr = COLOUR_ALIGN(addr, pgoff);
6967@@ -129,15 +134,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6968 addr = PAGE_ALIGN(addr);
6969
6970 vma = find_vma(mm, addr);
6971- if (task_size - len >= addr &&
6972- (!vma || addr + len <= vma->vm_start))
6973+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6974 return addr;
6975 }
6976
6977 if (len > mm->cached_hole_size) {
6978- start_addr = addr = mm->free_area_cache;
6979+ start_addr = addr = mm->free_area_cache;
6980 } else {
6981- start_addr = addr = TASK_UNMAPPED_BASE;
6982+ start_addr = addr = mm->mmap_base;
6983 mm->cached_hole_size = 0;
6984 }
6985
6986@@ -157,14 +161,14 @@ full_search:
6987 vma = find_vma(mm, VA_EXCLUDE_END);
6988 }
6989 if (unlikely(task_size < addr)) {
6990- if (start_addr != TASK_UNMAPPED_BASE) {
6991- start_addr = addr = TASK_UNMAPPED_BASE;
6992+ if (start_addr != mm->mmap_base) {
6993+ start_addr = addr = mm->mmap_base;
6994 mm->cached_hole_size = 0;
6995 goto full_search;
6996 }
6997 return -ENOMEM;
6998 }
6999- if (likely(!vma || addr + len <= vma->vm_start)) {
7000+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
7001 /*
7002 * Remember the place where we stopped the search:
7003 */
7004@@ -190,6 +194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7005 unsigned long task_size = STACK_TOP32;
7006 unsigned long addr = addr0;
7007 int do_color_align;
7008+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7009
7010 /* This should only ever run for 32-bit processes. */
7011 BUG_ON(!test_thread_flag(TIF_32BIT));
7012@@ -198,7 +203,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7013 /* We do not accept a shared mapping if it would violate
7014 * cache aliasing constraints.
7015 */
7016- if ((flags & MAP_SHARED) &&
7017+ if ((filp || (flags & MAP_SHARED)) &&
7018 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
7019 return -EINVAL;
7020 return addr;
7021@@ -219,8 +224,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7022 addr = PAGE_ALIGN(addr);
7023
7024 vma = find_vma(mm, addr);
7025- if (task_size - len >= addr &&
7026- (!vma || addr + len <= vma->vm_start))
7027+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7028 return addr;
7029 }
7030
7031@@ -241,7 +245,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7032 /* make sure it can fit in the remaining address space */
7033 if (likely(addr > len)) {
7034 vma = find_vma(mm, addr-len);
7035- if (!vma || addr <= vma->vm_start) {
7036+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
7037 /* remember the address as a hint for next time */
7038 return (mm->free_area_cache = addr-len);
7039 }
7040@@ -250,18 +254,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7041 if (unlikely(mm->mmap_base < len))
7042 goto bottomup;
7043
7044- addr = mm->mmap_base-len;
7045- if (do_color_align)
7046- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
7047+ addr = mm->mmap_base - len;
7048
7049 do {
7050+ if (do_color_align)
7051+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
7052 /*
7053 * Lookup failure means no vma is above this address,
7054 * else if new region fits below vma->vm_start,
7055 * return with success:
7056 */
7057 vma = find_vma(mm, addr);
7058- if (likely(!vma || addr+len <= vma->vm_start)) {
7059+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
7060 /* remember the address as a hint for next time */
7061 return (mm->free_area_cache = addr);
7062 }
7063@@ -271,10 +275,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7064 mm->cached_hole_size = vma->vm_start - addr;
7065
7066 /* try just below the current vma->vm_start */
7067- addr = vma->vm_start-len;
7068- if (do_color_align)
7069- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
7070- } while (likely(len < vma->vm_start));
7071+ addr = skip_heap_stack_gap(vma, len, offset);
7072+ } while (!IS_ERR_VALUE(addr));
7073
7074 bottomup:
7075 /*
7076@@ -348,6 +350,10 @@ static unsigned long mmap_rnd(void)
7077 {
7078 unsigned long rnd = 0UL;
7079
7080+#ifdef CONFIG_PAX_RANDMMAP
7081+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7082+#endif
7083+
7084 if (current->flags & PF_RANDOMIZE) {
7085 unsigned long val = get_random_int();
7086 if (test_thread_flag(TIF_32BIT))
7087@@ -373,6 +379,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7088 gap == RLIM_INFINITY ||
7089 sysctl_legacy_va_layout) {
7090 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7091+
7092+#ifdef CONFIG_PAX_RANDMMAP
7093+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7094+ mm->mmap_base += mm->delta_mmap;
7095+#endif
7096+
7097 mm->get_unmapped_area = arch_get_unmapped_area;
7098 mm->unmap_area = arch_unmap_area;
7099 } else {
7100@@ -385,6 +397,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7101 gap = (task_size / 6 * 5);
7102
7103 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
7104+
7105+#ifdef CONFIG_PAX_RANDMMAP
7106+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7107+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7108+#endif
7109+
7110 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7111 mm->unmap_area = arch_unmap_area_topdown;
7112 }
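Throughout these VMA searches the plain "!vma || addr + len <= vma->vm_start" fit test becomes check_heap_stack_gap(vma, addr, len, offset), with offset supplied by gr_rand_threadstack_offset(); both helpers live in the grsecurity core outside this excerpt. The assumed intent: a candidate mapping must not only fit below the next VMA but also keep a randomized guard gap when that VMA is a stack, so a heap spray cannot land flush against it. A sketch under exactly those assumptions:

#include <stdbool.h>
#include <stdio.h>

struct vma_sketch { unsigned long vm_start; bool grows_down; };

/* assumed semantics: fit below the next VMA, and keep 'offset'
 * bytes of slack when that VMA is a downward-growing stack */
static bool gap_ok(const struct vma_sketch *next, unsigned long addr,
                   unsigned long len, unsigned long offset)
{
        if (!next)
                return true;
        if (addr + len > next->vm_start)
                return false;
        if (next->grows_down)
                return next->vm_start - (addr + len) >= offset;
        return true;
}

int main(void)
{
        struct vma_sketch stack = { 0x70000000UL, true };

        /* 0: flush against the stack; 1: guard gap preserved */
        printf("%d\n", gap_ok(&stack, 0x6fff0000UL, 0x10000UL, 0x1000UL));
        printf("%d\n", gap_ok(&stack, 0x6ffe0000UL, 0x10000UL, 0x1000UL));
        return 0;
}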
7113diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
7114index bf23477..b7425a6 100644
7115--- a/arch/sparc/kernel/syscalls.S
7116+++ b/arch/sparc/kernel/syscalls.S
7117@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
7118 #endif
7119 .align 32
7120 1: ldx [%g6 + TI_FLAGS], %l5
7121- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7122+ andcc %l5, _TIF_WORK_SYSCALL, %g0
7123 be,pt %icc, rtrap
7124 nop
7125 call syscall_trace_leave
7126@@ -189,7 +189,7 @@ linux_sparc_syscall32:
7127
7128 srl %i5, 0, %o5 ! IEU1
7129 srl %i2, 0, %o2 ! IEU0 Group
7130- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7131+ andcc %l0, _TIF_WORK_SYSCALL, %g0
7132 bne,pn %icc, linux_syscall_trace32 ! CTI
7133 mov %i0, %l5 ! IEU1
7134 call %l7 ! CTI Group brk forced
7135@@ -212,7 +212,7 @@ linux_sparc_syscall:
7136
7137 mov %i3, %o3 ! IEU1
7138 mov %i4, %o4 ! IEU0 Group
7139- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7140+ andcc %l0, _TIF_WORK_SYSCALL, %g0
7141 bne,pn %icc, linux_syscall_trace ! CTI Group
7142 mov %i0, %l5 ! IEU0
7143 2: call %l7 ! CTI Group brk forced
7144@@ -228,7 +228,7 @@ ret_sys_call:
7145
7146 cmp %o0, -ERESTART_RESTARTBLOCK
7147 bgeu,pn %xcc, 1f
7148- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
7149+ andcc %l0, _TIF_WORK_SYSCALL, %g0
7150 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
7151
7152 2:
7153diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
7154index a5785ea..405c5f7 100644
7155--- a/arch/sparc/kernel/traps_32.c
7156+++ b/arch/sparc/kernel/traps_32.c
7157@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
7158 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
7159 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
7160
7161+extern void gr_handle_kernel_exploit(void);
7162+
7163 void die_if_kernel(char *str, struct pt_regs *regs)
7164 {
7165 static int die_counter;
7166@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7167 count++ < 30 &&
7168 (((unsigned long) rw) >= PAGE_OFFSET) &&
7169 !(((unsigned long) rw) & 0x7)) {
7170- printk("Caller[%08lx]: %pS\n", rw->ins[7],
7171+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
7172 (void *) rw->ins[7]);
7173 rw = (struct reg_window32 *)rw->ins[6];
7174 }
7175 }
7176 printk("Instruction DUMP:");
7177 instruction_dump ((unsigned long *) regs->pc);
7178- if(regs->psr & PSR_PS)
7179+ if(regs->psr & PSR_PS) {
7180+ gr_handle_kernel_exploit();
7181 do_exit(SIGKILL);
7182+ }
7183 do_exit(SIGSEGV);
7184 }
7185
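die_if_kernel() now calls gr_handle_kernel_exploit() before exiting whenever the fault occurred in privileged context; the helper is defined in the grsecurity core outside this excerpt and applies anti-bruteforce deterrence on the assumption that a kernel-mode oops is an exploit attempt. The shape of the call-site change in C, with the helper body reduced to a placeholder:

#include <stdio.h>
#include <stdlib.h>

static void gr_handle_kernel_exploit_stub(void)
{
        /* placeholder: the real helper penalizes the offending
         * user before the task is killed */
        fputs("oops in privileged context: deterrence hook\n", stderr);
}

static void die_if_kernel_sketch(int privileged)
{
        if (privileged) {
                gr_handle_kernel_exploit_stub();
                exit(1);        /* models do_exit(SIGKILL) */
        }
        exit(2);                /* models do_exit(SIGSEGV) */
}

int main(void)
{
        die_if_kernel_sketch(1);
        return 0;
}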
7186diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
7187index b66a779..8e8d66c 100644
7188--- a/arch/sparc/kernel/traps_64.c
7189+++ b/arch/sparc/kernel/traps_64.c
7190@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
7191 i + 1,
7192 p->trapstack[i].tstate, p->trapstack[i].tpc,
7193 p->trapstack[i].tnpc, p->trapstack[i].tt);
7194- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
7195+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
7196 }
7197 }
7198
7199@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
7200
7201 lvl -= 0x100;
7202 if (regs->tstate & TSTATE_PRIV) {
7203+
7204+#ifdef CONFIG_PAX_REFCOUNT
7205+ if (lvl == 6)
7206+ pax_report_refcount_overflow(regs);
7207+#endif
7208+
7209 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
7210 die_if_kernel(buffer, regs);
7211 }
7212@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
7213 void bad_trap_tl1(struct pt_regs *regs, long lvl)
7214 {
7215 char buffer[32];
7216-
7217+
7218 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
7219 0, lvl, SIGTRAP) == NOTIFY_STOP)
7220 return;
7221
7222+#ifdef CONFIG_PAX_REFCOUNT
7223+ if (lvl == 6)
7224+ pax_report_refcount_overflow(regs);
7225+#endif
7226+
7227 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
7228
7229 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
7230@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
7231 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
7232 printk("%s" "ERROR(%d): ",
7233 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
7234- printk("TPC<%pS>\n", (void *) regs->tpc);
7235+ printk("TPC<%pA>\n", (void *) regs->tpc);
7236 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
7237 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
7238 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
7239@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
7240 smp_processor_id(),
7241 (type & 0x1) ? 'I' : 'D',
7242 regs->tpc);
7243- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
7244+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
7245 panic("Irrecoverable Cheetah+ parity error.");
7246 }
7247
7248@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
7249 smp_processor_id(),
7250 (type & 0x1) ? 'I' : 'D',
7251 regs->tpc);
7252- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
7253+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
7254 }
7255
7256 struct sun4v_error_entry {
7257@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
7258
7259 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
7260 regs->tpc, tl);
7261- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
7262+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
7263 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
7264- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
7265+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
7266 (void *) regs->u_regs[UREG_I7]);
7267 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
7268 "pte[%lx] error[%lx]\n",
7269@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
7270
7271 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
7272 regs->tpc, tl);
7273- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
7274+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
7275 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
7276- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
7277+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
7278 (void *) regs->u_regs[UREG_I7]);
7279 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
7280 "pte[%lx] error[%lx]\n",
7281@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
7282 fp = (unsigned long)sf->fp + STACK_BIAS;
7283 }
7284
7285- printk(" [%016lx] %pS\n", pc, (void *) pc);
7286+ printk(" [%016lx] %pA\n", pc, (void *) pc);
7287 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7288 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
7289 int index = tsk->curr_ret_stack;
7290 if (tsk->ret_stack && index >= graph) {
7291 pc = tsk->ret_stack[index - graph].ret;
7292- printk(" [%016lx] %pS\n", pc, (void *) pc);
7293+ printk(" [%016lx] %pA\n", pc, (void *) pc);
7294 graph++;
7295 }
7296 }
7297@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
7298 return (struct reg_window *) (fp + STACK_BIAS);
7299 }
7300
7301+extern void gr_handle_kernel_exploit(void);
7302+
7303 void die_if_kernel(char *str, struct pt_regs *regs)
7304 {
7305 static int die_counter;
7306@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7307 while (rw &&
7308 count++ < 30 &&
7309 kstack_valid(tp, (unsigned long) rw)) {
7310- printk("Caller[%016lx]: %pS\n", rw->ins[7],
7311+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
7312 (void *) rw->ins[7]);
7313
7314 rw = kernel_stack_up(rw);
7315@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
7316 }
7317 user_instruction_dump ((unsigned int __user *) regs->tpc);
7318 }
7319- if (regs->tstate & TSTATE_PRIV)
7320+ if (regs->tstate & TSTATE_PRIV) {
7321+ gr_handle_kernel_exploit();
7322 do_exit(SIGKILL);
7323+ }
7324 do_exit(SIGSEGV);
7325 }
7326 EXPORT_SYMBOL(die_if_kernel);
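This file is where the "tvs %icc, 6" / "tvs %xcc, 6" instructions sprinkled through the patch land: the software trap reaches bad_trap()/bad_trap_tl1() as lvl 0x106, and after the existing "lvl -= 0x100" the new PAX_REFCOUNT hunks report lvl 6 in privileged context through pax_report_refcount_overflow() before the usual "bad sw trap" death. A compressed C restatement of that dispatch:

#include <stdio.h>

static void bad_trap_sketch(long lvl, int privileged)
{
        lvl -= 0x100;                   /* as in bad_trap() above */
        if (privileged) {
                if (lvl == 6)
                        puts("pax_report_refcount_overflow()");
                /* then the existing path: die_if_kernel() */
                printf("Kernel bad sw trap %lx\n", lvl);
        }
}

int main(void)
{
        bad_trap_sketch(0x106, 1);      /* overflow trap from addcc/tvs */
        bad_trap_sketch(0x107, 1);      /* any other bad trap */
        return 0;
}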
7327diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
7328index 8201c25e..072a2a7 100644
7329--- a/arch/sparc/kernel/unaligned_64.c
7330+++ b/arch/sparc/kernel/unaligned_64.c
7331@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
7332 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
7333
7334 if (__ratelimit(&ratelimit)) {
7335- printk("Kernel unaligned access at TPC[%lx] %pS\n",
7336+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
7337 regs->tpc, (void *) regs->tpc);
7338 }
7339 }
7340diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
7341index 8410065f2..4fd4ca22 100644
7342--- a/arch/sparc/lib/Makefile
7343+++ b/arch/sparc/lib/Makefile
7344@@ -2,7 +2,7 @@
7345 #
7346
7347 asflags-y := -ansi -DST_DIV0=0x02
7348-ccflags-y := -Werror
7349+#ccflags-y := -Werror
7350
7351 lib-$(CONFIG_SPARC32) += ashrdi3.o
7352 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
7353diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
7354index 85c233d..68500e0 100644
7355--- a/arch/sparc/lib/atomic_64.S
7356+++ b/arch/sparc/lib/atomic_64.S
7357@@ -17,7 +17,12 @@
7358 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
7359 BACKOFF_SETUP(%o2)
7360 1: lduw [%o1], %g1
7361- add %g1, %o0, %g7
7362+ addcc %g1, %o0, %g7
7363+
7364+#ifdef CONFIG_PAX_REFCOUNT
7365+ tvs %icc, 6
7366+#endif
7367+
7368 cas [%o1], %g1, %g7
7369 cmp %g1, %g7
7370 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7371@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
7372 2: BACKOFF_SPIN(%o2, %o3, 1b)
7373 ENDPROC(atomic_add)
7374
7375+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7376+ BACKOFF_SETUP(%o2)
7377+1: lduw [%o1], %g1
7378+ add %g1, %o0, %g7
7379+ cas [%o1], %g1, %g7
7380+ cmp %g1, %g7
7381+ bne,pn %icc, 2f
7382+ nop
7383+ retl
7384+ nop
7385+2: BACKOFF_SPIN(%o2, %o3, 1b)
7386+ENDPROC(atomic_add_unchecked)
7387+
7388 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7389 BACKOFF_SETUP(%o2)
7390 1: lduw [%o1], %g1
7391- sub %g1, %o0, %g7
7392+ subcc %g1, %o0, %g7
7393+
7394+#ifdef CONFIG_PAX_REFCOUNT
7395+ tvs %icc, 6
7396+#endif
7397+
7398 cas [%o1], %g1, %g7
7399 cmp %g1, %g7
7400 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7401@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7402 2: BACKOFF_SPIN(%o2, %o3, 1b)
7403 ENDPROC(atomic_sub)
7404
7405+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
7406+ BACKOFF_SETUP(%o2)
7407+1: lduw [%o1], %g1
7408+ sub %g1, %o0, %g7
7409+ cas [%o1], %g1, %g7
7410+ cmp %g1, %g7
7411+ bne,pn %icc, 2f
7412+ nop
7413+ retl
7414+ nop
7415+2: BACKOFF_SPIN(%o2, %o3, 1b)
7416+ENDPROC(atomic_sub_unchecked)
7417+
7418 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7419 BACKOFF_SETUP(%o2)
7420 1: lduw [%o1], %g1
7421- add %g1, %o0, %g7
7422+ addcc %g1, %o0, %g7
7423+
7424+#ifdef CONFIG_PAX_REFCOUNT
7425+ tvs %icc, 6
7426+#endif
7427+
7428 cas [%o1], %g1, %g7
7429 cmp %g1, %g7
7430 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7431@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7432 2: BACKOFF_SPIN(%o2, %o3, 1b)
7433 ENDPROC(atomic_add_ret)
7434
7435+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7436+ BACKOFF_SETUP(%o2)
7437+1: lduw [%o1], %g1
7438+ addcc %g1, %o0, %g7
7439+ cas [%o1], %g1, %g7
7440+ cmp %g1, %g7
7441+ bne,pn %icc, 2f
7442+ add %g7, %o0, %g7
7443+ sra %g7, 0, %o0
7444+ retl
7445+ nop
7446+2: BACKOFF_SPIN(%o2, %o3, 1b)
7447+ENDPROC(atomic_add_ret_unchecked)
7448+
7449 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7450 BACKOFF_SETUP(%o2)
7451 1: lduw [%o1], %g1
7452- sub %g1, %o0, %g7
7453+ subcc %g1, %o0, %g7
7454+
7455+#ifdef CONFIG_PAX_REFCOUNT
7456+ tvs %icc, 6
7457+#endif
7458+
7459 cas [%o1], %g1, %g7
7460 cmp %g1, %g7
7461 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
7462@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
7463 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7464 BACKOFF_SETUP(%o2)
7465 1: ldx [%o1], %g1
7466- add %g1, %o0, %g7
7467+ addcc %g1, %o0, %g7
7468+
7469+#ifdef CONFIG_PAX_REFCOUNT
7470+ tvs %xcc, 6
7471+#endif
7472+
7473 casx [%o1], %g1, %g7
7474 cmp %g1, %g7
7475 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7476@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
7477 2: BACKOFF_SPIN(%o2, %o3, 1b)
7478 ENDPROC(atomic64_add)
7479
7480+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7481+ BACKOFF_SETUP(%o2)
7482+1: ldx [%o1], %g1
7483+ addcc %g1, %o0, %g7
7484+ casx [%o1], %g1, %g7
7485+ cmp %g1, %g7
7486+ bne,pn %xcc, 2f
7487+ nop
7488+ retl
7489+ nop
7490+2: BACKOFF_SPIN(%o2, %o3, 1b)
7491+ENDPROC(atomic64_add_unchecked)
7492+
7493 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7494 BACKOFF_SETUP(%o2)
7495 1: ldx [%o1], %g1
7496- sub %g1, %o0, %g7
7497+ subcc %g1, %o0, %g7
7498+
7499+#ifdef CONFIG_PAX_REFCOUNT
7500+ tvs %xcc, 6
7501+#endif
7502+
7503 casx [%o1], %g1, %g7
7504 cmp %g1, %g7
7505 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7506@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
7507 2: BACKOFF_SPIN(%o2, %o3, 1b)
7508 ENDPROC(atomic64_sub)
7509
7510+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
7511+ BACKOFF_SETUP(%o2)
7512+1: ldx [%o1], %g1
7513+ subcc %g1, %o0, %g7
7514+ casx [%o1], %g1, %g7
7515+ cmp %g1, %g7
7516+ bne,pn %xcc, 2f
7517+ nop
7518+ retl
7519+ nop
7520+2: BACKOFF_SPIN(%o2, %o3, 1b)
7521+ENDPROC(atomic64_sub_unchecked)
7522+
7523 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7524 BACKOFF_SETUP(%o2)
7525 1: ldx [%o1], %g1
7526- add %g1, %o0, %g7
7527+ addcc %g1, %o0, %g7
7528+
7529+#ifdef CONFIG_PAX_REFCOUNT
7530+ tvs %xcc, 6
7531+#endif
7532+
7533 casx [%o1], %g1, %g7
7534 cmp %g1, %g7
7535 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
7536@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
7537 2: BACKOFF_SPIN(%o2, %o3, 1b)
7538 ENDPROC(atomic64_add_ret)
7539
7540+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
7541+ BACKOFF_SETUP(%o2)
7542+1: ldx [%o1], %g1
7543+ addcc %g1, %o0, %g7
7544+ casx [%o1], %g1, %g7
7545+ cmp %g1, %g7
7546+ bne,pn %xcc, 2f
7547+ add %g7, %o0, %g7
7548+ mov %g7, %o0
7549+ retl
7550+ nop
7551+2: BACKOFF_SPIN(%o2, %o3, 1b)
7552+ENDPROC(atomic64_add_ret_unchecked)
7553+
7554 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
7555 BACKOFF_SETUP(%o2)
7556 1: ldx [%o1], %g1
7557- sub %g1, %o0, %g7
7558+ subcc %g1, %o0, %g7
7559+
7560+#ifdef CONFIG_PAX_REFCOUNT
7561+ tvs %xcc, 6
7562+#endif
7563+
7564 casx [%o1], %g1, %g7
7565 cmp %g1, %g7
7566 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
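Each instrumented atomic op gains an *_unchecked twin that keeps the original wrapping add/sub, for the minority of counters (statistics and the like) that may legitimately overflow; callers elsewhere in the patch are converted to the unchecked type to select them. The split in C, with atomicity and the cas retry loop elided:

#include <stdio.h>
#include <stdlib.h>

/* models 'addcc ... tvs %icc, 6': trap rather than wrap */
static int atomic_add_checked(int *v, int i)
{
        int new;

        if (__builtin_add_overflow(*v, i, &new))
                abort();
        return *v = new;
}

/* models plain 'add': two's-complement wrap is accepted behavior */
static int atomic_add_unchecked_sketch(int *v, int i)
{
        return *v = (int)((unsigned int)*v + (unsigned int)i);
}

int main(void)
{
        int a = 0x7fffffff, b = 0;

        printf("%d\n", atomic_add_unchecked_sketch(&a, 1)); /* wraps to INT_MIN */
        printf("%d\n", atomic_add_checked(&b, 1));
        return 0;
}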
7567diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
7568index 0c4e35e..745d3e4 100644
7569--- a/arch/sparc/lib/ksyms.c
7570+++ b/arch/sparc/lib/ksyms.c
7571@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
7572
7573 /* Atomic counter implementation. */
7574 EXPORT_SYMBOL(atomic_add);
7575+EXPORT_SYMBOL(atomic_add_unchecked);
7576 EXPORT_SYMBOL(atomic_add_ret);
7577+EXPORT_SYMBOL(atomic_add_ret_unchecked);
7578 EXPORT_SYMBOL(atomic_sub);
7579+EXPORT_SYMBOL(atomic_sub_unchecked);
7580 EXPORT_SYMBOL(atomic_sub_ret);
7581 EXPORT_SYMBOL(atomic64_add);
7582+EXPORT_SYMBOL(atomic64_add_unchecked);
7583 EXPORT_SYMBOL(atomic64_add_ret);
7584+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
7585 EXPORT_SYMBOL(atomic64_sub);
7586+EXPORT_SYMBOL(atomic64_sub_unchecked);
7587 EXPORT_SYMBOL(atomic64_sub_ret);
7588 EXPORT_SYMBOL(atomic64_dec_if_positive);
7589
7590diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
7591index 30c3ecc..736f015 100644
7592--- a/arch/sparc/mm/Makefile
7593+++ b/arch/sparc/mm/Makefile
7594@@ -2,7 +2,7 @@
7595 #
7596
7597 asflags-y := -ansi
7598-ccflags-y := -Werror
7599+#ccflags-y := -Werror
7600
7601 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
7602 obj-y += fault_$(BITS).o
7603diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
7604index e98bfda..ea8d221 100644
7605--- a/arch/sparc/mm/fault_32.c
7606+++ b/arch/sparc/mm/fault_32.c
7607@@ -21,6 +21,9 @@
7608 #include <linux/perf_event.h>
7609 #include <linux/interrupt.h>
7610 #include <linux/kdebug.h>
7611+#include <linux/slab.h>
7612+#include <linux/pagemap.h>
7613+#include <linux/compiler.h>
7614
7615 #include <asm/page.h>
7616 #include <asm/pgtable.h>
7617@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
7618 return safe_compute_effective_address(regs, insn);
7619 }
7620
7621+#ifdef CONFIG_PAX_PAGEEXEC
7622+#ifdef CONFIG_PAX_DLRESOLVE
7623+static void pax_emuplt_close(struct vm_area_struct *vma)
7624+{
7625+ vma->vm_mm->call_dl_resolve = 0UL;
7626+}
7627+
7628+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7629+{
7630+ unsigned int *kaddr;
7631+
7632+ vmf->page = alloc_page(GFP_HIGHUSER);
7633+ if (!vmf->page)
7634+ return VM_FAULT_OOM;
7635+
7636+ kaddr = kmap(vmf->page);
7637+ memset(kaddr, 0, PAGE_SIZE);
7638+ kaddr[0] = 0x9DE3BFA8U; /* save */
7639+ flush_dcache_page(vmf->page);
7640+ kunmap(vmf->page);
7641+ return VM_FAULT_MAJOR;
7642+}
7643+
7644+static const struct vm_operations_struct pax_vm_ops = {
7645+ .close = pax_emuplt_close,
7646+ .fault = pax_emuplt_fault
7647+};
7648+
7649+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7650+{
7651+ int ret;
7652+
7653+ INIT_LIST_HEAD(&vma->anon_vma_chain);
7654+ vma->vm_mm = current->mm;
7655+ vma->vm_start = addr;
7656+ vma->vm_end = addr + PAGE_SIZE;
7657+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7658+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7659+ vma->vm_ops = &pax_vm_ops;
7660+
7661+ ret = insert_vm_struct(current->mm, vma);
7662+ if (ret)
7663+ return ret;
7664+
7665+ ++current->mm->total_vm;
7666+ return 0;
7667+}
7668+#endif
7669+
7670+/*
7671+ * PaX: decide what to do with offenders (regs->pc = fault address)
7672+ *
7673+ * returns 1 when task should be killed
7674+ * 2 when patched PLT trampoline was detected
7675+ * 3 when unpatched PLT trampoline was detected
7676+ */
7677+static int pax_handle_fetch_fault(struct pt_regs *regs)
7678+{
7679+
7680+#ifdef CONFIG_PAX_EMUPLT
7681+ int err;
7682+
7683+ do { /* PaX: patched PLT emulation #1 */
7684+ unsigned int sethi1, sethi2, jmpl;
7685+
7686+ err = get_user(sethi1, (unsigned int *)regs->pc);
7687+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
7688+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
7689+
7690+ if (err)
7691+ break;
7692+
7693+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7694+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7695+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7696+ {
7697+ unsigned int addr;
7698+
7699+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7700+ addr = regs->u_regs[UREG_G1];
7701+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7702+ regs->pc = addr;
7703+ regs->npc = addr+4;
7704+ return 2;
7705+ }
7706+ } while (0);
7707+
7708+ do { /* PaX: patched PLT emulation #2 */
7709+ unsigned int ba;
7710+
7711+ err = get_user(ba, (unsigned int *)regs->pc);
7712+
7713+ if (err)
7714+ break;
7715+
7716+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
7717+ unsigned int addr;
7718+
7719+ if ((ba & 0xFFC00000U) == 0x30800000U)
7720+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7721+ else
7722+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7723+ regs->pc = addr;
7724+ regs->npc = addr+4;
7725+ return 2;
7726+ }
7727+ } while (0);
7728+
7729+ do { /* PaX: patched PLT emulation #3 */
7730+ unsigned int sethi, bajmpl, nop;
7731+
7732+ err = get_user(sethi, (unsigned int *)regs->pc);
7733+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
7734+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7735+
7736+ if (err)
7737+ break;
7738+
7739+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7740+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7741+ nop == 0x01000000U)
7742+ {
7743+ unsigned int addr;
7744+
7745+ addr = (sethi & 0x003FFFFFU) << 10;
7746+ regs->u_regs[UREG_G1] = addr;
7747+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7748+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7749+ else
7750+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7751+ regs->pc = addr;
7752+ regs->npc = addr+4;
7753+ return 2;
7754+ }
7755+ } while (0);
7756+
7757+ do { /* PaX: unpatched PLT emulation step 1 */
7758+ unsigned int sethi, ba, nop;
7759+
7760+ err = get_user(sethi, (unsigned int *)regs->pc);
7761+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
7762+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7763+
7764+ if (err)
7765+ break;
7766+
7767+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7768+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7769+ nop == 0x01000000U)
7770+ {
7771+ unsigned int addr, save, call;
7772+
7773+ if ((ba & 0xFFC00000U) == 0x30800000U)
7774+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7775+ else
7776+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7777+
7778+ err = get_user(save, (unsigned int *)addr);
7779+ err |= get_user(call, (unsigned int *)(addr+4));
7780+ err |= get_user(nop, (unsigned int *)(addr+8));
7781+ if (err)
7782+ break;
7783+
7784+#ifdef CONFIG_PAX_DLRESOLVE
7785+ if (save == 0x9DE3BFA8U &&
7786+ (call & 0xC0000000U) == 0x40000000U &&
7787+ nop == 0x01000000U)
7788+ {
7789+ struct vm_area_struct *vma;
7790+ unsigned long call_dl_resolve;
7791+
7792+ down_read(&current->mm->mmap_sem);
7793+ call_dl_resolve = current->mm->call_dl_resolve;
7794+ up_read(&current->mm->mmap_sem);
7795+ if (likely(call_dl_resolve))
7796+ goto emulate;
7797+
7798+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7799+
7800+ down_write(&current->mm->mmap_sem);
7801+ if (current->mm->call_dl_resolve) {
7802+ call_dl_resolve = current->mm->call_dl_resolve;
7803+ up_write(&current->mm->mmap_sem);
7804+ if (vma)
7805+ kmem_cache_free(vm_area_cachep, vma);
7806+ goto emulate;
7807+ }
7808+
7809+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7810+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7811+ up_write(&current->mm->mmap_sem);
7812+ if (vma)
7813+ kmem_cache_free(vm_area_cachep, vma);
7814+ return 1;
7815+ }
7816+
7817+ if (pax_insert_vma(vma, call_dl_resolve)) {
7818+ up_write(&current->mm->mmap_sem);
7819+ kmem_cache_free(vm_area_cachep, vma);
7820+ return 1;
7821+ }
7822+
7823+ current->mm->call_dl_resolve = call_dl_resolve;
7824+ up_write(&current->mm->mmap_sem);
7825+
7826+emulate:
7827+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7828+ regs->pc = call_dl_resolve;
7829+ regs->npc = addr+4;
7830+ return 3;
7831+ }
7832+#endif
7833+
7834+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7835+ if ((save & 0xFFC00000U) == 0x05000000U &&
7836+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7837+ nop == 0x01000000U)
7838+ {
7839+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7840+ regs->u_regs[UREG_G2] = addr + 4;
7841+ addr = (save & 0x003FFFFFU) << 10;
7842+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7843+ regs->pc = addr;
7844+ regs->npc = addr+4;
7845+ return 3;
7846+ }
7847+ }
7848+ } while (0);
7849+
7850+ do { /* PaX: unpatched PLT emulation step 2 */
7851+ unsigned int save, call, nop;
7852+
7853+ err = get_user(save, (unsigned int *)(regs->pc-4));
7854+ err |= get_user(call, (unsigned int *)regs->pc);
7855+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
7856+ if (err)
7857+ break;
7858+
7859+ if (save == 0x9DE3BFA8U &&
7860+ (call & 0xC0000000U) == 0x40000000U &&
7861+ nop == 0x01000000U)
7862+ {
7863+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7864+
7865+ regs->u_regs[UREG_RETPC] = regs->pc;
7866+ regs->pc = dl_resolve;
7867+ regs->npc = dl_resolve+4;
7868+ return 3;
7869+ }
7870+ } while (0);
7871+#endif
7872+
7873+ return 1;
7874+}
7875+
7876+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7877+{
7878+ unsigned long i;
7879+
7880+ printk(KERN_ERR "PAX: bytes at PC: ");
7881+ for (i = 0; i < 8; i++) {
7882+ unsigned int c;
7883+ if (get_user(c, (unsigned int *)pc+i))
7884+ printk(KERN_CONT "???????? ");
7885+ else
7886+ printk(KERN_CONT "%08x ", c);
7887+ }
7888+ printk("\n");
7889+}
7890+#endif
7891+
7892 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
7893 int text_fault)
7894 {
7895@@ -230,6 +504,24 @@ good_area:
7896 if (!(vma->vm_flags & VM_WRITE))
7897 goto bad_area;
7898 } else {
7899+
7900+#ifdef CONFIG_PAX_PAGEEXEC
7901+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7902+ up_read(&mm->mmap_sem);
7903+ switch (pax_handle_fetch_fault(regs)) {
7904+
7905+#ifdef CONFIG_PAX_EMUPLT
7906+ case 2:
7907+ case 3:
7908+ return;
7909+#endif
7910+
7911+ }
7912+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7913+ do_group_exit(SIGKILL);
7914+ }
7915+#endif
7916+
7917 /* Allow reads even for write-only mappings */
7918 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
7919 goto bad_area;
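With the NOEXEC protections from pgtsrmmu.h in force, an instruction fetch from a non-executable page now faults, and the PAGEEXEC block above decides the outcome: pax_handle_fetch_fault() pattern-matches the faulting words against known PLT trampoline shapes and emulates them (old sparc PLTs are legitimately runtime-written code), while anything unrecognized is logged by pax_report_insns()/pax_report_fault() and killed. A runnable decode of the first instruction in "patched PLT emulation #1", using the masks from the code above; the sample word is made up:

#include <stdio.h>

int main(void)
{
        unsigned int insn = 0x03000004U;   /* sample: sethi %hi(0x1000), %g1 */

        /* mask/value pair taken from emulation case #1 above */
        if ((insn & 0xFFC00000U) == 0x03000000U) {
                unsigned long g1 = (insn & 0x003FFFFFU) << 10;
                printf("sethi: %%g1 = %#lx\n", g1);
        }
        return 0;
}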
7920diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7921index 097aee7..5ca6697 100644
7922--- a/arch/sparc/mm/fault_64.c
7923+++ b/arch/sparc/mm/fault_64.c
7924@@ -21,6 +21,9 @@
7925 #include <linux/kprobes.h>
7926 #include <linux/kdebug.h>
7927 #include <linux/percpu.h>
7928+#include <linux/slab.h>
7929+#include <linux/pagemap.h>
7930+#include <linux/compiler.h>
7931
7932 #include <asm/page.h>
7933 #include <asm/pgtable.h>
7934@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7935 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7936 regs->tpc);
7937 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7938- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7939+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7940 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7941 dump_stack();
7942 unhandled_fault(regs->tpc, current, regs);
7943@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
7944 show_regs(regs);
7945 }
7946
7947+#ifdef CONFIG_PAX_PAGEEXEC
7948+#ifdef CONFIG_PAX_DLRESOLVE
7949+static void pax_emuplt_close(struct vm_area_struct *vma)
7950+{
7951+ vma->vm_mm->call_dl_resolve = 0UL;
7952+}
7953+
7954+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7955+{
7956+ unsigned int *kaddr;
7957+
7958+ vmf->page = alloc_page(GFP_HIGHUSER);
7959+ if (!vmf->page)
7960+ return VM_FAULT_OOM;
7961+
7962+ kaddr = kmap(vmf->page);
7963+ memset(kaddr, 0, PAGE_SIZE);
7964+ kaddr[0] = 0x9DE3BFA8U; /* save */
7965+ flush_dcache_page(vmf->page);
7966+ kunmap(vmf->page);
7967+ return VM_FAULT_MAJOR;
7968+}
7969+
7970+static const struct vm_operations_struct pax_vm_ops = {
7971+ .close = pax_emuplt_close,
7972+ .fault = pax_emuplt_fault
7973+};
7974+
7975+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7976+{
7977+ int ret;
7978+
7979+ INIT_LIST_HEAD(&vma->anon_vma_chain);
7980+ vma->vm_mm = current->mm;
7981+ vma->vm_start = addr;
7982+ vma->vm_end = addr + PAGE_SIZE;
7983+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7984+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7985+ vma->vm_ops = &pax_vm_ops;
7986+
7987+ ret = insert_vm_struct(current->mm, vma);
7988+ if (ret)
7989+ return ret;
7990+
7991+ ++current->mm->total_vm;
7992+ return 0;
7993+}
7994+#endif
7995+
7996+/*
7997+ * PaX: decide what to do with offenders (regs->tpc = fault address)
7998+ *
7999+ * returns 1 when task should be killed
8000+ * 2 when patched PLT trampoline was detected
8001+ * 3 when unpatched PLT trampoline was detected
8002+ */
8003+static int pax_handle_fetch_fault(struct pt_regs *regs)
8004+{
8005+
8006+#ifdef CONFIG_PAX_EMUPLT
8007+ int err;
8008+
8009+ do { /* PaX: patched PLT emulation #1 */
8010+ unsigned int sethi1, sethi2, jmpl;
8011+
8012+ err = get_user(sethi1, (unsigned int *)regs->tpc);
8013+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
8014+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
8015+
8016+ if (err)
8017+ break;
8018+
8019+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
8020+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
8021+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
8022+ {
8023+ unsigned long addr;
8024+
8025+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
8026+ addr = regs->u_regs[UREG_G1];
8027+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
8028+
8029+ if (test_thread_flag(TIF_32BIT))
8030+ addr &= 0xFFFFFFFFUL;
8031+
8032+ regs->tpc = addr;
8033+ regs->tnpc = addr+4;
8034+ return 2;
8035+ }
8036+ } while (0);
8037+
8038+ do { /* PaX: patched PLT emulation #2 */
8039+ unsigned int ba;
8040+
8041+ err = get_user(ba, (unsigned int *)regs->tpc);
8042+
8043+ if (err)
8044+ break;
8045+
8046+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
8047+ unsigned long addr;
8048+
8049+ if ((ba & 0xFFC00000U) == 0x30800000U)
8050+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
8051+ else
8052+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8053+
8054+ if (test_thread_flag(TIF_32BIT))
8055+ addr &= 0xFFFFFFFFUL;
8056+
8057+ regs->tpc = addr;
8058+ regs->tnpc = addr+4;
8059+ return 2;
8060+ }
8061+ } while (0);
8062+
8063+ do { /* PaX: patched PLT emulation #3 */
8064+ unsigned int sethi, bajmpl, nop;
8065+
8066+ err = get_user(sethi, (unsigned int *)regs->tpc);
8067+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
8068+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8069+
8070+ if (err)
8071+ break;
8072+
8073+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8074+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
8075+ nop == 0x01000000U)
8076+ {
8077+ unsigned long addr;
8078+
8079+ addr = (sethi & 0x003FFFFFU) << 10;
8080+ regs->u_regs[UREG_G1] = addr;
8081+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
8082+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
8083+ else
8084+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8085+
8086+ if (test_thread_flag(TIF_32BIT))
8087+ addr &= 0xFFFFFFFFUL;
8088+
8089+ regs->tpc = addr;
8090+ regs->tnpc = addr+4;
8091+ return 2;
8092+ }
8093+ } while (0);
8094+
8095+ do { /* PaX: patched PLT emulation #4 */
8096+ unsigned int sethi, mov1, call, mov2;
8097+
8098+ err = get_user(sethi, (unsigned int *)regs->tpc);
8099+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
8100+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
8101+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
8102+
8103+ if (err)
8104+ break;
8105+
8106+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8107+ mov1 == 0x8210000FU &&
8108+ (call & 0xC0000000U) == 0x40000000U &&
8109+ mov2 == 0x9E100001U)
8110+ {
8111+ unsigned long addr;
8112+
8113+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
8114+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
8115+
8116+ if (test_thread_flag(TIF_32BIT))
8117+ addr &= 0xFFFFFFFFUL;
8118+
8119+ regs->tpc = addr;
8120+ regs->tnpc = addr+4;
8121+ return 2;
8122+ }
8123+ } while (0);
8124+
8125+ do { /* PaX: patched PLT emulation #5 */
8126+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
8127+
8128+ err = get_user(sethi, (unsigned int *)regs->tpc);
8129+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
8130+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
8131+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
8132+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
8133+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
8134+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
8135+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
8136+
8137+ if (err)
8138+ break;
8139+
8140+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8141+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
8142+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8143+ (or1 & 0xFFFFE000U) == 0x82106000U &&
8144+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
8145+ sllx == 0x83287020U &&
8146+ jmpl == 0x81C04005U &&
8147+ nop == 0x01000000U)
8148+ {
8149+ unsigned long addr;
8150+
8151+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
8152+ regs->u_regs[UREG_G1] <<= 32;
8153+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
8154+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
8155+ regs->tpc = addr;
8156+ regs->tnpc = addr+4;
8157+ return 2;
8158+ }
8159+ } while (0);
8160+
8161+ do { /* PaX: patched PLT emulation #6 */
8162+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
8163+
8164+ err = get_user(sethi, (unsigned int *)regs->tpc);
8165+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
8166+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
8167+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
8168+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
8169+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
8170+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
8171+
8172+ if (err)
8173+ break;
8174+
8175+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8176+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
8177+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8178+ sllx == 0x83287020U &&
8179+ (or & 0xFFFFE000U) == 0x8A116000U &&
8180+ jmpl == 0x81C04005U &&
8181+ nop == 0x01000000U)
8182+ {
8183+ unsigned long addr;
8184+
8185+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
8186+ regs->u_regs[UREG_G1] <<= 32;
8187+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
8188+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
8189+ regs->tpc = addr;
8190+ regs->tnpc = addr+4;
8191+ return 2;
8192+ }
8193+ } while (0);
8194+
8195+ do { /* PaX: unpatched PLT emulation step 1 */
8196+ unsigned int sethi, ba, nop;
8197+
8198+ err = get_user(sethi, (unsigned int *)regs->tpc);
8199+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
8200+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8201+
8202+ if (err)
8203+ break;
8204+
8205+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8206+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
8207+ nop == 0x01000000U)
8208+ {
8209+ unsigned long addr;
8210+ unsigned int save, call;
8211+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
8212+
8213+ if ((ba & 0xFFC00000U) == 0x30800000U)
8214+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
8215+ else
8216+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8217+
8218+ if (test_thread_flag(TIF_32BIT))
8219+ addr &= 0xFFFFFFFFUL;
8220+
8221+ err = get_user(save, (unsigned int *)addr);
8222+ err |= get_user(call, (unsigned int *)(addr+4));
8223+ err |= get_user(nop, (unsigned int *)(addr+8));
8224+ if (err)
8225+ break;
8226+
8227+#ifdef CONFIG_PAX_DLRESOLVE
8228+ if (save == 0x9DE3BFA8U &&
8229+ (call & 0xC0000000U) == 0x40000000U &&
8230+ nop == 0x01000000U)
8231+ {
8232+ struct vm_area_struct *vma;
8233+ unsigned long call_dl_resolve;
8234+
8235+ down_read(&current->mm->mmap_sem);
8236+ call_dl_resolve = current->mm->call_dl_resolve;
8237+ up_read(&current->mm->mmap_sem);
8238+ if (likely(call_dl_resolve))
8239+ goto emulate;
8240+
8241+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
8242+
8243+ down_write(&current->mm->mmap_sem);
8244+ if (current->mm->call_dl_resolve) {
8245+ call_dl_resolve = current->mm->call_dl_resolve;
8246+ up_write(&current->mm->mmap_sem);
8247+ if (vma)
8248+ kmem_cache_free(vm_area_cachep, vma);
8249+ goto emulate;
8250+ }
8251+
8252+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
8253+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
8254+ up_write(&current->mm->mmap_sem);
8255+ if (vma)
8256+ kmem_cache_free(vm_area_cachep, vma);
8257+ return 1;
8258+ }
8259+
8260+ if (pax_insert_vma(vma, call_dl_resolve)) {
8261+ up_write(&current->mm->mmap_sem);
8262+ kmem_cache_free(vm_area_cachep, vma);
8263+ return 1;
8264+ }
8265+
8266+ current->mm->call_dl_resolve = call_dl_resolve;
8267+ up_write(&current->mm->mmap_sem);
8268+
8269+emulate:
8270+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8271+ regs->tpc = call_dl_resolve;
8272+ regs->tnpc = addr+4;
8273+ return 3;
8274+ }
8275+#endif
8276+
8277+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
8278+ if ((save & 0xFFC00000U) == 0x05000000U &&
8279+ (call & 0xFFFFE000U) == 0x85C0A000U &&
8280+ nop == 0x01000000U)
8281+ {
8282+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8283+ regs->u_regs[UREG_G2] = addr + 4;
8284+ addr = (save & 0x003FFFFFU) << 10;
8285+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
8286+
8287+ if (test_thread_flag(TIF_32BIT))
8288+ addr &= 0xFFFFFFFFUL;
8289+
8290+ regs->tpc = addr;
8291+ regs->tnpc = addr+4;
8292+ return 3;
8293+ }
8294+
8295+ /* PaX: 64-bit PLT stub */
8296+ err = get_user(sethi1, (unsigned int *)addr);
8297+ err |= get_user(sethi2, (unsigned int *)(addr+4));
8298+ err |= get_user(or1, (unsigned int *)(addr+8));
8299+ err |= get_user(or2, (unsigned int *)(addr+12));
8300+ err |= get_user(sllx, (unsigned int *)(addr+16));
8301+ err |= get_user(add, (unsigned int *)(addr+20));
8302+ err |= get_user(jmpl, (unsigned int *)(addr+24));
8303+ err |= get_user(nop, (unsigned int *)(addr+28));
8304+ if (err)
8305+ break;
8306+
8307+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
8308+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
8309+ (or1 & 0xFFFFE000U) == 0x88112000U &&
8310+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
8311+ sllx == 0x89293020U &&
8312+ add == 0x8A010005U &&
8313+ jmpl == 0x89C14000U &&
8314+ nop == 0x01000000U)
8315+ {
8316+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
8317+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
8318+ regs->u_regs[UREG_G4] <<= 32;
8319+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
8320+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
8321+ regs->u_regs[UREG_G4] = addr + 24;
8322+ addr = regs->u_regs[UREG_G5];
8323+ regs->tpc = addr;
8324+ regs->tnpc = addr+4;
8325+ return 3;
8326+ }
8327+ }
8328+ } while (0);
8329+
8330+#ifdef CONFIG_PAX_DLRESOLVE
8331+ do { /* PaX: unpatched PLT emulation step 2 */
8332+ unsigned int save, call, nop;
8333+
8334+ err = get_user(save, (unsigned int *)(regs->tpc-4));
8335+ err |= get_user(call, (unsigned int *)regs->tpc);
8336+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
8337+ if (err)
8338+ break;
8339+
8340+ if (save == 0x9DE3BFA8U &&
8341+ (call & 0xC0000000U) == 0x40000000U &&
8342+ nop == 0x01000000U)
8343+ {
8344+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
8345+
8346+ if (test_thread_flag(TIF_32BIT))
8347+ dl_resolve &= 0xFFFFFFFFUL;
8348+
8349+ regs->u_regs[UREG_RETPC] = regs->tpc;
8350+ regs->tpc = dl_resolve;
8351+ regs->tnpc = dl_resolve+4;
8352+ return 3;
8353+ }
8354+ } while (0);
8355+#endif
8356+
8357+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
8358+ unsigned int sethi, ba, nop;
8359+
8360+ err = get_user(sethi, (unsigned int *)regs->tpc);
8361+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
8362+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8363+
8364+ if (err)
8365+ break;
8366+
8367+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8368+ (ba & 0xFFF00000U) == 0x30600000U &&
8369+ nop == 0x01000000U)
8370+ {
8371+ unsigned long addr;
8372+
8373+ addr = (sethi & 0x003FFFFFU) << 10;
8374+ regs->u_regs[UREG_G1] = addr;
8375+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8376+
8377+ if (test_thread_flag(TIF_32BIT))
8378+ addr &= 0xFFFFFFFFUL;
8379+
8380+ regs->tpc = addr;
8381+ regs->tnpc = addr+4;
8382+ return 2;
8383+ }
8384+ } while (0);
8385+
8386+#endif
8387+
8388+ return 1;
8389+}
8390+
8391+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8392+{
8393+ unsigned long i;
8394+
8395+ printk(KERN_ERR "PAX: bytes at PC: ");
8396+ for (i = 0; i < 8; i++) {
8397+ unsigned int c;
8398+ if (get_user(c, (unsigned int *)pc+i))
8399+ printk(KERN_CONT "???????? ");
8400+ else
8401+ printk(KERN_CONT "%08x ", c);
8402+ }
8403+ printk("\n");
8404+}
8405+#endif
8406+
8407 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
8408 {
8409 struct mm_struct *mm = current->mm;
8410@@ -341,6 +804,29 @@ retry:
8411 if (!vma)
8412 goto bad_area;
8413
8414+#ifdef CONFIG_PAX_PAGEEXEC
8415+ /* PaX: detect ITLB misses on non-exec pages */
8416+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
8417+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
8418+ {
8419+ if (address != regs->tpc)
8420+ goto good_area;
8421+
8422+ up_read(&mm->mmap_sem);
8423+ switch (pax_handle_fetch_fault(regs)) {
8424+
8425+#ifdef CONFIG_PAX_EMUPLT
8426+ case 2:
8427+ case 3:
8428+ return;
8429+#endif
8430+
8431+ }
8432+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
8433+ do_group_exit(SIGKILL);
8434+ }
8435+#endif
8436+
8437 /* Pure DTLB misses do not tell us whether the fault causing
8438 * load/store/atomic was a write or not, it only says that there
8439 * was no match. So in such a case we (carefully) read the
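The branch-target computations in the PLT emulation above all use the same branchless sign-extension idiom: OR ones into every bit above the immediate, then XOR and re-add the sign bit, so a set sign bit keeps the high ones while a clear one carries them away. A minimal standalone sketch of the 22-bit `ba` case (the constants are the ones used in the hunk; the test values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* sign-extend a 22-bit SPARC branch displacement, branch-free */
    static int64_t sext22(uint64_t disp)
    {
            /* two's-complement wrap on conversion, as on all Linux targets */
            return ((disp | 0xFFFFFFFFFFC00000UL) ^ 0x200000UL) + 0x200000UL;
    }

    int main(void)
    {
            printf("%lld\n", (long long)sext22(0x3FFFFFUL)); /* -1 */
            printf("%lld\n", (long long)sext22(0x000001UL)); /*  1 */
            return 0;
    }

The emulation then shifts the extended displacement left by 2 (SPARC branch offsets count instruction words) and adds it to the appropriate pc to obtain the target.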
8440diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
8441index f76f83d..ee0d859 100644
8442--- a/arch/sparc/mm/hugetlbpage.c
8443+++ b/arch/sparc/mm/hugetlbpage.c
8444@@ -34,6 +34,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
8445 struct vm_area_struct * vma;
8446 unsigned long task_size = TASK_SIZE;
8447 unsigned long start_addr;
8448+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8449
8450 if (test_thread_flag(TIF_32BIT))
8451 task_size = STACK_TOP32;
8452@@ -67,7 +68,7 @@ full_search:
8453 }
8454 return -ENOMEM;
8455 }
8456- if (likely(!vma || addr + len <= vma->vm_start)) {
8457+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8458 /*
8459 * Remember the place where we stopped the search:
8460 */
8461@@ -90,6 +91,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8462 struct vm_area_struct *vma;
8463 struct mm_struct *mm = current->mm;
8464 unsigned long addr = addr0;
8465+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8466
8467 /* This should only ever run for 32-bit processes. */
8468 BUG_ON(!test_thread_flag(TIF_32BIT));
8469@@ -106,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8470 /* make sure it can fit in the remaining address space */
8471 if (likely(addr > len)) {
8472 vma = find_vma(mm, addr-len);
8473- if (!vma || addr <= vma->vm_start) {
8474+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
8475 /* remember the address as a hint for next time */
8476 return (mm->free_area_cache = addr-len);
8477 }
8478@@ -115,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8479 if (unlikely(mm->mmap_base < len))
8480 goto bottomup;
8481
8482- addr = (mm->mmap_base-len) & HPAGE_MASK;
8483+ addr = mm->mmap_base - len;
8484
8485 do {
8486+ addr &= HPAGE_MASK;
8487 /*
8488 * Lookup failure means no vma is above this address,
8489 * else if new region fits below vma->vm_start,
8490 * return with success:
8491 */
8492 vma = find_vma(mm, addr);
8493- if (likely(!vma || addr+len <= vma->vm_start)) {
8494+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8495 /* remember the address as a hint for next time */
8496 return (mm->free_area_cache = addr);
8497 }
8498@@ -134,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8499 mm->cached_hole_size = vma->vm_start - addr;
8500
8501 /* try just below the current vma->vm_start */
8502- addr = (vma->vm_start-len) & HPAGE_MASK;
8503- } while (likely(len < vma->vm_start));
8504+ addr = skip_heap_stack_gap(vma, len, offset);
8505+ } while (!IS_ERR_VALUE(addr));
8506
8507 bottomup:
8508 /*
8509@@ -163,6 +166,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
8510 struct mm_struct *mm = current->mm;
8511 struct vm_area_struct *vma;
8512 unsigned long task_size = TASK_SIZE;
8513+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8514
8515 if (test_thread_flag(TIF_32BIT))
8516 task_size = STACK_TOP32;
8517@@ -181,8 +185,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
8518 if (addr) {
8519 addr = ALIGN(addr, HPAGE_SIZE);
8520 vma = find_vma(mm, addr);
8521- if (task_size - len >= addr &&
8522- (!vma || addr + len <= vma->vm_start))
8523+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8524 return addr;
8525 }
8526 if (mm->get_unmapped_area == arch_get_unmapped_area)
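Each `!vma || addr + len <= vma->vm_start` test in this allocator becomes check_heap_stack_gap(vma, addr, len, offset), which additionally enforces a per-thread randomized guard gap below the next mapping. The helper itself is defined elsewhere in this patch; a rough sketch of the shape of the check, with stand-in types rather than the kernel's:

    struct vma { unsigned long vm_start; unsigned long vm_flags; };
    #define VMF_GROWSDOWN 0x1UL        /* stand-in for VM_GROWSDOWN */

    /* nonzero when [addr, addr+len) fits below the next mapping,
     * keeping an extra gap under stack-like (grows-down) neighbours */
    static int gap_ok(const struct vma *vma, unsigned long addr,
                      unsigned long len, unsigned long gap)
    {
            if (!vma)
                    return 1;           /* nothing mapped above */
            if (addr + len > vma->vm_start)
                    return 0;           /* would overlap */
            if (vma->vm_flags & VMF_GROWSDOWN)
                    return vma->vm_start - (addr + len) >= gap;
            return 1;
    }

skip_heap_stack_gap() in the rewritten top-down loop is the complementary step: given a vma the request did not fit under, it yields the next lower candidate address, returning an error value once the search is exhausted — hence the IS_ERR_VALUE() loop condition.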
8527diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
8528index f4500c6..889656c 100644
8529--- a/arch/tile/include/asm/atomic_64.h
8530+++ b/arch/tile/include/asm/atomic_64.h
8531@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8532
8533 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8534
8535+#define atomic64_read_unchecked(v) atomic64_read(v)
8536+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8537+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8538+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8539+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8540+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8541+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8542+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8543+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8544+
8545 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
8546 #define smp_mb__before_atomic_dec() smp_mb()
8547 #define smp_mb__after_atomic_dec() smp_mb()
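The `*_unchecked` aliases exist so that code shared with PAX_REFCOUNT-instrumented architectures still builds on tile: where the feature is implemented, the plain atomics detect overflow and the `_unchecked` variants opt out for counters that may legitimately wrap; tile has no such instrumentation, so both names resolve to the same operation. A sketch of the distinction the naming encodes (hypothetical, not tile code):

    #include <stdio.h>
    #include <limits.h>

    /* "checked" increment: refuses to wrap, like a PAX_REFCOUNT atomic */
    static int inc_checked(int *v)
    {
            int next;
            if (__builtin_add_overflow(*v, 1, &next))
                    return -1;          /* report instead of wrapping */
            *v = next;
            return 0;
    }

    /* "unchecked" increment: wrapping is acceptable (e.g. statistics) */
    static void inc_unchecked(unsigned int *v) { (*v)++; }

    int main(void)
    {
            int refs = INT_MAX;
            unsigned int stats = UINT_MAX;

            if (inc_checked(&refs))
                    puts("refcount overflow caught");
            inc_unchecked(&stats);      /* wraps to 0, by design */
            printf("stats=%u\n", stats);
            return 0;
    }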
8548diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
8549index a9a5299..0fce79e 100644
8550--- a/arch/tile/include/asm/cache.h
8551+++ b/arch/tile/include/asm/cache.h
8552@@ -15,11 +15,12 @@
8553 #ifndef _ASM_TILE_CACHE_H
8554 #define _ASM_TILE_CACHE_H
8555
8556+#include <linux/const.h>
8557 #include <arch/chip.h>
8558
8559 /* bytes per L1 data cache line */
8560 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
8561-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8562+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8563
8564 /* bytes per L2 cache line */
8565 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
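The cache.h hunks throughout this patch all make the same change: `1 << L1_CACHE_SHIFT` has type int, while `_AC(1,UL)` yields an unsigned long in C yet still expands to a bare 1 under __ASSEMBLY__, where a UL suffix would not assemble. For reference, the idiom as defined in include/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)    X
    #else
    #define __AC(X,Y)   (X##Y)
    #define _AC(X,Y)    __AC(X,Y)
    #endif

With the unsigned long type, derived expressions stay in 64-bit unsigned arithmetic instead of overflowing int — for example `1 << 31` is undefined behaviour, while `_AC(1,UL) << 31` is well defined.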
8566diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
8567index 9ab078a..d6635c2 100644
8568--- a/arch/tile/include/asm/uaccess.h
8569+++ b/arch/tile/include/asm/uaccess.h
8570@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
8571 const void __user *from,
8572 unsigned long n)
8573 {
8574- int sz = __compiletime_object_size(to);
8575+ size_t sz = __compiletime_object_size(to);
8576
8577- if (likely(sz == -1 || sz >= n))
8578+ if (likely(sz == (size_t)-1 || sz >= n))
8579 n = _copy_from_user(to, from, n);
8580 else
8581 copy_from_user_overflow();
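__compiletime_object_size() reports "size unknown" as -1 and n is unsigned long, so holding the result in a signed int places a signed/unsigned conversion at the heart of a bounds check; the patch keeps everything in size_t and compares against an explicit (size_t)-1 sentinel instead. The conversion rule being avoided, in isolation:

    #include <stdio.h>

    int main(void)
    {
            int sz = -1;                /* "size unknown" sentinel */
            unsigned long n = 16;

            /* the usual arithmetic conversions turn sz into ULONG_MAX,
             * so the comparison passes vacuously (gcc flags exactly
             * this with -Wsign-compare) */
            printf("sz >= n: %d\n", sz >= n);   /* prints 1 */
            return 0;
    }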
8582diff --git a/arch/um/Makefile b/arch/um/Makefile
8583index 133f7de..1d6f2f1 100644
8584--- a/arch/um/Makefile
8585+++ b/arch/um/Makefile
8586@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
8587 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
8588 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
8589
8590+ifdef CONSTIFY_PLUGIN
8591+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8592+endif
8593+
8594 #This will adjust *FLAGS accordingly to the platform.
8595 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
8596
8597diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
8598index 19e1bdd..3665b77 100644
8599--- a/arch/um/include/asm/cache.h
8600+++ b/arch/um/include/asm/cache.h
8601@@ -1,6 +1,7 @@
8602 #ifndef __UM_CACHE_H
8603 #define __UM_CACHE_H
8604
8605+#include <linux/const.h>
8606
8607 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
8608 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8609@@ -12,6 +13,6 @@
8610 # define L1_CACHE_SHIFT 5
8611 #endif
8612
8613-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8614+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8615
8616 #endif
8617diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
8618index 2e0a6b1..a64d0f5 100644
8619--- a/arch/um/include/asm/kmap_types.h
8620+++ b/arch/um/include/asm/kmap_types.h
8621@@ -8,6 +8,6 @@
8622
8623 /* No more #include "asm/arch/kmap_types.h" ! */
8624
8625-#define KM_TYPE_NR 14
8626+#define KM_TYPE_NR 15
8627
8628 #endif
8629diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
8630index 5ff53d9..5850cdf 100644
8631--- a/arch/um/include/asm/page.h
8632+++ b/arch/um/include/asm/page.h
8633@@ -14,6 +14,9 @@
8634 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
8635 #define PAGE_MASK (~(PAGE_SIZE-1))
8636
8637+#define ktla_ktva(addr) (addr)
8638+#define ktva_ktla(addr) (addr)
8639+
8640 #ifndef __ASSEMBLY__
8641
8642 struct page;
8643diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
8644index 0032f92..cd151e0 100644
8645--- a/arch/um/include/asm/pgtable-3level.h
8646+++ b/arch/um/include/asm/pgtable-3level.h
8647@@ -58,6 +58,7 @@
8648 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
8649 #define pud_populate(mm, pud, pmd) \
8650 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
8651+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8652
8653 #ifdef CONFIG_64BIT
8654 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
8655diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
8656index b6d699c..df7ac1d 100644
8657--- a/arch/um/kernel/process.c
8658+++ b/arch/um/kernel/process.c
8659@@ -387,22 +387,6 @@ int singlestepping(void * t)
8660 return 2;
8661 }
8662
8663-/*
8664- * Only x86 and x86_64 have an arch_align_stack().
8665- * All other arches have "#define arch_align_stack(x) (x)"
8666- * in their asm/system.h
8667- * As this is included in UML from asm-um/system-generic.h,
8668- * we can use it to behave as the subarch does.
8669- */
8670-#ifndef arch_align_stack
8671-unsigned long arch_align_stack(unsigned long sp)
8672-{
8673- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8674- sp -= get_random_int() % 8192;
8675- return sp & ~0xf;
8676-}
8677-#endif
8678-
8679 unsigned long get_wchan(struct task_struct *p)
8680 {
8681 unsigned long stack_page, sp, ip;
8682diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
8683index ad8f795..2c7eec6 100644
8684--- a/arch/unicore32/include/asm/cache.h
8685+++ b/arch/unicore32/include/asm/cache.h
8686@@ -12,8 +12,10 @@
8687 #ifndef __UNICORE_CACHE_H__
8688 #define __UNICORE_CACHE_H__
8689
8690-#define L1_CACHE_SHIFT (5)
8691-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8692+#include <linux/const.h>
8693+
8694+#define L1_CACHE_SHIFT 5
8695+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8696
8697 /*
8698 * Memory returned by kmalloc() may be used for DMA, so we must make
8699diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
8700index 46c3bff..b82f26b 100644
8701--- a/arch/x86/Kconfig
8702+++ b/arch/x86/Kconfig
8703@@ -241,7 +241,7 @@ config X86_HT
8704
8705 config X86_32_LAZY_GS
8706 def_bool y
8707- depends on X86_32 && !CC_STACKPROTECTOR
8708+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
8709
8710 config ARCH_HWEIGHT_CFLAGS
8711 string
8712@@ -1033,6 +1033,7 @@ config MICROCODE_OLD_INTERFACE
8713
8714 config X86_MSR
8715 tristate "/dev/cpu/*/msr - Model-specific register support"
8716+ depends on !GRKERNSEC_KMEM
8717 ---help---
8718 This device gives privileged processes access to the x86
8719 Model-Specific Registers (MSRs). It is a character device with
8720@@ -1056,7 +1057,7 @@ choice
8721
8722 config NOHIGHMEM
8723 bool "off"
8724- depends on !X86_NUMAQ
8725+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8726 ---help---
8727 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
8728 However, the address space of 32-bit x86 processors is only 4
8729@@ -1093,7 +1094,7 @@ config NOHIGHMEM
8730
8731 config HIGHMEM4G
8732 bool "4GB"
8733- depends on !X86_NUMAQ
8734+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
8735 ---help---
8736 Select this if you have a 32-bit processor and between 1 and 4
8737 gigabytes of physical RAM.
8738@@ -1147,7 +1148,7 @@ config PAGE_OFFSET
8739 hex
8740 default 0xB0000000 if VMSPLIT_3G_OPT
8741 default 0x80000000 if VMSPLIT_2G
8742- default 0x78000000 if VMSPLIT_2G_OPT
8743+ default 0x70000000 if VMSPLIT_2G_OPT
8744 default 0x40000000 if VMSPLIT_1G
8745 default 0xC0000000
8746 depends on X86_32
8747@@ -1548,6 +1549,7 @@ config SECCOMP
8748
8749 config CC_STACKPROTECTOR
8750 bool "Enable -fstack-protector buffer overflow detection"
8751+ depends on X86_64 || !PAX_MEMORY_UDEREF
8752 ---help---
8753 This option turns on the -fstack-protector GCC feature. This
8754 feature puts, at the beginning of functions, a canary value on
8755@@ -1605,6 +1607,7 @@ config KEXEC_JUMP
8756 config PHYSICAL_START
8757 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
8758 default "0x1000000"
8759+ range 0x400000 0x40000000
8760 ---help---
8761 This gives the physical address where the kernel is loaded.
8762
8763@@ -1668,6 +1671,7 @@ config X86_NEED_RELOCS
8764 config PHYSICAL_ALIGN
8765 hex "Alignment value to which kernel should be aligned" if X86_32
8766 default "0x1000000"
8767+ range 0x400000 0x1000000 if PAX_KERNEXEC
8768 range 0x2000 0x1000000
8769 ---help---
8770 This value puts the alignment restrictions on physical address
8771@@ -1699,9 +1703,10 @@ config HOTPLUG_CPU
8772 Say N if you want to disable CPU hotplug.
8773
8774 config COMPAT_VDSO
8775- def_bool y
8776+ def_bool n
8777 prompt "Compat VDSO support"
8778 depends on X86_32 || IA32_EMULATION
8779+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
8780 ---help---
8781 Map the 32-bit VDSO to the predictable old-style address too.
8782
8783diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8784index f3b86d0..17fd30f 100644
8785--- a/arch/x86/Kconfig.cpu
8786+++ b/arch/x86/Kconfig.cpu
8787@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
8788
8789 config X86_F00F_BUG
8790 def_bool y
8791- depends on M586MMX || M586TSC || M586 || M486 || M386
8792+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8793
8794 config X86_INVD_BUG
8795 def_bool y
8796@@ -359,7 +359,7 @@ config X86_POPAD_OK
8797
8798 config X86_ALIGNMENT_16
8799 def_bool y
8800- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8801+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8802
8803 config X86_INTEL_USERCOPY
8804 def_bool y
8805@@ -405,7 +405,7 @@ config X86_CMPXCHG64
8806 # generates cmov.
8807 config X86_CMOV
8808 def_bool y
8809- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8810+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
8811
8812 config X86_MINIMUM_CPU_FAMILY
8813 int
8814diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8815index b322f12..652d0d9 100644
8816--- a/arch/x86/Kconfig.debug
8817+++ b/arch/x86/Kconfig.debug
8818@@ -84,7 +84,7 @@ config X86_PTDUMP
8819 config DEBUG_RODATA
8820 bool "Write protect kernel read-only data structures"
8821 default y
8822- depends on DEBUG_KERNEL
8823+ depends on DEBUG_KERNEL && BROKEN
8824 ---help---
8825 Mark the kernel read-only data as write-protected in the pagetables,
8826 in order to catch accidental (and incorrect) writes to such const
8827@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
8828
8829 config DEBUG_SET_MODULE_RONX
8830 bool "Set loadable kernel module data as NX and text as RO"
8831- depends on MODULES
8832+ depends on MODULES && BROKEN
8833 ---help---
8834 This option helps catch unintended modifications to loadable
8835 kernel module's text and read-only data. It also prevents execution
8836@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
8837
8838 config DEBUG_STRICT_USER_COPY_CHECKS
8839 bool "Strict copy size checks"
8840- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
8841+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
8842 ---help---
8843 Enabling this option turns a certain set of sanity checks for user
8844 copy operations into compile time failures.
8845diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8846index 05afcca..b6ecb51 100644
8847--- a/arch/x86/Makefile
8848+++ b/arch/x86/Makefile
8849@@ -50,6 +50,7 @@ else
8850 UTS_MACHINE := x86_64
8851 CHECKFLAGS += -D__x86_64__ -m64
8852
8853+ biarch := $(call cc-option,-m64)
8854 KBUILD_AFLAGS += -m64
8855 KBUILD_CFLAGS += -m64
8856
8857@@ -229,3 +230,12 @@ define archhelp
8858 echo ' FDARGS="..." arguments for the booted kernel'
8859 echo ' FDINITRD=file initrd for the booted kernel'
8860 endef
8861+
8862+define OLD_LD
8863+
8864+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8865+*** Please upgrade your binutils to 2.18 or newer
8866+endef
8867+
8868+archprepare:
8869+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8870diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8871index ccce0ed..fd9da25 100644
8872--- a/arch/x86/boot/Makefile
8873+++ b/arch/x86/boot/Makefile
8874@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8875 $(call cc-option, -fno-stack-protector) \
8876 $(call cc-option, -mpreferred-stack-boundary=2)
8877 KBUILD_CFLAGS += $(call cc-option, -m32)
8878+ifdef CONSTIFY_PLUGIN
8879+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8880+endif
8881 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8882 GCOV_PROFILE := n
8883
8884diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8885index 878e4b9..20537ab 100644
8886--- a/arch/x86/boot/bitops.h
8887+++ b/arch/x86/boot/bitops.h
8888@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8889 u8 v;
8890 const u32 *p = (const u32 *)addr;
8891
8892- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8893+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8894 return v;
8895 }
8896
8897@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8898
8899 static inline void set_bit(int nr, void *addr)
8900 {
8901- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8902+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8903 }
8904
8905 #endif /* BOOT_BITOPS_H */
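The asm → asm volatile conversions in the boot code (here and in boot.h and cpucheck.c below) all close the same hole: extended asm with outputs but no volatile is treated as a pure function of its inputs, so the compiler is free to merge, reorder or delete executions. A compact illustration of the difference, using the kernel's own flags-reading idiom:

    /* without volatile, two calls may be folded into one even though
     * the CPU state being read can change in between */
    static inline unsigned long read_flags_foldable(void)
    {
            unsigned long f;
            asm("pushf; pop %0" : "=rm" (f));
            return f;
    }

    /* volatile pins every execution in place */
    static inline unsigned long read_flags(void)
    {
            unsigned long f;
            asm volatile("pushf; pop %0" : "=rm" (f));
            return f;
    }

This matters most for cpuid, rdmsr and wrmsr, whose effects are invisible to the compiler's dataflow analysis.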
8906diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8907index 18997e5..83d9c67 100644
8908--- a/arch/x86/boot/boot.h
8909+++ b/arch/x86/boot/boot.h
8910@@ -85,7 +85,7 @@ static inline void io_delay(void)
8911 static inline u16 ds(void)
8912 {
8913 u16 seg;
8914- asm("movw %%ds,%0" : "=rm" (seg));
8915+ asm volatile("movw %%ds,%0" : "=rm" (seg));
8916 return seg;
8917 }
8918
8919@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8920 static inline int memcmp(const void *s1, const void *s2, size_t len)
8921 {
8922 u8 diff;
8923- asm("repe; cmpsb; setnz %0"
8924+ asm volatile("repe; cmpsb; setnz %0"
8925 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8926 return diff;
8927 }
8928diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8929index 8a84501..b2d165f 100644
8930--- a/arch/x86/boot/compressed/Makefile
8931+++ b/arch/x86/boot/compressed/Makefile
8932@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8933 KBUILD_CFLAGS += $(cflags-y)
8934 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8935 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8936+ifdef CONSTIFY_PLUGIN
8937+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
8938+endif
8939
8940 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8941 GCOV_PROFILE := n
8942diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
8943index ccae7e2..8ac70be 100644
8944--- a/arch/x86/boot/compressed/eboot.c
8945+++ b/arch/x86/boot/compressed/eboot.c
8946@@ -144,7 +144,6 @@ again:
8947 *addr = max_addr;
8948 }
8949
8950-free_pool:
8951 efi_call_phys1(sys_table->boottime->free_pool, map);
8952
8953 fail:
8954@@ -208,7 +207,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
8955 if (i == map_size / desc_size)
8956 status = EFI_NOT_FOUND;
8957
8958-free_pool:
8959 efi_call_phys1(sys_table->boottime->free_pool, map);
8960 fail:
8961 return status;
8962diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8963index ccb2f4a..e49b20e 100644
8964--- a/arch/x86/boot/compressed/head_32.S
8965+++ b/arch/x86/boot/compressed/head_32.S
8966@@ -118,7 +118,7 @@ preferred_addr:
8967 notl %eax
8968 andl %eax, %ebx
8969 #else
8970- movl $LOAD_PHYSICAL_ADDR, %ebx
8971+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8972 #endif
8973
8974 /* Target address to relocate to for decompression */
8975@@ -204,7 +204,7 @@ relocated:
8976 * and where it was actually loaded.
8977 */
8978 movl %ebp, %ebx
8979- subl $LOAD_PHYSICAL_ADDR, %ebx
8980+ subl $____LOAD_PHYSICAL_ADDR, %ebx
8981 jz 2f /* Nothing to be done if loaded at compiled addr. */
8982 /*
8983 * Process relocations.
8984@@ -212,8 +212,7 @@ relocated:
8985
8986 1: subl $4, %edi
8987 movl (%edi), %ecx
8988- testl %ecx, %ecx
8989- jz 2f
8990+ jecxz 2f
8991 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8992 jmp 1b
8993 2:
8994diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8995index 2c4b171..e1fa5b1 100644
8996--- a/arch/x86/boot/compressed/head_64.S
8997+++ b/arch/x86/boot/compressed/head_64.S
8998@@ -91,7 +91,7 @@ ENTRY(startup_32)
8999 notl %eax
9000 andl %eax, %ebx
9001 #else
9002- movl $LOAD_PHYSICAL_ADDR, %ebx
9003+ movl $____LOAD_PHYSICAL_ADDR, %ebx
9004 #endif
9005
9006 /* Target address to relocate to for decompression */
9007@@ -273,7 +273,7 @@ preferred_addr:
9008 notq %rax
9009 andq %rax, %rbp
9010 #else
9011- movq $LOAD_PHYSICAL_ADDR, %rbp
9012+ movq $____LOAD_PHYSICAL_ADDR, %rbp
9013 #endif
9014
9015 /* Target address to relocate to for decompression */
9016diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
9017index 88f7ff6..ed695dd 100644
9018--- a/arch/x86/boot/compressed/misc.c
9019+++ b/arch/x86/boot/compressed/misc.c
9020@@ -303,7 +303,7 @@ static void parse_elf(void *output)
9021 case PT_LOAD:
9022 #ifdef CONFIG_RELOCATABLE
9023 dest = output;
9024- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
9025+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
9026 #else
9027 dest = (void *)(phdr->p_paddr);
9028 #endif
9029@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
9030 error("Destination address too large");
9031 #endif
9032 #ifndef CONFIG_RELOCATABLE
9033- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
9034+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
9035 error("Wrong destination address");
9036 #endif
9037
9038diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
9039index 4d3ff03..e4972ff 100644
9040--- a/arch/x86/boot/cpucheck.c
9041+++ b/arch/x86/boot/cpucheck.c
9042@@ -74,7 +74,7 @@ static int has_fpu(void)
9043 u16 fcw = -1, fsw = -1;
9044 u32 cr0;
9045
9046- asm("movl %%cr0,%0" : "=r" (cr0));
9047+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
9048 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
9049 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
9050 asm volatile("movl %0,%%cr0" : : "r" (cr0));
9051@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
9052 {
9053 u32 f0, f1;
9054
9055- asm("pushfl ; "
9056+ asm volatile("pushfl ; "
9057 "pushfl ; "
9058 "popl %0 ; "
9059 "movl %0,%1 ; "
9060@@ -115,7 +115,7 @@ static void get_flags(void)
9061 set_bit(X86_FEATURE_FPU, cpu.flags);
9062
9063 if (has_eflag(X86_EFLAGS_ID)) {
9064- asm("cpuid"
9065+ asm volatile("cpuid"
9066 : "=a" (max_intel_level),
9067 "=b" (cpu_vendor[0]),
9068 "=d" (cpu_vendor[1]),
9069@@ -124,7 +124,7 @@ static void get_flags(void)
9070
9071 if (max_intel_level >= 0x00000001 &&
9072 max_intel_level <= 0x0000ffff) {
9073- asm("cpuid"
9074+ asm volatile("cpuid"
9075 : "=a" (tfms),
9076 "=c" (cpu.flags[4]),
9077 "=d" (cpu.flags[0])
9078@@ -136,7 +136,7 @@ static void get_flags(void)
9079 cpu.model += ((tfms >> 16) & 0xf) << 4;
9080 }
9081
9082- asm("cpuid"
9083+ asm volatile("cpuid"
9084 : "=a" (max_amd_level)
9085 : "a" (0x80000000)
9086 : "ebx", "ecx", "edx");
9087@@ -144,7 +144,7 @@ static void get_flags(void)
9088 if (max_amd_level >= 0x80000001 &&
9089 max_amd_level <= 0x8000ffff) {
9090 u32 eax = 0x80000001;
9091- asm("cpuid"
9092+ asm volatile("cpuid"
9093 : "+a" (eax),
9094 "=c" (cpu.flags[6]),
9095 "=d" (cpu.flags[1])
9096@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
9097 u32 ecx = MSR_K7_HWCR;
9098 u32 eax, edx;
9099
9100- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
9101+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
9102 eax &= ~(1 << 15);
9103- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
9104+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
9105
9106 get_flags(); /* Make sure it really did something */
9107 err = check_flags();
9108@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
9109 u32 ecx = MSR_VIA_FCR;
9110 u32 eax, edx;
9111
9112- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
9113+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
9114 eax |= (1<<1)|(1<<7);
9115- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
9116+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
9117
9118 set_bit(X86_FEATURE_CX8, cpu.flags);
9119 err = check_flags();
9120@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
9121 u32 eax, edx;
9122 u32 level = 1;
9123
9124- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
9125- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
9126- asm("cpuid"
9127+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
9128+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
9129+ asm volatile("cpuid"
9130 : "+a" (level), "=d" (cpu.flags[0])
9131 : : "ecx", "ebx");
9132- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
9133+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
9134
9135 err = check_flags();
9136 }
9137diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
9138index 8c132a6..13e5c96 100644
9139--- a/arch/x86/boot/header.S
9140+++ b/arch/x86/boot/header.S
9141@@ -387,10 +387,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
9142 # single linked list of
9143 # struct setup_data
9144
9145-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
9146+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
9147
9148 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
9149+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
9150+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
9151+#else
9152 #define VO_INIT_SIZE (VO__end - VO__text)
9153+#endif
9154 #if ZO_INIT_SIZE > VO_INIT_SIZE
9155 #define INIT_SIZE ZO_INIT_SIZE
9156 #else
9157diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
9158index db75d07..8e6d0af 100644
9159--- a/arch/x86/boot/memory.c
9160+++ b/arch/x86/boot/memory.c
9161@@ -19,7 +19,7 @@
9162
9163 static int detect_memory_e820(void)
9164 {
9165- int count = 0;
9166+ unsigned int count = 0;
9167 struct biosregs ireg, oreg;
9168 struct e820entry *desc = boot_params.e820_map;
9169 static struct e820entry buf; /* static so it is zeroed */
9170diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
9171index 11e8c6e..fdbb1ed 100644
9172--- a/arch/x86/boot/video-vesa.c
9173+++ b/arch/x86/boot/video-vesa.c
9174@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
9175
9176 boot_params.screen_info.vesapm_seg = oreg.es;
9177 boot_params.screen_info.vesapm_off = oreg.di;
9178+ boot_params.screen_info.vesapm_size = oreg.cx;
9179 }
9180
9181 /*
9182diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
9183index 43eda28..5ab5fdb 100644
9184--- a/arch/x86/boot/video.c
9185+++ b/arch/x86/boot/video.c
9186@@ -96,7 +96,7 @@ static void store_mode_params(void)
9187 static unsigned int get_entry(void)
9188 {
9189 char entry_buf[4];
9190- int i, len = 0;
9191+ unsigned int i, len = 0;
9192 int key;
9193 unsigned int v;
9194
9195diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
9196index 5b577d5..3c1fed4 100644
9197--- a/arch/x86/crypto/aes-x86_64-asm_64.S
9198+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
9199@@ -8,6 +8,8 @@
9200 * including this sentence is retained in full.
9201 */
9202
9203+#include <asm/alternative-asm.h>
9204+
9205 .extern crypto_ft_tab
9206 .extern crypto_it_tab
9207 .extern crypto_fl_tab
9208@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
9209 je B192; \
9210 leaq 32(r9),r9;
9211
9212+#define ret pax_force_retaddr 0, 1; ret
9213+
9214 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
9215 movq r1,r2; \
9216 movq r3,r4; \
9217diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
9218index 3470624..201259d 100644
9219--- a/arch/x86/crypto/aesni-intel_asm.S
9220+++ b/arch/x86/crypto/aesni-intel_asm.S
9221@@ -31,6 +31,7 @@
9222
9223 #include <linux/linkage.h>
9224 #include <asm/inst.h>
9225+#include <asm/alternative-asm.h>
9226
9227 #ifdef __x86_64__
9228 .data
9229@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
9230 pop %r14
9231 pop %r13
9232 pop %r12
9233+ pax_force_retaddr 0, 1
9234 ret
9235+ENDPROC(aesni_gcm_dec)
9236
9237
9238 /*****************************************************************************
9239@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
9240 pop %r14
9241 pop %r13
9242 pop %r12
9243+ pax_force_retaddr 0, 1
9244 ret
9245+ENDPROC(aesni_gcm_enc)
9246
9247 #endif
9248
9249@@ -1714,6 +1719,7 @@ _key_expansion_256a:
9250 pxor %xmm1, %xmm0
9251 movaps %xmm0, (TKEYP)
9252 add $0x10, TKEYP
9253+ pax_force_retaddr_bts
9254 ret
9255
9256 .align 4
9257@@ -1738,6 +1744,7 @@ _key_expansion_192a:
9258 shufps $0b01001110, %xmm2, %xmm1
9259 movaps %xmm1, 0x10(TKEYP)
9260 add $0x20, TKEYP
9261+ pax_force_retaddr_bts
9262 ret
9263
9264 .align 4
9265@@ -1757,6 +1764,7 @@ _key_expansion_192b:
9266
9267 movaps %xmm0, (TKEYP)
9268 add $0x10, TKEYP
9269+ pax_force_retaddr_bts
9270 ret
9271
9272 .align 4
9273@@ -1769,6 +1777,7 @@ _key_expansion_256b:
9274 pxor %xmm1, %xmm2
9275 movaps %xmm2, (TKEYP)
9276 add $0x10, TKEYP
9277+ pax_force_retaddr_bts
9278 ret
9279
9280 /*
9281@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
9282 #ifndef __x86_64__
9283 popl KEYP
9284 #endif
9285+ pax_force_retaddr 0, 1
9286 ret
9287+ENDPROC(aesni_set_key)
9288
9289 /*
9290 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
9291@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
9292 popl KLEN
9293 popl KEYP
9294 #endif
9295+ pax_force_retaddr 0, 1
9296 ret
9297+ENDPROC(aesni_enc)
9298
9299 /*
9300 * _aesni_enc1: internal ABI
9301@@ -1959,6 +1972,7 @@ _aesni_enc1:
9302 AESENC KEY STATE
9303 movaps 0x70(TKEYP), KEY
9304 AESENCLAST KEY STATE
9305+ pax_force_retaddr_bts
9306 ret
9307
9308 /*
9309@@ -2067,6 +2081,7 @@ _aesni_enc4:
9310 AESENCLAST KEY STATE2
9311 AESENCLAST KEY STATE3
9312 AESENCLAST KEY STATE4
9313+ pax_force_retaddr_bts
9314 ret
9315
9316 /*
9317@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
9318 popl KLEN
9319 popl KEYP
9320 #endif
9321+ pax_force_retaddr 0, 1
9322 ret
9323+ENDPROC(aesni_dec)
9324
9325 /*
9326 * _aesni_dec1: internal ABI
9327@@ -2146,6 +2163,7 @@ _aesni_dec1:
9328 AESDEC KEY STATE
9329 movaps 0x70(TKEYP), KEY
9330 AESDECLAST KEY STATE
9331+ pax_force_retaddr_bts
9332 ret
9333
9334 /*
9335@@ -2254,6 +2272,7 @@ _aesni_dec4:
9336 AESDECLAST KEY STATE2
9337 AESDECLAST KEY STATE3
9338 AESDECLAST KEY STATE4
9339+ pax_force_retaddr_bts
9340 ret
9341
9342 /*
9343@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
9344 popl KEYP
9345 popl LEN
9346 #endif
9347+ pax_force_retaddr 0, 1
9348 ret
9349+ENDPROC(aesni_ecb_enc)
9350
9351 /*
9352 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9353@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
9354 popl KEYP
9355 popl LEN
9356 #endif
9357+ pax_force_retaddr 0, 1
9358 ret
9359+ENDPROC(aesni_ecb_dec)
9360
9361 /*
9362 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9363@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
9364 popl LEN
9365 popl IVP
9366 #endif
9367+ pax_force_retaddr 0, 1
9368 ret
9369+ENDPROC(aesni_cbc_enc)
9370
9371 /*
9372 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
9373@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
9374 popl LEN
9375 popl IVP
9376 #endif
9377+ pax_force_retaddr 0, 1
9378 ret
9379+ENDPROC(aesni_cbc_dec)
9380
9381 #ifdef __x86_64__
9382 .align 16
9383@@ -2526,6 +2553,7 @@ _aesni_inc_init:
9384 mov $1, TCTR_LOW
9385 MOVQ_R64_XMM TCTR_LOW INC
9386 MOVQ_R64_XMM CTR TCTR_LOW
9387+ pax_force_retaddr_bts
9388 ret
9389
9390 /*
9391@@ -2554,6 +2582,7 @@ _aesni_inc:
9392 .Linc_low:
9393 movaps CTR, IV
9394 PSHUFB_XMM BSWAP_MASK IV
9395+ pax_force_retaddr_bts
9396 ret
9397
9398 /*
9399@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
9400 .Lctr_enc_ret:
9401 movups IV, (IVP)
9402 .Lctr_enc_just_ret:
9403+ pax_force_retaddr 0, 1
9404 ret
9405+ENDPROC(aesni_ctr_enc)
9406 #endif
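Every ret in these crypto assembly files gains a preceding pax_force_retaddr. Judging by the alternative-asm.h changes elsewhere in this patch, under KERNEXEC on amd64 the macro essentially sets bit 63 of the saved return address on the stack: a no-op for genuine kernel text, which already has it set, but fatal for a return address overwritten to point into userland, since the result is non-canonical and ret faults. The arithmetic, sketched in C with illustrative addresses:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t kernel_ra = 0xffffffff81012345ULL; /* bit 63 already set */
            uint64_t user_ra   = 0x0000000000401000ULL; /* attacker-supplied */

            /* unchanged: still a valid kernel address */
            printf("%#llx\n", (unsigned long long)(kernel_ra | (1ULL << 63)));
            /* 0x8000000000401000: non-canonical on x86-64, ret will fault */
            printf("%#llx\n", (unsigned long long)(user_ra | (1ULL << 63)));
            return 0;
    }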
9407diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
9408index 391d245..67f35c2 100644
9409--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
9410+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
9411@@ -20,6 +20,8 @@
9412 *
9413 */
9414
9415+#include <asm/alternative-asm.h>
9416+
9417 .file "blowfish-x86_64-asm.S"
9418 .text
9419
9420@@ -151,9 +153,11 @@ __blowfish_enc_blk:
9421 jnz __enc_xor;
9422
9423 write_block();
9424+ pax_force_retaddr 0, 1
9425 ret;
9426 __enc_xor:
9427 xor_block();
9428+ pax_force_retaddr 0, 1
9429 ret;
9430
9431 .align 8
9432@@ -188,6 +192,7 @@ blowfish_dec_blk:
9433
9434 movq %r11, %rbp;
9435
9436+ pax_force_retaddr 0, 1
9437 ret;
9438
9439 /**********************************************************************
9440@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
9441
9442 popq %rbx;
9443 popq %rbp;
9444+ pax_force_retaddr 0, 1
9445 ret;
9446
9447 __enc_xor4:
9448@@ -349,6 +355,7 @@ __enc_xor4:
9449
9450 popq %rbx;
9451 popq %rbp;
9452+ pax_force_retaddr 0, 1
9453 ret;
9454
9455 .align 8
9456@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
9457 popq %rbx;
9458 popq %rbp;
9459
9460+ pax_force_retaddr 0, 1
9461 ret;
9462
9463diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
9464index 0b33743..7a56206 100644
9465--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
9466+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
9467@@ -20,6 +20,8 @@
9468 *
9469 */
9470
9471+#include <asm/alternative-asm.h>
9472+
9473 .file "camellia-x86_64-asm_64.S"
9474 .text
9475
9476@@ -229,12 +231,14 @@ __enc_done:
9477 enc_outunpack(mov, RT1);
9478
9479 movq RRBP, %rbp;
9480+ pax_force_retaddr 0, 1
9481 ret;
9482
9483 __enc_xor:
9484 enc_outunpack(xor, RT1);
9485
9486 movq RRBP, %rbp;
9487+ pax_force_retaddr 0, 1
9488 ret;
9489
9490 .global camellia_dec_blk;
9491@@ -275,6 +279,7 @@ __dec_rounds16:
9492 dec_outunpack();
9493
9494 movq RRBP, %rbp;
9495+ pax_force_retaddr 0, 1
9496 ret;
9497
9498 /**********************************************************************
9499@@ -468,6 +473,7 @@ __enc2_done:
9500
9501 movq RRBP, %rbp;
9502 popq %rbx;
9503+ pax_force_retaddr 0, 1
9504 ret;
9505
9506 __enc2_xor:
9507@@ -475,6 +481,7 @@ __enc2_xor:
9508
9509 movq RRBP, %rbp;
9510 popq %rbx;
9511+ pax_force_retaddr 0, 1
9512 ret;
9513
9514 .global camellia_dec_blk_2way;
9515@@ -517,4 +524,5 @@ __dec2_rounds16:
9516
9517 movq RRBP, %rbp;
9518 movq RXOR, %rbx;
9519+ pax_force_retaddr 0, 1
9520 ret;
9521diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9522index a41a3aa..bdf5753 100644
9523--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9524+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9525@@ -23,6 +23,8 @@
9526 *
9527 */
9528
9529+#include <asm/alternative-asm.h>
9530+
9531 .file "cast5-avx-x86_64-asm_64.S"
9532
9533 .extern cast5_s1
9534@@ -293,6 +295,7 @@ __skip_enc:
9535 leaq 3*(2*4*4)(%r11), %rax;
9536 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9537
9538+ pax_force_retaddr 0, 1
9539 ret;
9540
9541 __enc_xor16:
9542@@ -303,6 +306,7 @@ __enc_xor16:
9543 leaq 3*(2*4*4)(%r11), %rax;
9544 outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9545
9546+ pax_force_retaddr 0, 1
9547 ret;
9548
9549 .align 16
9550@@ -369,6 +373,7 @@ __dec_tail:
9551 leaq 3*(2*4*4)(%r11), %rax;
9552 outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9553
9554+ pax_force_retaddr 0, 1
9555 ret;
9556
9557 __skip_dec:
9558diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9559index 218d283..819e6da 100644
9560--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9561+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9562@@ -23,6 +23,8 @@
9563 *
9564 */
9565
9566+#include <asm/alternative-asm.h>
9567+
9568 .file "cast6-avx-x86_64-asm_64.S"
9569
9570 .extern cast6_s1
9571@@ -324,12 +326,14 @@ __cast6_enc_blk_8way:
9572 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9573 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9574
9575+ pax_force_retaddr 0, 1
9576 ret;
9577
9578 __enc_xor8:
9579 outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9580 outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9581
9582+ pax_force_retaddr 0, 1
9583 ret;
9584
9585 .align 16
9586@@ -380,4 +384,5 @@ cast6_dec_blk_8way:
9587 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9588 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9589
9590+ pax_force_retaddr 0, 1
9591 ret;
9592diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9593index 6214a9b..1f4fc9a 100644
9594--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
9595+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9596@@ -1,3 +1,5 @@
9597+#include <asm/alternative-asm.h>
9598+
9599 # enter ECRYPT_encrypt_bytes
9600 .text
9601 .p2align 5
9602@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
9603 add %r11,%rsp
9604 mov %rdi,%rax
9605 mov %rsi,%rdx
9606+ pax_force_retaddr 0, 1
9607 ret
9608 # bytesatleast65:
9609 ._bytesatleast65:
9610@@ -891,6 +894,7 @@ ECRYPT_keysetup:
9611 add %r11,%rsp
9612 mov %rdi,%rax
9613 mov %rsi,%rdx
9614+ pax_force_retaddr
9615 ret
9616 # enter ECRYPT_ivsetup
9617 .text
9618@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
9619 add %r11,%rsp
9620 mov %rdi,%rax
9621 mov %rsi,%rdx
9622+ pax_force_retaddr
9623 ret
9624diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9625index 504106b..4e50951 100644
9626--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9627+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9628@@ -24,6 +24,8 @@
9629 *
9630 */
9631
9632+#include <asm/alternative-asm.h>
9633+
9634 .file "serpent-avx-x86_64-asm_64.S"
9635 .text
9636
9637@@ -638,12 +640,14 @@ __serpent_enc_blk_8way_avx:
9638 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9639 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9640
9641+ pax_force_retaddr
9642 ret;
9643
9644 __enc_xor8:
9645 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9646 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9647
9648+ pax_force_retaddr
9649 ret;
9650
9651 .align 8
9652@@ -701,4 +705,5 @@ serpent_dec_blk_8way_avx:
9653 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9654 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9655
9656+ pax_force_retaddr
9657 ret;
9658diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9659index 3ee1ff0..cbc568b 100644
9660--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9661+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
9662@@ -24,6 +24,8 @@
9663 *
9664 */
9665
9666+#include <asm/alternative-asm.h>
9667+
9668 .file "serpent-sse2-x86_64-asm_64.S"
9669 .text
9670
9671@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
9672 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9673 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9674
9675+ pax_force_retaddr
9676 ret;
9677
9678 __enc_xor8:
9679 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9680 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9681
9682+ pax_force_retaddr
9683 ret;
9684
9685 .align 8
9686@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
9687 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9688 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9689
9690+ pax_force_retaddr
9691 ret;
9692diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
9693index 49d6987..df66bd4 100644
9694--- a/arch/x86/crypto/sha1_ssse3_asm.S
9695+++ b/arch/x86/crypto/sha1_ssse3_asm.S
9696@@ -28,6 +28,8 @@
9697 * (at your option) any later version.
9698 */
9699
9700+#include <asm/alternative-asm.h>
9701+
9702 #define CTX %rdi // arg1
9703 #define BUF %rsi // arg2
9704 #define CNT %rdx // arg3
9705@@ -104,6 +106,7 @@
9706 pop %r12
9707 pop %rbp
9708 pop %rbx
9709+ pax_force_retaddr 0, 1
9710 ret
9711
9712 .size \name, .-\name
9713diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9714index 1585abb..1ff9d9b 100644
9715--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9716+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9717@@ -23,6 +23,8 @@
9718 *
9719 */
9720
9721+#include <asm/alternative-asm.h>
9722+
9723 .file "twofish-avx-x86_64-asm_64.S"
9724 .text
9725
9726@@ -303,12 +305,14 @@ __twofish_enc_blk_8way:
9727 outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9728 outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9729
9730+ pax_force_retaddr 0, 1
9731 ret;
9732
9733 __enc_xor8:
9734 outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9735 outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9736
9737+ pax_force_retaddr 0, 1
9738 ret;
9739
9740 .align 8
9741@@ -354,4 +358,5 @@ twofish_dec_blk_8way:
9742 outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
9743 outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
9744
9745+ pax_force_retaddr 0, 1
9746 ret;
9747diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9748index 5b012a2..36d5364 100644
9749--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9750+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
9751@@ -20,6 +20,8 @@
9752 *
9753 */
9754
9755+#include <asm/alternative-asm.h>
9756+
9757 .file "twofish-x86_64-asm-3way.S"
9758 .text
9759
9760@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
9761 popq %r13;
9762 popq %r14;
9763 popq %r15;
9764+ pax_force_retaddr 0, 1
9765 ret;
9766
9767 __enc_xor3:
9768@@ -271,6 +274,7 @@ __enc_xor3:
9769 popq %r13;
9770 popq %r14;
9771 popq %r15;
9772+ pax_force_retaddr 0, 1
9773 ret;
9774
9775 .global twofish_dec_blk_3way
9776@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
9777 popq %r13;
9778 popq %r14;
9779 popq %r15;
9780+ pax_force_retaddr 0, 1
9781 ret;
9782
9783diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
9784index 7bcf3fc..f53832f 100644
9785--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
9786+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
9787@@ -21,6 +21,7 @@
9788 .text
9789
9790 #include <asm/asm-offsets.h>
9791+#include <asm/alternative-asm.h>
9792
9793 #define a_offset 0
9794 #define b_offset 4
9795@@ -268,6 +269,7 @@ twofish_enc_blk:
9796
9797 popq R1
9798 movq $1,%rax
9799+ pax_force_retaddr 0, 1
9800 ret
9801
9802 twofish_dec_blk:
9803@@ -319,4 +321,5 @@ twofish_dec_blk:
9804
9805 popq R1
9806 movq $1,%rax
9807+ pax_force_retaddr 0, 1
9808 ret
9809diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
9810index 07b3a68..bd2a388 100644
9811--- a/arch/x86/ia32/ia32_aout.c
9812+++ b/arch/x86/ia32/ia32_aout.c
9813@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
9814 unsigned long dump_start, dump_size;
9815 struct user32 dump;
9816
9817+ memset(&dump, 0, sizeof(dump));
9818+
9819 fs = get_fs();
9820 set_fs(KERNEL_DS);
9821 has_dumped = 1;
9822diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
9823index efc6a95..95abfe2 100644
9824--- a/arch/x86/ia32/ia32_signal.c
9825+++ b/arch/x86/ia32/ia32_signal.c
9826@@ -163,8 +163,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
9827 }
9828 seg = get_fs();
9829 set_fs(KERNEL_DS);
9830- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
9831- (stack_t __force __user *) &uoss, regs->sp);
9832+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
9833+ (stack_t __force_user *) &uoss, regs->sp);
9834 set_fs(seg);
9835 if (ret >= 0 && uoss_ptr) {
9836 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
9837@@ -396,7 +396,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
9838 sp -= frame_size;
9839 /* Align the stack pointer according to the i386 ABI,
9840 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
9841- sp = ((sp + 4) & -16ul) - 4;
9842+ sp = ((sp - 12) & -16ul) - 4;
9843 return (void __user *) sp;
9844 }
9845
9846@@ -454,7 +454,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
9847 * These are actually not used anymore, but left because some
9848 * gdb versions depend on them as a marker.
9849 */
9850- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9851+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9852 } put_user_catch(err);
9853
9854 if (err)
9855@@ -496,7 +496,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9856 0xb8,
9857 __NR_ia32_rt_sigreturn,
9858 0x80cd,
9859- 0,
9860+ 0
9861 };
9862
9863 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
9864@@ -522,16 +522,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9865
9866 if (ka->sa.sa_flags & SA_RESTORER)
9867 restorer = ka->sa.sa_restorer;
9868+ else if (current->mm->context.vdso)
9869+ /* Return stub is in 32bit vsyscall page */
9870+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
9871 else
9872- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
9873- rt_sigreturn);
9874+ restorer = &frame->retcode;
9875 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
9876
9877 /*
9878 * Not actually used anymore, but left because some gdb
9879 * versions need it.
9880 */
9881- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
9882+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
9883 } put_user_catch(err);
9884
9885 err |= copy_siginfo_to_user32(&frame->info, info);
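The get_sigframe() change preserves the documented invariant — on handler entry ((sp + 4) & 15) == 0, i.e. the stack looks as if a CALL had just pushed a 4-byte return address onto a 16-byte boundary — while choosing a frame exactly 16 bytes lower than the old expression did. The invariant is easy to verify standalone:

    #include <stdio.h>

    int main(void)
    {
            unsigned long sp;

            for (sp = 0x1000; sp < 0x1010; sp++) {
                    unsigned long p = ((sp - 12) & -16ul) - 4;
                    /* every candidate satisfies the i386 ABI entry rule */
                    printf("sp=%#lx p=%#lx (p+4)%%16=%lu\n",
                           sp, p, (p + 4) % 16);
            }
            return 0;
    }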
9886diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
9887index e7fa545..9e6fe1a 100644
9888--- a/arch/x86/ia32/ia32entry.S
9889+++ b/arch/x86/ia32/ia32entry.S
9890@@ -15,8 +15,10 @@
9891 #include <asm/irqflags.h>
9892 #include <asm/asm.h>
9893 #include <asm/smap.h>
9894+#include <asm/pgtable.h>
9895 #include <linux/linkage.h>
9896 #include <linux/err.h>
9897+#include <asm/alternative-asm.h>
9898
9899 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
9900 #include <linux/elf-em.h>
9901@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
9902 ENDPROC(native_irq_enable_sysexit)
9903 #endif
9904
9905+ .macro pax_enter_kernel_user
9906+ pax_set_fptr_mask
9907+#ifdef CONFIG_PAX_MEMORY_UDEREF
9908+ call pax_enter_kernel_user
9909+#endif
9910+ .endm
9911+
9912+ .macro pax_exit_kernel_user
9913+#ifdef CONFIG_PAX_MEMORY_UDEREF
9914+ call pax_exit_kernel_user
9915+#endif
9916+#ifdef CONFIG_PAX_RANDKSTACK
9917+ pushq %rax
9918+ pushq %r11
9919+ call pax_randomize_kstack
9920+ popq %r11
9921+ popq %rax
9922+#endif
9923+ .endm
9924+
9925+.macro pax_erase_kstack
9926+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
9927+ call pax_erase_kstack
9928+#endif
9929+.endm
9930+
9931 /*
9932 * 32bit SYSENTER instruction entry.
9933 *
9934@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
9935 CFI_REGISTER rsp,rbp
9936 SWAPGS_UNSAFE_STACK
9937 movq PER_CPU_VAR(kernel_stack), %rsp
9938- addq $(KERNEL_STACK_OFFSET),%rsp
9939- /*
9940- * No need to follow this irqs on/off section: the syscall
9941- * disabled irqs, here we enable it straight after entry:
9942- */
9943- ENABLE_INTERRUPTS(CLBR_NONE)
9944 movl %ebp,%ebp /* zero extension */
9945 pushq_cfi $__USER32_DS
9946 /*CFI_REL_OFFSET ss,0*/
9947@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
9948 CFI_REL_OFFSET rsp,0
9949 pushfq_cfi
9950 /*CFI_REL_OFFSET rflags,0*/
9951- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
9952- CFI_REGISTER rip,r10
9953+ orl $X86_EFLAGS_IF,(%rsp)
9954+ GET_THREAD_INFO(%r11)
9955+ movl TI_sysenter_return(%r11), %r11d
9956+ CFI_REGISTER rip,r11
9957 pushq_cfi $__USER32_CS
9958 /*CFI_REL_OFFSET cs,0*/
9959 movl %eax, %eax
9960- pushq_cfi %r10
9961+ pushq_cfi %r11
9962 CFI_REL_OFFSET rip,0
9963 pushq_cfi %rax
9964 cld
9965 SAVE_ARGS 0,1,0
9966+ pax_enter_kernel_user
9967+
9968+#ifdef CONFIG_PAX_RANDKSTACK
9969+ pax_erase_kstack
9970+#endif
9971+
9972+ /*
9973+ * No need to follow this irqs on/off section: the syscall
9974+ * disabled irqs, here we enable it straight after entry:
9975+ */
9976+ ENABLE_INTERRUPTS(CLBR_NONE)
9977 /* no need to do an access_ok check here because rbp has been
9978 32bit zero extended */
9979+
9980+#ifdef CONFIG_PAX_MEMORY_UDEREF
9981+ mov $PAX_USER_SHADOW_BASE,%r11
9982+ add %r11,%rbp
9983+#endif
9984+
9985 ASM_STAC
9986 1: movl (%rbp),%ebp
9987 _ASM_EXTABLE(1b,ia32_badarg)
9988 ASM_CLAC
9989- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9990- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
9991+ GET_THREAD_INFO(%r11)
9992+ orl $TS_COMPAT,TI_status(%r11)
9993+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9994 CFI_REMEMBER_STATE
9995 jnz sysenter_tracesys
9996 cmpq $(IA32_NR_syscalls-1),%rax
9997@@ -162,12 +204,15 @@ sysenter_do_call:
9998 sysenter_dispatch:
9999 call *ia32_sys_call_table(,%rax,8)
10000 movq %rax,RAX-ARGOFFSET(%rsp)
10001+ GET_THREAD_INFO(%r11)
10002 DISABLE_INTERRUPTS(CLBR_NONE)
10003 TRACE_IRQS_OFF
10004- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10005+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
10006 jnz sysexit_audit
10007 sysexit_from_sys_call:
10008- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10009+ pax_exit_kernel_user
10010+ pax_erase_kstack
10011+ andl $~TS_COMPAT,TI_status(%r11)
10012 /* clear IF, that popfq doesn't enable interrupts early */
10013 andl $~0x200,EFLAGS-R11(%rsp)
10014 movl RIP-R11(%rsp),%edx /* User %eip */
10015@@ -193,6 +238,9 @@ sysexit_from_sys_call:
10016 movl %eax,%esi /* 2nd arg: syscall number */
10017 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
10018 call __audit_syscall_entry
10019+
10020+ pax_erase_kstack
10021+
10022 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
10023 cmpq $(IA32_NR_syscalls-1),%rax
10024 ja ia32_badsys
10025@@ -204,7 +252,7 @@ sysexit_from_sys_call:
10026 .endm
10027
10028 .macro auditsys_exit exit
10029- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10030+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
10031 jnz ia32_ret_from_sys_call
10032 TRACE_IRQS_ON
10033 ENABLE_INTERRUPTS(CLBR_NONE)
10034@@ -215,11 +263,12 @@ sysexit_from_sys_call:
10035 1: setbe %al /* 1 if error, 0 if not */
10036 movzbl %al,%edi /* zero-extend that into %edi */
10037 call __audit_syscall_exit
10038+ GET_THREAD_INFO(%r11)
10039 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
10040 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
10041 DISABLE_INTERRUPTS(CLBR_NONE)
10042 TRACE_IRQS_OFF
10043- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10044+ testl %edi,TI_flags(%r11)
10045 jz \exit
10046 CLEAR_RREGS -ARGOFFSET
10047 jmp int_with_check
10048@@ -237,7 +286,7 @@ sysexit_audit:
10049
10050 sysenter_tracesys:
10051 #ifdef CONFIG_AUDITSYSCALL
10052- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10053+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
10054 jz sysenter_auditsys
10055 #endif
10056 SAVE_REST
10057@@ -249,6 +298,9 @@ sysenter_tracesys:
10058 RESTORE_REST
10059 cmpq $(IA32_NR_syscalls-1),%rax
10060 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
10061+
10062+ pax_erase_kstack
10063+
10064 jmp sysenter_do_call
10065 CFI_ENDPROC
10066 ENDPROC(ia32_sysenter_target)
10067@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
10068 ENTRY(ia32_cstar_target)
10069 CFI_STARTPROC32 simple
10070 CFI_SIGNAL_FRAME
10071- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
10072+ CFI_DEF_CFA rsp,0
10073 CFI_REGISTER rip,rcx
10074 /*CFI_REGISTER rflags,r11*/
10075 SWAPGS_UNSAFE_STACK
10076 movl %esp,%r8d
10077 CFI_REGISTER rsp,r8
10078 movq PER_CPU_VAR(kernel_stack),%rsp
10079+ SAVE_ARGS 8*6,0,0
10080+ pax_enter_kernel_user
10081+
10082+#ifdef CONFIG_PAX_RANDKSTACK
10083+ pax_erase_kstack
10084+#endif
10085+
10086 /*
10087 * No need to follow this irqs on/off section: the syscall
10088 * disabled irqs and here we enable it straight after entry:
10089 */
10090 ENABLE_INTERRUPTS(CLBR_NONE)
10091- SAVE_ARGS 8,0,0
10092 movl %eax,%eax /* zero extension */
10093 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
10094 movq %rcx,RIP-ARGOFFSET(%rsp)
10095@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
10096 /* no need to do an access_ok check here because r8 has been
10097 32bit zero extended */
10098 /* hardware stack frame is complete now */
10099+
10100+#ifdef CONFIG_PAX_MEMORY_UDEREF
10101+ mov $PAX_USER_SHADOW_BASE,%r11
10102+ add %r11,%r8
10103+#endif
10104+
10105 ASM_STAC
10106 1: movl (%r8),%r9d
10107 _ASM_EXTABLE(1b,ia32_badarg)
10108 ASM_CLAC
10109- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10110- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10111+ GET_THREAD_INFO(%r11)
10112+ orl $TS_COMPAT,TI_status(%r11)
10113+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
10114 CFI_REMEMBER_STATE
10115 jnz cstar_tracesys
10116 cmpq $IA32_NR_syscalls-1,%rax
10117@@ -319,12 +384,15 @@ cstar_do_call:
10118 cstar_dispatch:
10119 call *ia32_sys_call_table(,%rax,8)
10120 movq %rax,RAX-ARGOFFSET(%rsp)
10121+ GET_THREAD_INFO(%r11)
10122 DISABLE_INTERRUPTS(CLBR_NONE)
10123 TRACE_IRQS_OFF
10124- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10125+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
10126 jnz sysretl_audit
10127 sysretl_from_sys_call:
10128- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10129+ pax_exit_kernel_user
10130+ pax_erase_kstack
10131+ andl $~TS_COMPAT,TI_status(%r11)
10132 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
10133 movl RIP-ARGOFFSET(%rsp),%ecx
10134 CFI_REGISTER rip,rcx
10135@@ -352,7 +420,7 @@ sysretl_audit:
10136
10137 cstar_tracesys:
10138 #ifdef CONFIG_AUDITSYSCALL
10139- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10140+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
10141 jz cstar_auditsys
10142 #endif
10143 xchgl %r9d,%ebp
10144@@ -366,6 +434,9 @@ cstar_tracesys:
10145 xchgl %ebp,%r9d
10146 cmpq $(IA32_NR_syscalls-1),%rax
10147 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
10148+
10149+ pax_erase_kstack
10150+
10151 jmp cstar_do_call
10152 END(ia32_cstar_target)
10153
10154@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
10155 CFI_REL_OFFSET rip,RIP-RIP
10156 PARAVIRT_ADJUST_EXCEPTION_FRAME
10157 SWAPGS
10158- /*
10159- * No need to follow this irqs on/off section: the syscall
10160- * disabled irqs and here we enable it straight after entry:
10161- */
10162- ENABLE_INTERRUPTS(CLBR_NONE)
10163 movl %eax,%eax
10164 pushq_cfi %rax
10165 cld
10166 /* note the registers are not zero extended to the sf.
10167 this could be a problem. */
10168 SAVE_ARGS 0,1,0
10169- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10170- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
10171+ pax_enter_kernel_user
10172+
10173+#ifdef CONFIG_PAX_RANDKSTACK
10174+ pax_erase_kstack
10175+#endif
10176+
10177+ /*
10178+ * No need to follow this irqs on/off section: the syscall
10179+ * disabled irqs and here we enable it straight after entry:
10180+ */
10181+ ENABLE_INTERRUPTS(CLBR_NONE)
10182+ GET_THREAD_INFO(%r11)
10183+ orl $TS_COMPAT,TI_status(%r11)
10184+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
10185 jnz ia32_tracesys
10186 cmpq $(IA32_NR_syscalls-1),%rax
10187 ja ia32_badsys
10188@@ -442,6 +520,9 @@ ia32_tracesys:
10189 RESTORE_REST
10190 cmpq $(IA32_NR_syscalls-1),%rax
10191 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
10192+
10193+ pax_erase_kstack
10194+
10195 jmp ia32_do_call
10196 END(ia32_syscall)
10197
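The ia32entry.S hunks above wire three PaX hooks into every 32-bit syscall path: pax_enter_kernel_user/pax_exit_kernel_user switch the UDEREF view of userland on entry and exit, pax_erase_kstack (PAX_MEMORY_STACKLEAK) clears the kernel stack, and PAX_RANDKSTACK re-randomizes the kernel stack offset before returning. The UDEREF #ifdef blocks also rebase the saved user stack pointer (%rbp for sysenter, %r8 for syscall) by PAX_USER_SHADOW_BASE before the fault-handled load of the sixth syscall argument. A minimal C sketch of that rebased access, with an illustrative shadow-base constant (the real value is defined elsewhere in this patch):

/*
 * Hedged sketch: under PAX_MEMORY_UDEREF the kernel reaches userland
 * through a shadow mapping, so a raw user pointer is offset by
 * PAX_USER_SHADOW_BASE before being dereferenced. The constant below is
 * illustrative only; ia32entry.S additionally wraps the load in an
 * _ASM_EXTABLE entry so a bad pointer faults into ia32_badarg.
 */
#define PAX_USER_SHADOW_BASE_SKETCH	0x1000000000000000UL

static inline unsigned int uderef_load_arg6(unsigned long user_sp)
{
	return *(const unsigned int *)(user_sp + PAX_USER_SHADOW_BASE_SKETCH);
}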
10198diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
10199index 86d68d1..f9960fe 100644
10200--- a/arch/x86/ia32/sys_ia32.c
10201+++ b/arch/x86/ia32/sys_ia32.c
10202@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
10203 */
10204 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
10205 {
10206- typeof(ubuf->st_uid) uid = 0;
10207- typeof(ubuf->st_gid) gid = 0;
10208+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
10209+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
10210 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
10211 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
10212 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
10213@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
10214 mm_segment_t old_fs = get_fs();
10215
10216 set_fs(KERNEL_DS);
10217- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
10218+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
10219 set_fs(old_fs);
10220 if (put_compat_timespec(&t, interval))
10221 return -EFAULT;
10222@@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
10223 mm_segment_t old_fs = get_fs();
10224
10225 set_fs(KERNEL_DS);
10226- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
10227+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
10228 set_fs(old_fs);
10229 if (!ret) {
10230 switch (_NSIG_WORDS) {
10231@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
10232 if (copy_siginfo_from_user32(&info, uinfo))
10233 return -EFAULT;
10234 set_fs(KERNEL_DS);
10235- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
10236+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
10237 set_fs(old_fs);
10238 return ret;
10239 }
10240@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
10241 return -EFAULT;
10242
10243 set_fs(KERNEL_DS);
10244- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
10245+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
10246 count);
10247 set_fs(old_fs);
10248
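The sys_ia32.c changes swap plain __user casts for __force_user in the set_fs(KERNEL_DS) wrappers: the kernel temporarily widens the access limit, passes a kernel buffer where a user pointer is expected, and the cast records that for sparse instead of silently discarding the address-space attribute. A sketch of the annotation, on the assumption that it mirrors the usual sparse idiom (the actual definition lives in the compiler.h hunks of this patch):

/* Assumed definition, mirroring the standard sparse pattern: combine the
 * __force override with the __user address space in one annotation, so
 * casts such as (struct timespec __force_user *)&t stay checkable. */
#ifdef __CHECKER__
# define __force_user	__force __user
#else
# define __force_user
#endif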
10249diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
10250index 372231c..a5aa1a1 100644
10251--- a/arch/x86/include/asm/alternative-asm.h
10252+++ b/arch/x86/include/asm/alternative-asm.h
10253@@ -18,6 +18,45 @@
10254 .endm
10255 #endif
10256
10257+#ifdef KERNEXEC_PLUGIN
10258+ .macro pax_force_retaddr_bts rip=0
10259+ btsq $63,\rip(%rsp)
10260+ .endm
10261+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10262+ .macro pax_force_retaddr rip=0, reload=0
10263+ btsq $63,\rip(%rsp)
10264+ .endm
10265+ .macro pax_force_fptr ptr
10266+ btsq $63,\ptr
10267+ .endm
10268+ .macro pax_set_fptr_mask
10269+ .endm
10270+#endif
10271+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
10272+ .macro pax_force_retaddr rip=0, reload=0
10273+ .if \reload
10274+ pax_set_fptr_mask
10275+ .endif
10276+ orq %r10,\rip(%rsp)
10277+ .endm
10278+ .macro pax_force_fptr ptr
10279+ orq %r10,\ptr
10280+ .endm
10281+ .macro pax_set_fptr_mask
10282+ movabs $0x8000000000000000,%r10
10283+ .endm
10284+#endif
10285+#else
10286+ .macro pax_force_retaddr rip=0, reload=0
10287+ .endm
10288+ .macro pax_force_fptr ptr
10289+ .endm
10290+ .macro pax_force_retaddr_bts rip=0
10291+ .endm
10292+ .macro pax_set_fptr_mask
10293+ .endm
10294+#endif
10295+
10296 .macro altinstruction_entry orig alt feature orig_len alt_len
10297 .long \orig - .
10298 .long \alt - .
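These macros implement the KERNEXEC plugin's return-address protection. Both methods keep bit 63 of saved return addresses and function pointers set: the _BTS method sets it directly with btsq, while the _OR method ORs in a mask that pax_set_fptr_mask preloads into %r10. A value overwritten with a userland address then becomes non-canonical and faults on ret instead of transferring control. A one-line C sketch of the invariant:

/* Sketch of the invariant: kernel text already lives in the upper
 * canonical half, so forcing bit 63 is a no-op for legitimate return
 * addresses but corrupts any smuggled userland address into a
 * non-canonical value that faults on use. */
static inline unsigned long pax_force_retaddr_sketch(unsigned long addr)
{
	return addr | (1UL << 63);
}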
10299diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
10300index 58ed6d9..f1cbe58 100644
10301--- a/arch/x86/include/asm/alternative.h
10302+++ b/arch/x86/include/asm/alternative.h
10303@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
10304 ".pushsection .discard,\"aw\",@progbits\n" \
10305 DISCARD_ENTRY(1) \
10306 ".popsection\n" \
10307- ".pushsection .altinstr_replacement, \"ax\"\n" \
10308+ ".pushsection .altinstr_replacement, \"a\"\n" \
10309 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
10310 ".popsection"
10311
10312@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
10313 DISCARD_ENTRY(1) \
10314 DISCARD_ENTRY(2) \
10315 ".popsection\n" \
10316- ".pushsection .altinstr_replacement, \"ax\"\n" \
10317+ ".pushsection .altinstr_replacement, \"a\"\n" \
10318 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
10319 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
10320 ".popsection"
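The .altinstr_replacement section drops its executable flag here ("ax" becomes "a"), and again in the cpufeature.h hunk below: apply_alternatives() only copies the replacement bytes into the live instruction stream, so the section itself is pure data and need not be mapped executable under KERNEXEC.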
10321diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
10322index 3388034..050f0b9 100644
10323--- a/arch/x86/include/asm/apic.h
10324+++ b/arch/x86/include/asm/apic.h
10325@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
10326
10327 #ifdef CONFIG_X86_LOCAL_APIC
10328
10329-extern unsigned int apic_verbosity;
10330+extern int apic_verbosity;
10331 extern int local_apic_timer_c2_ok;
10332
10333 extern int disable_apic;
10334diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
10335index 20370c6..a2eb9b0 100644
10336--- a/arch/x86/include/asm/apm.h
10337+++ b/arch/x86/include/asm/apm.h
10338@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
10339 __asm__ __volatile__(APM_DO_ZERO_SEGS
10340 "pushl %%edi\n\t"
10341 "pushl %%ebp\n\t"
10342- "lcall *%%cs:apm_bios_entry\n\t"
10343+ "lcall *%%ss:apm_bios_entry\n\t"
10344 "setc %%al\n\t"
10345 "popl %%ebp\n\t"
10346 "popl %%edi\n\t"
10347@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
10348 __asm__ __volatile__(APM_DO_ZERO_SEGS
10349 "pushl %%edi\n\t"
10350 "pushl %%ebp\n\t"
10351- "lcall *%%cs:apm_bios_entry\n\t"
10352+ "lcall *%%ss:apm_bios_entry\n\t"
10353 "setc %%bl\n\t"
10354 "popl %%ebp\n\t"
10355 "popl %%edi\n\t"
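The apm.h change replaces the %cs segment override with %ss for the indirect far call through apm_bios_entry. The override selects the segment used to fetch the call target from memory; under PaX's segmentation-based KERNEXEC the kernel code segment gets a non-zero base, so a %cs-relative data reference would resolve to the wrong address, while %ss preserves the flat data view.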
10356diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
10357index b6c3b82..b4c077a 100644
10358--- a/arch/x86/include/asm/atomic.h
10359+++ b/arch/x86/include/asm/atomic.h
10360@@ -22,7 +22,18 @@
10361 */
10362 static inline int atomic_read(const atomic_t *v)
10363 {
10364- return (*(volatile int *)&(v)->counter);
10365+ return (*(volatile const int *)&(v)->counter);
10366+}
10367+
10368+/**
10369+ * atomic_read_unchecked - read atomic variable
10370+ * @v: pointer of type atomic_unchecked_t
10371+ *
10372+ * Atomically reads the value of @v.
10373+ */
10374+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10375+{
10376+ return (*(volatile const int *)&(v)->counter);
10377 }
10378
10379 /**
10380@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
10381 }
10382
10383 /**
10384+ * atomic_set_unchecked - set atomic variable
10385+ * @v: pointer of type atomic_unchecked_t
10386+ * @i: required value
10387+ *
10388+ * Atomically sets the value of @v to @i.
10389+ */
10390+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10391+{
10392+ v->counter = i;
10393+}
10394+
10395+/**
10396 * atomic_add - add integer to atomic variable
10397 * @i: integer value to add
10398 * @v: pointer of type atomic_t
10399@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
10400 */
10401 static inline void atomic_add(int i, atomic_t *v)
10402 {
10403- asm volatile(LOCK_PREFIX "addl %1,%0"
10404+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10405+
10406+#ifdef CONFIG_PAX_REFCOUNT
10407+ "jno 0f\n"
10408+ LOCK_PREFIX "subl %1,%0\n"
10409+ "int $4\n0:\n"
10410+ _ASM_EXTABLE(0b, 0b)
10411+#endif
10412+
10413+ : "+m" (v->counter)
10414+ : "ir" (i));
10415+}
10416+
10417+/**
10418+ * atomic_add_unchecked - add integer to atomic variable
10419+ * @i: integer value to add
10420+ * @v: pointer of type atomic_unchecked_t
10421+ *
10422+ * Atomically adds @i to @v.
10423+ */
10424+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10425+{
10426+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10427 : "+m" (v->counter)
10428 : "ir" (i));
10429 }
10430@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
10431 */
10432 static inline void atomic_sub(int i, atomic_t *v)
10433 {
10434- asm volatile(LOCK_PREFIX "subl %1,%0"
10435+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10436+
10437+#ifdef CONFIG_PAX_REFCOUNT
10438+ "jno 0f\n"
10439+ LOCK_PREFIX "addl %1,%0\n"
10440+ "int $4\n0:\n"
10441+ _ASM_EXTABLE(0b, 0b)
10442+#endif
10443+
10444+ : "+m" (v->counter)
10445+ : "ir" (i));
10446+}
10447+
10448+/**
10449+ * atomic_sub_unchecked - subtract integer from atomic variable
10450+ * @i: integer value to subtract
10451+ * @v: pointer of type atomic_unchecked_t
10452+ *
10453+ * Atomically subtracts @i from @v.
10454+ */
10455+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10456+{
10457+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10458 : "+m" (v->counter)
10459 : "ir" (i));
10460 }
10461@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10462 {
10463 unsigned char c;
10464
10465- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10466+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
10467+
10468+#ifdef CONFIG_PAX_REFCOUNT
10469+ "jno 0f\n"
10470+ LOCK_PREFIX "addl %2,%0\n"
10471+ "int $4\n0:\n"
10472+ _ASM_EXTABLE(0b, 0b)
10473+#endif
10474+
10475+ "sete %1\n"
10476 : "+m" (v->counter), "=qm" (c)
10477 : "ir" (i) : "memory");
10478 return c;
10479@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10480 */
10481 static inline void atomic_inc(atomic_t *v)
10482 {
10483- asm volatile(LOCK_PREFIX "incl %0"
10484+ asm volatile(LOCK_PREFIX "incl %0\n"
10485+
10486+#ifdef CONFIG_PAX_REFCOUNT
10487+ "jno 0f\n"
10488+ LOCK_PREFIX "decl %0\n"
10489+ "int $4\n0:\n"
10490+ _ASM_EXTABLE(0b, 0b)
10491+#endif
10492+
10493+ : "+m" (v->counter));
10494+}
10495+
10496+/**
10497+ * atomic_inc_unchecked - increment atomic variable
10498+ * @v: pointer of type atomic_unchecked_t
10499+ *
10500+ * Atomically increments @v by 1.
10501+ */
10502+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10503+{
10504+ asm volatile(LOCK_PREFIX "incl %0\n"
10505 : "+m" (v->counter));
10506 }
10507
10508@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
10509 */
10510 static inline void atomic_dec(atomic_t *v)
10511 {
10512- asm volatile(LOCK_PREFIX "decl %0"
10513+ asm volatile(LOCK_PREFIX "decl %0\n"
10514+
10515+#ifdef CONFIG_PAX_REFCOUNT
10516+ "jno 0f\n"
10517+ LOCK_PREFIX "incl %0\n"
10518+ "int $4\n0:\n"
10519+ _ASM_EXTABLE(0b, 0b)
10520+#endif
10521+
10522+ : "+m" (v->counter));
10523+}
10524+
10525+/**
10526+ * atomic_dec_unchecked - decrement atomic variable
10527+ * @v: pointer of type atomic_unchecked_t
10528+ *
10529+ * Atomically decrements @v by 1.
10530+ */
10531+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10532+{
10533+ asm volatile(LOCK_PREFIX "decl %0\n"
10534 : "+m" (v->counter));
10535 }
10536
10537@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10538 {
10539 unsigned char c;
10540
10541- asm volatile(LOCK_PREFIX "decl %0; sete %1"
10542+ asm volatile(LOCK_PREFIX "decl %0\n"
10543+
10544+#ifdef CONFIG_PAX_REFCOUNT
10545+ "jno 0f\n"
10546+ LOCK_PREFIX "incl %0\n"
10547+ "int $4\n0:\n"
10548+ _ASM_EXTABLE(0b, 0b)
10549+#endif
10550+
10551+ "sete %1\n"
10552 : "+m" (v->counter), "=qm" (c)
10553 : : "memory");
10554 return c != 0;
10555@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10556 {
10557 unsigned char c;
10558
10559- asm volatile(LOCK_PREFIX "incl %0; sete %1"
10560+ asm volatile(LOCK_PREFIX "incl %0\n"
10561+
10562+#ifdef CONFIG_PAX_REFCOUNT
10563+ "jno 0f\n"
10564+ LOCK_PREFIX "decl %0\n"
10565+ "int $4\n0:\n"
10566+ _ASM_EXTABLE(0b, 0b)
10567+#endif
10568+
10569+ "sete %1\n"
10570+ : "+m" (v->counter), "=qm" (c)
10571+ : : "memory");
10572+ return c != 0;
10573+}
10574+
10575+/**
10576+ * atomic_inc_and_test_unchecked - increment and test
10577+ * @v: pointer of type atomic_unchecked_t
10578+ *
10579+ * Atomically increments @v by 1
10580+ * and returns true if the result is zero, or false for all
10581+ * other cases.
10582+ */
10583+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10584+{
10585+ unsigned char c;
10586+
10587+ asm volatile(LOCK_PREFIX "incl %0\n"
10588+ "sete %1\n"
10589 : "+m" (v->counter), "=qm" (c)
10590 : : "memory");
10591 return c != 0;
10592@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10593 {
10594 unsigned char c;
10595
10596- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10597+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
10598+
10599+#ifdef CONFIG_PAX_REFCOUNT
10600+ "jno 0f\n"
10601+ LOCK_PREFIX "subl %2,%0\n"
10602+ "int $4\n0:\n"
10603+ _ASM_EXTABLE(0b, 0b)
10604+#endif
10605+
10606+ "sets %1\n"
10607 : "+m" (v->counter), "=qm" (c)
10608 : "ir" (i) : "memory");
10609 return c;
10610@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
10611 goto no_xadd;
10612 #endif
10613 /* Modern 486+ processor */
10614- return i + xadd(&v->counter, i);
10615+ return i + xadd_check_overflow(&v->counter, i);
10616
10617 #ifdef CONFIG_M386
10618 no_xadd: /* Legacy 386 processor */
10619@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
10620 }
10621
10622 /**
10623+ * atomic_add_return_unchecked - add integer and return
10624+ * @i: integer value to add
10625+ * @v: pointer of type atomic_unchecked_t
10626+ *
10627+ * Atomically adds @i to @v and returns @i + @v
10628+ */
10629+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10630+{
10631+#ifdef CONFIG_M386
10632+ int __i;
10633+ unsigned long flags;
10634+ if (unlikely(boot_cpu_data.x86 <= 3))
10635+ goto no_xadd;
10636+#endif
10637+ /* Modern 486+ processor */
10638+ return i + xadd(&v->counter, i);
10639+
10640+#ifdef CONFIG_M386
10641+no_xadd: /* Legacy 386 processor */
10642+ raw_local_irq_save(flags);
10643+ __i = atomic_read_unchecked(v);
10644+ atomic_set_unchecked(v, i + __i);
10645+ raw_local_irq_restore(flags);
10646+ return i + __i;
10647+#endif
10648+}
10649+
10650+/**
10651 * atomic_sub_return - subtract integer and return
10652 * @v: pointer of type atomic_t
10653 * @i: integer value to subtract
10654@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10655 }
10656
10657 #define atomic_inc_return(v) (atomic_add_return(1, v))
10658+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10659+{
10660+ return atomic_add_return_unchecked(1, v);
10661+}
10662 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10663
10664 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10665@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10666 return cmpxchg(&v->counter, old, new);
10667 }
10668
10669+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10670+{
10671+ return cmpxchg(&v->counter, old, new);
10672+}
10673+
10674 static inline int atomic_xchg(atomic_t *v, int new)
10675 {
10676 return xchg(&v->counter, new);
10677 }
10678
10679+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10680+{
10681+ return xchg(&v->counter, new);
10682+}
10683+
10684 /**
10685 * __atomic_add_unless - add unless the number is already a given value
10686 * @v: pointer of type atomic_t
10687@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
10688 */
10689 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10690 {
10691- int c, old;
10692+ int c, old, new;
10693 c = atomic_read(v);
10694 for (;;) {
10695- if (unlikely(c == (u)))
10696+ if (unlikely(c == u))
10697 break;
10698- old = atomic_cmpxchg((v), c, c + (a));
10699+
10700+ asm volatile("addl %2,%0\n"
10701+
10702+#ifdef CONFIG_PAX_REFCOUNT
10703+ "jno 0f\n"
10704+ "subl %2,%0\n"
10705+ "int $4\n0:\n"
10706+ _ASM_EXTABLE(0b, 0b)
10707+#endif
10708+
10709+ : "=r" (new)
10710+ : "0" (c), "ir" (a));
10711+
10712+ old = atomic_cmpxchg(v, c, new);
10713 if (likely(old == c))
10714 break;
10715 c = old;
10716@@ -241,6 +458,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10717 }
10718
10719 /**
10720+ * atomic_inc_not_zero_hint - increment if not null
10721+ * @v: pointer of type atomic_t
10722+ * @hint: probable value of the atomic before the increment
10723+ *
10724+ * This version of atomic_inc_not_zero() gives a hint of probable
10725+ * value of the atomic. This helps the processor avoid reading the memory
10726+ * before doing the atomic read/modify/write cycle, lowering the
10727+ * number of bus transactions on some arches.
10728+ *
10729+ * Returns: 0 if increment was not done, 1 otherwise.
10730+ */
10731+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
10732+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
10733+{
10734+ int val, c = hint, new;
10735+
10736+ /* sanity test, should be removed by compiler if hint is a constant */
10737+ if (!hint)
10738+ return __atomic_add_unless(v, 1, 0);
10739+
10740+ do {
10741+ asm volatile("incl %0\n"
10742+
10743+#ifdef CONFIG_PAX_REFCOUNT
10744+ "jno 0f\n"
10745+ "decl %0\n"
10746+ "int $4\n0:\n"
10747+ _ASM_EXTABLE(0b, 0b)
10748+#endif
10749+
10750+ : "=r" (new)
10751+ : "0" (c));
10752+
10753+ val = atomic_cmpxchg(v, c, new);
10754+ if (val == c)
10755+ return 1;
10756+ c = val;
10757+ } while (c);
10758+
10759+ return 0;
10760+}
10761+
10762+/**
10763 * atomic_inc_short - increment of a short integer
10764 * @v: pointer to type int
10765 *
10766@@ -269,14 +529,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
10767 #endif
10768
10769 /* These are x86-specific, used by some header files */
10770-#define atomic_clear_mask(mask, addr) \
10771- asm volatile(LOCK_PREFIX "andl %0,%1" \
10772- : : "r" (~(mask)), "m" (*(addr)) : "memory")
10773+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
10774+{
10775+ asm volatile(LOCK_PREFIX "andl %1,%0"
10776+ : "+m" (v->counter)
10777+ : "r" (~(mask))
10778+ : "memory");
10779+}
10780
10781-#define atomic_set_mask(mask, addr) \
10782- asm volatile(LOCK_PREFIX "orl %0,%1" \
10783- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
10784- : "memory")
10785+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10786+{
10787+ asm volatile(LOCK_PREFIX "andl %1,%0"
10788+ : "+m" (v->counter)
10789+ : "r" (~(mask))
10790+ : "memory");
10791+}
10792+
10793+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
10794+{
10795+ asm volatile(LOCK_PREFIX "orl %1,%0"
10796+ : "+m" (v->counter)
10797+ : "r" (mask)
10798+ : "memory");
10799+}
10800+
10801+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
10802+{
10803+ asm volatile(LOCK_PREFIX "orl %1,%0"
10804+ : "+m" (v->counter)
10805+ : "r" (mask)
10806+ : "memory");
10807+}
10808
10809 /* Atomic operations are already serializing on x86 */
10810 #define smp_mb__before_atomic_dec() barrier()
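Every atomic.h primitive above gains the same PAX_REFCOUNT guard: perform the operation, jno past the fixup if the signed result did not overflow, otherwise undo the operation and raise int $4 (the overflow exception), with an _ASM_EXTABLE entry marking the site so the trap handler can resume at the following instruction. The _unchecked variants keep the original unguarded sequence for counters where wraparound is intended. A self-contained user-space sketch of the guard, reporting through a flag instead of int $4 (assumes x86-64 and GCC/Clang extended asm):

#include <limits.h>
#include <stdio.h>

/* Do the locked add; if the Overflow Flag is set, undo it and report.
 * The patch traps here instead, turning a refcount overflow into #OF. */
static int checked_add(int *counter, int i)
{
	int overflowed = 0;

	asm volatile("lock addl %2, %0\n\t"
		     "jno 1f\n\t"
		     "lock subl %2, %0\n\t"	/* undo the wrapped add */
		     "movl $1, %1\n"
		     "1:"
		     : "+m" (*counter), "+r" (overflowed)
		     : "ir" (i)
		     : "cc");
	return overflowed;
}

int main(void)
{
	int c = INT_MAX;

	/* prints overflowed=1 counter=2147483647: the add was undone */
	printf("overflowed=%d counter=%d\n", checked_add(&c, 1), c);
	return 0;
}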
10811diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
10812index b154de7..aadebd8 100644
10813--- a/arch/x86/include/asm/atomic64_32.h
10814+++ b/arch/x86/include/asm/atomic64_32.h
10815@@ -12,6 +12,14 @@ typedef struct {
10816 u64 __aligned(8) counter;
10817 } atomic64_t;
10818
10819+#ifdef CONFIG_PAX_REFCOUNT
10820+typedef struct {
10821+ u64 __aligned(8) counter;
10822+} atomic64_unchecked_t;
10823+#else
10824+typedef atomic64_t atomic64_unchecked_t;
10825+#endif
10826+
10827 #define ATOMIC64_INIT(val) { (val) }
10828
10829 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
10830@@ -37,21 +45,31 @@ typedef struct {
10831 ATOMIC64_DECL_ONE(sym##_386)
10832
10833 ATOMIC64_DECL_ONE(add_386);
10834+ATOMIC64_DECL_ONE(add_unchecked_386);
10835 ATOMIC64_DECL_ONE(sub_386);
10836+ATOMIC64_DECL_ONE(sub_unchecked_386);
10837 ATOMIC64_DECL_ONE(inc_386);
10838+ATOMIC64_DECL_ONE(inc_unchecked_386);
10839 ATOMIC64_DECL_ONE(dec_386);
10840+ATOMIC64_DECL_ONE(dec_unchecked_386);
10841 #endif
10842
10843 #define alternative_atomic64(f, out, in...) \
10844 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
10845
10846 ATOMIC64_DECL(read);
10847+ATOMIC64_DECL(read_unchecked);
10848 ATOMIC64_DECL(set);
10849+ATOMIC64_DECL(set_unchecked);
10850 ATOMIC64_DECL(xchg);
10851 ATOMIC64_DECL(add_return);
10852+ATOMIC64_DECL(add_return_unchecked);
10853 ATOMIC64_DECL(sub_return);
10854+ATOMIC64_DECL(sub_return_unchecked);
10855 ATOMIC64_DECL(inc_return);
10856+ATOMIC64_DECL(inc_return_unchecked);
10857 ATOMIC64_DECL(dec_return);
10858+ATOMIC64_DECL(dec_return_unchecked);
10859 ATOMIC64_DECL(dec_if_positive);
10860 ATOMIC64_DECL(inc_not_zero);
10861 ATOMIC64_DECL(add_unless);
10862@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
10863 }
10864
10865 /**
10866+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
10867+ * @v: pointer to type atomic64_unchecked_t
10868+ * @o: expected value
10869+ * @n: new value
10870+ *
10871+ * Atomically sets @v to @n if it was equal to @o and returns
10872+ * the old value.
10873+ */
10874+
10875+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
10876+{
10877+ return cmpxchg64(&v->counter, o, n);
10878+}
10879+
10880+/**
10881 * atomic64_xchg - xchg atomic64 variable
10882 * @v: pointer to type atomic64_t
10883 * @n: value to assign
10884@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
10885 }
10886
10887 /**
10888+ * atomic64_set_unchecked - set atomic64 variable
10889+ * @v: pointer to type atomic64_unchecked_t
10890+ * @n: value to assign
10891+ *
10892+ * Atomically sets the value of @v to @n.
10893+ */
10894+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
10895+{
10896+ unsigned high = (unsigned)(i >> 32);
10897+ unsigned low = (unsigned)i;
10898+ alternative_atomic64(set, /* no output */,
10899+ "S" (v), "b" (low), "c" (high)
10900+ : "eax", "edx", "memory");
10901+}
10902+
10903+/**
10904 * atomic64_read - read atomic64 variable
10905 * @v: pointer to type atomic64_t
10906 *
10907@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
10908 }
10909
10910 /**
10911+ * atomic64_read_unchecked - read atomic64 variable
10912+ * @v: pointer to type atomic64_unchecked_t
10913+ *
10914+ * Atomically reads the value of @v and returns it.
10915+ */
10916+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
10917+{
10918+ long long r;
10919+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
10920+ return r;
10921+}
10922+
10923+/**
10924 * atomic64_add_return - add and return
10925 * @i: integer value to add
10926 * @v: pointer to type atomic64_t
10927@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
10928 return i;
10929 }
10930
10931+/**
10932+ * atomic64_add_return_unchecked - add and return
10933+ * @i: integer value to add
10934+ * @v: pointer to type atomic64_unchecked_t
10935+ *
10936+ * Atomically adds @i to @v and returns @i + *@v
10937+ */
10938+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
10939+{
10940+ alternative_atomic64(add_return_unchecked,
10941+ ASM_OUTPUT2("+A" (i), "+c" (v)),
10942+ ASM_NO_INPUT_CLOBBER("memory"));
10943+ return i;
10944+}
10945+
10946 /*
10947 * Other variants with different arithmetic operators:
10948 */
10949@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
10950 return a;
10951 }
10952
10953+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10954+{
10955+ long long a;
10956+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
10957+ "S" (v) : "memory", "ecx");
10958+ return a;
10959+}
10960+
10961 static inline long long atomic64_dec_return(atomic64_t *v)
10962 {
10963 long long a;
10964@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
10965 }
10966
10967 /**
10968+ * atomic64_add_unchecked - add integer to atomic64 variable
10969+ * @i: integer value to add
10970+ * @v: pointer to type atomic64_unchecked_t
10971+ *
10972+ * Atomically adds @i to @v.
10973+ */
10974+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
10975+{
10976+ __alternative_atomic64(add_unchecked, add_return_unchecked,
10977+ ASM_OUTPUT2("+A" (i), "+c" (v)),
10978+ ASM_NO_INPUT_CLOBBER("memory"));
10979+ return i;
10980+}
10981+
10982+/**
10983 * atomic64_sub - subtract the atomic64 variable
10984 * @i: integer value to subtract
10985 * @v: pointer to type atomic64_t
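On 32-bit x86 the atomic64 operations are out-of-line cmpxchg8b-based helpers reached through alternative_atomic64, so the patch declares a parallel family of *_unchecked entry points rather than duplicating inline asm; under CONFIG_PAX_REFCOUNT, atomic64_unchecked_t is a distinct type, so checked and unchecked counters cannot be mixed by accident. A hypothetical call-site sketch of the split this patch applies throughout the tree:

/* Hypothetical driver counters, illustrating the call-site conversion the
 * rest of this patch performs: wraparound is harmless for pure statistics,
 * so those counters become atomic64_unchecked_t, while real reference
 * counts stay atomic64_t and keep the overflow trap. */
static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);	/* statistic */
static atomic64_t map_refcnt = ATOMIC64_INIT(1);		/* refcount */

static void account_rx(long long len)
{
	atomic64_add_unchecked(len, &rx_bytes);	/* may wrap, by design */
	atomic64_inc(&map_refcnt);		/* traps on overflow */
}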
10986diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
10987index 0e1cbfc..5623683 100644
10988--- a/arch/x86/include/asm/atomic64_64.h
10989+++ b/arch/x86/include/asm/atomic64_64.h
10990@@ -18,7 +18,19 @@
10991 */
10992 static inline long atomic64_read(const atomic64_t *v)
10993 {
10994- return (*(volatile long *)&(v)->counter);
10995+ return (*(volatile const long *)&(v)->counter);
10996+}
10997+
10998+/**
10999+ * atomic64_read_unchecked - read atomic64 variable
11000+ * @v: pointer of type atomic64_unchecked_t
11001+ *
11002+ * Atomically reads the value of @v.
11003+ * Doesn't imply a read memory barrier.
11004+ */
11005+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
11006+{
11007+ return (*(volatile const long *)&(v)->counter);
11008 }
11009
11010 /**
11011@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
11012 }
11013
11014 /**
11015+ * atomic64_set_unchecked - set atomic64 variable
11016+ * @v: pointer to type atomic64_unchecked_t
11017+ * @i: required value
11018+ *
11019+ * Atomically sets the value of @v to @i.
11020+ */
11021+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
11022+{
11023+ v->counter = i;
11024+}
11025+
11026+/**
11027 * atomic64_add - add integer to atomic64 variable
11028 * @i: integer value to add
11029 * @v: pointer to type atomic64_t
11030@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
11031 */
11032 static inline void atomic64_add(long i, atomic64_t *v)
11033 {
11034+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
11035+
11036+#ifdef CONFIG_PAX_REFCOUNT
11037+ "jno 0f\n"
11038+ LOCK_PREFIX "subq %1,%0\n"
11039+ "int $4\n0:\n"
11040+ _ASM_EXTABLE(0b, 0b)
11041+#endif
11042+
11043+ : "=m" (v->counter)
11044+ : "er" (i), "m" (v->counter));
11045+}
11046+
11047+/**
11048+ * atomic64_add_unchecked - add integer to atomic64 variable
11049+ * @i: integer value to add
11050+ * @v: pointer to type atomic64_unchecked_t
11051+ *
11052+ * Atomically adds @i to @v.
11053+ */
11054+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
11055+{
11056 asm volatile(LOCK_PREFIX "addq %1,%0"
11057 : "=m" (v->counter)
11058 : "er" (i), "m" (v->counter));
11059@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
11060 */
11061 static inline void atomic64_sub(long i, atomic64_t *v)
11062 {
11063- asm volatile(LOCK_PREFIX "subq %1,%0"
11064+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
11065+
11066+#ifdef CONFIG_PAX_REFCOUNT
11067+ "jno 0f\n"
11068+ LOCK_PREFIX "addq %1,%0\n"
11069+ "int $4\n0:\n"
11070+ _ASM_EXTABLE(0b, 0b)
11071+#endif
11072+
11073+ : "=m" (v->counter)
11074+ : "er" (i), "m" (v->counter));
11075+}
11076+
11077+/**
11078+ * atomic64_sub_unchecked - subtract the atomic64 variable
11079+ * @i: integer value to subtract
11080+ * @v: pointer to type atomic64_unchecked_t
11081+ *
11082+ * Atomically subtracts @i from @v.
11083+ */
11084+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
11085+{
11086+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
11087 : "=m" (v->counter)
11088 : "er" (i), "m" (v->counter));
11089 }
11090@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
11091 {
11092 unsigned char c;
11093
11094- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
11095+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
11096+
11097+#ifdef CONFIG_PAX_REFCOUNT
11098+ "jno 0f\n"
11099+ LOCK_PREFIX "addq %2,%0\n"
11100+ "int $4\n0:\n"
11101+ _ASM_EXTABLE(0b, 0b)
11102+#endif
11103+
11104+ "sete %1\n"
11105 : "=m" (v->counter), "=qm" (c)
11106 : "er" (i), "m" (v->counter) : "memory");
11107 return c;
11108@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
11109 */
11110 static inline void atomic64_inc(atomic64_t *v)
11111 {
11112+ asm volatile(LOCK_PREFIX "incq %0\n"
11113+
11114+#ifdef CONFIG_PAX_REFCOUNT
11115+ "jno 0f\n"
11116+ LOCK_PREFIX "decq %0\n"
11117+ "int $4\n0:\n"
11118+ _ASM_EXTABLE(0b, 0b)
11119+#endif
11120+
11121+ : "=m" (v->counter)
11122+ : "m" (v->counter));
11123+}
11124+
11125+/**
11126+ * atomic64_inc_unchecked - increment atomic64 variable
11127+ * @v: pointer to type atomic64_unchecked_t
11128+ *
11129+ * Atomically increments @v by 1.
11130+ */
11131+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
11132+{
11133 asm volatile(LOCK_PREFIX "incq %0"
11134 : "=m" (v->counter)
11135 : "m" (v->counter));
11136@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
11137 */
11138 static inline void atomic64_dec(atomic64_t *v)
11139 {
11140- asm volatile(LOCK_PREFIX "decq %0"
11141+ asm volatile(LOCK_PREFIX "decq %0\n"
11142+
11143+#ifdef CONFIG_PAX_REFCOUNT
11144+ "jno 0f\n"
11145+ LOCK_PREFIX "incq %0\n"
11146+ "int $4\n0:\n"
11147+ _ASM_EXTABLE(0b, 0b)
11148+#endif
11149+
11150+ : "=m" (v->counter)
11151+ : "m" (v->counter));
11152+}
11153+
11154+/**
11155+ * atomic64_dec_unchecked - decrement atomic64 variable
11156+ * @v: pointer to type atomic64_unchecked_t
11157+ *
11158+ * Atomically decrements @v by 1.
11159+ */
11160+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
11161+{
11162+ asm volatile(LOCK_PREFIX "decq %0\n"
11163 : "=m" (v->counter)
11164 : "m" (v->counter));
11165 }
11166@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
11167 {
11168 unsigned char c;
11169
11170- asm volatile(LOCK_PREFIX "decq %0; sete %1"
11171+ asm volatile(LOCK_PREFIX "decq %0\n"
11172+
11173+#ifdef CONFIG_PAX_REFCOUNT
11174+ "jno 0f\n"
11175+ LOCK_PREFIX "incq %0\n"
11176+ "int $4\n0:\n"
11177+ _ASM_EXTABLE(0b, 0b)
11178+#endif
11179+
11180+ "sete %1\n"
11181 : "=m" (v->counter), "=qm" (c)
11182 : "m" (v->counter) : "memory");
11183 return c != 0;
11184@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
11185 {
11186 unsigned char c;
11187
11188- asm volatile(LOCK_PREFIX "incq %0; sete %1"
11189+ asm volatile(LOCK_PREFIX "incq %0\n"
11190+
11191+#ifdef CONFIG_PAX_REFCOUNT
11192+ "jno 0f\n"
11193+ LOCK_PREFIX "decq %0\n"
11194+ "int $4\n0:\n"
11195+ _ASM_EXTABLE(0b, 0b)
11196+#endif
11197+
11198+ "sete %1\n"
11199 : "=m" (v->counter), "=qm" (c)
11200 : "m" (v->counter) : "memory");
11201 return c != 0;
11202@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
11203 {
11204 unsigned char c;
11205
11206- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
11207+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
11208+
11209+#ifdef CONFIG_PAX_REFCOUNT
11210+ "jno 0f\n"
11211+ LOCK_PREFIX "subq %2,%0\n"
11212+ "int $4\n0:\n"
11213+ _ASM_EXTABLE(0b, 0b)
11214+#endif
11215+
11216+ "sets %1\n"
11217 : "=m" (v->counter), "=qm" (c)
11218 : "er" (i), "m" (v->counter) : "memory");
11219 return c;
11220@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
11221 */
11222 static inline long atomic64_add_return(long i, atomic64_t *v)
11223 {
11224+ return i + xadd_check_overflow(&v->counter, i);
11225+}
11226+
11227+/**
11228+ * atomic64_add_return_unchecked - add and return
11229+ * @i: integer value to add
11230+ * @v: pointer to type atomic64_unchecked_t
11231+ *
11232+ * Atomically adds @i to @v and returns @i + @v
11233+ */
11234+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
11235+{
11236 return i + xadd(&v->counter, i);
11237 }
11238
11239@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
11240 }
11241
11242 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
11243+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
11244+{
11245+ return atomic64_add_return_unchecked(1, v);
11246+}
11247 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
11248
11249 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
11250@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
11251 return cmpxchg(&v->counter, old, new);
11252 }
11253
11254+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
11255+{
11256+ return cmpxchg(&v->counter, old, new);
11257+}
11258+
11259 static inline long atomic64_xchg(atomic64_t *v, long new)
11260 {
11261 return xchg(&v->counter, new);
11262@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
11263 */
11264 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
11265 {
11266- long c, old;
11267+ long c, old, new;
11268 c = atomic64_read(v);
11269 for (;;) {
11270- if (unlikely(c == (u)))
11271+ if (unlikely(c == u))
11272 break;
11273- old = atomic64_cmpxchg((v), c, c + (a));
11274+
11275+ asm volatile("add %2,%0\n"
11276+
11277+#ifdef CONFIG_PAX_REFCOUNT
11278+ "jno 0f\n"
11279+ "sub %2,%0\n"
11280+ "int $4\n0:\n"
11281+ _ASM_EXTABLE(0b, 0b)
11282+#endif
11283+
11284+ : "=r" (new)
11285+ : "0" (c), "ir" (a));
11286+
11287+ old = atomic64_cmpxchg(v, c, new);
11288 if (likely(old == c))
11289 break;
11290 c = old;
11291 }
11292- return c != (u);
11293+ return c != u;
11294 }
11295
11296 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
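atomic64_add_unless is restructured the same way as its 32-bit counterpart in atomic.h: instead of passing c + a straight to cmpxchg, the new value is computed in a register by an add that carries the PAX_REFCOUNT overflow check, then published with a compare-and-swap that retries if the counter moved underneath. A portable C11 sketch of that loop shape, with the overflow check itself omitted and its position marked:

#include <stdatomic.h>
#include <stdbool.h>

/* Compute the new value first (the patch's inline asm checks OF at this
 * point and traps), then try to publish it; on failure the builtin
 * reloads c with the current counter value and the loop retries. */
static bool add_unless(_Atomic long *v, long a, long u)
{
	long c = atomic_load(v);

	while (c != u) {
		long next = c + a;	/* overflow check goes here */

		if (atomic_compare_exchange_weak(v, &c, next))
			return true;
	}
	return false;
}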
11297diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
11298index 6dfd019..0c6699f 100644
11299--- a/arch/x86/include/asm/bitops.h
11300+++ b/arch/x86/include/asm/bitops.h
11301@@ -40,7 +40,7 @@
11302 * a mask operation on a byte.
11303 */
11304 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
11305-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
11306+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
11307 #define CONST_MASK(nr) (1 << ((nr) & 7))
11308
11309 /**
11310diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
11311index b13fe63..0dab13a 100644
11312--- a/arch/x86/include/asm/boot.h
11313+++ b/arch/x86/include/asm/boot.h
11314@@ -11,10 +11,15 @@
11315 #include <asm/pgtable_types.h>
11316
11317 /* Physical address where kernel should be loaded. */
11318-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
11319+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
11320 + (CONFIG_PHYSICAL_ALIGN - 1)) \
11321 & ~(CONFIG_PHYSICAL_ALIGN - 1))
11322
11323+#ifndef __ASSEMBLY__
11324+extern unsigned char __LOAD_PHYSICAL_ADDR[];
11325+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
11326+#endif
11327+
11328 /* Minimum kernel alignment, as a power of two */
11329 #ifdef CONFIG_X86_64
11330 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
11331diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
11332index 48f99f1..d78ebf9 100644
11333--- a/arch/x86/include/asm/cache.h
11334+++ b/arch/x86/include/asm/cache.h
11335@@ -5,12 +5,13 @@
11336
11337 /* L1 cache line size */
11338 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11339-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11340+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11341
11342 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
11343+#define __read_only __attribute__((__section__(".data..read_only")))
11344
11345 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
11346-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
11347+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
11348
11349 #ifdef CONFIG_X86_VSMP
11350 #ifdef CONFIG_SMP
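cache.h introduces the __read_only attribute alongside __read_mostly: data placed in .data..read_only is write-protected once the kernel has finished booting, and later hunks bracket the few legitimate writes (the GDT, IDT, and LDT updates in desc.h below) with pax_open_kernel()/pax_close_kernel(). The _AC(1,UL) change simply makes the cache-size constants unsigned long in both C and assembly contexts.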
11351diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
11352index 9863ee3..4a1f8e1 100644
11353--- a/arch/x86/include/asm/cacheflush.h
11354+++ b/arch/x86/include/asm/cacheflush.h
11355@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
11356 unsigned long pg_flags = pg->flags & _PGMT_MASK;
11357
11358 if (pg_flags == _PGMT_DEFAULT)
11359- return -1;
11360+ return ~0UL;
11361 else if (pg_flags == _PGMT_WC)
11362 return _PAGE_CACHE_WC;
11363 else if (pg_flags == _PGMT_UC_MINUS)
11364diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
11365index 46fc474..b02b0f9 100644
11366--- a/arch/x86/include/asm/checksum_32.h
11367+++ b/arch/x86/include/asm/checksum_32.h
11368@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
11369 int len, __wsum sum,
11370 int *src_err_ptr, int *dst_err_ptr);
11371
11372+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
11373+ int len, __wsum sum,
11374+ int *src_err_ptr, int *dst_err_ptr);
11375+
11376+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
11377+ int len, __wsum sum,
11378+ int *src_err_ptr, int *dst_err_ptr);
11379+
11380 /*
11381 * Note: when you get a NULL pointer exception here this means someone
11382 * passed in an incorrect kernel address to one of these functions.
11383@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
11384 int *err_ptr)
11385 {
11386 might_sleep();
11387- return csum_partial_copy_generic((__force void *)src, dst,
11388+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
11389 len, sum, err_ptr, NULL);
11390 }
11391
11392@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
11393 {
11394 might_sleep();
11395 if (access_ok(VERIFY_WRITE, dst, len))
11396- return csum_partial_copy_generic(src, (__force void *)dst,
11397+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
11398 len, sum, NULL, err_ptr);
11399
11400 if (len)
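Splitting csum_partial_copy_generic into explicit _from_user and _to_user entry points lets the checksum assembly (patched elsewhere in this diff) apply the UDEREF user-segment handling only on the side that actually touches userland, instead of one generic routine having to guess from its arguments.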
11401diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
11402index 8d871ea..c1a0dc9 100644
11403--- a/arch/x86/include/asm/cmpxchg.h
11404+++ b/arch/x86/include/asm/cmpxchg.h
11405@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
11406 __compiletime_error("Bad argument size for cmpxchg");
11407 extern void __xadd_wrong_size(void)
11408 __compiletime_error("Bad argument size for xadd");
11409+extern void __xadd_check_overflow_wrong_size(void)
11410+ __compiletime_error("Bad argument size for xadd_check_overflow");
11411 extern void __add_wrong_size(void)
11412 __compiletime_error("Bad argument size for add");
11413+extern void __add_check_overflow_wrong_size(void)
11414+ __compiletime_error("Bad argument size for add_check_overflow");
11415
11416 /*
11417 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
11418@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
11419 __ret; \
11420 })
11421
11422+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
11423+ ({ \
11424+ __typeof__ (*(ptr)) __ret = (arg); \
11425+ switch (sizeof(*(ptr))) { \
11426+ case __X86_CASE_L: \
11427+ asm volatile (lock #op "l %0, %1\n" \
11428+ "jno 0f\n" \
11429+ "mov %0,%1\n" \
11430+ "int $4\n0:\n" \
11431+ _ASM_EXTABLE(0b, 0b) \
11432+ : "+r" (__ret), "+m" (*(ptr)) \
11433+ : : "memory", "cc"); \
11434+ break; \
11435+ case __X86_CASE_Q: \
11436+ asm volatile (lock #op "q %q0, %1\n" \
11437+ "jno 0f\n" \
11438+ "mov %0,%1\n" \
11439+ "int $4\n0:\n" \
11440+ _ASM_EXTABLE(0b, 0b) \
11441+ : "+r" (__ret), "+m" (*(ptr)) \
11442+ : : "memory", "cc"); \
11443+ break; \
11444+ default: \
11445+ __ ## op ## _check_overflow_wrong_size(); \
11446+ } \
11447+ __ret; \
11448+ })
11449+
11450 /*
11451 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
11452 * Since this is generally used to protect other memory information, we
11453@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
11454 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
11455 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
11456
11457+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
11458+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
11459+
11460 #define __add(ptr, inc, lock) \
11461 ({ \
11462 __typeof__ (*(ptr)) __ret = (inc); \
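xadd_check_overflow is the exchange-and-add counterpart of the guards in atomic.h: the macro performs the locked xadd, and on signed overflow writes the fetched pre-add value back over the wrapped sum (the "mov %0,%1" in the fixup) before raising int $4. atomic_add_return and atomic64_add_return above are switched to it, so returning adds on reference counts get the same trap as the void variants.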
11463diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
11464index 8c297aa..7a90f03 100644
11465--- a/arch/x86/include/asm/cpufeature.h
11466+++ b/arch/x86/include/asm/cpufeature.h
11467@@ -205,7 +205,7 @@
11468 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
11469 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
11470 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
11471-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
11472+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
11473 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
11474 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
11475 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
11476@@ -379,7 +379,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
11477 ".section .discard,\"aw\",@progbits\n"
11478 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
11479 ".previous\n"
11480- ".section .altinstr_replacement,\"ax\"\n"
11481+ ".section .altinstr_replacement,\"a\"\n"
11482 "3: movb $1,%0\n"
11483 "4:\n"
11484 ".previous\n"
11485diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
11486index 8bf1c06..f723dfd 100644
11487--- a/arch/x86/include/asm/desc.h
11488+++ b/arch/x86/include/asm/desc.h
11489@@ -4,6 +4,7 @@
11490 #include <asm/desc_defs.h>
11491 #include <asm/ldt.h>
11492 #include <asm/mmu.h>
11493+#include <asm/pgtable.h>
11494
11495 #include <linux/smp.h>
11496 #include <linux/percpu.h>
11497@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11498
11499 desc->type = (info->read_exec_only ^ 1) << 1;
11500 desc->type |= info->contents << 2;
11501+ desc->type |= info->seg_not_present ^ 1;
11502
11503 desc->s = 1;
11504 desc->dpl = 0x3;
11505@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
11506 }
11507
11508 extern struct desc_ptr idt_descr;
11509-extern gate_desc idt_table[];
11510 extern struct desc_ptr nmi_idt_descr;
11511-extern gate_desc nmi_idt_table[];
11512-
11513-struct gdt_page {
11514- struct desc_struct gdt[GDT_ENTRIES];
11515-} __attribute__((aligned(PAGE_SIZE)));
11516-
11517-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
11518+extern gate_desc idt_table[256];
11519+extern gate_desc nmi_idt_table[256];
11520
11521+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
11522 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
11523 {
11524- return per_cpu(gdt_page, cpu).gdt;
11525+ return cpu_gdt_table[cpu];
11526 }
11527
11528 #ifdef CONFIG_X86_64
11529@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
11530 unsigned long base, unsigned dpl, unsigned flags,
11531 unsigned short seg)
11532 {
11533- gate->a = (seg << 16) | (base & 0xffff);
11534- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
11535+ gate->gate.offset_low = base;
11536+ gate->gate.seg = seg;
11537+ gate->gate.reserved = 0;
11538+ gate->gate.type = type;
11539+ gate->gate.s = 0;
11540+ gate->gate.dpl = dpl;
11541+ gate->gate.p = 1;
11542+ gate->gate.offset_high = base >> 16;
11543 }
11544
11545 #endif
11546@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
11547
11548 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
11549 {
11550+ pax_open_kernel();
11551 memcpy(&idt[entry], gate, sizeof(*gate));
11552+ pax_close_kernel();
11553 }
11554
11555 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
11556 {
11557+ pax_open_kernel();
11558 memcpy(&ldt[entry], desc, 8);
11559+ pax_close_kernel();
11560 }
11561
11562 static inline void
11563@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
11564 default: size = sizeof(*gdt); break;
11565 }
11566
11567+ pax_open_kernel();
11568 memcpy(&gdt[entry], desc, size);
11569+ pax_close_kernel();
11570 }
11571
11572 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
11573@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
11574
11575 static inline void native_load_tr_desc(void)
11576 {
11577+ pax_open_kernel();
11578 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
11579+ pax_close_kernel();
11580 }
11581
11582 static inline void native_load_gdt(const struct desc_ptr *dtr)
11583@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
11584 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
11585 unsigned int i;
11586
11587+ pax_open_kernel();
11588 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
11589 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
11590+ pax_close_kernel();
11591 }
11592
11593 #define _LDT_empty(info) \
11594@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
11595 }
11596
11597 #ifdef CONFIG_X86_64
11598-static inline void set_nmi_gate(int gate, void *addr)
11599+static inline void set_nmi_gate(int gate, const void *addr)
11600 {
11601 gate_desc s;
11602
11603@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
11604 }
11605 #endif
11606
11607-static inline void _set_gate(int gate, unsigned type, void *addr,
11608+static inline void _set_gate(int gate, unsigned type, const void *addr,
11609 unsigned dpl, unsigned ist, unsigned seg)
11610 {
11611 gate_desc s;
11612@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
11613 * Pentium F0 0F bugfix can have resulted in the mapped
11614 * IDT being write-protected.
11615 */
11616-static inline void set_intr_gate(unsigned int n, void *addr)
11617+static inline void set_intr_gate(unsigned int n, const void *addr)
11618 {
11619 BUG_ON((unsigned)n > 0xFF);
11620 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
11621@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
11622 /*
11623 * This routine sets up an interrupt gate at directory privilege level 3.
11624 */
11625-static inline void set_system_intr_gate(unsigned int n, void *addr)
11626+static inline void set_system_intr_gate(unsigned int n, const void *addr)
11627 {
11628 BUG_ON((unsigned)n > 0xFF);
11629 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
11630 }
11631
11632-static inline void set_system_trap_gate(unsigned int n, void *addr)
11633+static inline void set_system_trap_gate(unsigned int n, const void *addr)
11634 {
11635 BUG_ON((unsigned)n > 0xFF);
11636 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
11637 }
11638
11639-static inline void set_trap_gate(unsigned int n, void *addr)
11640+static inline void set_trap_gate(unsigned int n, const void *addr)
11641 {
11642 BUG_ON((unsigned)n > 0xFF);
11643 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
11644@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
11645 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
11646 {
11647 BUG_ON((unsigned)n > 0xFF);
11648- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
11649+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
11650 }
11651
11652-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
11653+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
11654 {
11655 BUG_ON((unsigned)n > 0xFF);
11656 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
11657 }
11658
11659-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
11660+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
11661 {
11662 BUG_ON((unsigned)n > 0xFF);
11663 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
11664 }
11665
11666+#ifdef CONFIG_X86_32
11667+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
11668+{
11669+ struct desc_struct d;
11670+
11671+ if (likely(limit))
11672+ limit = (limit - 1UL) >> PAGE_SHIFT;
11673+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
11674+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
11675+}
11676+#endif
11677+
11678 #endif /* _ASM_X86_DESC_H */
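With the per-cpu gdt_page replaced by a statically allocated cpu_gdt_table living in read-only memory, every legitimate descriptor update above is bracketed by pax_open_kernel()/pax_close_kernel(). A conceptual sketch of one way such a bracket can work, assuming the classic CR0.WP approach; this is an assumption about the mechanism, not a copy of the PaX implementation, which is defined elsewhere in this patch:

/* Assumed mechanism: clearing CR0.WP lets ring-0 writes bypass read-only
 * page protection for the duration of the update; restoring the saved
 * value re-arms it. Kernel context only; preemption/IRQs left aside. */
#define X86_CR0_WP_BIT	(1UL << 16)

static inline unsigned long open_kernel_sketch(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~X86_CR0_WP_BIT) : "memory");
	return cr0;
}

static inline void close_kernel_sketch(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0) : "memory");
}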
11679diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
11680index 278441f..b95a174 100644
11681--- a/arch/x86/include/asm/desc_defs.h
11682+++ b/arch/x86/include/asm/desc_defs.h
11683@@ -31,6 +31,12 @@ struct desc_struct {
11684 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
11685 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
11686 };
11687+ struct {
11688+ u16 offset_low;
11689+ u16 seg;
11690+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
11691+ unsigned offset_high: 16;
11692+ } gate;
11693 };
11694 } __attribute__((packed));
11695
11696diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11697index 3778256..c5d4fce 100644
11698--- a/arch/x86/include/asm/e820.h
11699+++ b/arch/x86/include/asm/e820.h
11700@@ -69,7 +69,7 @@ struct e820map {
11701 #define ISA_START_ADDRESS 0xa0000
11702 #define ISA_END_ADDRESS 0x100000
11703
11704-#define BIOS_BEGIN 0x000a0000
11705+#define BIOS_BEGIN 0x000c0000
11706 #define BIOS_END 0x00100000
11707
11708 #define BIOS_ROM_BASE 0xffe00000
11709diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11710index 5939f44..f8845f6 100644
11711--- a/arch/x86/include/asm/elf.h
11712+++ b/arch/x86/include/asm/elf.h
11713@@ -243,7 +243,25 @@ extern int force_personality32;
11714 the loader. We need to make sure that it is out of the way of the program
11715 that it will "exec", and that there is sufficient room for the brk. */
11716
11717+#ifdef CONFIG_PAX_SEGMEXEC
11718+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11719+#else
11720 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11721+#endif
11722+
11723+#ifdef CONFIG_PAX_ASLR
11724+#ifdef CONFIG_X86_32
11725+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11726+
11727+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11728+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11729+#else
11730+#define PAX_ELF_ET_DYN_BASE 0x400000UL
11731+
11732+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11733+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11734+#endif
11735+#endif
11736
11737 /* This yields a mask that user programs can use to figure out what
11738 instruction set this CPU supports. This could be done in user space,
11739@@ -296,16 +314,12 @@ do { \
11740
11741 #define ARCH_DLINFO \
11742 do { \
11743- if (vdso_enabled) \
11744- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11745- (unsigned long)current->mm->context.vdso); \
11746+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11747 } while (0)
11748
11749 #define ARCH_DLINFO_X32 \
11750 do { \
11751- if (vdso_enabled) \
11752- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11753- (unsigned long)current->mm->context.vdso); \
11754+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11755 } while (0)
11756
11757 #define AT_SYSINFO 32
11758@@ -320,7 +334,7 @@ else \
11759
11760 #endif /* !CONFIG_X86_32 */
11761
11762-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11763+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11764
11765 #define VDSO_ENTRY \
11766 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11767@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
11768 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11769 #define compat_arch_setup_additional_pages syscall32_setup_pages
11770
11771-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11772-#define arch_randomize_brk arch_randomize_brk
11773-
11774 /*
11775 * True on X86_32 or when emulating IA32 on X86_64
11776 */
11777diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11778index 75ce3f4..882e801 100644
11779--- a/arch/x86/include/asm/emergency-restart.h
11780+++ b/arch/x86/include/asm/emergency-restart.h
11781@@ -13,6 +13,6 @@ enum reboot_type {
11782
11783 extern enum reboot_type reboot_type;
11784
11785-extern void machine_emergency_restart(void);
11786+extern void machine_emergency_restart(void) __noreturn;
11787
11788 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11789diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
11790index 41ab26e..a88c9e6 100644
11791--- a/arch/x86/include/asm/fpu-internal.h
11792+++ b/arch/x86/include/asm/fpu-internal.h
11793@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
11794 ({ \
11795 int err; \
11796 asm volatile(ASM_STAC "\n" \
11797- "1:" #insn "\n\t" \
11798+ "1:" \
11799+ __copyuser_seg \
11800+ #insn "\n\t" \
11801 "2: " ASM_CLAC "\n" \
11802 ".section .fixup,\"ax\"\n" \
11803 "3: movl $-1,%[err]\n" \
11804@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
11805 "emms\n\t" /* clear stack tags */
11806 "fildl %P[addr]", /* set F?P to defined value */
11807 X86_FEATURE_FXSAVE_LEAK,
11808- [addr] "m" (tsk->thread.fpu.has_fpu));
11809+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
11810
11811 return fpu_restore_checking(&tsk->thread.fpu);
11812 }
11813diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11814index f373046..02653e2 100644
11815--- a/arch/x86/include/asm/futex.h
11816+++ b/arch/x86/include/asm/futex.h
11817@@ -12,6 +12,7 @@
11818 #include <asm/smap.h>
11819
11820 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11821+ typecheck(u32 __user *, uaddr); \
11822 asm volatile("\t" ASM_STAC "\n" \
11823 "1:\t" insn "\n" \
11824 "2:\t" ASM_CLAC "\n" \
11825@@ -20,15 +21,16 @@
11826 "\tjmp\t2b\n" \
11827 "\t.previous\n" \
11828 _ASM_EXTABLE(1b, 3b) \
11829- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11830+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
11831 : "i" (-EFAULT), "0" (oparg), "1" (0))
11832
11833 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11834+ typecheck(u32 __user *, uaddr); \
11835 asm volatile("\t" ASM_STAC "\n" \
11836 "1:\tmovl %2, %0\n" \
11837 "\tmovl\t%0, %3\n" \
11838 "\t" insn "\n" \
11839- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
11840+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
11841 "\tjnz\t1b\n" \
11842 "3:\t" ASM_CLAC "\n" \
11843 "\t.section .fixup,\"ax\"\n" \
11844@@ -38,7 +40,7 @@
11845 _ASM_EXTABLE(1b, 4b) \
11846 _ASM_EXTABLE(2b, 4b) \
11847 : "=&a" (oldval), "=&r" (ret), \
11848- "+m" (*uaddr), "=&r" (tem) \
11849+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11850 : "r" (oparg), "i" (-EFAULT), "1" (0))
11851
11852 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11853@@ -65,10 +67,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11854
11855 switch (op) {
11856 case FUTEX_OP_SET:
11857- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11858+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11859 break;
11860 case FUTEX_OP_ADD:
11861- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11862+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11863 uaddr, oparg);
11864 break;
11865 case FUTEX_OP_OR:
11866@@ -128,14 +130,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
11867 return -EFAULT;
11868
11869 asm volatile("\t" ASM_STAC "\n"
11870- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
11871+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
11872 "2:\t" ASM_CLAC "\n"
11873 "\t.section .fixup, \"ax\"\n"
11874 "3:\tmov %3, %0\n"
11875 "\tjmp 2b\n"
11876 "\t.previous\n"
11877 _ASM_EXTABLE(1b, 3b)
11878- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
11879+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
11880 : "i" (-EFAULT), "r" (newval), "1" (oldval)
11881 : "memory"
11882 );
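
For context on the uaddr/oparg plumbing above: futex_atomic_op_inuser() receives one encoded_op word packing the operation, the comparison, and two signed 12-bit arguments. A sketch of the core unpacking as done by the generic kernel prologue (the encoded value here is illustrative, and the arithmetic right shift for sign extension assumes a typical compiler):

#include <stdio.h>

int main(void)
{
	unsigned encoded_op = (1u << 28) | (3u << 24) | (5u << 12) | 7;
	int op     = (encoded_op >> 28) & 7;        /* FUTEX_OP_* */
	int cmp    = (encoded_op >> 24) & 15;       /* FUTEX_OP_CMP_* */
	int oparg  = (int)(encoded_op << 8) >> 20;  /* sign-extend bits 12..23 */
	int cmparg = (int)(encoded_op << 20) >> 20; /* sign-extend bits 0..11 */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
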
11883diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11884index eb92a6e..b98b2f4 100644
11885--- a/arch/x86/include/asm/hw_irq.h
11886+++ b/arch/x86/include/asm/hw_irq.h
11887@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
11888 extern void enable_IO_APIC(void);
11889
11890 /* Statistics */
11891-extern atomic_t irq_err_count;
11892-extern atomic_t irq_mis_count;
11893+extern atomic_unchecked_t irq_err_count;
11894+extern atomic_unchecked_t irq_mis_count;
11895
11896 /* EISA */
11897 extern void eisa_set_level_irq(unsigned int irq);
11898diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
11899index d8e8eef..15b1179 100644
11900--- a/arch/x86/include/asm/io.h
11901+++ b/arch/x86/include/asm/io.h
11902@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
11903 return ioremap_nocache(offset, size);
11904 }
11905
11906-extern void iounmap(volatile void __iomem *addr);
11907+extern void iounmap(const volatile void __iomem *addr);
11908
11909 extern void set_iounmap_nonlazy(void);
11910
11911@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
11912
11913 #include <linux/vmalloc.h>
11914
11915+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11916+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11917+{
11918+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11919+}
11920+
11921+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11922+{
11923+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11924+}
11925+
11926 /*
11927 * Convert a virtual cached pointer to an uncached pointer
11928 */
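
The two range checks added above accept a mapping only if its last page still lies below the CPU's physical address limit of 1 << x86_phys_bits bytes. A standalone sketch of the same arithmetic, assuming a 36-bit physical address width and 4K pages:

#include <stdio.h>

int main(void)
{
	unsigned long long addr = 0xfffff000ULL, count = 0x2000;
	int phys_bits = 36, page_shift = 12;            /* assumed CPU */
	unsigned long long last_pfn =
		(addr + count + (1ULL << page_shift) - 1) >> page_shift;

	printf("valid: %d\n", last_pfn < (1ULL << (phys_bits - page_shift)));
	return 0;
}
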
11929diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11930index bba3cf8..06bc8da 100644
11931--- a/arch/x86/include/asm/irqflags.h
11932+++ b/arch/x86/include/asm/irqflags.h
11933@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
11934 sti; \
11935 sysexit
11936
11937+#define GET_CR0_INTO_RDI mov %cr0, %rdi
11938+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11939+#define GET_CR3_INTO_RDI mov %cr3, %rdi
11940+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11941+
11942 #else
11943 #define INTERRUPT_RETURN iret
11944 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11945diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11946index d3ddd17..c9fb0cc 100644
11947--- a/arch/x86/include/asm/kprobes.h
11948+++ b/arch/x86/include/asm/kprobes.h
11949@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
11950 #define RELATIVEJUMP_SIZE 5
11951 #define RELATIVECALL_OPCODE 0xe8
11952 #define RELATIVE_ADDR_SIZE 4
11953-#define MAX_STACK_SIZE 64
11954-#define MIN_STACK_SIZE(ADDR) \
11955- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11956- THREAD_SIZE - (unsigned long)(ADDR))) \
11957- ? (MAX_STACK_SIZE) \
11958- : (((unsigned long)current_thread_info()) + \
11959- THREAD_SIZE - (unsigned long)(ADDR)))
11960+#define MAX_STACK_SIZE 64UL
11961+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11962
11963 #define flush_insn_slot(p) do { } while (0)
11964
11965diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11966index c8bed0d..85c03fd 100644
11967--- a/arch/x86/include/asm/local.h
11968+++ b/arch/x86/include/asm/local.h
11969@@ -10,33 +10,97 @@ typedef struct {
11970 atomic_long_t a;
11971 } local_t;
11972
11973+typedef struct {
11974+ atomic_long_unchecked_t a;
11975+} local_unchecked_t;
11976+
11977 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
11978
11979 #define local_read(l) atomic_long_read(&(l)->a)
11980+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
11981 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
11982+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
11983
11984 static inline void local_inc(local_t *l)
11985 {
11986- asm volatile(_ASM_INC "%0"
11987+ asm volatile(_ASM_INC "%0\n"
11988+
11989+#ifdef CONFIG_PAX_REFCOUNT
11990+ "jno 0f\n"
11991+ _ASM_DEC "%0\n"
11992+ "int $4\n0:\n"
11993+ _ASM_EXTABLE(0b, 0b)
11994+#endif
11995+
11996+ : "+m" (l->a.counter));
11997+}
11998+
11999+static inline void local_inc_unchecked(local_unchecked_t *l)
12000+{
12001+ asm volatile(_ASM_INC "%0\n"
12002 : "+m" (l->a.counter));
12003 }
12004
12005 static inline void local_dec(local_t *l)
12006 {
12007- asm volatile(_ASM_DEC "%0"
12008+ asm volatile(_ASM_DEC "%0\n"
12009+
12010+#ifdef CONFIG_PAX_REFCOUNT
12011+ "jno 0f\n"
12012+ _ASM_INC "%0\n"
12013+ "int $4\n0:\n"
12014+ _ASM_EXTABLE(0b, 0b)
12015+#endif
12016+
12017+ : "+m" (l->a.counter));
12018+}
12019+
12020+static inline void local_dec_unchecked(local_unchecked_t *l)
12021+{
12022+ asm volatile(_ASM_DEC "%0\n"
12023 : "+m" (l->a.counter));
12024 }
12025
12026 static inline void local_add(long i, local_t *l)
12027 {
12028- asm volatile(_ASM_ADD "%1,%0"
12029+ asm volatile(_ASM_ADD "%1,%0\n"
12030+
12031+#ifdef CONFIG_PAX_REFCOUNT
12032+ "jno 0f\n"
12033+ _ASM_SUB "%1,%0\n"
12034+ "int $4\n0:\n"
12035+ _ASM_EXTABLE(0b, 0b)
12036+#endif
12037+
12038+ : "+m" (l->a.counter)
12039+ : "ir" (i));
12040+}
12041+
12042+static inline void local_add_unchecked(long i, local_unchecked_t *l)
12043+{
12044+ asm volatile(_ASM_ADD "%1,%0\n"
12045 : "+m" (l->a.counter)
12046 : "ir" (i));
12047 }
12048
12049 static inline void local_sub(long i, local_t *l)
12050 {
12051- asm volatile(_ASM_SUB "%1,%0"
12052+ asm volatile(_ASM_SUB "%1,%0\n"
12053+
12054+#ifdef CONFIG_PAX_REFCOUNT
12055+ "jno 0f\n"
12056+ _ASM_ADD "%1,%0\n"
12057+ "int $4\n0:\n"
12058+ _ASM_EXTABLE(0b, 0b)
12059+#endif
12060+
12061+ : "+m" (l->a.counter)
12062+ : "ir" (i));
12063+}
12064+
12065+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
12066+{
12067+ asm volatile(_ASM_SUB "%1,%0\n"
12068 : "+m" (l->a.counter)
12069 : "ir" (i));
12070 }
12071@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
12072 {
12073 unsigned char c;
12074
12075- asm volatile(_ASM_SUB "%2,%0; sete %1"
12076+ asm volatile(_ASM_SUB "%2,%0\n"
12077+
12078+#ifdef CONFIG_PAX_REFCOUNT
12079+ "jno 0f\n"
12080+ _ASM_ADD "%2,%0\n"
12081+ "int $4\n0:\n"
12082+ _ASM_EXTABLE(0b, 0b)
12083+#endif
12084+
12085+ "sete %1\n"
12086 : "+m" (l->a.counter), "=qm" (c)
12087 : "ir" (i) : "memory");
12088 return c;
12089@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
12090 {
12091 unsigned char c;
12092
12093- asm volatile(_ASM_DEC "%0; sete %1"
12094+ asm volatile(_ASM_DEC "%0\n"
12095+
12096+#ifdef CONFIG_PAX_REFCOUNT
12097+ "jno 0f\n"
12098+ _ASM_INC "%0\n"
12099+ "int $4\n0:\n"
12100+ _ASM_EXTABLE(0b, 0b)
12101+#endif
12102+
12103+ "sete %1\n"
12104 : "+m" (l->a.counter), "=qm" (c)
12105 : : "memory");
12106 return c != 0;
12107@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
12108 {
12109 unsigned char c;
12110
12111- asm volatile(_ASM_INC "%0; sete %1"
12112+ asm volatile(_ASM_INC "%0\n"
12113+
12114+#ifdef CONFIG_PAX_REFCOUNT
12115+ "jno 0f\n"
12116+ _ASM_DEC "%0\n"
12117+ "int $4\n0:\n"
12118+ _ASM_EXTABLE(0b, 0b)
12119+#endif
12120+
12121+ "sete %1\n"
12122 : "+m" (l->a.counter), "=qm" (c)
12123 : : "memory");
12124 return c != 0;
12125@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
12126 {
12127 unsigned char c;
12128
12129- asm volatile(_ASM_ADD "%2,%0; sets %1"
12130+ asm volatile(_ASM_ADD "%2,%0\n"
12131+
12132+#ifdef CONFIG_PAX_REFCOUNT
12133+ "jno 0f\n"
12134+ _ASM_SUB "%2,%0\n"
12135+ "int $4\n0:\n"
12136+ _ASM_EXTABLE(0b, 0b)
12137+#endif
12138+
12139+ "sets %1\n"
12140 : "+m" (l->a.counter), "=qm" (c)
12141 : "ir" (i) : "memory");
12142 return c;
12143@@ -132,7 +232,15 @@ static inline long local_add_return(long i, local_t *l)
12144 #endif
12145 /* Modern 486+ processor */
12146 __i = i;
12147- asm volatile(_ASM_XADD "%0, %1;"
12148+ asm volatile(_ASM_XADD "%0, %1\n"
12149+
12150+#ifdef CONFIG_PAX_REFCOUNT
12151+ "jno 0f\n"
12152+ _ASM_MOV "%0,%1\n"
12153+ "int $4\n0:\n"
12154+ _ASM_EXTABLE(0b, 0b)
12155+#endif
12156+
12157 : "+r" (i), "+m" (l->a.counter)
12158 : : "memory");
12159 return i + __i;
12160@@ -147,6 +255,38 @@ no_xadd: /* Legacy 386 processor */
12161 #endif
12162 }
12163
12164+/**
12165+ * local_add_return_unchecked - add and return
12166+ * @i: integer value to add
12167+ * @l: pointer to type local_unchecked_t
12168+ *
12169+ * Atomically adds @i to @l and returns @i + @l
12170+ */
12171+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
12172+{
12173+ long __i;
12174+#ifdef CONFIG_M386
12175+ unsigned long flags;
12176+ if (unlikely(boot_cpu_data.x86 <= 3))
12177+ goto no_xadd;
12178+#endif
12179+ /* Modern 486+ processor */
12180+ __i = i;
12181+ asm volatile(_ASM_XADD "%0, %1\n"
12182+ : "+r" (i), "+m" (l->a.counter)
12183+ : : "memory");
12184+ return i + __i;
12185+
12186+#ifdef CONFIG_M386
12187+no_xadd: /* Legacy 386 processor */
12188+ local_irq_save(flags);
12189+ __i = local_read_unchecked(l);
12190+ local_set_unchecked(l, i + __i);
12191+ local_irq_restore(flags);
12192+ return i + __i;
12193+#endif
12194+}
12195+
12196 static inline long local_sub_return(long i, local_t *l)
12197 {
12198 return local_add_return(-i, l);
12199@@ -157,6 +297,8 @@ static inline long local_sub_return(long i, local_t *l)
12200
12201 #define local_cmpxchg(l, o, n) \
12202 (cmpxchg_local(&((l)->a.counter), (o), (n)))
12203+#define local_cmpxchg_unchecked(l, o, n) \
12204+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
12205 /* Always has a lock prefix */
12206 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
12207
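
All of the PAX_REFCOUNT hunks in local.h follow one pattern: do the arithmetic, and if the CPU's overflow flag is set, undo it and raise the overflow trap with int $4, which the accompanying exception-table entry turns into a controlled kill. A minimal x86-64 userspace sketch of that pattern, exercising only the no-overflow path (in userspace the trap itself would just be a fatal signal):

#include <stdio.h>

static long counter;

static void checked_inc(long *l)
{
	asm volatile("incq %0\n"
		     "jno 0f\n"        /* no signed overflow: done */
		     "decq %0\n"       /* undo the increment */
		     "int $4\n"        /* raise the overflow trap */
		     "0:\n"
		     : "+m" (*l));
}

int main(void)
{
	checked_inc(&counter);
	printf("%ld\n", counter);      /* prints 1 */
	return 0;
}
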
12208diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
12209index 593e51d..fa69c9a 100644
12210--- a/arch/x86/include/asm/mman.h
12211+++ b/arch/x86/include/asm/mman.h
12212@@ -5,4 +5,14 @@
12213
12214 #include <asm-generic/mman.h>
12215
12216+#ifdef __KERNEL__
12217+#ifndef __ASSEMBLY__
12218+#ifdef CONFIG_X86_32
12219+#define arch_mmap_check i386_mmap_check
12220+int i386_mmap_check(unsigned long addr, unsigned long len,
12221+ unsigned long flags);
12222+#endif
12223+#endif
12224+#endif
12225+
12226 #endif /* _ASM_X86_MMAN_H */
12227diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
12228index 5f55e69..e20bfb1 100644
12229--- a/arch/x86/include/asm/mmu.h
12230+++ b/arch/x86/include/asm/mmu.h
12231@@ -9,7 +9,7 @@
12232 * we put the segment information here.
12233 */
12234 typedef struct {
12235- void *ldt;
12236+ struct desc_struct *ldt;
12237 int size;
12238
12239 #ifdef CONFIG_X86_64
12240@@ -18,7 +18,19 @@ typedef struct {
12241 #endif
12242
12243 struct mutex lock;
12244- void *vdso;
12245+ unsigned long vdso;
12246+
12247+#ifdef CONFIG_X86_32
12248+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
12249+ unsigned long user_cs_base;
12250+ unsigned long user_cs_limit;
12251+
12252+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12253+ cpumask_t cpu_user_cs_mask;
12254+#endif
12255+
12256+#endif
12257+#endif
12258 } mm_context_t;
12259
12260 #ifdef CONFIG_SMP
12261diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
12262index cdbf367..adb37ac 100644
12263--- a/arch/x86/include/asm/mmu_context.h
12264+++ b/arch/x86/include/asm/mmu_context.h
12265@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
12266
12267 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
12268 {
12269+
12270+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12271+ unsigned int i;
12272+ pgd_t *pgd;
12273+
12274+ pax_open_kernel();
12275+ pgd = get_cpu_pgd(smp_processor_id());
12276+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
12277+ set_pgd_batched(pgd+i, native_make_pgd(0));
12278+ pax_close_kernel();
12279+#endif
12280+
12281 #ifdef CONFIG_SMP
12282 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
12283 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
12284@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12285 struct task_struct *tsk)
12286 {
12287 unsigned cpu = smp_processor_id();
12288+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12289+ int tlbstate = TLBSTATE_OK;
12290+#endif
12291
12292 if (likely(prev != next)) {
12293 #ifdef CONFIG_SMP
12294+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12295+ tlbstate = this_cpu_read(cpu_tlbstate.state);
12296+#endif
12297 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
12298 this_cpu_write(cpu_tlbstate.active_mm, next);
12299 #endif
12300 cpumask_set_cpu(cpu, mm_cpumask(next));
12301
12302 /* Re-load page tables */
12303+#ifdef CONFIG_PAX_PER_CPU_PGD
12304+ pax_open_kernel();
12305+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
12306+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
12307+ pax_close_kernel();
12308+ load_cr3(get_cpu_pgd(cpu));
12309+#else
12310 load_cr3(next->pgd);
12311+#endif
12312
12313 /* stop flush ipis for the previous mm */
12314 cpumask_clear_cpu(cpu, mm_cpumask(prev));
12315@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12316 */
12317 if (unlikely(prev->context.ldt != next->context.ldt))
12318 load_LDT_nolock(&next->context);
12319- }
12320+
12321+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12322+ if (!(__supported_pte_mask & _PAGE_NX)) {
12323+ smp_mb__before_clear_bit();
12324+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
12325+ smp_mb__after_clear_bit();
12326+ cpu_set(cpu, next->context.cpu_user_cs_mask);
12327+ }
12328+#endif
12329+
12330+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12331+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
12332+ prev->context.user_cs_limit != next->context.user_cs_limit))
12333+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12334 #ifdef CONFIG_SMP
12335+ else if (unlikely(tlbstate != TLBSTATE_OK))
12336+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12337+#endif
12338+#endif
12339+
12340+ }
12341 else {
12342+
12343+#ifdef CONFIG_PAX_PER_CPU_PGD
12344+ pax_open_kernel();
12345+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
12346+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
12347+ pax_close_kernel();
12348+ load_cr3(get_cpu_pgd(cpu));
12349+#endif
12350+
12351+#ifdef CONFIG_SMP
12352 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
12353 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
12354
12355@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
12356 * tlb flush IPI delivery. We must reload CR3
12357 * to make sure we don't use freed page tables.
12358 */
12359+
12360+#ifndef CONFIG_PAX_PER_CPU_PGD
12361 load_cr3(next->pgd);
12362+#endif
12363+
12364 load_LDT_nolock(&next->context);
12365+
12366+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
12367+ if (!(__supported_pte_mask & _PAGE_NX))
12368+ cpu_set(cpu, next->context.cpu_user_cs_mask);
12369+#endif
12370+
12371+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
12372+#ifdef CONFIG_PAX_PAGEEXEC
12373+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
12374+#endif
12375+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
12376+#endif
12377+
12378 }
12379+#endif
12380 }
12381-#endif
12382 }
12383
12384 #define activate_mm(prev, next) \
12385diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
12386index 9eae775..c914fea 100644
12387--- a/arch/x86/include/asm/module.h
12388+++ b/arch/x86/include/asm/module.h
12389@@ -5,6 +5,7 @@
12390
12391 #ifdef CONFIG_X86_64
12392 /* X86_64 does not define MODULE_PROC_FAMILY */
12393+#define MODULE_PROC_FAMILY ""
12394 #elif defined CONFIG_M386
12395 #define MODULE_PROC_FAMILY "386 "
12396 #elif defined CONFIG_M486
12397@@ -59,8 +60,20 @@
12398 #error unknown processor family
12399 #endif
12400
12401-#ifdef CONFIG_X86_32
12402-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
12403+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12404+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
12405+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
12406+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
12407+#else
12408+#define MODULE_PAX_KERNEXEC ""
12409 #endif
12410
12411+#ifdef CONFIG_PAX_MEMORY_UDEREF
12412+#define MODULE_PAX_UDEREF "UDEREF "
12413+#else
12414+#define MODULE_PAX_UDEREF ""
12415+#endif
12416+
12417+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
12418+
12419 #endif /* _ASM_X86_MODULE_H */
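
The reworked MODULE_ARCH_VERMAGIC relies on compile-time string-literal pasting, so modules built with mismatched KERNEXEC/UDEREF settings carry a different vermagic and are refused at load time. A tiny sketch with illustrative fragment values:

#include <stdio.h>

#define MODULE_PROC_FAMILY  "586 "          /* illustrative */
#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	puts(MODULE_ARCH_VERMAGIC);   /* "586 KERNEXEC_OR UDEREF " */
	return 0;
}
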
12420diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
12421index 320f7bb..e89f8f8 100644
12422--- a/arch/x86/include/asm/page_64_types.h
12423+++ b/arch/x86/include/asm/page_64_types.h
12424@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
12425
12426 /* duplicated to the one in bootmem.h */
12427 extern unsigned long max_pfn;
12428-extern unsigned long phys_base;
12429+extern const unsigned long phys_base;
12430
12431 extern unsigned long __phys_addr(unsigned long);
12432 #define __phys_reloc_hide(x) (x)
12433diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
12434index a0facf3..c017b15 100644
12435--- a/arch/x86/include/asm/paravirt.h
12436+++ b/arch/x86/include/asm/paravirt.h
12437@@ -632,6 +632,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
12438 val);
12439 }
12440
12441+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12442+{
12443+ pgdval_t val = native_pgd_val(pgd);
12444+
12445+ if (sizeof(pgdval_t) > sizeof(long))
12446+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
12447+ val, (u64)val >> 32);
12448+ else
12449+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
12450+ val);
12451+}
12452+
12453 static inline void pgd_clear(pgd_t *pgdp)
12454 {
12455 set_pgd(pgdp, __pgd(0));
12456@@ -713,6 +725,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
12457 pv_mmu_ops.set_fixmap(idx, phys, flags);
12458 }
12459
12460+#ifdef CONFIG_PAX_KERNEXEC
12461+static inline unsigned long pax_open_kernel(void)
12462+{
12463+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
12464+}
12465+
12466+static inline unsigned long pax_close_kernel(void)
12467+{
12468+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
12469+}
12470+#else
12471+static inline unsigned long pax_open_kernel(void) { return 0; }
12472+static inline unsigned long pax_close_kernel(void) { return 0; }
12473+#endif
12474+
12475 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
12476
12477 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
12478@@ -929,7 +956,7 @@ extern void default_banner(void);
12479
12480 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
12481 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
12482-#define PARA_INDIRECT(addr) *%cs:addr
12483+#define PARA_INDIRECT(addr) *%ss:addr
12484 #endif
12485
12486 #define INTERRUPT_RETURN \
12487@@ -1004,6 +1031,21 @@ extern void default_banner(void);
12488 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
12489 CLBR_NONE, \
12490 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
12491+
12492+#define GET_CR0_INTO_RDI \
12493+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
12494+ mov %rax,%rdi
12495+
12496+#define SET_RDI_INTO_CR0 \
12497+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12498+
12499+#define GET_CR3_INTO_RDI \
12500+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
12501+ mov %rax,%rdi
12502+
12503+#define SET_RDI_INTO_CR3 \
12504+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
12505+
12506 #endif /* CONFIG_X86_32 */
12507
12508 #endif /* __ASSEMBLY__ */
12509diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
12510index 142236e..5446ffbc 100644
12511--- a/arch/x86/include/asm/paravirt_types.h
12512+++ b/arch/x86/include/asm/paravirt_types.h
12513@@ -84,7 +84,7 @@ struct pv_init_ops {
12514 */
12515 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
12516 unsigned long addr, unsigned len);
12517-};
12518+} __no_const;
12519
12520
12521 struct pv_lazy_ops {
12522@@ -97,7 +97,7 @@ struct pv_time_ops {
12523 unsigned long long (*sched_clock)(void);
12524 unsigned long long (*steal_clock)(int cpu);
12525 unsigned long (*get_tsc_khz)(void);
12526-};
12527+} __no_const;
12528
12529 struct pv_cpu_ops {
12530 /* hooks for various privileged instructions */
12531@@ -191,7 +191,7 @@ struct pv_cpu_ops {
12532
12533 void (*start_context_switch)(struct task_struct *prev);
12534 void (*end_context_switch)(struct task_struct *next);
12535-};
12536+} __no_const;
12537
12538 struct pv_irq_ops {
12539 /*
12540@@ -222,7 +222,7 @@ struct pv_apic_ops {
12541 unsigned long start_eip,
12542 unsigned long start_esp);
12543 #endif
12544-};
12545+} __no_const;
12546
12547 struct pv_mmu_ops {
12548 unsigned long (*read_cr2)(void);
12549@@ -312,6 +312,7 @@ struct pv_mmu_ops {
12550 struct paravirt_callee_save make_pud;
12551
12552 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12553+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12554 #endif /* PAGETABLE_LEVELS == 4 */
12555 #endif /* PAGETABLE_LEVELS >= 3 */
12556
12557@@ -323,6 +324,12 @@ struct pv_mmu_ops {
12558 an mfn. We can tell which is which from the index. */
12559 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12560 phys_addr_t phys, pgprot_t flags);
12561+
12562+#ifdef CONFIG_PAX_KERNEXEC
12563+ unsigned long (*pax_open_kernel)(void);
12564+ unsigned long (*pax_close_kernel)(void);
12565+#endif
12566+
12567 };
12568
12569 struct arch_spinlock;
12570@@ -333,7 +340,7 @@ struct pv_lock_ops {
12571 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
12572 int (*spin_trylock)(struct arch_spinlock *lock);
12573 void (*spin_unlock)(struct arch_spinlock *lock);
12574-};
12575+} __no_const;
12576
12577 /* This contains all the paravirt structures: we get a convenient
12578 * number for each function using the offset which we use to indicate
12579diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12580index b4389a4..7024269 100644
12581--- a/arch/x86/include/asm/pgalloc.h
12582+++ b/arch/x86/include/asm/pgalloc.h
12583@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12584 pmd_t *pmd, pte_t *pte)
12585 {
12586 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12587+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12588+}
12589+
12590+static inline void pmd_populate_user(struct mm_struct *mm,
12591+ pmd_t *pmd, pte_t *pte)
12592+{
12593+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12594 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12595 }
12596
12597@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
12598
12599 #ifdef CONFIG_X86_PAE
12600 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
12601+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
12602+{
12603+ pud_populate(mm, pudp, pmd);
12604+}
12605 #else /* !CONFIG_X86_PAE */
12606 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12607 {
12608 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12609 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
12610 }
12611+
12612+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
12613+{
12614+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
12615+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
12616+}
12617 #endif /* CONFIG_X86_PAE */
12618
12619 #if PAGETABLE_LEVELS > 3
12620@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12621 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
12622 }
12623
12624+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
12625+{
12626+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
12627+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
12628+}
12629+
12630 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
12631 {
12632 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
12633diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12634index f2b489c..4f7e2e5 100644
12635--- a/arch/x86/include/asm/pgtable-2level.h
12636+++ b/arch/x86/include/asm/pgtable-2level.h
12637@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12638
12639 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12640 {
12641+ pax_open_kernel();
12642 *pmdp = pmd;
12643+ pax_close_kernel();
12644 }
12645
12646 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12647diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12648index 4cc9f2b..5fd9226 100644
12649--- a/arch/x86/include/asm/pgtable-3level.h
12650+++ b/arch/x86/include/asm/pgtable-3level.h
12651@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12652
12653 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12654 {
12655+ pax_open_kernel();
12656 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12657+ pax_close_kernel();
12658 }
12659
12660 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12661 {
12662+ pax_open_kernel();
12663 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12664+ pax_close_kernel();
12665 }
12666
12667 /*
12668diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12669index 796ed83..9f6c8dd 100644
12670--- a/arch/x86/include/asm/pgtable.h
12671+++ b/arch/x86/include/asm/pgtable.h
12672@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12673
12674 #ifndef __PAGETABLE_PUD_FOLDED
12675 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12676+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12677 #define pgd_clear(pgd) native_pgd_clear(pgd)
12678 #endif
12679
12680@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
12681
12682 #define arch_end_context_switch(prev) do {} while(0)
12683
12684+#define pax_open_kernel() native_pax_open_kernel()
12685+#define pax_close_kernel() native_pax_close_kernel()
12686 #endif /* CONFIG_PARAVIRT */
12687
12688+#define __HAVE_ARCH_PAX_OPEN_KERNEL
12689+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12690+
12691+#ifdef CONFIG_PAX_KERNEXEC
12692+static inline unsigned long native_pax_open_kernel(void)
12693+{
12694+ unsigned long cr0;
12695+
12696+ preempt_disable();
12697+ barrier();
12698+ cr0 = read_cr0() ^ X86_CR0_WP;
12699+ BUG_ON(cr0 & X86_CR0_WP);
12700+ write_cr0(cr0);
12701+ return cr0 ^ X86_CR0_WP;
12702+}
12703+
12704+static inline unsigned long native_pax_close_kernel(void)
12705+{
12706+ unsigned long cr0;
12707+
12708+ cr0 = read_cr0() ^ X86_CR0_WP;
12709+ BUG_ON(!(cr0 & X86_CR0_WP));
12710+ write_cr0(cr0);
12711+ barrier();
12712+ preempt_enable_no_resched();
12713+ return cr0 ^ X86_CR0_WP;
12714+}
12715+#else
12716+static inline unsigned long native_pax_open_kernel(void) { return 0; }
12717+static inline unsigned long native_pax_close_kernel(void) { return 0; }
12718+#endif
12719+
12720 /*
12721 * The following only work if pte_present() is true.
12722 * Undefined behaviour if not..
12723 */
12724+static inline int pte_user(pte_t pte)
12725+{
12726+ return pte_val(pte) & _PAGE_USER;
12727+}
12728+
12729 static inline int pte_dirty(pte_t pte)
12730 {
12731 return pte_flags(pte) & _PAGE_DIRTY;
12732@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12733 return pte_clear_flags(pte, _PAGE_RW);
12734 }
12735
12736+static inline pte_t pte_mkread(pte_t pte)
12737+{
12738+ return __pte(pte_val(pte) | _PAGE_USER);
12739+}
12740+
12741 static inline pte_t pte_mkexec(pte_t pte)
12742 {
12743- return pte_clear_flags(pte, _PAGE_NX);
12744+#ifdef CONFIG_X86_PAE
12745+ if (__supported_pte_mask & _PAGE_NX)
12746+ return pte_clear_flags(pte, _PAGE_NX);
12747+ else
12748+#endif
12749+ return pte_set_flags(pte, _PAGE_USER);
12750+}
12751+
12752+static inline pte_t pte_exprotect(pte_t pte)
12753+{
12754+#ifdef CONFIG_X86_PAE
12755+ if (__supported_pte_mask & _PAGE_NX)
12756+ return pte_set_flags(pte, _PAGE_NX);
12757+ else
12758+#endif
12759+ return pte_clear_flags(pte, _PAGE_USER);
12760 }
12761
12762 static inline pte_t pte_mkdirty(pte_t pte)
12763@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12764 #endif
12765
12766 #ifndef __ASSEMBLY__
12767+
12768+#ifdef CONFIG_PAX_PER_CPU_PGD
12769+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12770+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12771+{
12772+ return cpu_pgd[cpu];
12773+}
12774+#endif
12775+
12776 #include <linux/mm_types.h>
12777
12778 static inline int pte_none(pte_t pte)
12779@@ -570,7 +639,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12780
12781 static inline int pgd_bad(pgd_t pgd)
12782 {
12783- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12784+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12785 }
12786
12787 static inline int pgd_none(pgd_t pgd)
12788@@ -593,7 +662,12 @@ static inline int pgd_none(pgd_t pgd)
12789 * pgd_offset() returns a (pgd_t *)
12790 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
12791 */
12792-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12793+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12794+
12795+#ifdef CONFIG_PAX_PER_CPU_PGD
12796+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12797+#endif
12798+
12799 /*
12800 * a shortcut which implies the use of the kernel's pgd, instead
12801 * of a process's
12802@@ -604,6 +678,20 @@ static inline int pgd_none(pgd_t pgd)
12803 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12804 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12805
12806+#ifdef CONFIG_X86_32
12807+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12808+#else
12809+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12810+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12811+
12812+#ifdef CONFIG_PAX_MEMORY_UDEREF
12813+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12814+#else
12815+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12816+#endif
12817+
12818+#endif
12819+
12820 #ifndef __ASSEMBLY__
12821
12822 extern int direct_gbpages;
12823@@ -768,11 +856,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
12824 * dst and src can be on the same page, but the range must not overlap,
12825 * and must not cross a page boundary.
12826 */
12827-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12828+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12829 {
12830- memcpy(dst, src, count * sizeof(pgd_t));
12831+ pax_open_kernel();
12832+ while (count--)
12833+ *dst++ = *src++;
12834+ pax_close_kernel();
12835 }
12836
12837+#ifdef CONFIG_PAX_PER_CPU_PGD
12838+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
12839+#endif
12840+
12841+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12842+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
12843+#else
12844+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
12845+#endif
12846
12847 #include <asm-generic/pgtable.h>
12848 #endif /* __ASSEMBLY__ */
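
native_pax_open_kernel()/native_pax_close_kernel() above temporarily lift the kernel's own write protection by flipping CR0.WP; the XOR both toggles the bit and lets the BUG_ON assert the expected prior state. A sketch of the same logic modelled on a plain integer, since CR0 itself is ring-0 state (the initial value is illustrative):

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long cr0_shadow = 0x80050033UL;  /* assumed CR0, WP set */

static unsigned long pax_open_kernel_sketch(void)
{
	unsigned long cr0 = cr0_shadow ^ X86_CR0_WP; /* flip WP */
	assert(!(cr0 & X86_CR0_WP));   /* WP must have been set before */
	cr0_shadow = cr0;              /* stands in for write_cr0() */
	return cr0 ^ X86_CR0_WP;       /* the original, WP-set value */
}

int main(void)
{
	printf("old cr0 %#lx, WP now %lu\n",
	       pax_open_kernel_sketch(), cr0_shadow & X86_CR0_WP);
	return 0;
}
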
12849diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12850index 8faa215..a8a17ea 100644
12851--- a/arch/x86/include/asm/pgtable_32.h
12852+++ b/arch/x86/include/asm/pgtable_32.h
12853@@ -25,9 +25,6 @@
12854 struct mm_struct;
12855 struct vm_area_struct;
12856
12857-extern pgd_t swapper_pg_dir[1024];
12858-extern pgd_t initial_page_table[1024];
12859-
12860 static inline void pgtable_cache_init(void) { }
12861 static inline void check_pgt_cache(void) { }
12862 void paging_init(void);
12863@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12864 # include <asm/pgtable-2level.h>
12865 #endif
12866
12867+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12868+extern pgd_t initial_page_table[PTRS_PER_PGD];
12869+#ifdef CONFIG_X86_PAE
12870+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12871+#endif
12872+
12873 #if defined(CONFIG_HIGHPTE)
12874 #define pte_offset_map(dir, address) \
12875 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
12876@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12877 /* Clear a kernel PTE and flush it from the TLB */
12878 #define kpte_clear_flush(ptep, vaddr) \
12879 do { \
12880+ pax_open_kernel(); \
12881 pte_clear(&init_mm, (vaddr), (ptep)); \
12882+ pax_close_kernel(); \
12883 __flush_tlb_one((vaddr)); \
12884 } while (0)
12885
12886@@ -75,6 +80,9 @@ do { \
12887
12888 #endif /* !__ASSEMBLY__ */
12889
12890+#define HAVE_ARCH_UNMAPPED_AREA
12891+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12892+
12893 /*
12894 * kern_addr_valid() is (1) for FLATMEM and (0) for
12895 * SPARSEMEM and DISCONTIGMEM
12896diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12897index ed5903b..c7fe163 100644
12898--- a/arch/x86/include/asm/pgtable_32_types.h
12899+++ b/arch/x86/include/asm/pgtable_32_types.h
12900@@ -8,7 +8,7 @@
12901 */
12902 #ifdef CONFIG_X86_PAE
12903 # include <asm/pgtable-3level_types.h>
12904-# define PMD_SIZE (1UL << PMD_SHIFT)
12905+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12906 # define PMD_MASK (~(PMD_SIZE - 1))
12907 #else
12908 # include <asm/pgtable-2level_types.h>
12909@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12910 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12911 #endif
12912
12913+#ifdef CONFIG_PAX_KERNEXEC
12914+#ifndef __ASSEMBLY__
12915+extern unsigned char MODULES_EXEC_VADDR[];
12916+extern unsigned char MODULES_EXEC_END[];
12917+#endif
12918+#include <asm/boot.h>
12919+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12920+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12921+#else
12922+#define ktla_ktva(addr) (addr)
12923+#define ktva_ktla(addr) (addr)
12924+#endif
12925+
12926 #define MODULES_VADDR VMALLOC_START
12927 #define MODULES_END VMALLOC_END
12928 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
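
Under KERNEXEC the 32-bit kernel text has two mappings, and the ktla_ktva()/ktva_ktla() macros added above convert between the linear alias and the execution address by a constant offset. A sketch of the round trip, assuming the usual i386 defaults (LOAD_PHYSICAL_ADDR 0x1000000, PAGE_OFFSET 0xc0000000; the input address is hypothetical):

#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL   /* assumed default */
#define PAGE_OFFSET        0xc0000000UL  /* assumed default */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long la = 0x00100000UL;  /* hypothetical linear text address */

	printf("%#lx -> %#lx -> %#lx\n",
	       la, ktla_ktva(la), ktva_ktla(ktla_ktva(la)));
	return 0;
}
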
12929diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12930index 47356f9..deb94a2 100644
12931--- a/arch/x86/include/asm/pgtable_64.h
12932+++ b/arch/x86/include/asm/pgtable_64.h
12933@@ -16,10 +16,14 @@
12934
12935 extern pud_t level3_kernel_pgt[512];
12936 extern pud_t level3_ident_pgt[512];
12937+extern pud_t level3_vmalloc_start_pgt[512];
12938+extern pud_t level3_vmalloc_end_pgt[512];
12939+extern pud_t level3_vmemmap_pgt[512];
12940+extern pud_t level2_vmemmap_pgt[512];
12941 extern pmd_t level2_kernel_pgt[512];
12942 extern pmd_t level2_fixmap_pgt[512];
12943-extern pmd_t level2_ident_pgt[512];
12944-extern pgd_t init_level4_pgt[];
12945+extern pmd_t level2_ident_pgt[512*2];
12946+extern pgd_t init_level4_pgt[512];
12947
12948 #define swapper_pg_dir init_level4_pgt
12949
12950@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12951
12952 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12953 {
12954+ pax_open_kernel();
12955 *pmdp = pmd;
12956+ pax_close_kernel();
12957 }
12958
12959 static inline void native_pmd_clear(pmd_t *pmd)
12960@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
12961
12962 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12963 {
12964+ pax_open_kernel();
12965 *pudp = pud;
12966+ pax_close_kernel();
12967 }
12968
12969 static inline void native_pud_clear(pud_t *pud)
12970@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
12971
12972 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12973 {
12974+ pax_open_kernel();
12975+ *pgdp = pgd;
12976+ pax_close_kernel();
12977+}
12978+
12979+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12980+{
12981 *pgdp = pgd;
12982 }
12983
12984diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12985index 766ea16..5b96cb3 100644
12986--- a/arch/x86/include/asm/pgtable_64_types.h
12987+++ b/arch/x86/include/asm/pgtable_64_types.h
12988@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12989 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12990 #define MODULES_END _AC(0xffffffffff000000, UL)
12991 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12992+#define MODULES_EXEC_VADDR MODULES_VADDR
12993+#define MODULES_EXEC_END MODULES_END
12994+
12995+#define ktla_ktva(addr) (addr)
12996+#define ktva_ktla(addr) (addr)
12997
12998 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12999diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
13000index ec8a1fc..7ccb593 100644
13001--- a/arch/x86/include/asm/pgtable_types.h
13002+++ b/arch/x86/include/asm/pgtable_types.h
13003@@ -16,13 +16,12 @@
13004 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
13005 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
13006 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
13007-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
13008+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
13009 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
13010 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
13011 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
13012-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
13013-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
13014-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
13015+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
13016+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
13017 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
13018
13019 /* If _PAGE_BIT_PRESENT is clear, we use these: */
13020@@ -40,7 +39,6 @@
13021 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
13022 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
13023 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
13024-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
13025 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
13026 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
13027 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
13028@@ -57,8 +55,10 @@
13029
13030 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
13031 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
13032-#else
13033+#elif defined(CONFIG_KMEMCHECK)
13034 #define _PAGE_NX (_AT(pteval_t, 0))
13035+#else
13036+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
13037 #endif
13038
13039 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
13040@@ -96,6 +96,9 @@
13041 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
13042 _PAGE_ACCESSED)
13043
13044+#define PAGE_READONLY_NOEXEC PAGE_READONLY
13045+#define PAGE_SHARED_NOEXEC PAGE_SHARED
13046+
13047 #define __PAGE_KERNEL_EXEC \
13048 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
13049 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
13050@@ -106,7 +109,7 @@
13051 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
13052 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
13053 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
13054-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
13055+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
13056 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
13057 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
13058 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
13059@@ -168,8 +171,8 @@
13060 * bits are combined, this will allow the user to access the high address mapped
13061 * VDSO in the presence of CONFIG_COMPAT_VDSO
13062 */
13063-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
13064-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
13065+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
13066+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
13067 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
13068 #endif
13069
13070@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
13071 {
13072 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
13073 }
13074+#endif
13075
13076+#if PAGETABLE_LEVELS == 3
13077+#include <asm-generic/pgtable-nopud.h>
13078+#endif
13079+
13080+#if PAGETABLE_LEVELS == 2
13081+#include <asm-generic/pgtable-nopmd.h>
13082+#endif
13083+
13084+#ifndef __ASSEMBLY__
13085 #if PAGETABLE_LEVELS > 3
13086 typedef struct { pudval_t pud; } pud_t;
13087
13088@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
13089 return pud.pud;
13090 }
13091 #else
13092-#include <asm-generic/pgtable-nopud.h>
13093-
13094 static inline pudval_t native_pud_val(pud_t pud)
13095 {
13096 return native_pgd_val(pud.pgd);
13097@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
13098 return pmd.pmd;
13099 }
13100 #else
13101-#include <asm-generic/pgtable-nopmd.h>
13102-
13103 static inline pmdval_t native_pmd_val(pmd_t pmd)
13104 {
13105 return native_pgd_val(pmd.pud.pgd);
13106@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
13107
13108 extern pteval_t __supported_pte_mask;
13109 extern void set_nx(void);
13110-extern int nx_enabled;
13111
13112 #define pgprot_writecombine pgprot_writecombine
13113 extern pgprot_t pgprot_writecombine(pgprot_t prot);
13114diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
13115index ad1fc85..0b15fe1 100644
13116--- a/arch/x86/include/asm/processor.h
13117+++ b/arch/x86/include/asm/processor.h
13118@@ -289,7 +289,7 @@ struct tss_struct {
13119
13120 } ____cacheline_aligned;
13121
13122-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
13123+extern struct tss_struct init_tss[NR_CPUS];
13124
13125 /*
13126 * Save the original ist values for checking stack pointers during debugging
13127@@ -818,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
13128 */
13129 #define TASK_SIZE PAGE_OFFSET
13130 #define TASK_SIZE_MAX TASK_SIZE
13131+
13132+#ifdef CONFIG_PAX_SEGMEXEC
13133+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
13134+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
13135+#else
13136 #define STACK_TOP TASK_SIZE
13137-#define STACK_TOP_MAX STACK_TOP
13138+#endif
13139+
13140+#define STACK_TOP_MAX TASK_SIZE
13141
13142 #define INIT_THREAD { \
13143- .sp0 = sizeof(init_stack) + (long)&init_stack, \
13144+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
13145 .vm86_info = NULL, \
13146 .sysenter_cs = __KERNEL_CS, \
13147 .io_bitmap_ptr = NULL, \
13148@@ -836,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
13149 */
13150 #define INIT_TSS { \
13151 .x86_tss = { \
13152- .sp0 = sizeof(init_stack) + (long)&init_stack, \
13153+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
13154 .ss0 = __KERNEL_DS, \
13155 .ss1 = __KERNEL_CS, \
13156 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
13157@@ -847,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
13158 extern unsigned long thread_saved_pc(struct task_struct *tsk);
13159
13160 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
13161-#define KSTK_TOP(info) \
13162-({ \
13163- unsigned long *__ptr = (unsigned long *)(info); \
13164- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
13165-})
13166+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
13167
13168 /*
13169 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
13170@@ -866,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
13171 #define task_pt_regs(task) \
13172 ({ \
13173 struct pt_regs *__regs__; \
13174- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
13175+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
13176 __regs__ - 1; \
13177 })
13178
13179@@ -876,13 +879,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
13180 /*
13181 * User space process size. 47 bits minus one guard page.
13182 */
13183-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
13184+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
13185
13186 /* This decides where the kernel will search for a free chunk of vm
13187 * space during mmap's.
13188 */
13189 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
13190- 0xc0000000 : 0xFFFFe000)
13191+ 0xc0000000 : 0xFFFFf000)
13192
13193 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
13194 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
13195@@ -893,11 +896,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
13196 #define STACK_TOP_MAX TASK_SIZE_MAX
13197
13198 #define INIT_THREAD { \
13199- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
13200+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
13201 }
13202
13203 #define INIT_TSS { \
13204- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
13205+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
13206 }
13207
13208 /*
13209@@ -925,6 +928,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
13210 */
13211 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
13212
13213+#ifdef CONFIG_PAX_SEGMEXEC
13214+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
13215+#endif
13216+
13217 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
13218
13219 /* Get/set a process' ability to use the timestamp counter instruction */
13220@@ -985,12 +992,12 @@ extern bool cpu_has_amd_erratum(const int *);
13221 #define cpu_has_amd_erratum(x) (false)
13222 #endif /* CONFIG_CPU_SUP_AMD */
13223
13224-extern unsigned long arch_align_stack(unsigned long sp);
13225+#define arch_align_stack(x) ((x) & ~0xfUL)
13226 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13227
13228 void default_idle(void);
13229 bool set_pm_idle_to_default(void);
13230
13231-void stop_this_cpu(void *dummy);
13232+void stop_this_cpu(void *dummy) __noreturn;
13233
13234 #endif /* _ASM_X86_PROCESSOR_H */
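
The arch_align_stack() replacement above drops the old per-exec randomization and simply rounds the stack pointer down to a 16-byte boundary. A one-line demo (the starting value is arbitrary):

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
	unsigned long sp = 0xbffff7a9UL;   /* arbitrary example */

	printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));  /* ...f7a0 */
	return 0;
}
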
13235diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
13236index 19f16eb..b50624b 100644
13237--- a/arch/x86/include/asm/ptrace.h
13238+++ b/arch/x86/include/asm/ptrace.h
13239@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
13240 }
13241
13242 /*
13243- * user_mode_vm(regs) determines whether a register set came from user mode.
13244+ * user_mode(regs) determines whether a register set came from user mode.
13245 * This is true if V8086 mode was enabled OR if the register set was from
13246 * protected mode with RPL-3 CS value. This tricky test checks that with
13247 * one comparison. Many places in the kernel can bypass this full check
13248- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
13249+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
13250+ * be used.
13251 */
13252-static inline int user_mode(struct pt_regs *regs)
13253+static inline int user_mode_novm(struct pt_regs *regs)
13254 {
13255 #ifdef CONFIG_X86_32
13256 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
13257 #else
13258- return !!(regs->cs & 3);
13259+ return !!(regs->cs & SEGMENT_RPL_MASK);
13260 #endif
13261 }
13262
13263-static inline int user_mode_vm(struct pt_regs *regs)
13264+static inline int user_mode(struct pt_regs *regs)
13265 {
13266 #ifdef CONFIG_X86_32
13267 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
13268 USER_RPL;
13269 #else
13270- return user_mode(regs);
13271+ return user_mode_novm(regs);
13272 #endif
13273 }
13274
13275@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
13276 #ifdef CONFIG_X86_64
13277 static inline bool user_64bit_mode(struct pt_regs *regs)
13278 {
13279+ unsigned long cs = regs->cs & 0xffff;
13280 #ifndef CONFIG_PARAVIRT
13281 /*
13282 * On non-paravirt systems, this is the only long mode CPL 3
13283 * selector. We do not allow long mode selectors in the LDT.
13284 */
13285- return regs->cs == __USER_CS;
13286+ return cs == __USER_CS;
13287 #else
13288 /* Headers are too twisted for this to go in paravirt.h. */
13289- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
13290+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
13291 #endif
13292 }
13293 #endif
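
The renamed user_mode() keeps the kernel's single-comparison trick: OR-ing the CS RPL bits with the EFLAGS VM bit makes "RPL == 3 or V8086 mode" collapse into one >= USER_RPL test. A standalone sketch with the i386 constants (selector values illustrative):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      (1UL << 17)   /* EFLAGS.VM */

static int user_mode(unsigned long cs, unsigned long eflags)
{
	return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("%d %d %d\n",
	       user_mode(0x73, 0),            /* user CS, RPL 3 -> 1 */
	       user_mode(0x10, 0),            /* kernel CS, RPL 0 -> 0 */
	       user_mode(0x10, X86_VM_MASK)); /* v8086 -> 1 */
	return 0;
}
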
13294diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
13295index fe1ec5b..dc5c3fe 100644
13296--- a/arch/x86/include/asm/realmode.h
13297+++ b/arch/x86/include/asm/realmode.h
13298@@ -22,16 +22,14 @@ struct real_mode_header {
13299 #endif
13300 /* APM/BIOS reboot */
13301 u32 machine_real_restart_asm;
13302-#ifdef CONFIG_X86_64
13303 u32 machine_real_restart_seg;
13304-#endif
13305 };
13306
13307 /* This must match data at trampoline_32/64.S */
13308 struct trampoline_header {
13309 #ifdef CONFIG_X86_32
13310 u32 start;
13311- u16 gdt_pad;
13312+ u16 boot_cs;
13313 u16 gdt_limit;
13314 u32 gdt_base;
13315 #else
13316diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
13317index a82c4f1..ac45053 100644
13318--- a/arch/x86/include/asm/reboot.h
13319+++ b/arch/x86/include/asm/reboot.h
13320@@ -6,13 +6,13 @@
13321 struct pt_regs;
13322
13323 struct machine_ops {
13324- void (*restart)(char *cmd);
13325- void (*halt)(void);
13326- void (*power_off)(void);
13327+ void (* __noreturn restart)(char *cmd);
13328+ void (* __noreturn halt)(void);
13329+ void (* __noreturn power_off)(void);
13330 void (*shutdown)(void);
13331 void (*crash_shutdown)(struct pt_regs *);
13332- void (*emergency_restart)(void);
13333-};
13334+ void (* __noreturn emergency_restart)(void);
13335+} __no_const;
13336
13337 extern struct machine_ops machine_ops;
13338
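
The reboot.h hunk shows the two ops-structure hardening moves used throughout this patch: function-pointer members that cannot return get __noreturn, and structures that must stay writable are tagged __no_const so the PaX constify plugin (which otherwise makes such all-function-pointer structs read-only) skips them. A plain-C sketch of the idea the plugin automates, using an explicit const table:

    struct machine_ops_sketch {
        void (*shutdown)(void);
        void (*restart)(const char *cmd);   /* the patch adds __noreturn */
    };

    static void do_shutdown(void) { /* ... */ }
    static void do_restart(const char *cmd) { (void)cmd; for (;;) ; }

    /* const places the pointer table in .rodata, so a stray or
     * attacker-controlled write cannot re-aim its function pointers;
     * __no_const opts out structs that legitimately need runtime writes. */
    static const struct machine_ops_sketch ops = {
        .shutdown = do_shutdown,
        .restart  = do_restart,
    };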
13339diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
13340index 2dbe4a7..ce1db00 100644
13341--- a/arch/x86/include/asm/rwsem.h
13342+++ b/arch/x86/include/asm/rwsem.h
13343@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
13344 {
13345 asm volatile("# beginning down_read\n\t"
13346 LOCK_PREFIX _ASM_INC "(%1)\n\t"
13347+
13348+#ifdef CONFIG_PAX_REFCOUNT
13349+ "jno 0f\n"
13350+ LOCK_PREFIX _ASM_DEC "(%1)\n"
13351+ "int $4\n0:\n"
13352+ _ASM_EXTABLE(0b, 0b)
13353+#endif
13354+
13355 /* adds 0x00000001 */
13356 " jns 1f\n"
13357 " call call_rwsem_down_read_failed\n"
13358@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
13359 "1:\n\t"
13360 " mov %1,%2\n\t"
13361 " add %3,%2\n\t"
13362+
13363+#ifdef CONFIG_PAX_REFCOUNT
13364+ "jno 0f\n"
13365+ "sub %3,%2\n"
13366+ "int $4\n0:\n"
13367+ _ASM_EXTABLE(0b, 0b)
13368+#endif
13369+
13370 " jle 2f\n\t"
13371 LOCK_PREFIX " cmpxchg %2,%0\n\t"
13372 " jnz 1b\n\t"
13373@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
13374 long tmp;
13375 asm volatile("# beginning down_write\n\t"
13376 LOCK_PREFIX " xadd %1,(%2)\n\t"
13377+
13378+#ifdef CONFIG_PAX_REFCOUNT
13379+ "jno 0f\n"
13380+ "mov %1,(%2)\n"
13381+ "int $4\n0:\n"
13382+ _ASM_EXTABLE(0b, 0b)
13383+#endif
13384+
13385 /* adds 0xffff0001, returns the old value */
13386 " test %1,%1\n\t"
13387 /* was the count 0 before? */
13388@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
13389 long tmp;
13390 asm volatile("# beginning __up_read\n\t"
13391 LOCK_PREFIX " xadd %1,(%2)\n\t"
13392+
13393+#ifdef CONFIG_PAX_REFCOUNT
13394+ "jno 0f\n"
13395+ "mov %1,(%2)\n"
13396+ "int $4\n0:\n"
13397+ _ASM_EXTABLE(0b, 0b)
13398+#endif
13399+
13400 /* subtracts 1, returns the old value */
13401 " jns 1f\n\t"
13402 " call call_rwsem_wake\n" /* expects old value in %edx */
13403@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
13404 long tmp;
13405 asm volatile("# beginning __up_write\n\t"
13406 LOCK_PREFIX " xadd %1,(%2)\n\t"
13407+
13408+#ifdef CONFIG_PAX_REFCOUNT
13409+ "jno 0f\n"
13410+ "mov %1,(%2)\n"
13411+ "int $4\n0:\n"
13412+ _ASM_EXTABLE(0b, 0b)
13413+#endif
13414+
13415 /* subtracts 0xffff0001, returns the old value */
13416 " jns 1f\n\t"
13417 " call call_rwsem_wake\n" /* expects old value in %edx */
13418@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13419 {
13420 asm volatile("# beginning __downgrade_write\n\t"
13421 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
13422+
13423+#ifdef CONFIG_PAX_REFCOUNT
13424+ "jno 0f\n"
13425+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
13426+ "int $4\n0:\n"
13427+ _ASM_EXTABLE(0b, 0b)
13428+#endif
13429+
13430 /*
13431 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
13432 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
13433@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
13434 */
13435 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13436 {
13437- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
13438+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
13439+
13440+#ifdef CONFIG_PAX_REFCOUNT
13441+ "jno 0f\n"
13442+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
13443+ "int $4\n0:\n"
13444+ _ASM_EXTABLE(0b, 0b)
13445+#endif
13446+
13447 : "+m" (sem->count)
13448 : "er" (delta));
13449 }
13450@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
13451 */
13452 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
13453 {
13454- return delta + xadd(&sem->count, delta);
13455+ return delta + xadd_check_overflow(&sem->count, delta);
13456 }
13457
13458 #endif /* __KERNEL__ */
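
Every CONFIG_PAX_REFCOUNT hunk here and in spinlock.h below follows one pattern: perform the atomic operation, jump over the fixup with "jno" if the signed-overflow flag is clear, otherwise undo the operation and execute "int $4" (the x86 overflow exception), whose handler reports the event; the _ASM_EXTABLE entry lets execution resume after the trapping instruction. A minimal x86-64 GCC sketch of the same detect-and-undo sequence, with a flag standing in for the trap so it runs in userspace (int $4 at CPL 3 simply faults):

    /* Checked increment: add 1; if OF was set, undo the add and record
     * the overflow.  The kernel variant traps with "int $4" instead. */
    static long refcount_inc_checked(long *count, int *overflowed)
    {
        asm volatile("lock incq %0\n\t"
                     "jno 1f\n\t"
                     "lock decq %0\n\t"     /* undo, as the hunks above do */
                     "movl $1, %1\n"
                     "1:"
                     : "+m" (*count), "+m" (*overflowed)
                     :
                     : "memory", "cc");
        return *count;
    }

The point of undoing rather than wrapping: a reference count that wraps past its maximum reaches a small value, the object is freed while still referenced, and that use-after-free is exactly the primitive this option exists to stop.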
13459diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
13460index c48a950..c6d7468 100644
13461--- a/arch/x86/include/asm/segment.h
13462+++ b/arch/x86/include/asm/segment.h
13463@@ -64,10 +64,15 @@
13464 * 26 - ESPFIX small SS
13465 * 27 - per-cpu [ offset to per-cpu data area ]
13466 * 28 - stack_canary-20 [ for stack protector ]
13467- * 29 - unused
13468- * 30 - unused
13469+ * 29 - PCI BIOS CS
13470+ * 30 - PCI BIOS DS
13471 * 31 - TSS for double fault handler
13472 */
13473+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
13474+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
13475+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
13476+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
13477+
13478 #define GDT_ENTRY_TLS_MIN 6
13479 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
13480
13481@@ -79,6 +84,8 @@
13482
13483 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
13484
13485+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
13486+
13487 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
13488
13489 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
13490@@ -104,6 +111,12 @@
13491 #define __KERNEL_STACK_CANARY 0
13492 #endif
13493
13494+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
13495+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
13496+
13497+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
13498+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
13499+
13500 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
13501
13502 /*
13503@@ -141,7 +154,7 @@
13504 */
13505
13506 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
13507-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
13508+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
13509
13510
13511 #else
13512@@ -165,6 +178,8 @@
13513 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
13514 #define __USER32_DS __USER_DS
13515
13516+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
13517+
13518 #define GDT_ENTRY_TSS 8 /* needs two entries */
13519 #define GDT_ENTRY_LDT 10 /* needs two entries */
13520 #define GDT_ENTRY_TLS_MIN 12
13521@@ -185,6 +200,7 @@
13522 #endif
13523
13524 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
13525+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
13526 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
13527 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
13528 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
13529@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
13530 {
13531 unsigned long __limit;
13532 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13533- return __limit + 1;
13534+ return __limit;
13535 }
13536
13537 #endif /* !__ASSEMBLY__ */
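
The get_limit() fix drops the "+ 1": "lsl" already returns the highest valid offset (segment limits are inclusive), and for a flat 4 GiB segment adding one wraps the 32-bit result to zero, defeating every later "offset <= limit" check. A two-line demonstration of the wrap:

    #include <stdio.h>

    int main(void)
    {
        unsigned int lsl_result = 0xFFFFFFFFu;      /* flat 4 GiB segment */
        unsigned int old_limit  = lsl_result + 1u;  /* wraps to 0 */
        printf("old: %#x  new: %#x\n", old_limit, lsl_result);
        return 0;
    }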
13538diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
13539index 4f19a15..9e14f27 100644
13540--- a/arch/x86/include/asm/smp.h
13541+++ b/arch/x86/include/asm/smp.h
13542@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
13543 /* cpus sharing the last level cache: */
13544 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
13545 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
13546-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
13547+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
13548
13549 static inline struct cpumask *cpu_sibling_mask(int cpu)
13550 {
13551@@ -79,7 +79,7 @@ struct smp_ops {
13552
13553 void (*send_call_func_ipi)(const struct cpumask *mask);
13554 void (*send_call_func_single_ipi)(int cpu);
13555-};
13556+} __no_const;
13557
13558 /* Globals due to paravirt */
13559 extern void set_cpu_sibling_map(int cpu);
13560@@ -190,14 +190,8 @@ extern unsigned disabled_cpus __cpuinitdata;
13561 extern int safe_smp_processor_id(void);
13562
13563 #elif defined(CONFIG_X86_64_SMP)
13564-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13565-
13566-#define stack_smp_processor_id() \
13567-({ \
13568- struct thread_info *ti; \
13569- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
13570- ti->cpu; \
13571-})
13572+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
13573+#define stack_smp_processor_id() raw_smp_processor_id()
13574 #define safe_smp_processor_id() smp_processor_id()
13575
13576 #endif
13577diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
13578index 33692ea..350a534 100644
13579--- a/arch/x86/include/asm/spinlock.h
13580+++ b/arch/x86/include/asm/spinlock.h
13581@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
13582 static inline void arch_read_lock(arch_rwlock_t *rw)
13583 {
13584 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
13585+
13586+#ifdef CONFIG_PAX_REFCOUNT
13587+ "jno 0f\n"
13588+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
13589+ "int $4\n0:\n"
13590+ _ASM_EXTABLE(0b, 0b)
13591+#endif
13592+
13593 "jns 1f\n"
13594 "call __read_lock_failed\n\t"
13595 "1:\n"
13596@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
13597 static inline void arch_write_lock(arch_rwlock_t *rw)
13598 {
13599 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
13600+
13601+#ifdef CONFIG_PAX_REFCOUNT
13602+ "jno 0f\n"
13603+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
13604+ "int $4\n0:\n"
13605+ _ASM_EXTABLE(0b, 0b)
13606+#endif
13607+
13608 "jz 1f\n"
13609 "call __write_lock_failed\n\t"
13610 "1:\n"
13611@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
13612
13613 static inline void arch_read_unlock(arch_rwlock_t *rw)
13614 {
13615- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
13616+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
13617+
13618+#ifdef CONFIG_PAX_REFCOUNT
13619+ "jno 0f\n"
13620+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
13621+ "int $4\n0:\n"
13622+ _ASM_EXTABLE(0b, 0b)
13623+#endif
13624+
13625 :"+m" (rw->lock) : : "memory");
13626 }
13627
13628 static inline void arch_write_unlock(arch_rwlock_t *rw)
13629 {
13630- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
13631+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
13632+
13633+#ifdef CONFIG_PAX_REFCOUNT
13634+ "jno 0f\n"
13635+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
13636+ "int $4\n0:\n"
13637+ _ASM_EXTABLE(0b, 0b)
13638+#endif
13639+
13640 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
13641 }
13642
13643diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13644index 6a99859..03cb807 100644
13645--- a/arch/x86/include/asm/stackprotector.h
13646+++ b/arch/x86/include/asm/stackprotector.h
13647@@ -47,7 +47,7 @@
13648 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13649 */
13650 #define GDT_STACK_CANARY_INIT \
13651- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13652+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13653
13654 /*
13655 * Initialize the stackprotector canary value.
13656@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
13657
13658 static inline void load_stack_canary_segment(void)
13659 {
13660-#ifdef CONFIG_X86_32
13661+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13662 asm volatile ("mov %0, %%gs" : : "r" (0));
13663 #endif
13664 }
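
The stack-canary descriptor change is an inclusive-limit fix of the same kind: GDT_ENTRY_INIT takes the limit field, which holds the segment size minus one. The canary area is 24 bytes, so the correct limit is 0x17; the old 0x18 let one byte past the structure be addressed through the segment. Stated as a check:

    #include <assert.h>

    /* Segment limits are inclusive: a segment of N bytes has limit N - 1. */
    enum { CANARY_BYTES = 24, CANARY_LIMIT = CANARY_BYTES - 1 };
    static_assert(CANARY_LIMIT == 0x17, "limit is size minus one");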
13665diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
13666index 70bbe39..4ae2bd4 100644
13667--- a/arch/x86/include/asm/stacktrace.h
13668+++ b/arch/x86/include/asm/stacktrace.h
13669@@ -11,28 +11,20 @@
13670
13671 extern int kstack_depth_to_print;
13672
13673-struct thread_info;
13674+struct task_struct;
13675 struct stacktrace_ops;
13676
13677-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
13678- unsigned long *stack,
13679- unsigned long bp,
13680- const struct stacktrace_ops *ops,
13681- void *data,
13682- unsigned long *end,
13683- int *graph);
13684+typedef unsigned long walk_stack_t(struct task_struct *task,
13685+ void *stack_start,
13686+ unsigned long *stack,
13687+ unsigned long bp,
13688+ const struct stacktrace_ops *ops,
13689+ void *data,
13690+ unsigned long *end,
13691+ int *graph);
13692
13693-extern unsigned long
13694-print_context_stack(struct thread_info *tinfo,
13695- unsigned long *stack, unsigned long bp,
13696- const struct stacktrace_ops *ops, void *data,
13697- unsigned long *end, int *graph);
13698-
13699-extern unsigned long
13700-print_context_stack_bp(struct thread_info *tinfo,
13701- unsigned long *stack, unsigned long bp,
13702- const struct stacktrace_ops *ops, void *data,
13703- unsigned long *end, int *graph);
13704+extern walk_stack_t print_context_stack;
13705+extern walk_stack_t print_context_stack_bp;
13706
13707 /* Generic stack tracer with callbacks */
13708
13709@@ -40,7 +32,7 @@ struct stacktrace_ops {
13710 void (*address)(void *data, unsigned long address, int reliable);
13711 /* On negative return stop dumping */
13712 int (*stack)(void *data, char *name);
13713- walk_stack_t walk_stack;
13714+ walk_stack_t *walk_stack;
13715 };
13716
13717 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
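
The stacktrace.h hunk swaps a pointer typedef for a function-type typedef: walk_stack_t now names the function type itself, so "extern walk_stack_t print_context_stack;" declares the function and "walk_stack_t *walk_stack;" declares the pointer member, and any drift between the two prototypes becomes a compile error. A small sketch of the idiom:

    /* A typedef of the function *type* itself (no leading '*'). */
    typedef int handler_t(int event);

    /* Declares a function with exactly that prototype... */
    extern handler_t on_key;
    /* ...and a member that points to one. */
    struct ops { handler_t *handle; };

    int on_key(int event) { return event + 1; }   /* must match handler_t */

    static struct ops key_ops = { .handle = on_key };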
13718diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
13719index 4ec45b3..a4f0a8a 100644
13720--- a/arch/x86/include/asm/switch_to.h
13721+++ b/arch/x86/include/asm/switch_to.h
13722@@ -108,7 +108,7 @@ do { \
13723 "call __switch_to\n\t" \
13724 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13725 __switch_canary \
13726- "movq %P[thread_info](%%rsi),%%r8\n\t" \
13727+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13728 "movq %%rax,%%rdi\n\t" \
13729 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13730 "jnz ret_from_fork\n\t" \
13731@@ -119,7 +119,7 @@ do { \
13732 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13733 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13734 [_tif_fork] "i" (_TIF_FORK), \
13735- [thread_info] "i" (offsetof(struct task_struct, stack)), \
13736+ [thread_info] "m" (current_tinfo), \
13737 [current_task] "m" (current_task) \
13738 __switch_canary_iparam \
13739 : "memory", "cc" __EXTRA_CLOBBER)
13740diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13741index 2d946e6..e453ec4 100644
13742--- a/arch/x86/include/asm/thread_info.h
13743+++ b/arch/x86/include/asm/thread_info.h
13744@@ -10,6 +10,7 @@
13745 #include <linux/compiler.h>
13746 #include <asm/page.h>
13747 #include <asm/types.h>
13748+#include <asm/percpu.h>
13749
13750 /*
13751 * low level task data that entry.S needs immediate access to
13752@@ -24,7 +25,6 @@ struct exec_domain;
13753 #include <linux/atomic.h>
13754
13755 struct thread_info {
13756- struct task_struct *task; /* main task structure */
13757 struct exec_domain *exec_domain; /* execution domain */
13758 __u32 flags; /* low level flags */
13759 __u32 status; /* thread synchronous flags */
13760@@ -34,19 +34,13 @@ struct thread_info {
13761 mm_segment_t addr_limit;
13762 struct restart_block restart_block;
13763 void __user *sysenter_return;
13764-#ifdef CONFIG_X86_32
13765- unsigned long previous_esp; /* ESP of the previous stack in
13766- case of nested (IRQ) stacks
13767- */
13768- __u8 supervisor_stack[0];
13769-#endif
13770+ unsigned long lowest_stack;
13771 unsigned int sig_on_uaccess_error:1;
13772 unsigned int uaccess_err:1; /* uaccess failed */
13773 };
13774
13775-#define INIT_THREAD_INFO(tsk) \
13776+#define INIT_THREAD_INFO \
13777 { \
13778- .task = &tsk, \
13779 .exec_domain = &default_exec_domain, \
13780 .flags = 0, \
13781 .cpu = 0, \
13782@@ -57,7 +51,7 @@ struct thread_info {
13783 }, \
13784 }
13785
13786-#define init_thread_info (init_thread_union.thread_info)
13787+#define init_thread_info (init_thread_union.stack)
13788 #define init_stack (init_thread_union.stack)
13789
13790 #else /* !__ASSEMBLY__ */
13791@@ -98,6 +92,7 @@ struct thread_info {
13792 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
13793 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
13794 #define TIF_X32 30 /* 32-bit native x86-64 binary */
13795+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
13796
13797 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
13798 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
13799@@ -122,17 +117,18 @@ struct thread_info {
13800 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
13801 #define _TIF_ADDR32 (1 << TIF_ADDR32)
13802 #define _TIF_X32 (1 << TIF_X32)
13803+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
13804
13805 /* work to do in syscall_trace_enter() */
13806 #define _TIF_WORK_SYSCALL_ENTRY \
13807 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
13808 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
13809- _TIF_NOHZ)
13810+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
13811
13812 /* work to do in syscall_trace_leave() */
13813 #define _TIF_WORK_SYSCALL_EXIT \
13814 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
13815- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
13816+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
13817
13818 /* work to do on interrupt/exception return */
13819 #define _TIF_WORK_MASK \
13820@@ -143,7 +139,7 @@ struct thread_info {
13821 /* work to do on any return to user space */
13822 #define _TIF_ALLWORK_MASK \
13823 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
13824- _TIF_NOHZ)
13825+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
13826
13827 /* Only used for 64 bit */
13828 #define _TIF_DO_NOTIFY_MASK \
13829@@ -159,45 +155,40 @@ struct thread_info {
13830
13831 #define PREEMPT_ACTIVE 0x10000000
13832
13833-#ifdef CONFIG_X86_32
13834-
13835-#define STACK_WARN (THREAD_SIZE/8)
13836-/*
13837- * macros/functions for gaining access to the thread information structure
13838- *
13839- * preempt_count needs to be 1 initially, until the scheduler is functional.
13840- */
13841-#ifndef __ASSEMBLY__
13842-
13843-
13844-/* how to get the current stack pointer from C */
13845-register unsigned long current_stack_pointer asm("esp") __used;
13846-
13847-/* how to get the thread information struct from C */
13848-static inline struct thread_info *current_thread_info(void)
13849-{
13850- return (struct thread_info *)
13851- (current_stack_pointer & ~(THREAD_SIZE - 1));
13852-}
13853-
13854-#else /* !__ASSEMBLY__ */
13855-
13856+#ifdef __ASSEMBLY__
13857 /* how to get the thread information struct from ASM */
13858 #define GET_THREAD_INFO(reg) \
13859- movl $-THREAD_SIZE, reg; \
13860- andl %esp, reg
13861+ mov PER_CPU_VAR(current_tinfo), reg
13862
13863 /* use this one if reg already contains %esp */
13864-#define GET_THREAD_INFO_WITH_ESP(reg) \
13865- andl $-THREAD_SIZE, reg
13866+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13867+#else
13868+/* how to get the thread information struct from C */
13869+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13870+
13871+static __always_inline struct thread_info *current_thread_info(void)
13872+{
13873+ return this_cpu_read_stable(current_tinfo);
13874+}
13875+#endif
13876+
13877+#ifdef CONFIG_X86_32
13878+
13879+#define STACK_WARN (THREAD_SIZE/8)
13880+/*
13881+ * macros/functions for gaining access to the thread information structure
13882+ *
13883+ * preempt_count needs to be 1 initially, until the scheduler is functional.
13884+ */
13885+#ifndef __ASSEMBLY__
13886+
13887+/* how to get the current stack pointer from C */
13888+register unsigned long current_stack_pointer asm("esp") __used;
13889
13890 #endif
13891
13892 #else /* X86_32 */
13893
13894-#include <asm/percpu.h>
13895-#define KERNEL_STACK_OFFSET (5*8)
13896-
13897 /*
13898 * macros/functions for gaining access to the thread information structure
13899 * preempt_count needs to be 1 initially, until the scheduler is functional.
13900@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
13901 #ifndef __ASSEMBLY__
13902 DECLARE_PER_CPU(unsigned long, kernel_stack);
13903
13904-static inline struct thread_info *current_thread_info(void)
13905-{
13906- struct thread_info *ti;
13907- ti = (void *)(this_cpu_read_stable(kernel_stack) +
13908- KERNEL_STACK_OFFSET - THREAD_SIZE);
13909- return ti;
13910-}
13911-
13912-#else /* !__ASSEMBLY__ */
13913-
13914-/* how to get the thread information struct from ASM */
13915-#define GET_THREAD_INFO(reg) \
13916- movq PER_CPU_VAR(kernel_stack),reg ; \
13917- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13918-
13919-/*
13920- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
13921- * a certain register (to be used in assembler memory operands).
13922- */
13923-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
13924-
13925+/* how to get the current stack pointer from C */
13926+register unsigned long current_stack_pointer asm("rsp") __used;
13927 #endif
13928
13929 #endif /* !X86_32 */
13930@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
13931 extern void arch_task_cache_init(void);
13932 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13933 extern void arch_release_task_struct(struct task_struct *tsk);
13934+
13935+#define __HAVE_THREAD_FUNCTIONS
13936+#define task_thread_info(task) (&(task)->tinfo)
13937+#define task_stack_page(task) ((task)->stack)
13938+#define setup_thread_stack(p, org) do {} while (0)
13939+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13940+
13941 #endif
13942 #endif /* _ASM_X86_THREAD_INFO_H */
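
With thread_info moved out of the kernel stack (task_thread_info() now points into task_struct and GET_THREAD_INFO reads a per-cpu variable), the old "mask the stack pointer down to THREAD_SIZE" lookup disappears on both 32- and 64-bit. A userspace sketch contrasting the two lookups, with hypothetical values; the new this_cpu_read_stable(current_tinfo) compiles to a single segment-relative load:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL              /* hypothetical 2-page stack */

    int main(void)
    {
        uintptr_t sp = 0xffff8800001f3ac0UL;    /* fake stack pointer */

        /* Old scheme: thread_info at the stack base, found by masking.
         * Leaking its address therefore leaked the stack base too. */
        uintptr_t ti_old = sp & ~(THREAD_SIZE - 1);
        printf("old thread_info at %#lx\n", (unsigned long)ti_old);

        /* New scheme: one per-cpu pointer load; nothing about the stack
         * location can be derived from a thread_info address. */
        return 0;
    }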
13943diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13944index 7ccf8d1..9a18110 100644
13945--- a/arch/x86/include/asm/uaccess.h
13946+++ b/arch/x86/include/asm/uaccess.h
13947@@ -7,6 +7,7 @@
13948 #include <linux/compiler.h>
13949 #include <linux/thread_info.h>
13950 #include <linux/string.h>
13951+#include <linux/sched.h>
13952 #include <asm/asm.h>
13953 #include <asm/page.h>
13954 #include <asm/smap.h>
13955@@ -29,7 +30,12 @@
13956
13957 #define get_ds() (KERNEL_DS)
13958 #define get_fs() (current_thread_info()->addr_limit)
13959+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13960+void __set_fs(mm_segment_t x);
13961+void set_fs(mm_segment_t x);
13962+#else
13963 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13964+#endif
13965
13966 #define segment_eq(a, b) ((a).seg == (b).seg)
13967
13968@@ -77,8 +83,33 @@
13969 * checks that the pointer is in the user space range - after calling
13970 * this function, memory access functions may still return -EFAULT.
13971 */
13972-#define access_ok(type, addr, size) \
13973- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13974+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
13975+#define access_ok(type, addr, size) \
13976+({ \
13977+ long __size = size; \
13978+ unsigned long __addr = (unsigned long)addr; \
13979+ unsigned long __addr_ao = __addr & PAGE_MASK; \
13980+ unsigned long __end_ao = __addr + __size - 1; \
13981+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
13982+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13983+ while(__addr_ao <= __end_ao) { \
13984+ char __c_ao; \
13985+ __addr_ao += PAGE_SIZE; \
13986+ if (__size > PAGE_SIZE) \
13987+ cond_resched(); \
13988+ if (__get_user(__c_ao, (char __user *)__addr)) \
13989+ break; \
13990+ if (type != VERIFY_WRITE) { \
13991+ __addr = __addr_ao; \
13992+ continue; \
13993+ } \
13994+ if (__put_user(__c_ao, (char __user *)__addr)) \
13995+ break; \
13996+ __addr = __addr_ao; \
13997+ } \
13998+ } \
13999+ __ret_ao; \
14000+})
14001
14002 /*
14003 * The exception table consists of pairs of addresses relative to the
14004@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
14005 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
14006 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
14007
14008-
14009+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
14010+#define __copyuser_seg "gs;"
14011+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
14012+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
14013+#else
14014+#define __copyuser_seg
14015+#define __COPYUSER_SET_ES
14016+#define __COPYUSER_RESTORE_ES
14017+#endif
14018
14019 #ifdef CONFIG_X86_32
14020 #define __put_user_asm_u64(x, addr, err, errret) \
14021 asm volatile(ASM_STAC "\n" \
14022- "1: movl %%eax,0(%2)\n" \
14023- "2: movl %%edx,4(%2)\n" \
14024+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
14025+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
14026 "3: " ASM_CLAC "\n" \
14027 ".section .fixup,\"ax\"\n" \
14028 "4: movl %3,%0\n" \
14029@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
14030
14031 #define __put_user_asm_ex_u64(x, addr) \
14032 asm volatile(ASM_STAC "\n" \
14033- "1: movl %%eax,0(%1)\n" \
14034- "2: movl %%edx,4(%1)\n" \
14035+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
14036+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
14037 "3: " ASM_CLAC "\n" \
14038 _ASM_EXTABLE_EX(1b, 2b) \
14039 _ASM_EXTABLE_EX(2b, 3b) \
14040@@ -261,7 +300,7 @@ extern void __put_user_8(void);
14041 __typeof__(*(ptr)) __pu_val; \
14042 __chk_user_ptr(ptr); \
14043 might_fault(); \
14044- __pu_val = x; \
14045+ __pu_val = (x); \
14046 switch (sizeof(*(ptr))) { \
14047 case 1: \
14048 __put_user_x(1, __pu_val, ptr, __ret_pu); \
14049@@ -383,7 +422,7 @@ do { \
14050
14051 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
14052 asm volatile(ASM_STAC "\n" \
14053- "1: mov"itype" %2,%"rtype"1\n" \
14054+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
14055 "2: " ASM_CLAC "\n" \
14056 ".section .fixup,\"ax\"\n" \
14057 "3: mov %3,%0\n" \
14058@@ -391,7 +430,7 @@ do { \
14059 " jmp 2b\n" \
14060 ".previous\n" \
14061 _ASM_EXTABLE(1b, 3b) \
14062- : "=r" (err), ltype(x) \
14063+ : "=r" (err), ltype (x) \
14064 : "m" (__m(addr)), "i" (errret), "0" (err))
14065
14066 #define __get_user_size_ex(x, ptr, size) \
14067@@ -416,7 +455,7 @@ do { \
14068 } while (0)
14069
14070 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
14071- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
14072+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
14073 "2:\n" \
14074 _ASM_EXTABLE_EX(1b, 2b) \
14075 : ltype(x) : "m" (__m(addr)))
14076@@ -433,13 +472,24 @@ do { \
14077 int __gu_err; \
14078 unsigned long __gu_val; \
14079 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
14080- (x) = (__force __typeof__(*(ptr)))__gu_val; \
14081+ (x) = (__typeof__(*(ptr)))__gu_val; \
14082 __gu_err; \
14083 })
14084
14085 /* FIXME: this hack is definitely wrong -AK */
14086 struct __large_struct { unsigned long buf[100]; };
14087-#define __m(x) (*(struct __large_struct __user *)(x))
14088+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14089+#define ____m(x) \
14090+({ \
14091+ unsigned long ____x = (unsigned long)(x); \
14092+ if (____x < PAX_USER_SHADOW_BASE) \
14093+ ____x += PAX_USER_SHADOW_BASE; \
14094+ (void __user *)____x; \
14095+})
14096+#else
14097+#define ____m(x) (x)
14098+#endif
14099+#define __m(x) (*(struct __large_struct __user *)____m(x))
14100
14101 /*
14102 * Tell gcc we read from memory instead of writing: this is because
14103@@ -448,7 +498,7 @@ struct __large_struct { unsigned long buf[100]; };
14104 */
14105 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
14106 asm volatile(ASM_STAC "\n" \
14107- "1: mov"itype" %"rtype"1,%2\n" \
14108+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
14109 "2: " ASM_CLAC "\n" \
14110 ".section .fixup,\"ax\"\n" \
14111 "3: mov %3,%0\n" \
14112@@ -456,10 +506,10 @@ struct __large_struct { unsigned long buf[100]; };
14113 ".previous\n" \
14114 _ASM_EXTABLE(1b, 3b) \
14115 : "=r"(err) \
14116- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
14117+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
14118
14119 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
14120- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
14121+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
14122 "2:\n" \
14123 _ASM_EXTABLE_EX(1b, 2b) \
14124 : : ltype(x), "m" (__m(addr)))
14125@@ -498,8 +548,12 @@ struct __large_struct { unsigned long buf[100]; };
14126 * On error, the variable @x is set to zero.
14127 */
14128
14129+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14130+#define __get_user(x, ptr) get_user((x), (ptr))
14131+#else
14132 #define __get_user(x, ptr) \
14133 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
14134+#endif
14135
14136 /**
14137 * __put_user: - Write a simple value into user space, with less checking.
14138@@ -521,8 +575,12 @@ struct __large_struct { unsigned long buf[100]; };
14139 * Returns zero on success, or -EFAULT on error.
14140 */
14141
14142+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14143+#define __put_user(x, ptr) put_user((x), (ptr))
14144+#else
14145 #define __put_user(x, ptr) \
14146 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
14147+#endif
14148
14149 #define __get_user_unaligned __get_user
14150 #define __put_user_unaligned __put_user
14151@@ -540,7 +598,7 @@ struct __large_struct { unsigned long buf[100]; };
14152 #define get_user_ex(x, ptr) do { \
14153 unsigned long __gue_val; \
14154 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
14155- (x) = (__force __typeof__(*(ptr)))__gue_val; \
14156+ (x) = (__typeof__(*(ptr)))__gue_val; \
14157 } while (0)
14158
14159 #ifdef CONFIG_X86_WP_WORKS_OK
14160@@ -574,8 +632,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
14161 extern __must_check long strlen_user(const char __user *str);
14162 extern __must_check long strnlen_user(const char __user *str, long n);
14163
14164-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
14165-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
14166+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14167+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14168
14169 /*
14170 * movsl can be slow when source and dest are not both 8-byte aligned
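
The rewritten access_ok() above goes beyond a range check: when the region crosses a page boundary it touches one byte per page with __get_user() (and writes it back with __put_user() for VERIFY_WRITE), faulting every page in before the real copy starts, with a cond_resched() on large ranges. A compilable sketch of the page-walk arithmetic, with a stub in place of the real user accessors:

    #include <stdbool.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Stub: the kernel probes with __get_user()/__put_user() here. */
    static bool probe_byte(unsigned long addr) { (void)addr; return true; }

    static bool prefault_range(unsigned long addr, long size)
    {
        unsigned long page = addr & PAGE_MASK;
        unsigned long end  = addr + size - 1;

        /* Single-page ranges need no walk: start and end share a page. */
        if (!((end ^ page) & PAGE_MASK))
            return true;

        while (page <= end) {
            page += PAGE_SIZE;
            if (!probe_byte(addr))      /* touch one byte in this page */
                return false;
            addr = page;                /* next probe at the page start */
        }
        return true;
    }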
14171diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
14172index 7f760a9..04b1c65 100644
14173--- a/arch/x86/include/asm/uaccess_32.h
14174+++ b/arch/x86/include/asm/uaccess_32.h
14175@@ -11,15 +11,15 @@
14176 #include <asm/page.h>
14177
14178 unsigned long __must_check __copy_to_user_ll
14179- (void __user *to, const void *from, unsigned long n);
14180+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
14181 unsigned long __must_check __copy_from_user_ll
14182- (void *to, const void __user *from, unsigned long n);
14183+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14184 unsigned long __must_check __copy_from_user_ll_nozero
14185- (void *to, const void __user *from, unsigned long n);
14186+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14187 unsigned long __must_check __copy_from_user_ll_nocache
14188- (void *to, const void __user *from, unsigned long n);
14189+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14190 unsigned long __must_check __copy_from_user_ll_nocache_nozero
14191- (void *to, const void __user *from, unsigned long n);
14192+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
14193
14194 /**
14195 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
14196@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
14197 static __always_inline unsigned long __must_check
14198 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
14199 {
14200+ if ((long)n < 0)
14201+ return n;
14202+
14203+ check_object_size(from, n, true);
14204+
14205 if (__builtin_constant_p(n)) {
14206 unsigned long ret;
14207
14208@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
14209 __copy_to_user(void __user *to, const void *from, unsigned long n)
14210 {
14211 might_fault();
14212+
14213 return __copy_to_user_inatomic(to, from, n);
14214 }
14215
14216 static __always_inline unsigned long
14217 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
14218 {
14219+ if ((long)n < 0)
14220+ return n;
14221+
14222 /* Avoid zeroing the tail if the copy fails..
14223 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
14224 * but as the zeroing behaviour is only significant when n is not
14225@@ -137,6 +146,12 @@ static __always_inline unsigned long
14226 __copy_from_user(void *to, const void __user *from, unsigned long n)
14227 {
14228 might_fault();
14229+
14230+ if ((long)n < 0)
14231+ return n;
14232+
14233+ check_object_size(to, n, false);
14234+
14235 if (__builtin_constant_p(n)) {
14236 unsigned long ret;
14237
14238@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
14239 const void __user *from, unsigned long n)
14240 {
14241 might_fault();
14242+
14243+ if ((long)n < 0)
14244+ return n;
14245+
14246 if (__builtin_constant_p(n)) {
14247 unsigned long ret;
14248
14249@@ -181,15 +200,19 @@ static __always_inline unsigned long
14250 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
14251 unsigned long n)
14252 {
14253- return __copy_from_user_ll_nocache_nozero(to, from, n);
14254+ if ((long)n < 0)
14255+ return n;
14256+
14257+ return __copy_from_user_ll_nocache_nozero(to, from, n);
14258 }
14259
14260-unsigned long __must_check copy_to_user(void __user *to,
14261- const void *from, unsigned long n);
14262-unsigned long __must_check _copy_from_user(void *to,
14263- const void __user *from,
14264- unsigned long n);
14265-
14266+extern void copy_to_user_overflow(void)
14267+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14268+ __compiletime_error("copy_to_user() buffer size is not provably correct")
14269+#else
14270+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
14271+#endif
14272+;
14273
14274 extern void copy_from_user_overflow(void)
14275 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14276@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
14277 #endif
14278 ;
14279
14280-static inline unsigned long __must_check copy_from_user(void *to,
14281- const void __user *from,
14282- unsigned long n)
14283+/**
14284+ * copy_to_user: - Copy a block of data into user space.
14285+ * @to: Destination address, in user space.
14286+ * @from: Source address, in kernel space.
14287+ * @n: Number of bytes to copy.
14288+ *
14289+ * Context: User context only. This function may sleep.
14290+ *
14291+ * Copy data from kernel space to user space.
14292+ *
14293+ * Returns number of bytes that could not be copied.
14294+ * On success, this will be zero.
14295+ */
14296+static inline unsigned long __must_check
14297+copy_to_user(void __user *to, const void *from, unsigned long n)
14298 {
14299- int sz = __compiletime_object_size(to);
14300+ size_t sz = __compiletime_object_size(from);
14301
14302- if (likely(sz == -1 || sz >= n))
14303- n = _copy_from_user(to, from, n);
14304- else
14305+ if (unlikely(sz != (size_t)-1 && sz < n))
14306+ copy_to_user_overflow();
14307+ else if (access_ok(VERIFY_WRITE, to, n))
14308+ n = __copy_to_user(to, from, n);
14309+ return n;
14310+}
14311+
14312+/**
14313+ * copy_from_user: - Copy a block of data from user space.
14314+ * @to: Destination address, in kernel space.
14315+ * @from: Source address, in user space.
14316+ * @n: Number of bytes to copy.
14317+ *
14318+ * Context: User context only. This function may sleep.
14319+ *
14320+ * Copy data from user space to kernel space.
14321+ *
14322+ * Returns number of bytes that could not be copied.
14323+ * On success, this will be zero.
14324+ *
14325+ * If some data could not be copied, this function will pad the copied
14326+ * data to the requested size using zero bytes.
14327+ */
14328+static inline unsigned long __must_check
14329+copy_from_user(void *to, const void __user *from, unsigned long n)
14330+{
14331+ size_t sz = __compiletime_object_size(to);
14332+
14333+ check_object_size(to, n, false);
14334+
14335+ if (unlikely(sz != (size_t)-1 && sz < n))
14336 copy_from_user_overflow();
14337-
14338+ else if (access_ok(VERIFY_READ, from, n))
14339+ n = __copy_from_user(to, from, n);
14340+ else if ((long)n > 0)
14341+ memset(to, 0, n);
14342 return n;
14343 }
14344
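
The 32-bit copy_to_user()/copy_from_user() replacements put four guards in front of the raw copy: a compile-time object-size probe (__compiletime_object_size) that turns provable overflows into build errors or warnings, a runtime size comparison against that object size, access_ok(), and, on a failed copy_from_user(), a memset() of the kernel buffer so no stale kernel bytes are left behind. A condensed sketch of that control flow with stand-in helpers (not the kernel's API):

    #include <stdbool.h>
    #include <string.h>

    /* Stand-ins for the kernel primitives the real function uses. */
    static bool access_ok_stub(const void *p, unsigned long n)
    {
        (void)p; (void)n;
        return true;                    /* access_ok(VERIFY_READ, ...) */
    }
    static unsigned long raw_copy_stub(void *to, const void *from,
                                       unsigned long n)
    {
        memcpy(to, from, n);
        return 0;                       /* bytes left uncopied */
    }

    static unsigned long
    copy_from_user_sketch(void *to, const void *from, unsigned long n,
                          unsigned long objsize /* compile-time constant */)
    {
        if (objsize != (unsigned long)-1 && objsize < n)
            return n;                   /* kernel: build error or warning */
        if (access_ok_stub(from, n))
            return raw_copy_stub(to, from, n);
        if ((long)n > 0)
            memset(to, 0, n);           /* never expose stale kernel data */
        return n;
    }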
14345diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
14346index 142810c..4b68a3e 100644
14347--- a/arch/x86/include/asm/uaccess_64.h
14348+++ b/arch/x86/include/asm/uaccess_64.h
14349@@ -10,6 +10,9 @@
14350 #include <asm/alternative.h>
14351 #include <asm/cpufeature.h>
14352 #include <asm/page.h>
14353+#include <asm/pgtable.h>
14354+
14355+#define set_fs(x) (current_thread_info()->addr_limit = (x))
14356
14357 /*
14358 * Copy To/From Userspace
14359@@ -17,13 +20,13 @@
14360
14361 /* Handles exceptions in both to and from, but doesn't do access_ok */
14362 __must_check unsigned long
14363-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
14364+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
14365 __must_check unsigned long
14366-copy_user_generic_string(void *to, const void *from, unsigned len);
14367+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
14368 __must_check unsigned long
14369-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
14370+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
14371
14372-static __always_inline __must_check unsigned long
14373+static __always_inline __must_check __size_overflow(3) unsigned long
14374 copy_user_generic(void *to, const void *from, unsigned len)
14375 {
14376 unsigned ret;
14377@@ -41,142 +44,203 @@ copy_user_generic(void *to, const void *from, unsigned len)
14378 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
14379 "=d" (len)),
14380 "1" (to), "2" (from), "3" (len)
14381- : "memory", "rcx", "r8", "r9", "r10", "r11");
14382+ : "memory", "rcx", "r8", "r9", "r11");
14383 return ret;
14384 }
14385
14386+static __always_inline __must_check unsigned long
14387+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
14388+static __always_inline __must_check unsigned long
14389+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
14390 __must_check unsigned long
14391-_copy_to_user(void __user *to, const void *from, unsigned len);
14392-__must_check unsigned long
14393-_copy_from_user(void *to, const void __user *from, unsigned len);
14394-__must_check unsigned long
14395-copy_in_user(void __user *to, const void __user *from, unsigned len);
14396+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
14397+
14398+extern void copy_to_user_overflow(void)
14399+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14400+ __compiletime_error("copy_to_user() buffer size is not provably correct")
14401+#else
14402+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
14403+#endif
14404+;
14405+
14406+extern void copy_from_user_overflow(void)
14407+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
14408+ __compiletime_error("copy_from_user() buffer size is not provably correct")
14409+#else
14410+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
14411+#endif
14412+;
14413
14414 static inline unsigned long __must_check copy_from_user(void *to,
14415 const void __user *from,
14416 unsigned long n)
14417 {
14418- int sz = __compiletime_object_size(to);
14419-
14420 might_fault();
14421- if (likely(sz == -1 || sz >= n))
14422- n = _copy_from_user(to, from, n);
14423-#ifdef CONFIG_DEBUG_VM
14424- else
14425- WARN(1, "Buffer overflow detected!\n");
14426-#endif
14427+
14428+ check_object_size(to, n, false);
14429+ if (access_ok(VERIFY_READ, from, n))
14430+ n = __copy_from_user(to, from, n);
14431+ else if (n < INT_MAX)
14432+ memset(to, 0, n);
14433 return n;
14434 }
14435
14436 static __always_inline __must_check
14437-int copy_to_user(void __user *dst, const void *src, unsigned size)
14438+int copy_to_user(void __user *dst, const void *src, unsigned long size)
14439 {
14440 might_fault();
14441
14442- return _copy_to_user(dst, src, size);
14443+ if (access_ok(VERIFY_WRITE, dst, size))
14444+ size = __copy_to_user(dst, src, size);
14445+ return size;
14446 }
14447
14448 static __always_inline __must_check
14449-int __copy_from_user(void *dst, const void __user *src, unsigned size)
14450+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
14451 {
14452- int ret = 0;
14453+ size_t sz = __compiletime_object_size(dst);
14454+ unsigned ret = 0;
14455
14456 might_fault();
14457+
14458+ if (size > INT_MAX)
14459+ return size;
14460+
14461+ check_object_size(dst, size, false);
14462+
14463+#ifdef CONFIG_PAX_MEMORY_UDEREF
14464+ if (!__access_ok(VERIFY_READ, src, size))
14465+ return size;
14466+#endif
14467+
14468+ if (unlikely(sz != (size_t)-1 && sz < size)) {
14469+ copy_from_user_overflow();
14470+ return size;
14471+ }
14472+
14473 if (!__builtin_constant_p(size))
14474- return copy_user_generic(dst, (__force void *)src, size);
14475+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14476 switch (size) {
14477- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
14478+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
14479 ret, "b", "b", "=q", 1);
14480 return ret;
14481- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
14482+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
14483 ret, "w", "w", "=r", 2);
14484 return ret;
14485- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
14486+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
14487 ret, "l", "k", "=r", 4);
14488 return ret;
14489- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
14490+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14491 ret, "q", "", "=r", 8);
14492 return ret;
14493 case 10:
14494- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14495+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14496 ret, "q", "", "=r", 10);
14497 if (unlikely(ret))
14498 return ret;
14499 __get_user_asm(*(u16 *)(8 + (char *)dst),
14500- (u16 __user *)(8 + (char __user *)src),
14501+ (const u16 __user *)(8 + (const char __user *)src),
14502 ret, "w", "w", "=r", 2);
14503 return ret;
14504 case 16:
14505- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
14506+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
14507 ret, "q", "", "=r", 16);
14508 if (unlikely(ret))
14509 return ret;
14510 __get_user_asm(*(u64 *)(8 + (char *)dst),
14511- (u64 __user *)(8 + (char __user *)src),
14512+ (const u64 __user *)(8 + (const char __user *)src),
14513 ret, "q", "", "=r", 8);
14514 return ret;
14515 default:
14516- return copy_user_generic(dst, (__force void *)src, size);
14517+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14518 }
14519 }
14520
14521 static __always_inline __must_check
14522-int __copy_to_user(void __user *dst, const void *src, unsigned size)
14523+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
14524 {
14525- int ret = 0;
14526+ size_t sz = __compiletime_object_size(src);
14527+ unsigned ret = 0;
14528
14529 might_fault();
14530+
14531+ if (size > INT_MAX)
14532+ return size;
14533+
14534+ check_object_size(src, size, true);
14535+
14536+#ifdef CONFIG_PAX_MEMORY_UDEREF
14537+ if (!__access_ok(VERIFY_WRITE, dst, size))
14538+ return size;
14539+#endif
14540+
14541+ if (unlikely(sz != (size_t)-1 && sz < size)) {
14542+ copy_to_user_overflow();
14543+ return size;
14544+ }
14545+
14546 if (!__builtin_constant_p(size))
14547- return copy_user_generic((__force void *)dst, src, size);
14548+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14549 switch (size) {
14550- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
14551+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
14552 ret, "b", "b", "iq", 1);
14553 return ret;
14554- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
14555+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
14556 ret, "w", "w", "ir", 2);
14557 return ret;
14558- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
14559+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
14560 ret, "l", "k", "ir", 4);
14561 return ret;
14562- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
14563+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14564 ret, "q", "", "er", 8);
14565 return ret;
14566 case 10:
14567- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14568+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14569 ret, "q", "", "er", 10);
14570 if (unlikely(ret))
14571 return ret;
14572 asm("":::"memory");
14573- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
14574+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
14575 ret, "w", "w", "ir", 2);
14576 return ret;
14577 case 16:
14578- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
14579+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
14580 ret, "q", "", "er", 16);
14581 if (unlikely(ret))
14582 return ret;
14583 asm("":::"memory");
14584- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
14585+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
14586 ret, "q", "", "er", 8);
14587 return ret;
14588 default:
14589- return copy_user_generic((__force void *)dst, src, size);
14590+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14591 }
14592 }
14593
14594 static __always_inline __must_check
14595-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14596+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
14597 {
14598- int ret = 0;
14599+ unsigned ret = 0;
14600
14601 might_fault();
14602+
14603+ if (size > INT_MAX)
14604+ return size;
14605+
14606+#ifdef CONFIG_PAX_MEMORY_UDEREF
14607+ if (!__access_ok(VERIFY_READ, src, size))
14608+ return size;
14609+ if (!__access_ok(VERIFY_WRITE, dst, size))
14610+ return size;
14611+#endif
14612+
14613 if (!__builtin_constant_p(size))
14614- return copy_user_generic((__force void *)dst,
14615- (__force void *)src, size);
14616+ return copy_user_generic((__force_kernel void *)____m(dst),
14617+ (__force_kernel const void *)____m(src), size);
14618 switch (size) {
14619 case 1: {
14620 u8 tmp;
14621- __get_user_asm(tmp, (u8 __user *)src,
14622+ __get_user_asm(tmp, (const u8 __user *)src,
14623 ret, "b", "b", "=q", 1);
14624 if (likely(!ret))
14625 __put_user_asm(tmp, (u8 __user *)dst,
14626@@ -185,7 +249,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14627 }
14628 case 2: {
14629 u16 tmp;
14630- __get_user_asm(tmp, (u16 __user *)src,
14631+ __get_user_asm(tmp, (const u16 __user *)src,
14632 ret, "w", "w", "=r", 2);
14633 if (likely(!ret))
14634 __put_user_asm(tmp, (u16 __user *)dst,
14635@@ -195,7 +259,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14636
14637 case 4: {
14638 u32 tmp;
14639- __get_user_asm(tmp, (u32 __user *)src,
14640+ __get_user_asm(tmp, (const u32 __user *)src,
14641 ret, "l", "k", "=r", 4);
14642 if (likely(!ret))
14643 __put_user_asm(tmp, (u32 __user *)dst,
14644@@ -204,7 +268,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14645 }
14646 case 8: {
14647 u64 tmp;
14648- __get_user_asm(tmp, (u64 __user *)src,
14649+ __get_user_asm(tmp, (const u64 __user *)src,
14650 ret, "q", "", "=r", 8);
14651 if (likely(!ret))
14652 __put_user_asm(tmp, (u64 __user *)dst,
14653@@ -212,41 +276,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14654 return ret;
14655 }
14656 default:
14657- return copy_user_generic((__force void *)dst,
14658- (__force void *)src, size);
14659+ return copy_user_generic((__force_kernel void *)____m(dst),
14660+ (__force_kernel const void *)____m(src), size);
14661 }
14662 }
14663
14664 static __must_check __always_inline int
14665-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
14666+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14667 {
14668- return copy_user_generic(dst, (__force const void *)src, size);
14669+ if (size > INT_MAX)
14670+ return size;
14671+
14672+#ifdef CONFIG_PAX_MEMORY_UDEREF
14673+ if (!__access_ok(VERIFY_READ, src, size))
14674+ return size;
14675+#endif
14676+
14677+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
14678 }
14679
14680-static __must_check __always_inline int
14681-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14682+static __must_check __always_inline unsigned long
14683+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14684 {
14685- return copy_user_generic((__force void *)dst, src, size);
14686+ if (size > INT_MAX)
14687+ return size;
14688+
14689+#ifdef CONFIG_PAX_MEMORY_UDEREF
14690+ if (!__access_ok(VERIFY_WRITE, dst, size))
14691+ return size;
14692+#endif
14693+
14694+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
14695 }
14696
14697-extern long __copy_user_nocache(void *dst, const void __user *src,
14698- unsigned size, int zerorest);
14699+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14700+ unsigned long size, int zerorest) __size_overflow(3);
14701
14702-static inline int
14703-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14704+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14705 {
14706 might_sleep();
14707+
14708+ if (size > INT_MAX)
14709+ return size;
14710+
14711+#ifdef CONFIG_PAX_MEMORY_UDEREF
14712+ if (!__access_ok(VERIFY_READ, src, size))
14713+ return size;
14714+#endif
14715+
14716 return __copy_user_nocache(dst, src, size, 1);
14717 }
14718
14719-static inline int
14720-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14721- unsigned size)
14722+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14723+ unsigned long size)
14724 {
14725+ if (size > INT_MAX)
14726+ return size;
14727+
14728+#ifdef CONFIG_PAX_MEMORY_UDEREF
14729+ if (!__access_ok(VERIFY_READ, src, size))
14730+ return size;
14731+#endif
14732+
14733 return __copy_user_nocache(dst, src, size, 0);
14734 }
14735
14736-unsigned long
14737-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14738+extern unsigned long
14739+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14740
14741 #endif /* _ASM_X86_UACCESS_64_H */
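
On 64-bit the patch also widens every size parameter from "unsigned" to unsigned long and rejects sizes above INT_MAX up front. The widening matters on its own: passing a 64-bit length through an "unsigned" parameter silently truncates it, so a huge request can arrive at the checks looking tiny. A two-line demonstration of the truncation:

    #include <stdio.h>

    int main(void)
    {
        unsigned long requested = 0x100000004UL;    /* > 4 GiB */
        unsigned seen = (unsigned)requested;        /* old parameter type */
        printf("caller asked %#lx, callee checked %#x\n", requested, seen);
        return 0;                                   /* 0x100000004 vs 0x4 */
    }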
14742diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
14743index 5b238981..77fdd78 100644
14744--- a/arch/x86/include/asm/word-at-a-time.h
14745+++ b/arch/x86/include/asm/word-at-a-time.h
14746@@ -11,7 +11,7 @@
14747 * and shift, for example.
14748 */
14749 struct word_at_a_time {
14750- const unsigned long one_bits, high_bits;
14751+ unsigned long one_bits, high_bits;
14752 };
14753
14754 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
14755diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
14756index 5769349..a3d3e2a 100644
14757--- a/arch/x86/include/asm/x86_init.h
14758+++ b/arch/x86/include/asm/x86_init.h
14759@@ -141,7 +141,7 @@ struct x86_init_ops {
14760 struct x86_init_timers timers;
14761 struct x86_init_iommu iommu;
14762 struct x86_init_pci pci;
14763-};
14764+} __no_const;
14765
14766 /**
14767 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
14768@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
14769 void (*setup_percpu_clockev)(void);
14770 void (*early_percpu_clock_init)(void);
14771 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
14772-};
14773+} __no_const;
14774
14775 /**
14776 * struct x86_platform_ops - platform specific runtime functions
14777@@ -178,7 +178,7 @@ struct x86_platform_ops {
14778 void (*save_sched_clock_state)(void);
14779 void (*restore_sched_clock_state)(void);
14780 void (*apic_post_init)(void);
14781-};
14782+} __no_const;
14783
14784 struct pci_dev;
14785
14786@@ -187,14 +187,14 @@ struct x86_msi_ops {
14787 void (*teardown_msi_irq)(unsigned int irq);
14788 void (*teardown_msi_irqs)(struct pci_dev *dev);
14789 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
14790-};
14791+} __no_const;
14792
14793 struct x86_io_apic_ops {
14794 void (*init) (void);
14795 unsigned int (*read) (unsigned int apic, unsigned int reg);
14796 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
14797 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
14798-};
14799+} __no_const;
14800
14801 extern struct x86_init_ops x86_init;
14802 extern struct x86_cpuinit_ops x86_cpuinit;
14803diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14804index 0415cda..b43d877 100644
14805--- a/arch/x86/include/asm/xsave.h
14806+++ b/arch/x86/include/asm/xsave.h
14807@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14808 return -EFAULT;
14809
14810 __asm__ __volatile__(ASM_STAC "\n"
14811- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14812+ "1:"
14813+ __copyuser_seg
14814+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
14815 "2: " ASM_CLAC "\n"
14816 ".section .fixup,\"ax\"\n"
14817 "3: movl $-1,%[err]\n"
14818@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14819 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14820 {
14821 int err;
14822- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14823+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14824 u32 lmask = mask;
14825 u32 hmask = mask >> 32;
14826
14827 __asm__ __volatile__(ASM_STAC "\n"
14828- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14829+ "1:"
14830+ __copyuser_seg
14831+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14832 "2: " ASM_CLAC "\n"
14833 ".section .fixup,\"ax\"\n"
14834 "3: movl $-1,%[err]\n"
14835diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
14836index 91ce48f..a48ea05 100644
14837--- a/arch/x86/kernel/Makefile
14838+++ b/arch/x86/kernel/Makefile
14839@@ -23,7 +23,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
14840 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
14841 obj-$(CONFIG_IRQ_WORK) += irq_work.o
14842 obj-y += probe_roms.o
14843-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
14844+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
14845 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
14846 obj-y += syscall_$(BITS).o
14847 obj-$(CONFIG_X86_64) += vsyscall_64.o
14848diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14849index 11676cf..a8cf3ec 100644
14850--- a/arch/x86/kernel/acpi/sleep.c
14851+++ b/arch/x86/kernel/acpi/sleep.c
14852@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
14853 #else /* CONFIG_64BIT */
14854 #ifdef CONFIG_SMP
14855 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14856+
14857+ pax_open_kernel();
14858 early_gdt_descr.address =
14859 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14860+ pax_close_kernel();
14861+
14862 initial_gs = per_cpu_offset(smp_processor_id());
14863 #endif
14864 initial_code = (unsigned long)wakeup_long64;
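
pax_open_kernel()/pax_close_kernel() bracket the single write to early_gdt_descr because KERNEXEC keeps such data read-only; the pair temporarily permits kernel writes and then restores protection. A loose userspace analogue of the discipline using mprotect() (the kernel mechanism differs, but the open-write-close shape is the same; error handling omitted):

    #include <string.h>
    #include <sys/mman.h>

    /* page must be page-aligned and len a multiple of the page size. */
    static void write_protected(void *page, size_t len,
                                const void *src, size_t n)
    {
        mprotect(page, len, PROT_READ | PROT_WRITE);    /* "open" */
        memcpy(page, src, n);                           /* the one write */
        mprotect(page, len, PROT_READ);                 /* "close" */
    }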
14865diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14866index 13ab720..95d5442 100644
14867--- a/arch/x86/kernel/acpi/wakeup_32.S
14868+++ b/arch/x86/kernel/acpi/wakeup_32.S
14869@@ -30,13 +30,11 @@ wakeup_pmode_return:
14870 # and restore the stack ... but you need gdt for this to work
14871 movl saved_context_esp, %esp
14872
14873- movl %cs:saved_magic, %eax
14874- cmpl $0x12345678, %eax
14875+ cmpl $0x12345678, saved_magic
14876 jne bogus_magic
14877
14878 # jump to place where we left off
14879- movl saved_eip, %eax
14880- jmp *%eax
14881+ jmp *(saved_eip)
14882
14883 bogus_magic:
14884 jmp bogus_magic
14885diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14886index ef5ccca..bd83949 100644
14887--- a/arch/x86/kernel/alternative.c
14888+++ b/arch/x86/kernel/alternative.c
14889@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
14890 */
14891 for (a = start; a < end; a++) {
14892 instr = (u8 *)&a->instr_offset + a->instr_offset;
14893+
14894+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14895+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14896+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
14897+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14898+#endif
14899+
14900 replacement = (u8 *)&a->repl_offset + a->repl_offset;
14901 BUG_ON(a->replacementlen > a->instrlen);
14902 BUG_ON(a->instrlen > sizeof(insnbuf));
14903@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
14904 for (poff = start; poff < end; poff++) {
14905 u8 *ptr = (u8 *)poff + *poff;
14906
14907+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14908+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14909+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14910+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14911+#endif
14912+
14913 if (!*poff || ptr < text || ptr >= text_end)
14914 continue;
14915 /* turn DS segment override prefix into lock prefix */
14916- if (*ptr == 0x3e)
14917+ if (*ktla_ktva(ptr) == 0x3e)
14918 text_poke(ptr, ((unsigned char []){0xf0}), 1);
14919 }
14920 mutex_unlock(&text_mutex);
14921@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
14922 for (poff = start; poff < end; poff++) {
14923 u8 *ptr = (u8 *)poff + *poff;
14924
14925+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14926+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14927+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
14928+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
14929+#endif
14930+
14931 if (!*poff || ptr < text || ptr >= text_end)
14932 continue;
14933 /* turn lock prefix into DS segment override prefix */
14934- if (*ptr == 0xf0)
14935+ if (*ktla_ktva(ptr) == 0xf0)
14936 text_poke(ptr, ((unsigned char []){0x3E}), 1);
14937 }
14938 mutex_unlock(&text_mutex);
14939@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14940
14941 BUG_ON(p->len > MAX_PATCH_LEN);
14942 /* prep the buffer with the original instructions */
14943- memcpy(insnbuf, p->instr, p->len);
14944+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14945 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14946 (unsigned long)p->instr, p->len);
14947
14948@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
14949 if (!uniproc_patched || num_possible_cpus() == 1)
14950 free_init_pages("SMP alternatives",
14951 (unsigned long)__smp_locks,
14952- (unsigned long)__smp_locks_end);
14953+ PAGE_ALIGN((unsigned long)__smp_locks_end));
14954 #endif
14955
14956 apply_paravirt(__parainstructions, __parainstructions_end);
14957@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
14958 * instructions. And on the local CPU you need to be protected against NMI or MCE
14959 * handlers seeing an inconsistent instruction while you patch.
14960 */
14961-void *__init_or_module text_poke_early(void *addr, const void *opcode,
14962+void *__kprobes text_poke_early(void *addr, const void *opcode,
14963 size_t len)
14964 {
14965 unsigned long flags;
14966 local_irq_save(flags);
14967- memcpy(addr, opcode, len);
14968+
14969+ pax_open_kernel();
14970+ memcpy(ktla_ktva(addr), opcode, len);
14971 sync_core();
14972+ pax_close_kernel();
14973+
14974 local_irq_restore(flags);
14975 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14976 that causes hangs on some VIA CPUs. */
14977@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
14978 */
14979 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14980 {
14981- unsigned long flags;
14982- char *vaddr;
14983+ unsigned char *vaddr = ktla_ktva(addr);
14984 struct page *pages[2];
14985- int i;
14986+ size_t i;
14987
14988 if (!core_kernel_text((unsigned long)addr)) {
14989- pages[0] = vmalloc_to_page(addr);
14990- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14991+ pages[0] = vmalloc_to_page(vaddr);
14992+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14993 } else {
14994- pages[0] = virt_to_page(addr);
14995+ pages[0] = virt_to_page(vaddr);
14996 WARN_ON(!PageReserved(pages[0]));
14997- pages[1] = virt_to_page(addr + PAGE_SIZE);
14998+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14999 }
15000 BUG_ON(!pages[0]);
15001- local_irq_save(flags);
15002- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
15003- if (pages[1])
15004- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
15005- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
15006- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
15007- clear_fixmap(FIX_TEXT_POKE0);
15008- if (pages[1])
15009- clear_fixmap(FIX_TEXT_POKE1);
15010- local_flush_tlb();
15011- sync_core();
15012- /* Could also do a CLFLUSH here to speed up CPU recovery; but
15013- that causes hangs on some VIA CPUs. */
15014+ text_poke_early(addr, opcode, len);
15015 for (i = 0; i < len; i++)
15016- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
15017- local_irq_restore(flags);
15018+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
15019 return addr;
15020 }
15021
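
Note on ktla_ktva(), which the alternative.c hunks above lean on: with 32-bit
PAX_KERNEXEC the kernel executes its text through a read-only alias at a
different linear address than the writable mapping, so code patching has to
translate between the two before reading or writing instruction bytes. A
sketch of the assumed i386 KERNEXEC form (on all other configurations both
macros are identities):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
#else
#define ktla_ktva(addr)	(addr)
#define ktva_ktla(addr)	(addr)
#endif

This is also why text_poke_early() now wraps its memcpy() in
pax_open_kernel()/pax_close_kernel(): the destination is normally
write-protected, and the window with write protection lifted is kept as
short as possible.
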
15022diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
15023index b17416e..5ed0f3e 100644
15024--- a/arch/x86/kernel/apic/apic.c
15025+++ b/arch/x86/kernel/apic/apic.c
15026@@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
15027 /*
15028 * Debug level, exported for io_apic.c
15029 */
15030-unsigned int apic_verbosity;
15031+int apic_verbosity;
15032
15033 int pic_mode;
15034
15035@@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
15036 apic_write(APIC_ESR, 0);
15037 v1 = apic_read(APIC_ESR);
15038 ack_APIC_irq();
15039- atomic_inc(&irq_err_count);
15040+ atomic_inc_unchecked(&irq_err_count);
15041
15042 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
15043 smp_processor_id(), v0 , v1);
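
Note on atomic_unchecked_t: under PAX_REFCOUNT every atomic_t increment is
instrumented with an overflow trap to catch reference-count overflows, so
counters that are allowed to wrap (pure statistics such as irq_err_count
here, or irq_mis_count in the io_apic.c hunk further down) are moved to the
_unchecked variants, which behave like the classic ops. A sketch of the x86
definitions this patch is assumed to introduce elsewhere:

typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);
}

/* like atomic_inc(), minus the REFCOUNT overflow check */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}
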
15044diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
15045index 00c77cf..2dc6a2d 100644
15046--- a/arch/x86/kernel/apic/apic_flat_64.c
15047+++ b/arch/x86/kernel/apic/apic_flat_64.c
15048@@ -157,7 +157,7 @@ static int flat_probe(void)
15049 return 1;
15050 }
15051
15052-static struct apic apic_flat = {
15053+static struct apic apic_flat __read_only = {
15054 .name = "flat",
15055 .probe = flat_probe,
15056 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
15057@@ -271,7 +271,7 @@ static int physflat_probe(void)
15058 return 0;
15059 }
15060
15061-static struct apic apic_physflat = {
15062+static struct apic apic_physflat __read_only = {
15063
15064 .name = "physical flat",
15065 .probe = physflat_probe,
15066diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
15067index d50e364..543bee3 100644
15068--- a/arch/x86/kernel/apic/bigsmp_32.c
15069+++ b/arch/x86/kernel/apic/bigsmp_32.c
15070@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
15071 return dmi_bigsmp;
15072 }
15073
15074-static struct apic apic_bigsmp = {
15075+static struct apic apic_bigsmp __read_only = {
15076
15077 .name = "bigsmp",
15078 .probe = probe_bigsmp,
15079diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
15080index 0874799..24a836e 100644
15081--- a/arch/x86/kernel/apic/es7000_32.c
15082+++ b/arch/x86/kernel/apic/es7000_32.c
15083@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
15084 return ret && es7000_apic_is_cluster();
15085 }
15086
15087-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
15088-static struct apic __refdata apic_es7000_cluster = {
15089+static struct apic apic_es7000_cluster __read_only = {
15090
15091 .name = "es7000",
15092 .probe = probe_es7000,
15093@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
15094 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
15095 };
15096
15097-static struct apic __refdata apic_es7000 = {
15098+static struct apic __refdata apic_es7000 __read_only = {
15099
15100 .name = "es7000",
15101 .probe = probe_es7000,
15102diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
15103index 1817fa9..7bff097 100644
15104--- a/arch/x86/kernel/apic/io_apic.c
15105+++ b/arch/x86/kernel/apic/io_apic.c
15106@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
15107 }
15108 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
15109
15110-void lock_vector_lock(void)
15111+void lock_vector_lock(void) __acquires(vector_lock)
15112 {
15113 /* Used to ensure that the online set of cpus does not change
15114 * during assign_irq_vector.
15115@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
15116 raw_spin_lock(&vector_lock);
15117 }
15118
15119-void unlock_vector_lock(void)
15120+void unlock_vector_lock(void) __releases(vector_lock)
15121 {
15122 raw_spin_unlock(&vector_lock);
15123 }
15124@@ -2411,7 +2411,7 @@ static void ack_apic_edge(struct irq_data *data)
15125 ack_APIC_irq();
15126 }
15127
15128-atomic_t irq_mis_count;
15129+atomic_unchecked_t irq_mis_count;
15130
15131 #ifdef CONFIG_GENERIC_PENDING_IRQ
15132 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
15133@@ -2552,7 +2552,7 @@ static void ack_apic_level(struct irq_data *data)
15134 * at the cpu.
15135 */
15136 if (!(v & (1 << (i & 0x1f)))) {
15137- atomic_inc(&irq_mis_count);
15138+ atomic_inc_unchecked(&irq_mis_count);
15139
15140 eoi_ioapic_irq(irq, cfg);
15141 }
15142diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
15143index d661ee9..791fd33 100644
15144--- a/arch/x86/kernel/apic/numaq_32.c
15145+++ b/arch/x86/kernel/apic/numaq_32.c
15146@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
15147 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
15148 }
15149
15150-/* Use __refdata to keep false positive warning calm. */
15151-static struct apic __refdata apic_numaq = {
15152+static struct apic apic_numaq __read_only = {
15153
15154 .name = "NUMAQ",
15155 .probe = probe_numaq,
15156diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
15157index eb35ef9..f184a21 100644
15158--- a/arch/x86/kernel/apic/probe_32.c
15159+++ b/arch/x86/kernel/apic/probe_32.c
15160@@ -72,7 +72,7 @@ static int probe_default(void)
15161 return 1;
15162 }
15163
15164-static struct apic apic_default = {
15165+static struct apic apic_default __read_only = {
15166
15167 .name = "default",
15168 .probe = probe_default,
15169diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
15170index 77c95c0..434f8a4 100644
15171--- a/arch/x86/kernel/apic/summit_32.c
15172+++ b/arch/x86/kernel/apic/summit_32.c
15173@@ -486,7 +486,7 @@ void setup_summit(void)
15174 }
15175 #endif
15176
15177-static struct apic apic_summit = {
15178+static struct apic apic_summit __read_only = {
15179
15180 .name = "summit",
15181 .probe = probe_summit,
15182diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
15183index c88baa4..a89def0 100644
15184--- a/arch/x86/kernel/apic/x2apic_cluster.c
15185+++ b/arch/x86/kernel/apic/x2apic_cluster.c
15186@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
15187 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
15188 }
15189
15190-static struct apic apic_x2apic_cluster = {
15191+static struct apic apic_x2apic_cluster __read_only = {
15192
15193 .name = "cluster x2apic",
15194 .probe = x2apic_cluster_probe,
15195diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
15196index 562a76d..a003c0f 100644
15197--- a/arch/x86/kernel/apic/x2apic_phys.c
15198+++ b/arch/x86/kernel/apic/x2apic_phys.c
15199@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
15200 return apic == &apic_x2apic_phys;
15201 }
15202
15203-static struct apic apic_x2apic_phys = {
15204+static struct apic apic_x2apic_phys __read_only = {
15205
15206 .name = "physical x2apic",
15207 .probe = x2apic_phys_probe,
15208diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
15209index 8cfade9..b9d04fc 100644
15210--- a/arch/x86/kernel/apic/x2apic_uv_x.c
15211+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
15212@@ -333,7 +333,7 @@ static int uv_probe(void)
15213 return apic == &apic_x2apic_uv_x;
15214 }
15215
15216-static struct apic __refdata apic_x2apic_uv_x = {
15217+static struct apic apic_x2apic_uv_x __read_only = {
15218
15219 .name = "UV large system",
15220 .probe = uv_probe,
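
All of the struct apic instances in the hunks above get the same treatment:
they are boot-selected tables of function pointers that never change once
the machine is up, so marking them __read_only takes them out of writable
memory and retires most of the old __refdata annotations, which merely
silenced section-mismatch warnings. A sketch of the assumed marker and its
use (apic_example and example_probe are hypothetical names):

/* assumed definition: data written only during init, write-protected
 * afterwards; any later write must go through pax_open_kernel() */
#define __read_only __attribute__((__section__(".data..read_only")))

static struct apic apic_example __read_only = {
	.name	= "example",
	.probe	= example_probe,	/* pointers an attacker must not flip */
};
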
15221diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
15222index d65464e..1035d31 100644
15223--- a/arch/x86/kernel/apm_32.c
15224+++ b/arch/x86/kernel/apm_32.c
15225@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
15226 * This is for buggy BIOS's that refer to (real mode) segment 0x40
15227 * even though they are called in protected mode.
15228 */
15229-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
15230+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
15231 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
15232
15233 static const char driver_version[] = "1.16ac"; /* no spaces */
15234@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
15235 BUG_ON(cpu != 0);
15236 gdt = get_cpu_gdt_table(cpu);
15237 save_desc_40 = gdt[0x40 / 8];
15238+
15239+ pax_open_kernel();
15240 gdt[0x40 / 8] = bad_bios_desc;
15241+ pax_close_kernel();
15242
15243 apm_irq_save(flags);
15244 APM_DO_SAVE_SEGS;
15245@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
15246 &call->esi);
15247 APM_DO_RESTORE_SEGS;
15248 apm_irq_restore(flags);
15249+
15250+ pax_open_kernel();
15251 gdt[0x40 / 8] = save_desc_40;
15252+ pax_close_kernel();
15253+
15254 put_cpu();
15255
15256 return call->eax & 0xff;
15257@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
15258 BUG_ON(cpu != 0);
15259 gdt = get_cpu_gdt_table(cpu);
15260 save_desc_40 = gdt[0x40 / 8];
15261+
15262+ pax_open_kernel();
15263 gdt[0x40 / 8] = bad_bios_desc;
15264+ pax_close_kernel();
15265
15266 apm_irq_save(flags);
15267 APM_DO_SAVE_SEGS;
15268@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
15269 &call->eax);
15270 APM_DO_RESTORE_SEGS;
15271 apm_irq_restore(flags);
15272+
15273+ pax_open_kernel();
15274 gdt[0x40 / 8] = save_desc_40;
15275+ pax_close_kernel();
15276+
15277 put_cpu();
15278 return error;
15279 }
15280@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
15281 * code to that CPU.
15282 */
15283 gdt = get_cpu_gdt_table(0);
15284+
15285+ pax_open_kernel();
15286 set_desc_base(&gdt[APM_CS >> 3],
15287 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
15288 set_desc_base(&gdt[APM_CS_16 >> 3],
15289 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
15290 set_desc_base(&gdt[APM_DS >> 3],
15291 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
15292+ pax_close_kernel();
15293
15294 proc_create("apm", 0, NULL, &apm_file_ops);
15295
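
The apm hunks above show the canonical KERNEXEC write pattern: GDT entries
are read-only at runtime, so every legitimate update is bracketed by
pax_open_kernel()/pax_close_kernel(), which toggle CR0.WP on the current
CPU for the shortest possible window. A sketch of the native helpers,
assuming the form PaX uses:

static inline unsigned long native_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: RO pages become writable */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;
}

static inline unsigned long native_pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* set WP again */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}

Preemption stays disabled across the window so the task cannot migrate to a
CPU whose CR0.WP was never cleared.
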
15296diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
15297index 2861082..6d4718e 100644
15298--- a/arch/x86/kernel/asm-offsets.c
15299+++ b/arch/x86/kernel/asm-offsets.c
15300@@ -33,6 +33,8 @@ void common(void) {
15301 OFFSET(TI_status, thread_info, status);
15302 OFFSET(TI_addr_limit, thread_info, addr_limit);
15303 OFFSET(TI_preempt_count, thread_info, preempt_count);
15304+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
15305+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
15306
15307 BLANK();
15308 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
15309@@ -53,8 +55,26 @@ void common(void) {
15310 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
15311 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
15312 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
15313+
15314+#ifdef CONFIG_PAX_KERNEXEC
15315+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
15316 #endif
15317
15318+#ifdef CONFIG_PAX_MEMORY_UDEREF
15319+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
15320+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
15321+#ifdef CONFIG_X86_64
15322+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
15323+#endif
15324+#endif
15325+
15326+#endif
15327+
15328+ BLANK();
15329+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
15330+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
15331+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
15332+
15333 #ifdef CONFIG_XEN
15334 BLANK();
15335 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
15336diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
15337index 1b4754f..fbb4227 100644
15338--- a/arch/x86/kernel/asm-offsets_64.c
15339+++ b/arch/x86/kernel/asm-offsets_64.c
15340@@ -76,6 +76,7 @@ int main(void)
15341 BLANK();
15342 #undef ENTRY
15343
15344+ DEFINE(TSS_size, sizeof(struct tss_struct));
15345 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
15346 BLANK();
15347
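
For readers unfamiliar with asm-offsets.c: the file is compiled but never
linked. Its OFFSET()/DEFINE() calls emit "->" markers into the generated
assembly, which Kbuild scrapes into asm-offsets.h, giving entry code access
to constants such as the TI_lowest_stack, PAGE_SIZE_asm and TSS_size values
added above. The mechanism, from include/linux/kbuild.h:

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))
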
15348diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
15349index a0e067d..9c7db16 100644
15350--- a/arch/x86/kernel/cpu/Makefile
15351+++ b/arch/x86/kernel/cpu/Makefile
15352@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
15353 CFLAGS_REMOVE_perf_event.o = -pg
15354 endif
15355
15356-# Make sure load_percpu_segment has no stackprotector
15357-nostackp := $(call cc-option, -fno-stack-protector)
15358-CFLAGS_common.o := $(nostackp)
15359-
15360 obj-y := intel_cacheinfo.o scattered.o topology.o
15361 obj-y += proc.o capflags.o powerflags.o common.o
15362 obj-y += vmware.o hypervisor.o mshyperv.o
15363diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
15364index 1b7d165..b9e2627 100644
15365--- a/arch/x86/kernel/cpu/amd.c
15366+++ b/arch/x86/kernel/cpu/amd.c
15367@@ -738,7 +738,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
15368 unsigned int size)
15369 {
15370 /* AMD errata T13 (order #21922) */
15371- if ((c->x86 == 6)) {
15372+ if (c->x86 == 6) {
15373 /* Duron Rev A0 */
15374 if (c->x86_model == 3 && c->x86_mask == 0)
15375 size = 64;
15376diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
15377index 7505f7b..d59dac0 100644
15378--- a/arch/x86/kernel/cpu/common.c
15379+++ b/arch/x86/kernel/cpu/common.c
15380@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
15381
15382 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
15383
15384-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
15385-#ifdef CONFIG_X86_64
15386- /*
15387- * We need valid kernel segments for data and code in long mode too
15388- * IRET will check the segment types kkeil 2000/10/28
15389- * Also sysret mandates a special GDT layout
15390- *
15391- * TLS descriptors are currently at a different place compared to i386.
15392- * Hopefully nobody expects them at a fixed place (Wine?)
15393- */
15394- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
15395- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
15396- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
15397- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
15398- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
15399- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
15400-#else
15401- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
15402- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15403- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
15404- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
15405- /*
15406- * Segments used for calling PnP BIOS have byte granularity.
15407- * They code segments and data segments have fixed 64k limits,
15408- * the transfer segment sizes are set at run time.
15409- */
15410- /* 32-bit code */
15411- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
15412- /* 16-bit code */
15413- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
15414- /* 16-bit data */
15415- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
15416- /* 16-bit data */
15417- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
15418- /* 16-bit data */
15419- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
15420- /*
15421- * The APM segments have byte granularity and their bases
15422- * are set at run time. All have 64k limits.
15423- */
15424- /* 32-bit code */
15425- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
15426- /* 16-bit code */
15427- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
15428- /* data */
15429- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
15430-
15431- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15432- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
15433- GDT_STACK_CANARY_INIT
15434-#endif
15435-} };
15436-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
15437-
15438 static int __init x86_xsave_setup(char *s)
15439 {
15440 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
15441@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
15442 {
15443 struct desc_ptr gdt_descr;
15444
15445- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
15446+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
15447 gdt_descr.size = GDT_SIZE - 1;
15448 load_gdt(&gdt_descr);
15449 /* Reload the per-cpu base */
15450@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
15451 /* Filter out anything that depends on CPUID levels we don't have */
15452 filter_cpuid_features(c, true);
15453
15454+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
15455+ setup_clear_cpu_cap(X86_FEATURE_SEP);
15456+#endif
15457+
15458 /* If the model name is still unset, do table lookup. */
15459 if (!c->x86_model_id[0]) {
15460 const char *p;
15461@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
15462 }
15463 __setup("clearcpuid=", setup_disablecpuid);
15464
15465+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
15466+EXPORT_PER_CPU_SYMBOL(current_tinfo);
15467+
15468 #ifdef CONFIG_X86_64
15469 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
15470-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
15471- (unsigned long) nmi_idt_table };
15472+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
15473
15474 DEFINE_PER_CPU_FIRST(union irq_stack_union,
15475 irq_stack_union) __aligned(PAGE_SIZE);
15476@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
15477 EXPORT_PER_CPU_SYMBOL(current_task);
15478
15479 DEFINE_PER_CPU(unsigned long, kernel_stack) =
15480- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
15481+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
15482 EXPORT_PER_CPU_SYMBOL(kernel_stack);
15483
15484 DEFINE_PER_CPU(char *, irq_stack_ptr) =
15485@@ -1178,7 +1130,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
15486 {
15487 memset(regs, 0, sizeof(struct pt_regs));
15488 regs->fs = __KERNEL_PERCPU;
15489- regs->gs = __KERNEL_STACK_CANARY;
15490+ savesegment(gs, regs->gs);
15491
15492 return regs;
15493 }
15494@@ -1233,7 +1185,7 @@ void __cpuinit cpu_init(void)
15495 int i;
15496
15497 cpu = stack_smp_processor_id();
15498- t = &per_cpu(init_tss, cpu);
15499+ t = init_tss + cpu;
15500 oist = &per_cpu(orig_ist, cpu);
15501
15502 #ifdef CONFIG_NUMA
15503@@ -1259,7 +1211,7 @@ void __cpuinit cpu_init(void)
15504 switch_to_new_gdt(cpu);
15505 loadsegment(fs, 0);
15506
15507- load_idt((const struct desc_ptr *)&idt_descr);
15508+ load_idt(&idt_descr);
15509
15510 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
15511 syscall_init();
15512@@ -1268,7 +1220,6 @@ void __cpuinit cpu_init(void)
15513 wrmsrl(MSR_KERNEL_GS_BASE, 0);
15514 barrier();
15515
15516- x86_configure_nx();
15517 if (cpu != 0)
15518 enable_x2apic();
15519
15520@@ -1321,7 +1272,7 @@ void __cpuinit cpu_init(void)
15521 {
15522 int cpu = smp_processor_id();
15523 struct task_struct *curr = current;
15524- struct tss_struct *t = &per_cpu(init_tss, cpu);
15525+ struct tss_struct *t = init_tss + cpu;
15526 struct thread_struct *thread = &curr->thread;
15527
15528 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
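
The deleted gdt_page definition above does not leave the kernel without
GDTs: with KERNEXEC the descriptor tables must live in memory that can be
write-protected, so elsewhere in this patch get_cpu_gdt_table() is assumed
to index a fixed, page-aligned array instead of per-cpu data, roughly:

/* sketch of the assumed replacement (defined in the desc.h changes):
 * one page-sized GDT per possible cpu, eligible for write protection */
extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];

static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
	return cpu_gdt_table[cpu];
}

This also explains the perf_event.c hunk further down, which stops reading
gdt_page.gdt directly and goes through get_cpu_gdt_table() instead.
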
15529diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15530index 198e019..867575e 100644
15531--- a/arch/x86/kernel/cpu/intel.c
15532+++ b/arch/x86/kernel/cpu/intel.c
15533@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15534 * Update the IDT descriptor and reload the IDT so that
15535 * it uses the read-only mapped virtual address.
15536 */
15537- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15538+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15539 load_idt(&idt_descr);
15540 }
15541 #endif
15542diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15543index 93c5451..3887433 100644
15544--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15545+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15546@@ -983,6 +983,22 @@ static struct attribute *default_attrs[] = {
15547 };
15548
15549 #ifdef CONFIG_AMD_NB
15550+static struct attribute *default_attrs_amd_nb[] = {
15551+ &type.attr,
15552+ &level.attr,
15553+ &coherency_line_size.attr,
15554+ &physical_line_partition.attr,
15555+ &ways_of_associativity.attr,
15556+ &number_of_sets.attr,
15557+ &size.attr,
15558+ &shared_cpu_map.attr,
15559+ &shared_cpu_list.attr,
15560+ NULL,
15561+ NULL,
15562+ NULL,
15563+ NULL
15564+};
15565+
15566 static struct attribute ** __cpuinit amd_l3_attrs(void)
15567 {
15568 static struct attribute **attrs;
15569@@ -993,18 +1009,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
15570
15571 n = ARRAY_SIZE(default_attrs);
15572
15573- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
15574- n += 2;
15575-
15576- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
15577- n += 1;
15578-
15579- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
15580- if (attrs == NULL)
15581- return attrs = default_attrs;
15582-
15583- for (n = 0; default_attrs[n]; n++)
15584- attrs[n] = default_attrs[n];
15585+ attrs = default_attrs_amd_nb;
15586
15587 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
15588 attrs[n++] = &cache_disable_0.attr;
15589@@ -1055,6 +1060,13 @@ static struct kobj_type ktype_cache = {
15590 .default_attrs = default_attrs,
15591 };
15592
15593+#ifdef CONFIG_AMD_NB
15594+static struct kobj_type ktype_cache_amd_nb = {
15595+ .sysfs_ops = &sysfs_ops,
15596+ .default_attrs = default_attrs_amd_nb,
15597+};
15598+#endif
15599+
15600 static struct kobj_type ktype_percpu_entry = {
15601 .sysfs_ops = &sysfs_ops,
15602 };
15603@@ -1120,20 +1132,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
15604 return retval;
15605 }
15606
15607+#ifdef CONFIG_AMD_NB
15608+ amd_l3_attrs();
15609+#endif
15610+
15611 for (i = 0; i < num_cache_leaves; i++) {
15612+ struct kobj_type *ktype;
15613+
15614 this_object = INDEX_KOBJECT_PTR(cpu, i);
15615 this_object->cpu = cpu;
15616 this_object->index = i;
15617
15618 this_leaf = CPUID4_INFO_IDX(cpu, i);
15619
15620- ktype_cache.default_attrs = default_attrs;
15621+ ktype = &ktype_cache;
15622 #ifdef CONFIG_AMD_NB
15623 if (this_leaf->base.nb)
15624- ktype_cache.default_attrs = amd_l3_attrs();
15625+ ktype = &ktype_cache_amd_nb;
15626 #endif
15627 retval = kobject_init_and_add(&(this_object->kobj),
15628- &ktype_cache,
15629+ ktype,
15630 per_cpu(ici_cache_kobject, cpu),
15631 "index%1lu", i);
15632 if (unlikely(retval)) {
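
The cacheinfo rework above removes the runtime kzalloc-and-copy of the
attribute array along with the mutation of ktype_cache.default_attrs, both
of which break once kobj_type data is meant to stay read-only. Instead, the
AMD north-bridge case gets a dedicated static ktype whose attribute list
reserves NULL tail slots for the optional entries. A sketch of the
reserved-slot pattern (fill_spare_attrs is a hypothetical helper):

static struct attribute *attrs_with_spares[] = {
	&size.attr,
	&shared_cpu_map.attr,
	NULL, NULL, NULL,	/* two spare slots plus the terminator */
};

static void __init fill_spare_attrs(void)
{
	size_t n = 2;		/* index of the first spare slot */

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs_with_spares[n++] = &cache_disable_0.attr;
		attrs_with_spares[n++] = &cache_disable_1.attr;
	}
	/* the array stays NULL-terminated on every path */
}
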
15633diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15634index 46cbf86..55c7292 100644
15635--- a/arch/x86/kernel/cpu/mcheck/mce.c
15636+++ b/arch/x86/kernel/cpu/mcheck/mce.c
15637@@ -45,6 +45,7 @@
15638 #include <asm/processor.h>
15639 #include <asm/mce.h>
15640 #include <asm/msr.h>
15641+#include <asm/local.h>
15642
15643 #include "mce-internal.h"
15644
15645@@ -254,7 +255,7 @@ static void print_mce(struct mce *m)
15646 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15647 m->cs, m->ip);
15648
15649- if (m->cs == __KERNEL_CS)
15650+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15651 print_symbol("{%s}", m->ip);
15652 pr_cont("\n");
15653 }
15654@@ -287,10 +288,10 @@ static void print_mce(struct mce *m)
15655
15656 #define PANIC_TIMEOUT 5 /* 5 seconds */
15657
15658-static atomic_t mce_paniced;
15659+static atomic_unchecked_t mce_paniced;
15660
15661 static int fake_panic;
15662-static atomic_t mce_fake_paniced;
15663+static atomic_unchecked_t mce_fake_paniced;
15664
15665 /* Panic in progress. Enable interrupts and wait for final IPI */
15666 static void wait_for_panic(void)
15667@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15668 /*
15669 * Make sure only one CPU runs in machine check panic
15670 */
15671- if (atomic_inc_return(&mce_paniced) > 1)
15672+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15673 wait_for_panic();
15674 barrier();
15675
15676@@ -322,7 +323,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15677 console_verbose();
15678 } else {
15679 /* Don't log too much for fake panic */
15680- if (atomic_inc_return(&mce_fake_paniced) > 1)
15681+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15682 return;
15683 }
15684 /* First print corrected ones that are still unlogged */
15685@@ -694,7 +695,7 @@ static int mce_timed_out(u64 *t)
15686 * might have been modified by someone else.
15687 */
15688 rmb();
15689- if (atomic_read(&mce_paniced))
15690+ if (atomic_read_unchecked(&mce_paniced))
15691 wait_for_panic();
15692 if (!monarch_timeout)
15693 goto out;
15694@@ -1659,7 +1660,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15695 }
15696
15697 /* Call the installed machine check handler for this CPU setup. */
15698-void (*machine_check_vector)(struct pt_regs *, long error_code) =
15699+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15700 unexpected_machine_check;
15701
15702 /*
15703@@ -1682,7 +1683,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15704 return;
15705 }
15706
15707+ pax_open_kernel();
15708 machine_check_vector = do_machine_check;
15709+ pax_close_kernel();
15710
15711 __mcheck_cpu_init_generic();
15712 __mcheck_cpu_init_vendor(c);
15713@@ -1696,7 +1699,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
15714 */
15715
15716 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
15717-static int mce_chrdev_open_count; /* #times opened */
15718+static local_t mce_chrdev_open_count; /* #times opened */
15719 static int mce_chrdev_open_exclu; /* already open exclusive? */
15720
15721 static int mce_chrdev_open(struct inode *inode, struct file *file)
15722@@ -1704,7 +1707,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15723 spin_lock(&mce_chrdev_state_lock);
15724
15725 if (mce_chrdev_open_exclu ||
15726- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
15727+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
15728 spin_unlock(&mce_chrdev_state_lock);
15729
15730 return -EBUSY;
15731@@ -1712,7 +1715,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
15732
15733 if (file->f_flags & O_EXCL)
15734 mce_chrdev_open_exclu = 1;
15735- mce_chrdev_open_count++;
15736+ local_inc(&mce_chrdev_open_count);
15737
15738 spin_unlock(&mce_chrdev_state_lock);
15739
15740@@ -1723,7 +1726,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
15741 {
15742 spin_lock(&mce_chrdev_state_lock);
15743
15744- mce_chrdev_open_count--;
15745+ local_dec(&mce_chrdev_open_count);
15746 mce_chrdev_open_exclu = 0;
15747
15748 spin_unlock(&mce_chrdev_state_lock);
15749@@ -2367,7 +2370,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
15750 return NOTIFY_OK;
15751 }
15752
15753-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
15754+static struct notifier_block mce_cpu_notifier __cpuinitconst = {
15755 .notifier_call = mce_cpu_callback,
15756 };
15757
15758@@ -2445,7 +2448,7 @@ struct dentry *mce_get_debugfs_dir(void)
15759 static void mce_reset(void)
15760 {
15761 cpu_missing = 0;
15762- atomic_set(&mce_fake_paniced, 0);
15763+ atomic_set_unchecked(&mce_fake_paniced, 0);
15764 atomic_set(&mce_executing, 0);
15765 atomic_set(&mce_callin, 0);
15766 atomic_set(&global_nwo, 0);
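
Note on the mce_chrdev_open_count change above: every access already sits
under mce_chrdev_state_lock, so the visible behaviour is unchanged. The
point of switching the plain int to a local_t (asm/local.h) is that the
local_* ops are atomic and, in this patch, are assumed to carry the same
overflow hardening as the other protected counter types. Usage in brief
(example_open/example_release are hypothetical):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static int example_open(void)
{
	if (local_read(&open_count))	/* already open? */
		return -EBUSY;
	local_inc(&open_count);
	return 0;
}

static void example_release(void)
{
	local_dec(&open_count);
}
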
15767diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15768index 2d5454c..51987eb 100644
15769--- a/arch/x86/kernel/cpu/mcheck/p5.c
15770+++ b/arch/x86/kernel/cpu/mcheck/p5.c
15771@@ -11,6 +11,7 @@
15772 #include <asm/processor.h>
15773 #include <asm/mce.h>
15774 #include <asm/msr.h>
15775+#include <asm/pgtable.h>
15776
15777 /* By default disabled */
15778 int mce_p5_enabled __read_mostly;
15779@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15780 if (!cpu_has(c, X86_FEATURE_MCE))
15781 return;
15782
15783+ pax_open_kernel();
15784 machine_check_vector = pentium_machine_check;
15785+ pax_close_kernel();
15786 /* Make sure the vector pointer is visible before we enable MCEs: */
15787 wmb();
15788
15789diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15790index 2d7998f..17c9de1 100644
15791--- a/arch/x86/kernel/cpu/mcheck/winchip.c
15792+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15793@@ -10,6 +10,7 @@
15794 #include <asm/processor.h>
15795 #include <asm/mce.h>
15796 #include <asm/msr.h>
15797+#include <asm/pgtable.h>
15798
15799 /* Machine check handler for WinChip C6: */
15800 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15801@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15802 {
15803 u32 lo, hi;
15804
15805+ pax_open_kernel();
15806 machine_check_vector = winchip_machine_check;
15807+ pax_close_kernel();
15808 /* Make sure the vector pointer is visible before we enable MCEs: */
15809 wmb();
15810
15811diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15812index 6b96110..0da73eb 100644
15813--- a/arch/x86/kernel/cpu/mtrr/main.c
15814+++ b/arch/x86/kernel/cpu/mtrr/main.c
15815@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
15816 u64 size_or_mask, size_and_mask;
15817 static bool mtrr_aps_delayed_init;
15818
15819-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15820+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15821
15822 const struct mtrr_ops *mtrr_if;
15823
15824diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15825index df5e41f..816c719 100644
15826--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15827+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15828@@ -25,7 +25,7 @@ struct mtrr_ops {
15829 int (*validate_add_page)(unsigned long base, unsigned long size,
15830 unsigned int type);
15831 int (*have_wrcomb)(void);
15832-};
15833+} __do_const;
15834
15835 extern int generic_get_free_region(unsigned long base, unsigned long size,
15836 int replace_reg);
15837diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15838index d18b2b8..d3b834c 100644
15839--- a/arch/x86/kernel/cpu/perf_event.c
15840+++ b/arch/x86/kernel/cpu/perf_event.c
15841@@ -1759,7 +1759,7 @@ static unsigned long get_segment_base(unsigned int segment)
15842 if (idx > GDT_ENTRIES)
15843 return 0;
15844
15845- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
15846+ desc = get_cpu_gdt_table(smp_processor_id());
15847 }
15848
15849 return get_desc_base(desc + idx);
15850@@ -1849,7 +1849,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
15851 break;
15852
15853 perf_callchain_store(entry, frame.return_address);
15854- fp = frame.next_frame;
15855+ fp = (const void __force_user *)frame.next_frame;
15856 }
15857 }
15858
15859diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
15860index 324bb52..1a93d85 100644
15861--- a/arch/x86/kernel/cpu/perf_event_intel.c
15862+++ b/arch/x86/kernel/cpu/perf_event_intel.c
15863@@ -1949,10 +1949,10 @@ __init int intel_pmu_init(void)
15864 * v2 and above have a perf capabilities MSR
15865 */
15866 if (version > 1) {
15867- u64 capabilities;
15868+ u64 capabilities = x86_pmu.intel_cap.capabilities;
15869
15870- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
15871- x86_pmu.intel_cap.capabilities = capabilities;
15872+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
15873+ x86_pmu.intel_cap.capabilities = capabilities;
15874 }
15875
15876 intel_ds_init();
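
The perf hunk above swaps a raw rdmsrl() of MSR_IA32_PERF_CAPABILITIES for
rdmsrl_safe(), which traps the #GP an absent MSR raises, a real case under
hypervisors that advertise a v2+ PMU without implementing that MSR. The
general pattern for possibly-missing MSRs (read_perf_caps is a hypothetical
helper):

static u64 read_perf_caps(void)
{
	u64 val;

	/* rdmsrl_safe() returns 0 on success, non-zero if the read faulted */
	if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &val))
		val = 0;		/* MSR absent: keep a safe default */
	return val;
}
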
15877diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15878index 13ad899..f642b9a 100644
15879--- a/arch/x86/kernel/crash.c
15880+++ b/arch/x86/kernel/crash.c
15881@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
15882 {
15883 #ifdef CONFIG_X86_32
15884 struct pt_regs fixed_regs;
15885-#endif
15886
15887-#ifdef CONFIG_X86_32
15888- if (!user_mode_vm(regs)) {
15889+ if (!user_mode(regs)) {
15890 crash_fixup_ss_esp(&fixed_regs, regs);
15891 regs = &fixed_regs;
15892 }
15893diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15894index 37250fe..bf2ec74 100644
15895--- a/arch/x86/kernel/doublefault_32.c
15896+++ b/arch/x86/kernel/doublefault_32.c
15897@@ -11,7 +11,7 @@
15898
15899 #define DOUBLEFAULT_STACKSIZE (1024)
15900 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15901-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15902+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15903
15904 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15905
15906@@ -21,7 +21,7 @@ static void doublefault_fn(void)
15907 unsigned long gdt, tss;
15908
15909 store_gdt(&gdt_desc);
15910- gdt = gdt_desc.address;
15911+ gdt = (unsigned long)gdt_desc.address;
15912
15913 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15914
15915@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15916 /* 0x2 bit is always set */
15917 .flags = X86_EFLAGS_SF | 0x2,
15918 .sp = STACK_START,
15919- .es = __USER_DS,
15920+ .es = __KERNEL_DS,
15921 .cs = __KERNEL_CS,
15922 .ss = __KERNEL_DS,
15923- .ds = __USER_DS,
15924+ .ds = __KERNEL_DS,
15925 .fs = __KERNEL_PERCPU,
15926
15927 .__cr3 = __pa_nodebug(swapper_pg_dir),
15928diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15929index ae42418b..787c16b 100644
15930--- a/arch/x86/kernel/dumpstack.c
15931+++ b/arch/x86/kernel/dumpstack.c
15932@@ -2,6 +2,9 @@
15933 * Copyright (C) 1991, 1992 Linus Torvalds
15934 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15935 */
15936+#ifdef CONFIG_GRKERNSEC_HIDESYM
15937+#define __INCLUDED_BY_HIDESYM 1
15938+#endif
15939 #include <linux/kallsyms.h>
15940 #include <linux/kprobes.h>
15941 #include <linux/uaccess.h>
15942@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
15943 static void
15944 print_ftrace_graph_addr(unsigned long addr, void *data,
15945 const struct stacktrace_ops *ops,
15946- struct thread_info *tinfo, int *graph)
15947+ struct task_struct *task, int *graph)
15948 {
15949- struct task_struct *task;
15950 unsigned long ret_addr;
15951 int index;
15952
15953 if (addr != (unsigned long)return_to_handler)
15954 return;
15955
15956- task = tinfo->task;
15957 index = task->curr_ret_stack;
15958
15959 if (!task->ret_stack || index < *graph)
15960@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15961 static inline void
15962 print_ftrace_graph_addr(unsigned long addr, void *data,
15963 const struct stacktrace_ops *ops,
15964- struct thread_info *tinfo, int *graph)
15965+ struct task_struct *task, int *graph)
15966 { }
15967 #endif
15968
15969@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15970 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15971 */
15972
15973-static inline int valid_stack_ptr(struct thread_info *tinfo,
15974- void *p, unsigned int size, void *end)
15975+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15976 {
15977- void *t = tinfo;
15978 if (end) {
15979 if (p < end && p >= (end-THREAD_SIZE))
15980 return 1;
15981@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15982 }
15983
15984 unsigned long
15985-print_context_stack(struct thread_info *tinfo,
15986+print_context_stack(struct task_struct *task, void *stack_start,
15987 unsigned long *stack, unsigned long bp,
15988 const struct stacktrace_ops *ops, void *data,
15989 unsigned long *end, int *graph)
15990 {
15991 struct stack_frame *frame = (struct stack_frame *)bp;
15992
15993- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15994+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15995 unsigned long addr;
15996
15997 addr = *stack;
15998@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
15999 } else {
16000 ops->address(data, addr, 0);
16001 }
16002- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
16003+ print_ftrace_graph_addr(addr, data, ops, task, graph);
16004 }
16005 stack++;
16006 }
16007@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
16008 EXPORT_SYMBOL_GPL(print_context_stack);
16009
16010 unsigned long
16011-print_context_stack_bp(struct thread_info *tinfo,
16012+print_context_stack_bp(struct task_struct *task, void *stack_start,
16013 unsigned long *stack, unsigned long bp,
16014 const struct stacktrace_ops *ops, void *data,
16015 unsigned long *end, int *graph)
16016@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
16017 struct stack_frame *frame = (struct stack_frame *)bp;
16018 unsigned long *ret_addr = &frame->return_address;
16019
16020- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
16021+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
16022 unsigned long addr = *ret_addr;
16023
16024 if (!__kernel_text_address(addr))
16025@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
16026 ops->address(data, addr, 1);
16027 frame = frame->next_frame;
16028 ret_addr = &frame->return_address;
16029- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
16030+ print_ftrace_graph_addr(addr, data, ops, task, graph);
16031 }
16032
16033 return (unsigned long)frame;
16034@@ -189,7 +188,7 @@ void dump_stack(void)
16035
16036 bp = stack_frame(current, NULL);
16037 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
16038- current->pid, current->comm, print_tainted(),
16039+ task_pid_nr(current), current->comm, print_tainted(),
16040 init_utsname()->release,
16041 (int)strcspn(init_utsname()->version, " "),
16042 init_utsname()->version);
16043@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
16044 }
16045 EXPORT_SYMBOL_GPL(oops_begin);
16046
16047+extern void gr_handle_kernel_exploit(void);
16048+
16049 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
16050 {
16051 if (regs && kexec_should_crash(current))
16052@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
16053 panic("Fatal exception in interrupt");
16054 if (panic_on_oops)
16055 panic("Fatal exception");
16056- do_exit(signr);
16057+
16058+ gr_handle_kernel_exploit();
16059+
16060+ do_group_exit(signr);
16061 }
16062
16063 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
16064@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
16065 print_modules();
16066 show_regs(regs);
16067 #ifdef CONFIG_X86_32
16068- if (user_mode_vm(regs)) {
16069+ if (user_mode(regs)) {
16070 sp = regs->sp;
16071 ss = regs->ss & 0xffff;
16072 } else {
16073@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
16074 unsigned long flags = oops_begin();
16075 int sig = SIGSEGV;
16076
16077- if (!user_mode_vm(regs))
16078+ if (!user_mode(regs))
16079 report_bug(regs->ip, regs);
16080
16081 if (__die(str, regs, err))
16082diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
16083index 1038a41..db2c12b 100644
16084--- a/arch/x86/kernel/dumpstack_32.c
16085+++ b/arch/x86/kernel/dumpstack_32.c
16086@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16087 bp = stack_frame(task, regs);
16088
16089 for (;;) {
16090- struct thread_info *context;
16091+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
16092
16093- context = (struct thread_info *)
16094- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
16095- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
16096+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
16097
16098- stack = (unsigned long *)context->previous_esp;
16099- if (!stack)
16100+ if (stack_start == task_stack_page(task))
16101 break;
16102+ stack = *(unsigned long **)stack_start;
16103 if (ops->stack(data, "IRQ") < 0)
16104 break;
16105 touch_nmi_watchdog();
16106@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
16107 {
16108 int i;
16109
16110- __show_regs(regs, !user_mode_vm(regs));
16111+ __show_regs(regs, !user_mode(regs));
16112
16113 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
16114 TASK_COMM_LEN, current->comm, task_pid_nr(current),
16115@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
16116 * When in-kernel, we also print out the stack and code at the
16117 * time of the fault..
16118 */
16119- if (!user_mode_vm(regs)) {
16120+ if (!user_mode(regs)) {
16121 unsigned int code_prologue = code_bytes * 43 / 64;
16122 unsigned int code_len = code_bytes;
16123 unsigned char c;
16124 u8 *ip;
16125+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
16126
16127 pr_emerg("Stack:\n");
16128 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
16129
16130 pr_emerg("Code:");
16131
16132- ip = (u8 *)regs->ip - code_prologue;
16133+ ip = (u8 *)regs->ip - code_prologue + cs_base;
16134 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
16135 /* try starting at IP */
16136- ip = (u8 *)regs->ip;
16137+ ip = (u8 *)regs->ip + cs_base;
16138 code_len = code_len - code_prologue + 1;
16139 }
16140 for (i = 0; i < code_len; i++, ip++) {
16141@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
16142 pr_cont(" Bad EIP value.");
16143 break;
16144 }
16145- if (ip == (u8 *)regs->ip)
16146+ if (ip == (u8 *)regs->ip + cs_base)
16147 pr_cont(" <%02x>", c);
16148 else
16149 pr_cont(" %02x", c);
16150@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
16151 {
16152 unsigned short ud2;
16153
16154+ ip = ktla_ktva(ip);
16155 if (ip < PAGE_OFFSET)
16156 return 0;
16157 if (probe_kernel_address((unsigned short *)ip, ud2))
16158@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
16159
16160 return ud2 == 0x0b0f;
16161 }
16162+
16163+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16164+void pax_check_alloca(unsigned long size)
16165+{
16166+ unsigned long sp = (unsigned long)&sp, stack_left;
16167+
16168+ /* all kernel stacks are of the same size */
16169+ stack_left = sp & (THREAD_SIZE - 1);
16170+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16171+}
16172+EXPORT_SYMBOL(pax_check_alloca);
16173+#endif
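
pax_check_alloca() above is the runtime half of PAX_MEMORY_STACKLEAK: a gcc
plugin inserts a call before every variable-sized stack allocation, and the
check BUGs if the allocation would leave less than a 256-byte red zone at
the stack bottom. On i386 every kernel stack is THREAD_SIZE-aligned, which
is why a single mask suffices. Conceptually, instrumented code behaves like
this (illustrative only; the plugin operates on the compiler's IR, not on
source):

void *buf;

pax_check_alloca(len);		/* BUG() if len would breach the red zone */
buf = __builtin_alloca(len);
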
16174diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
16175index b653675..51cc8c0 100644
16176--- a/arch/x86/kernel/dumpstack_64.c
16177+++ b/arch/x86/kernel/dumpstack_64.c
16178@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16179 unsigned long *irq_stack_end =
16180 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
16181 unsigned used = 0;
16182- struct thread_info *tinfo;
16183 int graph = 0;
16184 unsigned long dummy;
16185+ void *stack_start;
16186
16187 if (!task)
16188 task = current;
16189@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16190 * current stack address. If the stacks consist of nested
16191 * exceptions
16192 */
16193- tinfo = task_thread_info(task);
16194 for (;;) {
16195 char *id;
16196 unsigned long *estack_end;
16197+
16198 estack_end = in_exception_stack(cpu, (unsigned long)stack,
16199 &used, &id);
16200
16201@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16202 if (ops->stack(data, id) < 0)
16203 break;
16204
16205- bp = ops->walk_stack(tinfo, stack, bp, ops,
16206+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
16207 data, estack_end, &graph);
16208 ops->stack(data, "<EOE>");
16209 /*
16210@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16211 * second-to-last pointer (index -2 to end) in the
16212 * exception stack:
16213 */
16214+ if ((u16)estack_end[-1] != __KERNEL_DS)
16215+ goto out;
16216 stack = (unsigned long *) estack_end[-2];
16217 continue;
16218 }
16219@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16220 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
16221 if (ops->stack(data, "IRQ") < 0)
16222 break;
16223- bp = ops->walk_stack(tinfo, stack, bp,
16224+ bp = ops->walk_stack(task, irq_stack, stack, bp,
16225 ops, data, irq_stack_end, &graph);
16226 /*
16227 * We link to the next stack (which would be
16228@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
16229 /*
16230 * This handles the process stack:
16231 */
16232- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
16233+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
16234+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
16235+out:
16236 put_cpu();
16237 }
16238 EXPORT_SYMBOL(dump_trace);
16239@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
16240 {
16241 int i;
16242 unsigned long sp;
16243- const int cpu = smp_processor_id();
16244+ const int cpu = raw_smp_processor_id();
16245 struct task_struct *cur = current;
16246
16247 sp = regs->sp;
16248@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
16249
16250 return ud2 == 0x0b0f;
16251 }
16252+
16253+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16254+void pax_check_alloca(unsigned long size)
16255+{
16256+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
16257+ unsigned cpu, used;
16258+ char *id;
16259+
16260+ /* check the process stack first */
16261+ stack_start = (unsigned long)task_stack_page(current);
16262+ stack_end = stack_start + THREAD_SIZE;
16263+ if (likely(stack_start <= sp && sp < stack_end)) {
16264+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
16265+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16266+ return;
16267+ }
16268+
16269+ cpu = get_cpu();
16270+
16271+ /* check the irq stacks */
16272+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
16273+ stack_start = stack_end - IRQ_STACK_SIZE;
16274+ if (stack_start <= sp && sp < stack_end) {
16275+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
16276+ put_cpu();
16277+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16278+ return;
16279+ }
16280+
16281+ /* check the exception stacks */
16282+ used = 0;
16283+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
16284+ stack_start = stack_end - EXCEPTION_STKSZ;
16285+ if (stack_end && stack_start <= sp && sp < stack_end) {
16286+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
16287+ put_cpu();
16288+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
16289+ return;
16290+ }
16291+
16292+ put_cpu();
16293+
16294+ /* unknown stack */
16295+ BUG();
16296+}
16297+EXPORT_SYMBOL(pax_check_alloca);
16298+#endif
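
The 64-bit variant above must first work out which stack it is running on,
since x86_64 kernels use three kinds: the THREAD_SIZE process stack, the
per-cpu IRQ_STACK_SIZE irq stacks, and the EXCEPTION_STKSZ IST stacks; an
unrecognized stack pointer is treated as corruption and BUGs outright.
Every branch applies the same invariant, restated here as a sketch
(check_stack_room is hypothetical):

/* keep a 256-byte red zone at the bottom of whichever stack we are on */
static inline void check_stack_room(unsigned long sp,
				    unsigned long stk_size,	/* power of two */
				    unsigned long alloc_size)
{
	unsigned long stack_left = sp & (stk_size - 1);

	BUG_ON(stack_left < 256 || alloc_size >= stack_left - 256);
}
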
16299diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
16300index 9b9f18b..9fcaa04 100644
16301--- a/arch/x86/kernel/early_printk.c
16302+++ b/arch/x86/kernel/early_printk.c
16303@@ -7,6 +7,7 @@
16304 #include <linux/pci_regs.h>
16305 #include <linux/pci_ids.h>
16306 #include <linux/errno.h>
16307+#include <linux/sched.h>
16308 #include <asm/io.h>
16309 #include <asm/processor.h>
16310 #include <asm/fcntl.h>
16311diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
16312index cf8639b..6c6a674 100644
16313--- a/arch/x86/kernel/entry_32.S
16314+++ b/arch/x86/kernel/entry_32.S
16315@@ -177,13 +177,153 @@
16316 /*CFI_REL_OFFSET gs, PT_GS*/
16317 .endm
16318 .macro SET_KERNEL_GS reg
16319+
16320+#ifdef CONFIG_CC_STACKPROTECTOR
16321 movl $(__KERNEL_STACK_CANARY), \reg
16322+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16323+ movl $(__USER_DS), \reg
16324+#else
16325+ xorl \reg, \reg
16326+#endif
16327+
16328 movl \reg, %gs
16329 .endm
16330
16331 #endif /* CONFIG_X86_32_LAZY_GS */
16332
16333-.macro SAVE_ALL
16334+.macro pax_enter_kernel
16335+#ifdef CONFIG_PAX_KERNEXEC
16336+ call pax_enter_kernel
16337+#endif
16338+.endm
16339+
16340+.macro pax_exit_kernel
16341+#ifdef CONFIG_PAX_KERNEXEC
16342+ call pax_exit_kernel
16343+#endif
16344+.endm
16345+
16346+#ifdef CONFIG_PAX_KERNEXEC
16347+ENTRY(pax_enter_kernel)
16348+#ifdef CONFIG_PARAVIRT
16349+ pushl %eax
16350+ pushl %ecx
16351+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16352+ mov %eax, %esi
16353+#else
16354+ mov %cr0, %esi
16355+#endif
16356+ bts $16, %esi
16357+ jnc 1f
16358+ mov %cs, %esi
16359+ cmp $__KERNEL_CS, %esi
16360+ jz 3f
16361+ ljmp $__KERNEL_CS, $3f
16362+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16363+2:
16364+#ifdef CONFIG_PARAVIRT
16365+ mov %esi, %eax
16366+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16367+#else
16368+ mov %esi, %cr0
16369+#endif
16370+3:
16371+#ifdef CONFIG_PARAVIRT
16372+ popl %ecx
16373+ popl %eax
16374+#endif
16375+ ret
16376+ENDPROC(pax_enter_kernel)
16377+
16378+ENTRY(pax_exit_kernel)
16379+#ifdef CONFIG_PARAVIRT
16380+ pushl %eax
16381+ pushl %ecx
16382+#endif
16383+ mov %cs, %esi
16384+ cmp $__KERNEXEC_KERNEL_CS, %esi
16385+ jnz 2f
16386+#ifdef CONFIG_PARAVIRT
16387+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16388+ mov %eax, %esi
16389+#else
16390+ mov %cr0, %esi
16391+#endif
16392+ btr $16, %esi
16393+ ljmp $__KERNEL_CS, $1f
16394+1:
16395+#ifdef CONFIG_PARAVIRT
16396+ mov %esi, %eax
16397+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16398+#else
16399+ mov %esi, %cr0
16400+#endif
16401+2:
16402+#ifdef CONFIG_PARAVIRT
16403+ popl %ecx
16404+ popl %eax
16405+#endif
16406+ ret
16407+ENDPROC(pax_exit_kernel)
16408+#endif
16409+
16410+.macro pax_erase_kstack
16411+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16412+ call pax_erase_kstack
16413+#endif
16414+.endm
16415+
16416+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16417+/*
16418+ * ebp: thread_info
16419+ */
16420+ENTRY(pax_erase_kstack)
16421+ pushl %edi
16422+ pushl %ecx
16423+ pushl %eax
16424+
16425+ mov TI_lowest_stack(%ebp), %edi
16426+ mov $-0xBEEF, %eax
16427+ std
16428+
16429+1: mov %edi, %ecx
16430+ and $THREAD_SIZE_asm - 1, %ecx
16431+ shr $2, %ecx
16432+ repne scasl
16433+ jecxz 2f
16434+
16435+ cmp $2*16, %ecx
16436+ jc 2f
16437+
16438+ mov $2*16, %ecx
16439+ repe scasl
16440+ jecxz 2f
16441+ jne 1b
16442+
16443+2: cld
16444+ mov %esp, %ecx
16445+ sub %edi, %ecx
16446+
16447+ cmp $THREAD_SIZE_asm, %ecx
16448+ jb 3f
16449+ ud2
16450+3:
16451+
16452+ shr $2, %ecx
16453+ rep stosl
16454+
16455+ mov TI_task_thread_sp0(%ebp), %edi
16456+ sub $128, %edi
16457+ mov %edi, TI_lowest_stack(%ebp)
16458+
16459+ popl %eax
16460+ popl %ecx
16461+ popl %edi
16462+ ret
16463+ENDPROC(pax_erase_kstack)
16464+#endif
16465+
16466+.macro __SAVE_ALL _DS
16467 cld
16468 PUSH_GS
16469 pushl_cfi %fs
16470@@ -206,7 +346,7 @@
16471 CFI_REL_OFFSET ecx, 0
16472 pushl_cfi %ebx
16473 CFI_REL_OFFSET ebx, 0
16474- movl $(__USER_DS), %edx
16475+ movl $\_DS, %edx
16476 movl %edx, %ds
16477 movl %edx, %es
16478 movl $(__KERNEL_PERCPU), %edx
16479@@ -214,6 +354,15 @@
16480 SET_KERNEL_GS %edx
16481 .endm
16482
16483+.macro SAVE_ALL
16484+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16485+ __SAVE_ALL __KERNEL_DS
16486+ pax_enter_kernel
16487+#else
16488+ __SAVE_ALL __USER_DS
16489+#endif
16490+.endm
16491+
16492 .macro RESTORE_INT_REGS
16493 popl_cfi %ebx
16494 CFI_RESTORE ebx
16495@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
16496 popfl_cfi
16497 jmp syscall_exit
16498 CFI_ENDPROC
16499-END(ret_from_fork)
16500+ENDPROC(ret_from_fork)
16501
16502 ENTRY(ret_from_kernel_thread)
16503 CFI_STARTPROC
16504@@ -344,7 +493,15 @@ ret_from_intr:
16505 andl $SEGMENT_RPL_MASK, %eax
16506 #endif
16507 cmpl $USER_RPL, %eax
16508+
16509+#ifdef CONFIG_PAX_KERNEXEC
16510+ jae resume_userspace
16511+
16512+ pax_exit_kernel
16513+ jmp resume_kernel
16514+#else
16515 jb resume_kernel # not returning to v8086 or userspace
16516+#endif
16517
16518 ENTRY(resume_userspace)
16519 LOCKDEP_SYS_EXIT
16520@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
16521 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16522 # int/exception return?
16523 jne work_pending
16524- jmp restore_all
16525-END(ret_from_exception)
16526+ jmp restore_all_pax
16527+ENDPROC(ret_from_exception)
16528
16529 #ifdef CONFIG_PREEMPT
16530 ENTRY(resume_kernel)
16531@@ -372,7 +529,7 @@ need_resched:
16532 jz restore_all
16533 call preempt_schedule_irq
16534 jmp need_resched
16535-END(resume_kernel)
16536+ENDPROC(resume_kernel)
16537 #endif
16538 CFI_ENDPROC
16539 /*
16540@@ -406,30 +563,45 @@ sysenter_past_esp:
16541 /*CFI_REL_OFFSET cs, 0*/
16542 /*
16543 * Push current_thread_info()->sysenter_return to the stack.
16544- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16545- * pushed above; +8 corresponds to copy_thread's esp0 setting.
16546 */
16547- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
16548+ pushl_cfi $0
16549 CFI_REL_OFFSET eip, 0
16550
16551 pushl_cfi %eax
16552 SAVE_ALL
16553+ GET_THREAD_INFO(%ebp)
16554+ movl TI_sysenter_return(%ebp),%ebp
16555+ movl %ebp,PT_EIP(%esp)
16556 ENABLE_INTERRUPTS(CLBR_NONE)
16557
16558 /*
16559 * Load the potential sixth argument from user stack.
16560 * Careful about security.
16561 */
16562+ movl PT_OLDESP(%esp),%ebp
16563+
16564+#ifdef CONFIG_PAX_MEMORY_UDEREF
16565+ mov PT_OLDSS(%esp),%ds
16566+1: movl %ds:(%ebp),%ebp
16567+ push %ss
16568+ pop %ds
16569+#else
16570 cmpl $__PAGE_OFFSET-3,%ebp
16571 jae syscall_fault
16572 ASM_STAC
16573 1: movl (%ebp),%ebp
16574 ASM_CLAC
16575+#endif
16576+
16577 movl %ebp,PT_EBP(%esp)
16578 _ASM_EXTABLE(1b,syscall_fault)
16579
16580 GET_THREAD_INFO(%ebp)
16581
16582+#ifdef CONFIG_PAX_RANDKSTACK
16583+ pax_erase_kstack
16584+#endif
16585+
16586 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16587 jnz sysenter_audit
16588 sysenter_do_call:
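Note: in the UDEREF branch above, the kernel's %ds no longer covers userland, so the sixth syscall argument on the user stack is fetched through the user's own saved stack segment, after which %ds is restored from %ss; a fault still lands in syscall_fault via the _ASM_EXTABLE entry. A hedged i386 C sketch of that fetch (function and parameter names are illustrative assumptions):

static unsigned long fetch_user_arg6_sketch(unsigned int user_ss,
					    const unsigned long *user_sp)
{
	unsigned long val;

	asm volatile("mov %1, %%ds\n\t"		/* mov PT_OLDSS(%esp),%ds */
		     "mov %%ds:(%2), %0\n\t"	/* 1: movl %ds:(%ebp),%ebp */
		     "push %%ss\n\t"		/* push %ss */
		     "pop %%ds"			/* pop %ds: back to kernel data */
		     : "=r" (val)
		     : "r" (user_ss), "r" (user_sp)
		     : "memory");
	return val;
}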
16589@@ -444,12 +616,24 @@ sysenter_do_call:
16590 testl $_TIF_ALLWORK_MASK, %ecx
16591 jne sysexit_audit
16592 sysenter_exit:
16593+
16594+#ifdef CONFIG_PAX_RANDKSTACK
16595+ pushl_cfi %eax
16596+ movl %esp, %eax
16597+ call pax_randomize_kstack
16598+ popl_cfi %eax
16599+#endif
16600+
16601+ pax_erase_kstack
16602+
16603 /* if something modifies registers it must also disable sysexit */
16604 movl PT_EIP(%esp), %edx
16605 movl PT_OLDESP(%esp), %ecx
16606 xorl %ebp,%ebp
16607 TRACE_IRQS_ON
16608 1: mov PT_FS(%esp), %fs
16609+2: mov PT_DS(%esp), %ds
16610+3: mov PT_ES(%esp), %es
16611 PTGS_TO_GS
16612 ENABLE_INTERRUPTS_SYSEXIT
16613
16614@@ -466,6 +650,9 @@ sysenter_audit:
16615 movl %eax,%edx /* 2nd arg: syscall number */
16616 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16617 call __audit_syscall_entry
16618+
16619+ pax_erase_kstack
16620+
16621 pushl_cfi %ebx
16622 movl PT_EAX(%esp),%eax /* reload syscall number */
16623 jmp sysenter_do_call
16624@@ -491,10 +678,16 @@ sysexit_audit:
16625
16626 CFI_ENDPROC
16627 .pushsection .fixup,"ax"
16628-2: movl $0,PT_FS(%esp)
16629+4: movl $0,PT_FS(%esp)
16630+ jmp 1b
16631+5: movl $0,PT_DS(%esp)
16632+ jmp 1b
16633+6: movl $0,PT_ES(%esp)
16634 jmp 1b
16635 .popsection
16636- _ASM_EXTABLE(1b,2b)
16637+ _ASM_EXTABLE(1b,4b)
16638+ _ASM_EXTABLE(2b,5b)
16639+ _ASM_EXTABLE(3b,6b)
16640 PTGS_TO_GS_EX
16641 ENDPROC(ia32_sysenter_target)
16642
16643@@ -509,6 +702,11 @@ ENTRY(system_call)
16644 pushl_cfi %eax # save orig_eax
16645 SAVE_ALL
16646 GET_THREAD_INFO(%ebp)
16647+
16648+#ifdef CONFIG_PAX_RANDKSTACK
16649+ pax_erase_kstack
16650+#endif
16651+
16652 # system call tracing in operation / emulation
16653 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
16654 jnz syscall_trace_entry
16655@@ -527,6 +725,15 @@ syscall_exit:
16656 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16657 jne syscall_exit_work
16658
16659+restore_all_pax:
16660+
16661+#ifdef CONFIG_PAX_RANDKSTACK
16662+ movl %esp, %eax
16663+ call pax_randomize_kstack
16664+#endif
16665+
16666+ pax_erase_kstack
16667+
16668 restore_all:
16669 TRACE_IRQS_IRET
16670 restore_all_notrace:
16671@@ -583,14 +790,34 @@ ldt_ss:
16672 * compensating for the offset by changing to the ESPFIX segment with
16673 * a base address that matches for the difference.
16674 */
16675-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
16676+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
16677 mov %esp, %edx /* load kernel esp */
16678 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16679 mov %dx, %ax /* eax: new kernel esp */
16680 sub %eax, %edx /* offset (low word is 0) */
16681+#ifdef CONFIG_SMP
16682+ movl PER_CPU_VAR(cpu_number), %ebx
16683+ shll $PAGE_SHIFT_asm, %ebx
16684+ addl $cpu_gdt_table, %ebx
16685+#else
16686+ movl $cpu_gdt_table, %ebx
16687+#endif
16688 shr $16, %edx
16689- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
16690- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
16691+
16692+#ifdef CONFIG_PAX_KERNEXEC
16693+ mov %cr0, %esi
16694+ btr $16, %esi
16695+ mov %esi, %cr0
16696+#endif
16697+
16698+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
16699+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
16700+
16701+#ifdef CONFIG_PAX_KERNEXEC
16702+ bts $16, %esi
16703+ mov %esi, %cr0
16704+#endif
16705+
16706 pushl_cfi $__ESPFIX_SS
16707 pushl_cfi %eax /* new kernel esp */
16708 /* Disable interrupts, but do not irqtrace this section: we
16709@@ -619,20 +846,18 @@ work_resched:
16710 movl TI_flags(%ebp), %ecx
16711 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16712 # than syscall tracing?
16713- jz restore_all
16714+ jz restore_all_pax
16715 testb $_TIF_NEED_RESCHED, %cl
16716 jnz work_resched
16717
16718 work_notifysig: # deal with pending signals and
16719 # notify-resume requests
16720+ movl %esp, %eax
16721 #ifdef CONFIG_VM86
16722 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16723- movl %esp, %eax
16724 jne work_notifysig_v86 # returning to kernel-space or
16725 # vm86-space
16726 1:
16727-#else
16728- movl %esp, %eax
16729 #endif
16730 TRACE_IRQS_ON
16731 ENABLE_INTERRUPTS(CLBR_NONE)
16732@@ -653,7 +878,7 @@ work_notifysig_v86:
16733 movl %eax, %esp
16734 jmp 1b
16735 #endif
16736-END(work_pending)
16737+ENDPROC(work_pending)
16738
16739 # perform syscall exit tracing
16740 ALIGN
16741@@ -661,11 +886,14 @@ syscall_trace_entry:
16742 movl $-ENOSYS,PT_EAX(%esp)
16743 movl %esp, %eax
16744 call syscall_trace_enter
16745+
16746+ pax_erase_kstack
16747+
16748 /* What it returned is what we'll actually use. */
16749 cmpl $(NR_syscalls), %eax
16750 jnae syscall_call
16751 jmp syscall_exit
16752-END(syscall_trace_entry)
16753+ENDPROC(syscall_trace_entry)
16754
16755 # perform syscall exit tracing
16756 ALIGN
16757@@ -678,21 +906,25 @@ syscall_exit_work:
16758 movl %esp, %eax
16759 call syscall_trace_leave
16760 jmp resume_userspace
16761-END(syscall_exit_work)
16762+ENDPROC(syscall_exit_work)
16763 CFI_ENDPROC
16764
16765 RING0_INT_FRAME # can't unwind into user space anyway
16766 syscall_fault:
16767+#ifdef CONFIG_PAX_MEMORY_UDEREF
16768+ push %ss
16769+ pop %ds
16770+#endif
16771 ASM_CLAC
16772 GET_THREAD_INFO(%ebp)
16773 movl $-EFAULT,PT_EAX(%esp)
16774 jmp resume_userspace
16775-END(syscall_fault)
16776+ENDPROC(syscall_fault)
16777
16778 syscall_badsys:
16779 movl $-ENOSYS,PT_EAX(%esp)
16780 jmp resume_userspace
16781-END(syscall_badsys)
16782+ENDPROC(syscall_badsys)
16783 CFI_ENDPROC
16784 /*
16785 * End of kprobes section
16786@@ -772,8 +1004,15 @@ ENDPROC(ptregs_clone)
16787 * normal stack and adjusts ESP with the matching offset.
16788 */
16789 /* fixup the stack */
16790- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
16791- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
16792+#ifdef CONFIG_SMP
16793+ movl PER_CPU_VAR(cpu_number), %ebx
16794+ shll $PAGE_SHIFT_asm, %ebx
16795+ addl $cpu_gdt_table, %ebx
16796+#else
16797+ movl $cpu_gdt_table, %ebx
16798+#endif
16799+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
16800+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
16801 shl $16, %eax
16802 addl %esp, %eax /* the adjusted stack pointer */
16803 pushl_cfi $__KERNEL_DS
16804@@ -826,7 +1065,7 @@ vector=vector+1
16805 .endr
16806 2: jmp common_interrupt
16807 .endr
16808-END(irq_entries_start)
16809+ENDPROC(irq_entries_start)
16810
16811 .previous
16812 END(interrupt)
16813@@ -877,7 +1116,7 @@ ENTRY(coprocessor_error)
16814 pushl_cfi $do_coprocessor_error
16815 jmp error_code
16816 CFI_ENDPROC
16817-END(coprocessor_error)
16818+ENDPROC(coprocessor_error)
16819
16820 ENTRY(simd_coprocessor_error)
16821 RING0_INT_FRAME
16822@@ -899,7 +1138,7 @@ ENTRY(simd_coprocessor_error)
16823 #endif
16824 jmp error_code
16825 CFI_ENDPROC
16826-END(simd_coprocessor_error)
16827+ENDPROC(simd_coprocessor_error)
16828
16829 ENTRY(device_not_available)
16830 RING0_INT_FRAME
16831@@ -908,18 +1147,18 @@ ENTRY(device_not_available)
16832 pushl_cfi $do_device_not_available
16833 jmp error_code
16834 CFI_ENDPROC
16835-END(device_not_available)
16836+ENDPROC(device_not_available)
16837
16838 #ifdef CONFIG_PARAVIRT
16839 ENTRY(native_iret)
16840 iret
16841 _ASM_EXTABLE(native_iret, iret_exc)
16842-END(native_iret)
16843+ENDPROC(native_iret)
16844
16845 ENTRY(native_irq_enable_sysexit)
16846 sti
16847 sysexit
16848-END(native_irq_enable_sysexit)
16849+ENDPROC(native_irq_enable_sysexit)
16850 #endif
16851
16852 ENTRY(overflow)
16853@@ -929,7 +1168,7 @@ ENTRY(overflow)
16854 pushl_cfi $do_overflow
16855 jmp error_code
16856 CFI_ENDPROC
16857-END(overflow)
16858+ENDPROC(overflow)
16859
16860 ENTRY(bounds)
16861 RING0_INT_FRAME
16862@@ -938,7 +1177,7 @@ ENTRY(bounds)
16863 pushl_cfi $do_bounds
16864 jmp error_code
16865 CFI_ENDPROC
16866-END(bounds)
16867+ENDPROC(bounds)
16868
16869 ENTRY(invalid_op)
16870 RING0_INT_FRAME
16871@@ -947,7 +1186,7 @@ ENTRY(invalid_op)
16872 pushl_cfi $do_invalid_op
16873 jmp error_code
16874 CFI_ENDPROC
16875-END(invalid_op)
16876+ENDPROC(invalid_op)
16877
16878 ENTRY(coprocessor_segment_overrun)
16879 RING0_INT_FRAME
16880@@ -956,7 +1195,7 @@ ENTRY(coprocessor_segment_overrun)
16881 pushl_cfi $do_coprocessor_segment_overrun
16882 jmp error_code
16883 CFI_ENDPROC
16884-END(coprocessor_segment_overrun)
16885+ENDPROC(coprocessor_segment_overrun)
16886
16887 ENTRY(invalid_TSS)
16888 RING0_EC_FRAME
16889@@ -964,7 +1203,7 @@ ENTRY(invalid_TSS)
16890 pushl_cfi $do_invalid_TSS
16891 jmp error_code
16892 CFI_ENDPROC
16893-END(invalid_TSS)
16894+ENDPROC(invalid_TSS)
16895
16896 ENTRY(segment_not_present)
16897 RING0_EC_FRAME
16898@@ -972,7 +1211,7 @@ ENTRY(segment_not_present)
16899 pushl_cfi $do_segment_not_present
16900 jmp error_code
16901 CFI_ENDPROC
16902-END(segment_not_present)
16903+ENDPROC(segment_not_present)
16904
16905 ENTRY(stack_segment)
16906 RING0_EC_FRAME
16907@@ -980,7 +1219,7 @@ ENTRY(stack_segment)
16908 pushl_cfi $do_stack_segment
16909 jmp error_code
16910 CFI_ENDPROC
16911-END(stack_segment)
16912+ENDPROC(stack_segment)
16913
16914 ENTRY(alignment_check)
16915 RING0_EC_FRAME
16916@@ -988,7 +1227,7 @@ ENTRY(alignment_check)
16917 pushl_cfi $do_alignment_check
16918 jmp error_code
16919 CFI_ENDPROC
16920-END(alignment_check)
16921+ENDPROC(alignment_check)
16922
16923 ENTRY(divide_error)
16924 RING0_INT_FRAME
16925@@ -997,7 +1236,7 @@ ENTRY(divide_error)
16926 pushl_cfi $do_divide_error
16927 jmp error_code
16928 CFI_ENDPROC
16929-END(divide_error)
16930+ENDPROC(divide_error)
16931
16932 #ifdef CONFIG_X86_MCE
16933 ENTRY(machine_check)
16934@@ -1007,7 +1246,7 @@ ENTRY(machine_check)
16935 pushl_cfi machine_check_vector
16936 jmp error_code
16937 CFI_ENDPROC
16938-END(machine_check)
16939+ENDPROC(machine_check)
16940 #endif
16941
16942 ENTRY(spurious_interrupt_bug)
16943@@ -1017,7 +1256,7 @@ ENTRY(spurious_interrupt_bug)
16944 pushl_cfi $do_spurious_interrupt_bug
16945 jmp error_code
16946 CFI_ENDPROC
16947-END(spurious_interrupt_bug)
16948+ENDPROC(spurious_interrupt_bug)
16949 /*
16950 * End of kprobes section
16951 */
16952@@ -1120,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
16953
16954 ENTRY(mcount)
16955 ret
16956-END(mcount)
16957+ENDPROC(mcount)
16958
16959 ENTRY(ftrace_caller)
16960 cmpl $0, function_trace_stop
16961@@ -1153,7 +1392,7 @@ ftrace_graph_call:
16962 .globl ftrace_stub
16963 ftrace_stub:
16964 ret
16965-END(ftrace_caller)
16966+ENDPROC(ftrace_caller)
16967
16968 ENTRY(ftrace_regs_caller)
16969 pushf /* push flags before compare (in cs location) */
16970@@ -1254,7 +1493,7 @@ trace:
16971 popl %ecx
16972 popl %eax
16973 jmp ftrace_stub
16974-END(mcount)
16975+ENDPROC(mcount)
16976 #endif /* CONFIG_DYNAMIC_FTRACE */
16977 #endif /* CONFIG_FUNCTION_TRACER */
16978
16979@@ -1272,7 +1511,7 @@ ENTRY(ftrace_graph_caller)
16980 popl %ecx
16981 popl %eax
16982 ret
16983-END(ftrace_graph_caller)
16984+ENDPROC(ftrace_graph_caller)
16985
16986 .globl return_to_handler
16987 return_to_handler:
16988@@ -1328,15 +1567,18 @@ error_code:
16989 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16990 REG_TO_PTGS %ecx
16991 SET_KERNEL_GS %ecx
16992- movl $(__USER_DS), %ecx
16993+ movl $(__KERNEL_DS), %ecx
16994 movl %ecx, %ds
16995 movl %ecx, %es
16996+
16997+ pax_enter_kernel
16998+
16999 TRACE_IRQS_OFF
17000 movl %esp,%eax # pt_regs pointer
17001 call *%edi
17002 jmp ret_from_exception
17003 CFI_ENDPROC
17004-END(page_fault)
17005+ENDPROC(page_fault)
17006
17007 /*
17008 * Debug traps and NMI can happen at the one SYSENTER instruction
17009@@ -1379,7 +1621,7 @@ debug_stack_correct:
17010 call do_debug
17011 jmp ret_from_exception
17012 CFI_ENDPROC
17013-END(debug)
17014+ENDPROC(debug)
17015
17016 /*
17017 * NMI is doubly nasty. It can happen _while_ we're handling
17018@@ -1417,6 +1659,9 @@ nmi_stack_correct:
17019 xorl %edx,%edx # zero error code
17020 movl %esp,%eax # pt_regs pointer
17021 call do_nmi
17022+
17023+ pax_exit_kernel
17024+
17025 jmp restore_all_notrace
17026 CFI_ENDPROC
17027
17028@@ -1453,12 +1698,15 @@ nmi_espfix_stack:
17029 FIXUP_ESPFIX_STACK # %eax == %esp
17030 xorl %edx,%edx # zero error code
17031 call do_nmi
17032+
17033+ pax_exit_kernel
17034+
17035 RESTORE_REGS
17036 lss 12+4(%esp), %esp # back to espfix stack
17037 CFI_ADJUST_CFA_OFFSET -24
17038 jmp irq_return
17039 CFI_ENDPROC
17040-END(nmi)
17041+ENDPROC(nmi)
17042
17043 ENTRY(int3)
17044 RING0_INT_FRAME
17045@@ -1471,14 +1719,14 @@ ENTRY(int3)
17046 call do_int3
17047 jmp ret_from_exception
17048 CFI_ENDPROC
17049-END(int3)
17050+ENDPROC(int3)
17051
17052 ENTRY(general_protection)
17053 RING0_EC_FRAME
17054 pushl_cfi $do_general_protection
17055 jmp error_code
17056 CFI_ENDPROC
17057-END(general_protection)
17058+ENDPROC(general_protection)
17059
17060 #ifdef CONFIG_KVM_GUEST
17061 ENTRY(async_page_fault)
17062@@ -1487,7 +1735,7 @@ ENTRY(async_page_fault)
17063 pushl_cfi $do_async_page_fault
17064 jmp error_code
17065 CFI_ENDPROC
17066-END(async_page_fault)
17067+ENDPROC(async_page_fault)
17068 #endif
17069
17070 /*
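Note: under KERNEXEC the GDT sits in read-only memory, so the ldt_ss/espfix hunk above brackets its two descriptor-byte writes with btr/bts of CR0 bit 16, the WP flag. A hedged C sketch of that transient write window; the helper names are illustrative, and the CR0 accessors mirror what the kernel's own read_cr0/write_cr0 do:

#define X86_CR0_WP (1UL << 16)

static inline unsigned long read_cr0_sketch(void)
{
	unsigned long cr0;
	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0_sketch(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0));
}

static void gdt_poke_byte_sketch(volatile unsigned char *slot,
				 unsigned char val)
{
	unsigned long cr0 = read_cr0_sketch();

	write_cr0_sketch(cr0 & ~X86_CR0_WP);	/* btr $16: open the window */
	*slot = val;				/* patch the read-only entry */
	write_cr0_sketch(cr0 | X86_CR0_WP);	/* bts $16: close it again  */
}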
17071diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
17072index 1328fe4..cb03298 100644
17073--- a/arch/x86/kernel/entry_64.S
17074+++ b/arch/x86/kernel/entry_64.S
17075@@ -59,6 +59,8 @@
17076 #include <asm/rcu.h>
17077 #include <asm/smap.h>
17078 #include <linux/err.h>
17079+#include <asm/pgtable.h>
17080+#include <asm/alternative-asm.h>
17081
17082 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
17083 #include <linux/elf-em.h>
17084@@ -80,8 +82,9 @@
17085 #ifdef CONFIG_DYNAMIC_FTRACE
17086
17087 ENTRY(function_hook)
17088+ pax_force_retaddr
17089 retq
17090-END(function_hook)
17091+ENDPROC(function_hook)
17092
17093 /* skip is set if stack has been adjusted */
17094 .macro ftrace_caller_setup skip=0
17095@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
17096 #endif
17097
17098 GLOBAL(ftrace_stub)
17099+ pax_force_retaddr
17100 retq
17101-END(ftrace_caller)
17102+ENDPROC(ftrace_caller)
17103
17104 ENTRY(ftrace_regs_caller)
17105 /* Save the current flags before compare (in SS location)*/
17106@@ -191,7 +195,7 @@ ftrace_restore_flags:
17107 popfq
17108 jmp ftrace_stub
17109
17110-END(ftrace_regs_caller)
17111+ENDPROC(ftrace_regs_caller)
17112
17113
17114 #else /* ! CONFIG_DYNAMIC_FTRACE */
17115@@ -212,6 +216,7 @@ ENTRY(function_hook)
17116 #endif
17117
17118 GLOBAL(ftrace_stub)
17119+ pax_force_retaddr
17120 retq
17121
17122 trace:
17123@@ -225,12 +230,13 @@ trace:
17124 #endif
17125 subq $MCOUNT_INSN_SIZE, %rdi
17126
17127+ pax_force_fptr ftrace_trace_function
17128 call *ftrace_trace_function
17129
17130 MCOUNT_RESTORE_FRAME
17131
17132 jmp ftrace_stub
17133-END(function_hook)
17134+ENDPROC(function_hook)
17135 #endif /* CONFIG_DYNAMIC_FTRACE */
17136 #endif /* CONFIG_FUNCTION_TRACER */
17137
17138@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
17139
17140 MCOUNT_RESTORE_FRAME
17141
17142+ pax_force_retaddr
17143 retq
17144-END(ftrace_graph_caller)
17145+ENDPROC(ftrace_graph_caller)
17146
17147 GLOBAL(return_to_handler)
17148 subq $24, %rsp
17149@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
17150 movq 8(%rsp), %rdx
17151 movq (%rsp), %rax
17152 addq $24, %rsp
17153+ pax_force_fptr %rdi
17154 jmp *%rdi
17155+ENDPROC(return_to_handler)
17156 #endif
17157
17158
17159@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
17160 ENDPROC(native_usergs_sysret64)
17161 #endif /* CONFIG_PARAVIRT */
17162
17163+ .macro ljmpq sel, off
17164+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
17165+ .byte 0x48; ljmp *1234f(%rip)
17166+ .pushsection .rodata
17167+ .align 16
17168+ 1234: .quad \off; .word \sel
17169+ .popsection
17170+#else
17171+ pushq $\sel
17172+ pushq $\off
17173+ lretq
17174+#endif
17175+ .endm
17176+
17177+ .macro pax_enter_kernel
17178+ pax_set_fptr_mask
17179+#ifdef CONFIG_PAX_KERNEXEC
17180+ call pax_enter_kernel
17181+#endif
17182+ .endm
17183+
17184+ .macro pax_exit_kernel
17185+#ifdef CONFIG_PAX_KERNEXEC
17186+ call pax_exit_kernel
17187+#endif
17188+ .endm
17189+
17190+#ifdef CONFIG_PAX_KERNEXEC
17191+ENTRY(pax_enter_kernel)
17192+ pushq %rdi
17193+
17194+#ifdef CONFIG_PARAVIRT
17195+ PV_SAVE_REGS(CLBR_RDI)
17196+#endif
17197+
17198+ GET_CR0_INTO_RDI
17199+ bts $16,%rdi
17200+ jnc 3f
17201+ mov %cs,%edi
17202+ cmp $__KERNEL_CS,%edi
17203+ jnz 2f
17204+1:
17205+
17206+#ifdef CONFIG_PARAVIRT
17207+ PV_RESTORE_REGS(CLBR_RDI)
17208+#endif
17209+
17210+ popq %rdi
17211+ pax_force_retaddr
17212+ retq
17213+
17214+2: ljmpq __KERNEL_CS,1f
17215+3: ljmpq __KERNEXEC_KERNEL_CS,4f
17216+4: SET_RDI_INTO_CR0
17217+ jmp 1b
17218+ENDPROC(pax_enter_kernel)
17219+
17220+ENTRY(pax_exit_kernel)
17221+ pushq %rdi
17222+
17223+#ifdef CONFIG_PARAVIRT
17224+ PV_SAVE_REGS(CLBR_RDI)
17225+#endif
17226+
17227+ mov %cs,%rdi
17228+ cmp $__KERNEXEC_KERNEL_CS,%edi
17229+ jz 2f
17230+1:
17231+
17232+#ifdef CONFIG_PARAVIRT
17233+ PV_RESTORE_REGS(CLBR_RDI);
17234+#endif
17235+
17236+ popq %rdi
17237+ pax_force_retaddr
17238+ retq
17239+
17240+2: GET_CR0_INTO_RDI
17241+ btr $16,%rdi
17242+ ljmpq __KERNEL_CS,3f
17243+3: SET_RDI_INTO_CR0
17244+ jmp 1b
17245+ENDPROC(pax_exit_kernel)
17246+#endif
17247+
17248+ .macro pax_enter_kernel_user
17249+ pax_set_fptr_mask
17250+#ifdef CONFIG_PAX_MEMORY_UDEREF
17251+ call pax_enter_kernel_user
17252+#endif
17253+ .endm
17254+
17255+ .macro pax_exit_kernel_user
17256+#ifdef CONFIG_PAX_MEMORY_UDEREF
17257+ call pax_exit_kernel_user
17258+#endif
17259+#ifdef CONFIG_PAX_RANDKSTACK
17260+ pushq %rax
17261+ call pax_randomize_kstack
17262+ popq %rax
17263+#endif
17264+ .endm
17265+
17266+#ifdef CONFIG_PAX_MEMORY_UDEREF
17267+ENTRY(pax_enter_kernel_user)
17268+ pushq %rdi
17269+ pushq %rbx
17270+
17271+#ifdef CONFIG_PARAVIRT
17272+ PV_SAVE_REGS(CLBR_RDI)
17273+#endif
17274+
17275+ GET_CR3_INTO_RDI
17276+ mov %rdi,%rbx
17277+ add $__START_KERNEL_map,%rbx
17278+ sub phys_base(%rip),%rbx
17279+
17280+#ifdef CONFIG_PARAVIRT
17281+ pushq %rdi
17282+ cmpl $0, pv_info+PARAVIRT_enabled
17283+ jz 1f
17284+ i = 0
17285+ .rept USER_PGD_PTRS
17286+ mov i*8(%rbx),%rsi
17287+ mov $0,%sil
17288+ lea i*8(%rbx),%rdi
17289+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17290+ i = i + 1
17291+ .endr
17292+ jmp 2f
17293+1:
17294+#endif
17295+
17296+ i = 0
17297+ .rept USER_PGD_PTRS
17298+ movb $0,i*8(%rbx)
17299+ i = i + 1
17300+ .endr
17301+
17302+#ifdef CONFIG_PARAVIRT
17303+2: popq %rdi
17304+#endif
17305+ SET_RDI_INTO_CR3
17306+
17307+#ifdef CONFIG_PAX_KERNEXEC
17308+ GET_CR0_INTO_RDI
17309+ bts $16,%rdi
17310+ SET_RDI_INTO_CR0
17311+#endif
17312+
17313+#ifdef CONFIG_PARAVIRT
17314+ PV_RESTORE_REGS(CLBR_RDI)
17315+#endif
17316+
17317+ popq %rbx
17318+ popq %rdi
17319+ pax_force_retaddr
17320+ retq
17321+ENDPROC(pax_enter_kernel_user)
17322+
17323+ENTRY(pax_exit_kernel_user)
17324+ push %rdi
17325+
17326+#ifdef CONFIG_PARAVIRT
17327+ pushq %rbx
17328+ PV_SAVE_REGS(CLBR_RDI)
17329+#endif
17330+
17331+#ifdef CONFIG_PAX_KERNEXEC
17332+ GET_CR0_INTO_RDI
17333+ btr $16,%rdi
17334+ SET_RDI_INTO_CR0
17335+#endif
17336+
17337+ GET_CR3_INTO_RDI
17338+ add $__START_KERNEL_map,%rdi
17339+ sub phys_base(%rip),%rdi
17340+
17341+#ifdef CONFIG_PARAVIRT
17342+ cmpl $0, pv_info+PARAVIRT_enabled
17343+ jz 1f
17344+ mov %rdi,%rbx
17345+ i = 0
17346+ .rept USER_PGD_PTRS
17347+ mov i*8(%rbx),%rsi
17348+ mov $0x67,%sil
17349+ lea i*8(%rbx),%rdi
17350+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17351+ i = i + 1
17352+ .endr
17353+ jmp 2f
17354+1:
17355+#endif
17356+
17357+ i = 0
17358+ .rept USER_PGD_PTRS
17359+ movb $0x67,i*8(%rdi)
17360+ i = i + 1
17361+ .endr
17362+
17363+#ifdef CONFIG_PARAVIRT
17364+2: PV_RESTORE_REGS(CLBR_RDI)
17365+ popq %rbx
17366+#endif
17367+
17368+ popq %rdi
17369+ pax_force_retaddr
17370+ retq
17371+ENDPROC(pax_exit_kernel_user)
17372+#endif
17373+
17374+.macro pax_erase_kstack
17375+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17376+ call pax_erase_kstack
17377+#endif
17378+.endm
17379+
17380+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17381+ENTRY(pax_erase_kstack)
17382+ pushq %rdi
17383+ pushq %rcx
17384+ pushq %rax
17385+ pushq %r11
17386+
17387+ GET_THREAD_INFO(%r11)
17388+ mov TI_lowest_stack(%r11), %rdi
17389+ mov $-0xBEEF, %rax
17390+ std
17391+
17392+1: mov %edi, %ecx
17393+ and $THREAD_SIZE_asm - 1, %ecx
17394+ shr $3, %ecx
17395+ repne scasq
17396+ jecxz 2f
17397+
17398+ cmp $2*8, %ecx
17399+ jc 2f
17400+
17401+ mov $2*8, %ecx
17402+ repe scasq
17403+ jecxz 2f
17404+ jne 1b
17405+
17406+2: cld
17407+ mov %esp, %ecx
17408+ sub %edi, %ecx
17409+
17410+ cmp $THREAD_SIZE_asm, %rcx
17411+ jb 3f
17412+ ud2
17413+3:
17414+
17415+ shr $3, %ecx
17416+ rep stosq
17417+
17418+ mov TI_task_thread_sp0(%r11), %rdi
17419+ sub $256, %rdi
17420+ mov %rdi, TI_lowest_stack(%r11)
17421+
17422+ popq %r11
17423+ popq %rax
17424+ popq %rcx
17425+ popq %rdi
17426+ pax_force_retaddr
17427+ ret
17428+ENDPROC(pax_erase_kstack)
17429+#endif
17430
17431 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17432 #ifdef CONFIG_TRACE_IRQFLAGS
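Note: pax_enter_kernel_user/pax_exit_kernel_user above implement UDEREF on amd64 by rewriting the low flag byte of every userland top-level page-table entry, reached through the kernel alias of CR3 (the add __START_KERNEL_map / sub phys_base sequence); on paravirt the same walk goes through set_pgd_batched via PARA_INDIRECT. Clearing the byte drops the present bit on kernel entry, and 0x67 restores PRESENT|RW|USER|ACCESSED|DIRTY on exit. A hedged C sketch of those .rept loops; treating USER_PGD_PTRS as the user half of a 512-entry PML4 is an assumption:

#define USER_PGD_PTRS 256	/* assumed: user half of the 512-entry PML4 */

static void uderef_close_user_sketch(unsigned char *pgd)	/* entry path */
{
	for (unsigned int i = 0; i < USER_PGD_PTRS; i++)
		pgd[i * 8] = 0x00;	/* movb $0,i*8(%rbx): non-present */
}

static void uderef_open_user_sketch(unsigned char *pgd)	/* exit path */
{
	for (unsigned int i = 0; i < USER_PGD_PTRS; i++)
		pgd[i * 8] = 0x67;	/* movb $0x67,i*8(%rdi): P|RW|US|A|D */
}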
17433@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
17434 .endm
17435
17436 .macro UNFAKE_STACK_FRAME
17437- addq $8*6, %rsp
17438- CFI_ADJUST_CFA_OFFSET -(6*8)
17439+ addq $8*6 + ARG_SKIP, %rsp
17440+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17441 .endm
17442
17443 /*
17444@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
17445 movq %rsp, %rsi
17446
17447 leaq -RBP(%rsp),%rdi /* arg1 for handler */
17448- testl $3, CS-RBP(%rsi)
17449+ testb $3, CS-RBP(%rsi)
17450 je 1f
17451 SWAPGS
17452 /*
17453@@ -498,9 +774,10 @@ ENTRY(save_rest)
17454 movq_cfi r15, R15+16
17455 movq %r11, 8(%rsp) /* return address */
17456 FIXUP_TOP_OF_STACK %r11, 16
17457+ pax_force_retaddr
17458 ret
17459 CFI_ENDPROC
17460-END(save_rest)
17461+ENDPROC(save_rest)
17462
17463 /* save complete stack frame */
17464 .pushsection .kprobes.text, "ax"
17465@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
17466 js 1f /* negative -> in kernel */
17467 SWAPGS
17468 xorl %ebx,%ebx
17469-1: ret
17470+1: pax_force_retaddr_bts
17471+ ret
17472 CFI_ENDPROC
17473-END(save_paranoid)
17474+ENDPROC(save_paranoid)
17475 .popsection
17476
17477 /*
17478@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
17479
17480 RESTORE_REST
17481
17482- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17483+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17484 jz 1f
17485
17486 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17487@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
17488 RESTORE_REST
17489 jmp int_ret_from_sys_call
17490 CFI_ENDPROC
17491-END(ret_from_fork)
17492+ENDPROC(ret_from_fork)
17493
17494 /*
17495 * System call entry. Up to 6 arguments in registers are supported.
17496@@ -608,7 +886,7 @@ END(ret_from_fork)
17497 ENTRY(system_call)
17498 CFI_STARTPROC simple
17499 CFI_SIGNAL_FRAME
17500- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17501+ CFI_DEF_CFA rsp,0
17502 CFI_REGISTER rip,rcx
17503 /*CFI_REGISTER rflags,r11*/
17504 SWAPGS_UNSAFE_STACK
17505@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
17506
17507 movq %rsp,PER_CPU_VAR(old_rsp)
17508 movq PER_CPU_VAR(kernel_stack),%rsp
17509+ SAVE_ARGS 8*6,0
17510+ pax_enter_kernel_user
17511+
17512+#ifdef CONFIG_PAX_RANDKSTACK
17513+ pax_erase_kstack
17514+#endif
17515+
17516 /*
17517 * No need to follow this irqs off/on section - it's straight
17518 * and short:
17519 */
17520 ENABLE_INTERRUPTS(CLBR_NONE)
17521- SAVE_ARGS 8,0
17522 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17523 movq %rcx,RIP-ARGOFFSET(%rsp)
17524 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17525- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17526+ GET_THREAD_INFO(%rcx)
17527+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
17528 jnz tracesys
17529 system_call_fastpath:
17530 #if __SYSCALL_MASK == ~0
17531@@ -640,7 +925,7 @@ system_call_fastpath:
17532 cmpl $__NR_syscall_max,%eax
17533 #endif
17534 ja badsys
17535- movq %r10,%rcx
17536+ movq R10-ARGOFFSET(%rsp),%rcx
17537 call *sys_call_table(,%rax,8) # XXX: rip relative
17538 movq %rax,RAX-ARGOFFSET(%rsp)
17539 /*
17540@@ -654,10 +939,13 @@ sysret_check:
17541 LOCKDEP_SYS_EXIT
17542 DISABLE_INTERRUPTS(CLBR_NONE)
17543 TRACE_IRQS_OFF
17544- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
17545+ GET_THREAD_INFO(%rcx)
17546+ movl TI_flags(%rcx),%edx
17547 andl %edi,%edx
17548 jnz sysret_careful
17549 CFI_REMEMBER_STATE
17550+ pax_exit_kernel_user
17551+ pax_erase_kstack
17552 /*
17553 * sysretq will re-enable interrupts:
17554 */
17555@@ -709,14 +997,18 @@ badsys:
17556 * jump back to the normal fast path.
17557 */
17558 auditsys:
17559- movq %r10,%r9 /* 6th arg: 4th syscall arg */
17560+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17561 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17562 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17563 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17564 movq %rax,%rsi /* 2nd arg: syscall number */
17565 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17566 call __audit_syscall_entry
17567+
17568+ pax_erase_kstack
17569+
17570 LOAD_ARGS 0 /* reload call-clobbered registers */
17571+ pax_set_fptr_mask
17572 jmp system_call_fastpath
17573
17574 /*
17575@@ -737,7 +1029,7 @@ sysret_audit:
17576 /* Do syscall tracing */
17577 tracesys:
17578 #ifdef CONFIG_AUDITSYSCALL
17579- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
17580+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
17581 jz auditsys
17582 #endif
17583 SAVE_REST
17584@@ -745,12 +1037,16 @@ tracesys:
17585 FIXUP_TOP_OF_STACK %rdi
17586 movq %rsp,%rdi
17587 call syscall_trace_enter
17588+
17589+ pax_erase_kstack
17590+
17591 /*
17592 * Reload arg registers from stack in case ptrace changed them.
17593 * We don't reload %rax because syscall_trace_enter() returned
17594 * the value it wants us to use in the table lookup.
17595 */
17596 LOAD_ARGS ARGOFFSET, 1
17597+ pax_set_fptr_mask
17598 RESTORE_REST
17599 #if __SYSCALL_MASK == ~0
17600 cmpq $__NR_syscall_max,%rax
17601@@ -759,7 +1055,7 @@ tracesys:
17602 cmpl $__NR_syscall_max,%eax
17603 #endif
17604 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17605- movq %r10,%rcx /* fixup for C */
17606+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17607 call *sys_call_table(,%rax,8)
17608 movq %rax,RAX-ARGOFFSET(%rsp)
17609 /* Use IRET because user could have changed frame */
17610@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
17611 andl %edi,%edx
17612 jnz int_careful
17613 andl $~TS_COMPAT,TI_status(%rcx)
17614- jmp retint_swapgs
17615+ pax_exit_kernel_user
17616+ pax_erase_kstack
17617+ jmp retint_swapgs_pax
17618
17619 /* Either reschedule or signal or syscall exit tracking needed. */
17620 /* First do a reschedule test. */
17621@@ -826,7 +1124,7 @@ int_restore_rest:
17622 TRACE_IRQS_OFF
17623 jmp int_with_check
17624 CFI_ENDPROC
17625-END(system_call)
17626+ENDPROC(system_call)
17627
17628 /*
17629 * Certain special system calls that need to save a complete full stack frame.
17630@@ -842,7 +1140,7 @@ ENTRY(\label)
17631 call \func
17632 jmp ptregscall_common
17633 CFI_ENDPROC
17634-END(\label)
17635+ENDPROC(\label)
17636 .endm
17637
17638 PTREGSCALL stub_clone, sys_clone, %r8
17639@@ -860,9 +1158,10 @@ ENTRY(ptregscall_common)
17640 movq_cfi_restore R12+8, r12
17641 movq_cfi_restore RBP+8, rbp
17642 movq_cfi_restore RBX+8, rbx
17643+ pax_force_retaddr
17644 ret $REST_SKIP /* pop extended registers */
17645 CFI_ENDPROC
17646-END(ptregscall_common)
17647+ENDPROC(ptregscall_common)
17648
17649 ENTRY(stub_execve)
17650 CFI_STARTPROC
17651@@ -876,7 +1175,7 @@ ENTRY(stub_execve)
17652 RESTORE_REST
17653 jmp int_ret_from_sys_call
17654 CFI_ENDPROC
17655-END(stub_execve)
17656+ENDPROC(stub_execve)
17657
17658 /*
17659 * sigreturn is special because it needs to restore all registers on return.
17660@@ -894,7 +1193,7 @@ ENTRY(stub_rt_sigreturn)
17661 RESTORE_REST
17662 jmp int_ret_from_sys_call
17663 CFI_ENDPROC
17664-END(stub_rt_sigreturn)
17665+ENDPROC(stub_rt_sigreturn)
17666
17667 #ifdef CONFIG_X86_X32_ABI
17668 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
17669@@ -962,7 +1261,7 @@ vector=vector+1
17670 2: jmp common_interrupt
17671 .endr
17672 CFI_ENDPROC
17673-END(irq_entries_start)
17674+ENDPROC(irq_entries_start)
17675
17676 .previous
17677 END(interrupt)
17678@@ -982,6 +1281,16 @@ END(interrupt)
17679 subq $ORIG_RAX-RBP, %rsp
17680 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
17681 SAVE_ARGS_IRQ
17682+#ifdef CONFIG_PAX_MEMORY_UDEREF
17683+ testb $3, CS(%rdi)
17684+ jnz 1f
17685+ pax_enter_kernel
17686+ jmp 2f
17687+1: pax_enter_kernel_user
17688+2:
17689+#else
17690+ pax_enter_kernel
17691+#endif
17692 call \func
17693 .endm
17694
17695@@ -1014,7 +1323,7 @@ ret_from_intr:
17696
17697 exit_intr:
17698 GET_THREAD_INFO(%rcx)
17699- testl $3,CS-ARGOFFSET(%rsp)
17700+ testb $3,CS-ARGOFFSET(%rsp)
17701 je retint_kernel
17702
17703 /* Interrupt came from user space */
17704@@ -1036,12 +1345,16 @@ retint_swapgs: /* return to user-space */
17705 * The iretq could re-enable interrupts:
17706 */
17707 DISABLE_INTERRUPTS(CLBR_ANY)
17708+ pax_exit_kernel_user
17709+retint_swapgs_pax:
17710 TRACE_IRQS_IRETQ
17711 SWAPGS
17712 jmp restore_args
17713
17714 retint_restore_args: /* return to kernel space */
17715 DISABLE_INTERRUPTS(CLBR_ANY)
17716+ pax_exit_kernel
17717+ pax_force_retaddr (RIP-ARGOFFSET)
17718 /*
17719 * The iretq could re-enable interrupts:
17720 */
17721@@ -1124,7 +1437,7 @@ ENTRY(retint_kernel)
17722 #endif
17723
17724 CFI_ENDPROC
17725-END(common_interrupt)
17726+ENDPROC(common_interrupt)
17727 /*
17728 * End of kprobes section
17729 */
17730@@ -1142,7 +1455,7 @@ ENTRY(\sym)
17731 interrupt \do_sym
17732 jmp ret_from_intr
17733 CFI_ENDPROC
17734-END(\sym)
17735+ENDPROC(\sym)
17736 .endm
17737
17738 #ifdef CONFIG_SMP
17739@@ -1198,12 +1511,22 @@ ENTRY(\sym)
17740 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17741 call error_entry
17742 DEFAULT_FRAME 0
17743+#ifdef CONFIG_PAX_MEMORY_UDEREF
17744+ testb $3, CS(%rsp)
17745+ jnz 1f
17746+ pax_enter_kernel
17747+ jmp 2f
17748+1: pax_enter_kernel_user
17749+2:
17750+#else
17751+ pax_enter_kernel
17752+#endif
17753 movq %rsp,%rdi /* pt_regs pointer */
17754 xorl %esi,%esi /* no error code */
17755 call \do_sym
17756 jmp error_exit /* %ebx: no swapgs flag */
17757 CFI_ENDPROC
17758-END(\sym)
17759+ENDPROC(\sym)
17760 .endm
17761
17762 .macro paranoidzeroentry sym do_sym
17763@@ -1216,15 +1539,25 @@ ENTRY(\sym)
17764 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17765 call save_paranoid
17766 TRACE_IRQS_OFF
17767+#ifdef CONFIG_PAX_MEMORY_UDEREF
17768+ testb $3, CS(%rsp)
17769+ jnz 1f
17770+ pax_enter_kernel
17771+ jmp 2f
17772+1: pax_enter_kernel_user
17773+2:
17774+#else
17775+ pax_enter_kernel
17776+#endif
17777 movq %rsp,%rdi /* pt_regs pointer */
17778 xorl %esi,%esi /* no error code */
17779 call \do_sym
17780 jmp paranoid_exit /* %ebx: no swapgs flag */
17781 CFI_ENDPROC
17782-END(\sym)
17783+ENDPROC(\sym)
17784 .endm
17785
17786-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
17787+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
17788 .macro paranoidzeroentry_ist sym do_sym ist
17789 ENTRY(\sym)
17790 INTR_FRAME
17791@@ -1235,14 +1568,30 @@ ENTRY(\sym)
17792 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17793 call save_paranoid
17794 TRACE_IRQS_OFF_DEBUG
17795+#ifdef CONFIG_PAX_MEMORY_UDEREF
17796+ testb $3, CS(%rsp)
17797+ jnz 1f
17798+ pax_enter_kernel
17799+ jmp 2f
17800+1: pax_enter_kernel_user
17801+2:
17802+#else
17803+ pax_enter_kernel
17804+#endif
17805 movq %rsp,%rdi /* pt_regs pointer */
17806 xorl %esi,%esi /* no error code */
17807+#ifdef CONFIG_SMP
17808+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
17809+ lea init_tss(%r12), %r12
17810+#else
17811+ lea init_tss(%rip), %r12
17812+#endif
17813 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17814 call \do_sym
17815 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
17816 jmp paranoid_exit /* %ebx: no swapgs flag */
17817 CFI_ENDPROC
17818-END(\sym)
17819+ENDPROC(\sym)
17820 .endm
17821
17822 .macro errorentry sym do_sym
17823@@ -1254,13 +1603,23 @@ ENTRY(\sym)
17824 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
17825 call error_entry
17826 DEFAULT_FRAME 0
17827+#ifdef CONFIG_PAX_MEMORY_UDEREF
17828+ testb $3, CS(%rsp)
17829+ jnz 1f
17830+ pax_enter_kernel
17831+ jmp 2f
17832+1: pax_enter_kernel_user
17833+2:
17834+#else
17835+ pax_enter_kernel
17836+#endif
17837 movq %rsp,%rdi /* pt_regs pointer */
17838 movq ORIG_RAX(%rsp),%rsi /* get error code */
17839 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17840 call \do_sym
17841 jmp error_exit /* %ebx: no swapgs flag */
17842 CFI_ENDPROC
17843-END(\sym)
17844+ENDPROC(\sym)
17845 .endm
17846
17847 /* error code is on the stack already */
17848@@ -1274,13 +1633,23 @@ ENTRY(\sym)
17849 call save_paranoid
17850 DEFAULT_FRAME 0
17851 TRACE_IRQS_OFF
17852+#ifdef CONFIG_PAX_MEMORY_UDEREF
17853+ testb $3, CS(%rsp)
17854+ jnz 1f
17855+ pax_enter_kernel
17856+ jmp 2f
17857+1: pax_enter_kernel_user
17858+2:
17859+#else
17860+ pax_enter_kernel
17861+#endif
17862 movq %rsp,%rdi /* pt_regs pointer */
17863 movq ORIG_RAX(%rsp),%rsi /* get error code */
17864 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17865 call \do_sym
17866 jmp paranoid_exit /* %ebx: no swapgs flag */
17867 CFI_ENDPROC
17868-END(\sym)
17869+ENDPROC(\sym)
17870 .endm
17871
17872 zeroentry divide_error do_divide_error
17873@@ -1310,9 +1679,10 @@ gs_change:
17874 2: mfence /* workaround */
17875 SWAPGS
17876 popfq_cfi
17877+ pax_force_retaddr
17878 ret
17879 CFI_ENDPROC
17880-END(native_load_gs_index)
17881+ENDPROC(native_load_gs_index)
17882
17883 _ASM_EXTABLE(gs_change,bad_gs)
17884 .section .fixup,"ax"
17885@@ -1340,9 +1710,10 @@ ENTRY(call_softirq)
17886 CFI_DEF_CFA_REGISTER rsp
17887 CFI_ADJUST_CFA_OFFSET -8
17888 decl PER_CPU_VAR(irq_count)
17889+ pax_force_retaddr
17890 ret
17891 CFI_ENDPROC
17892-END(call_softirq)
17893+ENDPROC(call_softirq)
17894
17895 #ifdef CONFIG_XEN
17896 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17897@@ -1380,7 +1751,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17898 decl PER_CPU_VAR(irq_count)
17899 jmp error_exit
17900 CFI_ENDPROC
17901-END(xen_do_hypervisor_callback)
17902+ENDPROC(xen_do_hypervisor_callback)
17903
17904 /*
17905 * Hypervisor uses this for application faults while it executes.
17906@@ -1439,7 +1810,7 @@ ENTRY(xen_failsafe_callback)
17907 SAVE_ALL
17908 jmp error_exit
17909 CFI_ENDPROC
17910-END(xen_failsafe_callback)
17911+ENDPROC(xen_failsafe_callback)
17912
17913 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
17914 xen_hvm_callback_vector xen_evtchn_do_upcall
17915@@ -1488,16 +1859,31 @@ ENTRY(paranoid_exit)
17916 TRACE_IRQS_OFF_DEBUG
17917 testl %ebx,%ebx /* swapgs needed? */
17918 jnz paranoid_restore
17919- testl $3,CS(%rsp)
17920+ testb $3,CS(%rsp)
17921 jnz paranoid_userspace
17922+#ifdef CONFIG_PAX_MEMORY_UDEREF
17923+ pax_exit_kernel
17924+ TRACE_IRQS_IRETQ 0
17925+ SWAPGS_UNSAFE_STACK
17926+ RESTORE_ALL 8
17927+ pax_force_retaddr_bts
17928+ jmp irq_return
17929+#endif
17930 paranoid_swapgs:
17931+#ifdef CONFIG_PAX_MEMORY_UDEREF
17932+ pax_exit_kernel_user
17933+#else
17934+ pax_exit_kernel
17935+#endif
17936 TRACE_IRQS_IRETQ 0
17937 SWAPGS_UNSAFE_STACK
17938 RESTORE_ALL 8
17939 jmp irq_return
17940 paranoid_restore:
17941+ pax_exit_kernel
17942 TRACE_IRQS_IRETQ_DEBUG 0
17943 RESTORE_ALL 8
17944+ pax_force_retaddr_bts
17945 jmp irq_return
17946 paranoid_userspace:
17947 GET_THREAD_INFO(%rcx)
17948@@ -1526,7 +1912,7 @@ paranoid_schedule:
17949 TRACE_IRQS_OFF
17950 jmp paranoid_userspace
17951 CFI_ENDPROC
17952-END(paranoid_exit)
17953+ENDPROC(paranoid_exit)
17954
17955 /*
17956 * Exception entry point. This expects an error code/orig_rax on the stack.
17957@@ -1553,12 +1939,13 @@ ENTRY(error_entry)
17958 movq_cfi r14, R14+8
17959 movq_cfi r15, R15+8
17960 xorl %ebx,%ebx
17961- testl $3,CS+8(%rsp)
17962+ testb $3,CS+8(%rsp)
17963 je error_kernelspace
17964 error_swapgs:
17965 SWAPGS
17966 error_sti:
17967 TRACE_IRQS_OFF
17968+ pax_force_retaddr_bts
17969 ret
17970
17971 /*
17972@@ -1585,7 +1972,7 @@ bstep_iret:
17973 movq %rcx,RIP+8(%rsp)
17974 jmp error_swapgs
17975 CFI_ENDPROC
17976-END(error_entry)
17977+ENDPROC(error_entry)
17978
17979
17980 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17981@@ -1605,7 +1992,7 @@ ENTRY(error_exit)
17982 jnz retint_careful
17983 jmp retint_swapgs
17984 CFI_ENDPROC
17985-END(error_exit)
17986+ENDPROC(error_exit)
17987
17988 /*
17989 * Test if a given stack is an NMI stack or not.
17990@@ -1663,9 +2050,11 @@ ENTRY(nmi)
17991 * If %cs was not the kernel segment, then the NMI triggered in user
17992 * space, which means it is definitely not nested.
17993 */
17994+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
17995+ je 1f
17996 cmpl $__KERNEL_CS, 16(%rsp)
17997 jne first_nmi
17998-
17999+1:
18000 /*
18001 * Check the special variable on the stack to see if NMIs are
18002 * executing.
18003@@ -1824,6 +2213,17 @@ end_repeat_nmi:
18004 */
18005 movq %cr2, %r12
18006
18007+#ifdef CONFIG_PAX_MEMORY_UDEREF
18008+ testb $3, CS(%rsp)
18009+ jnz 1f
18010+ pax_enter_kernel
18011+ jmp 2f
18012+1: pax_enter_kernel_user
18013+2:
18014+#else
18015+ pax_enter_kernel
18016+#endif
18017+
18018 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
18019 movq %rsp,%rdi
18020 movq $-1,%rsi
18021@@ -1839,21 +2239,32 @@ end_repeat_nmi:
18022 testl %ebx,%ebx /* swapgs needed? */
18023 jnz nmi_restore
18024 nmi_swapgs:
18025+#ifdef CONFIG_PAX_MEMORY_UDEREF
18026+ pax_exit_kernel_user
18027+#else
18028+ pax_exit_kernel
18029+#endif
18030 SWAPGS_UNSAFE_STACK
18031+ RESTORE_ALL 8
18032+ /* Clear the NMI executing stack variable */
18033+ movq $0, 10*8(%rsp)
18034+ jmp irq_return
18035 nmi_restore:
18036+ pax_exit_kernel
18037 RESTORE_ALL 8
18038+ pax_force_retaddr_bts
18039 /* Clear the NMI executing stack variable */
18040 movq $0, 10*8(%rsp)
18041 jmp irq_return
18042 CFI_ENDPROC
18043-END(nmi)
18044+ENDPROC(nmi)
18045
18046 ENTRY(ignore_sysret)
18047 CFI_STARTPROC
18048 mov $-ENOSYS,%eax
18049 sysret
18050 CFI_ENDPROC
18051-END(ignore_sysret)
18052+ENDPROC(ignore_sysret)
18053
18054 /*
18055 * End of kprobes section
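Note on two recurring micro-changes in this file: testl $3 on the saved CS becomes testb, since only the two RPL bits distinguish a kernel frame from a user frame, and the NMI nesting test must also accept __KERNEXEC_KERNEL_CS once the kernel can legitimately run under the alternate code segment. A hedged C restatement (function names are illustrative):

#define SEGMENT_RPL_MASK 3UL

static int frame_is_user_sketch(unsigned long saved_cs)
{
	return (saved_cs & SEGMENT_RPL_MASK) != 0;	/* testb $3, CS(%rsp) */
}

static int nmi_nested_in_kernel_sketch(unsigned long saved_cs,
				       unsigned long kernel_cs,
				       unsigned long kernexec_cs)
{
	/* the nmi path above: either kernel CS counts as "in kernel" */
	return saved_cs == kernel_cs || saved_cs == kernexec_cs;
}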
18056diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
18057index 1d41402..af9a46a 100644
18058--- a/arch/x86/kernel/ftrace.c
18059+++ b/arch/x86/kernel/ftrace.c
18060@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
18061 {
18062 unsigned char replaced[MCOUNT_INSN_SIZE];
18063
18064+ ip = ktla_ktva(ip);
18065+
18066 /*
18067 * Note: Due to modules and __init, code can
18068 * disappear and change, we need to protect against faulting
18069@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
18070 unsigned char old[MCOUNT_INSN_SIZE], *new;
18071 int ret;
18072
18073- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
18074+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
18075 new = ftrace_call_replace(ip, (unsigned long)func);
18076
18077 /* See comment above by declaration of modifying_ftrace_code */
18078@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
18079 /* Also update the regs callback function */
18080 if (!ret) {
18081 ip = (unsigned long)(&ftrace_regs_call);
18082- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
18083+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
18084 new = ftrace_call_replace(ip, (unsigned long)func);
18085 ret = ftrace_modify_code(ip, old, new);
18086 }
18087@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
18088 * kernel identity mapping to modify code.
18089 */
18090 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
18091- ip = (unsigned long)__va(__pa(ip));
18092+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
18093
18094 return probe_kernel_write((void *)ip, val, size);
18095 }
18096@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
18097 unsigned char replaced[MCOUNT_INSN_SIZE];
18098 unsigned char brk = BREAKPOINT_INSTRUCTION;
18099
18100- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
18101+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
18102 return -EFAULT;
18103
18104 /* Make sure it is what we expect it to be */
18105@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
18106 return ret;
18107
18108 fail_update:
18109- probe_kernel_write((void *)ip, &old_code[0], 1);
18110+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
18111 goto out;
18112 }
18113
18114@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
18115 {
18116 unsigned char code[MCOUNT_INSN_SIZE];
18117
18118+ ip = ktla_ktva(ip);
18119+
18120 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
18121 return -EFAULT;
18122
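Note: every ftrace read or write of kernel text gains a ktla_ktva() conversion above because, with KERNEXEC on i386, the text executes under a code segment whose non-zero base shifts it in linear address space: the address an instruction runs at is not the address it can be read or patched at through %ds. A hedged sketch of the conversion; the exact constants are an assumption based on the i386 KERNEXEC layout, and on configurations without the shift the macro is the identity:

#ifdef CONFIG_PAX_KERNEXEC
/* assumed i386 form: undo the code-segment base shift */
#define ktla_ktva_sketch(addr)	((addr) + LOAD_PHYSICAL_ADDR + __PAGE_OFFSET)
#else
#define ktla_ktva_sketch(addr)	(addr)
#endif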
18123diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
18124index c18f59d..9c0c9f6 100644
18125--- a/arch/x86/kernel/head32.c
18126+++ b/arch/x86/kernel/head32.c
18127@@ -18,6 +18,7 @@
18128 #include <asm/io_apic.h>
18129 #include <asm/bios_ebda.h>
18130 #include <asm/tlbflush.h>
18131+#include <asm/boot.h>
18132
18133 static void __init i386_default_early_setup(void)
18134 {
18135@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
18136
18137 void __init i386_start_kernel(void)
18138 {
18139- memblock_reserve(__pa_symbol(&_text),
18140- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
18141+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
18142
18143 #ifdef CONFIG_BLK_DEV_INITRD
18144 /* Reserve INITRD */
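Note: a brief sketch of why the hunk above drops __pa_symbol(&_text): once KERNEXEC rebases the text segment, __pa_symbol(&_text) no longer equals the physical load address, while LOAD_PHYSICAL_ADDR (from asm/boot.h, hence the new include) always does, keeping the reservation anchored to where the image actually sits:

	/* reserve [LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop)) - the
	 * whole loaded image - independently of where _text is linked */
	memblock_reserve(LOAD_PHYSICAL_ADDR,
			 __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);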
18145diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
18146index 4dac2f6..bc6a335 100644
18147--- a/arch/x86/kernel/head_32.S
18148+++ b/arch/x86/kernel/head_32.S
18149@@ -26,6 +26,12 @@
18150 /* Physical address */
18151 #define pa(X) ((X) - __PAGE_OFFSET)
18152
18153+#ifdef CONFIG_PAX_KERNEXEC
18154+#define ta(X) (X)
18155+#else
18156+#define ta(X) ((X) - __PAGE_OFFSET)
18157+#endif
18158+
18159 /*
18160 * References to members of the new_cpu_data structure.
18161 */
18162@@ -55,11 +61,7 @@
18163 * and small than max_low_pfn, otherwise will waste some page table entries
18164 */
18165
18166-#if PTRS_PER_PMD > 1
18167-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
18168-#else
18169-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
18170-#endif
18171+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
18172
18173 /* Number of possible pages in the lowmem region */
18174 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
18175@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
18176 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18177
18178 /*
18179+ * Real beginning of normal "text" segment
18180+ */
18181+ENTRY(stext)
18182+ENTRY(_stext)
18183+
18184+/*
18185 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
18186 * %esi points to the real-mode code as a 32-bit pointer.
18187 * CS and DS must be 4 GB flat segments, but we don't depend on
18188@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18189 * can.
18190 */
18191 __HEAD
18192+
18193+#ifdef CONFIG_PAX_KERNEXEC
18194+ jmp startup_32
18195+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
18196+.fill PAGE_SIZE-5,1,0xcc
18197+#endif
18198+
18199 ENTRY(startup_32)
18200 movl pa(stack_start),%ecx
18201
18202@@ -106,6 +121,59 @@ ENTRY(startup_32)
18203 2:
18204 leal -__PAGE_OFFSET(%ecx),%esp
18205
18206+#ifdef CONFIG_SMP
18207+ movl $pa(cpu_gdt_table),%edi
18208+ movl $__per_cpu_load,%eax
18209+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
18210+ rorl $16,%eax
18211+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
18212+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
18213+ movl $__per_cpu_end - 1,%eax
18214+ subl $__per_cpu_start,%eax
18215+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
18216+#endif
18217+
18218+#ifdef CONFIG_PAX_MEMORY_UDEREF
18219+ movl $NR_CPUS,%ecx
18220+ movl $pa(cpu_gdt_table),%edi
18221+1:
18222+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
18223+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
18224+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
18225+ addl $PAGE_SIZE_asm,%edi
18226+ loop 1b
18227+#endif
18228+
18229+#ifdef CONFIG_PAX_KERNEXEC
18230+ movl $pa(boot_gdt),%edi
18231+ movl $__LOAD_PHYSICAL_ADDR,%eax
18232+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
18233+ rorl $16,%eax
18234+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
18235+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
18236+ rorl $16,%eax
18237+
18238+ ljmp $(__BOOT_CS),$1f
18239+1:
18240+
18241+ movl $NR_CPUS,%ecx
18242+ movl $pa(cpu_gdt_table),%edi
18243+ addl $__PAGE_OFFSET,%eax
18244+1:
18245+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
18246+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
18247+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
18248+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
18249+ rorl $16,%eax
18250+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
18251+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
18252+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
18253+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
18254+ rorl $16,%eax
18255+ addl $PAGE_SIZE_asm,%edi
18256+ loop 1b
18257+#endif
18258+
18259 /*
18260 * Clear BSS first so that there are no surprises...
18261 */
18262@@ -196,8 +264,11 @@ ENTRY(startup_32)
18263 movl %eax, pa(max_pfn_mapped)
18264
18265 /* Do early initialization of the fixmap area */
18266- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
18267- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
18268+#ifdef CONFIG_COMPAT_VDSO
18269+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
18270+#else
18271+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
18272+#endif
18273 #else /* Not PAE */
18274
18275 page_pde_offset = (__PAGE_OFFSET >> 20);
18276@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18277 movl %eax, pa(max_pfn_mapped)
18278
18279 /* Do early initialization of the fixmap area */
18280- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
18281- movl %eax,pa(initial_page_table+0xffc)
18282+#ifdef CONFIG_COMPAT_VDSO
18283+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
18284+#else
18285+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
18286+#endif
18287 #endif
18288
18289 #ifdef CONFIG_PARAVIRT
18290@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18291 cmpl $num_subarch_entries, %eax
18292 jae bad_subarch
18293
18294- movl pa(subarch_entries)(,%eax,4), %eax
18295- subl $__PAGE_OFFSET, %eax
18296- jmp *%eax
18297+ jmp *pa(subarch_entries)(,%eax,4)
18298
18299 bad_subarch:
18300 WEAK(lguest_entry)
18301@@ -256,10 +328,10 @@ WEAK(xen_entry)
18302 __INITDATA
18303
18304 subarch_entries:
18305- .long default_entry /* normal x86/PC */
18306- .long lguest_entry /* lguest hypervisor */
18307- .long xen_entry /* Xen hypervisor */
18308- .long default_entry /* Moorestown MID */
18309+ .long ta(default_entry) /* normal x86/PC */
18310+ .long ta(lguest_entry) /* lguest hypervisor */
18311+ .long ta(xen_entry) /* Xen hypervisor */
18312+ .long ta(default_entry) /* Moorestown MID */
18313 num_subarch_entries = (. - subarch_entries) / 4
18314 .previous
18315 #else
18316@@ -316,6 +388,7 @@ default_entry:
18317 movl pa(mmu_cr4_features),%eax
18318 movl %eax,%cr4
18319
18320+#ifdef CONFIG_X86_PAE
18321 testb $X86_CR4_PAE, %al # check if PAE is enabled
18322 jz 6f
18323
18324@@ -344,6 +417,9 @@ default_entry:
18325 /* Make changes effective */
18326 wrmsr
18327
18328+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18329+#endif
18330+
18331 6:
18332
18333 /*
18334@@ -442,14 +518,20 @@ is386: movl $2,%ecx # set MP
18335 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18336 movl %eax,%ss # after changing gdt.
18337
18338- movl $(__USER_DS),%eax # DS/ES contains default USER segment
18339+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18340 movl %eax,%ds
18341 movl %eax,%es
18342
18343 movl $(__KERNEL_PERCPU), %eax
18344 movl %eax,%fs # set this cpu's percpu
18345
18346+#ifdef CONFIG_CC_STACKPROTECTOR
18347 movl $(__KERNEL_STACK_CANARY),%eax
18348+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18349+ movl $(__USER_DS),%eax
18350+#else
18351+ xorl %eax,%eax
18352+#endif
18353 movl %eax,%gs
18354
18355 xorl %eax,%eax # Clear LDT
18356@@ -526,8 +608,11 @@ setup_once:
18357 * relocation. Manually set base address in stack canary
18358 * segment descriptor.
18359 */
18360- movl $gdt_page,%eax
18361+ movl $cpu_gdt_table,%eax
18362 movl $stack_canary,%ecx
18363+#ifdef CONFIG_SMP
18364+ addl $__per_cpu_load,%ecx
18365+#endif
18366 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18367 shrl $16, %ecx
18368 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18369@@ -558,7 +643,7 @@ ENDPROC(early_idt_handlers)
18370 /* This is global to keep gas from relaxing the jumps */
18371 ENTRY(early_idt_handler)
18372 cld
18373- cmpl $2,%ss:early_recursion_flag
18374+ cmpl $1,%ss:early_recursion_flag
18375 je hlt_loop
18376 incl %ss:early_recursion_flag
18377
18378@@ -596,8 +681,8 @@ ENTRY(early_idt_handler)
18379 pushl (20+6*4)(%esp) /* trapno */
18380 pushl $fault_msg
18381 call printk
18382-#endif
18383 call dump_stack
18384+#endif
18385 hlt_loop:
18386 hlt
18387 jmp hlt_loop
18388@@ -616,8 +701,11 @@ ENDPROC(early_idt_handler)
18389 /* This is the default interrupt "handler" :-) */
18390 ALIGN
18391 ignore_int:
18392- cld
18393 #ifdef CONFIG_PRINTK
18394+ cmpl $2,%ss:early_recursion_flag
18395+ je hlt_loop
18396+ incl %ss:early_recursion_flag
18397+ cld
18398 pushl %eax
18399 pushl %ecx
18400 pushl %edx
18401@@ -626,9 +714,6 @@ ignore_int:
18402 movl $(__KERNEL_DS),%eax
18403 movl %eax,%ds
18404 movl %eax,%es
18405- cmpl $2,early_recursion_flag
18406- je hlt_loop
18407- incl early_recursion_flag
18408 pushl 16(%esp)
18409 pushl 24(%esp)
18410 pushl 32(%esp)
18411@@ -662,29 +747,43 @@ ENTRY(setup_once_ref)
18412 /*
18413 * BSS section
18414 */
18415-__PAGE_ALIGNED_BSS
18416- .align PAGE_SIZE
18417 #ifdef CONFIG_X86_PAE
18418+.section .initial_pg_pmd,"a",@progbits
18419 initial_pg_pmd:
18420 .fill 1024*KPMDS,4,0
18421 #else
18422+.section .initial_page_table,"a",@progbits
18423 ENTRY(initial_page_table)
18424 .fill 1024,4,0
18425 #endif
18426+.section .initial_pg_fixmap,"a",@progbits
18427 initial_pg_fixmap:
18428 .fill 1024,4,0
18429+.section .empty_zero_page,"a",@progbits
18430 ENTRY(empty_zero_page)
18431 .fill 4096,1,0
18432+.section .swapper_pg_dir,"a",@progbits
18433 ENTRY(swapper_pg_dir)
18434+#ifdef CONFIG_X86_PAE
18435+ .fill 4,8,0
18436+#else
18437 .fill 1024,4,0
18438+#endif
18439+
18440+/*
18441+ * The IDT has to be page-aligned to simplify the Pentium
18442+ * F0 0F bug workaround. We have a special link segment
18443+ * for this.
18444+ */
18445+.section .idt,"a",@progbits
18446+ENTRY(idt_table)
18447+ .fill 256,8,0
18448
18449 /*
18450 * This starts the data section.
18451 */
18452 #ifdef CONFIG_X86_PAE
18453-__PAGE_ALIGNED_DATA
18454- /* Page-aligned for the benefit of paravirt? */
18455- .align PAGE_SIZE
18456+.section .initial_page_table,"a",@progbits
18457 ENTRY(initial_page_table)
18458 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18459 # if KPMDS == 3
18460@@ -703,12 +802,20 @@ ENTRY(initial_page_table)
18461 # error "Kernel PMDs should be 1, 2 or 3"
18462 # endif
18463 .align PAGE_SIZE /* needs to be page-sized too */
18464+
18465+#ifdef CONFIG_PAX_PER_CPU_PGD
18466+ENTRY(cpu_pgd)
18467+ .rept NR_CPUS
18468+ .fill 4,8,0
18469+ .endr
18470+#endif
18471+
18472 #endif
18473
18474 .data
18475 .balign 4
18476 ENTRY(stack_start)
18477- .long init_thread_union+THREAD_SIZE
18478+ .long init_thread_union+THREAD_SIZE-8
18479
18480 __INITRODATA
18481 int_msg:
18482@@ -736,7 +843,7 @@ fault_msg:
18483 * segment size, and 32-bit linear address value:
18484 */
18485
18486- .data
18487+.section .rodata,"a",@progbits
18488 .globl boot_gdt_descr
18489 .globl idt_descr
18490
18491@@ -745,7 +852,7 @@ fault_msg:
18492 .word 0 # 32 bit align gdt_desc.address
18493 boot_gdt_descr:
18494 .word __BOOT_DS+7
18495- .long boot_gdt - __PAGE_OFFSET
18496+ .long pa(boot_gdt)
18497
18498 .word 0 # 32-bit align idt_desc.address
18499 idt_descr:
18500@@ -756,7 +863,7 @@ idt_descr:
18501 .word 0 # 32 bit align gdt_desc.address
18502 ENTRY(early_gdt_descr)
18503 .word GDT_ENTRIES*8-1
18504- .long gdt_page /* Overwritten for secondary CPUs */
18505+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
18506
18507 /*
18508 * The boot_gdt must mirror the equivalent in setup.S and is
18509@@ -765,5 +872,65 @@ ENTRY(early_gdt_descr)
18510 .align L1_CACHE_BYTES
18511 ENTRY(boot_gdt)
18512 .fill GDT_ENTRY_BOOT_CS,8,0
18513- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18514- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18515+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18516+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18517+
18518+ .align PAGE_SIZE_asm
18519+ENTRY(cpu_gdt_table)
18520+ .rept NR_CPUS
18521+ .quad 0x0000000000000000 /* NULL descriptor */
18522+ .quad 0x0000000000000000 /* 0x0b reserved */
18523+ .quad 0x0000000000000000 /* 0x13 reserved */
18524+ .quad 0x0000000000000000 /* 0x1b reserved */
18525+
18526+#ifdef CONFIG_PAX_KERNEXEC
18527+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18528+#else
18529+ .quad 0x0000000000000000 /* 0x20 unused */
18530+#endif
18531+
18532+ .quad 0x0000000000000000 /* 0x28 unused */
18533+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18534+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18535+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18536+ .quad 0x0000000000000000 /* 0x4b reserved */
18537+ .quad 0x0000000000000000 /* 0x53 reserved */
18538+ .quad 0x0000000000000000 /* 0x5b reserved */
18539+
18540+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18541+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18542+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18543+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18544+
18545+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18546+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18547+
18548+ /*
18549+ * Segments used for calling PnP BIOS have byte granularity.
18550+ * The code segments and data segments have fixed 64k limits,
18551+ * the transfer segment sizes are set at run time.
18552+ */
18553+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
18554+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
18555+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
18556+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
18557+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
18558+
18559+ /*
18560+ * The APM segments have byte granularity and their bases
18561+ * are set at run time. All have 64k limits.
18562+ */
18563+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18564+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18565+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
18566+
18567+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18568+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18569+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18570+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18571+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18572+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18573+
18574+ /* Be sure this is zeroed to avoid false validations in Xen */
18575+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18576+ .endr
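
The 0x00cf9a→0x00cf9b and 0x00cf92→0x00cf93 edits above (and the matching quads in cpu_gdt_table) flip bit 40 of each descriptor, the "accessed" bit. Once the GDT lives in read-only memory under KERNEXEC, the CPU can no longer set that bit itself on first segment load, so the patch pre-sets it. A standalone sketch that decodes the quads per the SDM descriptor layout (the helper is ours, not from the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Decode one legacy x86 segment descriptor. */
    static void decode(uint64_t d)
    {
        uint32_t base  = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
        uint32_t limit = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
        unsigned type  = (d >> 40) & 0xf;  /* bit 0 of type = "accessed" */
        unsigned dpl   = (d >> 45) & 0x3;
        unsigned g     = (d >> 55) & 0x1;  /* G=1: limit counted in 4K pages */

        printf("base=%#010x limit=%#07x%s type=%#x dpl=%u accessed=%u\n",
               base, limit, g ? " (pages)" : "", type, dpl, type & 1);
    }

    int main(void)
    {
        decode(0x00cf9a000000ffffULL); /* vanilla kernel code: accessed=0 */
        decode(0x00cf9b000000ffffULL); /* patched kernel code: accessed=1 */
        decode(0x00cff3000000ffffULL); /* user data, DPL 3 */
        return 0;
    }
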
18577diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18578index 94bf9cc..400455a 100644
18579--- a/arch/x86/kernel/head_64.S
18580+++ b/arch/x86/kernel/head_64.S
18581@@ -20,6 +20,8 @@
18582 #include <asm/processor-flags.h>
18583 #include <asm/percpu.h>
18584 #include <asm/nops.h>
18585+#include <asm/cpufeature.h>
18586+#include <asm/alternative-asm.h>
18587
18588 #ifdef CONFIG_PARAVIRT
18589 #include <asm/asm-offsets.h>
18590@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18591 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18592 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18593 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18594+L4_VMALLOC_START = pgd_index(VMALLOC_START)
18595+L3_VMALLOC_START = pud_index(VMALLOC_START)
18596+L4_VMALLOC_END = pgd_index(VMALLOC_END)
18597+L3_VMALLOC_END = pud_index(VMALLOC_END)
18598+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18599+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18600
18601 .text
18602 __HEAD
18603@@ -88,35 +96,23 @@ startup_64:
18604 */
18605 addq %rbp, init_level4_pgt + 0(%rip)
18606 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18607+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18608+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18609+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18610 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18611
18612 addq %rbp, level3_ident_pgt + 0(%rip)
18613+#ifndef CONFIG_XEN
18614+ addq %rbp, level3_ident_pgt + 8(%rip)
18615+#endif
18616
18617- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18618- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18619+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18620+
18621+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18622+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18623
18624 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18625-
18626- /* Add an Identity mapping if I am above 1G */
18627- leaq _text(%rip), %rdi
18628- andq $PMD_PAGE_MASK, %rdi
18629-
18630- movq %rdi, %rax
18631- shrq $PUD_SHIFT, %rax
18632- andq $(PTRS_PER_PUD - 1), %rax
18633- jz ident_complete
18634-
18635- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18636- leaq level3_ident_pgt(%rip), %rbx
18637- movq %rdx, 0(%rbx, %rax, 8)
18638-
18639- movq %rdi, %rax
18640- shrq $PMD_SHIFT, %rax
18641- andq $(PTRS_PER_PMD - 1), %rax
18642- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18643- leaq level2_spare_pgt(%rip), %rbx
18644- movq %rdx, 0(%rbx, %rax, 8)
18645-ident_complete:
18646+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18647
18648 /*
18649 * Fixup the kernel text+data virtual addresses. Note that
18650@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
18651 * after the boot processor executes this code.
18652 */
18653
18654- /* Enable PAE mode and PGE */
18655- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18656+ /* Enable PAE mode and PSE/PGE */
18657+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18658 movq %rax, %cr4
18659
18660 /* Setup early boot stage 4 level pagetables. */
18661@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
18662 movl $MSR_EFER, %ecx
18663 rdmsr
18664 btsl $_EFER_SCE, %eax /* Enable System Call */
18665- btl $20,%edi /* No Execute supported? */
18666+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18667 jnc 1f
18668 btsl $_EFER_NX, %eax
18669+ leaq init_level4_pgt(%rip), %rdi
18670+#ifndef CONFIG_EFI
18671+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18672+#endif
18673+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18674+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18675+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18676+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
18677 1: wrmsr /* Make changes effective */
18678
18679 /* Setup cr0 */
18680@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
18681 * jump. In addition we need to ensure %cs is set so we make this
18682 * a far return.
18683 */
18684+ pax_set_fptr_mask
18685 movq initial_code(%rip),%rax
18686 pushq $0 # fake return address to stop unwinder
18687 pushq $__KERNEL_CS # set correct cs
18688@@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
18689 bad_address:
18690 jmp bad_address
18691
18692- .section ".init.text","ax"
18693+ __INIT
18694 .globl early_idt_handlers
18695 early_idt_handlers:
18696 # 104(%rsp) %rflags
18697@@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
18698 addq $16,%rsp # drop vector number and error code
18699 decl early_recursion_flag(%rip)
18700 INTERRUPT_RETURN
18701+ .previous
18702
18703+ __INITDATA
18704 .balign 4
18705 early_recursion_flag:
18706 .long 0
18707+ .previous
18708
18709+ .section .rodata,"a",@progbits
18710 #ifdef CONFIG_EARLY_PRINTK
18711 early_idt_msg:
18712 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18713@@ -360,6 +369,7 @@ early_idt_ripmsg:
18714 #endif /* CONFIG_EARLY_PRINTK */
18715 .previous
18716
18717+ .section .rodata,"a",@progbits
18718 #define NEXT_PAGE(name) \
18719 .balign PAGE_SIZE; \
18720 ENTRY(name)
18721@@ -372,7 +382,6 @@ ENTRY(name)
18722 i = i + 1 ; \
18723 .endr
18724
18725- .data
18726 /*
18727 * This default setting generates an ident mapping at address 0x100000
18728 * and a mapping for the kernel that precisely maps virtual address
18729@@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
18730 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18731 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18732 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18733+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
18734+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18735+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
18736+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18737+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18738+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18739 .org init_level4_pgt + L4_START_KERNEL*8, 0
18740 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18741 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18742
18743+#ifdef CONFIG_PAX_PER_CPU_PGD
18744+NEXT_PAGE(cpu_pgd)
18745+ .rept NR_CPUS
18746+ .fill 512,8,0
18747+ .endr
18748+#endif
18749+
18750 NEXT_PAGE(level3_ident_pgt)
18751 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18752+#ifdef CONFIG_XEN
18753 .fill 511,8,0
18754+#else
18755+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18756+ .fill 510,8,0
18757+#endif
18758+
18759+NEXT_PAGE(level3_vmalloc_start_pgt)
18760+ .fill 512,8,0
18761+
18762+NEXT_PAGE(level3_vmalloc_end_pgt)
18763+ .fill 512,8,0
18764+
18765+NEXT_PAGE(level3_vmemmap_pgt)
18766+ .fill L3_VMEMMAP_START,8,0
18767+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18768
18769 NEXT_PAGE(level3_kernel_pgt)
18770 .fill L3_START_KERNEL,8,0
18771@@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
18772 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18773 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18774
18775+NEXT_PAGE(level2_vmemmap_pgt)
18776+ .fill 512,8,0
18777+
18778 NEXT_PAGE(level2_fixmap_pgt)
18779- .fill 506,8,0
18780- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18781- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18782- .fill 5,8,0
18783+ .fill 507,8,0
18784+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18785+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18786+ .fill 4,8,0
18787
18788-NEXT_PAGE(level1_fixmap_pgt)
18789+NEXT_PAGE(level1_vsyscall_pgt)
18790 .fill 512,8,0
18791
18792-NEXT_PAGE(level2_ident_pgt)
18793- /* Since I easily can, map the first 1G.
18794+ /* Since I easily can, map the first 2G.
18795 * Don't set NX because code runs from these pages.
18796 */
18797- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18798+NEXT_PAGE(level2_ident_pgt)
18799+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18800
18801 NEXT_PAGE(level2_kernel_pgt)
18802 /*
18803@@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
18804 * If you want to increase this then increase MODULES_VADDR
18805 * too.)
18806 */
18807- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18808- KERNEL_IMAGE_SIZE/PMD_SIZE)
18809-
18810-NEXT_PAGE(level2_spare_pgt)
18811- .fill 512, 8, 0
18812+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18813
18814 #undef PMDS
18815 #undef NEXT_PAGE
18816
18817- .data
18818+ .align PAGE_SIZE
18819+ENTRY(cpu_gdt_table)
18820+ .rept NR_CPUS
18821+ .quad 0x0000000000000000 /* NULL descriptor */
18822+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18823+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
18824+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
18825+ .quad 0x00cffb000000ffff /* __USER32_CS */
18826+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18827+ .quad 0x00affb000000ffff /* __USER_CS */
18828+
18829+#ifdef CONFIG_PAX_KERNEXEC
18830+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18831+#else
18832+ .quad 0x0 /* unused */
18833+#endif
18834+
18835+ .quad 0,0 /* TSS */
18836+ .quad 0,0 /* LDT */
18837+ .quad 0,0,0 /* three TLS descriptors */
18838+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
18839+ /* asm/segment.h:GDT_ENTRIES must match this */
18840+
18841+ /* zero the remaining page */
18842+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18843+ .endr
18844+
18845 .align 16
18846 .globl early_gdt_descr
18847 early_gdt_descr:
18848 .word GDT_ENTRIES*8-1
18849 early_gdt_descr_base:
18850- .quad INIT_PER_CPU_VAR(gdt_page)
18851+ .quad cpu_gdt_table
18852
18853 ENTRY(phys_base)
18854 /* This must match the first entry in level2_kernel_pgt */
18855 .quad 0x0000000000000000
18856
18857 #include "../../x86/xen/xen-head.S"
18858-
18859- .section .bss, "aw", @nobits
18860+
18861+ .section .rodata,"a",@progbits
18862 .align L1_CACHE_BYTES
18863 ENTRY(idt_table)
18864- .skip IDT_ENTRIES * 16
18865+ .fill 512,8,0
18866
18867 .align L1_CACHE_BYTES
18868 ENTRY(nmi_idt_table)
18869- .skip IDT_ENTRIES * 16
18870+ .fill 512,8,0
18871
18872 __PAGE_ALIGNED_BSS
18873 .align PAGE_SIZE
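
The btsq $_PAGE_BIT_NX lines in the hunk above set bit 63 (NX) directly on the top-level page-table entries for the vmalloc/vmemmap regions once EFER.NX is confirmed available, and the new L4_VMALLOC_*/L4_VMEMMAP_* symbols are simply pgd_index() of the region bases. A quick userspace check of that arithmetic, with the base addresses assumed from the v3.7 x86_64 memory map:

    #include <stdio.h>

    #define PGDIR_SHIFT  39
    #define PTRS_PER_PGD 512
    #define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

    int main(void)
    {
        printf("L4_VMALLOC_START = %lu\n", pgd_index(0xffffc90000000000UL)); /* 402 */
        printf("L4_VMEMMAP_START = %lu\n", pgd_index(0xffffea0000000000UL)); /* 468 */
        printf("L4_START_KERNEL  = %lu\n", pgd_index(0xffffffff80000000UL)); /* 511 */

        unsigned long pgd_entry = 0x1234063UL; /* made-up PGD entry value */
        pgd_entry |= 1UL << 63;                /* what "btsq $63, entry" does */
        printf("entry with NX set: %#lx\n", pgd_entry);
        return 0;
    }
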
18874diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18875index 9c3bd4a..e1d9b35 100644
18876--- a/arch/x86/kernel/i386_ksyms_32.c
18877+++ b/arch/x86/kernel/i386_ksyms_32.c
18878@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18879 EXPORT_SYMBOL(cmpxchg8b_emu);
18880 #endif
18881
18882+EXPORT_SYMBOL_GPL(cpu_gdt_table);
18883+
18884 /* Networking helper routines. */
18885 EXPORT_SYMBOL(csum_partial_copy_generic);
18886+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18887+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18888
18889 EXPORT_SYMBOL(__get_user_1);
18890 EXPORT_SYMBOL(__get_user_2);
18891@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18892
18893 EXPORT_SYMBOL(csum_partial);
18894 EXPORT_SYMBOL(empty_zero_page);
18895+
18896+#ifdef CONFIG_PAX_KERNEXEC
18897+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18898+#endif
18899diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18900index 675a050..95febfd 100644
18901--- a/arch/x86/kernel/i387.c
18902+++ b/arch/x86/kernel/i387.c
18903@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
18904 static inline bool interrupted_user_mode(void)
18905 {
18906 struct pt_regs *regs = get_irq_regs();
18907- return regs && user_mode_vm(regs);
18908+ return regs && user_mode(regs);
18909 }
18910
18911 /*
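
This user_mode_vm() → user_mode() conversion (repeated in the irq, kgdb and kprobes hunks below) tracks a PaX change elsewhere in the patch: its reworked user_mode() is meant to be correct even for VM86 frames, which makes the heavier helper redundant at these call sites. For reference, the vanilla v3.7-era 32-bit helpers look roughly like this (quoted from memory, so treat as approximate):

    /* arch/x86/include/asm/ptrace.h, CONFIG_X86_32 paths */
    static inline int user_mode(struct pt_regs *regs)
    {
        return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
    }

    static inline int user_mode_vm(struct pt_regs *regs)
    {
        return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK))
                >= USER_RPL;
    }
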
18912diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18913index 9a5c460..dc4374d 100644
18914--- a/arch/x86/kernel/i8259.c
18915+++ b/arch/x86/kernel/i8259.c
18916@@ -209,7 +209,7 @@ spurious_8259A_irq:
18917 "spurious 8259A interrupt: IRQ%d.\n", irq);
18918 spurious_irq_mask |= irqmask;
18919 }
18920- atomic_inc(&irq_err_count);
18921+ atomic_inc_unchecked(&irq_err_count);
18922 /*
18923 * Theoretically we do not have to handle this IRQ,
18924 * but in Linux this does not cause problems and is
18925diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18926index 8c96897..be66bfa 100644
18927--- a/arch/x86/kernel/ioport.c
18928+++ b/arch/x86/kernel/ioport.c
18929@@ -6,6 +6,7 @@
18930 #include <linux/sched.h>
18931 #include <linux/kernel.h>
18932 #include <linux/capability.h>
18933+#include <linux/security.h>
18934 #include <linux/errno.h>
18935 #include <linux/types.h>
18936 #include <linux/ioport.h>
18937@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18938
18939 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18940 return -EINVAL;
18941+#ifdef CONFIG_GRKERNSEC_IO
18942+ if (turn_on && grsec_disable_privio) {
18943+ gr_handle_ioperm();
18944+ return -EPERM;
18945+ }
18946+#endif
18947 if (turn_on && !capable(CAP_SYS_RAWIO))
18948 return -EPERM;
18949
18950@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18951 * because the ->io_bitmap_max value must match the bitmap
18952 * contents:
18953 */
18954- tss = &per_cpu(init_tss, get_cpu());
18955+ tss = init_tss + get_cpu();
18956
18957 if (turn_on)
18958 bitmap_clear(t->io_bitmap_ptr, from, num);
18959@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
18960 return -EINVAL;
18961 /* Trying to gain more privileges? */
18962 if (level > old) {
18963+#ifdef CONFIG_GRKERNSEC_IO
18964+ if (grsec_disable_privio) {
18965+ gr_handle_iopl();
18966+ return -EPERM;
18967+ }
18968+#endif
18969 if (!capable(CAP_SYS_RAWIO))
18970 return -EPERM;
18971 }
18972diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
18973index e4595f1..ee3bfb8 100644
18974--- a/arch/x86/kernel/irq.c
18975+++ b/arch/x86/kernel/irq.c
18976@@ -18,7 +18,7 @@
18977 #include <asm/mce.h>
18978 #include <asm/hw_irq.h>
18979
18980-atomic_t irq_err_count;
18981+atomic_unchecked_t irq_err_count;
18982
18983 /* Function pointer for generic interrupt vector handling */
18984 void (*x86_platform_ipi_callback)(void) = NULL;
18985@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
18986 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
18987 seq_printf(p, " Machine check polls\n");
18988 #endif
18989- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
18990+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
18991 #if defined(CONFIG_X86_IO_APIC)
18992- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
18993+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
18994 #endif
18995 return 0;
18996 }
18997@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
18998
18999 u64 arch_irq_stat(void)
19000 {
19001- u64 sum = atomic_read(&irq_err_count);
19002+ u64 sum = atomic_read_unchecked(&irq_err_count);
19003
19004 #ifdef CONFIG_X86_IO_APIC
19005- sum += atomic_read(&irq_mis_count);
19006+ sum += atomic_read_unchecked(&irq_mis_count);
19007 #endif
19008 return sum;
19009 }
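
atomic_unchecked_t is the opt-out type for PaX's REFCOUNT hardening: ordinary atomic_t operations gain overflow detection, so counters that may legitimately wrap, such as irq_err_count and irq_mis_count in the hunks above, are switched to the unchecked variants. A minimal runnable sketch of the assumed shape (x86 inline asm, no overflow trap):

    #include <stdio.h>

    typedef struct { int counter; } atomic_unchecked_t;

    /* like atomic_inc(), minus the overflow check REFCOUNT adds */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __asm__ __volatile__("lock incl %0" : "+m" (v->counter));
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return *(volatile const int *)&v->counter;
    }

    int main(void)
    {
        atomic_unchecked_t err = { 0 };
        atomic_inc_unchecked(&err);
        printf("irq_err_count = %d\n", atomic_read_unchecked(&err));
        return 0;
    }
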
19010diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
19011index 344faf8..355f60d 100644
19012--- a/arch/x86/kernel/irq_32.c
19013+++ b/arch/x86/kernel/irq_32.c
19014@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
19015 __asm__ __volatile__("andl %%esp,%0" :
19016 "=r" (sp) : "0" (THREAD_SIZE - 1));
19017
19018- return sp < (sizeof(struct thread_info) + STACK_WARN);
19019+ return sp < STACK_WARN;
19020 }
19021
19022 static void print_stack_overflow(void)
19023@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
19024 * per-CPU IRQ handling contexts (thread information and stack)
19025 */
19026 union irq_ctx {
19027- struct thread_info tinfo;
19028- u32 stack[THREAD_SIZE/sizeof(u32)];
19029+ unsigned long previous_esp;
19030+ u32 stack[THREAD_SIZE/sizeof(u32)];
19031 } __attribute__((aligned(THREAD_SIZE)));
19032
19033 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
19034@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
19035 static inline int
19036 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19037 {
19038- union irq_ctx *curctx, *irqctx;
19039+ union irq_ctx *irqctx;
19040 u32 *isp, arg1, arg2;
19041
19042- curctx = (union irq_ctx *) current_thread_info();
19043 irqctx = __this_cpu_read(hardirq_ctx);
19044
19045 /*
19046@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19047 * handler) we can't do that and just have to keep using the
19048 * current stack (which is the irq stack already after all)
19049 */
19050- if (unlikely(curctx == irqctx))
19051+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
19052 return 0;
19053
19054 /* build the stack frame on the IRQ stack */
19055- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19056- irqctx->tinfo.task = curctx->tinfo.task;
19057- irqctx->tinfo.previous_esp = current_stack_pointer;
19058+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19059+ irqctx->previous_esp = current_stack_pointer;
19060
19061- /* Copy the preempt_count so that the [soft]irq checks work. */
19062- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
19063+#ifdef CONFIG_PAX_MEMORY_UDEREF
19064+ __set_fs(MAKE_MM_SEG(0));
19065+#endif
19066
19067 if (unlikely(overflow))
19068 call_on_stack(print_stack_overflow, isp);
19069@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19070 : "0" (irq), "1" (desc), "2" (isp),
19071 "D" (desc->handle_irq)
19072 : "memory", "cc", "ecx");
19073+
19074+#ifdef CONFIG_PAX_MEMORY_UDEREF
19075+ __set_fs(current_thread_info()->addr_limit);
19076+#endif
19077+
19078 return 1;
19079 }
19080
19081@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19082 */
19083 void __cpuinit irq_ctx_init(int cpu)
19084 {
19085- union irq_ctx *irqctx;
19086-
19087 if (per_cpu(hardirq_ctx, cpu))
19088 return;
19089
19090- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
19091- THREADINFO_GFP,
19092- THREAD_SIZE_ORDER));
19093- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
19094- irqctx->tinfo.cpu = cpu;
19095- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
19096- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19097-
19098- per_cpu(hardirq_ctx, cpu) = irqctx;
19099-
19100- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
19101- THREADINFO_GFP,
19102- THREAD_SIZE_ORDER));
19103- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
19104- irqctx->tinfo.cpu = cpu;
19105- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19106-
19107- per_cpu(softirq_ctx, cpu) = irqctx;
19108+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
19109+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
19110+
19111+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
19112+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
19113
19114 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
19115 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
19116@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
19117 asmlinkage void do_softirq(void)
19118 {
19119 unsigned long flags;
19120- struct thread_info *curctx;
19121 union irq_ctx *irqctx;
19122 u32 *isp;
19123
19124@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
19125 local_irq_save(flags);
19126
19127 if (local_softirq_pending()) {
19128- curctx = current_thread_info();
19129 irqctx = __this_cpu_read(softirq_ctx);
19130- irqctx->tinfo.task = curctx->task;
19131- irqctx->tinfo.previous_esp = current_stack_pointer;
19132+ irqctx->previous_esp = current_stack_pointer;
19133
19134 /* build the stack frame on the softirq stack */
19135- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19136+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19137+
19138+#ifdef CONFIG_PAX_MEMORY_UDEREF
19139+ __set_fs(MAKE_MM_SEG(0));
19140+#endif
19141
19142 call_on_stack(__do_softirq, isp);
19143+
19144+#ifdef CONFIG_PAX_MEMORY_UDEREF
19145+ __set_fs(current_thread_info()->addr_limit);
19146+#endif
19147+
19148 /*
19149 * Shouldn't happen, we returned above if in_interrupt():
19150 */
19151@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
19152 if (unlikely(!desc))
19153 return false;
19154
19155- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
19156+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
19157 if (unlikely(overflow))
19158 print_stack_overflow();
19159 desc->handle_irq(irq, desc);
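
The irq_32.c rewrite above drops the struct thread_info copy that used to sit at the bottom of each IRQ stack; only the interrupted stack pointer (previous_esp) is stored there, and the frame handed to call_on_stack() moves down by 8 bytes. An unwinder recovers the chain by masking any in-IRQ esp down to the THREAD_SIZE boundary and dereferencing, which is what the reworked kernel_stack_pointer() in the ptrace.c hunk later in this patch does. A userspace model of the scheme (the layout mirrors the patch; the address is made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192UL              /* i386 default */

    union irq_ctx {                         /* patched layout: no thread_info */
        unsigned long previous_esp;
        uint32_t stack[THREAD_SIZE / sizeof(uint32_t)];
    };

    int main(void)
    {
        union irq_ctx *irqctx = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        unsigned long task_sp = 0xc12345f0UL;   /* pretend interrupted esp */

        if (!irqctx)
            return 1;
        irqctx->previous_esp = task_sp;         /* chain, as the hunk does */

        /* the IRQ frame starts 8 bytes below the top, per the patch */
        uint32_t *isp = (uint32_t *)((char *)irqctx + sizeof(*irqctx) - 8);

        /* unwind: mask down to the stack base, read the chained esp */
        unsigned long base = (unsigned long)isp & ~(THREAD_SIZE - 1);
        printf("previous esp = %#lx\n", *(unsigned long *)base);

        free(irqctx);
        return 0;
    }
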
19160diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
19161index d04d3ec..ea4b374 100644
19162--- a/arch/x86/kernel/irq_64.c
19163+++ b/arch/x86/kernel/irq_64.c
19164@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
19165 u64 estack_top, estack_bottom;
19166 u64 curbase = (u64)task_stack_page(current);
19167
19168- if (user_mode_vm(regs))
19169+ if (user_mode(regs))
19170 return;
19171
19172 if (regs->sp >= curbase + sizeof(struct thread_info) +
19173diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
19174index dc1404b..bbc43e7 100644
19175--- a/arch/x86/kernel/kdebugfs.c
19176+++ b/arch/x86/kernel/kdebugfs.c
19177@@ -27,7 +27,7 @@ struct setup_data_node {
19178 u32 len;
19179 };
19180
19181-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
19182+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
19183 size_t count, loff_t *ppos)
19184 {
19185 struct setup_data_node *node = file->private_data;
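
__size_overflow(3) marks parameter 3 (the count) for the PaX size_overflow GCC plugin, which instruments the arithmetic feeding that parameter and traps on wraparound. The annotation presumably reduces to a plain attribute and compiles away when the plugin is absent; a sketch with an assumed guard name:

    #include <stdio.h>
    #include <string.h>

    #ifdef SIZE_OVERFLOW_PLUGIN /* guard name assumed */
    # define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
    #else
    # define __size_overflow(...) /* plain compiler: annotation vanishes */
    #endif

    /* parameter 3 ("count") gets its arithmetic tracked for overflow */
    static size_t __size_overflow(3) copy_bounded(char *dst, const char *src,
                                                  size_t count)
    {
        memcpy(dst, src, count);
        return count;
    }

    int main(void)
    {
        char buf[8];
        printf("copied %zu bytes\n", copy_bounded(buf, "hi", 3));
        return 0;
    }
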
19186diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
19187index 836f832..a8bda67 100644
19188--- a/arch/x86/kernel/kgdb.c
19189+++ b/arch/x86/kernel/kgdb.c
19190@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
19191 #ifdef CONFIG_X86_32
19192 switch (regno) {
19193 case GDB_SS:
19194- if (!user_mode_vm(regs))
19195+ if (!user_mode(regs))
19196 *(unsigned long *)mem = __KERNEL_DS;
19197 break;
19198 case GDB_SP:
19199- if (!user_mode_vm(regs))
19200+ if (!user_mode(regs))
19201 *(unsigned long *)mem = kernel_stack_pointer(regs);
19202 break;
19203 case GDB_GS:
19204@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
19205 bp->attr.bp_addr = breakinfo[breakno].addr;
19206 bp->attr.bp_len = breakinfo[breakno].len;
19207 bp->attr.bp_type = breakinfo[breakno].type;
19208- info->address = breakinfo[breakno].addr;
19209+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
19210+ info->address = ktla_ktva(breakinfo[breakno].addr);
19211+ else
19212+ info->address = breakinfo[breakno].addr;
19213 info->len = breakinfo[breakno].len;
19214 info->type = breakinfo[breakno].type;
19215 val = arch_install_hw_breakpoint(bp);
19216@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
19217 case 'k':
19218 /* clear the trace bit */
19219 linux_regs->flags &= ~X86_EFLAGS_TF;
19220- atomic_set(&kgdb_cpu_doing_single_step, -1);
19221+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
19222
19223 /* set the trace bit if we're stepping */
19224 if (remcomInBuffer[0] == 's') {
19225 linux_regs->flags |= X86_EFLAGS_TF;
19226- atomic_set(&kgdb_cpu_doing_single_step,
19227+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
19228 raw_smp_processor_id());
19229 }
19230
19231@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
19232
19233 switch (cmd) {
19234 case DIE_DEBUG:
19235- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
19236+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
19237 if (user_mode(regs))
19238 return single_step_cont(regs, args);
19239 break;
19240@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
19241 #endif /* CONFIG_DEBUG_RODATA */
19242
19243 bpt->type = BP_BREAKPOINT;
19244- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
19245+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
19246 BREAK_INSTR_SIZE);
19247 if (err)
19248 return err;
19249- err = probe_kernel_write((char *)bpt->bpt_addr,
19250+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
19251 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
19252 #ifdef CONFIG_DEBUG_RODATA
19253 if (!err)
19254@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
19255 return -EBUSY;
19256 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
19257 BREAK_INSTR_SIZE);
19258- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
19259+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
19260 if (err)
19261 return err;
19262 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
19263@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
19264 if (mutex_is_locked(&text_mutex))
19265 goto knl_write;
19266 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
19267- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
19268+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
19269 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
19270 goto knl_write;
19271 return err;
19272 knl_write:
19273 #endif /* CONFIG_DEBUG_RODATA */
19274- return probe_kernel_write((char *)bpt->bpt_addr,
19275+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
19276 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
19277 }
19278
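
ktla_ktva()/ktva_ktla(), used throughout the kgdb hunks above and the kprobes hunks below, translate kernel text addresses between the mapping the code executes at and the alias it can be read or patched through: under KERNEXEC on i386 the kernel image is mapped at two fixed offsets. Assumed shape of the helpers (identity mappings when the feature is off):

    #ifdef CONFIG_PAX_KERNEXEC
    /* __KERNEL_TEXT_OFFSET: distance between the two mappings of .text */
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)
    #define ktva_ktla(addr) (addr)
    #endif

This is why the probe_kernel_read()/probe_kernel_write() calls on breakpoint sites go through the conversion first: the raw text address is no longer the one that is readable and writable.
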
19279diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
19280index c5e410e..ed5a7f0 100644
19281--- a/arch/x86/kernel/kprobes-opt.c
19282+++ b/arch/x86/kernel/kprobes-opt.c
19283@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
19284 * Verify if the address gap is in 2GB range, because this uses
19285 * a relative jump.
19286 */
19287- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
19288+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
19289 if (abs(rel) > 0x7fffffff)
19290 return -ERANGE;
19291
19292@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
19293 op->optinsn.size = ret;
19294
19295 /* Copy arch-dep-instance from template */
19296- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
19297+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
19298
19299 /* Set probe information */
19300 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
19301
19302 /* Set probe function call */
19303- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
19304+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
19305
19306 /* Set returning jmp instruction at the tail of out-of-line buffer */
19307- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
19308+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
19309 (u8 *)op->kp.addr + op->optinsn.size);
19310
19311 flush_icache_range((unsigned long) buf,
19312@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
19313 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
19314
19315 /* Backup instructions which will be replaced by jump address */
19316- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
19317+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
19318 RELATIVE_ADDR_SIZE);
19319
19320 insn_buf[0] = RELATIVEJUMP_OPCODE;
19321@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
19322 /* This kprobe is really able to run optimized path. */
19323 op = container_of(p, struct optimized_kprobe, kp);
19324 /* Detour through copied instructions */
19325- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
19326+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
19327 if (!reenter)
19328 reset_current_kprobe();
19329 preempt_enable_no_resched();
19330diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19331index 57916c0..9e0b9d0 100644
19332--- a/arch/x86/kernel/kprobes.c
19333+++ b/arch/x86/kernel/kprobes.c
19334@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
19335 s32 raddr;
19336 } __attribute__((packed)) *insn;
19337
19338- insn = (struct __arch_relative_insn *)from;
19339+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
19340+
19341+ pax_open_kernel();
19342 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
19343 insn->op = op;
19344+ pax_close_kernel();
19345 }
19346
19347 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
19348@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
19349 kprobe_opcode_t opcode;
19350 kprobe_opcode_t *orig_opcodes = opcodes;
19351
19352- if (search_exception_tables((unsigned long)opcodes))
19353+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19354 return 0; /* Page fault may occur on this address. */
19355
19356 retry:
19357@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
19358 * for the first byte, we can recover the original instruction
19359 * from it and kp->opcode.
19360 */
19361- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19362+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19363 buf[0] = kp->opcode;
19364- return (unsigned long)buf;
19365+ return ktva_ktla((unsigned long)buf);
19366 }
19367
19368 /*
19369@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
19370 /* Another subsystem puts a breakpoint, failed to recover */
19371 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
19372 return 0;
19373+ pax_open_kernel();
19374 memcpy(dest, insn.kaddr, insn.length);
19375+ pax_close_kernel();
19376
19377 #ifdef CONFIG_X86_64
19378 if (insn_rip_relative(&insn)) {
19379@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
19380 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
19381 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
19382 disp = (u8 *) dest + insn_offset_displacement(&insn);
19383+ pax_open_kernel();
19384 *(s32 *) disp = (s32) newdisp;
19385+ pax_close_kernel();
19386 }
19387 #endif
19388 return insn.length;
19389@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
19390 * nor set current_kprobe, because it doesn't use single
19391 * stepping.
19392 */
19393- regs->ip = (unsigned long)p->ainsn.insn;
19394+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19395 preempt_enable_no_resched();
19396 return;
19397 }
19398@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
19399 regs->flags &= ~X86_EFLAGS_IF;
19400 /* single step inline if the instruction is an int3 */
19401 if (p->opcode == BREAKPOINT_INSTRUCTION)
19402- regs->ip = (unsigned long)p->addr;
19403+ regs->ip = ktla_ktva((unsigned long)p->addr);
19404 else
19405- regs->ip = (unsigned long)p->ainsn.insn;
19406+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19407 }
19408
19409 /*
19410@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19411 setup_singlestep(p, regs, kcb, 0);
19412 return 1;
19413 }
19414- } else if (*addr != BREAKPOINT_INSTRUCTION) {
19415+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19416 /*
19417 * The breakpoint instruction was removed right
19418 * after we hit it. Another cpu has removed
19419@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19420 " movq %rax, 152(%rsp)\n"
19421 RESTORE_REGS_STRING
19422 " popfq\n"
19423+#ifdef KERNEXEC_PLUGIN
19424+ " btsq $63,(%rsp)\n"
19425+#endif
19426 #else
19427 " pushf\n"
19428 SAVE_REGS_STRING
19429@@ -788,7 +798,7 @@ static void __kprobes
19430 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19431 {
19432 unsigned long *tos = stack_addr(regs);
19433- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19434+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19435 unsigned long orig_ip = (unsigned long)p->addr;
19436 kprobe_opcode_t *insn = p->ainsn.insn;
19437
19438@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
19439 struct die_args *args = data;
19440 int ret = NOTIFY_DONE;
19441
19442- if (args->regs && user_mode_vm(args->regs))
19443+ if (args->regs && user_mode(args->regs))
19444 return ret;
19445
19446 switch (val) {
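
pax_open_kernel()/pax_close_kernel(), added around the instruction-slot writes here and the relocation stores in the module.c hunk below, bracket the few places allowed to write otherwise read-only kernel memory. On native x86 they are believed to toggle CR0.WP with preemption disabled; a reconstructed sketch, not verbatim:

    static inline unsigned long native_pax_open_kernel(void)
    {
        unsigned long cr0;

        preempt_disable();
        barrier();
        cr0 = read_cr0() ^ X86_CR0_WP;
        BUG_ON(cr0 & X86_CR0_WP);       /* WP must now be clear */
        write_cr0(cr0);
        return cr0 ^ X86_CR0_WP;
    }

    static inline unsigned long native_pax_close_kernel(void)
    {
        unsigned long cr0;

        cr0 = read_cr0() ^ X86_CR0_WP;
        BUG_ON(!(cr0 & X86_CR0_WP));    /* WP must be set again */
        write_cr0(cr0);
        barrier();
        preempt_enable_no_resched();
        return cr0 ^ X86_CR0_WP;
    }
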
19447diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19448index ebc9873..1b9724b 100644
19449--- a/arch/x86/kernel/ldt.c
19450+++ b/arch/x86/kernel/ldt.c
19451@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19452 if (reload) {
19453 #ifdef CONFIG_SMP
19454 preempt_disable();
19455- load_LDT(pc);
19456+ load_LDT_nolock(pc);
19457 if (!cpumask_equal(mm_cpumask(current->mm),
19458 cpumask_of(smp_processor_id())))
19459 smp_call_function(flush_ldt, current->mm, 1);
19460 preempt_enable();
19461 #else
19462- load_LDT(pc);
19463+ load_LDT_nolock(pc);
19464 #endif
19465 }
19466 if (oldsize) {
19467@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19468 return err;
19469
19470 for (i = 0; i < old->size; i++)
19471- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19472+ write_ldt_entry(new->ldt, i, old->ldt + i);
19473 return 0;
19474 }
19475
19476@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19477 retval = copy_ldt(&mm->context, &old_mm->context);
19478 mutex_unlock(&old_mm->context.lock);
19479 }
19480+
19481+ if (tsk == current) {
19482+ mm->context.vdso = 0;
19483+
19484+#ifdef CONFIG_X86_32
19485+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19486+ mm->context.user_cs_base = 0UL;
19487+ mm->context.user_cs_limit = ~0UL;
19488+
19489+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19490+ cpus_clear(mm->context.cpu_user_cs_mask);
19491+#endif
19492+
19493+#endif
19494+#endif
19495+
19496+ }
19497+
19498 return retval;
19499 }
19500
19501@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19502 }
19503 }
19504
19505+#ifdef CONFIG_PAX_SEGMEXEC
19506+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19507+ error = -EINVAL;
19508+ goto out_unlock;
19509+ }
19510+#endif
19511+
19512 fill_ldt(&ldt, &ldt_info);
19513 if (oldmode)
19514 ldt.avl = 0;
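
The SEGMEXEC hunk above makes write_ldt() reject code descriptors outright for tasks flagged MF_PAX_SEGMEXEC, since a user-installed code segment could reach past the address-space split the feature depends on. A userspace probe (expected to print EINVAL on such a kernel; elsewhere it should succeed or fail for other reasons):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>

    int main(void)
    {
        struct user_desc d;

        memset(&d, 0, sizeof(d));
        d.entry_number   = 0;
        d.limit          = 0xfffff;
        d.seg_32bit      = 1;
        d.contents       = MODIFY_LDT_CONTENTS_CODE; /* the rejected case */
        d.limit_in_pages = 1;
        d.useable        = 1;

        if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) != 0)
            perror("modify_ldt");       /* EINVAL under SEGMEXEC */
        else
            puts("code segment installed");
        return 0;
    }
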
19515diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19516index 5b19e4d..6476a76 100644
19517--- a/arch/x86/kernel/machine_kexec_32.c
19518+++ b/arch/x86/kernel/machine_kexec_32.c
19519@@ -26,7 +26,7 @@
19520 #include <asm/cacheflush.h>
19521 #include <asm/debugreg.h>
19522
19523-static void set_idt(void *newidt, __u16 limit)
19524+static void set_idt(struct desc_struct *newidt, __u16 limit)
19525 {
19526 struct desc_ptr curidt;
19527
19528@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19529 }
19530
19531
19532-static void set_gdt(void *newgdt, __u16 limit)
19533+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19534 {
19535 struct desc_ptr curgdt;
19536
19537@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
19538 }
19539
19540 control_page = page_address(image->control_code_page);
19541- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19542+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19543
19544 relocate_kernel_ptr = control_page;
19545 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19546diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19547index 3544aed..01ddc1c 100644
19548--- a/arch/x86/kernel/microcode_intel.c
19549+++ b/arch/x86/kernel/microcode_intel.c
19550@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
19551
19552 static int get_ucode_user(void *to, const void *from, size_t n)
19553 {
19554- return copy_from_user(to, from, n);
19555+ return copy_from_user(to, (const void __force_user *)from, n);
19556 }
19557
19558 static enum ucode_state
19559 request_microcode_user(int cpu, const void __user *buf, size_t size)
19560 {
19561- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19562+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19563 }
19564
19565 static void microcode_fini_cpu(int cpu)
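
__force_user and __force_kernel, used in the microcode hunk above and again in the ptrace.c hunk later on, are sparse annotations for deliberate address-space casts: a kernel pointer is passed through an API typed __user (or the reverse) on purpose, and the cast documents the intent while silencing the checker. Assumed definitions (the underlying __force/__user attributes already expand to nothing outside a sparse run):

    #define __force_user   __force __user
    #define __force_kernel __force __kernel
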
19566diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19567index 216a4d7..228255a 100644
19568--- a/arch/x86/kernel/module.c
19569+++ b/arch/x86/kernel/module.c
19570@@ -43,15 +43,60 @@ do { \
19571 } while (0)
19572 #endif
19573
19574-void *module_alloc(unsigned long size)
19575+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
19576 {
19577- if (PAGE_ALIGN(size) > MODULES_LEN)
19578+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
19579 return NULL;
19580 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
19581- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
19582+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
19583 -1, __builtin_return_address(0));
19584 }
19585
19586+void *module_alloc(unsigned long size)
19587+{
19588+
19589+#ifdef CONFIG_PAX_KERNEXEC
19590+ return __module_alloc(size, PAGE_KERNEL);
19591+#else
19592+ return __module_alloc(size, PAGE_KERNEL_EXEC);
19593+#endif
19594+
19595+}
19596+
19597+#ifdef CONFIG_PAX_KERNEXEC
19598+#ifdef CONFIG_X86_32
19599+void *module_alloc_exec(unsigned long size)
19600+{
19601+ struct vm_struct *area;
19602+
19603+ if (size == 0)
19604+ return NULL;
19605+
19606+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19607+ return area ? area->addr : NULL;
19608+}
19609+EXPORT_SYMBOL(module_alloc_exec);
19610+
19611+void module_free_exec(struct module *mod, void *module_region)
19612+{
19613+ vunmap(module_region);
19614+}
19615+EXPORT_SYMBOL(module_free_exec);
19616+#else
19617+void module_free_exec(struct module *mod, void *module_region)
19618+{
19619+ module_free(mod, module_region);
19620+}
19621+EXPORT_SYMBOL(module_free_exec);
19622+
19623+void *module_alloc_exec(unsigned long size)
19624+{
19625+ return __module_alloc(size, PAGE_KERNEL_RX);
19626+}
19627+EXPORT_SYMBOL(module_alloc_exec);
19628+#endif
19629+#endif
19630+
19631 #ifdef CONFIG_X86_32
19632 int apply_relocate(Elf32_Shdr *sechdrs,
19633 const char *strtab,
19634@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19635 unsigned int i;
19636 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19637 Elf32_Sym *sym;
19638- uint32_t *location;
19639+ uint32_t *plocation, location;
19640
19641 DEBUGP("Applying relocate section %u to %u\n",
19642 relsec, sechdrs[relsec].sh_info);
19643 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19644 /* This is where to make the change */
19645- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19646- + rel[i].r_offset;
19647+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19648+ location = (uint32_t)plocation;
19649+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19650+ plocation = ktla_ktva((void *)plocation);
19651 /* This is the symbol it is referring to. Note that all
19652 undefined symbols have been resolved. */
19653 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19654@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19655 switch (ELF32_R_TYPE(rel[i].r_info)) {
19656 case R_386_32:
19657 /* We add the value into the location given */
19658- *location += sym->st_value;
19659+ pax_open_kernel();
19660+ *plocation += sym->st_value;
19661+ pax_close_kernel();
19662 break;
19663 case R_386_PC32:
19664 /* Add the value, subtract its position */
19665- *location += sym->st_value - (uint32_t)location;
19666+ pax_open_kernel();
19667+ *plocation += sym->st_value - location;
19668+ pax_close_kernel();
19669 break;
19670 default:
19671 pr_err("%s: Unknown relocation: %u\n",
19672@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19673 case R_X86_64_NONE:
19674 break;
19675 case R_X86_64_64:
19676+ pax_open_kernel();
19677 *(u64 *)loc = val;
19678+ pax_close_kernel();
19679 break;
19680 case R_X86_64_32:
19681+ pax_open_kernel();
19682 *(u32 *)loc = val;
19683+ pax_close_kernel();
19684 if (val != *(u32 *)loc)
19685 goto overflow;
19686 break;
19687 case R_X86_64_32S:
19688+ pax_open_kernel();
19689 *(s32 *)loc = val;
19690+ pax_close_kernel();
19691 if ((s64)val != *(s32 *)loc)
19692 goto overflow;
19693 break;
19694 case R_X86_64_PC32:
19695 val -= (u64)loc;
19696+ pax_open_kernel();
19697 *(u32 *)loc = val;
19698+ pax_close_kernel();
19699+
19700 #if 0
19701 if ((s64)val != *(s32 *)loc)
19702 goto overflow;
19703diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
19704index f84f5c5..e27e54b 100644
19705--- a/arch/x86/kernel/nmi.c
19706+++ b/arch/x86/kernel/nmi.c
19707@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
19708 dotraplinkage notrace __kprobes void
19709 do_nmi(struct pt_regs *regs, long error_code)
19710 {
19711+
19712+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19713+ if (!user_mode(regs)) {
19714+ unsigned long cs = regs->cs & 0xFFFF;
19715+ unsigned long ip = ktva_ktla(regs->ip);
19716+
19717+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
19718+ regs->ip = ip;
19719+ }
19720+#endif
19721+
19722 nmi_nesting_preprocess(regs);
19723
19724 nmi_enter();
19725diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19726index 676b8c7..870ba04 100644
19727--- a/arch/x86/kernel/paravirt-spinlocks.c
19728+++ b/arch/x86/kernel/paravirt-spinlocks.c
19729@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
19730 arch_spin_lock(lock);
19731 }
19732
19733-struct pv_lock_ops pv_lock_ops = {
19734+struct pv_lock_ops pv_lock_ops __read_only = {
19735 #ifdef CONFIG_SMP
19736 .spin_is_locked = __ticket_spin_is_locked,
19737 .spin_is_contended = __ticket_spin_is_contended,
19738diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19739index 17fff18..5cfa0f4 100644
19740--- a/arch/x86/kernel/paravirt.c
19741+++ b/arch/x86/kernel/paravirt.c
19742@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
19743 {
19744 return x;
19745 }
19746+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19747+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19748+#endif
19749
19750 void __init default_banner(void)
19751 {
19752@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19753 if (opfunc == NULL)
19754 /* If there's no function, patch it with a ud2a (BUG) */
19755 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19756- else if (opfunc == _paravirt_nop)
19757+ else if (opfunc == (void *)_paravirt_nop)
19758 /* If the operation is a nop, then nop the callsite */
19759 ret = paravirt_patch_nop();
19760
19761 /* identity functions just return their single argument */
19762- else if (opfunc == _paravirt_ident_32)
19763+ else if (opfunc == (void *)_paravirt_ident_32)
19764 ret = paravirt_patch_ident_32(insnbuf, len);
19765- else if (opfunc == _paravirt_ident_64)
19766+ else if (opfunc == (void *)_paravirt_ident_64)
19767 ret = paravirt_patch_ident_64(insnbuf, len);
19768+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19769+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19770+ ret = paravirt_patch_ident_64(insnbuf, len);
19771+#endif
19772
19773 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19774 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19775@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19776 if (insn_len > len || start == NULL)
19777 insn_len = len;
19778 else
19779- memcpy(insnbuf, start, insn_len);
19780+ memcpy(insnbuf, ktla_ktva(start), insn_len);
19781
19782 return insn_len;
19783 }
19784@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
19785 preempt_enable();
19786 }
19787
19788-struct pv_info pv_info = {
19789+struct pv_info pv_info __read_only = {
19790 .name = "bare hardware",
19791 .paravirt_enabled = 0,
19792 .kernel_rpl = 0,
19793@@ -315,16 +322,16 @@ struct pv_info pv_info = {
19794 #endif
19795 };
19796
19797-struct pv_init_ops pv_init_ops = {
19798+struct pv_init_ops pv_init_ops __read_only = {
19799 .patch = native_patch,
19800 };
19801
19802-struct pv_time_ops pv_time_ops = {
19803+struct pv_time_ops pv_time_ops __read_only = {
19804 .sched_clock = native_sched_clock,
19805 .steal_clock = native_steal_clock,
19806 };
19807
19808-struct pv_irq_ops pv_irq_ops = {
19809+struct pv_irq_ops pv_irq_ops __read_only = {
19810 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19811 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19812 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19813@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
19814 #endif
19815 };
19816
19817-struct pv_cpu_ops pv_cpu_ops = {
19818+struct pv_cpu_ops pv_cpu_ops __read_only = {
19819 .cpuid = native_cpuid,
19820 .get_debugreg = native_get_debugreg,
19821 .set_debugreg = native_set_debugreg,
19822@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19823 .end_context_switch = paravirt_nop,
19824 };
19825
19826-struct pv_apic_ops pv_apic_ops = {
19827+struct pv_apic_ops pv_apic_ops __read_only = {
19828 #ifdef CONFIG_X86_LOCAL_APIC
19829 .startup_ipi_hook = paravirt_nop,
19830 #endif
19831 };
19832
19833-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19834+#ifdef CONFIG_X86_32
19835+#ifdef CONFIG_X86_PAE
19836+/* 64-bit pagetable entries */
19837+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19838+#else
19839 /* 32-bit pagetable entries */
19840 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19841+#endif
19842 #else
19843 /* 64-bit pagetable entries */
19844 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19845 #endif
19846
19847-struct pv_mmu_ops pv_mmu_ops = {
19848+struct pv_mmu_ops pv_mmu_ops __read_only = {
19849
19850 .read_cr2 = native_read_cr2,
19851 .write_cr2 = native_write_cr2,
19852@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19853 .make_pud = PTE_IDENT,
19854
19855 .set_pgd = native_set_pgd,
19856+ .set_pgd_batched = native_set_pgd_batched,
19857 #endif
19858 #endif /* PAGETABLE_LEVELS >= 3 */
19859
19860@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19861 },
19862
19863 .set_fixmap = native_set_fixmap,
19864+
19865+#ifdef CONFIG_PAX_KERNEXEC
19866+ .pax_open_kernel = native_pax_open_kernel,
19867+ .pax_close_kernel = native_pax_close_kernel,
19868+#endif
19869+
19870 };
19871
19872 EXPORT_SYMBOL_GPL(pv_time_ops);
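
Each pv_*_ops table above gains __read_only, which is assumed to place the object in a section write-protected after boot so the paravirt function pointers cannot be redirected at runtime; the writes that must still happen go through pax_open_kernel() as sketched earlier. Assumed definition:

    /* collected into a page-aligned block and made RO once init completes */
    #define __read_only __attribute__((__section__(".data..read_only")))
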
19873diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
19874index 35ccf75..7a15747 100644
19875--- a/arch/x86/kernel/pci-iommu_table.c
19876+++ b/arch/x86/kernel/pci-iommu_table.c
19877@@ -2,7 +2,7 @@
19878 #include <asm/iommu_table.h>
19879 #include <linux/string.h>
19880 #include <linux/kallsyms.h>
19881-
19882+#include <linux/sched.h>
19883
19884 #define DEBUG 1
19885
19886diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19887index b644e1c..4a6d379 100644
19888--- a/arch/x86/kernel/process.c
19889+++ b/arch/x86/kernel/process.c
19890@@ -36,7 +36,8 @@
19891 * section. Since TSS's are completely CPU-local, we want them
19892 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
19893 */
19894-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
19895+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
19896+EXPORT_SYMBOL(init_tss);
19897
19898 #ifdef CONFIG_X86_64
19899 static DEFINE_PER_CPU(unsigned char, is_idle);
19900@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
19901 task_xstate_cachep =
19902 kmem_cache_create("task_xstate", xstate_size,
19903 __alignof__(union thread_xstate),
19904- SLAB_PANIC | SLAB_NOTRACK, NULL);
19905+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19906 }
19907
19908 /*
19909@@ -105,7 +106,7 @@ void exit_thread(void)
19910 unsigned long *bp = t->io_bitmap_ptr;
19911
19912 if (bp) {
19913- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19914+ struct tss_struct *tss = init_tss + get_cpu();
19915
19916 t->io_bitmap_ptr = NULL;
19917 clear_thread_flag(TIF_IO_BITMAP);
19918@@ -136,7 +137,7 @@ void show_regs_common(void)
19919 board = dmi_get_system_info(DMI_BOARD_NAME);
19920
19921 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
19922- current->pid, current->comm, print_tainted(),
19923+ task_pid_nr(current), current->comm, print_tainted(),
19924 init_utsname()->release,
19925 (int)strcspn(init_utsname()->version, " "),
19926 init_utsname()->version,
19927@@ -149,6 +150,9 @@ void flush_thread(void)
19928 {
19929 struct task_struct *tsk = current;
19930
19931+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19932+ loadsegment(gs, 0);
19933+#endif
19934 flush_ptrace_hw_breakpoint(tsk);
19935 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
19936 drop_init_fpu(tsk);
19937@@ -336,7 +340,7 @@ static void __exit_idle(void)
19938 void exit_idle(void)
19939 {
19940 /* idle loop has pid 0 */
19941- if (current->pid)
19942+ if (task_pid_nr(current))
19943 return;
19944 __exit_idle();
19945 }
19946@@ -445,7 +449,7 @@ bool set_pm_idle_to_default(void)
19947
19948 return ret;
19949 }
19950-void stop_this_cpu(void *dummy)
19951+__noreturn void stop_this_cpu(void *dummy)
19952 {
19953 local_irq_disable();
19954 /*
19955@@ -673,16 +677,37 @@ static int __init idle_setup(char *str)
19956 }
19957 early_param("idle", idle_setup);
19958
19959-unsigned long arch_align_stack(unsigned long sp)
19960+#ifdef CONFIG_PAX_RANDKSTACK
19961+void pax_randomize_kstack(struct pt_regs *regs)
19962 {
19963- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
19964- sp -= get_random_int() % 8192;
19965- return sp & ~0xf;
19966-}
19967+ struct thread_struct *thread = &current->thread;
19968+ unsigned long time;
19969
19970-unsigned long arch_randomize_brk(struct mm_struct *mm)
19971-{
19972- unsigned long range_end = mm->brk + 0x02000000;
19973- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
19974-}
19975+ if (!randomize_va_space)
19976+ return;
19977+
19978+ if (v8086_mode(regs))
19979+ return;
19980
19981+ rdtscl(time);
19982+
19983+ /* P4 seems to return a 0 LSB, ignore it */
19984+#ifdef CONFIG_MPENTIUM4
19985+ time &= 0x3EUL;
19986+ time <<= 2;
19987+#elif defined(CONFIG_X86_64)
19988+ time &= 0xFUL;
19989+ time <<= 4;
19990+#else
19991+ time &= 0x1FUL;
19992+ time <<= 3;
19993+#endif
19994+
19995+ thread->sp0 ^= time;
19996+ load_sp0(init_tss + smp_processor_id(), thread);
19997+
19998+#ifdef CONFIG_X86_64
19999+ this_cpu_write(kernel_stack, thread->sp0);
20000+#endif
20001+}
20002+#endif
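
pax_randomize_kstack() XORs a few TSC-derived bits into thread->sp0, so each syscall enters the kernel at a slightly different stack offset. The mask/shift pairs preserve alignment: the x86_64 branch (time &= 0xF; time <<= 4) yields one of 16 offsets in 16-byte steps, the generic i386 branch gives 8-byte-aligned offsets up to 0xf8, and the Pentium 4 branch drops the LSB because that TSC bit reads as zero there, per the comment in the hunk. A quick check of the ranges:

    #include <stdio.h>

    int main(void)
    {
        /* x86_64 branch: 16 slots, 16-byte aligned (0x0 .. 0xf0) */
        for (unsigned long tsc = 0; tsc <= 0xF; tsc++)
            printf("%#lx ", (tsc & 0xFUL) << 4);
        printf("\n");

        /* generic i386 branch: 8-byte aligned, max offset 0xf8 */
        printf("i386 max offset: %#lx\n", 0x1FUL << 3);
        return 0;
    }
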
20003diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
20004index 44e0bff..5ceb99c 100644
20005--- a/arch/x86/kernel/process_32.c
20006+++ b/arch/x86/kernel/process_32.c
20007@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
20008 unsigned long thread_saved_pc(struct task_struct *tsk)
20009 {
20010 return ((unsigned long *)tsk->thread.sp)[3];
20011+//XXX return tsk->thread.eip;
20012 }
20013
20014 void __show_regs(struct pt_regs *regs, int all)
20015@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
20016 unsigned long sp;
20017 unsigned short ss, gs;
20018
20019- if (user_mode_vm(regs)) {
20020+ if (user_mode(regs)) {
20021 sp = regs->sp;
20022 ss = regs->ss & 0xffff;
20023- gs = get_user_gs(regs);
20024 } else {
20025 sp = kernel_stack_pointer(regs);
20026 savesegment(ss, ss);
20027- savesegment(gs, gs);
20028 }
20029+ gs = get_user_gs(regs);
20030
20031 show_regs_common();
20032
20033 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
20034 (u16)regs->cs, regs->ip, regs->flags,
20035- smp_processor_id());
20036+ raw_smp_processor_id());
20037 print_symbol("EIP is at %s\n", regs->ip);
20038
20039 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
20040@@ -131,20 +131,21 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20041 unsigned long arg,
20042 struct task_struct *p, struct pt_regs *regs)
20043 {
20044- struct pt_regs *childregs = task_pt_regs(p);
20045+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
20046 struct task_struct *tsk;
20047 int err;
20048
20049 p->thread.sp = (unsigned long) childregs;
20050 p->thread.sp0 = (unsigned long) (childregs+1);
20051+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20052
20053 if (unlikely(!regs)) {
20054 /* kernel thread */
20055 memset(childregs, 0, sizeof(struct pt_regs));
20056 p->thread.ip = (unsigned long) ret_from_kernel_thread;
20057- task_user_gs(p) = __KERNEL_STACK_CANARY;
20058- childregs->ds = __USER_DS;
20059- childregs->es = __USER_DS;
20060+ savesegment(gs, childregs->gs);
20061+ childregs->ds = __KERNEL_DS;
20062+ childregs->es = __KERNEL_DS;
20063 childregs->fs = __KERNEL_PERCPU;
20064 childregs->bx = sp; /* function */
20065 childregs->bp = arg;
20066@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20067 struct thread_struct *prev = &prev_p->thread,
20068 *next = &next_p->thread;
20069 int cpu = smp_processor_id();
20070- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20071+ struct tss_struct *tss = init_tss + cpu;
20072 fpu_switch_t fpu;
20073
20074 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
20075@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20076 */
20077 lazy_save_gs(prev->gs);
20078
20079+#ifdef CONFIG_PAX_MEMORY_UDEREF
20080+ __set_fs(task_thread_info(next_p)->addr_limit);
20081+#endif
20082+
20083 /*
20084 * Load the per-thread Thread-Local Storage descriptor.
20085 */
20086@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20087 */
20088 arch_end_context_switch(next_p);
20089
20090+ this_cpu_write(current_task, next_p);
20091+ this_cpu_write(current_tinfo, &next_p->tinfo);
20092+
20093 /*
20094 * Restore %gs if needed (which is common)
20095 */
20096@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20097
20098 switch_fpu_finish(next_p, fpu);
20099
20100- this_cpu_write(current_task, next_p);
20101-
20102 return prev_p;
20103 }
20104
20105@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
20106 } while (count++ < 16);
20107 return 0;
20108 }
20109-
20110diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
20111index 16c6365..5d32218 100644
20112--- a/arch/x86/kernel/process_64.c
20113+++ b/arch/x86/kernel/process_64.c
20114@@ -153,10 +153,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20115 struct pt_regs *childregs;
20116 struct task_struct *me = current;
20117
20118- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
20119+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
20120 childregs = task_pt_regs(p);
20121 p->thread.sp = (unsigned long) childregs;
20122 p->thread.usersp = me->thread.usersp;
20123+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20124 set_tsk_thread_flag(p, TIF_FORK);
20125 p->fpu_counter = 0;
20126 p->thread.io_bitmap_ptr = NULL;
20127@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20128 struct thread_struct *prev = &prev_p->thread;
20129 struct thread_struct *next = &next_p->thread;
20130 int cpu = smp_processor_id();
20131- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20132+ struct tss_struct *tss = init_tss + cpu;
20133 unsigned fsindex, gsindex;
20134 fpu_switch_t fpu;
20135
20136@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20137 prev->usersp = this_cpu_read(old_rsp);
20138 this_cpu_write(old_rsp, next->usersp);
20139 this_cpu_write(current_task, next_p);
20140+ this_cpu_write(current_tinfo, &next_p->tinfo);
20141
20142- this_cpu_write(kernel_stack,
20143- (unsigned long)task_stack_page(next_p) +
20144- THREAD_SIZE - KERNEL_STACK_OFFSET);
20145+ this_cpu_write(kernel_stack, next->sp0);
20146
20147 /*
20148 * Now maybe reload the debug registers and handle I/O bitmaps
20149@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
20150 if (!p || p == current || p->state == TASK_RUNNING)
20151 return 0;
20152 stack = (unsigned long)task_stack_page(p);
20153- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
20154+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
20155 return 0;
20156 fp = *(u64 *)(p->thread.sp);
20157 do {
20158- if (fp < (unsigned long)stack ||
20159- fp >= (unsigned long)stack+THREAD_SIZE)
20160+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
20161 return 0;
20162 ip = *(u64 *)(fp+8);
20163 if (!in_sched_functions(ip))
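
Note: the get_wchan() bounds above track the new sp0 placement, which reserves 16 bytes at the top of the stack page; both 8-byte loads (the saved frame pointer at fp and the return address at fp+8) must now stay inside the page. Worked through with THREAD_SIZE = 8192 (illustrative numbers):

    /* old check: fp >= stack + THREAD_SIZE rejects, so fp may be as high
     *            as stack + 8191; *(u64 *)(fp + 8) then reads bytes up to
     *            stack + 8206, past the end of the stack page.
     * new check: fp > stack + THREAD_SIZE - 16 - sizeof(u64) rejects, so
     *            fp <= stack + 8168 and the 8-byte load at fp + 8 ends at
     *            stack + 8184, inside the page.
     */
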
20164diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
20165index 974b67e..53bdb6c 100644
20166--- a/arch/x86/kernel/ptrace.c
20167+++ b/arch/x86/kernel/ptrace.c
20168@@ -183,14 +183,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
20169 {
20170 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
20171 unsigned long sp = (unsigned long)&regs->sp;
20172- struct thread_info *tinfo;
20173
20174- if (context == (sp & ~(THREAD_SIZE - 1)))
20175+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
20176 return sp;
20177
20178- tinfo = (struct thread_info *)context;
20179- if (tinfo->previous_esp)
20180- return tinfo->previous_esp;
20181+ sp = *(unsigned long *)context;
20182+ if (sp)
20183+ return sp;
20184
20185 return (unsigned long)regs;
20186 }
20187@@ -587,7 +586,7 @@ static void ptrace_triggered(struct perf_event *bp,
20188 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
20189 {
20190 int i;
20191- int dr7 = 0;
20192+ unsigned long dr7 = 0;
20193 struct arch_hw_breakpoint *info;
20194
20195 for (i = 0; i < HBP_NUM; i++) {
20196@@ -855,7 +854,7 @@ long arch_ptrace(struct task_struct *child, long request,
20197 unsigned long addr, unsigned long data)
20198 {
20199 int ret;
20200- unsigned long __user *datap = (unsigned long __user *)data;
20201+ unsigned long __user *datap = (__force unsigned long __user *)data;
20202
20203 switch (request) {
20204 /* read the word at location addr in the USER area. */
20205@@ -940,14 +939,14 @@ long arch_ptrace(struct task_struct *child, long request,
20206 if ((int) addr < 0)
20207 return -EIO;
20208 ret = do_get_thread_area(child, addr,
20209- (struct user_desc __user *)data);
20210+ (__force struct user_desc __user *) data);
20211 break;
20212
20213 case PTRACE_SET_THREAD_AREA:
20214 if ((int) addr < 0)
20215 return -EIO;
20216 ret = do_set_thread_area(child, addr,
20217- (struct user_desc __user *)data, 0);
20218+ (__force struct user_desc __user *) data, 0);
20219 break;
20220 #endif
20221
20222@@ -1325,7 +1324,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
20223
20224 #ifdef CONFIG_X86_64
20225
20226-static struct user_regset x86_64_regsets[] __read_mostly = {
20227+static user_regset_no_const x86_64_regsets[] __read_only = {
20228 [REGSET_GENERAL] = {
20229 .core_note_type = NT_PRSTATUS,
20230 .n = sizeof(struct user_regs_struct) / sizeof(long),
20231@@ -1366,7 +1365,7 @@ static const struct user_regset_view user_x86_64_view = {
20232 #endif /* CONFIG_X86_64 */
20233
20234 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
20235-static struct user_regset x86_32_regsets[] __read_mostly = {
20236+static user_regset_no_const x86_32_regsets[] __read_only = {
20237 [REGSET_GENERAL] = {
20238 .core_note_type = NT_PRSTATUS,
20239 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
20240@@ -1419,7 +1418,7 @@ static const struct user_regset_view user_x86_32_view = {
20241 */
20242 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
20243
20244-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
20245+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
20246 {
20247 #ifdef CONFIG_X86_64
20248 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
20249@@ -1454,7 +1453,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
20250 memset(info, 0, sizeof(*info));
20251 info->si_signo = SIGTRAP;
20252 info->si_code = si_code;
20253- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
20254+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
20255 }
20256
20257 void user_single_step_siginfo(struct task_struct *tsk,
20258@@ -1483,6 +1482,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20259 # define IS_IA32 0
20260 #endif
20261
20262+#ifdef CONFIG_GRKERNSEC_SETXID
20263+extern void gr_delayed_cred_worker(void);
20264+#endif
20265+
20266 /*
20267 * We must return the syscall number to actually look up in the table.
20268 * This can be -1L to skip running any syscall at all.
20269@@ -1493,6 +1496,11 @@ long syscall_trace_enter(struct pt_regs *regs)
20270
20271 rcu_user_exit();
20272
20273+#ifdef CONFIG_GRKERNSEC_SETXID
20274+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
20275+ gr_delayed_cred_worker();
20276+#endif
20277+
20278 /*
20279 * If we stepped into a sysenter/syscall insn, it trapped in
20280 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
20281@@ -1548,6 +1556,11 @@ void syscall_trace_leave(struct pt_regs *regs)
20282 */
20283 rcu_user_exit();
20284
20285+#ifdef CONFIG_GRKERNSEC_SETXID
20286+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
20287+ gr_delayed_cred_worker();
20288+#endif
20289+
20290 audit_syscall_exit(regs);
20291
20292 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
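
Note: the __force casts above are sparse annotations, not runtime changes. The ptrace ABI passes user pointers through plain longs, and converting them back into __user pointers needs an explicit override of sparse's address-space checking. A sketch of the machinery, mirroring how the kernel's compiler.h defines these attributes in checker builds:

    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* A long known by the calling convention to carry a user pointer:
     * the __force cast tells sparse the address-space change is intended. */
    static inline void __user *long_to_uptr(unsigned long val)
    {
            return (__force void __user *)val;
    }
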
20293diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
20294index 42eb330..139955c 100644
20295--- a/arch/x86/kernel/pvclock.c
20296+++ b/arch/x86/kernel/pvclock.c
20297@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
20298 return pv_tsc_khz;
20299 }
20300
20301-static atomic64_t last_value = ATOMIC64_INIT(0);
20302+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
20303
20304 void pvclock_resume(void)
20305 {
20306- atomic64_set(&last_value, 0);
20307+ atomic64_set_unchecked(&last_value, 0);
20308 }
20309
20310 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
20311@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
20312 * updating at the same time, and one of them could be slightly behind,
20313 * making the assumption that last_value always go forward fail to hold.
20314 */
20315- last = atomic64_read(&last_value);
20316+ last = atomic64_read_unchecked(&last_value);
20317 do {
20318 if (ret < last)
20319 return last;
20320- last = atomic64_cmpxchg(&last_value, last, ret);
20321+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
20322 } while (unlikely(last != ret));
20323
20324 return ret;
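
Note: the loop above clamps pvclock reads to be monotonic across vCPUs whose time infos may be slightly out of step; the switch to the _unchecked atomics is a PaX REFCOUNT opt-out, appropriate here because last_value is a timestamp rather than a reference count. The same clamp in portable C11 atomics, as a sketch (the kernel's atomic64_cmpxchg differs in interface, not intent):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t last_value;

    static uint64_t monotonic_read(uint64_t raw)
    {
            uint64_t last = atomic_load(&last_value);

            for (;;) {
                    if (raw < last)
                            return last;    /* another CPU already returned a later time */
                    /* try to publish raw; a failed CAS reloads last and retries */
                    if (atomic_compare_exchange_weak(&last_value, &last, raw))
                            return raw;
            }
    }
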
20325diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20326index 76fa1e9..abf09ea 100644
20327--- a/arch/x86/kernel/reboot.c
20328+++ b/arch/x86/kernel/reboot.c
20329@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
20330 EXPORT_SYMBOL(pm_power_off);
20331
20332 static const struct desc_ptr no_idt = {};
20333-static int reboot_mode;
20334+static unsigned short reboot_mode;
20335 enum reboot_type reboot_type = BOOT_ACPI;
20336 int reboot_force;
20337
20338@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
20339
20340 void __noreturn machine_real_restart(unsigned int type)
20341 {
20342+
20343+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20344+ struct desc_struct *gdt;
20345+#endif
20346+
20347 local_irq_disable();
20348
20349 /*
20350@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
20351
20352 /* Jump to the identity-mapped low memory code */
20353 #ifdef CONFIG_X86_32
20354- asm volatile("jmpl *%0" : :
20355+
20356+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20357+ gdt = get_cpu_gdt_table(smp_processor_id());
20358+ pax_open_kernel();
20359+#ifdef CONFIG_PAX_MEMORY_UDEREF
20360+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
20361+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
20362+ loadsegment(ds, __KERNEL_DS);
20363+ loadsegment(es, __KERNEL_DS);
20364+ loadsegment(ss, __KERNEL_DS);
20365+#endif
20366+#ifdef CONFIG_PAX_KERNEXEC
20367+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
20368+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
20369+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
20370+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
20371+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
20372+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
20373+#endif
20374+ pax_close_kernel();
20375+#endif
20376+
20377+ asm volatile("ljmpl *%0" : :
20378 "rm" (real_mode_header->machine_real_restart_asm),
20379 "a" (type));
20380 #else
20381@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20382 * try to force a triple fault and then cycle between hitting the keyboard
20383 * controller and doing that
20384 */
20385-static void native_machine_emergency_restart(void)
20386+static void __noreturn native_machine_emergency_restart(void)
20387 {
20388 int i;
20389 int attempt = 0;
20390@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
20391 #endif
20392 }
20393
20394-static void __machine_emergency_restart(int emergency)
20395+static void __noreturn __machine_emergency_restart(int emergency)
20396 {
20397 reboot_emergency = emergency;
20398 machine_ops.emergency_restart();
20399 }
20400
20401-static void native_machine_restart(char *__unused)
20402+static void __noreturn native_machine_restart(char *__unused)
20403 {
20404 pr_notice("machine restart\n");
20405
20406@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
20407 __machine_emergency_restart(0);
20408 }
20409
20410-static void native_machine_halt(void)
20411+static void __noreturn native_machine_halt(void)
20412 {
20413 /* Stop other cpus and apics */
20414 machine_shutdown();
20415@@ -679,7 +706,7 @@ static void native_machine_halt(void)
20416 stop_this_cpu(NULL);
20417 }
20418
20419-static void native_machine_power_off(void)
20420+static void __noreturn native_machine_power_off(void)
20421 {
20422 if (pm_power_off) {
20423 if (!reboot_force)
20424@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
20425 }
20426 /* A fallback in case there is no PM info available */
20427 tboot_shutdown(TB_SHUTDOWN_HALT);
20428+ unreachable();
20429 }
20430
20431-struct machine_ops machine_ops = {
20432+struct machine_ops machine_ops __read_only = {
20433 .power_off = native_machine_power_off,
20434 .shutdown = native_machine_shutdown,
20435 .emergency_restart = native_machine_emergency_restart,
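
Note: two hardening themes run through the reboot.c changes. Functions that cannot return are annotated __noreturn (with an unreachable() after the tboot fallback so the annotation holds), and the machine_ops function-pointer table is made __read_only, since writable ops tables are a classic kernel-exploitation target. The placement effect of __read_only resembles plain const in C, sketched below with hypothetical names:

    struct ops {
            void (*power_off)(void);
            void (*restart)(void);
    };

    static void do_power_off(void) { /* ... */ }
    static void do_restart(void)   { /* ... */ }

    /* 'const' places the table in .rodata, so a stray (or attacker-
     * controlled) write to it faults instead of redirecting control flow. */
    static const struct ops example_machine_ops = {
            .power_off = do_power_off,
            .restart   = do_restart,
    };
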
20436diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20437index 7a6f3b3..bed145d7 100644
20438--- a/arch/x86/kernel/relocate_kernel_64.S
20439+++ b/arch/x86/kernel/relocate_kernel_64.S
20440@@ -11,6 +11,7 @@
20441 #include <asm/kexec.h>
20442 #include <asm/processor-flags.h>
20443 #include <asm/pgtable_types.h>
20444+#include <asm/alternative-asm.h>
20445
20446 /*
20447 * Must be relocatable PIC code callable as a C function
20448@@ -160,13 +161,14 @@ identity_mapped:
20449 xorq %rbp, %rbp
20450 xorq %r8, %r8
20451 xorq %r9, %r9
20452- xorq %r10, %r9
20453+ xorq %r10, %r10
20454 xorq %r11, %r11
20455 xorq %r12, %r12
20456 xorq %r13, %r13
20457 xorq %r14, %r14
20458 xorq %r15, %r15
20459
20460+ pax_force_retaddr 0, 1
20461 ret
20462
20463 1:
20464diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20465index aeacb0e..f9d4c02 100644
20466--- a/arch/x86/kernel/setup.c
20467+++ b/arch/x86/kernel/setup.c
20468@@ -441,7 +441,7 @@ static void __init parse_setup_data(void)
20469
20470 switch (data->type) {
20471 case SETUP_E820_EXT:
20472- parse_e820_ext(data);
20473+ parse_e820_ext((struct setup_data __force_kernel *)data);
20474 break;
20475 case SETUP_DTB:
20476 add_dtb(pa_data);
20477@@ -710,7 +710,7 @@ static void __init trim_bios_range(void)
20478 * area (640->1Mb) as ram even though it is not.
20479 * take them out.
20480 */
20481- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
20482+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
20483
20484 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
20485 }
20486@@ -834,14 +834,14 @@ void __init setup_arch(char **cmdline_p)
20487
20488 if (!boot_params.hdr.root_flags)
20489 root_mountflags &= ~MS_RDONLY;
20490- init_mm.start_code = (unsigned long) _text;
20491- init_mm.end_code = (unsigned long) _etext;
20492+ init_mm.start_code = ktla_ktva((unsigned long) _text);
20493+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
20494 init_mm.end_data = (unsigned long) _edata;
20495 init_mm.brk = _brk_end;
20496
20497- code_resource.start = virt_to_phys(_text);
20498- code_resource.end = virt_to_phys(_etext)-1;
20499- data_resource.start = virt_to_phys(_etext);
20500+ code_resource.start = virt_to_phys(ktla_ktva(_text));
20501+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20502+ data_resource.start = virt_to_phys(_sdata);
20503 data_resource.end = virt_to_phys(_edata)-1;
20504 bss_resource.start = virt_to_phys(&__bss_start);
20505 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20506diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20507index 5cdff03..5810740 100644
20508--- a/arch/x86/kernel/setup_percpu.c
20509+++ b/arch/x86/kernel/setup_percpu.c
20510@@ -21,19 +21,17 @@
20511 #include <asm/cpu.h>
20512 #include <asm/stackprotector.h>
20513
20514-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
20515+#ifdef CONFIG_SMP
20516+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
20517 EXPORT_PER_CPU_SYMBOL(cpu_number);
20518+#endif
20519
20520-#ifdef CONFIG_X86_64
20521 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20522-#else
20523-#define BOOT_PERCPU_OFFSET 0
20524-#endif
20525
20526 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20527 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20528
20529-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20530+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20531 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20532 };
20533 EXPORT_SYMBOL(__per_cpu_offset);
20534@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
20535 {
20536 #ifdef CONFIG_X86_32
20537 struct desc_struct gdt;
20538+ unsigned long base = per_cpu_offset(cpu);
20539
20540- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20541- 0x2 | DESCTYPE_S, 0x8);
20542- gdt.s = 1;
20543+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20544+ 0x83 | DESCTYPE_S, 0xC);
20545 write_gdt_entry(get_cpu_gdt_table(cpu),
20546 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20547 #endif
20548@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
20549 /* alrighty, percpu areas up and running */
20550 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20551 for_each_possible_cpu(cpu) {
20552+#ifdef CONFIG_CC_STACKPROTECTOR
20553+#ifdef CONFIG_X86_32
20554+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
20555+#endif
20556+#endif
20557 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20558 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20559 per_cpu(cpu_number, cpu) = cpu;
20560@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
20561 */
20562 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
20563 #endif
20564+#ifdef CONFIG_CC_STACKPROTECTOR
20565+#ifdef CONFIG_X86_32
20566+ if (!cpu)
20567+ per_cpu(stack_canary.canary, cpu) = canary;
20568+#endif
20569+#endif
20570 /*
20571 * Up to this point, the boot CPU has been using .init.data
20572 * area. Reload any changed state for the boot CPU.
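
Note: the pack_descriptor() change shrinks the 32-bit percpu segment from the full 4 GiB (limit 0xFFFFF pages) to exactly [base, VMALLOC_END), and the surrounding hunks preserve the boot CPU's stack-protector canary across the percpu-area switch so CC_STACKPROTECTOR checks running during setup do not trip. The limit arithmetic, worked through under the assumption that the granularity bit is set (flags 0xC):

    /* With G = 1 a data segment spans (limit + 1) * 4096 bytes from 'base'.
     *
     *   old: limit = 0xFFFFF
     *        span  = 0x100000 * 4096 = 4 GiB   (the whole address space)
     *   new: limit = (VMALLOC_END - base - 1) >> PAGE_SHIFT
     *        span  = VMALLOC_END - base        (for a page-aligned range)
     *
     * so the last byte addressable through the percpu segment is
     * VMALLOC_END - 1, and percpu-relative accesses cannot reach past it.
     */
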
20573diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20574index 70b27ee..fcf827f 100644
20575--- a/arch/x86/kernel/signal.c
20576+++ b/arch/x86/kernel/signal.c
20577@@ -195,7 +195,7 @@ static unsigned long align_sigframe(unsigned long sp)
20578 * Align the stack pointer according to the i386 ABI,
20579 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20580 */
20581- sp = ((sp + 4) & -16ul) - 4;
20582+ sp = ((sp - 12) & -16ul) - 4;
20583 #else /* !CONFIG_X86_32 */
20584 sp = round_down(sp, 16) - 8;
20585 #endif
20586@@ -303,9 +303,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20587 }
20588
20589 if (current->mm->context.vdso)
20590- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20591+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20592 else
20593- restorer = &frame->retcode;
20594+ restorer = (void __user *)&frame->retcode;
20595 if (ka->sa.sa_flags & SA_RESTORER)
20596 restorer = ka->sa.sa_restorer;
20597
20598@@ -319,7 +319,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20599 * reasons and because gdb uses it as a signature to notice
20600 * signal handler stack frames.
20601 */
20602- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20603+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20604
20605 if (err)
20606 return -EFAULT;
20607@@ -369,7 +369,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20608 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
20609
20610 /* Set up to return from userspace. */
20611- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20612+ if (current->mm->context.vdso)
20613+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20614+ else
20615+ restorer = (void __user *)&frame->retcode;
20616 if (ka->sa.sa_flags & SA_RESTORER)
20617 restorer = ka->sa.sa_restorer;
20618 put_user_ex(restorer, &frame->pretcode);
20619@@ -381,7 +384,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20620 * reasons and because gdb uses it as a signature to notice
20621 * signal handler stack frames.
20622 */
20623- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20624+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20625 } put_user_catch(err);
20626
20627 err |= copy_siginfo_to_user(&frame->info, info);
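
Note: both sigframe formulas satisfy the i386 ABI entry condition ((sp + 4) & 15) == 0. Since sp - 12 equals (sp + 4) - 16 and 16 is the alignment unit, the new expression yields exactly the old result minus 16, so the frame lands strictly below the original sp even when sp was already aligned. A worked example:

    /* sp = 0xbfff0ffc  (sp % 16 == 12, already correctly aligned)
     *
     *   old: ((sp + 4)  & ~15UL) - 4 = (0xbfff1000 & ~15) - 4 = 0xbfff0ffc == sp
     *   new: ((sp - 12) & ~15UL) - 4 = (0xbfff0ff0 & ~15) - 4 = 0xbfff0fec <  sp
     *
     * Both results satisfy ((result + 4) & 15) == 0.
     */
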
20628diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
20629index 48d2b7d..90d328a 100644
20630--- a/arch/x86/kernel/smp.c
20631+++ b/arch/x86/kernel/smp.c
20632@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
20633
20634 __setup("nonmi_ipi", nonmi_ipi_setup);
20635
20636-struct smp_ops smp_ops = {
20637+struct smp_ops smp_ops __read_only = {
20638 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
20639 .smp_prepare_cpus = native_smp_prepare_cpus,
20640 .smp_cpus_done = native_smp_cpus_done,
20641diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20642index f3e2ec8..ad5287a 100644
20643--- a/arch/x86/kernel/smpboot.c
20644+++ b/arch/x86/kernel/smpboot.c
20645@@ -673,6 +673,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20646 idle->thread.sp = (unsigned long) (((struct pt_regs *)
20647 (THREAD_SIZE + task_stack_page(idle))) - 1);
20648 per_cpu(current_task, cpu) = idle;
20649+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
20650
20651 #ifdef CONFIG_X86_32
20652 /* Stack for startup_32 can be just as for start_secondary onwards */
20653@@ -680,11 +681,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
20654 #else
20655 clear_tsk_thread_flag(idle, TIF_FORK);
20656 initial_gs = per_cpu_offset(cpu);
20657- per_cpu(kernel_stack, cpu) =
20658- (unsigned long)task_stack_page(idle) -
20659- KERNEL_STACK_OFFSET + THREAD_SIZE;
20660+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
20661 #endif
20662+
20663+ pax_open_kernel();
20664 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20665+ pax_close_kernel();
20666+
20667 initial_code = (unsigned long)start_secondary;
20668 stack_start = idle->thread.sp;
20669
20670@@ -823,6 +826,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
20671 /* the FPU context is blank, nobody can own it */
20672 __cpu_disable_lazy_restore(cpu);
20673
20674+#ifdef CONFIG_PAX_PER_CPU_PGD
20675+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20676+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20677+ KERNEL_PGD_PTRS);
20678+#endif
20679+
20683 err = do_boot_cpu(apicid, cpu, tidle);
20684 if (err) {
20685 pr_debug("do_boot_cpu failed %d\n", err);
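
Note: under PAX_PER_CPU_PGD each CPU runs on its own page-global directory, so before a secondary CPU is booted the kernel half of swapper_pg_dir is copied into that CPU's PGD; early_gdt_descr is likewise updated inside pax_open_kernel()/pax_close_kernel() because KERNEXEC keeps it read-only. clone_pgd_range() is the upstream helper, and is just a sized memcpy of PGD entries:

    /* From arch/x86/include/asm/pgtable.h (upstream): copy 'count' pgd
     * entries, e.g. the KERNEL_PGD_PTRS kernel mappings, between PGDs. */
    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
            memcpy(dst, src, count * sizeof(pgd_t));
    }
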
20686diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20687index 9b4d51d..5d28b58 100644
20688--- a/arch/x86/kernel/step.c
20689+++ b/arch/x86/kernel/step.c
20690@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20691 struct desc_struct *desc;
20692 unsigned long base;
20693
20694- seg &= ~7UL;
20695+ seg >>= 3;
20696
20697 mutex_lock(&child->mm->context.lock);
20698- if (unlikely((seg >> 3) >= child->mm->context.size))
20699+ if (unlikely(seg >= child->mm->context.size))
20700 addr = -1L; /* bogus selector, access would fault */
20701 else {
20702 desc = child->mm->context.ldt + seg;
20703@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20704 addr += base;
20705 }
20706 mutex_unlock(&child->mm->context.lock);
20707- }
20708+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20709+ addr = ktla_ktva(addr);
20710
20711 return addr;
20712 }
20713@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20714 unsigned char opcode[15];
20715 unsigned long addr = convert_ip_to_linear(child, regs);
20716
20717+ if (addr == -EINVAL)
20718+ return 0;
20719+
20720 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20721 for (i = 0; i < copied; i++) {
20722 switch (opcode[i]) {
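
Note: the selector handling change follows from x86 selector layout: the low three bits are the requested privilege level and the table-indicator bit, everything above is the descriptor index. Shifting by 3 yields an index that compares directly against context.size (a count of LDT entries) and works as scaled pointer arithmetic on context.ldt, which this patch types as struct desc_struct * elsewhere; the old & ~7 form kept a byte offset instead. Layout sketch:

    /* 15                       3   2    1 0
     * +-------------------------+----+-----+
     * |     descriptor index    | TI | RPL |
     * +-------------------------+----+-----+
     *
     * index    = seg >> 3;     entry = ldt + index;            (scaled)
     * byte_off = seg & ~7UL;   entry = (void *)ldt + byte_off; (unscaled)
     */
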
20723diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20724new file mode 100644
20725index 0000000..26bb1af
20726--- /dev/null
20727+++ b/arch/x86/kernel/sys_i386_32.c
20728@@ -0,0 +1,249 @@
20729+/*
20730+ * This file contains various random system calls that
20731+ * have a non-standard calling sequence on the Linux/i386
20732+ * platform.
20733+ */
20734+
20735+#include <linux/errno.h>
20736+#include <linux/sched.h>
20737+#include <linux/mm.h>
20738+#include <linux/fs.h>
20739+#include <linux/smp.h>
20740+#include <linux/sem.h>
20741+#include <linux/msg.h>
20742+#include <linux/shm.h>
20743+#include <linux/stat.h>
20744+#include <linux/syscalls.h>
20745+#include <linux/mman.h>
20746+#include <linux/file.h>
20747+#include <linux/utsname.h>
20748+#include <linux/ipc.h>
20749+
20750+#include <linux/uaccess.h>
20751+#include <linux/unistd.h>
20752+
20753+#include <asm/syscalls.h>
20754+
20755+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20756+{
20757+ unsigned long pax_task_size = TASK_SIZE;
20758+
20759+#ifdef CONFIG_PAX_SEGMEXEC
20760+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20761+ pax_task_size = SEGMEXEC_TASK_SIZE;
20762+#endif
20763+
20764+ if (len > pax_task_size || addr > pax_task_size - len)
20765+ return -EINVAL;
20766+
20767+ return 0;
20768+}
20769+
20770+unsigned long
20771+arch_get_unmapped_area(struct file *filp, unsigned long addr,
20772+ unsigned long len, unsigned long pgoff, unsigned long flags)
20773+{
20774+ struct mm_struct *mm = current->mm;
20775+ struct vm_area_struct *vma;
20776+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20777+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20778+
20779+#ifdef CONFIG_PAX_SEGMEXEC
20780+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20781+ pax_task_size = SEGMEXEC_TASK_SIZE;
20782+#endif
20783+
20784+ pax_task_size -= PAGE_SIZE;
20785+
20786+ if (len > pax_task_size)
20787+ return -ENOMEM;
20788+
20789+ if (flags & MAP_FIXED)
20790+ return addr;
20791+
20792+#ifdef CONFIG_PAX_RANDMMAP
20793+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20794+#endif
20795+
20796+ if (addr) {
20797+ addr = PAGE_ALIGN(addr);
20798+ if (pax_task_size - len >= addr) {
20799+ vma = find_vma(mm, addr);
20800+ if (check_heap_stack_gap(vma, addr, len, offset))
20801+ return addr;
20802+ }
20803+ }
20804+ if (len > mm->cached_hole_size) {
20805+ start_addr = addr = mm->free_area_cache;
20806+ } else {
20807+ start_addr = addr = mm->mmap_base;
20808+ mm->cached_hole_size = 0;
20809+ }
20810+
20811+#ifdef CONFIG_PAX_PAGEEXEC
20812+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20813+ start_addr = 0x00110000UL;
20814+
20815+#ifdef CONFIG_PAX_RANDMMAP
20816+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20817+ start_addr += mm->delta_mmap & 0x03FFF000UL;
20818+#endif
20819+
20820+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20821+ start_addr = addr = mm->mmap_base;
20822+ else
20823+ addr = start_addr;
20824+ }
20825+#endif
20826+
20827+full_search:
20828+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20829+ /* At this point: (!vma || addr < vma->vm_end). */
20830+ if (pax_task_size - len < addr) {
20831+ /*
20832+ * Start a new search - just in case we missed
20833+ * some holes.
20834+ */
20835+ if (start_addr != mm->mmap_base) {
20836+ start_addr = addr = mm->mmap_base;
20837+ mm->cached_hole_size = 0;
20838+ goto full_search;
20839+ }
20840+ return -ENOMEM;
20841+ }
20842+ if (check_heap_stack_gap(vma, addr, len, offset))
20843+ break;
20844+ if (addr + mm->cached_hole_size < vma->vm_start)
20845+ mm->cached_hole_size = vma->vm_start - addr;
20846+ addr = vma->vm_end;
20847+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
20848+ start_addr = addr = mm->mmap_base;
20849+ mm->cached_hole_size = 0;
20850+ goto full_search;
20851+ }
20852+ }
20853+
20854+ /*
20855+ * Remember the place where we stopped the search:
20856+ */
20857+ mm->free_area_cache = addr + len;
20858+ return addr;
20859+}
20860+
20861+unsigned long
20862+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20863+ const unsigned long len, const unsigned long pgoff,
20864+ const unsigned long flags)
20865+{
20866+ struct vm_area_struct *vma;
20867+ struct mm_struct *mm = current->mm;
20868+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20869+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
20870+
20871+#ifdef CONFIG_PAX_SEGMEXEC
20872+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20873+ pax_task_size = SEGMEXEC_TASK_SIZE;
20874+#endif
20875+
20876+ pax_task_size -= PAGE_SIZE;
20877+
20878+ /* requested length too big for entire address space */
20879+ if (len > pax_task_size)
20880+ return -ENOMEM;
20881+
20882+ if (flags & MAP_FIXED)
20883+ return addr;
20884+
20885+#ifdef CONFIG_PAX_PAGEEXEC
20886+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20887+ goto bottomup;
20888+#endif
20889+
20890+#ifdef CONFIG_PAX_RANDMMAP
20891+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20892+#endif
20893+
20894+ /* requesting a specific address */
20895+ if (addr) {
20896+ addr = PAGE_ALIGN(addr);
20897+ if (pax_task_size - len >= addr) {
20898+ vma = find_vma(mm, addr);
20899+ if (check_heap_stack_gap(vma, addr, len, offset))
20900+ return addr;
20901+ }
20902+ }
20903+
20904+ /* check if free_area_cache is useful for us */
20905+ if (len <= mm->cached_hole_size) {
20906+ mm->cached_hole_size = 0;
20907+ mm->free_area_cache = mm->mmap_base;
20908+ }
20909+
20910+ /* either no address requested or can't fit in requested address hole */
20911+ addr = mm->free_area_cache;
20912+
20913+ /* make sure it can fit in the remaining address space */
20914+ if (addr > len) {
20915+ vma = find_vma(mm, addr-len);
20916+ if (check_heap_stack_gap(vma, addr - len, len, offset))
20917+ /* remember the address as a hint for next time */
20918+ return (mm->free_area_cache = addr-len);
20919+ }
20920+
20921+ if (mm->mmap_base < len)
20922+ goto bottomup;
20923+
20924+ addr = mm->mmap_base-len;
20925+
20926+ do {
20927+ /*
20928+ * Lookup failure means no vma is above this address,
20929+ * else if new region fits below vma->vm_start,
20930+ * return with success:
20931+ */
20932+ vma = find_vma(mm, addr);
20933+ if (check_heap_stack_gap(vma, addr, len, offset))
20934+ /* remember the address as a hint for next time */
20935+ return (mm->free_area_cache = addr);
20936+
20937+ /* remember the largest hole we saw so far */
20938+ if (addr + mm->cached_hole_size < vma->vm_start)
20939+ mm->cached_hole_size = vma->vm_start - addr;
20940+
20941+ /* try just below the current vma->vm_start */
20942+ addr = skip_heap_stack_gap(vma, len, offset);
20943+ } while (!IS_ERR_VALUE(addr));
20944+
20945+bottomup:
20946+ /*
20947+ * A failed mmap() very likely causes application failure,
20948+ * so fall back to the bottom-up function here. This scenario
20949+ * can happen with large stack limits and large mmap()
20950+ * allocations.
20951+ */
20952+
20953+#ifdef CONFIG_PAX_SEGMEXEC
20954+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20955+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20956+ else
20957+#endif
20958+
20959+ mm->mmap_base = TASK_UNMAPPED_BASE;
20960+
20961+#ifdef CONFIG_PAX_RANDMMAP
20962+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20963+ mm->mmap_base += mm->delta_mmap;
20964+#endif
20965+
20966+ mm->free_area_cache = mm->mmap_base;
20967+ mm->cached_hole_size = ~0UL;
20968+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20969+ /*
20970+ * Restore the topdown base:
20971+ */
20972+ mm->mmap_base = base;
20973+ mm->free_area_cache = base;
20974+ mm->cached_hole_size = ~0UL;
20975+
20976+ return addr;
20977+}
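
Note: throughout this new file (and the matching sys_x86_64.c hunks below), the generic placement test !vma || addr + len <= vma->vm_start is replaced by check_heap_stack_gap(), which additionally enforces a guard gap below stack-like VMAs and folds in the randomized offset from gr_rand_threadstack_offset(); skip_heap_stack_gap() is its counterpart for the top-down walk. A simplified sketch of the gap test, inferred from its use here (the real grsecurity helper also handles the heap gap and rlimits):

    static int gap_ok(const struct vm_area_struct *vma,
                      unsigned long addr, unsigned long len, unsigned long gap)
    {
            if (!vma)
                    return 1;                       /* nothing above us */
            if (addr + len > vma->vm_start)
                    return 0;                       /* overlaps the next vma */
            if (vma->vm_flags & VM_GROWSDOWN)       /* stack: keep a guard gap */
                    return addr + len + gap <= vma->vm_start;
            return 1;
    }
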
20978diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
20979index b4d3c39..d699d77 100644
20980--- a/arch/x86/kernel/sys_x86_64.c
20981+++ b/arch/x86/kernel/sys_x86_64.c
20982@@ -95,8 +95,8 @@ out:
20983 return error;
20984 }
20985
20986-static void find_start_end(unsigned long flags, unsigned long *begin,
20987- unsigned long *end)
20988+static void find_start_end(struct mm_struct *mm, unsigned long flags,
20989+ unsigned long *begin, unsigned long *end)
20990 {
20991 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
20992 unsigned long new_begin;
20993@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
20994 *begin = new_begin;
20995 }
20996 } else {
20997- *begin = TASK_UNMAPPED_BASE;
20998+ *begin = mm->mmap_base;
20999 *end = TASK_SIZE;
21000 }
21001 }
21002@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
21003 struct vm_area_struct *vma;
21004 unsigned long start_addr;
21005 unsigned long begin, end;
21006+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
21007
21008 if (flags & MAP_FIXED)
21009 return addr;
21010
21011- find_start_end(flags, &begin, &end);
21012+ find_start_end(mm, flags, &begin, &end);
21013
21014 if (len > end)
21015 return -ENOMEM;
21016
21017+#ifdef CONFIG_PAX_RANDMMAP
21018+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21019+#endif
21020+
21021 if (addr) {
21022 addr = PAGE_ALIGN(addr);
21023 vma = find_vma(mm, addr);
21024- if (end - len >= addr &&
21025- (!vma || addr + len <= vma->vm_start))
21026+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
21027 return addr;
21028 }
21029 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
21030@@ -172,7 +176,7 @@ full_search:
21031 }
21032 return -ENOMEM;
21033 }
21034- if (!vma || addr + len <= vma->vm_start) {
21035+ if (check_heap_stack_gap(vma, addr, len, offset)) {
21036 /*
21037 * Remember the place where we stopped the search:
21038 */
21039@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21040 {
21041 struct vm_area_struct *vma;
21042 struct mm_struct *mm = current->mm;
21043- unsigned long addr = addr0, start_addr;
21044+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
21045+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
21046
21047 /* requested length too big for entire address space */
21048 if (len > TASK_SIZE)
21049@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21050 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
21051 goto bottomup;
21052
21053+#ifdef CONFIG_PAX_RANDMMAP
21054+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21055+#endif
21056+
21057 /* requesting a specific address */
21058 if (addr) {
21059 addr = PAGE_ALIGN(addr);
21060- vma = find_vma(mm, addr);
21061- if (TASK_SIZE - len >= addr &&
21062- (!vma || addr + len <= vma->vm_start))
21063- return addr;
21064+ if (TASK_SIZE - len >= addr) {
21065+ vma = find_vma(mm, addr);
21066+ if (check_heap_stack_gap(vma, addr, len, offset))
21067+ return addr;
21068+ }
21069 }
21070
21071 /* check if free_area_cache is useful for us */
21072@@ -240,7 +250,7 @@ try_again:
21073 * return with success:
21074 */
21075 vma = find_vma(mm, addr);
21076- if (!vma || addr+len <= vma->vm_start)
21077+ if (check_heap_stack_gap(vma, addr, len, offset))
21078 /* remember the address as a hint for next time */
21079 return mm->free_area_cache = addr;
21080
21081@@ -249,8 +259,8 @@ try_again:
21082 mm->cached_hole_size = vma->vm_start - addr;
21083
21084 /* try just below the current vma->vm_start */
21085- addr = vma->vm_start-len;
21086- } while (len < vma->vm_start);
21087+ addr = skip_heap_stack_gap(vma, len, offset);
21088+ } while (!IS_ERR_VALUE(addr));
21089
21090 fail:
21091 /*
21092@@ -270,13 +280,21 @@ bottomup:
21093 * can happen with large stack limits and large mmap()
21094 * allocations.
21095 */
21096+ mm->mmap_base = TASK_UNMAPPED_BASE;
21097+
21098+#ifdef CONFIG_PAX_RANDMMAP
21099+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21100+ mm->mmap_base += mm->delta_mmap;
21101+#endif
21102+
21103+ mm->free_area_cache = mm->mmap_base;
21104 mm->cached_hole_size = ~0UL;
21105- mm->free_area_cache = TASK_UNMAPPED_BASE;
21106 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21107 /*
21108 * Restore the topdown base:
21109 */
21110- mm->free_area_cache = mm->mmap_base;
21111+ mm->mmap_base = base;
21112+ mm->free_area_cache = base;
21113 mm->cached_hole_size = ~0UL;
21114
21115 return addr;
21116diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
21117index f84fe00..93fe08f 100644
21118--- a/arch/x86/kernel/tboot.c
21119+++ b/arch/x86/kernel/tboot.c
21120@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
21121
21122 void tboot_shutdown(u32 shutdown_type)
21123 {
21124- void (*shutdown)(void);
21125+ void (* __noreturn shutdown)(void);
21126
21127 if (!tboot_enabled())
21128 return;
21129@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
21130
21131 switch_to_tboot_pt();
21132
21133- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
21134+ shutdown = (void *)tboot->shutdown_entry;
21135 shutdown();
21136
21137 /* should not reach here */
21138@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
21139 return 0;
21140 }
21141
21142-static atomic_t ap_wfs_count;
21143+static atomic_unchecked_t ap_wfs_count;
21144
21145 static int tboot_wait_for_aps(int num_aps)
21146 {
21147@@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
21148 {
21149 switch (action) {
21150 case CPU_DYING:
21151- atomic_inc(&ap_wfs_count);
21152+ atomic_inc_unchecked(&ap_wfs_count);
21153 if (num_online_cpus() == 1)
21154- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
21155+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
21156 return NOTIFY_BAD;
21157 break;
21158 }
21159@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
21160
21161 tboot_create_trampoline();
21162
21163- atomic_set(&ap_wfs_count, 0);
21164+ atomic_set_unchecked(&ap_wfs_count, 0);
21165 register_hotcpu_notifier(&tboot_cpu_notifier);
21166
21167 acpi_os_set_prepare_sleep(&tboot_sleep);
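
Note: ap_wfs_count is a plain event counter, not a reference count, so under PAX_REFCOUNT it becomes atomic_unchecked_t: checked atomics trap or saturate on signed overflow (refcount overflows being exploitable), while the _unchecked variants keep ordinary wrapping semantics. A userspace sketch of what a checked increment amounts to, assuming saturate-at-INT_MAX behavior (the in-kernel mechanism also reports and differs in detail):

    #include <limits.h>
    #include <stdatomic.h>

    static void inc_checked(_Atomic int *v)
    {
            int old = atomic_load(v);

            /* stop at INT_MAX instead of wrapping to INT_MIN */
            while (old != INT_MAX &&
                   !atomic_compare_exchange_weak(v, &old, old + 1))
                    ;       /* a failed CAS reloads 'old'; retry */
    }
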
21168diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
21169index 24d3c91..d06b473 100644
21170--- a/arch/x86/kernel/time.c
21171+++ b/arch/x86/kernel/time.c
21172@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
21173 {
21174 unsigned long pc = instruction_pointer(regs);
21175
21176- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21177+ if (!user_mode(regs) && in_lock_functions(pc)) {
21178 #ifdef CONFIG_FRAME_POINTER
21179- return *(unsigned long *)(regs->bp + sizeof(long));
21180+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21181 #else
21182 unsigned long *sp =
21183 (unsigned long *)kernel_stack_pointer(regs);
21184@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21185 * or above a saved flags. Eflags has bits 22-31 zero,
21186 * kernel addresses don't.
21187 */
21188+
21189+#ifdef CONFIG_PAX_KERNEXEC
21190+ return ktla_ktva(sp[0]);
21191+#else
21192 if (sp[0] >> 22)
21193 return sp[0];
21194 if (sp[1] >> 22)
21195 return sp[1];
21196 #endif
21197+
21198+#endif
21199 }
21200 return pc;
21201 }
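
Note: profile_pc() switches to the vm86-aware user_mode() check and wraps recovered return addresses in ktla_ktva(). Under 32-bit KERNEXEC the kernel text is reached through an alternate mapping offset by a build-time constant, so addresses pulled off the stack must be translated back before being compared against kernel symbols; without KERNEXEC the macro is an identity. Illustrative shape only, the exact PaX definition differs by configuration:

    #ifdef CONFIG_PAX_KERNEXEC
    # define ktla_ktva(addr)  ((addr) + __KERNEL_TEXT_OFFSET)  /* sketch */
    #else
    # define ktla_ktva(addr)  (addr)
    #endif
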
21202diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21203index 9d9d2f9..cad418a 100644
21204--- a/arch/x86/kernel/tls.c
21205+++ b/arch/x86/kernel/tls.c
21206@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21207 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21208 return -EINVAL;
21209
21210+#ifdef CONFIG_PAX_SEGMEXEC
21211+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21212+ return -EINVAL;
21213+#endif
21214+
21215 set_tls_desc(p, idx, &info, 1);
21216
21217 return 0;
21218@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
21219
21220 if (kbuf)
21221 info = kbuf;
21222- else if (__copy_from_user(infobuf, ubuf, count))
21223+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
21224 return -EFAULT;
21225 else
21226 info = infobuf;
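
Note: the regset_tls_set() fix is the canonical guard against a user-controlled length: count was previously passed straight to __copy_from_user() with a fixed-size on-stack infobuf as the destination, so an oversized count meant a kernel stack overflow. The general pattern, as a sketch:

    /* Reject (or clamp) a user-supplied length before copying into a
     * fixed-size buffer; never let the caller size the copy. */
    static long copy_bounded(void *dst, size_t dst_size,
                             const void __user *src, size_t count)
    {
            if (count > dst_size)
                    return -EINVAL;
            if (copy_from_user(dst, src, count))
                    return -EFAULT;
            return 0;
    }
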
21227diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21228index 8276dc6..4ca48a2 100644
21229--- a/arch/x86/kernel/traps.c
21230+++ b/arch/x86/kernel/traps.c
21231@@ -71,12 +71,6 @@ asmlinkage int system_call(void);
21232
21233 /* Do we ignore FPU interrupts ? */
21234 char ignore_fpu_irq;
21235-
21236-/*
21237- * The IDT has to be page-aligned to simplify the Pentium
21238- * F0 0F bug workaround.
21239- */
21240-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21241 #endif
21242
21243 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21244@@ -109,11 +103,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21245 }
21246
21247 static int __kprobes
21248-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21249+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
21250 struct pt_regs *regs, long error_code)
21251 {
21252 #ifdef CONFIG_X86_32
21253- if (regs->flags & X86_VM_MASK) {
21254+ if (v8086_mode(regs)) {
21255 /*
21256 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21257 * On nmi (interrupt 2), do_trap should not be called.
21258@@ -126,12 +120,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21259 return -1;
21260 }
21261 #endif
21262- if (!user_mode(regs)) {
21263+ if (!user_mode_novm(regs)) {
21264 if (!fixup_exception(regs)) {
21265 tsk->thread.error_code = error_code;
21266 tsk->thread.trap_nr = trapnr;
21267+
21268+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21269+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21270+ str = "PAX: suspicious stack segment fault";
21271+#endif
21272+
21273 die(str, regs, error_code);
21274 }
21275+
21276+#ifdef CONFIG_PAX_REFCOUNT
21277+ if (trapnr == 4)
21278+ pax_report_refcount_overflow(regs);
21279+#endif
21280+
21281 return 0;
21282 }
21283
21284@@ -139,7 +145,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
21285 }
21286
21287 static void __kprobes
21288-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21289+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21290 long error_code, siginfo_t *info)
21291 {
21292 struct task_struct *tsk = current;
21293@@ -163,7 +169,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21294 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
21295 printk_ratelimit()) {
21296 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21297- tsk->comm, tsk->pid, str,
21298+ tsk->comm, task_pid_nr(tsk), str,
21299 regs->ip, regs->sp, error_code);
21300 print_vma_addr(" in ", regs->ip);
21301 pr_cont("\n");
21302@@ -269,7 +275,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
21303 conditional_sti(regs);
21304
21305 #ifdef CONFIG_X86_32
21306- if (regs->flags & X86_VM_MASK) {
21307+ if (v8086_mode(regs)) {
21308 local_irq_enable();
21309 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
21310 goto exit;
21311@@ -277,18 +283,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
21312 #endif
21313
21314 tsk = current;
21315- if (!user_mode(regs)) {
21316+ if (!user_mode_novm(regs)) {
21317 if (fixup_exception(regs))
21318 goto exit;
21319
21320 tsk->thread.error_code = error_code;
21321 tsk->thread.trap_nr = X86_TRAP_GP;
21322 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
21323- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
21324+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
21325+
21326+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21327+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21328+ die("PAX: suspicious general protection fault", regs, error_code);
21329+ else
21330+#endif
21331+
21332 die("general protection fault", regs, error_code);
21333+ }
21334 goto exit;
21335 }
21336
21337+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21338+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21339+ struct mm_struct *mm = tsk->mm;
21340+ unsigned long limit;
21341+
21342+ down_write(&mm->mmap_sem);
21343+ limit = mm->context.user_cs_limit;
21344+ if (limit < TASK_SIZE) {
21345+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21346+ up_write(&mm->mmap_sem);
21347+ return;
21348+ }
21349+ up_write(&mm->mmap_sem);
21350+ }
21351+#endif
21352+
21353 tsk->thread.error_code = error_code;
21354 tsk->thread.trap_nr = X86_TRAP_GP;
21355
21356@@ -443,7 +473,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21357 /* It's safe to allow irq's after DR6 has been saved */
21358 preempt_conditional_sti(regs);
21359
21360- if (regs->flags & X86_VM_MASK) {
21361+ if (v8086_mode(regs)) {
21362 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
21363 X86_TRAP_DB);
21364 preempt_conditional_cli(regs);
21365@@ -458,7 +488,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21366 * We already checked v86 mode above, so we can check for kernel mode
21367 * by just checking the CPL of CS.
21368 */
21369- if ((dr6 & DR_STEP) && !user_mode(regs)) {
21370+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
21371 tsk->thread.debugreg6 &= ~DR_STEP;
21372 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
21373 regs->flags &= ~X86_EFLAGS_TF;
21374@@ -490,7 +520,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
21375 return;
21376 conditional_sti(regs);
21377
21378- if (!user_mode_vm(regs))
21379+ if (!user_mode(regs))
21380 {
21381 if (!fixup_exception(regs)) {
21382 task->thread.error_code = error_code;
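
Note: the traps.c conversions lean on two helper distinctions. v8086_mode() is the upstream predicate for vm86 mode (shown below as defined in asm/ptrace.h), and this patch swaps the user_mode naming so the vm86-aware check becomes the default user_mode(), with the CPL-only variant renamed user_mode_novm(), making it harder to accidentally treat a vm86 task as kernel mode:

    /* Upstream helper (arch/x86/include/asm/ptrace.h): */
    static inline int v8086_mode(struct pt_regs *regs)
    {
    #ifdef CONFIG_X86_32
            return (regs->flags & X86_VM_MASK);
    #else
            return 0;       /* no V86 mode in long mode */
    #endif
    }
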
21383diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
21384index aafa555..a04691a 100644
21385--- a/arch/x86/kernel/uprobes.c
21386+++ b/arch/x86/kernel/uprobes.c
21387@@ -614,7 +614,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
21388 int ret = NOTIFY_DONE;
21389
21390 /* We are only interested in userspace traps */
21391- if (regs && !user_mode_vm(regs))
21392+ if (regs && !user_mode(regs))
21393 return NOTIFY_DONE;
21394
21395 switch (val) {
21396diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21397index b9242ba..50c5edd 100644
21398--- a/arch/x86/kernel/verify_cpu.S
21399+++ b/arch/x86/kernel/verify_cpu.S
21400@@ -20,6 +20,7 @@
21401 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21402 * arch/x86/kernel/trampoline_64.S: secondary processor verification
21403 * arch/x86/kernel/head_32.S: processor startup
21404+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21405 *
21406 * verify_cpu, returns the status of longmode and SSE in register %eax.
21407 * 0: Success 1: Failure
21408diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21409index 5c9687b..5f857d3 100644
21410--- a/arch/x86/kernel/vm86_32.c
21411+++ b/arch/x86/kernel/vm86_32.c
21412@@ -43,6 +43,7 @@
21413 #include <linux/ptrace.h>
21414 #include <linux/audit.h>
21415 #include <linux/stddef.h>
21416+#include <linux/grsecurity.h>
21417
21418 #include <asm/uaccess.h>
21419 #include <asm/io.h>
21420@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21421 do_exit(SIGSEGV);
21422 }
21423
21424- tss = &per_cpu(init_tss, get_cpu());
21425+ tss = init_tss + get_cpu();
21426 current->thread.sp0 = current->thread.saved_sp0;
21427 current->thread.sysenter_cs = __KERNEL_CS;
21428 load_sp0(tss, &current->thread);
21429@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
21430 struct task_struct *tsk;
21431 int tmp, ret = -EPERM;
21432
21433+#ifdef CONFIG_GRKERNSEC_VM86
21434+ if (!capable(CAP_SYS_RAWIO)) {
21435+ gr_handle_vm86();
21436+ goto out;
21437+ }
21438+#endif
21439+
21440 tsk = current;
21441 if (tsk->thread.saved_sp0)
21442 goto out;
21443@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
21444 int tmp, ret;
21445 struct vm86plus_struct __user *v86;
21446
21447+#ifdef CONFIG_GRKERNSEC_VM86
21448+ if (!capable(CAP_SYS_RAWIO)) {
21449+ gr_handle_vm86();
21450+ ret = -EPERM;
21451+ goto out;
21452+ }
21453+#endif
21454+
21455 tsk = current;
21456 switch (cmd) {
21457 case VM86_REQUEST_IRQ:
21458@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21459 tsk->thread.saved_fs = info->regs32->fs;
21460 tsk->thread.saved_gs = get_user_gs(info->regs32);
21461
21462- tss = &per_cpu(init_tss, get_cpu());
21463+ tss = init_tss + get_cpu();
21464 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21465 if (cpu_has_sep)
21466 tsk->thread.sysenter_cs = 0;
21467@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21468 goto cannot_handle;
21469 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21470 goto cannot_handle;
21471- intr_ptr = (unsigned long __user *) (i << 2);
21472+ intr_ptr = (__force unsigned long __user *) (i << 2);
21473 if (get_user(segoffs, intr_ptr))
21474 goto cannot_handle;
21475 if ((segoffs >> 16) == BIOSSEG)
21476diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
21477index 22a1530..8fbaaad 100644
21478--- a/arch/x86/kernel/vmlinux.lds.S
21479+++ b/arch/x86/kernel/vmlinux.lds.S
21480@@ -26,6 +26,13 @@
21481 #include <asm/page_types.h>
21482 #include <asm/cache.h>
21483 #include <asm/boot.h>
21484+#include <asm/segment.h>
21485+
21486+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21487+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
21488+#else
21489+#define __KERNEL_TEXT_OFFSET 0
21490+#endif
21491
21492 #undef i386 /* in case the preprocessor is a 32bit one */
21493
21494@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
21495
21496 PHDRS {
21497 text PT_LOAD FLAGS(5); /* R_E */
21498+#ifdef CONFIG_X86_32
21499+ module PT_LOAD FLAGS(5); /* R_E */
21500+#endif
21501+#ifdef CONFIG_XEN
21502+ rodata PT_LOAD FLAGS(5); /* R_E */
21503+#else
21504+ rodata PT_LOAD FLAGS(4); /* R__ */
21505+#endif
21506 data PT_LOAD FLAGS(6); /* RW_ */
21507-#ifdef CONFIG_X86_64
21508+ init.begin PT_LOAD FLAGS(6); /* RW_ */
21509 #ifdef CONFIG_SMP
21510 percpu PT_LOAD FLAGS(6); /* RW_ */
21511 #endif
21512+ text.init PT_LOAD FLAGS(5); /* R_E */
21513+ text.exit PT_LOAD FLAGS(5); /* R_E */
21514 init PT_LOAD FLAGS(7); /* RWE */
21515-#endif
21516 note PT_NOTE FLAGS(0); /* ___ */
21517 }
21518
21519 SECTIONS
21520 {
21521 #ifdef CONFIG_X86_32
21522- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
21523- phys_startup_32 = startup_32 - LOAD_OFFSET;
21524+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
21525 #else
21526- . = __START_KERNEL;
21527- phys_startup_64 = startup_64 - LOAD_OFFSET;
21528+ . = __START_KERNEL;
21529 #endif
21530
21531 /* Text and read-only data */
21532- .text : AT(ADDR(.text) - LOAD_OFFSET) {
21533- _text = .;
21534+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21535 /* bootstrapping code */
21536+#ifdef CONFIG_X86_32
21537+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21538+#else
21539+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21540+#endif
21541+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21542+ _text = .;
21543 HEAD_TEXT
21544 #ifdef CONFIG_X86_32
21545 . = ALIGN(PAGE_SIZE);
21546@@ -108,13 +128,48 @@ SECTIONS
21547 IRQENTRY_TEXT
21548 *(.fixup)
21549 *(.gnu.warning)
21550- /* End of text section */
21551- _etext = .;
21552 } :text = 0x9090
21553
21554- NOTES :text :note
21555+ . += __KERNEL_TEXT_OFFSET;
21556
21557- EXCEPTION_TABLE(16) :text = 0x9090
21558+#ifdef CONFIG_X86_32
21559+ . = ALIGN(PAGE_SIZE);
21560+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21561+
21562+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21563+ MODULES_EXEC_VADDR = .;
21564+ BYTE(0)
21565+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21566+ . = ALIGN(HPAGE_SIZE) - 1;
21567+ MODULES_EXEC_END = .;
21568+#endif
21569+
21570+ } :module
21571+#endif
21572+
21573+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21574+ /* End of text section */
21575+ BYTE(0)
21576+ _etext = . - __KERNEL_TEXT_OFFSET;
21577+ }
21578+
21579+#ifdef CONFIG_X86_32
21580+ . = ALIGN(PAGE_SIZE);
21581+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21582+ *(.idt)
21583+ . = ALIGN(PAGE_SIZE);
21584+ *(.empty_zero_page)
21585+ *(.initial_pg_fixmap)
21586+ *(.initial_pg_pmd)
21587+ *(.initial_page_table)
21588+ *(.swapper_pg_dir)
21589+ } :rodata
21590+#endif
21591+
21592+ . = ALIGN(PAGE_SIZE);
21593+ NOTES :rodata :note
21594+
21595+ EXCEPTION_TABLE(16) :rodata
21596
21597 #if defined(CONFIG_DEBUG_RODATA)
21598 /* .text should occupy whole number of pages */
21599@@ -126,16 +181,20 @@ SECTIONS
21600
21601 /* Data */
21602 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21603+
21604+#ifdef CONFIG_PAX_KERNEXEC
21605+ . = ALIGN(HPAGE_SIZE);
21606+#else
21607+ . = ALIGN(PAGE_SIZE);
21608+#endif
21609+
21610 /* Start of data section */
21611 _sdata = .;
21612
21613 /* init_task */
21614 INIT_TASK_DATA(THREAD_SIZE)
21615
21616-#ifdef CONFIG_X86_32
21617- /* 32 bit has nosave before _edata */
21618 NOSAVE_DATA
21619-#endif
21620
21621 PAGE_ALIGNED_DATA(PAGE_SIZE)
21622
21623@@ -176,12 +235,19 @@ SECTIONS
21624 #endif /* CONFIG_X86_64 */
21625
21626 /* Init code and data - will be freed after init */
21627- . = ALIGN(PAGE_SIZE);
21628 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21629+ BYTE(0)
21630+
21631+#ifdef CONFIG_PAX_KERNEXEC
21632+ . = ALIGN(HPAGE_SIZE);
21633+#else
21634+ . = ALIGN(PAGE_SIZE);
21635+#endif
21636+
21637 __init_begin = .; /* paired with __init_end */
21638- }
21639+ } :init.begin
21640
21641-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21642+#ifdef CONFIG_SMP
21643 /*
21644 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21645 * output PHDR, so the next output section - .init.text - should
21646@@ -190,12 +256,27 @@ SECTIONS
21647 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
21648 #endif
21649
21650- INIT_TEXT_SECTION(PAGE_SIZE)
21651-#ifdef CONFIG_X86_64
21652- :init
21653-#endif
21654+ . = ALIGN(PAGE_SIZE);
21655+ init_begin = .;
21656+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21657+ VMLINUX_SYMBOL(_sinittext) = .;
21658+ INIT_TEXT
21659+ VMLINUX_SYMBOL(_einittext) = .;
21660+ . = ALIGN(PAGE_SIZE);
21661+ } :text.init
21662
21663- INIT_DATA_SECTION(16)
21664+ /*
21665+ * .exit.text is discarded at runtime, not link time, to deal with
21666+ * references from .altinstructions and .eh_frame
21667+ */
21668+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21669+ EXIT_TEXT
21670+ . = ALIGN(16);
21671+ } :text.exit
21672+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21673+
21674+ . = ALIGN(PAGE_SIZE);
21675+ INIT_DATA_SECTION(16) :init
21676
21677 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21678 __x86_cpu_dev_start = .;
21679@@ -257,19 +338,12 @@ SECTIONS
21680 }
21681
21682 . = ALIGN(8);
21683- /*
21684- * .exit.text is discard at runtime, not link time, to deal with
21685- * references from .altinstructions and .eh_frame
21686- */
21687- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21688- EXIT_TEXT
21689- }
21690
21691 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21692 EXIT_DATA
21693 }
21694
21695-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21696+#ifndef CONFIG_SMP
21697 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
21698 #endif
21699
21700@@ -288,16 +362,10 @@ SECTIONS
21701 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
21702 __smp_locks = .;
21703 *(.smp_locks)
21704- . = ALIGN(PAGE_SIZE);
21705 __smp_locks_end = .;
21706+ . = ALIGN(PAGE_SIZE);
21707 }
21708
21709-#ifdef CONFIG_X86_64
21710- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21711- NOSAVE_DATA
21712- }
21713-#endif
21714-
21715 /* BSS */
21716 . = ALIGN(PAGE_SIZE);
21717 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21718@@ -313,6 +381,7 @@ SECTIONS
21719 __brk_base = .;
21720 . += 64 * 1024; /* 64k alignment slop space */
21721 *(.brk_reservation) /* areas brk users have reserved */
21722+ . = ALIGN(HPAGE_SIZE);
21723 __brk_limit = .;
21724 }
21725
21726@@ -339,13 +408,12 @@ SECTIONS
21727 * for the boot processor.
21728 */
21729 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
21730-INIT_PER_CPU(gdt_page);
21731 INIT_PER_CPU(irq_stack_union);
21732
21733 /*
21734 * Build-time check on the image size:
21735 */
21736-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21737+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21738 "kernel image bigger than KERNEL_IMAGE_SIZE");
21739
21740 #ifdef CONFIG_SMP
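
Note: the PHDRS numbers are ELF p_flags values, ORs of the standard permission bits, which makes the intent of the segment rework readable at a glance: afterwards init is the only remaining RWE segment, rodata loses write (and, outside Xen, execute), and module/text.init/text.exit carry text permissions of their own:

    #define PF_X 0x1        /* execute */
    #define PF_W 0x2        /* write   */
    #define PF_R 0x4        /* read    */

    /* FLAGS(5) == PF_R | PF_X          R_E   text, module, text.init, text.exit
     * FLAGS(4) == PF_R                 R__   rodata (non-Xen)
     * FLAGS(6) == PF_R | PF_W          RW_   data, init.begin, percpu
     * FLAGS(7) == PF_R | PF_W | PF_X   RWE   init (the only RWX segment left)
     */
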
21741diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21742index 3a3e8c9..1af9465 100644
21743--- a/arch/x86/kernel/vsyscall_64.c
21744+++ b/arch/x86/kernel/vsyscall_64.c
21745@@ -56,15 +56,13 @@
21746 DEFINE_VVAR(int, vgetcpu_mode);
21747 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
21748
21749-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
21750+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
21751
21752 static int __init vsyscall_setup(char *str)
21753 {
21754 if (str) {
21755 if (!strcmp("emulate", str))
21756 vsyscall_mode = EMULATE;
21757- else if (!strcmp("native", str))
21758- vsyscall_mode = NATIVE;
21759 else if (!strcmp("none", str))
21760 vsyscall_mode = NONE;
21761 else
21762@@ -315,8 +313,7 @@ done:
21763 return true;
21764
21765 sigsegv:
21766- force_sig(SIGSEGV, current);
21767- return true;
21768+ do_group_exit(SIGKILL);
21769 }
21770
21771 /*
21772@@ -369,10 +366,7 @@ void __init map_vsyscall(void)
21773 extern char __vvar_page;
21774 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
21775
21776- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
21777- vsyscall_mode == NATIVE
21778- ? PAGE_KERNEL_VSYSCALL
21779- : PAGE_KERNEL_VVAR);
21780+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
21781 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
21782 (unsigned long)VSYSCALL_START);
21783
21784diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21785index 1330dd1..d220b99 100644
21786--- a/arch/x86/kernel/x8664_ksyms_64.c
21787+++ b/arch/x86/kernel/x8664_ksyms_64.c
21788@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
21789 EXPORT_SYMBOL(copy_user_generic_unrolled);
21790 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
21791 EXPORT_SYMBOL(__copy_user_nocache);
21792-EXPORT_SYMBOL(_copy_from_user);
21793-EXPORT_SYMBOL(_copy_to_user);
21794
21795 EXPORT_SYMBOL(copy_page);
21796 EXPORT_SYMBOL(clear_page);
21797diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
21798index 7a3d075..6cb373d 100644
21799--- a/arch/x86/kernel/x86_init.c
21800+++ b/arch/x86/kernel/x86_init.c
21801@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
21802 },
21803 };
21804
21805-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
21806+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
21807 .early_percpu_clock_init = x86_init_noop,
21808 .setup_percpu_clockev = setup_secondary_APIC_clock,
21809 };
21810@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
21811 static void default_nmi_init(void) { };
21812 static int default_i8042_detect(void) { return 1; };
21813
21814-struct x86_platform_ops x86_platform = {
21815+struct x86_platform_ops x86_platform __read_only = {
21816 .calibrate_tsc = native_calibrate_tsc,
21817 .get_wallclock = mach_get_cmos_time,
21818 .set_wallclock = mach_set_rtc_mmss,
21819@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
21820 };
21821
21822 EXPORT_SYMBOL_GPL(x86_platform);
21823-struct x86_msi_ops x86_msi = {
21824+struct x86_msi_ops x86_msi __read_only = {
21825 .setup_msi_irqs = native_setup_msi_irqs,
21826 .teardown_msi_irq = native_teardown_msi_irq,
21827 .teardown_msi_irqs = default_teardown_msi_irqs,
21828 .restore_msi_irqs = default_restore_msi_irqs,
21829 };
21830
21831-struct x86_io_apic_ops x86_io_apic_ops = {
21832+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
21833 .init = native_io_apic_init_mappings,
21834 .read = native_io_apic_read,
21835 .write = native_io_apic_write,
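
Note on the x86_init.c hunks above: the ops tables gain __read_only, so later legitimate updates (see the pax_open_kernel()/pax_close_kernel() pairs in the svm.c and vmx.c hunks below) must briefly reopen the page for writing. A minimal userspace C analogue of that pattern using mprotect(); all names here are invented for illustration, this is a sketch, not PaX code:

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

struct ops {
	int (*calibrate)(void);
};

static int native_calibrate(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ops == MAP_FAILED)
		return 1;
	ops->calibrate = native_calibrate;
	mprotect(ops, pagesz, PROT_READ);	/* "pax_close_kernel()": normally read-only */

	/* a later legitimate update opens a brief write window: */
	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()" */
	ops->calibrate = native_calibrate;
	mprotect(ops, pagesz, PROT_READ);	/* window closed again */

	printf("calibrate() = %d\n", ops->calibrate());
	return 0;
}

A stray write to the table outside such a window now faults instead of silently redirecting a kernel function pointer.
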
21836diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21837index ada87a3..afea76d 100644
21838--- a/arch/x86/kernel/xsave.c
21839+++ b/arch/x86/kernel/xsave.c
21840@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
21841 {
21842 int err;
21843
21844+ buf = (struct xsave_struct __user *)____m(buf);
21845 if (use_xsave())
21846 err = xsave_user(buf);
21847 else if (use_fxsr())
21848@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
21849 */
21850 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
21851 {
21852+ buf = (void __user *)____m(buf);
21853 if (use_xsave()) {
21854 if ((unsigned long)buf % 64 || fx_only) {
21855 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
21856diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
21857index ec79e77..420f5cc 100644
21858--- a/arch/x86/kvm/cpuid.c
21859+++ b/arch/x86/kvm/cpuid.c
21860@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21861 struct kvm_cpuid2 *cpuid,
21862 struct kvm_cpuid_entry2 __user *entries)
21863 {
21864- int r;
21865+ int r, i;
21866
21867 r = -E2BIG;
21868 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21869 goto out;
21870 r = -EFAULT;
21871- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21872- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21873+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21874 goto out;
21875+ for (i = 0; i < cpuid->nent; ++i) {
21876+ struct kvm_cpuid_entry2 cpuid_entry;
21877+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21878+ goto out;
21879+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21880+ }
21881 vcpu->arch.cpuid_nent = cpuid->nent;
21882 kvm_apic_set_version(vcpu);
21883 kvm_x86_ops->cpuid_update(vcpu);
21884@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21885 struct kvm_cpuid2 *cpuid,
21886 struct kvm_cpuid_entry2 __user *entries)
21887 {
21888- int r;
21889+ int r, i;
21890
21891 r = -E2BIG;
21892 if (cpuid->nent < vcpu->arch.cpuid_nent)
21893 goto out;
21894 r = -EFAULT;
21895- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21896- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21897+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21898 goto out;
21899+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21900+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21901+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21902+ goto out;
21903+ }
21904 return 0;
21905
21906 out:
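
Note on the cpuid.c hunks above: a single bulk copy_from_user()/copy_to_user() over the whole entries array is replaced by one access_ok() check followed by per-entry __copy_from_user()/__copy_to_user() through a stack temporary, presumably so each usercopy has a fixed, compiler-visible size. A plain userspace C sketch of the pattern, with memcpy-by-assignment standing in for the usercopy and a dummy struct standing in for kvm_cpuid_entry2 (illustration only, not the kernel interface):

#include <stdio.h>

struct entry { unsigned int function, index; };
#define MAX_ENTRIES 80		/* stands in for KVM_MAX_CPUID_ENTRIES */

static struct entry dest[MAX_ENTRIES];

static int copy_entries(const struct entry *src, unsigned int nent)
{
	unsigned int i;

	if (nent > MAX_ENTRIES)		/* reject before touching anything */
		return -1;
	for (i = 0; i < nent; i++) {
		struct entry tmp = src[i];	/* bounce through a fixed-size local */
		dest[i] = tmp;
	}
	return 0;
}

int main(void)
{
	struct entry in[2] = { { 1, 0 }, { 7, 0 } };

	if (copy_entries(in, 2) == 0)
		printf("copied, first function = %u\n", dest[0].function);
	return 0;
}
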
21907diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21908index bba39bf..296540a 100644
21909--- a/arch/x86/kvm/emulate.c
21910+++ b/arch/x86/kvm/emulate.c
21911@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21912
21913 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
21914 do { \
21915+ unsigned long _tmp; \
21916 __asm__ __volatile__ ( \
21917 _PRE_EFLAGS("0", "4", "2") \
21918 _op _suffix " %"_x"3,%1; " \
21919@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21920 /* Raw emulation: instruction has two explicit operands. */
21921 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
21922 do { \
21923- unsigned long _tmp; \
21924- \
21925 switch ((ctxt)->dst.bytes) { \
21926 case 2: \
21927 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
21928@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
21929
21930 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21931 do { \
21932- unsigned long _tmp; \
21933 switch ((ctxt)->dst.bytes) { \
21934 case 1: \
21935 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
21936diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21937index 43e9fad..3b7c059 100644
21938--- a/arch/x86/kvm/lapic.c
21939+++ b/arch/x86/kvm/lapic.c
21940@@ -55,7 +55,7 @@
21941 #define APIC_BUS_CYCLE_NS 1
21942
21943 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21944-#define apic_debug(fmt, arg...)
21945+#define apic_debug(fmt, arg...) do {} while (0)
21946
21947 #define APIC_LVT_NUM 6
21948 /* 14 is the version for Xeon and Pentium 8.4.8*/
21949diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21950index 714e2c0..3f7a086 100644
21951--- a/arch/x86/kvm/paging_tmpl.h
21952+++ b/arch/x86/kvm/paging_tmpl.h
21953@@ -208,7 +208,7 @@ retry_walk:
21954 if (unlikely(kvm_is_error_hva(host_addr)))
21955 goto error;
21956
21957- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
21958+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
21959 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
21960 goto error;
21961 walker->ptep_user[walker->level - 1] = ptep_user;
21962diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21963index d017df3..61ae42e 100644
21964--- a/arch/x86/kvm/svm.c
21965+++ b/arch/x86/kvm/svm.c
21966@@ -3500,7 +3500,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21967 int cpu = raw_smp_processor_id();
21968
21969 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
21970+
21971+ pax_open_kernel();
21972 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
21973+ pax_close_kernel();
21974+
21975 load_TR_desc();
21976 }
21977
21978@@ -3874,6 +3878,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
21979 #endif
21980 #endif
21981
21982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21983+ __set_fs(current_thread_info()->addr_limit);
21984+#endif
21985+
21986 reload_tss(vcpu);
21987
21988 local_irq_disable();
21989diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21990index f858159..4ab7dba 100644
21991--- a/arch/x86/kvm/vmx.c
21992+++ b/arch/x86/kvm/vmx.c
21993@@ -1332,7 +1332,11 @@ static void reload_tss(void)
21994 struct desc_struct *descs;
21995
21996 descs = (void *)gdt->address;
21997+
21998+ pax_open_kernel();
21999 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
22000+ pax_close_kernel();
22001+
22002 load_TR_desc();
22003 }
22004
22005@@ -1546,6 +1550,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
22006 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
22007 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
22008
22009+#ifdef CONFIG_PAX_PER_CPU_PGD
22010+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
22011+#endif
22012+
22013 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
22014 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
22015 vmx->loaded_vmcs->cpu = cpu;
22016@@ -2669,8 +2677,11 @@ static __init int hardware_setup(void)
22017 if (!cpu_has_vmx_flexpriority())
22018 flexpriority_enabled = 0;
22019
22020- if (!cpu_has_vmx_tpr_shadow())
22021- kvm_x86_ops->update_cr8_intercept = NULL;
22022+ if (!cpu_has_vmx_tpr_shadow()) {
22023+ pax_open_kernel();
22024+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
22025+ pax_close_kernel();
22026+ }
22027
22028 if (enable_ept && !cpu_has_vmx_ept_2m_page())
22029 kvm_disable_largepages();
22030@@ -3712,7 +3723,10 @@ static void vmx_set_constant_host_state(void)
22031
22032 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
22033 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
22034+
22035+#ifndef CONFIG_PAX_PER_CPU_PGD
22036 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
22037+#endif
22038
22039 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
22040 #ifdef CONFIG_X86_64
22041@@ -3733,7 +3747,7 @@ static void vmx_set_constant_host_state(void)
22042 native_store_idt(&dt);
22043 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
22044
22045- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
22046+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
22047
22048 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
22049 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
22050@@ -6279,6 +6293,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
22051 "jmp 2f \n\t"
22052 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
22053 "2: "
22054+
22055+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22056+ "ljmp %[cs],$3f\n\t"
22057+ "3: "
22058+#endif
22059+
22060 /* Save guest registers, load host registers, keep flags */
22061 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
22062 "pop %0 \n\t"
22063@@ -6331,6 +6351,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
22064 #endif
22065 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
22066 [wordsize]"i"(sizeof(ulong))
22067+
22068+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22069+ ,[cs]"i"(__KERNEL_CS)
22070+#endif
22071+
22072 : "cc", "memory"
22073 #ifdef CONFIG_X86_64
22074 , "rax", "rbx", "rdi", "rsi"
22075@@ -6344,7 +6369,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
22076 if (debugctlmsr)
22077 update_debugctlmsr(debugctlmsr);
22078
22079-#ifndef CONFIG_X86_64
22080+#ifdef CONFIG_X86_32
22081 /*
22082 * The sysexit path does not restore ds/es, so we must set them to
22083 * a reasonable value ourselves.
22084@@ -6353,8 +6378,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
22085 * may be executed in interrupt context, which saves and restore segments
22086 * around it, nullifying its effect.
22087 */
22088- loadsegment(ds, __USER_DS);
22089- loadsegment(es, __USER_DS);
22090+ loadsegment(ds, __KERNEL_DS);
22091+ loadsegment(es, __KERNEL_DS);
22092+ loadsegment(ss, __KERNEL_DS);
22093+
22094+#ifdef CONFIG_PAX_KERNEXEC
22095+ loadsegment(fs, __KERNEL_PERCPU);
22096+#endif
22097+
22098+#ifdef CONFIG_PAX_MEMORY_UDEREF
22099+ __set_fs(current_thread_info()->addr_limit);
22100+#endif
22101+
22102 #endif
22103
22104 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
22105diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
22106index 4f76417..93429b5 100644
22107--- a/arch/x86/kvm/x86.c
22108+++ b/arch/x86/kvm/x86.c
22109@@ -1390,8 +1390,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
22110 {
22111 struct kvm *kvm = vcpu->kvm;
22112 int lm = is_long_mode(vcpu);
22113- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
22114- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
22115+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
22116+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
22117 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
22118 : kvm->arch.xen_hvm_config.blob_size_32;
22119 u32 page_num = data & ~PAGE_MASK;
22120@@ -2255,6 +2255,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
22121 if (n < msr_list.nmsrs)
22122 goto out;
22123 r = -EFAULT;
22124+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
22125+ goto out;
22126 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
22127 num_msrs_to_save * sizeof(u32)))
22128 goto out;
22129@@ -2379,7 +2381,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
22130 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
22131 struct kvm_interrupt *irq)
22132 {
22133- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
22134+ if (irq->irq >= KVM_NR_INTERRUPTS)
22135 return -EINVAL;
22136 if (irqchip_in_kernel(vcpu->kvm))
22137 return -ENXIO;
22138@@ -4881,7 +4883,7 @@ static void kvm_set_mmio_spte_mask(void)
22139 kvm_mmu_set_mmio_spte_mask(mask);
22140 }
22141
22142-int kvm_arch_init(void *opaque)
22143+int kvm_arch_init(const void *opaque)
22144 {
22145 int r;
22146 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
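
Note on the kvm_vcpu_ioctl_interrupt() hunk above: irq->irq is a __u32 in the KVM ABI, so the dropped "irq->irq < 0" comparison could never be true and the single upper-bound test is sufficient. A compilable demonstration with a stand-in constant:

#include <stdio.h>

#define KVM_NR_INTERRUPTS 256	/* stand-in value for illustration */

static int validate(unsigned int irq)
{
	/* "irq < 0" on an unsigned type is always false (gcc -Wtype-limits
	 * flags it), so this one test covers both ends of the range. */
	return irq >= KVM_NR_INTERRUPTS ? -1 : 0;
}

int main(void)
{
	printf("%d %d\n", validate(0), validate(4096u));
	return 0;
}
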
22147diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22148index 642d880..44e0f3f 100644
22149--- a/arch/x86/lguest/boot.c
22150+++ b/arch/x86/lguest/boot.c
22151@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22152 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22153 * Launcher to reboot us.
22154 */
22155-static void lguest_restart(char *reason)
22156+static __noreturn void lguest_restart(char *reason)
22157 {
22158 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
22159+ BUG();
22160 }
22161
22162 /*G:050
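
Note on the lguest_restart() hunk above: the function is marked __noreturn and gains BUG() as a backstop in case the shutdown hypercall ever returns. A userspace sketch of the same convention, with hcall() as a dummy standing in for the hypercall:

#include <stdio.h>
#include <stdlib.h>

static void hcall(const char *reason)	/* pretend hypervisor call */
{
	fprintf(stderr, "shutdown: %s\n", reason);
	exit(0);
}

static _Noreturn void restart(const char *reason)
{
	hcall(reason);
	/* BUG() analogue: if the "hypercall" returns, fail loudly rather
	 * than fall off the end of a function the compiler was promised
	 * cannot return. */
	abort();
}

int main(void)
{
	restart("reboot requested");
}
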
22163diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
22164index 00933d5..3a64af9 100644
22165--- a/arch/x86/lib/atomic64_386_32.S
22166+++ b/arch/x86/lib/atomic64_386_32.S
22167@@ -48,6 +48,10 @@ BEGIN(read)
22168 movl (v), %eax
22169 movl 4(v), %edx
22170 RET_ENDP
22171+BEGIN(read_unchecked)
22172+ movl (v), %eax
22173+ movl 4(v), %edx
22174+RET_ENDP
22175 #undef v
22176
22177 #define v %esi
22178@@ -55,6 +59,10 @@ BEGIN(set)
22179 movl %ebx, (v)
22180 movl %ecx, 4(v)
22181 RET_ENDP
22182+BEGIN(set_unchecked)
22183+ movl %ebx, (v)
22184+ movl %ecx, 4(v)
22185+RET_ENDP
22186 #undef v
22187
22188 #define v %esi
22189@@ -70,6 +78,20 @@ RET_ENDP
22190 BEGIN(add)
22191 addl %eax, (v)
22192 adcl %edx, 4(v)
22193+
22194+#ifdef CONFIG_PAX_REFCOUNT
22195+ jno 0f
22196+ subl %eax, (v)
22197+ sbbl %edx, 4(v)
22198+ int $4
22199+0:
22200+ _ASM_EXTABLE(0b, 0b)
22201+#endif
22202+
22203+RET_ENDP
22204+BEGIN(add_unchecked)
22205+ addl %eax, (v)
22206+ adcl %edx, 4(v)
22207 RET_ENDP
22208 #undef v
22209
22210@@ -77,6 +99,24 @@ RET_ENDP
22211 BEGIN(add_return)
22212 addl (v), %eax
22213 adcl 4(v), %edx
22214+
22215+#ifdef CONFIG_PAX_REFCOUNT
22216+ into
22217+1234:
22218+ _ASM_EXTABLE(1234b, 2f)
22219+#endif
22220+
22221+ movl %eax, (v)
22222+ movl %edx, 4(v)
22223+
22224+#ifdef CONFIG_PAX_REFCOUNT
22225+2:
22226+#endif
22227+
22228+RET_ENDP
22229+BEGIN(add_return_unchecked)
22230+ addl (v), %eax
22231+ adcl 4(v), %edx
22232 movl %eax, (v)
22233 movl %edx, 4(v)
22234 RET_ENDP
22235@@ -86,6 +126,20 @@ RET_ENDP
22236 BEGIN(sub)
22237 subl %eax, (v)
22238 sbbl %edx, 4(v)
22239+
22240+#ifdef CONFIG_PAX_REFCOUNT
22241+ jno 0f
22242+ addl %eax, (v)
22243+ adcl %edx, 4(v)
22244+ int $4
22245+0:
22246+ _ASM_EXTABLE(0b, 0b)
22247+#endif
22248+
22249+RET_ENDP
22250+BEGIN(sub_unchecked)
22251+ subl %eax, (v)
22252+ sbbl %edx, 4(v)
22253 RET_ENDP
22254 #undef v
22255
22256@@ -96,6 +150,27 @@ BEGIN(sub_return)
22257 sbbl $0, %edx
22258 addl (v), %eax
22259 adcl 4(v), %edx
22260+
22261+#ifdef CONFIG_PAX_REFCOUNT
22262+ into
22263+1234:
22264+ _ASM_EXTABLE(1234b, 2f)
22265+#endif
22266+
22267+ movl %eax, (v)
22268+ movl %edx, 4(v)
22269+
22270+#ifdef CONFIG_PAX_REFCOUNT
22271+2:
22272+#endif
22273+
22274+RET_ENDP
22275+BEGIN(sub_return_unchecked)
22276+ negl %edx
22277+ negl %eax
22278+ sbbl $0, %edx
22279+ addl (v), %eax
22280+ adcl 4(v), %edx
22281 movl %eax, (v)
22282 movl %edx, 4(v)
22283 RET_ENDP
22284@@ -105,6 +180,20 @@ RET_ENDP
22285 BEGIN(inc)
22286 addl $1, (v)
22287 adcl $0, 4(v)
22288+
22289+#ifdef CONFIG_PAX_REFCOUNT
22290+ jno 0f
22291+ subl $1, (v)
22292+ sbbl $0, 4(v)
22293+ int $4
22294+0:
22295+ _ASM_EXTABLE(0b, 0b)
22296+#endif
22297+
22298+RET_ENDP
22299+BEGIN(inc_unchecked)
22300+ addl $1, (v)
22301+ adcl $0, 4(v)
22302 RET_ENDP
22303 #undef v
22304
22305@@ -114,6 +203,26 @@ BEGIN(inc_return)
22306 movl 4(v), %edx
22307 addl $1, %eax
22308 adcl $0, %edx
22309+
22310+#ifdef CONFIG_PAX_REFCOUNT
22311+ into
22312+1234:
22313+ _ASM_EXTABLE(1234b, 2f)
22314+#endif
22315+
22316+ movl %eax, (v)
22317+ movl %edx, 4(v)
22318+
22319+#ifdef CONFIG_PAX_REFCOUNT
22320+2:
22321+#endif
22322+
22323+RET_ENDP
22324+BEGIN(inc_return_unchecked)
22325+ movl (v), %eax
22326+ movl 4(v), %edx
22327+ addl $1, %eax
22328+ adcl $0, %edx
22329 movl %eax, (v)
22330 movl %edx, 4(v)
22331 RET_ENDP
22332@@ -123,6 +232,20 @@ RET_ENDP
22333 BEGIN(dec)
22334 subl $1, (v)
22335 sbbl $0, 4(v)
22336+
22337+#ifdef CONFIG_PAX_REFCOUNT
22338+ jno 0f
22339+ addl $1, (v)
22340+ adcl $0, 4(v)
22341+ int $4
22342+0:
22343+ _ASM_EXTABLE(0b, 0b)
22344+#endif
22345+
22346+RET_ENDP
22347+BEGIN(dec_unchecked)
22348+ subl $1, (v)
22349+ sbbl $0, 4(v)
22350 RET_ENDP
22351 #undef v
22352
22353@@ -132,6 +255,26 @@ BEGIN(dec_return)
22354 movl 4(v), %edx
22355 subl $1, %eax
22356 sbbl $0, %edx
22357+
22358+#ifdef CONFIG_PAX_REFCOUNT
22359+ into
22360+1234:
22361+ _ASM_EXTABLE(1234b, 2f)
22362+#endif
22363+
22364+ movl %eax, (v)
22365+ movl %edx, 4(v)
22366+
22367+#ifdef CONFIG_PAX_REFCOUNT
22368+2:
22369+#endif
22370+
22371+RET_ENDP
22372+BEGIN(dec_return_unchecked)
22373+ movl (v), %eax
22374+ movl 4(v), %edx
22375+ subl $1, %eax
22376+ sbbl $0, %edx
22377 movl %eax, (v)
22378 movl %edx, 4(v)
22379 RET_ENDP
22380@@ -143,6 +286,13 @@ BEGIN(add_unless)
22381 adcl %edx, %edi
22382 addl (v), %eax
22383 adcl 4(v), %edx
22384+
22385+#ifdef CONFIG_PAX_REFCOUNT
22386+ into
22387+1234:
22388+ _ASM_EXTABLE(1234b, 2f)
22389+#endif
22390+
22391 cmpl %eax, %ecx
22392 je 3f
22393 1:
22394@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
22395 1:
22396 addl $1, %eax
22397 adcl $0, %edx
22398+
22399+#ifdef CONFIG_PAX_REFCOUNT
22400+ into
22401+1234:
22402+ _ASM_EXTABLE(1234b, 2f)
22403+#endif
22404+
22405 movl %eax, (v)
22406 movl %edx, 4(v)
22407 movl $1, %eax
22408@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
22409 movl 4(v), %edx
22410 subl $1, %eax
22411 sbbl $0, %edx
22412+
22413+#ifdef CONFIG_PAX_REFCOUNT
22414+ into
22415+1234:
22416+ _ASM_EXTABLE(1234b, 1f)
22417+#endif
22418+
22419 js 1f
22420 movl %eax, (v)
22421 movl %edx, 4(v)
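
Note on the atomic64_386_32.S hunks above: under PAX_REFCOUNT each checked operation tests the overflow flag ("jno"/"into"), undoes the arithmetic, and raises the overflow exception ("int $4") instead of letting a reference counter wrap; the "_unchecked" twins keep the plain wrapping behaviour. A C analogue of the checked flavor using __builtin_add_overflow (gcc and clang); illustration only, the kernel version works via exception fixups:

#include <stdio.h>
#include <stdint.h>

static int64_t refcount = INT64_MAX;	/* about to overflow */

static int ref_get(void)
{
	int64_t old = refcount;

	if (__builtin_add_overflow(old, 1, &refcount)) {
		refcount = old;		/* undo, like the subl/sbbl fixup */
		return -1;		/* the kernel raises int $4 here  */
	}
	return 0;
}

int main(void)
{
	if (ref_get() < 0)
		fprintf(stderr, "refcount overflow caught, value still %lld\n",
			(long long)refcount);
	return 0;
}
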
22422diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
22423index f5cc9eb..51fa319 100644
22424--- a/arch/x86/lib/atomic64_cx8_32.S
22425+++ b/arch/x86/lib/atomic64_cx8_32.S
22426@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
22427 CFI_STARTPROC
22428
22429 read64 %ecx
22430+ pax_force_retaddr
22431 ret
22432 CFI_ENDPROC
22433 ENDPROC(atomic64_read_cx8)
22434
22435+ENTRY(atomic64_read_unchecked_cx8)
22436+ CFI_STARTPROC
22437+
22438+ read64 %ecx
22439+ pax_force_retaddr
22440+ ret
22441+ CFI_ENDPROC
22442+ENDPROC(atomic64_read_unchecked_cx8)
22443+
22444 ENTRY(atomic64_set_cx8)
22445 CFI_STARTPROC
22446
22447@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
22448 cmpxchg8b (%esi)
22449 jne 1b
22450
22451+ pax_force_retaddr
22452 ret
22453 CFI_ENDPROC
22454 ENDPROC(atomic64_set_cx8)
22455
22456+ENTRY(atomic64_set_unchecked_cx8)
22457+ CFI_STARTPROC
22458+
22459+1:
22460+/* we don't need LOCK_PREFIX since aligned 64-bit writes
22461+ * are atomic on 586 and newer */
22462+ cmpxchg8b (%esi)
22463+ jne 1b
22464+
22465+ pax_force_retaddr
22466+ ret
22467+ CFI_ENDPROC
22468+ENDPROC(atomic64_set_unchecked_cx8)
22469+
22470 ENTRY(atomic64_xchg_cx8)
22471 CFI_STARTPROC
22472
22473@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
22474 cmpxchg8b (%esi)
22475 jne 1b
22476
22477+ pax_force_retaddr
22478 ret
22479 CFI_ENDPROC
22480 ENDPROC(atomic64_xchg_cx8)
22481
22482-.macro addsub_return func ins insc
22483-ENTRY(atomic64_\func\()_return_cx8)
22484+.macro addsub_return func ins insc unchecked=""
22485+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22486 CFI_STARTPROC
22487 SAVE ebp
22488 SAVE ebx
22489@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
22490 movl %edx, %ecx
22491 \ins\()l %esi, %ebx
22492 \insc\()l %edi, %ecx
22493+
22494+.ifb \unchecked
22495+#ifdef CONFIG_PAX_REFCOUNT
22496+ into
22497+2:
22498+ _ASM_EXTABLE(2b, 3f)
22499+#endif
22500+.endif
22501+
22502 LOCK_PREFIX
22503 cmpxchg8b (%ebp)
22504 jne 1b
22505-
22506-10:
22507 movl %ebx, %eax
22508 movl %ecx, %edx
22509+
22510+.ifb \unchecked
22511+#ifdef CONFIG_PAX_REFCOUNT
22512+3:
22513+#endif
22514+.endif
22515+
22516 RESTORE edi
22517 RESTORE esi
22518 RESTORE ebx
22519 RESTORE ebp
22520+ pax_force_retaddr
22521 ret
22522 CFI_ENDPROC
22523-ENDPROC(atomic64_\func\()_return_cx8)
22524+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22525 .endm
22526
22527 addsub_return add add adc
22528 addsub_return sub sub sbb
22529+addsub_return add add adc _unchecked
22530+addsub_return sub sub sbb _unchecked
22531
22532-.macro incdec_return func ins insc
22533-ENTRY(atomic64_\func\()_return_cx8)
22534+.macro incdec_return func ins insc unchecked=""
22535+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
22536 CFI_STARTPROC
22537 SAVE ebx
22538
22539@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
22540 movl %edx, %ecx
22541 \ins\()l $1, %ebx
22542 \insc\()l $0, %ecx
22543+
22544+.ifb \unchecked
22545+#ifdef CONFIG_PAX_REFCOUNT
22546+ into
22547+2:
22548+ _ASM_EXTABLE(2b, 3f)
22549+#endif
22550+.endif
22551+
22552 LOCK_PREFIX
22553 cmpxchg8b (%esi)
22554 jne 1b
22555
22556-10:
22557 movl %ebx, %eax
22558 movl %ecx, %edx
22559+
22560+.ifb \unchecked
22561+#ifdef CONFIG_PAX_REFCOUNT
22562+3:
22563+#endif
22564+.endif
22565+
22566 RESTORE ebx
22567+ pax_force_retaddr
22568 ret
22569 CFI_ENDPROC
22570-ENDPROC(atomic64_\func\()_return_cx8)
22571+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
22572 .endm
22573
22574 incdec_return inc add adc
22575 incdec_return dec sub sbb
22576+incdec_return inc add adc _unchecked
22577+incdec_return dec sub sbb _unchecked
22578
22579 ENTRY(atomic64_dec_if_positive_cx8)
22580 CFI_STARTPROC
22581@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
22582 movl %edx, %ecx
22583 subl $1, %ebx
22584 sbb $0, %ecx
22585+
22586+#ifdef CONFIG_PAX_REFCOUNT
22587+ into
22588+1234:
22589+ _ASM_EXTABLE(1234b, 2f)
22590+#endif
22591+
22592 js 2f
22593 LOCK_PREFIX
22594 cmpxchg8b (%esi)
22595@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
22596 movl %ebx, %eax
22597 movl %ecx, %edx
22598 RESTORE ebx
22599+ pax_force_retaddr
22600 ret
22601 CFI_ENDPROC
22602 ENDPROC(atomic64_dec_if_positive_cx8)
22603@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
22604 movl %edx, %ecx
22605 addl %ebp, %ebx
22606 adcl %edi, %ecx
22607+
22608+#ifdef CONFIG_PAX_REFCOUNT
22609+ into
22610+1234:
22611+ _ASM_EXTABLE(1234b, 3f)
22612+#endif
22613+
22614 LOCK_PREFIX
22615 cmpxchg8b (%esi)
22616 jne 1b
22617@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
22618 CFI_ADJUST_CFA_OFFSET -8
22619 RESTORE ebx
22620 RESTORE ebp
22621+ pax_force_retaddr
22622 ret
22623 4:
22624 cmpl %edx, 4(%esp)
22625@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
22626 xorl %ecx, %ecx
22627 addl $1, %ebx
22628 adcl %edx, %ecx
22629+
22630+#ifdef CONFIG_PAX_REFCOUNT
22631+ into
22632+1234:
22633+ _ASM_EXTABLE(1234b, 3f)
22634+#endif
22635+
22636 LOCK_PREFIX
22637 cmpxchg8b (%esi)
22638 jne 1b
22639@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
22640 movl $1, %eax
22641 3:
22642 RESTORE ebx
22643+ pax_force_retaddr
22644 ret
22645 CFI_ENDPROC
22646 ENDPROC(atomic64_inc_not_zero_cx8)
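
Note on the atomic64_cx8_32.S hunks above: the GAS macros grow an "unchecked" parameter and are instantiated twice, emitting each operation once with the overflow trap and once without (for counters where modular wrap-around is intended). The same generate-two-flavors trick in the C preprocessor, for illustration only:

#include <stdio.h>

#define DEFINE_ADD(suffix, checked)					\
static unsigned long add##suffix(unsigned long a, unsigned long b)	\
{									\
	unsigned long r;						\
	if (checked && __builtin_add_overflow(a, b, &r))		\
		return a;	/* checked flavor refuses to wrap */	\
	return a + b;							\
}

DEFINE_ADD(, 1)			/* add():           wrap-checked      */
DEFINE_ADD(_unchecked, 0)	/* add_unchecked(): plain modular add */

int main(void)
{
	printf("%lu %lu\n", add(-1UL, 2), add_unchecked(-1UL, 2));
	return 0;
}
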
22647diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22648index 2af5df3..62b1a5a 100644
22649--- a/arch/x86/lib/checksum_32.S
22650+++ b/arch/x86/lib/checksum_32.S
22651@@ -29,7 +29,8 @@
22652 #include <asm/dwarf2.h>
22653 #include <asm/errno.h>
22654 #include <asm/asm.h>
22655-
22656+#include <asm/segment.h>
22657+
22658 /*
22659 * computes a partial checksum, e.g. for TCP/UDP fragments
22660 */
22661@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
22662
22663 #define ARGBASE 16
22664 #define FP 12
22665-
22666-ENTRY(csum_partial_copy_generic)
22667+
22668+ENTRY(csum_partial_copy_generic_to_user)
22669 CFI_STARTPROC
22670+
22671+#ifdef CONFIG_PAX_MEMORY_UDEREF
22672+ pushl_cfi %gs
22673+ popl_cfi %es
22674+ jmp csum_partial_copy_generic
22675+#endif
22676+
22677+ENTRY(csum_partial_copy_generic_from_user)
22678+
22679+#ifdef CONFIG_PAX_MEMORY_UDEREF
22680+ pushl_cfi %gs
22681+ popl_cfi %ds
22682+#endif
22683+
22684+ENTRY(csum_partial_copy_generic)
22685 subl $4,%esp
22686 CFI_ADJUST_CFA_OFFSET 4
22687 pushl_cfi %edi
22688@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
22689 jmp 4f
22690 SRC(1: movw (%esi), %bx )
22691 addl $2, %esi
22692-DST( movw %bx, (%edi) )
22693+DST( movw %bx, %es:(%edi) )
22694 addl $2, %edi
22695 addw %bx, %ax
22696 adcl $0, %eax
22697@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
22698 SRC(1: movl (%esi), %ebx )
22699 SRC( movl 4(%esi), %edx )
22700 adcl %ebx, %eax
22701-DST( movl %ebx, (%edi) )
22702+DST( movl %ebx, %es:(%edi) )
22703 adcl %edx, %eax
22704-DST( movl %edx, 4(%edi) )
22705+DST( movl %edx, %es:4(%edi) )
22706
22707 SRC( movl 8(%esi), %ebx )
22708 SRC( movl 12(%esi), %edx )
22709 adcl %ebx, %eax
22710-DST( movl %ebx, 8(%edi) )
22711+DST( movl %ebx, %es:8(%edi) )
22712 adcl %edx, %eax
22713-DST( movl %edx, 12(%edi) )
22714+DST( movl %edx, %es:12(%edi) )
22715
22716 SRC( movl 16(%esi), %ebx )
22717 SRC( movl 20(%esi), %edx )
22718 adcl %ebx, %eax
22719-DST( movl %ebx, 16(%edi) )
22720+DST( movl %ebx, %es:16(%edi) )
22721 adcl %edx, %eax
22722-DST( movl %edx, 20(%edi) )
22723+DST( movl %edx, %es:20(%edi) )
22724
22725 SRC( movl 24(%esi), %ebx )
22726 SRC( movl 28(%esi), %edx )
22727 adcl %ebx, %eax
22728-DST( movl %ebx, 24(%edi) )
22729+DST( movl %ebx, %es:24(%edi) )
22730 adcl %edx, %eax
22731-DST( movl %edx, 28(%edi) )
22732+DST( movl %edx, %es:28(%edi) )
22733
22734 lea 32(%esi), %esi
22735 lea 32(%edi), %edi
22736@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
22737 shrl $2, %edx # This clears CF
22738 SRC(3: movl (%esi), %ebx )
22739 adcl %ebx, %eax
22740-DST( movl %ebx, (%edi) )
22741+DST( movl %ebx, %es:(%edi) )
22742 lea 4(%esi), %esi
22743 lea 4(%edi), %edi
22744 dec %edx
22745@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
22746 jb 5f
22747 SRC( movw (%esi), %cx )
22748 leal 2(%esi), %esi
22749-DST( movw %cx, (%edi) )
22750+DST( movw %cx, %es:(%edi) )
22751 leal 2(%edi), %edi
22752 je 6f
22753 shll $16,%ecx
22754 SRC(5: movb (%esi), %cl )
22755-DST( movb %cl, (%edi) )
22756+DST( movb %cl, %es:(%edi) )
22757 6: addl %ecx, %eax
22758 adcl $0, %eax
22759 7:
22760@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
22761
22762 6001:
22763 movl ARGBASE+20(%esp), %ebx # src_err_ptr
22764- movl $-EFAULT, (%ebx)
22765+ movl $-EFAULT, %ss:(%ebx)
22766
22767 # zero the complete destination - computing the rest
22768 # is too much work
22769@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
22770
22771 6002:
22772 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22773- movl $-EFAULT,(%ebx)
22774+ movl $-EFAULT,%ss:(%ebx)
22775 jmp 5000b
22776
22777 .previous
22778
22779+ pushl_cfi %ss
22780+ popl_cfi %ds
22781+ pushl_cfi %ss
22782+ popl_cfi %es
22783 popl_cfi %ebx
22784 CFI_RESTORE ebx
22785 popl_cfi %esi
22786@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
22787 popl_cfi %ecx # equivalent to addl $4,%esp
22788 ret
22789 CFI_ENDPROC
22790-ENDPROC(csum_partial_copy_generic)
22791+ENDPROC(csum_partial_copy_generic_to_user)
22792
22793 #else
22794
22795 /* Version for PentiumII/PPro */
22796
22797 #define ROUND1(x) \
22798+ nop; nop; nop; \
22799 SRC(movl x(%esi), %ebx ) ; \
22800 addl %ebx, %eax ; \
22801- DST(movl %ebx, x(%edi) ) ;
22802+ DST(movl %ebx, %es:x(%edi)) ;
22803
22804 #define ROUND(x) \
22805+ nop; nop; nop; \
22806 SRC(movl x(%esi), %ebx ) ; \
22807 adcl %ebx, %eax ; \
22808- DST(movl %ebx, x(%edi) ) ;
22809+ DST(movl %ebx, %es:x(%edi)) ;
22810
22811 #define ARGBASE 12
22812-
22813-ENTRY(csum_partial_copy_generic)
22814+
22815+ENTRY(csum_partial_copy_generic_to_user)
22816 CFI_STARTPROC
22817+
22818+#ifdef CONFIG_PAX_MEMORY_UDEREF
22819+ pushl_cfi %gs
22820+ popl_cfi %es
22821+ jmp csum_partial_copy_generic
22822+#endif
22823+
22824+ENTRY(csum_partial_copy_generic_from_user)
22825+
22826+#ifdef CONFIG_PAX_MEMORY_UDEREF
22827+ pushl_cfi %gs
22828+ popl_cfi %ds
22829+#endif
22830+
22831+ENTRY(csum_partial_copy_generic)
22832 pushl_cfi %ebx
22833 CFI_REL_OFFSET ebx, 0
22834 pushl_cfi %edi
22835@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
22836 subl %ebx, %edi
22837 lea -1(%esi),%edx
22838 andl $-32,%edx
22839- lea 3f(%ebx,%ebx), %ebx
22840+ lea 3f(%ebx,%ebx,2), %ebx
22841 testl %esi, %esi
22842 jmp *%ebx
22843 1: addl $64,%esi
22844@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
22845 jb 5f
22846 SRC( movw (%esi), %dx )
22847 leal 2(%esi), %esi
22848-DST( movw %dx, (%edi) )
22849+DST( movw %dx, %es:(%edi) )
22850 leal 2(%edi), %edi
22851 je 6f
22852 shll $16,%edx
22853 5:
22854 SRC( movb (%esi), %dl )
22855-DST( movb %dl, (%edi) )
22856+DST( movb %dl, %es:(%edi) )
22857 6: addl %edx, %eax
22858 adcl $0, %eax
22859 7:
22860 .section .fixup, "ax"
22861 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22862- movl $-EFAULT, (%ebx)
22863+ movl $-EFAULT, %ss:(%ebx)
22864 # zero the complete destination (computing the rest is too much work)
22865 movl ARGBASE+8(%esp),%edi # dst
22866 movl ARGBASE+12(%esp),%ecx # len
22867@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
22868 rep; stosb
22869 jmp 7b
22870 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22871- movl $-EFAULT, (%ebx)
22872+ movl $-EFAULT, %ss:(%ebx)
22873 jmp 7b
22874 .previous
22875
22876+#ifdef CONFIG_PAX_MEMORY_UDEREF
22877+ pushl_cfi %ss
22878+ popl_cfi %ds
22879+ pushl_cfi %ss
22880+ popl_cfi %es
22881+#endif
22882+
22883 popl_cfi %esi
22884 CFI_RESTORE esi
22885 popl_cfi %edi
22886@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
22887 CFI_RESTORE ebx
22888 ret
22889 CFI_ENDPROC
22890-ENDPROC(csum_partial_copy_generic)
22891+ENDPROC(csum_partial_copy_generic_to_user)
22892
22893 #undef ROUND
22894 #undef ROUND1
22895diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22896index f2145cf..cea889d 100644
22897--- a/arch/x86/lib/clear_page_64.S
22898+++ b/arch/x86/lib/clear_page_64.S
22899@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
22900 movl $4096/8,%ecx
22901 xorl %eax,%eax
22902 rep stosq
22903+ pax_force_retaddr
22904 ret
22905 CFI_ENDPROC
22906 ENDPROC(clear_page_c)
22907@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
22908 movl $4096,%ecx
22909 xorl %eax,%eax
22910 rep stosb
22911+ pax_force_retaddr
22912 ret
22913 CFI_ENDPROC
22914 ENDPROC(clear_page_c_e)
22915@@ -43,6 +45,7 @@ ENTRY(clear_page)
22916 leaq 64(%rdi),%rdi
22917 jnz .Lloop
22918 nop
22919+ pax_force_retaddr
22920 ret
22921 CFI_ENDPROC
22922 .Lclear_page_end:
22923@@ -58,7 +61,7 @@ ENDPROC(clear_page)
22924
22925 #include <asm/cpufeature.h>
22926
22927- .section .altinstr_replacement,"ax"
22928+ .section .altinstr_replacement,"a"
22929 1: .byte 0xeb /* jmp <disp8> */
22930 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22931 2: .byte 0xeb /* jmp <disp8> */
22932diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
22933index 1e572c5..2a162cd 100644
22934--- a/arch/x86/lib/cmpxchg16b_emu.S
22935+++ b/arch/x86/lib/cmpxchg16b_emu.S
22936@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
22937
22938 popf
22939 mov $1, %al
22940+ pax_force_retaddr
22941 ret
22942
22943 not_same:
22944 popf
22945 xor %al,%al
22946+ pax_force_retaddr
22947 ret
22948
22949 CFI_ENDPROC
22950diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22951index 6b34d04..dccb07f 100644
22952--- a/arch/x86/lib/copy_page_64.S
22953+++ b/arch/x86/lib/copy_page_64.S
22954@@ -9,6 +9,7 @@ copy_page_c:
22955 CFI_STARTPROC
22956 movl $4096/8,%ecx
22957 rep movsq
22958+ pax_force_retaddr
22959 ret
22960 CFI_ENDPROC
22961 ENDPROC(copy_page_c)
22962@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
22963
22964 ENTRY(copy_page)
22965 CFI_STARTPROC
22966- subq $2*8,%rsp
22967- CFI_ADJUST_CFA_OFFSET 2*8
22968+ subq $3*8,%rsp
22969+ CFI_ADJUST_CFA_OFFSET 3*8
22970 movq %rbx,(%rsp)
22971 CFI_REL_OFFSET rbx, 0
22972 movq %r12,1*8(%rsp)
22973 CFI_REL_OFFSET r12, 1*8
22974+ movq %r13,2*8(%rsp)
22975+ CFI_REL_OFFSET r13, 2*8
22976
22977 movl $(4096/64)-5,%ecx
22978 .p2align 4
22979@@ -37,7 +40,7 @@ ENTRY(copy_page)
22980 movq 16 (%rsi), %rdx
22981 movq 24 (%rsi), %r8
22982 movq 32 (%rsi), %r9
22983- movq 40 (%rsi), %r10
22984+ movq 40 (%rsi), %r13
22985 movq 48 (%rsi), %r11
22986 movq 56 (%rsi), %r12
22987
22988@@ -48,7 +51,7 @@ ENTRY(copy_page)
22989 movq %rdx, 16 (%rdi)
22990 movq %r8, 24 (%rdi)
22991 movq %r9, 32 (%rdi)
22992- movq %r10, 40 (%rdi)
22993+ movq %r13, 40 (%rdi)
22994 movq %r11, 48 (%rdi)
22995 movq %r12, 56 (%rdi)
22996
22997@@ -67,7 +70,7 @@ ENTRY(copy_page)
22998 movq 16 (%rsi), %rdx
22999 movq 24 (%rsi), %r8
23000 movq 32 (%rsi), %r9
23001- movq 40 (%rsi), %r10
23002+ movq 40 (%rsi), %r13
23003 movq 48 (%rsi), %r11
23004 movq 56 (%rsi), %r12
23005
23006@@ -76,7 +79,7 @@ ENTRY(copy_page)
23007 movq %rdx, 16 (%rdi)
23008 movq %r8, 24 (%rdi)
23009 movq %r9, 32 (%rdi)
23010- movq %r10, 40 (%rdi)
23011+ movq %r13, 40 (%rdi)
23012 movq %r11, 48 (%rdi)
23013 movq %r12, 56 (%rdi)
23014
23015@@ -89,8 +92,11 @@ ENTRY(copy_page)
23016 CFI_RESTORE rbx
23017 movq 1*8(%rsp),%r12
23018 CFI_RESTORE r12
23019- addq $2*8,%rsp
23020- CFI_ADJUST_CFA_OFFSET -2*8
23021+ movq 2*8(%rsp),%r13
23022+ CFI_RESTORE r13
23023+ addq $3*8,%rsp
23024+ CFI_ADJUST_CFA_OFFSET -3*8
23025+ pax_force_retaddr
23026 ret
23027 .Lcopy_page_end:
23028 CFI_ENDPROC
23029@@ -101,7 +107,7 @@ ENDPROC(copy_page)
23030
23031 #include <asm/cpufeature.h>
23032
23033- .section .altinstr_replacement,"ax"
23034+ .section .altinstr_replacement,"a"
23035 1: .byte 0xeb /* jmp <disp8> */
23036 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
23037 2:
23038diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
23039index a30ca15..d25fab6 100644
23040--- a/arch/x86/lib/copy_user_64.S
23041+++ b/arch/x86/lib/copy_user_64.S
23042@@ -18,6 +18,7 @@
23043 #include <asm/alternative-asm.h>
23044 #include <asm/asm.h>
23045 #include <asm/smap.h>
23046+#include <asm/pgtable.h>
23047
23048 /*
23049 * By placing feature2 after feature1 in altinstructions section, we logically
23050@@ -31,7 +32,7 @@
23051 .byte 0xe9 /* 32bit jump */
23052 .long \orig-1f /* by default jump to orig */
23053 1:
23054- .section .altinstr_replacement,"ax"
23055+ .section .altinstr_replacement,"a"
23056 2: .byte 0xe9 /* near jump with 32bit immediate */
23057 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
23058 3: .byte 0xe9 /* near jump with 32bit immediate */
23059@@ -70,47 +71,20 @@
23060 #endif
23061 .endm
23062
23063-/* Standard copy_to_user with segment limit checking */
23064-ENTRY(_copy_to_user)
23065- CFI_STARTPROC
23066- GET_THREAD_INFO(%rax)
23067- movq %rdi,%rcx
23068- addq %rdx,%rcx
23069- jc bad_to_user
23070- cmpq TI_addr_limit(%rax),%rcx
23071- ja bad_to_user
23072- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
23073- copy_user_generic_unrolled,copy_user_generic_string, \
23074- copy_user_enhanced_fast_string
23075- CFI_ENDPROC
23076-ENDPROC(_copy_to_user)
23077-
23078-/* Standard copy_from_user with segment limit checking */
23079-ENTRY(_copy_from_user)
23080- CFI_STARTPROC
23081- GET_THREAD_INFO(%rax)
23082- movq %rsi,%rcx
23083- addq %rdx,%rcx
23084- jc bad_from_user
23085- cmpq TI_addr_limit(%rax),%rcx
23086- ja bad_from_user
23087- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
23088- copy_user_generic_unrolled,copy_user_generic_string, \
23089- copy_user_enhanced_fast_string
23090- CFI_ENDPROC
23091-ENDPROC(_copy_from_user)
23092-
23093 .section .fixup,"ax"
23094 /* must zero dest */
23095 ENTRY(bad_from_user)
23096 bad_from_user:
23097 CFI_STARTPROC
23098+ testl %edx,%edx
23099+ js bad_to_user
23100 movl %edx,%ecx
23101 xorl %eax,%eax
23102 rep
23103 stosb
23104 bad_to_user:
23105 movl %edx,%eax
23106+ pax_force_retaddr
23107 ret
23108 CFI_ENDPROC
23109 ENDPROC(bad_from_user)
23110@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23111 jz 17f
23112 1: movq (%rsi),%r8
23113 2: movq 1*8(%rsi),%r9
23114-3: movq 2*8(%rsi),%r10
23115+3: movq 2*8(%rsi),%rax
23116 4: movq 3*8(%rsi),%r11
23117 5: movq %r8,(%rdi)
23118 6: movq %r9,1*8(%rdi)
23119-7: movq %r10,2*8(%rdi)
23120+7: movq %rax,2*8(%rdi)
23121 8: movq %r11,3*8(%rdi)
23122 9: movq 4*8(%rsi),%r8
23123 10: movq 5*8(%rsi),%r9
23124-11: movq 6*8(%rsi),%r10
23125+11: movq 6*8(%rsi),%rax
23126 12: movq 7*8(%rsi),%r11
23127 13: movq %r8,4*8(%rdi)
23128 14: movq %r9,5*8(%rdi)
23129-15: movq %r10,6*8(%rdi)
23130+15: movq %rax,6*8(%rdi)
23131 16: movq %r11,7*8(%rdi)
23132 leaq 64(%rsi),%rsi
23133 leaq 64(%rdi),%rdi
23134@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
23135 jnz 21b
23136 23: xor %eax,%eax
23137 ASM_CLAC
23138+ pax_force_retaddr
23139 ret
23140
23141 .section .fixup,"ax"
23142@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
23143 movsb
23144 4: xorl %eax,%eax
23145 ASM_CLAC
23146+ pax_force_retaddr
23147 ret
23148
23149 .section .fixup,"ax"
23150@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
23151 movsb
23152 2: xorl %eax,%eax
23153 ASM_CLAC
23154+ pax_force_retaddr
23155 ret
23156
23157 .section .fixup,"ax"
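
Note on the bad_from_user fixup above: the added "testl %edx,%edx; js bad_to_user" skips the destination-zeroing "rep stosb" when the residual length is negative, since %ecx would otherwise treat it as a huge unsigned count. The same trap in C, demonstrated safely behind the guard:

#include <stdio.h>
#include <string.h>

static void zero_rest(char *dst, int len)
{
	if (len < 0) {		/* the added "js bad_to_user" guard */
		fprintf(stderr, "refusing bogus length %d (would be %zu)\n",
			len, (size_t)len);
		return;
	}
	memset(dst, 0, (size_t)len);	/* safe: len is non-negative */
}

int main(void)
{
	char buf[16];

	zero_rest(buf, sizeof(buf));	/* fine */
	zero_rest(buf, -1);		/* caught by the guard */
	return 0;
}
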
23158diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23159index 6a4f43c..f5f9e26 100644
23160--- a/arch/x86/lib/copy_user_nocache_64.S
23161+++ b/arch/x86/lib/copy_user_nocache_64.S
23162@@ -8,6 +8,7 @@
23163
23164 #include <linux/linkage.h>
23165 #include <asm/dwarf2.h>
23166+#include <asm/alternative-asm.h>
23167
23168 #define FIX_ALIGNMENT 1
23169
23170@@ -16,6 +17,7 @@
23171 #include <asm/thread_info.h>
23172 #include <asm/asm.h>
23173 #include <asm/smap.h>
23174+#include <asm/pgtable.h>
23175
23176 .macro ALIGN_DESTINATION
23177 #ifdef FIX_ALIGNMENT
23178@@ -49,6 +51,15 @@
23179 */
23180 ENTRY(__copy_user_nocache)
23181 CFI_STARTPROC
23182+
23183+#ifdef CONFIG_PAX_MEMORY_UDEREF
23184+ mov $PAX_USER_SHADOW_BASE,%rcx
23185+ cmp %rcx,%rsi
23186+ jae 1f
23187+ add %rcx,%rsi
23188+1:
23189+#endif
23190+
23191 ASM_STAC
23192 cmpl $8,%edx
23193 jb 20f /* less than 8 bytes, go to byte copy loop */

23194@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23195 jz 17f
23196 1: movq (%rsi),%r8
23197 2: movq 1*8(%rsi),%r9
23198-3: movq 2*8(%rsi),%r10
23199+3: movq 2*8(%rsi),%rax
23200 4: movq 3*8(%rsi),%r11
23201 5: movnti %r8,(%rdi)
23202 6: movnti %r9,1*8(%rdi)
23203-7: movnti %r10,2*8(%rdi)
23204+7: movnti %rax,2*8(%rdi)
23205 8: movnti %r11,3*8(%rdi)
23206 9: movq 4*8(%rsi),%r8
23207 10: movq 5*8(%rsi),%r9
23208-11: movq 6*8(%rsi),%r10
23209+11: movq 6*8(%rsi),%rax
23210 12: movq 7*8(%rsi),%r11
23211 13: movnti %r8,4*8(%rdi)
23212 14: movnti %r9,5*8(%rdi)
23213-15: movnti %r10,6*8(%rdi)
23214+15: movnti %rax,6*8(%rdi)
23215 16: movnti %r11,7*8(%rdi)
23216 leaq 64(%rsi),%rsi
23217 leaq 64(%rdi),%rdi
23218@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
23219 23: xorl %eax,%eax
23220 ASM_CLAC
23221 sfence
23222+ pax_force_retaddr
23223 ret
23224
23225 .section .fixup,"ax"
23226diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23227index 2419d5f..953ee51 100644
23228--- a/arch/x86/lib/csum-copy_64.S
23229+++ b/arch/x86/lib/csum-copy_64.S
23230@@ -9,6 +9,7 @@
23231 #include <asm/dwarf2.h>
23232 #include <asm/errno.h>
23233 #include <asm/asm.h>
23234+#include <asm/alternative-asm.h>
23235
23236 /*
23237 * Checksum copy with exception handling.
23238@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
23239 CFI_RESTORE rbp
23240 addq $7*8, %rsp
23241 CFI_ADJUST_CFA_OFFSET -7*8
23242+ pax_force_retaddr 0, 1
23243 ret
23244 CFI_RESTORE_STATE
23245
23246diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23247index 25b7ae8..169fafc 100644
23248--- a/arch/x86/lib/csum-wrappers_64.c
23249+++ b/arch/x86/lib/csum-wrappers_64.c
23250@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23251 len -= 2;
23252 }
23253 }
23254- isum = csum_partial_copy_generic((__force const void *)src,
23255+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
23256 dst, len, isum, errp, NULL);
23257 if (unlikely(*errp))
23258 goto out_err;
23259@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23260 }
23261
23262 *errp = 0;
23263- return csum_partial_copy_generic(src, (void __force *)dst,
23264+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
23265 len, isum, NULL, errp);
23266 }
23267 EXPORT_SYMBOL(csum_partial_copy_to_user);
23268diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23269index 156b9c8..b144132 100644
23270--- a/arch/x86/lib/getuser.S
23271+++ b/arch/x86/lib/getuser.S
23272@@ -34,17 +34,40 @@
23273 #include <asm/thread_info.h>
23274 #include <asm/asm.h>
23275 #include <asm/smap.h>
23276+#include <asm/segment.h>
23277+#include <asm/pgtable.h>
23278+#include <asm/alternative-asm.h>
23279+
23280+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23281+#define __copyuser_seg gs;
23282+#else
23283+#define __copyuser_seg
23284+#endif
23285
23286 .text
23287 ENTRY(__get_user_1)
23288 CFI_STARTPROC
23289+
23290+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23291 GET_THREAD_INFO(%_ASM_DX)
23292 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23293 jae bad_get_user
23294 ASM_STAC
23295-1: movzb (%_ASM_AX),%edx
23296+
23297+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23298+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23299+ cmp %_ASM_DX,%_ASM_AX
23300+ jae 1234f
23301+ add %_ASM_DX,%_ASM_AX
23302+1234:
23303+#endif
23304+
23305+#endif
23306+
23307+1: __copyuser_seg movzb (%_ASM_AX),%edx
23308 xor %eax,%eax
23309 ASM_CLAC
23310+ pax_force_retaddr
23311 ret
23312 CFI_ENDPROC
23313 ENDPROC(__get_user_1)
23314@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
23315 ENTRY(__get_user_2)
23316 CFI_STARTPROC
23317 add $1,%_ASM_AX
23318+
23319+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23320 jc bad_get_user
23321 GET_THREAD_INFO(%_ASM_DX)
23322 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23323 jae bad_get_user
23324 ASM_STAC
23325-2: movzwl -1(%_ASM_AX),%edx
23326+
23327+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23328+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23329+ cmp %_ASM_DX,%_ASM_AX
23330+ jae 1234f
23331+ add %_ASM_DX,%_ASM_AX
23332+1234:
23333+#endif
23334+
23335+#endif
23336+
23337+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23338 xor %eax,%eax
23339 ASM_CLAC
23340+ pax_force_retaddr
23341 ret
23342 CFI_ENDPROC
23343 ENDPROC(__get_user_2)
23344@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
23345 ENTRY(__get_user_4)
23346 CFI_STARTPROC
23347 add $3,%_ASM_AX
23348+
23349+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23350 jc bad_get_user
23351 GET_THREAD_INFO(%_ASM_DX)
23352 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23353 jae bad_get_user
23354 ASM_STAC
23355-3: mov -3(%_ASM_AX),%edx
23356+
23357+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23358+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23359+ cmp %_ASM_DX,%_ASM_AX
23360+ jae 1234f
23361+ add %_ASM_DX,%_ASM_AX
23362+1234:
23363+#endif
23364+
23365+#endif
23366+
23367+3: __copyuser_seg mov -3(%_ASM_AX),%edx
23368 xor %eax,%eax
23369 ASM_CLAC
23370+ pax_force_retaddr
23371 ret
23372 CFI_ENDPROC
23373 ENDPROC(__get_user_4)
23374@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
23375 GET_THREAD_INFO(%_ASM_DX)
23376 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23377 jae bad_get_user
23378+
23379+#ifdef CONFIG_PAX_MEMORY_UDEREF
23380+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23381+ cmp %_ASM_DX,%_ASM_AX
23382+ jae 1234f
23383+ add %_ASM_DX,%_ASM_AX
23384+1234:
23385+#endif
23386+
23387 ASM_STAC
23388 4: movq -7(%_ASM_AX),%_ASM_DX
23389 xor %eax,%eax
23390 ASM_CLAC
23391+ pax_force_retaddr
23392 ret
23393 CFI_ENDPROC
23394 ENDPROC(__get_user_8)
23395@@ -101,6 +162,7 @@ bad_get_user:
23396 xor %edx,%edx
23397 mov $(-EFAULT),%_ASM_AX
23398 ASM_CLAC
23399+ pax_force_retaddr
23400 ret
23401 CFI_ENDPROC
23402 END(bad_get_user)
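
Note on the __get_user_* hunks above: under PAX_MEMORY_UDEREF on amd64, a userland address below PAX_USER_SHADOW_BASE is relocated by adding the base ("cmp/jae/add" before the access), apparently so the fetch goes through the shadow mapping PaX sets up elsewhere rather than the direct userland mapping. Pointer-arithmetic sketch only; the constant is illustrative and no real mapping is touched:

#include <stdio.h>

#define SHADOW_BASE 0x800000000000UL	/* illustrative value only */

static unsigned long shadow(unsigned long uaddr)
{
	if (uaddr < SHADOW_BASE)	/* "cmp %_ASM_DX,%_ASM_AX; jae 1234f" */
		uaddr += SHADOW_BASE;	/* "add %_ASM_DX,%_ASM_AX"            */
	return uaddr;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x7fffdeadb000UL, shadow(0x7fffdeadb000UL));
	return 0;
}
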
23403diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
23404index 54fcffe..7be149e 100644
23405--- a/arch/x86/lib/insn.c
23406+++ b/arch/x86/lib/insn.c
23407@@ -20,8 +20,10 @@
23408
23409 #ifdef __KERNEL__
23410 #include <linux/string.h>
23411+#include <asm/pgtable_types.h>
23412 #else
23413 #include <string.h>
23414+#define ktla_ktva(addr) addr
23415 #endif
23416 #include <asm/inat.h>
23417 #include <asm/insn.h>
23418@@ -53,8 +55,8 @@
23419 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
23420 {
23421 memset(insn, 0, sizeof(*insn));
23422- insn->kaddr = kaddr;
23423- insn->next_byte = kaddr;
23424+ insn->kaddr = ktla_ktva(kaddr);
23425+ insn->next_byte = ktla_ktva(kaddr);
23426 insn->x86_64 = x86_64 ? 1 : 0;
23427 insn->opnd_bytes = 4;
23428 if (x86_64)
23429diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23430index 05a95e7..326f2fa 100644
23431--- a/arch/x86/lib/iomap_copy_64.S
23432+++ b/arch/x86/lib/iomap_copy_64.S
23433@@ -17,6 +17,7 @@
23434
23435 #include <linux/linkage.h>
23436 #include <asm/dwarf2.h>
23437+#include <asm/alternative-asm.h>
23438
23439 /*
23440 * override generic version in lib/iomap_copy.c
23441@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23442 CFI_STARTPROC
23443 movl %edx,%ecx
23444 rep movsd
23445+ pax_force_retaddr
23446 ret
23447 CFI_ENDPROC
23448 ENDPROC(__iowrite32_copy)
23449diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23450index 1c273be..da9cc0e 100644
23451--- a/arch/x86/lib/memcpy_64.S
23452+++ b/arch/x86/lib/memcpy_64.S
23453@@ -33,6 +33,7 @@
23454 rep movsq
23455 movl %edx, %ecx
23456 rep movsb
23457+ pax_force_retaddr
23458 ret
23459 .Lmemcpy_e:
23460 .previous
23461@@ -49,6 +50,7 @@
23462 movq %rdi, %rax
23463 movq %rdx, %rcx
23464 rep movsb
23465+ pax_force_retaddr
23466 ret
23467 .Lmemcpy_e_e:
23468 .previous
23469@@ -76,13 +78,13 @@ ENTRY(memcpy)
23470 */
23471 movq 0*8(%rsi), %r8
23472 movq 1*8(%rsi), %r9
23473- movq 2*8(%rsi), %r10
23474+ movq 2*8(%rsi), %rcx
23475 movq 3*8(%rsi), %r11
23476 leaq 4*8(%rsi), %rsi
23477
23478 movq %r8, 0*8(%rdi)
23479 movq %r9, 1*8(%rdi)
23480- movq %r10, 2*8(%rdi)
23481+ movq %rcx, 2*8(%rdi)
23482 movq %r11, 3*8(%rdi)
23483 leaq 4*8(%rdi), %rdi
23484 jae .Lcopy_forward_loop
23485@@ -105,12 +107,12 @@ ENTRY(memcpy)
23486 subq $0x20, %rdx
23487 movq -1*8(%rsi), %r8
23488 movq -2*8(%rsi), %r9
23489- movq -3*8(%rsi), %r10
23490+ movq -3*8(%rsi), %rcx
23491 movq -4*8(%rsi), %r11
23492 leaq -4*8(%rsi), %rsi
23493 movq %r8, -1*8(%rdi)
23494 movq %r9, -2*8(%rdi)
23495- movq %r10, -3*8(%rdi)
23496+ movq %rcx, -3*8(%rdi)
23497 movq %r11, -4*8(%rdi)
23498 leaq -4*8(%rdi), %rdi
23499 jae .Lcopy_backward_loop
23500@@ -130,12 +132,13 @@ ENTRY(memcpy)
23501 */
23502 movq 0*8(%rsi), %r8
23503 movq 1*8(%rsi), %r9
23504- movq -2*8(%rsi, %rdx), %r10
23505+ movq -2*8(%rsi, %rdx), %rcx
23506 movq -1*8(%rsi, %rdx), %r11
23507 movq %r8, 0*8(%rdi)
23508 movq %r9, 1*8(%rdi)
23509- movq %r10, -2*8(%rdi, %rdx)
23510+ movq %rcx, -2*8(%rdi, %rdx)
23511 movq %r11, -1*8(%rdi, %rdx)
23512+ pax_force_retaddr
23513 retq
23514 .p2align 4
23515 .Lless_16bytes:
23516@@ -148,6 +151,7 @@ ENTRY(memcpy)
23517 movq -1*8(%rsi, %rdx), %r9
23518 movq %r8, 0*8(%rdi)
23519 movq %r9, -1*8(%rdi, %rdx)
23520+ pax_force_retaddr
23521 retq
23522 .p2align 4
23523 .Lless_8bytes:
23524@@ -161,6 +165,7 @@ ENTRY(memcpy)
23525 movl -4(%rsi, %rdx), %r8d
23526 movl %ecx, (%rdi)
23527 movl %r8d, -4(%rdi, %rdx)
23528+ pax_force_retaddr
23529 retq
23530 .p2align 4
23531 .Lless_3bytes:
23532@@ -179,6 +184,7 @@ ENTRY(memcpy)
23533 movb %cl, (%rdi)
23534
23535 .Lend:
23536+ pax_force_retaddr
23537 retq
23538 CFI_ENDPROC
23539 ENDPROC(memcpy)
23540diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
23541index ee16461..c39c199 100644
23542--- a/arch/x86/lib/memmove_64.S
23543+++ b/arch/x86/lib/memmove_64.S
23544@@ -61,13 +61,13 @@ ENTRY(memmove)
23545 5:
23546 sub $0x20, %rdx
23547 movq 0*8(%rsi), %r11
23548- movq 1*8(%rsi), %r10
23549+ movq 1*8(%rsi), %rcx
23550 movq 2*8(%rsi), %r9
23551 movq 3*8(%rsi), %r8
23552 leaq 4*8(%rsi), %rsi
23553
23554 movq %r11, 0*8(%rdi)
23555- movq %r10, 1*8(%rdi)
23556+ movq %rcx, 1*8(%rdi)
23557 movq %r9, 2*8(%rdi)
23558 movq %r8, 3*8(%rdi)
23559 leaq 4*8(%rdi), %rdi
23560@@ -81,10 +81,10 @@ ENTRY(memmove)
23561 4:
23562 movq %rdx, %rcx
23563 movq -8(%rsi, %rdx), %r11
23564- lea -8(%rdi, %rdx), %r10
23565+ lea -8(%rdi, %rdx), %r9
23566 shrq $3, %rcx
23567 rep movsq
23568- movq %r11, (%r10)
23569+ movq %r11, (%r9)
23570 jmp 13f
23571 .Lmemmove_end_forward:
23572
23573@@ -95,14 +95,14 @@ ENTRY(memmove)
23574 7:
23575 movq %rdx, %rcx
23576 movq (%rsi), %r11
23577- movq %rdi, %r10
23578+ movq %rdi, %r9
23579 leaq -8(%rsi, %rdx), %rsi
23580 leaq -8(%rdi, %rdx), %rdi
23581 shrq $3, %rcx
23582 std
23583 rep movsq
23584 cld
23585- movq %r11, (%r10)
23586+ movq %r11, (%r9)
23587 jmp 13f
23588
23589 /*
23590@@ -127,13 +127,13 @@ ENTRY(memmove)
23591 8:
23592 subq $0x20, %rdx
23593 movq -1*8(%rsi), %r11
23594- movq -2*8(%rsi), %r10
23595+ movq -2*8(%rsi), %rcx
23596 movq -3*8(%rsi), %r9
23597 movq -4*8(%rsi), %r8
23598 leaq -4*8(%rsi), %rsi
23599
23600 movq %r11, -1*8(%rdi)
23601- movq %r10, -2*8(%rdi)
23602+ movq %rcx, -2*8(%rdi)
23603 movq %r9, -3*8(%rdi)
23604 movq %r8, -4*8(%rdi)
23605 leaq -4*8(%rdi), %rdi
23606@@ -151,11 +151,11 @@ ENTRY(memmove)
23607 * Move data from 16 bytes to 31 bytes.
23608 */
23609 movq 0*8(%rsi), %r11
23610- movq 1*8(%rsi), %r10
23611+ movq 1*8(%rsi), %rcx
23612 movq -2*8(%rsi, %rdx), %r9
23613 movq -1*8(%rsi, %rdx), %r8
23614 movq %r11, 0*8(%rdi)
23615- movq %r10, 1*8(%rdi)
23616+ movq %rcx, 1*8(%rdi)
23617 movq %r9, -2*8(%rdi, %rdx)
23618 movq %r8, -1*8(%rdi, %rdx)
23619 jmp 13f
23620@@ -167,9 +167,9 @@ ENTRY(memmove)
23621 * Move data from 8 bytes to 15 bytes.
23622 */
23623 movq 0*8(%rsi), %r11
23624- movq -1*8(%rsi, %rdx), %r10
23625+ movq -1*8(%rsi, %rdx), %r9
23626 movq %r11, 0*8(%rdi)
23627- movq %r10, -1*8(%rdi, %rdx)
23628+ movq %r9, -1*8(%rdi, %rdx)
23629 jmp 13f
23630 10:
23631 cmpq $4, %rdx
23632@@ -178,9 +178,9 @@ ENTRY(memmove)
23633 * Move data from 4 bytes to 7 bytes.
23634 */
23635 movl (%rsi), %r11d
23636- movl -4(%rsi, %rdx), %r10d
23637+ movl -4(%rsi, %rdx), %r9d
23638 movl %r11d, (%rdi)
23639- movl %r10d, -4(%rdi, %rdx)
23640+ movl %r9d, -4(%rdi, %rdx)
23641 jmp 13f
23642 11:
23643 cmp $2, %rdx
23644@@ -189,9 +189,9 @@ ENTRY(memmove)
23645 * Move data from 2 bytes to 3 bytes.
23646 */
23647 movw (%rsi), %r11w
23648- movw -2(%rsi, %rdx), %r10w
23649+ movw -2(%rsi, %rdx), %r9w
23650 movw %r11w, (%rdi)
23651- movw %r10w, -2(%rdi, %rdx)
23652+ movw %r9w, -2(%rdi, %rdx)
23653 jmp 13f
23654 12:
23655 cmp $1, %rdx
23656@@ -202,6 +202,7 @@ ENTRY(memmove)
23657 movb (%rsi), %r11b
23658 movb %r11b, (%rdi)
23659 13:
23660+ pax_force_retaddr
23661 retq
23662 CFI_ENDPROC
23663
23664@@ -210,6 +211,7 @@ ENTRY(memmove)
23665 /* Forward moving data. */
23666 movq %rdx, %rcx
23667 rep movsb
23668+ pax_force_retaddr
23669 retq
23670 .Lmemmove_end_forward_efs:
23671 .previous
23672diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23673index 2dcb380..963660a 100644
23674--- a/arch/x86/lib/memset_64.S
23675+++ b/arch/x86/lib/memset_64.S
23676@@ -30,6 +30,7 @@
23677 movl %edx,%ecx
23678 rep stosb
23679 movq %r9,%rax
23680+ pax_force_retaddr
23681 ret
23682 .Lmemset_e:
23683 .previous
23684@@ -52,6 +53,7 @@
23685 movq %rdx,%rcx
23686 rep stosb
23687 movq %r9,%rax
23688+ pax_force_retaddr
23689 ret
23690 .Lmemset_e_e:
23691 .previous
23692@@ -59,7 +61,7 @@
23693 ENTRY(memset)
23694 ENTRY(__memset)
23695 CFI_STARTPROC
23696- movq %rdi,%r10
23697+ movq %rdi,%r11
23698
23699 /* expand byte value */
23700 movzbl %sil,%ecx
23701@@ -117,7 +119,8 @@ ENTRY(__memset)
23702 jnz .Lloop_1
23703
23704 .Lende:
23705- movq %r10,%rax
23706+ movq %r11,%rax
23707+ pax_force_retaddr
23708 ret
23709
23710 CFI_RESTORE_STATE
23711diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23712index c9f2d9b..e7fd2c0 100644
23713--- a/arch/x86/lib/mmx_32.c
23714+++ b/arch/x86/lib/mmx_32.c
23715@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23716 {
23717 void *p;
23718 int i;
23719+ unsigned long cr0;
23720
23721 if (unlikely(in_interrupt()))
23722 return __memcpy(to, from, len);
23723@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23724 kernel_fpu_begin();
23725
23726 __asm__ __volatile__ (
23727- "1: prefetch (%0)\n" /* This set is 28 bytes */
23728- " prefetch 64(%0)\n"
23729- " prefetch 128(%0)\n"
23730- " prefetch 192(%0)\n"
23731- " prefetch 256(%0)\n"
23732+ "1: prefetch (%1)\n" /* This set is 28 bytes */
23733+ " prefetch 64(%1)\n"
23734+ " prefetch 128(%1)\n"
23735+ " prefetch 192(%1)\n"
23736+ " prefetch 256(%1)\n"
23737 "2: \n"
23738 ".section .fixup, \"ax\"\n"
23739- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23740+ "3: \n"
23741+
23742+#ifdef CONFIG_PAX_KERNEXEC
23743+ " movl %%cr0, %0\n"
23744+ " movl %0, %%eax\n"
23745+ " andl $0xFFFEFFFF, %%eax\n"
23746+ " movl %%eax, %%cr0\n"
23747+#endif
23748+
23749+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23750+
23751+#ifdef CONFIG_PAX_KERNEXEC
23752+ " movl %0, %%cr0\n"
23753+#endif
23754+
23755 " jmp 2b\n"
23756 ".previous\n"
23757 _ASM_EXTABLE(1b, 3b)
23758- : : "r" (from));
23759+ : "=&r" (cr0) : "r" (from) : "ax");
23760
23761 for ( ; i > 5; i--) {
23762 __asm__ __volatile__ (
23763- "1: prefetch 320(%0)\n"
23764- "2: movq (%0), %%mm0\n"
23765- " movq 8(%0), %%mm1\n"
23766- " movq 16(%0), %%mm2\n"
23767- " movq 24(%0), %%mm3\n"
23768- " movq %%mm0, (%1)\n"
23769- " movq %%mm1, 8(%1)\n"
23770- " movq %%mm2, 16(%1)\n"
23771- " movq %%mm3, 24(%1)\n"
23772- " movq 32(%0), %%mm0\n"
23773- " movq 40(%0), %%mm1\n"
23774- " movq 48(%0), %%mm2\n"
23775- " movq 56(%0), %%mm3\n"
23776- " movq %%mm0, 32(%1)\n"
23777- " movq %%mm1, 40(%1)\n"
23778- " movq %%mm2, 48(%1)\n"
23779- " movq %%mm3, 56(%1)\n"
23780+ "1: prefetch 320(%1)\n"
23781+ "2: movq (%1), %%mm0\n"
23782+ " movq 8(%1), %%mm1\n"
23783+ " movq 16(%1), %%mm2\n"
23784+ " movq 24(%1), %%mm3\n"
23785+ " movq %%mm0, (%2)\n"
23786+ " movq %%mm1, 8(%2)\n"
23787+ " movq %%mm2, 16(%2)\n"
23788+ " movq %%mm3, 24(%2)\n"
23789+ " movq 32(%1), %%mm0\n"
23790+ " movq 40(%1), %%mm1\n"
23791+ " movq 48(%1), %%mm2\n"
23792+ " movq 56(%1), %%mm3\n"
23793+ " movq %%mm0, 32(%2)\n"
23794+ " movq %%mm1, 40(%2)\n"
23795+ " movq %%mm2, 48(%2)\n"
23796+ " movq %%mm3, 56(%2)\n"
23797 ".section .fixup, \"ax\"\n"
23798- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23799+ "3:\n"
23800+
23801+#ifdef CONFIG_PAX_KERNEXEC
23802+ " movl %%cr0, %0\n"
23803+ " movl %0, %%eax\n"
23804+ " andl $0xFFFEFFFF, %%eax\n"
23805+ " movl %%eax, %%cr0\n"
23806+#endif
23807+
23808+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23809+
23810+#ifdef CONFIG_PAX_KERNEXEC
23811+ " movl %0, %%cr0\n"
23812+#endif
23813+
23814 " jmp 2b\n"
23815 ".previous\n"
23816 _ASM_EXTABLE(1b, 3b)
23817- : : "r" (from), "r" (to) : "memory");
23818+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23819
23820 from += 64;
23821 to += 64;
23822@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23823 static void fast_copy_page(void *to, void *from)
23824 {
23825 int i;
23826+ unsigned long cr0;
23827
23828 kernel_fpu_begin();
23829
23830@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23831 * but that is for later. -AV
23832 */
23833 __asm__ __volatile__(
23834- "1: prefetch (%0)\n"
23835- " prefetch 64(%0)\n"
23836- " prefetch 128(%0)\n"
23837- " prefetch 192(%0)\n"
23838- " prefetch 256(%0)\n"
23839+ "1: prefetch (%1)\n"
23840+ " prefetch 64(%1)\n"
23841+ " prefetch 128(%1)\n"
23842+ " prefetch 192(%1)\n"
23843+ " prefetch 256(%1)\n"
23844 "2: \n"
23845 ".section .fixup, \"ax\"\n"
23846- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23847+ "3: \n"
23848+
23849+#ifdef CONFIG_PAX_KERNEXEC
23850+ " movl %%cr0, %0\n"
23851+ " movl %0, %%eax\n"
23852+ " andl $0xFFFEFFFF, %%eax\n"
23853+ " movl %%eax, %%cr0\n"
23854+#endif
23855+
23856+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23857+
23858+#ifdef CONFIG_PAX_KERNEXEC
23859+ " movl %0, %%cr0\n"
23860+#endif
23861+
23862 " jmp 2b\n"
23863 ".previous\n"
23864- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23865+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23866
23867 for (i = 0; i < (4096-320)/64; i++) {
23868 __asm__ __volatile__ (
23869- "1: prefetch 320(%0)\n"
23870- "2: movq (%0), %%mm0\n"
23871- " movntq %%mm0, (%1)\n"
23872- " movq 8(%0), %%mm1\n"
23873- " movntq %%mm1, 8(%1)\n"
23874- " movq 16(%0), %%mm2\n"
23875- " movntq %%mm2, 16(%1)\n"
23876- " movq 24(%0), %%mm3\n"
23877- " movntq %%mm3, 24(%1)\n"
23878- " movq 32(%0), %%mm4\n"
23879- " movntq %%mm4, 32(%1)\n"
23880- " movq 40(%0), %%mm5\n"
23881- " movntq %%mm5, 40(%1)\n"
23882- " movq 48(%0), %%mm6\n"
23883- " movntq %%mm6, 48(%1)\n"
23884- " movq 56(%0), %%mm7\n"
23885- " movntq %%mm7, 56(%1)\n"
23886+ "1: prefetch 320(%1)\n"
23887+ "2: movq (%1), %%mm0\n"
23888+ " movntq %%mm0, (%2)\n"
23889+ " movq 8(%1), %%mm1\n"
23890+ " movntq %%mm1, 8(%2)\n"
23891+ " movq 16(%1), %%mm2\n"
23892+ " movntq %%mm2, 16(%2)\n"
23893+ " movq 24(%1), %%mm3\n"
23894+ " movntq %%mm3, 24(%2)\n"
23895+ " movq 32(%1), %%mm4\n"
23896+ " movntq %%mm4, 32(%2)\n"
23897+ " movq 40(%1), %%mm5\n"
23898+ " movntq %%mm5, 40(%2)\n"
23899+ " movq 48(%1), %%mm6\n"
23900+ " movntq %%mm6, 48(%2)\n"
23901+ " movq 56(%1), %%mm7\n"
23902+ " movntq %%mm7, 56(%2)\n"
23903 ".section .fixup, \"ax\"\n"
23904- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23905+ "3:\n"
23906+
23907+#ifdef CONFIG_PAX_KERNEXEC
23908+ " movl %%cr0, %0\n"
23909+ " movl %0, %%eax\n"
23910+ " andl $0xFFFEFFFF, %%eax\n"
23911+ " movl %%eax, %%cr0\n"
23912+#endif
23913+
23914+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23915+
23916+#ifdef CONFIG_PAX_KERNEXEC
23917+ " movl %0, %%cr0\n"
23918+#endif
23919+
23920 " jmp 2b\n"
23921 ".previous\n"
23922- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
23923+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23924
23925 from += 64;
23926 to += 64;
23927@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
23928 static void fast_copy_page(void *to, void *from)
23929 {
23930 int i;
23931+ unsigned long cr0;
23932
23933 kernel_fpu_begin();
23934
23935 __asm__ __volatile__ (
23936- "1: prefetch (%0)\n"
23937- " prefetch 64(%0)\n"
23938- " prefetch 128(%0)\n"
23939- " prefetch 192(%0)\n"
23940- " prefetch 256(%0)\n"
23941+ "1: prefetch (%1)\n"
23942+ " prefetch 64(%1)\n"
23943+ " prefetch 128(%1)\n"
23944+ " prefetch 192(%1)\n"
23945+ " prefetch 256(%1)\n"
23946 "2: \n"
23947 ".section .fixup, \"ax\"\n"
23948- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23949+ "3: \n"
23950+
23951+#ifdef CONFIG_PAX_KERNEXEC
23952+ " movl %%cr0, %0\n"
23953+ " movl %0, %%eax\n"
23954+ " andl $0xFFFEFFFF, %%eax\n"
23955+ " movl %%eax, %%cr0\n"
23956+#endif
23957+
23958+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23959+
23960+#ifdef CONFIG_PAX_KERNEXEC
23961+ " movl %0, %%cr0\n"
23962+#endif
23963+
23964 " jmp 2b\n"
23965 ".previous\n"
23966- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23967+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23968
23969 for (i = 0; i < 4096/64; i++) {
23970 __asm__ __volatile__ (
23971- "1: prefetch 320(%0)\n"
23972- "2: movq (%0), %%mm0\n"
23973- " movq 8(%0), %%mm1\n"
23974- " movq 16(%0), %%mm2\n"
23975- " movq 24(%0), %%mm3\n"
23976- " movq %%mm0, (%1)\n"
23977- " movq %%mm1, 8(%1)\n"
23978- " movq %%mm2, 16(%1)\n"
23979- " movq %%mm3, 24(%1)\n"
23980- " movq 32(%0), %%mm0\n"
23981- " movq 40(%0), %%mm1\n"
23982- " movq 48(%0), %%mm2\n"
23983- " movq 56(%0), %%mm3\n"
23984- " movq %%mm0, 32(%1)\n"
23985- " movq %%mm1, 40(%1)\n"
23986- " movq %%mm2, 48(%1)\n"
23987- " movq %%mm3, 56(%1)\n"
23988+ "1: prefetch 320(%1)\n"
23989+ "2: movq (%1), %%mm0\n"
23990+ " movq 8(%1), %%mm1\n"
23991+ " movq 16(%1), %%mm2\n"
23992+ " movq 24(%1), %%mm3\n"
23993+ " movq %%mm0, (%2)\n"
23994+ " movq %%mm1, 8(%2)\n"
23995+ " movq %%mm2, 16(%2)\n"
23996+ " movq %%mm3, 24(%2)\n"
23997+ " movq 32(%1), %%mm0\n"
23998+ " movq 40(%1), %%mm1\n"
23999+ " movq 48(%1), %%mm2\n"
24000+ " movq 56(%1), %%mm3\n"
24001+ " movq %%mm0, 32(%2)\n"
24002+ " movq %%mm1, 40(%2)\n"
24003+ " movq %%mm2, 48(%2)\n"
24004+ " movq %%mm3, 56(%2)\n"
24005 ".section .fixup, \"ax\"\n"
24006- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24007+ "3:\n"
24008+
24009+#ifdef CONFIG_PAX_KERNEXEC
24010+ " movl %%cr0, %0\n"
24011+ " movl %0, %%eax\n"
24012+ " andl $0xFFFEFFFF, %%eax\n"
24013+ " movl %%eax, %%cr0\n"
24014+#endif
24015+
24016+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24017+
24018+#ifdef CONFIG_PAX_KERNEXEC
24019+ " movl %0, %%cr0\n"
24020+#endif
24021+
24022 " jmp 2b\n"
24023 ".previous\n"
24024 _ASM_EXTABLE(1b, 3b)
24025- : : "r" (from), "r" (to) : "memory");
24026+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24027
24028 from += 64;
24029 to += 64;
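
The mmx_32.c hunks above all follow one pattern: the exception fixup at label 3 rewrites the prefetch instruction at label 1 into a short jmp (the 0x1AEB/0x05EB immediates are little-endian encodings of "jmp +26" and "jmp +5"), which is a write into kernel .text. Under CONFIG_PAX_KERNEXEC that text is write-protected, so the patch brackets the write with a CR0.WP toggle (the "andl $0xFFFEFFFF" clears bit 16, CR0.WP), saving the old CR0 in the new cr0 output operand, which is why the other asm operands shift from %0/%1 to %1/%2. A minimal standalone sketch of the same toggle, with hypothetical helper names; a real version would also keep interrupts disabled so the writable window cannot leak into other code:

	/*
	 * Illustrative sketch (not part of the patch): open a brief
	 * CR0.WP window to patch read-only kernel text, as the fixup
	 * paths above do inline around "movw $0x1AEB, 1b".
	 */
	#define X86_CR0_WP 0x00010000UL		/* bit 16: ring-0 write protect */

	static inline unsigned long read_cr0_sketch(void)
	{
		unsigned long cr0;
		asm volatile("mov %%cr0, %0" : "=r" (cr0));
		return cr0;
	}

	static inline void write_cr0_sketch(unsigned long cr0)
	{
		asm volatile("mov %0, %%cr0" : : "r" (cr0));
	}

	static void patch_text_word_sketch(unsigned short *insn, unsigned short val)
	{
		unsigned long cr0 = read_cr0_sketch();

		write_cr0_sketch(cr0 & ~X86_CR0_WP);	/* allow the text write  */
		*insn = val;				/* e.g. 0x1AEB: jmp +26  */
		write_cr0_sketch(cr0);			/* restore WP at once    */
	}
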
24030diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
24031index f6d13ee..aca5f0b 100644
24032--- a/arch/x86/lib/msr-reg.S
24033+++ b/arch/x86/lib/msr-reg.S
24034@@ -3,6 +3,7 @@
24035 #include <asm/dwarf2.h>
24036 #include <asm/asm.h>
24037 #include <asm/msr.h>
24038+#include <asm/alternative-asm.h>
24039
24040 #ifdef CONFIG_X86_64
24041 /*
24042@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
24043 CFI_STARTPROC
24044 pushq_cfi %rbx
24045 pushq_cfi %rbp
24046- movq %rdi, %r10 /* Save pointer */
24047+ movq %rdi, %r9 /* Save pointer */
24048 xorl %r11d, %r11d /* Return value */
24049 movl (%rdi), %eax
24050 movl 4(%rdi), %ecx
24051@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
24052 movl 28(%rdi), %edi
24053 CFI_REMEMBER_STATE
24054 1: \op
24055-2: movl %eax, (%r10)
24056+2: movl %eax, (%r9)
24057 movl %r11d, %eax /* Return value */
24058- movl %ecx, 4(%r10)
24059- movl %edx, 8(%r10)
24060- movl %ebx, 12(%r10)
24061- movl %ebp, 20(%r10)
24062- movl %esi, 24(%r10)
24063- movl %edi, 28(%r10)
24064+ movl %ecx, 4(%r9)
24065+ movl %edx, 8(%r9)
24066+ movl %ebx, 12(%r9)
24067+ movl %ebp, 20(%r9)
24068+ movl %esi, 24(%r9)
24069+ movl %edi, 28(%r9)
24070 popq_cfi %rbp
24071 popq_cfi %rbx
24072+ pax_force_retaddr
24073 ret
24074 3:
24075 CFI_RESTORE_STATE
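
The msr-reg.S change swaps the pointer save register from %r10 to %r9 and appends pax_force_retaddr before the ret. For context, a hedged sketch of how the rd/wrmsr *_safe_regs entry points built by this macro are consumed; the regs[] slots correspond to the byte offsets in the assembly above (eax at 0, ecx at 4, edx at 8, ebx at 12, ebp at 20, esi at 24, edi at 28), and the MSR constant below is only an example:

	#include <asm/msr.h>
	#include <linux/printk.h>

	/*
	 * Sketch (assumed caller-side usage, not from the patch): ECX
	 * selects the MSR, the result lands in EDX:EAX, and the return
	 * value is 0 on success or the error installed by the fixup.
	 */
	static void msr_safe_regs_demo(void)
	{
		u32 regs[8] = { 0 };

		regs[1] = 0xc0010015;		/* ECX: e.g. MSR_K7_HWCR (AMD) */
		if (rdmsr_safe_regs(regs) == 0)
			pr_info("MSR %#x = %08x:%08x\n", regs[1], regs[2], regs[0]);
	}
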
24076diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
24077index fc6ba17..04471c5 100644
24078--- a/arch/x86/lib/putuser.S
24079+++ b/arch/x86/lib/putuser.S
24080@@ -16,7 +16,9 @@
24081 #include <asm/errno.h>
24082 #include <asm/asm.h>
24083 #include <asm/smap.h>
24084-
24085+#include <asm/segment.h>
24086+#include <asm/pgtable.h>
24087+#include <asm/alternative-asm.h>
24088
24089 /*
24090 * __put_user_X
24091@@ -30,57 +32,125 @@
24092 * as they get called from within inline assembly.
24093 */
24094
24095-#define ENTER CFI_STARTPROC ; \
24096- GET_THREAD_INFO(%_ASM_BX)
24097-#define EXIT ASM_CLAC ; \
24098- ret ; \
24099+#define ENTER CFI_STARTPROC
24100+#define EXIT ASM_CLAC ; \
24101+ pax_force_retaddr ; \
24102+ ret ; \
24103 CFI_ENDPROC
24104
24105+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24106+#define _DEST %_ASM_CX,%_ASM_BX
24107+#else
24108+#define _DEST %_ASM_CX
24109+#endif
24110+
24111+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24112+#define __copyuser_seg gs;
24113+#else
24114+#define __copyuser_seg
24115+#endif
24116+
24117 .text
24118 ENTRY(__put_user_1)
24119 ENTER
24120+
24121+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24122+ GET_THREAD_INFO(%_ASM_BX)
24123 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
24124 jae bad_put_user
24125 ASM_STAC
24126-1: movb %al,(%_ASM_CX)
24127+
24128+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24129+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24130+ cmp %_ASM_BX,%_ASM_CX
24131+ jb 1234f
24132+ xor %ebx,%ebx
24133+1234:
24134+#endif
24135+
24136+#endif
24137+
24138+1: __copyuser_seg movb %al,(_DEST)
24139 xor %eax,%eax
24140 EXIT
24141 ENDPROC(__put_user_1)
24142
24143 ENTRY(__put_user_2)
24144 ENTER
24145+
24146+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24147+ GET_THREAD_INFO(%_ASM_BX)
24148 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24149 sub $1,%_ASM_BX
24150 cmp %_ASM_BX,%_ASM_CX
24151 jae bad_put_user
24152 ASM_STAC
24153-2: movw %ax,(%_ASM_CX)
24154+
24155+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24156+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24157+ cmp %_ASM_BX,%_ASM_CX
24158+ jb 1234f
24159+ xor %ebx,%ebx
24160+1234:
24161+#endif
24162+
24163+#endif
24164+
24165+2: __copyuser_seg movw %ax,(_DEST)
24166 xor %eax,%eax
24167 EXIT
24168 ENDPROC(__put_user_2)
24169
24170 ENTRY(__put_user_4)
24171 ENTER
24172+
24173+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24174+ GET_THREAD_INFO(%_ASM_BX)
24175 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24176 sub $3,%_ASM_BX
24177 cmp %_ASM_BX,%_ASM_CX
24178 jae bad_put_user
24179 ASM_STAC
24180-3: movl %eax,(%_ASM_CX)
24181+
24182+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24183+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24184+ cmp %_ASM_BX,%_ASM_CX
24185+ jb 1234f
24186+ xor %ebx,%ebx
24187+1234:
24188+#endif
24189+
24190+#endif
24191+
24192+3: __copyuser_seg movl %eax,(_DEST)
24193 xor %eax,%eax
24194 EXIT
24195 ENDPROC(__put_user_4)
24196
24197 ENTRY(__put_user_8)
24198 ENTER
24199+
24200+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24201+ GET_THREAD_INFO(%_ASM_BX)
24202 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24203 sub $7,%_ASM_BX
24204 cmp %_ASM_BX,%_ASM_CX
24205 jae bad_put_user
24206 ASM_STAC
24207-4: mov %_ASM_AX,(%_ASM_CX)
24208+
24209+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24210+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24211+ cmp %_ASM_BX,%_ASM_CX
24212+ jb 1234f
24213+ xor %ebx,%ebx
24214+1234:
24215+#endif
24216+
24217+#endif
24218+
24219+4: __copyuser_seg mov %_ASM_AX,(_DEST)
24220 #ifdef CONFIG_X86_32
24221-5: movl %edx,4(%_ASM_CX)
24222+5: __copyuser_seg movl %edx,4(_DEST)
24223 #endif
24224 xor %eax,%eax
24225 EXIT
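
Two UDEREF mechanisms meet in putuser.S. On i386 the user store simply gains a %gs segment prefix (__copyuser_seg), so it goes through a segment whose limit excludes kernel space and the TI_addr_limit check can be dropped. On amd64 the store address becomes a base-plus-index pair, (%_ASM_CX,%_ASM_BX): %_ASM_BX holds PAX_USER_SHADOW_BASE for addresses below the shadow area and is zeroed (the "jb 1234f; xor %ebx,%ebx" dance) for addresses already inside it, steering every access through the shadow mapping of userland. A C rendering of that adjustment; the base value is a placeholder, the real one comes from the patch's headers:

	#define PAX_USER_SHADOW_BASE_SKETCH 0x0000100000000000UL	/* hypothetical */

	/* Sketch of the amd64 UDEREF address adjustment done above. */
	static inline unsigned long uderef_shadow_adjust(unsigned long uaddr)
	{
		unsigned long base = PAX_USER_SHADOW_BASE_SKETCH;

		if (uaddr >= base)	/* already a shadow address:    */
			base = 0;	/* mirrors "jb 1234f; xor %ebx" */
		return uaddr + base;	/* access goes via (%rcx,%rbx)  */
	}
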
24226diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
24227index 1cad221..de671ee 100644
24228--- a/arch/x86/lib/rwlock.S
24229+++ b/arch/x86/lib/rwlock.S
24230@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
24231 FRAME
24232 0: LOCK_PREFIX
24233 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
24234+
24235+#ifdef CONFIG_PAX_REFCOUNT
24236+ jno 1234f
24237+ LOCK_PREFIX
24238+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
24239+ int $4
24240+1234:
24241+ _ASM_EXTABLE(1234b, 1234b)
24242+#endif
24243+
24244 1: rep; nop
24245 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
24246 jne 1b
24247 LOCK_PREFIX
24248 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
24249+
24250+#ifdef CONFIG_PAX_REFCOUNT
24251+ jno 1234f
24252+ LOCK_PREFIX
24253+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
24254+ int $4
24255+1234:
24256+ _ASM_EXTABLE(1234b, 1234b)
24257+#endif
24258+
24259 jnz 0b
24260 ENDFRAME
24261+ pax_force_retaddr
24262 ret
24263 CFI_ENDPROC
24264 END(__write_lock_failed)
24265@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
24266 FRAME
24267 0: LOCK_PREFIX
24268 READ_LOCK_SIZE(inc) (%__lock_ptr)
24269+
24270+#ifdef CONFIG_PAX_REFCOUNT
24271+ jno 1234f
24272+ LOCK_PREFIX
24273+ READ_LOCK_SIZE(dec) (%__lock_ptr)
24274+ int $4
24275+1234:
24276+ _ASM_EXTABLE(1234b, 1234b)
24277+#endif
24278+
24279 1: rep; nop
24280 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
24281 js 1b
24282 LOCK_PREFIX
24283 READ_LOCK_SIZE(dec) (%__lock_ptr)
24284+
24285+#ifdef CONFIG_PAX_REFCOUNT
24286+ jno 1234f
24287+ LOCK_PREFIX
24288+ READ_LOCK_SIZE(inc) (%__lock_ptr)
24289+ int $4
24290+1234:
24291+ _ASM_EXTABLE(1234b, 1234b)
24292+#endif
24293+
24294 js 0b
24295 ENDFRAME
24296+ pax_force_retaddr
24297 ret
24298 CFI_ENDPROC
24299 END(__read_lock_failed)
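
The rwlock.S additions are the standard PAX_REFCOUNT pattern: each locked add/sub/inc/dec is followed by a jno, so on signed overflow the update is undone and "int $4" raises the x86 overflow exception (#OF), which PaX's trap handler reports as a refcount overflow; the self-referential _ASM_EXTABLE(1234b, 1234b) entry lets execution resume at that very spot afterwards. The same idea as a freestanding C helper, a sketch without the extable plumbing:

	/*
	 * Overflow-checked atomic increment in the style used above;
	 * the real pattern also registers the "int $4" site in the
	 * exception table so the handler can resume here.
	 */
	static inline void atomic_inc_checked_sketch(int *v)
	{
		asm volatile("lock incl %0\n\t"
			     "jno 0f\n\t"	/* no signed overflow: done      */
			     "lock decl %0\n\t"	/* overflow: undo the increment  */
			     "int $4\n"		/* raise #OF for the PaX handler */
			     "0:"
			     : "+m" (*v) : : "memory", "cc");
	}
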
24300diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
24301index 5dff5f0..cadebf4 100644
24302--- a/arch/x86/lib/rwsem.S
24303+++ b/arch/x86/lib/rwsem.S
24304@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
24305 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
24306 CFI_RESTORE __ASM_REG(dx)
24307 restore_common_regs
24308+ pax_force_retaddr
24309 ret
24310 CFI_ENDPROC
24311 ENDPROC(call_rwsem_down_read_failed)
24312@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
24313 movq %rax,%rdi
24314 call rwsem_down_write_failed
24315 restore_common_regs
24316+ pax_force_retaddr
24317 ret
24318 CFI_ENDPROC
24319 ENDPROC(call_rwsem_down_write_failed)
24320@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
24321 movq %rax,%rdi
24322 call rwsem_wake
24323 restore_common_regs
24324-1: ret
24325+1: pax_force_retaddr
24326+ ret
24327 CFI_ENDPROC
24328 ENDPROC(call_rwsem_wake)
24329
24330@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
24331 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
24332 CFI_RESTORE __ASM_REG(dx)
24333 restore_common_regs
24334+ pax_force_retaddr
24335 ret
24336 CFI_ENDPROC
24337 ENDPROC(call_rwsem_downgrade_wake)
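
rwsem.S (and thunk_64.S just below) only gain pax_force_retaddr in front of each ret. Under KERNEXEC this macro mangles the saved return address on the stack so that, even if an overflow corrupted it, the ret can never transfer control to a userland address. The actual expansion depends on the configured KERNEXEC method; one flavour is a single bit-set on the top bit, sketched here as a define that would sit immediately before the ret:

	/*
	 * Sketch of one pax_force_retaddr flavour ("bts" style): set
	 * bit 63 of the return address at the top of the stack.  A
	 * kernel address keeps working; a corrupted userland address
	 * becomes non-canonical and faults instead of executing.
	 */
	#define pax_force_retaddr_sketch "btsq $63, (%rsp)\n\t"
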
24338diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24339index a63efd6..ccecad8 100644
24340--- a/arch/x86/lib/thunk_64.S
24341+++ b/arch/x86/lib/thunk_64.S
24342@@ -8,6 +8,7 @@
24343 #include <linux/linkage.h>
24344 #include <asm/dwarf2.h>
24345 #include <asm/calling.h>
24346+#include <asm/alternative-asm.h>
24347
24348 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24349 .macro THUNK name, func, put_ret_addr_in_rdi=0
24350@@ -41,5 +42,6 @@
24351 SAVE_ARGS
24352 restore:
24353 RESTORE_ARGS
24354+ pax_force_retaddr
24355 ret
24356 CFI_ENDPROC
24357diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24358index 98f6d6b6..d27f045 100644
24359--- a/arch/x86/lib/usercopy_32.c
24360+++ b/arch/x86/lib/usercopy_32.c
24361@@ -42,11 +42,13 @@ do { \
24362 int __d0; \
24363 might_fault(); \
24364 __asm__ __volatile__( \
24365+ __COPYUSER_SET_ES \
24366 ASM_STAC "\n" \
24367 "0: rep; stosl\n" \
24368 " movl %2,%0\n" \
24369 "1: rep; stosb\n" \
24370 "2: " ASM_CLAC "\n" \
24371+ __COPYUSER_RESTORE_ES \
24372 ".section .fixup,\"ax\"\n" \
24373 "3: lea 0(%2,%0,4),%0\n" \
24374 " jmp 2b\n" \
24375@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
24376
24377 #ifdef CONFIG_X86_INTEL_USERCOPY
24378 static unsigned long
24379-__copy_user_intel(void __user *to, const void *from, unsigned long size)
24380+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24381 {
24382 int d0, d1;
24383 __asm__ __volatile__(
24384@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24385 " .align 2,0x90\n"
24386 "3: movl 0(%4), %%eax\n"
24387 "4: movl 4(%4), %%edx\n"
24388- "5: movl %%eax, 0(%3)\n"
24389- "6: movl %%edx, 4(%3)\n"
24390+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24391+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24392 "7: movl 8(%4), %%eax\n"
24393 "8: movl 12(%4),%%edx\n"
24394- "9: movl %%eax, 8(%3)\n"
24395- "10: movl %%edx, 12(%3)\n"
24396+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24397+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24398 "11: movl 16(%4), %%eax\n"
24399 "12: movl 20(%4), %%edx\n"
24400- "13: movl %%eax, 16(%3)\n"
24401- "14: movl %%edx, 20(%3)\n"
24402+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24403+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24404 "15: movl 24(%4), %%eax\n"
24405 "16: movl 28(%4), %%edx\n"
24406- "17: movl %%eax, 24(%3)\n"
24407- "18: movl %%edx, 28(%3)\n"
24408+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24409+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24410 "19: movl 32(%4), %%eax\n"
24411 "20: movl 36(%4), %%edx\n"
24412- "21: movl %%eax, 32(%3)\n"
24413- "22: movl %%edx, 36(%3)\n"
24414+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24415+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24416 "23: movl 40(%4), %%eax\n"
24417 "24: movl 44(%4), %%edx\n"
24418- "25: movl %%eax, 40(%3)\n"
24419- "26: movl %%edx, 44(%3)\n"
24420+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24421+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24422 "27: movl 48(%4), %%eax\n"
24423 "28: movl 52(%4), %%edx\n"
24424- "29: movl %%eax, 48(%3)\n"
24425- "30: movl %%edx, 52(%3)\n"
24426+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24427+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24428 "31: movl 56(%4), %%eax\n"
24429 "32: movl 60(%4), %%edx\n"
24430- "33: movl %%eax, 56(%3)\n"
24431- "34: movl %%edx, 60(%3)\n"
24432+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24433+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24434 " addl $-64, %0\n"
24435 " addl $64, %4\n"
24436 " addl $64, %3\n"
24437@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24438 " shrl $2, %0\n"
24439 " andl $3, %%eax\n"
24440 " cld\n"
24441+ __COPYUSER_SET_ES
24442 "99: rep; movsl\n"
24443 "36: movl %%eax, %0\n"
24444 "37: rep; movsb\n"
24445 "100:\n"
24446+ __COPYUSER_RESTORE_ES
24447 ".section .fixup,\"ax\"\n"
24448 "101: lea 0(%%eax,%0,4),%0\n"
24449 " jmp 100b\n"
24450@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24451 }
24452
24453 static unsigned long
24454+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24455+{
24456+ int d0, d1;
24457+ __asm__ __volatile__(
24458+ " .align 2,0x90\n"
24459+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24460+ " cmpl $67, %0\n"
24461+ " jbe 3f\n"
24462+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24463+ " .align 2,0x90\n"
24464+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24465+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24466+ "5: movl %%eax, 0(%3)\n"
24467+ "6: movl %%edx, 4(%3)\n"
24468+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24469+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24470+ "9: movl %%eax, 8(%3)\n"
24471+ "10: movl %%edx, 12(%3)\n"
24472+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24473+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24474+ "13: movl %%eax, 16(%3)\n"
24475+ "14: movl %%edx, 20(%3)\n"
24476+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24477+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24478+ "17: movl %%eax, 24(%3)\n"
24479+ "18: movl %%edx, 28(%3)\n"
24480+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24481+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24482+ "21: movl %%eax, 32(%3)\n"
24483+ "22: movl %%edx, 36(%3)\n"
24484+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24485+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24486+ "25: movl %%eax, 40(%3)\n"
24487+ "26: movl %%edx, 44(%3)\n"
24488+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24489+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24490+ "29: movl %%eax, 48(%3)\n"
24491+ "30: movl %%edx, 52(%3)\n"
24492+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24493+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24494+ "33: movl %%eax, 56(%3)\n"
24495+ "34: movl %%edx, 60(%3)\n"
24496+ " addl $-64, %0\n"
24497+ " addl $64, %4\n"
24498+ " addl $64, %3\n"
24499+ " cmpl $63, %0\n"
24500+ " ja 1b\n"
24501+ "35: movl %0, %%eax\n"
24502+ " shrl $2, %0\n"
24503+ " andl $3, %%eax\n"
24504+ " cld\n"
24505+ "99: rep; "__copyuser_seg" movsl\n"
24506+ "36: movl %%eax, %0\n"
24507+ "37: rep; "__copyuser_seg" movsb\n"
24508+ "100:\n"
24509+ ".section .fixup,\"ax\"\n"
24510+ "101: lea 0(%%eax,%0,4),%0\n"
24511+ " jmp 100b\n"
24512+ ".previous\n"
24513+ _ASM_EXTABLE(1b,100b)
24514+ _ASM_EXTABLE(2b,100b)
24515+ _ASM_EXTABLE(3b,100b)
24516+ _ASM_EXTABLE(4b,100b)
24517+ _ASM_EXTABLE(5b,100b)
24518+ _ASM_EXTABLE(6b,100b)
24519+ _ASM_EXTABLE(7b,100b)
24520+ _ASM_EXTABLE(8b,100b)
24521+ _ASM_EXTABLE(9b,100b)
24522+ _ASM_EXTABLE(10b,100b)
24523+ _ASM_EXTABLE(11b,100b)
24524+ _ASM_EXTABLE(12b,100b)
24525+ _ASM_EXTABLE(13b,100b)
24526+ _ASM_EXTABLE(14b,100b)
24527+ _ASM_EXTABLE(15b,100b)
24528+ _ASM_EXTABLE(16b,100b)
24529+ _ASM_EXTABLE(17b,100b)
24530+ _ASM_EXTABLE(18b,100b)
24531+ _ASM_EXTABLE(19b,100b)
24532+ _ASM_EXTABLE(20b,100b)
24533+ _ASM_EXTABLE(21b,100b)
24534+ _ASM_EXTABLE(22b,100b)
24535+ _ASM_EXTABLE(23b,100b)
24536+ _ASM_EXTABLE(24b,100b)
24537+ _ASM_EXTABLE(25b,100b)
24538+ _ASM_EXTABLE(26b,100b)
24539+ _ASM_EXTABLE(27b,100b)
24540+ _ASM_EXTABLE(28b,100b)
24541+ _ASM_EXTABLE(29b,100b)
24542+ _ASM_EXTABLE(30b,100b)
24543+ _ASM_EXTABLE(31b,100b)
24544+ _ASM_EXTABLE(32b,100b)
24545+ _ASM_EXTABLE(33b,100b)
24546+ _ASM_EXTABLE(34b,100b)
24547+ _ASM_EXTABLE(35b,100b)
24548+ _ASM_EXTABLE(36b,100b)
24549+ _ASM_EXTABLE(37b,100b)
24550+ _ASM_EXTABLE(99b,101b)
24551+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
24552+ : "1"(to), "2"(from), "0"(size)
24553+ : "eax", "edx", "memory");
24554+ return size;
24555+}
24556+
24557+static unsigned long __size_overflow(3)
24558 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24559 {
24560 int d0, d1;
24561 __asm__ __volatile__(
24562 " .align 2,0x90\n"
24563- "0: movl 32(%4), %%eax\n"
24564+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24565 " cmpl $67, %0\n"
24566 " jbe 2f\n"
24567- "1: movl 64(%4), %%eax\n"
24568+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24569 " .align 2,0x90\n"
24570- "2: movl 0(%4), %%eax\n"
24571- "21: movl 4(%4), %%edx\n"
24572+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24573+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24574 " movl %%eax, 0(%3)\n"
24575 " movl %%edx, 4(%3)\n"
24576- "3: movl 8(%4), %%eax\n"
24577- "31: movl 12(%4),%%edx\n"
24578+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24579+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24580 " movl %%eax, 8(%3)\n"
24581 " movl %%edx, 12(%3)\n"
24582- "4: movl 16(%4), %%eax\n"
24583- "41: movl 20(%4), %%edx\n"
24584+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24585+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24586 " movl %%eax, 16(%3)\n"
24587 " movl %%edx, 20(%3)\n"
24588- "10: movl 24(%4), %%eax\n"
24589- "51: movl 28(%4), %%edx\n"
24590+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24591+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24592 " movl %%eax, 24(%3)\n"
24593 " movl %%edx, 28(%3)\n"
24594- "11: movl 32(%4), %%eax\n"
24595- "61: movl 36(%4), %%edx\n"
24596+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24597+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24598 " movl %%eax, 32(%3)\n"
24599 " movl %%edx, 36(%3)\n"
24600- "12: movl 40(%4), %%eax\n"
24601- "71: movl 44(%4), %%edx\n"
24602+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24603+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24604 " movl %%eax, 40(%3)\n"
24605 " movl %%edx, 44(%3)\n"
24606- "13: movl 48(%4), %%eax\n"
24607- "81: movl 52(%4), %%edx\n"
24608+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24609+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24610 " movl %%eax, 48(%3)\n"
24611 " movl %%edx, 52(%3)\n"
24612- "14: movl 56(%4), %%eax\n"
24613- "91: movl 60(%4), %%edx\n"
24614+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24615+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24616 " movl %%eax, 56(%3)\n"
24617 " movl %%edx, 60(%3)\n"
24618 " addl $-64, %0\n"
24619@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24620 " shrl $2, %0\n"
24621 " andl $3, %%eax\n"
24622 " cld\n"
24623- "6: rep; movsl\n"
24624+ "6: rep; "__copyuser_seg" movsl\n"
24625 " movl %%eax,%0\n"
24626- "7: rep; movsb\n"
24627+ "7: rep; "__copyuser_seg" movsb\n"
24628 "8:\n"
24629 ".section .fixup,\"ax\"\n"
24630 "9: lea 0(%%eax,%0,4),%0\n"
24631@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24632 * hyoshiok@miraclelinux.com
24633 */
24634
24635-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24636+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
24637 const void __user *from, unsigned long size)
24638 {
24639 int d0, d1;
24640
24641 __asm__ __volatile__(
24642 " .align 2,0x90\n"
24643- "0: movl 32(%4), %%eax\n"
24644+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24645 " cmpl $67, %0\n"
24646 " jbe 2f\n"
24647- "1: movl 64(%4), %%eax\n"
24648+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24649 " .align 2,0x90\n"
24650- "2: movl 0(%4), %%eax\n"
24651- "21: movl 4(%4), %%edx\n"
24652+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24653+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24654 " movnti %%eax, 0(%3)\n"
24655 " movnti %%edx, 4(%3)\n"
24656- "3: movl 8(%4), %%eax\n"
24657- "31: movl 12(%4),%%edx\n"
24658+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24659+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24660 " movnti %%eax, 8(%3)\n"
24661 " movnti %%edx, 12(%3)\n"
24662- "4: movl 16(%4), %%eax\n"
24663- "41: movl 20(%4), %%edx\n"
24664+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24665+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24666 " movnti %%eax, 16(%3)\n"
24667 " movnti %%edx, 20(%3)\n"
24668- "10: movl 24(%4), %%eax\n"
24669- "51: movl 28(%4), %%edx\n"
24670+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24671+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24672 " movnti %%eax, 24(%3)\n"
24673 " movnti %%edx, 28(%3)\n"
24674- "11: movl 32(%4), %%eax\n"
24675- "61: movl 36(%4), %%edx\n"
24676+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24677+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24678 " movnti %%eax, 32(%3)\n"
24679 " movnti %%edx, 36(%3)\n"
24680- "12: movl 40(%4), %%eax\n"
24681- "71: movl 44(%4), %%edx\n"
24682+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24683+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24684 " movnti %%eax, 40(%3)\n"
24685 " movnti %%edx, 44(%3)\n"
24686- "13: movl 48(%4), %%eax\n"
24687- "81: movl 52(%4), %%edx\n"
24688+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24689+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24690 " movnti %%eax, 48(%3)\n"
24691 " movnti %%edx, 52(%3)\n"
24692- "14: movl 56(%4), %%eax\n"
24693- "91: movl 60(%4), %%edx\n"
24694+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24695+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24696 " movnti %%eax, 56(%3)\n"
24697 " movnti %%edx, 60(%3)\n"
24698 " addl $-64, %0\n"
24699@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24700 " shrl $2, %0\n"
24701 " andl $3, %%eax\n"
24702 " cld\n"
24703- "6: rep; movsl\n"
24704+ "6: rep; "__copyuser_seg" movsl\n"
24705 " movl %%eax,%0\n"
24706- "7: rep; movsb\n"
24707+ "7: rep; "__copyuser_seg" movsb\n"
24708 "8:\n"
24709 ".section .fixup,\"ax\"\n"
24710 "9: lea 0(%%eax,%0,4),%0\n"
24711@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24712 return size;
24713 }
24714
24715-static unsigned long __copy_user_intel_nocache(void *to,
24716+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
24717 const void __user *from, unsigned long size)
24718 {
24719 int d0, d1;
24720
24721 __asm__ __volatile__(
24722 " .align 2,0x90\n"
24723- "0: movl 32(%4), %%eax\n"
24724+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24725 " cmpl $67, %0\n"
24726 " jbe 2f\n"
24727- "1: movl 64(%4), %%eax\n"
24728+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24729 " .align 2,0x90\n"
24730- "2: movl 0(%4), %%eax\n"
24731- "21: movl 4(%4), %%edx\n"
24732+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24733+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24734 " movnti %%eax, 0(%3)\n"
24735 " movnti %%edx, 4(%3)\n"
24736- "3: movl 8(%4), %%eax\n"
24737- "31: movl 12(%4),%%edx\n"
24738+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24739+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24740 " movnti %%eax, 8(%3)\n"
24741 " movnti %%edx, 12(%3)\n"
24742- "4: movl 16(%4), %%eax\n"
24743- "41: movl 20(%4), %%edx\n"
24744+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24745+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24746 " movnti %%eax, 16(%3)\n"
24747 " movnti %%edx, 20(%3)\n"
24748- "10: movl 24(%4), %%eax\n"
24749- "51: movl 28(%4), %%edx\n"
24750+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24751+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24752 " movnti %%eax, 24(%3)\n"
24753 " movnti %%edx, 28(%3)\n"
24754- "11: movl 32(%4), %%eax\n"
24755- "61: movl 36(%4), %%edx\n"
24756+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24757+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24758 " movnti %%eax, 32(%3)\n"
24759 " movnti %%edx, 36(%3)\n"
24760- "12: movl 40(%4), %%eax\n"
24761- "71: movl 44(%4), %%edx\n"
24762+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24763+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24764 " movnti %%eax, 40(%3)\n"
24765 " movnti %%edx, 44(%3)\n"
24766- "13: movl 48(%4), %%eax\n"
24767- "81: movl 52(%4), %%edx\n"
24768+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24769+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24770 " movnti %%eax, 48(%3)\n"
24771 " movnti %%edx, 52(%3)\n"
24772- "14: movl 56(%4), %%eax\n"
24773- "91: movl 60(%4), %%edx\n"
24774+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24775+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24776 " movnti %%eax, 56(%3)\n"
24777 " movnti %%edx, 60(%3)\n"
24778 " addl $-64, %0\n"
24779@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24780 " shrl $2, %0\n"
24781 " andl $3, %%eax\n"
24782 " cld\n"
24783- "6: rep; movsl\n"
24784+ "6: rep; "__copyuser_seg" movsl\n"
24785 " movl %%eax,%0\n"
24786- "7: rep; movsb\n"
24787+ "7: rep; "__copyuser_seg" movsb\n"
24788 "8:\n"
24789 ".section .fixup,\"ax\"\n"
24790 "9: lea 0(%%eax,%0,4),%0\n"
24791@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
24792 */
24793 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
24794 unsigned long size);
24795-unsigned long __copy_user_intel(void __user *to, const void *from,
24796+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
24797+ unsigned long size);
24798+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
24799 unsigned long size);
24800 unsigned long __copy_user_zeroing_intel_nocache(void *to,
24801 const void __user *from, unsigned long size);
24802 #endif /* CONFIG_X86_INTEL_USERCOPY */
24803
24804 /* Generic arbitrary sized copy. */
24805-#define __copy_user(to, from, size) \
24806+#define __copy_user(to, from, size, prefix, set, restore) \
24807 do { \
24808 int __d0, __d1, __d2; \
24809 __asm__ __volatile__( \
24810+ set \
24811 " cmp $7,%0\n" \
24812 " jbe 1f\n" \
24813 " movl %1,%0\n" \
24814 " negl %0\n" \
24815 " andl $7,%0\n" \
24816 " subl %0,%3\n" \
24817- "4: rep; movsb\n" \
24818+ "4: rep; "prefix"movsb\n" \
24819 " movl %3,%0\n" \
24820 " shrl $2,%0\n" \
24821 " andl $3,%3\n" \
24822 " .align 2,0x90\n" \
24823- "0: rep; movsl\n" \
24824+ "0: rep; "prefix"movsl\n" \
24825 " movl %3,%0\n" \
24826- "1: rep; movsb\n" \
24827+ "1: rep; "prefix"movsb\n" \
24828 "2:\n" \
24829+ restore \
24830 ".section .fixup,\"ax\"\n" \
24831 "5: addl %3,%0\n" \
24832 " jmp 2b\n" \
24833@@ -538,14 +650,14 @@ do { \
24834 " negl %0\n" \
24835 " andl $7,%0\n" \
24836 " subl %0,%3\n" \
24837- "4: rep; movsb\n" \
24838+ "4: rep; "__copyuser_seg"movsb\n" \
24839 " movl %3,%0\n" \
24840 " shrl $2,%0\n" \
24841 " andl $3,%3\n" \
24842 " .align 2,0x90\n" \
24843- "0: rep; movsl\n" \
24844+ "0: rep; "__copyuser_seg"movsl\n" \
24845 " movl %3,%0\n" \
24846- "1: rep; movsb\n" \
24847+ "1: rep; "__copyuser_seg"movsb\n" \
24848 "2:\n" \
24849 ".section .fixup,\"ax\"\n" \
24850 "5: addl %3,%0\n" \
24851@@ -629,9 +741,9 @@ survive:
24852 #endif
24853 stac();
24854 if (movsl_is_ok(to, from, n))
24855- __copy_user(to, from, n);
24856+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
24857 else
24858- n = __copy_user_intel(to, from, n);
24859+ n = __generic_copy_to_user_intel(to, from, n);
24860 clac();
24861 return n;
24862 }
24863@@ -655,10 +767,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
24864 {
24865 stac();
24866 if (movsl_is_ok(to, from, n))
24867- __copy_user(to, from, n);
24868+ __copy_user(to, from, n, __copyuser_seg, "", "");
24869 else
24870- n = __copy_user_intel((void __user *)to,
24871- (const void *)from, n);
24872+ n = __generic_copy_from_user_intel(to, from, n);
24873 clac();
24874 return n;
24875 }
24876@@ -689,66 +800,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
24877 if (n > 64 && cpu_has_xmm2)
24878 n = __copy_user_intel_nocache(to, from, n);
24879 else
24880- __copy_user(to, from, n);
24881+ __copy_user(to, from, n, __copyuser_seg, "", "");
24882 #else
24883- __copy_user(to, from, n);
24884+ __copy_user(to, from, n, __copyuser_seg, "", "");
24885 #endif
24886 clac();
24887 return n;
24888 }
24889 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
24890
24891-/**
24892- * copy_to_user: - Copy a block of data into user space.
24893- * @to: Destination address, in user space.
24894- * @from: Source address, in kernel space.
24895- * @n: Number of bytes to copy.
24896- *
24897- * Context: User context only. This function may sleep.
24898- *
24899- * Copy data from kernel space to user space.
24900- *
24901- * Returns number of bytes that could not be copied.
24902- * On success, this will be zero.
24903- */
24904-unsigned long
24905-copy_to_user(void __user *to, const void *from, unsigned long n)
24906-{
24907- if (access_ok(VERIFY_WRITE, to, n))
24908- n = __copy_to_user(to, from, n);
24909- return n;
24910-}
24911-EXPORT_SYMBOL(copy_to_user);
24912-
24913-/**
24914- * copy_from_user: - Copy a block of data from user space.
24915- * @to: Destination address, in kernel space.
24916- * @from: Source address, in user space.
24917- * @n: Number of bytes to copy.
24918- *
24919- * Context: User context only. This function may sleep.
24920- *
24921- * Copy data from user space to kernel space.
24922- *
24923- * Returns number of bytes that could not be copied.
24924- * On success, this will be zero.
24925- *
24926- * If some data could not be copied, this function will pad the copied
24927- * data to the requested size using zero bytes.
24928- */
24929-unsigned long
24930-_copy_from_user(void *to, const void __user *from, unsigned long n)
24931-{
24932- if (access_ok(VERIFY_READ, from, n))
24933- n = __copy_from_user(to, from, n);
24934- else
24935- memset(to, 0, n);
24936- return n;
24937-}
24938-EXPORT_SYMBOL(_copy_from_user);
24939-
24940 void copy_from_user_overflow(void)
24941 {
24942 WARN(1, "Buffer overflow detected!\n");
24943 }
24944 EXPORT_SYMBOL(copy_from_user_overflow);
24945+
24946+void copy_to_user_overflow(void)
24947+{
24948+ WARN(1, "Buffer overflow detected!\n");
24949+}
24950+EXPORT_SYMBOL(copy_to_user_overflow);
24951+
24952+#ifdef CONFIG_PAX_MEMORY_UDEREF
24953+void __set_fs(mm_segment_t x)
24954+{
24955+ switch (x.seg) {
24956+ case 0:
24957+ loadsegment(gs, 0);
24958+ break;
24959+ case TASK_SIZE_MAX:
24960+ loadsegment(gs, __USER_DS);
24961+ break;
24962+ case -1UL:
24963+ loadsegment(gs, __KERNEL_DS);
24964+ break;
24965+ default:
24966+ BUG();
24967+ }
24968+ return;
24969+}
24970+EXPORT_SYMBOL(__set_fs);
24971+
24972+void set_fs(mm_segment_t x)
24973+{
24974+ current_thread_info()->addr_limit = x;
24975+ __set_fs(x);
24976+}
24977+EXPORT_SYMBOL(set_fs);
24978+#endif
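
The usercopy_32.c rewrite threads the same two UDEREF knobs through every copy routine: kernel-to-user copies temporarily point %es at the user segment (__COPYUSER_SET_ES/__COPYUSER_RESTORE_ES around the rep movs, whose destination is ES:EDI), while user-to-kernel reads get a per-instruction gs override (__copyuser_seg, which redirects the movs source). __copy_user() grows prefix/set/restore parameters so one macro serves both directions, and __copy_user_intel is split into explicit to/from variants. The new __set_fs()/set_fs() pair makes the classic addr_limit switch actually reload %gs, which is what widens or confines what the gs-prefixed accessors can reach. A sketch of the era-typical call pattern that relies on it, assuming <linux/uaccess.h>:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	/*
	 * Sketch: switching to KERNEL_DS makes the UDEREF set_fs()
	 * above load %gs with __KERNEL_DS, so the gs-prefixed copy
	 * routines may reach kernel addresses; the user-limited
	 * segment is restored afterwards.
	 */
	static int read_via_kernel_ds(void *dst, const void *kernel_src, size_t len)
	{
		mm_segment_t old_fs = get_fs();
		unsigned long ret;

		set_fs(KERNEL_DS);		/* %gs: kernel-wide segment */
		ret = copy_from_user(dst, (const void __user *)kernel_src, len);
		set_fs(old_fs);			/* %gs: back to user limit  */
		return ret ? -EFAULT : 0;
	}
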
24979diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
24980index 05928aa..b33dea1 100644
24981--- a/arch/x86/lib/usercopy_64.c
24982+++ b/arch/x86/lib/usercopy_64.c
24983@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
24984 _ASM_EXTABLE(0b,3b)
24985 _ASM_EXTABLE(1b,2b)
24986 : [size8] "=&c"(size), [dst] "=&D" (__d0)
24987- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
24988+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
24989 [zero] "r" (0UL), [eight] "r" (8UL));
24990 clac();
24991 return size;
24992@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
24993 }
24994 EXPORT_SYMBOL(clear_user);
24995
24996-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
24997+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
24998 {
24999- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25000- return copy_user_generic((__force void *)to, (__force void *)from, len);
25001- }
25002- return len;
25003+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
25004+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
25005+ return len;
25006 }
25007 EXPORT_SYMBOL(copy_in_user);
25008
25009@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
25010 * it is not necessary to optimize tail handling.
25011 */
25012 unsigned long
25013-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
25014+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
25015 {
25016 char c;
25017 unsigned zero_len;
25018@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
25019 clac();
25020 return len;
25021 }
25022+
25023+void copy_from_user_overflow(void)
25024+{
25025+ WARN(1, "Buffer overflow detected!\n");
25026+}
25027+EXPORT_SYMBOL(copy_from_user_overflow);
25028+
25029+void copy_to_user_overflow(void)
25030+{
25031+ WARN(1, "Buffer overflow detected!\n");
25032+}
25033+EXPORT_SYMBOL(copy_to_user_overflow);
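
In usercopy_64.c the user pointers handed to the unchecked primitives are first passed through ____m(). Its definition lives elsewhere in the patch, but by analogy with the fault-handler hunk further down (which un-shadows kernel-mode fault addresses falling in [PAX_USER_SHADOW_BASE, 2*PAX_USER_SHADOW_BASE)), it redirects raw userland pointers into the UDEREF shadow area, the only mapping of userland that amd64 kernel code is meant to touch. A guess at its shape, explicitly not the patch's definition:

	#define PAX_USER_SHADOW_BASE_SKETCH 0x0000100000000000UL	/* hypothetical */

	/* Guessed sketch of ____m(): normalize toward the shadow mapping. */
	static inline void *____m_sketch(const void *uaddr)
	{
		unsigned long a = (unsigned long)uaddr;

		if (a < PAX_USER_SHADOW_BASE_SKETCH)	/* raw user address */
			a += PAX_USER_SHADOW_BASE_SKETCH;
		return (void *)a;
	}
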
25034diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
25035index 903ec1e..c4166b2 100644
25036--- a/arch/x86/mm/extable.c
25037+++ b/arch/x86/mm/extable.c
25038@@ -6,12 +6,24 @@
25039 static inline unsigned long
25040 ex_insn_addr(const struct exception_table_entry *x)
25041 {
25042- return (unsigned long)&x->insn + x->insn;
25043+ unsigned long reloc = 0;
25044+
25045+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25046+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25047+#endif
25048+
25049+ return (unsigned long)&x->insn + x->insn + reloc;
25050 }
25051 static inline unsigned long
25052 ex_fixup_addr(const struct exception_table_entry *x)
25053 {
25054- return (unsigned long)&x->fixup + x->fixup;
25055+ unsigned long reloc = 0;
25056+
25057+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25058+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25059+#endif
25060+
25061+ return (unsigned long)&x->fixup + x->fixup + reloc;
25062 }
25063
25064 int fixup_exception(struct pt_regs *regs)
25065@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
25066 unsigned long new_ip;
25067
25068 #ifdef CONFIG_PNPBIOS
25069- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
25070+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
25071 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
25072 extern u32 pnp_bios_is_utter_crap;
25073 pnp_bios_is_utter_crap = 1;
25074@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
25075 i += 4;
25076 p->fixup -= i;
25077 i += 4;
25078+
25079+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25080+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
25081+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25082+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
25083+#endif
25084+
25085 }
25086 }
25087
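
extable.c adapts the kernel's relative exception table to KERNEXEC on i386, where the kernel text is relocated by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR: the build-time sort subtracts that delta from the stored offsets and the runtime decoders add it back, so entries keep resolving into the moved text. The encoding itself is the stock self-relative one, sketched here for reference:

	/*
	 * Minimal sketch of the self-relative extable encoding adjusted
	 * above: each field stores a signed 32-bit offset from the
	 * field's own address, and a relocation delta (0 on unaffected
	 * configs) is added back at decode time.
	 */
	struct extable_entry_sketch {
		int insn;	/* offset from &entry->insn to the faulting insn */
		int fixup;	/* offset from &entry->fixup to the fixup code   */
	};

	static unsigned long
	extable_insn_addr(const struct extable_entry_sketch *x, long reloc)
	{
		return (unsigned long)&x->insn + x->insn + reloc;
	}
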
25088diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
25089index 79ff7da..610cf70 100644
25090--- a/arch/x86/mm/fault.c
25091+++ b/arch/x86/mm/fault.c
25092@@ -13,12 +13,19 @@
25093 #include <linux/perf_event.h> /* perf_sw_event */
25094 #include <linux/hugetlb.h> /* hstate_index_to_shift */
25095 #include <linux/prefetch.h> /* prefetchw */
25096+#include <linux/unistd.h>
25097+#include <linux/compiler.h>
25098
25099 #include <asm/traps.h> /* dotraplinkage, ... */
25100 #include <asm/pgalloc.h> /* pgd_*(), ... */
25101 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
25102 #include <asm/fixmap.h> /* VSYSCALL_START */
25103 #include <asm/rcu.h> /* exception_enter(), ... */
25104+#include <asm/tlbflush.h>
25105+
25106+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25107+#include <asm/stacktrace.h>
25108+#endif
25109
25110 /*
25111 * Page fault error code bits:
25112@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
25113 int ret = 0;
25114
25115 /* kprobe_running() needs smp_processor_id() */
25116- if (kprobes_built_in() && !user_mode_vm(regs)) {
25117+ if (kprobes_built_in() && !user_mode(regs)) {
25118 preempt_disable();
25119 if (kprobe_running() && kprobe_fault_handler(regs, 14))
25120 ret = 1;
25121@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
25122 return !instr_lo || (instr_lo>>1) == 1;
25123 case 0x00:
25124 /* Prefetch instruction is 0x0F0D or 0x0F18 */
25125- if (probe_kernel_address(instr, opcode))
25126+ if (user_mode(regs)) {
25127+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25128+ return 0;
25129+ } else if (probe_kernel_address(instr, opcode))
25130 return 0;
25131
25132 *prefetch = (instr_lo == 0xF) &&
25133@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
25134 while (instr < max_instr) {
25135 unsigned char opcode;
25136
25137- if (probe_kernel_address(instr, opcode))
25138+ if (user_mode(regs)) {
25139+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25140+ break;
25141+ } else if (probe_kernel_address(instr, opcode))
25142 break;
25143
25144 instr++;
25145@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
25146 force_sig_info(si_signo, &info, tsk);
25147 }
25148
25149+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25150+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
25151+#endif
25152+
25153+#ifdef CONFIG_PAX_EMUTRAMP
25154+static int pax_handle_fetch_fault(struct pt_regs *regs);
25155+#endif
25156+
25157+#ifdef CONFIG_PAX_PAGEEXEC
25158+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25159+{
25160+ pgd_t *pgd;
25161+ pud_t *pud;
25162+ pmd_t *pmd;
25163+
25164+ pgd = pgd_offset(mm, address);
25165+ if (!pgd_present(*pgd))
25166+ return NULL;
25167+ pud = pud_offset(pgd, address);
25168+ if (!pud_present(*pud))
25169+ return NULL;
25170+ pmd = pmd_offset(pud, address);
25171+ if (!pmd_present(*pmd))
25172+ return NULL;
25173+ return pmd;
25174+}
25175+#endif
25176+
25177 DEFINE_SPINLOCK(pgd_lock);
25178 LIST_HEAD(pgd_list);
25179
25180@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
25181 for (address = VMALLOC_START & PMD_MASK;
25182 address >= TASK_SIZE && address < FIXADDR_TOP;
25183 address += PMD_SIZE) {
25184+
25185+#ifdef CONFIG_PAX_PER_CPU_PGD
25186+ unsigned long cpu;
25187+#else
25188 struct page *page;
25189+#endif
25190
25191 spin_lock(&pgd_lock);
25192+
25193+#ifdef CONFIG_PAX_PER_CPU_PGD
25194+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25195+ pgd_t *pgd = get_cpu_pgd(cpu);
25196+ pmd_t *ret;
25197+#else
25198 list_for_each_entry(page, &pgd_list, lru) {
25199+ pgd_t *pgd = page_address(page);
25200 spinlock_t *pgt_lock;
25201 pmd_t *ret;
25202
25203@@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
25204 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25205
25206 spin_lock(pgt_lock);
25207- ret = vmalloc_sync_one(page_address(page), address);
25208+#endif
25209+
25210+ ret = vmalloc_sync_one(pgd, address);
25211+
25212+#ifndef CONFIG_PAX_PER_CPU_PGD
25213 spin_unlock(pgt_lock);
25214+#endif
25215
25216 if (!ret)
25217 break;
25218@@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
25219 * an interrupt in the middle of a task switch..
25220 */
25221 pgd_paddr = read_cr3();
25222+
25223+#ifdef CONFIG_PAX_PER_CPU_PGD
25224+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25225+#endif
25226+
25227 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25228 if (!pmd_k)
25229 return -1;
25230@@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
25231 * happen within a race in page table update. In the later
25232 * case just flush:
25233 */
25234+
25235+#ifdef CONFIG_PAX_PER_CPU_PGD
25236+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25237+ pgd = pgd_offset_cpu(smp_processor_id(), address);
25238+#else
25239 pgd = pgd_offset(current->active_mm, address);
25240+#endif
25241+
25242 pgd_ref = pgd_offset_k(address);
25243 if (pgd_none(*pgd_ref))
25244 return -1;
25245@@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25246 static int is_errata100(struct pt_regs *regs, unsigned long address)
25247 {
25248 #ifdef CONFIG_X86_64
25249- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25250+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25251 return 1;
25252 #endif
25253 return 0;
25254@@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25255 }
25256
25257 static const char nx_warning[] = KERN_CRIT
25258-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25259+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25260
25261 static void
25262 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25263@@ -577,15 +647,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25264 if (!oops_may_print())
25265 return;
25266
25267- if (error_code & PF_INSTR) {
25268+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
25269 unsigned int level;
25270
25271 pte_t *pte = lookup_address(address, &level);
25272
25273 if (pte && pte_present(*pte) && !pte_exec(*pte))
25274- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
25275+ printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
25276 }
25277
25278+#ifdef CONFIG_PAX_KERNEXEC
25279+ if (init_mm.start_code <= address && address < init_mm.end_code) {
25280+ if (current->signal->curr_ip)
25281+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25282+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
25283+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
25284+ else
25285+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
25286+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
25287+ }
25288+#endif
25289+
25290 printk(KERN_ALERT "BUG: unable to handle kernel ");
25291 if (address < PAGE_SIZE)
25292 printk(KERN_CONT "NULL pointer dereference");
25293@@ -748,6 +830,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25294 return;
25295 }
25296 #endif
25297+
25298+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25299+ if (pax_is_fetch_fault(regs, error_code, address)) {
25300+
25301+#ifdef CONFIG_PAX_EMUTRAMP
25302+ switch (pax_handle_fetch_fault(regs)) {
25303+ case 2:
25304+ return;
25305+ }
25306+#endif
25307+
25308+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25309+ do_group_exit(SIGKILL);
25310+ }
25311+#endif
25312+
25313 /* Kernel addresses are always protection faults: */
25314 if (address >= TASK_SIZE)
25315 error_code |= PF_PROT;
25316@@ -847,7 +945,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25317 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
25318 printk(KERN_ERR
25319 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25320- tsk->comm, tsk->pid, address);
25321+ tsk->comm, task_pid_nr(tsk), address);
25322 code = BUS_MCEERR_AR;
25323 }
25324 #endif
25325@@ -903,6 +1001,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25326 return 1;
25327 }
25328
25329+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25330+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25331+{
25332+ pte_t *pte;
25333+ pmd_t *pmd;
25334+ spinlock_t *ptl;
25335+ unsigned char pte_mask;
25336+
25337+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25338+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
25339+ return 0;
25340+
25341+ /* PaX: it's our fault, let's handle it if we can */
25342+
25343+ /* PaX: take a look at read faults before acquiring any locks */
25344+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25345+ /* instruction fetch attempt from a protected page in user mode */
25346+ up_read(&mm->mmap_sem);
25347+
25348+#ifdef CONFIG_PAX_EMUTRAMP
25349+ switch (pax_handle_fetch_fault(regs)) {
25350+ case 2:
25351+ return 1;
25352+ }
25353+#endif
25354+
25355+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25356+ do_group_exit(SIGKILL);
25357+ }
25358+
25359+ pmd = pax_get_pmd(mm, address);
25360+ if (unlikely(!pmd))
25361+ return 0;
25362+
25363+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25364+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25365+ pte_unmap_unlock(pte, ptl);
25366+ return 0;
25367+ }
25368+
25369+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25370+ /* write attempt to a protected page in user mode */
25371+ pte_unmap_unlock(pte, ptl);
25372+ return 0;
25373+ }
25374+
25375+#ifdef CONFIG_SMP
25376+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25377+#else
25378+ if (likely(address > get_limit(regs->cs)))
25379+#endif
25380+ {
25381+ set_pte(pte, pte_mkread(*pte));
25382+ __flush_tlb_one(address);
25383+ pte_unmap_unlock(pte, ptl);
25384+ up_read(&mm->mmap_sem);
25385+ return 1;
25386+ }
25387+
25388+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25389+
25390+ /*
25391+ * PaX: fill DTLB with user rights and retry
25392+ */
25393+ __asm__ __volatile__ (
25394+ "orb %2,(%1)\n"
25395+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25396+/*
25397+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
25398+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
25399+ * page fault when examined during a TLB load attempt. this is true not only
25400+ * for PTEs holding a non-present entry but also present entries that will
25401+ * raise a page fault (such as those set up by PaX, or the copy-on-write
25402+ * mechanism). in effect it means that we do *not* need to flush the TLBs
25403+ * for our target pages since their PTEs are simply not in the TLBs at all.
25404+
25405+ * the best thing in omitting it is that we gain around 15-20% speed in the
25406+ * fast path of the page fault handler and can get rid of tracing since we
25407+ * can no longer flush unintended entries.
25408+ */
25409+ "invlpg (%0)\n"
25410+#endif
25411+ __copyuser_seg"testb $0,(%0)\n"
25412+ "xorb %3,(%1)\n"
25413+ :
25414+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25415+ : "memory", "cc");
25416+ pte_unmap_unlock(pte, ptl);
25417+ up_read(&mm->mmap_sem);
25418+ return 1;
25419+}
25420+#endif
25421+
25422 /*
25423 * Handle a spurious fault caused by a stale TLB entry.
25424 *
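
pax_handle_pageexec_fault() in the hunk above emulates non-executable pages on plain i386 (no hardware NX): a legitimate data access to a PAGEEXEC-protected page briefly flips the PTE to user rights, touches the page so the CPU caches that PTE in the data TLB, then flips it back; data accesses proceed while instruction fetches keep faulting. The pte_mask it ORs in encodes the write bit arithmetically, which is worth unpacking; a worked check using the standard i386 PTE constants of this kernel generation:

	#define PF_WRITE_SK		0x02	/* fault error code: write access */
	#define _PAGE_BIT_DIRTY_SK	6
	#define _PAGE_ACCESSED_SK	0x20
	#define _PAGE_USER_SK		0x04

	/* Worked example of the pte_mask computation above. */
	static unsigned char make_pte_mask(unsigned long error_code)
	{
		/* (error_code & 2) << 5 == 0x40 == _PAGE_DIRTY on writes */
		return _PAGE_ACCESSED_SK | _PAGE_USER_SK |
		       ((error_code & PF_WRITE_SK) << (_PAGE_BIT_DIRTY_SK - 1));
	}
	/* read fault:  0x20 | 0x04 | 0x00 = 0x24 (accessed, user)        */
	/* write fault: 0x20 | 0x04 | 0x40 = 0x64 (accessed, user, dirty) */
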
25425@@ -975,6 +1166,9 @@ int show_unhandled_signals = 1;
25426 static inline int
25427 access_error(unsigned long error_code, struct vm_area_struct *vma)
25428 {
25429+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25430+ return 1;
25431+
25432 if (error_code & PF_WRITE) {
25433 /* write, present and write, not present: */
25434 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25435@@ -1003,7 +1197,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
25436 if (error_code & PF_USER)
25437 return false;
25438
25439- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
25440+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
25441 return false;
25442
25443 return true;
25444@@ -1019,18 +1213,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25445 {
25446 struct vm_area_struct *vma;
25447 struct task_struct *tsk;
25448- unsigned long address;
25449 struct mm_struct *mm;
25450 int fault;
25451 int write = error_code & PF_WRITE;
25452 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
25453 (write ? FAULT_FLAG_WRITE : 0);
25454
25455- tsk = current;
25456- mm = tsk->mm;
25457-
25458 /* Get the faulting address: */
25459- address = read_cr2();
25460+ unsigned long address = read_cr2();
25461+
25462+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25463+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25464+ if (!search_exception_tables(regs->ip)) {
25465+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25466+ bad_area_nosemaphore(regs, error_code, address);
25467+ return;
25468+ }
25469+ if (address < PAX_USER_SHADOW_BASE) {
25470+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25471+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
25472+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25473+ } else
25474+ address -= PAX_USER_SHADOW_BASE;
25475+ }
25476+#endif
25477+
25478+ tsk = current;
25479+ mm = tsk->mm;
25480
25481 /*
25482 * Detect and handle instructions that would cause a page fault for
25483@@ -1091,7 +1300,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
25484 * User-mode registers count as a user access even for any
25485 * potential system fault or CPU buglet:
25486 */
25487- if (user_mode_vm(regs)) {
25488+ if (user_mode(regs)) {
25489 local_irq_enable();
25490 error_code |= PF_USER;
25491 } else {
25492@@ -1153,6 +1362,11 @@ retry:
25493 might_sleep();
25494 }
25495
25496+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25497+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25498+ return;
25499+#endif
25500+
25501 vma = find_vma(mm, address);
25502 if (unlikely(!vma)) {
25503 bad_area(regs, error_code, address);
25504@@ -1164,18 +1378,24 @@ retry:
25505 bad_area(regs, error_code, address);
25506 return;
25507 }
25508- if (error_code & PF_USER) {
25509- /*
25510- * Accessing the stack below %sp is always a bug.
25511- * The large cushion allows instructions like enter
25512- * and pusha to work. ("enter $65535, $31" pushes
25513- * 32 pointers and then decrements %sp by 65535.)
25514- */
25515- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25516- bad_area(regs, error_code, address);
25517- return;
25518- }
25519+ /*
25520+ * Accessing the stack below %sp is always a bug.
25521+ * The large cushion allows instructions like enter
25522+ * and pusha to work. ("enter $65535, $31" pushes
25523+ * 32 pointers and then decrements %sp by 65535.)
25524+ */
25525+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25526+ bad_area(regs, error_code, address);
25527+ return;
25528 }
25529+
25530+#ifdef CONFIG_PAX_SEGMEXEC
25531+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25532+ bad_area(regs, error_code, address);
25533+ return;
25534+ }
25535+#endif
25536+
25537 if (unlikely(expand_stack(vma, address))) {
25538 bad_area(regs, error_code, address);
25539 return;
25540@@ -1239,3 +1459,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25541 __do_page_fault(regs, error_code);
25542 exception_exit(regs);
25543 }
25544+
25545+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25546+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25547+{
25548+ struct mm_struct *mm = current->mm;
25549+ unsigned long ip = regs->ip;
25550+
25551+ if (v8086_mode(regs))
25552+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25553+
25554+#ifdef CONFIG_PAX_PAGEEXEC
25555+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25556+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25557+ return true;
25558+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25559+ return true;
25560+ return false;
25561+ }
25562+#endif
25563+
25564+#ifdef CONFIG_PAX_SEGMEXEC
25565+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25566+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25567+ return true;
25568+ return false;
25569+ }
25570+#endif
25571+
25572+ return false;
25573+}
25574+#endif
25575+
25576+#ifdef CONFIG_PAX_EMUTRAMP
25577+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25578+{
25579+ int err;
25580+
25581+ do { /* PaX: libffi trampoline emulation */
25582+ unsigned char mov, jmp;
25583+ unsigned int addr1, addr2;
25584+
25585+#ifdef CONFIG_X86_64
25586+ if ((regs->ip + 9) >> 32)
25587+ break;
25588+#endif
25589+
25590+ err = get_user(mov, (unsigned char __user *)regs->ip);
25591+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25592+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25593+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25594+
25595+ if (err)
25596+ break;
25597+
25598+ if (mov == 0xB8 && jmp == 0xE9) {
25599+ regs->ax = addr1;
25600+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25601+ return 2;
25602+ }
25603+ } while (0);
25604+
25605+ do { /* PaX: gcc trampoline emulation #1 */
25606+ unsigned char mov1, mov2;
25607+ unsigned short jmp;
25608+ unsigned int addr1, addr2;
25609+
25610+#ifdef CONFIG_X86_64
25611+ if ((regs->ip + 11) >> 32)
25612+ break;
25613+#endif
25614+
25615+ err = get_user(mov1, (unsigned char __user *)regs->ip);
25616+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25617+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25618+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25619+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25620+
25621+ if (err)
25622+ break;
25623+
25624+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25625+ regs->cx = addr1;
25626+ regs->ax = addr2;
25627+ regs->ip = addr2;
25628+ return 2;
25629+ }
25630+ } while (0);
25631+
25632+ do { /* PaX: gcc trampoline emulation #2 */
25633+ unsigned char mov, jmp;
25634+ unsigned int addr1, addr2;
25635+
25636+#ifdef CONFIG_X86_64
25637+ if ((regs->ip + 9) >> 32)
25638+ break;
25639+#endif
25640+
25641+ err = get_user(mov, (unsigned char __user *)regs->ip);
25642+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25643+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25644+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25645+
25646+ if (err)
25647+ break;
25648+
25649+ if (mov == 0xB9 && jmp == 0xE9) {
25650+ regs->cx = addr1;
25651+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25652+ return 2;
25653+ }
25654+ } while (0);
25655+
25656+ return 1; /* PaX in action */
25657+}
25658+
25659+#ifdef CONFIG_X86_64
25660+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25661+{
25662+ int err;
25663+
25664+ do { /* PaX: libffi trampoline emulation */
25665+ unsigned short mov1, mov2, jmp1;
25666+ unsigned char stcclc, jmp2;
25667+ unsigned long addr1, addr2;
25668+
25669+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25670+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25671+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25672+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25673+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25674+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25675+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25676+
25677+ if (err)
25678+ break;
25679+
25680+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25681+ regs->r11 = addr1;
25682+ regs->r10 = addr2;
25683+ if (stcclc == 0xF8)
25684+ regs->flags &= ~X86_EFLAGS_CF;
25685+ else
25686+ regs->flags |= X86_EFLAGS_CF;
25687+ regs->ip = addr1;
25688+ return 2;
25689+ }
25690+ } while (0);
25691+
25692+ do { /* PaX: gcc trampoline emulation #1 */
25693+ unsigned short mov1, mov2, jmp1;
25694+ unsigned char jmp2;
25695+ unsigned int addr1;
25696+ unsigned long addr2;
25697+
25698+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25699+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25700+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25701+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25702+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25703+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25704+
25705+ if (err)
25706+ break;
25707+
25708+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25709+ regs->r11 = addr1;
25710+ regs->r10 = addr2;
25711+ regs->ip = addr1;
25712+ return 2;
25713+ }
25714+ } while (0);
25715+
25716+ do { /* PaX: gcc trampoline emulation #2 */
25717+ unsigned short mov1, mov2, jmp1;
25718+ unsigned char jmp2;
25719+ unsigned long addr1, addr2;
25720+
25721+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25722+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25723+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25724+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25725+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
25726+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
25727+
25728+ if (err)
25729+ break;
25730+
25731+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25732+ regs->r11 = addr1;
25733+ regs->r10 = addr2;
25734+ regs->ip = addr1;
25735+ return 2;
25736+ }
25737+ } while (0);
25738+
25739+ return 1; /* PaX in action */
25740+}
25741+#endif
25742+
25743+/*
25744+ * PaX: decide what to do with offenders (regs->ip is the fault address)
25745+ *
25746+ * returns 1 when the task should be killed
25747+ *         2 when a gcc trampoline was detected
25748+ */
25749+static int pax_handle_fetch_fault(struct pt_regs *regs)
25750+{
25751+ if (v8086_mode(regs))
25752+ return 1;
25753+
25754+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
25755+ return 1;
25756+
25757+#ifdef CONFIG_X86_32
25758+ return pax_handle_fetch_fault_32(regs);
25759+#else
25760+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
25761+ return pax_handle_fetch_fault_32(regs);
25762+ else
25763+ return pax_handle_fetch_fault_64(regs);
25764+#endif
25765+}
25766+#endif
25767+
25768+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25769+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
25770+{
25771+ long i;
25772+
25773+ printk(KERN_ERR "PAX: bytes at PC: ");
25774+ for (i = 0; i < 20; i++) {
25775+ unsigned char c;
25776+ if (get_user(c, (unsigned char __force_user *)pc+i))
25777+ printk(KERN_CONT "?? ");
25778+ else
25779+ printk(KERN_CONT "%02x ", c);
25780+ }
25781+ printk("\n");
25782+
25783+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
25784+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
25785+ unsigned long c;
25786+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
25787+#ifdef CONFIG_X86_32
25788+ printk(KERN_CONT "???????? ");
25789+#else
25790+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
25791+ printk(KERN_CONT "???????? ???????? ");
25792+ else
25793+ printk(KERN_CONT "???????????????? ");
25794+#endif
25795+ } else {
25796+#ifdef CONFIG_X86_64
25797+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
25798+ printk(KERN_CONT "%08x ", (unsigned int)c);
25799+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
25800+ } else
25801+#endif
25802+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
25803+ }
25804+ }
25805+ printk("\n");
25806+}
25807+#endif
25808+
25809+/**
25810+ * probe_kernel_write(): safely attempt to write to a location
25811+ * @dst: address to write to
25812+ * @src: pointer to the data that shall be written
25813+ * @size: size of the data chunk
25814+ *
25815+ * Safely write to address @dst from the buffer at @src. If a kernel fault
25816+ * happens, handle that and return -EFAULT.
25817+ */
25818+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
25819+{
25820+ long ret;
25821+ mm_segment_t old_fs = get_fs();
25822+
25823+ set_fs(KERNEL_DS);
25824+ pagefault_disable();
25825+ pax_open_kernel();
25826+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
25827+ pax_close_kernel();
25828+ pagefault_enable();
25829+ set_fs(old_fs);
25830+
25831+ return ret ? -EFAULT : 0;
25832+}
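
The EMUTRAMP handlers above never execute anything from the non-executable page; they pattern-match the handful of byte sequences gcc and libffi emit for closure trampolines and replay their effect on the register state. The 32-bit libffi case, for instance, is the 10-byte sequence B8 imm32 (mov $addr1, %eax) followed by E9 rel32 (jmp). A standalone sketch of that encode/decode round trip, mirroring the checks in pax_handle_fetch_fault_32() — the addresses are made up and nothing here is executed:

/* Sketch: assemble and decode the 10-byte "mov $imm32,%eax; jmp rel32"
 * trampoline that pax_handle_fetch_fault_32() emulates.  Illustrative
 * only -- values are hypothetical, the buffer is never run. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t tramp[10];
	uint32_t addr1 = 0x08048000u;	/* hypothetical data pointer */
	int32_t  rel   = 0x100;		/* hypothetical jmp displacement */

	tramp[0] = 0xB8;		/* mov $addr1, %eax */
	memcpy(&tramp[1], &addr1, 4);
	tramp[5] = 0xE9;		/* jmp rel32 */
	memcpy(&tramp[6], &rel, 4);

	/* the fault handler reads the same fields with get_user(): */
	uint8_t mov = tramp[0], jmp = tramp[5];
	uint32_t a1;
	int32_t a2;
	memcpy(&a1, &tramp[1], 4);
	memcpy(&a2, &tramp[6], 4);

	if (mov == 0xB8 && jmp == 0xE9)
		/* emulation: %eax = a1, %eip = tramp + a2 + 10 */
		printf("libffi trampoline: eax=%#x, target=tramp+%d+10\n", a1, a2);
	return 0;
}
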
25833diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
25834index dd74e46..7d26398 100644
25835--- a/arch/x86/mm/gup.c
25836+++ b/arch/x86/mm/gup.c
25837@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
25838 addr = start;
25839 len = (unsigned long) nr_pages << PAGE_SHIFT;
25840 end = start + len;
25841- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25842+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25843 (void __user *)start, len)))
25844 return 0;
25845
25846diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
25847index 6f31ee5..8ee4164 100644
25848--- a/arch/x86/mm/highmem_32.c
25849+++ b/arch/x86/mm/highmem_32.c
25850@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
25851 idx = type + KM_TYPE_NR*smp_processor_id();
25852 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25853 BUG_ON(!pte_none(*(kmap_pte-idx)));
25854+
25855+ pax_open_kernel();
25856 set_pte(kmap_pte-idx, mk_pte(page, prot));
25857+ pax_close_kernel();
25858+
25859 arch_flush_lazy_mmu_mode();
25860
25861 return (void *)vaddr;
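
kmap_atomic_prot() now brackets its set_pte() with pax_open_kernel()/pax_close_kernel() because KERNEXEC keeps kernel page tables read-only. On bare-metal x86 the pair amounts to toggling CR0.WP with preemption disabled, as in this simplified sketch (the real helpers also handle paravirt and thread the saved cr0 value through; that plumbing is omitted here):

/* Simplified model of the KERNEXEC open/close pair: clear CR0.WP so the
 * kernel may write through read-only PTEs, then restore it. */
#define X86_CR0_WP (1UL << 16)

static inline void sketch_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* WP was set: this clears it */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
}

static inline void sketch_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* WP was clear: this sets it */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable();
}
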
25862diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
25863index 937bff5..dce75ff 100644
25864--- a/arch/x86/mm/hugetlbpage.c
25865+++ b/arch/x86/mm/hugetlbpage.c
25866@@ -276,13 +276,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
25867 struct hstate *h = hstate_file(file);
25868 struct mm_struct *mm = current->mm;
25869 struct vm_area_struct *vma;
25870- unsigned long start_addr;
25871+ unsigned long start_addr, pax_task_size = TASK_SIZE;
25872+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25873+
25874+#ifdef CONFIG_PAX_SEGMEXEC
25875+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25876+ pax_task_size = SEGMEXEC_TASK_SIZE;
25877+#endif
25878+
25879+ pax_task_size -= PAGE_SIZE;
25880
25881 if (len > mm->cached_hole_size) {
25882- start_addr = mm->free_area_cache;
25883+ start_addr = mm->free_area_cache;
25884 } else {
25885- start_addr = TASK_UNMAPPED_BASE;
25886- mm->cached_hole_size = 0;
25887+ start_addr = mm->mmap_base;
25888+ mm->cached_hole_size = 0;
25889 }
25890
25891 full_search:
25892@@ -290,26 +298,27 @@ full_search:
25893
25894 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
25895 /* At this point: (!vma || addr < vma->vm_end). */
25896- if (TASK_SIZE - len < addr) {
25897+ if (pax_task_size - len < addr) {
25898 /*
25899 * Start a new search - just in case we missed
25900 * some holes.
25901 */
25902- if (start_addr != TASK_UNMAPPED_BASE) {
25903- start_addr = TASK_UNMAPPED_BASE;
25904+ if (start_addr != mm->mmap_base) {
25905+ start_addr = mm->mmap_base;
25906 mm->cached_hole_size = 0;
25907 goto full_search;
25908 }
25909 return -ENOMEM;
25910 }
25911- if (!vma || addr + len <= vma->vm_start) {
25912- mm->free_area_cache = addr + len;
25913- return addr;
25914- }
25915+ if (check_heap_stack_gap(vma, addr, len, offset))
25916+ break;
25917 if (addr + mm->cached_hole_size < vma->vm_start)
25918 mm->cached_hole_size = vma->vm_start - addr;
25919 addr = ALIGN(vma->vm_end, huge_page_size(h));
25920 }
25921+
25922+ mm->free_area_cache = addr + len;
25923+ return addr;
25924 }
25925
25926 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25927@@ -320,9 +329,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25928 struct mm_struct *mm = current->mm;
25929 struct vm_area_struct *vma;
25930 unsigned long base = mm->mmap_base;
25931- unsigned long addr = addr0;
25932+ unsigned long addr;
25933 unsigned long largest_hole = mm->cached_hole_size;
25934- unsigned long start_addr;
25935+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
25936
25937 /* don't allow allocations above current base */
25938 if (mm->free_area_cache > base)
25939@@ -332,16 +341,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25940 largest_hole = 0;
25941 mm->free_area_cache = base;
25942 }
25943-try_again:
25944- start_addr = mm->free_area_cache;
25945
25946 /* make sure it can fit in the remaining address space */
25947 if (mm->free_area_cache < len)
25948 goto fail;
25949
25950 /* either no address requested or can't fit in requested address hole */
25951- addr = (mm->free_area_cache - len) & huge_page_mask(h);
25952+ addr = mm->free_area_cache - len;
25953 do {
25954+ addr &= huge_page_mask(h);
25955 /*
25956 * Lookup failure means no vma is above this address,
25957 * i.e. return with success:
25958@@ -350,10 +358,10 @@ try_again:
25959 if (!vma)
25960 return addr;
25961
25962- if (addr + len <= vma->vm_start) {
25963+ if (check_heap_stack_gap(vma, addr, len, offset)) {
25964 /* remember the address as a hint for next time */
25965- mm->cached_hole_size = largest_hole;
25966- return (mm->free_area_cache = addr);
25967+ mm->cached_hole_size = largest_hole;
25968+ return (mm->free_area_cache = addr);
25969 } else if (mm->free_area_cache == vma->vm_end) {
25970 /* pull free_area_cache down to the first hole */
25971 mm->free_area_cache = vma->vm_start;
25972@@ -362,29 +370,34 @@ try_again:
25973
25974 /* remember the largest hole we saw so far */
25975 if (addr + largest_hole < vma->vm_start)
25976- largest_hole = vma->vm_start - addr;
25977+ largest_hole = vma->vm_start - addr;
25978
25979 /* try just below the current vma->vm_start */
25980- addr = (vma->vm_start - len) & huge_page_mask(h);
25981- } while (len <= vma->vm_start);
25982+ addr = skip_heap_stack_gap(vma, len, offset);
25983+ } while (!IS_ERR_VALUE(addr));
25984
25985 fail:
25986 /*
25987- * if hint left us with no space for the requested
25988- * mapping then try again:
25989- */
25990- if (start_addr != base) {
25991- mm->free_area_cache = base;
25992- largest_hole = 0;
25993- goto try_again;
25994- }
25995- /*
25996 * A failed mmap() very likely causes application failure,
25997 * so fall back to the bottom-up function here. This scenario
25998 * can happen with large stack limits and large mmap()
25999 * allocations.
26000 */
26001- mm->free_area_cache = TASK_UNMAPPED_BASE;
26002+
26003+#ifdef CONFIG_PAX_SEGMEXEC
26004+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26005+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
26006+ else
26007+#endif
26008+
26009+ mm->mmap_base = TASK_UNMAPPED_BASE;
26010+
26011+#ifdef CONFIG_PAX_RANDMMAP
26012+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26013+ mm->mmap_base += mm->delta_mmap;
26014+#endif
26015+
26016+ mm->free_area_cache = mm->mmap_base;
26017 mm->cached_hole_size = ~0UL;
26018 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
26019 len, pgoff, flags);
26020@@ -392,6 +405,7 @@ fail:
26021 /*
26022 * Restore the topdown base:
26023 */
26024+ mm->mmap_base = base;
26025 mm->free_area_cache = base;
26026 mm->cached_hole_size = ~0UL;
26027
26028@@ -405,10 +419,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26029 struct hstate *h = hstate_file(file);
26030 struct mm_struct *mm = current->mm;
26031 struct vm_area_struct *vma;
26032+ unsigned long pax_task_size = TASK_SIZE;
26033+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
26034
26035 if (len & ~huge_page_mask(h))
26036 return -EINVAL;
26037- if (len > TASK_SIZE)
26038+
26039+#ifdef CONFIG_PAX_SEGMEXEC
26040+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26041+ pax_task_size = SEGMEXEC_TASK_SIZE;
26042+#endif
26043+
26044+ pax_task_size -= PAGE_SIZE;
26045+
26046+ if (len > pax_task_size)
26047 return -ENOMEM;
26048
26049 if (flags & MAP_FIXED) {
26050@@ -417,11 +441,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26051 return addr;
26052 }
26053
26054+#ifdef CONFIG_PAX_RANDMMAP
26055+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26056+#endif
26057+
26058 if (addr) {
26059 addr = ALIGN(addr, huge_page_size(h));
26060 vma = find_vma(mm, addr);
26061- if (TASK_SIZE - len >= addr &&
26062- (!vma || addr + len <= vma->vm_start))
26063+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26064 return addr;
26065 }
26066 if (mm->get_unmapped_area == arch_get_unmapped_area)
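
All three hugetlb allocators now accept a candidate range via check_heap_stack_gap(vma, addr, len, offset) rather than the bare addr + len <= vma->vm_start test, so a new mapping must also clear a guard gap (plus the per-thread random offset from gr_rand_threadstack_offset()) below any stack VMA above it. A standalone model of that accept test, with simplified types and the gap passed in explicitly rather than read from a sysctl:

/* Standalone model of the gap check: a candidate [addr, addr+len) is
 * acceptable only if it also leaves `gap` bytes below the next VMA when
 * that VMA grows down (a stack).  Types and the explicit gap parameter
 * are simplified stand-ins for the kernel's. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct vma_model {
	uintptr_t vm_start;
	bool grows_down;	/* VM_GROWSDOWN in the kernel */
};

static bool gap_check_model(const struct vma_model *next,
			    uintptr_t addr, size_t len, size_t gap)
{
	if (!next)				/* nothing above: fits */
		return true;
	if (!next->grows_down)			/* plain VMA: touching is ok */
		return addr + len <= next->vm_start;
	/* stack above: keep a guard gap below it */
	return addr + len + gap <= next->vm_start;
}
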
26067diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
26068index d7aea41..0fc945b 100644
26069--- a/arch/x86/mm/init.c
26070+++ b/arch/x86/mm/init.c
26071@@ -4,6 +4,7 @@
26072 #include <linux/swap.h>
26073 #include <linux/memblock.h>
26074 #include <linux/bootmem.h> /* for max_low_pfn */
26075+#include <linux/tboot.h>
26076
26077 #include <asm/cacheflush.h>
26078 #include <asm/e820.h>
26079@@ -16,6 +17,8 @@
26080 #include <asm/tlb.h>
26081 #include <asm/proto.h>
26082 #include <asm/dma.h> /* for MAX_DMA_PFN */
26083+#include <asm/desc.h>
26084+#include <asm/bios_ebda.h>
26085
26086 unsigned long __initdata pgt_buf_start;
26087 unsigned long __meminitdata pgt_buf_end;
26088@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
26089 {
26090 int i;
26091 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
26092- unsigned long start = 0, good_end;
26093+ unsigned long start = 0x100000, good_end;
26094 phys_addr_t base;
26095
26096 for (i = 0; i < nr_range; i++) {
26097@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26098 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
26099 * mmio resources as well as potential bios/acpi data regions.
26100 */
26101+
26102+#ifdef CONFIG_GRKERNSEC_KMEM
26103+static unsigned int ebda_start __read_only;
26104+static unsigned int ebda_end __read_only;
26105+#endif
26106+
26107 int devmem_is_allowed(unsigned long pagenr)
26108 {
26109- if (pagenr < 256)
26110+#ifdef CONFIG_GRKERNSEC_KMEM
26111+ /* allow BDA */
26112+ if (!pagenr)
26113 return 1;
26114+ /* allow EBDA */
26115+ if (pagenr >= ebda_start && pagenr < ebda_end)
26116+ return 1;
26117+ /* if tboot is in use, allow access to its hardcoded serial log range */
26118+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
26119+ return 1;
26120+#else
26121+ if (!pagenr)
26122+ return 1;
26123+#ifdef CONFIG_VM86
26124+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
26125+ return 1;
26126+#endif
26127+#endif
26128+
26129+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26130+ return 1;
26131+#ifdef CONFIG_GRKERNSEC_KMEM
26132+ /* throw out everything else below 1MB */
26133+ if (pagenr <= 256)
26134+ return 0;
26135+#endif
26136 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
26137 return 0;
26138 if (!page_is_ram(pagenr))
26139@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
26140 #endif
26141 }
26142
26143+#ifdef CONFIG_GRKERNSEC_KMEM
26144+static inline void gr_init_ebda(void)
26145+{
26146+ unsigned int ebda_addr;
26147+ unsigned int ebda_size = 0;
26148+
26149+ ebda_addr = get_bios_ebda();
26150+ if (ebda_addr) {
26151+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
26152+ ebda_size <<= 10;
26153+ }
26154+ if (ebda_addr && ebda_size) {
26155+ ebda_start = ebda_addr >> PAGE_SHIFT;
26156+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
26157+ } else {
26158+ ebda_start = 0x9f000 >> PAGE_SHIFT;
26159+ ebda_end = 0xa0000 >> PAGE_SHIFT;
26160+ }
26161+}
26162+#else
26163+static inline void gr_init_ebda(void) { }
26164+#endif
26165+
26166 void free_initmem(void)
26167 {
26168+#ifdef CONFIG_PAX_KERNEXEC
26169+#ifdef CONFIG_X86_32
26170+ /* PaX: limit KERNEL_CS to actual size */
26171+ unsigned long addr, limit;
26172+ struct desc_struct d;
26173+ int cpu;
26174+#else
26175+ pgd_t *pgd;
26176+ pud_t *pud;
26177+ pmd_t *pmd;
26178+ unsigned long addr, end;
26179+#endif
26180+#endif
26181+
26182+ gr_init_ebda();
26183+
26184+#ifdef CONFIG_PAX_KERNEXEC
26185+#ifdef CONFIG_X86_32
26186+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26187+ limit = (limit - 1UL) >> PAGE_SHIFT;
26188+
26189+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26190+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26191+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26192+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26193+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
26194+ }
26195+
26196+ /* PaX: make KERNEL_CS read-only */
26197+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26198+ if (!paravirt_enabled())
26199+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26200+/*
26201+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26202+ pgd = pgd_offset_k(addr);
26203+ pud = pud_offset(pgd, addr);
26204+ pmd = pmd_offset(pud, addr);
26205+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26206+ }
26207+*/
26208+#ifdef CONFIG_X86_PAE
26209+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26210+/*
26211+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26212+ pgd = pgd_offset_k(addr);
26213+ pud = pud_offset(pgd, addr);
26214+ pmd = pmd_offset(pud, addr);
26215+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26216+ }
26217+*/
26218+#endif
26219+
26220+#ifdef CONFIG_MODULES
26221+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26222+#endif
26223+
26224+#else
26225+ /* PaX: make kernel code/rodata read-only, rest non-executable */
26226+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26227+ pgd = pgd_offset_k(addr);
26228+ pud = pud_offset(pgd, addr);
26229+ pmd = pmd_offset(pud, addr);
26230+ if (!pmd_present(*pmd))
26231+ continue;
26232+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26233+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26234+ else
26235+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26236+ }
26237+
26238+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26239+ end = addr + KERNEL_IMAGE_SIZE;
26240+ for (; addr < end; addr += PMD_SIZE) {
26241+ pgd = pgd_offset_k(addr);
26242+ pud = pud_offset(pgd, addr);
26243+ pmd = pmd_offset(pud, addr);
26244+ if (!pmd_present(*pmd))
26245+ continue;
26246+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26247+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26248+ }
26249+#endif
26250+
26251+ flush_tlb_all();
26252+#endif
26253+
26254 free_init_pages("unused kernel memory",
26255 (unsigned long)(&__init_begin),
26256 (unsigned long)(&__init_end));
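
Under GRKERNSEC_KMEM the /dev/mem policy above shrinks from "any page below 1MB" to an allow-list: page 0 (the BIOS data area), the EBDA range computed at boot by gr_init_ebda(), tboot's fixed serial-log window, and the ISA hole, with everything else in low memory rejected. A compact standalone restatement of the resulting predicate, with the constants inlined from the code above (the iomem/page_is_ram checks that follow it are ignored):

/* Standalone restatement of devmem_is_allowed() under GRKERNSEC_KMEM.
 * ebda_start/ebda_end come from gr_init_ebda(); 0x60000-0x68000 is
 * tboot's log window; 0xa0000-0x100000 is the ISA hole. */
#include <stdbool.h>

#define PAGE_SHIFT 12

static bool kmem_devmem_allowed(unsigned long pagenr,
				unsigned long ebda_start,
				unsigned long ebda_end,
				bool tboot)
{
	if (!pagenr)					/* BDA */
		return true;
	if (pagenr >= ebda_start && pagenr < ebda_end)	/* EBDA */
		return true;
	if (tboot && pagenr >= (0x60000 >> PAGE_SHIFT) &&
	    pagenr < (0x68000 >> PAGE_SHIFT))		/* tboot log */
		return true;
	if (pagenr >= (0xa0000 >> PAGE_SHIFT) &&
	    pagenr < (0x100000 >> PAGE_SHIFT))		/* ISA hole */
		return true;
	return false;					/* rest of low 1MB */
}
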
26257diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26258index 11a5800..4bd9977 100644
26259--- a/arch/x86/mm/init_32.c
26260+++ b/arch/x86/mm/init_32.c
26261@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
26262 }
26263
26264 /*
26265- * Creates a middle page table and puts a pointer to it in the
26266- * given global directory entry. This only returns the gd entry
26267- * in non-PAE compilation mode, since the middle layer is folded.
26268- */
26269-static pmd_t * __init one_md_table_init(pgd_t *pgd)
26270-{
26271- pud_t *pud;
26272- pmd_t *pmd_table;
26273-
26274-#ifdef CONFIG_X86_PAE
26275- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26276- if (after_bootmem)
26277- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26278- else
26279- pmd_table = (pmd_t *)alloc_low_page();
26280- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26281- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26282- pud = pud_offset(pgd, 0);
26283- BUG_ON(pmd_table != pmd_offset(pud, 0));
26284-
26285- return pmd_table;
26286- }
26287-#endif
26288- pud = pud_offset(pgd, 0);
26289- pmd_table = pmd_offset(pud, 0);
26290-
26291- return pmd_table;
26292-}
26293-
26294-/*
26295 * Create a page table and place a pointer to it in a middle page
26296 * directory entry:
26297 */
26298@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26299 page_table = (pte_t *)alloc_low_page();
26300
26301 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26302+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26303+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26304+#else
26305 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26306+#endif
26307 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26308 }
26309
26310 return pte_offset_kernel(pmd, 0);
26311 }
26312
26313+static pmd_t * __init one_md_table_init(pgd_t *pgd)
26314+{
26315+ pud_t *pud;
26316+ pmd_t *pmd_table;
26317+
26318+ pud = pud_offset(pgd, 0);
26319+ pmd_table = pmd_offset(pud, 0);
26320+
26321+ return pmd_table;
26322+}
26323+
26324 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26325 {
26326 int pgd_idx = pgd_index(vaddr);
26327@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26328 int pgd_idx, pmd_idx;
26329 unsigned long vaddr;
26330 pgd_t *pgd;
26331+ pud_t *pud;
26332 pmd_t *pmd;
26333 pte_t *pte = NULL;
26334
26335@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26336 pgd = pgd_base + pgd_idx;
26337
26338 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26339- pmd = one_md_table_init(pgd);
26340- pmd = pmd + pmd_index(vaddr);
26341+ pud = pud_offset(pgd, vaddr);
26342+ pmd = pmd_offset(pud, vaddr);
26343+
26344+#ifdef CONFIG_X86_PAE
26345+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26346+#endif
26347+
26348 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26349 pmd++, pmd_idx++) {
26350 pte = page_table_kmap_check(one_page_table_init(pmd),
26351@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26352 }
26353 }
26354
26355-static inline int is_kernel_text(unsigned long addr)
26356+static inline int is_kernel_text(unsigned long start, unsigned long end)
26357 {
26358- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
26359- return 1;
26360- return 0;
26361+ if ((start > ktla_ktva((unsigned long)_etext) ||
26362+ end <= ktla_ktva((unsigned long)_stext)) &&
26363+ (start > ktla_ktva((unsigned long)_einittext) ||
26364+ end <= ktla_ktva((unsigned long)_sinittext)) &&
26365+
26366+#ifdef CONFIG_ACPI_SLEEP
26367+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26368+#endif
26369+
26370+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26371+ return 0;
26372+ return 1;
26373 }
26374
26375 /*
26376@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
26377 unsigned long last_map_addr = end;
26378 unsigned long start_pfn, end_pfn;
26379 pgd_t *pgd_base = swapper_pg_dir;
26380- int pgd_idx, pmd_idx, pte_ofs;
26381+ unsigned int pgd_idx, pmd_idx, pte_ofs;
26382 unsigned long pfn;
26383 pgd_t *pgd;
26384+ pud_t *pud;
26385 pmd_t *pmd;
26386 pte_t *pte;
26387 unsigned pages_2m, pages_4k;
26388@@ -280,8 +281,13 @@ repeat:
26389 pfn = start_pfn;
26390 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26391 pgd = pgd_base + pgd_idx;
26392- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26393- pmd = one_md_table_init(pgd);
26394+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26395+ pud = pud_offset(pgd, 0);
26396+ pmd = pmd_offset(pud, 0);
26397+
26398+#ifdef CONFIG_X86_PAE
26399+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26400+#endif
26401
26402 if (pfn >= end_pfn)
26403 continue;
26404@@ -293,14 +299,13 @@ repeat:
26405 #endif
26406 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26407 pmd++, pmd_idx++) {
26408- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26409+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26410
26411 /*
26412 * Map with big pages if possible, otherwise
26413 * create normal page tables:
26414 */
26415 if (use_pse) {
26416- unsigned int addr2;
26417 pgprot_t prot = PAGE_KERNEL_LARGE;
26418 /*
26419 * first pass will use the same initial
26420@@ -310,11 +315,7 @@ repeat:
26421 __pgprot(PTE_IDENT_ATTR |
26422 _PAGE_PSE);
26423
26424- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26425- PAGE_OFFSET + PAGE_SIZE-1;
26426-
26427- if (is_kernel_text(addr) ||
26428- is_kernel_text(addr2))
26429+ if (is_kernel_text(address, address + PMD_SIZE))
26430 prot = PAGE_KERNEL_LARGE_EXEC;
26431
26432 pages_2m++;
26433@@ -331,7 +332,7 @@ repeat:
26434 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26435 pte += pte_ofs;
26436 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26437- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26438+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26439 pgprot_t prot = PAGE_KERNEL;
26440 /*
26441 * first pass will use the same initial
26442@@ -339,7 +340,7 @@ repeat:
26443 */
26444 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26445
26446- if (is_kernel_text(addr))
26447+ if (is_kernel_text(address, address + PAGE_SIZE))
26448 prot = PAGE_KERNEL_EXEC;
26449
26450 pages_4k++;
26451@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
26452
26453 pud = pud_offset(pgd, va);
26454 pmd = pmd_offset(pud, va);
26455- if (!pmd_present(*pmd))
26456+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
26457 break;
26458
26459 pte = pte_offset_kernel(pmd, va);
26460@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
26461
26462 static void __init pagetable_init(void)
26463 {
26464- pgd_t *pgd_base = swapper_pg_dir;
26465-
26466- permanent_kmaps_init(pgd_base);
26467+ permanent_kmaps_init(swapper_pg_dir);
26468 }
26469
26470-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26471+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26472 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26473
26474 /* user-defined highmem size */
26475@@ -731,6 +730,12 @@ void __init mem_init(void)
26476
26477 pci_iommu_alloc();
26478
26479+#ifdef CONFIG_PAX_PER_CPU_PGD
26480+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26481+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26482+ KERNEL_PGD_PTRS);
26483+#endif
26484+
26485 #ifdef CONFIG_FLATMEM
26486 BUG_ON(!mem_map);
26487 #endif
26488@@ -757,7 +762,7 @@ void __init mem_init(void)
26489 reservedpages++;
26490
26491 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26492- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26493+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26494 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26495
26496 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26497@@ -798,10 +803,10 @@ void __init mem_init(void)
26498 ((unsigned long)&__init_end -
26499 (unsigned long)&__init_begin) >> 10,
26500
26501- (unsigned long)&_etext, (unsigned long)&_edata,
26502- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26503+ (unsigned long)&_sdata, (unsigned long)&_edata,
26504+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26505
26506- (unsigned long)&_text, (unsigned long)&_etext,
26507+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26508 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26509
26510 /*
26511@@ -879,6 +884,7 @@ void set_kernel_text_rw(void)
26512 if (!kernel_set_to_readonly)
26513 return;
26514
26515+ start = ktla_ktva(start);
26516 pr_debug("Set kernel text: %lx - %lx for read write\n",
26517 start, start+size);
26518
26519@@ -893,6 +899,7 @@ void set_kernel_text_ro(void)
26520 if (!kernel_set_to_readonly)
26521 return;
26522
26523+ start = ktla_ktva(start);
26524 pr_debug("Set kernel text: %lx - %lx for read only\n",
26525 start, start+size);
26526
26527@@ -921,6 +928,7 @@ void mark_rodata_ro(void)
26528 unsigned long start = PFN_ALIGN(_text);
26529 unsigned long size = PFN_ALIGN(_etext) - start;
26530
26531+ start = ktla_ktva(start);
26532 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26533 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26534 size >> 10);
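
The rewritten is_kernel_text() takes a [start, end) range and reports whether it touches any region that must stay executable: kernel text, init text, the ACPI wakeup trampoline, and the low BIOS area. Each "start > r_end || end <= r_start" clause is the negation of a plain interval-overlap test, which the small helper below makes explicit:

/* is_kernel_text() above returns 1 iff [start,end) overlaps one of the
 * protected ranges.  Each "start > r_end || end <= r_start" clause is
 * the negation of this test: */
static inline int overlaps(unsigned long start, unsigned long end,
			   unsigned long r_start, unsigned long r_end)
{
	/* Note the upper bound is treated inclusively (start == r_end
	 * still counts as an overlap) -- a conservative choice. */
	return start <= r_end && r_start < end;
}
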
26535diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26536index ce42da7..678a54e 100644
26537--- a/arch/x86/mm/init_64.c
26538+++ b/arch/x86/mm/init_64.c
26539@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
26540 * around without checking the pgd every time.
26541 */
26542
26543-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
26544+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
26545 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26546
26547 int force_personality32;
26548@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26549
26550 for (address = start; address <= end; address += PGDIR_SIZE) {
26551 const pgd_t *pgd_ref = pgd_offset_k(address);
26552+
26553+#ifdef CONFIG_PAX_PER_CPU_PGD
26554+ unsigned long cpu;
26555+#else
26556 struct page *page;
26557+#endif
26558
26559 if (pgd_none(*pgd_ref))
26560 continue;
26561
26562 spin_lock(&pgd_lock);
26563+
26564+#ifdef CONFIG_PAX_PER_CPU_PGD
26565+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26566+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
26567+#else
26568 list_for_each_entry(page, &pgd_list, lru) {
26569 pgd_t *pgd;
26570 spinlock_t *pgt_lock;
26571@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26572 /* the pgt_lock only for Xen */
26573 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
26574 spin_lock(pgt_lock);
26575+#endif
26576
26577 if (pgd_none(*pgd))
26578 set_pgd(pgd, *pgd_ref);
26579@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
26580 BUG_ON(pgd_page_vaddr(*pgd)
26581 != pgd_page_vaddr(*pgd_ref));
26582
26583+#ifndef CONFIG_PAX_PER_CPU_PGD
26584 spin_unlock(pgt_lock);
26585+#endif
26586+
26587 }
26588 spin_unlock(&pgd_lock);
26589 }
26590@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
26591 {
26592 if (pgd_none(*pgd)) {
26593 pud_t *pud = (pud_t *)spp_getpage();
26594- pgd_populate(&init_mm, pgd, pud);
26595+ pgd_populate_kernel(&init_mm, pgd, pud);
26596 if (pud != pud_offset(pgd, 0))
26597 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
26598 pud, pud_offset(pgd, 0));
26599@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
26600 {
26601 if (pud_none(*pud)) {
26602 pmd_t *pmd = (pmd_t *) spp_getpage();
26603- pud_populate(&init_mm, pud, pmd);
26604+ pud_populate_kernel(&init_mm, pud, pmd);
26605 if (pmd != pmd_offset(pud, 0))
26606 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
26607 pmd, pmd_offset(pud, 0));
26608@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26609 pmd = fill_pmd(pud, vaddr);
26610 pte = fill_pte(pmd, vaddr);
26611
26612+ pax_open_kernel();
26613 set_pte(pte, new_pte);
26614+ pax_close_kernel();
26615
26616 /*
26617 * It's enough to flush this one mapping.
26618@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26619 pgd = pgd_offset_k((unsigned long)__va(phys));
26620 if (pgd_none(*pgd)) {
26621 pud = (pud_t *) spp_getpage();
26622- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26623- _PAGE_USER));
26624+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26625 }
26626 pud = pud_offset(pgd, (unsigned long)__va(phys));
26627 if (pud_none(*pud)) {
26628 pmd = (pmd_t *) spp_getpage();
26629- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26630- _PAGE_USER));
26631+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26632 }
26633 pmd = pmd_offset(pud, phys);
26634 BUG_ON(!pmd_none(*pmd));
26635@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
26636 if (pfn >= pgt_buf_top)
26637 panic("alloc_low_page: ran out of memory");
26638
26639- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26640+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
26641 clear_page(adr);
26642 *phys = pfn * PAGE_SIZE;
26643 return adr;
26644@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
26645
26646 phys = __pa(virt);
26647 left = phys & (PAGE_SIZE - 1);
26648- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26649+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
26650 adr = (void *)(((unsigned long)adr) | left);
26651
26652 return adr;
26653@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
26654 unmap_low_page(pmd);
26655
26656 spin_lock(&init_mm.page_table_lock);
26657- pud_populate(&init_mm, pud, __va(pmd_phys));
26658+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
26659 spin_unlock(&init_mm.page_table_lock);
26660 }
26661 __flush_tlb_all();
26662@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
26663 unmap_low_page(pud);
26664
26665 spin_lock(&init_mm.page_table_lock);
26666- pgd_populate(&init_mm, pgd, __va(pud_phys));
26667+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
26668 spin_unlock(&init_mm.page_table_lock);
26669 pgd_changed = true;
26670 }
26671@@ -691,6 +705,12 @@ void __init mem_init(void)
26672
26673 pci_iommu_alloc();
26674
26675+#ifdef CONFIG_PAX_PER_CPU_PGD
26676+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26677+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26678+ KERNEL_PGD_PTRS);
26679+#endif
26680+
26681 /* clear_bss() already clear the empty_zero_page */
26682
26683 reservedpages = 0;
26684@@ -854,8 +874,8 @@ int kern_addr_valid(unsigned long addr)
26685 static struct vm_area_struct gate_vma = {
26686 .vm_start = VSYSCALL_START,
26687 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26688- .vm_page_prot = PAGE_READONLY_EXEC,
26689- .vm_flags = VM_READ | VM_EXEC
26690+ .vm_page_prot = PAGE_READONLY,
26691+ .vm_flags = VM_READ
26692 };
26693
26694 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26695@@ -889,7 +909,7 @@ int in_gate_area_no_mm(unsigned long addr)
26696
26697 const char *arch_vma_name(struct vm_area_struct *vma)
26698 {
26699- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26700+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26701 return "[vdso]";
26702 if (vma == &gate_vma)
26703 return "[vsyscall]";
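
Both mem_init() variants now seed the kernel half of every per-cpu PGD from swapper_pg_dir via clone_pgd_range(), and sync_global_pgds() walks CPUs instead of pgd_list when PAX_PER_CPU_PGD is enabled. clone_pgd_range() itself is nothing more than a typed memcpy over top-level entries; its stock definition is equivalent to:

/* Equivalent of the kernel's clone_pgd_range(): copy `count` top-level
 * entries from src to dst.  The callers above copy the slots from
 * KERNEL_PGD_BOUNDARY upward -- the kernel half that every per-cpu PGD
 * must share with swapper_pg_dir. */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
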
26704diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26705index 7b179b4..6bd1777 100644
26706--- a/arch/x86/mm/iomap_32.c
26707+++ b/arch/x86/mm/iomap_32.c
26708@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
26709 type = kmap_atomic_idx_push();
26710 idx = type + KM_TYPE_NR * smp_processor_id();
26711 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26712+
26713+ pax_open_kernel();
26714 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26715+ pax_close_kernel();
26716+
26717 arch_flush_lazy_mmu_mode();
26718
26719 return (void *)vaddr;
26720diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26721index 78fe3f1..2f9433c 100644
26722--- a/arch/x86/mm/ioremap.c
26723+++ b/arch/x86/mm/ioremap.c
26724@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26725 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
26726 int is_ram = page_is_ram(pfn);
26727
26728- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26729+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26730 return NULL;
26731 WARN_ON_ONCE(is_ram);
26732 }
26733@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
26734 *
26735 * Caller must ensure there is only one unmapping for the same pointer.
26736 */
26737-void iounmap(volatile void __iomem *addr)
26738+void iounmap(const volatile void __iomem *addr)
26739 {
26740 struct vm_struct *p, *o;
26741
26742@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26743
26744 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26745 if (page_is_ram(start >> PAGE_SHIFT))
26746+#ifdef CONFIG_HIGHMEM
26747+ if ((start >> PAGE_SHIFT) < max_low_pfn)
26748+#endif
26749 return __va(phys);
26750
26751 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
26752@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
26753 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26754
26755 static __initdata int after_paging_init;
26756-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26757+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26758
26759 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26760 {
26761@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
26762 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26763
26764 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26765- memset(bm_pte, 0, sizeof(bm_pte));
26766- pmd_populate_kernel(&init_mm, pmd, bm_pte);
26767+ pmd_populate_user(&init_mm, pmd, bm_pte);
26768
26769 /*
26770 * The boot-ioremap range spans multiple pmds, for which
26771diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26772index d87dd6d..bf3fa66 100644
26773--- a/arch/x86/mm/kmemcheck/kmemcheck.c
26774+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26775@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26776 * memory (e.g. tracked pages)? For now, we need this to avoid
26777 * invoking kmemcheck for PnP BIOS calls.
26778 */
26779- if (regs->flags & X86_VM_MASK)
26780+ if (v8086_mode(regs))
26781 return false;
26782- if (regs->cs != __KERNEL_CS)
26783+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26784 return false;
26785
26786 pte = kmemcheck_pte_lookup(address);
26787diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26788index 845df68..1d8d29f 100644
26789--- a/arch/x86/mm/mmap.c
26790+++ b/arch/x86/mm/mmap.c
26791@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
26792 * Leave an at least ~128 MB hole with possible stack randomization.
26793 */
26794 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26795-#define MAX_GAP (TASK_SIZE/6*5)
26796+#define MAX_GAP (pax_task_size/6*5)
26797
26798 static int mmap_is_legacy(void)
26799 {
26800@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
26801 return rnd << PAGE_SHIFT;
26802 }
26803
26804-static unsigned long mmap_base(void)
26805+static unsigned long mmap_base(struct mm_struct *mm)
26806 {
26807 unsigned long gap = rlimit(RLIMIT_STACK);
26808+ unsigned long pax_task_size = TASK_SIZE;
26809+
26810+#ifdef CONFIG_PAX_SEGMEXEC
26811+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26812+ pax_task_size = SEGMEXEC_TASK_SIZE;
26813+#endif
26814
26815 if (gap < MIN_GAP)
26816 gap = MIN_GAP;
26817 else if (gap > MAX_GAP)
26818 gap = MAX_GAP;
26819
26820- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26821+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26822 }
26823
26824 /*
26825 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26826 * does, but not when emulating X86_32
26827 */
26828-static unsigned long mmap_legacy_base(void)
26829+static unsigned long mmap_legacy_base(struct mm_struct *mm)
26830 {
26831- if (mmap_is_ia32())
26832+ if (mmap_is_ia32()) {
26833+
26834+#ifdef CONFIG_PAX_SEGMEXEC
26835+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26836+ return SEGMEXEC_TASK_UNMAPPED_BASE;
26837+ else
26838+#endif
26839+
26840 return TASK_UNMAPPED_BASE;
26841- else
26842+ } else
26843 return TASK_UNMAPPED_BASE + mmap_rnd();
26844 }
26845
26846@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
26847 void arch_pick_mmap_layout(struct mm_struct *mm)
26848 {
26849 if (mmap_is_legacy()) {
26850- mm->mmap_base = mmap_legacy_base();
26851+ mm->mmap_base = mmap_legacy_base(mm);
26852+
26853+#ifdef CONFIG_PAX_RANDMMAP
26854+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26855+ mm->mmap_base += mm->delta_mmap;
26856+#endif
26857+
26858 mm->get_unmapped_area = arch_get_unmapped_area;
26859 mm->unmap_area = arch_unmap_area;
26860 } else {
26861- mm->mmap_base = mmap_base();
26862+ mm->mmap_base = mmap_base(mm);
26863+
26864+#ifdef CONFIG_PAX_RANDMMAP
26865+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26866+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
26867+#endif
26868+
26869 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
26870 mm->unmap_area = arch_unmap_area_topdown;
26871 }
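
arch_pick_mmap_layout() now derives both layouts from the (possibly SEGMEXEC-reduced) task size and then applies the RANDMMAP deltas: the legacy base shifts up by delta_mmap, the top-down base shifts down by delta_mmap + delta_stack. The top-down base itself is just a clamp and a subtraction; a runnable restatement with made-up inputs:

/* Standalone rendering of the top-down base computation above.
 * task_size, stack_rlimit and rnd stand in for the values the kernel
 * derives from SEGMEXEC state, RLIMIT_STACK and mmap_rnd(). */
#include <stdio.h>

#define PAGE_MASK (~0xfffUL)
#define PAGE_ALIGN(x) (((x) + 0xfffUL) & PAGE_MASK)

static unsigned long mmap_base_model(unsigned long task_size,
				     unsigned long stack_rlimit,
				     unsigned long rnd)
{
	unsigned long min_gap = 128UL * 1024 * 1024;	/* + stack rand in the kernel */
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long gap = stack_rlimit;

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;
	return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
	/* 3GB task size, 8MB stack limit, 16MB randomization */
	printf("%#lx\n", mmap_base_model(0xc0000000UL, 8UL << 20, 16UL << 20));
	return 0;
}
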
26872diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
26873index dc0b727..f612039 100644
26874--- a/arch/x86/mm/mmio-mod.c
26875+++ b/arch/x86/mm/mmio-mod.c
26876@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
26877 break;
26878 default:
26879 {
26880- unsigned char *ip = (unsigned char *)instptr;
26881+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
26882 my_trace->opcode = MMIO_UNKNOWN_OP;
26883 my_trace->width = 0;
26884 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
26885@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
26886 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26887 void __iomem *addr)
26888 {
26889- static atomic_t next_id;
26890+ static atomic_unchecked_t next_id;
26891 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
26892 /* These are page-unaligned. */
26893 struct mmiotrace_map map = {
26894@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26895 .private = trace
26896 },
26897 .phys = offset,
26898- .id = atomic_inc_return(&next_id)
26899+ .id = atomic_inc_return_unchecked(&next_id)
26900 };
26901 map.map_id = trace->id;
26902
26903@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
26904 ioremap_trace_core(offset, size, addr);
26905 }
26906
26907-static void iounmap_trace_core(volatile void __iomem *addr)
26908+static void iounmap_trace_core(const volatile void __iomem *addr)
26909 {
26910 struct mmiotrace_map map = {
26911 .phys = 0,
26912@@ -328,7 +328,7 @@ not_enabled:
26913 }
26914 }
26915
26916-void mmiotrace_iounmap(volatile void __iomem *addr)
26917+void mmiotrace_iounmap(const volatile void __iomem *addr)
26918 {
26919 might_sleep();
26920 if (is_enabled()) /* recheck and proper locking in *_core() */
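
next_id becomes an atomic_unchecked_t because grsecurity's PAX_REFCOUNT hardening traps signed overflow of ordinary atomic_t operations; a trace id that may wrap harmlessly has to opt out through the *_unchecked variants this patch introduces elsewhere. The usage shape:

/* Usage shape: the id may wrap without harm, so it uses the _unchecked
 * type and ops, telling PAX_REFCOUNT not to treat an overflow of this
 * counter as a refcount bug. */
static atomic_unchecked_t next_id;

static int new_trace_id(void)
{
	return atomic_inc_return_unchecked(&next_id);
}
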
26921diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
26922index b008656..773eac2 100644
26923--- a/arch/x86/mm/pageattr-test.c
26924+++ b/arch/x86/mm/pageattr-test.c
26925@@ -36,7 +36,7 @@ enum {
26926
26927 static int pte_testbit(pte_t pte)
26928 {
26929- return pte_flags(pte) & _PAGE_UNUSED1;
26930+ return pte_flags(pte) & _PAGE_CPA_TEST;
26931 }
26932
26933 struct split_state {
26934diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
26935index a718e0d..77419bc 100644
26936--- a/arch/x86/mm/pageattr.c
26937+++ b/arch/x86/mm/pageattr.c
26938@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26939 */
26940 #ifdef CONFIG_PCI_BIOS
26941 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
26942- pgprot_val(forbidden) |= _PAGE_NX;
26943+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26944 #endif
26945
26946 /*
26947@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26948 * Does not cover __inittext since that is gone later on. On
26949 * 64bit we do not enforce !NX on the low mapping
26950 */
26951- if (within(address, (unsigned long)_text, (unsigned long)_etext))
26952- pgprot_val(forbidden) |= _PAGE_NX;
26953+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
26954+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26955
26956+#ifdef CONFIG_DEBUG_RODATA
26957 /*
26958 * The .rodata section needs to be read-only. Using the pfn
26959 * catches all aliases.
26960@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26961 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
26962 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
26963 pgprot_val(forbidden) |= _PAGE_RW;
26964+#endif
26965
26966 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
26967 /*
26968@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26969 }
26970 #endif
26971
26972+#ifdef CONFIG_PAX_KERNEXEC
26973+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
26974+ pgprot_val(forbidden) |= _PAGE_RW;
26975+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26976+ }
26977+#endif
26978+
26979 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
26980
26981 return prot;
26982@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
26983 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
26984 {
26985 /* change init_mm */
26986+ pax_open_kernel();
26987 set_pte_atomic(kpte, pte);
26988+
26989 #ifdef CONFIG_X86_32
26990 if (!SHARED_KERNEL_PMD) {
26991+
26992+#ifdef CONFIG_PAX_PER_CPU_PGD
26993+ unsigned long cpu;
26994+#else
26995 struct page *page;
26996+#endif
26997
26998+#ifdef CONFIG_PAX_PER_CPU_PGD
26999+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27000+ pgd_t *pgd = get_cpu_pgd(cpu);
27001+#else
27002 list_for_each_entry(page, &pgd_list, lru) {
27003- pgd_t *pgd;
27004+ pgd_t *pgd = (pgd_t *)page_address(page);
27005+#endif
27006+
27007 pud_t *pud;
27008 pmd_t *pmd;
27009
27010- pgd = (pgd_t *)page_address(page) + pgd_index(address);
27011+ pgd += pgd_index(address);
27012 pud = pud_offset(pgd, address);
27013 pmd = pmd_offset(pud, address);
27014 set_pte_atomic((pte_t *)pmd, pte);
27015 }
27016 }
27017 #endif
27018+ pax_close_kernel();
27019 }
27020
27021 static int
27022diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
27023index 0eb572e..92f5c1e 100644
27024--- a/arch/x86/mm/pat.c
27025+++ b/arch/x86/mm/pat.c
27026@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
27027
27028 if (!entry) {
27029 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
27030- current->comm, current->pid, start, end - 1);
27031+ current->comm, task_pid_nr(current), start, end - 1);
27032 return -EINVAL;
27033 }
27034
27035@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27036
27037 while (cursor < to) {
27038 if (!devmem_is_allowed(pfn)) {
27039- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
27040- current->comm, from, to - 1);
27041+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
27042+ current->comm, from, to - 1, cursor);
27043 return 0;
27044 }
27045 cursor += PAGE_SIZE;
27046@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
27047 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
27048 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
27049 "for [mem %#010Lx-%#010Lx]\n",
27050- current->comm, current->pid,
27051+ current->comm, task_pid_nr(current),
27052 cattr_name(flags),
27053 base, (unsigned long long)(base + size-1));
27054 return -EINVAL;
27055@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27056 flags = lookup_memtype(paddr);
27057 if (want_flags != flags) {
27058 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
27059- current->comm, current->pid,
27060+ current->comm, task_pid_nr(current),
27061 cattr_name(want_flags),
27062 (unsigned long long)paddr,
27063 (unsigned long long)(paddr + size - 1),
27064@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27065 free_memtype(paddr, paddr + size);
27066 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
27067 " for [mem %#010Lx-%#010Lx], got %s\n",
27068- current->comm, current->pid,
27069+ current->comm, task_pid_nr(current),
27070 cattr_name(want_flags),
27071 (unsigned long long)paddr,
27072 (unsigned long long)(paddr + size - 1),
27073diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
27074index 9f0614d..92ae64a 100644
27075--- a/arch/x86/mm/pf_in.c
27076+++ b/arch/x86/mm/pf_in.c
27077@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
27078 int i;
27079 enum reason_type rv = OTHERS;
27080
27081- p = (unsigned char *)ins_addr;
27082+ p = (unsigned char *)ktla_ktva(ins_addr);
27083 p += skip_prefix(p, &prf);
27084 p += get_opcode(p, &opcode);
27085
27086@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
27087 struct prefix_bits prf;
27088 int i;
27089
27090- p = (unsigned char *)ins_addr;
27091+ p = (unsigned char *)ktla_ktva(ins_addr);
27092 p += skip_prefix(p, &prf);
27093 p += get_opcode(p, &opcode);
27094
27095@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
27096 struct prefix_bits prf;
27097 int i;
27098
27099- p = (unsigned char *)ins_addr;
27100+ p = (unsigned char *)ktla_ktva(ins_addr);
27101 p += skip_prefix(p, &prf);
27102 p += get_opcode(p, &opcode);
27103
27104@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
27105 struct prefix_bits prf;
27106 int i;
27107
27108- p = (unsigned char *)ins_addr;
27109+ p = (unsigned char *)ktla_ktva(ins_addr);
27110 p += skip_prefix(p, &prf);
27111 p += get_opcode(p, &opcode);
27112 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
27113@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
27114 struct prefix_bits prf;
27115 int i;
27116
27117- p = (unsigned char *)ins_addr;
27118+ p = (unsigned char *)ktla_ktva(ins_addr);
27119 p += skip_prefix(p, &prf);
27120 p += get_opcode(p, &opcode);
27121 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
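
Every instruction decode in pf_in.c (and the opcode peek in mmio-mod.c above) now converts the faulting ip with ktla_ktva() before dereferencing it. Under 32-bit KERNEXEC the kernel executes its text through a KERNEL_CS with a shifted base, so the address the CPU reports differs by a constant from the address at which the same bytes are readable through the data segment; on configurations without that split the macro is an identity. A sketch of the shape only — __text_mapping_offset is a hypothetical stand-in for the real config-dependent constant:

/* Sketch only: the exact offset is config-dependent; what matters is
 * that decode helpers apply it before reading instruction bytes. */
#ifdef CONFIG_PAX_KERNEXEC
extern unsigned long __text_mapping_offset;	/* hypothetical name */
#define ktla_ktva(addr)	((addr) + __text_mapping_offset)
#else
#define ktla_ktva(addr)	(addr)
#endif
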
27122diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
27123index 8573b83..4f3ed7e 100644
27124--- a/arch/x86/mm/pgtable.c
27125+++ b/arch/x86/mm/pgtable.c
27126@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
27127 list_del(&page->lru);
27128 }
27129
27130-#define UNSHARED_PTRS_PER_PGD \
27131- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27132+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27133+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
27134
27135+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
27136+{
27137+ unsigned int count = USER_PGD_PTRS;
27138
27139+ while (count--)
27140+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
27141+}
27142+#endif
27143+
27144+#ifdef CONFIG_PAX_PER_CPU_PGD
27145+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
27146+{
27147+ unsigned int count = USER_PGD_PTRS;
27148+
27149+ while (count--) {
27150+ pgd_t pgd;
27151+
27152+#ifdef CONFIG_X86_64
27153+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
27154+#else
27155+ pgd = *src++;
27156+#endif
27157+
27158+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27159+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
27160+#endif
27161+
27162+ *dst++ = pgd;
27163+ }
27164+
27165+}
27166+#endif
27167+
27168+#ifdef CONFIG_X86_64
27169+#define pxd_t pud_t
27170+#define pyd_t pgd_t
27171+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
27172+#define pxd_free(mm, pud) pud_free((mm), (pud))
27173+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
27174+#define pyd_offset(mm, address) pgd_offset((mm), (address))
27175+#define PYD_SIZE PGDIR_SIZE
27176+#else
27177+#define pxd_t pmd_t
27178+#define pyd_t pud_t
27179+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
27180+#define pxd_free(mm, pud) pmd_free((mm), (pud))
27181+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
27182+#define pyd_offset(mm, address) pud_offset((mm), (address))
27183+#define PYD_SIZE PUD_SIZE
27184+#endif
27185+
27186+#ifdef CONFIG_PAX_PER_CPU_PGD
27187+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
27188+static inline void pgd_dtor(pgd_t *pgd) {}
27189+#else
27190 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
27191 {
27192 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
27193@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
27194 pgd_list_del(pgd);
27195 spin_unlock(&pgd_lock);
27196 }
27197+#endif
27198
27199 /*
27200 * List of all pgd's needed for non-PAE so it can invalidate entries
27201@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
27202 * -- wli
27203 */
27204
27205-#ifdef CONFIG_X86_PAE
27206+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27207 /*
27208 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
27209 * updating the top-level pagetable entries to guarantee the
27210@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
27211 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
27212 * and initialize the kernel pmds here.
27213 */
27214-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
27215+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27216
27217 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27218 {
27219@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27220 */
27221 flush_tlb_mm(mm);
27222 }
27223+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
27224+#define PREALLOCATED_PXDS USER_PGD_PTRS
27225 #else /* !CONFIG_X86_PAE */
27226
27227 /* No need to prepopulate any pagetable entries in non-PAE modes. */
27228-#define PREALLOCATED_PMDS 0
27229+#define PREALLOCATED_PXDS 0
27230
27231 #endif /* CONFIG_X86_PAE */
27232
27233-static void free_pmds(pmd_t *pmds[])
27234+static void free_pxds(pxd_t *pxds[])
27235 {
27236 int i;
27237
27238- for(i = 0; i < PREALLOCATED_PMDS; i++)
27239- if (pmds[i])
27240- free_page((unsigned long)pmds[i]);
27241+ for(i = 0; i < PREALLOCATED_PXDS; i++)
27242+ if (pxds[i])
27243+ free_page((unsigned long)pxds[i]);
27244 }
27245
27246-static int preallocate_pmds(pmd_t *pmds[])
27247+static int preallocate_pxds(pxd_t *pxds[])
27248 {
27249 int i;
27250 bool failed = false;
27251
27252- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27253- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
27254- if (pmd == NULL)
27255+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27256+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
27257+ if (pxd == NULL)
27258 failed = true;
27259- pmds[i] = pmd;
27260+ pxds[i] = pxd;
27261 }
27262
27263 if (failed) {
27264- free_pmds(pmds);
27265+ free_pxds(pxds);
27266 return -ENOMEM;
27267 }
27268
27269@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
27270 * preallocate which never got a corresponding vma will need to be
27271 * freed manually.
27272 */
27273-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27274+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27275 {
27276 int i;
27277
27278- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27279+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27280 pgd_t pgd = pgdp[i];
27281
27282 if (pgd_val(pgd) != 0) {
27283- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27284+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27285
27286- pgdp[i] = native_make_pgd(0);
27287+ set_pgd(pgdp + i, native_make_pgd(0));
27288
27289- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27290- pmd_free(mm, pmd);
27291+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27292+ pxd_free(mm, pxd);
27293 }
27294 }
27295 }
27296
27297-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27298+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27299 {
27300- pud_t *pud;
27301+ pyd_t *pyd;
27302 unsigned long addr;
27303 int i;
27304
27305- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27306+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27307 return;
27308
27309- pud = pud_offset(pgd, 0);
27310+#ifdef CONFIG_X86_64
27311+ pyd = pyd_offset(mm, 0L);
27312+#else
27313+ pyd = pyd_offset(pgd, 0L);
27314+#endif
27315
27316- for (addr = i = 0; i < PREALLOCATED_PMDS;
27317- i++, pud++, addr += PUD_SIZE) {
27318- pmd_t *pmd = pmds[i];
27319+ for (addr = i = 0; i < PREALLOCATED_PXDS;
27320+ i++, pyd++, addr += PYD_SIZE) {
27321+ pxd_t *pxd = pxds[i];
27322
27323 if (i >= KERNEL_PGD_BOUNDARY)
27324- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27325- sizeof(pmd_t) * PTRS_PER_PMD);
27326+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27327+ sizeof(pxd_t) * PTRS_PER_PMD);
27328
27329- pud_populate(mm, pud, pmd);
27330+ pyd_populate(mm, pyd, pxd);
27331 }
27332 }
27333
27334 pgd_t *pgd_alloc(struct mm_struct *mm)
27335 {
27336 pgd_t *pgd;
27337- pmd_t *pmds[PREALLOCATED_PMDS];
27338+ pxd_t *pxds[PREALLOCATED_PXDS];
27339
27340 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27341
27342@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27343
27344 mm->pgd = pgd;
27345
27346- if (preallocate_pmds(pmds) != 0)
27347+ if (preallocate_pxds(pxds) != 0)
27348 goto out_free_pgd;
27349
27350 if (paravirt_pgd_alloc(mm) != 0)
27351- goto out_free_pmds;
27352+ goto out_free_pxds;
27353
27354 /*
27355 * Make sure that pre-populating the pmds is atomic with
27356@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27357 spin_lock(&pgd_lock);
27358
27359 pgd_ctor(mm, pgd);
27360- pgd_prepopulate_pmd(mm, pgd, pmds);
27361+ pgd_prepopulate_pxd(mm, pgd, pxds);
27362
27363 spin_unlock(&pgd_lock);
27364
27365 return pgd;
27366
27367-out_free_pmds:
27368- free_pmds(pmds);
27369+out_free_pxds:
27370+ free_pxds(pxds);
27371 out_free_pgd:
27372 free_page((unsigned long)pgd);
27373 out:
27374@@ -295,7 +356,7 @@ out:
27375
27376 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27377 {
27378- pgd_mop_up_pmds(mm, pgd);
27379+ pgd_mop_up_pxds(mm, pgd);
27380 pgd_dtor(pgd);
27381 paravirt_pgd_free(mm, pgd);
27382 free_page((unsigned long)pgd);
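
The pgtable.c changes above merge the 32-bit (pmd under pud) and 64-bit (pud under pgd) preallocation paths behind the pxd/pyd macro aliases, and under UDEREF keep a shadow copy of the user PGD slots with _PAGE_USER stripped and NX forced. A minimal user-space sketch of that shadow transform, assuming illustrative x86 bit positions and a stand-in USER_PGD_PTRS (the kernel additionally filters NX through __supported_pte_mask, elided here):

#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT	(1ULL << 0)
#define PAGE_USER	(1ULL << 2)
#define PAGE_NX		(1ULL << 63)
#define USER_PGD_PTRS	4	/* stand-in; the real count depends on the VM split */

/* mirrors __shadow_user_pgds(): strip _PAGE_USER, force NX */
static void shadow_user_pgds(uint64_t *dst, const uint64_t *src)
{
	unsigned int count = USER_PGD_PTRS;

	while (count--)
		*dst++ = (*src++ | PAGE_NX) & ~PAGE_USER;
}

int main(void)
{
	uint64_t user[USER_PGD_PTRS] = {
		0x1000 | PAGE_PRESENT | PAGE_USER,
		0x2000 | PAGE_PRESENT | PAGE_USER,
		0, 0,
	};
	uint64_t shadow[USER_PGD_PTRS];
	unsigned int i;

	shadow_user_pgds(shadow, user);
	for (i = 0; i < USER_PGD_PTRS; i++)
		printf("pgd[%u]: %#llx -> %#llx\n", i,
		       (unsigned long long)user[i],
		       (unsigned long long)shadow[i]);
	return 0;
}
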
27383diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27384index a69bcb8..19068ab 100644
27385--- a/arch/x86/mm/pgtable_32.c
27386+++ b/arch/x86/mm/pgtable_32.c
27387@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27388 return;
27389 }
27390 pte = pte_offset_kernel(pmd, vaddr);
27391+
27392+ pax_open_kernel();
27393 if (pte_val(pteval))
27394 set_pte_at(&init_mm, vaddr, pte, pteval);
27395 else
27396 pte_clear(&init_mm, vaddr, pte);
27397+ pax_close_kernel();
27398
27399 /*
27400 * It's enough to flush this one mapping.
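
This pax_open_kernel()/pax_close_kernel() pairing brackets the one legitimate writer of otherwise read-only kernel page tables, and the same idiom recurs throughout the rest of the patch. A toy model of the contract, assuming a boolean window in place of the real mechanism (PaX toggles hardware write protection, roughly CR0.WP, rather than asserting):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool write_window_open;
static unsigned long protected_pte;	/* stands in for a read-only PTE */

static void open_kernel(void)  { write_window_open = true;  }	/* pax_open_kernel()  */
static void close_kernel(void) { write_window_open = false; }	/* pax_close_kernel() */

static void set_pte(unsigned long val)
{
	assert(write_window_open);	/* a stray writer would fault here */
	protected_pte = val;
}

int main(void)
{
	open_kernel();
	set_pte(0x1000 | 0x1);		/* frame | present, as in set_pte_at() */
	close_kernel();
	printf("pte=%#lx\n", protected_pte);
	return 0;
}
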
27401diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27402index 410531d..0f16030 100644
27403--- a/arch/x86/mm/setup_nx.c
27404+++ b/arch/x86/mm/setup_nx.c
27405@@ -5,8 +5,10 @@
27406 #include <asm/pgtable.h>
27407 #include <asm/proto.h>
27408
27409+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27410 static int disable_nx __cpuinitdata;
27411
27412+#ifndef CONFIG_PAX_PAGEEXEC
27413 /*
27414 * noexec = on|off
27415 *
27416@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
27417 return 0;
27418 }
27419 early_param("noexec", noexec_setup);
27420+#endif
27421+
27422+#endif
27423
27424 void __cpuinit x86_configure_nx(void)
27425 {
27426+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27427 if (cpu_has_nx && !disable_nx)
27428 __supported_pte_mask |= _PAGE_NX;
27429 else
27430+#endif
27431 __supported_pte_mask &= ~_PAGE_NX;
27432 }
27433
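
The setup_nx.c hunk compiles the noexec= plumbing out when neither 64-bit nor PAE paging can express an NX bit, and under PAX_PAGEEXEC removes the option of disabling NX altogether. Either way the decision funnels into __supported_pte_mask, which every PTE is filtered through; a sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_NX (1ULL << 63)

int main(void)
{
	uint64_t supported_pte_mask = ~PAGE_NX;	/* boot-time default */
	int cpu_has_nx = 1, disable_nx = 0;

	/* mirrors x86_configure_nx() after the hunk above */
	if (cpu_has_nx && !disable_nx)
		supported_pte_mask |= PAGE_NX;
	else
		supported_pte_mask &= ~PAGE_NX;

	uint64_t pte = 0x1000 | PAGE_NX;	/* a PTE someone wants NX on */

	printf("effective pte: %#llx\n",
	       (unsigned long long)(pte & supported_pte_mask));
	return 0;
}
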
27434diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27435index 60f926c..a710970 100644
27436--- a/arch/x86/mm/tlb.c
27437+++ b/arch/x86/mm/tlb.c
27438@@ -48,7 +48,11 @@ void leave_mm(int cpu)
27439 BUG();
27440 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
27441 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
27442+
27443+#ifndef CONFIG_PAX_PER_CPU_PGD
27444 load_cr3(swapper_pg_dir);
27445+#endif
27446+
27447 }
27448 }
27449 EXPORT_SYMBOL_GPL(leave_mm);
27450diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
27451index 877b9a1..a8ecf42 100644
27452--- a/arch/x86/net/bpf_jit.S
27453+++ b/arch/x86/net/bpf_jit.S
27454@@ -9,6 +9,7 @@
27455 */
27456 #include <linux/linkage.h>
27457 #include <asm/dwarf2.h>
27458+#include <asm/alternative-asm.h>
27459
27460 /*
27461 * Calling convention :
27462@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
27463 jle bpf_slow_path_word
27464 mov (SKBDATA,%rsi),%eax
27465 bswap %eax /* ntohl() */
27466+ pax_force_retaddr
27467 ret
27468
27469 sk_load_half:
27470@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
27471 jle bpf_slow_path_half
27472 movzwl (SKBDATA,%rsi),%eax
27473 rol $8,%ax # ntohs()
27474+ pax_force_retaddr
27475 ret
27476
27477 sk_load_byte:
27478@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
27479 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
27480 jle bpf_slow_path_byte
27481 movzbl (SKBDATA,%rsi),%eax
27482+ pax_force_retaddr
27483 ret
27484
27485 /**
27486@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
27487 movzbl (SKBDATA,%rsi),%ebx
27488 and $15,%bl
27489 shl $2,%bl
27490+ pax_force_retaddr
27491 ret
27492
27493 /* rsi contains offset and can be scratched */
27494@@ -109,6 +114,7 @@ bpf_slow_path_word:
27495 js bpf_error
27496 mov -12(%rbp),%eax
27497 bswap %eax
27498+ pax_force_retaddr
27499 ret
27500
27501 bpf_slow_path_half:
27502@@ -117,12 +123,14 @@ bpf_slow_path_half:
27503 mov -12(%rbp),%ax
27504 rol $8,%ax
27505 movzwl %ax,%eax
27506+ pax_force_retaddr
27507 ret
27508
27509 bpf_slow_path_byte:
27510 bpf_slow_path_common(1)
27511 js bpf_error
27512 movzbl -12(%rbp),%eax
27513+ pax_force_retaddr
27514 ret
27515
27516 bpf_slow_path_byte_msh:
27517@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
27518 and $15,%al
27519 shl $2,%al
27520 xchg %eax,%ebx
27521+ pax_force_retaddr
27522 ret
27523
27524 #define sk_negative_common(SIZE) \
27525@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
27526 sk_negative_common(4)
27527 mov (%rax), %eax
27528 bswap %eax
27529+ pax_force_retaddr
27530 ret
27531
27532 bpf_slow_path_half_neg:
27533@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
27534 mov (%rax),%ax
27535 rol $8,%ax
27536 movzwl %ax,%eax
27537+ pax_force_retaddr
27538 ret
27539
27540 bpf_slow_path_byte_neg:
27541@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
27542 .globl sk_load_byte_negative_offset
27543 sk_negative_common(1)
27544 movzbl (%rax), %eax
27545+ pax_force_retaddr
27546 ret
27547
27548 bpf_slow_path_byte_msh_neg:
27549@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
27550 and $15,%al
27551 shl $2,%al
27552 xchg %eax,%ebx
27553+ pax_force_retaddr
27554 ret
27555
27556 bpf_error:
27557@@ -197,4 +210,5 @@ bpf_error:
27558 xor %eax,%eax
27559 mov -8(%rbp),%rbx
27560 leaveq
27561+ pax_force_retaddr
27562 ret
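
Each ret in the JIT helpers above gains a pax_force_retaddr, the KERNEXEC fixup applied to the saved return address just before returning: the slot is pinned back into the kernel-text alias so a corrupted value cannot redirect the ret into user-controlled memory. A rough user-space model of the intent, assuming an illustrative mask and mechanism (not PaX's actual implementation):

#include <stdint.h>
#include <stdio.h>

#define KERNEL_TEXT_BASE 0xffffffff80000000ULL	/* x86_64 kernel text alias */

static uint64_t force_retaddr(uint64_t slot)
{
	return slot | KERNEL_TEXT_BASE;	/* re-pin into the text alias */
}

int main(void)
{
	uint64_t corrupted = 0x0000000000401000ULL;	/* user-range address */

	printf("%#llx -> %#llx\n",
	       (unsigned long long)corrupted,
	       (unsigned long long)force_retaddr(corrupted));
	return 0;
}
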
27563diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
27564index 520d2bd..b895ef4 100644
27565--- a/arch/x86/net/bpf_jit_comp.c
27566+++ b/arch/x86/net/bpf_jit_comp.c
27567@@ -11,6 +11,7 @@
27568 #include <asm/cacheflush.h>
27569 #include <linux/netdevice.h>
27570 #include <linux/filter.h>
27571+#include <linux/random.h>
27572
27573 /*
27574 * Conventions :
27575@@ -48,13 +49,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
27576 return ptr + len;
27577 }
27578
27579+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27580+#define MAX_INSTR_CODE_SIZE 96
27581+#else
27582+#define MAX_INSTR_CODE_SIZE 64
27583+#endif
27584+
27585 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
27586
27587 #define EMIT1(b1) EMIT(b1, 1)
27588 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
27589 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
27590 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
27591+
27592+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27593+/* original constant will appear in ecx */
27594+#define DILUTE_CONST_SEQUENCE(_off, _key) \
27595+do { \
27596+ /* mov ecx, randkey */ \
27597+ EMIT1(0xb9); \
27598+ EMIT(_key, 4); \
27599+ /* xor ecx, randkey ^ off */ \
27600+ EMIT2(0x81, 0xf1); \
27601+ EMIT((_key) ^ (_off), 4); \
27602+} while (0)
27603+
27604+#define EMIT1_off32(b1, _off) \
27605+do { \
27606+ switch (b1) { \
27607+ case 0x05: /* add eax, imm32 */ \
27608+ case 0x2d: /* sub eax, imm32 */ \
27609+ case 0x25: /* and eax, imm32 */ \
27610+ case 0x0d: /* or eax, imm32 */ \
27611+ case 0xb8: /* mov eax, imm32 */ \
27612+ case 0x3d: /* cmp eax, imm32 */ \
27613+ case 0xa9: /* test eax, imm32 */ \
27614+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27615+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
27616+ break; \
27617+ case 0xbb: /* mov ebx, imm32 */ \
27618+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27619+ /* mov ebx, ecx */ \
27620+ EMIT2(0x89, 0xcb); \
27621+ break; \
27622+ case 0xbe: /* mov esi, imm32 */ \
27623+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27624+ /* mov esi, ecx */ \
27625+ EMIT2(0x89, 0xce); \
27626+ break; \
27627+ case 0xe9: /* jmp rel imm32 */ \
27628+ EMIT1(b1); \
27629+ EMIT(_off, 4); \
27630+ /* prevent fall-through, we're not called if off = 0 */ \
27631+ EMIT(0xcccccccc, 4); \
27632+ EMIT(0xcccccccc, 4); \
27633+ break; \
27634+ default: \
27635+ EMIT1(b1); \
27636+ EMIT(_off, 4); \
27637+ } \
27638+} while (0)
27639+
27640+#define EMIT2_off32(b1, b2, _off) \
27641+do { \
27642+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
27643+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
27644+ EMIT(randkey, 4); \
27645+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
27646+ EMIT((_off) - randkey, 4); \
27647+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
27648+ DILUTE_CONST_SEQUENCE(_off, randkey); \
27649+ /* imul eax, ecx */ \
27650+ EMIT3(0x0f, 0xaf, 0xc1); \
27651+ } else { \
27652+ EMIT2(b1, b2); \
27653+ EMIT(_off, 4); \
27654+ } \
27655+} while (0)
27656+#else
27657 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
27658+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
27659+#endif
27660
27661 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
27662 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
27663@@ -89,6 +164,24 @@ do { \
27664 #define X86_JBE 0x76
27665 #define X86_JA 0x77
27666
27667+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27668+#define APPEND_FLOW_VERIFY() \
27669+do { \
27670+ /* mov ecx, randkey */ \
27671+ EMIT1(0xb9); \
27672+ EMIT(randkey, 4); \
27673+ /* cmp ecx, randkey */ \
27674+ EMIT2(0x81, 0xf9); \
27675+ EMIT(randkey, 4); \
27676+ /* jz after 8 int 3s */ \
27677+ EMIT2(0x74, 0x08); \
27678+ EMIT(0xcccccccc, 4); \
27679+ EMIT(0xcccccccc, 4); \
27680+} while (0)
27681+#else
27682+#define APPEND_FLOW_VERIFY() do { } while (0)
27683+#endif
27684+
27685 #define EMIT_COND_JMP(op, offset) \
27686 do { \
27687 if (is_near(offset)) \
27688@@ -96,6 +189,7 @@ do { \
27689 else { \
27690 EMIT2(0x0f, op + 0x10); \
27691 EMIT(offset, 4); /* jxx .+off32 */ \
27692+ APPEND_FLOW_VERIFY(); \
27693 } \
27694 } while (0)
27695
27696@@ -120,12 +214,17 @@ static inline void bpf_flush_icache(void *start, void *end)
27697 set_fs(old_fs);
27698 }
27699
27700+struct bpf_jit_work {
27701+ struct work_struct work;
27702+ void *image;
27703+};
27704+
27705 #define CHOOSE_LOAD_FUNC(K, func) \
27706 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
27707
27708 void bpf_jit_compile(struct sk_filter *fp)
27709 {
27710- u8 temp[64];
27711+ u8 temp[MAX_INSTR_CODE_SIZE];
27712 u8 *prog;
27713 unsigned int proglen, oldproglen = 0;
27714 int ilen, i;
27715@@ -138,6 +237,9 @@ void bpf_jit_compile(struct sk_filter *fp)
27716 unsigned int *addrs;
27717 const struct sock_filter *filter = fp->insns;
27718 int flen = fp->len;
27719+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27720+ unsigned int randkey;
27721+#endif
27722
27723 if (!bpf_jit_enable)
27724 return;
27725@@ -146,11 +248,19 @@ void bpf_jit_compile(struct sk_filter *fp)
27726 if (addrs == NULL)
27727 return;
27728
27729+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
27730+ if (!fp->work)
27731+ goto out;
27732+
27733+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27734+ randkey = get_random_int();
27735+#endif
27736+
27737 /* Before first pass, make a rough estimation of addrs[]
27738- * each bpf instruction is translated to less than 64 bytes
27739+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
27740 */
27741 for (proglen = 0, i = 0; i < flen; i++) {
27742- proglen += 64;
27743+ proglen += MAX_INSTR_CODE_SIZE;
27744 addrs[i] = proglen;
27745 }
27746 cleanup_addr = proglen; /* epilogue address */
27747@@ -258,10 +368,8 @@ void bpf_jit_compile(struct sk_filter *fp)
27748 case BPF_S_ALU_MUL_K: /* A *= K */
27749 if (is_imm8(K))
27750 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
27751- else {
27752- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
27753- EMIT(K, 4);
27754- }
27755+ else
27756+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
27757 break;
27758 case BPF_S_ALU_DIV_X: /* A /= X; */
27759 seen |= SEEN_XREG;
27760@@ -301,13 +409,23 @@ void bpf_jit_compile(struct sk_filter *fp)
27761 break;
27762 case BPF_S_ALU_MOD_K: /* A %= K; */
27763 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
27764+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27765+ DILUTE_CONST_SEQUENCE(K, randkey);
27766+#else
27767 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
27768+#endif
27769 EMIT2(0xf7, 0xf1); /* div %ecx */
27770 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
27771 break;
27772 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
27773+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
27774+ DILUTE_CONST_SEQUENCE(K, randkey);
27775+ // imul rax, rcx
27776+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
27777+#else
27778 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
27779 EMIT(K, 4);
27780+#endif
27781 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
27782 break;
27783 case BPF_S_ALU_AND_X:
27784@@ -543,8 +661,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
27785 if (is_imm8(K)) {
27786 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
27787 } else {
27788- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
27789- EMIT(K, 4);
27790+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
27791 }
27792 } else {
27793 EMIT2(0x89,0xde); /* mov %ebx,%esi */
27794@@ -627,17 +744,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27795 break;
27796 default:
27797 /* hmm, too complex filter, give up with jit compiler */
27798- goto out;
27799+ goto error;
27800 }
27801 ilen = prog - temp;
27802 if (image) {
27803 if (unlikely(proglen + ilen > oldproglen)) {
27804 pr_err("bpb_jit_compile fatal error\n");
27805- kfree(addrs);
27806- module_free(NULL, image);
27807- return;
27808+ module_free_exec(NULL, image);
27809+ goto error;
27810 }
27811+ pax_open_kernel();
27812 memcpy(image + proglen, temp, ilen);
27813+ pax_close_kernel();
27814 }
27815 proglen += ilen;
27816 addrs[i] = proglen;
27817@@ -658,11 +776,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27818 break;
27819 }
27820 if (proglen == oldproglen) {
27821- image = module_alloc(max_t(unsigned int,
27822- proglen,
27823- sizeof(struct work_struct)));
27824+ image = module_alloc_exec(proglen);
27825 if (!image)
27826- goto out;
27827+ goto error;
27828 }
27829 oldproglen = proglen;
27830 }
27831@@ -678,7 +794,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
27832 bpf_flush_icache(image, image + proglen);
27833
27834 fp->bpf_func = (void *)image;
27835- }
27836+ } else
27837+error:
27838+ kfree(fp->work);
27839+
27840 out:
27841 kfree(addrs);
27842 return;
27843@@ -686,18 +805,20 @@ out:
27844
27845 static void jit_free_defer(struct work_struct *arg)
27846 {
27847- module_free(NULL, arg);
27848+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
27849+ kfree(arg);
27850 }
27851
27852 /* run from softirq, we must use a work_struct to call
27853- * module_free() from process context
27854+ * module_free_exec() from process context
27855 */
27856 void bpf_jit_free(struct sk_filter *fp)
27857 {
27858 if (fp->bpf_func != sk_run_filter) {
27859- struct work_struct *work = (struct work_struct *)fp->bpf_func;
27860+ struct work_struct *work = &fp->work->work;
27861
27862 INIT_WORK(work, jit_free_defer);
27863+ fp->work->image = fp->bpf_func;
27864 schedule_work(work);
27865 }
27866 }
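
Taken together, the bpf_jit_comp.c changes are a constant-blinding defence against JIT spraying: without them, filter-supplied immediates land verbatim in executable memory, where a misdirected jump can execute them as attacker-chosen instruction bytes. DILUTE_CONST_SEQUENCE splits every such immediate across a per-filter random key (randkey, from get_random_int()), so neither emitted dword equals the original value. The sketch below checks the XOR identity this relies on:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	uint32_t imm = 0xdeadbeef;		/* filter-controlled constant */

	srand((unsigned)time(NULL));
	uint32_t key = (uint32_t)rand();	/* stand-in for get_random_int() */

	/* what lands in the instruction stream: */
	uint32_t emitted_a = key;		/* operand of "mov ecx, key"       */
	uint32_t emitted_b = key ^ imm;		/* operand of "xor ecx, key ^ imm" */

	/* what the CPU computes at run time: */
	uint32_t ecx = emitted_a ^ emitted_b;

	printf("imm=%#x key=%#x emitted=(%#x,%#x) ecx=%#x\n",
	       imm, key, emitted_a, emitted_b, ecx);
	return ecx == imm ? 0 : 1;
}
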
27867diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27868index d6aa6e8..266395a 100644
27869--- a/arch/x86/oprofile/backtrace.c
27870+++ b/arch/x86/oprofile/backtrace.c
27871@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
27872 struct stack_frame_ia32 *fp;
27873 unsigned long bytes;
27874
27875- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27876+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27877 if (bytes != sizeof(bufhead))
27878 return NULL;
27879
27880- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
27881+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
27882
27883 oprofile_add_trace(bufhead[0].return_address);
27884
27885@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
27886 struct stack_frame bufhead[2];
27887 unsigned long bytes;
27888
27889- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
27890+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
27891 if (bytes != sizeof(bufhead))
27892 return NULL;
27893
27894@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27895 {
27896 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
27897
27898- if (!user_mode_vm(regs)) {
27899+ if (!user_mode(regs)) {
27900 unsigned long stack = kernel_stack_pointer(regs);
27901 if (depth)
27902 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27903diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
27904index e14a2ff..3fd6b58 100644
27905--- a/arch/x86/pci/mrst.c
27906+++ b/arch/x86/pci/mrst.c
27907@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
27908 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
27909 pci_mmcfg_late_init();
27910 pcibios_enable_irq = mrst_pci_irq_enable;
27911- pci_root_ops = pci_mrst_ops;
27912+ pax_open_kernel();
27913+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
27914+ pax_close_kernel();
27915 pci_soc_mode = 1;
27916 /* Continue with standard init */
27917 return 1;
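
pci_root_ops becomes a constified function-pointer table here, so the boot-time switch to the MID ops can no longer be a plain struct assignment; it has to memcpy through an explicit pax_open_kernel()/pax_close_kernel() window. A user-space model of the pattern, with stand-in names:

#include <string.h>
#include <stdio.h>

struct pci_ops_model {
	int (*read)(int reg);
	int (*write)(int reg, int val);
};

static int mrst_read(int reg)           { return reg; }
static int mrst_write(int reg, int val) { (void)reg; return val; }

/* normally const/read-only; writable only inside the "window" */
static struct pci_ops_model root_ops;

static void open_window(void)  { /* pax_open_kernel() stand-in  */ }
static void close_window(void) { /* pax_close_kernel() stand-in */ }

int main(void)
{
	const struct pci_ops_model mrst_ops = { mrst_read, mrst_write };

	open_window();
	memcpy(&root_ops, &mrst_ops, sizeof(mrst_ops));
	close_window();

	printf("read(7)=%d\n", root_ops.read(7));
	return 0;
}
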
27918diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27919index da8fe05..7ee6704 100644
27920--- a/arch/x86/pci/pcbios.c
27921+++ b/arch/x86/pci/pcbios.c
27922@@ -79,50 +79,93 @@ union bios32 {
27923 static struct {
27924 unsigned long address;
27925 unsigned short segment;
27926-} bios32_indirect = { 0, __KERNEL_CS };
27927+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27928
27929 /*
27930 * Returns the entry point for the given service, NULL on error
27931 */
27932
27933-static unsigned long bios32_service(unsigned long service)
27934+static unsigned long __devinit bios32_service(unsigned long service)
27935 {
27936 unsigned char return_code; /* %al */
27937 unsigned long address; /* %ebx */
27938 unsigned long length; /* %ecx */
27939 unsigned long entry; /* %edx */
27940 unsigned long flags;
27941+ struct desc_struct d, *gdt;
27942
27943 local_irq_save(flags);
27944- __asm__("lcall *(%%edi); cld"
27945+
27946+ gdt = get_cpu_gdt_table(smp_processor_id());
27947+
27948+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27949+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27950+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27951+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27952+
27953+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27954 : "=a" (return_code),
27955 "=b" (address),
27956 "=c" (length),
27957 "=d" (entry)
27958 : "0" (service),
27959 "1" (0),
27960- "D" (&bios32_indirect));
27961+ "D" (&bios32_indirect),
27962+ "r"(__PCIBIOS_DS)
27963+ : "memory");
27964+
27965+ pax_open_kernel();
27966+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27967+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27968+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27969+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27970+ pax_close_kernel();
27971+
27972 local_irq_restore(flags);
27973
27974 switch (return_code) {
27975- case 0:
27976- return address + entry;
27977- case 0x80: /* Not present */
27978- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27979- return 0;
27980- default: /* Shouldn't happen */
27981- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27982- service, return_code);
27983+ case 0: {
27984+ int cpu;
27985+ unsigned char flags;
27986+
27987+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27988+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27989+ printk(KERN_WARNING "bios32_service: not valid\n");
27990 return 0;
27991+ }
27992+ address = address + PAGE_OFFSET;
27993+ length += 16UL; /* some BIOSs underreport this... */
27994+ flags = 4;
27995+ if (length >= 64*1024*1024) {
27996+ length >>= PAGE_SHIFT;
27997+ flags |= 8;
27998+ }
27999+
28000+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28001+ gdt = get_cpu_gdt_table(cpu);
28002+ pack_descriptor(&d, address, length, 0x9b, flags);
28003+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
28004+ pack_descriptor(&d, address, length, 0x93, flags);
28005+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
28006+ }
28007+ return entry;
28008+ }
28009+ case 0x80: /* Not present */
28010+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
28011+ return 0;
28012+ default: /* Shouldn't happen */
28013+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
28014+ service, return_code);
28015+ return 0;
28016 }
28017 }
28018
28019 static struct {
28020 unsigned long address;
28021 unsigned short segment;
28022-} pci_indirect = { 0, __KERNEL_CS };
28023+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
28024
28025-static int pci_bios_present;
28026+static int pci_bios_present __read_only;
28027
28028 static int __devinit check_pcibios(void)
28029 {
28030@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
28031 unsigned long flags, pcibios_entry;
28032
28033 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
28034- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
28035+ pci_indirect.address = pcibios_entry;
28036
28037 local_irq_save(flags);
28038- __asm__(
28039- "lcall *(%%edi); cld\n\t"
28040+ __asm__("movw %w6, %%ds\n\t"
28041+ "lcall *%%ss:(%%edi); cld\n\t"
28042+ "push %%ss\n\t"
28043+ "pop %%ds\n\t"
28044 "jc 1f\n\t"
28045 "xor %%ah, %%ah\n"
28046 "1:"
28047@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
28048 "=b" (ebx),
28049 "=c" (ecx)
28050 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
28051- "D" (&pci_indirect)
28052+ "D" (&pci_indirect),
28053+ "r" (__PCIBIOS_DS)
28054 : "memory");
28055 local_irq_restore(flags);
28056
28057@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28058
28059 switch (len) {
28060 case 1:
28061- __asm__("lcall *(%%esi); cld\n\t"
28062+ __asm__("movw %w6, %%ds\n\t"
28063+ "lcall *%%ss:(%%esi); cld\n\t"
28064+ "push %%ss\n\t"
28065+ "pop %%ds\n\t"
28066 "jc 1f\n\t"
28067 "xor %%ah, %%ah\n"
28068 "1:"
28069@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28070 : "1" (PCIBIOS_READ_CONFIG_BYTE),
28071 "b" (bx),
28072 "D" ((long)reg),
28073- "S" (&pci_indirect));
28074+ "S" (&pci_indirect),
28075+ "r" (__PCIBIOS_DS));
28076 /*
28077 * Zero-extend the result beyond 8 bits, do not trust the
28078 * BIOS having done it:
28079@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28080 *value &= 0xff;
28081 break;
28082 case 2:
28083- __asm__("lcall *(%%esi); cld\n\t"
28084+ __asm__("movw %w6, %%ds\n\t"
28085+ "lcall *%%ss:(%%esi); cld\n\t"
28086+ "push %%ss\n\t"
28087+ "pop %%ds\n\t"
28088 "jc 1f\n\t"
28089 "xor %%ah, %%ah\n"
28090 "1:"
28091@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28092 : "1" (PCIBIOS_READ_CONFIG_WORD),
28093 "b" (bx),
28094 "D" ((long)reg),
28095- "S" (&pci_indirect));
28096+ "S" (&pci_indirect),
28097+ "r" (__PCIBIOS_DS));
28098 /*
28099 * Zero-extend the result beyond 16 bits, do not trust the
28100 * BIOS having done it:
28101@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28102 *value &= 0xffff;
28103 break;
28104 case 4:
28105- __asm__("lcall *(%%esi); cld\n\t"
28106+ __asm__("movw %w6, %%ds\n\t"
28107+ "lcall *%%ss:(%%esi); cld\n\t"
28108+ "push %%ss\n\t"
28109+ "pop %%ds\n\t"
28110 "jc 1f\n\t"
28111 "xor %%ah, %%ah\n"
28112 "1:"
28113@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
28114 : "1" (PCIBIOS_READ_CONFIG_DWORD),
28115 "b" (bx),
28116 "D" ((long)reg),
28117- "S" (&pci_indirect));
28118+ "S" (&pci_indirect),
28119+ "r" (__PCIBIOS_DS));
28120 break;
28121 }
28122
28123@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28124
28125 switch (len) {
28126 case 1:
28127- __asm__("lcall *(%%esi); cld\n\t"
28128+ __asm__("movw %w6, %%ds\n\t"
28129+ "lcall *%%ss:(%%esi); cld\n\t"
28130+ "push %%ss\n\t"
28131+ "pop %%ds\n\t"
28132 "jc 1f\n\t"
28133 "xor %%ah, %%ah\n"
28134 "1:"
28135@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28136 "c" (value),
28137 "b" (bx),
28138 "D" ((long)reg),
28139- "S" (&pci_indirect));
28140+ "S" (&pci_indirect),
28141+ "r" (__PCIBIOS_DS));
28142 break;
28143 case 2:
28144- __asm__("lcall *(%%esi); cld\n\t"
28145+ __asm__("movw %w6, %%ds\n\t"
28146+ "lcall *%%ss:(%%esi); cld\n\t"
28147+ "push %%ss\n\t"
28148+ "pop %%ds\n\t"
28149 "jc 1f\n\t"
28150 "xor %%ah, %%ah\n"
28151 "1:"
28152@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28153 "c" (value),
28154 "b" (bx),
28155 "D" ((long)reg),
28156- "S" (&pci_indirect));
28157+ "S" (&pci_indirect),
28158+ "r" (__PCIBIOS_DS));
28159 break;
28160 case 4:
28161- __asm__("lcall *(%%esi); cld\n\t"
28162+ __asm__("movw %w6, %%ds\n\t"
28163+ "lcall *%%ss:(%%esi); cld\n\t"
28164+ "push %%ss\n\t"
28165+ "pop %%ds\n\t"
28166 "jc 1f\n\t"
28167 "xor %%ah, %%ah\n"
28168 "1:"
28169@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28170 "c" (value),
28171 "b" (bx),
28172 "D" ((long)reg),
28173- "S" (&pci_indirect));
28174+ "S" (&pci_indirect),
28175+ "r" (__PCIBIOS_DS));
28176 break;
28177 }
28178
28179@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28180
28181 DBG("PCI: Fetching IRQ routing table... ");
28182 __asm__("push %%es\n\t"
28183+ "movw %w8, %%ds\n\t"
28184 "push %%ds\n\t"
28185 "pop %%es\n\t"
28186- "lcall *(%%esi); cld\n\t"
28187+ "lcall *%%ss:(%%esi); cld\n\t"
28188 "pop %%es\n\t"
28189+ "push %%ss\n\t"
28190+ "pop %%ds\n"
28191 "jc 1f\n\t"
28192 "xor %%ah, %%ah\n"
28193 "1:"
28194@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28195 "1" (0),
28196 "D" ((long) &opt),
28197 "S" (&pci_indirect),
28198- "m" (opt)
28199+ "m" (opt),
28200+ "r" (__PCIBIOS_DS)
28201 : "memory");
28202 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
28203 if (ret & 0xff00)
28204@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28205 {
28206 int ret;
28207
28208- __asm__("lcall *(%%esi); cld\n\t"
28209+ __asm__("movw %w5, %%ds\n\t"
28210+ "lcall *%%ss:(%%esi); cld\n\t"
28211+ "push %%ss\n\t"
28212+ "pop %%ds\n"
28213 "jc 1f\n\t"
28214 "xor %%ah, %%ah\n"
28215 "1:"
28216@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28217 : "0" (PCIBIOS_SET_PCI_HW_INT),
28218 "b" ((dev->bus->number << 8) | dev->devfn),
28219 "c" ((irq << 8) | (pin + 10)),
28220- "S" (&pci_indirect));
28221+ "S" (&pci_indirect),
28222+ "r" (__PCIBIOS_DS));
28223 return !(ret & 0xff00);
28224 }
28225 EXPORT_SYMBOL(pcibios_set_irq_routing);
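
Instead of lcall-ing the BIOS through the flat __KERNEL_CS, the pcbios.c changes route every service call through dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors whose base and limit cover only the BIOS image, swapping %ds around each call. The payoff is segment-limit containment, which the toy check below models (field layout simplified, nothing like the real GDT encoding):

#include <stdint.h>
#include <stdio.h>

struct toy_desc {
	uint32_t base;
	uint32_t limit;	/* byte-granular here for clarity */
};

static int access_ok(const struct toy_desc *d, uint32_t off, uint32_t len)
{
	return off <= d->limit && len <= d->limit - off + 1;
}

int main(void)
{
	struct toy_desc flat    = { 0x00000000u, 0xffffffffu };	/* __KERNEL_CS-like  */
	struct toy_desc pcibios = { 0x000e0000u, 0x0000ffffu };	/* BIOS-image-sized  */

	printf("flat segment allows 16MiB access at +1MiB: %d\n",
	       access_ok(&flat, 0x100000, 0x1000000));
	printf("pcibios segment allows the same:           %d\n",
	       access_ok(&pcibios, 0x100000, 0x1000000));
	return 0;
}
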
28226diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
28227index 40e4469..1ab536e 100644
28228--- a/arch/x86/platform/efi/efi_32.c
28229+++ b/arch/x86/platform/efi/efi_32.c
28230@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
28231 {
28232 struct desc_ptr gdt_descr;
28233
28234+#ifdef CONFIG_PAX_KERNEXEC
28235+ struct desc_struct d;
28236+#endif
28237+
28238 local_irq_save(efi_rt_eflags);
28239
28240 load_cr3(initial_page_table);
28241 __flush_tlb_all();
28242
28243+#ifdef CONFIG_PAX_KERNEXEC
28244+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
28245+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
28246+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
28247+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
28248+#endif
28249+
28250 gdt_descr.address = __pa(get_cpu_gdt_table(0));
28251 gdt_descr.size = GDT_SIZE - 1;
28252 load_gdt(&gdt_descr);
28253@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
28254 {
28255 struct desc_ptr gdt_descr;
28256
28257+#ifdef CONFIG_PAX_KERNEXEC
28258+ struct desc_struct d;
28259+
28260+ memset(&d, 0, sizeof d);
28261+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
28262+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
28263+#endif
28264+
28265 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
28266 gdt_descr.size = GDT_SIZE - 1;
28267 load_gdt(&gdt_descr);
28268diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
28269index fbe66e6..eae5e38 100644
28270--- a/arch/x86/platform/efi/efi_stub_32.S
28271+++ b/arch/x86/platform/efi/efi_stub_32.S
28272@@ -6,7 +6,9 @@
28273 */
28274
28275 #include <linux/linkage.h>
28276+#include <linux/init.h>
28277 #include <asm/page_types.h>
28278+#include <asm/segment.h>
28279
28280 /*
28281 * efi_call_phys(void *, ...) is a function with variable parameters.
28282@@ -20,7 +22,7 @@
28283 * service functions will comply with gcc calling convention, too.
28284 */
28285
28286-.text
28287+__INIT
28288 ENTRY(efi_call_phys)
28289 /*
28290 * 0. The function can only be called in Linux kernel. So CS has been
28291@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
28292 * The mapping of lower virtual memory has been created in prelog and
28293 * epilog.
28294 */
28295- movl $1f, %edx
28296- subl $__PAGE_OFFSET, %edx
28297- jmp *%edx
28298+#ifdef CONFIG_PAX_KERNEXEC
28299+ movl $(__KERNEXEC_EFI_DS), %edx
28300+ mov %edx, %ds
28301+ mov %edx, %es
28302+ mov %edx, %ss
28303+ addl $2f,(1f)
28304+ ljmp *(1f)
28305+
28306+__INITDATA
28307+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
28308+.previous
28309+
28310+2:
28311+ subl $2b,(1b)
28312+#else
28313+ jmp 1f-__PAGE_OFFSET
28314 1:
28315+#endif
28316
28317 /*
28318 * 2. Now on the top of stack is the return
28319@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
28320 * parameter 2, ..., param n. To make things easy, we save the return
28321 * address of efi_call_phys in a global variable.
28322 */
28323- popl %edx
28324- movl %edx, saved_return_addr
28325- /* get the function pointer into ECX*/
28326- popl %ecx
28327- movl %ecx, efi_rt_function_ptr
28328- movl $2f, %edx
28329- subl $__PAGE_OFFSET, %edx
28330- pushl %edx
28331+ popl (saved_return_addr)
28332+ popl (efi_rt_function_ptr)
28333
28334 /*
28335 * 3. Clear PG bit in %CR0.
28336@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
28337 /*
28338 * 5. Call the physical function.
28339 */
28340- jmp *%ecx
28341+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
28342
28343-2:
28344 /*
28345 * 6. After EFI runtime service returns, control will return to
28346 * following instruction. We'd better readjust stack pointer first.
28347@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
28348 movl %cr0, %edx
28349 orl $0x80000000, %edx
28350 movl %edx, %cr0
28351- jmp 1f
28352-1:
28353+
28354 /*
28355 * 8. Now restore the virtual mode from flat mode by
28356 * adding EIP with PAGE_OFFSET.
28357 */
28358- movl $1f, %edx
28359- jmp *%edx
28360+#ifdef CONFIG_PAX_KERNEXEC
28361+ movl $(__KERNEL_DS), %edx
28362+ mov %edx, %ds
28363+ mov %edx, %es
28364+ mov %edx, %ss
28365+ ljmp $(__KERNEL_CS),$1f
28366+#else
28367+ jmp 1f+__PAGE_OFFSET
28368+#endif
28369 1:
28370
28371 /*
28372 * 9. Balance the stack. And because EAX contain the return value,
28373 * we'd better not clobber it.
28374 */
28375- leal efi_rt_function_ptr, %edx
28376- movl (%edx), %ecx
28377- pushl %ecx
28378+ pushl (efi_rt_function_ptr)
28379
28380 /*
28381- * 10. Push the saved return address onto the stack and return.
28382+ * 10. Return to the saved return address.
28383 */
28384- leal saved_return_addr, %edx
28385- movl (%edx), %ecx
28386- pushl %ecx
28387- ret
28388+ jmpl *(saved_return_addr)
28389 ENDPROC(efi_call_phys)
28390 .previous
28391
28392-.data
28393+__INITDATA
28394 saved_return_addr:
28395 .long 0
28396 efi_rt_function_ptr:
28397diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
28398index 4c07cca..2c8427d 100644
28399--- a/arch/x86/platform/efi/efi_stub_64.S
28400+++ b/arch/x86/platform/efi/efi_stub_64.S
28401@@ -7,6 +7,7 @@
28402 */
28403
28404 #include <linux/linkage.h>
28405+#include <asm/alternative-asm.h>
28406
28407 #define SAVE_XMM \
28408 mov %rsp, %rax; \
28409@@ -40,6 +41,7 @@ ENTRY(efi_call0)
28410 call *%rdi
28411 addq $32, %rsp
28412 RESTORE_XMM
28413+ pax_force_retaddr 0, 1
28414 ret
28415 ENDPROC(efi_call0)
28416
28417@@ -50,6 +52,7 @@ ENTRY(efi_call1)
28418 call *%rdi
28419 addq $32, %rsp
28420 RESTORE_XMM
28421+ pax_force_retaddr 0, 1
28422 ret
28423 ENDPROC(efi_call1)
28424
28425@@ -60,6 +63,7 @@ ENTRY(efi_call2)
28426 call *%rdi
28427 addq $32, %rsp
28428 RESTORE_XMM
28429+ pax_force_retaddr 0, 1
28430 ret
28431 ENDPROC(efi_call2)
28432
28433@@ -71,6 +75,7 @@ ENTRY(efi_call3)
28434 call *%rdi
28435 addq $32, %rsp
28436 RESTORE_XMM
28437+ pax_force_retaddr 0, 1
28438 ret
28439 ENDPROC(efi_call3)
28440
28441@@ -83,6 +88,7 @@ ENTRY(efi_call4)
28442 call *%rdi
28443 addq $32, %rsp
28444 RESTORE_XMM
28445+ pax_force_retaddr 0, 1
28446 ret
28447 ENDPROC(efi_call4)
28448
28449@@ -96,6 +102,7 @@ ENTRY(efi_call5)
28450 call *%rdi
28451 addq $48, %rsp
28452 RESTORE_XMM
28453+ pax_force_retaddr 0, 1
28454 ret
28455 ENDPROC(efi_call5)
28456
28457@@ -112,5 +119,6 @@ ENTRY(efi_call6)
28458 call *%rdi
28459 addq $48, %rsp
28460 RESTORE_XMM
28461+ pax_force_retaddr 0, 1
28462 ret
28463 ENDPROC(efi_call6)
28464diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
28465index fd41a92..9c33628 100644
28466--- a/arch/x86/platform/mrst/mrst.c
28467+++ b/arch/x86/platform/mrst/mrst.c
28468@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
28469 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
28470 int sfi_mrtc_num;
28471
28472-static void mrst_power_off(void)
28473+static __noreturn void mrst_power_off(void)
28474 {
28475+ BUG();
28476 }
28477
28478-static void mrst_reboot(void)
28479+static __noreturn void mrst_reboot(void)
28480 {
28481 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
28482+ BUG();
28483 }
28484
28485 /* parse all the mtimer info to a static mtimer array */
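
mrst_power_off() and mrst_reboot() are now declared __noreturn and backstopped with BUG(): if the SCU IPC command fails to actually reset the SoC, execution must not fall back into callers that assume the machine is gone. A toy illustration of that contract, using GCC's attribute spelling directly:

#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn))
static void toy_power_off(void)
{
	fputs("power-off issued but the hardware kept running\n", stderr);
	abort();	/* the kernel uses BUG(); never fall through */
}

int main(void)
{
	toy_power_off();
	/* unreachable: the compiler may now warn on any code placed here */
}
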
28486diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
28487index d6ee929..3637cb5 100644
28488--- a/arch/x86/platform/olpc/olpc_dt.c
28489+++ b/arch/x86/platform/olpc/olpc_dt.c
28490@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
28491 return res;
28492 }
28493
28494-static struct of_pdt_ops prom_olpc_ops __initdata = {
28495+static struct of_pdt_ops prom_olpc_ops __initconst = {
28496 .nextprop = olpc_dt_nextprop,
28497 .getproplen = olpc_dt_getproplen,
28498 .getproperty = olpc_dt_getproperty,
28499diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28500index 218cdb1..c1178eb 100644
28501--- a/arch/x86/power/cpu.c
28502+++ b/arch/x86/power/cpu.c
28503@@ -132,7 +132,7 @@ static void do_fpu_end(void)
28504 static void fix_processor_context(void)
28505 {
28506 int cpu = smp_processor_id();
28507- struct tss_struct *t = &per_cpu(init_tss, cpu);
28508+ struct tss_struct *t = init_tss + cpu;
28509
28510 set_tss_desc(cpu, t); /*
28511 * This just modifies memory; should not be
28512@@ -142,8 +142,6 @@ static void fix_processor_context(void)
28513 */
28514
28515 #ifdef CONFIG_X86_64
28516- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28517-
28518 syscall_init(); /* This sets MSR_*STAR and related */
28519 #endif
28520 load_TR_desc(); /* This does ltr */
28521diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
28522index cbca565..bae7133 100644
28523--- a/arch/x86/realmode/init.c
28524+++ b/arch/x86/realmode/init.c
28525@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
28526 __va(real_mode_header->trampoline_header);
28527
28528 #ifdef CONFIG_X86_32
28529- trampoline_header->start = __pa(startup_32_smp);
28530+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
28531+
28532+#ifdef CONFIG_PAX_KERNEXEC
28533+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
28534+#endif
28535+
28536+ trampoline_header->boot_cs = __BOOT_CS;
28537 trampoline_header->gdt_limit = __BOOT_DS + 7;
28538 trampoline_header->gdt_base = __pa(boot_gdt);
28539 #else
28540diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
28541index 8869287..d577672 100644
28542--- a/arch/x86/realmode/rm/Makefile
28543+++ b/arch/x86/realmode/rm/Makefile
28544@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
28545 $(call cc-option, -fno-unit-at-a-time)) \
28546 $(call cc-option, -fno-stack-protector) \
28547 $(call cc-option, -mpreferred-stack-boundary=2)
28548+ifdef CONSTIFY_PLUGIN
28549+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
28550+endif
28551 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
28552 GCOV_PROFILE := n
28553diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
28554index a28221d..93c40f1 100644
28555--- a/arch/x86/realmode/rm/header.S
28556+++ b/arch/x86/realmode/rm/header.S
28557@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
28558 #endif
28559 /* APM/BIOS reboot */
28560 .long pa_machine_real_restart_asm
28561-#ifdef CONFIG_X86_64
28562+#ifdef CONFIG_X86_32
28563+ .long __KERNEL_CS
28564+#else
28565 .long __KERNEL32_CS
28566 #endif
28567 END(real_mode_header)
28568diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
28569index c1b2791..f9e31c7 100644
28570--- a/arch/x86/realmode/rm/trampoline_32.S
28571+++ b/arch/x86/realmode/rm/trampoline_32.S
28572@@ -25,6 +25,12 @@
28573 #include <asm/page_types.h>
28574 #include "realmode.h"
28575
28576+#ifdef CONFIG_PAX_KERNEXEC
28577+#define ta(X) (X)
28578+#else
28579+#define ta(X) (pa_ ## X)
28580+#endif
28581+
28582 .text
28583 .code16
28584
28585@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
28586
28587 cli # We should be safe anyway
28588
28589- movl tr_start, %eax # where we need to go
28590-
28591 movl $0xA5A5A5A5, trampoline_status
28592 # write marker for master knows we're running
28593
28594@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
28595 movw $1, %dx # protected mode (PE) bit
28596 lmsw %dx # into protected mode
28597
28598- ljmpl $__BOOT_CS, $pa_startup_32
28599+ ljmpl *(trampoline_header)
28600
28601 .section ".text32","ax"
28602 .code32
28603@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
28604 .balign 8
28605 GLOBAL(trampoline_header)
28606 tr_start: .space 4
28607- tr_gdt_pad: .space 2
28608+ tr_boot_cs: .space 2
28609 tr_gdt: .space 6
28610 END(trampoline_header)
28611
28612diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
28613index bb360dc..3e5945f 100644
28614--- a/arch/x86/realmode/rm/trampoline_64.S
28615+++ b/arch/x86/realmode/rm/trampoline_64.S
28616@@ -107,7 +107,7 @@ ENTRY(startup_32)
28617 wrmsr
28618
28619 # Enable paging and in turn activate Long Mode
28620- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
28621+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
28622 movl %eax, %cr0
28623
28624 /*
28625diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
28626index 5a1847d..deccb30 100644
28627--- a/arch/x86/tools/relocs.c
28628+++ b/arch/x86/tools/relocs.c
28629@@ -12,10 +12,13 @@
28630 #include <regex.h>
28631 #include <tools/le_byteshift.h>
28632
28633+#include "../../../include/generated/autoconf.h"
28634+
28635 static void die(char *fmt, ...);
28636
28637 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
28638 static Elf32_Ehdr ehdr;
28639+static Elf32_Phdr *phdr;
28640 static unsigned long reloc_count, reloc_idx;
28641 static unsigned long *relocs;
28642 static unsigned long reloc16_count, reloc16_idx;
28643@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
28644 }
28645 }
28646
28647+static void read_phdrs(FILE *fp)
28648+{
28649+ unsigned int i;
28650+
28651+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
28652+ if (!phdr) {
28653+ die("Unable to allocate %d program headers\n",
28654+ ehdr.e_phnum);
28655+ }
28656+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
28657+ die("Seek to %d failed: %s\n",
28658+ ehdr.e_phoff, strerror(errno));
28659+ }
28660+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
28661+ die("Cannot read ELF program headers: %s\n",
28662+ strerror(errno));
28663+ }
28664+ for(i = 0; i < ehdr.e_phnum; i++) {
28665+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
28666+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
28667+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
28668+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
28669+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
28670+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
28671+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
28672+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
28673+ }
28674+
28675+}
28676+
28677 static void read_shdrs(FILE *fp)
28678 {
28679- int i;
28680+ unsigned int i;
28681 Elf32_Shdr shdr;
28682
28683 secs = calloc(ehdr.e_shnum, sizeof(struct section));
28684@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
28685
28686 static void read_strtabs(FILE *fp)
28687 {
28688- int i;
28689+ unsigned int i;
28690 for (i = 0; i < ehdr.e_shnum; i++) {
28691 struct section *sec = &secs[i];
28692 if (sec->shdr.sh_type != SHT_STRTAB) {
28693@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
28694
28695 static void read_symtabs(FILE *fp)
28696 {
28697- int i,j;
28698+ unsigned int i,j;
28699 for (i = 0; i < ehdr.e_shnum; i++) {
28700 struct section *sec = &secs[i];
28701 if (sec->shdr.sh_type != SHT_SYMTAB) {
28702@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
28703 }
28704
28705
28706-static void read_relocs(FILE *fp)
28707+static void read_relocs(FILE *fp, int use_real_mode)
28708 {
28709- int i,j;
28710+ unsigned int i,j;
28711+ uint32_t base;
28712+
28713 for (i = 0; i < ehdr.e_shnum; i++) {
28714 struct section *sec = &secs[i];
28715 if (sec->shdr.sh_type != SHT_REL) {
28716@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
28717 die("Cannot read symbol table: %s\n",
28718 strerror(errno));
28719 }
28720+ base = 0;
28721+
28722+#ifdef CONFIG_X86_32
28723+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
28724+ if (phdr[j].p_type != PT_LOAD )
28725+ continue;
28726+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
28727+ continue;
28728+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
28729+ break;
28730+ }
28731+#endif
28732+
28733 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
28734 Elf32_Rel *rel = &sec->reltab[j];
28735- rel->r_offset = elf32_to_cpu(rel->r_offset);
28736+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
28737 rel->r_info = elf32_to_cpu(rel->r_info);
28738 }
28739 }
28740@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
28741
28742 static void print_absolute_symbols(void)
28743 {
28744- int i;
28745+ unsigned int i;
28746 printf("Absolute symbols\n");
28747 printf(" Num: Value Size Type Bind Visibility Name\n");
28748 for (i = 0; i < ehdr.e_shnum; i++) {
28749 struct section *sec = &secs[i];
28750 char *sym_strtab;
28751- int j;
28752+ unsigned int j;
28753
28754 if (sec->shdr.sh_type != SHT_SYMTAB) {
28755 continue;
28756@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
28757
28758 static void print_absolute_relocs(void)
28759 {
28760- int i, printed = 0;
28761+ unsigned int i, printed = 0;
28762
28763 for (i = 0; i < ehdr.e_shnum; i++) {
28764 struct section *sec = &secs[i];
28765 struct section *sec_applies, *sec_symtab;
28766 char *sym_strtab;
28767 Elf32_Sym *sh_symtab;
28768- int j;
28769+ unsigned int j;
28770 if (sec->shdr.sh_type != SHT_REL) {
28771 continue;
28772 }
28773@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
28774 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28775 int use_real_mode)
28776 {
28777- int i;
28778+ unsigned int i;
28779 /* Walk through the relocations */
28780 for (i = 0; i < ehdr.e_shnum; i++) {
28781 char *sym_strtab;
28782 Elf32_Sym *sh_symtab;
28783 struct section *sec_applies, *sec_symtab;
28784- int j;
28785+ unsigned int j;
28786 struct section *sec = &secs[i];
28787
28788 if (sec->shdr.sh_type != SHT_REL) {
28789@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
28790 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
28791 r_type = ELF32_R_TYPE(rel->r_info);
28792
28793+ if (!use_real_mode) {
28794+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
28795+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
28796+ continue;
28797+
28798+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
28799+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
28800+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
28801+ continue;
28802+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
28803+ continue;
28804+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
28805+ continue;
28806+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
28807+ continue;
28808+#endif
28809+ }
28810+
28811 shn_abs = sym->st_shndx == SHN_ABS;
28812
28813 switch (r_type) {
28814@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
28815
28816 static void emit_relocs(int as_text, int use_real_mode)
28817 {
28818- int i;
28819+ unsigned int i;
28820 /* Count how many relocations I have and allocate space for them. */
28821 reloc_count = 0;
28822 walk_relocs(count_reloc, use_real_mode);
28823@@ -808,10 +874,11 @@ int main(int argc, char **argv)
28824 fname, strerror(errno));
28825 }
28826 read_ehdr(fp);
28827+ read_phdrs(fp);
28828 read_shdrs(fp);
28829 read_strtabs(fp);
28830 read_symtabs(fp);
28831- read_relocs(fp);
28832+ read_relocs(fp, use_real_mode);
28833 if (show_absolute_syms) {
28834 print_absolute_symbols();
28835 return 0;
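
relocs.c grows read_phdrs() and rebases each 32-bit relocation by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr of its containing PT_LOAD segment, since a KERNEXEC kernel is no longer linked with a fixed PAGE_OFFSET shift between the two; the loop counters also flip to unsigned int to match the unsigned ELF e_shnum/e_phnum fields. A worked example with made-up segment addresses:

#include <stdint.h>
#include <stdio.h>

#define CONFIG_PAGE_OFFSET 0xc0000000u

int main(void)
{
	uint32_t p_vaddr  = 0xc1000000u;	/* where the segment is linked   */
	uint32_t p_paddr  = 0x02000000u;	/* where it is loaded physically */
	uint32_t base     = CONFIG_PAGE_OFFSET + p_paddr - p_vaddr;
	uint32_t r_offset = 0xc1234567u;	/* as read from the .rel section */

	printf("base=%#x adjusted r_offset=%#x\n", base, r_offset + base);
	return 0;
}
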
28836diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28837index fd14be1..e3c79c0 100644
28838--- a/arch/x86/vdso/Makefile
28839+++ b/arch/x86/vdso/Makefile
28840@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
28841 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
28842 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
28843
28844-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28845+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28846 GCOV_PROFILE := n
28847
28848 #
28849diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28850index 0faad64..39ef157 100644
28851--- a/arch/x86/vdso/vdso32-setup.c
28852+++ b/arch/x86/vdso/vdso32-setup.c
28853@@ -25,6 +25,7 @@
28854 #include <asm/tlbflush.h>
28855 #include <asm/vdso.h>
28856 #include <asm/proto.h>
28857+#include <asm/mman.h>
28858
28859 enum {
28860 VDSO_DISABLED = 0,
28861@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28862 void enable_sep_cpu(void)
28863 {
28864 int cpu = get_cpu();
28865- struct tss_struct *tss = &per_cpu(init_tss, cpu);
28866+ struct tss_struct *tss = init_tss + cpu;
28867
28868 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28869 put_cpu();
28870@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28871 gate_vma.vm_start = FIXADDR_USER_START;
28872 gate_vma.vm_end = FIXADDR_USER_END;
28873 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28874- gate_vma.vm_page_prot = __P101;
28875+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28876
28877 return 0;
28878 }
28879@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28880 if (compat)
28881 addr = VDSO_HIGH_BASE;
28882 else {
28883- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28884+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28885 if (IS_ERR_VALUE(addr)) {
28886 ret = addr;
28887 goto up_fail;
28888 }
28889 }
28890
28891- current->mm->context.vdso = (void *)addr;
28892+ current->mm->context.vdso = addr;
28893
28894 if (compat_uses_vma || !compat) {
28895 /*
28896@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28897 }
28898
28899 current_thread_info()->sysenter_return =
28900- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28901+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28902
28903 up_fail:
28904 if (ret)
28905- current->mm->context.vdso = NULL;
28906+ current->mm->context.vdso = 0;
28907
28908 up_write(&mm->mmap_sem);
28909
28910@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
28911
28912 const char *arch_vma_name(struct vm_area_struct *vma)
28913 {
28914- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28915+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28916 return "[vdso]";
28917+
28918+#ifdef CONFIG_PAX_SEGMEXEC
28919+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
28920+ return "[vdso]";
28921+#endif
28922+
28923 return NULL;
28924 }
28925
28926@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28927 * Check to see if the corresponding task was created in compat vdso
28928 * mode.
28929 */
28930- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
28931+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
28932 return &gate_vma;
28933 return NULL;
28934 }
28935diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
28936index 00aaf04..4a26505 100644
28937--- a/arch/x86/vdso/vma.c
28938+++ b/arch/x86/vdso/vma.c
28939@@ -16,8 +16,6 @@
28940 #include <asm/vdso.h>
28941 #include <asm/page.h>
28942
28943-unsigned int __read_mostly vdso_enabled = 1;
28944-
28945 extern char vdso_start[], vdso_end[];
28946 extern unsigned short vdso_sync_cpuid;
28947
28948@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
28949 * unaligned here as a result of stack start randomization.
28950 */
28951 addr = PAGE_ALIGN(addr);
28952- addr = align_addr(addr, NULL, ALIGN_VDSO);
28953
28954 return addr;
28955 }
28956@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
28957 unsigned size)
28958 {
28959 struct mm_struct *mm = current->mm;
28960- unsigned long addr;
28961+ unsigned long addr = 0;
28962 int ret;
28963
28964- if (!vdso_enabled)
28965- return 0;
28966-
28967 down_write(&mm->mmap_sem);
28968+
28969+#ifdef CONFIG_PAX_RANDMMAP
28970+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28971+#endif
28972+
28973 addr = vdso_addr(mm->start_stack, size);
28974+ addr = align_addr(addr, NULL, ALIGN_VDSO);
28975 addr = get_unmapped_area(NULL, addr, size, 0, 0);
28976 if (IS_ERR_VALUE(addr)) {
28977 ret = addr;
28978 goto up_fail;
28979 }
28980
28981- current->mm->context.vdso = (void *)addr;
28982+ mm->context.vdso = addr;
28983
28984 ret = install_special_mapping(mm, addr, size,
28985 VM_READ|VM_EXEC|
28986 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
28987 pages);
28988- if (ret) {
28989- current->mm->context.vdso = NULL;
28990- goto up_fail;
28991- }
28992+ if (ret)
28993+ mm->context.vdso = 0;
28994
28995 up_fail:
28996 up_write(&mm->mmap_sem);
28997@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28998 vdsox32_size);
28999 }
29000 #endif
29001-
29002-static __init int vdso_setup(char *s)
29003-{
29004- vdso_enabled = simple_strtoul(s, NULL, 0);
29005- return 0;
29006-}
29007-__setup("vdso=", vdso_setup);
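
vma.c drops the vdso=/vdso_enabled knob entirely and, under PAX_RANDMMAP, leaves the allocation hint at 0 so the vdso lands wherever the randomized allocator chooses rather than at the historical stack-relative spot. A control-flow sketch, assuming an illustrative MF_PAX_RANDMMAP value and a stand-in for vdso_addr():

#include <stdio.h>

#define MF_PAX_RANDMMAP 0x01	/* illustrative value, not the kernel's */

static unsigned long vdso_hint(unsigned long pax_flags, unsigned long stack_top)
{
	unsigned long addr = 0;

	if (!(pax_flags & MF_PAX_RANDMMAP))
		addr = stack_top - 0x100000;	/* stand-in for vdso_addr() */

	return addr;	/* 0 lets get_unmapped_area() pick the slot */
}

int main(void)
{
	printf("randmmap: %#lx\n", vdso_hint(MF_PAX_RANDMMAP, 0xbf000000));
	printf("legacy:   %#lx\n", vdso_hint(0, 0xbf000000));
	return 0;
}
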
29008diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
29009index 586d838..e883209 100644
29010--- a/arch/x86/xen/enlighten.c
29011+++ b/arch/x86/xen/enlighten.c
29012@@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
29013
29014 struct shared_info xen_dummy_shared_info;
29015
29016-void *xen_initial_gdt;
29017-
29018 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
29019 __read_mostly int xen_have_vector_callback;
29020 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
29021@@ -473,8 +471,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
29022 {
29023 unsigned long va = dtr->address;
29024 unsigned int size = dtr->size + 1;
29025- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
29026- unsigned long frames[pages];
29027+ unsigned long frames[65536 / PAGE_SIZE];
29028 int f;
29029
29030 /*
29031@@ -522,8 +519,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
29032 {
29033 unsigned long va = dtr->address;
29034 unsigned int size = dtr->size + 1;
29035- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
29036- unsigned long frames[pages];
29037+ unsigned long frames[65536 / PAGE_SIZE];
29038 int f;
29039
29040 /*
29041@@ -916,7 +912,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
29042 return 0;
29043 }
29044
29045-static void set_xen_basic_apic_ops(void)
29046+static void __init set_xen_basic_apic_ops(void)
29047 {
29048 apic->read = xen_apic_read;
29049 apic->write = xen_apic_write;
29050@@ -1222,30 +1218,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
29051 #endif
29052 };
29053
29054-static void xen_reboot(int reason)
29055+static __noreturn void xen_reboot(int reason)
29056 {
29057 struct sched_shutdown r = { .reason = reason };
29058
29059- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
29060- BUG();
29061+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
29062+ BUG();
29063 }
29064
29065-static void xen_restart(char *msg)
29066+static __noreturn void xen_restart(char *msg)
29067 {
29068 xen_reboot(SHUTDOWN_reboot);
29069 }
29070
29071-static void xen_emergency_restart(void)
29072+static __noreturn void xen_emergency_restart(void)
29073 {
29074 xen_reboot(SHUTDOWN_reboot);
29075 }
29076
29077-static void xen_machine_halt(void)
29078+static __noreturn void xen_machine_halt(void)
29079 {
29080 xen_reboot(SHUTDOWN_poweroff);
29081 }
29082
29083-static void xen_machine_power_off(void)
29084+static __noreturn void xen_machine_power_off(void)
29085 {
29086 if (pm_power_off)
29087 pm_power_off();
29088@@ -1347,7 +1343,17 @@ asmlinkage void __init xen_start_kernel(void)
29089 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
29090
29091 /* Work out if we support NX */
29092- x86_configure_nx();
29093+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29094+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
29095+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
29096+ unsigned l, h;
29097+
29098+ __supported_pte_mask |= _PAGE_NX;
29099+ rdmsr(MSR_EFER, l, h);
29100+ l |= EFER_NX;
29101+ wrmsr(MSR_EFER, l, h);
29102+ }
29103+#endif
29104
29105 xen_setup_features();
29106
29107@@ -1376,14 +1382,7 @@ asmlinkage void __init xen_start_kernel(void)
29108 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
29109 }
29110
29111- machine_ops = xen_machine_ops;
29112-
29113- /*
29114- * The only reliable way to retain the initial address of the
29115- * percpu gdt_page is to remember it here, so we can go and
29116- * mark it RW later, when the initial percpu area is freed.
29117- */
29118- xen_initial_gdt = &per_cpu(gdt_page, 0);
29119+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
29120
29121 xen_smp_init();
29122
29123diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
29124index dcf5f2d..d804c25 100644
29125--- a/arch/x86/xen/mmu.c
29126+++ b/arch/x86/xen/mmu.c
29127@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
29128 /* L3_k[510] -> level2_kernel_pgt
29129 * L3_i[511] -> level2_fixmap_pgt */
29130 convert_pfn_mfn(level3_kernel_pgt);
29131+ convert_pfn_mfn(level3_vmalloc_start_pgt);
29132+ convert_pfn_mfn(level3_vmalloc_end_pgt);
29133+ convert_pfn_mfn(level3_vmemmap_pgt);
29134
29135 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
29136 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
29137@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
29138 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
29139 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
29140 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
29141+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
29142+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
29143+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
29144 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
29145 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
29146+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
29147 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
29148 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
29149
29150@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
29151 pv_mmu_ops.set_pud = xen_set_pud;
29152 #if PAGETABLE_LEVELS == 4
29153 pv_mmu_ops.set_pgd = xen_set_pgd;
29154+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
29155 #endif
29156
29157 /* This will work as long as patching hasn't happened yet
29158@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
29159 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
29160 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
29161 .set_pgd = xen_set_pgd_hyper,
29162+ .set_pgd_batched = xen_set_pgd_hyper,
29163
29164 .alloc_pud = xen_alloc_pmd_init,
29165 .release_pud = xen_release_pmd_init,
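Note: the mmu.c hunks teach Xen about the extra page-table levels PaX introduces (the split vmalloc and vmemmap L3 tables), write-protecting them like the stock ones, and alias the PaX-added set_pgd_batched pv op to the existing set_pgd handlers so batched and unbatched PGD updates both take the hypervisor-aware path. Sketch:

    #if PAGETABLE_LEVELS == 4
    pv_mmu_ops.set_pgd         = xen_set_pgd;
    pv_mmu_ops.set_pgd_batched = xen_set_pgd;   /* PaX-added op, same handler */
    #endif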
29166diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
29167index 353c50f..8f3c179 100644
29168--- a/arch/x86/xen/smp.c
29169+++ b/arch/x86/xen/smp.c
29170@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
29171 {
29172 BUG_ON(smp_processor_id() != 0);
29173 native_smp_prepare_boot_cpu();
29174-
29175- /* We've switched to the "real" per-cpu gdt, so make sure the
29176- old memory can be recycled */
29177- make_lowmem_page_readwrite(xen_initial_gdt);
29178-
29179 xen_filter_cpu_maps();
29180 xen_setup_vcpu_info_placement();
29181 }
29182@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
29183 gdt = get_cpu_gdt_table(cpu);
29184
29185 ctxt->flags = VGCF_IN_KERNEL;
29186- ctxt->user_regs.ds = __USER_DS;
29187- ctxt->user_regs.es = __USER_DS;
29188+ ctxt->user_regs.ds = __KERNEL_DS;
29189+ ctxt->user_regs.es = __KERNEL_DS;
29190 ctxt->user_regs.ss = __KERNEL_DS;
29191 #ifdef CONFIG_X86_32
29192 ctxt->user_regs.fs = __KERNEL_PERCPU;
29193- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
29194+ savesegment(gs, ctxt->user_regs.gs);
29195 #else
29196 ctxt->gs_base_kernel = per_cpu_offset(cpu);
29197 #endif
29198@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
29199 int rc;
29200
29201 per_cpu(current_task, cpu) = idle;
29202+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
29203 #ifdef CONFIG_X86_32
29204 irq_ctx_init(cpu);
29205 #else
29206 clear_tsk_thread_flag(idle, TIF_FORK);
29207- per_cpu(kernel_stack, cpu) =
29208- (unsigned long)task_stack_page(idle) -
29209- KERNEL_STACK_OFFSET + THREAD_SIZE;
29210+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
29211 #endif
29212 xen_setup_runstate_info(cpu);
29213 xen_setup_timer(cpu);
29214@@ -637,7 +631,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
29215
29216 void __init xen_smp_init(void)
29217 {
29218- smp_ops = xen_smp_ops;
29219+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
29220 xen_fill_possible_map();
29221 xen_init_spinlocks();
29222 }
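Note: the smp.c changes follow PaX conventions visible elsewhere in the patch: secondary CPUs start with kernel data selectors in ds/es (segment-based UDEREF), the 32-bit gs value is captured with savesegment() instead of assuming the stack-canary segment, a per-cpu current_tinfo mirrors the task's thread_info, and the now-const smp_ops is populated via memcpy. A sketch of the const-ops pattern, assuming write protection is applied only after init:

    /* smp_ops is read-only after init under this patch; a plain
     * structure assignment is rejected once the target is const,
     * so the __init code copies through a cast instead: */
    memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);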
29223diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
29224index 33ca6e4..0ded929 100644
29225--- a/arch/x86/xen/xen-asm_32.S
29226+++ b/arch/x86/xen/xen-asm_32.S
29227@@ -84,14 +84,14 @@ ENTRY(xen_iret)
29228 ESP_OFFSET=4 # bytes pushed onto stack
29229
29230 /*
29231- * Store vcpu_info pointer for easy access. Do it this way to
29232- * avoid having to reload %fs
29233+ * Store vcpu_info pointer for easy access.
29234 */
29235 #ifdef CONFIG_SMP
29236- GET_THREAD_INFO(%eax)
29237- movl %ss:TI_cpu(%eax), %eax
29238- movl %ss:__per_cpu_offset(,%eax,4), %eax
29239- mov %ss:xen_vcpu(%eax), %eax
29240+ push %fs
29241+ mov $(__KERNEL_PERCPU), %eax
29242+ mov %eax, %fs
29243+ mov PER_CPU_VAR(xen_vcpu), %eax
29244+ pop %fs
29245 #else
29246 movl %ss:xen_vcpu, %eax
29247 #endif
29248diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
29249index 7faed58..ba4427c 100644
29250--- a/arch/x86/xen/xen-head.S
29251+++ b/arch/x86/xen/xen-head.S
29252@@ -19,6 +19,17 @@ ENTRY(startup_xen)
29253 #ifdef CONFIG_X86_32
29254 mov %esi,xen_start_info
29255 mov $init_thread_union+THREAD_SIZE,%esp
29256+#ifdef CONFIG_SMP
29257+ movl $cpu_gdt_table,%edi
29258+ movl $__per_cpu_load,%eax
29259+ movw %ax,__KERNEL_PERCPU + 2(%edi)
29260+ rorl $16,%eax
29261+ movb %al,__KERNEL_PERCPU + 4(%edi)
29262+ movb %ah,__KERNEL_PERCPU + 7(%edi)
29263+ movl $__per_cpu_end - 1,%eax
29264+ subl $__per_cpu_start,%eax
29265+ movw %ax,__KERNEL_PERCPU + 0(%edi)
29266+#endif
29267 #else
29268 mov %rsi,xen_start_info
29269 mov $init_thread_union+THREAD_SIZE,%rsp
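Note: the added 32-bit boot stub patches the __KERNEL_PERCPU GDT descriptor so the per-cpu segment is usable from the very first instructions; that is what lets the rewritten xen_iret above load %fs and fetch xen_vcpu with a single segment-relative access instead of walking thread_info. A C rendering of the byte offsets the asm pokes, as a hypothetical helper:

    /* x86 segment descriptor layout: limit[15:0] at byte 0,
     * base[15:0] at byte 2, base[23:16] at byte 4, base[31:24] at byte 7 */
    static void __init patch_percpu_desc(u8 *desc, u32 base, u16 limit)
    {
        *(u16 *)(desc + 0) = limit;          /* __per_cpu_end - 1 - __per_cpu_start */
        *(u16 *)(desc + 2) = base & 0xffff;  /* base = __per_cpu_load */
        desc[4] = (base >> 16) & 0xff;
        desc[7] = (base >> 24) & 0xff;
    }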
29270diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
29271index a95b417..b6dbd0b 100644
29272--- a/arch/x86/xen/xen-ops.h
29273+++ b/arch/x86/xen/xen-ops.h
29274@@ -10,8 +10,6 @@
29275 extern const char xen_hypervisor_callback[];
29276 extern const char xen_failsafe_callback[];
29277
29278-extern void *xen_initial_gdt;
29279-
29280 struct trap_info;
29281 void xen_copy_trap_info(struct trap_info *traps);
29282
29283diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
29284index 525bd3d..ef888b1 100644
29285--- a/arch/xtensa/variants/dc232b/include/variant/core.h
29286+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
29287@@ -119,9 +119,9 @@
29288 ----------------------------------------------------------------------*/
29289
29290 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
29291-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
29292 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
29293 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
29294+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29295
29296 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
29297 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
29298diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
29299index 2f33760..835e50a 100644
29300--- a/arch/xtensa/variants/fsf/include/variant/core.h
29301+++ b/arch/xtensa/variants/fsf/include/variant/core.h
29302@@ -11,6 +11,7 @@
29303 #ifndef _XTENSA_CORE_H
29304 #define _XTENSA_CORE_H
29305
29306+#include <linux/const.h>
29307
29308 /****************************************************************************
29309 Parameters Useful for Any Code, USER or PRIVILEGED
29310@@ -112,9 +113,9 @@
29311 ----------------------------------------------------------------------*/
29312
29313 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
29314-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
29315 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
29316 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
29317+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29318
29319 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
29320 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
29321diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
29322index af00795..2bb8105 100644
29323--- a/arch/xtensa/variants/s6000/include/variant/core.h
29324+++ b/arch/xtensa/variants/s6000/include/variant/core.h
29325@@ -11,6 +11,7 @@
29326 #ifndef _XTENSA_CORE_CONFIGURATION_H
29327 #define _XTENSA_CORE_CONFIGURATION_H
29328
29329+#include <linux/const.h>
29330
29331 /****************************************************************************
29332 Parameters Useful for Any Code, USER or PRIVILEGED
29333@@ -118,9 +119,9 @@
29334 ----------------------------------------------------------------------*/
29335
29336 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
29337-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
29338 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
29339 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
29340+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
29341
29342 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
29343 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
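Note: all three xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE is derived from the log2 line width using _AC(1,UL), so the pair of macros can never drift apart, and the constant works from both assembly and C (where it is typed unsigned long). Sketch:

    #include <linux/const.h>

    /* _AC(X,Y) expands to plain X in assembly and to XY in C */
    #define XCHAL_DCACHE_LINEWIDTH  5
    #define XCHAL_DCACHE_LINESIZE   (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)  /* 32UL in C */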
29344diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
29345index 58916af..9cb880b 100644
29346--- a/block/blk-iopoll.c
29347+++ b/block/blk-iopoll.c
29348@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
29349 }
29350 EXPORT_SYMBOL(blk_iopoll_complete);
29351
29352-static void blk_iopoll_softirq(struct softirq_action *h)
29353+static void blk_iopoll_softirq(void)
29354 {
29355 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
29356 int rearm = 0, budget = blk_iopoll_budget;
29357diff --git a/block/blk-map.c b/block/blk-map.c
29358index 623e1cd..ca1e109 100644
29359--- a/block/blk-map.c
29360+++ b/block/blk-map.c
29361@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
29362 if (!len || !kbuf)
29363 return -EINVAL;
29364
29365- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
29366+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
29367 if (do_copy)
29368 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
29369 else
29370diff --git a/block/blk-softirq.c b/block/blk-softirq.c
29371index 467c8de..4bddc6d 100644
29372--- a/block/blk-softirq.c
29373+++ b/block/blk-softirq.c
29374@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
29375 * Softirq action handler - move entries to local list and loop over them
29376 * while passing them to the queue registered handler.
29377 */
29378-static void blk_done_softirq(struct softirq_action *h)
29379+static void blk_done_softirq(void)
29380 {
29381 struct list_head *cpu_list, local_list;
29382
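Note: both block softirq handlers above drop their struct softirq_action * argument; no handler ever used it, and the patch (in kernel/softirq.c, outside this excerpt) appears to narrow the registered function-pointer type to void (*)(void), shrinking what a corrupted pointer of that type can be made to call. Sketch of the matching registration, with the changed open_softirq() prototype assumed:

    static void blk_done_softirq(void);     /* was (struct softirq_action *h) */

    open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);  /* prototype change assumed */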
29383diff --git a/block/bsg.c b/block/bsg.c
29384index ff64ae3..593560c 100644
29385--- a/block/bsg.c
29386+++ b/block/bsg.c
29387@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
29388 struct sg_io_v4 *hdr, struct bsg_device *bd,
29389 fmode_t has_write_perm)
29390 {
29391+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29392+ unsigned char *cmdptr;
29393+
29394 if (hdr->request_len > BLK_MAX_CDB) {
29395 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
29396 if (!rq->cmd)
29397 return -ENOMEM;
29398- }
29399+ cmdptr = rq->cmd;
29400+ } else
29401+ cmdptr = tmpcmd;
29402
29403- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
29404+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
29405 hdr->request_len))
29406 return -EFAULT;
29407
29408+ if (cmdptr != rq->cmd)
29409+ memcpy(rq->cmd, cmdptr, hdr->request_len);
29410+
29411 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
29412 if (blk_verify_command(rq->cmd, has_write_perm))
29413 return -EPERM;
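Note: blk_fill_sgv4_hdr_rq() now stages the SCSI CDB in a stack buffer when the command fits the embedded rq->__cmd array, committing with memcpy() afterwards; the same staging appears in scsi_ioctl.c below. Presumably this keeps copy_from_user() destinations as whole, independently sized objects for the patch's user-copy checking rather than a field buried inside struct request. Condensed to the scsi_ioctl.c form, with uptr and len standing in for the call-site arguments:

    unsigned char tmpcmd[sizeof(rq->__cmd)];
    unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

    if (copy_from_user(cmdptr, uptr, len))
        return -EFAULT;
    if (cmdptr != rq->cmd)
        memcpy(rq->cmd, cmdptr, len);   /* commit the staged bytes */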
29414diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
29415index 7c668c8..db3521c 100644
29416--- a/block/compat_ioctl.c
29417+++ b/block/compat_ioctl.c
29418@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
29419 err |= __get_user(f->spec1, &uf->spec1);
29420 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
29421 err |= __get_user(name, &uf->name);
29422- f->name = compat_ptr(name);
29423+ f->name = (void __force_kernel *)compat_ptr(name);
29424 if (err) {
29425 err = -EFAULT;
29426 goto out;
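Note: f->name is typed as a kernel pointer but receives a 32-bit user pointer here; __force_kernel looks like the patch's sparse-style annotation for an intentional address-space crossing, keeping its pointer checker quiet at audited sites. Sketch:

    compat_uptr_t name;

    err |= __get_user(name, &uf->name);
    f->name = (void __force_kernel *)compat_ptr(name);  /* audited crossing */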
29427diff --git a/block/partitions/efi.c b/block/partitions/efi.c
29428index 6296b40..417c00f 100644
29429--- a/block/partitions/efi.c
29430+++ b/block/partitions/efi.c
29431@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
29432 if (!gpt)
29433 return NULL;
29434
29435+ if (!le32_to_cpu(gpt->num_partition_entries))
29436+ return NULL;
29437+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
29438+ if (!pte)
29439+ return NULL;
29440+
29441 count = le32_to_cpu(gpt->num_partition_entries) *
29442 le32_to_cpu(gpt->sizeof_partition_entry);
29443- if (!count)
29444- return NULL;
29445- pte = kzalloc(count, GFP_KERNEL);
29446- if (!pte)
29447- return NULL;
29448-
29449 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
29450 (u8 *) pte,
29451 count) < count) {
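Note: alloc_read_gpt_entries() previously multiplied two attacker-controlled on-disk fields and handed the product to kzalloc(); kcalloc(n, size, ...) performs the same zeroed allocation but fails on multiplication overflow instead of returning an undersized buffer that read_lba() would then overrun. Sketch:

    u32 n  = le32_to_cpu(gpt->num_partition_entries);
    u32 sz = le32_to_cpu(gpt->sizeof_partition_entry);

    if (!n)
        return NULL;
    pte = kcalloc(n, sz, GFP_KERNEL);   /* overflow-checked n * sz */
    if (!pte)
        return NULL;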
29452diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
29453index 9a87daa..fb17486 100644
29454--- a/block/scsi_ioctl.c
29455+++ b/block/scsi_ioctl.c
29456@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
29457 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
29458 struct sg_io_hdr *hdr, fmode_t mode)
29459 {
29460- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
29461+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29462+ unsigned char *cmdptr;
29463+
29464+ if (rq->cmd != rq->__cmd)
29465+ cmdptr = rq->cmd;
29466+ else
29467+ cmdptr = tmpcmd;
29468+
29469+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
29470 return -EFAULT;
29471+
29472+ if (cmdptr != rq->cmd)
29473+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
29474+
29475 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
29476 return -EPERM;
29477
29478@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29479 int err;
29480 unsigned int in_len, out_len, bytes, opcode, cmdlen;
29481 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
29482+ unsigned char tmpcmd[sizeof(rq->__cmd)];
29483+ unsigned char *cmdptr;
29484
29485 if (!sic)
29486 return -EINVAL;
29487@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
29488 */
29489 err = -EFAULT;
29490 rq->cmd_len = cmdlen;
29491- if (copy_from_user(rq->cmd, sic->data, cmdlen))
29492+
29493+ if (rq->cmd != rq->__cmd)
29494+ cmdptr = rq->cmd;
29495+ else
29496+ cmdptr = tmpcmd;
29497+
29498+ if (copy_from_user(cmdptr, sic->data, cmdlen))
29499 goto error;
29500
29501+ if (rq->cmd != cmdptr)
29502+ memcpy(rq->cmd, cmdptr, cmdlen);
29503+
29504 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
29505 goto error;
29506
29507diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
29508index 533de95..7d4a8d2 100644
29509--- a/crypto/ablkcipher.c
29510+++ b/crypto/ablkcipher.c
29511@@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
29512 {
29513 struct crypto_report_blkcipher rblkcipher;
29514
29515- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
29516- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
29517- alg->cra_ablkcipher.geniv ?: "<default>");
29518+ strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
29519+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
29520+ sizeof(rblkcipher.geniv));
29521
29522 rblkcipher.blocksize = alg->cra_blocksize;
29523 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
29524@@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
29525 {
29526 struct crypto_report_blkcipher rblkcipher;
29527
29528- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
29529- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
29530- alg->cra_ablkcipher.geniv ?: "<built-in>");
29531+ strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
29532+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
29533+ sizeof(rblkcipher.geniv));
29534
29535 rblkcipher.blocksize = alg->cra_blocksize;
29536 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
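Note: every crypto *_report() helper from here on switches from snprintf() to strncpy() for the same reason: these report structs are copied to userspace whole via nla_put(), snprintf() writes only the string and its terminator, so the tail of each fixed-size field leaked uninitialized stack bytes; strncpy() zero-pads the remainder of the buffer. (strncpy()'s usual non-termination caveat is harmless here since the consumer treats the field as fixed-length.) Sketch:

    struct crypto_report_blkcipher rblkcipher;

    /* zero-pads the rest of .type, so the nla_put() that follows
     * cannot leak stack contents past the string */
    strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));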
29537diff --git a/crypto/aead.c b/crypto/aead.c
29538index 0b8121e..27bc487 100644
29539--- a/crypto/aead.c
29540+++ b/crypto/aead.c
29541@@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
29542 struct crypto_report_aead raead;
29543 struct aead_alg *aead = &alg->cra_aead;
29544
29545- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
29546- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
29547- aead->geniv ?: "<built-in>");
29548+ strncpy(raead.type, "aead", sizeof(raead.type));
29549+ strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
29550
29551 raead.blocksize = alg->cra_blocksize;
29552 raead.maxauthsize = aead->maxauthsize;
29553@@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
29554 struct crypto_report_aead raead;
29555 struct aead_alg *aead = &alg->cra_aead;
29556
29557- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
29558- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
29559+ strncpy(raead.type, "nivaead", sizeof(raead.type));
29560+ strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
29561
29562 raead.blocksize = alg->cra_blocksize;
29563 raead.maxauthsize = aead->maxauthsize;
29564diff --git a/crypto/ahash.c b/crypto/ahash.c
29565index 3887856..793a27f 100644
29566--- a/crypto/ahash.c
29567+++ b/crypto/ahash.c
29568@@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
29569 {
29570 struct crypto_report_hash rhash;
29571
29572- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
29573+ strncpy(rhash.type, "ahash", sizeof(rhash.type));
29574
29575 rhash.blocksize = alg->cra_blocksize;
29576 rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
29577diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
29578index a8d85a1..c44e014 100644
29579--- a/crypto/blkcipher.c
29580+++ b/crypto/blkcipher.c
29581@@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
29582 {
29583 struct crypto_report_blkcipher rblkcipher;
29584
29585- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
29586- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
29587- alg->cra_blkcipher.geniv ?: "<default>");
29588+ strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
29589+ strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
29590+ sizeof(rblkcipher.geniv));
29591
29592 rblkcipher.blocksize = alg->cra_blocksize;
29593 rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
29594diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29595index 7bdd61b..afec999 100644
29596--- a/crypto/cryptd.c
29597+++ b/crypto/cryptd.c
29598@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
29599
29600 struct cryptd_blkcipher_request_ctx {
29601 crypto_completion_t complete;
29602-};
29603+} __no_const;
29604
29605 struct cryptd_hash_ctx {
29606 struct crypto_shash *child;
29607@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
29608
29609 struct cryptd_aead_request_ctx {
29610 crypto_completion_t complete;
29611-};
29612+} __no_const;
29613
29614 static void cryptd_queue_worker(struct work_struct *work);
29615
29616diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
29617index 35d700a..dfd511f 100644
29618--- a/crypto/crypto_user.c
29619+++ b/crypto/crypto_user.c
29620@@ -30,6 +30,8 @@
29621
29622 #include "internal.h"
29623
29624+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
29625+
29626 static DEFINE_MUTEX(crypto_cfg_mutex);
29627
29628 /* The crypto netlink socket */
29629@@ -75,7 +77,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
29630 {
29631 struct crypto_report_cipher rcipher;
29632
29633- snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
29634+ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
29635
29636 rcipher.blocksize = alg->cra_blocksize;
29637 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
29638@@ -94,8 +96,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
29639 {
29640 struct crypto_report_comp rcomp;
29641
29642- snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
29643-
29644+ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
29645 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
29646 sizeof(struct crypto_report_comp), &rcomp))
29647 goto nla_put_failure;
29648@@ -108,12 +109,14 @@ nla_put_failure:
29649 static int crypto_report_one(struct crypto_alg *alg,
29650 struct crypto_user_alg *ualg, struct sk_buff *skb)
29651 {
29652- memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
29653- memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
29654- sizeof(ualg->cru_driver_name));
29655- memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
29656- CRYPTO_MAX_ALG_NAME);
29657+ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
29658+ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
29659+ sizeof(ualg->cru_driver_name));
29660+ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
29661+ sizeof(ualg->cru_module_name));
29662
29663+ ualg->cru_type = 0;
29664+ ualg->cru_mask = 0;
29665 ualg->cru_flags = alg->cra_flags;
29666 ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
29667
29668@@ -122,8 +125,7 @@ static int crypto_report_one(struct crypto_alg *alg,
29669 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
29670 struct crypto_report_larval rl;
29671
29672- snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
29673-
29674+ strncpy(rl.type, "larval", sizeof(rl.type));
29675 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
29676 sizeof(struct crypto_report_larval), &rl))
29677 goto nla_put_failure;
29678@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
29679 struct crypto_dump_info info;
29680 int err;
29681
29682- if (!p->cru_driver_name)
29683+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
29684+ return -EINVAL;
29685+
29686+ if (!p->cru_driver_name[0])
29687 return -EINVAL;
29688
29689 alg = crypto_alg_match(p, 1);
29690@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
29691 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
29692 LIST_HEAD(list);
29693
29694+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
29695+ return -EINVAL;
29696+
29697 if (priority && !strlen(p->cru_driver_name))
29698 return -EINVAL;
29699
29700@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
29701 struct crypto_alg *alg;
29702 struct crypto_user_alg *p = nlmsg_data(nlh);
29703
29704+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
29705+ return -EINVAL;
29706+
29707 alg = crypto_alg_match(p, 1);
29708 if (!alg)
29709 return -ENOENT;
29710@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
29711 struct crypto_user_alg *p = nlmsg_data(nlh);
29712 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
29713
29714+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
29715+ return -EINVAL;
29716+
29717 if (strlen(p->cru_driver_name))
29718 exact = 1;
29719
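Note: crypto_user.c gains two userspace-facing fixes: every request handler first checks that the name fields of the netlink-supplied struct crypto_user_alg are NUL-terminated before anything runs strlen()-family functions over them, and crypto_report_one() switches to strncpy() plus explicit zeroing of cru_type and cru_mask so no stray kernel bytes ride along in the reply. Sketch of the validation macro:

    #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))

    if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
        return -EINVAL;     /* reject unterminated userland strings */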
29720diff --git a/crypto/pcompress.c b/crypto/pcompress.c
29721index 04e083f..7140fe7 100644
29722--- a/crypto/pcompress.c
29723+++ b/crypto/pcompress.c
29724@@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
29725 {
29726 struct crypto_report_comp rpcomp;
29727
29728- snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
29729-
29730+ strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
29731 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
29732 sizeof(struct crypto_report_comp), &rpcomp))
29733 goto nla_put_failure;
29734diff --git a/crypto/rng.c b/crypto/rng.c
29735index f3b7894..e0a25c2 100644
29736--- a/crypto/rng.c
29737+++ b/crypto/rng.c
29738@@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
29739 {
29740 struct crypto_report_rng rrng;
29741
29742- snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
29743+ strncpy(rrng.type, "rng", sizeof(rrng.type));
29744
29745 rrng.seedsize = alg->cra_rng.seedsize;
29746
29747diff --git a/crypto/shash.c b/crypto/shash.c
29748index f426330f..929058a 100644
29749--- a/crypto/shash.c
29750+++ b/crypto/shash.c
29751@@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
29752 struct crypto_report_hash rhash;
29753 struct shash_alg *salg = __crypto_shash_alg(alg);
29754
29755- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
29756+ strncpy(rhash.type, "shash", sizeof(rhash.type));
29757+
29758 rhash.blocksize = alg->cra_blocksize;
29759 rhash.digestsize = salg->digestsize;
29760
29761diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
29762index e6defd8..c26a225 100644
29763--- a/drivers/acpi/apei/cper.c
29764+++ b/drivers/acpi/apei/cper.c
29765@@ -38,12 +38,12 @@
29766 */
29767 u64 cper_next_record_id(void)
29768 {
29769- static atomic64_t seq;
29770+ static atomic64_unchecked_t seq;
29771
29772- if (!atomic64_read(&seq))
29773- atomic64_set(&seq, ((u64)get_seconds()) << 32);
29774+ if (!atomic64_read_unchecked(&seq))
29775+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
29776
29777- return atomic64_inc_return(&seq);
29778+ return atomic64_inc_return_unchecked(&seq);
29779 }
29780 EXPORT_SYMBOL_GPL(cper_next_record_id);
29781
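Note: under the patch's reference-counter hardening, plain atomic64 operations trap on overflow; counters that may legitimately wrap, here a monotonically growing record-ID seed, are moved to the *_unchecked variants to opt out. Sketch (seeding elided):

    static atomic64_unchecked_t seq;    /* exempt from overflow trapping */

    u64 cper_next_record_id(void)
    {
        return atomic64_inc_return_unchecked(&seq);   /* may wrap by design */
    }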
29782diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
29783index 7586544..636a2f0 100644
29784--- a/drivers/acpi/ec_sys.c
29785+++ b/drivers/acpi/ec_sys.c
29786@@ -12,6 +12,7 @@
29787 #include <linux/acpi.h>
29788 #include <linux/debugfs.h>
29789 #include <linux/module.h>
29790+#include <linux/uaccess.h>
29791 #include "internal.h"
29792
29793 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
29794@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29795 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
29796 */
29797 unsigned int size = EC_SPACE_SIZE;
29798- u8 *data = (u8 *) buf;
29799+ u8 data;
29800 loff_t init_off = *off;
29801 int err = 0;
29802
29803@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
29804 size = count;
29805
29806 while (size) {
29807- err = ec_read(*off, &data[*off - init_off]);
29808+ err = ec_read(*off, &data);
29809 if (err)
29810 return err;
29811+ if (put_user(data, &buf[*off - init_off]))
29812+ return -EFAULT;
29813 *off += 1;
29814 size--;
29815 }
29816@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29817
29818 unsigned int size = count;
29819 loff_t init_off = *off;
29820- u8 *data = (u8 *) buf;
29821 int err = 0;
29822
29823 if (*off >= EC_SPACE_SIZE)
29824@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
29825 }
29826
29827 while (size) {
29828- u8 byte_write = data[*off - init_off];
29829+ u8 byte_write;
29830+ if (get_user(byte_write, &buf[*off - init_off]))
29831+ return -EFAULT;
29832 err = ec_write(*off, byte_write);
29833 if (err)
29834 return err;
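Note: acpi_ec_read_io() and acpi_ec_write_io() used to cast the __user buffer to u8 * and index it directly from kernel context, which only works while the kernel may freely dereference user addresses; under SMAP or the patch's UDEREF that faults. The fix moves each byte with get_user()/put_user(). Sketch of the read side:

    u8 data;

    err = ec_read(*off, &data);     /* read one EC register */
    if (err)
        return err;
    if (put_user(data, &buf[*off - init_off]))  /* proper __user store */
        return -EFAULT;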
29835diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29836index 27adb09..ef98796b 100644
29837--- a/drivers/acpi/proc.c
29838+++ b/drivers/acpi/proc.c
29839@@ -362,16 +362,13 @@ acpi_system_write_wakeup_device(struct file *file,
29840 struct list_head *node, *next;
29841 char strbuf[5];
29842 char str[5] = "";
29843- unsigned int len = count;
29844
29845- if (len > 4)
29846- len = 4;
29847- if (len < 0)
29848- return -EFAULT;
29849+ if (count > 4)
29850+ count = 4;
29851
29852- if (copy_from_user(strbuf, buffer, len))
29853+ if (copy_from_user(strbuf, buffer, count))
29854 return -EFAULT;
29855- strbuf[len] = '\0';
29856+ strbuf[count] = '\0';
29857 sscanf(strbuf, "%s", str);
29858
29859 mutex_lock(&acpi_device_lock);
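Note: len was unsigned, so the old "if (len < 0) return -EFAULT;" branch was dead code; the rewrite clamps count directly and drops the impossible test. The next hunk, in processor_driver.c, removes the same kind of always-false comparison (pr->id < 0 on an unsigned id). Sketch:

    if (count > 4)      /* clamp; an unsigned count is never negative */
        count = 4;

    if (copy_from_user(strbuf, buffer, count))
        return -EFAULT;
    strbuf[count] = '\0';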
29860diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
29861index bd4e5dc..0497b66 100644
29862--- a/drivers/acpi/processor_driver.c
29863+++ b/drivers/acpi/processor_driver.c
29864@@ -552,7 +552,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29865 return 0;
29866 #endif
29867
29868- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29869+ BUG_ON(pr->id >= nr_cpu_ids);
29870
29871 /*
29872 * Buggy BIOS check
29873diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29874index c8ac4fe..631818e 100644
29875--- a/drivers/ata/libata-core.c
29876+++ b/drivers/ata/libata-core.c
29877@@ -4779,7 +4779,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29878 struct ata_port *ap;
29879 unsigned int tag;
29880
29881- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29882+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29883 ap = qc->ap;
29884
29885 qc->flags = 0;
29886@@ -4795,7 +4795,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29887 struct ata_port *ap;
29888 struct ata_link *link;
29889
29890- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29891+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29892 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
29893 ap = qc->ap;
29894 link = qc->dev->link;
29895@@ -5891,6 +5891,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29896 return;
29897
29898 spin_lock(&lock);
29899+ pax_open_kernel();
29900
29901 for (cur = ops->inherits; cur; cur = cur->inherits) {
29902 void **inherit = (void **)cur;
29903@@ -5904,8 +5905,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29904 if (IS_ERR(*pp))
29905 *pp = NULL;
29906
29907- ops->inherits = NULL;
29908+ *(struct ata_port_operations **)&ops->inherits = NULL;
29909
29910+ pax_close_kernel();
29911 spin_unlock(&lock);
29912 }
29913
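Note: ata_finalize_port_ops() mutates struct ata_port_operations, which the patch constifies; pax_open_kernel()/pax_close_kernel() temporarily lift the write protection, and the stores go through casts so the compiler accepts writing a const-qualified field. pata_arasan_cf.c below uses the same bracket for a one-off NULLing of set_piomode. The idiom, taking the helpers as given by the patch:

    pax_open_kernel();                          /* make rodata writable */
    *(void **)&ap->ops->set_piomode = NULL;     /* cast defeats const */
    pax_close_kernel();                         /* re-protect */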
29914diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
29915index 371fd2c..0836c78 100644
29916--- a/drivers/ata/pata_arasan_cf.c
29917+++ b/drivers/ata/pata_arasan_cf.c
29918@@ -861,7 +861,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
29919 /* Handle platform specific quirks */
29920 if (pdata->quirk) {
29921 if (pdata->quirk & CF_BROKEN_PIO) {
29922- ap->ops->set_piomode = NULL;
29923+ pax_open_kernel();
29924+ *(void **)&ap->ops->set_piomode = NULL;
29925+ pax_close_kernel();
29926 ap->pio_mask = 0;
29927 }
29928 if (pdata->quirk & CF_BROKEN_MWDMA)
29929diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29930index f9b983a..887b9d8 100644
29931--- a/drivers/atm/adummy.c
29932+++ b/drivers/atm/adummy.c
29933@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29934 vcc->pop(vcc, skb);
29935 else
29936 dev_kfree_skb_any(skb);
29937- atomic_inc(&vcc->stats->tx);
29938+ atomic_inc_unchecked(&vcc->stats->tx);
29939
29940 return 0;
29941 }
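Note: from here to the end of the excerpt, every ATM driver receives the same mechanical conversion: the vcc->stats tx/rx/tx_err/rx_err/rx_drop counters become atomic_unchecked_t (the type change lands in the ATM headers, outside this excerpt), so each atomic_inc()/atomic_add()/atomic_read() on them switches to the *_unchecked form. These are statistics that may legitimately wrap, so they are opted out of overflow trapping wholesale rather than audited one by one. Pattern:

    atomic_inc_unchecked(&vcc->stats->tx);          /* wrap-tolerant stat */
    atomic_add_unchecked(i, &vcc->stats->rx_drop);
    printk("tx: %d\n", atomic_read_unchecked(&vcc->stats->tx));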
29942diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29943index ff7bb8a..568fc0b 100644
29944--- a/drivers/atm/ambassador.c
29945+++ b/drivers/atm/ambassador.c
29946@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29947 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29948
29949 // VC layer stats
29950- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29951+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29952
29953 // free the descriptor
29954 kfree (tx_descr);
29955@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29956 dump_skb ("<<<", vc, skb);
29957
29958 // VC layer stats
29959- atomic_inc(&atm_vcc->stats->rx);
29960+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29961 __net_timestamp(skb);
29962 // end of our responsibility
29963 atm_vcc->push (atm_vcc, skb);
29964@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29965 } else {
29966 PRINTK (KERN_INFO, "dropped over-size frame");
29967 // should we count this?
29968- atomic_inc(&atm_vcc->stats->rx_drop);
29969+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29970 }
29971
29972 } else {
29973@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29974 }
29975
29976 if (check_area (skb->data, skb->len)) {
29977- atomic_inc(&atm_vcc->stats->tx_err);
29978+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29979 return -ENOMEM; // ?
29980 }
29981
29982diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29983index b22d71c..d6e1049 100644
29984--- a/drivers/atm/atmtcp.c
29985+++ b/drivers/atm/atmtcp.c
29986@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29987 if (vcc->pop) vcc->pop(vcc,skb);
29988 else dev_kfree_skb(skb);
29989 if (dev_data) return 0;
29990- atomic_inc(&vcc->stats->tx_err);
29991+ atomic_inc_unchecked(&vcc->stats->tx_err);
29992 return -ENOLINK;
29993 }
29994 size = skb->len+sizeof(struct atmtcp_hdr);
29995@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29996 if (!new_skb) {
29997 if (vcc->pop) vcc->pop(vcc,skb);
29998 else dev_kfree_skb(skb);
29999- atomic_inc(&vcc->stats->tx_err);
30000+ atomic_inc_unchecked(&vcc->stats->tx_err);
30001 return -ENOBUFS;
30002 }
30003 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
30004@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30005 if (vcc->pop) vcc->pop(vcc,skb);
30006 else dev_kfree_skb(skb);
30007 out_vcc->push(out_vcc,new_skb);
30008- atomic_inc(&vcc->stats->tx);
30009- atomic_inc(&out_vcc->stats->rx);
30010+ atomic_inc_unchecked(&vcc->stats->tx);
30011+ atomic_inc_unchecked(&out_vcc->stats->rx);
30012 return 0;
30013 }
30014
30015@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30016 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
30017 read_unlock(&vcc_sklist_lock);
30018 if (!out_vcc) {
30019- atomic_inc(&vcc->stats->tx_err);
30020+ atomic_inc_unchecked(&vcc->stats->tx_err);
30021 goto done;
30022 }
30023 skb_pull(skb,sizeof(struct atmtcp_hdr));
30024@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30025 __net_timestamp(new_skb);
30026 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
30027 out_vcc->push(out_vcc,new_skb);
30028- atomic_inc(&vcc->stats->tx);
30029- atomic_inc(&out_vcc->stats->rx);
30030+ atomic_inc_unchecked(&vcc->stats->tx);
30031+ atomic_inc_unchecked(&out_vcc->stats->rx);
30032 done:
30033 if (vcc->pop) vcc->pop(vcc,skb);
30034 else dev_kfree_skb(skb);
30035diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
30036index 81e44f7..498ea36 100644
30037--- a/drivers/atm/eni.c
30038+++ b/drivers/atm/eni.c
30039@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
30040 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
30041 vcc->dev->number);
30042 length = 0;
30043- atomic_inc(&vcc->stats->rx_err);
30044+ atomic_inc_unchecked(&vcc->stats->rx_err);
30045 }
30046 else {
30047 length = ATM_CELL_SIZE-1; /* no HEC */
30048@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30049 size);
30050 }
30051 eff = length = 0;
30052- atomic_inc(&vcc->stats->rx_err);
30053+ atomic_inc_unchecked(&vcc->stats->rx_err);
30054 }
30055 else {
30056 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
30057@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30058 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
30059 vcc->dev->number,vcc->vci,length,size << 2,descr);
30060 length = eff = 0;
30061- atomic_inc(&vcc->stats->rx_err);
30062+ atomic_inc_unchecked(&vcc->stats->rx_err);
30063 }
30064 }
30065 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
30066@@ -767,7 +767,7 @@ rx_dequeued++;
30067 vcc->push(vcc,skb);
30068 pushed++;
30069 }
30070- atomic_inc(&vcc->stats->rx);
30071+ atomic_inc_unchecked(&vcc->stats->rx);
30072 }
30073 wake_up(&eni_dev->rx_wait);
30074 }
30075@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
30076 PCI_DMA_TODEVICE);
30077 if (vcc->pop) vcc->pop(vcc,skb);
30078 else dev_kfree_skb_irq(skb);
30079- atomic_inc(&vcc->stats->tx);
30080+ atomic_inc_unchecked(&vcc->stats->tx);
30081 wake_up(&eni_dev->tx_wait);
30082 dma_complete++;
30083 }
30084diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
30085index 86fed1b..6dc4721 100644
30086--- a/drivers/atm/firestream.c
30087+++ b/drivers/atm/firestream.c
30088@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
30089 }
30090 }
30091
30092- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30093+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30094
30095 fs_dprintk (FS_DEBUG_TXMEM, "i");
30096 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
30097@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30098 #endif
30099 skb_put (skb, qe->p1 & 0xffff);
30100 ATM_SKB(skb)->vcc = atm_vcc;
30101- atomic_inc(&atm_vcc->stats->rx);
30102+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30103 __net_timestamp(skb);
30104 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
30105 atm_vcc->push (atm_vcc, skb);
30106@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30107 kfree (pe);
30108 }
30109 if (atm_vcc)
30110- atomic_inc(&atm_vcc->stats->rx_drop);
30111+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30112 break;
30113 case 0x1f: /* Reassembly abort: no buffers. */
30114 /* Silently increment error counter. */
30115 if (atm_vcc)
30116- atomic_inc(&atm_vcc->stats->rx_drop);
30117+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30118 break;
30119 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
30120 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
30121diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
30122index 361f5ae..7fc552d 100644
30123--- a/drivers/atm/fore200e.c
30124+++ b/drivers/atm/fore200e.c
30125@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
30126 #endif
30127 /* check error condition */
30128 if (*entry->status & STATUS_ERROR)
30129- atomic_inc(&vcc->stats->tx_err);
30130+ atomic_inc_unchecked(&vcc->stats->tx_err);
30131 else
30132- atomic_inc(&vcc->stats->tx);
30133+ atomic_inc_unchecked(&vcc->stats->tx);
30134 }
30135 }
30136
30137@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30138 if (skb == NULL) {
30139 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
30140
30141- atomic_inc(&vcc->stats->rx_drop);
30142+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30143 return -ENOMEM;
30144 }
30145
30146@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30147
30148 dev_kfree_skb_any(skb);
30149
30150- atomic_inc(&vcc->stats->rx_drop);
30151+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30152 return -ENOMEM;
30153 }
30154
30155 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30156
30157 vcc->push(vcc, skb);
30158- atomic_inc(&vcc->stats->rx);
30159+ atomic_inc_unchecked(&vcc->stats->rx);
30160
30161 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30162
30163@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
30164 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
30165 fore200e->atm_dev->number,
30166 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
30167- atomic_inc(&vcc->stats->rx_err);
30168+ atomic_inc_unchecked(&vcc->stats->rx_err);
30169 }
30170 }
30171
30172@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
30173 goto retry_here;
30174 }
30175
30176- atomic_inc(&vcc->stats->tx_err);
30177+ atomic_inc_unchecked(&vcc->stats->tx_err);
30178
30179 fore200e->tx_sat++;
30180 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
30181diff --git a/drivers/atm/he.c b/drivers/atm/he.c
30182index b182c2f..1c6fa8a 100644
30183--- a/drivers/atm/he.c
30184+++ b/drivers/atm/he.c
30185@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30186
30187 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
30188 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
30189- atomic_inc(&vcc->stats->rx_drop);
30190+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30191 goto return_host_buffers;
30192 }
30193
30194@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30195 RBRQ_LEN_ERR(he_dev->rbrq_head)
30196 ? "LEN_ERR" : "",
30197 vcc->vpi, vcc->vci);
30198- atomic_inc(&vcc->stats->rx_err);
30199+ atomic_inc_unchecked(&vcc->stats->rx_err);
30200 goto return_host_buffers;
30201 }
30202
30203@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30204 vcc->push(vcc, skb);
30205 spin_lock(&he_dev->global_lock);
30206
30207- atomic_inc(&vcc->stats->rx);
30208+ atomic_inc_unchecked(&vcc->stats->rx);
30209
30210 return_host_buffers:
30211 ++pdus_assembled;
30212@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
30213 tpd->vcc->pop(tpd->vcc, tpd->skb);
30214 else
30215 dev_kfree_skb_any(tpd->skb);
30216- atomic_inc(&tpd->vcc->stats->tx_err);
30217+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
30218 }
30219 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
30220 return;
30221@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30222 vcc->pop(vcc, skb);
30223 else
30224 dev_kfree_skb_any(skb);
30225- atomic_inc(&vcc->stats->tx_err);
30226+ atomic_inc_unchecked(&vcc->stats->tx_err);
30227 return -EINVAL;
30228 }
30229
30230@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30231 vcc->pop(vcc, skb);
30232 else
30233 dev_kfree_skb_any(skb);
30234- atomic_inc(&vcc->stats->tx_err);
30235+ atomic_inc_unchecked(&vcc->stats->tx_err);
30236 return -EINVAL;
30237 }
30238 #endif
30239@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30240 vcc->pop(vcc, skb);
30241 else
30242 dev_kfree_skb_any(skb);
30243- atomic_inc(&vcc->stats->tx_err);
30244+ atomic_inc_unchecked(&vcc->stats->tx_err);
30245 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30246 return -ENOMEM;
30247 }
30248@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30249 vcc->pop(vcc, skb);
30250 else
30251 dev_kfree_skb_any(skb);
30252- atomic_inc(&vcc->stats->tx_err);
30253+ atomic_inc_unchecked(&vcc->stats->tx_err);
30254 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30255 return -ENOMEM;
30256 }
30257@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30258 __enqueue_tpd(he_dev, tpd, cid);
30259 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30260
30261- atomic_inc(&vcc->stats->tx);
30262+ atomic_inc_unchecked(&vcc->stats->tx);
30263
30264 return 0;
30265 }
30266diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
30267index 7d01c2a..4e3ac01 100644
30268--- a/drivers/atm/horizon.c
30269+++ b/drivers/atm/horizon.c
30270@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
30271 {
30272 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
30273 // VC layer stats
30274- atomic_inc(&vcc->stats->rx);
30275+ atomic_inc_unchecked(&vcc->stats->rx);
30276 __net_timestamp(skb);
30277 // end of our responsibility
30278 vcc->push (vcc, skb);
30279@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
30280 dev->tx_iovec = NULL;
30281
30282 // VC layer stats
30283- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30284+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30285
30286 // free the skb
30287 hrz_kfree_skb (skb);
30288diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
30289index 8974bd2..b856f85 100644
30290--- a/drivers/atm/idt77252.c
30291+++ b/drivers/atm/idt77252.c
30292@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
30293 else
30294 dev_kfree_skb(skb);
30295
30296- atomic_inc(&vcc->stats->tx);
30297+ atomic_inc_unchecked(&vcc->stats->tx);
30298 }
30299
30300 atomic_dec(&scq->used);
30301@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30302 if ((sb = dev_alloc_skb(64)) == NULL) {
30303 printk("%s: Can't allocate buffers for aal0.\n",
30304 card->name);
30305- atomic_add(i, &vcc->stats->rx_drop);
30306+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
30307 break;
30308 }
30309 if (!atm_charge(vcc, sb->truesize)) {
30310 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
30311 card->name);
30312- atomic_add(i - 1, &vcc->stats->rx_drop);
30313+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
30314 dev_kfree_skb(sb);
30315 break;
30316 }
30317@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30318 ATM_SKB(sb)->vcc = vcc;
30319 __net_timestamp(sb);
30320 vcc->push(vcc, sb);
30321- atomic_inc(&vcc->stats->rx);
30322+ atomic_inc_unchecked(&vcc->stats->rx);
30323
30324 cell += ATM_CELL_PAYLOAD;
30325 }
30326@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30327 "(CDC: %08x)\n",
30328 card->name, len, rpp->len, readl(SAR_REG_CDC));
30329 recycle_rx_pool_skb(card, rpp);
30330- atomic_inc(&vcc->stats->rx_err);
30331+ atomic_inc_unchecked(&vcc->stats->rx_err);
30332 return;
30333 }
30334 if (stat & SAR_RSQE_CRC) {
30335 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
30336 recycle_rx_pool_skb(card, rpp);
30337- atomic_inc(&vcc->stats->rx_err);
30338+ atomic_inc_unchecked(&vcc->stats->rx_err);
30339 return;
30340 }
30341 if (skb_queue_len(&rpp->queue) > 1) {
30342@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30343 RXPRINTK("%s: Can't alloc RX skb.\n",
30344 card->name);
30345 recycle_rx_pool_skb(card, rpp);
30346- atomic_inc(&vcc->stats->rx_err);
30347+ atomic_inc_unchecked(&vcc->stats->rx_err);
30348 return;
30349 }
30350 if (!atm_charge(vcc, skb->truesize)) {
30351@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30352 __net_timestamp(skb);
30353
30354 vcc->push(vcc, skb);
30355- atomic_inc(&vcc->stats->rx);
30356+ atomic_inc_unchecked(&vcc->stats->rx);
30357
30358 return;
30359 }
30360@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30361 __net_timestamp(skb);
30362
30363 vcc->push(vcc, skb);
30364- atomic_inc(&vcc->stats->rx);
30365+ atomic_inc_unchecked(&vcc->stats->rx);
30366
30367 if (skb->truesize > SAR_FB_SIZE_3)
30368 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
30369@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
30370 if (vcc->qos.aal != ATM_AAL0) {
30371 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
30372 card->name, vpi, vci);
30373- atomic_inc(&vcc->stats->rx_drop);
30374+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30375 goto drop;
30376 }
30377
30378 if ((sb = dev_alloc_skb(64)) == NULL) {
30379 printk("%s: Can't allocate buffers for AAL0.\n",
30380 card->name);
30381- atomic_inc(&vcc->stats->rx_err);
30382+ atomic_inc_unchecked(&vcc->stats->rx_err);
30383 goto drop;
30384 }
30385
30386@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
30387 ATM_SKB(sb)->vcc = vcc;
30388 __net_timestamp(sb);
30389 vcc->push(vcc, sb);
30390- atomic_inc(&vcc->stats->rx);
30391+ atomic_inc_unchecked(&vcc->stats->rx);
30392
30393 drop:
30394 skb_pull(queue, 64);
30395@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30396
30397 if (vc == NULL) {
30398 printk("%s: NULL connection in send().\n", card->name);
30399- atomic_inc(&vcc->stats->tx_err);
30400+ atomic_inc_unchecked(&vcc->stats->tx_err);
30401 dev_kfree_skb(skb);
30402 return -EINVAL;
30403 }
30404 if (!test_bit(VCF_TX, &vc->flags)) {
30405 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
30406- atomic_inc(&vcc->stats->tx_err);
30407+ atomic_inc_unchecked(&vcc->stats->tx_err);
30408 dev_kfree_skb(skb);
30409 return -EINVAL;
30410 }
30411@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30412 break;
30413 default:
30414 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30415- atomic_inc(&vcc->stats->tx_err);
30416+ atomic_inc_unchecked(&vcc->stats->tx_err);
30417 dev_kfree_skb(skb);
30418 return -EINVAL;
30419 }
30420
30421 if (skb_shinfo(skb)->nr_frags != 0) {
30422 printk("%s: No scatter-gather yet.\n", card->name);
30423- atomic_inc(&vcc->stats->tx_err);
30424+ atomic_inc_unchecked(&vcc->stats->tx_err);
30425 dev_kfree_skb(skb);
30426 return -EINVAL;
30427 }
30428@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30429
30430 err = queue_skb(card, vc, skb, oam);
30431 if (err) {
30432- atomic_inc(&vcc->stats->tx_err);
30433+ atomic_inc_unchecked(&vcc->stats->tx_err);
30434 dev_kfree_skb(skb);
30435 return err;
30436 }
30437@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30438 skb = dev_alloc_skb(64);
30439 if (!skb) {
30440 printk("%s: Out of memory in send_oam().\n", card->name);
30441- atomic_inc(&vcc->stats->tx_err);
30442+ atomic_inc_unchecked(&vcc->stats->tx_err);
30443 return -ENOMEM;
30444 }
30445 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30446diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30447index 96cce6d..62c3ec5 100644
30448--- a/drivers/atm/iphase.c
30449+++ b/drivers/atm/iphase.c
30450@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
30451 status = (u_short) (buf_desc_ptr->desc_mode);
30452 if (status & (RX_CER | RX_PTE | RX_OFL))
30453 {
30454- atomic_inc(&vcc->stats->rx_err);
30455+ atomic_inc_unchecked(&vcc->stats->rx_err);
30456 IF_ERR(printk("IA: bad packet, dropping it");)
30457 if (status & RX_CER) {
30458 IF_ERR(printk(" cause: packet CRC error\n");)
30459@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
30460 len = dma_addr - buf_addr;
30461 if (len > iadev->rx_buf_sz) {
30462 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30463- atomic_inc(&vcc->stats->rx_err);
30464+ atomic_inc_unchecked(&vcc->stats->rx_err);
30465 goto out_free_desc;
30466 }
30467
30468@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30469 ia_vcc = INPH_IA_VCC(vcc);
30470 if (ia_vcc == NULL)
30471 {
30472- atomic_inc(&vcc->stats->rx_err);
30473+ atomic_inc_unchecked(&vcc->stats->rx_err);
30474 atm_return(vcc, skb->truesize);
30475 dev_kfree_skb_any(skb);
30476 goto INCR_DLE;
30477@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30478 if ((length > iadev->rx_buf_sz) || (length >
30479 (skb->len - sizeof(struct cpcs_trailer))))
30480 {
30481- atomic_inc(&vcc->stats->rx_err);
30482+ atomic_inc_unchecked(&vcc->stats->rx_err);
30483 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30484 length, skb->len);)
30485 atm_return(vcc, skb->truesize);
30486@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30487
30488 IF_RX(printk("rx_dle_intr: skb push");)
30489 vcc->push(vcc,skb);
30490- atomic_inc(&vcc->stats->rx);
30491+ atomic_inc_unchecked(&vcc->stats->rx);
30492 iadev->rx_pkt_cnt++;
30493 }
30494 INCR_DLE:
30495@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30496 {
30497 struct k_sonet_stats *stats;
30498 stats = &PRIV(_ia_dev[board])->sonet_stats;
30499- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30500- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30501- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30502- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30503- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30504- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30505- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30506- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30507- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30508+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30509+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30510+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30511+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30512+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30513+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30514+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30515+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30516+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30517 }
30518 ia_cmds.status = 0;
30519 break;
30520@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30521 if ((desc == 0) || (desc > iadev->num_tx_desc))
30522 {
30523 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30524- atomic_inc(&vcc->stats->tx);
30525+ atomic_inc_unchecked(&vcc->stats->tx);
30526 if (vcc->pop)
30527 vcc->pop(vcc, skb);
30528 else
30529@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30530 ATM_DESC(skb) = vcc->vci;
30531 skb_queue_tail(&iadev->tx_dma_q, skb);
30532
30533- atomic_inc(&vcc->stats->tx);
30534+ atomic_inc_unchecked(&vcc->stats->tx);
30535 iadev->tx_pkt_cnt++;
30536 /* Increment transaction counter */
30537 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30538
30539 #if 0
30540 /* add flow control logic */
30541- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30542+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30543 if (iavcc->vc_desc_cnt > 10) {
30544 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30545 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30546diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30547index 68c7588..7036683 100644
30548--- a/drivers/atm/lanai.c
30549+++ b/drivers/atm/lanai.c
30550@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30551 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30552 lanai_endtx(lanai, lvcc);
30553 lanai_free_skb(lvcc->tx.atmvcc, skb);
30554- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30555+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30556 }
30557
30558 /* Try to fill the buffer - don't call unless there is backlog */
30559@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30560 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30561 __net_timestamp(skb);
30562 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30563- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30564+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30565 out:
30566 lvcc->rx.buf.ptr = end;
30567 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30568@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30569 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30570 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30571 lanai->stats.service_rxnotaal5++;
30572- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30573+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30574 return 0;
30575 }
30576 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30577@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30578 int bytes;
30579 read_unlock(&vcc_sklist_lock);
30580 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30581- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30582+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30583 lvcc->stats.x.aal5.service_trash++;
30584 bytes = (SERVICE_GET_END(s) * 16) -
30585 (((unsigned long) lvcc->rx.buf.ptr) -
30586@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30587 }
30588 if (s & SERVICE_STREAM) {
30589 read_unlock(&vcc_sklist_lock);
30590- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30591+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30592 lvcc->stats.x.aal5.service_stream++;
30593 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30594 "PDU on VCI %d!\n", lanai->number, vci);
30595@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30596 return 0;
30597 }
30598 DPRINTK("got rx crc error on vci %d\n", vci);
30599- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30600+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30601 lvcc->stats.x.aal5.service_rxcrc++;
30602 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30603 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30604diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30605index 1c70c45..300718d 100644
30606--- a/drivers/atm/nicstar.c
30607+++ b/drivers/atm/nicstar.c
30608@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30609 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
30610 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
30611 card->index);
30612- atomic_inc(&vcc->stats->tx_err);
30613+ atomic_inc_unchecked(&vcc->stats->tx_err);
30614 dev_kfree_skb_any(skb);
30615 return -EINVAL;
30616 }
30617@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30618 if (!vc->tx) {
30619 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
30620 card->index);
30621- atomic_inc(&vcc->stats->tx_err);
30622+ atomic_inc_unchecked(&vcc->stats->tx_err);
30623 dev_kfree_skb_any(skb);
30624 return -EINVAL;
30625 }
30626@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30627 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
30628 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
30629 card->index);
30630- atomic_inc(&vcc->stats->tx_err);
30631+ atomic_inc_unchecked(&vcc->stats->tx_err);
30632 dev_kfree_skb_any(skb);
30633 return -EINVAL;
30634 }
30635
30636 if (skb_shinfo(skb)->nr_frags != 0) {
30637 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30638- atomic_inc(&vcc->stats->tx_err);
30639+ atomic_inc_unchecked(&vcc->stats->tx_err);
30640 dev_kfree_skb_any(skb);
30641 return -EINVAL;
30642 }
30643@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30644 }
30645
30646 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
30647- atomic_inc(&vcc->stats->tx_err);
30648+ atomic_inc_unchecked(&vcc->stats->tx_err);
30649 dev_kfree_skb_any(skb);
30650 return -EIO;
30651 }
30652- atomic_inc(&vcc->stats->tx);
30653+ atomic_inc_unchecked(&vcc->stats->tx);
30654
30655 return 0;
30656 }
30657@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30658 printk
30659 ("nicstar%d: Can't allocate buffers for aal0.\n",
30660 card->index);
30661- atomic_add(i, &vcc->stats->rx_drop);
30662+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
30663 break;
30664 }
30665 if (!atm_charge(vcc, sb->truesize)) {
30666 RXPRINTK
30667 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
30668 card->index);
30669- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30670+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
30671 dev_kfree_skb_any(sb);
30672 break;
30673 }
30674@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30675 ATM_SKB(sb)->vcc = vcc;
30676 __net_timestamp(sb);
30677 vcc->push(vcc, sb);
30678- atomic_inc(&vcc->stats->rx);
30679+ atomic_inc_unchecked(&vcc->stats->rx);
30680 cell += ATM_CELL_PAYLOAD;
30681 }
30682
30683@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30684 if (iovb == NULL) {
30685 printk("nicstar%d: Out of iovec buffers.\n",
30686 card->index);
30687- atomic_inc(&vcc->stats->rx_drop);
30688+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30689 recycle_rx_buf(card, skb);
30690 return;
30691 }
30692@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30693 small or large buffer itself. */
30694 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
30695 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30696- atomic_inc(&vcc->stats->rx_err);
30697+ atomic_inc_unchecked(&vcc->stats->rx_err);
30698 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30699 NS_MAX_IOVECS);
30700 NS_PRV_IOVCNT(iovb) = 0;
30701@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30702 ("nicstar%d: Expected a small buffer, and this is not one.\n",
30703 card->index);
30704 which_list(card, skb);
30705- atomic_inc(&vcc->stats->rx_err);
30706+ atomic_inc_unchecked(&vcc->stats->rx_err);
30707 recycle_rx_buf(card, skb);
30708 vc->rx_iov = NULL;
30709 recycle_iov_buf(card, iovb);
30710@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30711 ("nicstar%d: Expected a large buffer, and this is not one.\n",
30712 card->index);
30713 which_list(card, skb);
30714- atomic_inc(&vcc->stats->rx_err);
30715+ atomic_inc_unchecked(&vcc->stats->rx_err);
30716 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30717 NS_PRV_IOVCNT(iovb));
30718 vc->rx_iov = NULL;
30719@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30720 printk(" - PDU size mismatch.\n");
30721 else
30722 printk(".\n");
30723- atomic_inc(&vcc->stats->rx_err);
30724+ atomic_inc_unchecked(&vcc->stats->rx_err);
30725 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
30726 NS_PRV_IOVCNT(iovb));
30727 vc->rx_iov = NULL;
30728@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30729 /* skb points to a small buffer */
30730 if (!atm_charge(vcc, skb->truesize)) {
30731 push_rxbufs(card, skb);
30732- atomic_inc(&vcc->stats->rx_drop);
30733+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30734 } else {
30735 skb_put(skb, len);
30736 dequeue_sm_buf(card, skb);
30737@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30738 ATM_SKB(skb)->vcc = vcc;
30739 __net_timestamp(skb);
30740 vcc->push(vcc, skb);
30741- atomic_inc(&vcc->stats->rx);
30742+ atomic_inc_unchecked(&vcc->stats->rx);
30743 }
30744 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
30745 struct sk_buff *sb;
30746@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30747 if (len <= NS_SMBUFSIZE) {
30748 if (!atm_charge(vcc, sb->truesize)) {
30749 push_rxbufs(card, sb);
30750- atomic_inc(&vcc->stats->rx_drop);
30751+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30752 } else {
30753 skb_put(sb, len);
30754 dequeue_sm_buf(card, sb);
30755@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30756 ATM_SKB(sb)->vcc = vcc;
30757 __net_timestamp(sb);
30758 vcc->push(vcc, sb);
30759- atomic_inc(&vcc->stats->rx);
30760+ atomic_inc_unchecked(&vcc->stats->rx);
30761 }
30762
30763 push_rxbufs(card, skb);
30764@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30765
30766 if (!atm_charge(vcc, skb->truesize)) {
30767 push_rxbufs(card, skb);
30768- atomic_inc(&vcc->stats->rx_drop);
30769+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30770 } else {
30771 dequeue_lg_buf(card, skb);
30772 #ifdef NS_USE_DESTRUCTORS
30773@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30774 ATM_SKB(skb)->vcc = vcc;
30775 __net_timestamp(skb);
30776 vcc->push(vcc, skb);
30777- atomic_inc(&vcc->stats->rx);
30778+ atomic_inc_unchecked(&vcc->stats->rx);
30779 }
30780
30781 push_rxbufs(card, sb);
30782@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30783 printk
30784 ("nicstar%d: Out of huge buffers.\n",
30785 card->index);
30786- atomic_inc(&vcc->stats->rx_drop);
30787+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30788 recycle_iovec_rx_bufs(card,
30789 (struct iovec *)
30790 iovb->data,
30791@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30792 card->hbpool.count++;
30793 } else
30794 dev_kfree_skb_any(hb);
30795- atomic_inc(&vcc->stats->rx_drop);
30796+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30797 } else {
30798 /* Copy the small buffer to the huge buffer */
30799 sb = (struct sk_buff *)iov->iov_base;
30800@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
30801 #endif /* NS_USE_DESTRUCTORS */
30802 __net_timestamp(hb);
30803 vcc->push(vcc, hb);
30804- atomic_inc(&vcc->stats->rx);
30805+ atomic_inc_unchecked(&vcc->stats->rx);
30806 }
30807 }
30808
30809diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30810index 1853a45..cf2426d 100644
30811--- a/drivers/atm/solos-pci.c
30812+++ b/drivers/atm/solos-pci.c
30813@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
30814 }
30815 atm_charge(vcc, skb->truesize);
30816 vcc->push(vcc, skb);
30817- atomic_inc(&vcc->stats->rx);
30818+ atomic_inc_unchecked(&vcc->stats->rx);
30819 break;
30820
30821 case PKT_STATUS:
30822@@ -1010,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30823 vcc = SKB_CB(oldskb)->vcc;
30824
30825 if (vcc) {
30826- atomic_inc(&vcc->stats->tx);
30827+ atomic_inc_unchecked(&vcc->stats->tx);
30828 solos_pop(vcc, oldskb);
30829 } else
30830 dev_kfree_skb_irq(oldskb);
30831diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30832index 0215934..ce9f5b1 100644
30833--- a/drivers/atm/suni.c
30834+++ b/drivers/atm/suni.c
30835@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30836
30837
30838 #define ADD_LIMITED(s,v) \
30839- atomic_add((v),&stats->s); \
30840- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30841+ atomic_add_unchecked((v),&stats->s); \
30842+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30843
30844
30845 static void suni_hz(unsigned long from_timer)
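
[Annotation] ADD_LIMITED above already saturates: it adds the hardware delta and, once the signed counter wraps negative, pins it at INT_MAX; the hunk only swaps in the unchecked atomic helpers. The clamp itself as a plain-C sketch (hypothetical helper name; the kernel builds with -fno-strict-overflow, which makes the signed-wrap test well-defined):

#include <limits.h>

static void add_limited_sketch(int *stat, int delta)
{
	*stat += delta;		/* may wrap past INT_MAX */
	if (*stat < 0)		/* wrapped: saturate instead of going negative */
		*stat = INT_MAX;
}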
30846diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30847index 5120a96..e2572bd 100644
30848--- a/drivers/atm/uPD98402.c
30849+++ b/drivers/atm/uPD98402.c
30850@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30851 struct sonet_stats tmp;
30852 int error = 0;
30853
30854- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30855+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30856 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30857 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30858 if (zero && !error) {
30859@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30860
30861
30862 #define ADD_LIMITED(s,v) \
30863- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30864- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30865- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30866+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30867+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30868+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30869
30870
30871 static void stat_event(struct atm_dev *dev)
30872@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
30873 if (reason & uPD98402_INT_PFM) stat_event(dev);
30874 if (reason & uPD98402_INT_PCO) {
30875 (void) GET(PCOCR); /* clear interrupt cause */
30876- atomic_add(GET(HECCT),
30877+ atomic_add_unchecked(GET(HECCT),
30878 &PRIV(dev)->sonet_stats.uncorr_hcs);
30879 }
30880 if ((reason & uPD98402_INT_RFO) &&
30881@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
30882 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30883 uPD98402_INT_LOS),PIMR); /* enable them */
30884 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30885- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30886- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30887- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30888+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30889+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30890+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30891 return 0;
30892 }
30893
30894diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30895index abe4e20..83c4727 100644
30896--- a/drivers/atm/zatm.c
30897+++ b/drivers/atm/zatm.c
30898@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30899 }
30900 if (!size) {
30901 dev_kfree_skb_irq(skb);
30902- if (vcc) atomic_inc(&vcc->stats->rx_err);
30903+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30904 continue;
30905 }
30906 if (!atm_charge(vcc,skb->truesize)) {
30907@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30908 skb->len = size;
30909 ATM_SKB(skb)->vcc = vcc;
30910 vcc->push(vcc,skb);
30911- atomic_inc(&vcc->stats->rx);
30912+ atomic_inc_unchecked(&vcc->stats->rx);
30913 }
30914 zout(pos & 0xffff,MTA(mbx));
30915 #if 0 /* probably a stupid idea */
30916@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30917 skb_queue_head(&zatm_vcc->backlog,skb);
30918 break;
30919 }
30920- atomic_inc(&vcc->stats->tx);
30921+ atomic_inc_unchecked(&vcc->stats->tx);
30922 wake_up(&zatm_vcc->tx_wait);
30923 }
30924
30925diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
30926index 147d1a4..d0fd4b0 100644
30927--- a/drivers/base/devtmpfs.c
30928+++ b/drivers/base/devtmpfs.c
30929@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
30930 if (!thread)
30931 return 0;
30932
30933- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
30934+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
30935 if (err)
30936 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
30937 else
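
[Annotation] devtmpfs hands kernel strings to sys_mount(), whose parameters are declared __user; the added __force_user casts (apparently grsecurity shorthand for `__force __user`) tell sparse the address-space crossing is deliberate, which matters more under PaX configurations that separate kernel and user address spaces. Roughly how the annotations fit together — a sketch of the compiler.h-style definitions with a hypothetical sys_mount-shaped prototype:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

long sys_mount_like(char __user *dev, char __user *dir, char __user *type,
		    unsigned long flags, void __user *data);

static int mount_devtmpfs_sketch(const char *mntdir)
{
	/* kernel strings deliberately passed through a __user-typed API;
	 * flags would be MS_SILENT in the real call */
	return sys_mount_like((char __force __user *)"devtmpfs",
			      (char __force __user *)mntdir,
			      (char __force __user *)"devtmpfs",
			      0, NULL);
}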
30938diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
30939index e6ee5e8..98ad7fc 100644
30940--- a/drivers/base/power/wakeup.c
30941+++ b/drivers/base/power/wakeup.c
30942@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
30943 * They need to be modified together atomically, so it's better to use one
30944 * atomic variable to hold them both.
30945 */
30946-static atomic_t combined_event_count = ATOMIC_INIT(0);
30947+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
30948
30949 #define IN_PROGRESS_BITS (sizeof(int) * 4)
30950 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
30951
30952 static void split_counters(unsigned int *cnt, unsigned int *inpr)
30953 {
30954- unsigned int comb = atomic_read(&combined_event_count);
30955+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
30956
30957 *cnt = (comb >> IN_PROGRESS_BITS);
30958 *inpr = comb & MAX_IN_PROGRESS;
30959@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
30960 ws->start_prevent_time = ws->last_time;
30961
30962 /* Increment the counter of events in progress. */
30963- cec = atomic_inc_return(&combined_event_count);
30964+ cec = atomic_inc_return_unchecked(&combined_event_count);
30965
30966 trace_wakeup_source_activate(ws->name, cec);
30967 }
30968@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
30969 * Increment the counter of registered wakeup events and decrement the
30970 * couter of wakeup events in progress simultaneously.
30971 */
30972- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
30973+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
30974 trace_wakeup_source_deactivate(ws->name, cec);
30975
30976 split_counters(&cnt, &inpr);
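
[Annotation] combined_event_count packs two counters into one atomic: the low IN_PROGRESS_BITS hold wakeup events in progress, the high bits the running total. Deactivation adds MAX_IN_PROGRESS = 2^IN_PROGRESS_BITS - 1, which is arithmetic shorthand for "total += 1, in-progress -= 1" in a single atomic op; the hunk only reroutes this through the unchecked helpers, since the packing is intentional modular arithmetic. A standalone demonstration of the packing:

#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)		/* 16 for a 32-bit int */
#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)

int main(void)
{
	unsigned int comb = 0;

	comb += 1;			/* activate: one event in progress */
	comb += MAX_IN_PROGRESS;	/* deactivate: total++, inpr-- in one add */

	printf("total=%u inpr=%u\n",
	       comb >> IN_PROGRESS_BITS, comb & MAX_IN_PROGRESS);
	/* prints: total=1 inpr=0 */
	return 0;
}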
30977diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30978index ca83f96..69d4ea9 100644
30979--- a/drivers/block/cciss.c
30980+++ b/drivers/block/cciss.c
30981@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30982 int err;
30983 u32 cp;
30984
30985+ memset(&arg64, 0, sizeof(arg64));
30986+
30987 err = 0;
30988 err |=
30989 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30990@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
30991 while (!list_empty(&h->reqQ)) {
30992 c = list_entry(h->reqQ.next, CommandList_struct, list);
30993 /* can't do anything if fifo is full */
30994- if ((h->access.fifo_full(h))) {
30995+ if ((h->access->fifo_full(h))) {
30996 dev_warn(&h->pdev->dev, "fifo full\n");
30997 break;
30998 }
30999@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
31000 h->Qdepth--;
31001
31002 /* Tell the controller execute command */
31003- h->access.submit_command(h, c);
31004+ h->access->submit_command(h, c);
31005
31006 /* Put job onto the completed Q */
31007 addQ(&h->cmpQ, c);
31008@@ -3443,17 +3445,17 @@ startio:
31009
31010 static inline unsigned long get_next_completion(ctlr_info_t *h)
31011 {
31012- return h->access.command_completed(h);
31013+ return h->access->command_completed(h);
31014 }
31015
31016 static inline int interrupt_pending(ctlr_info_t *h)
31017 {
31018- return h->access.intr_pending(h);
31019+ return h->access->intr_pending(h);
31020 }
31021
31022 static inline long interrupt_not_for_us(ctlr_info_t *h)
31023 {
31024- return ((h->access.intr_pending(h) == 0) ||
31025+ return ((h->access->intr_pending(h) == 0) ||
31026 (h->interrupts_enabled == 0));
31027 }
31028
31029@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
31030 u32 a;
31031
31032 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31033- return h->access.command_completed(h);
31034+ return h->access->command_completed(h);
31035
31036 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31037 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31038@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
31039 trans_support & CFGTBL_Trans_use_short_tags);
31040
31041 /* Change the access methods to the performant access methods */
31042- h->access = SA5_performant_access;
31043+ h->access = &SA5_performant_access;
31044 h->transMethod = CFGTBL_Trans_Performant;
31045
31046 return;
31047@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
31048 if (prod_index < 0)
31049 return -ENODEV;
31050 h->product_name = products[prod_index].product_name;
31051- h->access = *(products[prod_index].access);
31052+ h->access = products[prod_index].access;
31053
31054 if (cciss_board_disabled(h)) {
31055 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31056@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
31057 }
31058
31059 /* make sure the board interrupts are off */
31060- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31061+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31062 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
31063 if (rc)
31064 goto clean2;
31065@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
31066 * fake ones to scoop up any residual completions.
31067 */
31068 spin_lock_irqsave(&h->lock, flags);
31069- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31070+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31071 spin_unlock_irqrestore(&h->lock, flags);
31072 free_irq(h->intr[h->intr_mode], h);
31073 rc = cciss_request_irq(h, cciss_msix_discard_completions,
31074@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
31075 dev_info(&h->pdev->dev, "Board READY.\n");
31076 dev_info(&h->pdev->dev,
31077 "Waiting for stale completions to drain.\n");
31078- h->access.set_intr_mask(h, CCISS_INTR_ON);
31079+ h->access->set_intr_mask(h, CCISS_INTR_ON);
31080 msleep(10000);
31081- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31082+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31083
31084 rc = controller_reset_failed(h->cfgtable);
31085 if (rc)
31086@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
31087 cciss_scsi_setup(h);
31088
31089 /* Turn the interrupts on so we can service requests */
31090- h->access.set_intr_mask(h, CCISS_INTR_ON);
31091+ h->access->set_intr_mask(h, CCISS_INTR_ON);
31092
31093 /* Get the firmware version */
31094 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
31095@@ -5210,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
31096 kfree(flush_buf);
31097 if (return_code != IO_OK)
31098 dev_warn(&h->pdev->dev, "Error flushing cache\n");
31099- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31100+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31101 free_irq(h->intr[h->intr_mode], h);
31102 }
31103
31104diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
31105index 7fda30e..eb5dfe0 100644
31106--- a/drivers/block/cciss.h
31107+++ b/drivers/block/cciss.h
31108@@ -101,7 +101,7 @@ struct ctlr_info
31109 /* information about each logical volume */
31110 drive_info_struct *drv[CISS_MAX_LUN];
31111
31112- struct access_method access;
31113+ struct access_method *access;
31114
31115 /* queue and queue Info */
31116 struct list_head reqQ;
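
[Annotation] The cciss change (mirrored in cpqarray just below) is structural rather than cosmetic: each controller used to hold a writable by-value copy of its access_method function-pointer table; storing a pointer to the one shared table instead lets grsecurity's constification keep those function pointers in read-only memory, and every call site moves from h->access.fn(h) to h->access->fn(h). The shape of the change, sketched with hypothetical _sketch types:

struct access_method_sketch {
	void (*submit_command)(void *h, void *c);
	unsigned long (*command_completed)(void *h);
};

static void sa5_submit_sketch(void *h, void *c) { (void)h; (void)c; }
static unsigned long sa5_completed_sketch(void *h) { (void)h; return 0; }

static const struct access_method_sketch SA5_access_sketch = {
	.submit_command    = sa5_submit_sketch,
	.command_completed = sa5_completed_sketch,
};

struct ctlr_info_sketch {
	const struct access_method_sketch *access;	/* was: struct access_method access; */
};

static void bind_controller_sketch(struct ctlr_info_sketch *h)
{
	h->access = &SA5_access_sketch;		/* was: h->access = *(products[i].access); */
	h->access->submit_command(h, 0);	/* call sites: '.' becomes '->' */
}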
31117diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
31118index 9125bbe..eede5c8 100644
31119--- a/drivers/block/cpqarray.c
31120+++ b/drivers/block/cpqarray.c
31121@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31122 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
31123 goto Enomem4;
31124 }
31125- hba[i]->access.set_intr_mask(hba[i], 0);
31126+ hba[i]->access->set_intr_mask(hba[i], 0);
31127 if (request_irq(hba[i]->intr, do_ida_intr,
31128 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
31129 {
31130@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31131 add_timer(&hba[i]->timer);
31132
31133 /* Enable IRQ now that spinlock and rate limit timer are set up */
31134- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31135+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31136
31137 for(j=0; j<NWD; j++) {
31138 struct gendisk *disk = ida_gendisk[i][j];
31139@@ -694,7 +694,7 @@ DBGINFO(
31140 for(i=0; i<NR_PRODUCTS; i++) {
31141 if (board_id == products[i].board_id) {
31142 c->product_name = products[i].product_name;
31143- c->access = *(products[i].access);
31144+ c->access = products[i].access;
31145 break;
31146 }
31147 }
31148@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
31149 hba[ctlr]->intr = intr;
31150 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
31151 hba[ctlr]->product_name = products[j].product_name;
31152- hba[ctlr]->access = *(products[j].access);
31153+ hba[ctlr]->access = products[j].access;
31154 hba[ctlr]->ctlr = ctlr;
31155 hba[ctlr]->board_id = board_id;
31156 hba[ctlr]->pci_dev = NULL; /* not PCI */
31157@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
31158
31159 while((c = h->reqQ) != NULL) {
31160 /* Can't do anything if we're busy */
31161- if (h->access.fifo_full(h) == 0)
31162+ if (h->access->fifo_full(h) == 0)
31163 return;
31164
31165 /* Get the first entry from the request Q */
31166@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
31167 h->Qdepth--;
31168
31169 /* Tell the controller to do our bidding */
31170- h->access.submit_command(h, c);
31171+ h->access->submit_command(h, c);
31172
31173 /* Get onto the completion Q */
31174 addQ(&h->cmpQ, c);
31175@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31176 unsigned long flags;
31177 __u32 a,a1;
31178
31179- istat = h->access.intr_pending(h);
31180+ istat = h->access->intr_pending(h);
31181 /* Is this interrupt for us? */
31182 if (istat == 0)
31183 return IRQ_NONE;
31184@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31185 */
31186 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
31187 if (istat & FIFO_NOT_EMPTY) {
31188- while((a = h->access.command_completed(h))) {
31189+ while((a = h->access->command_completed(h))) {
31190 a1 = a; a &= ~3;
31191 if ((c = h->cmpQ) == NULL)
31192 {
31193@@ -1449,11 +1449,11 @@ static int sendcmd(
31194 /*
31195 * Disable interrupt
31196 */
31197- info_p->access.set_intr_mask(info_p, 0);
31198+ info_p->access->set_intr_mask(info_p, 0);
31199 /* Make sure there is room in the command FIFO */
31200 /* Actually it should be completely empty at this time. */
31201 for (i = 200000; i > 0; i--) {
31202- temp = info_p->access.fifo_full(info_p);
31203+ temp = info_p->access->fifo_full(info_p);
31204 if (temp != 0) {
31205 break;
31206 }
31207@@ -1466,7 +1466,7 @@ DBG(
31208 /*
31209 * Send the cmd
31210 */
31211- info_p->access.submit_command(info_p, c);
31212+ info_p->access->submit_command(info_p, c);
31213 complete = pollcomplete(ctlr);
31214
31215 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
31216@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
31217 * we check the new geometry. Then turn interrupts back on when
31218 * we're done.
31219 */
31220- host->access.set_intr_mask(host, 0);
31221+ host->access->set_intr_mask(host, 0);
31222 getgeometry(ctlr);
31223- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
31224+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
31225
31226 for(i=0; i<NWD; i++) {
31227 struct gendisk *disk = ida_gendisk[ctlr][i];
31228@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
31229 /* Wait (up to 2 seconds) for a command to complete */
31230
31231 for (i = 200000; i > 0; i--) {
31232- done = hba[ctlr]->access.command_completed(hba[ctlr]);
31233+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
31234 if (done == 0) {
31235 udelay(10); /* a short fixed delay */
31236 } else
31237diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
31238index be73e9d..7fbf140 100644
31239--- a/drivers/block/cpqarray.h
31240+++ b/drivers/block/cpqarray.h
31241@@ -99,7 +99,7 @@ struct ctlr_info {
31242 drv_info_t drv[NWD];
31243 struct proc_dir_entry *proc;
31244
31245- struct access_method access;
31246+ struct access_method *access;
31247
31248 cmdlist_t *reqQ;
31249 cmdlist_t *cmpQ;
31250diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
31251index b953cc7..e3dc580 100644
31252--- a/drivers/block/drbd/drbd_int.h
31253+++ b/drivers/block/drbd/drbd_int.h
31254@@ -735,7 +735,7 @@ struct drbd_request;
31255 struct drbd_epoch {
31256 struct list_head list;
31257 unsigned int barrier_nr;
31258- atomic_t epoch_size; /* increased on every request added. */
31259+ atomic_unchecked_t epoch_size; /* increased on every request added. */
31260 atomic_t active; /* increased on every req. added, and dec on every finished. */
31261 unsigned long flags;
31262 };
31263@@ -1116,7 +1116,7 @@ struct drbd_conf {
31264 void *int_dig_in;
31265 void *int_dig_vv;
31266 wait_queue_head_t seq_wait;
31267- atomic_t packet_seq;
31268+ atomic_unchecked_t packet_seq;
31269 unsigned int peer_seq;
31270 spinlock_t peer_seq_lock;
31271 unsigned int minor;
31272@@ -1658,30 +1658,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
31273
31274 static inline void drbd_tcp_cork(struct socket *sock)
31275 {
31276- int __user val = 1;
31277+ int val = 1;
31278 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
31279- (char __user *)&val, sizeof(val));
31280+ (char __force_user *)&val, sizeof(val));
31281 }
31282
31283 static inline void drbd_tcp_uncork(struct socket *sock)
31284 {
31285- int __user val = 0;
31286+ int val = 0;
31287 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
31288- (char __user *)&val, sizeof(val));
31289+ (char __force_user *)&val, sizeof(val));
31290 }
31291
31292 static inline void drbd_tcp_nodelay(struct socket *sock)
31293 {
31294- int __user val = 1;
31295+ int val = 1;
31296 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
31297- (char __user *)&val, sizeof(val));
31298+ (char __force_user *)&val, sizeof(val));
31299 }
31300
31301 static inline void drbd_tcp_quickack(struct socket *sock)
31302 {
31303- int __user val = 2;
31304+ int val = 2;
31305 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
31306- (char __user *)&val, sizeof(val));
31307+ (char __force_user *)&val, sizeof(val));
31308 }
31309
31310 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
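
[Annotation] The drbd_tcp_*() hunks fix a genuine annotation bug on top of the hardening: `int __user val` claimed the on-stack integer itself lived in user space, which is false — only the pointer handed to the __user-typed setsockopt path needs a cast, and a forced one at that. The corrected shape, building on the annotation sketch earlier (SOL_TCP/TCP_CORK as in the kernel headers):

static inline void drbd_tcp_cork_sketch(struct socket *sock)
{
	int val = 1;		/* kernel stack variable; was mis-declared __user */

	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			       (char __force __user *)&val, sizeof(val));
}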
31311diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
31312index f55683a..2101b96 100644
31313--- a/drivers/block/drbd/drbd_main.c
31314+++ b/drivers/block/drbd/drbd_main.c
31315@@ -2556,7 +2556,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
31316 p.sector = sector;
31317 p.block_id = block_id;
31318 p.blksize = blksize;
31319- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
31320+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
31321
31322 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
31323 return false;
31324@@ -2854,7 +2854,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
31325
31326 p.sector = cpu_to_be64(req->sector);
31327 p.block_id = (unsigned long)req;
31328- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
31329+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
31330
31331 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
31332
31333@@ -3139,7 +3139,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
31334 atomic_set(&mdev->unacked_cnt, 0);
31335 atomic_set(&mdev->local_cnt, 0);
31336 atomic_set(&mdev->net_cnt, 0);
31337- atomic_set(&mdev->packet_seq, 0);
31338+ atomic_set_unchecked(&mdev->packet_seq, 0);
31339 atomic_set(&mdev->pp_in_use, 0);
31340 atomic_set(&mdev->pp_in_use_by_net, 0);
31341 atomic_set(&mdev->rs_sect_in, 0);
31342@@ -3221,8 +3221,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
31343 mdev->receiver.t_state);
31344
31345 /* no need to lock it, I'm the only thread alive */
31346- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
31347- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
31348+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
31349+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
31350 mdev->al_writ_cnt =
31351 mdev->bm_writ_cnt =
31352 mdev->read_cnt =
31353diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
31354index edb490a..ecd69da 100644
31355--- a/drivers/block/drbd/drbd_nl.c
31356+++ b/drivers/block/drbd/drbd_nl.c
31357@@ -2407,7 +2407,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
31358 module_put(THIS_MODULE);
31359 }
31360
31361-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
31362+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
31363
31364 static unsigned short *
31365 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
31366@@ -2478,7 +2478,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
31367 cn_reply->id.idx = CN_IDX_DRBD;
31368 cn_reply->id.val = CN_VAL_DRBD;
31369
31370- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31371+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31372 cn_reply->ack = 0; /* not used here. */
31373 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31374 (int)((char *)tl - (char *)reply->tag_list);
31375@@ -2510,7 +2510,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
31376 cn_reply->id.idx = CN_IDX_DRBD;
31377 cn_reply->id.val = CN_VAL_DRBD;
31378
31379- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31380+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31381 cn_reply->ack = 0; /* not used here. */
31382 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31383 (int)((char *)tl - (char *)reply->tag_list);
31384@@ -2588,7 +2588,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
31385 cn_reply->id.idx = CN_IDX_DRBD;
31386 cn_reply->id.val = CN_VAL_DRBD;
31387
31388- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
31389+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
31390 cn_reply->ack = 0; // not used here.
31391 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31392 (int)((char*)tl - (char*)reply->tag_list);
31393@@ -2627,7 +2627,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
31394 cn_reply->id.idx = CN_IDX_DRBD;
31395 cn_reply->id.val = CN_VAL_DRBD;
31396
31397- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
31398+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
31399 cn_reply->ack = 0; /* not used here. */
31400 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
31401 (int)((char *)tl - (char *)reply->tag_list);
31402diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
31403index c74ca2d..860c819 100644
31404--- a/drivers/block/drbd/drbd_receiver.c
31405+++ b/drivers/block/drbd/drbd_receiver.c
31406@@ -898,7 +898,7 @@ retry:
31407 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
31408 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
31409
31410- atomic_set(&mdev->packet_seq, 0);
31411+ atomic_set_unchecked(&mdev->packet_seq, 0);
31412 mdev->peer_seq = 0;
31413
31414 if (drbd_send_protocol(mdev) == -1)
31415@@ -999,7 +999,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
31416 do {
31417 next_epoch = NULL;
31418
31419- epoch_size = atomic_read(&epoch->epoch_size);
31420+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
31421
31422 switch (ev & ~EV_CLEANUP) {
31423 case EV_PUT:
31424@@ -1035,7 +1035,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
31425 rv = FE_DESTROYED;
31426 } else {
31427 epoch->flags = 0;
31428- atomic_set(&epoch->epoch_size, 0);
31429+ atomic_set_unchecked(&epoch->epoch_size, 0);
31430 /* atomic_set(&epoch->active, 0); is already zero */
31431 if (rv == FE_STILL_LIVE)
31432 rv = FE_RECYCLED;
31433@@ -1210,14 +1210,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
31434 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
31435 drbd_flush(mdev);
31436
31437- if (atomic_read(&mdev->current_epoch->epoch_size)) {
31438+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
31439 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
31440 if (epoch)
31441 break;
31442 }
31443
31444 epoch = mdev->current_epoch;
31445- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
31446+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
31447
31448 D_ASSERT(atomic_read(&epoch->active) == 0);
31449 D_ASSERT(epoch->flags == 0);
31450@@ -1229,11 +1229,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
31451 }
31452
31453 epoch->flags = 0;
31454- atomic_set(&epoch->epoch_size, 0);
31455+ atomic_set_unchecked(&epoch->epoch_size, 0);
31456 atomic_set(&epoch->active, 0);
31457
31458 spin_lock(&mdev->epoch_lock);
31459- if (atomic_read(&mdev->current_epoch->epoch_size)) {
31460+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
31461 list_add(&epoch->list, &mdev->current_epoch->list);
31462 mdev->current_epoch = epoch;
31463 mdev->epochs++;
31464@@ -1702,7 +1702,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
31465 spin_unlock(&mdev->peer_seq_lock);
31466
31467 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
31468- atomic_inc(&mdev->current_epoch->epoch_size);
31469+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
31470 return drbd_drain_block(mdev, data_size);
31471 }
31472
31473@@ -1732,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
31474
31475 spin_lock(&mdev->epoch_lock);
31476 e->epoch = mdev->current_epoch;
31477- atomic_inc(&e->epoch->epoch_size);
31478+ atomic_inc_unchecked(&e->epoch->epoch_size);
31479 atomic_inc(&e->epoch->active);
31480 spin_unlock(&mdev->epoch_lock);
31481
31482@@ -3954,7 +3954,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
31483 D_ASSERT(list_empty(&mdev->done_ee));
31484
31485 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
31486- atomic_set(&mdev->current_epoch->epoch_size, 0);
31487+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
31488 D_ASSERT(list_empty(&mdev->current_epoch->list));
31489 }
31490
31491diff --git a/drivers/block/loop.c b/drivers/block/loop.c
31492index 54046e5..7759c55 100644
31493--- a/drivers/block/loop.c
31494+++ b/drivers/block/loop.c
31495@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
31496 mm_segment_t old_fs = get_fs();
31497
31498 set_fs(get_ds());
31499- bw = file->f_op->write(file, buf, len, &pos);
31500+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
31501 set_fs(old_fs);
31502 if (likely(bw == len))
31503 return 0;
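
[Annotation] loop's __do_lo_send_write() calls the backing file's ->write, an interface typed for user pointers, while holding a kernel buffer; the set_fs(get_ds()) bracket is what makes that legal at runtime, and the added __force_user cast makes it explicit to the checker. The pattern in isolation (kernel context, 3.7-era mm_segment_t API, hypothetical wrapper name):

static int kernel_file_write_sketch(struct file *file, void *buf,
				    size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t bw;

	set_fs(get_ds());	/* let "user" accessors reach kernel addresses */
	bw = file->f_op->write(file, (const char __force __user *)buf, len, pos);
	set_fs(old_fs);		/* always restore the previous limit */

	return bw == (ssize_t)len ? 0 : -EIO;
}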
31504diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
31505index d620b44..587561e 100644
31506--- a/drivers/cdrom/cdrom.c
31507+++ b/drivers/cdrom/cdrom.c
31508@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
31509 ENSURE(reset, CDC_RESET);
31510 ENSURE(generic_packet, CDC_GENERIC_PACKET);
31511 cdi->mc_flags = 0;
31512- cdo->n_minors = 0;
31513 cdi->options = CDO_USE_FFLAGS;
31514
31515 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
31516@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
31517 else
31518 cdi->cdda_method = CDDA_OLD;
31519
31520- if (!cdo->generic_packet)
31521- cdo->generic_packet = cdrom_dummy_generic_packet;
31522+ if (!cdo->generic_packet) {
31523+ pax_open_kernel();
31524+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
31525+ pax_close_kernel();
31526+ }
31527
31528 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
31529 mutex_lock(&cdrom_mutex);
31530@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
31531 if (cdi->exit)
31532 cdi->exit(cdi);
31533
31534- cdi->ops->n_minors--;
31535 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
31536 }
31537
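[Annotation] With grsecurity's structure constification, cdrom_device_ops tables live in read-only memory; that is also why the n_minors writes are dropped here and the gdrom initializer below loses the field. The one legitimate late write — installing the dummy generic_packet fallback — is therefore bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (CR0.WP on x86), and goes through a *(void **)& cast to defeat the const qualifier. The pattern, with a hypothetical minimal ops struct:

struct cdrom_ops_sketch {
	int (*generic_packet)(void *cdi, void *cgc);
};

static void install_fallback_sketch(const struct cdrom_ops_sketch *cdo,
				    int (*fallback)(void *, void *))
{
	if (!cdo->generic_packet) {
		pax_open_kernel();		/* region briefly writable */
		*(void **)&cdo->generic_packet = fallback;
		pax_close_kernel();
	}
}
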
31538diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
31539index 75d485a..2809958 100644
31540--- a/drivers/cdrom/gdrom.c
31541+++ b/drivers/cdrom/gdrom.c
31542@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
31543 .audio_ioctl = gdrom_audio_ioctl,
31544 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
31545 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
31546- .n_minors = 1,
31547 };
31548
31549 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
31550diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
31551index 72bedad..8181ce1 100644
31552--- a/drivers/char/Kconfig
31553+++ b/drivers/char/Kconfig
31554@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
31555
31556 config DEVKMEM
31557 bool "/dev/kmem virtual device support"
31558- default y
31559+ default n
31560+ depends on !GRKERNSEC_KMEM
31561 help
31562 Say Y here if you want to support the /dev/kmem device. The
31563 /dev/kmem device is rarely used, but can be used for certain
31564@@ -581,6 +582,7 @@ config DEVPORT
31565 bool
31566 depends on !M68K
31567 depends on ISA || PCI
31568+ depends on !GRKERNSEC_KMEM
31569 default y
31570
31571 source "drivers/s390/char/Kconfig"
31572diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31573index 2e04433..22afc64 100644
31574--- a/drivers/char/agp/frontend.c
31575+++ b/drivers/char/agp/frontend.c
31576@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31577 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31578 return -EFAULT;
31579
31580- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31581+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31582 return -EFAULT;
31583
31584 client = agp_find_client_by_pid(reserve.pid);
31585diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31586index 21cb980..f15107c 100644
31587--- a/drivers/char/genrtc.c
31588+++ b/drivers/char/genrtc.c
31589@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
31590 switch (cmd) {
31591
31592 case RTC_PLL_GET:
31593+ memset(&pll, 0, sizeof(pll));
31594 if (get_rtc_pll(&pll))
31595 return -EINVAL;
31596 else
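
[Annotation] The genrtc hunk is a classic stack-infoleak fix: RTC_PLL_GET copies a struct rtc_pll_info out to user space, and any field or padding byte the driver's get_rtc_pll() leaves untouched would otherwise carry stale kernel stack contents. Zeroing the struct first closes that. The shape of the fix, with a hypothetical struct and fill callback standing in for rtc_pll_info/get_rtc_pll:

struct pll_info_sketch {
	int pll_ctrl;
	int pll_value;
	/* compiler-inserted padding would also reach user space */
};

static long pll_get_sketch(struct pll_info_sketch __user *uarg,
			   int (*fill)(struct pll_info_sketch *))
{
	struct pll_info_sketch pll;

	memset(&pll, 0, sizeof(pll));	/* no uninitialized stack bytes leak */
	if (fill(&pll))
		return -EINVAL;
	return copy_to_user(uarg, &pll, sizeof(pll)) ? -EFAULT : 0;
}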
31597diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31598index dfd7876..c0b0885 100644
31599--- a/drivers/char/hpet.c
31600+++ b/drivers/char/hpet.c
31601@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31602 }
31603
31604 static int
31605-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
31606+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
31607 struct hpet_info *info)
31608 {
31609 struct hpet_timer __iomem *timer;
31610diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31611index a0c84bb..9edcf60 100644
31612--- a/drivers/char/ipmi/ipmi_msghandler.c
31613+++ b/drivers/char/ipmi/ipmi_msghandler.c
31614@@ -420,7 +420,7 @@ struct ipmi_smi {
31615 struct proc_dir_entry *proc_dir;
31616 char proc_dir_name[10];
31617
31618- atomic_t stats[IPMI_NUM_STATS];
31619+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31620
31621 /*
31622 * run_to_completion duplicate of smb_info, smi_info
31623@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31624
31625
31626 #define ipmi_inc_stat(intf, stat) \
31627- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31628+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31629 #define ipmi_get_stat(intf, stat) \
31630- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31631+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31632
31633 static int is_lan_addr(struct ipmi_addr *addr)
31634 {
31635@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31636 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31637 init_waitqueue_head(&intf->waitq);
31638 for (i = 0; i < IPMI_NUM_STATS; i++)
31639- atomic_set(&intf->stats[i], 0);
31640+ atomic_set_unchecked(&intf->stats[i], 0);
31641
31642 intf->proc_dir = NULL;
31643
31644diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31645index 32a6c7e..f6966a9 100644
31646--- a/drivers/char/ipmi/ipmi_si_intf.c
31647+++ b/drivers/char/ipmi/ipmi_si_intf.c
31648@@ -275,7 +275,7 @@ struct smi_info {
31649 unsigned char slave_addr;
31650
31651 /* Counters and things for the proc filesystem. */
31652- atomic_t stats[SI_NUM_STATS];
31653+ atomic_unchecked_t stats[SI_NUM_STATS];
31654
31655 struct task_struct *thread;
31656
31657@@ -284,9 +284,9 @@ struct smi_info {
31658 };
31659
31660 #define smi_inc_stat(smi, stat) \
31661- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31662+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31663 #define smi_get_stat(smi, stat) \
31664- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31665+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31666
31667 #define SI_MAX_PARMS 4
31668
31669@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
31670 atomic_set(&new_smi->req_events, 0);
31671 new_smi->run_to_completion = 0;
31672 for (i = 0; i < SI_NUM_STATS; i++)
31673- atomic_set(&new_smi->stats[i], 0);
31674+ atomic_set_unchecked(&new_smi->stats[i], 0);
31675
31676 new_smi->interrupt_disabled = 1;
31677 atomic_set(&new_smi->stop_operation, 0);
31678diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31679index 0537903..121c699 100644
31680--- a/drivers/char/mem.c
31681+++ b/drivers/char/mem.c
31682@@ -18,6 +18,7 @@
31683 #include <linux/raw.h>
31684 #include <linux/tty.h>
31685 #include <linux/capability.h>
31686+#include <linux/security.h>
31687 #include <linux/ptrace.h>
31688 #include <linux/device.h>
31689 #include <linux/highmem.h>
31690@@ -37,6 +38,10 @@
31691
31692 #define DEVPORT_MINOR 4
31693
31694+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31695+extern const struct file_operations grsec_fops;
31696+#endif
31697+
31698 static inline unsigned long size_inside_page(unsigned long start,
31699 unsigned long size)
31700 {
31701@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31702
31703 while (cursor < to) {
31704 if (!devmem_is_allowed(pfn)) {
31705+#ifdef CONFIG_GRKERNSEC_KMEM
31706+ gr_handle_mem_readwrite(from, to);
31707+#else
31708 printk(KERN_INFO
31709 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31710 current->comm, from, to);
31711+#endif
31712 return 0;
31713 }
31714 cursor += PAGE_SIZE;
31715@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31716 }
31717 return 1;
31718 }
31719+#elif defined(CONFIG_GRKERNSEC_KMEM)
31720+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31721+{
31722+ return 0;
31723+}
31724 #else
31725 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31726 {
31727@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31728
31729 while (count > 0) {
31730 unsigned long remaining;
31731+ char *temp;
31732
31733 sz = size_inside_page(p, count);
31734
31735@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
31736 if (!ptr)
31737 return -EFAULT;
31738
31739- remaining = copy_to_user(buf, ptr, sz);
31740+#ifdef CONFIG_PAX_USERCOPY
31741+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31742+ if (!temp) {
31743+ unxlate_dev_mem_ptr(p, ptr);
31744+ return -ENOMEM;
31745+ }
31746+ memcpy(temp, ptr, sz);
31747+#else
31748+ temp = ptr;
31749+#endif
31750+
31751+ remaining = copy_to_user(buf, temp, sz);
31752+
31753+#ifdef CONFIG_PAX_USERCOPY
31754+ kfree(temp);
31755+#endif
31756+
31757 unxlate_dev_mem_ptr(p, ptr);
31758 if (remaining)
31759 return -EFAULT;
31760@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31761 size_t count, loff_t *ppos)
31762 {
31763 unsigned long p = *ppos;
31764- ssize_t low_count, read, sz;
31765+ ssize_t low_count, read, sz, err = 0;
31766 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31767- int err = 0;
31768
31769 read = 0;
31770 if (p < (unsigned long) high_memory) {
31771@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31772 }
31773 #endif
31774 while (low_count > 0) {
31775+ char *temp;
31776+
31777 sz = size_inside_page(p, low_count);
31778
31779 /*
31780@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31781 */
31782 kbuf = xlate_dev_kmem_ptr((char *)p);
31783
31784- if (copy_to_user(buf, kbuf, sz))
31785+#ifdef CONFIG_PAX_USERCOPY
31786+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
31787+ if (!temp)
31788+ return -ENOMEM;
31789+ memcpy(temp, kbuf, sz);
31790+#else
31791+ temp = kbuf;
31792+#endif
31793+
31794+ err = copy_to_user(buf, temp, sz);
31795+
31796+#ifdef CONFIG_PAX_USERCOPY
31797+ kfree(temp);
31798+#endif
31799+
31800+ if (err)
31801 return -EFAULT;
31802 buf += sz;
31803 p += sz;
31804@@ -833,6 +880,9 @@ static const struct memdev {
31805 #ifdef CONFIG_CRASH_DUMP
31806 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31807 #endif
31808+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31809+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31810+#endif
31811 };
31812
31813 static int memory_open(struct inode *inode, struct file *filp)
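
[Annotation] The read_mem()/read_kmem() rework is the PAX_USERCOPY bounce-buffer pattern: copying straight out of an xlate'd /dev/mem or /dev/kmem mapping gives the usercopy checker no object whose size it can verify, so the data is first staged into a fresh kmalloc allocation — flagged GFP_USERCOPY, the whitelist marker this patch series introduces — and copy_to_user() then runs against that bounded slab object. Distilled (kernel context; error handling reduced):

static ssize_t bounce_to_user_sketch(char __user *buf, const void *ptr, size_t sz)
{
	ssize_t ret = 0;
	char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);

	if (!temp)
		return -ENOMEM;
	memcpy(temp, ptr, sz);			/* kernel-to-kernel staging copy */
	if (copy_to_user(buf, temp, sz))	/* bounds-checked against the slab object */
		ret = -EFAULT;
	kfree(temp);
	return ret;
}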
31814diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
31815index 9df78e2..01ba9ae 100644
31816--- a/drivers/char/nvram.c
31817+++ b/drivers/char/nvram.c
31818@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
31819
31820 spin_unlock_irq(&rtc_lock);
31821
31822- if (copy_to_user(buf, contents, tmp - contents))
31823+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
31824 return -EFAULT;
31825
31826 *ppos = i;
31827diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
31828index 21721d2..4e98777 100644
31829--- a/drivers/char/pcmcia/synclink_cs.c
31830+++ b/drivers/char/pcmcia/synclink_cs.c
31831@@ -2346,9 +2346,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31832
31833 if (debug_level >= DEBUG_LEVEL_INFO)
31834 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
31835- __FILE__,__LINE__, info->device_name, port->count);
31836+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
31837
31838- WARN_ON(!port->count);
31839+ WARN_ON(!atomic_read(&port->count));
31840
31841 if (tty_port_close_start(port, tty, filp) == 0)
31842 goto cleanup;
31843@@ -2366,7 +2366,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
31844 cleanup:
31845 if (debug_level >= DEBUG_LEVEL_INFO)
31846 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
31847- tty->driver->name, port->count);
31848+ tty->driver->name, atomic_read(&port->count));
31849 }
31850
31851 /* Wait until the transmitter is empty.
31852@@ -2508,7 +2508,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31853
31854 if (debug_level >= DEBUG_LEVEL_INFO)
31855 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
31856- __FILE__,__LINE__,tty->driver->name, port->count);
31857+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
31858
31859 /* If port is closing, signal caller to try again */
31860 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
31861@@ -2528,11 +2528,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
31862 goto cleanup;
31863 }
31864 spin_lock(&port->lock);
31865- port->count++;
31866+ atomic_inc(&port->count);
31867 spin_unlock(&port->lock);
31868 spin_unlock_irqrestore(&info->netlock, flags);
31869
31870- if (port->count == 1) {
31871+ if (atomic_read(&port->count) == 1) {
31872 /* 1st open on this device, init hardware */
31873 retval = startup(info, tty);
31874 if (retval < 0)
31875@@ -3886,7 +3886,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
31876 unsigned short new_crctype;
31877
31878 /* return error if TTY interface open */
31879- if (info->port.count)
31880+ if (atomic_read(&info->port.count))
31881 return -EBUSY;
31882
31883 switch (encoding)
31884@@ -3989,7 +3989,7 @@ static int hdlcdev_open(struct net_device *dev)
31885
31886 /* arbitrate between network and tty opens */
31887 spin_lock_irqsave(&info->netlock, flags);
31888- if (info->port.count != 0 || info->netcount != 0) {
31889+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
31890 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
31891 spin_unlock_irqrestore(&info->netlock, flags);
31892 return -EBUSY;
31893@@ -4078,7 +4078,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
31894 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
31895
31896 /* return error if TTY interface open */
31897- if (info->port.count)
31898+ if (atomic_read(&info->port.count))
31899 return -EBUSY;
31900
31901 if (cmd != SIOCWANDEV)
31902diff --git a/drivers/char/random.c b/drivers/char/random.c
31903index b86eae9..b9c2ed7 100644
31904--- a/drivers/char/random.c
31905+++ b/drivers/char/random.c
31906@@ -272,8 +272,13 @@
31907 /*
31908 * Configuration information
31909 */
31910+#ifdef CONFIG_GRKERNSEC_RANDNET
31911+#define INPUT_POOL_WORDS 512
31912+#define OUTPUT_POOL_WORDS 128
31913+#else
31914 #define INPUT_POOL_WORDS 128
31915 #define OUTPUT_POOL_WORDS 32
31916+#endif
31917 #define SEC_XFER_SIZE 512
31918 #define EXTRACT_SIZE 10
31919
31920@@ -313,10 +318,17 @@ static struct poolinfo {
31921 int poolwords;
31922 int tap1, tap2, tap3, tap4, tap5;
31923 } poolinfo_table[] = {
31924+#ifdef CONFIG_GRKERNSEC_RANDNET
31925+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31926+ { 512, 411, 308, 208, 104, 1 },
31927+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31928+ { 128, 103, 76, 51, 25, 1 },
31929+#else
31930 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31931 { 128, 103, 76, 51, 25, 1 },
31932 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31933 { 32, 26, 20, 14, 7, 1 },
31934+#endif
31935 #if 0
31936 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31937 { 2048, 1638, 1231, 819, 411, 1 },
31938@@ -437,6 +449,7 @@ struct entropy_store {
31939 int entropy_count;
31940 int entropy_total;
31941 unsigned int initialized:1;
31942+ bool last_data_init;
31943 __u8 last_data[EXTRACT_SIZE];
31944 };
31945
31946@@ -527,8 +540,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
31947 input_rotate += i ? 7 : 14;
31948 }
31949
31950- ACCESS_ONCE(r->input_rotate) = input_rotate;
31951- ACCESS_ONCE(r->add_ptr) = i;
31952+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
31953+ ACCESS_ONCE_RW(r->add_ptr) = i;
31954 smp_wmb();
31955
31956 if (out)
31957@@ -957,6 +970,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
31958 ssize_t ret = 0, i;
31959 __u8 tmp[EXTRACT_SIZE];
31960
31961+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
31962+ if (fips_enabled && !r->last_data_init)
31963+ nbytes += EXTRACT_SIZE;
31964+
31965 trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
31966 xfer_secondary_pool(r, nbytes);
31967 nbytes = account(r, nbytes, min, reserved);
31968@@ -967,6 +984,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
31969 if (fips_enabled) {
31970 unsigned long flags;
31971
31972+
31973+ /* prime last_data value if need be, per fips 140-2 */
31974+ if (!r->last_data_init) {
31975+ spin_lock_irqsave(&r->lock, flags);
31976+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
31977+ r->last_data_init = true;
31978+ nbytes -= EXTRACT_SIZE;
31979+ spin_unlock_irqrestore(&r->lock, flags);
31980+ extract_buf(r, tmp);
31981+ }
31982+
31983 spin_lock_irqsave(&r->lock, flags);
31984 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
31985 panic("Hardware RNG duplicated output!\n");
31986@@ -1008,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
31987
31988 extract_buf(r, tmp);
31989 i = min_t(int, nbytes, EXTRACT_SIZE);
31990- if (copy_to_user(buf, tmp, i)) {
31991+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
31992 ret = -EFAULT;
31993 break;
31994 }
31995@@ -1086,6 +1114,7 @@ static void init_std_data(struct entropy_store *r)
31996
31997 r->entropy_count = 0;
31998 r->entropy_total = 0;
31999+ r->last_data_init = false;
32000 mix_pool_bytes(r, &now, sizeof(now), NULL);
32001 for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
32002 if (!arch_get_random_long(&rv))
32003@@ -1342,7 +1371,7 @@ EXPORT_SYMBOL(generate_random_uuid);
32004 #include <linux/sysctl.h>
32005
32006 static int min_read_thresh = 8, min_write_thresh;
32007-static int max_read_thresh = INPUT_POOL_WORDS * 32;
32008+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
32009 static int max_write_thresh = INPUT_POOL_WORDS * 32;
32010 static char sysctl_bootid[16];
32011
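[Annotation] The random.c change has two independent parts. The GRKERNSEC_RANDNET block merely quadruples the pool sizes, with matching polynomial taps. The last_data_init logic fixes the FIPS 140-2 continuous test: each extracted EXTRACT_SIZE block is compared against the previous one, but before this change the very first comparison ran against uninitialized last_data — so extract_entropy() now requests EXTRACT_SIZE extra bytes and spends the first block purely on priming. The control flow, modeled in user space with hypothetical names:

#include <string.h>
#include <stdlib.h>
#include <stdbool.h>

#define EXTRACT_SIZE 10

static unsigned char last_data[EXTRACT_SIZE];
static bool last_data_init;

static void fips_check_sketch(const unsigned char tmp[EXTRACT_SIZE])
{
	if (!last_data_init) {			/* prime on first use */
		memcpy(last_data, tmp, EXTRACT_SIZE);
		last_data_init = true;
		return;
	}
	if (!memcmp(tmp, last_data, EXTRACT_SIZE))
		abort();			/* kernel: panic("Hardware RNG duplicated output!") */
	memcpy(last_data, tmp, EXTRACT_SIZE);
}
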
32012diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
32013index 9b4f011..b7e0a1a 100644
32014--- a/drivers/char/sonypi.c
32015+++ b/drivers/char/sonypi.c
32016@@ -54,6 +54,7 @@
32017
32018 #include <asm/uaccess.h>
32019 #include <asm/io.h>
32020+#include <asm/local.h>
32021
32022 #include <linux/sonypi.h>
32023
32024@@ -490,7 +491,7 @@ static struct sonypi_device {
32025 spinlock_t fifo_lock;
32026 wait_queue_head_t fifo_proc_list;
32027 struct fasync_struct *fifo_async;
32028- int open_count;
32029+ local_t open_count;
32030 int model;
32031 struct input_dev *input_jog_dev;
32032 struct input_dev *input_key_dev;
32033@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
32034 static int sonypi_misc_release(struct inode *inode, struct file *file)
32035 {
32036 mutex_lock(&sonypi_device.lock);
32037- sonypi_device.open_count--;
32038+ local_dec(&sonypi_device.open_count);
32039 mutex_unlock(&sonypi_device.lock);
32040 return 0;
32041 }
32042@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
32043 {
32044 mutex_lock(&sonypi_device.lock);
32045 /* Flush input queue on first open */
32046- if (!sonypi_device.open_count)
32047+ if (!local_read(&sonypi_device.open_count))
32048 kfifo_reset(&sonypi_device.fifo);
32049- sonypi_device.open_count++;
32050+ local_inc(&sonypi_device.open_count);
32051 mutex_unlock(&sonypi_device.lock);
32052
32053 return 0;
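
[The sonypi hunks retype the open counter from int to local_t, so every update goes through the local_inc()/local_dec()/local_read() accessors instead of raw ++/--. A sketch of the pattern in kernel context (misc_open/misc_release are illustrative; the driver keeps its mutex around the counter, as above):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static int misc_open(void)
{
	if (!local_read(&open_count))   /* first opener: reset queues */
		/* kfifo_reset(...) */;
	local_inc(&open_count);
	return 0;
}

static void misc_release(void)
{
	local_dec(&open_count);
}
]
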
32054diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
32055index 93211df..c7805f7 100644
32056--- a/drivers/char/tpm/tpm.c
32057+++ b/drivers/char/tpm/tpm.c
32058@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
32059 chip->vendor.req_complete_val)
32060 goto out_recv;
32061
32062- if ((status == chip->vendor.req_canceled)) {
32063+ if (status == chip->vendor.req_canceled) {
32064 dev_err(chip->dev, "Operation Canceled\n");
32065 rc = -ECANCELED;
32066 goto out;
32067diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
32068index 56051d0..11cf3b7 100644
32069--- a/drivers/char/tpm/tpm_acpi.c
32070+++ b/drivers/char/tpm/tpm_acpi.c
32071@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
32072 virt = acpi_os_map_memory(start, len);
32073 if (!virt) {
32074 kfree(log->bios_event_log);
32075+ log->bios_event_log = NULL;
32076 printk("%s: ERROR - Unable to map memory\n", __func__);
32077 return -EIO;
32078 }
32079
32080- memcpy_fromio(log->bios_event_log, virt, len);
32081+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
32082
32083 acpi_os_unmap_memory(virt, len);
32084 return 0;
32085diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
32086index 84ddc55..1d32f1e 100644
32087--- a/drivers/char/tpm/tpm_eventlog.c
32088+++ b/drivers/char/tpm/tpm_eventlog.c
32089@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
32090 event = addr;
32091
32092 if ((event->event_type == 0 && event->event_size == 0) ||
32093- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
32094+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
32095 return NULL;
32096
32097 return addr;
32098@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
32099 return NULL;
32100
32101 if ((event->event_type == 0 && event->event_size == 0) ||
32102- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
32103+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
32104 return NULL;
32105
32106 (*pos)++;
32107@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
32108 int i;
32109
32110 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
32111- seq_putc(m, data[i]);
32112+ if (!seq_putc(m, data[i]))
32113+ return -EFAULT;
32114
32115 return 0;
32116 }
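
[The two tpm_eventlog.c hunks rearrange the bounds test so the attacker-influenced event_size is compared against the space remaining below limit, rather than being added into pointer arithmetic where the sum can wrap and slip past the >= limit check. The same idiom in isolation — record_fits and its parameters are illustrative:

#include <stdbool.h>
#include <stddef.h>

/* ptr and limit are assumed to delimit one valid buffer, ptr <= limit. */
static bool record_fits(const char *ptr, const char *limit,
			size_t header, size_t payload)
{
	/* Unsafe: ptr + header + payload >= limit -- the sum can wrap. */
	/* Safe: compare the length against the space actually left.    */
	if ((size_t)(limit - ptr) < header)
		return false;
	return payload < (size_t)(limit - ptr) - header;
}
]
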
32117diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32118index 088c8fd..774c5a5 100644
32119--- a/drivers/char/virtio_console.c
32120+++ b/drivers/char/virtio_console.c
32121@@ -622,7 +622,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
32122 if (to_user) {
32123 ssize_t ret;
32124
32125- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
32126+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
32127 if (ret)
32128 return -EFAULT;
32129 } else {
32130@@ -721,7 +721,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
32131 if (!port_has_data(port) && !port->host_connected)
32132 return 0;
32133
32134- return fill_readbuf(port, ubuf, count, true);
32135+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
32136 }
32137
32138 static int wait_port_writable(struct port *port, bool nonblock)
32139diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
32140index 75c0a1a..96ba8f6 100644
32141--- a/drivers/edac/edac_mc.c
32142+++ b/drivers/edac/edac_mc.c
32143@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
32144 /*
32145 * Alocate and fill the csrow/channels structs
32146 */
32147- mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
32148+ mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
32149 if (!mci->csrows)
32150 goto error;
32151 for (row = 0; row < tot_csrows; row++) {
32152@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
32153 csr->csrow_idx = row;
32154 csr->mci = mci;
32155 csr->nr_channels = tot_channels;
32156- csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
32157+ csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
32158 GFP_KERNEL);
32159 if (!csr->channels)
32160 goto error;
32161@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
32162 /*
32163 * Allocate and fill the dimm structs
32164 */
32165- mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
32166+ mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
32167 if (!mci->dimms)
32168 goto error;
32169
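
[The edac_mc.c hunks fix transposed kcalloc() arguments: the prototype is kcalloc(count, size, flags). The allocation happens to come out the same size either way, since the overflow check multiplies the two values regardless of order, but the conventional order is what readers and static checkers expect. Sketch (struct csrow is illustrative):

#include <linux/slab.h>

struct csrow { int idx; };

static struct csrow *alloc_csrows(unsigned int n)
{
	/* kcalloc(count, element size, flags) -- count comes first */
	return kcalloc(n, sizeof(struct csrow), GFP_KERNEL);
}
]
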
32170diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32171index 1bfb207..0d059c2 100644
32172--- a/drivers/edac/edac_pci_sysfs.c
32173+++ b/drivers/edac/edac_pci_sysfs.c
32174@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32175 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32176 static int edac_pci_poll_msec = 1000; /* one second workq period */
32177
32178-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32179-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32180+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32181+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32182
32183 static struct kobject *edac_pci_top_main_kobj;
32184 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32185@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32186 edac_printk(KERN_CRIT, EDAC_PCI,
32187 "Signaled System Error on %s\n",
32188 pci_name(dev));
32189- atomic_inc(&pci_nonparity_count);
32190+ atomic_inc_unchecked(&pci_nonparity_count);
32191 }
32192
32193 if (status & (PCI_STATUS_PARITY)) {
32194@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32195 "Master Data Parity Error on %s\n",
32196 pci_name(dev));
32197
32198- atomic_inc(&pci_parity_count);
32199+ atomic_inc_unchecked(&pci_parity_count);
32200 }
32201
32202 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32203@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32204 "Detected Parity Error on %s\n",
32205 pci_name(dev));
32206
32207- atomic_inc(&pci_parity_count);
32208+ atomic_inc_unchecked(&pci_parity_count);
32209 }
32210 }
32211
32212@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32213 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32214 "Signaled System Error on %s\n",
32215 pci_name(dev));
32216- atomic_inc(&pci_nonparity_count);
32217+ atomic_inc_unchecked(&pci_nonparity_count);
32218 }
32219
32220 if (status & (PCI_STATUS_PARITY)) {
32221@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32222 "Master Data Parity Error on "
32223 "%s\n", pci_name(dev));
32224
32225- atomic_inc(&pci_parity_count);
32226+ atomic_inc_unchecked(&pci_parity_count);
32227 }
32228
32229 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32230@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32231 "Detected Parity Error on %s\n",
32232 pci_name(dev));
32233
32234- atomic_inc(&pci_parity_count);
32235+ atomic_inc_unchecked(&pci_parity_count);
32236 }
32237 }
32238 }
32239@@ -676,7 +676,7 @@ void edac_pci_do_parity_check(void)
32240 if (!check_pci_errors)
32241 return;
32242
32243- before_count = atomic_read(&pci_parity_count);
32244+ before_count = atomic_read_unchecked(&pci_parity_count);
32245
32246 /* scan all PCI devices looking for a Parity Error on devices and
32247 * bridges.
32248@@ -688,7 +688,7 @@ void edac_pci_do_parity_check(void)
32249 /* Only if operator has selected panic on PCI Error */
32250 if (edac_pci_get_panic_on_pe()) {
32251 /* If the count is different 'after' from 'before' */
32252- if (before_count != atomic_read(&pci_parity_count))
32253+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32254 panic("EDAC: PCI Parity Error");
32255 }
32256 }
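
[atomic_unchecked_t and the *_unchecked accessors are grsecurity's opt-out from PAX_REFCOUNT overflow trapping: pure statistics counters like these parity counts may legitimately wrap, so they move to the unchecked variants while real reference counts keep the trap. Outside a grsecurity tree the declaration below would reduce to a plain atomic_t; a sketch of the split:

/* grsecurity tree assumed for the *_unchecked API */
static atomic_unchecked_t parity_errors = ATOMIC_INIT(0);  /* may wrap */
static atomic_t ref_count = ATOMIC_INIT(1);                /* must not */

static void on_parity_error(void)
{
	atomic_inc_unchecked(&parity_errors);  /* no overflow trap here */
}
]
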
32257diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
32258index 8c87a5e..a19cbd7 100644
32259--- a/drivers/edac/mce_amd.h
32260+++ b/drivers/edac/mce_amd.h
32261@@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
32262 struct amd_decoder_ops {
32263 bool (*dc_mce)(u16, u8);
32264 bool (*ic_mce)(u16, u8);
32265-};
32266+} __no_const;
32267
32268 void amd_report_gart_errors(bool);
32269 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
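
[__no_const pairs with grsecurity's constify gcc plugin, which force-qualifies structs consisting only of function pointers as const; the annotation exempts the ones that must stay writable because their hooks are assigned at runtime (as dc_mce/ic_mce are here). In a vanilla tree the macro expands to nothing. Sketch of the two cases, with illustrative struct names:

/* constify plugin assumed: ops structs become const by default */
struct fixed_ops {
	void (*handler)(void);          /* must be set at compile time */
};

struct runtime_ops {
	void (*handler)(void);          /* assigned later, per chip */
} __no_const;                           /* so this one stays writable */
]
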
32270diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32271index 57ea7f4..789e3c3 100644
32272--- a/drivers/firewire/core-card.c
32273+++ b/drivers/firewire/core-card.c
32274@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
32275
32276 void fw_core_remove_card(struct fw_card *card)
32277 {
32278- struct fw_card_driver dummy_driver = dummy_driver_template;
32279+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32280
32281 card->driver->update_phy_reg(card, 4,
32282 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32283diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32284index f8d2287..5aaf4db 100644
32285--- a/drivers/firewire/core-cdev.c
32286+++ b/drivers/firewire/core-cdev.c
32287@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
32288 int ret;
32289
32290 if ((request->channels == 0 && request->bandwidth == 0) ||
32291- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32292- request->bandwidth < 0)
32293+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32294 return -EINVAL;
32295
32296 r = kmalloc(sizeof(*r), GFP_KERNEL);
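
[The core-cdev.c hunk deletes a comparison that can never be true when request->bandwidth is unsigned (it is __u32 in the UAPI struct): an unsigned value is never below zero, and compilers flag such tests under -Wtype-limits. A two-line illustration:

#include <stdio.h>

int main(void)
{
	unsigned int bandwidth = -1;    /* well-defined: wraps to UINT_MAX */

	if (bandwidth < 0)              /* always false for unsigned */
		puts("unreachable");
	printf("%u\n", bandwidth);      /* 4294967295 with 32-bit unsigned */
	return 0;
}
]
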
32297diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32298index 28a94c7..58da63a 100644
32299--- a/drivers/firewire/core-transaction.c
32300+++ b/drivers/firewire/core-transaction.c
32301@@ -38,6 +38,7 @@
32302 #include <linux/timer.h>
32303 #include <linux/types.h>
32304 #include <linux/workqueue.h>
32305+#include <linux/sched.h>
32306
32307 #include <asm/byteorder.h>
32308
32309diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32310index 515a42c..5ecf3ba 100644
32311--- a/drivers/firewire/core.h
32312+++ b/drivers/firewire/core.h
32313@@ -111,6 +111,7 @@ struct fw_card_driver {
32314
32315 int (*stop_iso)(struct fw_iso_context *ctx);
32316 };
32317+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32318
32319 void fw_card_initialize(struct fw_card *card,
32320 const struct fw_card_driver *driver, struct device *device);
32321diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32322index 982f1f5..d21e5da 100644
32323--- a/drivers/firmware/dmi_scan.c
32324+++ b/drivers/firmware/dmi_scan.c
32325@@ -491,11 +491,6 @@ void __init dmi_scan_machine(void)
32326 }
32327 }
32328 else {
32329- /*
32330- * no iounmap() for that ioremap(); it would be a no-op, but
32331- * it's so early in setup that sucker gets confused into doing
32332- * what it shouldn't if we actually call it.
32333- */
32334 p = dmi_ioremap(0xF0000, 0x10000);
32335 if (p == NULL)
32336 goto error;
32337@@ -770,7 +765,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32338 if (buf == NULL)
32339 return -1;
32340
32341- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32342+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32343
32344 iounmap(buf);
32345 return 0;
32346diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32347index bfd8f43..b1fe1f8 100644
32348--- a/drivers/firmware/efivars.c
32349+++ b/drivers/firmware/efivars.c
32350@@ -1206,7 +1206,7 @@ out:
32351 EXPORT_SYMBOL_GPL(register_efivars);
32352
32353 static struct efivars __efivars;
32354-static struct efivar_operations ops;
32355+static efivar_operations_no_const ops __read_only;
32356
32357 /*
32358 * For now we register the efi subsystem with the firmware subsystem
32359diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
32360index 82d5c20..44a7177 100644
32361--- a/drivers/gpio/gpio-vr41xx.c
32362+++ b/drivers/gpio/gpio-vr41xx.c
32363@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32364 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32365 maskl, pendl, maskh, pendh);
32366
32367- atomic_inc(&irq_err_count);
32368+ atomic_inc_unchecked(&irq_err_count);
32369
32370 return -EINVAL;
32371 }
32372diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32373index 1227adf..f2301c2 100644
32374--- a/drivers/gpu/drm/drm_crtc_helper.c
32375+++ b/drivers/gpu/drm/drm_crtc_helper.c
32376@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32377 struct drm_crtc *tmp;
32378 int crtc_mask = 1;
32379
32380- WARN(!crtc, "checking null crtc?\n");
32381+ BUG_ON(!crtc);
32382
32383 dev = crtc->dev;
32384
32385diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32386index be174ca..0bcbb71 100644
32387--- a/drivers/gpu/drm/drm_drv.c
32388+++ b/drivers/gpu/drm/drm_drv.c
32389@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
32390 /**
32391 * Copy and IOCTL return string to user space
32392 */
32393-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
32394+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
32395 {
32396 int len;
32397
32398@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
32399 return -ENODEV;
32400
32401 atomic_inc(&dev->ioctl_count);
32402- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32403+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32404 ++file_priv->ioctl_count;
32405
32406 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32407diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32408index 133b413..fd68225 100644
32409--- a/drivers/gpu/drm/drm_fops.c
32410+++ b/drivers/gpu/drm/drm_fops.c
32411@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
32412 }
32413
32414 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32415- atomic_set(&dev->counts[i], 0);
32416+ atomic_set_unchecked(&dev->counts[i], 0);
32417
32418 dev->sigdata.lock = NULL;
32419
32420@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
32421 if (drm_device_is_unplugged(dev))
32422 return -ENODEV;
32423
32424- if (!dev->open_count++)
32425+ if (local_inc_return(&dev->open_count) == 1)
32426 need_setup = 1;
32427 mutex_lock(&dev->struct_mutex);
32428 old_mapping = dev->dev_mapping;
32429@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
32430 retcode = drm_open_helper(inode, filp, dev);
32431 if (retcode)
32432 goto err_undo;
32433- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32434+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32435 if (need_setup) {
32436 retcode = drm_setup(dev);
32437 if (retcode)
32438@@ -164,7 +164,7 @@ err_undo:
32439 iput(container_of(dev->dev_mapping, struct inode, i_data));
32440 dev->dev_mapping = old_mapping;
32441 mutex_unlock(&dev->struct_mutex);
32442- dev->open_count--;
32443+ local_dec(&dev->open_count);
32444 return retcode;
32445 }
32446 EXPORT_SYMBOL(drm_open);
32447@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
32448
32449 mutex_lock(&drm_global_mutex);
32450
32451- DRM_DEBUG("open_count = %d\n", dev->open_count);
32452+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
32453
32454 if (dev->driver->preclose)
32455 dev->driver->preclose(dev, file_priv);
32456@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
32457 * Begin inline drm_release
32458 */
32459
32460- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32461+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
32462 task_pid_nr(current),
32463 (long)old_encode_dev(file_priv->minor->device),
32464- dev->open_count);
32465+ local_read(&dev->open_count));
32466
32467 /* Release any auth tokens that might point to this file_priv,
32468 (do that under the drm_global_mutex) */
32469@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
32470 * End inline drm_release
32471 */
32472
32473- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32474- if (!--dev->open_count) {
32475+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32476+ if (local_dec_and_test(&dev->open_count)) {
32477 if (atomic_read(&dev->ioctl_count)) {
32478 DRM_ERROR("Device busy: %d\n",
32479 atomic_read(&dev->ioctl_count));
32480diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
32481index f731116..629842c 100644
32482--- a/drivers/gpu/drm/drm_global.c
32483+++ b/drivers/gpu/drm/drm_global.c
32484@@ -36,7 +36,7 @@
32485 struct drm_global_item {
32486 struct mutex mutex;
32487 void *object;
32488- int refcount;
32489+ atomic_t refcount;
32490 };
32491
32492 static struct drm_global_item glob[DRM_GLOBAL_NUM];
32493@@ -49,7 +49,7 @@ void drm_global_init(void)
32494 struct drm_global_item *item = &glob[i];
32495 mutex_init(&item->mutex);
32496 item->object = NULL;
32497- item->refcount = 0;
32498+ atomic_set(&item->refcount, 0);
32499 }
32500 }
32501
32502@@ -59,7 +59,7 @@ void drm_global_release(void)
32503 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
32504 struct drm_global_item *item = &glob[i];
32505 BUG_ON(item->object != NULL);
32506- BUG_ON(item->refcount != 0);
32507+ BUG_ON(atomic_read(&item->refcount) != 0);
32508 }
32509 }
32510
32511@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
32512 void *object;
32513
32514 mutex_lock(&item->mutex);
32515- if (item->refcount == 0) {
32516+ if (atomic_read(&item->refcount) == 0) {
32517 item->object = kzalloc(ref->size, GFP_KERNEL);
32518 if (unlikely(item->object == NULL)) {
32519 ret = -ENOMEM;
32520@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
32521 goto out_err;
32522
32523 }
32524- ++item->refcount;
32525+ atomic_inc(&item->refcount);
32526 ref->object = item->object;
32527 object = item->object;
32528 mutex_unlock(&item->mutex);
32529@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
32530 struct drm_global_item *item = &glob[ref->global_type];
32531
32532 mutex_lock(&item->mutex);
32533- BUG_ON(item->refcount == 0);
32534+ BUG_ON(atomic_read(&item->refcount) == 0);
32535 BUG_ON(ref->object != item->object);
32536- if (--item->refcount == 0) {
32537+ if (atomic_dec_and_test(&item->refcount)) {
32538 ref->release(ref);
32539 item->object = NULL;
32540 }
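
[The drm_global.c hunks convert an int refcount manipulated under a mutex into an atomic_t; the mutex already serialized it, and the atomic type keeps the counting robust even if a path ever touches it outside the lock. The key idiom is atomic_dec_and_test(), which returns true exactly once, on the transition to zero. Sketch (obj_get/obj_put are illustrative):

#include <linux/atomic.h>

static atomic_t refcount = ATOMIC_INIT(0);

static void obj_get(void)
{
	atomic_inc(&refcount);
}

static void obj_put(void (*release)(void))
{
	if (atomic_dec_and_test(&refcount))
		release();              /* runs only for the last put */
}
]
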
32541diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32542index d4b20ce..77a8d41 100644
32543--- a/drivers/gpu/drm/drm_info.c
32544+++ b/drivers/gpu/drm/drm_info.c
32545@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32546 struct drm_local_map *map;
32547 struct drm_map_list *r_list;
32548
32549- /* Hardcoded from _DRM_FRAME_BUFFER,
32550- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32551- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32552- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32553+ static const char * const types[] = {
32554+ [_DRM_FRAME_BUFFER] = "FB",
32555+ [_DRM_REGISTERS] = "REG",
32556+ [_DRM_SHM] = "SHM",
32557+ [_DRM_AGP] = "AGP",
32558+ [_DRM_SCATTER_GATHER] = "SG",
32559+ [_DRM_CONSISTENT] = "PCI",
32560+ [_DRM_GEM] = "GEM" };
32561 const char *type;
32562 int i;
32563
32564@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32565 map = r_list->map;
32566 if (!map)
32567 continue;
32568- if (map->type < 0 || map->type > 5)
32569+ if (map->type >= ARRAY_SIZE(types))
32570 type = "??";
32571 else
32572 type = types[map->type];
32573@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32574 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32575 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32576 vma->vm_flags & VM_IO ? 'i' : '-',
32577+#ifdef CONFIG_GRKERNSEC_HIDESYM
32578+ 0);
32579+#else
32580 vma->vm_pgoff);
32581+#endif
32582
32583 #if defined(__i386__)
32584 pgprot = pgprot_val(vma->vm_page_prot);
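
[The drm_info.c hunk replaces a positional string table plus a hard-coded "type > 5" bound with designated initializers and an ARRAY_SIZE() check, so the names stay glued to their _DRM_* enum values and the bounds check tracks the table as entries are added (as _DRM_GEM is here). The same shape with illustrative enum names:

enum map_type { T_FB, T_REG, T_SHM, T_AGP, T_SG, T_PCI, T_GEM };

static const char * const type_names[] = {
	[T_FB]  = "FB",  [T_REG] = "REG", [T_SHM] = "SHM",
	[T_AGP] = "AGP", [T_SG]  = "SG",  [T_PCI] = "PCI",
	[T_GEM] = "GEM",
};

#define NUM_TYPES (sizeof(type_names) / sizeof(type_names[0]))

static const char *type_name(unsigned int t)
{
	/* unsigned t also rules out the old "type < 0" case */
	return (t < NUM_TYPES && type_names[t]) ? type_names[t] : "??";
}
]
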
32585diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32586index 2f4c434..764794b 100644
32587--- a/drivers/gpu/drm/drm_ioc32.c
32588+++ b/drivers/gpu/drm/drm_ioc32.c
32589@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32590 request = compat_alloc_user_space(nbytes);
32591 if (!access_ok(VERIFY_WRITE, request, nbytes))
32592 return -EFAULT;
32593- list = (struct drm_buf_desc *) (request + 1);
32594+ list = (struct drm_buf_desc __user *) (request + 1);
32595
32596 if (__put_user(count, &request->count)
32597 || __put_user(list, &request->list))
32598@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32599 request = compat_alloc_user_space(nbytes);
32600 if (!access_ok(VERIFY_WRITE, request, nbytes))
32601 return -EFAULT;
32602- list = (struct drm_buf_pub *) (request + 1);
32603+ list = (struct drm_buf_pub __user *) (request + 1);
32604
32605 if (__put_user(count, &request->count)
32606 || __put_user(list, &request->list))
32607diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32608index 23dd975..63e9801 100644
32609--- a/drivers/gpu/drm/drm_ioctl.c
32610+++ b/drivers/gpu/drm/drm_ioctl.c
32611@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32612 stats->data[i].value =
32613 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32614 else
32615- stats->data[i].value = atomic_read(&dev->counts[i]);
32616+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32617 stats->data[i].type = dev->types[i];
32618 }
32619
32620diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32621index d752c96..fe08455 100644
32622--- a/drivers/gpu/drm/drm_lock.c
32623+++ b/drivers/gpu/drm/drm_lock.c
32624@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32625 if (drm_lock_take(&master->lock, lock->context)) {
32626 master->lock.file_priv = file_priv;
32627 master->lock.lock_time = jiffies;
32628- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32629+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32630 break; /* Got lock */
32631 }
32632
32633@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32634 return -EINVAL;
32635 }
32636
32637- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32638+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32639
32640 if (drm_lock_free(&master->lock, lock->context)) {
32641 /* FIXME: Should really bail out here. */
32642diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
32643index c236fd2..6b5f2e7 100644
32644--- a/drivers/gpu/drm/drm_stub.c
32645+++ b/drivers/gpu/drm/drm_stub.c
32646@@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
32647
32648 drm_device_set_unplugged(dev);
32649
32650- if (dev->open_count == 0) {
32651+ if (local_read(&dev->open_count) == 0) {
32652 drm_put_dev(dev);
32653 }
32654 mutex_unlock(&drm_global_mutex);
32655diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32656index 004ecdf..db1f6e0 100644
32657--- a/drivers/gpu/drm/i810/i810_dma.c
32658+++ b/drivers/gpu/drm/i810/i810_dma.c
32659@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32660 dma->buflist[vertex->idx],
32661 vertex->discard, vertex->used);
32662
32663- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32664- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32665+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32666+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32667 sarea_priv->last_enqueue = dev_priv->counter - 1;
32668 sarea_priv->last_dispatch = (int)hw_status[5];
32669
32670@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32671 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32672 mc->last_render);
32673
32674- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32675- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32676+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32677+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32678 sarea_priv->last_enqueue = dev_priv->counter - 1;
32679 sarea_priv->last_dispatch = (int)hw_status[5];
32680
32681diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32682index 6e0acad..93c8289 100644
32683--- a/drivers/gpu/drm/i810/i810_drv.h
32684+++ b/drivers/gpu/drm/i810/i810_drv.h
32685@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32686 int page_flipping;
32687
32688 wait_queue_head_t irq_queue;
32689- atomic_t irq_received;
32690- atomic_t irq_emitted;
32691+ atomic_unchecked_t irq_received;
32692+ atomic_unchecked_t irq_emitted;
32693
32694 int front_offset;
32695 } drm_i810_private_t;
32696diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
32697index 3a1a495..995c093 100644
32698--- a/drivers/gpu/drm/i915/i915_debugfs.c
32699+++ b/drivers/gpu/drm/i915/i915_debugfs.c
32700@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
32701 I915_READ(GTIMR));
32702 }
32703 seq_printf(m, "Interrupts received: %d\n",
32704- atomic_read(&dev_priv->irq_received));
32705+ atomic_read_unchecked(&dev_priv->irq_received));
32706 for_each_ring(ring, dev_priv, i) {
32707 if (IS_GEN6(dev) || IS_GEN7(dev)) {
32708 seq_printf(m,
32709diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
32710index 61ae104..f8a4bc1 100644
32711--- a/drivers/gpu/drm/i915/i915_dma.c
32712+++ b/drivers/gpu/drm/i915/i915_dma.c
32713@@ -1274,7 +1274,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
32714 bool can_switch;
32715
32716 spin_lock(&dev->count_lock);
32717- can_switch = (dev->open_count == 0);
32718+ can_switch = (local_read(&dev->open_count) == 0);
32719 spin_unlock(&dev->count_lock);
32720 return can_switch;
32721 }
32722diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
32723index 92f1750..3beba74 100644
32724--- a/drivers/gpu/drm/i915/i915_drv.h
32725+++ b/drivers/gpu/drm/i915/i915_drv.h
32726@@ -430,7 +430,7 @@ typedef struct drm_i915_private {
32727
32728 struct resource mch_res;
32729
32730- atomic_t irq_received;
32731+ atomic_unchecked_t irq_received;
32732
32733 /* protects the irq masks */
32734 spinlock_t irq_lock;
32735@@ -1055,7 +1055,7 @@ struct drm_i915_gem_object {
32736 * will be page flipped away on the next vblank. When it
32737 * reaches 0, dev_priv->pending_flip_queue will be woken up.
32738 */
32739- atomic_t pending_flip;
32740+ atomic_unchecked_t pending_flip;
32741 };
32742
32743 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
32744@@ -1558,7 +1558,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
32745 struct drm_i915_private *dev_priv, unsigned port);
32746 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
32747 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
32748-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32749+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
32750 {
32751 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
32752 }
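
[The i915_drv.h hunk turns an "extern inline" header function into "static inline". The linkage rules for extern inline differ between gnu89 and C99 (one mode expects an out-of-line definition elsewhere, the other makes this the external definition), which is exactly why kernel headers avoid it; static inline gives each includer a private copy with no linkage surprises either way. Header-safe form, illustrative body:

#include <stdbool.h>

/* internal linkage in every translation unit: no duplicate symbols */
static inline bool is_forced_bit(unsigned int flags)
{
	return flags & 1u;
}
]
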
32753diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32754index 67036e9..b9f1357 100644
32755--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32756+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
32757@@ -681,7 +681,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
32758 i915_gem_clflush_object(obj);
32759
32760 if (obj->base.pending_write_domain)
32761- flips |= atomic_read(&obj->pending_flip);
32762+ flips |= atomic_read_unchecked(&obj->pending_flip);
32763
32764 flush_domains |= obj->base.write_domain;
32765 }
32766@@ -712,9 +712,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
32767
32768 static int
32769 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
32770- int count)
32771+ unsigned int count)
32772 {
32773- int i;
32774+ unsigned int i;
32775
32776 for (i = 0; i < count; i++) {
32777 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
32778diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
32779index dc29ace..137d83a 100644
32780--- a/drivers/gpu/drm/i915/i915_irq.c
32781+++ b/drivers/gpu/drm/i915/i915_irq.c
32782@@ -531,7 +531,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
32783 u32 pipe_stats[I915_MAX_PIPES];
32784 bool blc_event;
32785
32786- atomic_inc(&dev_priv->irq_received);
32787+ atomic_inc_unchecked(&dev_priv->irq_received);
32788
32789 while (true) {
32790 iir = I915_READ(VLV_IIR);
32791@@ -678,7 +678,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
32792 irqreturn_t ret = IRQ_NONE;
32793 int i;
32794
32795- atomic_inc(&dev_priv->irq_received);
32796+ atomic_inc_unchecked(&dev_priv->irq_received);
32797
32798 /* disable master interrupt before clearing iir */
32799 de_ier = I915_READ(DEIER);
32800@@ -753,7 +753,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
32801 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
32802 u32 hotplug_mask;
32803
32804- atomic_inc(&dev_priv->irq_received);
32805+ atomic_inc_unchecked(&dev_priv->irq_received);
32806
32807 /* disable master interrupt before clearing iir */
32808 de_ier = I915_READ(DEIER);
32809@@ -1762,7 +1762,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
32810 {
32811 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32812
32813- atomic_set(&dev_priv->irq_received, 0);
32814+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32815
32816 I915_WRITE(HWSTAM, 0xeffe);
32817
32818@@ -1788,7 +1788,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
32819 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32820 int pipe;
32821
32822- atomic_set(&dev_priv->irq_received, 0);
32823+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32824
32825 /* VLV magic */
32826 I915_WRITE(VLV_IMR, 0);
32827@@ -2093,7 +2093,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
32828 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32829 int pipe;
32830
32831- atomic_set(&dev_priv->irq_received, 0);
32832+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32833
32834 for_each_pipe(pipe)
32835 I915_WRITE(PIPESTAT(pipe), 0);
32836@@ -2144,7 +2144,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
32837 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
32838 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
32839
32840- atomic_inc(&dev_priv->irq_received);
32841+ atomic_inc_unchecked(&dev_priv->irq_received);
32842
32843 iir = I915_READ16(IIR);
32844 if (iir == 0)
32845@@ -2229,7 +2229,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
32846 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32847 int pipe;
32848
32849- atomic_set(&dev_priv->irq_received, 0);
32850+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32851
32852 if (I915_HAS_HOTPLUG(dev)) {
32853 I915_WRITE(PORT_HOTPLUG_EN, 0);
32854@@ -2324,7 +2324,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
32855 };
32856 int pipe, ret = IRQ_NONE;
32857
32858- atomic_inc(&dev_priv->irq_received);
32859+ atomic_inc_unchecked(&dev_priv->irq_received);
32860
32861 iir = I915_READ(IIR);
32862 do {
32863@@ -2450,7 +2450,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
32864 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
32865 int pipe;
32866
32867- atomic_set(&dev_priv->irq_received, 0);
32868+ atomic_set_unchecked(&dev_priv->irq_received, 0);
32869
32870 I915_WRITE(PORT_HOTPLUG_EN, 0);
32871 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
32872@@ -2557,7 +2557,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
32873 int irq_received;
32874 int ret = IRQ_NONE, pipe;
32875
32876- atomic_inc(&dev_priv->irq_received);
32877+ atomic_inc_unchecked(&dev_priv->irq_received);
32878
32879 iir = I915_READ(IIR);
32880
32881diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
32882index 4d3c7c6..eaac87b 100644
32883--- a/drivers/gpu/drm/i915/intel_display.c
32884+++ b/drivers/gpu/drm/i915/intel_display.c
32885@@ -2131,7 +2131,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
32886
32887 wait_event(dev_priv->pending_flip_queue,
32888 atomic_read(&dev_priv->mm.wedged) ||
32889- atomic_read(&obj->pending_flip) == 0);
32890+ atomic_read_unchecked(&obj->pending_flip) == 0);
32891
32892 /* Big Hammer, we also need to ensure that any pending
32893 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
32894@@ -6221,8 +6221,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
32895
32896 obj = work->old_fb_obj;
32897
32898- atomic_clear_mask(1 << intel_crtc->plane,
32899- &obj->pending_flip.counter);
32900+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
32901 wake_up(&dev_priv->pending_flip_queue);
32902
32903 queue_work(dev_priv->wq, &work->work);
32904@@ -6589,7 +6588,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32905 /* Block clients from rendering to the new back buffer until
32906 * the flip occurs and the object is no longer visible.
32907 */
32908- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32909+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32910 atomic_inc(&intel_crtc->unpin_work_count);
32911
32912 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
32913@@ -6606,7 +6605,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
32914
32915 cleanup_pending:
32916 atomic_dec(&intel_crtc->unpin_work_count);
32917- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32918+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
32919 drm_gem_object_unreference(&work->old_fb_obj->base);
32920 drm_gem_object_unreference(&obj->base);
32921 mutex_unlock(&dev->struct_mutex);
32922diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
32923index 54558a0..2d97005 100644
32924--- a/drivers/gpu/drm/mga/mga_drv.h
32925+++ b/drivers/gpu/drm/mga/mga_drv.h
32926@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
32927 u32 clear_cmd;
32928 u32 maccess;
32929
32930- atomic_t vbl_received; /**< Number of vblanks received. */
32931+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
32932 wait_queue_head_t fence_queue;
32933- atomic_t last_fence_retired;
32934+ atomic_unchecked_t last_fence_retired;
32935 u32 next_fence_to_post;
32936
32937 unsigned int fb_cpp;
32938diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
32939index 598c281..60d590e 100644
32940--- a/drivers/gpu/drm/mga/mga_irq.c
32941+++ b/drivers/gpu/drm/mga/mga_irq.c
32942@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
32943 if (crtc != 0)
32944 return 0;
32945
32946- return atomic_read(&dev_priv->vbl_received);
32947+ return atomic_read_unchecked(&dev_priv->vbl_received);
32948 }
32949
32950
32951@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
32952 /* VBLANK interrupt */
32953 if (status & MGA_VLINEPEN) {
32954 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
32955- atomic_inc(&dev_priv->vbl_received);
32956+ atomic_inc_unchecked(&dev_priv->vbl_received);
32957 drm_handle_vblank(dev, 0);
32958 handled = 1;
32959 }
32960@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
32961 if ((prim_start & ~0x03) != (prim_end & ~0x03))
32962 MGA_WRITE(MGA_PRIMEND, prim_end);
32963
32964- atomic_inc(&dev_priv->last_fence_retired);
32965+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
32966 DRM_WAKEUP(&dev_priv->fence_queue);
32967 handled = 1;
32968 }
32969@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
32970 * using fences.
32971 */
32972 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
32973- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
32974+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
32975 - *sequence) <= (1 << 23)));
32976
32977 *sequence = cur_fence;
32978diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
32979index 09fdef2..57f5c3b 100644
32980--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
32981+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
32982@@ -1240,7 +1240,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
32983 struct bit_table {
32984 const char id;
32985 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
32986-};
32987+} __no_const;
32988
32989 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
32990
32991diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
32992index a101699..a163f0a 100644
32993--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
32994+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
32995@@ -80,7 +80,7 @@ struct nouveau_drm {
32996 struct drm_global_reference mem_global_ref;
32997 struct ttm_bo_global_ref bo_global_ref;
32998 struct ttm_bo_device bdev;
32999- atomic_t validate_sequence;
33000+ atomic_unchecked_t validate_sequence;
33001 int (*move)(struct nouveau_channel *,
33002 struct ttm_buffer_object *,
33003 struct ttm_mem_reg *, struct ttm_mem_reg *);
33004diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
33005index cdb83ac..27f0a16 100644
33006--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
33007+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
33008@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
33009 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
33010 struct nouveau_channel *);
33011 u32 (*read)(struct nouveau_channel *);
33012-};
33013+} __no_const;
33014
33015 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
33016
33017diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
33018index 5e2f521..0d21436 100644
33019--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
33020+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
33021@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
33022 int trycnt = 0;
33023 int ret, i;
33024
33025- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
33026+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
33027 retry:
33028 if (++trycnt > 100000) {
33029 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
33030diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
33031index 6f0ac64..9c2dfb4 100644
33032--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
33033+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
33034@@ -63,7 +63,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
33035 bool can_switch;
33036
33037 spin_lock(&dev->count_lock);
33038- can_switch = (dev->open_count == 0);
33039+ can_switch = (local_read(&dev->open_count) == 0);
33040 spin_unlock(&dev->count_lock);
33041 return can_switch;
33042 }
33043diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
33044index 9f6f55c..30e3a29 100644
33045--- a/drivers/gpu/drm/nouveau/nv50_evo.c
33046+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
33047@@ -152,9 +152,9 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
33048 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
33049 evo->object->oclass->ofuncs =
33050 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
33051- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
33052- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
33053- evo->object->oclass->ofuncs->rd08 =
33054+ *(void**)&evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
33055+ *(void**)&evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
33056+ *(void**)&evo->object->oclass->ofuncs->rd08 =
33057 ioremap(pci_resource_start(dev->pdev, 0) +
33058 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
33059 return 0;
33060diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
33061index b562b59..9d725a8 100644
33062--- a/drivers/gpu/drm/nouveau/nv50_sor.c
33063+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
33064@@ -317,7 +317,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
33065 }
33066
33067 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
33068- struct dp_train_func func = {
33069+ static struct dp_train_func func = {
33070 .link_set = nv50_sor_dp_link_set,
33071 .train_set = nv50_sor_dp_train_set,
33072 .train_adj = nv50_sor_dp_train_adj
33073diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
33074index c402fca..f1d694b 100644
33075--- a/drivers/gpu/drm/nouveau/nvd0_display.c
33076+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
33077@@ -1389,7 +1389,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
33078 nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
33079
33080 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
33081- struct dp_train_func func = {
33082+ static struct dp_train_func func = {
33083 .link_set = nvd0_sor_dp_link_set,
33084 .train_set = nvd0_sor_dp_train_set,
33085 .train_adj = nvd0_sor_dp_train_adj
33086diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33087index d4660cf..70dbe65 100644
33088--- a/drivers/gpu/drm/r128/r128_cce.c
33089+++ b/drivers/gpu/drm/r128/r128_cce.c
33090@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
33091
33092 /* GH: Simple idle check.
33093 */
33094- atomic_set(&dev_priv->idle_count, 0);
33095+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33096
33097 /* We don't support anything other than bus-mastering ring mode,
33098 * but the ring can be in either AGP or PCI space for the ring
33099diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33100index 930c71b..499aded 100644
33101--- a/drivers/gpu/drm/r128/r128_drv.h
33102+++ b/drivers/gpu/drm/r128/r128_drv.h
33103@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33104 int is_pci;
33105 unsigned long cce_buffers_offset;
33106
33107- atomic_t idle_count;
33108+ atomic_unchecked_t idle_count;
33109
33110 int page_flipping;
33111 int current_page;
33112 u32 crtc_offset;
33113 u32 crtc_offset_cntl;
33114
33115- atomic_t vbl_received;
33116+ atomic_unchecked_t vbl_received;
33117
33118 u32 color_fmt;
33119 unsigned int front_offset;
33120diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33121index 2ea4f09..d391371 100644
33122--- a/drivers/gpu/drm/r128/r128_irq.c
33123+++ b/drivers/gpu/drm/r128/r128_irq.c
33124@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33125 if (crtc != 0)
33126 return 0;
33127
33128- return atomic_read(&dev_priv->vbl_received);
33129+ return atomic_read_unchecked(&dev_priv->vbl_received);
33130 }
33131
33132 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33133@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33134 /* VBLANK interrupt */
33135 if (status & R128_CRTC_VBLANK_INT) {
33136 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33137- atomic_inc(&dev_priv->vbl_received);
33138+ atomic_inc_unchecked(&dev_priv->vbl_received);
33139 drm_handle_vblank(dev, 0);
33140 return IRQ_HANDLED;
33141 }
33142diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33143index 19bb7e6..de7e2a2 100644
33144--- a/drivers/gpu/drm/r128/r128_state.c
33145+++ b/drivers/gpu/drm/r128/r128_state.c
33146@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
33147
33148 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
33149 {
33150- if (atomic_read(&dev_priv->idle_count) == 0)
33151+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
33152 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33153 else
33154- atomic_set(&dev_priv->idle_count, 0);
33155+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33156 }
33157
33158 #endif
33159diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33160index 5a82b6b..9e69c73 100644
33161--- a/drivers/gpu/drm/radeon/mkregtable.c
33162+++ b/drivers/gpu/drm/radeon/mkregtable.c
33163@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33164 regex_t mask_rex;
33165 regmatch_t match[4];
33166 char buf[1024];
33167- size_t end;
33168+ long end;
33169 int len;
33170 int done = 0;
33171 int r;
33172 unsigned o;
33173 struct offset *offset;
33174 char last_reg_s[10];
33175- int last_reg;
33176+ unsigned long last_reg;
33177
33178 if (regcomp
33179 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33180diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
33181index 008d645..de03849 100644
33182--- a/drivers/gpu/drm/radeon/radeon_device.c
33183+++ b/drivers/gpu/drm/radeon/radeon_device.c
33184@@ -941,7 +941,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
33185 bool can_switch;
33186
33187 spin_lock(&dev->count_lock);
33188- can_switch = (dev->open_count == 0);
33189+ can_switch = (local_read(&dev->open_count) == 0);
33190 spin_unlock(&dev->count_lock);
33191 return can_switch;
33192 }
33193diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33194index a1b59ca..86f2d44 100644
33195--- a/drivers/gpu/drm/radeon/radeon_drv.h
33196+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33197@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
33198
33199 /* SW interrupt */
33200 wait_queue_head_t swi_queue;
33201- atomic_t swi_emitted;
33202+ atomic_unchecked_t swi_emitted;
33203 int vblank_crtc;
33204 uint32_t irq_enable_reg;
33205 uint32_t r500_disp_irq_reg;
33206diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33207index c180df8..cd80dd2d 100644
33208--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33209+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33210@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33211 request = compat_alloc_user_space(sizeof(*request));
33212 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33213 || __put_user(req32.param, &request->param)
33214- || __put_user((void __user *)(unsigned long)req32.value,
33215+ || __put_user((unsigned long)req32.value,
33216 &request->value))
33217 return -EFAULT;
33218
33219diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33220index e771033..a0bc6b3 100644
33221--- a/drivers/gpu/drm/radeon/radeon_irq.c
33222+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33223@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33224 unsigned int ret;
33225 RING_LOCALS;
33226
33227- atomic_inc(&dev_priv->swi_emitted);
33228- ret = atomic_read(&dev_priv->swi_emitted);
33229+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33230+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33231
33232 BEGIN_RING(4);
33233 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33234@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33235 drm_radeon_private_t *dev_priv =
33236 (drm_radeon_private_t *) dev->dev_private;
33237
33238- atomic_set(&dev_priv->swi_emitted, 0);
33239+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33240 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33241
33242 dev->max_vblank_count = 0x001fffff;
33243diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33244index 8e9057b..af6dacb 100644
33245--- a/drivers/gpu/drm/radeon/radeon_state.c
33246+++ b/drivers/gpu/drm/radeon/radeon_state.c
33247@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
33248 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
33249 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
33250
33251- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
33252+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
33253 sarea_priv->nbox * sizeof(depth_boxes[0])))
33254 return -EFAULT;
33255
33256@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33257 {
33258 drm_radeon_private_t *dev_priv = dev->dev_private;
33259 drm_radeon_getparam_t *param = data;
33260- int value;
33261+ int value = 0;
33262
33263 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33264
33265diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33266index 5ebe1b3..cf69ba0 100644
33267--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33268+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33269@@ -781,7 +781,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
33270 man->size = size >> PAGE_SHIFT;
33271 }
33272
33273-static struct vm_operations_struct radeon_ttm_vm_ops;
33274+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
33275 static const struct vm_operations_struct *ttm_vm_ops = NULL;
33276
33277 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33278@@ -822,8 +822,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33279 }
33280 if (unlikely(ttm_vm_ops == NULL)) {
33281 ttm_vm_ops = vma->vm_ops;
33282+ pax_open_kernel();
33283 radeon_ttm_vm_ops = *ttm_vm_ops;
33284 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33285+ pax_close_kernel();
33286 }
33287 vma->vm_ops = &radeon_ttm_vm_ops;
33288 return 0;
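
[radeon_ttm.c shows the standard grsecurity sequence for structures that the constify/KERNEXEC machinery keeps read-only: copy-and-patch the vm_operations_struct inside a pax_open_kernel()/pax_close_kernel() window, which briefly lifts the write protection. Sketch, with the grsecurity tree assumed for the types and helpers:

static vm_operations_struct_no_const patched_vm_ops __read_only;

static void install_fault_hook(const struct vm_operations_struct *base,
			       int (*fault)(struct vm_area_struct *,
					    struct vm_fault *))
{
	pax_open_kernel();
	patched_vm_ops = *base;         /* copy the template ops */
	patched_vm_ops.fault = fault;   /* override just one hook */
	pax_close_kernel();
}
]
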
33289diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33290index 5706d2a..17aedaa 100644
33291--- a/drivers/gpu/drm/radeon/rs690.c
33292+++ b/drivers/gpu/drm/radeon/rs690.c
33293@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33294 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33295 rdev->pm.sideport_bandwidth.full)
33296 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33297- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
33298+ read_delay_latency.full = dfixed_const(800 * 1000);
33299 read_delay_latency.full = dfixed_div(read_delay_latency,
33300 rdev->pm.igp_sideport_mclk);
33301+ a.full = dfixed_const(370);
33302+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
33303 } else {
33304 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33305 rdev->pm.k8_bandwidth.full)
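
[The rs690.c hunk is a 32-bit overflow fix in 20.12 fixed point: dfixed_const(x) stores x << 12 in a u32, and 370 * 800 * 1000 shifted left by 12 needs about 41 bits, so the patch divides by the memory clock first and multiplies by dfixed_const(370) afterwards. A quick check of the magnitudes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* dfixed_const(x) == (u32)(x << 12): 20.12 fixed point */
	uint64_t before = (uint64_t)370 * 800 * 1000 << 12; /* ~1.21e12 */
	uint64_t after  = (uint64_t)800 * 1000 << 12;       /* ~3.28e9  */

	printf("%d %d\n", before > UINT32_MAX, after > UINT32_MAX); /* 1 0 */
	return 0;
}
]
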
33306diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
33307index bd2a3b4..122d9ad 100644
33308--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
33309+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
33310@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
33311 static int ttm_pool_mm_shrink(struct shrinker *shrink,
33312 struct shrink_control *sc)
33313 {
33314- static atomic_t start_pool = ATOMIC_INIT(0);
33315+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
33316 unsigned i;
33317- unsigned pool_offset = atomic_add_return(1, &start_pool);
33318+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
33319 struct ttm_page_pool *pool;
33320 int shrink_pages = sc->nr_to_scan;
33321
33322diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33323index 893a650..6190d3b 100644
33324--- a/drivers/gpu/drm/via/via_drv.h
33325+++ b/drivers/gpu/drm/via/via_drv.h
33326@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33327 typedef uint32_t maskarray_t[5];
33328
33329 typedef struct drm_via_irq {
33330- atomic_t irq_received;
33331+ atomic_unchecked_t irq_received;
33332 uint32_t pending_mask;
33333 uint32_t enable_mask;
33334 wait_queue_head_t irq_queue;
33335@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33336 struct timeval last_vblank;
33337 int last_vblank_valid;
33338 unsigned usec_per_vblank;
33339- atomic_t vbl_received;
33340+ atomic_unchecked_t vbl_received;
33341 drm_via_state_t hc_state;
33342 char pci_buf[VIA_PCI_BUF_SIZE];
33343 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33344diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33345index ac98964..5dbf512 100644
33346--- a/drivers/gpu/drm/via/via_irq.c
33347+++ b/drivers/gpu/drm/via/via_irq.c
33348@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33349 if (crtc != 0)
33350 return 0;
33351
33352- return atomic_read(&dev_priv->vbl_received);
33353+ return atomic_read_unchecked(&dev_priv->vbl_received);
33354 }
33355
33356 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33357@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33358
33359 status = VIA_READ(VIA_REG_INTERRUPT);
33360 if (status & VIA_IRQ_VBLANK_PENDING) {
33361- atomic_inc(&dev_priv->vbl_received);
33362- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33363+ atomic_inc_unchecked(&dev_priv->vbl_received);
33364+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33365 do_gettimeofday(&cur_vblank);
33366 if (dev_priv->last_vblank_valid) {
33367 dev_priv->usec_per_vblank =
33368@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33369 dev_priv->last_vblank = cur_vblank;
33370 dev_priv->last_vblank_valid = 1;
33371 }
33372- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33373+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33374 DRM_DEBUG("US per vblank is: %u\n",
33375 dev_priv->usec_per_vblank);
33376 }
33377@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33378
33379 for (i = 0; i < dev_priv->num_irqs; ++i) {
33380 if (status & cur_irq->pending_mask) {
33381- atomic_inc(&cur_irq->irq_received);
33382+ atomic_inc_unchecked(&cur_irq->irq_received);
33383 DRM_WAKEUP(&cur_irq->irq_queue);
33384 handled = 1;
33385 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
33386@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
33387 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33388 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33389 masks[irq][4]));
33390- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33391+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33392 } else {
33393 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33394 (((cur_irq_sequence =
33395- atomic_read(&cur_irq->irq_received)) -
33396+ atomic_read_unchecked(&cur_irq->irq_received)) -
33397 *sequence) <= (1 << 23)));
33398 }
33399 *sequence = cur_irq_sequence;
33400@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
33401 }
33402
33403 for (i = 0; i < dev_priv->num_irqs; ++i) {
33404- atomic_set(&cur_irq->irq_received, 0);
33405+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33406 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33407 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33408 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33409@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33410 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33411 case VIA_IRQ_RELATIVE:
33412 irqwait->request.sequence +=
33413- atomic_read(&cur_irq->irq_received);
33414+ atomic_read_unchecked(&cur_irq->irq_received);
33415 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33416 case VIA_IRQ_ABSOLUTE:
33417 break;
33418diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33419index 88a179e..57fe50481c 100644
33420--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33421+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
33422@@ -263,7 +263,7 @@ struct vmw_private {
33423 * Fencing and IRQs.
33424 */
33425
33426- atomic_t marker_seq;
33427+ atomic_unchecked_t marker_seq;
33428 wait_queue_head_t fence_queue;
33429 wait_queue_head_t fifo_queue;
33430 int fence_queue_waiters; /* Protected by hw_mutex */
33431diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33432index 3eb1486..0a47ee9 100644
33433--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33434+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
33435@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
33436 (unsigned int) min,
33437 (unsigned int) fifo->capabilities);
33438
33439- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
33440+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
33441 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
33442 vmw_marker_queue_init(&fifo->marker_queue);
33443 return vmw_fifo_send_fence(dev_priv, &dummy);
33444@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
33445 if (reserveable)
33446 iowrite32(bytes, fifo_mem +
33447 SVGA_FIFO_RESERVED);
33448- return fifo_mem + (next_cmd >> 2);
33449+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
33450 } else {
33451 need_bounce = true;
33452 }
33453@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
33454
33455 fm = vmw_fifo_reserve(dev_priv, bytes);
33456 if (unlikely(fm == NULL)) {
33457- *seqno = atomic_read(&dev_priv->marker_seq);
33458+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
33459 ret = -ENOMEM;
33460 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
33461 false, 3*HZ);
33462@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
33463 }
33464
33465 do {
33466- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
33467+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
33468 } while (*seqno == 0);
33469
33470 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
33471diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33472index 4640adb..e1384ed 100644
33473--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33474+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
33475@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
33476 * emitted. Then the fence is stale and signaled.
33477 */
33478
33479- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
33480+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
33481 > VMW_FENCE_WRAP);
33482
33483 return ret;
33484@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
33485
33486 if (fifo_idle)
33487 down_read(&fifo_state->rwsem);
33488- signal_seq = atomic_read(&dev_priv->marker_seq);
33489+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
33490 ret = 0;
33491
33492 for (;;) {
33493diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33494index 8a8725c..afed796 100644
33495--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33496+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
33497@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
33498 while (!vmw_lag_lt(queue, us)) {
33499 spin_lock(&queue->lock);
33500 if (list_empty(&queue->head))
33501- seqno = atomic_read(&dev_priv->marker_seq);
33502+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
33503 else {
33504 marker = list_first_entry(&queue->head,
33505 struct vmw_marker, head);
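Note: marker_seq is a textbook example of a counter that must be allowed to wrap. vmw_seqno_passed() above decides whether a fence is stale by looking at the distance between the current sequence and the fence's sequence (compared against VMW_FENCE_WRAP), i.e. serial-number arithmetic modulo 2^32. A minimal sketch of the idiom, assuming 32-bit sequence numbers:

	#include <stdbool.h>
	#include <stdint.h>

	/* "a has passed b" iff a is at most half the number space ahead of b;
	 * correct across wraparound because the subtraction is modulo 2^32. */
	static bool seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

An overflow trap on the increment in vmw_fifo_send_fence() would turn this routine wrap into a kernel event, hence atomic_add_return_unchecked().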
33506diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33507index 52146db..ae33762 100644
33508--- a/drivers/hid/hid-core.c
33509+++ b/drivers/hid/hid-core.c
33510@@ -2201,7 +2201,7 @@ static bool hid_ignore(struct hid_device *hdev)
33511
33512 int hid_add_device(struct hid_device *hdev)
33513 {
33514- static atomic_t id = ATOMIC_INIT(0);
33515+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33516 int ret;
33517
33518 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33519@@ -2236,7 +2236,7 @@ int hid_add_device(struct hid_device *hdev)
33520 /* XXX hack, any other cleaner solution after the driver core
33521 * is converted to allow more than 20 bytes as the device name? */
33522 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33523- hdev->vendor, hdev->product, atomic_inc_return(&id));
33524+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33525
33526 hid_debug_register(hdev, dev_name(&hdev->dev));
33527 ret = device_add(&hdev->dev);
33528diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
33529index eec3291..8ed706b 100644
33530--- a/drivers/hid/hid-wiimote-debug.c
33531+++ b/drivers/hid/hid-wiimote-debug.c
33532@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
33533 else if (size == 0)
33534 return -EIO;
33535
33536- if (copy_to_user(u, buf, size))
33537+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
33538 return -EFAULT;
33539
33540 *off += size;
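Note: the wiimote hunk above prepends an explicit bound check to the user copy, so a read request larger than the fixed kernel buffer fails with -EFAULT instead of copying out whatever happens to sit next to it. The same belt-and-braces guard is applied below to mousedev (copy_to_user) and to the ISDN b1 and icn drivers (copy_from_user). A sketch of the pattern in kernel style:

	static ssize_t dbg_read(char __user *u, size_t size)
	{
		char buf[32];	/* fixed kernel buffer, filled elsewhere */

		/* reject oversized requests before touching user memory */
		if (size > sizeof(buf) || copy_to_user(u, buf, size))
			return -EFAULT;
		return size;
	}

Note the order: the size check comes first, so the copy can never exceed the kernel buffer, whether that buffer is the source (an information leak) or the destination (a stack overflow).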
33541diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33542index 14599e2..711c965 100644
33543--- a/drivers/hid/usbhid/hiddev.c
33544+++ b/drivers/hid/usbhid/hiddev.c
33545@@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33546 break;
33547
33548 case HIDIOCAPPLICATION:
33549- if (arg < 0 || arg >= hid->maxapplication)
33550+ if (arg >= hid->maxapplication)
33551 break;
33552
33553 for (i = 0; i < hid->maxcollection; i++)
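Note: in hiddev_ioctl() the argument is unsigned, so `arg < 0` is a tautology -- it can never be true and only earns a compiler warning. Dropping it loses nothing: a userspace value of -1 arrives as ULONG_MAX and is still rejected by the remaining range check:

	unsigned long arg = (unsigned long)-1;	/* userspace passed -1 */
	/* arg < 0 is always false for an unsigned type;
	 * arg >= hid->maxapplication still catches it, since arg == ULONG_MAX. */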
33554diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
33555index f4c3d28..82f45a9 100644
33556--- a/drivers/hv/channel.c
33557+++ b/drivers/hv/channel.c
33558@@ -402,8 +402,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
33559 int ret = 0;
33560 int t;
33561
33562- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
33563- atomic_inc(&vmbus_connection.next_gpadl_handle);
33564+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
33565+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
33566
33567 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
33568 if (ret)
33569diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
33570index 3648f8f..30ef30d 100644
33571--- a/drivers/hv/hv.c
33572+++ b/drivers/hv/hv.c
33573@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
33574 u64 output_address = (output) ? virt_to_phys(output) : 0;
33575 u32 output_address_hi = output_address >> 32;
33576 u32 output_address_lo = output_address & 0xFFFFFFFF;
33577- void *hypercall_page = hv_context.hypercall_page;
33578+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
33579
33580 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
33581 "=a"(hv_status_lo) : "d" (control_hi),
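Note: ktva_ktla() here, and ktla_ktva() in the lguest hunks further down, come from PaX KERNEXEC on i386, where kernel text is reachable through two mappings: the executable mapping the CPU fetches from and a writable alias at a fixed offset. The macros translate an address from one view to the other, so calls go through the executable mapping and writes through the writable one. An illustrative model only -- the real macros are arch-specific PaX internals, and the offset name below is invented:

	/* Hypothetical: one direction adds the alias offset, the other removes it. */
	#define KERNEXEC_ALIAS_OFFSET	0x10000000UL	/* assumption, not the real value */
	#define ktla_ktva(p) ((void *)((unsigned long)(p) + KERNEXEC_ALIAS_OFFSET))
	#define ktva_ktla(p) ((void *)((unsigned long)(p) - KERNEXEC_ALIAS_OFFSET))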
33582diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
33583index d8d1fad..b91caf7 100644
33584--- a/drivers/hv/hyperv_vmbus.h
33585+++ b/drivers/hv/hyperv_vmbus.h
33586@@ -594,7 +594,7 @@ enum vmbus_connect_state {
33587 struct vmbus_connection {
33588 enum vmbus_connect_state conn_state;
33589
33590- atomic_t next_gpadl_handle;
33591+ atomic_unchecked_t next_gpadl_handle;
33592
33593 /*
33594 * Represents channel interrupts. Each bit position represents a
33595diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
33596index 8e1a9ec..4687821 100644
33597--- a/drivers/hv/vmbus_drv.c
33598+++ b/drivers/hv/vmbus_drv.c
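Note: this hunk (and isdn_tty below) depends on grsecurity's conversion of struct tty_port's open count from a plain int to atomic_t; the ++/-- in the open, close and hangup paths become atomic RMW operations, so racing open() and close() calls cannot lose an update. A C11 userspace model of the idiom, assuming the kernel's atomic_dec_return() semantics (decrement, then return the new value):

	#include <stdatomic.h>

	struct port { atomic_int count; };

	static void port_open(struct port *p)
	{
		atomic_fetch_add(&p->count, 1);			/* atomic_inc() */
	}

	static int port_close(struct port *p)
	{
		return atomic_fetch_sub(&p->count, 1) - 1;	/* atomic_dec_return() */
	}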
33599@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
33600 {
33601 int ret = 0;
33602
33603- static atomic_t device_num = ATOMIC_INIT(0);
33604+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
33605
33606 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
33607- atomic_inc_return(&device_num));
33608+ atomic_inc_return_unchecked(&device_num));
33609
33610 child_device_obj->device.bus = &hv_bus;
33611 child_device_obj->device.parent = &hv_acpi_dev->dev;
33612diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33613index 07a0c1a..0cac334 100644
33614--- a/drivers/hwmon/sht15.c
33615+++ b/drivers/hwmon/sht15.c
33616@@ -169,7 +169,7 @@ struct sht15_data {
33617 int supply_uV;
33618 bool supply_uV_valid;
33619 struct work_struct update_supply_work;
33620- atomic_t interrupt_handled;
33621+ atomic_unchecked_t interrupt_handled;
33622 };
33623
33624 /**
33625@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
33626 return ret;
33627
33628 gpio_direction_input(data->pdata->gpio_data);
33629- atomic_set(&data->interrupt_handled, 0);
33630+ atomic_set_unchecked(&data->interrupt_handled, 0);
33631
33632 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33633 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33634 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33635 /* Only relevant if the interrupt hasn't occurred. */
33636- if (!atomic_read(&data->interrupt_handled))
33637+ if (!atomic_read_unchecked(&data->interrupt_handled))
33638 schedule_work(&data->read_work);
33639 }
33640 ret = wait_event_timeout(data->wait_queue,
33641@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33642
33643 /* First disable the interrupt */
33644 disable_irq_nosync(irq);
33645- atomic_inc(&data->interrupt_handled);
33646+ atomic_inc_unchecked(&data->interrupt_handled);
33647 /* Then schedule a reading work struct */
33648 if (data->state != SHT15_READING_NOTHING)
33649 schedule_work(&data->read_work);
33650@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33651 * If not, then start the interrupt again - care here as could
33652 * have gone low in meantime so verify it hasn't!
33653 */
33654- atomic_set(&data->interrupt_handled, 0);
33655+ atomic_set_unchecked(&data->interrupt_handled, 0);
33656 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33657 /* If still not occurred or another handler was scheduled */
33658 if (gpio_get_value(data->pdata->gpio_data)
33659- || atomic_read(&data->interrupt_handled))
33660+ || atomic_read_unchecked(&data->interrupt_handled))
33661 return;
33662 }
33663
33664diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33665index 378fcb5..5e91fa8 100644
33666--- a/drivers/i2c/busses/i2c-amd756-s4882.c
33667+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33668@@ -43,7 +43,7 @@
33669 extern struct i2c_adapter amd756_smbus;
33670
33671 static struct i2c_adapter *s4882_adapter;
33672-static struct i2c_algorithm *s4882_algo;
33673+static i2c_algorithm_no_const *s4882_algo;
33674
33675 /* Wrapper access functions for multiplexed SMBus */
33676 static DEFINE_MUTEX(amd756_lock);
33677diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33678index 29015eb..af2d8e9 100644
33679--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33680+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33681@@ -41,7 +41,7 @@
33682 extern struct i2c_adapter *nforce2_smbus;
33683
33684 static struct i2c_adapter *s4985_adapter;
33685-static struct i2c_algorithm *s4985_algo;
33686+static i2c_algorithm_no_const *s4985_algo;
33687
33688 /* Wrapper access functions for multiplexed SMBus */
33689 static DEFINE_MUTEX(nforce2_lock);
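Note: the s4882 and s4985 hybrid drivers build their i2c_algorithm at runtime by copying the parent adapter's and patching in wrapper functions. grsecurity's CONSTIFY gcc plugin forces structures consisting only of function pointers -- i2c_algorithm among them -- to be const, so these two writable instances need the *_no_const typedef that opts a declaration out. An illustrative model, assuming the plugin's attribute name:

	/* Same layout as the constified struct, but writable at runtime. */
	typedef struct i2c_algorithm __attribute__((no_const)) i2c_algorithm_no_const;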
33690diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
33691index 8126824..55a2798 100644
33692--- a/drivers/ide/ide-cd.c
33693+++ b/drivers/ide/ide-cd.c
33694@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
33695 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
33696 if ((unsigned long)buf & alignment
33697 || blk_rq_bytes(rq) & q->dma_pad_mask
33698- || object_is_on_stack(buf))
33699+ || object_starts_on_stack(buf))
33700 drive->dma = 0;
33701 }
33702 }
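Note: ide-cd must not DMA to a buffer on the kernel stack. object_is_on_stack() answers "does this pointer point into the stack?", while grsecurity's object_starts_on_stack() asks the question that matters for a buffer: "does the object begin on the stack?". A sketch in kernel style of what the replacement helper checks:

	static inline int object_starts_on_stack(const void *obj)
	{
		const void *stack = task_stack_page(current);

		return obj >= stack && obj < stack + THREAD_SIZE;
	}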
33703diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
33704index 394fea2..c833880 100644
33705--- a/drivers/infiniband/core/cm.c
33706+++ b/drivers/infiniband/core/cm.c
33707@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
33708
33709 struct cm_counter_group {
33710 struct kobject obj;
33711- atomic_long_t counter[CM_ATTR_COUNT];
33712+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
33713 };
33714
33715 struct cm_counter_attribute {
33716@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
33717 struct ib_mad_send_buf *msg = NULL;
33718 int ret;
33719
33720- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33721+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33722 counter[CM_REQ_COUNTER]);
33723
33724 /* Quick state check to discard duplicate REQs. */
33725@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
33726 if (!cm_id_priv)
33727 return;
33728
33729- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33730+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33731 counter[CM_REP_COUNTER]);
33732 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
33733 if (ret)
33734@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
33735 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
33736 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
33737 spin_unlock_irq(&cm_id_priv->lock);
33738- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33739+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33740 counter[CM_RTU_COUNTER]);
33741 goto out;
33742 }
33743@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
33744 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
33745 dreq_msg->local_comm_id);
33746 if (!cm_id_priv) {
33747- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33748+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33749 counter[CM_DREQ_COUNTER]);
33750 cm_issue_drep(work->port, work->mad_recv_wc);
33751 return -EINVAL;
33752@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
33753 case IB_CM_MRA_REP_RCVD:
33754 break;
33755 case IB_CM_TIMEWAIT:
33756- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33757+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33758 counter[CM_DREQ_COUNTER]);
33759 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33760 goto unlock;
33761@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
33762 cm_free_msg(msg);
33763 goto deref;
33764 case IB_CM_DREQ_RCVD:
33765- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33766+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33767 counter[CM_DREQ_COUNTER]);
33768 goto unlock;
33769 default:
33770@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
33771 ib_modify_mad(cm_id_priv->av.port->mad_agent,
33772 cm_id_priv->msg, timeout)) {
33773 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
33774- atomic_long_inc(&work->port->
33775+ atomic_long_inc_unchecked(&work->port->
33776 counter_group[CM_RECV_DUPLICATES].
33777 counter[CM_MRA_COUNTER]);
33778 goto out;
33779@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
33780 break;
33781 case IB_CM_MRA_REQ_RCVD:
33782 case IB_CM_MRA_REP_RCVD:
33783- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33784+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33785 counter[CM_MRA_COUNTER]);
33786 /* fall through */
33787 default:
33788@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
33789 case IB_CM_LAP_IDLE:
33790 break;
33791 case IB_CM_MRA_LAP_SENT:
33792- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33793+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33794 counter[CM_LAP_COUNTER]);
33795 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
33796 goto unlock;
33797@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
33798 cm_free_msg(msg);
33799 goto deref;
33800 case IB_CM_LAP_RCVD:
33801- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33802+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33803 counter[CM_LAP_COUNTER]);
33804 goto unlock;
33805 default:
33806@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
33807 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
33808 if (cur_cm_id_priv) {
33809 spin_unlock_irq(&cm.lock);
33810- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
33811+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
33812 counter[CM_SIDR_REQ_COUNTER]);
33813 goto out; /* Duplicate message. */
33814 }
33815@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
33816 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
33817 msg->retries = 1;
33818
33819- atomic_long_add(1 + msg->retries,
33820+ atomic_long_add_unchecked(1 + msg->retries,
33821 &port->counter_group[CM_XMIT].counter[attr_index]);
33822 if (msg->retries)
33823- atomic_long_add(msg->retries,
33824+ atomic_long_add_unchecked(msg->retries,
33825 &port->counter_group[CM_XMIT_RETRIES].
33826 counter[attr_index]);
33827
33828@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
33829 }
33830
33831 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
33832- atomic_long_inc(&port->counter_group[CM_RECV].
33833+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
33834 counter[attr_id - CM_ATTR_ID_OFFSET]);
33835
33836 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
33837@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
33838 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
33839
33840 return sprintf(buf, "%ld\n",
33841- atomic_long_read(&group->counter[cm_attr->index]));
33842+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
33843 }
33844
33845 static const struct sysfs_ops cm_counter_ops = {
33846diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
33847index 176c8f9..2627b62 100644
33848--- a/drivers/infiniband/core/fmr_pool.c
33849+++ b/drivers/infiniband/core/fmr_pool.c
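Note: adding '\\' to seq_path()'s escape set means a backslash embedded in the bitmap file's name is itself escaped on output. Without that, a crafted filename could emit literal sequences such as "\t" that a parser of this seq_file would confuse with the escaping seq_path() performs for real whitespace:

	seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");	/* '\\' now escaped too */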
33850@@ -98,8 +98,8 @@ struct ib_fmr_pool {
33851
33852 struct task_struct *thread;
33853
33854- atomic_t req_ser;
33855- atomic_t flush_ser;
33856+ atomic_unchecked_t req_ser;
33857+ atomic_unchecked_t flush_ser;
33858
33859 wait_queue_head_t force_wait;
33860 };
33861@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33862 struct ib_fmr_pool *pool = pool_ptr;
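Note: a pure cleanup -- the doubled parentheses around the equality test are the pattern compilers treat as "did you mean assignment?", so clang's -Wparentheses-equality warns on it even though the code was correct:

	if ((cmd == DM_DEV_CREATE_CMD))	/* warns: extraneous parentheses */
	if (cmd == DM_DEV_CREATE_CMD)	/* same meaning, no warning */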
33863
33864 do {
33865- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
33866+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
33867 ib_fmr_batch_release(pool);
33868
33869- atomic_inc(&pool->flush_ser);
33870+ atomic_inc_unchecked(&pool->flush_ser);
33871 wake_up_interruptible(&pool->force_wait);
33872
33873 if (pool->flush_function)
33874@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
33875 }
33876
33877 set_current_state(TASK_INTERRUPTIBLE);
33878- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
33879+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
33880 !kthread_should_stop())
33881 schedule();
33882 __set_current_state(TASK_RUNNING);
33883@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
33884 pool->dirty_watermark = params->dirty_watermark;
33885 pool->dirty_len = 0;
33886 spin_lock_init(&pool->pool_lock);
33887- atomic_set(&pool->req_ser, 0);
33888- atomic_set(&pool->flush_ser, 0);
33889+ atomic_set_unchecked(&pool->req_ser, 0);
33890+ atomic_set_unchecked(&pool->flush_ser, 0);
33891 init_waitqueue_head(&pool->force_wait);
33892
33893 pool->thread = kthread_run(ib_fmr_cleanup_thread,
33894@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
33895 }
33896 spin_unlock_irq(&pool->pool_lock);
33897
33898- serial = atomic_inc_return(&pool->req_ser);
33899+ serial = atomic_inc_return_unchecked(&pool->req_ser);
33900 wake_up_process(pool->thread);
33901
33902 if (wait_event_interruptible(pool->force_wait,
33903- atomic_read(&pool->flush_ser) - serial >= 0))
33904+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
33905 return -EINTR;
33906
33907 return 0;
33908@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
33909 } else {
33910 list_add_tail(&fmr->list, &pool->dirty_list);
33911 if (++pool->dirty_len >= pool->dirty_watermark) {
33912- atomic_inc(&pool->req_ser);
33913+ atomic_inc_unchecked(&pool->req_ser);
33914 wake_up_process(pool->thread);
33915 }
33916 }
33917diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
33918index afd8179..598063f 100644
33919--- a/drivers/infiniband/hw/cxgb4/mem.c
33920+++ b/drivers/infiniband/hw/cxgb4/mem.c
33921@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
33922 int err;
33923 struct fw_ri_tpte tpt;
33924 u32 stag_idx;
33925- static atomic_t key;
33926+ static atomic_unchecked_t key;
33927
33928 if (c4iw_fatal_error(rdev))
33929 return -EIO;
33930@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
33931 if (rdev->stats.stag.cur > rdev->stats.stag.max)
33932 rdev->stats.stag.max = rdev->stats.stag.cur;
33933 mutex_unlock(&rdev->stats.lock);
33934- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
33935+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
33936 }
33937 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
33938 __func__, stag_state, type, pdid, stag_idx);
33939diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
33940index 79b3dbc..96e5fcc 100644
33941--- a/drivers/infiniband/hw/ipath/ipath_rc.c
33942+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
33943@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
33944 struct ib_atomic_eth *ateth;
33945 struct ipath_ack_entry *e;
33946 u64 vaddr;
33947- atomic64_t *maddr;
33948+ atomic64_unchecked_t *maddr;
33949 u64 sdata;
33950 u32 rkey;
33951 u8 next;
33952@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
33953 IB_ACCESS_REMOTE_ATOMIC)))
33954 goto nack_acc_unlck;
33955 /* Perform atomic OP and save result. */
33956- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
33957+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
33958 sdata = be64_to_cpu(ateth->swap_data);
33959 e = &qp->s_ack_queue[qp->r_head_ack_queue];
33960 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
33961- (u64) atomic64_add_return(sdata, maddr) - sdata :
33962+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
33963 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
33964 be64_to_cpu(ateth->compare_data),
33965 sdata);
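Note: this is remote atomic support -- ipath_rc_rcv() executes an incoming FETCH_ADD directly against registered memory, and InfiniBand defines fetch-and-add as modulo-2^64 arithmetic. The add must therefore be free to wrap, which is why the target is cast to atomic64_unchecked_t rather than left under REFCOUNT's overflow trap. The returned value is the pre-add contents, recovered above as the new value minus sdata; a userspace model:

	#include <stdint.h>

	/* IB FETCH_ADD: store (old + addend) mod 2^64, hand back old. */
	static uint64_t ib_fetch_add(uint64_t *target, uint64_t addend)
	{
		return __atomic_fetch_add(target, addend, __ATOMIC_SEQ_CST);
	}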
33966diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
33967index 1f95bba..9530f87 100644
33968--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
33969+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
33970@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
33971 unsigned long flags;
33972 struct ib_wc wc;
33973 u64 sdata;
33974- atomic64_t *maddr;
33975+ atomic64_unchecked_t *maddr;
33976 enum ib_wc_status send_status;
33977
33978 /*
33979@@ -382,11 +382,11 @@ again:
33980 IB_ACCESS_REMOTE_ATOMIC)))
33981 goto acc_err;
33982 /* Perform atomic OP and save result. */
33983- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
33984+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
33985 sdata = wqe->wr.wr.atomic.compare_add;
33986 *(u64 *) sqp->s_sge.sge.vaddr =
33987 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
33988- (u64) atomic64_add_return(sdata, maddr) - sdata :
33989+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
33990 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
33991 sdata, wqe->wr.wr.atomic.swap);
33992 goto send_comp;
33993diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
33994index 748db2d..5f75cc3 100644
33995--- a/drivers/infiniband/hw/nes/nes.c
33996+++ b/drivers/infiniband/hw/nes/nes.c
33997@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
33998 LIST_HEAD(nes_adapter_list);
33999 static LIST_HEAD(nes_dev_list);
34000
34001-atomic_t qps_destroyed;
34002+atomic_unchecked_t qps_destroyed;
34003
34004 static unsigned int ee_flsh_adapter;
34005 static unsigned int sysfs_nonidx_addr;
34006@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34007 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
34008 struct nes_adapter *nesadapter = nesdev->nesadapter;
34009
34010- atomic_inc(&qps_destroyed);
34011+ atomic_inc_unchecked(&qps_destroyed);
34012
34013 /* Free the control structures */
34014
34015diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34016index 33cc589..3bd6538 100644
34017--- a/drivers/infiniband/hw/nes/nes.h
34018+++ b/drivers/infiniband/hw/nes/nes.h
34019@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
34020 extern unsigned int wqm_quanta;
34021 extern struct list_head nes_adapter_list;
34022
34023-extern atomic_t cm_connects;
34024-extern atomic_t cm_accepts;
34025-extern atomic_t cm_disconnects;
34026-extern atomic_t cm_closes;
34027-extern atomic_t cm_connecteds;
34028-extern atomic_t cm_connect_reqs;
34029-extern atomic_t cm_rejects;
34030-extern atomic_t mod_qp_timouts;
34031-extern atomic_t qps_created;
34032-extern atomic_t qps_destroyed;
34033-extern atomic_t sw_qps_destroyed;
34034+extern atomic_unchecked_t cm_connects;
34035+extern atomic_unchecked_t cm_accepts;
34036+extern atomic_unchecked_t cm_disconnects;
34037+extern atomic_unchecked_t cm_closes;
34038+extern atomic_unchecked_t cm_connecteds;
34039+extern atomic_unchecked_t cm_connect_reqs;
34040+extern atomic_unchecked_t cm_rejects;
34041+extern atomic_unchecked_t mod_qp_timouts;
34042+extern atomic_unchecked_t qps_created;
34043+extern atomic_unchecked_t qps_destroyed;
34044+extern atomic_unchecked_t sw_qps_destroyed;
34045 extern u32 mh_detected;
34046 extern u32 mh_pauses_sent;
34047 extern u32 cm_packets_sent;
34048@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
34049 extern u32 cm_packets_received;
34050 extern u32 cm_packets_dropped;
34051 extern u32 cm_packets_retrans;
34052-extern atomic_t cm_listens_created;
34053-extern atomic_t cm_listens_destroyed;
34054+extern atomic_unchecked_t cm_listens_created;
34055+extern atomic_unchecked_t cm_listens_destroyed;
34056 extern u32 cm_backlog_drops;
34057-extern atomic_t cm_loopbacks;
34058-extern atomic_t cm_nodes_created;
34059-extern atomic_t cm_nodes_destroyed;
34060-extern atomic_t cm_accel_dropped_pkts;
34061-extern atomic_t cm_resets_recvd;
34062-extern atomic_t pau_qps_created;
34063-extern atomic_t pau_qps_destroyed;
34064+extern atomic_unchecked_t cm_loopbacks;
34065+extern atomic_unchecked_t cm_nodes_created;
34066+extern atomic_unchecked_t cm_nodes_destroyed;
34067+extern atomic_unchecked_t cm_accel_dropped_pkts;
34068+extern atomic_unchecked_t cm_resets_recvd;
34069+extern atomic_unchecked_t pau_qps_created;
34070+extern atomic_unchecked_t pau_qps_destroyed;
34071
34072 extern u32 int_mod_timer_init;
34073 extern u32 int_mod_cq_depth_256;
34074diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
34075index cfaacaf..fa0722e 100644
34076--- a/drivers/infiniband/hw/nes/nes_cm.c
34077+++ b/drivers/infiniband/hw/nes/nes_cm.c
34078@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
34079 u32 cm_packets_retrans;
34080 u32 cm_packets_created;
34081 u32 cm_packets_received;
34082-atomic_t cm_listens_created;
34083-atomic_t cm_listens_destroyed;
34084+atomic_unchecked_t cm_listens_created;
34085+atomic_unchecked_t cm_listens_destroyed;
34086 u32 cm_backlog_drops;
34087-atomic_t cm_loopbacks;
34088-atomic_t cm_nodes_created;
34089-atomic_t cm_nodes_destroyed;
34090-atomic_t cm_accel_dropped_pkts;
34091-atomic_t cm_resets_recvd;
34092+atomic_unchecked_t cm_loopbacks;
34093+atomic_unchecked_t cm_nodes_created;
34094+atomic_unchecked_t cm_nodes_destroyed;
34095+atomic_unchecked_t cm_accel_dropped_pkts;
34096+atomic_unchecked_t cm_resets_recvd;
34097
34098 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
34099 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
34100@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
34101
34102 static struct nes_cm_core *g_cm_core;
34103
34104-atomic_t cm_connects;
34105-atomic_t cm_accepts;
34106-atomic_t cm_disconnects;
34107-atomic_t cm_closes;
34108-atomic_t cm_connecteds;
34109-atomic_t cm_connect_reqs;
34110-atomic_t cm_rejects;
34111+atomic_unchecked_t cm_connects;
34112+atomic_unchecked_t cm_accepts;
34113+atomic_unchecked_t cm_disconnects;
34114+atomic_unchecked_t cm_closes;
34115+atomic_unchecked_t cm_connecteds;
34116+atomic_unchecked_t cm_connect_reqs;
34117+atomic_unchecked_t cm_rejects;
34118
34119 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
34120 {
34121@@ -1281,7 +1281,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
34122 kfree(listener);
34123 listener = NULL;
34124 ret = 0;
34125- atomic_inc(&cm_listens_destroyed);
34126+ atomic_inc_unchecked(&cm_listens_destroyed);
34127 } else {
34128 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
34129 }
34130@@ -1480,7 +1480,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
34131 cm_node->rem_mac);
34132
34133 add_hte_node(cm_core, cm_node);
34134- atomic_inc(&cm_nodes_created);
34135+ atomic_inc_unchecked(&cm_nodes_created);
34136
34137 return cm_node;
34138 }
34139@@ -1538,7 +1538,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
34140 }
34141
34142 atomic_dec(&cm_core->node_cnt);
34143- atomic_inc(&cm_nodes_destroyed);
34144+ atomic_inc_unchecked(&cm_nodes_destroyed);
34145 nesqp = cm_node->nesqp;
34146 if (nesqp) {
34147 nesqp->cm_node = NULL;
34148@@ -1602,7 +1602,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
34149
34150 static void drop_packet(struct sk_buff *skb)
34151 {
34152- atomic_inc(&cm_accel_dropped_pkts);
34153+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34154 dev_kfree_skb_any(skb);
34155 }
34156
34157@@ -1665,7 +1665,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
34158 {
34159
34160 int reset = 0; /* whether to send reset in case of err.. */
34161- atomic_inc(&cm_resets_recvd);
34162+ atomic_inc_unchecked(&cm_resets_recvd);
34163 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
34164 " refcnt=%d\n", cm_node, cm_node->state,
34165 atomic_read(&cm_node->ref_count));
34166@@ -2306,7 +2306,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
34167 rem_ref_cm_node(cm_node->cm_core, cm_node);
34168 return NULL;
34169 }
34170- atomic_inc(&cm_loopbacks);
34171+ atomic_inc_unchecked(&cm_loopbacks);
34172 loopbackremotenode->loopbackpartner = cm_node;
34173 loopbackremotenode->tcp_cntxt.rcv_wscale =
34174 NES_CM_DEFAULT_RCV_WND_SCALE;
34175@@ -2581,7 +2581,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
34176 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
34177 else {
34178 rem_ref_cm_node(cm_core, cm_node);
34179- atomic_inc(&cm_accel_dropped_pkts);
34180+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
34181 dev_kfree_skb_any(skb);
34182 }
34183 break;
34184@@ -2889,7 +2889,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34185
34186 if ((cm_id) && (cm_id->event_handler)) {
34187 if (issue_disconn) {
34188- atomic_inc(&cm_disconnects);
34189+ atomic_inc_unchecked(&cm_disconnects);
34190 cm_event.event = IW_CM_EVENT_DISCONNECT;
34191 cm_event.status = disconn_status;
34192 cm_event.local_addr = cm_id->local_addr;
34193@@ -2911,7 +2911,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
34194 }
34195
34196 if (issue_close) {
34197- atomic_inc(&cm_closes);
34198+ atomic_inc_unchecked(&cm_closes);
34199 nes_disconnect(nesqp, 1);
34200
34201 cm_id->provider_data = nesqp;
34202@@ -3047,7 +3047,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34203
34204 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
34205 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
34206- atomic_inc(&cm_accepts);
34207+ atomic_inc_unchecked(&cm_accepts);
34208
34209 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
34210 netdev_refcnt_read(nesvnic->netdev));
34211@@ -3242,7 +3242,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
34212 struct nes_cm_core *cm_core;
34213 u8 *start_buff;
34214
34215- atomic_inc(&cm_rejects);
34216+ atomic_inc_unchecked(&cm_rejects);
34217 cm_node = (struct nes_cm_node *)cm_id->provider_data;
34218 loopback = cm_node->loopbackpartner;
34219 cm_core = cm_node->cm_core;
34220@@ -3302,7 +3302,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
34221 ntohl(cm_id->local_addr.sin_addr.s_addr),
34222 ntohs(cm_id->local_addr.sin_port));
34223
34224- atomic_inc(&cm_connects);
34225+ atomic_inc_unchecked(&cm_connects);
34226 nesqp->active_conn = 1;
34227
34228 /* cache the cm_id in the qp */
34229@@ -3412,7 +3412,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
34230 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
34231 return err;
34232 }
34233- atomic_inc(&cm_listens_created);
34234+ atomic_inc_unchecked(&cm_listens_created);
34235 }
34236
34237 cm_id->add_ref(cm_id);
34238@@ -3513,7 +3513,7 @@ static void cm_event_connected(struct nes_cm_event *event)
34239
34240 if (nesqp->destroyed)
34241 return;
34242- atomic_inc(&cm_connecteds);
34243+ atomic_inc_unchecked(&cm_connecteds);
34244 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
34245 " local port 0x%04X. jiffies = %lu.\n",
34246 nesqp->hwqp.qp_id,
34247@@ -3693,7 +3693,7 @@ static void cm_event_reset(struct nes_cm_event *event)
34248
34249 cm_id->add_ref(cm_id);
34250 ret = cm_id->event_handler(cm_id, &cm_event);
34251- atomic_inc(&cm_closes);
34252+ atomic_inc_unchecked(&cm_closes);
34253 cm_event.event = IW_CM_EVENT_CLOSE;
34254 cm_event.status = 0;
34255 cm_event.provider_data = cm_id->provider_data;
34256@@ -3729,7 +3729,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
34257 return;
34258 cm_id = cm_node->cm_id;
34259
34260- atomic_inc(&cm_connect_reqs);
34261+ atomic_inc_unchecked(&cm_connect_reqs);
34262 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34263 cm_node, cm_id, jiffies);
34264
34265@@ -3769,7 +3769,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
34266 return;
34267 cm_id = cm_node->cm_id;
34268
34269- atomic_inc(&cm_connect_reqs);
34270+ atomic_inc_unchecked(&cm_connect_reqs);
34271 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
34272 cm_node, cm_id, jiffies);
34273
34274diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
34275index 3ba7be3..c81f6ff 100644
34276--- a/drivers/infiniband/hw/nes/nes_mgt.c
34277+++ b/drivers/infiniband/hw/nes/nes_mgt.c
34278@@ -40,8 +40,8 @@
34279 #include "nes.h"
34280 #include "nes_mgt.h"
34281
34282-atomic_t pau_qps_created;
34283-atomic_t pau_qps_destroyed;
34284+atomic_unchecked_t pau_qps_created;
34285+atomic_unchecked_t pau_qps_destroyed;
34286
34287 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
34288 {
34289@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
34290 {
34291 struct sk_buff *skb;
34292 unsigned long flags;
34293- atomic_inc(&pau_qps_destroyed);
34294+ atomic_inc_unchecked(&pau_qps_destroyed);
34295
34296 /* Free packets that have not yet been forwarded */
34297 /* Lock is acquired by skb_dequeue when removing the skb */
34298@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
34299 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
34300 skb_queue_head_init(&nesqp->pau_list);
34301 spin_lock_init(&nesqp->pau_lock);
34302- atomic_inc(&pau_qps_created);
34303+ atomic_inc_unchecked(&pau_qps_created);
34304 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
34305 }
34306
34307diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
34308index 0564be7..f68b0f1 100644
34309--- a/drivers/infiniband/hw/nes/nes_nic.c
34310+++ b/drivers/infiniband/hw/nes/nes_nic.c
34311@@ -1272,39 +1272,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
34312 target_stat_values[++index] = mh_detected;
34313 target_stat_values[++index] = mh_pauses_sent;
34314 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
34315- target_stat_values[++index] = atomic_read(&cm_connects);
34316- target_stat_values[++index] = atomic_read(&cm_accepts);
34317- target_stat_values[++index] = atomic_read(&cm_disconnects);
34318- target_stat_values[++index] = atomic_read(&cm_connecteds);
34319- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
34320- target_stat_values[++index] = atomic_read(&cm_rejects);
34321- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
34322- target_stat_values[++index] = atomic_read(&qps_created);
34323- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
34324- target_stat_values[++index] = atomic_read(&qps_destroyed);
34325- target_stat_values[++index] = atomic_read(&cm_closes);
34326+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
34327+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
34328+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
34329+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
34330+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
34331+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
34332+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
34333+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
34334+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
34335+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
34336+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
34337 target_stat_values[++index] = cm_packets_sent;
34338 target_stat_values[++index] = cm_packets_bounced;
34339 target_stat_values[++index] = cm_packets_created;
34340 target_stat_values[++index] = cm_packets_received;
34341 target_stat_values[++index] = cm_packets_dropped;
34342 target_stat_values[++index] = cm_packets_retrans;
34343- target_stat_values[++index] = atomic_read(&cm_listens_created);
34344- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
34345+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
34346+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
34347 target_stat_values[++index] = cm_backlog_drops;
34348- target_stat_values[++index] = atomic_read(&cm_loopbacks);
34349- target_stat_values[++index] = atomic_read(&cm_nodes_created);
34350- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
34351- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
34352- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
34353+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
34354+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
34355+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
34356+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
34357+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
34358 target_stat_values[++index] = nesadapter->free_4kpbl;
34359 target_stat_values[++index] = nesadapter->free_256pbl;
34360 target_stat_values[++index] = int_mod_timer_init;
34361 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
34362 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
34363 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
34364- target_stat_values[++index] = atomic_read(&pau_qps_created);
34365- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
34366+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
34367+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
34368 }
34369
34370 /**
34371diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
34372index 07e4fba..685f041 100644
34373--- a/drivers/infiniband/hw/nes/nes_verbs.c
34374+++ b/drivers/infiniband/hw/nes/nes_verbs.c
34375@@ -46,9 +46,9 @@
34376
34377 #include <rdma/ib_umem.h>
34378
34379-atomic_t mod_qp_timouts;
34380-atomic_t qps_created;
34381-atomic_t sw_qps_destroyed;
34382+atomic_unchecked_t mod_qp_timouts;
34383+atomic_unchecked_t qps_created;
34384+atomic_unchecked_t sw_qps_destroyed;
34385
34386 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
34387
34388@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
34389 if (init_attr->create_flags)
34390 return ERR_PTR(-EINVAL);
34391
34392- atomic_inc(&qps_created);
34393+ atomic_inc_unchecked(&qps_created);
34394 switch (init_attr->qp_type) {
34395 case IB_QPT_RC:
34396 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
34397@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
34398 struct iw_cm_event cm_event;
34399 int ret = 0;
34400
34401- atomic_inc(&sw_qps_destroyed);
34402+ atomic_inc_unchecked(&sw_qps_destroyed);
34403 nesqp->destroyed = 1;
34404
34405 /* Blow away the connection if it exists. */
34406diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
34407index 4d11575..3e890e5 100644
34408--- a/drivers/infiniband/hw/qib/qib.h
34409+++ b/drivers/infiniband/hw/qib/qib.h
34410@@ -51,6 +51,7 @@
34411 #include <linux/completion.h>
34412 #include <linux/kref.h>
34413 #include <linux/sched.h>
34414+#include <linux/slab.h>
34415
34416 #include "qib_common.h"
34417 #include "qib_verbs.h"
34418diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
34419index da739d9..da1c7f4 100644
34420--- a/drivers/input/gameport/gameport.c
34421+++ b/drivers/input/gameport/gameport.c
34422@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
34423 */
34424 static void gameport_init_port(struct gameport *gameport)
34425 {
34426- static atomic_t gameport_no = ATOMIC_INIT(0);
34427+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
34428
34429 __module_get(THIS_MODULE);
34430
34431 mutex_init(&gameport->drv_mutex);
34432 device_initialize(&gameport->dev);
34433 dev_set_name(&gameport->dev, "gameport%lu",
34434- (unsigned long)atomic_inc_return(&gameport_no) - 1);
34435+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
34436 gameport->dev.bus = &gameport_bus;
34437 gameport->dev.release = gameport_release_port;
34438 if (gameport->parent)
34439diff --git a/drivers/input/input.c b/drivers/input/input.c
34440index 53a0dde..abffda7 100644
34441--- a/drivers/input/input.c
34442+++ b/drivers/input/input.c
34443@@ -1902,7 +1902,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
34444 */
34445 int input_register_device(struct input_dev *dev)
34446 {
34447- static atomic_t input_no = ATOMIC_INIT(0);
34448+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
34449 struct input_handler *handler;
34450 unsigned int packet_size;
34451 const char *path;
34452@@ -1945,7 +1945,7 @@ int input_register_device(struct input_dev *dev)
34453 dev->setkeycode = input_default_setkeycode;
34454
34455 dev_set_name(&dev->dev, "input%ld",
34456- (unsigned long) atomic_inc_return(&input_no) - 1);
34457+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
34458
34459 error = device_add(&dev->dev);
34460 if (error)
34461diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
34462index 04c69af..5f92d00 100644
34463--- a/drivers/input/joystick/sidewinder.c
34464+++ b/drivers/input/joystick/sidewinder.c
34465@@ -30,6 +30,7 @@
34466 #include <linux/kernel.h>
34467 #include <linux/module.h>
34468 #include <linux/slab.h>
34469+#include <linux/sched.h>
34470 #include <linux/init.h>
34471 #include <linux/input.h>
34472 #include <linux/gameport.h>
34473diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
34474index 83811e4..0822b90 100644
34475--- a/drivers/input/joystick/xpad.c
34476+++ b/drivers/input/joystick/xpad.c
34477@@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
34478
34479 static int xpad_led_probe(struct usb_xpad *xpad)
34480 {
34481- static atomic_t led_seq = ATOMIC_INIT(0);
34482+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
34483 long led_no;
34484 struct xpad_led *led;
34485 struct led_classdev *led_cdev;
34486@@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
34487 if (!led)
34488 return -ENOMEM;
34489
34490- led_no = (long)atomic_inc_return(&led_seq) - 1;
34491+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
34492
34493 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
34494 led->xpad = xpad;
34495diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
34496index 4c842c3..590b0bf 100644
34497--- a/drivers/input/mousedev.c
34498+++ b/drivers/input/mousedev.c
34499@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
34500
34501 spin_unlock_irq(&client->packet_lock);
34502
34503- if (copy_to_user(buffer, data, count))
34504+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
34505 return -EFAULT;
34506
34507 return count;
34508diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
34509index d0f7533..fb8215b 100644
34510--- a/drivers/input/serio/serio.c
34511+++ b/drivers/input/serio/serio.c
34512@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
34513 */
34514 static void serio_init_port(struct serio *serio)
34515 {
34516- static atomic_t serio_no = ATOMIC_INIT(0);
34517+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
34518
34519 __module_get(THIS_MODULE);
34520
34521@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
34522 mutex_init(&serio->drv_mutex);
34523 device_initialize(&serio->dev);
34524 dev_set_name(&serio->dev, "serio%ld",
34525- (long)atomic_inc_return(&serio_no) - 1);
34526+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
34527 serio->dev.bus = &serio_bus;
34528 serio->dev.release = serio_release_port;
34529 serio->dev.groups = serio_device_attr_groups;
34530diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
34531index c679867..6e2e34d 100644
34532--- a/drivers/isdn/capi/capi.c
34533+++ b/drivers/isdn/capi/capi.c
34534@@ -83,8 +83,8 @@ struct capiminor {
34535
34536 struct capi20_appl *ap;
34537 u32 ncci;
34538- atomic_t datahandle;
34539- atomic_t msgid;
34540+ atomic_unchecked_t datahandle;
34541+ atomic_unchecked_t msgid;
34542
34543 struct tty_port port;
34544 int ttyinstop;
34545@@ -393,7 +393,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
34546 capimsg_setu16(s, 2, mp->ap->applid);
34547 capimsg_setu8 (s, 4, CAPI_DATA_B3);
34548 capimsg_setu8 (s, 5, CAPI_RESP);
34549- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
34550+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
34551 capimsg_setu32(s, 8, mp->ncci);
34552 capimsg_setu16(s, 12, datahandle);
34553 }
34554@@ -514,14 +514,14 @@ static void handle_minor_send(struct capiminor *mp)
34555 mp->outbytes -= len;
34556 spin_unlock_bh(&mp->outlock);
34557
34558- datahandle = atomic_inc_return(&mp->datahandle);
34559+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
34560 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
34561 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34562 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
34563 capimsg_setu16(skb->data, 2, mp->ap->applid);
34564 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
34565 capimsg_setu8 (skb->data, 5, CAPI_REQ);
34566- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
34567+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
34568 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
34569 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
34570 capimsg_setu16(skb->data, 16, len); /* Data length */
34571diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
34572index 67abf3f..076b3a6 100644
34573--- a/drivers/isdn/gigaset/interface.c
34574+++ b/drivers/isdn/gigaset/interface.c
34575@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
34576 }
34577 tty->driver_data = cs;
34578
34579- ++cs->port.count;
34580+ atomic_inc(&cs->port.count);
34581
34582- if (cs->port.count == 1) {
34583+ if (atomic_read(&cs->port.count) == 1) {
34584 tty_port_tty_set(&cs->port, tty);
34585 tty->low_latency = 1;
34586 }
34587@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
34588
34589 if (!cs->connected)
34590 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
34591- else if (!cs->port.count)
34592+ else if (!atomic_read(&cs->port.count))
34593 dev_warn(cs->dev, "%s: device not opened\n", __func__);
34594- else if (!--cs->port.count)
34595+ else if (!atomic_dec_return(&cs->port.count))
34596 tty_port_tty_set(&cs->port, NULL);
34597
34598 mutex_unlock(&cs->mutex);
34599diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
34600index 821f7ac..28d4030 100644
34601--- a/drivers/isdn/hardware/avm/b1.c
34602+++ b/drivers/isdn/hardware/avm/b1.c
34603@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
34604 }
34605 if (left) {
34606 if (t4file->user) {
34607- if (copy_from_user(buf, dp, left))
34608+ if (left > sizeof buf || copy_from_user(buf, dp, left))
34609 return -EFAULT;
34610 } else {
34611 memcpy(buf, dp, left);
34612@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
34613 }
34614 if (left) {
34615 if (config->user) {
34616- if (copy_from_user(buf, dp, left))
34617+ if (left > sizeof buf || copy_from_user(buf, dp, left))
34618 return -EFAULT;
34619 } else {
34620 memcpy(buf, dp, left);
34621diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
34622index b817809..409caff 100644
34623--- a/drivers/isdn/i4l/isdn_tty.c
34624+++ b/drivers/isdn/i4l/isdn_tty.c
34625@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
34626
34627 #ifdef ISDN_DEBUG_MODEM_OPEN
34628 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
34629- port->count);
34630+ atomic_read(&port->count));
34631 #endif
34632- port->count++;
34633+ atomic_inc(&port->count);
34634 port->tty = tty;
34635 /*
34636 * Start up serial port
34637@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34638 #endif
34639 return;
34640 }
34641- if ((tty->count == 1) && (port->count != 1)) {
34642+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
34643 /*
34644 * Uh, oh. tty->count is 1, which means that the tty
34645 * structure will be freed. Info->count should always
34646@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
34647 * serial port won't be shutdown.
34648 */
34649 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
34650- "info->count is %d\n", port->count);
34651- port->count = 1;
34652+ "info->count is %d\n", atomic_read(&port->count));
34653+ atomic_set(&port->count, 1);
34654 }
34655- if (--port->count < 0) {
34656+ if (atomic_dec_return(&port->count) < 0) {
34657 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
34658- info->line, port->count);
34659- port->count = 0;
34660+ info->line, atomic_read(&port->count));
34661+ atomic_set(&port->count, 0);
34662 }
34663- if (port->count) {
34664+ if (atomic_read(&port->count)) {
34665 #ifdef ISDN_DEBUG_MODEM_OPEN
34666 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
34667 #endif
34668@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
34669 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
34670 return;
34671 isdn_tty_shutdown(info);
34672- port->count = 0;
34673+ atomic_set(&port->count, 0);
34674 port->flags &= ~ASYNC_NORMAL_ACTIVE;
34675 port->tty = NULL;
34676 wake_up_interruptible(&port->open_wait);
34677@@ -1971,7 +1971,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
34678 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
34679 modem_info *info = &dev->mdm.info[i];
34680
34681- if (info->port.count == 0)
34682+ if (atomic_read(&info->port.count) == 0)
34683 continue;
34684 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
34685 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
34686diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
34687index e74df7c..03a03ba 100644
34688--- a/drivers/isdn/icn/icn.c
34689+++ b/drivers/isdn/icn/icn.c
34690@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
34691 if (count > len)
34692 count = len;
34693 if (user) {
34694- if (copy_from_user(msg, buf, count))
34695+ if (count > sizeof msg || copy_from_user(msg, buf, count))
34696 return -EFAULT;
34697 } else
34698 memcpy(msg, buf, count);
34699diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
34700index b5fdcb7..5b6c59f 100644
34701--- a/drivers/lguest/core.c
34702+++ b/drivers/lguest/core.c
34703@@ -92,9 +92,17 @@ static __init int map_switcher(void)
34704 * it's worked so far. The end address needs +1 because __get_vm_area
34705 * allocates an extra guard page, so we need space for that.
34706 */
34707+
34708+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
34709+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34710+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
34711+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34712+#else
34713 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
34714 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
34715 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
34716+#endif
34717+
34718 if (!switcher_vma) {
34719 err = -ENOMEM;
34720 printk("lguest: could not map switcher pages high\n");
34721@@ -119,7 +127,7 @@ static __init int map_switcher(void)
34722 * Now the Switcher is mapped at the right address, we can't fail!
34723 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
34724 */
34725- memcpy(switcher_vma->addr, start_switcher_text,
34726+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
34727 end_switcher_text - start_switcher_text);
34728
34729 printk(KERN_INFO "lguest: mapped switcher at %p\n",
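Note: two independent PaX interactions in the lguest host. First, with KERNEXEC on 32-bit, memory from a plain VM_ALLOC area is mapped non-executable, so map_switcher() must request VM_KERNEXEC for the region the switcher code will run from; the source of the memcpy() goes through ktla_ktva() for the same two-mapping reason sketched at the hv hunk above. Second, with PER_CPU_PGD the page directory the CPU is actually using is not current->mm->pgd, so the host CR3 snapshot in x86/core.c below has to come from the register itself via read_cr3(). Sketch of the allocation, mirroring the hunk:

	/* kernel style; VM_KERNEXEC exists only with the PaX patch applied */
	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
				     VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR,
				     SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES + 1) * PAGE_SIZE);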
34730diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
34731index 4af12e1..0e89afe 100644
34732--- a/drivers/lguest/x86/core.c
34733+++ b/drivers/lguest/x86/core.c
34734@@ -59,7 +59,7 @@ static struct {
34735 /* Offset from where switcher.S was compiled to where we've copied it */
34736 static unsigned long switcher_offset(void)
34737 {
34738- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
34739+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
34740 }
34741
34742 /* This cpu's struct lguest_pages. */
34743@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
34744 * These copies are pretty cheap, so we do them unconditionally: */
34745 /* Save the current Host top-level page directory.
34746 */
34747+
34748+#ifdef CONFIG_PAX_PER_CPU_PGD
34749+ pages->state.host_cr3 = read_cr3();
34750+#else
34751 pages->state.host_cr3 = __pa(current->mm->pgd);
34752+#endif
34753+
34754 /*
34755 * Set up the Guest's page tables to see this CPU's pages (and no
34756 * other CPU's pages).
34757@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
34758 * compiled-in switcher code and the high-mapped copy we just made.
34759 */
34760 for (i = 0; i < IDT_ENTRIES; i++)
34761- default_idt_entries[i] += switcher_offset();
34762+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
34763
34764 /*
34765 * Set up the Switcher's per-cpu areas.
34766@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
34767 * it will be undisturbed when we switch. To change %cs and jump we
34768 * need this structure to feed to Intel's "lcall" instruction.
34769 */
34770- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
34771+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
34772 lguest_entry.segment = LGUEST_CS;
34773
34774 /*
34775diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
34776index 40634b0..4f5855e 100644
34777--- a/drivers/lguest/x86/switcher_32.S
34778+++ b/drivers/lguest/x86/switcher_32.S
34779@@ -87,6 +87,7 @@
34780 #include <asm/page.h>
34781 #include <asm/segment.h>
34782 #include <asm/lguest.h>
34783+#include <asm/processor-flags.h>
34784
34785 // We mark the start of the code to copy
34786 // It's placed in .text tho it's never run here
34787@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
34788 // Changes type when we load it: damn Intel!
34789 // For after we switch over our page tables
34790 // That entry will be read-only: we'd crash.
34791+
34792+#ifdef CONFIG_PAX_KERNEXEC
34793+ mov %cr0, %edx
34794+ xor $X86_CR0_WP, %edx
34795+ mov %edx, %cr0
34796+#endif
34797+
34798 movl $(GDT_ENTRY_TSS*8), %edx
34799 ltr %dx
34800
34801@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
34802 // Let's clear it again for our return.
34803 // The GDT descriptor of the Host
34804 // Points to the table after two "size" bytes
34805- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
34806+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
34807 // Clear "used" from type field (byte 5, bit 2)
34808- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
34809+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
34810+
34811+#ifdef CONFIG_PAX_KERNEXEC
34812+ mov %cr0, %eax
34813+ xor $X86_CR0_WP, %eax
34814+ mov %eax, %cr0
34815+#endif
34816
34817 // Once our page table's switched, the Guest is live!
34818 // The Host fades as we run this final step.
34819@@ -295,13 +309,12 @@ deliver_to_host:
34820 // I consulted gcc, and it gave
34821 // These instructions, which I gladly credit:
34822 leal (%edx,%ebx,8), %eax
34823- movzwl (%eax),%edx
34824- movl 4(%eax), %eax
34825- xorw %ax, %ax
34826- orl %eax, %edx
34827+ movl 4(%eax), %edx
34828+ movw (%eax), %dx
34829 // Now the address of the handler's in %edx
34830 // We call it now: its "iret" drops us home.
34831- jmp *%edx
34832+ ljmp $__KERNEL_CS, $1f
34833+1: jmp *%edx
34834
34835 // Every interrupt can come to us here
34836 // But we must truly tell each apart.
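
The two CONFIG_PAX_KERNEXEC stanzas in switcher_32.S bracket the one write the switcher must make into otherwise read-only memory: ltr sets the busy bit in the GDT's TSS descriptor, and clearing that bit again afterwards writes the descriptor directly, so CR0.WP is flipped off and back on around each. The deliver_to_host change additionally reloads %cs with __KERNEL_CS via a far jump before the indirect jump, so the host's interrupt handler runs on the host kernel code segment. In C, PaX wraps the same CR0.WP dance as pax_open_kernel()/pax_close_kernel(); a hedged sketch of that pattern (simplified, ignoring preemption and the exact asm PaX emits):

    #include <asm/special_insns.h>    /* read_cr0()/write_cr0() */
    #include <asm/processor-flags.h>  /* X86_CR0_WP */

    static inline unsigned long pax_open_kernel_sketch(void)
    {
        unsigned long cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP); /* writes to RO kernel pages now land */
        return cr0;
    }

    static inline void pax_close_kernel_sketch(unsigned long cr0)
    {
        write_cr0(cr0 | X86_CR0_WP);  /* restore write protection */
    }
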
34837diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
34838index 7155945..4bcc562 100644
34839--- a/drivers/md/bitmap.c
34840+++ b/drivers/md/bitmap.c
34841@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
34842 chunk_kb ? "KB" : "B");
34843 if (bitmap->storage.file) {
34844 seq_printf(seq, ", file: ");
34845- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
34846+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
34847 }
34848
34849 seq_printf(seq, "\n");
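
The bitmap_status() hunk adds '\\' to seq_path()'s escape set so a backslash embedded in the bitmap file's name is escaped just like whitespace, and a crafted path cannot forge what looks like an escape sequence in the one-record-per-line /proc output. The kernel's mangle_path() (fs/seq_file.c) does this with octal escapes; a standalone userspace model of the idea, not the kernel implementation:

    #include <stdio.h>
    #include <string.h>

    static void print_escaped(const char *s, const char *esc)
    {
        for (; *s; s++) {
            if (strchr(esc, *s))
                printf("\\%03o", (unsigned char)*s); /* '\' -> \134 */
            else
                putchar(*s);
        }
        putchar('\n');
    }

    int main(void)
    {
        /* with '\\' in the set, a backslash in the path can no longer
         * masquerade as an escape sequence in the output */
        print_escaped("/mnt/my\\evil\npath", " \t\n\\");
        return 0;
    }
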
34850diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
34851index a651d52..82f8a95 100644
34852--- a/drivers/md/dm-ioctl.c
34853+++ b/drivers/md/dm-ioctl.c
34854@@ -1601,7 +1601,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
34855 cmd == DM_LIST_VERSIONS_CMD)
34856 return 0;
34857
34858- if ((cmd == DM_DEV_CREATE_CMD)) {
34859+ if (cmd == DM_DEV_CREATE_CMD) {
34860 if (!*param->name) {
34861 DMWARN("name not supplied when creating device");
34862 return -EINVAL;
34863diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
34864index fd61f98..8050783 100644
34865--- a/drivers/md/dm-raid1.c
34866+++ b/drivers/md/dm-raid1.c
34867@@ -40,7 +40,7 @@ enum dm_raid1_error {
34868
34869 struct mirror {
34870 struct mirror_set *ms;
34871- atomic_t error_count;
34872+ atomic_unchecked_t error_count;
34873 unsigned long error_type;
34874 struct dm_dev *dev;
34875 sector_t offset;
34876@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
34877 struct mirror *m;
34878
34879 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
34880- if (!atomic_read(&m->error_count))
34881+ if (!atomic_read_unchecked(&m->error_count))
34882 return m;
34883
34884 return NULL;
34885@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
34886 * simple way to tell if a device has encountered
34887 * errors.
34888 */
34889- atomic_inc(&m->error_count);
34890+ atomic_inc_unchecked(&m->error_count);
34891
34892 if (test_and_set_bit(error_type, &m->error_type))
34893 return;
34894@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
34895 struct mirror *m = get_default_mirror(ms);
34896
34897 do {
34898- if (likely(!atomic_read(&m->error_count)))
34899+ if (likely(!atomic_read_unchecked(&m->error_count)))
34900 return m;
34901
34902 if (m-- == ms->mirror)
34903@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
34904 {
34905 struct mirror *default_mirror = get_default_mirror(m->ms);
34906
34907- return !atomic_read(&default_mirror->error_count);
34908+ return !atomic_read_unchecked(&default_mirror->error_count);
34909 }
34910
34911 static int mirror_available(struct mirror_set *ms, struct bio *bio)
34912@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
34913 */
34914 if (likely(region_in_sync(ms, region, 1)))
34915 m = choose_mirror(ms, bio->bi_sector);
34916- else if (m && atomic_read(&m->error_count))
34917+ else if (m && atomic_read_unchecked(&m->error_count))
34918 m = NULL;
34919
34920 if (likely(m))
34921@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
34922 }
34923
34924 ms->mirror[mirror].ms = ms;
34925- atomic_set(&(ms->mirror[mirror].error_count), 0);
34926+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
34927 ms->mirror[mirror].error_type = 0;
34928 ms->mirror[mirror].offset = offset;
34929
34930@@ -1356,7 +1356,7 @@ static void mirror_resume(struct dm_target *ti)
34931 */
34932 static char device_status_char(struct mirror *m)
34933 {
34934- if (!atomic_read(&(m->error_count)))
34935+ if (!atomic_read_unchecked(&(m->error_count)))
34936 return 'A';
34937
34938 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
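
From here on, most md/dm hunks are one mechanical conversion: under the PaX REFCOUNT hardening, atomic_t operations trap on overflow to defeat reference-count-overflow exploits, so counters that are statistics only and may legitimately wrap (error_count here; event_nr, uevent_seq, read_errors, corrected_errors and friends below) move to atomic_unchecked_t, whose accessors keep plain wrap-around semantics. A hedged sketch of what the unchecked variant amounts to; the real PaX version is per-arch asm and differs in detail:

    /* same layout as atomic_t, but its ops carry no overflow trap */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __sync_fetch_and_add(&v->counter, 1); /* plain add, may wrap */
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return *(volatile const int *)&v->counter;
    }
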
34939diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
34940index e2f87653..f279abe 100644
34941--- a/drivers/md/dm-stripe.c
34942+++ b/drivers/md/dm-stripe.c
34943@@ -20,7 +20,7 @@ struct stripe {
34944 struct dm_dev *dev;
34945 sector_t physical_start;
34946
34947- atomic_t error_count;
34948+ atomic_unchecked_t error_count;
34949 };
34950
34951 struct stripe_c {
34952@@ -183,7 +183,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
34953 kfree(sc);
34954 return r;
34955 }
34956- atomic_set(&(sc->stripe[i].error_count), 0);
34957+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
34958 }
34959
34960 ti->private = sc;
34961@@ -324,7 +324,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
34962 DMEMIT("%d ", sc->stripes);
34963 for (i = 0; i < sc->stripes; i++) {
34964 DMEMIT("%s ", sc->stripe[i].dev->name);
34965- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
34966+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
34967 'D' : 'A';
34968 }
34969 buffer[i] = '\0';
34970@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
34971 */
34972 for (i = 0; i < sc->stripes; i++)
34973 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
34974- atomic_inc(&(sc->stripe[i].error_count));
34975- if (atomic_read(&(sc->stripe[i].error_count)) <
34976+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
34977+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
34978 DM_IO_ERROR_THRESHOLD)
34979 schedule_work(&sc->trigger_event);
34980 }
34981diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
34982index fa29557..d24a5b7 100644
34983--- a/drivers/md/dm-table.c
34984+++ b/drivers/md/dm-table.c
34985@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
34986 if (!dev_size)
34987 return 0;
34988
34989- if ((start >= dev_size) || (start + len > dev_size)) {
34990+ if ((start >= dev_size) || (len > dev_size - start)) {
34991 DMWARN("%s: %s too small for target: "
34992 "start=%llu, len=%llu, dev_size=%llu",
34993 dm_device_name(ti->table->md), bdevname(bdev, b),
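
The device_area_is_invalid() change is an integer-overflow fix: start + len can wrap around sector_t, letting a huge len slip past the old comparison, whereas len > dev_size - start cannot wrap because the first clause already guarantees start < dev_size. A standalone demonstration, with sector_t assumed to be a 64-bit unsigned type here:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    static int area_invalid_old(sector_t start, sector_t len, sector_t dev_size)
    {
        return (start >= dev_size) || (start + len > dev_size); /* may wrap */
    }

    static int area_invalid_new(sector_t start, sector_t len, sector_t dev_size)
    {
        /* start < dev_size holds here, so dev_size - start cannot wrap */
        return (start >= dev_size) || (len > dev_size - start);
    }

    int main(void)
    {
        sector_t start = 1, len = UINT64_MAX, dev = 1000;
        /* prints "old: 0 new: 1": the old check wrongly accepts the range */
        printf("old: %d new: %d\n",
               area_invalid_old(start, len, dev),
               area_invalid_new(start, len, dev));
        return 0;
    }
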
34994diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
34995index 693e149..b7e0fde 100644
34996--- a/drivers/md/dm-thin-metadata.c
34997+++ b/drivers/md/dm-thin-metadata.c
34998@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
34999 {
35000 pmd->info.tm = pmd->tm;
35001 pmd->info.levels = 2;
35002- pmd->info.value_type.context = pmd->data_sm;
35003+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35004 pmd->info.value_type.size = sizeof(__le64);
35005 pmd->info.value_type.inc = data_block_inc;
35006 pmd->info.value_type.dec = data_block_dec;
35007@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
35008
35009 pmd->bl_info.tm = pmd->tm;
35010 pmd->bl_info.levels = 1;
35011- pmd->bl_info.value_type.context = pmd->data_sm;
35012+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
35013 pmd->bl_info.value_type.size = sizeof(__le64);
35014 pmd->bl_info.value_type.inc = data_block_inc;
35015 pmd->bl_info.value_type.dec = data_block_dec;
35016diff --git a/drivers/md/dm.c b/drivers/md/dm.c
35017index 77e6eff..913d695 100644
35018--- a/drivers/md/dm.c
35019+++ b/drivers/md/dm.c
35020@@ -182,9 +182,9 @@ struct mapped_device {
35021 /*
35022 * Event handling.
35023 */
35024- atomic_t event_nr;
35025+ atomic_unchecked_t event_nr;
35026 wait_queue_head_t eventq;
35027- atomic_t uevent_seq;
35028+ atomic_unchecked_t uevent_seq;
35029 struct list_head uevent_list;
35030 spinlock_t uevent_lock; /* Protect access to uevent_list */
35031
35032@@ -1847,8 +1847,8 @@ static struct mapped_device *alloc_dev(int minor)
35033 rwlock_init(&md->map_lock);
35034 atomic_set(&md->holders, 1);
35035 atomic_set(&md->open_count, 0);
35036- atomic_set(&md->event_nr, 0);
35037- atomic_set(&md->uevent_seq, 0);
35038+ atomic_set_unchecked(&md->event_nr, 0);
35039+ atomic_set_unchecked(&md->uevent_seq, 0);
35040 INIT_LIST_HEAD(&md->uevent_list);
35041 spin_lock_init(&md->uevent_lock);
35042
35043@@ -1982,7 +1982,7 @@ static void event_callback(void *context)
35044
35045 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
35046
35047- atomic_inc(&md->event_nr);
35048+ atomic_inc_unchecked(&md->event_nr);
35049 wake_up(&md->eventq);
35050 }
35051
35052@@ -2637,18 +2637,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
35053
35054 uint32_t dm_next_uevent_seq(struct mapped_device *md)
35055 {
35056- return atomic_add_return(1, &md->uevent_seq);
35057+ return atomic_add_return_unchecked(1, &md->uevent_seq);
35058 }
35059
35060 uint32_t dm_get_event_nr(struct mapped_device *md)
35061 {
35062- return atomic_read(&md->event_nr);
35063+ return atomic_read_unchecked(&md->event_nr);
35064 }
35065
35066 int dm_wait_event(struct mapped_device *md, int event_nr)
35067 {
35068 return wait_event_interruptible(md->eventq,
35069- (event_nr != atomic_read(&md->event_nr)));
35070+ (event_nr != atomic_read_unchecked(&md->event_nr)));
35071 }
35072
35073 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
35074diff --git a/drivers/md/md.c b/drivers/md/md.c
35075index 6120071..31d9be2 100644
35076--- a/drivers/md/md.c
35077+++ b/drivers/md/md.c
35078@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
35079 * start build, activate spare
35080 */
35081 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
35082-static atomic_t md_event_count;
35083+static atomic_unchecked_t md_event_count;
35084 void md_new_event(struct mddev *mddev)
35085 {
35086- atomic_inc(&md_event_count);
35087+ atomic_inc_unchecked(&md_event_count);
35088 wake_up(&md_event_waiters);
35089 }
35090 EXPORT_SYMBOL_GPL(md_new_event);
35091@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
35092 */
35093 static void md_new_event_inintr(struct mddev *mddev)
35094 {
35095- atomic_inc(&md_event_count);
35096+ atomic_inc_unchecked(&md_event_count);
35097 wake_up(&md_event_waiters);
35098 }
35099
35100@@ -1504,7 +1504,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
35101 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
35102 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
35103 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
35104- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35105+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
35106
35107 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
35108 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
35109@@ -1748,7 +1748,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
35110 else
35111 sb->resync_offset = cpu_to_le64(0);
35112
35113- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
35114+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
35115
35116 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
35117 sb->size = cpu_to_le64(mddev->dev_sectors);
35118@@ -2748,7 +2748,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
35119 static ssize_t
35120 errors_show(struct md_rdev *rdev, char *page)
35121 {
35122- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
35123+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
35124 }
35125
35126 static ssize_t
35127@@ -2757,7 +2757,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
35128 char *e;
35129 unsigned long n = simple_strtoul(buf, &e, 10);
35130 if (*buf && (*e == 0 || *e == '\n')) {
35131- atomic_set(&rdev->corrected_errors, n);
35132+ atomic_set_unchecked(&rdev->corrected_errors, n);
35133 return len;
35134 }
35135 return -EINVAL;
35136@@ -3204,8 +3204,8 @@ int md_rdev_init(struct md_rdev *rdev)
35137 rdev->sb_loaded = 0;
35138 rdev->bb_page = NULL;
35139 atomic_set(&rdev->nr_pending, 0);
35140- atomic_set(&rdev->read_errors, 0);
35141- atomic_set(&rdev->corrected_errors, 0);
35142+ atomic_set_unchecked(&rdev->read_errors, 0);
35143+ atomic_set_unchecked(&rdev->corrected_errors, 0);
35144
35145 INIT_LIST_HEAD(&rdev->same_set);
35146 init_waitqueue_head(&rdev->blocked_wait);
35147@@ -6984,7 +6984,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
35148
35149 spin_unlock(&pers_lock);
35150 seq_printf(seq, "\n");
35151- seq->poll_event = atomic_read(&md_event_count);
35152+ seq->poll_event = atomic_read_unchecked(&md_event_count);
35153 return 0;
35154 }
35155 if (v == (void*)2) {
35156@@ -7087,7 +7087,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
35157 return error;
35158
35159 seq = file->private_data;
35160- seq->poll_event = atomic_read(&md_event_count);
35161+ seq->poll_event = atomic_read_unchecked(&md_event_count);
35162 return error;
35163 }
35164
35165@@ -7101,7 +7101,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
35166 /* always allow read */
35167 mask = POLLIN | POLLRDNORM;
35168
35169- if (seq->poll_event != atomic_read(&md_event_count))
35170+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
35171 mask |= POLLERR | POLLPRI;
35172 return mask;
35173 }
35174@@ -7145,7 +7145,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
35175 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
35176 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
35177 (int)part_stat_read(&disk->part0, sectors[1]) -
35178- atomic_read(&disk->sync_io);
35179+ atomic_read_unchecked(&disk->sync_io);
35180 /* sync IO will cause sync_io to increase before the disk_stats
35181 * as sync_io is counted when a request starts, and
35182 * disk_stats is counted when it completes.
35183diff --git a/drivers/md/md.h b/drivers/md/md.h
35184index af443ab..0f93be3 100644
35185--- a/drivers/md/md.h
35186+++ b/drivers/md/md.h
35187@@ -94,13 +94,13 @@ struct md_rdev {
35188 * only maintained for arrays that
35189 * support hot removal
35190 */
35191- atomic_t read_errors; /* number of consecutive read errors that
35192+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
35193 * we have tried to ignore.
35194 */
35195 struct timespec last_read_error; /* monotonic time since our
35196 * last read error
35197 */
35198- atomic_t corrected_errors; /* number of corrected read errors,
35199+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
35200 * for reporting to userspace and storing
35201 * in superblock.
35202 */
35203@@ -432,7 +432,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
35204
35205 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
35206 {
35207- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
35208+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
35209 }
35210
35211 struct md_personality
35212diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
35213index 1cbfc6b..56e1dbb 100644
35214--- a/drivers/md/persistent-data/dm-space-map.h
35215+++ b/drivers/md/persistent-data/dm-space-map.h
35216@@ -60,6 +60,7 @@ struct dm_space_map {
35217 int (*root_size)(struct dm_space_map *sm, size_t *result);
35218 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
35219 };
35220+typedef struct dm_space_map __no_const dm_space_map_no_const;
35221
35222 /*----------------------------------------------------------------*/
35223
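
The new dm_space_map_no_const typedef serves the grsecurity constify gcc plugin: structures made up entirely of function pointers (ops tables like dm_space_map, and dib_fe_xfer_ops, dib0700_adapter_state, s6x0_state further down) are forced const at compile time, and __no_const opts a specific use back out so dm-thin-metadata.c can legally strip the implied const for the one field it assigns at runtime. A sketch of the marker, with the attribute name taken to be the plugin's (an assumption):

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    /* a writable-handle typedef for an otherwise constified struct;
     * the cast sites then read:
     *   pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
     */
    typedef struct dm_space_map __no_const dm_space_map_no_const;
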
35224diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
35225index a0f7309..5599dbc 100644
35226--- a/drivers/md/raid1.c
35227+++ b/drivers/md/raid1.c
35228@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
35229 if (r1_sync_page_io(rdev, sect, s,
35230 bio->bi_io_vec[idx].bv_page,
35231 READ) != 0)
35232- atomic_add(s, &rdev->corrected_errors);
35233+ atomic_add_unchecked(s, &rdev->corrected_errors);
35234 }
35235 sectors -= s;
35236 sect += s;
35237@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
35238 test_bit(In_sync, &rdev->flags)) {
35239 if (r1_sync_page_io(rdev, sect, s,
35240 conf->tmppage, READ)) {
35241- atomic_add(s, &rdev->corrected_errors);
35242+ atomic_add_unchecked(s, &rdev->corrected_errors);
35243 printk(KERN_INFO
35244 "md/raid1:%s: read error corrected "
35245 "(%d sectors at %llu on %s)\n",
35246diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
35247index c9acbd7..386cd3e 100644
35248--- a/drivers/md/raid10.c
35249+++ b/drivers/md/raid10.c
35250@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
35251 /* The write handler will notice the lack of
35252 * R10BIO_Uptodate and record any errors etc
35253 */
35254- atomic_add(r10_bio->sectors,
35255+ atomic_add_unchecked(r10_bio->sectors,
35256 &conf->mirrors[d].rdev->corrected_errors);
35257
35258 /* for reconstruct, we always reschedule after a read.
35259@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
35260 {
35261 struct timespec cur_time_mon;
35262 unsigned long hours_since_last;
35263- unsigned int read_errors = atomic_read(&rdev->read_errors);
35264+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
35265
35266 ktime_get_ts(&cur_time_mon);
35267
35268@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
35269 * overflowing the shift of read_errors by hours_since_last.
35270 */
35271 if (hours_since_last >= 8 * sizeof(read_errors))
35272- atomic_set(&rdev->read_errors, 0);
35273+ atomic_set_unchecked(&rdev->read_errors, 0);
35274 else
35275- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
35276+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
35277 }
35278
35279 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
35280@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35281 return;
35282
35283 check_decay_read_errors(mddev, rdev);
35284- atomic_inc(&rdev->read_errors);
35285- if (atomic_read(&rdev->read_errors) > max_read_errors) {
35286+ atomic_inc_unchecked(&rdev->read_errors);
35287+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
35288 char b[BDEVNAME_SIZE];
35289 bdevname(rdev->bdev, b);
35290
35291@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35292 "md/raid10:%s: %s: Raid device exceeded "
35293 "read_error threshold [cur %d:max %d]\n",
35294 mdname(mddev), b,
35295- atomic_read(&rdev->read_errors), max_read_errors);
35296+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
35297 printk(KERN_NOTICE
35298 "md/raid10:%s: %s: Failing raid device\n",
35299 mdname(mddev), b);
35300@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
35301 sect +
35302 choose_data_offset(r10_bio, rdev)),
35303 bdevname(rdev->bdev, b));
35304- atomic_add(s, &rdev->corrected_errors);
35305+ atomic_add_unchecked(s, &rdev->corrected_errors);
35306 }
35307
35308 rdev_dec_pending(rdev, mddev);
35309diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
35310index a450268..c4168a9 100644
35311--- a/drivers/md/raid5.c
35312+++ b/drivers/md/raid5.c
35313@@ -1789,21 +1789,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
35314 mdname(conf->mddev), STRIPE_SECTORS,
35315 (unsigned long long)s,
35316 bdevname(rdev->bdev, b));
35317- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
35318+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
35319 clear_bit(R5_ReadError, &sh->dev[i].flags);
35320 clear_bit(R5_ReWrite, &sh->dev[i].flags);
35321 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
35322 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
35323
35324- if (atomic_read(&rdev->read_errors))
35325- atomic_set(&rdev->read_errors, 0);
35326+ if (atomic_read_unchecked(&rdev->read_errors))
35327+ atomic_set_unchecked(&rdev->read_errors, 0);
35328 } else {
35329 const char *bdn = bdevname(rdev->bdev, b);
35330 int retry = 0;
35331 int set_bad = 0;
35332
35333 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
35334- atomic_inc(&rdev->read_errors);
35335+ atomic_inc_unchecked(&rdev->read_errors);
35336 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
35337 printk_ratelimited(
35338 KERN_WARNING
35339@@ -1831,7 +1831,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
35340 mdname(conf->mddev),
35341 (unsigned long long)s,
35342 bdn);
35343- } else if (atomic_read(&rdev->read_errors)
35344+ } else if (atomic_read_unchecked(&rdev->read_errors)
35345 > conf->max_nr_stripes)
35346 printk(KERN_WARNING
35347 "md/raid:%s: Too many read errors, failing device %s.\n",
35348diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
35349index d33101a..6b13069 100644
35350--- a/drivers/media/dvb-core/dvbdev.c
35351+++ b/drivers/media/dvb-core/dvbdev.c
35352@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
35353 const struct dvb_device *template, void *priv, int type)
35354 {
35355 struct dvb_device *dvbdev;
35356- struct file_operations *dvbdevfops;
35357+ file_operations_no_const *dvbdevfops;
35358 struct device *clsdev;
35359 int minor;
35360 int id;
35361diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
35362index 404f63a..4796533 100644
35363--- a/drivers/media/dvb-frontends/dib3000.h
35364+++ b/drivers/media/dvb-frontends/dib3000.h
35365@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
35366 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
35367 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
35368 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
35369-};
35370+} __no_const;
35371
35372 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
35373 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
35374diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
35375index 3aa6856..435ad25 100644
35376--- a/drivers/media/pci/cx88/cx88-alsa.c
35377+++ b/drivers/media/pci/cx88/cx88-alsa.c
35378@@ -749,7 +749,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
35379 * Only boards with eeprom and byte 1 at eeprom=1 have it
35380 */
35381
35382-static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
35383+static const struct pci_device_id cx88_audio_pci_tbl[] __devinitconst = {
35384 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
35385 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
35386 {0, }
35387diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
35388index feff57e..66a2c67 100644
35389--- a/drivers/media/pci/ddbridge/ddbridge-core.c
35390+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
35391@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
35392 .subvendor = _subvend, .subdevice = _subdev, \
35393 .driver_data = (unsigned long)&_driverdata }
35394
35395-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
35396+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
35397 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
35398 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
35399 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
35400diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
35401index 96a13ed..6df45b4 100644
35402--- a/drivers/media/pci/ngene/ngene-cards.c
35403+++ b/drivers/media/pci/ngene/ngene-cards.c
35404@@ -741,7 +741,7 @@ static struct ngene_info ngene_info_terratec = {
35405
35406 /****************************************************************************/
35407
35408-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
35409+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
35410 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
35411 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
35412 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
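
The __devinitdata to __devinitconst swaps in cx88-alsa, ddbridge and ngene put const PCI ID tables into the read-only devinit section they belong in rather than a writable one. Both markers were section annotations in this kernel generation (both were later removed along with CONFIG_HOTPLUG); a sketch of the distinction, with section names assumed from the 3.7-era linux/init.h:

    #define __devinitdata  __section(.devinit.data)    /* writable, freed after init */
    #define __devinitconst __section(.devinit.rodata)  /* read-only, freed after init */

    /* a const table should carry the read-only marker: */
    static const struct pci_device_id example_tbl[] __devinitconst = {
        { 0x14f1, 0x8801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { 0, }
    };
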
35413diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
35414index a3b1a34..71ce0e3 100644
35415--- a/drivers/media/platform/omap/omap_vout.c
35416+++ b/drivers/media/platform/omap/omap_vout.c
35417@@ -65,7 +65,6 @@ enum omap_vout_channels {
35418 OMAP_VIDEO2,
35419 };
35420
35421-static struct videobuf_queue_ops video_vbq_ops;
35422 /* Variables configurable through module params*/
35423 static u32 video1_numbuffers = 3;
35424 static u32 video2_numbuffers = 3;
35425@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
35426 {
35427 struct videobuf_queue *q;
35428 struct omap_vout_device *vout = NULL;
35429+ static struct videobuf_queue_ops video_vbq_ops = {
35430+ .buf_setup = omap_vout_buffer_setup,
35431+ .buf_prepare = omap_vout_buffer_prepare,
35432+ .buf_release = omap_vout_buffer_release,
35433+ .buf_queue = omap_vout_buffer_queue,
35434+ };
35435
35436 vout = video_drvdata(file);
35437 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
35438@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
35439 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
35440
35441 q = &vout->vbq;
35442- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
35443- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
35444- video_vbq_ops.buf_release = omap_vout_buffer_release;
35445- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
35446 spin_lock_init(&vout->vbq_lock);
35447
35448 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
35449diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
35450index ddb422e..8cf008e 100644
35451--- a/drivers/media/platform/s5p-tv/mixer.h
35452+++ b/drivers/media/platform/s5p-tv/mixer.h
35453@@ -155,7 +155,7 @@ struct mxr_layer {
35454 /** layer index (unique identifier) */
35455 int idx;
35456 /** callbacks for layer methods */
35457- struct mxr_layer_ops ops;
35458+ struct mxr_layer_ops *ops;
35459 /** format array */
35460 const struct mxr_format **fmt_array;
35461 /** size of format array */
35462diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
35463index b93a21f..2535195 100644
35464--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
35465+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
35466@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
35467 {
35468 struct mxr_layer *layer;
35469 int ret;
35470- struct mxr_layer_ops ops = {
35471+ static struct mxr_layer_ops ops = {
35472 .release = mxr_graph_layer_release,
35473 .buffer_set = mxr_graph_buffer_set,
35474 .stream_set = mxr_graph_stream_set,
35475diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
35476index 3b1670a..595c939 100644
35477--- a/drivers/media/platform/s5p-tv/mixer_reg.c
35478+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
35479@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
35480 layer->update_buf = next;
35481 }
35482
35483- layer->ops.buffer_set(layer, layer->update_buf);
35484+ layer->ops->buffer_set(layer, layer->update_buf);
35485
35486 if (done && done != layer->shadow_buf)
35487 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
35488diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
35489index 0c1cd89..6574647 100644
35490--- a/drivers/media/platform/s5p-tv/mixer_video.c
35491+++ b/drivers/media/platform/s5p-tv/mixer_video.c
35492@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
35493 layer->geo.src.height = layer->geo.src.full_height;
35494
35495 mxr_geometry_dump(mdev, &layer->geo);
35496- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
35497+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
35498 mxr_geometry_dump(mdev, &layer->geo);
35499 }
35500
35501@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
35502 layer->geo.dst.full_width = mbus_fmt.width;
35503 layer->geo.dst.full_height = mbus_fmt.height;
35504 layer->geo.dst.field = mbus_fmt.field;
35505- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
35506+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
35507
35508 mxr_geometry_dump(mdev, &layer->geo);
35509 }
35510@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
35511 /* set source size to highest accepted value */
35512 geo->src.full_width = max(geo->dst.full_width, pix->width);
35513 geo->src.full_height = max(geo->dst.full_height, pix->height);
35514- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
35515+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
35516 mxr_geometry_dump(mdev, &layer->geo);
35517 /* set cropping to total visible screen */
35518 geo->src.width = pix->width;
35519@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
35520 geo->src.x_offset = 0;
35521 geo->src.y_offset = 0;
35522 /* assure consistency of geometry */
35523- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
35524+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
35525 mxr_geometry_dump(mdev, &layer->geo);
35526 /* set full size to lowest possible value */
35527 geo->src.full_width = 0;
35528 geo->src.full_height = 0;
35529- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
35530+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
35531 mxr_geometry_dump(mdev, &layer->geo);
35532
35533 /* returning results */
35534@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
35535 target->width = s->r.width;
35536 target->height = s->r.height;
35537
35538- layer->ops.fix_geometry(layer, stage, s->flags);
35539+ layer->ops->fix_geometry(layer, stage, s->flags);
35540
35541 /* retrieve update selection rectangle */
35542 res.left = target->x_offset;
35543@@ -928,13 +928,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
35544 mxr_output_get(mdev);
35545
35546 mxr_layer_update_output(layer);
35547- layer->ops.format_set(layer);
35548+ layer->ops->format_set(layer);
35549 /* enabling layer in hardware */
35550 spin_lock_irqsave(&layer->enq_slock, flags);
35551 layer->state = MXR_LAYER_STREAMING;
35552 spin_unlock_irqrestore(&layer->enq_slock, flags);
35553
35554- layer->ops.stream_set(layer, MXR_ENABLE);
35555+ layer->ops->stream_set(layer, MXR_ENABLE);
35556 mxr_streamer_get(mdev);
35557
35558 return 0;
35559@@ -1004,7 +1004,7 @@ static int stop_streaming(struct vb2_queue *vq)
35560 spin_unlock_irqrestore(&layer->enq_slock, flags);
35561
35562 /* disabling layer in hardware */
35563- layer->ops.stream_set(layer, MXR_DISABLE);
35564+ layer->ops->stream_set(layer, MXR_DISABLE);
35565 /* remove one streamer */
35566 mxr_streamer_put(mdev);
35567 /* allow changes in output configuration */
35568@@ -1043,8 +1043,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
35569
35570 void mxr_layer_release(struct mxr_layer *layer)
35571 {
35572- if (layer->ops.release)
35573- layer->ops.release(layer);
35574+ if (layer->ops->release)
35575+ layer->ops->release(layer);
35576 }
35577
35578 void mxr_base_layer_release(struct mxr_layer *layer)
35579@@ -1070,7 +1070,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
35580
35581 layer->mdev = mdev;
35582 layer->idx = idx;
35583- layer->ops = *ops;
35584+ layer->ops = ops;
35585
35586 spin_lock_init(&layer->enq_slock);
35587 INIT_LIST_HEAD(&layer->enq_list);
35588diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
35589index 3d13a63..da31bf1 100644
35590--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
35591+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
35592@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
35593 {
35594 struct mxr_layer *layer;
35595 int ret;
35596- struct mxr_layer_ops ops = {
35597+ static struct mxr_layer_ops ops = {
35598 .release = mxr_vp_layer_release,
35599 .buffer_set = mxr_vp_buffer_set,
35600 .stream_set = mxr_vp_stream_set,
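
The omap_vout and s5p-tv changes are the other half of constification: instead of copying an ops structure into every object (a per-object writable copy the plugin cannot protect) or patching a writable global at open time, the ops live in one static table and each object keeps a pointer to it, so every call site becomes layer->ops->f(). Reduced to a toy, with illustrative names not taken from either driver:

    struct ops { void (*run)(void); };

    /* before: a writable copy embedded in every object */
    struct obj_old {
        struct ops ops;          /* obj.ops = *ops;  obj.ops.run(); */
    };

    /* after: a pointer to a single static, constifiable instance */
    struct obj_new {
        const struct ops *ops;   /* obj.ops = ops;   obj.ops->run(); */
    };
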
35601diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
35602index 02194c0..36d69c1 100644
35603--- a/drivers/media/platform/timblogiw.c
35604+++ b/drivers/media/platform/timblogiw.c
35605@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
35606
35607 /* Platform device functions */
35608
35609-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35610+static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35611 .vidioc_querycap = timblogiw_querycap,
35612 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
35613 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
35614@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
35615 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
35616 };
35617
35618-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
35619+static struct v4l2_file_operations timblogiw_fops = {
35620 .owner = THIS_MODULE,
35621 .open = timblogiw_open,
35622 .release = timblogiw_close,
35623diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
35624index 697a421..16c5a5f 100644
35625--- a/drivers/media/radio/radio-cadet.c
35626+++ b/drivers/media/radio/radio-cadet.c
35627@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35628 unsigned char readbuf[RDS_BUFFER];
35629 int i = 0;
35630
35631+ if (count > RDS_BUFFER)
35632+ return -EFAULT;
35633 mutex_lock(&dev->lock);
35634 if (dev->rdsstat == 0)
35635 cadet_start_rds(dev);
35636@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
35637 while (i < count && dev->rdsin != dev->rdsout)
35638 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
35639
35640- if (i && copy_to_user(data, readbuf, i))
35641+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
35642 i = -EFAULT;
35643 unlock:
35644 mutex_unlock(&dev->lock);
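
cadet_read() fills a fixed 256-byte stack buffer from the RDS ring: readbuf is RDS_BUFFER bytes, but the copy loop is bounded by the caller-controlled count, and with rdsin advancing concurrently the index could run past the end of readbuf. The patch rejects count > RDS_BUFFER up front and re-checks the index before copy_to_user(). (Returning -EFAULT for the oversized-count case is this patch's choice; -EINVAL would be the conventional errno.) A userspace model of the fixed path, mirroring the driver's shapes:

    #include <string.h>
    #include <stddef.h>

    #define RDS_BUFFER 256

    static unsigned char rdsbuf[RDS_BUFFER];
    static unsigned char rdsin, rdsout;   /* indices wrap mod 256 by type */

    static ptrdiff_t cadet_read_model(unsigned char *dst, size_t count)
    {
        unsigned char readbuf[RDS_BUFFER];
        size_t i = 0;

        if (count > RDS_BUFFER)           /* reject before filling */
            return -1;
        while (i < count && rdsin != rdsout)
            readbuf[i++] = rdsbuf[rdsout++]; /* i is now <= RDS_BUFFER */
        memcpy(dst, readbuf, i);
        return (ptrdiff_t)i;
    }
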
35645diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
35646index 3940bb0..fb3952a 100644
35647--- a/drivers/media/usb/dvb-usb/cxusb.c
35648+++ b/drivers/media/usb/dvb-usb/cxusb.c
35649@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
35650
35651 struct dib0700_adapter_state {
35652 int (*set_param_save) (struct dvb_frontend *);
35653-};
35654+} __no_const;
35655
35656 static int dib7070_set_param_override(struct dvb_frontend *fe)
35657 {
35658diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
35659index 9382895..ac8093c 100644
35660--- a/drivers/media/usb/dvb-usb/dw2102.c
35661+++ b/drivers/media/usb/dvb-usb/dw2102.c
35662@@ -95,7 +95,7 @@ struct su3000_state {
35663
35664 struct s6x0_state {
35665 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
35666-};
35667+} __no_const;
35668
35669 /* debug */
35670 static int dvb_usb_dw2102_debug;
35671diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
35672index fb69baa..cf7ad22 100644
35673--- a/drivers/message/fusion/mptbase.c
35674+++ b/drivers/message/fusion/mptbase.c
35675@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
35676 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
35677 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
35678
35679+#ifdef CONFIG_GRKERNSEC_HIDESYM
35680+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
35681+#else
35682 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
35683 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
35684+#endif
35685+
35686 /*
35687 * Rounding UP to nearest 4-kB boundary here...
35688 */
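
The GRKERNSEC_HIDESYM hunk blanks the two kernel pointers this /proc file would otherwise publish, since any leaked kernel address helps defeat randomized layouts. Mainline later grew a policy-based version of the same idea: the %pK format specifier prints zeros unless the reader passes the kptr_restrict check. Roughly how that shape would look here, offered as a contrast rather than what this patch does:

    /* mainline-style alternative: policy-gated instead of compiled out */
    seq_printf(m, "  RequestFrames @ %pK (Dma @ %pK)\n",
               (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
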
35689diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
35690index 551262e..7551198 100644
35691--- a/drivers/message/fusion/mptsas.c
35692+++ b/drivers/message/fusion/mptsas.c
35693@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
35694 return 0;
35695 }
35696
35697+static inline void
35698+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35699+{
35700+ if (phy_info->port_details) {
35701+ phy_info->port_details->rphy = rphy;
35702+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35703+ ioc->name, rphy));
35704+ }
35705+
35706+ if (rphy) {
35707+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35708+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35709+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35710+ ioc->name, rphy, rphy->dev.release));
35711+ }
35712+}
35713+
35714 /* no mutex */
35715 static void
35716 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
35717@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
35718 return NULL;
35719 }
35720
35721-static inline void
35722-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
35723-{
35724- if (phy_info->port_details) {
35725- phy_info->port_details->rphy = rphy;
35726- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
35727- ioc->name, rphy));
35728- }
35729-
35730- if (rphy) {
35731- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
35732- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
35733- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
35734- ioc->name, rphy, rphy->dev.release));
35735- }
35736-}
35737-
35738 static inline struct sas_port *
35739 mptsas_get_port(struct mptsas_phyinfo *phy_info)
35740 {
35741diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
35742index 0c3ced7..1fe34ec 100644
35743--- a/drivers/message/fusion/mptscsih.c
35744+++ b/drivers/message/fusion/mptscsih.c
35745@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
35746
35747 h = shost_priv(SChost);
35748
35749- if (h) {
35750- if (h->info_kbuf == NULL)
35751- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35752- return h->info_kbuf;
35753- h->info_kbuf[0] = '\0';
35754+ if (!h)
35755+ return NULL;
35756
35757- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35758- h->info_kbuf[size-1] = '\0';
35759- }
35760+ if (h->info_kbuf == NULL)
35761+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
35762+ return h->info_kbuf;
35763+ h->info_kbuf[0] = '\0';
35764+
35765+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
35766+ h->info_kbuf[size-1] = '\0';
35767
35768 return h->info_kbuf;
35769 }
35770diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
35771index 8001aa6..b137580 100644
35772--- a/drivers/message/i2o/i2o_proc.c
35773+++ b/drivers/message/i2o/i2o_proc.c
35774@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
35775 "Array Controller Device"
35776 };
35777
35778-static char *chtostr(char *tmp, u8 *chars, int n)
35779-{
35780- tmp[0] = 0;
35781- return strncat(tmp, (char *)chars, n);
35782-}
35783-
35784 static int i2o_report_query_status(struct seq_file *seq, int block_status,
35785 char *group)
35786 {
35787@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35788 } *result;
35789
35790 i2o_exec_execute_ddm_table ddm_table;
35791- char tmp[28 + 1];
35792
35793 result = kmalloc(sizeof(*result), GFP_KERNEL);
35794 if (!result)
35795@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
35796
35797 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
35798 seq_printf(seq, "%-#8x", ddm_table.module_id);
35799- seq_printf(seq, "%-29s",
35800- chtostr(tmp, ddm_table.module_name_version, 28));
35801+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
35802 seq_printf(seq, "%9d ", ddm_table.data_size);
35803 seq_printf(seq, "%8d", ddm_table.code_size);
35804
35805@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35806
35807 i2o_driver_result_table *result;
35808 i2o_driver_store_table *dst;
35809- char tmp[28 + 1];
35810
35811 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
35812 if (result == NULL)
35813@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
35814
35815 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
35816 seq_printf(seq, "%-#8x", dst->module_id);
35817- seq_printf(seq, "%-29s",
35818- chtostr(tmp, dst->module_name_version, 28));
35819- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
35820+ seq_printf(seq, "%-.28s", dst->module_name_version);
35821+ seq_printf(seq, "%-.8s", dst->date);
35822 seq_printf(seq, "%8d ", dst->module_size);
35823 seq_printf(seq, "%8d ", dst->mpb_size);
35824 seq_printf(seq, "0x%04x", dst->module_flags);
35825@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35826 // == (allow) 512d bytes (max)
35827 static u16 *work16 = (u16 *) work32;
35828 int token;
35829- char tmp[16 + 1];
35830
35831 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
35832
35833@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
35834 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
35835 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
35836 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
35837- seq_printf(seq, "Vendor info : %s\n",
35838- chtostr(tmp, (u8 *) (work32 + 2), 16));
35839- seq_printf(seq, "Product info : %s\n",
35840- chtostr(tmp, (u8 *) (work32 + 6), 16));
35841- seq_printf(seq, "Description : %s\n",
35842- chtostr(tmp, (u8 *) (work32 + 10), 16));
35843- seq_printf(seq, "Product rev. : %s\n",
35844- chtostr(tmp, (u8 *) (work32 + 14), 8));
35845+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
35846+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
35847+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
35848+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
35849
35850 seq_printf(seq, "Serial number : ");
35851 print_serial_number(seq, (u8 *) (work32 + 16),
35852@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35853 u8 pad[256]; // allow up to 256 byte (max) serial number
35854 } result;
35855
35856- char tmp[24 + 1];
35857-
35858 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
35859
35860 if (token < 0) {
35861@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
35862 }
35863
35864 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
35865- seq_printf(seq, "Module name : %s\n",
35866- chtostr(tmp, result.module_name, 24));
35867- seq_printf(seq, "Module revision : %s\n",
35868- chtostr(tmp, result.module_rev, 8));
35869+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
35870+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
35871
35872 seq_printf(seq, "Serial number : ");
35873 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
35874@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35875 u8 instance_number[4];
35876 } result;
35877
35878- char tmp[64 + 1];
35879-
35880 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
35881
35882 if (token < 0) {
35883@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
35884 return 0;
35885 }
35886
35887- seq_printf(seq, "Device name : %s\n",
35888- chtostr(tmp, result.device_name, 64));
35889- seq_printf(seq, "Service name : %s\n",
35890- chtostr(tmp, result.service_name, 64));
35891- seq_printf(seq, "Physical name : %s\n",
35892- chtostr(tmp, result.physical_location, 64));
35893- seq_printf(seq, "Instance number : %s\n",
35894- chtostr(tmp, result.instance_number, 4));
35895+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
35896+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
35897+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
35898+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
35899
35900 return 0;
35901 }
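
The i2o_proc changes delete chtostr(), which strncat()'ed fixed-width, not-necessarily-NUL-terminated firmware fields through a stack temporary, and let printf-style precision do the bounding instead: "%.28s" emits at most 28 bytes and stops early at an embedded NUL, so the temp buffers and their termination risk disappear. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        /* fixed-width field; no trailing '\0' is required by the format */
        char module_name_version[28] = "example-ddm v1.2";

        printf("%-.28s\n", module_name_version); /* at most 28 bytes printed */
        return 0;
    }
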
35902diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
35903index a8c08f3..155fe3d 100644
35904--- a/drivers/message/i2o/iop.c
35905+++ b/drivers/message/i2o/iop.c
35906@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
35907
35908 spin_lock_irqsave(&c->context_list_lock, flags);
35909
35910- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
35911- atomic_inc(&c->context_list_counter);
35912+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
35913+ atomic_inc_unchecked(&c->context_list_counter);
35914
35915- entry->context = atomic_read(&c->context_list_counter);
35916+ entry->context = atomic_read_unchecked(&c->context_list_counter);
35917
35918 list_add(&entry->list, &c->context_list);
35919
35920@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
35921
35922 #if BITS_PER_LONG == 64
35923 spin_lock_init(&c->context_list_lock);
35924- atomic_set(&c->context_list_counter, 0);
35925+ atomic_set_unchecked(&c->context_list_counter, 0);
35926 INIT_LIST_HEAD(&c->context_list);
35927 #endif
35928
35929diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
35930index 965c480..71f2db9 100644
35931--- a/drivers/mfd/janz-cmodio.c
35932+++ b/drivers/mfd/janz-cmodio.c
35933@@ -13,6 +13,7 @@
35934
35935 #include <linux/kernel.h>
35936 #include <linux/module.h>
35937+#include <linux/slab.h>
35938 #include <linux/init.h>
35939 #include <linux/pci.h>
35940 #include <linux/interrupt.h>
35941diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
35942index 3aa9a96..59cf685 100644
35943--- a/drivers/misc/kgdbts.c
35944+++ b/drivers/misc/kgdbts.c
35945@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
35946 char before[BREAK_INSTR_SIZE];
35947 char after[BREAK_INSTR_SIZE];
35948
35949- probe_kernel_read(before, (char *)kgdbts_break_test,
35950+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
35951 BREAK_INSTR_SIZE);
35952 init_simple_test();
35953 ts.tst = plant_and_detach_test;
35954@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
35955 /* Activate test with initial breakpoint */
35956 if (!is_early)
35957 kgdb_breakpoint();
35958- probe_kernel_read(after, (char *)kgdbts_break_test,
35959+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
35960 BREAK_INSTR_SIZE);
35961 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
35962 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
35963diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
35964index 4a87e5c..76bdf5c 100644
35965--- a/drivers/misc/lis3lv02d/lis3lv02d.c
35966+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
35967@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
35968 * the lid is closed. This leads to interrupts as soon as a little move
35969 * is done.
35970 */
35971- atomic_inc(&lis3->count);
35972+ atomic_inc_unchecked(&lis3->count);
35973
35974 wake_up_interruptible(&lis3->misc_wait);
35975 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
35976@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35977 if (lis3->pm_dev)
35978 pm_runtime_get_sync(lis3->pm_dev);
35979
35980- atomic_set(&lis3->count, 0);
35981+ atomic_set_unchecked(&lis3->count, 0);
35982 return 0;
35983 }
35984
35985@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35986 add_wait_queue(&lis3->misc_wait, &wait);
35987 while (true) {
35988 set_current_state(TASK_INTERRUPTIBLE);
35989- data = atomic_xchg(&lis3->count, 0);
35990+ data = atomic_xchg_unchecked(&lis3->count, 0);
35991 if (data)
35992 break;
35993
35994@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35995 struct lis3lv02d, miscdev);
35996
35997 poll_wait(file, &lis3->misc_wait, wait);
35998- if (atomic_read(&lis3->count))
35999+ if (atomic_read_unchecked(&lis3->count))
36000 return POLLIN | POLLRDNORM;
36001 return 0;
36002 }
36003diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
36004index c439c82..1f20f57 100644
36005--- a/drivers/misc/lis3lv02d/lis3lv02d.h
36006+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
36007@@ -297,7 +297,7 @@ struct lis3lv02d {
36008 struct input_polled_dev *idev; /* input device */
36009 struct platform_device *pdev; /* platform device */
36010 struct regulator_bulk_data regulators[2];
36011- atomic_t count; /* interrupt count after last read */
36012+ atomic_unchecked_t count; /* interrupt count after last read */
36013 union axis_conversion ac; /* hw -> logical axis */
36014 int mapped_btns[3];
36015
36016diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
36017index 2f30bad..c4c13d0 100644
36018--- a/drivers/misc/sgi-gru/gruhandles.c
36019+++ b/drivers/misc/sgi-gru/gruhandles.c
36020@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
36021 unsigned long nsec;
36022
36023 nsec = CLKS2NSEC(clks);
36024- atomic_long_inc(&mcs_op_statistics[op].count);
36025- atomic_long_add(nsec, &mcs_op_statistics[op].total);
36026+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
36027+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
36028 if (mcs_op_statistics[op].max < nsec)
36029 mcs_op_statistics[op].max = nsec;
36030 }
36031diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
36032index 950dbe9..eeef0f8 100644
36033--- a/drivers/misc/sgi-gru/gruprocfs.c
36034+++ b/drivers/misc/sgi-gru/gruprocfs.c
36035@@ -32,9 +32,9 @@
36036
36037 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
36038
36039-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
36040+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
36041 {
36042- unsigned long val = atomic_long_read(v);
36043+ unsigned long val = atomic_long_read_unchecked(v);
36044
36045 seq_printf(s, "%16lu %s\n", val, id);
36046 }
36047@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
36048
36049 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
36050 for (op = 0; op < mcsop_last; op++) {
36051- count = atomic_long_read(&mcs_op_statistics[op].count);
36052- total = atomic_long_read(&mcs_op_statistics[op].total);
36053+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
36054+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
36055 max = mcs_op_statistics[op].max;
36056 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
36057 count ? total / count : 0, max);
36058diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
36059index 5c3ce24..4915ccb 100644
36060--- a/drivers/misc/sgi-gru/grutables.h
36061+++ b/drivers/misc/sgi-gru/grutables.h
36062@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
36063 * GRU statistics.
36064 */
36065 struct gru_stats_s {
36066- atomic_long_t vdata_alloc;
36067- atomic_long_t vdata_free;
36068- atomic_long_t gts_alloc;
36069- atomic_long_t gts_free;
36070- atomic_long_t gms_alloc;
36071- atomic_long_t gms_free;
36072- atomic_long_t gts_double_allocate;
36073- atomic_long_t assign_context;
36074- atomic_long_t assign_context_failed;
36075- atomic_long_t free_context;
36076- atomic_long_t load_user_context;
36077- atomic_long_t load_kernel_context;
36078- atomic_long_t lock_kernel_context;
36079- atomic_long_t unlock_kernel_context;
36080- atomic_long_t steal_user_context;
36081- atomic_long_t steal_kernel_context;
36082- atomic_long_t steal_context_failed;
36083- atomic_long_t nopfn;
36084- atomic_long_t asid_new;
36085- atomic_long_t asid_next;
36086- atomic_long_t asid_wrap;
36087- atomic_long_t asid_reuse;
36088- atomic_long_t intr;
36089- atomic_long_t intr_cbr;
36090- atomic_long_t intr_tfh;
36091- atomic_long_t intr_spurious;
36092- atomic_long_t intr_mm_lock_failed;
36093- atomic_long_t call_os;
36094- atomic_long_t call_os_wait_queue;
36095- atomic_long_t user_flush_tlb;
36096- atomic_long_t user_unload_context;
36097- atomic_long_t user_exception;
36098- atomic_long_t set_context_option;
36099- atomic_long_t check_context_retarget_intr;
36100- atomic_long_t check_context_unload;
36101- atomic_long_t tlb_dropin;
36102- atomic_long_t tlb_preload_page;
36103- atomic_long_t tlb_dropin_fail_no_asid;
36104- atomic_long_t tlb_dropin_fail_upm;
36105- atomic_long_t tlb_dropin_fail_invalid;
36106- atomic_long_t tlb_dropin_fail_range_active;
36107- atomic_long_t tlb_dropin_fail_idle;
36108- atomic_long_t tlb_dropin_fail_fmm;
36109- atomic_long_t tlb_dropin_fail_no_exception;
36110- atomic_long_t tfh_stale_on_fault;
36111- atomic_long_t mmu_invalidate_range;
36112- atomic_long_t mmu_invalidate_page;
36113- atomic_long_t flush_tlb;
36114- atomic_long_t flush_tlb_gru;
36115- atomic_long_t flush_tlb_gru_tgh;
36116- atomic_long_t flush_tlb_gru_zero_asid;
36117+ atomic_long_unchecked_t vdata_alloc;
36118+ atomic_long_unchecked_t vdata_free;
36119+ atomic_long_unchecked_t gts_alloc;
36120+ atomic_long_unchecked_t gts_free;
36121+ atomic_long_unchecked_t gms_alloc;
36122+ atomic_long_unchecked_t gms_free;
36123+ atomic_long_unchecked_t gts_double_allocate;
36124+ atomic_long_unchecked_t assign_context;
36125+ atomic_long_unchecked_t assign_context_failed;
36126+ atomic_long_unchecked_t free_context;
36127+ atomic_long_unchecked_t load_user_context;
36128+ atomic_long_unchecked_t load_kernel_context;
36129+ atomic_long_unchecked_t lock_kernel_context;
36130+ atomic_long_unchecked_t unlock_kernel_context;
36131+ atomic_long_unchecked_t steal_user_context;
36132+ atomic_long_unchecked_t steal_kernel_context;
36133+ atomic_long_unchecked_t steal_context_failed;
36134+ atomic_long_unchecked_t nopfn;
36135+ atomic_long_unchecked_t asid_new;
36136+ atomic_long_unchecked_t asid_next;
36137+ atomic_long_unchecked_t asid_wrap;
36138+ atomic_long_unchecked_t asid_reuse;
36139+ atomic_long_unchecked_t intr;
36140+ atomic_long_unchecked_t intr_cbr;
36141+ atomic_long_unchecked_t intr_tfh;
36142+ atomic_long_unchecked_t intr_spurious;
36143+ atomic_long_unchecked_t intr_mm_lock_failed;
36144+ atomic_long_unchecked_t call_os;
36145+ atomic_long_unchecked_t call_os_wait_queue;
36146+ atomic_long_unchecked_t user_flush_tlb;
36147+ atomic_long_unchecked_t user_unload_context;
36148+ atomic_long_unchecked_t user_exception;
36149+ atomic_long_unchecked_t set_context_option;
36150+ atomic_long_unchecked_t check_context_retarget_intr;
36151+ atomic_long_unchecked_t check_context_unload;
36152+ atomic_long_unchecked_t tlb_dropin;
36153+ atomic_long_unchecked_t tlb_preload_page;
36154+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
36155+ atomic_long_unchecked_t tlb_dropin_fail_upm;
36156+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
36157+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
36158+ atomic_long_unchecked_t tlb_dropin_fail_idle;
36159+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
36160+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
36161+ atomic_long_unchecked_t tfh_stale_on_fault;
36162+ atomic_long_unchecked_t mmu_invalidate_range;
36163+ atomic_long_unchecked_t mmu_invalidate_page;
36164+ atomic_long_unchecked_t flush_tlb;
36165+ atomic_long_unchecked_t flush_tlb_gru;
36166+ atomic_long_unchecked_t flush_tlb_gru_tgh;
36167+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
36168
36169- atomic_long_t copy_gpa;
36170- atomic_long_t read_gpa;
36171+ atomic_long_unchecked_t copy_gpa;
36172+ atomic_long_unchecked_t read_gpa;
36173
36174- atomic_long_t mesq_receive;
36175- atomic_long_t mesq_receive_none;
36176- atomic_long_t mesq_send;
36177- atomic_long_t mesq_send_failed;
36178- atomic_long_t mesq_noop;
36179- atomic_long_t mesq_send_unexpected_error;
36180- atomic_long_t mesq_send_lb_overflow;
36181- atomic_long_t mesq_send_qlimit_reached;
36182- atomic_long_t mesq_send_amo_nacked;
36183- atomic_long_t mesq_send_put_nacked;
36184- atomic_long_t mesq_page_overflow;
36185- atomic_long_t mesq_qf_locked;
36186- atomic_long_t mesq_qf_noop_not_full;
36187- atomic_long_t mesq_qf_switch_head_failed;
36188- atomic_long_t mesq_qf_unexpected_error;
36189- atomic_long_t mesq_noop_unexpected_error;
36190- atomic_long_t mesq_noop_lb_overflow;
36191- atomic_long_t mesq_noop_qlimit_reached;
36192- atomic_long_t mesq_noop_amo_nacked;
36193- atomic_long_t mesq_noop_put_nacked;
36194- atomic_long_t mesq_noop_page_overflow;
36195+ atomic_long_unchecked_t mesq_receive;
36196+ atomic_long_unchecked_t mesq_receive_none;
36197+ atomic_long_unchecked_t mesq_send;
36198+ atomic_long_unchecked_t mesq_send_failed;
36199+ atomic_long_unchecked_t mesq_noop;
36200+ atomic_long_unchecked_t mesq_send_unexpected_error;
36201+ atomic_long_unchecked_t mesq_send_lb_overflow;
36202+ atomic_long_unchecked_t mesq_send_qlimit_reached;
36203+ atomic_long_unchecked_t mesq_send_amo_nacked;
36204+ atomic_long_unchecked_t mesq_send_put_nacked;
36205+ atomic_long_unchecked_t mesq_page_overflow;
36206+ atomic_long_unchecked_t mesq_qf_locked;
36207+ atomic_long_unchecked_t mesq_qf_noop_not_full;
36208+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
36209+ atomic_long_unchecked_t mesq_qf_unexpected_error;
36210+ atomic_long_unchecked_t mesq_noop_unexpected_error;
36211+ atomic_long_unchecked_t mesq_noop_lb_overflow;
36212+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
36213+ atomic_long_unchecked_t mesq_noop_amo_nacked;
36214+ atomic_long_unchecked_t mesq_noop_put_nacked;
36215+ atomic_long_unchecked_t mesq_noop_page_overflow;
36216
36217 };
36218
36219@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
36220 tghop_invalidate, mcsop_last};
36221
36222 struct mcs_op_statistic {
36223- atomic_long_t count;
36224- atomic_long_t total;
36225+ atomic_long_unchecked_t count;
36226+ atomic_long_unchecked_t total;
36227 unsigned long max;
36228 };
36229
36230@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
36231
36232 #define STAT(id) do { \
36233 if (gru_options & OPT_STATS) \
36234- atomic_long_inc(&gru_stats.id); \
36235+ atomic_long_inc_unchecked(&gru_stats.id); \
36236 } while (0)
36237
36238 #ifdef CONFIG_SGI_GRU_DEBUG
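
The atomic_long_t to atomic_long_unchecked_t conversion above is the PaX REFCOUNT opt-out for pure statistics: the checked atomics trap on overflow to catch reference-count bugs, while the _unchecked variants keep ordinary wrapping semantics for counters that may legitimately overflow. A minimal sketch of the idiom, assuming the PaX-provided _unchecked API:

	/* wrap-tolerant statistics counter under PAX_REFCOUNT */
	static atomic_long_unchecked_t demo_hits;

	static void demo_record_hit(void)
	{
		/* may wrap silently; never triggers the overflow trap */
		atomic_long_inc_unchecked(&demo_hits);
	}
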
36239diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
36240index c862cd4..0d176fe 100644
36241--- a/drivers/misc/sgi-xp/xp.h
36242+++ b/drivers/misc/sgi-xp/xp.h
36243@@ -288,7 +288,7 @@ struct xpc_interface {
36244 xpc_notify_func, void *);
36245 void (*received) (short, int, void *);
36246 enum xp_retval (*partid_to_nasids) (short, void *);
36247-};
36248+} __no_const;
36249
36250 extern struct xpc_interface xpc_interface;
36251
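
__no_const, seen here and on many ops structures below, exempts a structure type from the constify gcc plugin, which would otherwise force function-pointer-only structs into read-only memory; these particular instances need their pointers rewritten at runtime. Roughly, as assumed from the PaX compiler headers, the annotation reduces to nothing without the plugin:

	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))
	#else
	#define __no_const
	#endif
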
36252diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
36253index b94d5f7..7f494c5 100644
36254--- a/drivers/misc/sgi-xp/xpc.h
36255+++ b/drivers/misc/sgi-xp/xpc.h
36256@@ -835,6 +835,7 @@ struct xpc_arch_operations {
36257 void (*received_payload) (struct xpc_channel *, void *);
36258 void (*notify_senders_of_disconnect) (struct xpc_channel *);
36259 };
36260+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
36261
36262 /* struct xpc_partition act_state values (for XPC HB) */
36263
36264@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
36265 /* found in xpc_main.c */
36266 extern struct device *xpc_part;
36267 extern struct device *xpc_chan;
36268-extern struct xpc_arch_operations xpc_arch_ops;
36269+extern xpc_arch_operations_no_const xpc_arch_ops;
36270 extern int xpc_disengage_timelimit;
36271 extern int xpc_disengage_timedout;
36272 extern int xpc_activate_IRQ_rcvd;
36273diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
36274index d971817..33bdca5 100644
36275--- a/drivers/misc/sgi-xp/xpc_main.c
36276+++ b/drivers/misc/sgi-xp/xpc_main.c
36277@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
36278 .notifier_call = xpc_system_die,
36279 };
36280
36281-struct xpc_arch_operations xpc_arch_ops;
36282+xpc_arch_operations_no_const xpc_arch_ops;
36283
36284 /*
36285 * Timer function to enforce the timelimit on the partition disengage.
36286@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
36287
36288 if (((die_args->trapnr == X86_TRAP_MF) ||
36289 (die_args->trapnr == X86_TRAP_XF)) &&
36290- !user_mode_vm(die_args->regs))
36291+ !user_mode(die_args->regs))
36292 xpc_die_deactivate();
36293
36294 break;
36295diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
36296index a0e1720..ee63d0b 100644
36297--- a/drivers/mmc/core/mmc_ops.c
36298+++ b/drivers/mmc/core/mmc_ops.c
36299@@ -245,7 +245,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
36300 void *data_buf;
36301 int is_on_stack;
36302
36303- is_on_stack = object_is_on_stack(buf);
36304+ is_on_stack = object_starts_on_stack(buf);
36305 if (is_on_stack) {
36306 /*
36307 * dma onto stack is unsafe/nonportable, but callers to this
36308diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
36309index 53b8fd9..615b462 100644
36310--- a/drivers/mmc/host/dw_mmc.h
36311+++ b/drivers/mmc/host/dw_mmc.h
36312@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
36313 int (*parse_dt)(struct dw_mci *host);
36314 int (*setup_bus)(struct dw_mci *host,
36315 struct device_node *slot_np, u8 bus_width);
36316-};
36317+} __do_const;
36318 #endif /* _DW_MMC_H_ */
36319diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
36320index c9ec725..178e79a 100644
36321--- a/drivers/mmc/host/sdhci-s3c.c
36322+++ b/drivers/mmc/host/sdhci-s3c.c
36323@@ -719,9 +719,11 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
36324 * we can use overriding functions instead of default.
36325 */
36326 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
36327- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
36328- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
36329- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
36330+ pax_open_kernel();
36331+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
36332+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
36333+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
36334+ pax_close_kernel();
36335 }
36336
36337 /* It supports additional host capabilities if needed */
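
This sdhci-s3c hunk shows the recurring KERNEXEC pattern used throughout the patch: once ops structures live in read-only memory, a late method override must temporarily lift write protection and store through a *(void **)& cast to defeat the const qualifier. A condensed sketch, assuming the PaX pax_open_kernel()/pax_close_kernel() primitives:

	struct demo_ops {
		void (*run)(void);
	};
	static struct demo_ops demo_ops;	/* constified: read-only at runtime */

	static void demo_override(void (*fn)(void))
	{
		pax_open_kernel();		/* temporarily allow kernel writes */
		*(void **)&demo_ops.run = fn;	/* bypass the const-qualified member */
		pax_close_kernel();		/* restore write protection */
	}
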
36338diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
36339index a4eb8b5..8c0628f 100644
36340--- a/drivers/mtd/devices/doc2000.c
36341+++ b/drivers/mtd/devices/doc2000.c
36342@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
36343
36344 /* The ECC will not be calculated correctly if less than 512 is written */
36345 /* DBB-
36346- if (len != 0x200 && eccbuf)
36347+ if (len != 0x200)
36348 printk(KERN_WARNING
36349 "ECC needs a full sector write (adr: %lx size %lx)\n",
36350 (long) to, (long) len);
36351diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
36352index e706a23..b3d262f 100644
36353--- a/drivers/mtd/nand/denali.c
36354+++ b/drivers/mtd/nand/denali.c
36355@@ -26,6 +26,7 @@
36356 #include <linux/pci.h>
36357 #include <linux/mtd/mtd.h>
36358 #include <linux/module.h>
36359+#include <linux/slab.h>
36360
36361 #include "denali.h"
36362
36363diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
36364index 51b9d6a..52af9a7 100644
36365--- a/drivers/mtd/nftlmount.c
36366+++ b/drivers/mtd/nftlmount.c
36367@@ -24,6 +24,7 @@
36368 #include <asm/errno.h>
36369 #include <linux/delay.h>
36370 #include <linux/slab.h>
36371+#include <linux/sched.h>
36372 #include <linux/mtd/mtd.h>
36373 #include <linux/mtd/nand.h>
36374 #include <linux/mtd/nftl.h>
36375diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
36376index 203ff9d..0968ca8 100644
36377--- a/drivers/net/ethernet/8390/ax88796.c
36378+++ b/drivers/net/ethernet/8390/ax88796.c
36379@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
36380 if (ax->plat->reg_offsets)
36381 ei_local->reg_offset = ax->plat->reg_offsets;
36382 else {
36383+ resource_size_t _mem_size = mem_size;
36384+ do_div(_mem_size, 0x18);
36385 ei_local->reg_offset = ax->reg_offsets;
36386 for (ret = 0; ret < 0x18; ret++)
36387- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
36388+ ax->reg_offsets[ret] = _mem_size * ret;
36389 }
36390
36391 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
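
The ax88796 change exists because mem_size is a resource_size_t, which can be 64-bit on 32-bit configurations; a plain '/' would emit a 64-bit libgcc division call the kernel does not provide, so the division goes through do_div(). do_div() divides its first argument in place and returns the remainder:

	resource_size_t quot = mem_size;	/* do_div() modifies its first argument */
	u32 rem = do_div(quot, 0x18);		/* quot = mem_size / 0x18; rem = mem_size % 0x18 */
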
36392diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36393index 9c5ea6c..eaad276 100644
36394--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36395+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
36396@@ -1046,7 +1046,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
36397 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
36398 {
36399 /* RX_MODE controlling object */
36400- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
36401+ bnx2x_init_rx_mode_obj(bp);
36402
36403 /* multicast configuration controlling object */
36404 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
36405diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36406index 614981c..11216c7 100644
36407--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36408+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
36409@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
36410 return rc;
36411 }
36412
36413-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
36414- struct bnx2x_rx_mode_obj *o)
36415+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
36416 {
36417 if (CHIP_IS_E1x(bp)) {
36418- o->wait_comp = bnx2x_empty_rx_mode_wait;
36419- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
36420+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
36421+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
36422 } else {
36423- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
36424- o->config_rx_mode = bnx2x_set_rx_mode_e2;
36425+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
36426+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
36427 }
36428 }
36429
36430diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36431index acf2fe4..efb96df 100644
36432--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36433+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
36434@@ -1281,8 +1281,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
36435
36436 /********************* RX MODE ****************/
36437
36438-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
36439- struct bnx2x_rx_mode_obj *o);
36440+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
36441
36442 /**
36443 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
36444diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
36445index d9308c32..d87b824 100644
36446--- a/drivers/net/ethernet/broadcom/tg3.h
36447+++ b/drivers/net/ethernet/broadcom/tg3.h
36448@@ -140,6 +140,7 @@
36449 #define CHIPREV_ID_5750_A0 0x4000
36450 #define CHIPREV_ID_5750_A1 0x4001
36451 #define CHIPREV_ID_5750_A3 0x4003
36452+#define CHIPREV_ID_5750_C1 0x4201
36453 #define CHIPREV_ID_5750_C2 0x4202
36454 #define CHIPREV_ID_5752_A0_HW 0x5000
36455 #define CHIPREV_ID_5752_A0 0x6000
36456diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36457index 8cffcdf..aadf043 100644
36458--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36459+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
36460@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
36461 */
36462 struct l2t_skb_cb {
36463 arp_failure_handler_func arp_failure_handler;
36464-};
36465+} __no_const;
36466
36467 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
36468
36469diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
36470index f879e92..726f20f 100644
36471--- a/drivers/net/ethernet/dec/tulip/de4x5.c
36472+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
36473@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
36474 for (i=0; i<ETH_ALEN; i++) {
36475 tmp.addr[i] = dev->dev_addr[i];
36476 }
36477- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
36478+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
36479 break;
36480
36481 case DE4X5_SET_HWADDR: /* Set the hardware address */
36482@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
36483 spin_lock_irqsave(&lp->lock, flags);
36484 memcpy(&statbuf, &lp->pktStats, ioc->len);
36485 spin_unlock_irqrestore(&lp->lock, flags);
36486- if (copy_to_user(ioc->data, &statbuf, ioc->len))
36487+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
36488 return -EFAULT;
36489 break;
36490 }
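
Both de4x5 hunks apply one hardening rule: a user-controlled length must be clamped to the size of the kernel source object before copy_to_user(), otherwise the copy discloses adjacent stack memory. The shape of the check:

	/* reject user-controlled lengths larger than the source buffer */
	if (ioc->len > sizeof(statbuf) || copy_to_user(ioc->data, &statbuf, ioc->len))
		return -EFAULT;
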
36491diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
36492index d1b6cc5..cde0d97 100644
36493--- a/drivers/net/ethernet/emulex/benet/be_main.c
36494+++ b/drivers/net/ethernet/emulex/benet/be_main.c
36495@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
36496
36497 if (wrapped)
36498 newacc += 65536;
36499- ACCESS_ONCE(*acc) = newacc;
36500+ ACCESS_ONCE_RW(*acc) = newacc;
36501 }
36502
36503 void be_parse_stats(struct be_adapter *adapter)
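
ACCESS_ONCE_RW() appears wherever the patched code stores through ACCESS_ONCE(): in this tree ACCESS_ONCE() is redefined with a const-qualified volatile pointer so stray writes fail to compile, and the _RW variant is the deliberate, writable counterpart. Roughly, as assumed from the PaX definitions:

	#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
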
36504diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
36505index 74d749e..eefb1bd 100644
36506--- a/drivers/net/ethernet/faraday/ftgmac100.c
36507+++ b/drivers/net/ethernet/faraday/ftgmac100.c
36508@@ -31,6 +31,8 @@
36509 #include <linux/netdevice.h>
36510 #include <linux/phy.h>
36511 #include <linux/platform_device.h>
36512+#include <linux/interrupt.h>
36513+#include <linux/irqreturn.h>
36514 #include <net/ip.h>
36515
36516 #include "ftgmac100.h"
36517diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
36518index b901a01..1ff32ee 100644
36519--- a/drivers/net/ethernet/faraday/ftmac100.c
36520+++ b/drivers/net/ethernet/faraday/ftmac100.c
36521@@ -31,6 +31,8 @@
36522 #include <linux/module.h>
36523 #include <linux/netdevice.h>
36524 #include <linux/platform_device.h>
36525+#include <linux/interrupt.h>
36526+#include <linux/irqreturn.h>
36527
36528 #include "ftmac100.h"
36529
36530diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36531index d929131..aed108f 100644
36532--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36533+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
36534@@ -865,7 +865,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
36535 /* store the new cycle speed */
36536 adapter->cycle_speed = cycle_speed;
36537
36538- ACCESS_ONCE(adapter->base_incval) = incval;
36539+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
36540 smp_mb();
36541
36542 /* grab the ptp lock */
36543diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
36544index c2e420a..26a75e0 100644
36545--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
36546+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
36547@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
36548 struct __vxge_hw_fifo *fifo;
36549 struct vxge_hw_fifo_config *config;
36550 u32 txdl_size, txdl_per_memblock;
36551- struct vxge_hw_mempool_cbs fifo_mp_callback;
36552+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
36553+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
36554+ };
36555+
36556 struct __vxge_hw_virtualpath *vpath;
36557
36558 if ((vp == NULL) || (attr == NULL)) {
36559@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
36560 goto exit;
36561 }
36562
36563- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
36564-
36565 fifo->mempool =
36566 __vxge_hw_mempool_create(vpath->hldev,
36567 fifo->config->memblock_size,
36568diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
36569index 6afe74e..2e2950f 100644
36570--- a/drivers/net/ethernet/realtek/r8169.c
36571+++ b/drivers/net/ethernet/realtek/r8169.c
36572@@ -747,22 +747,22 @@ struct rtl8169_private {
36573 struct mdio_ops {
36574 void (*write)(struct rtl8169_private *, int, int);
36575 int (*read)(struct rtl8169_private *, int);
36576- } mdio_ops;
36577+ } __no_const mdio_ops;
36578
36579 struct pll_power_ops {
36580 void (*down)(struct rtl8169_private *);
36581 void (*up)(struct rtl8169_private *);
36582- } pll_power_ops;
36583+ } __no_const pll_power_ops;
36584
36585 struct jumbo_ops {
36586 void (*enable)(struct rtl8169_private *);
36587 void (*disable)(struct rtl8169_private *);
36588- } jumbo_ops;
36589+ } __no_const jumbo_ops;
36590
36591 struct csi_ops {
36592 void (*write)(struct rtl8169_private *, int, int);
36593 u32 (*read)(struct rtl8169_private *, int);
36594- } csi_ops;
36595+ } __no_const csi_ops;
36596
36597 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
36598 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
36599diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
36600index 0767043f..08c2553 100644
36601--- a/drivers/net/ethernet/sfc/ptp.c
36602+++ b/drivers/net/ethernet/sfc/ptp.c
36603@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
36604 (u32)((u64)ptp->start.dma_addr >> 32));
36605
36606 /* Clear flag that signals MC ready */
36607- ACCESS_ONCE(*start) = 0;
36608+ ACCESS_ONCE_RW(*start) = 0;
36609 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
36610 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
36611
36612diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36613index 0c74a70..3bc6f68 100644
36614--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36615+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
36616@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
36617
36618 writel(value, ioaddr + MMC_CNTRL);
36619
36620- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36621- MMC_CNTRL, value);
36622+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
36623+// MMC_CNTRL, value);
36624 }
36625
36626 /* To mask all interrupts. */
36627diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
36628index 5fd6f46..ee1f265 100644
36629--- a/drivers/net/hyperv/hyperv_net.h
36630+++ b/drivers/net/hyperv/hyperv_net.h
36631@@ -101,7 +101,7 @@ struct rndis_device {
36632
36633 enum rndis_device_state state;
36634 bool link_state;
36635- atomic_t new_req_id;
36636+ atomic_unchecked_t new_req_id;
36637
36638 spinlock_t request_lock;
36639 struct list_head req_list;
36640diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
36641index 928148c..d83298e 100644
36642--- a/drivers/net/hyperv/rndis_filter.c
36643+++ b/drivers/net/hyperv/rndis_filter.c
36644@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36645 * template
36646 */
36647 set = &rndis_msg->msg.set_req;
36648- set->req_id = atomic_inc_return(&dev->new_req_id);
36649+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36650
36651 /* Add to the request list */
36652 spin_lock_irqsave(&dev->request_lock, flags);
36653@@ -760,7 +760,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36654
36655 /* Setup the rndis set */
36656 halt = &request->request_msg.msg.halt_req;
36657- halt->req_id = atomic_inc_return(&dev->new_req_id);
36658+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36659
36660 /* Ignore return since this msg is optional. */
36661 rndis_filter_send_request(dev, request);
36662diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
36663index 7d39add..037e1da 100644
36664--- a/drivers/net/ieee802154/fakehard.c
36665+++ b/drivers/net/ieee802154/fakehard.c
36666@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
36667 phy->transmit_power = 0xbf;
36668
36669 dev->netdev_ops = &fake_ops;
36670- dev->ml_priv = &fake_mlme;
36671+ dev->ml_priv = (void *)&fake_mlme;
36672
36673 priv = netdev_priv(dev);
36674 priv->phy = phy;
36675diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
36676index 0f0f9ce..0ca5819 100644
36677--- a/drivers/net/macvtap.c
36678+++ b/drivers/net/macvtap.c
36679@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
36680 return NOTIFY_DONE;
36681 }
36682
36683-static struct notifier_block macvtap_notifier_block __read_mostly = {
36684+static struct notifier_block macvtap_notifier_block = {
36685 .notifier_call = macvtap_device_event,
36686 };
36687
36688diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
36689index daec9b0..6428fcb 100644
36690--- a/drivers/net/phy/mdio-bitbang.c
36691+++ b/drivers/net/phy/mdio-bitbang.c
36692@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
36693 struct mdiobb_ctrl *ctrl = bus->priv;
36694
36695 module_put(ctrl->ops->owner);
36696+ mdiobus_unregister(bus);
36697 mdiobus_free(bus);
36698 }
36699 EXPORT_SYMBOL(free_mdio_bitbang);
36700diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
36701index eb3f5ce..d773730 100644
36702--- a/drivers/net/ppp/ppp_generic.c
36703+++ b/drivers/net/ppp/ppp_generic.c
36704@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36705 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
36706 struct ppp_stats stats;
36707 struct ppp_comp_stats cstats;
36708- char *vers;
36709
36710 switch (cmd) {
36711 case SIOCGPPPSTATS:
36712@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36713 break;
36714
36715 case SIOCGPPPVER:
36716- vers = PPP_VERSION;
36717- if (copy_to_user(addr, vers, strlen(vers) + 1))
36718+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
36719 break;
36720 err = 0;
36721 break;
36722diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
36723index ad86660..9fd0884 100644
36724--- a/drivers/net/team/team.c
36725+++ b/drivers/net/team/team.c
36726@@ -2601,7 +2601,7 @@ static int team_device_event(struct notifier_block *unused,
36727 return NOTIFY_DONE;
36728 }
36729
36730-static struct notifier_block team_notifier_block __read_mostly = {
36731+static struct notifier_block team_notifier_block = {
36732 .notifier_call = team_device_event,
36733 };
36734
36735diff --git a/drivers/net/tun.c b/drivers/net/tun.c
36736index 0873cdc..ddb178e 100644
36737--- a/drivers/net/tun.c
36738+++ b/drivers/net/tun.c
36739@@ -1374,7 +1374,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
36740 }
36741
36742 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36743- unsigned long arg, int ifreq_len)
36744+ unsigned long arg, size_t ifreq_len)
36745 {
36746 struct tun_file *tfile = file->private_data;
36747 struct tun_struct *tun;
36748@@ -1387,6 +1387,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
36749 int vnet_hdr_sz;
36750 int ret;
36751
36752+ if (ifreq_len > sizeof ifr)
36753+ return -EFAULT;
36754+
36755 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
36756 if (copy_from_user(&ifr, argp, ifreq_len))
36757 return -EFAULT;
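
The tun change widens ifreq_len from int to size_t, ruling out negative lengths, and rejects anything larger than the local struct ifreq before the first copy_from_user(); without the check, any caller passing an oversized length would overflow the on-stack ifr. A sketch of the guard in isolation, assuming kernel context:

	static long demo_ioctl(void __user *argp, size_t ifreq_len)
	{
		struct ifreq ifr;

		if (ifreq_len > sizeof(ifr))	/* oversized request: refuse */
			return -EFAULT;
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
		return 0;
	}
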
36758diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
36759index 605a4ba..a883dd1 100644
36760--- a/drivers/net/usb/hso.c
36761+++ b/drivers/net/usb/hso.c
36762@@ -71,7 +71,7 @@
36763 #include <asm/byteorder.h>
36764 #include <linux/serial_core.h>
36765 #include <linux/serial.h>
36766-
36767+#include <asm/local.h>
36768
36769 #define MOD_AUTHOR "Option Wireless"
36770 #define MOD_DESCRIPTION "USB High Speed Option driver"
36771@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
36772 struct urb *urb;
36773
36774 urb = serial->rx_urb[0];
36775- if (serial->port.count > 0) {
36776+ if (atomic_read(&serial->port.count) > 0) {
36777 count = put_rxbuf_data(urb, serial);
36778 if (count == -1)
36779 return;
36780@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
36781 DUMP1(urb->transfer_buffer, urb->actual_length);
36782
36783 /* Anyone listening? */
36784- if (serial->port.count == 0)
36785+ if (atomic_read(&serial->port.count) == 0)
36786 return;
36787
36788 if (status == 0) {
36789@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36790 tty_port_tty_set(&serial->port, tty);
36791
36792 /* check for port already opened, if not set the termios */
36793- serial->port.count++;
36794- if (serial->port.count == 1) {
36795+ if (atomic_inc_return(&serial->port.count) == 1) {
36796 serial->rx_state = RX_IDLE;
36797 /* Force default termio settings */
36798 _hso_serial_set_termios(tty, NULL);
36799@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
36800 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
36801 if (result) {
36802 hso_stop_serial_device(serial->parent);
36803- serial->port.count--;
36804+ atomic_dec(&serial->port.count);
36805 kref_put(&serial->parent->ref, hso_serial_ref_free);
36806 }
36807 } else {
36808@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
36809
36810 /* reset the rts and dtr */
36811 /* do the actual close */
36812- serial->port.count--;
36813+ atomic_dec(&serial->port.count);
36814
36815- if (serial->port.count <= 0) {
36816- serial->port.count = 0;
36817+ if (atomic_read(&serial->port.count) <= 0) {
36818+ atomic_set(&serial->port.count, 0);
36819 tty_port_tty_set(&serial->port, NULL);
36820 if (!usb_gone)
36821 hso_stop_serial_device(serial->parent);
36822@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
36823
36824 /* the actual setup */
36825 spin_lock_irqsave(&serial->serial_lock, flags);
36826- if (serial->port.count)
36827+ if (atomic_read(&serial->port.count))
36828 _hso_serial_set_termios(tty, old);
36829 else
36830 tty->termios = *old;
36831@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
36832 D1("Pending read interrupt on port %d\n", i);
36833 spin_lock(&serial->serial_lock);
36834 if (serial->rx_state == RX_IDLE &&
36835- serial->port.count > 0) {
36836+ atomic_read(&serial->port.count) > 0) {
36837 /* Setup and send a ctrl req read on
36838 * port i */
36839 if (!serial->rx_urb_filled[0]) {
36840@@ -3078,7 +3077,7 @@ static int hso_resume(struct usb_interface *iface)
36841 /* Start all serial ports */
36842 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
36843 if (serial_table[i] && (serial_table[i]->interface == iface)) {
36844- if (dev2ser(serial_table[i])->port.count) {
36845+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
36846 result =
36847 hso_start_serial_device(serial_table[i], GFP_NOIO);
36848 hso_kick_transmit(dev2ser(serial_table[i]));
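
The hso conversion makes the tty port open count an atomic_t so the open, close, and URB-callback paths stop racing on a plain int; atomic_inc_return() additionally folds the increment and the first-open test into one operation:

	/* first-open detection without widening any lock */
	if (atomic_inc_return(&serial->port.count) == 1)
		first_open_setup();	/* hypothetical setup hook */
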
36849diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
36850index edb81ed..ab8931c 100644
36851--- a/drivers/net/usb/usbnet.c
36852+++ b/drivers/net/usb/usbnet.c
36853@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
36854 unsigned long lockflags;
36855 size_t size = dev->rx_urb_size;
36856
36857+ /* prevent rx skb allocation when error ratio is high */
36858+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
36859+ usb_free_urb(urb);
36860+ return -ENOLINK;
36861+ }
36862+
36863 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
36864 if (!skb) {
36865 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
36866@@ -539,6 +545,17 @@ block:
36867 break;
36868 }
36869
36870+ /* stop rx if packet error rate is high */
36871+ if (++dev->pkt_cnt > 30) {
36872+ dev->pkt_cnt = 0;
36873+ dev->pkt_err = 0;
36874+ } else {
36875+ if (state == rx_cleanup)
36876+ dev->pkt_err++;
36877+ if (dev->pkt_err > 20)
36878+ set_bit(EVENT_RX_KILL, &dev->flags);
36879+ }
36880+
36881 state = defer_bh(dev, skb, &dev->rxq, state);
36882
36883 if (urb) {
36884@@ -790,6 +807,11 @@ int usbnet_open (struct net_device *net)
36885 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
36886 "simple");
36887
36888+ /* reset rx error state */
36889+ dev->pkt_cnt = 0;
36890+ dev->pkt_err = 0;
36891+ clear_bit(EVENT_RX_KILL, &dev->flags);
36892+
36893 // delay posting reads until we're fully open
36894 tasklet_schedule (&dev->bh);
36895 if (info->manage_power) {
36896@@ -1253,6 +1275,9 @@ static void usbnet_bh (unsigned long param)
36897 }
36898 }
36899
36900+	/* restart RX after it was disabled due to the high error rate */
36901+ clear_bit(EVENT_RX_KILL, &dev->flags);
36902+
36903 // waiting for all pending urbs to complete?
36904 if (dev->wait) {
36905 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
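
The usbnet hunks add a windowed error-rate cutoff: within each 30-packet window, more than 20 rx_cleanup completions set EVENT_RX_KILL, which makes rx_submit() refuse new rx URBs until the bottom half clears the flag again. The counting logic in isolation:

	/* kill rx when more than 20 of the last 30 packets ended in error */
	if (++dev->pkt_cnt > 30) {
		dev->pkt_cnt = 0;		/* start a new 30-packet window */
		dev->pkt_err = 0;
	} else {
		if (state == rx_cleanup)	/* this packet ended in error */
			dev->pkt_err++;
		if (dev->pkt_err > 20)
			set_bit(EVENT_RX_KILL, &dev->flags);
	}
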
36906diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36907index 8d78253..bebbb68 100644
36908--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36909+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
36910@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36911 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
36912 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
36913
36914- ACCESS_ONCE(ads->ds_link) = i->link;
36915- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
36916+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
36917+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
36918
36919 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
36920 ctl6 = SM(i->keytype, AR_EncrType);
36921@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36922
36923 if ((i->is_first || i->is_last) &&
36924 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
36925- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
36926+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
36927 | set11nTries(i->rates, 1)
36928 | set11nTries(i->rates, 2)
36929 | set11nTries(i->rates, 3)
36930 | (i->dur_update ? AR_DurUpdateEna : 0)
36931 | SM(0, AR_BurstDur);
36932
36933- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
36934+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
36935 | set11nRate(i->rates, 1)
36936 | set11nRate(i->rates, 2)
36937 | set11nRate(i->rates, 3);
36938 } else {
36939- ACCESS_ONCE(ads->ds_ctl2) = 0;
36940- ACCESS_ONCE(ads->ds_ctl3) = 0;
36941+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
36942+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
36943 }
36944
36945 if (!i->is_first) {
36946- ACCESS_ONCE(ads->ds_ctl0) = 0;
36947- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36948- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36949+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
36950+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36951+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36952 return;
36953 }
36954
36955@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36956 break;
36957 }
36958
36959- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36960+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
36961 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36962 | SM(i->txpower, AR_XmitPower)
36963 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36964@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36965 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
36966 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
36967
36968- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
36969- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
36970+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
36971+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
36972
36973 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
36974 return;
36975
36976- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36977+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
36978 | set11nPktDurRTSCTS(i->rates, 1);
36979
36980- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36981+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
36982 | set11nPktDurRTSCTS(i->rates, 3);
36983
36984- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36985+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
36986 | set11nRateFlags(i->rates, 1)
36987 | set11nRateFlags(i->rates, 2)
36988 | set11nRateFlags(i->rates, 3)
36989diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36990index 301bf72..3f5654f 100644
36991--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36992+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
36993@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36994 (i->qcu << AR_TxQcuNum_S) | desc_len;
36995
36996 checksum += val;
36997- ACCESS_ONCE(ads->info) = val;
36998+ ACCESS_ONCE_RW(ads->info) = val;
36999
37000 checksum += i->link;
37001- ACCESS_ONCE(ads->link) = i->link;
37002+ ACCESS_ONCE_RW(ads->link) = i->link;
37003
37004 checksum += i->buf_addr[0];
37005- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
37006+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
37007 checksum += i->buf_addr[1];
37008- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
37009+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
37010 checksum += i->buf_addr[2];
37011- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
37012+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
37013 checksum += i->buf_addr[3];
37014- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
37015+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
37016
37017 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
37018- ACCESS_ONCE(ads->ctl3) = val;
37019+ ACCESS_ONCE_RW(ads->ctl3) = val;
37020 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
37021- ACCESS_ONCE(ads->ctl5) = val;
37022+ ACCESS_ONCE_RW(ads->ctl5) = val;
37023 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
37024- ACCESS_ONCE(ads->ctl7) = val;
37025+ ACCESS_ONCE_RW(ads->ctl7) = val;
37026 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
37027- ACCESS_ONCE(ads->ctl9) = val;
37028+ ACCESS_ONCE_RW(ads->ctl9) = val;
37029
37030 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
37031- ACCESS_ONCE(ads->ctl10) = checksum;
37032+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
37033
37034 if (i->is_first || i->is_last) {
37035- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
37036+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
37037 | set11nTries(i->rates, 1)
37038 | set11nTries(i->rates, 2)
37039 | set11nTries(i->rates, 3)
37040 | (i->dur_update ? AR_DurUpdateEna : 0)
37041 | SM(0, AR_BurstDur);
37042
37043- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
37044+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
37045 | set11nRate(i->rates, 1)
37046 | set11nRate(i->rates, 2)
37047 | set11nRate(i->rates, 3);
37048 } else {
37049- ACCESS_ONCE(ads->ctl13) = 0;
37050- ACCESS_ONCE(ads->ctl14) = 0;
37051+ ACCESS_ONCE_RW(ads->ctl13) = 0;
37052+ ACCESS_ONCE_RW(ads->ctl14) = 0;
37053 }
37054
37055 ads->ctl20 = 0;
37056@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37057
37058 ctl17 = SM(i->keytype, AR_EncrType);
37059 if (!i->is_first) {
37060- ACCESS_ONCE(ads->ctl11) = 0;
37061- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
37062- ACCESS_ONCE(ads->ctl15) = 0;
37063- ACCESS_ONCE(ads->ctl16) = 0;
37064- ACCESS_ONCE(ads->ctl17) = ctl17;
37065- ACCESS_ONCE(ads->ctl18) = 0;
37066- ACCESS_ONCE(ads->ctl19) = 0;
37067+ ACCESS_ONCE_RW(ads->ctl11) = 0;
37068+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
37069+ ACCESS_ONCE_RW(ads->ctl15) = 0;
37070+ ACCESS_ONCE_RW(ads->ctl16) = 0;
37071+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
37072+ ACCESS_ONCE_RW(ads->ctl18) = 0;
37073+ ACCESS_ONCE_RW(ads->ctl19) = 0;
37074 return;
37075 }
37076
37077- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
37078+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
37079 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
37080 | SM(i->txpower, AR_XmitPower)
37081 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
37082@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
37083 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
37084 ctl12 |= SM(val, AR_PAPRDChainMask);
37085
37086- ACCESS_ONCE(ads->ctl12) = ctl12;
37087- ACCESS_ONCE(ads->ctl17) = ctl17;
37088+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
37089+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
37090
37091- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
37092+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
37093 | set11nPktDurRTSCTS(i->rates, 1);
37094
37095- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
37096+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
37097 | set11nPktDurRTSCTS(i->rates, 3);
37098
37099- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
37100+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
37101 | set11nRateFlags(i->rates, 1)
37102 | set11nRateFlags(i->rates, 2)
37103 | set11nRateFlags(i->rates, 3)
37104 | SM(i->rtscts_rate, AR_RTSCTSRate);
37105
37106- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
37107+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
37108 }
37109
37110 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
37111diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
37112index b68aaf5..fb20845 100644
37113--- a/drivers/net/wireless/ath/ath9k/hw.h
37114+++ b/drivers/net/wireless/ath/ath9k/hw.h
37115@@ -657,7 +657,7 @@ struct ath_hw_private_ops {
37116
37117 /* ANI */
37118 void (*ani_cache_ini_regs)(struct ath_hw *ah);
37119-};
37120+} __no_const;
37121
37122 /**
37123 * struct ath_hw_ops - callbacks used by hardware code and driver code
37124@@ -687,7 +687,7 @@ struct ath_hw_ops {
37125 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
37126 struct ath_hw_antcomb_conf *antconf);
37127 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
37128-};
37129+} __no_const;
37130
37131 struct ath_nf_limits {
37132 s16 max;
37133diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
37134index 71ced17..cd82b12 100644
37135--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
37136+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
37137@@ -184,7 +184,7 @@ struct brcmf_cfg80211_event_loop {
37138 struct net_device *ndev,
37139 const struct brcmf_event_msg *e,
37140 void *data);
37141-};
37142+} __no_const;
37143
37144 /* basic structure of scan request */
37145 struct brcmf_cfg80211_scan_req {
37146@@ -239,7 +239,7 @@ struct brcmf_cfg80211_profile {
37147 struct brcmf_cfg80211_iscan_eloop {
37148 s32 (*handler[WL_SCAN_ERSULTS_LAST])
37149 (struct brcmf_cfg80211_info *cfg);
37150-};
37151+} __no_const;
37152
37153 /* dongle iscan controller */
37154 struct brcmf_cfg80211_iscan_ctrl {
37155diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
37156index e252acb..6ad1e65 100644
37157--- a/drivers/net/wireless/iwlegacy/3945-mac.c
37158+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
37159@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
37160 */
37161 if (il3945_mod_params.disable_hw_scan) {
37162 D_INFO("Disabling hw_scan\n");
37163- il3945_mac_ops.hw_scan = NULL;
37164+ pax_open_kernel();
37165+ *(void **)&il3945_mac_ops.hw_scan = NULL;
37166+ pax_close_kernel();
37167 }
37168
37169 D_INFO("*** LOAD DRIVER ***\n");
37170diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
37171index 1a98fa3..51e6661 100644
37172--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
37173+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
37174@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
37175 {
37176 struct iwl_priv *priv = file->private_data;
37177 char buf[64];
37178- int buf_size;
37179+ size_t buf_size;
37180 u32 offset, len;
37181
37182 memset(buf, 0, sizeof(buf));
37183@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
37184 struct iwl_priv *priv = file->private_data;
37185
37186 char buf[8];
37187- int buf_size;
37188+ size_t buf_size;
37189 u32 reset_flag;
37190
37191 memset(buf, 0, sizeof(buf));
37192@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
37193 {
37194 struct iwl_priv *priv = file->private_data;
37195 char buf[8];
37196- int buf_size;
37197+ size_t buf_size;
37198 int ht40;
37199
37200 memset(buf, 0, sizeof(buf));
37201@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
37202 {
37203 struct iwl_priv *priv = file->private_data;
37204 char buf[8];
37205- int buf_size;
37206+ size_t buf_size;
37207 int value;
37208
37209 memset(buf, 0, sizeof(buf));
37210@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
37211 {
37212 struct iwl_priv *priv = file->private_data;
37213 char buf[8];
37214- int buf_size;
37215+ size_t buf_size;
37216 int clear;
37217
37218 memset(buf, 0, sizeof(buf));
37219@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
37220 {
37221 struct iwl_priv *priv = file->private_data;
37222 char buf[8];
37223- int buf_size;
37224+ size_t buf_size;
37225 int trace;
37226
37227 memset(buf, 0, sizeof(buf));
37228@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
37229 {
37230 struct iwl_priv *priv = file->private_data;
37231 char buf[8];
37232- int buf_size;
37233+ size_t buf_size;
37234 int missed;
37235
37236 memset(buf, 0, sizeof(buf));
37237@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
37238
37239 struct iwl_priv *priv = file->private_data;
37240 char buf[8];
37241- int buf_size;
37242+ size_t buf_size;
37243 int plcp;
37244
37245 memset(buf, 0, sizeof(buf));
37246@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
37247
37248 struct iwl_priv *priv = file->private_data;
37249 char buf[8];
37250- int buf_size;
37251+ size_t buf_size;
37252 int flush;
37253
37254 memset(buf, 0, sizeof(buf));
37255@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
37256
37257 struct iwl_priv *priv = file->private_data;
37258 char buf[8];
37259- int buf_size;
37260+ size_t buf_size;
37261 int rts;
37262
37263 if (!priv->cfg->ht_params)
37264@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
37265 {
37266 struct iwl_priv *priv = file->private_data;
37267 char buf[8];
37268- int buf_size;
37269+ size_t buf_size;
37270
37271 memset(buf, 0, sizeof(buf));
37272 buf_size = min(count, sizeof(buf) - 1);
37273@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
37274 struct iwl_priv *priv = file->private_data;
37275 u32 event_log_flag;
37276 char buf[8];
37277- int buf_size;
37278+ size_t buf_size;
37279
37280 /* check that the interface is up */
37281 if (!iwl_is_ready(priv))
37282@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
37283 struct iwl_priv *priv = file->private_data;
37284 char buf[8];
37285 u32 calib_disabled;
37286- int buf_size;
37287+ size_t buf_size;
37288
37289 memset(buf, 0, sizeof(buf));
37290 buf_size = min(count, sizeof(buf) - 1);
37291diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
37292index fe0fffd..b4c5724 100644
37293--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
37294+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
37295@@ -1967,7 +1967,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
37296 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
37297
37298 char buf[8];
37299- int buf_size;
37300+ size_t buf_size;
37301 u32 reset_flag;
37302
37303 memset(buf, 0, sizeof(buf));
37304@@ -1988,7 +1988,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
37305 {
37306 struct iwl_trans *trans = file->private_data;
37307 char buf[8];
37308- int buf_size;
37309+ size_t buf_size;
37310 int csr;
37311
37312 memset(buf, 0, sizeof(buf));
37313diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
37314index 429ca32..f86236b 100644
37315--- a/drivers/net/wireless/mac80211_hwsim.c
37316+++ b/drivers/net/wireless/mac80211_hwsim.c
37317@@ -1751,9 +1751,11 @@ static int __init init_mac80211_hwsim(void)
37318 return -EINVAL;
37319
37320 if (fake_hw_scan) {
37321- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
37322- mac80211_hwsim_ops.sw_scan_start = NULL;
37323- mac80211_hwsim_ops.sw_scan_complete = NULL;
37324+ pax_open_kernel();
37325+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
37326+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
37327+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
37328+ pax_close_kernel();
37329 }
37330
37331 spin_lock_init(&hwsim_radio_lock);
37332diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
37333index bd1f0cb..db85ab0 100644
37334--- a/drivers/net/wireless/rndis_wlan.c
37335+++ b/drivers/net/wireless/rndis_wlan.c
37336@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
37337
37338 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
37339
37340- if (rts_threshold < 0 || rts_threshold > 2347)
37341+ if (rts_threshold > 2347)
37342 rts_threshold = 2347;
37343
37344 tmp = cpu_to_le32(rts_threshold);
37345diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
37346index 0751b35..246ba3e 100644
37347--- a/drivers/net/wireless/rt2x00/rt2x00.h
37348+++ b/drivers/net/wireless/rt2x00/rt2x00.h
37349@@ -398,7 +398,7 @@ struct rt2x00_intf {
37350 * for hardware which doesn't support hardware
37351 * sequence counting.
37352 */
37353- atomic_t seqno;
37354+ atomic_unchecked_t seqno;
37355 };
37356
37357 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
37358diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
37359index e488b94..14b6a0c 100644
37360--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
37361+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
37362@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
37363 * sequence counter given by mac80211.
37364 */
37365 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
37366- seqno = atomic_add_return(0x10, &intf->seqno);
37367+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
37368 else
37369- seqno = atomic_read(&intf->seqno);
37370+ seqno = atomic_read_unchecked(&intf->seqno);
37371
37372 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
37373 hdr->seq_ctrl |= cpu_to_le16(seqno);
37374diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
37375index e2750a1..797e179 100644
37376--- a/drivers/net/wireless/ti/wl1251/sdio.c
37377+++ b/drivers/net/wireless/ti/wl1251/sdio.c
37378@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
37379
37380 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
37381
37382- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
37383- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
37384+ pax_open_kernel();
37385+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
37386+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
37387+ pax_close_kernel();
37388
37389 wl1251_info("using dedicated interrupt line");
37390 } else {
37391- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
37392- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
37393+ pax_open_kernel();
37394+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
37395+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
37396+ pax_close_kernel();
37397
37398 wl1251_info("using SDIO interrupt");
37399 }
37400diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
37401index dadf1db..d9db7a7 100644
37402--- a/drivers/net/wireless/ti/wl12xx/main.c
37403+++ b/drivers/net/wireless/ti/wl12xx/main.c
37404@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
37405 sizeof(wl->conf.mem));
37406
37407 /* read data preparation is only needed by wl127x */
37408- wl->ops->prepare_read = wl127x_prepare_read;
37409+ pax_open_kernel();
37410+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
37411+ pax_close_kernel();
37412
37413 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
37414 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
37415@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
37416 sizeof(wl->conf.mem));
37417
37418 /* read data preparation is only needed by wl127x */
37419- wl->ops->prepare_read = wl127x_prepare_read;
37420+ pax_open_kernel();
37421+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
37422+ pax_close_kernel();
37423
37424 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
37425 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
37426diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
37427index a39682a..1e8220c 100644
37428--- a/drivers/net/wireless/ti/wl18xx/main.c
37429+++ b/drivers/net/wireless/ti/wl18xx/main.c
37430@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
37431 }
37432
37433 if (!checksum_param) {
37434- wl18xx_ops.set_rx_csum = NULL;
37435- wl18xx_ops.init_vif = NULL;
37436+ pax_open_kernel();
37437+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
37438+ *(void **)&wl18xx_ops.init_vif = NULL;
37439+ pax_close_kernel();
37440 }
37441
37442 /* Enable 11a Band only if we have 5G antennas */
37443diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
37444index d93b2b6..ae50401 100644
37445--- a/drivers/oprofile/buffer_sync.c
37446+++ b/drivers/oprofile/buffer_sync.c
37447@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
37448 if (cookie == NO_COOKIE)
37449 offset = pc;
37450 if (cookie == INVALID_COOKIE) {
37451- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37452+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37453 offset = pc;
37454 }
37455 if (cookie != last_cookie) {
37456@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
37457 /* add userspace sample */
37458
37459 if (!mm) {
37460- atomic_inc(&oprofile_stats.sample_lost_no_mm);
37461+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
37462 return 0;
37463 }
37464
37465 cookie = lookup_dcookie(mm, s->eip, &offset);
37466
37467 if (cookie == INVALID_COOKIE) {
37468- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
37469+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
37470 return 0;
37471 }
37472
37473@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
37474 /* ignore backtraces if failed to add a sample */
37475 if (state == sb_bt_start) {
37476 state = sb_bt_ignore;
37477- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
37478+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
37479 }
37480 }
37481 release_mm(mm);
37482diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
37483index c0cc4e7..44d4e54 100644
37484--- a/drivers/oprofile/event_buffer.c
37485+++ b/drivers/oprofile/event_buffer.c
37486@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
37487 }
37488
37489 if (buffer_pos == buffer_size) {
37490- atomic_inc(&oprofile_stats.event_lost_overflow);
37491+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
37492 return;
37493 }
37494
37495diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
37496index ed2c3ec..deda85a 100644
37497--- a/drivers/oprofile/oprof.c
37498+++ b/drivers/oprofile/oprof.c
37499@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
37500 if (oprofile_ops.switch_events())
37501 return;
37502
37503- atomic_inc(&oprofile_stats.multiplex_counter);
37504+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
37505 start_switch_worker();
37506 }
37507
37508diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
37509index 917d28e..d62d981 100644
37510--- a/drivers/oprofile/oprofile_stats.c
37511+++ b/drivers/oprofile/oprofile_stats.c
37512@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
37513 cpu_buf->sample_invalid_eip = 0;
37514 }
37515
37516- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
37517- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
37518- atomic_set(&oprofile_stats.event_lost_overflow, 0);
37519- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
37520- atomic_set(&oprofile_stats.multiplex_counter, 0);
37521+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
37522+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
37523+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
37524+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
37525+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
37526 }
37527
37528
37529diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
37530index 38b6fc0..b5cbfce 100644
37531--- a/drivers/oprofile/oprofile_stats.h
37532+++ b/drivers/oprofile/oprofile_stats.h
37533@@ -13,11 +13,11 @@
37534 #include <linux/atomic.h>
37535
37536 struct oprofile_stat_struct {
37537- atomic_t sample_lost_no_mm;
37538- atomic_t sample_lost_no_mapping;
37539- atomic_t bt_lost_no_mapping;
37540- atomic_t event_lost_overflow;
37541- atomic_t multiplex_counter;
37542+ atomic_unchecked_t sample_lost_no_mm;
37543+ atomic_unchecked_t sample_lost_no_mapping;
37544+ atomic_unchecked_t bt_lost_no_mapping;
37545+ atomic_unchecked_t event_lost_overflow;
37546+ atomic_unchecked_t multiplex_counter;
37547 };
37548
37549 extern struct oprofile_stat_struct oprofile_stats;
37550diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
37551index 849357c..b83c1e0 100644
37552--- a/drivers/oprofile/oprofilefs.c
37553+++ b/drivers/oprofile/oprofilefs.c
37554@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
37555
37556
37557 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
37558- char const *name, atomic_t *val)
37559+ char const *name, atomic_unchecked_t *val)
37560 {
37561 return __oprofilefs_create_file(sb, root, name,
37562 &atomic_ro_fops, 0444, val);
37563diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
37564index 3f56bc0..707d642 100644
37565--- a/drivers/parport/procfs.c
37566+++ b/drivers/parport/procfs.c
37567@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
37568
37569 *ppos += len;
37570
37571- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
37572+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
37573 }
37574
37575 #ifdef CONFIG_PARPORT_1284
37576@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
37577
37578 *ppos += len;
37579
37580- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
37581+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
37582 }
37583 #endif /* IEEE1284.3 support. */
37584
37585diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
37586index a6a71c4..c91097b 100644
37587--- a/drivers/pci/hotplug/cpcihp_generic.c
37588+++ b/drivers/pci/hotplug/cpcihp_generic.c
37589@@ -73,7 +73,6 @@ static u16 port;
37590 static unsigned int enum_bit;
37591 static u8 enum_mask;
37592
37593-static struct cpci_hp_controller_ops generic_hpc_ops;
37594 static struct cpci_hp_controller generic_hpc;
37595
37596 static int __init validate_parameters(void)
37597@@ -139,6 +138,10 @@ static int query_enum(void)
37598 return ((value & enum_mask) == enum_mask);
37599 }
37600
37601+static struct cpci_hp_controller_ops generic_hpc_ops = {
37602+ .query_enum = query_enum,
37603+};
37604+
37605 static int __init cpcihp_generic_init(void)
37606 {
37607 int status;
37608@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
37609 pci_dev_put(dev);
37610
37611 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
37612- generic_hpc_ops.query_enum = query_enum;
37613 generic_hpc.ops = &generic_hpc_ops;
37614
37615 status = cpci_hp_register_controller(&generic_hpc);
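
cpcihp_generic illustrates the constify-friendly rewrite used throughout the patch: instead of declaring an ops structure and filling in its function pointers at init time, the pointers are supplied in a designated initializer, so the object never needs a runtime write and can be placed in read-only memory by the constify plugin. A standalone sketch:

    struct hp_ops { int (*query_enum)(void); };

    static int query_enum(void) { return 0; }

    /* pointer baked in at build time -- no writable window needed */
    static struct hp_ops generic_ops = {
            .query_enum = query_enum,
    };
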
37616diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
37617index 6bf8d2a..9711ce0 100644
37618--- a/drivers/pci/hotplug/cpcihp_zt5550.c
37619+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
37620@@ -59,7 +59,6 @@
37621 /* local variables */
37622 static bool debug;
37623 static bool poll;
37624-static struct cpci_hp_controller_ops zt5550_hpc_ops;
37625 static struct cpci_hp_controller zt5550_hpc;
37626
37627 /* Primary cPCI bus bridge device */
37628@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
37629 return 0;
37630 }
37631
37632+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
37633+ .query_enum = zt5550_hc_query_enum,
37634+};
37635+
37636 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
37637 {
37638 int status;
37639@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
37640 dbg("returned from zt5550_hc_config");
37641
37642 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
37643- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
37644 zt5550_hpc.ops = &zt5550_hpc_ops;
37645 if(!poll) {
37646 zt5550_hpc.irq = hc_dev->irq;
37647 zt5550_hpc.irq_flags = IRQF_SHARED;
37648 zt5550_hpc.dev_id = hc_dev;
37649
37650- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
37651- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
37652- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
37653+ pax_open_kernel();
37654+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
37655+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
37656+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
37657+ pax_close_kernel();
37658 } else {
37659 info("using ENUM# polling mode");
37660 }
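
Where an ops member genuinely must be chosen at runtime, as with the zt5550 IRQ handlers above, the write is wrapped in a pax_open_kernel()/pax_close_kernel() pair that temporarily lifts kernel write protection, and the (void **) cast strips the const that the constify plugin adds to the field. Every open must be paired with a close, as in the hunk above. The shape of the pattern, assuming PaX semantics; names are placeholders:

    static void install_irq_ops(struct cpci_hp_controller_ops *ops)
    {
            pax_open_kernel();                           /* rodata writable */
            *(void **)&ops->enable_irq  = my_enable_irq; /* cast defeats constify */
            *(void **)&ops->disable_irq = my_disable_irq;
            *(void **)&ops->check_irq   = my_check_irq;
            pax_close_kernel();                          /* re-protect */
    }
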
37661diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
37662index 76ba8a1..20ca857 100644
37663--- a/drivers/pci/hotplug/cpqphp_nvram.c
37664+++ b/drivers/pci/hotplug/cpqphp_nvram.c
37665@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
37666
37667 void compaq_nvram_init (void __iomem *rom_start)
37668 {
37669+
37670+#ifndef CONFIG_PAX_KERNEXEC
37671 if (rom_start) {
37672 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
37673 }
37674+#endif
37675+
37676 dbg("int15 entry = %p\n", compaq_int15_entry_point);
37677
37678 /* initialize our int15 lock */
37679diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
37680index 449f257..0731e96 100644
37681--- a/drivers/pci/pcie/aspm.c
37682+++ b/drivers/pci/pcie/aspm.c
37683@@ -27,9 +27,9 @@
37684 #define MODULE_PARAM_PREFIX "pcie_aspm."
37685
37686 /* Note: those are not register definitions */
37687-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
37688-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
37689-#define ASPM_STATE_L1 (4) /* L1 state */
37690+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
37691+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
37692+#define ASPM_STATE_L1 (4U) /* L1 state */
37693 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
37694 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
37695
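
The U suffixes on the ASPM state bits keep the flag arithmetic unsigned throughout. One observable difference shows up when a complemented flag widens: ~(4) is the signed int -5 and sign-extends into wider types, while ~(4U) zero-extends. A userspace sketch:

    #include <stdio.h>

    #define L1_SIGNED   (4)
    #define L1_UNSIGNED (4U)

    int main(void)
    {
            unsigned long long m;

            m = ~L1_SIGNED;     /* int -5, sign-extended on conversion */
            printf("%llx\n", m);                /* fffffffffffffffb */

            m = ~L1_UNSIGNED;   /* 32-bit 0xfffffffb, zero-extended */
            printf("%llx\n", m);                /* fffffffb */
            return 0;
    }
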
37696diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
37697index ec909af..e7517f3 100644
37698--- a/drivers/pci/probe.c
37699+++ b/drivers/pci/probe.c
37700@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
37701 struct pci_bus_region region;
37702 bool bar_too_big = false, bar_disabled = false;
37703
37704- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
37705+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
37706
37707 /* No printks while decoding is disabled! */
37708 if (!dev->mmio_always_on) {
37709diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
37710index 9b8505c..f00870a 100644
37711--- a/drivers/pci/proc.c
37712+++ b/drivers/pci/proc.c
37713@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
37714 static int __init pci_proc_init(void)
37715 {
37716 struct pci_dev *dev = NULL;
37717+
37718+#ifdef CONFIG_GRKERNSEC_PROC_ADD
37719+#ifdef CONFIG_GRKERNSEC_PROC_USER
37720+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
37721+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37722+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
37723+#endif
37724+#else
37725 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
37726+#endif
37727 proc_create("devices", 0, proc_bus_pci_dir,
37728 &proc_bus_pci_dev_operations);
37729 proc_initialized = 1;
37730diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
37731index 75dd651..2af4c9a 100644
37732--- a/drivers/platform/x86/thinkpad_acpi.c
37733+++ b/drivers/platform/x86/thinkpad_acpi.c
37734@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
37735 return 0;
37736 }
37737
37738-void static hotkey_mask_warn_incomplete_mask(void)
37739+static void hotkey_mask_warn_incomplete_mask(void)
37740 {
37741 /* log only what the user can fix... */
37742 const u32 wantedmask = hotkey_driver_mask &
37743@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
37744 }
37745 }
37746
37747-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37748- struct tp_nvram_state *newn,
37749- const u32 event_mask)
37750-{
37751-
37752 #define TPACPI_COMPARE_KEY(__scancode, __member) \
37753 do { \
37754 if ((event_mask & (1 << __scancode)) && \
37755@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37756 tpacpi_hotkey_send_key(__scancode); \
37757 } while (0)
37758
37759- void issue_volchange(const unsigned int oldvol,
37760- const unsigned int newvol)
37761- {
37762- unsigned int i = oldvol;
37763+static void issue_volchange(const unsigned int oldvol,
37764+ const unsigned int newvol,
37765+ const u32 event_mask)
37766+{
37767+ unsigned int i = oldvol;
37768
37769- while (i > newvol) {
37770- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37771- i--;
37772- }
37773- while (i < newvol) {
37774- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37775- i++;
37776- }
37777+ while (i > newvol) {
37778+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
37779+ i--;
37780 }
37781+ while (i < newvol) {
37782+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37783+ i++;
37784+ }
37785+}
37786
37787- void issue_brightnesschange(const unsigned int oldbrt,
37788- const unsigned int newbrt)
37789- {
37790- unsigned int i = oldbrt;
37791+static void issue_brightnesschange(const unsigned int oldbrt,
37792+ const unsigned int newbrt,
37793+ const u32 event_mask)
37794+{
37795+ unsigned int i = oldbrt;
37796
37797- while (i > newbrt) {
37798- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37799- i--;
37800- }
37801- while (i < newbrt) {
37802- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37803- i++;
37804- }
37805+ while (i > newbrt) {
37806+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
37807+ i--;
37808+ }
37809+ while (i < newbrt) {
37810+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37811+ i++;
37812 }
37813+}
37814
37815+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37816+ struct tp_nvram_state *newn,
37817+ const u32 event_mask)
37818+{
37819 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
37820 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
37821 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
37822@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37823 oldn->volume_level != newn->volume_level) {
37824 /* recently muted, or repeated mute keypress, or
37825 * multiple presses ending in mute */
37826- issue_volchange(oldn->volume_level, newn->volume_level);
37827+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37828 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
37829 }
37830 } else {
37831@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37832 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
37833 }
37834 if (oldn->volume_level != newn->volume_level) {
37835- issue_volchange(oldn->volume_level, newn->volume_level);
37836+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
37837 } else if (oldn->volume_toggle != newn->volume_toggle) {
37838 /* repeated vol up/down keypress at end of scale ? */
37839 if (newn->volume_level == 0)
37840@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37841 /* handle brightness */
37842 if (oldn->brightness_level != newn->brightness_level) {
37843 issue_brightnesschange(oldn->brightness_level,
37844- newn->brightness_level);
37845+ newn->brightness_level,
37846+ event_mask);
37847 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
37848 /* repeated key presses that didn't change state */
37849 if (newn->brightness_level == 0)
37850@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
37851 && !tp_features.bright_unkfw)
37852 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
37853 }
37854+}
37855
37856 #undef TPACPI_COMPARE_KEY
37857 #undef TPACPI_MAY_SEND_KEY
37858-}
37859
37860 /*
37861 * Polling driver
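
The thinkpad_acpi hunks remove GCC nested functions: issue_volchange() and issue_brightnesschange() were defined inside hotkey_compare_and_issue_event() and captured its event_mask. Nested functions are a GNU C extension, and taking their address makes GCC build an executable-stack trampoline, which a non-executable stack policy forbids; the patch hoists them to file scope and passes the captured value as an explicit parameter. In miniature:

    static void do_thing(int v) { (void)v; }   /* stand-in */

    /* before (sketch): nested function capturing 'mask' from the
     * enclosing frame -- a GNU extension:
     *
     *     void outer(unsigned mask) {
     *             void helper(int v) { if (mask & (1u << v)) do_thing(v); }
     *             helper(3);
     *     }
     */

    /* after: file-scope static; the captured value travels explicitly,
     * mirroring how event_mask is threaded through above */
    static void helper(unsigned mask, int v)
    {
            if (mask & (1u << v))
                    do_thing(v);
    }

    static void outer(unsigned mask)
    {
            helper(mask, 3);
    }
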
37862diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
37863index 769d265..a3a05ca 100644
37864--- a/drivers/pnp/pnpbios/bioscalls.c
37865+++ b/drivers/pnp/pnpbios/bioscalls.c
37866@@ -58,7 +58,7 @@ do { \
37867 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
37868 } while(0)
37869
37870-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
37871+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
37872 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
37873
37874 /*
37875@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37876
37877 cpu = get_cpu();
37878 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
37879+
37880+ pax_open_kernel();
37881 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
37882+ pax_close_kernel();
37883
37884 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
37885 spin_lock_irqsave(&pnp_bios_lock, flags);
37886@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
37887 :"memory");
37888 spin_unlock_irqrestore(&pnp_bios_lock, flags);
37889
37890+ pax_open_kernel();
37891 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
37892+ pax_close_kernel();
37893+
37894 put_cpu();
37895
37896 /* If we get here and this is set then the PnP BIOS faulted on us. */
37897@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
37898 return status;
37899 }
37900
37901-void pnpbios_calls_init(union pnp_bios_install_struct *header)
37902+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
37903 {
37904 int i;
37905
37906@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37907 pnp_bios_callpoint.offset = header->fields.pm16offset;
37908 pnp_bios_callpoint.segment = PNP_CS16;
37909
37910+ pax_open_kernel();
37911+
37912 for_each_possible_cpu(i) {
37913 struct desc_struct *gdt = get_cpu_gdt_table(i);
37914 if (!gdt)
37915@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
37916 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
37917 (unsigned long)__va(header->fields.pm16dseg));
37918 }
37919+
37920+ pax_close_kernel();
37921 }
37922diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
37923index b0ecacb..7c9da2e 100644
37924--- a/drivers/pnp/resource.c
37925+++ b/drivers/pnp/resource.c
37926@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
37927 return 1;
37928
37929 /* check if the resource is valid */
37930- if (*irq < 0 || *irq > 15)
37931+ if (*irq > 15)
37932 return 0;
37933
37934 /* check if the resource is reserved */
37935@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
37936 return 1;
37937
37938 /* check if the resource is valid */
37939- if (*dma < 0 || *dma == 4 || *dma > 7)
37940+ if (*dma == 4 || *dma > 7)
37941 return 0;
37942
37943 /* check if the resource is reserved */
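
Both pnp checks compare resource values that have an unsigned type (resource_size_t), so the removed (*irq < 0) and (*dma < 0) arms could never be true; gcc's -Wtype-limits flags exactly this. The range check reduces to its upper bound:

    /* resource_size_t is unsigned: the < 0 arm is dead code */
    static int irq_in_range(resource_size_t irq)
    {
            return irq <= 15;        /* was: !(irq < 0 || irq > 15) */
    }
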
37944diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
37945index 7df7c5f..bd48c47 100644
37946--- a/drivers/power/pda_power.c
37947+++ b/drivers/power/pda_power.c
37948@@ -37,7 +37,11 @@ static int polling;
37949
37950 #ifdef CONFIG_USB_OTG_UTILS
37951 static struct usb_phy *transceiver;
37952-static struct notifier_block otg_nb;
37953+static int otg_handle_notification(struct notifier_block *nb,
37954+ unsigned long event, void *unused);
37955+static struct notifier_block otg_nb = {
37956+ .notifier_call = otg_handle_notification
37957+};
37958 #endif
37959
37960 static struct regulator *ac_draw;
37961@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
37962
37963 #ifdef CONFIG_USB_OTG_UTILS
37964 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
37965- otg_nb.notifier_call = otg_handle_notification;
37966 ret = usb_register_notifier(transceiver, &otg_nb);
37967 if (ret) {
37968 dev_err(dev, "failure to register otg notifier\n");
37969diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
37970index 8d53174..04c65de 100644
37971--- a/drivers/regulator/max8660.c
37972+++ b/drivers/regulator/max8660.c
37973@@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
37974 max8660->shadow_regs[MAX8660_OVER1] = 5;
37975 } else {
37976 /* Otherwise devices can be toggled via software */
37977- max8660_dcdc_ops.enable = max8660_dcdc_enable;
37978- max8660_dcdc_ops.disable = max8660_dcdc_disable;
37979+ pax_open_kernel();
37980+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
37981+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
37982+ pax_close_kernel();
37983 }
37984
37985 /*
37986diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
37987index 1fa6381..f58834e 100644
37988--- a/drivers/regulator/mc13892-regulator.c
37989+++ b/drivers/regulator/mc13892-regulator.c
37990@@ -540,10 +540,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
37991 }
37992 mc13xxx_unlock(mc13892);
37993
37994- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37995+ pax_open_kernel();
37996+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
37997 = mc13892_vcam_set_mode;
37998- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
37999+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
38000 = mc13892_vcam_get_mode;
38001+ pax_close_kernel();
38002
38003 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
38004 ARRAY_SIZE(mc13892_regulators));
38005diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
38006index cace6d3..f623fda 100644
38007--- a/drivers/rtc/rtc-dev.c
38008+++ b/drivers/rtc/rtc-dev.c
38009@@ -14,6 +14,7 @@
38010 #include <linux/module.h>
38011 #include <linux/rtc.h>
38012 #include <linux/sched.h>
38013+#include <linux/grsecurity.h>
38014 #include "rtc-core.h"
38015
38016 static dev_t rtc_devt;
38017@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
38018 if (copy_from_user(&tm, uarg, sizeof(tm)))
38019 return -EFAULT;
38020
38021+ gr_log_timechange();
38022+
38023 return rtc_set_time(rtc, &tm);
38024
38025 case RTC_PIE_ON:
38026diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
38027index 4ad7e36..d004679 100644
38028--- a/drivers/scsi/bfa/bfa.h
38029+++ b/drivers/scsi/bfa/bfa.h
38030@@ -196,7 +196,7 @@ struct bfa_hwif_s {
38031 u32 *end);
38032 int cpe_vec_q0;
38033 int rme_vec_q0;
38034-};
38035+} __no_const;
38036 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
38037
38038 struct bfa_faa_cbfn_s {
38039diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
38040index e693af6..2e525b6 100644
38041--- a/drivers/scsi/bfa/bfa_fcpim.h
38042+++ b/drivers/scsi/bfa/bfa_fcpim.h
38043@@ -36,7 +36,7 @@ struct bfa_iotag_s {
38044
38045 struct bfa_itn_s {
38046 bfa_isr_func_t isr;
38047-};
38048+} __no_const;
38049
38050 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
38051 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
38052diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
38053index 23a90e7..9cf04ee 100644
38054--- a/drivers/scsi/bfa/bfa_ioc.h
38055+++ b/drivers/scsi/bfa/bfa_ioc.h
38056@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
38057 bfa_ioc_disable_cbfn_t disable_cbfn;
38058 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
38059 bfa_ioc_reset_cbfn_t reset_cbfn;
38060-};
38061+} __no_const;
38062
38063 /*
38064 * IOC event notification mechanism.
38065@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
38066 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
38067 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
38068 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
38069-};
38070+} __no_const;
38071
38072 /*
38073 * Queue element to wait for room in request queue. FIFO order is
38074diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
38075index 593085a..47aa999 100644
38076--- a/drivers/scsi/hosts.c
38077+++ b/drivers/scsi/hosts.c
38078@@ -42,7 +42,7 @@
38079 #include "scsi_logging.h"
38080
38081
38082-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
38083+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
38084
38085
38086 static void scsi_host_cls_release(struct device *dev)
38087@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
38088 * subtract one because we increment first then return, but we need to
38089 * know what the next host number was before increment
38090 */
38091- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
38092+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
38093 shost->dma_channel = 0xff;
38094
38095 /* These three are default values which can be overridden */
38096diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
38097index 4217e49..9c77e3e 100644
38098--- a/drivers/scsi/hpsa.c
38099+++ b/drivers/scsi/hpsa.c
38100@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
38101 unsigned long flags;
38102
38103 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38104- return h->access.command_completed(h, q);
38105+ return h->access->command_completed(h, q);
38106
38107 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
38108 a = rq->head[rq->current_entry];
38109@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
38110 while (!list_empty(&h->reqQ)) {
38111 c = list_entry(h->reqQ.next, struct CommandList, list);
38112 /* can't do anything if fifo is full */
38113- if ((h->access.fifo_full(h))) {
38114+ if ((h->access->fifo_full(h))) {
38115 dev_warn(&h->pdev->dev, "fifo full\n");
38116 break;
38117 }
38118@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
38119
38120 /* Tell the controller execute command */
38121 spin_unlock_irqrestore(&h->lock, flags);
38122- h->access.submit_command(h, c);
38123+ h->access->submit_command(h, c);
38124 spin_lock_irqsave(&h->lock, flags);
38125 }
38126 spin_unlock_irqrestore(&h->lock, flags);
38127@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
38128
38129 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
38130 {
38131- return h->access.command_completed(h, q);
38132+ return h->access->command_completed(h, q);
38133 }
38134
38135 static inline bool interrupt_pending(struct ctlr_info *h)
38136 {
38137- return h->access.intr_pending(h);
38138+ return h->access->intr_pending(h);
38139 }
38140
38141 static inline long interrupt_not_for_us(struct ctlr_info *h)
38142 {
38143- return (h->access.intr_pending(h) == 0) ||
38144+ return (h->access->intr_pending(h) == 0) ||
38145 (h->interrupts_enabled == 0);
38146 }
38147
38148@@ -4318,7 +4318,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
38149 if (prod_index < 0)
38150 return -ENODEV;
38151 h->product_name = products[prod_index].product_name;
38152- h->access = *(products[prod_index].access);
38153+ h->access = products[prod_index].access;
38154
38155 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
38156 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
38157@@ -4600,7 +4600,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
38158
38159 assert_spin_locked(&lockup_detector_lock);
38160 remove_ctlr_from_lockup_detector_list(h);
38161- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38162+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38163 spin_lock_irqsave(&h->lock, flags);
38164 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
38165 spin_unlock_irqrestore(&h->lock, flags);
38166@@ -4778,7 +4778,7 @@ reinit_after_soft_reset:
38167 }
38168
38169 /* make sure the board interrupts are off */
38170- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38171+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38172
38173 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
38174 goto clean2;
38175@@ -4812,7 +4812,7 @@ reinit_after_soft_reset:
38176 * fake ones to scoop up any residual completions.
38177 */
38178 spin_lock_irqsave(&h->lock, flags);
38179- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38180+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38181 spin_unlock_irqrestore(&h->lock, flags);
38182 free_irqs(h);
38183 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
38184@@ -4831,9 +4831,9 @@ reinit_after_soft_reset:
38185 dev_info(&h->pdev->dev, "Board READY.\n");
38186 dev_info(&h->pdev->dev,
38187 "Waiting for stale completions to drain.\n");
38188- h->access.set_intr_mask(h, HPSA_INTR_ON);
38189+ h->access->set_intr_mask(h, HPSA_INTR_ON);
38190 msleep(10000);
38191- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38192+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38193
38194 rc = controller_reset_failed(h->cfgtable);
38195 if (rc)
38196@@ -4854,7 +4854,7 @@ reinit_after_soft_reset:
38197 }
38198
38199 /* Turn the interrupts on so we can service requests */
38200- h->access.set_intr_mask(h, HPSA_INTR_ON);
38201+ h->access->set_intr_mask(h, HPSA_INTR_ON);
38202
38203 hpsa_hba_inquiry(h);
38204 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
38205@@ -4906,7 +4906,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
38206 * To write all data in the battery backed cache to disks
38207 */
38208 hpsa_flush_cache(h);
38209- h->access.set_intr_mask(h, HPSA_INTR_OFF);
38210+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
38211 hpsa_free_irqs_and_disable_msix(h);
38212 }
38213
38214@@ -5075,7 +5075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
38215 return;
38216 }
38217 /* Change the access methods to the performant access methods */
38218- h->access = SA5_performant_access;
38219+ h->access = &SA5_performant_access;
38220 h->transMethod = CFGTBL_Trans_Performant;
38221 }
38222
38223diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
38224index 9816479..c5d4e97 100644
38225--- a/drivers/scsi/hpsa.h
38226+++ b/drivers/scsi/hpsa.h
38227@@ -79,7 +79,7 @@ struct ctlr_info {
38228 unsigned int msix_vector;
38229 unsigned int msi_vector;
38230 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
38231- struct access_method access;
38232+ struct access_method *access;
38233
38234 /* queue and queue Info */
38235 struct list_head reqQ;
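
The hpsa conversion swaps an embedded copy of the access_method template for a pointer to it: the structure assignment h->access = *(products[i].access) is a wholesale runtime write to a struct of function pointers, which the constify plugin forbids, while a pointer lets every controller share one template with no copy. Every call site changes from h->access.fn() to h->access->fn(). Condensed sketch:

    struct access_method { void (*submit_command)(void *h); };

    static void sa5_submit(void *h) { (void)h; }

    static struct access_method SA5_access = {
            .submit_command = sa5_submit,
    };

    struct ctlr_info {
            struct access_method *access;  /* was: struct access_method access; */
    };

    static void use(struct ctlr_info *h)
    {
            h->access = &SA5_access;       /* was: h->access = SA5_access; */
            h->access->submit_command(h);  /* was: h->access.submit_command(h); */
    }
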
38236diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
38237index c772d8d..35c362c 100644
38238--- a/drivers/scsi/libfc/fc_exch.c
38239+++ b/drivers/scsi/libfc/fc_exch.c
38240@@ -100,12 +100,12 @@ struct fc_exch_mgr {
38241 u16 pool_max_index;
38242
38243 struct {
38244- atomic_t no_free_exch;
38245- atomic_t no_free_exch_xid;
38246- atomic_t xid_not_found;
38247- atomic_t xid_busy;
38248- atomic_t seq_not_found;
38249- atomic_t non_bls_resp;
38250+ atomic_unchecked_t no_free_exch;
38251+ atomic_unchecked_t no_free_exch_xid;
38252+ atomic_unchecked_t xid_not_found;
38253+ atomic_unchecked_t xid_busy;
38254+ atomic_unchecked_t seq_not_found;
38255+ atomic_unchecked_t non_bls_resp;
38256 } stats;
38257 };
38258
38259@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
38260 /* allocate memory for exchange */
38261 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
38262 if (!ep) {
38263- atomic_inc(&mp->stats.no_free_exch);
38264+ atomic_inc_unchecked(&mp->stats.no_free_exch);
38265 goto out;
38266 }
38267 memset(ep, 0, sizeof(*ep));
38268@@ -786,7 +786,7 @@ out:
38269 return ep;
38270 err:
38271 spin_unlock_bh(&pool->lock);
38272- atomic_inc(&mp->stats.no_free_exch_xid);
38273+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
38274 mempool_free(ep, mp->ep_pool);
38275 return NULL;
38276 }
38277@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38278 xid = ntohs(fh->fh_ox_id); /* we originated exch */
38279 ep = fc_exch_find(mp, xid);
38280 if (!ep) {
38281- atomic_inc(&mp->stats.xid_not_found);
38282+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38283 reject = FC_RJT_OX_ID;
38284 goto out;
38285 }
38286@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38287 ep = fc_exch_find(mp, xid);
38288 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
38289 if (ep) {
38290- atomic_inc(&mp->stats.xid_busy);
38291+ atomic_inc_unchecked(&mp->stats.xid_busy);
38292 reject = FC_RJT_RX_ID;
38293 goto rel;
38294 }
38295@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38296 }
38297 xid = ep->xid; /* get our XID */
38298 } else if (!ep) {
38299- atomic_inc(&mp->stats.xid_not_found);
38300+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38301 reject = FC_RJT_RX_ID; /* XID not found */
38302 goto out;
38303 }
38304@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
38305 } else {
38306 sp = &ep->seq;
38307 if (sp->id != fh->fh_seq_id) {
38308- atomic_inc(&mp->stats.seq_not_found);
38309+ atomic_inc_unchecked(&mp->stats.seq_not_found);
38310 if (f_ctl & FC_FC_END_SEQ) {
38311 /*
38312 * Update sequence_id based on incoming last
38313@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38314
38315 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
38316 if (!ep) {
38317- atomic_inc(&mp->stats.xid_not_found);
38318+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38319 goto out;
38320 }
38321 if (ep->esb_stat & ESB_ST_COMPLETE) {
38322- atomic_inc(&mp->stats.xid_not_found);
38323+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38324 goto rel;
38325 }
38326 if (ep->rxid == FC_XID_UNKNOWN)
38327 ep->rxid = ntohs(fh->fh_rx_id);
38328 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
38329- atomic_inc(&mp->stats.xid_not_found);
38330+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38331 goto rel;
38332 }
38333 if (ep->did != ntoh24(fh->fh_s_id) &&
38334 ep->did != FC_FID_FLOGI) {
38335- atomic_inc(&mp->stats.xid_not_found);
38336+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38337 goto rel;
38338 }
38339 sof = fr_sof(fp);
38340@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38341 sp->ssb_stat |= SSB_ST_RESP;
38342 sp->id = fh->fh_seq_id;
38343 } else if (sp->id != fh->fh_seq_id) {
38344- atomic_inc(&mp->stats.seq_not_found);
38345+ atomic_inc_unchecked(&mp->stats.seq_not_found);
38346 goto rel;
38347 }
38348
38349@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
38350 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
38351
38352 if (!sp)
38353- atomic_inc(&mp->stats.xid_not_found);
38354+ atomic_inc_unchecked(&mp->stats.xid_not_found);
38355 else
38356- atomic_inc(&mp->stats.non_bls_resp);
38357+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
38358
38359 fc_frame_free(fp);
38360 }
38361@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
38362
38363 list_for_each_entry(ema, &lport->ema_list, ema_list) {
38364 mp = ema->mp;
38365- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
38366+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
38367 st->fc_no_free_exch_xid +=
38368- atomic_read(&mp->stats.no_free_exch_xid);
38369- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
38370- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
38371- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
38372- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
38373+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
38374+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
38375+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
38376+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
38377+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
38378 }
38379 }
38380 EXPORT_SYMBOL(fc_exch_update_stats);
38381diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
38382index bdb81cd..d3c7c2c 100644
38383--- a/drivers/scsi/libsas/sas_ata.c
38384+++ b/drivers/scsi/libsas/sas_ata.c
38385@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
38386 .postreset = ata_std_postreset,
38387 .error_handler = ata_std_error_handler,
38388 .post_internal_cmd = sas_ata_post_internal,
38389- .qc_defer = ata_std_qc_defer,
38390+ .qc_defer = ata_std_qc_defer,
38391 .qc_prep = ata_noop_qc_prep,
38392 .qc_issue = sas_ata_qc_issue,
38393 .qc_fill_rtf = sas_ata_qc_fill_rtf,
38394diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
38395index 69b5993..1ac9dce 100644
38396--- a/drivers/scsi/lpfc/lpfc.h
38397+++ b/drivers/scsi/lpfc/lpfc.h
38398@@ -424,7 +424,7 @@ struct lpfc_vport {
38399 struct dentry *debug_nodelist;
38400 struct dentry *vport_debugfs_root;
38401 struct lpfc_debugfs_trc *disc_trc;
38402- atomic_t disc_trc_cnt;
38403+ atomic_unchecked_t disc_trc_cnt;
38404 #endif
38405 uint8_t stat_data_enabled;
38406 uint8_t stat_data_blocked;
38407@@ -840,8 +840,8 @@ struct lpfc_hba {
38408 struct timer_list fabric_block_timer;
38409 unsigned long bit_flags;
38410 #define FABRIC_COMANDS_BLOCKED 0
38411- atomic_t num_rsrc_err;
38412- atomic_t num_cmd_success;
38413+ atomic_unchecked_t num_rsrc_err;
38414+ atomic_unchecked_t num_cmd_success;
38415 unsigned long last_rsrc_error_time;
38416 unsigned long last_ramp_down_time;
38417 unsigned long last_ramp_up_time;
38418@@ -877,7 +877,7 @@ struct lpfc_hba {
38419
38420 struct dentry *debug_slow_ring_trc;
38421 struct lpfc_debugfs_trc *slow_ring_trc;
38422- atomic_t slow_ring_trc_cnt;
38423+ atomic_unchecked_t slow_ring_trc_cnt;
38424 /* iDiag debugfs sub-directory */
38425 struct dentry *idiag_root;
38426 struct dentry *idiag_pci_cfg;
38427diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
38428index f63f5ff..de29189 100644
38429--- a/drivers/scsi/lpfc/lpfc_debugfs.c
38430+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
38431@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
38432
38433 #include <linux/debugfs.h>
38434
38435-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38436+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
38437 static unsigned long lpfc_debugfs_start_time = 0L;
38438
38439 /* iDiag */
38440@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
38441 lpfc_debugfs_enable = 0;
38442
38443 len = 0;
38444- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
38445+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
38446 (lpfc_debugfs_max_disc_trc - 1);
38447 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
38448 dtp = vport->disc_trc + i;
38449@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
38450 lpfc_debugfs_enable = 0;
38451
38452 len = 0;
38453- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
38454+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
38455 (lpfc_debugfs_max_slow_ring_trc - 1);
38456 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
38457 dtp = phba->slow_ring_trc + i;
38458@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
38459 !vport || !vport->disc_trc)
38460 return;
38461
38462- index = atomic_inc_return(&vport->disc_trc_cnt) &
38463+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
38464 (lpfc_debugfs_max_disc_trc - 1);
38465 dtp = vport->disc_trc + index;
38466 dtp->fmt = fmt;
38467 dtp->data1 = data1;
38468 dtp->data2 = data2;
38469 dtp->data3 = data3;
38470- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38471+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38472 dtp->jif = jiffies;
38473 #endif
38474 return;
38475@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
38476 !phba || !phba->slow_ring_trc)
38477 return;
38478
38479- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
38480+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
38481 (lpfc_debugfs_max_slow_ring_trc - 1);
38482 dtp = phba->slow_ring_trc + index;
38483 dtp->fmt = fmt;
38484 dtp->data1 = data1;
38485 dtp->data2 = data2;
38486 dtp->data3 = data3;
38487- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
38488+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
38489 dtp->jif = jiffies;
38490 #endif
38491 return;
38492@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38493 "slow_ring buffer\n");
38494 goto debug_failed;
38495 }
38496- atomic_set(&phba->slow_ring_trc_cnt, 0);
38497+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
38498 memset(phba->slow_ring_trc, 0,
38499 (sizeof(struct lpfc_debugfs_trc) *
38500 lpfc_debugfs_max_slow_ring_trc));
38501@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
38502 "buffer\n");
38503 goto debug_failed;
38504 }
38505- atomic_set(&vport->disc_trc_cnt, 0);
38506+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
38507
38508 snprintf(name, sizeof(name), "discovery_trace");
38509 vport->debug_disc_trc =
38510diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
38511index 7dc4218..3436f08 100644
38512--- a/drivers/scsi/lpfc/lpfc_init.c
38513+++ b/drivers/scsi/lpfc/lpfc_init.c
38514@@ -10589,8 +10589,10 @@ lpfc_init(void)
38515 "misc_register returned with status %d", error);
38516
38517 if (lpfc_enable_npiv) {
38518- lpfc_transport_functions.vport_create = lpfc_vport_create;
38519- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38520+ pax_open_kernel();
38521+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
38522+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
38523+ pax_close_kernel();
38524 }
38525 lpfc_transport_template =
38526 fc_attach_transport(&lpfc_transport_functions);
38527diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
38528index 7f45ac9..cf62eda 100644
38529--- a/drivers/scsi/lpfc/lpfc_scsi.c
38530+++ b/drivers/scsi/lpfc/lpfc_scsi.c
38531@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
38532 uint32_t evt_posted;
38533
38534 spin_lock_irqsave(&phba->hbalock, flags);
38535- atomic_inc(&phba->num_rsrc_err);
38536+ atomic_inc_unchecked(&phba->num_rsrc_err);
38537 phba->last_rsrc_error_time = jiffies;
38538
38539 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
38540@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
38541 unsigned long flags;
38542 struct lpfc_hba *phba = vport->phba;
38543 uint32_t evt_posted;
38544- atomic_inc(&phba->num_cmd_success);
38545+ atomic_inc_unchecked(&phba->num_cmd_success);
38546
38547 if (vport->cfg_lun_queue_depth <= queue_depth)
38548 return;
38549@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38550 unsigned long num_rsrc_err, num_cmd_success;
38551 int i;
38552
38553- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
38554- num_cmd_success = atomic_read(&phba->num_cmd_success);
38555+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
38556+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
38557
38558 /*
38559 * The error and success command counters are global per
38560@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
38561 }
38562 }
38563 lpfc_destroy_vport_work_array(phba, vports);
38564- atomic_set(&phba->num_rsrc_err, 0);
38565- atomic_set(&phba->num_cmd_success, 0);
38566+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
38567+ atomic_set_unchecked(&phba->num_cmd_success, 0);
38568 }
38569
38570 /**
38571@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
38572 }
38573 }
38574 lpfc_destroy_vport_work_array(phba, vports);
38575- atomic_set(&phba->num_rsrc_err, 0);
38576- atomic_set(&phba->num_cmd_success, 0);
38577+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
38578+ atomic_set_unchecked(&phba->num_cmd_success, 0);
38579 }
38580
38581 /**
38582diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
38583index af763ea..41904f7 100644
38584--- a/drivers/scsi/pmcraid.c
38585+++ b/drivers/scsi/pmcraid.c
38586@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
38587 res->scsi_dev = scsi_dev;
38588 scsi_dev->hostdata = res;
38589 res->change_detected = 0;
38590- atomic_set(&res->read_failures, 0);
38591- atomic_set(&res->write_failures, 0);
38592+ atomic_set_unchecked(&res->read_failures, 0);
38593+ atomic_set_unchecked(&res->write_failures, 0);
38594 rc = 0;
38595 }
38596 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
38597@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
38598
38599 /* If this was a SCSI read/write command keep count of errors */
38600 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
38601- atomic_inc(&res->read_failures);
38602+ atomic_inc_unchecked(&res->read_failures);
38603 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
38604- atomic_inc(&res->write_failures);
38605+ atomic_inc_unchecked(&res->write_failures);
38606
38607 if (!RES_IS_GSCSI(res->cfg_entry) &&
38608 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
38609@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
38610 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38611 * hrrq_id assigned here in queuecommand
38612 */
38613- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38614+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38615 pinstance->num_hrrq;
38616 cmd->cmd_done = pmcraid_io_done;
38617
38618@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
38619 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
38620 * hrrq_id assigned here in queuecommand
38621 */
38622- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
38623+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
38624 pinstance->num_hrrq;
38625
38626 if (request_size) {
38627@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
38628
38629 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
38630 /* add resources only after host is added into system */
38631- if (!atomic_read(&pinstance->expose_resources))
38632+ if (!atomic_read_unchecked(&pinstance->expose_resources))
38633 return;
38634
38635 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
38636@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
38637 init_waitqueue_head(&pinstance->reset_wait_q);
38638
38639 atomic_set(&pinstance->outstanding_cmds, 0);
38640- atomic_set(&pinstance->last_message_id, 0);
38641- atomic_set(&pinstance->expose_resources, 0);
38642+ atomic_set_unchecked(&pinstance->last_message_id, 0);
38643+ atomic_set_unchecked(&pinstance->expose_resources, 0);
38644
38645 INIT_LIST_HEAD(&pinstance->free_res_q);
38646 INIT_LIST_HEAD(&pinstance->used_res_q);
38647@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
38648 /* Schedule worker thread to handle CCN and take care of adding and
38649 * removing devices to OS
38650 */
38651- atomic_set(&pinstance->expose_resources, 1);
38652+ atomic_set_unchecked(&pinstance->expose_resources, 1);
38653 schedule_work(&pinstance->worker_q);
38654 return rc;
38655
38656diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
38657index e1d150f..6c6df44 100644
38658--- a/drivers/scsi/pmcraid.h
38659+++ b/drivers/scsi/pmcraid.h
38660@@ -748,7 +748,7 @@ struct pmcraid_instance {
38661 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
38662
38663 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
38664- atomic_t last_message_id;
38665+ atomic_unchecked_t last_message_id;
38666
38667 /* configuration table */
38668 struct pmcraid_config_table *cfg_table;
38669@@ -777,7 +777,7 @@ struct pmcraid_instance {
38670 atomic_t outstanding_cmds;
38671
38672 /* should add/delete resources to mid-layer now ?*/
38673- atomic_t expose_resources;
38674+ atomic_unchecked_t expose_resources;
38675
38676
38677
38678@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
38679 struct pmcraid_config_table_entry_ext cfg_entry_ext;
38680 };
38681 struct scsi_device *scsi_dev; /* Link scsi_device structure */
38682- atomic_t read_failures; /* count of failed READ commands */
38683- atomic_t write_failures; /* count of failed WRITE commands */
38684+ atomic_unchecked_t read_failures; /* count of failed READ commands */
38685+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
38686
38687 /* To indicate add/delete/modify during CCN */
38688 u8 change_detected;
38689diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
38690index 83d7984..a27d947 100644
38691--- a/drivers/scsi/qla2xxx/qla_attr.c
38692+++ b/drivers/scsi/qla2xxx/qla_attr.c
38693@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
38694 return 0;
38695 }
38696
38697-struct fc_function_template qla2xxx_transport_functions = {
38698+fc_function_template_no_const qla2xxx_transport_functions = {
38699
38700 .show_host_node_name = 1,
38701 .show_host_port_name = 1,
38702@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
38703 .bsg_timeout = qla24xx_bsg_timeout,
38704 };
38705
38706-struct fc_function_template qla2xxx_transport_vport_functions = {
38707+fc_function_template_no_const qla2xxx_transport_vport_functions = {
38708
38709 .show_host_node_name = 1,
38710 .show_host_port_name = 1,
38711diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
38712index 6acb397..d86e3e0 100644
38713--- a/drivers/scsi/qla2xxx/qla_gbl.h
38714+++ b/drivers/scsi/qla2xxx/qla_gbl.h
38715@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
38716 struct device_attribute;
38717 extern struct device_attribute *qla2x00_host_attrs[];
38718 struct fc_function_template;
38719-extern struct fc_function_template qla2xxx_transport_functions;
38720-extern struct fc_function_template qla2xxx_transport_vport_functions;
38721+extern fc_function_template_no_const qla2xxx_transport_functions;
38722+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
38723 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
38724 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
38725 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
38726diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
38727index f4b1fc8..a1ce4dd 100644
38728--- a/drivers/scsi/qla2xxx/qla_os.c
38729+++ b/drivers/scsi/qla2xxx/qla_os.c
38730@@ -1462,8 +1462,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
38731 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
38732 /* Ok, a 64bit DMA mask is applicable. */
38733 ha->flags.enable_64bit_addressing = 1;
38734- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
38735- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
38736+ pax_open_kernel();
38737+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
38738+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
38739+ pax_close_kernel();
38740 return;
38741 }
38742 }
38743diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
38744index 329d553..f20d31d 100644
38745--- a/drivers/scsi/qla4xxx/ql4_def.h
38746+++ b/drivers/scsi/qla4xxx/ql4_def.h
38747@@ -273,7 +273,7 @@ struct ddb_entry {
38748 * (4000 only) */
38749 atomic_t relogin_timer; /* Max Time to wait for
38750 * relogin to complete */
38751- atomic_t relogin_retry_count; /* Num of times relogin has been
38752+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
38753 * retried */
38754 uint32_t default_time2wait; /* Default Min time between
38755 * relogins (+aens) */
38756diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
38757index fbc546e..c7d1b48 100644
38758--- a/drivers/scsi/qla4xxx/ql4_os.c
38759+++ b/drivers/scsi/qla4xxx/ql4_os.c
38760@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
38761 */
38762 if (!iscsi_is_session_online(cls_sess)) {
38763 /* Reset retry relogin timer */
38764- atomic_inc(&ddb_entry->relogin_retry_count);
38765+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
38766 DEBUG2(ql4_printk(KERN_INFO, ha,
38767 "%s: index[%d] relogin timed out-retrying"
38768 " relogin (%d), retry (%d)\n", __func__,
38769 ddb_entry->fw_ddb_index,
38770- atomic_read(&ddb_entry->relogin_retry_count),
38771+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
38772 ddb_entry->default_time2wait + 4));
38773 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
38774 atomic_set(&ddb_entry->retry_relogin_timer,
38775@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
38776
38777 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
38778 atomic_set(&ddb_entry->relogin_timer, 0);
38779- atomic_set(&ddb_entry->relogin_retry_count, 0);
38780+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
38781 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
38782 ddb_entry->default_relogin_timeout =
38783 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
38784diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
38785index 2c0d0ec..4e8681a 100644
38786--- a/drivers/scsi/scsi.c
38787+++ b/drivers/scsi/scsi.c
38788@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
38789 unsigned long timeout;
38790 int rtn = 0;
38791
38792- atomic_inc(&cmd->device->iorequest_cnt);
38793+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38794
38795 /* check if the device is still usable */
38796 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
38797diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
38798index 9032e91..7a805d0 100644
38799--- a/drivers/scsi/scsi_lib.c
38800+++ b/drivers/scsi/scsi_lib.c
38801@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
38802 shost = sdev->host;
38803 scsi_init_cmd_errh(cmd);
38804 cmd->result = DID_NO_CONNECT << 16;
38805- atomic_inc(&cmd->device->iorequest_cnt);
38806+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
38807
38808 /*
38809 * SCSI request completion path will do scsi_device_unbusy(),
38810@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
38811
38812 INIT_LIST_HEAD(&cmd->eh_entry);
38813
38814- atomic_inc(&cmd->device->iodone_cnt);
38815+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
38816 if (cmd->result)
38817- atomic_inc(&cmd->device->ioerr_cnt);
38818+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
38819
38820 disposition = scsi_decide_disposition(cmd);
38821 if (disposition != SUCCESS &&
38822diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
38823index 931a7d9..0c2a754 100644
38824--- a/drivers/scsi/scsi_sysfs.c
38825+++ b/drivers/scsi/scsi_sysfs.c
38826@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
38827 char *buf) \
38828 { \
38829 struct scsi_device *sdev = to_scsi_device(dev); \
38830- unsigned long long count = atomic_read(&sdev->field); \
38831+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
38832 return snprintf(buf, 20, "0x%llx\n", count); \
38833 } \
38834 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
38835diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
38836index 84a1fdf..693b0d6 100644
38837--- a/drivers/scsi/scsi_tgt_lib.c
38838+++ b/drivers/scsi/scsi_tgt_lib.c
38839@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
38840 int err;
38841
38842 dprintk("%lx %u\n", uaddr, len);
38843- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
38844+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
38845 if (err) {
38846 /*
38847 * TODO: need to fixup sg_tablesize, max_segment_size,
38848diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
38849index e894ca7..de9d7660 100644
38850--- a/drivers/scsi/scsi_transport_fc.c
38851+++ b/drivers/scsi/scsi_transport_fc.c
38852@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
38853 * Netlink Infrastructure
38854 */
38855
38856-static atomic_t fc_event_seq;
38857+static atomic_unchecked_t fc_event_seq;
38858
38859 /**
38860 * fc_get_event_number - Obtain the next sequential FC event number
38861@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
38862 u32
38863 fc_get_event_number(void)
38864 {
38865- return atomic_add_return(1, &fc_event_seq);
38866+ return atomic_add_return_unchecked(1, &fc_event_seq);
38867 }
38868 EXPORT_SYMBOL(fc_get_event_number);
38869
38870@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
38871 {
38872 int error;
38873
38874- atomic_set(&fc_event_seq, 0);
38875+ atomic_set_unchecked(&fc_event_seq, 0);
38876
38877 error = transport_class_register(&fc_host_class);
38878 if (error)
38879@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
38880 char *cp;
38881
38882 *val = simple_strtoul(buf, &cp, 0);
38883- if ((*cp && (*cp != '\n')) || (*val < 0))
38884+ if (*cp && (*cp != '\n'))
38885 return -EINVAL;
38886 /*
38887 * Check for overflow; dev_loss_tmo is u32
38888diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
38889index 31969f2..2b348f0 100644
38890--- a/drivers/scsi/scsi_transport_iscsi.c
38891+++ b/drivers/scsi/scsi_transport_iscsi.c
38892@@ -79,7 +79,7 @@ struct iscsi_internal {
38893 struct transport_container session_cont;
38894 };
38895
38896-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
38897+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
38898 static struct workqueue_struct *iscsi_eh_timer_workq;
38899
38900 static DEFINE_IDA(iscsi_sess_ida);
38901@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
38902 int err;
38903
38904 ihost = shost->shost_data;
38905- session->sid = atomic_add_return(1, &iscsi_session_nr);
38906+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
38907
38908 if (target_id == ISCSI_MAX_TARGET) {
38909 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
38910@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
38911 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
38912 ISCSI_TRANSPORT_VERSION);
38913
38914- atomic_set(&iscsi_session_nr, 0);
38915+ atomic_set_unchecked(&iscsi_session_nr, 0);
38916
38917 err = class_register(&iscsi_transport_class);
38918 if (err)
38919diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
38920index 21a045e..ec89e03 100644
38921--- a/drivers/scsi/scsi_transport_srp.c
38922+++ b/drivers/scsi/scsi_transport_srp.c
38923@@ -33,7 +33,7 @@
38924 #include "scsi_transport_srp_internal.h"
38925
38926 struct srp_host_attrs {
38927- atomic_t next_port_id;
38928+ atomic_unchecked_t next_port_id;
38929 };
38930 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
38931
38932@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
38933 struct Scsi_Host *shost = dev_to_shost(dev);
38934 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
38935
38936- atomic_set(&srp_host->next_port_id, 0);
38937+ atomic_set_unchecked(&srp_host->next_port_id, 0);
38938 return 0;
38939 }
38940
38941@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
38942 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
38943 rport->roles = ids->roles;
38944
38945- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
38946+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
38947 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
38948
38949 transport_setup_device(&rport->dev);
38950diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
38951index a45e12a..d9120cb 100644
38952--- a/drivers/scsi/sd.c
38953+++ b/drivers/scsi/sd.c
38954@@ -2899,7 +2899,7 @@ static int sd_probe(struct device *dev)
38955 sdkp->disk = gd;
38956 sdkp->index = index;
38957 atomic_set(&sdkp->openers, 0);
38958- atomic_set(&sdkp->device->ioerr_cnt, 0);
38959+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
38960
38961 if (!sdp->request_queue->rq_timeout) {
38962 if (sdp->type != TYPE_MOD)
38963diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
38964index be2c9a6..275525c 100644
38965--- a/drivers/scsi/sg.c
38966+++ b/drivers/scsi/sg.c
38967@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
38968 sdp->disk->disk_name,
38969 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
38970 NULL,
38971- (char *)arg);
38972+ (char __user *)arg);
38973 case BLKTRACESTART:
38974 return blk_trace_startstop(sdp->device->request_queue, 1);
38975 case BLKTRACESTOP:
38976diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
38977index 84c2861..ece0a31 100644
38978--- a/drivers/spi/spi.c
38979+++ b/drivers/spi/spi.c
38980@@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
38981 EXPORT_SYMBOL_GPL(spi_bus_unlock);
38982
38983 /* portable code must never pass more than 32 bytes */
38984-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
38985+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
38986
38987 static u8 *buf;
38988
38989diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
38990index 34afc16..ffe44dd 100644
38991--- a/drivers/staging/octeon/ethernet-rx.c
38992+++ b/drivers/staging/octeon/ethernet-rx.c
38993@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
38994 /* Increment RX stats for virtual ports */
38995 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38996 #ifdef CONFIG_64BIT
38997- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38998- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38999+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
39000+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
39001 #else
39002- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
39003- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
39004+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
39005+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
39006 #endif
39007 }
39008 netif_receive_skb(skb);
39009@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
39010 dev->name);
39011 */
39012 #ifdef CONFIG_64BIT
39013- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
39014+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
39015 #else
39016- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
39017+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
39018 #endif
39019 dev_kfree_skb_irq(skb);
39020 }
39021diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
39022index 683bedc..86dba9a 100644
39023--- a/drivers/staging/octeon/ethernet.c
39024+++ b/drivers/staging/octeon/ethernet.c
39025@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
39026 * since the RX tasklet also increments it.
39027 */
39028 #ifdef CONFIG_64BIT
39029- atomic64_add(rx_status.dropped_packets,
39030- (atomic64_t *)&priv->stats.rx_dropped);
39031+ atomic64_add_unchecked(rx_status.dropped_packets,
39032+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
39033 #else
39034- atomic_add(rx_status.dropped_packets,
39035- (atomic_t *)&priv->stats.rx_dropped);
39036+ atomic_add_unchecked(rx_status.dropped_packets,
39037+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
39038 #endif
39039 }
39040
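
The octeon conversions above route wrap-tolerant statistics through the *_unchecked atomic variants, which the PaX REFCOUNT overflow detector deliberately ignores; only reference counts, where overflow enables use-after-free, keep the checked flavor. A user-space analogue of a wrap-tolerant counter using C11 atomics (names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong rx_packets; /* statistic: wrapping is harmless */

static void stats_inc(atomic_ulong *ctr)
{
        /* relaxed, modular add -- the moral equivalent of atomic64_add_unchecked() */
        atomic_fetch_add_explicit(ctr, 1, memory_order_relaxed);
}

int main(void)
{
        stats_inc(&rx_packets);
        printf("rx_packets=%lu\n", atomic_load(&rx_packets));
        return 0;
}
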
39041diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
39042index a2b7e03..aaf3630 100644
39043--- a/drivers/staging/ramster/tmem.c
39044+++ b/drivers/staging/ramster/tmem.c
39045@@ -50,25 +50,25 @@
39046 * A tmem host implementation must use this function to register callbacks
39047 * for memory allocation.
39048 */
39049-static struct tmem_hostops tmem_hostops;
39050+static struct tmem_hostops *tmem_hostops;
39051
39052 static void tmem_objnode_tree_init(void);
39053
39054 void tmem_register_hostops(struct tmem_hostops *m)
39055 {
39056 tmem_objnode_tree_init();
39057- tmem_hostops = *m;
39058+ tmem_hostops = m;
39059 }
39060
39061 /*
39062 * A tmem host implementation must use this function to register
39063 * callbacks for a page-accessible memory (PAM) implementation.
39064 */
39065-static struct tmem_pamops tmem_pamops;
39066+static struct tmem_pamops *tmem_pamops;
39067
39068 void tmem_register_pamops(struct tmem_pamops *m)
39069 {
39070- tmem_pamops = *m;
39071+ tmem_pamops = m;
39072 }
39073
39074 /*
39075@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
39076 obj->pampd_count = 0;
39077 #ifdef CONFIG_RAMSTER
39078 if (tmem_pamops.new_obj != NULL)
39079- (*tmem_pamops.new_obj)(obj);
39080+ (tmem_pamops->new_obj)(obj);
39081 #endif
39082 SET_SENTINEL(obj, OBJ);
39083
39084@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
39085 rbnode = rb_next(rbnode);
39086 tmem_pampd_destroy_all_in_obj(obj, true);
39087 tmem_obj_free(obj, hb);
39088- (*tmem_hostops.obj_free)(obj, pool);
39089+ (tmem_hostops->obj_free)(obj, pool);
39090 }
39091 spin_unlock(&hb->lock);
39092 }
39093@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
39094 ASSERT_SENTINEL(obj, OBJ);
39095 BUG_ON(obj->pool == NULL);
39096 ASSERT_SENTINEL(obj->pool, POOL);
39097- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
39098+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
39099 if (unlikely(objnode == NULL))
39100 goto out;
39101 objnode->obj = obj;
39102@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
39103 ASSERT_SENTINEL(pool, POOL);
39104 objnode->obj->objnode_count--;
39105 objnode->obj = NULL;
39106- (*tmem_hostops.objnode_free)(objnode, pool);
39107+ (tmem_hostops->objnode_free)(objnode, pool);
39108 }
39109
39110 /*
39111@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
39112 void *old_pampd = *(void **)slot;
39113 *(void **)slot = new_pampd;
39114 if (!no_free)
39115- (*tmem_pamops.free)(old_pampd, obj->pool,
39116+ (tmem_pamops->free)(old_pampd, obj->pool,
39117 NULL, 0, false);
39118 ret = new_pampd;
39119 }
39120@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
39121 if (objnode->slots[i]) {
39122 if (ht == 1) {
39123 obj->pampd_count--;
39124- (*tmem_pamops.free)(objnode->slots[i],
39125+ (tmem_pamops->free)(objnode->slots[i],
39126 obj->pool, NULL, 0, true);
39127 objnode->slots[i] = NULL;
39128 continue;
39129@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
39130 return;
39131 if (obj->objnode_tree_height == 0) {
39132 obj->pampd_count--;
39133- (*tmem_pamops.free)(obj->objnode_tree_root,
39134+ (tmem_pamops->free)(obj->objnode_tree_root,
39135 obj->pool, NULL, 0, true);
39136 } else {
39137 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
39138@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
39139 obj->objnode_tree_root = NULL;
39140 #ifdef CONFIG_RAMSTER
39141 if (tmem_pamops.free_obj != NULL)
39142- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
39143+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
39144 #endif
39145 }
39146
39147@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39148 /* if found, is a dup put, flush the old one */
39149 pampd_del = tmem_pampd_delete_from_obj(obj, index);
39150 BUG_ON(pampd_del != pampd);
39151- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
39152+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
39153 if (obj->pampd_count == 0) {
39154 objnew = obj;
39155 objfound = NULL;
39156@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39157 pampd = NULL;
39158 }
39159 } else {
39160- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
39161+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
39162 if (unlikely(obj == NULL)) {
39163 ret = -ENOMEM;
39164 goto out;
39165@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39166 if (unlikely(ret == -ENOMEM))
39167 /* may have partially built objnode tree ("stump") */
39168 goto delete_and_free;
39169- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
39170+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
39171 goto out;
39172
39173 delete_and_free:
39174 (void)tmem_pampd_delete_from_obj(obj, index);
39175 if (pampd)
39176- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
39177+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
39178 if (objnew) {
39179 tmem_obj_free(objnew, hb);
39180- (*tmem_hostops.obj_free)(objnew, pool);
39181+ (tmem_hostops->obj_free)(objnew, pool);
39182 }
39183 out:
39184 spin_unlock(&hb->lock);
39185@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
39186 if (pampd != NULL) {
39187 BUG_ON(obj == NULL);
39188 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
39189- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
39190+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
39191 } else if (delete) {
39192 BUG_ON(obj == NULL);
39193 (void)tmem_pampd_delete_from_obj(obj, index);
39194@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
39195 int ret = 0;
39196
39197 if (!is_ephemeral(pool))
39198- new_pampd = (*tmem_pamops.repatriate_preload)(
39199+ new_pampd = (tmem_pamops->repatriate_preload)(
39200 old_pampd, pool, oidp, index, &intransit);
39201 if (intransit)
39202 ret = -EAGAIN;
39203@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
39204 /* must release the hb->lock else repatriate can't sleep */
39205 spin_unlock(&hb->lock);
39206 if (!intransit)
39207- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
39208+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
39209 oidp, index, free, data);
39210 if (ret == -EAGAIN) {
39211 /* rare I think, but should cond_resched()??? */
39212@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
39213 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
39214 /* if we bug here, pamops wasn't properly set up for ramster */
39215 BUG_ON(tmem_pamops.replace_in_obj == NULL);
39216- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
39217+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
39218 out:
39219 spin_unlock(&hb->lock);
39220 return ret;
39221@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
39222 if (free) {
39223 if (obj->pampd_count == 0) {
39224 tmem_obj_free(obj, hb);
39225- (*tmem_hostops.obj_free)(obj, pool);
39226+ (tmem_hostops->obj_free)(obj, pool);
39227 obj = NULL;
39228 }
39229 }
39230 if (free)
39231- ret = (*tmem_pamops.get_data_and_free)(
39232+ ret = (tmem_pamops->get_data_and_free)(
39233 data, sizep, raw, pampd, pool, oidp, index);
39234 else
39235- ret = (*tmem_pamops.get_data)(
39236+ ret = (tmem_pamops->get_data)(
39237 data, sizep, raw, pampd, pool, oidp, index);
39238 if (ret < 0)
39239 goto out;
39240@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
39241 pampd = tmem_pampd_delete_from_obj(obj, index);
39242 if (pampd == NULL)
39243 goto out;
39244- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
39245+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
39246 if (obj->pampd_count == 0) {
39247 tmem_obj_free(obj, hb);
39248- (*tmem_hostops.obj_free)(obj, pool);
39249+ (tmem_hostops->obj_free)(obj, pool);
39250 }
39251 ret = 0;
39252
39253@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
39254 goto out;
39255 tmem_pampd_destroy_all_in_obj(obj, false);
39256 tmem_obj_free(obj, hb);
39257- (*tmem_hostops.obj_free)(obj, pool);
39258+ (tmem_hostops->obj_free)(obj, pool);
39259 ret = 0;
39260
39261 out:
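
The ramster change above stops copying the caller's ops table into a writable static and instead stores a pointer to it, so the function pointers can live in the caller's (potentially read-only) object. A minimal sketch of the pattern, with invented names rather than the tmem API:

#include <stdio.h>
#include <stdlib.h>

struct hostops {
        void *(*obj_alloc)(void);
        void (*obj_free)(void *);
};

static const struct hostops *hostops; /* pointer, not a mutable copy */

static void register_hostops(const struct hostops *m)
{
        hostops = m; /* was: static_copy = *m; */
}

static void *my_alloc(void) { return malloc(16); }
static void my_free(void *p) { free(p); }
static const struct hostops my_ops = { my_alloc, my_free };

int main(void)
{
        register_hostops(&my_ops);
        void *o = hostops->obj_alloc();
        hostops->obj_free(o);
        return 0;
}
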
39262diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
39263index dc23395..cf7e9b1 100644
39264--- a/drivers/staging/rtl8712/rtl871x_io.h
39265+++ b/drivers/staging/rtl8712/rtl871x_io.h
39266@@ -108,7 +108,7 @@ struct _io_ops {
39267 u8 *pmem);
39268 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
39269 u8 *pmem);
39270-};
39271+} __no_const;
39272
39273 struct io_req {
39274 struct list_head list;
39275diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
39276index 180c963..1f18377 100644
39277--- a/drivers/staging/sbe-2t3e3/netdev.c
39278+++ b/drivers/staging/sbe-2t3e3/netdev.c
39279@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39280 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
39281
39282 if (rlen)
39283- if (copy_to_user(data, &resp, rlen))
39284+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
39285 return -EFAULT;
39286
39287 return 0;
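
The sbe-2t3e3 fix above refuses to copy more bytes to user space than the on-stack response actually holds, closing a kernel stack disclosure if the card reports an oversized rlen. A user-space analogue of the bounded copy, with memcpy standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct resp { char data[16]; };

static int copy_resp(char *dst, size_t dst_len,
                     const struct resp *resp, size_t rlen)
{
        /* mirrors "if (rlen > sizeof resp || copy_to_user(...))" */
        if (rlen > sizeof(*resp) || rlen > dst_len)
                return -EFAULT;
        memcpy(dst, resp, rlen);
        return 0;
}

int main(void)
{
        struct resp r = { "ok" };
        char out[16];

        printf("%d\n", copy_resp(out, sizeof(out), &r, sizeof(r))); /* 0 */
        printf("%d\n", copy_resp(out, sizeof(out), &r, 64));        /* -EFAULT */
        return 0;
}
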
39288diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
39289index c66b8b3..a4a035b 100644
39290--- a/drivers/staging/usbip/vhci.h
39291+++ b/drivers/staging/usbip/vhci.h
39292@@ -83,7 +83,7 @@ struct vhci_hcd {
39293 unsigned resuming:1;
39294 unsigned long re_timeout;
39295
39296- atomic_t seqnum;
39297+ atomic_unchecked_t seqnum;
39298
39299 /*
39300 * NOTE:
39301diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
39302index 620d1be..1cd6711 100644
39303--- a/drivers/staging/usbip/vhci_hcd.c
39304+++ b/drivers/staging/usbip/vhci_hcd.c
39305@@ -471,7 +471,7 @@ static void vhci_tx_urb(struct urb *urb)
39306 return;
39307 }
39308
39309- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
39310+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
39311 if (priv->seqnum == 0xffff)
39312 dev_info(&urb->dev->dev, "seqnum max\n");
39313
39314@@ -723,7 +723,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
39315 return -ENOMEM;
39316 }
39317
39318- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
39319+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
39320 if (unlink->seqnum == 0xffff)
39321 pr_info("seqnum max\n");
39322
39323@@ -924,7 +924,7 @@ static int vhci_start(struct usb_hcd *hcd)
39324 vdev->rhport = rhport;
39325 }
39326
39327- atomic_set(&vhci->seqnum, 0);
39328+ atomic_set_unchecked(&vhci->seqnum, 0);
39329 spin_lock_init(&vhci->lock);
39330
39331 hcd->power_budget = 0; /* no limit */
39332diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
39333index f0eaf04..5a82e06 100644
39334--- a/drivers/staging/usbip/vhci_rx.c
39335+++ b/drivers/staging/usbip/vhci_rx.c
39336@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
39337 if (!urb) {
39338 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
39339 pr_info("max seqnum %d\n",
39340- atomic_read(&the_controller->seqnum));
39341+ atomic_read_unchecked(&the_controller->seqnum));
39342 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
39343 return;
39344 }
39345diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
39346index 67b1b88..6392fe9 100644
39347--- a/drivers/staging/vt6655/hostap.c
39348+++ b/drivers/staging/vt6655/hostap.c
39349@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
39350 *
39351 */
39352
39353+static net_device_ops_no_const apdev_netdev_ops;
39354+
39355 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39356 {
39357 PSDevice apdev_priv;
39358 struct net_device *dev = pDevice->dev;
39359 int ret;
39360- const struct net_device_ops apdev_netdev_ops = {
39361- .ndo_start_xmit = pDevice->tx_80211,
39362- };
39363
39364 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39365
39366@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39367 *apdev_priv = *pDevice;
39368 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39369
39370+ /* only half broken now */
39371+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39372 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39373
39374 pDevice->apdev->type = ARPHRD_IEEE80211;
39375diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
39376index 0a73d40..6fda560 100644
39377--- a/drivers/staging/vt6656/hostap.c
39378+++ b/drivers/staging/vt6656/hostap.c
39379@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
39380 *
39381 */
39382
39383+static net_device_ops_no_const apdev_netdev_ops;
39384+
39385 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39386 {
39387 PSDevice apdev_priv;
39388 struct net_device *dev = pDevice->dev;
39389 int ret;
39390- const struct net_device_ops apdev_netdev_ops = {
39391- .ndo_start_xmit = pDevice->tx_80211,
39392- };
39393
39394 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
39395
39396@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
39397 *apdev_priv = *pDevice;
39398 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
39399
39400+ /* only half broken now */
39401+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
39402 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
39403
39404 pDevice->apdev->type = ARPHRD_IEEE80211;
39405diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
39406index 56c8e60..1920c63 100644
39407--- a/drivers/staging/zcache/tmem.c
39408+++ b/drivers/staging/zcache/tmem.c
39409@@ -39,7 +39,7 @@
39410 * A tmem host implementation must use this function to register callbacks
39411 * for memory allocation.
39412 */
39413-static struct tmem_hostops tmem_hostops;
39414+static tmem_hostops_no_const tmem_hostops;
39415
39416 static void tmem_objnode_tree_init(void);
39417
39418@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
39419 * A tmem host implementation must use this function to register
39420 * callbacks for a page-accessible memory (PAM) implementation
39421 */
39422-static struct tmem_pamops tmem_pamops;
39423+static tmem_pamops_no_const tmem_pamops;
39424
39425 void tmem_register_pamops(struct tmem_pamops *m)
39426 {
39427diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
39428index 0d4aa82..f7832d4 100644
39429--- a/drivers/staging/zcache/tmem.h
39430+++ b/drivers/staging/zcache/tmem.h
39431@@ -180,6 +180,7 @@ struct tmem_pamops {
39432 void (*new_obj)(struct tmem_obj *);
39433 int (*replace_in_obj)(void *, struct tmem_obj *);
39434 };
39435+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
39436 extern void tmem_register_pamops(struct tmem_pamops *m);
39437
39438 /* memory allocation methods provided by the host implementation */
39439@@ -189,6 +190,7 @@ struct tmem_hostops {
39440 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
39441 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
39442 };
39443+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
39444 extern void tmem_register_hostops(struct tmem_hostops *m);
39445
39446 /* core tmem accessor functions */
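
The tmem.h typedefs above exist because the PaX constify plugin forces structs that consist of function pointers to be const; tagging a typedef __no_const opts the one static instance that must remain writable until registration out of that treatment. A plain-C approximation (no plugin, invented names):

#include <stdlib.h>

struct pamops { void (*free_fn)(void *); };
typedef struct pamops pamops_no_const; /* kernel adds __no_const here */

static pamops_no_const pamops_slot; /* writable registration slot */

static void register_pamops(const struct pamops *m)
{
        pamops_slot = *m; /* copied once, at registration time */
}

int main(void)
{
        const struct pamops ops = { free };
        register_pamops(&ops);
        pamops_slot.free_fn(malloc(8));
        return 0;
}
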
39447diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
39448index 13fe16c..cbdc39a 100644
39449--- a/drivers/target/target_core_transport.c
39450+++ b/drivers/target/target_core_transport.c
39451@@ -1085,7 +1085,7 @@ struct se_device *transport_add_device_to_core_hba(
39452 spin_lock_init(&dev->se_port_lock);
39453 spin_lock_init(&dev->se_tmr_lock);
39454 spin_lock_init(&dev->qf_cmd_lock);
39455- atomic_set(&dev->dev_ordered_id, 0);
39456+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
39457
39458 se_dev_set_default_attribs(dev, dev_limits);
39459
39460@@ -1275,7 +1275,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
39461 * Used to determine when ORDERED commands should go from
39462 * Dormant to Active status.
39463 */
39464- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
39465+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
39466 smp_mb__after_atomic_inc();
39467 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
39468 cmd->se_ordered_id, cmd->sam_task_attr,
39469diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
39470index 0a6a0bc..5501b06 100644
39471--- a/drivers/tty/cyclades.c
39472+++ b/drivers/tty/cyclades.c
39473@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
39474 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
39475 info->port.count);
39476 #endif
39477- info->port.count++;
39478+ atomic_inc(&info->port.count);
39479 #ifdef CY_DEBUG_COUNT
39480 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
39481- current->pid, info->port.count);
39482+ current->pid, atomic_read(&info->port.count));
39483 #endif
39484
39485 /*
39486@@ -3989,7 +3989,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
39487 for (j = 0; j < cy_card[i].nports; j++) {
39488 info = &cy_card[i].ports[j];
39489
39490- if (info->port.count) {
39491+ if (atomic_read(&info->port.count)) {
39492 /* XXX is the ldisc num worth this? */
39493 struct tty_struct *tty;
39494 struct tty_ldisc *ld;
39495diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
39496index 13ee53b..418d164 100644
39497--- a/drivers/tty/hvc/hvc_console.c
39498+++ b/drivers/tty/hvc/hvc_console.c
39499@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
39500
39501 spin_lock_irqsave(&hp->port.lock, flags);
39502 /* Check and then increment for fast path open. */
39503- if (hp->port.count++ > 0) {
39504+ if (atomic_inc_return(&hp->port.count) > 1) {
39505 spin_unlock_irqrestore(&hp->port.lock, flags);
39506 hvc_kick();
39507 return 0;
39508@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39509
39510 spin_lock_irqsave(&hp->port.lock, flags);
39511
39512- if (--hp->port.count == 0) {
39513+ if (atomic_dec_return(&hp->port.count) == 0) {
39514 spin_unlock_irqrestore(&hp->port.lock, flags);
39515 /* We are done with the tty pointer now. */
39516 tty_port_tty_set(&hp->port, NULL);
39517@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
39518 */
39519 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
39520 } else {
39521- if (hp->port.count < 0)
39522+ if (atomic_read(&hp->port.count) < 0)
39523 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
39524- hp->vtermno, hp->port.count);
39525+ hp->vtermno, atomic_read(&hp->port.count));
39526 spin_unlock_irqrestore(&hp->port.lock, flags);
39527 }
39528 }
39529@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
39530 * open->hangup case this can be called after the final close so prevent
39531 * that from happening for now.
39532 */
39533- if (hp->port.count <= 0) {
39534+ if (atomic_read(&hp->port.count) <= 0) {
39535 spin_unlock_irqrestore(&hp->port.lock, flags);
39536 return;
39537 }
39538
39539- hp->port.count = 0;
39540+ atomic_set(&hp->port.count, 0);
39541 spin_unlock_irqrestore(&hp->port.lock, flags);
39542 tty_port_tty_set(&hp->port, NULL);
39543
39544@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
39545 return -EPIPE;
39546
39547 /* FIXME what's this (unprotected) check for? */
39548- if (hp->port.count <= 0)
39549+ if (atomic_read(&hp->port.count) <= 0)
39550 return -EIO;
39551
39552 spin_lock_irqsave(&hp->lock, flags);
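
The hvc_console open path above turns the post-increment test "if (hp->port.count++ > 0)" into "if (atomic_inc_return(&hp->port.count) > 1)"; the predicates are equivalent because atomic_inc_return() yields the value after the increment. A C11 sketch of the same transformation:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static int port_open(void)
{
        /* old value > 0 is the same condition as new value > 1 */
        if (atomic_fetch_add(&port_count, 1) + 1 > 1)
                return 0; /* fast path: someone already opened the port */
        return 1;         /* first opener does the real setup */
}

int main(void)
{
        int first = port_open();
        int second = port_open();

        printf("%d %d\n", first, second); /* prints: 1 0 */
        return 0;
}
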
39553diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
39554index cab5c7a..4cc66ea 100644
39555--- a/drivers/tty/hvc/hvcs.c
39556+++ b/drivers/tty/hvc/hvcs.c
39557@@ -83,6 +83,7 @@
39558 #include <asm/hvcserver.h>
39559 #include <asm/uaccess.h>
39560 #include <asm/vio.h>
39561+#include <asm/local.h>
39562
39563 /*
39564 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
39565@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
39566
39567 spin_lock_irqsave(&hvcsd->lock, flags);
39568
39569- if (hvcsd->port.count > 0) {
39570+ if (atomic_read(&hvcsd->port.count) > 0) {
39571 spin_unlock_irqrestore(&hvcsd->lock, flags);
39572 printk(KERN_INFO "HVCS: vterm state unchanged. "
39573 "The hvcs device node is still in use.\n");
39574@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
39575 }
39576 }
39577
39578- hvcsd->port.count = 0;
39579+ atomic_set(&hvcsd->port.count, 0);
39580 hvcsd->port.tty = tty;
39581 tty->driver_data = hvcsd;
39582
39583@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
39584 unsigned long flags;
39585
39586 spin_lock_irqsave(&hvcsd->lock, flags);
39587- hvcsd->port.count++;
39588+ atomic_inc(&hvcsd->port.count);
39589 hvcsd->todo_mask |= HVCS_SCHED_READ;
39590 spin_unlock_irqrestore(&hvcsd->lock, flags);
39591
39592@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39593 hvcsd = tty->driver_data;
39594
39595 spin_lock_irqsave(&hvcsd->lock, flags);
39596- if (--hvcsd->port.count == 0) {
39597+ if (atomic_dec_and_test(&hvcsd->port.count)) {
39598
39599 vio_disable_interrupts(hvcsd->vdev);
39600
39601@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
39602
39603 free_irq(irq, hvcsd);
39604 return;
39605- } else if (hvcsd->port.count < 0) {
39606+ } else if (atomic_read(&hvcsd->port.count) < 0) {
39607 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
39608 " is missmanaged.\n",
39609- hvcsd->vdev->unit_address, hvcsd->port.count);
39610+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
39611 }
39612
39613 spin_unlock_irqrestore(&hvcsd->lock, flags);
39614@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39615
39616 spin_lock_irqsave(&hvcsd->lock, flags);
39617 /* Preserve this so that we know how many kref refs to put */
39618- temp_open_count = hvcsd->port.count;
39619+ temp_open_count = atomic_read(&hvcsd->port.count);
39620
39621 /*
39622 * Don't kref put inside the spinlock because the destruction
39623@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
39624 tty->driver_data = NULL;
39625 hvcsd->port.tty = NULL;
39626
39627- hvcsd->port.count = 0;
39628+ atomic_set(&hvcsd->port.count, 0);
39629
39630 /* This will drop any buffered data on the floor which is OK in a hangup
39631 * scenario. */
39632@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
39633 * the middle of a write operation? This is a crummy place to do this
39634 * but we want to keep it all in the spinlock.
39635 */
39636- if (hvcsd->port.count <= 0) {
39637+ if (atomic_read(&hvcsd->port.count) <= 0) {
39638 spin_unlock_irqrestore(&hvcsd->lock, flags);
39639 return -ENODEV;
39640 }
39641@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
39642 {
39643 struct hvcs_struct *hvcsd = tty->driver_data;
39644
39645- if (!hvcsd || hvcsd->port.count <= 0)
39646+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
39647 return 0;
39648
39649 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
39650diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
39651index 160f0ad..588b853 100644
39652--- a/drivers/tty/ipwireless/tty.c
39653+++ b/drivers/tty/ipwireless/tty.c
39654@@ -29,6 +29,7 @@
39655 #include <linux/tty_driver.h>
39656 #include <linux/tty_flip.h>
39657 #include <linux/uaccess.h>
39658+#include <asm/local.h>
39659
39660 #include "tty.h"
39661 #include "network.h"
39662@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39663 mutex_unlock(&tty->ipw_tty_mutex);
39664 return -ENODEV;
39665 }
39666- if (tty->port.count == 0)
39667+ if (atomic_read(&tty->port.count) == 0)
39668 tty->tx_bytes_queued = 0;
39669
39670- tty->port.count++;
39671+ atomic_inc(&tty->port.count);
39672
39673 tty->port.tty = linux_tty;
39674 linux_tty->driver_data = tty;
39675@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
39676
39677 static void do_ipw_close(struct ipw_tty *tty)
39678 {
39679- tty->port.count--;
39680-
39681- if (tty->port.count == 0) {
39682+ if (atomic_dec_return(&tty->port.count) == 0) {
39683 struct tty_struct *linux_tty = tty->port.tty;
39684
39685 if (linux_tty != NULL) {
39686@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
39687 return;
39688
39689 mutex_lock(&tty->ipw_tty_mutex);
39690- if (tty->port.count == 0) {
39691+ if (atomic_read(&tty->port.count) == 0) {
39692 mutex_unlock(&tty->ipw_tty_mutex);
39693 return;
39694 }
39695@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
39696 return;
39697 }
39698
39699- if (!tty->port.count) {
39700+ if (!atomic_read(&tty->port.count)) {
39701 mutex_unlock(&tty->ipw_tty_mutex);
39702 return;
39703 }
39704@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
39705 return -ENODEV;
39706
39707 mutex_lock(&tty->ipw_tty_mutex);
39708- if (!tty->port.count) {
39709+ if (!atomic_read(&tty->port.count)) {
39710 mutex_unlock(&tty->ipw_tty_mutex);
39711 return -EINVAL;
39712 }
39713@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
39714 if (!tty)
39715 return -ENODEV;
39716
39717- if (!tty->port.count)
39718+ if (!atomic_read(&tty->port.count))
39719 return -EINVAL;
39720
39721 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
39722@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
39723 if (!tty)
39724 return 0;
39725
39726- if (!tty->port.count)
39727+ if (!atomic_read(&tty->port.count))
39728 return 0;
39729
39730 return tty->tx_bytes_queued;
39731@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
39732 if (!tty)
39733 return -ENODEV;
39734
39735- if (!tty->port.count)
39736+ if (!atomic_read(&tty->port.count))
39737 return -EINVAL;
39738
39739 return get_control_lines(tty);
39740@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
39741 if (!tty)
39742 return -ENODEV;
39743
39744- if (!tty->port.count)
39745+ if (!atomic_read(&tty->port.count))
39746 return -EINVAL;
39747
39748 return set_control_lines(tty, set, clear);
39749@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
39750 if (!tty)
39751 return -ENODEV;
39752
39753- if (!tty->port.count)
39754+ if (!atomic_read(&tty->port.count))
39755 return -EINVAL;
39756
39757 /* FIXME: Exactly how is the tty object locked here .. */
39758@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
39759 * are gone */
39760 mutex_lock(&ttyj->ipw_tty_mutex);
39761 }
39762- while (ttyj->port.count)
39763+ while (atomic_read(&ttyj->port.count))
39764 do_ipw_close(ttyj);
39765 ipwireless_disassociate_network_ttys(network,
39766 ttyj->channel_idx);
39767diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
39768index 56e616b..9d9f10a 100644
39769--- a/drivers/tty/moxa.c
39770+++ b/drivers/tty/moxa.c
39771@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
39772 }
39773
39774 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
39775- ch->port.count++;
39776+ atomic_inc(&ch->port.count);
39777 tty->driver_data = ch;
39778 tty_port_tty_set(&ch->port, tty);
39779 mutex_lock(&ch->port.mutex);
39780diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
39781index 1e8e8ce..a9efc93 100644
39782--- a/drivers/tty/n_gsm.c
39783+++ b/drivers/tty/n_gsm.c
39784@@ -1638,7 +1638,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
39785 kref_init(&dlci->ref);
39786 mutex_init(&dlci->mutex);
39787 dlci->fifo = &dlci->_fifo;
39788- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
39789+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
39790 kfree(dlci);
39791 return NULL;
39792 }
39793@@ -2925,7 +2925,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
39794 struct gsm_dlci *dlci = tty->driver_data;
39795 struct tty_port *port = &dlci->port;
39796
39797- port->count++;
39798+ atomic_inc(&port->count);
39799 dlci_get(dlci);
39800 dlci_get(dlci->gsm->dlci[0]);
39801 mux_get(dlci->gsm);
39802diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
39803index 8c0b7b4..e88f052 100644
39804--- a/drivers/tty/n_tty.c
39805+++ b/drivers/tty/n_tty.c
39806@@ -2142,6 +2142,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
39807 {
39808 *ops = tty_ldisc_N_TTY;
39809 ops->owner = NULL;
39810- ops->refcount = ops->flags = 0;
39811+ atomic_set(&ops->refcount, 0);
39812+ ops->flags = 0;
39813 }
39814 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
39815diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
39816index 8cf8d0a..4ef9ed0 100644
39817--- a/drivers/tty/pty.c
39818+++ b/drivers/tty/pty.c
39819@@ -730,8 +730,10 @@ static void __init unix98_pty_init(void)
39820 panic("Couldn't register Unix98 pts driver");
39821
39822 /* Now create the /dev/ptmx special device */
39823+ pax_open_kernel();
39824 tty_default_fops(&ptmx_fops);
39825- ptmx_fops.open = ptmx_open;
39826+ *(void **)&ptmx_fops.open = ptmx_open;
39827+ pax_close_kernel();
39828
39829 cdev_init(&ptmx_cdev, &ptmx_fops);
39830 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
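
With tty_default_fops() now copying into what is otherwise read-only data, the pty change above brackets the single ptmx_open assignment with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A rough user-space analogue using mprotect() on a page-aligned table (the in-kernel mechanism is different; this only illustrates the open/patch/close shape):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct fops { int (*open)(void); };

static int ptmx_open_stub(void) { return 42; }

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        struct fops *f = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (f == MAP_FAILED)
                return 1;

        mprotect(f, pg, PROT_READ);              /* table is now read-only */

        mprotect(f, pg, PROT_READ | PROT_WRITE); /* ~ pax_open_kernel() */
        f->open = ptmx_open_stub;                /* ~ *(void **)&ptmx_fops.open = ... */
        mprotect(f, pg, PROT_READ);              /* ~ pax_close_kernel() */

        printf("%d\n", f->open());
        return 0;
}
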
39831diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
39832index 9700d34..df7520c 100644
39833--- a/drivers/tty/rocket.c
39834+++ b/drivers/tty/rocket.c
39835@@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39836 tty->driver_data = info;
39837 tty_port_tty_set(port, tty);
39838
39839- if (port->count++ == 0) {
39840+ if (atomic_inc_return(&port->count) == 1) {
39841 atomic_inc(&rp_num_ports_open);
39842
39843 #ifdef ROCKET_DEBUG_OPEN
39844@@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
39845 #endif
39846 }
39847 #ifdef ROCKET_DEBUG_OPEN
39848- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
39849+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
39850 #endif
39851
39852 /*
39853@@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
39854 spin_unlock_irqrestore(&info->port.lock, flags);
39855 return;
39856 }
39857- if (info->port.count)
39858+ if (atomic_read(&info->port.count))
39859 atomic_dec(&rp_num_ports_open);
39860 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
39861 spin_unlock_irqrestore(&info->port.lock, flags);
39862diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
39863index 1002054..dd644a8 100644
39864--- a/drivers/tty/serial/kgdboc.c
39865+++ b/drivers/tty/serial/kgdboc.c
39866@@ -24,8 +24,9 @@
39867 #define MAX_CONFIG_LEN 40
39868
39869 static struct kgdb_io kgdboc_io_ops;
39870+static struct kgdb_io kgdboc_io_ops_console;
39871
39872-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
39873+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
39874 static int configured = -1;
39875
39876 static char config[MAX_CONFIG_LEN];
39877@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
39878 kgdboc_unregister_kbd();
39879 if (configured == 1)
39880 kgdb_unregister_io_module(&kgdboc_io_ops);
39881+ else if (configured == 2)
39882+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
39883 }
39884
39885 static int configure_kgdboc(void)
39886@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
39887 int err;
39888 char *cptr = config;
39889 struct console *cons;
39890+ int is_console = 0;
39891
39892 err = kgdboc_option_setup(config);
39893 if (err || !strlen(config) || isspace(config[0]))
39894 goto noconfig;
39895
39896 err = -ENODEV;
39897- kgdboc_io_ops.is_console = 0;
39898 kgdb_tty_driver = NULL;
39899
39900 kgdboc_use_kms = 0;
39901@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
39902 int idx;
39903 if (cons->device && cons->device(cons, &idx) == p &&
39904 idx == tty_line) {
39905- kgdboc_io_ops.is_console = 1;
39906+ is_console = 1;
39907 break;
39908 }
39909 cons = cons->next;
39910@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
39911 kgdb_tty_line = tty_line;
39912
39913 do_register:
39914- err = kgdb_register_io_module(&kgdboc_io_ops);
39915+ if (is_console) {
39916+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
39917+ configured = 2;
39918+ } else {
39919+ err = kgdb_register_io_module(&kgdboc_io_ops);
39920+ configured = 1;
39921+ }
39922 if (err)
39923 goto noconfig;
39924
39925@@ -205,8 +214,6 @@ do_register:
39926 if (err)
39927 goto nmi_con_failed;
39928
39929- configured = 1;
39930-
39931 return 0;
39932
39933 nmi_con_failed:
39934@@ -223,7 +230,7 @@ noconfig:
39935 static int __init init_kgdboc(void)
39936 {
39937 /* Already configured? */
39938- if (configured == 1)
39939+ if (configured >= 1)
39940 return 0;
39941
39942 return configure_kgdboc();
39943@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
39944 if (config[len - 1] == '\n')
39945 config[len - 1] = '\0';
39946
39947- if (configured == 1)
39948+ if (configured >= 1)
39949 cleanup_kgdboc();
39950
39951 /* Go and configure with the new params. */
39952@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
39953 .post_exception = kgdboc_post_exp_handler,
39954 };
39955
39956+static struct kgdb_io kgdboc_io_ops_console = {
39957+ .name = "kgdboc",
39958+ .read_char = kgdboc_get_char,
39959+ .write_char = kgdboc_put_char,
39960+ .pre_exception = kgdboc_pre_exp_handler,
39961+ .post_exception = kgdboc_post_exp_handler,
39962+ .is_console = 1
39963+};
39964+
39965 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
39966 /* This is only available if kgdboc is a built in for early debugging */
39967 static int __init kgdboc_early_init(char *opt)
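
Instead of flipping .is_console inside one writable kgdb_io struct at runtime, the kgdboc rework above defines two fully initialized tables and picks one at registration, recording the choice as configured = 1 or 2 so cleanup_kgdboc() unregisters the right one. The selection pattern in miniature (illustrative types, not the kgdb API):

#include <stdio.h>

struct io_ops { const char *name; int is_console; };

static const struct io_ops ops_plain   = { "kgdboc", 0 };
static const struct io_ops ops_console = { "kgdboc", 1 };

static const struct io_ops *registered;
static int configured; /* 0 = none, 1 = plain, 2 = console */

static void do_register(int is_console)
{
        registered = is_console ? &ops_console : &ops_plain;
        configured = is_console ? 2 : 1;
}

int main(void)
{
        do_register(1);
        printf("%s is_console=%d configured=%d\n",
               registered->name, registered->is_console, configured);
        return 0;
}
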
39968diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
39969index 7f04717..0f3794f 100644
39970--- a/drivers/tty/serial/samsung.c
39971+++ b/drivers/tty/serial/samsung.c
39972@@ -445,11 +445,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
39973 }
39974 }
39975
39976+static int s3c64xx_serial_startup(struct uart_port *port);
39977 static int s3c24xx_serial_startup(struct uart_port *port)
39978 {
39979 struct s3c24xx_uart_port *ourport = to_ourport(port);
39980 int ret;
39981
39982+ /* Startup sequence is different for s3c64xx and higher SoC's */
39983+ if (s3c24xx_serial_has_interrupt_mask(port))
39984+ return s3c64xx_serial_startup(port);
39985+
39986 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
39987 port->mapbase, port->membase);
39988
39989@@ -1115,10 +1120,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
39990 /* setup info for port */
39991 port->dev = &platdev->dev;
39992
39993- /* Startup sequence is different for s3c64xx and higher SoC's */
39994- if (s3c24xx_serial_has_interrupt_mask(port))
39995- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
39996-
39997 port->uartclk = 1;
39998
39999 if (cfg->uart_flags & UPF_CONS_FLOW) {
40000diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
40001index 0fcfd98..8244fce 100644
40002--- a/drivers/tty/serial/serial_core.c
40003+++ b/drivers/tty/serial/serial_core.c
40004@@ -1408,7 +1408,7 @@ static void uart_hangup(struct tty_struct *tty)
40005 uart_flush_buffer(tty);
40006 uart_shutdown(tty, state);
40007 spin_lock_irqsave(&port->lock, flags);
40008- port->count = 0;
40009+ atomic_set(&port->count, 0);
40010 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
40011 spin_unlock_irqrestore(&port->lock, flags);
40012 tty_port_tty_set(port, NULL);
40013@@ -1504,7 +1504,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
40014 goto end;
40015 }
40016
40017- port->count++;
40018+ atomic_inc(&port->count);
40019 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
40020 retval = -ENXIO;
40021 goto err_dec_count;
40022@@ -1531,7 +1531,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
40023 /*
40024 * Make sure the device is in D0 state.
40025 */
40026- if (port->count == 1)
40027+ if (atomic_read(&port->count) == 1)
40028 uart_change_pm(state, 0);
40029
40030 /*
40031@@ -1549,7 +1549,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
40032 end:
40033 return retval;
40034 err_dec_count:
40035- port->count--;
40036+ atomic_dec(&port->count);
40037 mutex_unlock(&port->mutex);
40038 goto end;
40039 }
40040diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
40041index 70e3a52..5742052 100644
40042--- a/drivers/tty/synclink.c
40043+++ b/drivers/tty/synclink.c
40044@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
40045
40046 if (debug_level >= DEBUG_LEVEL_INFO)
40047 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
40048- __FILE__,__LINE__, info->device_name, info->port.count);
40049+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
40050
40051 if (tty_port_close_start(&info->port, tty, filp) == 0)
40052 goto cleanup;
40053@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
40054 cleanup:
40055 if (debug_level >= DEBUG_LEVEL_INFO)
40056 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
40057- tty->driver->name, info->port.count);
40058+ tty->driver->name, atomic_read(&info->port.count));
40059
40060 } /* end of mgsl_close() */
40061
40062@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
40063
40064 mgsl_flush_buffer(tty);
40065 shutdown(info);
40066-
40067- info->port.count = 0;
40068+
40069+ atomic_set(&info->port.count, 0);
40070 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40071 info->port.tty = NULL;
40072
40073@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
40074
40075 if (debug_level >= DEBUG_LEVEL_INFO)
40076 printk("%s(%d):block_til_ready before block on %s count=%d\n",
40077- __FILE__,__LINE__, tty->driver->name, port->count );
40078+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40079
40080 spin_lock_irqsave(&info->irq_spinlock, flags);
40081 if (!tty_hung_up_p(filp)) {
40082 extra_count = true;
40083- port->count--;
40084+ atomic_dec(&port->count);
40085 }
40086 spin_unlock_irqrestore(&info->irq_spinlock, flags);
40087 port->blocked_open++;
40088@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
40089
40090 if (debug_level >= DEBUG_LEVEL_INFO)
40091 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
40092- __FILE__,__LINE__, tty->driver->name, port->count );
40093+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40094
40095 tty_unlock(tty);
40096 schedule();
40097@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
40098
40099 /* FIXME: Racy on hangup during close wait */
40100 if (extra_count)
40101- port->count++;
40102+ atomic_inc(&port->count);
40103 port->blocked_open--;
40104
40105 if (debug_level >= DEBUG_LEVEL_INFO)
40106 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
40107- __FILE__,__LINE__, tty->driver->name, port->count );
40108+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40109
40110 if (!retval)
40111 port->flags |= ASYNC_NORMAL_ACTIVE;
40112@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
40113
40114 if (debug_level >= DEBUG_LEVEL_INFO)
40115 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
40116- __FILE__,__LINE__,tty->driver->name, info->port.count);
40117+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
40118
40119 /* If port is closing, signal caller to try again */
40120 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
40121@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
40122 spin_unlock_irqrestore(&info->netlock, flags);
40123 goto cleanup;
40124 }
40125- info->port.count++;
40126+ atomic_inc(&info->port.count);
40127 spin_unlock_irqrestore(&info->netlock, flags);
40128
40129- if (info->port.count == 1) {
40130+ if (atomic_read(&info->port.count) == 1) {
40131 /* 1st open on this device, init hardware */
40132 retval = startup(info);
40133 if (retval < 0)
40134@@ -3451,8 +3451,8 @@ cleanup:
40135 if (retval) {
40136 if (tty->count == 1)
40137 info->port.tty = NULL; /* tty layer will release tty struct */
40138- if(info->port.count)
40139- info->port.count--;
40140+ if (atomic_read(&info->port.count))
40141+ atomic_dec(&info->port.count);
40142 }
40143
40144 return retval;
40145@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40146 unsigned short new_crctype;
40147
40148 /* return error if TTY interface open */
40149- if (info->port.count)
40150+ if (atomic_read(&info->port.count))
40151 return -EBUSY;
40152
40153 switch (encoding)
40154@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
40155
40156 /* arbitrate between network and tty opens */
40157 spin_lock_irqsave(&info->netlock, flags);
40158- if (info->port.count != 0 || info->netcount != 0) {
40159+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40160 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40161 spin_unlock_irqrestore(&info->netlock, flags);
40162 return -EBUSY;
40163@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40164 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
40165
40166 /* return error if TTY interface open */
40167- if (info->port.count)
40168+ if (atomic_read(&info->port.count))
40169 return -EBUSY;
40170
40171 if (cmd != SIOCWANDEV)
40172diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
40173index b38e954..ce45b38 100644
40174--- a/drivers/tty/synclink_gt.c
40175+++ b/drivers/tty/synclink_gt.c
40176@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
40177 tty->driver_data = info;
40178 info->port.tty = tty;
40179
40180- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
40181+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
40182
40183 /* If port is closing, signal caller to try again */
40184 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
40185@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
40186 mutex_unlock(&info->port.mutex);
40187 goto cleanup;
40188 }
40189- info->port.count++;
40190+ atomic_inc(&info->port.count);
40191 spin_unlock_irqrestore(&info->netlock, flags);
40192
40193- if (info->port.count == 1) {
40194+ if (atomic_read(&info->port.count) == 1) {
40195 /* 1st open on this device, init hardware */
40196 retval = startup(info);
40197 if (retval < 0) {
40198@@ -716,8 +716,8 @@ cleanup:
40199 if (retval) {
40200 if (tty->count == 1)
40201 info->port.tty = NULL; /* tty layer will release tty struct */
40202- if(info->port.count)
40203- info->port.count--;
40204+ if(atomic_read(&info->port.count))
40205+ atomic_dec(&info->port.count);
40206 }
40207
40208 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
40209@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40210
40211 if (sanity_check(info, tty->name, "close"))
40212 return;
40213- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
40214+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
40215
40216 if (tty_port_close_start(&info->port, tty, filp) == 0)
40217 goto cleanup;
40218@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40219 tty_port_close_end(&info->port, tty);
40220 info->port.tty = NULL;
40221 cleanup:
40222- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
40223+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
40224 }
40225
40226 static void hangup(struct tty_struct *tty)
40227@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
40228 shutdown(info);
40229
40230 spin_lock_irqsave(&info->port.lock, flags);
40231- info->port.count = 0;
40232+ atomic_set(&info->port.count, 0);
40233 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40234 info->port.tty = NULL;
40235 spin_unlock_irqrestore(&info->port.lock, flags);
40236@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40237 unsigned short new_crctype;
40238
40239 /* return error if TTY interface open */
40240- if (info->port.count)
40241+ if (atomic_read(&info->port.count))
40242 return -EBUSY;
40243
40244 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
40245@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
40246
40247 /* arbitrate between network and tty opens */
40248 spin_lock_irqsave(&info->netlock, flags);
40249- if (info->port.count != 0 || info->netcount != 0) {
40250+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40251 DBGINFO(("%s hdlc_open busy\n", dev->name));
40252 spin_unlock_irqrestore(&info->netlock, flags);
40253 return -EBUSY;
40254@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40255 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
40256
40257 /* return error if TTY interface open */
40258- if (info->port.count)
40259+ if (atomic_read(&info->port.count))
40260 return -EBUSY;
40261
40262 if (cmd != SIOCWANDEV)
40263@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
40264 if (port == NULL)
40265 continue;
40266 spin_lock(&port->lock);
40267- if ((port->port.count || port->netcount) &&
40268+ if ((atomic_read(&port->port.count) || port->netcount) &&
40269 port->pending_bh && !port->bh_running &&
40270 !port->bh_requested) {
40271 DBGISR(("%s bh queued\n", port->device_name));
40272@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40273 spin_lock_irqsave(&info->lock, flags);
40274 if (!tty_hung_up_p(filp)) {
40275 extra_count = true;
40276- port->count--;
40277+ atomic_dec(&port->count);
40278 }
40279 spin_unlock_irqrestore(&info->lock, flags);
40280 port->blocked_open++;
40281@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40282 remove_wait_queue(&port->open_wait, &wait);
40283
40284 if (extra_count)
40285- port->count++;
40286+ atomic_inc(&port->count);
40287 port->blocked_open--;
40288
40289 if (!retval)
40290diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
40291index f17d9f3..27a041b 100644
40292--- a/drivers/tty/synclinkmp.c
40293+++ b/drivers/tty/synclinkmp.c
40294@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
40295
40296 if (debug_level >= DEBUG_LEVEL_INFO)
40297 printk("%s(%d):%s open(), old ref count = %d\n",
40298- __FILE__,__LINE__,tty->driver->name, info->port.count);
40299+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
40300
40301 /* If port is closing, signal caller to try again */
40302 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
40303@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
40304 spin_unlock_irqrestore(&info->netlock, flags);
40305 goto cleanup;
40306 }
40307- info->port.count++;
40308+ atomic_inc(&info->port.count);
40309 spin_unlock_irqrestore(&info->netlock, flags);
40310
40311- if (info->port.count == 1) {
40312+ if (atomic_read(&info->port.count) == 1) {
40313 /* 1st open on this device, init hardware */
40314 retval = startup(info);
40315 if (retval < 0)
40316@@ -797,8 +797,8 @@ cleanup:
40317 if (retval) {
40318 if (tty->count == 1)
40319 info->port.tty = NULL; /* tty layer will release tty struct */
40320- if(info->port.count)
40321- info->port.count--;
40322+ if(atomic_read(&info->port.count))
40323+ atomic_dec(&info->port.count);
40324 }
40325
40326 return retval;
40327@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40328
40329 if (debug_level >= DEBUG_LEVEL_INFO)
40330 printk("%s(%d):%s close() entry, count=%d\n",
40331- __FILE__,__LINE__, info->device_name, info->port.count);
40332+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
40333
40334 if (tty_port_close_start(&info->port, tty, filp) == 0)
40335 goto cleanup;
40336@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
40337 cleanup:
40338 if (debug_level >= DEBUG_LEVEL_INFO)
40339 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
40340- tty->driver->name, info->port.count);
40341+ tty->driver->name, atomic_read(&info->port.count));
40342 }
40343
40344 /* Called by tty_hangup() when a hangup is signaled.
40345@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
40346 shutdown(info);
40347
40348 spin_lock_irqsave(&info->port.lock, flags);
40349- info->port.count = 0;
40350+ atomic_set(&info->port.count, 0);
40351 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
40352 info->port.tty = NULL;
40353 spin_unlock_irqrestore(&info->port.lock, flags);
40354@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40355 unsigned short new_crctype;
40356
40357 /* return error if TTY interface open */
40358- if (info->port.count)
40359+ if (atomic_read(&info->port.count))
40360 return -EBUSY;
40361
40362 switch (encoding)
40363@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
40364
40365 /* arbitrate between network and tty opens */
40366 spin_lock_irqsave(&info->netlock, flags);
40367- if (info->port.count != 0 || info->netcount != 0) {
40368+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40369 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40370 spin_unlock_irqrestore(&info->netlock, flags);
40371 return -EBUSY;
40372@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40373 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
40374
40375 /* return error if TTY interface open */
40376- if (info->port.count)
40377+ if (atomic_read(&info->port.count))
40378 return -EBUSY;
40379
40380 if (cmd != SIOCWANDEV)
40381@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
40382 * do not request bottom half processing if the
40383 * device is not open in a normal mode.
40384 */
40385- if ( port && (port->port.count || port->netcount) &&
40386+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
40387 port->pending_bh && !port->bh_running &&
40388 !port->bh_requested ) {
40389 if ( debug_level >= DEBUG_LEVEL_ISR )
40390@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40391
40392 if (debug_level >= DEBUG_LEVEL_INFO)
40393 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
40394- __FILE__,__LINE__, tty->driver->name, port->count );
40395+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40396
40397 spin_lock_irqsave(&info->lock, flags);
40398 if (!tty_hung_up_p(filp)) {
40399 extra_count = true;
40400- port->count--;
40401+ atomic_dec(&port->count);
40402 }
40403 spin_unlock_irqrestore(&info->lock, flags);
40404 port->blocked_open++;
40405@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40406
40407 if (debug_level >= DEBUG_LEVEL_INFO)
40408 printk("%s(%d):%s block_til_ready() count=%d\n",
40409- __FILE__,__LINE__, tty->driver->name, port->count );
40410+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40411
40412 tty_unlock(tty);
40413 schedule();
40414@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
40415 remove_wait_queue(&port->open_wait, &wait);
40416
40417 if (extra_count)
40418- port->count++;
40419+ atomic_inc(&port->count);
40420 port->blocked_open--;
40421
40422 if (debug_level >= DEBUG_LEVEL_INFO)
40423 printk("%s(%d):%s block_til_ready() after, count=%d\n",
40424- __FILE__,__LINE__, tty->driver->name, port->count );
40425+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
40426
40427 if (!retval)
40428 port->flags |= ASYNC_NORMAL_ACTIVE;
40429diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
40430index 16ee6ce..bfcac57 100644
40431--- a/drivers/tty/sysrq.c
40432+++ b/drivers/tty/sysrq.c
40433@@ -866,7 +866,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
40434 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
40435 size_t count, loff_t *ppos)
40436 {
40437- if (count) {
40438+ if (count && capable(CAP_SYS_ADMIN)) {
40439 char c;
40440
40441 if (get_user(c, buf))
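
The sysrq change above makes /proc/sysrq-trigger honor a written character only when the writer holds CAP_SYS_ADMIN; the write itself still succeeds, it just does nothing for unprivileged callers. A loose user-space analogue with geteuid() standing in for capable():

#include <stdio.h>
#include <unistd.h>

static void write_trigger(char c)
{
        if (geteuid() != 0)     /* ~ !capable(CAP_SYS_ADMIN) */
                return;         /* silently ignore, as the patched check does */
        printf("sysrq '%c' handled\n", c);
}

int main(void)
{
        write_trigger('h');
        return 0;
}
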
40442diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
40443index 2ea176b..2877bc8 100644
40444--- a/drivers/tty/tty_io.c
40445+++ b/drivers/tty/tty_io.c
40446@@ -3395,7 +3395,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
40447
40448 void tty_default_fops(struct file_operations *fops)
40449 {
40450- *fops = tty_fops;
40451+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
40452 }
40453
40454 /*
40455diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
40456index 0f2a2c5..471e228 100644
40457--- a/drivers/tty/tty_ldisc.c
40458+++ b/drivers/tty/tty_ldisc.c
40459@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
40460 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
40461 struct tty_ldisc_ops *ldo = ld->ops;
40462
40463- ldo->refcount--;
40464+ atomic_dec(&ldo->refcount);
40465 module_put(ldo->owner);
40466 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40467
40468@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
40469 spin_lock_irqsave(&tty_ldisc_lock, flags);
40470 tty_ldiscs[disc] = new_ldisc;
40471 new_ldisc->num = disc;
40472- new_ldisc->refcount = 0;
40473+ atomic_set(&new_ldisc->refcount, 0);
40474 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40475
40476 return ret;
40477@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
40478 return -EINVAL;
40479
40480 spin_lock_irqsave(&tty_ldisc_lock, flags);
40481- if (tty_ldiscs[disc]->refcount)
40482+ if (atomic_read(&tty_ldiscs[disc]->refcount))
40483 ret = -EBUSY;
40484 else
40485 tty_ldiscs[disc] = NULL;
40486@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
40487 if (ldops) {
40488 ret = ERR_PTR(-EAGAIN);
40489 if (try_module_get(ldops->owner)) {
40490- ldops->refcount++;
40491+ atomic_inc(&ldops->refcount);
40492 ret = ldops;
40493 }
40494 }
40495@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
40496 unsigned long flags;
40497
40498 spin_lock_irqsave(&tty_ldisc_lock, flags);
40499- ldops->refcount--;
40500+ atomic_dec(&ldops->refcount);
40501 module_put(ldops->owner);
40502 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
40503 }
40504diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
40505index d7bdd8d..feaef30 100644
40506--- a/drivers/tty/tty_port.c
40507+++ b/drivers/tty/tty_port.c
40508@@ -202,7 +202,7 @@ void tty_port_hangup(struct tty_port *port)
40509 unsigned long flags;
40510
40511 spin_lock_irqsave(&port->lock, flags);
40512- port->count = 0;
40513+ atomic_set(&port->count, 0);
40514 port->flags &= ~ASYNC_NORMAL_ACTIVE;
40515 if (port->tty) {
40516 set_bit(TTY_IO_ERROR, &port->tty->flags);
40517@@ -328,7 +328,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40518 /* The port lock protects the port counts */
40519 spin_lock_irqsave(&port->lock, flags);
40520 if (!tty_hung_up_p(filp))
40521- port->count--;
40522+ atomic_dec(&port->count);
40523 port->blocked_open++;
40524 spin_unlock_irqrestore(&port->lock, flags);
40525
40526@@ -370,7 +370,7 @@ int tty_port_block_til_ready(struct tty_port *port,
40527 we must not mess that up further */
40528 spin_lock_irqsave(&port->lock, flags);
40529 if (!tty_hung_up_p(filp))
40530- port->count++;
40531+ atomic_inc(&port->count);
40532 port->blocked_open--;
40533 if (retval == 0)
40534 port->flags |= ASYNC_NORMAL_ACTIVE;
40535@@ -390,19 +390,19 @@ int tty_port_close_start(struct tty_port *port,
40536 return 0;
40537 }
40538
40539- if (tty->count == 1 && port->count != 1) {
40540+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
40541 printk(KERN_WARNING
40542 "tty_port_close_start: tty->count = 1 port count = %d.\n",
40543- port->count);
40544- port->count = 1;
40545+ atomic_read(&port->count));
40546+ atomic_set(&port->count, 1);
40547 }
40548- if (--port->count < 0) {
40549+ if (atomic_dec_return(&port->count) < 0) {
40550 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
40551- port->count);
40552- port->count = 0;
40553+ atomic_read(&port->count));
40554+ atomic_set(&port->count, 0);
40555 }
40556
40557- if (port->count) {
40558+ if (atomic_read(&port->count)) {
40559 spin_unlock_irqrestore(&port->lock, flags);
40560 if (port->ops->drop)
40561 port->ops->drop(port);
40562@@ -500,7 +500,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
40563 {
40564 spin_lock_irq(&port->lock);
40565 if (!tty_hung_up_p(filp))
40566- ++port->count;
40567+ atomic_inc(&port->count);
40568 spin_unlock_irq(&port->lock);
40569 tty_port_tty_set(port, tty);
40570
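Annotation: tty_port's open count gets the same atomic treatment; note how --port->count < 0 becomes atomic_dec_return() plus a clamp back to zero. A compact sketch of that clamped decrement in C11 (atomic_fetch_sub returns the old value, so subtracting one reproduces the kernel's atomic_dec_return result):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count = 1;

static int port_close_start(void)
{
	int newval = atomic_fetch_sub(&port_count, 1) - 1;	/* ~atomic_dec_return() */
	if (newval < 0) {
		fprintf(stderr, "close: count = %d\n", newval);
		atomic_store(&port_count, 0);			/* clamp, as the patch does */
		newval = 0;
	}
	return newval == 0;	/* true on the final close */
}

int main(void)
{
	printf("last close: %d\n", port_close_start());
	return 0;
}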
40571diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
40572index 681765b..d3ccdf2 100644
40573--- a/drivers/tty/vt/keyboard.c
40574+++ b/drivers/tty/vt/keyboard.c
40575@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
40576 kbd->kbdmode == VC_OFF) &&
40577 value != KVAL(K_SAK))
40578 return; /* SAK is allowed even in raw mode */
40579+
40580+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40581+ {
40582+ void *func = fn_handler[value];
40583+ if (func == fn_show_state || func == fn_show_ptregs ||
40584+ func == fn_show_mem)
40585+ return;
40586+ }
40587+#endif
40588+
40589 fn_handler[value](vc);
40590 }
40591
40592@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40593 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
40594 return -EFAULT;
40595
40596- if (!capable(CAP_SYS_TTY_CONFIG))
40597- perm = 0;
40598-
40599 switch (cmd) {
40600 case KDGKBENT:
40601 /* Ensure another thread doesn't free it under us */
40602@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
40603 spin_unlock_irqrestore(&kbd_event_lock, flags);
40604 return put_user(val, &user_kbe->kb_value);
40605 case KDSKBENT:
40606+ if (!capable(CAP_SYS_TTY_CONFIG))
40607+ perm = 0;
40608+
40609 if (!perm)
40610 return -EPERM;
40611 if (!i && v == K_NOSUCHMAP) {
40612@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40613 int i, j, k;
40614 int ret;
40615
40616- if (!capable(CAP_SYS_TTY_CONFIG))
40617- perm = 0;
40618-
40619 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
40620 if (!kbs) {
40621 ret = -ENOMEM;
40622@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
40623 kfree(kbs);
40624 return ((p && *p) ? -EOVERFLOW : 0);
40625 case KDSKBSENT:
40626+ if (!capable(CAP_SYS_TTY_CONFIG))
40627+ perm = 0;
40628+
40629 if (!perm) {
40630 ret = -EPERM;
40631 goto reterr;
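Annotation: two related changes in keyboard.c. The GRKERNSEC_PROC guard filters the Show-State/Show-Registers/Show-Memory SysRq handlers out of the keyboard path, and the CAP_SYS_TTY_CONFIG check moves from the top of each ioctl handler down into the KDSKBENT/KDSKBSENT branches, so reading keymap entries stays unprivileged while writing them is gated. A sketch of the relocated check (the _X names are illustrative stand-ins):

#include <stdio.h>

enum { KDGKBENT_X, KDSKBENT_X };	/* illustrative, not the real ioctl numbers */

static int do_kbd_ioctl(int cmd, int capable_tty_config)
{
	switch (cmd) {
	case KDGKBENT_X:		/* get: no capability required */
		return 0;
	case KDSKBENT_X:		/* set: gate it at the branch */
		if (!capable_tty_config)
			return -1;	/* -EPERM in the kernel */
		return 0;
	}
	return -1;
}

int main(void)
{
	printf("get unprivileged: %d\n", do_kbd_ioctl(KDGKBENT_X, 0));
	printf("set unprivileged: %d\n", do_kbd_ioctl(KDSKBENT_X, 0));
	return 0;
}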
40632diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
40633index 5110f36..8dc0a74 100644
40634--- a/drivers/uio/uio.c
40635+++ b/drivers/uio/uio.c
40636@@ -25,6 +25,7 @@
40637 #include <linux/kobject.h>
40638 #include <linux/cdev.h>
40639 #include <linux/uio_driver.h>
40640+#include <asm/local.h>
40641
40642 #define UIO_MAX_DEVICES (1U << MINORBITS)
40643
40644@@ -32,10 +33,10 @@ struct uio_device {
40645 struct module *owner;
40646 struct device *dev;
40647 int minor;
40648- atomic_t event;
40649+ atomic_unchecked_t event;
40650 struct fasync_struct *async_queue;
40651 wait_queue_head_t wait;
40652- int vma_count;
40653+ local_t vma_count;
40654 struct uio_info *info;
40655 struct kobject *map_dir;
40656 struct kobject *portio_dir;
40657@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
40658 struct device_attribute *attr, char *buf)
40659 {
40660 struct uio_device *idev = dev_get_drvdata(dev);
40661- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
40662+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
40663 }
40664
40665 static struct device_attribute uio_class_attributes[] = {
40666@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
40667 {
40668 struct uio_device *idev = info->uio_dev;
40669
40670- atomic_inc(&idev->event);
40671+ atomic_inc_unchecked(&idev->event);
40672 wake_up_interruptible(&idev->wait);
40673 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
40674 }
40675@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
40676 }
40677
40678 listener->dev = idev;
40679- listener->event_count = atomic_read(&idev->event);
40680+ listener->event_count = atomic_read_unchecked(&idev->event);
40681 filep->private_data = listener;
40682
40683 if (idev->info->open) {
40684@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
40685 return -EIO;
40686
40687 poll_wait(filep, &idev->wait, wait);
40688- if (listener->event_count != atomic_read(&idev->event))
40689+ if (listener->event_count != atomic_read_unchecked(&idev->event))
40690 return POLLIN | POLLRDNORM;
40691 return 0;
40692 }
40693@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
40694 do {
40695 set_current_state(TASK_INTERRUPTIBLE);
40696
40697- event_count = atomic_read(&idev->event);
40698+ event_count = atomic_read_unchecked(&idev->event);
40699 if (event_count != listener->event_count) {
40700 if (copy_to_user(buf, &event_count, count))
40701 retval = -EFAULT;
40702@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
40703 static void uio_vma_open(struct vm_area_struct *vma)
40704 {
40705 struct uio_device *idev = vma->vm_private_data;
40706- idev->vma_count++;
40707+ local_inc(&idev->vma_count);
40708 }
40709
40710 static void uio_vma_close(struct vm_area_struct *vma)
40711 {
40712 struct uio_device *idev = vma->vm_private_data;
40713- idev->vma_count--;
40714+ local_dec(&idev->vma_count);
40715 }
40716
40717 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40718@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
40719 idev->owner = owner;
40720 idev->info = info;
40721 init_waitqueue_head(&idev->wait);
40722- atomic_set(&idev->event, 0);
40723+ atomic_set_unchecked(&idev->event, 0);
40724
40725 ret = uio_get_minor(idev);
40726 if (ret)
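Annotation: the uio hunks show grsecurity's split between protected and deliberately unprotected counters. event becomes atomic_unchecked_t because it is a statistics counter that may legitimately wrap without being a refcount bug, while vma_count becomes local_t. A userspace approximation of the wrap-allowed half, using an unsigned atomic whose wraparound is well defined:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint event;	/* stats counter: wrapping is expected, not a bug */

static void event_notify(void)
{
	atomic_fetch_add(&event, 1);	/* ~atomic_inc_unchecked() */
}

int main(void)
{
	atomic_store(&event, UINT_MAX);
	event_notify();			/* wraps to 0 by design */
	printf("event = %u\n", atomic_load(&event));
	return 0;
}

The same pattern recurs below for the ATM rx/tx statistics, usbfs connect events, and USB urbnum counters.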
40727diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
40728index b7eb86a..36d28af 100644
40729--- a/drivers/usb/atm/cxacru.c
40730+++ b/drivers/usb/atm/cxacru.c
40731@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
40732 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
40733 if (ret < 2)
40734 return -EINVAL;
40735- if (index < 0 || index > 0x7f)
40736+ if (index > 0x7f)
40737 return -EINVAL;
40738 pos += tmp;
40739
40740diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
40741index 35f10bf..6a38a0b 100644
40742--- a/drivers/usb/atm/usbatm.c
40743+++ b/drivers/usb/atm/usbatm.c
40744@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40745 if (printk_ratelimit())
40746 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
40747 __func__, vpi, vci);
40748- atomic_inc(&vcc->stats->rx_err);
40749+ atomic_inc_unchecked(&vcc->stats->rx_err);
40750 return;
40751 }
40752
40753@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40754 if (length > ATM_MAX_AAL5_PDU) {
40755 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
40756 __func__, length, vcc);
40757- atomic_inc(&vcc->stats->rx_err);
40758+ atomic_inc_unchecked(&vcc->stats->rx_err);
40759 goto out;
40760 }
40761
40762@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40763 if (sarb->len < pdu_length) {
40764 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
40765 __func__, pdu_length, sarb->len, vcc);
40766- atomic_inc(&vcc->stats->rx_err);
40767+ atomic_inc_unchecked(&vcc->stats->rx_err);
40768 goto out;
40769 }
40770
40771 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
40772 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
40773 __func__, vcc);
40774- atomic_inc(&vcc->stats->rx_err);
40775+ atomic_inc_unchecked(&vcc->stats->rx_err);
40776 goto out;
40777 }
40778
40779@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40780 if (printk_ratelimit())
40781 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
40782 __func__, length);
40783- atomic_inc(&vcc->stats->rx_drop);
40784+ atomic_inc_unchecked(&vcc->stats->rx_drop);
40785 goto out;
40786 }
40787
40788@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
40789
40790 vcc->push(vcc, skb);
40791
40792- atomic_inc(&vcc->stats->rx);
40793+ atomic_inc_unchecked(&vcc->stats->rx);
40794 out:
40795 skb_trim(sarb, 0);
40796 }
40797@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
40798 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
40799
40800 usbatm_pop(vcc, skb);
40801- atomic_inc(&vcc->stats->tx);
40802+ atomic_inc_unchecked(&vcc->stats->tx);
40803
40804 skb = skb_dequeue(&instance->sndqueue);
40805 }
40806@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
40807 if (!left--)
40808 return sprintf(page,
40809 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
40810- atomic_read(&atm_dev->stats.aal5.tx),
40811- atomic_read(&atm_dev->stats.aal5.tx_err),
40812- atomic_read(&atm_dev->stats.aal5.rx),
40813- atomic_read(&atm_dev->stats.aal5.rx_err),
40814- atomic_read(&atm_dev->stats.aal5.rx_drop));
40815+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
40816+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
40817+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
40818+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
40819+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
40820
40821 if (!left--) {
40822 if (instance->disconnected)
40823diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
40824index f460de3..95ba1f6 100644
40825--- a/drivers/usb/core/devices.c
40826+++ b/drivers/usb/core/devices.c
40827@@ -126,7 +126,7 @@ static const char format_endpt[] =
40828 * time it gets called.
40829 */
40830 static struct device_connect_event {
40831- atomic_t count;
40832+ atomic_unchecked_t count;
40833 wait_queue_head_t wait;
40834 } device_event = {
40835 .count = ATOMIC_INIT(1),
40836@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
40837
40838 void usbfs_conn_disc_event(void)
40839 {
40840- atomic_add(2, &device_event.count);
40841+ atomic_add_unchecked(2, &device_event.count);
40842 wake_up(&device_event.wait);
40843 }
40844
40845@@ -647,7 +647,7 @@ static unsigned int usb_device_poll(struct file *file,
40846
40847 poll_wait(file, &device_event.wait, wait);
40848
40849- event_count = atomic_read(&device_event.count);
40850+ event_count = atomic_read_unchecked(&device_event.count);
40851 if (file->f_version != event_count) {
40852 file->f_version = event_count;
40853 return POLLIN | POLLRDNORM;
40854diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
40855index f034716..aed0368 100644
40856--- a/drivers/usb/core/hcd.c
40857+++ b/drivers/usb/core/hcd.c
40858@@ -1478,7 +1478,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40859 */
40860 usb_get_urb(urb);
40861 atomic_inc(&urb->use_count);
40862- atomic_inc(&urb->dev->urbnum);
40863+ atomic_inc_unchecked(&urb->dev->urbnum);
40864 usbmon_urb_submit(&hcd->self, urb);
40865
40866 /* NOTE requirements on root-hub callers (usbfs and the hub
40867@@ -1505,7 +1505,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
40868 urb->hcpriv = NULL;
40869 INIT_LIST_HEAD(&urb->urb_list);
40870 atomic_dec(&urb->use_count);
40871- atomic_dec(&urb->dev->urbnum);
40872+ atomic_dec_unchecked(&urb->dev->urbnum);
40873 if (atomic_read(&urb->reject))
40874 wake_up(&usb_kill_urb_queue);
40875 usb_put_urb(urb);
40876diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
40877index 818e4a0..0fc9589 100644
40878--- a/drivers/usb/core/sysfs.c
40879+++ b/drivers/usb/core/sysfs.c
40880@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
40881 struct usb_device *udev;
40882
40883 udev = to_usb_device(dev);
40884- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
40885+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
40886 }
40887 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
40888
40889diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
40890index cd8fb44..17fbe0c 100644
40891--- a/drivers/usb/core/usb.c
40892+++ b/drivers/usb/core/usb.c
40893@@ -397,7 +397,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
40894 set_dev_node(&dev->dev, dev_to_node(bus->controller));
40895 dev->state = USB_STATE_ATTACHED;
40896 dev->lpm_disable_count = 1;
40897- atomic_set(&dev->urbnum, 0);
40898+ atomic_set_unchecked(&dev->urbnum, 0);
40899
40900 INIT_LIST_HEAD(&dev->ep0.urb_list);
40901 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
40902diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
40903index 4bfa78a..902bfbd 100644
40904--- a/drivers/usb/early/ehci-dbgp.c
40905+++ b/drivers/usb/early/ehci-dbgp.c
40906@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
40907
40908 #ifdef CONFIG_KGDB
40909 static struct kgdb_io kgdbdbgp_io_ops;
40910-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
40911+static struct kgdb_io kgdbdbgp_io_ops_console;
40912+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
40913 #else
40914 #define dbgp_kgdb_mode (0)
40915 #endif
40916@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
40917 .write_char = kgdbdbgp_write_char,
40918 };
40919
40920+static struct kgdb_io kgdbdbgp_io_ops_console = {
40921+ .name = "kgdbdbgp",
40922+ .read_char = kgdbdbgp_read_char,
40923+ .write_char = kgdbdbgp_write_char,
40924+ .is_console = 1
40925+};
40926+
40927 static int kgdbdbgp_wait_time;
40928
40929 static int __init kgdbdbgp_parse_config(char *str)
40930@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
40931 ptr++;
40932 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
40933 }
40934- kgdb_register_io_module(&kgdbdbgp_io_ops);
40935- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
40936+ if (early_dbgp_console.index != -1)
40937+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
40938+ else
40939+ kgdb_register_io_module(&kgdbdbgp_io_ops);
40940
40941 return 0;
40942 }
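Annotation: rather than registering one kgdb_io and flipping its is_console field afterwards, which a constified ops structure forbids, the patch defines a second fully initialized instance and registers whichever one matches the boot configuration. The shape of that change, sketched:

#include <stdio.h>

struct io_ops { const char *name; int is_console; };

static const struct io_ops ops_plain   = { "dbgp", 0 };
static const struct io_ops ops_console = { "dbgp", 1 };

static const struct io_ops *registered;

static void do_register(int have_console)
{
	/* mirrors: register the _console instance when the early console exists */
	registered = have_console ? &ops_console : &ops_plain;
}

int main(void)
{
	do_register(1);
	printf("%s console=%d\n", registered->name, registered->is_console);
	return 0;
}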
40943diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
40944index f173952..83d6ec0 100644
40945--- a/drivers/usb/gadget/u_serial.c
40946+++ b/drivers/usb/gadget/u_serial.c
40947@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40948 spin_lock_irq(&port->port_lock);
40949
40950 /* already open? Great. */
40951- if (port->port.count) {
40952+ if (atomic_read(&port->port.count)) {
40953 status = 0;
40954- port->port.count++;
40955+ atomic_inc(&port->port.count);
40956
40957 /* currently opening/closing? wait ... */
40958 } else if (port->openclose) {
40959@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
40960 tty->driver_data = port;
40961 port->port.tty = tty;
40962
40963- port->port.count = 1;
40964+ atomic_set(&port->port.count, 1);
40965 port->openclose = false;
40966
40967 /* if connected, start the I/O stream */
40968@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40969
40970 spin_lock_irq(&port->port_lock);
40971
40972- if (port->port.count != 1) {
40973- if (port->port.count == 0)
40974+ if (atomic_read(&port->port.count) != 1) {
40975+ if (atomic_read(&port->port.count) == 0)
40976 WARN_ON(1);
40977 else
40978- --port->port.count;
40979+ atomic_dec(&port->port.count);
40980 goto exit;
40981 }
40982
40983@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
40984 * and sleep if necessary
40985 */
40986 port->openclose = true;
40987- port->port.count = 0;
40988+ atomic_set(&port->port.count, 0);
40989
40990 gser = port->port_usb;
40991 if (gser && gser->disconnect)
40992@@ -1157,7 +1157,7 @@ static int gs_closed(struct gs_port *port)
40993 int cond;
40994
40995 spin_lock_irq(&port->port_lock);
40996- cond = (port->port.count == 0) && !port->openclose;
40997+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
40998 spin_unlock_irq(&port->port_lock);
40999 return cond;
41000 }
41001@@ -1270,7 +1270,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
41002 /* if it's already open, start I/O ... and notify the serial
41003 * protocol about open/close status (connect/disconnect).
41004 */
41005- if (port->port.count) {
41006+ if (atomic_read(&port->port.count)) {
41007 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
41008 gs_start_io(port);
41009 if (gser->connect)
41010@@ -1317,7 +1317,7 @@ void gserial_disconnect(struct gserial *gser)
41011
41012 port->port_usb = NULL;
41013 gser->ioport = NULL;
41014- if (port->port.count > 0 || port->openclose) {
41015+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
41016 wake_up_interruptible(&port->drain_wait);
41017 if (port->port.tty)
41018 tty_hangup(port->port.tty);
41019@@ -1333,7 +1333,7 @@ void gserial_disconnect(struct gserial *gser)
41020
41021 /* finally, free any unused/unusable I/O buffers */
41022 spin_lock_irqsave(&port->port_lock, flags);
41023- if (port->port.count == 0 && !port->openclose)
41024+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
41025 gs_buf_free(&port->port_write_buf);
41026 gs_free_requests(gser->out, &port->read_pool, NULL);
41027 gs_free_requests(gser->out, &port->read_queue, NULL);
41028diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
41029index 5f3bcd3..bfca43f 100644
41030--- a/drivers/usb/serial/console.c
41031+++ b/drivers/usb/serial/console.c
41032@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
41033
41034 info->port = port;
41035
41036- ++port->port.count;
41037+ atomic_inc(&port->port.count);
41038 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
41039 if (serial->type->set_termios) {
41040 /*
41041@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
41042 }
41043 /* Now that any required fake tty operations are completed restore
41044 * the tty port count */
41045- --port->port.count;
41046+ atomic_dec(&port->port.count);
41047 /* The console is special in terms of closing the device so
41048 * indicate this port is now acting as a system console. */
41049 port->port.console = 1;
41050@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
41051 free_tty:
41052 kfree(tty);
41053 reset_open_count:
41054- port->port.count = 0;
41055+ atomic_set(&port->port.count, 0);
41056 usb_autopm_put_interface(serial->interface);
41057 error_get_interface:
41058 usb_serial_put(serial);
41059diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
41060index d6bea3e..60b250e 100644
41061--- a/drivers/usb/wusbcore/wa-hc.h
41062+++ b/drivers/usb/wusbcore/wa-hc.h
41063@@ -192,7 +192,7 @@ struct wahc {
41064 struct list_head xfer_delayed_list;
41065 spinlock_t xfer_list_lock;
41066 struct work_struct xfer_work;
41067- atomic_t xfer_id_count;
41068+ atomic_unchecked_t xfer_id_count;
41069 };
41070
41071
41072@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
41073 INIT_LIST_HEAD(&wa->xfer_delayed_list);
41074 spin_lock_init(&wa->xfer_list_lock);
41075 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
41076- atomic_set(&wa->xfer_id_count, 1);
41077+ atomic_set_unchecked(&wa->xfer_id_count, 1);
41078 }
41079
41080 /**
41081diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
41082index 57c01ab..8a05959 100644
41083--- a/drivers/usb/wusbcore/wa-xfer.c
41084+++ b/drivers/usb/wusbcore/wa-xfer.c
41085@@ -296,7 +296,7 @@ out:
41086 */
41087 static void wa_xfer_id_init(struct wa_xfer *xfer)
41088 {
41089- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
41090+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
41091 }
41092
41093 /*
41094diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
41095index dedaf81..b0f11ab 100644
41096--- a/drivers/vhost/vhost.c
41097+++ b/drivers/vhost/vhost.c
41098@@ -634,7 +634,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
41099 return 0;
41100 }
41101
41102-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
41103+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
41104 {
41105 struct file *eventfp, *filep = NULL;
41106 bool pollstart = false, pollstop = false;
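Annotation: the vhost change is a signedness fix. ioctl command numbers are encoded as unsigned int, and _IOW()-style values commonly have bit 31 set, so receiving one through a plain int flips the sign. A small demonstration; FAKE_IOC is an illustrative value, and the signed result of the cast is implementation-defined, though negative on common two's-complement ABIs:

#include <stdio.h>

#define FAKE_IOC 0xc008af00u	/* illustrative ioctl-style value with bit 31 set */

static void take_signed(int nr)        { printf("signed:   %d\n", nr); }
static void take_unsigned(unsigned nr) { printf("unsigned: %u\n", nr); }

int main(void)
{
	take_signed((int)FAKE_IOC);	/* prints a negative number on typical ABIs */
	take_unsigned(FAKE_IOC);	/* preserves the encoded command */
	return 0;
}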
41107diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
41108index 0fefa84..7a9d581 100644
41109--- a/drivers/video/aty/aty128fb.c
41110+++ b/drivers/video/aty/aty128fb.c
41111@@ -149,7 +149,7 @@ enum {
41112 };
41113
41114 /* Must match above enum */
41115-static char * const r128_family[] __devinitconst = {
41116+static const char * const r128_family[] __devinitconst = {
41117 "AGP",
41118 "PCI",
41119 "PRO AGP",
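Annotation: adding the second const makes the table itself immutable: const char * const [] is an array of const pointers to const char, so neither the strings nor the pointer slots can be written. Minimal illustration (the _demo name is hypothetical):

#include <stdio.h>

static const char * const r128_family_demo[] = { "AGP", "PCI", "PRO AGP" };

int main(void)
{
	/* r128_family_demo[1] = "X";  -- would not compile: the pointers are const */
	printf("%s\n", r128_family_demo[1]);
	return 0;
}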
41120diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
41121index 5c3960d..15cf8fc 100644
41122--- a/drivers/video/fbcmap.c
41123+++ b/drivers/video/fbcmap.c
41124@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
41125 rc = -ENODEV;
41126 goto out;
41127 }
41128- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
41129- !info->fbops->fb_setcmap)) {
41130+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
41131 rc = -EINVAL;
41132 goto out1;
41133 }
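Annotation: cmap->start is unsigned, so the dropped cmap->start < 0 comparison could never be true; it was dead code of the kind compilers flag with -Wtype-limits. The same class of always-false test, in miniature:

#include <stdio.h>

int main(void)
{
	unsigned int start = 0u - 1u;	/* wraps to UINT_MAX; never negative */
	if (start < 0)			/* always false for an unsigned type */
		puts("unreachable");
	printf("start = %u\n", start);
	return 0;
}

The cxacru and fbmem con2fb hunks elsewhere in this patch drop the same pattern of index < 0 tests on unsigned values.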
41134diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
41135index 3ff0105..7589d98 100644
41136--- a/drivers/video/fbmem.c
41137+++ b/drivers/video/fbmem.c
41138@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
41139 image->dx += image->width + 8;
41140 }
41141 } else if (rotate == FB_ROTATE_UD) {
41142- for (x = 0; x < num && image->dx >= 0; x++) {
41143+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
41144 info->fbops->fb_imageblit(info, image);
41145 image->dx -= image->width + 8;
41146 }
41147@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
41148 image->dy += image->height + 8;
41149 }
41150 } else if (rotate == FB_ROTATE_CCW) {
41151- for (x = 0; x < num && image->dy >= 0; x++) {
41152+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
41153 info->fbops->fb_imageblit(info, image);
41154 image->dy -= image->height + 8;
41155 }
41156@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
41157 return -EFAULT;
41158 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
41159 return -EINVAL;
41160- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
41161+ if (con2fb.framebuffer >= FB_MAX)
41162 return -EINVAL;
41163 if (!registered_fb[con2fb.framebuffer])
41164 request_module("fb%d", con2fb.framebuffer);
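Annotation: two fixes in fbmem.c. The (__s32) casts make the logo-rotation loops terminate, since image->dx/dy are unsigned, the old >= 0 test was always true, and the subtraction wraps rather than going negative; and another always-false < 0 test on the unsigned framebuffer index is dropped. The loop fix, reproduced in miniature (two's-complement conversion assumed, as on all Linux targets):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dx = 20;
	/* was effectively: while (dx >= 0) -- always true for unsigned dx */
	while ((int32_t)dx >= 0) {	/* mirrors the (__s32)image->dx cast */
		printf("blit at %u\n", dx);
		dx -= 16;		/* wraps below zero to a huge value */
	}
	return 0;			/* loop exits after two iterations */
}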
41165diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
41166index 7672d2e..b56437f 100644
41167--- a/drivers/video/i810/i810_accel.c
41168+++ b/drivers/video/i810/i810_accel.c
41169@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
41170 }
41171 }
41172 printk("ringbuffer lockup!!!\n");
41173+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
41174 i810_report_error(mmio);
41175 par->dev_flags |= LOCKUP;
41176 info->pixmap.scan_align = 1;
41177diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
41178index 3c14e43..eafa544 100644
41179--- a/drivers/video/logo/logo_linux_clut224.ppm
41180+++ b/drivers/video/logo/logo_linux_clut224.ppm
41181@@ -1,1604 +1,1123 @@
41182 P3
41183-# Standard 224-color Linux logo
41184 80 80
41185 255
[80x80 PPM pixel rows elided: this hunk replaces the 1604-line "Standard 224-color Linux logo" image data with a 1123-line replacement image; the raw RGB triplet rows carry no further reviewable content]
42169- 0 0 0 0 0 0 0 0 0 10 10 10
42170- 26 26 26 66 66 66 62 62 62 2 2 6
42171- 2 2 6 38 38 38 10 10 10 26 26 26
42172-238 238 238 253 253 253 253 253 253 253 253 253
42173-253 253 253 253 253 253 253 253 253 253 253 253
42174-253 253 253 253 253 253 231 231 231 238 238 238
42175-253 253 253 253 253 253 253 253 253 253 253 253
42176-253 253 253 253 253 253 253 253 253 253 253 253
42177-253 253 253 253 253 253 253 253 253 253 253 253
42178-253 253 253 253 253 253 253 253 253 253 253 253
42179-253 253 253 253 253 253 231 231 231 6 6 6
42180- 2 2 6 2 2 6 10 10 10 30 30 30
42181- 2 2 6 2 2 6 2 2 6 2 2 6
42182- 2 2 6 66 66 66 58 58 58 22 22 22
42183- 6 6 6 0 0 0 0 0 0 0 0 0
42184- 0 0 0 0 0 0 0 0 0 0 0 0
42185- 0 0 0 0 0 0 0 0 0 0 0 0
42186- 0 0 0 0 0 0 0 0 0 0 0 0
42187- 0 0 0 0 0 0 0 0 0 0 0 0
42188- 0 0 0 0 0 0 0 0 0 0 0 0
42189- 0 0 0 0 0 0 0 0 0 10 10 10
42190- 38 38 38 78 78 78 6 6 6 2 2 6
42191- 2 2 6 46 46 46 14 14 14 42 42 42
42192-246 246 246 253 253 253 253 253 253 253 253 253
42193-253 253 253 253 253 253 253 253 253 253 253 253
42194-253 253 253 253 253 253 231 231 231 242 242 242
42195-253 253 253 253 253 253 253 253 253 253 253 253
42196-253 253 253 253 253 253 253 253 253 253 253 253
42197-253 253 253 253 253 253 253 253 253 253 253 253
42198-253 253 253 253 253 253 253 253 253 253 253 253
42199-253 253 253 253 253 253 234 234 234 10 10 10
42200- 2 2 6 2 2 6 22 22 22 14 14 14
42201- 2 2 6 2 2 6 2 2 6 2 2 6
42202- 2 2 6 66 66 66 62 62 62 22 22 22
42203- 6 6 6 0 0 0 0 0 0 0 0 0
42204- 0 0 0 0 0 0 0 0 0 0 0 0
42205- 0 0 0 0 0 0 0 0 0 0 0 0
42206- 0 0 0 0 0 0 0 0 0 0 0 0
42207- 0 0 0 0 0 0 0 0 0 0 0 0
42208- 0 0 0 0 0 0 0 0 0 0 0 0
42209- 0 0 0 0 0 0 6 6 6 18 18 18
42210- 50 50 50 74 74 74 2 2 6 2 2 6
42211- 14 14 14 70 70 70 34 34 34 62 62 62
42212-250 250 250 253 253 253 253 253 253 253 253 253
42213-253 253 253 253 253 253 253 253 253 253 253 253
42214-253 253 253 253 253 253 231 231 231 246 246 246
42215-253 253 253 253 253 253 253 253 253 253 253 253
42216-253 253 253 253 253 253 253 253 253 253 253 253
42217-253 253 253 253 253 253 253 253 253 253 253 253
42218-253 253 253 253 253 253 253 253 253 253 253 253
42219-253 253 253 253 253 253 234 234 234 14 14 14
42220- 2 2 6 2 2 6 30 30 30 2 2 6
42221- 2 2 6 2 2 6 2 2 6 2 2 6
42222- 2 2 6 66 66 66 62 62 62 22 22 22
42223- 6 6 6 0 0 0 0 0 0 0 0 0
42224- 0 0 0 0 0 0 0 0 0 0 0 0
42225- 0 0 0 0 0 0 0 0 0 0 0 0
42226- 0 0 0 0 0 0 0 0 0 0 0 0
42227- 0 0 0 0 0 0 0 0 0 0 0 0
42228- 0 0 0 0 0 0 0 0 0 0 0 0
42229- 0 0 0 0 0 0 6 6 6 18 18 18
42230- 54 54 54 62 62 62 2 2 6 2 2 6
42231- 2 2 6 30 30 30 46 46 46 70 70 70
42232-250 250 250 253 253 253 253 253 253 253 253 253
42233-253 253 253 253 253 253 253 253 253 253 253 253
42234-253 253 253 253 253 253 231 231 231 246 246 246
42235-253 253 253 253 253 253 253 253 253 253 253 253
42236-253 253 253 253 253 253 253 253 253 253 253 253
42237-253 253 253 253 253 253 253 253 253 253 253 253
42238-253 253 253 253 253 253 253 253 253 253 253 253
42239-253 253 253 253 253 253 226 226 226 10 10 10
42240- 2 2 6 6 6 6 30 30 30 2 2 6
42241- 2 2 6 2 2 6 2 2 6 2 2 6
42242- 2 2 6 66 66 66 58 58 58 22 22 22
42243- 6 6 6 0 0 0 0 0 0 0 0 0
42244- 0 0 0 0 0 0 0 0 0 0 0 0
42245- 0 0 0 0 0 0 0 0 0 0 0 0
42246- 0 0 0 0 0 0 0 0 0 0 0 0
42247- 0 0 0 0 0 0 0 0 0 0 0 0
42248- 0 0 0 0 0 0 0 0 0 0 0 0
42249- 0 0 0 0 0 0 6 6 6 22 22 22
42250- 58 58 58 62 62 62 2 2 6 2 2 6
42251- 2 2 6 2 2 6 30 30 30 78 78 78
42252-250 250 250 253 253 253 253 253 253 253 253 253
42253-253 253 253 253 253 253 253 253 253 253 253 253
42254-253 253 253 253 253 253 231 231 231 246 246 246
42255-253 253 253 253 253 253 253 253 253 253 253 253
42256-253 253 253 253 253 253 253 253 253 253 253 253
42257-253 253 253 253 253 253 253 253 253 253 253 253
42258-253 253 253 253 253 253 253 253 253 253 253 253
42259-253 253 253 253 253 253 206 206 206 2 2 6
42260- 22 22 22 34 34 34 18 14 6 22 22 22
42261- 26 26 26 18 18 18 6 6 6 2 2 6
42262- 2 2 6 82 82 82 54 54 54 18 18 18
42263- 6 6 6 0 0 0 0 0 0 0 0 0
42264- 0 0 0 0 0 0 0 0 0 0 0 0
42265- 0 0 0 0 0 0 0 0 0 0 0 0
42266- 0 0 0 0 0 0 0 0 0 0 0 0
42267- 0 0 0 0 0 0 0 0 0 0 0 0
42268- 0 0 0 0 0 0 0 0 0 0 0 0
42269- 0 0 0 0 0 0 6 6 6 26 26 26
42270- 62 62 62 106 106 106 74 54 14 185 133 11
42271-210 162 10 121 92 8 6 6 6 62 62 62
42272-238 238 238 253 253 253 253 253 253 253 253 253
42273-253 253 253 253 253 253 253 253 253 253 253 253
42274-253 253 253 253 253 253 231 231 231 246 246 246
42275-253 253 253 253 253 253 253 253 253 253 253 253
42276-253 253 253 253 253 253 253 253 253 253 253 253
42277-253 253 253 253 253 253 253 253 253 253 253 253
42278-253 253 253 253 253 253 253 253 253 253 253 253
42279-253 253 253 253 253 253 158 158 158 18 18 18
42280- 14 14 14 2 2 6 2 2 6 2 2 6
42281- 6 6 6 18 18 18 66 66 66 38 38 38
42282- 6 6 6 94 94 94 50 50 50 18 18 18
42283- 6 6 6 0 0 0 0 0 0 0 0 0
42284- 0 0 0 0 0 0 0 0 0 0 0 0
42285- 0 0 0 0 0 0 0 0 0 0 0 0
42286- 0 0 0 0 0 0 0 0 0 0 0 0
42287- 0 0 0 0 0 0 0 0 0 0 0 0
42288- 0 0 0 0 0 0 0 0 0 6 6 6
42289- 10 10 10 10 10 10 18 18 18 38 38 38
42290- 78 78 78 142 134 106 216 158 10 242 186 14
42291-246 190 14 246 190 14 156 118 10 10 10 10
42292- 90 90 90 238 238 238 253 253 253 253 253 253
42293-253 253 253 253 253 253 253 253 253 253 253 253
42294-253 253 253 253 253 253 231 231 231 250 250 250
42295-253 253 253 253 253 253 253 253 253 253 253 253
42296-253 253 253 253 253 253 253 253 253 253 253 253
42297-253 253 253 253 253 253 253 253 253 253 253 253
42298-253 253 253 253 253 253 253 253 253 246 230 190
42299-238 204 91 238 204 91 181 142 44 37 26 9
42300- 2 2 6 2 2 6 2 2 6 2 2 6
42301- 2 2 6 2 2 6 38 38 38 46 46 46
42302- 26 26 26 106 106 106 54 54 54 18 18 18
42303- 6 6 6 0 0 0 0 0 0 0 0 0
42304- 0 0 0 0 0 0 0 0 0 0 0 0
42305- 0 0 0 0 0 0 0 0 0 0 0 0
42306- 0 0 0 0 0 0 0 0 0 0 0 0
42307- 0 0 0 0 0 0 0 0 0 0 0 0
42308- 0 0 0 6 6 6 14 14 14 22 22 22
42309- 30 30 30 38 38 38 50 50 50 70 70 70
42310-106 106 106 190 142 34 226 170 11 242 186 14
42311-246 190 14 246 190 14 246 190 14 154 114 10
42312- 6 6 6 74 74 74 226 226 226 253 253 253
42313-253 253 253 253 253 253 253 253 253 253 253 253
42314-253 253 253 253 253 253 231 231 231 250 250 250
42315-253 253 253 253 253 253 253 253 253 253 253 253
42316-253 253 253 253 253 253 253 253 253 253 253 253
42317-253 253 253 253 253 253 253 253 253 253 253 253
42318-253 253 253 253 253 253 253 253 253 228 184 62
42319-241 196 14 241 208 19 232 195 16 38 30 10
42320- 2 2 6 2 2 6 2 2 6 2 2 6
42321- 2 2 6 6 6 6 30 30 30 26 26 26
42322-203 166 17 154 142 90 66 66 66 26 26 26
42323- 6 6 6 0 0 0 0 0 0 0 0 0
42324- 0 0 0 0 0 0 0 0 0 0 0 0
42325- 0 0 0 0 0 0 0 0 0 0 0 0
42326- 0 0 0 0 0 0 0 0 0 0 0 0
42327- 0 0 0 0 0 0 0 0 0 0 0 0
42328- 6 6 6 18 18 18 38 38 38 58 58 58
42329- 78 78 78 86 86 86 101 101 101 123 123 123
42330-175 146 61 210 150 10 234 174 13 246 186 14
42331-246 190 14 246 190 14 246 190 14 238 190 10
42332-102 78 10 2 2 6 46 46 46 198 198 198
42333-253 253 253 253 253 253 253 253 253 253 253 253
42334-253 253 253 253 253 253 234 234 234 242 242 242
42335-253 253 253 253 253 253 253 253 253 253 253 253
42336-253 253 253 253 253 253 253 253 253 253 253 253
42337-253 253 253 253 253 253 253 253 253 253 253 253
42338-253 253 253 253 253 253 253 253 253 224 178 62
42339-242 186 14 241 196 14 210 166 10 22 18 6
42340- 2 2 6 2 2 6 2 2 6 2 2 6
42341- 2 2 6 2 2 6 6 6 6 121 92 8
42342-238 202 15 232 195 16 82 82 82 34 34 34
42343- 10 10 10 0 0 0 0 0 0 0 0 0
42344- 0 0 0 0 0 0 0 0 0 0 0 0
42345- 0 0 0 0 0 0 0 0 0 0 0 0
42346- 0 0 0 0 0 0 0 0 0 0 0 0
42347- 0 0 0 0 0 0 0 0 0 0 0 0
42348- 14 14 14 38 38 38 70 70 70 154 122 46
42349-190 142 34 200 144 11 197 138 11 197 138 11
42350-213 154 11 226 170 11 242 186 14 246 190 14
42351-246 190 14 246 190 14 246 190 14 246 190 14
42352-225 175 15 46 32 6 2 2 6 22 22 22
42353-158 158 158 250 250 250 253 253 253 253 253 253
42354-253 253 253 253 253 253 253 253 253 253 253 253
42355-253 253 253 253 253 253 253 253 253 253 253 253
42356-253 253 253 253 253 253 253 253 253 253 253 253
42357-253 253 253 253 253 253 253 253 253 253 253 253
42358-253 253 253 250 250 250 242 242 242 224 178 62
42359-239 182 13 236 186 11 213 154 11 46 32 6
42360- 2 2 6 2 2 6 2 2 6 2 2 6
42361- 2 2 6 2 2 6 61 42 6 225 175 15
42362-238 190 10 236 186 11 112 100 78 42 42 42
42363- 14 14 14 0 0 0 0 0 0 0 0 0
42364- 0 0 0 0 0 0 0 0 0 0 0 0
42365- 0 0 0 0 0 0 0 0 0 0 0 0
42366- 0 0 0 0 0 0 0 0 0 0 0 0
42367- 0 0 0 0 0 0 0 0 0 6 6 6
42368- 22 22 22 54 54 54 154 122 46 213 154 11
42369-226 170 11 230 174 11 226 170 11 226 170 11
42370-236 178 12 242 186 14 246 190 14 246 190 14
42371-246 190 14 246 190 14 246 190 14 246 190 14
42372-241 196 14 184 144 12 10 10 10 2 2 6
42373- 6 6 6 116 116 116 242 242 242 253 253 253
42374-253 253 253 253 253 253 253 253 253 253 253 253
42375-253 253 253 253 253 253 253 253 253 253 253 253
42376-253 253 253 253 253 253 253 253 253 253 253 253
42377-253 253 253 253 253 253 253 253 253 253 253 253
42378-253 253 253 231 231 231 198 198 198 214 170 54
42379-236 178 12 236 178 12 210 150 10 137 92 6
42380- 18 14 6 2 2 6 2 2 6 2 2 6
42381- 6 6 6 70 47 6 200 144 11 236 178 12
42382-239 182 13 239 182 13 124 112 88 58 58 58
42383- 22 22 22 6 6 6 0 0 0 0 0 0
42384- 0 0 0 0 0 0 0 0 0 0 0 0
42385- 0 0 0 0 0 0 0 0 0 0 0 0
42386- 0 0 0 0 0 0 0 0 0 0 0 0
42387- 0 0 0 0 0 0 0 0 0 10 10 10
42388- 30 30 30 70 70 70 180 133 36 226 170 11
42389-239 182 13 242 186 14 242 186 14 246 186 14
42390-246 190 14 246 190 14 246 190 14 246 190 14
42391-246 190 14 246 190 14 246 190 14 246 190 14
42392-246 190 14 232 195 16 98 70 6 2 2 6
42393- 2 2 6 2 2 6 66 66 66 221 221 221
42394-253 253 253 253 253 253 253 253 253 253 253 253
42395-253 253 253 253 253 253 253 253 253 253 253 253
42396-253 253 253 253 253 253 253 253 253 253 253 253
42397-253 253 253 253 253 253 253 253 253 253 253 253
42398-253 253 253 206 206 206 198 198 198 214 166 58
42399-230 174 11 230 174 11 216 158 10 192 133 9
42400-163 110 8 116 81 8 102 78 10 116 81 8
42401-167 114 7 197 138 11 226 170 11 239 182 13
42402-242 186 14 242 186 14 162 146 94 78 78 78
42403- 34 34 34 14 14 14 6 6 6 0 0 0
42404- 0 0 0 0 0 0 0 0 0 0 0 0
42405- 0 0 0 0 0 0 0 0 0 0 0 0
42406- 0 0 0 0 0 0 0 0 0 0 0 0
42407- 0 0 0 0 0 0 0 0 0 6 6 6
42408- 30 30 30 78 78 78 190 142 34 226 170 11
42409-239 182 13 246 190 14 246 190 14 246 190 14
42410-246 190 14 246 190 14 246 190 14 246 190 14
42411-246 190 14 246 190 14 246 190 14 246 190 14
42412-246 190 14 241 196 14 203 166 17 22 18 6
42413- 2 2 6 2 2 6 2 2 6 38 38 38
42414-218 218 218 253 253 253 253 253 253 253 253 253
42415-253 253 253 253 253 253 253 253 253 253 253 253
42416-253 253 253 253 253 253 253 253 253 253 253 253
42417-253 253 253 253 253 253 253 253 253 253 253 253
42418-250 250 250 206 206 206 198 198 198 202 162 69
42419-226 170 11 236 178 12 224 166 10 210 150 10
42420-200 144 11 197 138 11 192 133 9 197 138 11
42421-210 150 10 226 170 11 242 186 14 246 190 14
42422-246 190 14 246 186 14 225 175 15 124 112 88
42423- 62 62 62 30 30 30 14 14 14 6 6 6
42424- 0 0 0 0 0 0 0 0 0 0 0 0
42425- 0 0 0 0 0 0 0 0 0 0 0 0
42426- 0 0 0 0 0 0 0 0 0 0 0 0
42427- 0 0 0 0 0 0 0 0 0 10 10 10
42428- 30 30 30 78 78 78 174 135 50 224 166 10
42429-239 182 13 246 190 14 246 190 14 246 190 14
42430-246 190 14 246 190 14 246 190 14 246 190 14
42431-246 190 14 246 190 14 246 190 14 246 190 14
42432-246 190 14 246 190 14 241 196 14 139 102 15
42433- 2 2 6 2 2 6 2 2 6 2 2 6
42434- 78 78 78 250 250 250 253 253 253 253 253 253
42435-253 253 253 253 253 253 253 253 253 253 253 253
42436-253 253 253 253 253 253 253 253 253 253 253 253
42437-253 253 253 253 253 253 253 253 253 253 253 253
42438-250 250 250 214 214 214 198 198 198 190 150 46
42439-219 162 10 236 178 12 234 174 13 224 166 10
42440-216 158 10 213 154 11 213 154 11 216 158 10
42441-226 170 11 239 182 13 246 190 14 246 190 14
42442-246 190 14 246 190 14 242 186 14 206 162 42
42443-101 101 101 58 58 58 30 30 30 14 14 14
42444- 6 6 6 0 0 0 0 0 0 0 0 0
42445- 0 0 0 0 0 0 0 0 0 0 0 0
42446- 0 0 0 0 0 0 0 0 0 0 0 0
42447- 0 0 0 0 0 0 0 0 0 10 10 10
42448- 30 30 30 74 74 74 174 135 50 216 158 10
42449-236 178 12 246 190 14 246 190 14 246 190 14
42450-246 190 14 246 190 14 246 190 14 246 190 14
42451-246 190 14 246 190 14 246 190 14 246 190 14
42452-246 190 14 246 190 14 241 196 14 226 184 13
42453- 61 42 6 2 2 6 2 2 6 2 2 6
42454- 22 22 22 238 238 238 253 253 253 253 253 253
42455-253 253 253 253 253 253 253 253 253 253 253 253
42456-253 253 253 253 253 253 253 253 253 253 253 253
42457-253 253 253 253 253 253 253 253 253 253 253 253
42458-253 253 253 226 226 226 187 187 187 180 133 36
42459-216 158 10 236 178 12 239 182 13 236 178 12
42460-230 174 11 226 170 11 226 170 11 230 174 11
42461-236 178 12 242 186 14 246 190 14 246 190 14
42462-246 190 14 246 190 14 246 186 14 239 182 13
42463-206 162 42 106 106 106 66 66 66 34 34 34
42464- 14 14 14 6 6 6 0 0 0 0 0 0
42465- 0 0 0 0 0 0 0 0 0 0 0 0
42466- 0 0 0 0 0 0 0 0 0 0 0 0
42467- 0 0 0 0 0 0 0 0 0 6 6 6
42468- 26 26 26 70 70 70 163 133 67 213 154 11
42469-236 178 12 246 190 14 246 190 14 246 190 14
42470-246 190 14 246 190 14 246 190 14 246 190 14
42471-246 190 14 246 190 14 246 190 14 246 190 14
42472-246 190 14 246 190 14 246 190 14 241 196 14
42473-190 146 13 18 14 6 2 2 6 2 2 6
42474- 46 46 46 246 246 246 253 253 253 253 253 253
42475-253 253 253 253 253 253 253 253 253 253 253 253
42476-253 253 253 253 253 253 253 253 253 253 253 253
42477-253 253 253 253 253 253 253 253 253 253 253 253
42478-253 253 253 221 221 221 86 86 86 156 107 11
42479-216 158 10 236 178 12 242 186 14 246 186 14
42480-242 186 14 239 182 13 239 182 13 242 186 14
42481-242 186 14 246 186 14 246 190 14 246 190 14
42482-246 190 14 246 190 14 246 190 14 246 190 14
42483-242 186 14 225 175 15 142 122 72 66 66 66
42484- 30 30 30 10 10 10 0 0 0 0 0 0
42485- 0 0 0 0 0 0 0 0 0 0 0 0
42486- 0 0 0 0 0 0 0 0 0 0 0 0
42487- 0 0 0 0 0 0 0 0 0 6 6 6
42488- 26 26 26 70 70 70 163 133 67 210 150 10
42489-236 178 12 246 190 14 246 190 14 246 190 14
42490-246 190 14 246 190 14 246 190 14 246 190 14
42491-246 190 14 246 190 14 246 190 14 246 190 14
42492-246 190 14 246 190 14 246 190 14 246 190 14
42493-232 195 16 121 92 8 34 34 34 106 106 106
42494-221 221 221 253 253 253 253 253 253 253 253 253
42495-253 253 253 253 253 253 253 253 253 253 253 253
42496-253 253 253 253 253 253 253 253 253 253 253 253
42497-253 253 253 253 253 253 253 253 253 253 253 253
42498-242 242 242 82 82 82 18 14 6 163 110 8
42499-216 158 10 236 178 12 242 186 14 246 190 14
42500-246 190 14 246 190 14 246 190 14 246 190 14
42501-246 190 14 246 190 14 246 190 14 246 190 14
42502-246 190 14 246 190 14 246 190 14 246 190 14
42503-246 190 14 246 190 14 242 186 14 163 133 67
42504- 46 46 46 18 18 18 6 6 6 0 0 0
42505- 0 0 0 0 0 0 0 0 0 0 0 0
42506- 0 0 0 0 0 0 0 0 0 0 0 0
42507- 0 0 0 0 0 0 0 0 0 10 10 10
42508- 30 30 30 78 78 78 163 133 67 210 150 10
42509-236 178 12 246 186 14 246 190 14 246 190 14
42510-246 190 14 246 190 14 246 190 14 246 190 14
42511-246 190 14 246 190 14 246 190 14 246 190 14
42512-246 190 14 246 190 14 246 190 14 246 190 14
42513-241 196 14 215 174 15 190 178 144 253 253 253
42514-253 253 253 253 253 253 253 253 253 253 253 253
42515-253 253 253 253 253 253 253 253 253 253 253 253
42516-253 253 253 253 253 253 253 253 253 253 253 253
42517-253 253 253 253 253 253 253 253 253 218 218 218
42518- 58 58 58 2 2 6 22 18 6 167 114 7
42519-216 158 10 236 178 12 246 186 14 246 190 14
42520-246 190 14 246 190 14 246 190 14 246 190 14
42521-246 190 14 246 190 14 246 190 14 246 190 14
42522-246 190 14 246 190 14 246 190 14 246 190 14
42523-246 190 14 246 186 14 242 186 14 190 150 46
42524- 54 54 54 22 22 22 6 6 6 0 0 0
42525- 0 0 0 0 0 0 0 0 0 0 0 0
42526- 0 0 0 0 0 0 0 0 0 0 0 0
42527- 0 0 0 0 0 0 0 0 0 14 14 14
42528- 38 38 38 86 86 86 180 133 36 213 154 11
42529-236 178 12 246 186 14 246 190 14 246 190 14
42530-246 190 14 246 190 14 246 190 14 246 190 14
42531-246 190 14 246 190 14 246 190 14 246 190 14
42532-246 190 14 246 190 14 246 190 14 246 190 14
42533-246 190 14 232 195 16 190 146 13 214 214 214
42534-253 253 253 253 253 253 253 253 253 253 253 253
42535-253 253 253 253 253 253 253 253 253 253 253 253
42536-253 253 253 253 253 253 253 253 253 253 253 253
42537-253 253 253 250 250 250 170 170 170 26 26 26
42538- 2 2 6 2 2 6 37 26 9 163 110 8
42539-219 162 10 239 182 13 246 186 14 246 190 14
42540-246 190 14 246 190 14 246 190 14 246 190 14
42541-246 190 14 246 190 14 246 190 14 246 190 14
42542-246 190 14 246 190 14 246 190 14 246 190 14
42543-246 186 14 236 178 12 224 166 10 142 122 72
42544- 46 46 46 18 18 18 6 6 6 0 0 0
42545- 0 0 0 0 0 0 0 0 0 0 0 0
42546- 0 0 0 0 0 0 0 0 0 0 0 0
42547- 0 0 0 0 0 0 6 6 6 18 18 18
42548- 50 50 50 109 106 95 192 133 9 224 166 10
42549-242 186 14 246 190 14 246 190 14 246 190 14
42550-246 190 14 246 190 14 246 190 14 246 190 14
42551-246 190 14 246 190 14 246 190 14 246 190 14
42552-246 190 14 246 190 14 246 190 14 246 190 14
42553-242 186 14 226 184 13 210 162 10 142 110 46
42554-226 226 226 253 253 253 253 253 253 253 253 253
42555-253 253 253 253 253 253 253 253 253 253 253 253
42556-253 253 253 253 253 253 253 253 253 253 253 253
42557-198 198 198 66 66 66 2 2 6 2 2 6
42558- 2 2 6 2 2 6 50 34 6 156 107 11
42559-219 162 10 239 182 13 246 186 14 246 190 14
42560-246 190 14 246 190 14 246 190 14 246 190 14
42561-246 190 14 246 190 14 246 190 14 246 190 14
42562-246 190 14 246 190 14 246 190 14 242 186 14
42563-234 174 13 213 154 11 154 122 46 66 66 66
42564- 30 30 30 10 10 10 0 0 0 0 0 0
42565- 0 0 0 0 0 0 0 0 0 0 0 0
42566- 0 0 0 0 0 0 0 0 0 0 0 0
42567- 0 0 0 0 0 0 6 6 6 22 22 22
42568- 58 58 58 154 121 60 206 145 10 234 174 13
42569-242 186 14 246 186 14 246 190 14 246 190 14
42570-246 190 14 246 190 14 246 190 14 246 190 14
42571-246 190 14 246 190 14 246 190 14 246 190 14
42572-246 190 14 246 190 14 246 190 14 246 190 14
42573-246 186 14 236 178 12 210 162 10 163 110 8
42574- 61 42 6 138 138 138 218 218 218 250 250 250
42575-253 253 253 253 253 253 253 253 253 250 250 250
42576-242 242 242 210 210 210 144 144 144 66 66 66
42577- 6 6 6 2 2 6 2 2 6 2 2 6
42578- 2 2 6 2 2 6 61 42 6 163 110 8
42579-216 158 10 236 178 12 246 190 14 246 190 14
42580-246 190 14 246 190 14 246 190 14 246 190 14
42581-246 190 14 246 190 14 246 190 14 246 190 14
42582-246 190 14 239 182 13 230 174 11 216 158 10
42583-190 142 34 124 112 88 70 70 70 38 38 38
42584- 18 18 18 6 6 6 0 0 0 0 0 0
42585- 0 0 0 0 0 0 0 0 0 0 0 0
42586- 0 0 0 0 0 0 0 0 0 0 0 0
42587- 0 0 0 0 0 0 6 6 6 22 22 22
42588- 62 62 62 168 124 44 206 145 10 224 166 10
42589-236 178 12 239 182 13 242 186 14 242 186 14
42590-246 186 14 246 190 14 246 190 14 246 190 14
42591-246 190 14 246 190 14 246 190 14 246 190 14
42592-246 190 14 246 190 14 246 190 14 246 190 14
42593-246 190 14 236 178 12 216 158 10 175 118 6
42594- 80 54 7 2 2 6 6 6 6 30 30 30
42595- 54 54 54 62 62 62 50 50 50 38 38 38
42596- 14 14 14 2 2 6 2 2 6 2 2 6
42597- 2 2 6 2 2 6 2 2 6 2 2 6
42598- 2 2 6 6 6 6 80 54 7 167 114 7
42599-213 154 11 236 178 12 246 190 14 246 190 14
42600-246 190 14 246 190 14 246 190 14 246 190 14
42601-246 190 14 242 186 14 239 182 13 239 182 13
42602-230 174 11 210 150 10 174 135 50 124 112 88
42603- 82 82 82 54 54 54 34 34 34 18 18 18
42604- 6 6 6 0 0 0 0 0 0 0 0 0
42605- 0 0 0 0 0 0 0 0 0 0 0 0
42606- 0 0 0 0 0 0 0 0 0 0 0 0
42607- 0 0 0 0 0 0 6 6 6 18 18 18
42608- 50 50 50 158 118 36 192 133 9 200 144 11
42609-216 158 10 219 162 10 224 166 10 226 170 11
42610-230 174 11 236 178 12 239 182 13 239 182 13
42611-242 186 14 246 186 14 246 190 14 246 190 14
42612-246 190 14 246 190 14 246 190 14 246 190 14
42613-246 186 14 230 174 11 210 150 10 163 110 8
42614-104 69 6 10 10 10 2 2 6 2 2 6
42615- 2 2 6 2 2 6 2 2 6 2 2 6
42616- 2 2 6 2 2 6 2 2 6 2 2 6
42617- 2 2 6 2 2 6 2 2 6 2 2 6
42618- 2 2 6 6 6 6 91 60 6 167 114 7
42619-206 145 10 230 174 11 242 186 14 246 190 14
42620-246 190 14 246 190 14 246 186 14 242 186 14
42621-239 182 13 230 174 11 224 166 10 213 154 11
42622-180 133 36 124 112 88 86 86 86 58 58 58
42623- 38 38 38 22 22 22 10 10 10 6 6 6
42624- 0 0 0 0 0 0 0 0 0 0 0 0
42625- 0 0 0 0 0 0 0 0 0 0 0 0
42626- 0 0 0 0 0 0 0 0 0 0 0 0
42627- 0 0 0 0 0 0 0 0 0 14 14 14
42628- 34 34 34 70 70 70 138 110 50 158 118 36
42629-167 114 7 180 123 7 192 133 9 197 138 11
42630-200 144 11 206 145 10 213 154 11 219 162 10
42631-224 166 10 230 174 11 239 182 13 242 186 14
42632-246 186 14 246 186 14 246 186 14 246 186 14
42633-239 182 13 216 158 10 185 133 11 152 99 6
42634-104 69 6 18 14 6 2 2 6 2 2 6
42635- 2 2 6 2 2 6 2 2 6 2 2 6
42636- 2 2 6 2 2 6 2 2 6 2 2 6
42637- 2 2 6 2 2 6 2 2 6 2 2 6
42638- 2 2 6 6 6 6 80 54 7 152 99 6
42639-192 133 9 219 162 10 236 178 12 239 182 13
42640-246 186 14 242 186 14 239 182 13 236 178 12
42641-224 166 10 206 145 10 192 133 9 154 121 60
42642- 94 94 94 62 62 62 42 42 42 22 22 22
42643- 14 14 14 6 6 6 0 0 0 0 0 0
42644- 0 0 0 0 0 0 0 0 0 0 0 0
42645- 0 0 0 0 0 0 0 0 0 0 0 0
42646- 0 0 0 0 0 0 0 0 0 0 0 0
42647- 0 0 0 0 0 0 0 0 0 6 6 6
42648- 18 18 18 34 34 34 58 58 58 78 78 78
42649-101 98 89 124 112 88 142 110 46 156 107 11
42650-163 110 8 167 114 7 175 118 6 180 123 7
42651-185 133 11 197 138 11 210 150 10 219 162 10
42652-226 170 11 236 178 12 236 178 12 234 174 13
42653-219 162 10 197 138 11 163 110 8 130 83 6
42654- 91 60 6 10 10 10 2 2 6 2 2 6
42655- 18 18 18 38 38 38 38 38 38 38 38 38
42656- 38 38 38 38 38 38 38 38 38 38 38 38
42657- 38 38 38 38 38 38 26 26 26 2 2 6
42658- 2 2 6 6 6 6 70 47 6 137 92 6
42659-175 118 6 200 144 11 219 162 10 230 174 11
42660-234 174 13 230 174 11 219 162 10 210 150 10
42661-192 133 9 163 110 8 124 112 88 82 82 82
42662- 50 50 50 30 30 30 14 14 14 6 6 6
42663- 0 0 0 0 0 0 0 0 0 0 0 0
42664- 0 0 0 0 0 0 0 0 0 0 0 0
42665- 0 0 0 0 0 0 0 0 0 0 0 0
42666- 0 0 0 0 0 0 0 0 0 0 0 0
42667- 0 0 0 0 0 0 0 0 0 0 0 0
42668- 6 6 6 14 14 14 22 22 22 34 34 34
42669- 42 42 42 58 58 58 74 74 74 86 86 86
42670-101 98 89 122 102 70 130 98 46 121 87 25
42671-137 92 6 152 99 6 163 110 8 180 123 7
42672-185 133 11 197 138 11 206 145 10 200 144 11
42673-180 123 7 156 107 11 130 83 6 104 69 6
42674- 50 34 6 54 54 54 110 110 110 101 98 89
42675- 86 86 86 82 82 82 78 78 78 78 78 78
42676- 78 78 78 78 78 78 78 78 78 78 78 78
42677- 78 78 78 82 82 82 86 86 86 94 94 94
42678-106 106 106 101 101 101 86 66 34 124 80 6
42679-156 107 11 180 123 7 192 133 9 200 144 11
42680-206 145 10 200 144 11 192 133 9 175 118 6
42681-139 102 15 109 106 95 70 70 70 42 42 42
42682- 22 22 22 10 10 10 0 0 0 0 0 0
42683- 0 0 0 0 0 0 0 0 0 0 0 0
42684- 0 0 0 0 0 0 0 0 0 0 0 0
42685- 0 0 0 0 0 0 0 0 0 0 0 0
42686- 0 0 0 0 0 0 0 0 0 0 0 0
42687- 0 0 0 0 0 0 0 0 0 0 0 0
42688- 0 0 0 0 0 0 6 6 6 10 10 10
42689- 14 14 14 22 22 22 30 30 30 38 38 38
42690- 50 50 50 62 62 62 74 74 74 90 90 90
42691-101 98 89 112 100 78 121 87 25 124 80 6
42692-137 92 6 152 99 6 152 99 6 152 99 6
42693-138 86 6 124 80 6 98 70 6 86 66 30
42694-101 98 89 82 82 82 58 58 58 46 46 46
42695- 38 38 38 34 34 34 34 34 34 34 34 34
42696- 34 34 34 34 34 34 34 34 34 34 34 34
42697- 34 34 34 34 34 34 38 38 38 42 42 42
42698- 54 54 54 82 82 82 94 86 76 91 60 6
42699-134 86 6 156 107 11 167 114 7 175 118 6
42700-175 118 6 167 114 7 152 99 6 121 87 25
42701-101 98 89 62 62 62 34 34 34 18 18 18
42702- 6 6 6 0 0 0 0 0 0 0 0 0
42703- 0 0 0 0 0 0 0 0 0 0 0 0
42704- 0 0 0 0 0 0 0 0 0 0 0 0
42705- 0 0 0 0 0 0 0 0 0 0 0 0
42706- 0 0 0 0 0 0 0 0 0 0 0 0
42707- 0 0 0 0 0 0 0 0 0 0 0 0
42708- 0 0 0 0 0 0 0 0 0 0 0 0
42709- 0 0 0 6 6 6 6 6 6 10 10 10
42710- 18 18 18 22 22 22 30 30 30 42 42 42
42711- 50 50 50 66 66 66 86 86 86 101 98 89
42712-106 86 58 98 70 6 104 69 6 104 69 6
42713-104 69 6 91 60 6 82 62 34 90 90 90
42714- 62 62 62 38 38 38 22 22 22 14 14 14
42715- 10 10 10 10 10 10 10 10 10 10 10 10
42716- 10 10 10 10 10 10 6 6 6 10 10 10
42717- 10 10 10 10 10 10 10 10 10 14 14 14
42718- 22 22 22 42 42 42 70 70 70 89 81 66
42719- 80 54 7 104 69 6 124 80 6 137 92 6
42720-134 86 6 116 81 8 100 82 52 86 86 86
42721- 58 58 58 30 30 30 14 14 14 6 6 6
42722- 0 0 0 0 0 0 0 0 0 0 0 0
42723- 0 0 0 0 0 0 0 0 0 0 0 0
42724- 0 0 0 0 0 0 0 0 0 0 0 0
42725- 0 0 0 0 0 0 0 0 0 0 0 0
42726- 0 0 0 0 0 0 0 0 0 0 0 0
42727- 0 0 0 0 0 0 0 0 0 0 0 0
42728- 0 0 0 0 0 0 0 0 0 0 0 0
42729- 0 0 0 0 0 0 0 0 0 0 0 0
42730- 0 0 0 6 6 6 10 10 10 14 14 14
42731- 18 18 18 26 26 26 38 38 38 54 54 54
42732- 70 70 70 86 86 86 94 86 76 89 81 66
42733- 89 81 66 86 86 86 74 74 74 50 50 50
42734- 30 30 30 14 14 14 6 6 6 0 0 0
42735- 0 0 0 0 0 0 0 0 0 0 0 0
42736- 0 0 0 0 0 0 0 0 0 0 0 0
42737- 0 0 0 0 0 0 0 0 0 0 0 0
42738- 6 6 6 18 18 18 34 34 34 58 58 58
42739- 82 82 82 89 81 66 89 81 66 89 81 66
42740- 94 86 66 94 86 76 74 74 74 50 50 50
42741- 26 26 26 14 14 14 6 6 6 0 0 0
42742- 0 0 0 0 0 0 0 0 0 0 0 0
42743- 0 0 0 0 0 0 0 0 0 0 0 0
42744- 0 0 0 0 0 0 0 0 0 0 0 0
42745- 0 0 0 0 0 0 0 0 0 0 0 0
42746- 0 0 0 0 0 0 0 0 0 0 0 0
42747- 0 0 0 0 0 0 0 0 0 0 0 0
42748- 0 0 0 0 0 0 0 0 0 0 0 0
42749- 0 0 0 0 0 0 0 0 0 0 0 0
42750- 0 0 0 0 0 0 0 0 0 0 0 0
42751- 6 6 6 6 6 6 14 14 14 18 18 18
42752- 30 30 30 38 38 38 46 46 46 54 54 54
42753- 50 50 50 42 42 42 30 30 30 18 18 18
42754- 10 10 10 0 0 0 0 0 0 0 0 0
42755- 0 0 0 0 0 0 0 0 0 0 0 0
42756- 0 0 0 0 0 0 0 0 0 0 0 0
42757- 0 0 0 0 0 0 0 0 0 0 0 0
42758- 0 0 0 6 6 6 14 14 14 26 26 26
42759- 38 38 38 50 50 50 58 58 58 58 58 58
42760- 54 54 54 42 42 42 30 30 30 18 18 18
42761- 10 10 10 0 0 0 0 0 0 0 0 0
42762- 0 0 0 0 0 0 0 0 0 0 0 0
42763- 0 0 0 0 0 0 0 0 0 0 0 0
42764- 0 0 0 0 0 0 0 0 0 0 0 0
42765- 0 0 0 0 0 0 0 0 0 0 0 0
42766- 0 0 0 0 0 0 0 0 0 0 0 0
42767- 0 0 0 0 0 0 0 0 0 0 0 0
42768- 0 0 0 0 0 0 0 0 0 0 0 0
42769- 0 0 0 0 0 0 0 0 0 0 0 0
42770- 0 0 0 0 0 0 0 0 0 0 0 0
42771- 0 0 0 0 0 0 0 0 0 6 6 6
42772- 6 6 6 10 10 10 14 14 14 18 18 18
42773- 18 18 18 14 14 14 10 10 10 6 6 6
42774- 0 0 0 0 0 0 0 0 0 0 0 0
42775- 0 0 0 0 0 0 0 0 0 0 0 0
42776- 0 0 0 0 0 0 0 0 0 0 0 0
42777- 0 0 0 0 0 0 0 0 0 0 0 0
42778- 0 0 0 0 0 0 0 0 0 6 6 6
42779- 14 14 14 18 18 18 22 22 22 22 22 22
42780- 18 18 18 14 14 14 10 10 10 6 6 6
42781- 0 0 0 0 0 0 0 0 0 0 0 0
42782- 0 0 0 0 0 0 0 0 0 0 0 0
42783- 0 0 0 0 0 0 0 0 0 0 0 0
42784- 0 0 0 0 0 0 0 0 0 0 0 0
42785- 0 0 0 0 0 0 0 0 0 0 0 0
42786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42799+4 4 4 4 4 4
42800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42813+4 4 4 4 4 4
42814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42827+4 4 4 4 4 4
42828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42841+4 4 4 4 4 4
42842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42855+4 4 4 4 4 4
42856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42869+4 4 4 4 4 4
42870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42874+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
42875+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
42876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42879+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
42880+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42881+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
42882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42883+4 4 4 4 4 4
42884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42888+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
42889+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
42890+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42893+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
42894+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
42895+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
42896+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42897+4 4 4 4 4 4
42898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42902+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
42903+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
42904+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42907+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
42908+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
42909+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
42910+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
42911+4 4 4 4 4 4
42912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42915+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
42916+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
42917+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
42918+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
42919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42920+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
42921+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
42922+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
42923+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
42924+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
42925+4 4 4 4 4 4
42926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42929+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
42930+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
42931+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
42932+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
42933+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
42934+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
42935+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
42936+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
42937+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
42938+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
42939+4 4 4 4 4 4
42940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
42943+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
42944+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
42945+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
42946+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
42947+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
42948+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
42949+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
42950+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
42951+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
42952+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
42953+4 4 4 4 4 4
42954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42956+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
42957+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
42958+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
42959+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
42960+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
42961+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
42962+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
42963+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
42964+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
42965+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
42966+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
42967+4 4 4 4 4 4
42968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42970+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
42971+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
42972+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
42973+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
42974+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
42975+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
42976+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
42977+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
42978+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
42979+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
42980+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
42981+4 4 4 4 4 4
42982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42984+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
42985+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
42986+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
42987+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
42988+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
42989+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
42990+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
42991+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
42992+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
42993+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
42994+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
42995+4 4 4 4 4 4
42996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42998+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
42999+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
43000+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
43001+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
43002+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
43003+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
43004+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
43005+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
43006+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
43007+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
43008+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
43009+4 4 4 4 4 4
43010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43011+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
43012+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
43013+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
43014+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
43015+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
43016+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
43017+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
43018+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
43019+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
43020+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
43021+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
43022+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
43023+4 4 4 4 4 4
43024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43025+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
43026+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
43027+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
43028+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
43029+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
43030+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
43031+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
43032+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
43033+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
43034+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
43035+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
43036+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
43037+0 0 0 4 4 4
43038+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
43039+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
43040+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
43041+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
43042+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
43043+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
43044+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
43045+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
43046+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
43047+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
43048+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
43049+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
43050+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
43051+2 0 0 0 0 0
43052+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
43053+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
43054+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
43055+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
43056+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
43057+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
43058+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
43059+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
43060+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
43061+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
43062+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
43063+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
43064+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
43065+37 38 37 0 0 0
43066+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
43067+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
43068+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
43069+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
43070+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
43071+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
43072+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
43073+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
43074+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
43075+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
43076+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
43077+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
43078+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
43079+85 115 134 4 0 0
43080+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
43081+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
43082+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
43083+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
43084+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
43085+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
43086+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
43087+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
43088+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
43089+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
43090+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
43091+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
43092+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
43093+60 73 81 4 0 0
43094+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
43095+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
43096+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
43097+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
43098+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
43099+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
43100+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
43101+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
43102+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
43103+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
43104+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
43105+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
43106+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
43107+16 19 21 4 0 0
43108+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
43109+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
43110+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
43111+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
43112+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
43113+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
43114+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
43115+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
43116+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
43117+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
43118+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
43119+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
43120+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
43121+4 0 0 4 3 3
43122+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
43123+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
43124+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
43125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
43126+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
43127+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
43128+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
43129+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
43130+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
43131+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
43132+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
43133+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
43134+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
43135+3 2 2 4 4 4
43136+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
43137+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
43138+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
43139+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
43140+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
43141+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
43142+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
43143+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
43144+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
43145+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
43146+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
43147+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
43148+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
43149+4 4 4 4 4 4
43150+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
43151+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
43152+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
43153+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
43154+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
43155+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
43156+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
43157+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
43158+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
43159+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
43160+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
43161+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
43162+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
43163+4 4 4 4 4 4
43164+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
43165+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
43166+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
43167+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
43168+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
43169+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
43170+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
43171+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
43172+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
43173+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
43174+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
43175+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
43176+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
43177+5 5 5 5 5 5
43178+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
43179+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
43180+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
43181+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
43182+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
43183+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43184+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
43185+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
43186+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
43187+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
43188+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
43189+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
43190+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
43191+5 5 5 4 4 4
43192+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
43193+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
43194+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
43195+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
43196+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
43197+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
43198+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
43199+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
43200+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
43201+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
43202+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
43203+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
43204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43205+4 4 4 4 4 4
43206+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
43207+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
43208+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
43209+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
43210+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
43211+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43212+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
43213+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
43214+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
43215+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
43216+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
43217+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
43218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43219+4 4 4 4 4 4
43220+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
43221+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
43222+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
43223+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
43224+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
43225+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
43226+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
43227+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
43228+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
43229+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
43230+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
43231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
43232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
[… about 670 further lines of raw RGB pixel triplets — the remaining payload of an embedded PPM logo image carried by this patch — omitted …]
43906diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
43907index 86d449e..af6a7f7 100644
43908--- a/drivers/video/udlfb.c
43909+++ b/drivers/video/udlfb.c
43910@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
43911 dlfb_urb_completion(urb);
43912
43913 error:
43914- atomic_add(bytes_sent, &dev->bytes_sent);
43915- atomic_add(bytes_identical, &dev->bytes_identical);
43916- atomic_add(width*height*2, &dev->bytes_rendered);
43917+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43918+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43919+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
43920 end_cycles = get_cycles();
43921- atomic_add(((unsigned int) ((end_cycles - start_cycles)
43922+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43923 >> 10)), /* Kcycles */
43924 &dev->cpu_kcycles_used);
43925
43926@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
43927 dlfb_urb_completion(urb);
43928
43929 error:
43930- atomic_add(bytes_sent, &dev->bytes_sent);
43931- atomic_add(bytes_identical, &dev->bytes_identical);
43932- atomic_add(bytes_rendered, &dev->bytes_rendered);
43933+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
43934+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
43935+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
43936 end_cycles = get_cycles();
43937- atomic_add(((unsigned int) ((end_cycles - start_cycles)
43938+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
43939 >> 10)), /* Kcycles */
43940 &dev->cpu_kcycles_used);
43941 }
43942@@ -1372,7 +1372,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
43943 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43944 struct dlfb_data *dev = fb_info->par;
43945 return snprintf(buf, PAGE_SIZE, "%u\n",
43946- atomic_read(&dev->bytes_rendered));
43947+ atomic_read_unchecked(&dev->bytes_rendered));
43948 }
43949
43950 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43951@@ -1380,7 +1380,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
43952 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43953 struct dlfb_data *dev = fb_info->par;
43954 return snprintf(buf, PAGE_SIZE, "%u\n",
43955- atomic_read(&dev->bytes_identical));
43956+ atomic_read_unchecked(&dev->bytes_identical));
43957 }
43958
43959 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43960@@ -1388,7 +1388,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
43961 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43962 struct dlfb_data *dev = fb_info->par;
43963 return snprintf(buf, PAGE_SIZE, "%u\n",
43964- atomic_read(&dev->bytes_sent));
43965+ atomic_read_unchecked(&dev->bytes_sent));
43966 }
43967
43968 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43969@@ -1396,7 +1396,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
43970 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43971 struct dlfb_data *dev = fb_info->par;
43972 return snprintf(buf, PAGE_SIZE, "%u\n",
43973- atomic_read(&dev->cpu_kcycles_used));
43974+ atomic_read_unchecked(&dev->cpu_kcycles_used));
43975 }
43976
43977 static ssize_t edid_show(
43978@@ -1456,10 +1456,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
43979 struct fb_info *fb_info = dev_get_drvdata(fbdev);
43980 struct dlfb_data *dev = fb_info->par;
43981
43982- atomic_set(&dev->bytes_rendered, 0);
43983- atomic_set(&dev->bytes_identical, 0);
43984- atomic_set(&dev->bytes_sent, 0);
43985- atomic_set(&dev->cpu_kcycles_used, 0);
43986+ atomic_set_unchecked(&dev->bytes_rendered, 0);
43987+ atomic_set_unchecked(&dev->bytes_identical, 0);
43988+ atomic_set_unchecked(&dev->bytes_sent, 0);
43989+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
43990
43991 return count;
43992 }
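The udlfb hunks above convert the driver's transfer-statistics counters from atomic_t to PaX's atomic_unchecked_t family. Under PAX_REFCOUNT the plain atomic ops trap on signed overflow to catch reference-count bugs, so counters that may legitimately wrap (bytes sent, kcycles used) must opt out via the *_unchecked variants. A minimal userspace sketch of the distinction — the types and functions below are illustrative stand-ins, not the kernel API:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;                     /* checked */
typedef struct { unsigned int counter; } atomic_unchecked_t;  /* opted out */

static void atomic_add(int i, atomic_t *v)
{
        /* Checked flavor: overflowing a reference count is a bug, so trap. */
        if (v->counter > INT_MAX - i)
                abort();
        v->counter += i;
}

static void atomic_add_unchecked(unsigned int i, atomic_unchecked_t *v)
{
        v->counter += i;        /* statistics: wrapping is harmless */
}

int main(void)
{
        atomic_t refs = { 1 };
        atomic_unchecked_t bytes_sent = { UINT_MAX - 8 };

        atomic_add(1, &refs);
        atomic_add_unchecked(16, &bytes_sent);  /* wraps around to 7 */
        printf("refs=%d bytes_sent=%u\n", refs.counter, bytes_sent.counter);
        return 0;
}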
43993diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
43994index 2f8f82d..191de37 100644
43995--- a/drivers/video/uvesafb.c
43996+++ b/drivers/video/uvesafb.c
43997@@ -19,6 +19,7 @@
43998 #include <linux/io.h>
43999 #include <linux/mutex.h>
44000 #include <linux/slab.h>
44001+#include <linux/moduleloader.h>
44002 #include <video/edid.h>
44003 #include <video/uvesafb.h>
44004 #ifdef CONFIG_X86
44005@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
44006 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
44007 par->pmi_setpal = par->ypan = 0;
44008 } else {
44009+
44010+#ifdef CONFIG_PAX_KERNEXEC
44011+#ifdef CONFIG_MODULES
44012+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
44013+#endif
44014+ if (!par->pmi_code) {
44015+ par->pmi_setpal = par->ypan = 0;
44016+ return 0;
44017+ }
44018+#endif
44019+
44020 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
44021 + task->t.regs.edi);
44022+
44023+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44024+ pax_open_kernel();
44025+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
44026+ pax_close_kernel();
44027+
44028+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
44029+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
44030+#else
44031 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
44032 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
44033+#endif
44034+
44035 printk(KERN_INFO "uvesafb: protected mode interface info at "
44036 "%04x:%04x\n",
44037 (u16)task->t.regs.es, (u16)task->t.regs.edi);
44038@@ -818,13 +841,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
44039 par->ypan = ypan;
44040
44041 if (par->pmi_setpal || par->ypan) {
44042+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
44043 if (__supported_pte_mask & _PAGE_NX) {
44044 par->pmi_setpal = par->ypan = 0;
44045 printk(KERN_WARNING "uvesafb: NX protection is actively."
44046 "We have better not to use the PMI.\n");
44047- } else {
44048+ } else
44049+#endif
44050 uvesafb_vbe_getpmi(task, par);
44051- }
44052 }
44053 #else
44054 /* The protected mode interface is not available on non-x86. */
44055@@ -1838,6 +1862,11 @@ out:
44056 if (par->vbe_modes)
44057 kfree(par->vbe_modes);
44058
44059+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44060+ if (par->pmi_code)
44061+ module_free_exec(NULL, par->pmi_code);
44062+#endif
44063+
44064 framebuffer_release(info);
44065 return err;
44066 }
44067@@ -1864,6 +1893,12 @@ static int uvesafb_remove(struct platform_device *dev)
44068 kfree(par->vbe_state_orig);
44069 if (par->vbe_state_saved)
44070 kfree(par->vbe_state_saved);
44071+
44072+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44073+ if (par->pmi_code)
44074+ module_free_exec(NULL, par->pmi_code);
44075+#endif
44076+
44077 }
44078
44079 framebuffer_release(info);
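The uvesafb changes implement the KERNEXEC-safe way to call the BIOS protected-mode interface: instead of executing the PMI blob in place through the phys_to_virt() window (non-executable under KERNEXEC), the blob is copied into executable module memory via module_alloc_exec() inside a pax_open_kernel()/pax_close_kernel() window, the entry pointers are rebased into the copy with ktva_ktla(), and the allocation is released with module_free_exec() on teardown. A hedged userspace analogue, with mmap() standing in for module_alloc_exec() (the header layout is invented for the demo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        /* Fake "PMI block": a header of 16-bit entry offsets, then code. */
        uint16_t pmi_base[32] = { 0, 8, 12 };   /* [1]=start off, [2]=pal off */
        size_t pmi_size = sizeof(pmi_base);

        /* Kernel: module_alloc_exec(); here: anonymous W+X mapping (may be
         * refused on hardened systems — this is only a sketch). */
        void *pmi_code = mmap(NULL, pmi_size, PROT_READ | PROT_WRITE | PROT_EXEC,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (pmi_code == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        memcpy(pmi_code, pmi_base, pmi_size);   /* kernel: under pax_open_kernel() */

        /* Entry pointers are derived from offsets stored in the blob. */
        void *pmi_start = (char *)pmi_code + pmi_base[1];
        void *pmi_pal   = (char *)pmi_code + pmi_base[2];

        printf("pmi_start=%p pmi_pal=%p\n", pmi_start, pmi_pal);
        munmap(pmi_code, pmi_size);
        return 0;
}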
44080diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
44081index 501b340..86bd4cf 100644
44082--- a/drivers/video/vesafb.c
44083+++ b/drivers/video/vesafb.c
44084@@ -9,6 +9,7 @@
44085 */
44086
44087 #include <linux/module.h>
44088+#include <linux/moduleloader.h>
44089 #include <linux/kernel.h>
44090 #include <linux/errno.h>
44091 #include <linux/string.h>
44092@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
44093 static int vram_total __initdata; /* Set total amount of memory */
44094 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
44095 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
44096-static void (*pmi_start)(void) __read_mostly;
44097-static void (*pmi_pal) (void) __read_mostly;
44098+static void (*pmi_start)(void) __read_only;
44099+static void (*pmi_pal) (void) __read_only;
44100 static int depth __read_mostly;
44101 static int vga_compat __read_mostly;
44102 /* --------------------------------------------------------------------- */
44103@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
44104 unsigned int size_vmode;
44105 unsigned int size_remap;
44106 unsigned int size_total;
44107+ void *pmi_code = NULL;
44108
44109 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
44110 return -ENODEV;
44111@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
44112 size_remap = size_total;
44113 vesafb_fix.smem_len = size_remap;
44114
44115-#ifndef __i386__
44116- screen_info.vesapm_seg = 0;
44117-#endif
44118-
44119 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
44120 printk(KERN_WARNING
44121 "vesafb: cannot reserve video memory at 0x%lx\n",
44122@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
44123 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
44124 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
44125
44126+#ifdef __i386__
44127+
44128+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44129+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
44130+ if (!pmi_code)
44131+#elif !defined(CONFIG_PAX_KERNEXEC)
44132+ if (0)
44133+#endif
44134+
44135+#endif
44136+ screen_info.vesapm_seg = 0;
44137+
44138 if (screen_info.vesapm_seg) {
44139- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
44140- screen_info.vesapm_seg,screen_info.vesapm_off);
44141+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
44142+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
44143 }
44144
44145 if (screen_info.vesapm_seg < 0xc000)
44146@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
44147
44148 if (ypan || pmi_setpal) {
44149 unsigned short *pmi_base;
44150+
44151 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
44152- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
44153- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
44154+
44155+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44156+ pax_open_kernel();
44157+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
44158+#else
44159+ pmi_code = pmi_base;
44160+#endif
44161+
44162+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
44163+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
44164+
44165+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44166+ pmi_start = ktva_ktla(pmi_start);
44167+ pmi_pal = ktva_ktla(pmi_pal);
44168+ pax_close_kernel();
44169+#endif
44170+
44171 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
44172 if (pmi_base[3]) {
44173 printk(KERN_INFO "vesafb: pmi: ports = ");
44174@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
44175 info->node, info->fix.id);
44176 return 0;
44177 err:
44178+
44179+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
44180+ module_free_exec(NULL, pmi_code);
44181+#endif
44182+
44183 if (info->screen_base)
44184 iounmap(info->screen_base);
44185 framebuffer_release(info);
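vesafb receives the same treatment sketched above for uvesafb: on i386 the PMI blob is copied into module_alloc_exec() memory under pax_open_kernel(), the entry pointers are rebased with ktva_ktla(), and module_free_exec() reclaims the buffer on the error path. Two hardening tweaks ride along: pmi_start and pmi_pal move from __read_mostly to __read_only, so once assigned at probe time these function pointers can no longer be redirected, and the probe printk now also reports vesapm_size, the length that bounds the executable copy.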
44186diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
44187index 88714ae..16c2e11 100644
44188--- a/drivers/video/via/via_clock.h
44189+++ b/drivers/video/via/via_clock.h
44190@@ -56,7 +56,7 @@ struct via_clock {
44191
44192 void (*set_engine_pll_state)(u8 state);
44193 void (*set_engine_pll)(struct via_pll_config config);
44194-};
44195+} __no_const;
44196
44197
44198 static inline u32 get_pll_internal_frequency(u32 ref_freq,
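The __no_const annotation opts struct via_clock out of the PaX constify plugin, which otherwise turns structures composed purely of function pointers into read-only data. via_clock is filled in per chipset at probe time, so it must stay writable. A compilable sketch of the trade-off — the attribute below is an illustrative no-op, the real one is provided by the gcc plugin:

#include <stdio.h>

#define __no_const      /* illustrative no-op; the real macro is plugin-driven */

struct fb_ops {                 /* all function pointers: would be constified */
        int (*open)(void);
};

struct via_clock {              /* assigned at runtime: needs the opt-out */
        void (*set_engine_pll_state)(unsigned char state);
} __no_const;

static int myfb_open(void) { return 0; }
static void chip_a_pll_state(unsigned char s) { printf("pll state %d\n", s); }

static const struct fb_ops myfb_ops = { .open = myfb_open };    /* set once */

int main(void)
{
        struct via_clock clk;   /* filled in "at probe time" */

        clk.set_engine_pll_state = chip_a_pll_state;
        clk.set_engine_pll_state(1);
        return myfb_ops.open();
}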
44199diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
44200index 6b1b7e1..b2fa4d5 100644
44201--- a/drivers/virtio/virtio_mmio.c
44202+++ b/drivers/virtio/virtio_mmio.c
44203@@ -530,7 +530,7 @@ static int vm_cmdline_set(const char *device,
44204
44205 resources[0].end = memparse(device, &str) - 1;
44206
44207- processed = sscanf(str, "@%lli:%u%n:%d%n",
44208+ processed = sscanf(str, "@%lli:%llu%n:%d%n",
44209 &base, &resources[1].start, &consumed,
44210 &vm_cmdline_id, &consumed);
44211
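The one-character virtio_mmio fix above addresses a classic width mismatch: resources[1].start is a resource_size_t, 64 bits on most configurations, but "%u" tells sscanf to store through an unsigned int pointer, writing only part of the object and leaving the rest stale (or, with less lucky layout, clobbering a neighbor). The bug class in a runnable sketch:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t start = 0xdeadbeefcafef00dULL;

        /* Wrong: "%u" stores through an unsigned int *, so only the low
         * 32 bits of 'start' would be written:
         *      sscanf("42", "%u", (unsigned int *)&start);
         */

        /* Right: width-matched 64-bit conversion. */
        sscanf("42", "%" SCNu64, &start);
        printf("start=%" PRIu64 "\n", start);
        return 0;
}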
44212diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
44213index fef20db..d28b1ab 100644
44214--- a/drivers/xen/xenfs/xenstored.c
44215+++ b/drivers/xen/xenfs/xenstored.c
44216@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
44217 static int xsd_kva_open(struct inode *inode, struct file *file)
44218 {
44219 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
44220+#ifdef CONFIG_GRKERNSEC_HIDESYM
44221+ NULL);
44222+#else
44223 xen_store_interface);
44224+#endif
44225+
44226 if (!file->private_data)
44227 return -ENOMEM;
44228 return 0;
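Under GRKERNSEC_HIDESYM the xenfs node stops formatting the real xen_store_interface address into a string that userspace can read; printing NULL keeps the file well-formed while revealing nothing about kernel layout. The hygiene rule sketched in userspace (HIDESYM below is a stand-in macro, not the real config symbol):

#include <stdio.h>

int main(void)
{
        int secret;
        void *xen_store_interface = &secret;    /* stand-in kernel pointer */
        char buf[32];

#ifdef HIDESYM  /* illustrative stand-in for CONFIG_GRKERNSEC_HIDESYM */
        (void)xen_store_interface;
        snprintf(buf, sizeof(buf), "0x%p", (void *)NULL);
#else
        snprintf(buf, sizeof(buf), "0x%p", xen_store_interface);
#endif
        puts(buf);      /* never the raw address when hiding symbols */
        return 0;
}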
44229diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
44230index 890bed5..17ae73e 100644
44231--- a/fs/9p/vfs_inode.c
44232+++ b/fs/9p/vfs_inode.c
44233@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
44234 void
44235 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44236 {
44237- char *s = nd_get_link(nd);
44238+ const char *s = nd_get_link(nd);
44239
44240 p9_debug(P9_DEBUG_VFS, " %s %s\n",
44241 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
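Constifying the nd_get_link() return value here (and in the befs hunk below) is a pure qualifier fix: the put_link paths only read or free the link body, so the const documents that contract and lets the compiler reject accidental writes through the pointer.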
44242diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
44243index 0efd152..b5802ad 100644
44244--- a/fs/Kconfig.binfmt
44245+++ b/fs/Kconfig.binfmt
44246@@ -89,7 +89,7 @@ config HAVE_AOUT
44247
44248 config BINFMT_AOUT
44249 tristate "Kernel support for a.out and ECOFF binaries"
44250- depends on HAVE_AOUT
44251+ depends on HAVE_AOUT && BROKEN
44252 ---help---
44253 A.out (Assembler.OUTput) is a set of formats for libraries and
44254 executables used in the earliest versions of UNIX. Linux used
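Adding a BROKEN dependency effectively removes a.out support from grsecurity kernels, since the option can no longer be enabled through normal configuration. The legacy loader sits poorly with PaX policies — its data segment was historically mapped PROT_EXEC, which the binfmt_aout.c hunks below also strip for kernels that do still build it.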
44255diff --git a/fs/aio.c b/fs/aio.c
44256index 71f613c..9d01f1f 100644
44257--- a/fs/aio.c
44258+++ b/fs/aio.c
44259@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
44260 size += sizeof(struct io_event) * nr_events;
44261 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
44262
44263- if (nr_pages < 0)
44264+ if (nr_pages <= 0)
44265 return -EINVAL;
44266
44267 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
44268@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
44269 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44270 {
44271 ssize_t ret;
44272+ struct iovec iovstack;
44273
44274 #ifdef CONFIG_COMPAT
44275 if (compat)
44276 ret = compat_rw_copy_check_uvector(type,
44277 (struct compat_iovec __user *)kiocb->ki_buf,
44278- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44279+ kiocb->ki_nbytes, 1, &iovstack,
44280 &kiocb->ki_iovec);
44281 else
44282 #endif
44283 ret = rw_copy_check_uvector(type,
44284 (struct iovec __user *)kiocb->ki_buf,
44285- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
44286+ kiocb->ki_nbytes, 1, &iovstack,
44287 &kiocb->ki_iovec);
44288 if (ret < 0)
44289 goto out;
44290@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
44291 if (ret < 0)
44292 goto out;
44293
44294+ if (kiocb->ki_iovec == &iovstack) {
44295+ kiocb->ki_inline_vec = iovstack;
44296+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
44297+ }
44298 kiocb->ki_nr_segs = kiocb->ki_nbytes;
44299 kiocb->ki_cur_seg = 0;
44300 /* ki_nbytes/left now reflect bytes instead of segs */
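The aio change bounces the single-segment fast path through a stack iovec instead of handing rw_copy_check_uvector() the kiocb's embedded ki_inline_vec directly; the result is committed into the kiocb only after validation succeeds, so a failed check cannot leave half-written state behind in the long-lived object. The pattern in miniature — check_uvector() is a made-up stand-in for the kernel helper:

#include <stdio.h>

struct iovec { void *iov_base; size_t iov_len; };
struct kiocb { struct iovec ki_inline_vec; struct iovec *ki_iovec; };

static long check_uvector(struct iovec *fast_pointer, struct iovec **ret_pointer)
{
        /* Fast path: a single segment fits the caller-supplied buffer. */
        fast_pointer->iov_base = NULL;
        fast_pointer->iov_len = 4096;
        *ret_pointer = fast_pointer;
        return 4096;            /* validated byte count */
}

int main(void)
{
        struct kiocb kiocb;
        struct iovec iovstack;

        long ret = check_uvector(&iovstack, &kiocb.ki_iovec);
        if (ret < 0)
                return 1;

        /* Commit only on success: if the validator used the stack
         * buffer, copy it into the kiocb and repoint at the copy. */
        if (kiocb.ki_iovec == &iovstack) {
                kiocb.ki_inline_vec = iovstack;
                kiocb.ki_iovec = &kiocb.ki_inline_vec;
        }
        printf("validated %zu bytes\n", kiocb.ki_iovec->iov_len);
        return 0;
}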
44301diff --git a/fs/attr.c b/fs/attr.c
44302index cce7df5..eaa2731 100644
44303--- a/fs/attr.c
44304+++ b/fs/attr.c
44305@@ -100,6 +100,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
44306 unsigned long limit;
44307
44308 limit = rlimit(RLIMIT_FSIZE);
44309+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
44310 if (limit != RLIM_INFINITY && offset > limit)
44311 goto out_sig;
44312 if (offset > inode->i_sb->s_maxbytes)
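gr_learn_resource() is grsecurity's RBAC learning hook: with learning mode active it records that the current task needed an RLIMIT_FSIZE of at least `offset`, so the generated policy can later grant a tight per-subject limit. Throughout this patch the call is placed immediately before the corresponding rlimit check, as here.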
44313diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
44314index dce436e..55e670d 100644
44315--- a/fs/autofs4/waitq.c
44316+++ b/fs/autofs4/waitq.c
44317@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
44318 {
44319 unsigned long sigpipe, flags;
44320 mm_segment_t fs;
44321- const char *data = (const char *)addr;
44322+ const char __user *data = (const char __force_user *)addr;
44323 ssize_t wr = 0;
44324
44325 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
44326@@ -347,6 +347,10 @@ static int validate_request(struct autofs_wait_queue **wait,
44327 return 1;
44328 }
44329
44330+#ifdef CONFIG_GRKERNSEC_HIDESYM
44331+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
44332+#endif
44333+
44334 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44335 enum autofs_notify notify)
44336 {
44337@@ -380,7 +384,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
44338
44339 /* If this is a direct mount request create a dummy name */
44340 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
44341+#ifdef CONFIG_GRKERNSEC_HIDESYM
44342+ /* this name does get written to userland via autofs4_write() */
44343+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
44344+#else
44345 qstr.len = sprintf(name, "%p", dentry);
44346+#endif
44347 else {
44348 qstr.len = autofs4_getpath(sbi, dentry, &name);
44349 if (!qstr.len) {
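For direct mounts the autofs wait-queue name previously embedded "%p" of the dentry — a raw kernel address that autofs4_write() then hands to the userspace daemon. Under HIDESYM a monotonically increasing counter yields an equally unique but information-free name. The substitution in miniature:

#include <stdio.h>

static unsigned int autofs_dummy_name_id;  /* atomic_unchecked_t in the kernel */

static int make_dummy_name(char *name, size_t sz)
{
        /* Unique per wait queue, but leaks no kernel address. */
        return snprintf(name, sz, "%08x", ++autofs_dummy_name_id);
}

int main(void)
{
        char name[16];

        make_dummy_name(name, sizeof(name));
        puts(name);     /* "00000001" */
        return 0;
}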
44350diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
44351index 2b3bda8..6a2d4be 100644
44352--- a/fs/befs/linuxvfs.c
44353+++ b/fs/befs/linuxvfs.c
44354@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44355 {
44356 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
44357 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
44358- char *link = nd_get_link(nd);
44359+ const char *link = nd_get_link(nd);
44360 if (!IS_ERR(link))
44361 kfree(link);
44362 }
44363diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
44364index 0e7a6f8..332b1ca 100644
44365--- a/fs/binfmt_aout.c
44366+++ b/fs/binfmt_aout.c
44367@@ -16,6 +16,7 @@
44368 #include <linux/string.h>
44369 #include <linux/fs.h>
44370 #include <linux/file.h>
44371+#include <linux/security.h>
44372 #include <linux/stat.h>
44373 #include <linux/fcntl.h>
44374 #include <linux/ptrace.h>
44375@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
44376 #endif
44377 # define START_STACK(u) ((void __user *)u.start_stack)
44378
44379+ memset(&dump, 0, sizeof(dump));
44380+
44381 fs = get_fs();
44382 set_fs(KERNEL_DS);
44383 has_dumped = 1;
44384@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
44385
44386 /* If the size of the dump file exceeds the rlimit, then see what would happen
44387 if we wrote the stack, but not the data area. */
44388+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
44389 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
44390 dump.u_dsize = 0;
44391
44392 /* Make sure we have enough room to write the stack and data areas. */
44393+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
44394 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
44395 dump.u_ssize = 0;
44396
44397@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44398 rlim = rlimit(RLIMIT_DATA);
44399 if (rlim >= RLIM_INFINITY)
44400 rlim = ~0;
44401+
44402+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
44403 if (ex.a_data + ex.a_bss > rlim)
44404 return -ENOMEM;
44405
44406@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44407
44408 install_exec_creds(bprm);
44409
44410+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44411+ current->mm->pax_flags = 0UL;
44412+#endif
44413+
44414+#ifdef CONFIG_PAX_PAGEEXEC
44415+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
44416+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
44417+
44418+#ifdef CONFIG_PAX_EMUTRAMP
44419+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
44420+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
44421+#endif
44422+
44423+#ifdef CONFIG_PAX_MPROTECT
44424+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
44425+ current->mm->pax_flags |= MF_PAX_MPROTECT;
44426+#endif
44427+
44428+ }
44429+#endif
44430+
44431 if (N_MAGIC(ex) == OMAGIC) {
44432 unsigned long text_addr, map_size;
44433 loff_t pos;
44434@@ -332,7 +360,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
44435 }
44436
44437 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
44438- PROT_READ | PROT_WRITE | PROT_EXEC,
44439+ PROT_READ | PROT_WRITE,
44440 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
44441 fd_offset + ex.a_text);
44442 if (error != N_DATADDR(ex)) {
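The binfmt_aout changes do four things: zero the coredump header before use (so uninitialized padding never reaches the core file), feed the core and data rlimit checks into gr_learn_resource(), derive per-task PaX flags from spare bits in the a.out header's N_FLAGS, and drop PROT_EXEC from the data-segment mapping. The flag derivation follows an opt-out convention, sketched below with illustrative bit values (the real ones live in the patched a.out headers):

#include <stdio.h>

#define F_PAX_PAGEEXEC  0x01    /* illustrative bit assignments */
#define F_PAX_EMUTRAMP  0x02
#define F_PAX_MPROTECT  0x04

#define MF_PAX_PAGEEXEC 0x01
#define MF_PAX_EMUTRAMP 0x02
#define MF_PAX_MPROTECT 0x04

static unsigned long parse_aout_pax_flags(unsigned long n_flags)
{
        unsigned long pax_flags = 0;

        if (!(n_flags & F_PAX_PAGEEXEC)) {      /* default on; flag opts out */
                pax_flags |= MF_PAX_PAGEEXEC;

                if (n_flags & F_PAX_EMUTRAMP)   /* opt-in within PAGEEXEC */
                        pax_flags |= MF_PAX_EMUTRAMP;
                if (!(n_flags & F_PAX_MPROTECT))
                        pax_flags |= MF_PAX_MPROTECT;
        }
        return pax_flags;
}

int main(void)
{
        printf("unmarked binary: %#lx\n", parse_aout_pax_flags(0));
        return 0;
}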
44443diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
44444index fbd9f60..0b845dd 100644
44445--- a/fs/binfmt_elf.c
44446+++ b/fs/binfmt_elf.c
44447@@ -33,6 +33,7 @@
44448 #include <linux/elf.h>
44449 #include <linux/utsname.h>
44450 #include <linux/coredump.h>
44451+#include <linux/xattr.h>
44452 #include <asm/uaccess.h>
44453 #include <asm/param.h>
44454 #include <asm/page.h>
44455@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
44456 #define elf_core_dump NULL
44457 #endif
44458
44459+#ifdef CONFIG_PAX_MPROTECT
44460+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
44461+#endif
44462+
44463 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
44464 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
44465 #else
44466@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
44467 .load_binary = load_elf_binary,
44468 .load_shlib = load_elf_library,
44469 .core_dump = elf_core_dump,
44470+
44471+#ifdef CONFIG_PAX_MPROTECT
44472+ .handle_mprotect= elf_handle_mprotect,
44473+#endif
44474+
44475 .min_coredump = ELF_EXEC_PAGESIZE,
44476 };
44477
44478@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
44479
44480 static int set_brk(unsigned long start, unsigned long end)
44481 {
44482+ unsigned long e = end;
44483+
44484 start = ELF_PAGEALIGN(start);
44485 end = ELF_PAGEALIGN(end);
44486 if (end > start) {
44487@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
44488 if (BAD_ADDR(addr))
44489 return addr;
44490 }
44491- current->mm->start_brk = current->mm->brk = end;
44492+ current->mm->start_brk = current->mm->brk = e;
44493 return 0;
44494 }
44495
44496@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44497 elf_addr_t __user *u_rand_bytes;
44498 const char *k_platform = ELF_PLATFORM;
44499 const char *k_base_platform = ELF_BASE_PLATFORM;
44500- unsigned char k_rand_bytes[16];
44501+ u32 k_rand_bytes[4];
44502 int items;
44503 elf_addr_t *elf_info;
44504 int ei_index = 0;
44505 const struct cred *cred = current_cred();
44506 struct vm_area_struct *vma;
44507+ unsigned long saved_auxv[AT_VECTOR_SIZE];
44508
44509 /*
44510 * In some cases (e.g. Hyper-Threading), we want to avoid L1
44511@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44512 * Generate 16 random bytes for userspace PRNG seeding.
44513 */
44514 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
44515- u_rand_bytes = (elf_addr_t __user *)
44516- STACK_ALLOC(p, sizeof(k_rand_bytes));
44517+ srandom32(k_rand_bytes[0] ^ random32());
44518+ srandom32(k_rand_bytes[1] ^ random32());
44519+ srandom32(k_rand_bytes[2] ^ random32());
44520+ srandom32(k_rand_bytes[3] ^ random32());
44521+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
44522+ u_rand_bytes = (elf_addr_t __user *) p;
44523 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
44524 return -EFAULT;
44525
44526@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
44527 return -EFAULT;
44528 current->mm->env_end = p;
44529
44530+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
44531+
44532 /* Put the elf_info on the stack in the right place. */
44533 sp = (elf_addr_t __user *)envp + 1;
44534- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
44535+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
44536 return -EFAULT;
44537 return 0;
44538 }
44539@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
44540 an ELF header */
44541
44542 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44543- struct file *interpreter, unsigned long *interp_map_addr,
44544- unsigned long no_base)
44545+ struct file *interpreter, unsigned long no_base)
44546 {
44547 struct elf_phdr *elf_phdata;
44548 struct elf_phdr *eppnt;
44549- unsigned long load_addr = 0;
44550+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
44551 int load_addr_set = 0;
44552 unsigned long last_bss = 0, elf_bss = 0;
44553- unsigned long error = ~0UL;
44554+ unsigned long error = -EINVAL;
44555 unsigned long total_size;
44556 int retval, i, size;
44557
44558@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44559 goto out_close;
44560 }
44561
44562+#ifdef CONFIG_PAX_SEGMEXEC
44563+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
44564+ pax_task_size = SEGMEXEC_TASK_SIZE;
44565+#endif
44566+
44567 eppnt = elf_phdata;
44568 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
44569 if (eppnt->p_type == PT_LOAD) {
44570@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44571 map_addr = elf_map(interpreter, load_addr + vaddr,
44572 eppnt, elf_prot, elf_type, total_size);
44573 total_size = 0;
44574- if (!*interp_map_addr)
44575- *interp_map_addr = map_addr;
44576 error = map_addr;
44577 if (BAD_ADDR(map_addr))
44578 goto out_close;
44579@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
44580 k = load_addr + eppnt->p_vaddr;
44581 if (BAD_ADDR(k) ||
44582 eppnt->p_filesz > eppnt->p_memsz ||
44583- eppnt->p_memsz > TASK_SIZE ||
44584- TASK_SIZE - eppnt->p_memsz < k) {
44585+ eppnt->p_memsz > pax_task_size ||
44586+ pax_task_size - eppnt->p_memsz < k) {
44587 error = -ENOMEM;
44588 goto out_close;
44589 }
44590@@ -530,6 +551,315 @@ out:
44591 return error;
44592 }
44593
44594+#ifdef CONFIG_PAX_PT_PAX_FLAGS
44595+#ifdef CONFIG_PAX_SOFTMODE
44596+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
44597+{
44598+ unsigned long pax_flags = 0UL;
44599+
44600+#ifdef CONFIG_PAX_PAGEEXEC
44601+ if (elf_phdata->p_flags & PF_PAGEEXEC)
44602+ pax_flags |= MF_PAX_PAGEEXEC;
44603+#endif
44604+
44605+#ifdef CONFIG_PAX_SEGMEXEC
44606+ if (elf_phdata->p_flags & PF_SEGMEXEC)
44607+ pax_flags |= MF_PAX_SEGMEXEC;
44608+#endif
44609+
44610+#ifdef CONFIG_PAX_EMUTRAMP
44611+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
44612+ pax_flags |= MF_PAX_EMUTRAMP;
44613+#endif
44614+
44615+#ifdef CONFIG_PAX_MPROTECT
44616+ if (elf_phdata->p_flags & PF_MPROTECT)
44617+ pax_flags |= MF_PAX_MPROTECT;
44618+#endif
44619+
44620+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44621+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
44622+ pax_flags |= MF_PAX_RANDMMAP;
44623+#endif
44624+
44625+ return pax_flags;
44626+}
44627+#endif
44628+
44629+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
44630+{
44631+ unsigned long pax_flags = 0UL;
44632+
44633+#ifdef CONFIG_PAX_PAGEEXEC
44634+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
44635+ pax_flags |= MF_PAX_PAGEEXEC;
44636+#endif
44637+
44638+#ifdef CONFIG_PAX_SEGMEXEC
44639+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
44640+ pax_flags |= MF_PAX_SEGMEXEC;
44641+#endif
44642+
44643+#ifdef CONFIG_PAX_EMUTRAMP
44644+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
44645+ pax_flags |= MF_PAX_EMUTRAMP;
44646+#endif
44647+
44648+#ifdef CONFIG_PAX_MPROTECT
44649+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
44650+ pax_flags |= MF_PAX_MPROTECT;
44651+#endif
44652+
44653+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44654+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
44655+ pax_flags |= MF_PAX_RANDMMAP;
44656+#endif
44657+
44658+ return pax_flags;
44659+}
44660+#endif
44661+
44662+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44663+#ifdef CONFIG_PAX_SOFTMODE
44664+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
44665+{
44666+ unsigned long pax_flags = 0UL;
44667+
44668+#ifdef CONFIG_PAX_PAGEEXEC
44669+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
44670+ pax_flags |= MF_PAX_PAGEEXEC;
44671+#endif
44672+
44673+#ifdef CONFIG_PAX_SEGMEXEC
44674+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
44675+ pax_flags |= MF_PAX_SEGMEXEC;
44676+#endif
44677+
44678+#ifdef CONFIG_PAX_EMUTRAMP
44679+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
44680+ pax_flags |= MF_PAX_EMUTRAMP;
44681+#endif
44682+
44683+#ifdef CONFIG_PAX_MPROTECT
44684+ if (pax_flags_softmode & MF_PAX_MPROTECT)
44685+ pax_flags |= MF_PAX_MPROTECT;
44686+#endif
44687+
44688+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44689+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
44690+ pax_flags |= MF_PAX_RANDMMAP;
44691+#endif
44692+
44693+ return pax_flags;
44694+}
44695+#endif
44696+
44697+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
44698+{
44699+ unsigned long pax_flags = 0UL;
44700+
44701+#ifdef CONFIG_PAX_PAGEEXEC
44702+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
44703+ pax_flags |= MF_PAX_PAGEEXEC;
44704+#endif
44705+
44706+#ifdef CONFIG_PAX_SEGMEXEC
44707+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
44708+ pax_flags |= MF_PAX_SEGMEXEC;
44709+#endif
44710+
44711+#ifdef CONFIG_PAX_EMUTRAMP
44712+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
44713+ pax_flags |= MF_PAX_EMUTRAMP;
44714+#endif
44715+
44716+#ifdef CONFIG_PAX_MPROTECT
44717+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
44718+ pax_flags |= MF_PAX_MPROTECT;
44719+#endif
44720+
44721+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
44722+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
44723+ pax_flags |= MF_PAX_RANDMMAP;
44724+#endif
44725+
44726+ return pax_flags;
44727+}
44728+#endif
44729+
44730+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44731+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
44732+{
44733+ unsigned long pax_flags = 0UL;
44734+
44735+#ifdef CONFIG_PAX_EI_PAX
44736+
44737+#ifdef CONFIG_PAX_PAGEEXEC
44738+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
44739+ pax_flags |= MF_PAX_PAGEEXEC;
44740+#endif
44741+
44742+#ifdef CONFIG_PAX_SEGMEXEC
44743+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
44744+ pax_flags |= MF_PAX_SEGMEXEC;
44745+#endif
44746+
44747+#ifdef CONFIG_PAX_EMUTRAMP
44748+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
44749+ pax_flags |= MF_PAX_EMUTRAMP;
44750+#endif
44751+
44752+#ifdef CONFIG_PAX_MPROTECT
44753+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
44754+ pax_flags |= MF_PAX_MPROTECT;
44755+#endif
44756+
44757+#ifdef CONFIG_PAX_ASLR
44758+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
44759+ pax_flags |= MF_PAX_RANDMMAP;
44760+#endif
44761+
44762+#else
44763+
44764+#ifdef CONFIG_PAX_PAGEEXEC
44765+ pax_flags |= MF_PAX_PAGEEXEC;
44766+#endif
44767+
44768+#ifdef CONFIG_PAX_SEGMEXEC
44769+ pax_flags |= MF_PAX_SEGMEXEC;
44770+#endif
44771+
44772+#ifdef CONFIG_PAX_MPROTECT
44773+ pax_flags |= MF_PAX_MPROTECT;
44774+#endif
44775+
44776+#ifdef CONFIG_PAX_RANDMMAP
44777+ if (randomize_va_space)
44778+ pax_flags |= MF_PAX_RANDMMAP;
44779+#endif
44780+
44781+#endif
44782+
44783+ return pax_flags;
44784+}
44785+
44786+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
44787+{
44788+
44789+#ifdef CONFIG_PAX_PT_PAX_FLAGS
44790+ unsigned long i;
44791+
44792+ for (i = 0UL; i < elf_ex->e_phnum; i++)
44793+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
44794+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
44795+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
44796+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
44797+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
44798+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
44799+ return ~0UL;
44800+
44801+#ifdef CONFIG_PAX_SOFTMODE
44802+ if (pax_softmode)
44803+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
44804+ else
44805+#endif
44806+
44807+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
44808+ break;
44809+ }
44810+#endif
44811+
44812+ return ~0UL;
44813+}
44814+
44815+static unsigned long pax_parse_xattr_pax(struct file * const file)
44816+{
44817+
44818+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
44819+ ssize_t xattr_size, i;
44820+ unsigned char xattr_value[5];
44821+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
44822+
44823+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
44824+ if (xattr_size <= 0 || xattr_size > 5)
44825+ return ~0UL;
44826+
44827+ for (i = 0; i < xattr_size; i++)
44828+ switch (xattr_value[i]) {
44829+ default:
44830+ return ~0UL;
44831+
44832+#define parse_flag(option1, option2, flag) \
44833+ case option1: \
44834+ if (pax_flags_hardmode & MF_PAX_##flag) \
44835+ return ~0UL; \
44836+ pax_flags_hardmode |= MF_PAX_##flag; \
44837+ break; \
44838+ case option2: \
44839+ if (pax_flags_softmode & MF_PAX_##flag) \
44840+ return ~0UL; \
44841+ pax_flags_softmode |= MF_PAX_##flag; \
44842+ break;
44843+
44844+ parse_flag('p', 'P', PAGEEXEC);
44845+ parse_flag('e', 'E', EMUTRAMP);
44846+ parse_flag('m', 'M', MPROTECT);
44847+ parse_flag('r', 'R', RANDMMAP);
44848+ parse_flag('s', 'S', SEGMEXEC);
44849+
44850+#undef parse_flag
44851+ }
44852+
44853+ if (pax_flags_hardmode & pax_flags_softmode)
44854+ return ~0UL;
44855+
44856+#ifdef CONFIG_PAX_SOFTMODE
44857+ if (pax_softmode)
44858+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
44859+ else
44860+#endif
44861+
44862+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
44863+#else
44864+ return ~0UL;
44865+#endif
44866+
44867+}
44868+
44869+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
44870+{
44871+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
44872+
44873+ pax_flags = pax_parse_ei_pax(elf_ex);
44874+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
44875+ xattr_pax_flags = pax_parse_xattr_pax(file);
44876+
44877+ if (pt_pax_flags == ~0UL)
44878+ pt_pax_flags = xattr_pax_flags;
44879+ else if (xattr_pax_flags == ~0UL)
44880+ xattr_pax_flags = pt_pax_flags;
44881+ if (pt_pax_flags != xattr_pax_flags)
44882+ return -EINVAL;
44883+ if (pt_pax_flags != ~0UL)
44884+ pax_flags = pt_pax_flags;
44885+
44886+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
44887+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44888+ if ((__supported_pte_mask & _PAGE_NX))
44889+ pax_flags &= ~MF_PAX_SEGMEXEC;
44890+ else
44891+ pax_flags &= ~MF_PAX_PAGEEXEC;
44892+ }
44893+#endif
44894+
44895+ if (0 > pax_check_flags(&pax_flags))
44896+ return -EINVAL;
44897+
44898+ current->mm->pax_flags = pax_flags;
44899+ return 0;
44900+}
44901+#endif
44902+
44903 /*
44904 * These are the functions used to load ELF style executables and shared
44905 * libraries. There is no binary dependent code anywhere else.
44906@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
44907 {
44908 unsigned int random_variable = 0;
44909
44910+#ifdef CONFIG_PAX_RANDUSTACK
44911+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
44912+ return stack_top - current->mm->delta_stack;
44913+#endif
44914+
44915 if ((current->flags & PF_RANDOMIZE) &&
44916 !(current->personality & ADDR_NO_RANDOMIZE)) {
44917 random_variable = get_random_int() & STACK_RND_MASK;
44918@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44919 unsigned long load_addr = 0, load_bias = 0;
44920 int load_addr_set = 0;
44921 char * elf_interpreter = NULL;
44922- unsigned long error;
44923+ unsigned long error = 0;
44924 struct elf_phdr *elf_ppnt, *elf_phdata;
44925 unsigned long elf_bss, elf_brk;
44926 int retval, i;
44927@@ -574,11 +909,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44928 unsigned long start_code, end_code, start_data, end_data;
44929 unsigned long reloc_func_desc __maybe_unused = 0;
44930 int executable_stack = EXSTACK_DEFAULT;
44931- unsigned long def_flags = 0;
44932 struct {
44933 struct elfhdr elf_ex;
44934 struct elfhdr interp_elf_ex;
44935 } *loc;
44936+ unsigned long pax_task_size = TASK_SIZE;
44937
44938 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
44939 if (!loc) {
44940@@ -714,11 +1049,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
44941 goto out_free_dentry;
44942
44943 /* OK, This is the point of no return */
44944- current->mm->def_flags = def_flags;
44945+
44946+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44947+ current->mm->pax_flags = 0UL;
44948+#endif
44949+
44950+#ifdef CONFIG_PAX_DLRESOLVE
44951+ current->mm->call_dl_resolve = 0UL;
44952+#endif
44953+
44954+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
44955+ current->mm->call_syscall = 0UL;
44956+#endif
44957+
44958+#ifdef CONFIG_PAX_ASLR
44959+ current->mm->delta_mmap = 0UL;
44960+ current->mm->delta_stack = 0UL;
44961+#endif
44962+
44963+ current->mm->def_flags = 0;
44964+
44965+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44966+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
44967+ send_sig(SIGKILL, current, 0);
44968+ goto out_free_dentry;
44969+ }
44970+#endif
44971+
44972+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
44973+ pax_set_initial_flags(bprm);
44974+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
44975+ if (pax_set_initial_flags_func)
44976+ (pax_set_initial_flags_func)(bprm);
44977+#endif
44978+
44979+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
44980+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
44981+ current->mm->context.user_cs_limit = PAGE_SIZE;
44982+ current->mm->def_flags |= VM_PAGEEXEC;
44983+ }
44984+#endif
44985+
44986+#ifdef CONFIG_PAX_SEGMEXEC
44987+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
44988+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
44989+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
44990+ pax_task_size = SEGMEXEC_TASK_SIZE;
44991+ current->mm->def_flags |= VM_NOHUGEPAGE;
44992+ }
44993+#endif
44994+
44995+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
44996+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44997+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
44998+ put_cpu();
44999+ }
45000+#endif
45001
45002 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
45003 may depend on the personality. */
45004 SET_PERSONALITY(loc->elf_ex);
45005+
45006+#ifdef CONFIG_PAX_ASLR
45007+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
45008+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
45009+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
45010+ }
45011+#endif
45012+
45013+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
45014+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
45015+ executable_stack = EXSTACK_DISABLE_X;
45016+ current->personality &= ~READ_IMPLIES_EXEC;
45017+ } else
45018+#endif
45019+
45020 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
45021 current->personality |= READ_IMPLIES_EXEC;
45022
45023@@ -809,6 +1214,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
45024 #else
45025 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
45026 #endif
45027+
45028+#ifdef CONFIG_PAX_RANDMMAP
45029+ /* PaX: randomize base address at the default exe base if requested */
45030+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
45031+#ifdef CONFIG_SPARC64
45032+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
45033+#else
45034+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
45035+#endif
45036+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
45037+ elf_flags |= MAP_FIXED;
45038+ }
45039+#endif
45040+
45041 }
45042
45043 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
45044@@ -841,9 +1260,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
45045 * allowed task size. Note that p_filesz must always be
45046 * <= p_memsz so it is only necessary to check p_memsz.
45047 */
45048- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
45049- elf_ppnt->p_memsz > TASK_SIZE ||
45050- TASK_SIZE - elf_ppnt->p_memsz < k) {
45051+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
45052+ elf_ppnt->p_memsz > pax_task_size ||
45053+ pax_task_size - elf_ppnt->p_memsz < k) {
45054 /* set_brk can never work. Avoid overflows. */
45055 send_sig(SIGKILL, current, 0);
45056 retval = -EINVAL;
45057@@ -882,17 +1301,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
45058 goto out_free_dentry;
45059 }
45060 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
45061- send_sig(SIGSEGV, current, 0);
45062- retval = -EFAULT; /* Nobody gets to see this, but.. */
45063- goto out_free_dentry;
45064+ /*
45065+ * This bss-zeroing can fail if the ELF
45066+ * file specifies odd protections. So
45067+ * we don't check the return value
45068+ */
45069 }
45070
45071+#ifdef CONFIG_PAX_RANDMMAP
45072+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
45073+ unsigned long start, size;
45074+
45075+ start = ELF_PAGEALIGN(elf_brk);
45076+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
45077+ down_read(&current->mm->mmap_sem);
45078+ retval = -ENOMEM;
45079+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
45080+ unsigned long prot = PROT_NONE;
45081+
45082+ up_read(&current->mm->mmap_sem);
45083+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
45084+// if (current->personality & ADDR_NO_RANDOMIZE)
45085+// prot = PROT_READ;
45086+ start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
45087+ retval = IS_ERR_VALUE(start) ? start : 0;
45088+ } else
45089+ up_read(&current->mm->mmap_sem);
45090+ if (retval == 0)
45091+ retval = set_brk(start + size, start + size + PAGE_SIZE);
45092+ if (retval < 0) {
45093+ send_sig(SIGKILL, current, 0);
45094+ goto out_free_dentry;
45095+ }
45096+ }
45097+#endif
45098+
45099 if (elf_interpreter) {
45100- unsigned long interp_map_addr = 0;
45101-
45102 elf_entry = load_elf_interp(&loc->interp_elf_ex,
45103 interpreter,
45104- &interp_map_addr,
45105 load_bias);
45106 if (!IS_ERR((void *)elf_entry)) {
45107 /*
45108@@ -1114,7 +1560,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
45109 * Decide what to dump of a segment, part, all or none.
45110 */
45111 static unsigned long vma_dump_size(struct vm_area_struct *vma,
45112- unsigned long mm_flags)
45113+ unsigned long mm_flags, long signr)
45114 {
45115 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
45116
45117@@ -1151,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
45118 if (vma->vm_file == NULL)
45119 return 0;
45120
45121- if (FILTER(MAPPED_PRIVATE))
45122+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
45123 goto whole;
45124
45125 /*
45126@@ -1373,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
45127 {
45128 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
45129 int i = 0;
45130- do
45131+ do {
45132 i += 2;
45133- while (auxv[i - 2] != AT_NULL);
45134+ } while (auxv[i - 2] != AT_NULL);
45135 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
45136 }
45137
45138@@ -2003,14 +2449,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
45139 }
45140
45141 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
45142- unsigned long mm_flags)
45143+ struct coredump_params *cprm)
45144 {
45145 struct vm_area_struct *vma;
45146 size_t size = 0;
45147
45148 for (vma = first_vma(current, gate_vma); vma != NULL;
45149 vma = next_vma(vma, gate_vma))
45150- size += vma_dump_size(vma, mm_flags);
45151+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45152 return size;
45153 }
45154
45155@@ -2104,7 +2550,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45156
45157 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
45158
45159- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
45160+ offset += elf_core_vma_data_size(gate_vma, cprm);
45161 offset += elf_core_extra_data_size();
45162 e_shoff = offset;
45163
45164@@ -2118,10 +2564,12 @@ static int elf_core_dump(struct coredump_params *cprm)
45165 offset = dataoff;
45166
45167 size += sizeof(*elf);
45168+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45169 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
45170 goto end_coredump;
45171
45172 size += sizeof(*phdr4note);
45173+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45174 if (size > cprm->limit
45175 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
45176 goto end_coredump;
45177@@ -2135,7 +2583,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45178 phdr.p_offset = offset;
45179 phdr.p_vaddr = vma->vm_start;
45180 phdr.p_paddr = 0;
45181- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
45182+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45183 phdr.p_memsz = vma->vm_end - vma->vm_start;
45184 offset += phdr.p_filesz;
45185 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
45186@@ -2146,6 +2594,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45187 phdr.p_align = ELF_EXEC_PAGESIZE;
45188
45189 size += sizeof(phdr);
45190+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45191 if (size > cprm->limit
45192 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
45193 goto end_coredump;
45194@@ -2170,7 +2619,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45195 unsigned long addr;
45196 unsigned long end;
45197
45198- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
45199+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
45200
45201 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
45202 struct page *page;
45203@@ -2179,6 +2628,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45204 page = get_dump_page(addr);
45205 if (page) {
45206 void *kaddr = kmap(page);
45207+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
45208 stop = ((size += PAGE_SIZE) > cprm->limit) ||
45209 !dump_write(cprm->file, kaddr,
45210 PAGE_SIZE);
45211@@ -2196,6 +2646,7 @@ static int elf_core_dump(struct coredump_params *cprm)
45212
45213 if (e_phnum == PN_XNUM) {
45214 size += sizeof(*shdr4extnum);
45215+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
45216 if (size > cprm->limit
45217 || !dump_write(cprm->file, shdr4extnum,
45218 sizeof(*shdr4extnum)))
45219@@ -2216,6 +2667,97 @@ out:
45220
45221 #endif /* CONFIG_ELF_CORE */
45222
45223+#ifdef CONFIG_PAX_MPROTECT
45224+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
45225+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly,
45226+ * we'll remove VM_MAYWRITE for good on RELRO segments.
45227+ *
45228+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
45229+ * basis, because we want to allow the common case and not the special ones.
45230+ */
45231+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
45232+{
45233+ struct elfhdr elf_h;
45234+ struct elf_phdr elf_p;
45235+ unsigned long i;
45236+ unsigned long oldflags;
45237+ bool is_textrel_rw, is_textrel_rx, is_relro;
45238+
45239+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
45240+ return;
45241+
45242+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
45243+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
45244+
45245+#ifdef CONFIG_PAX_ELFRELOCS
45246+ /* possible TEXTREL */
45247+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
45248+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
45249+#else
45250+ is_textrel_rw = false;
45251+ is_textrel_rx = false;
45252+#endif
45253+
45254+ /* possible RELRO */
45255+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
45256+
45257+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
45258+ return;
45259+
45260+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
45261+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
45262+
45263+#ifdef CONFIG_PAX_ETEXECRELOCS
45264+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45265+#else
45266+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
45267+#endif
45268+
45269+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
45270+ !elf_check_arch(&elf_h) ||
45271+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
45272+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
45273+ return;
45274+
45275+ for (i = 0UL; i < elf_h.e_phnum; i++) {
45276+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
45277+ return;
45278+ switch (elf_p.p_type) {
45279+ case PT_DYNAMIC:
45280+ if (!is_textrel_rw && !is_textrel_rx)
45281+ continue;
45282+ i = 0UL;
45283+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
45284+ elf_dyn dyn;
45285+
45286+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
45287+ return;
45288+ if (dyn.d_tag == DT_NULL)
45289+ return;
45290+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
45291+ gr_log_textrel(vma);
45292+ if (is_textrel_rw)
45293+ vma->vm_flags |= VM_MAYWRITE;
45294+ else
45295+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
45296+ vma->vm_flags &= ~VM_MAYWRITE;
45297+ return;
45298+ }
45299+ i++;
45300+ }
45301+ return;
45302+
45303+ case PT_GNU_RELRO:
45304+ if (!is_relro)
45305+ continue;
45306+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
45307+ vma->vm_flags &= ~VM_MAYWRITE;
45308+ return;
45309+ }
45310+ }
45311+}
45312+#endif
45313+
45314 static int __init init_elf_binfmt(void)
45315 {
45316 register_binfmt(&elf_format);
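
A note on the xattr parser added above: pax_parse_xattr_pax() reads up to five of the characters p/P, e/E, m/M, r/R, s/S from the PAX flags attribute, filing each lowercase letter into the hardmode mask and each uppercase letter into the softmode mask (a letter combined with its own uppercase form trips the overlap check and the function returns ~0UL, i.e. "no valid flags"). As a minimal userspace sketch — assuming XATTR_NAME_PAX_FLAGS expands to the conventional "user.pax.flags", and "./app" is a stand-in path — marking a binary could look like:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	/* Under the hardmode parser above, listing 'm' and 'r' withholds
	 * MF_PAX_MPROTECT and MF_PAX_RANDMMAP from this binary; writing
	 * "mM" instead would set the same bit in both masks and the
	 * attribute would be rejected as inconsistent. */
	if (setxattr("./app", "user.pax.flags", "mr", 2, 0) != 0) {
		perror("setxattr");
		return 1;
	}
	return 0;
}
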
45317diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
45318index e280352..7b2f231 100644
45319--- a/fs/binfmt_flat.c
45320+++ b/fs/binfmt_flat.c
45321@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
45322 realdatastart = (unsigned long) -ENOMEM;
45323 printk("Unable to allocate RAM for process data, errno %d\n",
45324 (int)-realdatastart);
45325+ down_write(&current->mm->mmap_sem);
45326 vm_munmap(textpos, text_len);
45327+ up_write(&current->mm->mmap_sem);
45328 ret = realdatastart;
45329 goto err;
45330 }
45331@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45332 }
45333 if (IS_ERR_VALUE(result)) {
45334 printk("Unable to read data+bss, errno %d\n", (int)-result);
45335+ down_write(&current->mm->mmap_sem);
45336 vm_munmap(textpos, text_len);
45337 vm_munmap(realdatastart, len);
45338+ up_write(&current->mm->mmap_sem);
45339 ret = result;
45340 goto err;
45341 }
45342@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
45343 }
45344 if (IS_ERR_VALUE(result)) {
45345 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
45346+ down_write(&current->mm->mmap_sem);
45347 vm_munmap(textpos, text_len + data_len + extra +
45348 MAX_SHARED_LIBS * sizeof(unsigned long));
45349+ up_write(&current->mm->mmap_sem);
45350 ret = result;
45351 goto err;
45352 }
45353diff --git a/fs/bio.c b/fs/bio.c
45354index b96fc6c..431d628 100644
45355--- a/fs/bio.c
45356+++ b/fs/bio.c
45357@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
45358 /*
45359 * Overflow, abort
45360 */
45361- if (end < start)
45362+ if (end < start || end - start > INT_MAX - nr_pages)
45363 return ERR_PTR(-EINVAL);
45364
45365 nr_pages += end - start;
45366@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
45367 /*
45368 * Overflow, abort
45369 */
45370- if (end < start)
45371+ if (end < start || end - start > INT_MAX - nr_pages)
45372 return ERR_PTR(-EINVAL);
45373
45374 nr_pages += end - start;
45375@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
45376 const int read = bio_data_dir(bio) == READ;
45377 struct bio_map_data *bmd = bio->bi_private;
45378 int i;
45379- char *p = bmd->sgvecs[0].iov_base;
45380+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
45381
45382 __bio_for_each_segment(bvec, bio, i, 0) {
45383 char *addr = page_address(bvec->bv_page);
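
The widened guard in both bio hunks protects the subsequent "nr_pages += end - start" accumulation: besides rejecting a wrapped interval (end < start), it now also rejects any span that would push the int page counter past INT_MAX. A self-contained sketch of the same pattern (add_span is a hypothetical name; start and end are page numbers, as in the kernel code):

#include <limits.h>
#include <stdbool.h>

static bool add_span(int *nr_pages, unsigned long start, unsigned long end)
{
	/* Refuse the span before accumulating, so nr_pages can never
	 * overflow -- the same shape as the checks patched in above. */
	if (end < start || end - start > (unsigned long)(INT_MAX - *nr_pages))
		return false;
	*nr_pages += (int)(end - start);
	return true;
}
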
45384diff --git a/fs/block_dev.c b/fs/block_dev.c
45385index ab3a456..7da538b 100644
45386--- a/fs/block_dev.c
45387+++ b/fs/block_dev.c
45388@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
45389 else if (bdev->bd_contains == bdev)
45390 return true; /* is a whole device which isn't held */
45391
45392- else if (whole->bd_holder == bd_may_claim)
45393+ else if (whole->bd_holder == (void *)bd_may_claim)
45394 return true; /* is a partition of a device that is being partitioned */
45395 else if (whole->bd_holder != NULL)
45396 return false; /* is a partition of a held device */
45397diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
45398index cdfb4c4..da736d4 100644
45399--- a/fs/btrfs/ctree.c
45400+++ b/fs/btrfs/ctree.c
45401@@ -1035,9 +1035,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
45402 free_extent_buffer(buf);
45403 add_root_to_dirty_list(root);
45404 } else {
45405- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
45406- parent_start = parent->start;
45407- else
45408+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
45409+ if (parent)
45410+ parent_start = parent->start;
45411+ else
45412+ parent_start = 0;
45413+ } else
45414 parent_start = 0;
45415
45416 WARN_ON(trans->transid != btrfs_header_generation(parent));
45417diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
45418index 95542a1..95a8727 100644
45419--- a/fs/btrfs/inode.c
45420+++ b/fs/btrfs/inode.c
45421@@ -7243,7 +7243,7 @@ fail:
45422 return -ENOMEM;
45423 }
45424
45425-static int btrfs_getattr(struct vfsmount *mnt,
45426+int btrfs_getattr(struct vfsmount *mnt,
45427 struct dentry *dentry, struct kstat *stat)
45428 {
45429 struct inode *inode = dentry->d_inode;
45430@@ -7257,6 +7257,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
45431 return 0;
45432 }
45433
45434+EXPORT_SYMBOL(btrfs_getattr);
45435+
45436+dev_t get_btrfs_dev_from_inode(struct inode *inode)
45437+{
45438+ return BTRFS_I(inode)->root->anon_dev;
45439+}
45440+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
45441+
45442 /*
45443 * If a file is moved, it will inherit the cow and compression flags of the new
45444 * directory.
45445diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
45446index 8fcf9a5..a200000 100644
45447--- a/fs/btrfs/ioctl.c
45448+++ b/fs/btrfs/ioctl.c
45449@@ -2965,9 +2965,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45450 for (i = 0; i < num_types; i++) {
45451 struct btrfs_space_info *tmp;
45452
45453+ /* Don't copy in more than we allocated */
45454 if (!slot_count)
45455 break;
45456
45457+ slot_count--;
45458+
45459 info = NULL;
45460 rcu_read_lock();
45461 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
45462@@ -2989,10 +2992,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
45463 memcpy(dest, &space, sizeof(space));
45464 dest++;
45465 space_args.total_spaces++;
45466- slot_count--;
45467 }
45468- if (!slot_count)
45469- break;
45470 }
45471 up_read(&info->groups_sem);
45472 }
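
The reshaped loop above fixes a bounds problem: a slot is now consumed at the top of each outer iteration (slot_count-- before the scan) instead of after a copy, so the number of records written through dest can never exceed what the caller allocated. A hedged standalone model of that shape (copy_bounded and space_rec are illustrative names, not btrfs API):

#include <stddef.h>

struct space_rec { unsigned long long flags, total, used; };

static size_t copy_bounded(struct space_rec *dst, size_t slots,
                           const struct space_rec *src, size_t nsrc)
{
	size_t i, copied = 0;

	for (i = 0; i < nsrc; i++) {
		if (!slots)
			break;		/* destination buffer exhausted */
		slots--;		/* claim the slot up front */
		dst[copied++] = src[i];
	}
	return copied;
}
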
45473diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
45474index 776f0aa..3aad281 100644
45475--- a/fs/btrfs/relocation.c
45476+++ b/fs/btrfs/relocation.c
45477@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
45478 }
45479 spin_unlock(&rc->reloc_root_tree.lock);
45480
45481- BUG_ON((struct btrfs_root *)node->data != root);
45482+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
45483
45484 if (!del) {
45485 spin_lock(&rc->reloc_root_tree.lock);
45486diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
45487index 622f469..e8d2d55 100644
45488--- a/fs/cachefiles/bind.c
45489+++ b/fs/cachefiles/bind.c
45490@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
45491 args);
45492
45493 /* start by checking things over */
45494- ASSERT(cache->fstop_percent >= 0 &&
45495- cache->fstop_percent < cache->fcull_percent &&
45496+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
45497 cache->fcull_percent < cache->frun_percent &&
45498 cache->frun_percent < 100);
45499
45500- ASSERT(cache->bstop_percent >= 0 &&
45501- cache->bstop_percent < cache->bcull_percent &&
45502+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
45503 cache->bcull_percent < cache->brun_percent &&
45504 cache->brun_percent < 100);
45505
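
The comparisons dropped here were tautologies: the *_percent fields are plain unsigned (see the fs/cachefiles/internal.h hunk further down), so "x >= 0" is always true and only earns a -Wtype-limits warning. A small demonstration, using an arbitrary local in place of the cache fields:

#include <stdio.h>

int main(void)
{
	unsigned int fstop_percent = 0u;

	/* GCC with -Wtype-limits flags this comparison as always true:
	 * an unsigned value cannot be negative. */
	if (fstop_percent >= 0)
		printf("always reached\n");
	return 0;
}
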
45506diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
45507index 0a1467b..6a53245 100644
45508--- a/fs/cachefiles/daemon.c
45509+++ b/fs/cachefiles/daemon.c
45510@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
45511 if (n > buflen)
45512 return -EMSGSIZE;
45513
45514- if (copy_to_user(_buffer, buffer, n) != 0)
45515+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
45516 return -EFAULT;
45517
45518 return n;
45519@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
45520 if (test_bit(CACHEFILES_DEAD, &cache->flags))
45521 return -EIO;
45522
45523- if (datalen < 0 || datalen > PAGE_SIZE - 1)
45524+ if (datalen > PAGE_SIZE - 1)
45525 return -EOPNOTSUPP;
45526
45527 /* drag the command string into the kernel so we can parse it */
45528@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
45529 if (args[0] != '%' || args[1] != '\0')
45530 return -EINVAL;
45531
45532- if (fstop < 0 || fstop >= cache->fcull_percent)
45533+ if (fstop >= cache->fcull_percent)
45534 return cachefiles_daemon_range_error(cache, args);
45535
45536 cache->fstop_percent = fstop;
45537@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
45538 if (args[0] != '%' || args[1] != '\0')
45539 return -EINVAL;
45540
45541- if (bstop < 0 || bstop >= cache->bcull_percent)
45542+ if (bstop >= cache->bcull_percent)
45543 return cachefiles_daemon_range_error(cache, args);
45544
45545 cache->bstop_percent = bstop;
45546diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
45547index bd6bc1b..b627b53 100644
45548--- a/fs/cachefiles/internal.h
45549+++ b/fs/cachefiles/internal.h
45550@@ -57,7 +57,7 @@ struct cachefiles_cache {
45551 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
45552 struct rb_root active_nodes; /* active nodes (can't be culled) */
45553 rwlock_t active_lock; /* lock for active_nodes */
45554- atomic_t gravecounter; /* graveyard uniquifier */
45555+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
45556 unsigned frun_percent; /* when to stop culling (% files) */
45557 unsigned fcull_percent; /* when to start culling (% files) */
45558 unsigned fstop_percent; /* when to stop allocating (% files) */
45559@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
45560 * proc.c
45561 */
45562 #ifdef CONFIG_CACHEFILES_HISTOGRAM
45563-extern atomic_t cachefiles_lookup_histogram[HZ];
45564-extern atomic_t cachefiles_mkdir_histogram[HZ];
45565-extern atomic_t cachefiles_create_histogram[HZ];
45566+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45567+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45568+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
45569
45570 extern int __init cachefiles_proc_init(void);
45571 extern void cachefiles_proc_cleanup(void);
45572 static inline
45573-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
45574+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
45575 {
45576 unsigned long jif = jiffies - start_jif;
45577 if (jif >= HZ)
45578 jif = HZ - 1;
45579- atomic_inc(&histogram[jif]);
45580+ atomic_inc_unchecked(&histogram[jif]);
45581 }
45582
45583 #else
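
The atomic_t -> atomic_unchecked_t conversions in this file follow the usual grsecurity/PaX REFCOUNT split: counters that only feed statistics or mint cookies (the latency histograms, the graveyard uniquifier) may wrap harmlessly, so they move to the unchecked variant, while genuine reference counts stay on the overflow-trapping atomic_t. A conceptual sketch of the distinction in portable C11 — PaX itself implements the checked side with arch-specific overflow traps, not this compare-and-swap loop:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Refuses to wrap: the behaviour a reference count needs. */
static bool checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != INT_MAX) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	}
	return false;	/* would overflow: saturate and report instead */
}

/* Free to wrap: fine for statistics like the histograms above. */
static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);
}
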
45584diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
45585index b0b5f7c..039bb26 100644
45586--- a/fs/cachefiles/namei.c
45587+++ b/fs/cachefiles/namei.c
45588@@ -318,7 +318,7 @@ try_again:
45589 /* first step is to make up a grave dentry in the graveyard */
45590 sprintf(nbuffer, "%08x%08x",
45591 (uint32_t) get_seconds(),
45592- (uint32_t) atomic_inc_return(&cache->gravecounter));
45593+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
45594
45595 /* do the multiway lock magic */
45596 trap = lock_rename(cache->graveyard, dir);
45597diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
45598index eccd339..4c1d995 100644
45599--- a/fs/cachefiles/proc.c
45600+++ b/fs/cachefiles/proc.c
45601@@ -14,9 +14,9 @@
45602 #include <linux/seq_file.h>
45603 #include "internal.h"
45604
45605-atomic_t cachefiles_lookup_histogram[HZ];
45606-atomic_t cachefiles_mkdir_histogram[HZ];
45607-atomic_t cachefiles_create_histogram[HZ];
45608+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
45609+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
45610+atomic_unchecked_t cachefiles_create_histogram[HZ];
45611
45612 /*
45613 * display the latency histogram
45614@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
45615 return 0;
45616 default:
45617 index = (unsigned long) v - 3;
45618- x = atomic_read(&cachefiles_lookup_histogram[index]);
45619- y = atomic_read(&cachefiles_mkdir_histogram[index]);
45620- z = atomic_read(&cachefiles_create_histogram[index]);
45621+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
45622+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
45623+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
45624 if (x == 0 && y == 0 && z == 0)
45625 return 0;
45626
45627diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
45628index c994691..2a1537f 100644
45629--- a/fs/cachefiles/rdwr.c
45630+++ b/fs/cachefiles/rdwr.c
45631@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
45632 old_fs = get_fs();
45633 set_fs(KERNEL_DS);
45634 ret = file->f_op->write(
45635- file, (const void __user *) data, len, &pos);
45636+ file, (const void __force_user *) data, len, &pos);
45637 set_fs(old_fs);
45638 kunmap(page);
45639 if (ret != len)
45640diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
45641index e5b7731..b9c59fb 100644
45642--- a/fs/ceph/dir.c
45643+++ b/fs/ceph/dir.c
45644@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
45645 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
45646 struct ceph_mds_client *mdsc = fsc->mdsc;
45647 unsigned frag = fpos_frag(filp->f_pos);
45648- int off = fpos_off(filp->f_pos);
45649+ unsigned int off = fpos_off(filp->f_pos);
45650 int err;
45651 u32 ftype;
45652 struct ceph_mds_reply_info_parsed *rinfo;
45653diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
45654index d9ea6ed..1e6c8ac 100644
45655--- a/fs/cifs/cifs_debug.c
45656+++ b/fs/cifs/cifs_debug.c
45657@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45658
45659 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
45660 #ifdef CONFIG_CIFS_STATS2
45661- atomic_set(&totBufAllocCount, 0);
45662- atomic_set(&totSmBufAllocCount, 0);
45663+ atomic_set_unchecked(&totBufAllocCount, 0);
45664+ atomic_set_unchecked(&totSmBufAllocCount, 0);
45665 #endif /* CONFIG_CIFS_STATS2 */
45666 spin_lock(&cifs_tcp_ses_lock);
45667 list_for_each(tmp1, &cifs_tcp_ses_list) {
45668@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
45669 tcon = list_entry(tmp3,
45670 struct cifs_tcon,
45671 tcon_list);
45672- atomic_set(&tcon->num_smbs_sent, 0);
45673+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
45674 if (server->ops->clear_stats)
45675 server->ops->clear_stats(tcon);
45676 }
45677@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45678 smBufAllocCount.counter, cifs_min_small);
45679 #ifdef CONFIG_CIFS_STATS2
45680 seq_printf(m, "Total Large %d Small %d Allocations\n",
45681- atomic_read(&totBufAllocCount),
45682- atomic_read(&totSmBufAllocCount));
45683+ atomic_read_unchecked(&totBufAllocCount),
45684+ atomic_read_unchecked(&totSmBufAllocCount));
45685 #endif /* CONFIG_CIFS_STATS2 */
45686
45687 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
45688@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
45689 if (tcon->need_reconnect)
45690 seq_puts(m, "\tDISCONNECTED ");
45691 seq_printf(m, "\nSMBs: %d",
45692- atomic_read(&tcon->num_smbs_sent));
45693+ atomic_read_unchecked(&tcon->num_smbs_sent));
45694 if (server->ops->print_stats)
45695 server->ops->print_stats(m, tcon);
45696 }
45697diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
45698index e7931cc..76a1ab9 100644
45699--- a/fs/cifs/cifsfs.c
45700+++ b/fs/cifs/cifsfs.c
45701@@ -999,7 +999,7 @@ cifs_init_request_bufs(void)
45702 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
45703 cifs_req_cachep = kmem_cache_create("cifs_request",
45704 CIFSMaxBufSize + max_hdr_size, 0,
45705- SLAB_HWCACHE_ALIGN, NULL);
45706+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
45707 if (cifs_req_cachep == NULL)
45708 return -ENOMEM;
45709
45710@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
45711 efficient to alloc 1 per page off the slab compared to 17K (5page)
45712 alloc of large cifs buffers even when page debugging is on */
45713 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
45714- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
45715+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
45716 NULL);
45717 if (cifs_sm_req_cachep == NULL) {
45718 mempool_destroy(cifs_req_poolp);
45719@@ -1111,8 +1111,8 @@ init_cifs(void)
45720 atomic_set(&bufAllocCount, 0);
45721 atomic_set(&smBufAllocCount, 0);
45722 #ifdef CONFIG_CIFS_STATS2
45723- atomic_set(&totBufAllocCount, 0);
45724- atomic_set(&totSmBufAllocCount, 0);
45725+ atomic_set_unchecked(&totBufAllocCount, 0);
45726+ atomic_set_unchecked(&totSmBufAllocCount, 0);
45727 #endif /* CONFIG_CIFS_STATS2 */
45728
45729 atomic_set(&midCount, 0);
45730diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
45731index f5af252..489b5f2 100644
45732--- a/fs/cifs/cifsglob.h
45733+++ b/fs/cifs/cifsglob.h
45734@@ -751,35 +751,35 @@ struct cifs_tcon {
45735 __u16 Flags; /* optional support bits */
45736 enum statusEnum tidStatus;
45737 #ifdef CONFIG_CIFS_STATS
45738- atomic_t num_smbs_sent;
45739+ atomic_unchecked_t num_smbs_sent;
45740 union {
45741 struct {
45742- atomic_t num_writes;
45743- atomic_t num_reads;
45744- atomic_t num_flushes;
45745- atomic_t num_oplock_brks;
45746- atomic_t num_opens;
45747- atomic_t num_closes;
45748- atomic_t num_deletes;
45749- atomic_t num_mkdirs;
45750- atomic_t num_posixopens;
45751- atomic_t num_posixmkdirs;
45752- atomic_t num_rmdirs;
45753- atomic_t num_renames;
45754- atomic_t num_t2renames;
45755- atomic_t num_ffirst;
45756- atomic_t num_fnext;
45757- atomic_t num_fclose;
45758- atomic_t num_hardlinks;
45759- atomic_t num_symlinks;
45760- atomic_t num_locks;
45761- atomic_t num_acl_get;
45762- atomic_t num_acl_set;
45763+ atomic_unchecked_t num_writes;
45764+ atomic_unchecked_t num_reads;
45765+ atomic_unchecked_t num_flushes;
45766+ atomic_unchecked_t num_oplock_brks;
45767+ atomic_unchecked_t num_opens;
45768+ atomic_unchecked_t num_closes;
45769+ atomic_unchecked_t num_deletes;
45770+ atomic_unchecked_t num_mkdirs;
45771+ atomic_unchecked_t num_posixopens;
45772+ atomic_unchecked_t num_posixmkdirs;
45773+ atomic_unchecked_t num_rmdirs;
45774+ atomic_unchecked_t num_renames;
45775+ atomic_unchecked_t num_t2renames;
45776+ atomic_unchecked_t num_ffirst;
45777+ atomic_unchecked_t num_fnext;
45778+ atomic_unchecked_t num_fclose;
45779+ atomic_unchecked_t num_hardlinks;
45780+ atomic_unchecked_t num_symlinks;
45781+ atomic_unchecked_t num_locks;
45782+ atomic_unchecked_t num_acl_get;
45783+ atomic_unchecked_t num_acl_set;
45784 } cifs_stats;
45785 #ifdef CONFIG_CIFS_SMB2
45786 struct {
45787- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45788- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45789+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
45790+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
45791 } smb2_stats;
45792 #endif /* CONFIG_CIFS_SMB2 */
45793 } stats;
45794@@ -1094,7 +1094,7 @@ build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
45795 }
45796
45797 #ifdef CONFIG_CIFS_STATS
45798-#define cifs_stats_inc atomic_inc
45799+#define cifs_stats_inc atomic_inc_unchecked
45800
45801 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
45802 unsigned int bytes)
45803@@ -1459,8 +1459,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
45804 /* Various Debug counters */
45805 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
45806 #ifdef CONFIG_CIFS_STATS2
45807-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
45808-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
45809+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
45810+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
45811 #endif
45812 GLOBAL_EXTERN atomic_t smBufAllocCount;
45813 GLOBAL_EXTERN atomic_t midCount;
45814diff --git a/fs/cifs/link.c b/fs/cifs/link.c
45815index 51dc2fb..1e12a33 100644
45816--- a/fs/cifs/link.c
45817+++ b/fs/cifs/link.c
45818@@ -616,7 +616,7 @@ symlink_exit:
45819
45820 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
45821 {
45822- char *p = nd_get_link(nd);
45823+ const char *p = nd_get_link(nd);
45824 if (!IS_ERR(p))
45825 kfree(p);
45826 }
45827diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
45828index 3a00c0d..42d901c 100644
45829--- a/fs/cifs/misc.c
45830+++ b/fs/cifs/misc.c
45831@@ -169,7 +169,7 @@ cifs_buf_get(void)
45832 memset(ret_buf, 0, buf_size + 3);
45833 atomic_inc(&bufAllocCount);
45834 #ifdef CONFIG_CIFS_STATS2
45835- atomic_inc(&totBufAllocCount);
45836+ atomic_inc_unchecked(&totBufAllocCount);
45837 #endif /* CONFIG_CIFS_STATS2 */
45838 }
45839
45840@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
45841 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
45842 atomic_inc(&smBufAllocCount);
45843 #ifdef CONFIG_CIFS_STATS2
45844- atomic_inc(&totSmBufAllocCount);
45845+ atomic_inc_unchecked(&totSmBufAllocCount);
45846 #endif /* CONFIG_CIFS_STATS2 */
45847
45848 }
45849diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
45850index 591bf19..690d600 100644
45851--- a/fs/cifs/smb1ops.c
45852+++ b/fs/cifs/smb1ops.c
45853@@ -617,27 +617,27 @@ static void
45854 cifs_clear_stats(struct cifs_tcon *tcon)
45855 {
45856 #ifdef CONFIG_CIFS_STATS
45857- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
45858- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
45859- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
45860- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45861- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
45862- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
45863- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45864- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
45865- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
45866- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
45867- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
45868- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
45869- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
45870- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
45871- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
45872- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
45873- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
45874- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
45875- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
45876- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
45877- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
45878+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
45879+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
45880+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
45881+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
45882+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
45883+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
45884+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
45885+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
45886+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
45887+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
45888+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
45889+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
45890+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
45891+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
45892+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
45893+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
45894+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
45895+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
45896+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
45897+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
45898+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
45899 #endif
45900 }
45901
45902@@ -646,36 +646,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45903 {
45904 #ifdef CONFIG_CIFS_STATS
45905 seq_printf(m, " Oplocks breaks: %d",
45906- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
45907+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
45908 seq_printf(m, "\nReads: %d Bytes: %llu",
45909- atomic_read(&tcon->stats.cifs_stats.num_reads),
45910+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
45911 (long long)(tcon->bytes_read));
45912 seq_printf(m, "\nWrites: %d Bytes: %llu",
45913- atomic_read(&tcon->stats.cifs_stats.num_writes),
45914+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
45915 (long long)(tcon->bytes_written));
45916 seq_printf(m, "\nFlushes: %d",
45917- atomic_read(&tcon->stats.cifs_stats.num_flushes));
45918+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
45919 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
45920- atomic_read(&tcon->stats.cifs_stats.num_locks),
45921- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
45922- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
45923+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
45924+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
45925+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
45926 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
45927- atomic_read(&tcon->stats.cifs_stats.num_opens),
45928- atomic_read(&tcon->stats.cifs_stats.num_closes),
45929- atomic_read(&tcon->stats.cifs_stats.num_deletes));
45930+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
45931+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
45932+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
45933 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
45934- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
45935- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
45936+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
45937+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
45938 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
45939- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
45940- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
45941+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
45942+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
45943 seq_printf(m, "\nRenames: %d T2 Renames %d",
45944- atomic_read(&tcon->stats.cifs_stats.num_renames),
45945- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
45946+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
45947+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
45948 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
45949- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
45950- atomic_read(&tcon->stats.cifs_stats.num_fnext),
45951- atomic_read(&tcon->stats.cifs_stats.num_fclose));
45952+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
45953+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
45954+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
45955 #endif
45956 }
45957
45958diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
45959index 4d9dbe0..0af4601 100644
45960--- a/fs/cifs/smb2ops.c
45961+++ b/fs/cifs/smb2ops.c
45962@@ -291,8 +291,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
45963 #ifdef CONFIG_CIFS_STATS
45964 int i;
45965 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
45966- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45967- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45968+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
45969+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
45970 }
45971 #endif
45972 }
45973@@ -301,66 +301,66 @@ static void
45974 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
45975 {
45976 #ifdef CONFIG_CIFS_STATS
45977- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45978- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45979+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
45980+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
45981 seq_printf(m, "\nNegotiates: %d sent %d failed",
45982- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
45983- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
45984+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
45985+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
45986 seq_printf(m, "\nSessionSetups: %d sent %d failed",
45987- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
45988- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
45989+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
45990+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
45991 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
45992 seq_printf(m, "\nLogoffs: %d sent %d failed",
45993- atomic_read(&sent[SMB2_LOGOFF_HE]),
45994- atomic_read(&failed[SMB2_LOGOFF_HE]));
45995+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
45996+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
45997 seq_printf(m, "\nTreeConnects: %d sent %d failed",
45998- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
45999- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
46000+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
46001+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
46002 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
46003- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
46004- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
46005+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
46006+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
46007 seq_printf(m, "\nCreates: %d sent %d failed",
46008- atomic_read(&sent[SMB2_CREATE_HE]),
46009- atomic_read(&failed[SMB2_CREATE_HE]));
46010+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
46011+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
46012 seq_printf(m, "\nCloses: %d sent %d failed",
46013- atomic_read(&sent[SMB2_CLOSE_HE]),
46014- atomic_read(&failed[SMB2_CLOSE_HE]));
46015+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
46016+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
46017 seq_printf(m, "\nFlushes: %d sent %d failed",
46018- atomic_read(&sent[SMB2_FLUSH_HE]),
46019- atomic_read(&failed[SMB2_FLUSH_HE]));
46020+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
46021+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
46022 seq_printf(m, "\nReads: %d sent %d failed",
46023- atomic_read(&sent[SMB2_READ_HE]),
46024- atomic_read(&failed[SMB2_READ_HE]));
46025+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
46026+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
46027 seq_printf(m, "\nWrites: %d sent %d failed",
46028- atomic_read(&sent[SMB2_WRITE_HE]),
46029- atomic_read(&failed[SMB2_WRITE_HE]));
46030+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
46031+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
46032 seq_printf(m, "\nLocks: %d sent %d failed",
46033- atomic_read(&sent[SMB2_LOCK_HE]),
46034- atomic_read(&failed[SMB2_LOCK_HE]));
46035+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
46036+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
46037 seq_printf(m, "\nIOCTLs: %d sent %d failed",
46038- atomic_read(&sent[SMB2_IOCTL_HE]),
46039- atomic_read(&failed[SMB2_IOCTL_HE]));
46040+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
46041+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
46042 seq_printf(m, "\nCancels: %d sent %d failed",
46043- atomic_read(&sent[SMB2_CANCEL_HE]),
46044- atomic_read(&failed[SMB2_CANCEL_HE]));
46045+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
46046+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
46047 seq_printf(m, "\nEchos: %d sent %d failed",
46048- atomic_read(&sent[SMB2_ECHO_HE]),
46049- atomic_read(&failed[SMB2_ECHO_HE]));
46050+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
46051+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
46052 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
46053- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
46054- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
46055+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
46056+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
46057 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
46058- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
46059- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
46060+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
46061+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
46062 seq_printf(m, "\nQueryInfos: %d sent %d failed",
46063- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
46064- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
46065+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
46066+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
46067 seq_printf(m, "\nSetInfos: %d sent %d failed",
46068- atomic_read(&sent[SMB2_SET_INFO_HE]),
46069- atomic_read(&failed[SMB2_SET_INFO_HE]));
46070+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
46071+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
46072 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
46073- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
46074- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
46075+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
46076+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
46077 #endif
46078 }
46079
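The cifs/smb2 hunk above is the template for hundreds of conversions in this patch: under PAX_REFCOUNT, atomic_t gains overflow detection, so counters that are pure statistics, where wrapping is harmless and a trap would only be noise, move to atomic_unchecked_t and the *_unchecked accessors. A minimal userspace model of the two semantics using C11 atomics; the type and function names below are illustrative, not the patch's kernel implementation:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <limits.h>

    /* Model of PAX_REFCOUNT semantics: a "checked" counter saturates and
     * reports on overflow, an "unchecked" one is allowed to wrap. */
    typedef struct { atomic_int v; } checked_t;
    typedef struct { atomic_int v; } unchecked_t;

    static void checked_inc(checked_t *c)
    {
        int old = atomic_fetch_add(&c->v, 1);
        if (old == INT_MAX) {                 /* overflow detected */
            atomic_store(&c->v, INT_MAX);     /* saturate, don't wrap */
            fprintf(stderr, "refcount overflow detected\n");
        }
    }

    static void unchecked_inc(unchecked_t *c)
    {
        atomic_fetch_add(&c->v, 1);           /* wrapping is acceptable */
    }

    int main(void)
    {
        checked_t refs;
        unchecked_t stats;
        atomic_init(&refs.v, INT_MAX);
        atomic_init(&stats.v, INT_MAX);
        checked_inc(&refs);                   /* trips the check */
        unchecked_inc(&stats);                /* silently wraps */
        printf("unchecked counter wrapped to %d\n", atomic_load(&stats.v));
        return 0;
    }

C11 defines signed atomic arithmetic to wrap in two's complement, which is exactly why leaving a statistics counter unchecked is safe.
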
46080diff --git a/fs/coda/cache.c b/fs/coda/cache.c
46081index 958ae0e..505c9d0 100644
46082--- a/fs/coda/cache.c
46083+++ b/fs/coda/cache.c
46084@@ -24,7 +24,7 @@
46085 #include "coda_linux.h"
46086 #include "coda_cache.h"
46087
46088-static atomic_t permission_epoch = ATOMIC_INIT(0);
46089+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
46090
46091 /* replace or extend an acl cache hit */
46092 void coda_cache_enter(struct inode *inode, int mask)
46093@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
46094 struct coda_inode_info *cii = ITOC(inode);
46095
46096 spin_lock(&cii->c_lock);
46097- cii->c_cached_epoch = atomic_read(&permission_epoch);
46098+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
46099 if (cii->c_uid != current_fsuid()) {
46100 cii->c_uid = current_fsuid();
46101 cii->c_cached_perm = mask;
46102@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
46103 {
46104 struct coda_inode_info *cii = ITOC(inode);
46105 spin_lock(&cii->c_lock);
46106- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
46107+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
46108 spin_unlock(&cii->c_lock);
46109 }
46110
46111 /* remove all acl caches */
46112 void coda_cache_clear_all(struct super_block *sb)
46113 {
46114- atomic_inc(&permission_epoch);
46115+ atomic_inc_unchecked(&permission_epoch);
46116 }
46117
46118
46119@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
46120 spin_lock(&cii->c_lock);
46121 hit = (mask & cii->c_cached_perm) == mask &&
46122 cii->c_uid == current_fsuid() &&
46123- cii->c_cached_epoch == atomic_read(&permission_epoch);
46124+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
46125 spin_unlock(&cii->c_lock);
46126
46127 return hit;
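
The coda hunk also shows why such counters tolerate wrapping: permission_epoch implements epoch-based invalidation. Bumping one global generation number invalidates every cached ACL entry at once, and a stale epoch merely forces a re-check, so even a wrapped counter costs at most a spurious cache miss. A compilable sketch of the pattern (a userspace model, not coda's code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Epoch-based cache invalidation: one global counter, bumped to
     * invalidate every cached entry in O(1). */
    static atomic_uint permission_epoch;

    struct cache_entry {
        unsigned epoch;        /* epoch the entry was filled in */
        int cached_perm;       /* cached permission mask */
    };

    static void cache_fill(struct cache_entry *e, int mask)
    {
        e->epoch = atomic_load(&permission_epoch);
        e->cached_perm = mask;
    }

    static bool cache_hit(const struct cache_entry *e, int mask)
    {
        return (mask & e->cached_perm) == mask &&
               e->epoch == atomic_load(&permission_epoch);
    }

    static void cache_clear_all(void)
    {
        atomic_fetch_add(&permission_epoch, 1);
    }

    int main(void)
    {
        struct cache_entry e;
        cache_fill(&e, 04);
        printf("hit before clear: %d\n", cache_hit(&e, 04));
        cache_clear_all();
        printf("hit after clear:  %d\n", cache_hit(&e, 04));
        return 0;
    }
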
46128diff --git a/fs/compat.c b/fs/compat.c
46129index 015e1e1..5ce8e54 100644
46130--- a/fs/compat.c
46131+++ b/fs/compat.c
46132@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
46133
46134 set_fs(KERNEL_DS);
46135 /* The __user pointer cast is valid because of the set_fs() */
46136- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
46137+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
46138 set_fs(oldfs);
46139 /* truncating is ok because it's a user address */
46140 if (!ret)
46141@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
46142 goto out;
46143
46144 ret = -EINVAL;
46145- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
46146+ if (nr_segs > UIO_MAXIOV)
46147 goto out;
46148 if (nr_segs > fast_segs) {
46149 ret = -ENOMEM;
46150@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
46151
46152 struct compat_readdir_callback {
46153 struct compat_old_linux_dirent __user *dirent;
46154+ struct file * file;
46155 int result;
46156 };
46157
46158@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
46159 buf->result = -EOVERFLOW;
46160 return -EOVERFLOW;
46161 }
46162+
46163+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46164+ return 0;
46165+
46166 buf->result++;
46167 dirent = buf->dirent;
46168 if (!access_ok(VERIFY_WRITE, dirent,
46169@@ -878,6 +883,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
46170
46171 buf.result = 0;
46172 buf.dirent = dirent;
46173+ buf.file = f.file;
46174
46175 error = vfs_readdir(f.file, compat_fillonedir, &buf);
46176 if (buf.result)
46177@@ -897,6 +903,7 @@ struct compat_linux_dirent {
46178 struct compat_getdents_callback {
46179 struct compat_linux_dirent __user *current_dir;
46180 struct compat_linux_dirent __user *previous;
46181+ struct file * file;
46182 int count;
46183 int error;
46184 };
46185@@ -918,6 +925,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
46186 buf->error = -EOVERFLOW;
46187 return -EOVERFLOW;
46188 }
46189+
46190+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46191+ return 0;
46192+
46193 dirent = buf->previous;
46194 if (dirent) {
46195 if (__put_user(offset, &dirent->d_off))
46196@@ -963,6 +974,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
46197 buf.previous = NULL;
46198 buf.count = count;
46199 buf.error = 0;
46200+ buf.file = f.file;
46201
46202 error = vfs_readdir(f.file, compat_filldir, &buf);
46203 if (error >= 0)
46204@@ -983,6 +995,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
46205 struct compat_getdents_callback64 {
46206 struct linux_dirent64 __user *current_dir;
46207 struct linux_dirent64 __user *previous;
46208+ struct file * file;
46209 int count;
46210 int error;
46211 };
46212@@ -999,6 +1012,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
46213 buf->error = -EINVAL; /* only used if we fail.. */
46214 if (reclen > buf->count)
46215 return -EINVAL;
46216+
46217+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46218+ return 0;
46219+
46220 dirent = buf->previous;
46221
46222 if (dirent) {
46223@@ -1048,13 +1065,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
46224 buf.previous = NULL;
46225 buf.count = count;
46226 buf.error = 0;
46227+ buf.file = f.file;
46228
46229 error = vfs_readdir(f.file, compat_filldir64, &buf);
46230 if (error >= 0)
46231 error = buf.error;
46232 lastdirent = buf.previous;
46233 if (lastdirent) {
46234- typeof(lastdirent->d_off) d_off = f.file->f_pos;
46235+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
46236 if (__put_user_unaligned(d_off, &lastdirent->d_off))
46237 error = -EFAULT;
46238 else
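
All three compat readdir paths above get the same treatment: the callback context grows a struct file * so the per-entry callback can ask the RBAC policy, via gr_acl_handle_filldir(), whether an entry should be visible, and returning 0 skips it silently without aborting the walk. A self-contained sketch of that callback-filtering shape; the prefix test stands in for the real policy decision and is purely hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Directory-walk callback that hides policy-denied entries by
     * returning 0 ("skip, keep iterating") rather than an error. */
    struct filldir_ctx {
        const char *hidden_prefix;   /* stand-in for the ACL decision */
        int emitted;
    };

    static bool policy_allows(const struct filldir_ctx *ctx, const char *name)
    {
        return strncmp(name, ctx->hidden_prefix,
                       strlen(ctx->hidden_prefix)) != 0;
    }

    static int filldir(void *buf, const char *name)
    {
        struct filldir_ctx *ctx = buf;
        if (!policy_allows(ctx, name))
            return 0;                /* hide the entry, continue */
        printf("%s\n", name);
        ctx->emitted++;
        return 0;
    }

    int main(void)
    {
        const char *entries[] = { "passwd", "secret.key", "motd" };
        struct filldir_ctx ctx = { .hidden_prefix = "secret" };
        for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
            filldir(&ctx, entries[i]);
        printf("%d entries shown\n", ctx.emitted);
        return 0;
    }
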
46239diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
46240index a81147e..20bf2b5 100644
46241--- a/fs/compat_binfmt_elf.c
46242+++ b/fs/compat_binfmt_elf.c
46243@@ -30,11 +30,13 @@
46244 #undef elf_phdr
46245 #undef elf_shdr
46246 #undef elf_note
46247+#undef elf_dyn
46248 #undef elf_addr_t
46249 #define elfhdr elf32_hdr
46250 #define elf_phdr elf32_phdr
46251 #define elf_shdr elf32_shdr
46252 #define elf_note elf32_note
46253+#define elf_dyn Elf32_Dyn
46254 #define elf_addr_t Elf32_Addr
46255
46256 /*
46257diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
46258index 4c6285f..b7a2411 100644
46259--- a/fs/compat_ioctl.c
46260+++ b/fs/compat_ioctl.c
46261@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
46262 return -EFAULT;
46263 if (__get_user(udata, &ss32->iomem_base))
46264 return -EFAULT;
46265- ss.iomem_base = compat_ptr(udata);
46266+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
46267 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
46268 __get_user(ss.port_high, &ss32->port_high))
46269 return -EFAULT;
46270@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
46271 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
46272 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
46273 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
46274- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46275+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
46276 return -EFAULT;
46277
46278 return ioctl_preallocate(file, p);
46279@@ -1617,8 +1617,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
46280 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
46281 {
46282 unsigned int a, b;
46283- a = *(unsigned int *)p;
46284- b = *(unsigned int *)q;
46285+ a = *(const unsigned int *)p;
46286+ b = *(const unsigned int *)q;
46287 if (a > b)
46288 return 1;
46289 if (a < b)
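
The l_pad change in compat_ioctl_preallocate() looks like a pure type fix: for an array member, p->l_pad (the decayed element pointer) and &p->l_pad (a pointer to the whole array) name the same address, but they differ in type, and the decayed form is what a byte-copy helper is declared to take once the patch's stricter annotations are in force. A small demonstration of the distinction:

    #include <stdio.h>

    /* For an array member, arr and &arr share an address but differ
     * in type; APIs annotated for element pointers want the former. */
    struct flock_like { unsigned l_pad[4]; };

    int main(void)
    {
        struct flock_like f;
        unsigned *elem = f.l_pad;           /* decays to unsigned * */
        unsigned (*whole)[4] = &f.l_pad;    /* pointer to the array */
        printf("same address: %d\n", (void *)elem == (void *)whole);
        return 0;
    }
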
46290diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
46291index 7414ae2..d98ad6d 100644
46292--- a/fs/configfs/dir.c
46293+++ b/fs/configfs/dir.c
46294@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46295 }
46296 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
46297 struct configfs_dirent *next;
46298- const char * name;
46299+ const unsigned char * name;
46300+ char d_name[sizeof(next->s_dentry->d_iname)];
46301 int len;
46302 struct inode *inode = NULL;
46303
46304@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
46305 continue;
46306
46307 name = configfs_get_name(next);
46308- len = strlen(name);
46309+ if (next->s_dentry && name == next->s_dentry->d_iname) {
46310+ len = next->s_dentry->d_name.len;
46311+ memcpy(d_name, name, len);
46312+ name = d_name;
46313+ } else
46314+ len = strlen(name);
46315
46316 /*
46317 * We'll have a dentry and an inode for
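
The configfs hunk defends readdir against a concurrent rename: when configfs_get_name() returns a pointer into the dentry's inline d_iname, the bytes can be rewritten mid strlen(), so the patch snapshots d_name.len bytes into a stack buffer and uses only the copy. The same snapshot discipline in miniature, as a userspace sketch:

    #include <stdio.h>
    #include <string.h>

    /* Snapshot a (length, bytes) pair another thread may rewrite:
     * read the recorded length once, copy that many bytes, then use
     * only the copy; never re-derive the length from the live buffer. */
    struct shortname {
        unsigned len;
        char inline_name[32];   /* models dentry->d_iname */
    };

    static void snapshot(const struct shortname *live, char *out, size_t outsz)
    {
        unsigned len = live->len;
        if (len >= outsz)
            len = outsz - 1;
        memcpy(out, live->inline_name, len);
        out[len] = '\0';
    }

    int main(void)
    {
        struct shortname live = { 5, "hello-trailing-junk" };
        char copy[32];
        snapshot(&live, copy, sizeof(copy));
        printf("stable copy: %s\n", copy);
        return 0;
    }
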
46318diff --git a/fs/coredump.c b/fs/coredump.c
46319index ce47379..68c8e43 100644
46320--- a/fs/coredump.c
46321+++ b/fs/coredump.c
46322@@ -52,7 +52,7 @@ struct core_name {
46323 char *corename;
46324 int used, size;
46325 };
46326-static atomic_t call_count = ATOMIC_INIT(1);
46327+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
46328
46329 /* The maximal length of core_pattern is also specified in sysctl.c */
46330
46331@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
46332 {
46333 char *old_corename = cn->corename;
46334
46335- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
46336+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
46337 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
46338
46339 if (!cn->corename) {
46340@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
46341 int pid_in_pattern = 0;
46342 int err = 0;
46343
46344- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
46345+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
46346 cn->corename = kmalloc(cn->size, GFP_KERNEL);
46347 cn->used = 0;
46348
46349@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
46350 pipe = file->f_path.dentry->d_inode->i_pipe;
46351
46352 pipe_lock(pipe);
46353- pipe->readers++;
46354- pipe->writers--;
46355+ atomic_inc(&pipe->readers);
46356+ atomic_dec(&pipe->writers);
46357
46358- while ((pipe->readers > 1) && (!signal_pending(current))) {
46359+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
46360 wake_up_interruptible_sync(&pipe->wait);
46361 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46362 pipe_wait(pipe);
46363 }
46364
46365- pipe->readers--;
46366- pipe->writers++;
46367+ atomic_dec(&pipe->readers);
46368+ atomic_inc(&pipe->writers);
46369 pipe_unlock(pipe);
46370
46371 }
46372@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46373 int ispipe;
46374 struct files_struct *displaced;
46375 bool need_nonrelative = false;
46376- static atomic_t core_dump_count = ATOMIC_INIT(0);
46377+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
46378+ long signr = siginfo->si_signo;
46379 struct coredump_params cprm = {
46380 .siginfo = siginfo,
46381 .regs = regs,
46382@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46383 .mm_flags = mm->flags,
46384 };
46385
46386- audit_core_dumps(siginfo->si_signo);
46387+ audit_core_dumps(signr);
46388+
46389+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
46390+ gr_handle_brute_attach(cprm.mm_flags);
46391
46392 binfmt = mm->binfmt;
46393 if (!binfmt || !binfmt->core_dump)
46394@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46395 need_nonrelative = true;
46396 }
46397
46398- retval = coredump_wait(siginfo->si_signo, &core_state);
46399+ retval = coredump_wait(signr, &core_state);
46400 if (retval < 0)
46401 goto fail_creds;
46402
46403@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46404 }
46405 cprm.limit = RLIM_INFINITY;
46406
46407- dump_count = atomic_inc_return(&core_dump_count);
46408+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
46409 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
46410 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
46411 task_tgid_vnr(current), current->comm);
46412@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
46413 } else {
46414 struct inode *inode;
46415
46416+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
46417+
46418 if (cprm.limit < binfmt->min_coredump)
46419 goto fail_unlock;
46420
46421@@ -640,7 +646,7 @@ close_fail:
46422 filp_close(cprm.file, NULL);
46423 fail_dropcount:
46424 if (ispipe)
46425- atomic_dec(&core_dump_count);
46426+ atomic_dec_unchecked(&core_dump_count);
46427 fail_unlock:
46428 kfree(cn.corename);
46429 fail_corename:
46430@@ -659,7 +665,7 @@ fail:
46431 */
46432 int dump_write(struct file *file, const void *addr, int nr)
46433 {
46434- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
46435+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
46436 }
46437 EXPORT_SYMBOL(dump_write);
46438
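Two themes run through the coredump hunks: pipe->readers/writers become atomics (the fifo open paths converted later in this patch race on the same fields), and do_coredump() gains a brute-force hook, reporting crashes from fault-type signals to gr_handle_brute_attach() so that a repeatedly crashing, likely attacked binary can have its respawns throttled. The signal gate, isolated into a runnable form:

    #include <signal.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Only faults that plausibly indicate memory corruption or an
     * exploit attempt feed the brute-force detector. */
    static bool is_brute_candidate(int signr)
    {
        return signr == SIGSEGV || signr == SIGBUS ||
               signr == SIGKILL || signr == SIGILL;
    }

    int main(void)
    {
        int sigs[] = { SIGSEGV, SIGTERM, SIGILL };
        for (unsigned i = 0; i < sizeof(sigs) / sizeof(sigs[0]); i++)
            printf("signal %d -> %s\n", sigs[i],
                   is_brute_candidate(sigs[i]) ? "feed detector" : "ignore");
        return 0;
    }
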
46439diff --git a/fs/dcache.c b/fs/dcache.c
46440index 0d0adb6..f4646e9 100644
46441--- a/fs/dcache.c
46442+++ b/fs/dcache.c
46443@@ -3164,7 +3164,7 @@ void __init vfs_caches_init(unsigned long mempages)
46444 mempages -= reserve;
46445
46446 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
46447- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
46448+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
46449
46450 dcache_init();
46451 inode_init();
46452diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
46453index b607d92..41fda09 100644
46454--- a/fs/debugfs/inode.c
46455+++ b/fs/debugfs/inode.c
46456@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
46457 */
46458 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
46459 {
46460+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46461+ return __create_file(name, S_IFDIR | S_IRWXU,
46462+#else
46463 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46464+#endif
46465 parent, NULL, NULL);
46466 }
46467 EXPORT_SYMBOL_GPL(debugfs_create_dir);
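
Under GRKERNSEC_SYSFS_RESTRICT the debugfs directory is created 0700 instead of 0755, so unprivileged users cannot even enumerate it. The compile-time mode choice, reduced to a standalone program; RESTRICT_SYSFS here is a stand-in for the Kconfig symbol:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Userspace lacks the kernel's S_IRUGO/S_IXUGO helpers. */
    #ifndef S_IRUGO
    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
    #endif
    #ifndef S_IXUGO
    #define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)
    #endif

    #ifdef RESTRICT_SYSFS
    #define DEBUGFS_DIR_MODE (S_IFDIR | S_IRWXU)                     /* 0700 */
    #else
    #define DEBUGFS_DIR_MODE (S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO) /* 0755 */
    #endif

    int main(void)
    {
        printf("debugfs dir mode: %o\n", (unsigned)(DEBUGFS_DIR_MODE & 07777));
        return 0;
    }
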
46468diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
46469index cc7709e..7e7211f 100644
46470--- a/fs/ecryptfs/inode.c
46471+++ b/fs/ecryptfs/inode.c
46472@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
46473 old_fs = get_fs();
46474 set_fs(get_ds());
46475 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
46476- (char __user *)lower_buf,
46477+ (char __force_user *)lower_buf,
46478 PATH_MAX);
46479 set_fs(old_fs);
46480 if (rc < 0)
46481@@ -706,7 +706,7 @@ out:
46482 static void
46483 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
46484 {
46485- char *buf = nd_get_link(nd);
46486+ const char *buf = nd_get_link(nd);
46487 if (!IS_ERR(buf)) {
46488 /* Free the char* */
46489 kfree(buf);
46490diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
46491index 412e6ed..4292d22 100644
46492--- a/fs/ecryptfs/miscdev.c
46493+++ b/fs/ecryptfs/miscdev.c
46494@@ -315,7 +315,7 @@ check_list:
46495 goto out_unlock_msg_ctx;
46496 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
46497 if (msg_ctx->msg) {
46498- if (copy_to_user(&buf[i], packet_length, packet_length_size))
46499+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
46500 goto out_unlock_msg_ctx;
46501 i += packet_length_size;
46502 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
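
The miscdev change supplies the bound the original code lacked: packet_length_size is validated against the local packet_length buffer before copy_to_user(), so a miscomputed size can no longer read past the end of the stack buffer. The fail-closed shape of that check, sketched:

    #include <stdio.h>
    #include <string.h>

    /* Validate a copy length against both the source capacity and the
     * destination size before copying; refuse otherwise. */
    static int copy_packet_length(char *dst, size_t dstsz,
                                  const char *pkt, size_t pkt_size,
                                  size_t pkt_cap)
    {
        if (pkt_size > pkt_cap || pkt_size > dstsz)
            return -1;           /* would over-read or over-write */
        memcpy(dst, pkt, pkt_size);
        return 0;
    }

    int main(void)
    {
        char packet_length[8] = { 0, 0, 0, 42 };
        char out[16];
        printf("ok copy:  %d\n", copy_packet_length(out, sizeof(out),
               packet_length, 4, sizeof(packet_length)));
        printf("bad copy: %d\n", copy_packet_length(out, sizeof(out),
               packet_length, 64, sizeof(packet_length)));
        return 0;
    }
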
46503diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
46504index b2a34a1..162fa69 100644
46505--- a/fs/ecryptfs/read_write.c
46506+++ b/fs/ecryptfs/read_write.c
46507@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
46508 return -EIO;
46509 fs_save = get_fs();
46510 set_fs(get_ds());
46511- rc = vfs_write(lower_file, data, size, &offset);
46512+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
46513 set_fs(fs_save);
46514 mark_inode_dirty_sync(ecryptfs_inode);
46515 return rc;
46516@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
46517 return -EIO;
46518 fs_save = get_fs();
46519 set_fs(get_ds());
46520- rc = vfs_read(lower_file, data, size, &offset);
46521+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
46522 set_fs(fs_save);
46523 return rc;
46524 }
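
Every __force_user / __force_kernel cast in this patch serves one audit purpose: once the sparse address-space annotations are tightened, a kernel pointer handed to a __user-expecting API (legal only inside a set_fs(KERNEL_DS) window, as in these ecryptfs helpers) requires an explicit, greppable cast at the crossing point. A userspace model of why a distinct pointer type makes every crossing visible; this models the idea only, not sparse itself:

    #include <stdio.h>
    #include <string.h>

    /* Represent a "user pointer" as a distinct struct so the compiler
     * rejects accidental mixing and forces one explicit conversion,
     * analogous to sparse's __user address space. */
    typedef struct { char *p; } user_ptr;

    static user_ptr force_user(char *kernel_ptr)
    {
        user_ptr u = { kernel_ptr };  /* the one audited crossing */
        return u;
    }

    static size_t fake_vfs_read(char *dst, user_ptr src, size_t n)
    {
        memcpy(dst, src.p, n);        /* stands in for the real copy */
        return n;
    }

    int main(void)
    {
        char kbuf[] = "lower-file contents";
        char out[32];
        size_t n = fake_vfs_read(out, force_user(kbuf), sizeof(kbuf));
        printf("read %zu bytes: %s\n", n, out);
        return 0;
    }
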
46525diff --git a/fs/exec.c b/fs/exec.c
46526index c6e6de4..45e71ad 100644
46527--- a/fs/exec.c
46528+++ b/fs/exec.c
46529@@ -55,6 +55,17 @@
46530 #include <linux/pipe_fs_i.h>
46531 #include <linux/oom.h>
46532 #include <linux/compat.h>
46533+#include <linux/random.h>
46534+#include <linux/seq_file.h>
46535+#include <linux/coredump.h>
46536+#include <linux/mman.h>
46537+
46538+#ifdef CONFIG_PAX_REFCOUNT
46539+#include <linux/kallsyms.h>
46540+#include <linux/kdebug.h>
46541+#endif
46542+
46543+#include <trace/events/fs.h>
46544
46545 #include <asm/uaccess.h>
46546 #include <asm/mmu_context.h>
46547@@ -66,6 +77,18 @@
46548
46549 #include <trace/events/sched.h>
46550
46551+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46552+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
46553+{
46554+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
46555+}
46556+#endif
46557+
46558+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
46559+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
46560+EXPORT_SYMBOL(pax_set_initial_flags_func);
46561+#endif
46562+
46563 int suid_dumpable = 0;
46564
46565 static LIST_HEAD(formats);
46566@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46567 int write)
46568 {
46569 struct page *page;
46570- int ret;
46571
46572-#ifdef CONFIG_STACK_GROWSUP
46573- if (write) {
46574- ret = expand_downwards(bprm->vma, pos);
46575- if (ret < 0)
46576- return NULL;
46577- }
46578-#endif
46579- ret = get_user_pages(current, bprm->mm, pos,
46580- 1, write, 1, &page, NULL);
46581- if (ret <= 0)
46582+ if (0 > expand_downwards(bprm->vma, pos))
46583+ return NULL;
46584+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
46585 return NULL;
46586
46587 if (write) {
46588@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
46589 if (size <= ARG_MAX)
46590 return page;
46591
46592+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46593+ // only allow 512KB for argv+env on suid/sgid binaries
46594+ // to prevent easy ASLR exhaustion
46595+ if (((bprm->cred->euid != current_euid()) ||
46596+ (bprm->cred->egid != current_egid())) &&
46597+ (size > (512 * 1024))) {
46598+ put_page(page);
46599+ return NULL;
46600+ }
46601+#endif
46602+
46603 /*
46604 * Limit to 1/4-th the stack size for the argv+env strings.
46605 * This ensures that:
46606@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46607 vma->vm_end = STACK_TOP_MAX;
46608 vma->vm_start = vma->vm_end - PAGE_SIZE;
46609 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
46610+
46611+#ifdef CONFIG_PAX_SEGMEXEC
46612+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
46613+#endif
46614+
46615 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
46616 INIT_LIST_HEAD(&vma->anon_vma_chain);
46617
46618@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
46619 mm->stack_vm = mm->total_vm = 1;
46620 up_write(&mm->mmap_sem);
46621 bprm->p = vma->vm_end - sizeof(void *);
46622+
46623+#ifdef CONFIG_PAX_RANDUSTACK
46624+ if (randomize_va_space)
46625+ bprm->p ^= random32() & ~PAGE_MASK;
46626+#endif
46627+
46628 return 0;
46629 err:
46630 up_write(&mm->mmap_sem);
46631@@ -384,19 +421,7 @@ err:
46632 return err;
46633 }
46634
46635-struct user_arg_ptr {
46636-#ifdef CONFIG_COMPAT
46637- bool is_compat;
46638-#endif
46639- union {
46640- const char __user *const __user *native;
46641-#ifdef CONFIG_COMPAT
46642- const compat_uptr_t __user *compat;
46643-#endif
46644- } ptr;
46645-};
46646-
46647-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46648+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46649 {
46650 const char __user *native;
46651
46652@@ -405,14 +430,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
46653 compat_uptr_t compat;
46654
46655 if (get_user(compat, argv.ptr.compat + nr))
46656- return ERR_PTR(-EFAULT);
46657+ return (const char __force_user *)ERR_PTR(-EFAULT);
46658
46659 return compat_ptr(compat);
46660 }
46661 #endif
46662
46663 if (get_user(native, argv.ptr.native + nr))
46664- return ERR_PTR(-EFAULT);
46665+ return (const char __force_user *)ERR_PTR(-EFAULT);
46666
46667 return native;
46668 }
46669@@ -431,11 +456,12 @@ static int count(struct user_arg_ptr argv, int max)
46670 if (!p)
46671 break;
46672
46673- if (IS_ERR(p))
46674+ if (IS_ERR((const char __force_kernel *)p))
46675 return -EFAULT;
46676
46677- if (i++ >= max)
46678+ if (i >= max)
46679 return -E2BIG;
46680+ ++i;
46681
46682 if (fatal_signal_pending(current))
46683 return -ERESTARTNOHAND;
46684@@ -465,7 +491,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
46685
46686 ret = -EFAULT;
46687 str = get_user_arg_ptr(argv, argc);
46688- if (IS_ERR(str))
46689+ if (IS_ERR((const char __force_kernel *)str))
46690 goto out;
46691
46692 len = strnlen_user(str, MAX_ARG_STRLEN);
46693@@ -547,7 +573,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
46694 int r;
46695 mm_segment_t oldfs = get_fs();
46696 struct user_arg_ptr argv = {
46697- .ptr.native = (const char __user *const __user *)__argv,
46698+ .ptr.native = (const char __force_user *const __force_user *)__argv,
46699 };
46700
46701 set_fs(KERNEL_DS);
46702@@ -582,7 +608,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46703 unsigned long new_end = old_end - shift;
46704 struct mmu_gather tlb;
46705
46706- BUG_ON(new_start > new_end);
46707+ if (new_start >= new_end || new_start < mmap_min_addr)
46708+ return -ENOMEM;
46709
46710 /*
46711 * ensure there are no vmas between where we want to go
46712@@ -591,6 +618,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
46713 if (vma != find_vma(mm, new_start))
46714 return -EFAULT;
46715
46716+#ifdef CONFIG_PAX_SEGMEXEC
46717+ BUG_ON(pax_find_mirror_vma(vma));
46718+#endif
46719+
46720 /*
46721 * cover the whole range: [new_start, old_end)
46722 */
46723@@ -671,10 +702,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46724 stack_top = arch_align_stack(stack_top);
46725 stack_top = PAGE_ALIGN(stack_top);
46726
46727- if (unlikely(stack_top < mmap_min_addr) ||
46728- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
46729- return -ENOMEM;
46730-
46731 stack_shift = vma->vm_end - stack_top;
46732
46733 bprm->p -= stack_shift;
46734@@ -686,8 +713,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
46735 bprm->exec -= stack_shift;
46736
46737 down_write(&mm->mmap_sem);
46738+
46739+ /* Move stack pages down in memory. */
46740+ if (stack_shift) {
46741+ ret = shift_arg_pages(vma, stack_shift);
46742+ if (ret)
46743+ goto out_unlock;
46744+ }
46745+
46746 vm_flags = VM_STACK_FLAGS;
46747
46748+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46749+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46750+ vm_flags &= ~VM_EXEC;
46751+
46752+#ifdef CONFIG_PAX_MPROTECT
46753+ if (mm->pax_flags & MF_PAX_MPROTECT)
46754+ vm_flags &= ~VM_MAYEXEC;
46755+#endif
46756+
46757+ }
46758+#endif
46759+
46760 /*
46761 * Adjust stack execute permissions; explicitly enable for
46762 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
46763@@ -706,13 +753,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
46764 goto out_unlock;
46765 BUG_ON(prev != vma);
46766
46767- /* Move stack pages down in memory. */
46768- if (stack_shift) {
46769- ret = shift_arg_pages(vma, stack_shift);
46770- if (ret)
46771- goto out_unlock;
46772- }
46773-
46774 /* mprotect_fixup is overkill to remove the temporary stack flags */
46775 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
46776
46777@@ -736,6 +776,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
46778 #endif
46779 current->mm->start_stack = bprm->p;
46780 ret = expand_stack(vma, stack_base);
46781+
46782+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_ASLR)
46783+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
46784+ unsigned long size, flags, vm_flags;
46785+
46786+ size = STACK_TOP - vma->vm_end;
46787+ flags = MAP_FIXED | MAP_PRIVATE;
46788+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
46789+
46790+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
46791+
46792+#ifdef CONFIG_X86
46793+ if (!ret) {
46794+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
46795+ ret = 0 != mmap_region(NULL, 0, size, flags, vm_flags, 0);
46796+ }
46797+#endif
46798+
46799+ }
46800+#endif
46801+
46802 if (ret)
46803 ret = -EFAULT;
46804
46805@@ -771,6 +832,8 @@ struct file *open_exec(const char *name)
46806
46807 fsnotify_open(file);
46808
46809+ trace_open_exec(name);
46810+
46811 err = deny_write_access(file);
46812 if (err)
46813 goto exit;
46814@@ -794,7 +857,7 @@ int kernel_read(struct file *file, loff_t offset,
46815 old_fs = get_fs();
46816 set_fs(get_ds());
46817 /* The cast to a user pointer is valid due to the set_fs() */
46818- result = vfs_read(file, (void __user *)addr, count, &pos);
46819+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
46820 set_fs(old_fs);
46821 return result;
46822 }
46823@@ -1246,7 +1309,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
46824 }
46825 rcu_read_unlock();
46826
46827- if (p->fs->users > n_fs) {
46828+ if (atomic_read(&p->fs->users) > n_fs) {
46829 bprm->unsafe |= LSM_UNSAFE_SHARE;
46830 } else {
46831 res = -EAGAIN;
46832@@ -1449,6 +1512,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
46833
46834 EXPORT_SYMBOL(search_binary_handler);
46835
46836+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46837+static DEFINE_PER_CPU(u64, exec_counter);
46838+static int __init init_exec_counters(void)
46839+{
46840+ unsigned int cpu;
46841+
46842+ for_each_possible_cpu(cpu) {
46843+ per_cpu(exec_counter, cpu) = (u64)cpu;
46844+ }
46845+
46846+ return 0;
46847+}
46848+early_initcall(init_exec_counters);
46849+static inline void increment_exec_counter(void)
46850+{
46851+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
46852+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
46853+}
46854+#else
46855+static inline void increment_exec_counter(void) {}
46856+#endif
46857+
46858 /*
46859 * sys_execve() executes a new program.
46860 */
46861@@ -1457,6 +1542,11 @@ static int do_execve_common(const char *filename,
46862 struct user_arg_ptr envp,
46863 struct pt_regs *regs)
46864 {
46865+#ifdef CONFIG_GRKERNSEC
46866+ struct file *old_exec_file;
46867+ struct acl_subject_label *old_acl;
46868+ struct rlimit old_rlim[RLIM_NLIMITS];
46869+#endif
46870 struct linux_binprm *bprm;
46871 struct file *file;
46872 struct files_struct *displaced;
46873@@ -1464,6 +1554,8 @@ static int do_execve_common(const char *filename,
46874 int retval;
46875 const struct cred *cred = current_cred();
46876
46877+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
46878+
46879 /*
46880 * We move the actual failure in case of RLIMIT_NPROC excess from
46881 * set*uid() to execve() because too many poorly written programs
46882@@ -1504,12 +1596,27 @@ static int do_execve_common(const char *filename,
46883 if (IS_ERR(file))
46884 goto out_unmark;
46885
46886+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
46887+ retval = -EPERM;
46888+ goto out_file;
46889+ }
46890+
46891 sched_exec();
46892
46893 bprm->file = file;
46894 bprm->filename = filename;
46895 bprm->interp = filename;
46896
46897+ if (gr_process_user_ban()) {
46898+ retval = -EPERM;
46899+ goto out_file;
46900+ }
46901+
46902+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
46903+ retval = -EACCES;
46904+ goto out_file;
46905+ }
46906+
46907 retval = bprm_mm_init(bprm);
46908 if (retval)
46909 goto out_file;
46910@@ -1526,24 +1633,65 @@ static int do_execve_common(const char *filename,
46911 if (retval < 0)
46912 goto out;
46913
46914+#ifdef CONFIG_GRKERNSEC
46915+ old_acl = current->acl;
46916+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
46917+ old_exec_file = current->exec_file;
46918+ get_file(file);
46919+ current->exec_file = file;
46920+#endif
46921+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46922+ /* limit suid stack to 8MB
46923+ we saved the old limits above and will restore them if this exec fails
46924+ */
46925+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
46926+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
46927+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
46928+#endif
46929+
46930+ if (!gr_tpe_allow(file)) {
46931+ retval = -EACCES;
46932+ goto out_fail;
46933+ }
46934+
46935+ if (gr_check_crash_exec(file)) {
46936+ retval = -EACCES;
46937+ goto out_fail;
46938+ }
46939+
46940+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
46941+ bprm->unsafe);
46942+ if (retval < 0)
46943+ goto out_fail;
46944+
46945 retval = copy_strings_kernel(1, &bprm->filename, bprm);
46946 if (retval < 0)
46947- goto out;
46948+ goto out_fail;
46949
46950 bprm->exec = bprm->p;
46951 retval = copy_strings(bprm->envc, envp, bprm);
46952 if (retval < 0)
46953- goto out;
46954+ goto out_fail;
46955
46956 retval = copy_strings(bprm->argc, argv, bprm);
46957 if (retval < 0)
46958- goto out;
46959+ goto out_fail;
46960+
46961+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
46962+
46963+ gr_handle_exec_args(bprm, argv);
46964
46965 retval = search_binary_handler(bprm,regs);
46966 if (retval < 0)
46967- goto out;
46968+ goto out_fail;
46969+#ifdef CONFIG_GRKERNSEC
46970+ if (old_exec_file)
46971+ fput(old_exec_file);
46972+#endif
46973
46974 /* execve succeeded */
46975+
46976+ increment_exec_counter();
46977 current->fs->in_exec = 0;
46978 current->in_execve = 0;
46979 acct_update_integrals(current);
46980@@ -1552,6 +1700,14 @@ static int do_execve_common(const char *filename,
46981 put_files_struct(displaced);
46982 return retval;
46983
46984+out_fail:
46985+#ifdef CONFIG_GRKERNSEC
46986+ current->acl = old_acl;
46987+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
46988+ fput(current->exec_file);
46989+ current->exec_file = old_exec_file;
46990+#endif
46991+
46992 out:
46993 if (bprm->mm) {
46994 acct_arg_size(bprm, 0);
46995@@ -1727,3 +1883,253 @@ int kernel_execve(const char *filename,
46996 ret_from_kernel_execve(p);
46997 }
46998 #endif
46999+
47000+int pax_check_flags(unsigned long *flags)
47001+{
47002+ int retval = 0;
47003+
47004+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
47005+ if (*flags & MF_PAX_SEGMEXEC)
47006+ {
47007+ *flags &= ~MF_PAX_SEGMEXEC;
47008+ retval = -EINVAL;
47009+ }
47010+#endif
47011+
47012+ if ((*flags & MF_PAX_PAGEEXEC)
47013+
47014+#ifdef CONFIG_PAX_PAGEEXEC
47015+ && (*flags & MF_PAX_SEGMEXEC)
47016+#endif
47017+
47018+ )
47019+ {
47020+ *flags &= ~MF_PAX_PAGEEXEC;
47021+ retval = -EINVAL;
47022+ }
47023+
47024+ if ((*flags & MF_PAX_MPROTECT)
47025+
47026+#ifdef CONFIG_PAX_MPROTECT
47027+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
47028+#endif
47029+
47030+ )
47031+ {
47032+ *flags &= ~MF_PAX_MPROTECT;
47033+ retval = -EINVAL;
47034+ }
47035+
47036+ if ((*flags & MF_PAX_EMUTRAMP)
47037+
47038+#ifdef CONFIG_PAX_EMUTRAMP
47039+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
47040+#endif
47041+
47042+ )
47043+ {
47044+ *flags &= ~MF_PAX_EMUTRAMP;
47045+ retval = -EINVAL;
47046+ }
47047+
47048+ return retval;
47049+}
47050+
47051+EXPORT_SYMBOL(pax_check_flags);
47052+
47053+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47054+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
47055+{
47056+ struct task_struct *tsk = current;
47057+ struct mm_struct *mm = current->mm;
47058+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
47059+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
47060+ char *path_exec = NULL;
47061+ char *path_fault = NULL;
47062+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
47063+ siginfo_t info = { };
47064+
47065+ if (buffer_exec && buffer_fault) {
47066+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
47067+
47068+ down_read(&mm->mmap_sem);
47069+ vma = mm->mmap;
47070+ while (vma && (!vma_exec || !vma_fault)) {
47071+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
47072+ vma_exec = vma;
47073+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
47074+ vma_fault = vma;
47075+ vma = vma->vm_next;
47076+ }
47077+ if (vma_exec) {
47078+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
47079+ if (IS_ERR(path_exec))
47080+ path_exec = "<path too long>";
47081+ else {
47082+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
47083+ if (path_exec) {
47084+ *path_exec = 0;
47085+ path_exec = buffer_exec;
47086+ } else
47087+ path_exec = "<path too long>";
47088+ }
47089+ }
47090+ if (vma_fault) {
47091+ start = vma_fault->vm_start;
47092+ end = vma_fault->vm_end;
47093+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
47094+ if (vma_fault->vm_file) {
47095+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
47096+ if (IS_ERR(path_fault))
47097+ path_fault = "<path too long>";
47098+ else {
47099+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
47100+ if (path_fault) {
47101+ *path_fault = 0;
47102+ path_fault = buffer_fault;
47103+ } else
47104+ path_fault = "<path too long>";
47105+ }
47106+ } else
47107+ path_fault = "<anonymous mapping>";
47108+ }
47109+ up_read(&mm->mmap_sem);
47110+ }
47111+ if (tsk->signal->curr_ip)
47112+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
47113+ else
47114+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
47115+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
47116+ from_kuid(&init_user_ns, task_uid(tsk)), from_kuid(&init_user_ns, task_euid(tsk)), pc, sp);
47117+ free_page((unsigned long)buffer_exec);
47118+ free_page((unsigned long)buffer_fault);
47119+ pax_report_insns(regs, pc, sp);
47120+ info.si_signo = SIGKILL;
47121+ info.si_errno = 0;
47122+ info.si_code = SI_KERNEL;
47123+ info.si_pid = 0;
47124+ info.si_uid = 0;
47125+ do_coredump(&info, regs);
47126+}
47127+#endif
47128+
47129+#ifdef CONFIG_PAX_REFCOUNT
47130+void pax_report_refcount_overflow(struct pt_regs *regs)
47131+{
47132+ if (current->signal->curr_ip)
47133+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
47134+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
47135+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
47136+ else
47137+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
47138+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
47139+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
47140+ show_regs(regs);
47141+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
47142+}
47143+#endif
47144+
47145+#ifdef CONFIG_PAX_USERCOPY
47146+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
47147+static noinline int check_stack_object(const void *obj, unsigned long len)
47148+{
47149+ const void * const stack = task_stack_page(current);
47150+ const void * const stackend = stack + THREAD_SIZE;
47151+
47152+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
47153+ const void *frame = NULL;
47154+ const void *oldframe;
47155+#endif
47156+
47157+ if (obj + len < obj)
47158+ return -1;
47159+
47160+ if (obj + len <= stack || stackend <= obj)
47161+ return 0;
47162+
47163+ if (obj < stack || stackend < obj + len)
47164+ return -1;
47165+
47166+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
47167+ oldframe = __builtin_frame_address(1);
47168+ if (oldframe)
47169+ frame = __builtin_frame_address(2);
47170+ /*
47171+ low ----------------------------------------------> high
47172+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
47173+ ^----------------^
47174+ allow copies only within here
47175+ */
47176+ while (stack <= frame && frame < stackend) {
47177+ /* if obj + len extends past the last frame, this
47178+ check won't pass and the next frame will be 0,
47179+ causing us to bail out and correctly report
47180+ the copy as invalid
47181+ */
47182+ if (obj + len <= frame)
47183+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
47184+ oldframe = frame;
47185+ frame = *(const void * const *)frame;
47186+ }
47187+ return -1;
47188+#else
47189+ return 1;
47190+#endif
47191+}
47192+
47193+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
47194+{
47195+ if (current->signal->curr_ip)
47196+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
47197+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
47198+ else
47199+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
47200+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
47201+ dump_stack();
47202+ gr_handle_kernel_exploit();
47203+ do_group_exit(SIGKILL);
47204+}
47205+#endif
47206+
47207+void __check_object_size(const void *ptr, unsigned long n, bool to)
47208+{
47209+
47210+#ifdef CONFIG_PAX_USERCOPY
47211+ const char *type;
47212+
47213+ if (!n)
47214+ return;
47215+
47216+ type = check_heap_object(ptr, n);
47217+ if (!type) {
47218+ if (check_stack_object(ptr, n) != -1)
47219+ return;
47220+ type = "<process stack>";
47221+ }
47222+
47223+ pax_report_usercopy(ptr, n, to, type);
47224+#endif
47225+
47226+}
47227+EXPORT_SYMBOL(__check_object_size);
47228+
47229+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
47230+void pax_track_stack(void)
47231+{
47232+ unsigned long sp = (unsigned long)&sp;
47233+ if (sp < current_thread_info()->lowest_stack &&
47234+ sp > (unsigned long)task_stack_page(current))
47235+ current_thread_info()->lowest_stack = sp;
47236+}
47237+EXPORT_SYMBOL(pax_track_stack);
47238+#endif
47239+
47240+#ifdef CONFIG_PAX_SIZE_OVERFLOW
47241+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
47242+{
47243+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
47244+ dump_stack();
47245+ do_group_exit(SIGKILL);
47246+}
47247+EXPORT_SYMBOL(report_size_overflow);
47248+#endif
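
The largest addition above, __check_object_size(), is the PAX_USERCOPY entry point: each kernel/user copy is validated against the containing heap object's bounds (check_heap_object()) or, failing that, against the current kernel stack, where check_stack_object() additionally walks saved frame pointers on x86 so that a copy may span live locals but never a saved return address. The range classification at its core, extracted into a runnable sketch with hypothetical addresses and the frame walk omitted:

    #include <stdio.h>
    #include <stdint.h>

    /* classify():
     *  0  -> object entirely outside the stack
     *  1  -> object entirely inside the stack
     * -1  -> wraparound or partial overlap: always an error */
    static int classify(uintptr_t obj, size_t len,
                        uintptr_t stack, uintptr_t stackend)
    {
        if (obj + len < obj)
            return -1;                        /* pointer wraparound */
        if (obj + len <= stack || stackend <= obj)
            return 0;                         /* fully outside */
        if (obj < stack || stackend < obj + len)
            return -1;                        /* straddles a boundary */
        return 1;                             /* fully inside */
    }

    int main(void)
    {
        uintptr_t stack = 0x1000, stackend = 0x3000;  /* hypothetical */
        printf("%d\n", classify(0x0800, 0x100, stack, stackend)); /*  0 */
        printf("%d\n", classify(0x1800, 0x100, stack, stackend)); /*  1 */
        printf("%d\n", classify(0x2f80, 0x200, stack, stackend)); /* -1 */
        return 0;
    }
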
47249diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
47250index 2616d0e..2ffdec9 100644
47251--- a/fs/ext2/balloc.c
47252+++ b/fs/ext2/balloc.c
47253@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
47254
47255 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
47256 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
47257- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
47258+ if (free_blocks < root_blocks + 1 &&
47259 !uid_eq(sbi->s_resuid, current_fsuid()) &&
47260 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
47261- !in_group_p (sbi->s_resgid))) {
47262+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
47263 return 0;
47264 }
47265 return 1;
47266diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
47267index 22548f5..41521d8 100644
47268--- a/fs/ext3/balloc.c
47269+++ b/fs/ext3/balloc.c
47270@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
47271
47272 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
47273 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
47274- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
47275+ if (free_blocks < root_blocks + 1 &&
47276 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
47277 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
47278- !in_group_p (sbi->s_resgid))) {
47279+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
47280 return 0;
47281 }
47282 return 1;
47283diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
47284index cf18217..8f6b9c3 100644
47285--- a/fs/ext4/balloc.c
47286+++ b/fs/ext4/balloc.c
47287@@ -498,8 +498,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
47288 /* Hm, nope. Are (enough) root reserved clusters available? */
47289 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
47290 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
47291- capable(CAP_SYS_RESOURCE) ||
47292- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
47293+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
47294+ capable_nolog(CAP_SYS_RESOURCE)) {
47295
47296 if (free_clusters >= (nclusters + dirty_clusters))
47297 return 1;
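
The ext2, ext3 and ext4 hunks all make the same reordering: the cheap, side-effect-free uid/gid tests short-circuit first, and the capability test runs last as capable_nolog(), so grsecurity's audit and learning machinery only fires when reserved blocks would actually be drawn down. The ordering effect in miniature:

    #include <stdbool.h>
    #include <stdio.h>

    /* Put the side-effecting check last so it only runs when the
     * cheap checks could not already decide. */
    static int capable_calls;

    static bool capable_nolog_model(void)
    {
        capable_calls++;         /* models the audited check */
        return false;
    }

    static bool may_use_reserved(bool is_resuid, bool in_resgid)
    {
        return is_resuid || in_resgid || capable_nolog_model();
    }

    int main(void)
    {
        (void)may_use_reserved(true, false);   /* short-circuits */
        (void)may_use_reserved(false, false);  /* reaches the check */
        printf("capability check ran %d time(s)\n", capable_calls);
        return 0;
    }
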
47298diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
47299index 3c20de1..6ff2460 100644
47300--- a/fs/ext4/ext4.h
47301+++ b/fs/ext4/ext4.h
47302@@ -1247,19 +1247,19 @@ struct ext4_sb_info {
47303 unsigned long s_mb_last_start;
47304
47305 /* stats for buddy allocator */
47306- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
47307- atomic_t s_bal_success; /* we found long enough chunks */
47308- atomic_t s_bal_allocated; /* in blocks */
47309- atomic_t s_bal_ex_scanned; /* total extents scanned */
47310- atomic_t s_bal_goals; /* goal hits */
47311- atomic_t s_bal_breaks; /* too long searches */
47312- atomic_t s_bal_2orders; /* 2^order hits */
47313+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
47314+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
47315+ atomic_unchecked_t s_bal_allocated; /* in blocks */
47316+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
47317+ atomic_unchecked_t s_bal_goals; /* goal hits */
47318+ atomic_unchecked_t s_bal_breaks; /* too long searches */
47319+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
47320 spinlock_t s_bal_lock;
47321 unsigned long s_mb_buddies_generated;
47322 unsigned long long s_mb_generation_time;
47323- atomic_t s_mb_lost_chunks;
47324- atomic_t s_mb_preallocated;
47325- atomic_t s_mb_discarded;
47326+ atomic_unchecked_t s_mb_lost_chunks;
47327+ atomic_unchecked_t s_mb_preallocated;
47328+ atomic_unchecked_t s_mb_discarded;
47329 atomic_t s_lock_busy;
47330
47331 /* locality groups */
47332diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
47333index 526e553..3f2de85 100644
47334--- a/fs/ext4/mballoc.c
47335+++ b/fs/ext4/mballoc.c
47336@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
47337 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
47338
47339 if (EXT4_SB(sb)->s_mb_stats)
47340- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
47341+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
47342
47343 break;
47344 }
47345@@ -2044,7 +2044,7 @@ repeat:
47346 ac->ac_status = AC_STATUS_CONTINUE;
47347 ac->ac_flags |= EXT4_MB_HINT_FIRST;
47348 cr = 3;
47349- atomic_inc(&sbi->s_mb_lost_chunks);
47350+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
47351 goto repeat;
47352 }
47353 }
47354@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
47355 if (sbi->s_mb_stats) {
47356 ext4_msg(sb, KERN_INFO,
47357 "mballoc: %u blocks %u reqs (%u success)",
47358- atomic_read(&sbi->s_bal_allocated),
47359- atomic_read(&sbi->s_bal_reqs),
47360- atomic_read(&sbi->s_bal_success));
47361+ atomic_read_unchecked(&sbi->s_bal_allocated),
47362+ atomic_read_unchecked(&sbi->s_bal_reqs),
47363+ atomic_read_unchecked(&sbi->s_bal_success));
47364 ext4_msg(sb, KERN_INFO,
47365 "mballoc: %u extents scanned, %u goal hits, "
47366 "%u 2^N hits, %u breaks, %u lost",
47367- atomic_read(&sbi->s_bal_ex_scanned),
47368- atomic_read(&sbi->s_bal_goals),
47369- atomic_read(&sbi->s_bal_2orders),
47370- atomic_read(&sbi->s_bal_breaks),
47371- atomic_read(&sbi->s_mb_lost_chunks));
47372+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
47373+ atomic_read_unchecked(&sbi->s_bal_goals),
47374+ atomic_read_unchecked(&sbi->s_bal_2orders),
47375+ atomic_read_unchecked(&sbi->s_bal_breaks),
47376+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
47377 ext4_msg(sb, KERN_INFO,
47378 "mballoc: %lu generated and it took %Lu",
47379 sbi->s_mb_buddies_generated,
47380 sbi->s_mb_generation_time);
47381 ext4_msg(sb, KERN_INFO,
47382 "mballoc: %u preallocated, %u discarded",
47383- atomic_read(&sbi->s_mb_preallocated),
47384- atomic_read(&sbi->s_mb_discarded));
47385+ atomic_read_unchecked(&sbi->s_mb_preallocated),
47386+ atomic_read_unchecked(&sbi->s_mb_discarded));
47387 }
47388
47389 free_percpu(sbi->s_locality_groups);
47390@@ -3052,16 +3052,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
47391 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
47392
47393 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
47394- atomic_inc(&sbi->s_bal_reqs);
47395- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47396+ atomic_inc_unchecked(&sbi->s_bal_reqs);
47397+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
47398 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
47399- atomic_inc(&sbi->s_bal_success);
47400- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
47401+ atomic_inc_unchecked(&sbi->s_bal_success);
47402+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
47403 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
47404 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
47405- atomic_inc(&sbi->s_bal_goals);
47406+ atomic_inc_unchecked(&sbi->s_bal_goals);
47407 if (ac->ac_found > sbi->s_mb_max_to_scan)
47408- atomic_inc(&sbi->s_bal_breaks);
47409+ atomic_inc_unchecked(&sbi->s_bal_breaks);
47410 }
47411
47412 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
47413@@ -3461,7 +3461,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
47414 trace_ext4_mb_new_inode_pa(ac, pa);
47415
47416 ext4_mb_use_inode_pa(ac, pa);
47417- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
47418+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
47419
47420 ei = EXT4_I(ac->ac_inode);
47421 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47422@@ -3521,7 +3521,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
47423 trace_ext4_mb_new_group_pa(ac, pa);
47424
47425 ext4_mb_use_group_pa(ac, pa);
47426- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47427+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
47428
47429 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
47430 lg = ac->ac_lg;
47431@@ -3610,7 +3610,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
47432 * from the bitmap and continue.
47433 */
47434 }
47435- atomic_add(free, &sbi->s_mb_discarded);
47436+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
47437
47438 return err;
47439 }
47440@@ -3628,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
47441 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
47442 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
47443 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
47444- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47445+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
47446 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
47447
47448 return 0;
47449diff --git a/fs/ext4/super.c b/fs/ext4/super.c
47450index d59b351..775f8c8 100644
47451--- a/fs/ext4/super.c
47452+++ b/fs/ext4/super.c
47453@@ -3212,7 +3212,6 @@ int ext4_calculate_overhead(struct super_block *sb)
47454 ext4_fsblk_t overhead = 0;
47455 char *buf = (char *) get_zeroed_page(GFP_KERNEL);
47456
47457- memset(buf, 0, PAGE_SIZE);
47458 if (!buf)
47459 return -ENOMEM;
47460
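The one-line ext4 deletion fixes ordering as well as redundancy: the removed memset() ran before the NULL check, a potential NULL dereference on allocation failure, and was pointless anyway because get_zeroed_page() already returns zeroed memory. The corrected discipline, standalone:

    #include <stdio.h>
    #include <stdlib.h>

    /* Never touch an allocation before its NULL check, and don't
     * re-zero memory the allocator already zeroed. */
    static char *get_zeroed_buf(size_t n)
    {
        return calloc(1, n);     /* models get_zeroed_page() */
    }

    int main(void)
    {
        char *buf = get_zeroed_buf(4096);
        if (!buf)                /* check first... */
            return 1;
        /* ...and no memset() needed: the allocator zeroed it. */
        printf("first byte: %d\n", buf[0]);
        free(buf);
        return 0;
    }
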
47461diff --git a/fs/fcntl.c b/fs/fcntl.c
47462index 71a600a..20d87b1 100644
47463--- a/fs/fcntl.c
47464+++ b/fs/fcntl.c
47465@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
47466 if (err)
47467 return err;
47468
47469+ if (gr_handle_chroot_fowner(pid, type))
47470+ return -ENOENT;
47471+ if (gr_check_protected_task_fowner(pid, type))
47472+ return -EACCES;
47473+
47474 f_modown(filp, pid, type, force);
47475 return 0;
47476 }
47477diff --git a/fs/fhandle.c b/fs/fhandle.c
47478index f775bfd..629bd4c 100644
47479--- a/fs/fhandle.c
47480+++ b/fs/fhandle.c
47481@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
47482 } else
47483 retval = 0;
47484 /* copy the mount id */
47485- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
47486- sizeof(*mnt_id)) ||
47487+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
47488 copy_to_user(ufh, handle,
47489 sizeof(struct file_handle) + handle_bytes))
47490 retval = -EFAULT;
47491diff --git a/fs/fifo.c b/fs/fifo.c
47492index cf6f434..3d7942c 100644
47493--- a/fs/fifo.c
47494+++ b/fs/fifo.c
47495@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
47496 */
47497 filp->f_op = &read_pipefifo_fops;
47498 pipe->r_counter++;
47499- if (pipe->readers++ == 0)
47500+ if (atomic_inc_return(&pipe->readers) == 1)
47501 wake_up_partner(inode);
47502
47503- if (!pipe->writers) {
47504+ if (!atomic_read(&pipe->writers)) {
47505 if ((filp->f_flags & O_NONBLOCK)) {
47506 /* suppress POLLHUP until we have
47507 * seen a writer */
47508@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
47509 * errno=ENXIO when there is no process reading the FIFO.
47510 */
47511 ret = -ENXIO;
47512- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
47513+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
47514 goto err;
47515
47516 filp->f_op = &write_pipefifo_fops;
47517 pipe->w_counter++;
47518- if (!pipe->writers++)
47519+ if (atomic_inc_return(&pipe->writers) == 1)
47520 wake_up_partner(inode);
47521
47522- if (!pipe->readers) {
47523+ if (!atomic_read(&pipe->readers)) {
47524 if (wait_for_partner(inode, &pipe->r_counter))
47525 goto err_wr;
47526 }
47527@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
47528 */
47529 filp->f_op = &rdwr_pipefifo_fops;
47530
47531- pipe->readers++;
47532- pipe->writers++;
47533+ atomic_inc(&pipe->readers);
47534+ atomic_inc(&pipe->writers);
47535 pipe->r_counter++;
47536 pipe->w_counter++;
47537- if (pipe->readers == 1 || pipe->writers == 1)
47538+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
47539 wake_up_partner(inode);
47540 break;
47541
47542@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
47543 return 0;
47544
47545 err_rd:
47546- if (!--pipe->readers)
47547+ if (atomic_dec_and_test(&pipe->readers))
47548 wake_up_interruptible(&pipe->wait);
47549 ret = -ERESTARTSYS;
47550 goto err;
47551
47552 err_wr:
47553- if (!--pipe->writers)
47554+ if (atomic_dec_and_test(&pipe->writers))
47555 wake_up_interruptible(&pipe->wait);
47556 ret = -ERESTARTSYS;
47557 goto err;
47558
47559 err:
47560- if (!pipe->readers && !pipe->writers)
47561+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
47562 free_pipe_info(inode);
47563
47564 err_nocleanup:
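
The fifo.c conversion is the canonical idiom for turning locked counter++ / --counter tests into atomics: atomic_inc_return(&c) == 1 detects the first opener and atomic_dec_and_test(&c) the last closer. Modelled with C11 atomics, where fetch-and-add returns the previous value so the post-value is prev plus or minus one:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Lock-free first-opener / last-closer detection. */
    static atomic_int readers;

    static bool open_reader(void)   /* true if we are the first reader */
    {
        return atomic_fetch_add(&readers, 1) + 1 == 1;
    }

    static bool close_reader(void)  /* true if we were the last reader */
    {
        return atomic_fetch_sub(&readers, 1) - 1 == 0;
    }

    int main(void)
    {
        printf("first? %d\n", open_reader());   /* 1 */
        printf("first? %d\n", open_reader());   /* 0 */
        printf("last?  %d\n", close_reader());  /* 0 */
        printf("last?  %d\n", close_reader());  /* 1 */
        return 0;
    }
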
47565diff --git a/fs/file.c b/fs/file.c
47566index eff2316..8c8930c 100644
47567--- a/fs/file.c
47568+++ b/fs/file.c
47569@@ -16,6 +16,7 @@
47570 #include <linux/slab.h>
47571 #include <linux/vmalloc.h>
47572 #include <linux/file.h>
47573+#include <linux/security.h>
47574 #include <linux/fdtable.h>
47575 #include <linux/bitops.h>
47576 #include <linux/interrupt.h>
47577@@ -898,6 +899,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
47578 if (!file)
47579 return __close_fd(files, fd);
47580
47581+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
47582 if (fd >= rlimit(RLIMIT_NOFILE))
47583 return -EBADF;
47584
47585@@ -924,6 +926,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
47586 if (unlikely(oldfd == newfd))
47587 return -EINVAL;
47588
47589+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
47590 if (newfd >= rlimit(RLIMIT_NOFILE))
47591 return -EBADF;
47592
47593@@ -979,6 +982,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
47594 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
47595 {
47596 int err;
47597+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
47598 if (from >= rlimit(RLIMIT_NOFILE))
47599 return -EINVAL;
47600 err = alloc_fd(from, flags);
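
Each fd-number path in fs/file.c gains a gr_learn_resource(..., RLIMIT_NOFILE, fd, 0) call ahead of the rlimit comparison; in grsecurity's learning mode this records the largest descriptor a task ever requested so a policy generator can later propose a tight RLIMIT_NOFILE. High-watermark learning, sketched:

    #include <stdio.h>

    /* Record the largest value ever requested for a limit so a tight
     * policy can be derived from observed behaviour. */
    static unsigned long learned_nofile;

    static void learn_resource(unsigned long requested)
    {
        if (requested > learned_nofile)
            learned_nofile = requested;
    }

    int main(void)
    {
        unsigned long fds[] = { 3, 17, 9, 255, 42 };
        for (unsigned i = 0; i < sizeof(fds) / sizeof(fds[0]); i++)
            learn_resource(fds[i]);
        printf("suggested RLIMIT_NOFILE: %lu\n", learned_nofile + 1);
        return 0;
    }
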
47601diff --git a/fs/filesystems.c b/fs/filesystems.c
47602index da165f6..3671bdb 100644
47603--- a/fs/filesystems.c
47604+++ b/fs/filesystems.c
47605@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
47606 int len = dot ? dot - name : strlen(name);
47607
47608 fs = __get_fs_type(name, len);
47609+
47610+#ifdef CONFIG_GRKERNSEC_MODHARDEN
47611+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
47612+#else
47613 if (!fs && (request_module("%.*s", len, name) == 0))
47614+#endif
47615 fs = __get_fs_type(name, len);
47616
47617 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
47618diff --git a/fs/fs_struct.c b/fs/fs_struct.c
47619index 5df4775..9d9336f 100644
47620--- a/fs/fs_struct.c
47621+++ b/fs/fs_struct.c
47622@@ -4,6 +4,7 @@
47623 #include <linux/path.h>
47624 #include <linux/slab.h>
47625 #include <linux/fs_struct.h>
47626+#include <linux/grsecurity.h>
47627 #include "internal.h"
47628
47629 /*
47630@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
47631 write_seqcount_begin(&fs->seq);
47632 old_root = fs->root;
47633 fs->root = *path;
47634+ gr_set_chroot_entries(current, path);
47635 write_seqcount_end(&fs->seq);
47636 spin_unlock(&fs->lock);
47637 if (old_root.dentry)
47638@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
47639 return 1;
47640 }
47641
47642+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
47643+{
47644+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
47645+ return 0;
47646+ *p = *new;
47647+
47648+ /* This function is only called from pivot_root(). Leave our
47649+ gr_chroot_dentry and is_chrooted flags as-is, so that a
47650+ pivoted root isn't treated as a chroot
47651+ */
47652+ //gr_set_chroot_entries(task, new);
47653+
47654+ return 1;
47655+}
47656+
47657 void chroot_fs_refs(struct path *old_root, struct path *new_root)
47658 {
47659 struct task_struct *g, *p;
47660@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
47661 int hits = 0;
47662 spin_lock(&fs->lock);
47663 write_seqcount_begin(&fs->seq);
47664- hits += replace_path(&fs->root, old_root, new_root);
47665+ hits += replace_root_path(p, &fs->root, old_root, new_root);
47666 hits += replace_path(&fs->pwd, old_root, new_root);
47667 write_seqcount_end(&fs->seq);
47668 while (hits--) {
47669@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
47670 task_lock(tsk);
47671 spin_lock(&fs->lock);
47672 tsk->fs = NULL;
47673- kill = !--fs->users;
47674+ gr_clear_chroot_entries(tsk);
47675+ kill = !atomic_dec_return(&fs->users);
47676 spin_unlock(&fs->lock);
47677 task_unlock(tsk);
47678 if (kill)
47679@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47680 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
47681 /* We don't need to lock fs - think why ;-) */
47682 if (fs) {
47683- fs->users = 1;
47684+ atomic_set(&fs->users, 1);
47685 fs->in_exec = 0;
47686 spin_lock_init(&fs->lock);
47687 seqcount_init(&fs->seq);
47688@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
47689 spin_lock(&old->lock);
47690 fs->root = old->root;
47691 path_get(&fs->root);
47692+ /* instead of calling gr_set_chroot_entries here,
47693+ we call it from every caller of this function
47694+ */
47695 fs->pwd = old->pwd;
47696 path_get(&fs->pwd);
47697 spin_unlock(&old->lock);
47698@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
47699
47700 task_lock(current);
47701 spin_lock(&fs->lock);
47702- kill = !--fs->users;
47703+ kill = !atomic_dec_return(&fs->users);
47704 current->fs = new_fs;
47705+ gr_set_chroot_entries(current, &new_fs->root);
47706 spin_unlock(&fs->lock);
47707 task_unlock(current);
47708
47709@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
47710
47711 int current_umask(void)
47712 {
47713- return current->fs->umask;
47714+ return current->fs->umask | gr_acl_umask();
47715 }
47716 EXPORT_SYMBOL(current_umask);
47717
47718 /* to be mentioned only in INIT_TASK */
47719 struct fs_struct init_fs = {
47720- .users = 1,
47721+ .users = ATOMIC_INIT(1),
47722 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
47723 .seq = SEQCNT_ZERO,
47724 .umask = 0022,
47725@@ -175,12 +197,13 @@ void daemonize_fs_struct(void)
47726 task_lock(current);
47727
47728 spin_lock(&init_fs.lock);
47729- init_fs.users++;
47730+ atomic_inc(&init_fs.users);
47731 spin_unlock(&init_fs.lock);
47732
47733 spin_lock(&fs->lock);
47734 current->fs = &init_fs;
47735- kill = !--fs->users;
47736+ gr_set_chroot_entries(current, &current->fs->root);
47737+ kill = !atomic_dec_return(&fs->users);
47738 spin_unlock(&fs->lock);
47739
47740 task_unlock(current);
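
Three independent hardening changes land in fs_struct.c: chroot state is tracked whenever a task's root changes (gr_set_chroot_entries()/gr_clear_chroot_entries(), with pivot_root() deliberately exempted via replace_root_path()), the RBAC umask from gr_acl_umask() is OR-ed into current_umask() so policy can force a minimum umask, and fs->users is converted from a plain int into an atomic_t. A minimal sketch of the reference-drop pattern after that conversion (the struct and function names here are illustrative):

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct fs_like {
	atomic_t users;
	spinlock_t lock;
};

/* Sketch: the last holder sees atomic_dec_return() reach zero and
 * frees the structure outside the lock, matching the
 * "kill = !atomic_dec_return(&fs->users);" pattern above. */
static void put_fs_like(struct fs_like *fs)
{
	int kill;

	spin_lock(&fs->lock);
	kill = !atomic_dec_return(&fs->users);
	spin_unlock(&fs->lock);
	if (kill)
		kfree(fs);
}
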
47741diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
47742index 9905350..02eaec4 100644
47743--- a/fs/fscache/cookie.c
47744+++ b/fs/fscache/cookie.c
47745@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
47746 parent ? (char *) parent->def->name : "<no-parent>",
47747 def->name, netfs_data);
47748
47749- fscache_stat(&fscache_n_acquires);
47750+ fscache_stat_unchecked(&fscache_n_acquires);
47751
47752 /* if there's no parent cookie, then we don't create one here either */
47753 if (!parent) {
47754- fscache_stat(&fscache_n_acquires_null);
47755+ fscache_stat_unchecked(&fscache_n_acquires_null);
47756 _leave(" [no parent]");
47757 return NULL;
47758 }
47759@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
47760 /* allocate and initialise a cookie */
47761 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
47762 if (!cookie) {
47763- fscache_stat(&fscache_n_acquires_oom);
47764+ fscache_stat_unchecked(&fscache_n_acquires_oom);
47765 _leave(" [ENOMEM]");
47766 return NULL;
47767 }
47768@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47769
47770 switch (cookie->def->type) {
47771 case FSCACHE_COOKIE_TYPE_INDEX:
47772- fscache_stat(&fscache_n_cookie_index);
47773+ fscache_stat_unchecked(&fscache_n_cookie_index);
47774 break;
47775 case FSCACHE_COOKIE_TYPE_DATAFILE:
47776- fscache_stat(&fscache_n_cookie_data);
47777+ fscache_stat_unchecked(&fscache_n_cookie_data);
47778 break;
47779 default:
47780- fscache_stat(&fscache_n_cookie_special);
47781+ fscache_stat_unchecked(&fscache_n_cookie_special);
47782 break;
47783 }
47784
47785@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
47786 if (fscache_acquire_non_index_cookie(cookie) < 0) {
47787 atomic_dec(&parent->n_children);
47788 __fscache_cookie_put(cookie);
47789- fscache_stat(&fscache_n_acquires_nobufs);
47790+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
47791 _leave(" = NULL");
47792 return NULL;
47793 }
47794 }
47795
47796- fscache_stat(&fscache_n_acquires_ok);
47797+ fscache_stat_unchecked(&fscache_n_acquires_ok);
47798 _leave(" = %p", cookie);
47799 return cookie;
47800 }
47801@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
47802 cache = fscache_select_cache_for_object(cookie->parent);
47803 if (!cache) {
47804 up_read(&fscache_addremove_sem);
47805- fscache_stat(&fscache_n_acquires_no_cache);
47806+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
47807 _leave(" = -ENOMEDIUM [no cache]");
47808 return -ENOMEDIUM;
47809 }
47810@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
47811 object = cache->ops->alloc_object(cache, cookie);
47812 fscache_stat_d(&fscache_n_cop_alloc_object);
47813 if (IS_ERR(object)) {
47814- fscache_stat(&fscache_n_object_no_alloc);
47815+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
47816 ret = PTR_ERR(object);
47817 goto error;
47818 }
47819
47820- fscache_stat(&fscache_n_object_alloc);
47821+ fscache_stat_unchecked(&fscache_n_object_alloc);
47822
47823 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
47824
47825@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
47826 struct fscache_object *object;
47827 struct hlist_node *_p;
47828
47829- fscache_stat(&fscache_n_updates);
47830+ fscache_stat_unchecked(&fscache_n_updates);
47831
47832 if (!cookie) {
47833- fscache_stat(&fscache_n_updates_null);
47834+ fscache_stat_unchecked(&fscache_n_updates_null);
47835 _leave(" [no cookie]");
47836 return;
47837 }
47838@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47839 struct fscache_object *object;
47840 unsigned long event;
47841
47842- fscache_stat(&fscache_n_relinquishes);
47843+ fscache_stat_unchecked(&fscache_n_relinquishes);
47844 if (retire)
47845- fscache_stat(&fscache_n_relinquishes_retire);
47846+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
47847
47848 if (!cookie) {
47849- fscache_stat(&fscache_n_relinquishes_null);
47850+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
47851 _leave(" [no cookie]");
47852 return;
47853 }
47854@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
47855
47856 /* wait for the cookie to finish being instantiated (or to fail) */
47857 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
47858- fscache_stat(&fscache_n_relinquishes_waitcrt);
47859+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
47860 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
47861 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
47862 }
47863diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
47864index f6aad48..88dcf26 100644
47865--- a/fs/fscache/internal.h
47866+++ b/fs/fscache/internal.h
47867@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
47868 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
47869 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
47870
47871-extern atomic_t fscache_n_op_pend;
47872-extern atomic_t fscache_n_op_run;
47873-extern atomic_t fscache_n_op_enqueue;
47874-extern atomic_t fscache_n_op_deferred_release;
47875-extern atomic_t fscache_n_op_release;
47876-extern atomic_t fscache_n_op_gc;
47877-extern atomic_t fscache_n_op_cancelled;
47878-extern atomic_t fscache_n_op_rejected;
47879+extern atomic_unchecked_t fscache_n_op_pend;
47880+extern atomic_unchecked_t fscache_n_op_run;
47881+extern atomic_unchecked_t fscache_n_op_enqueue;
47882+extern atomic_unchecked_t fscache_n_op_deferred_release;
47883+extern atomic_unchecked_t fscache_n_op_release;
47884+extern atomic_unchecked_t fscache_n_op_gc;
47885+extern atomic_unchecked_t fscache_n_op_cancelled;
47886+extern atomic_unchecked_t fscache_n_op_rejected;
47887
47888-extern atomic_t fscache_n_attr_changed;
47889-extern atomic_t fscache_n_attr_changed_ok;
47890-extern atomic_t fscache_n_attr_changed_nobufs;
47891-extern atomic_t fscache_n_attr_changed_nomem;
47892-extern atomic_t fscache_n_attr_changed_calls;
47893+extern atomic_unchecked_t fscache_n_attr_changed;
47894+extern atomic_unchecked_t fscache_n_attr_changed_ok;
47895+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
47896+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
47897+extern atomic_unchecked_t fscache_n_attr_changed_calls;
47898
47899-extern atomic_t fscache_n_allocs;
47900-extern atomic_t fscache_n_allocs_ok;
47901-extern atomic_t fscache_n_allocs_wait;
47902-extern atomic_t fscache_n_allocs_nobufs;
47903-extern atomic_t fscache_n_allocs_intr;
47904-extern atomic_t fscache_n_allocs_object_dead;
47905-extern atomic_t fscache_n_alloc_ops;
47906-extern atomic_t fscache_n_alloc_op_waits;
47907+extern atomic_unchecked_t fscache_n_allocs;
47908+extern atomic_unchecked_t fscache_n_allocs_ok;
47909+extern atomic_unchecked_t fscache_n_allocs_wait;
47910+extern atomic_unchecked_t fscache_n_allocs_nobufs;
47911+extern atomic_unchecked_t fscache_n_allocs_intr;
47912+extern atomic_unchecked_t fscache_n_allocs_object_dead;
47913+extern atomic_unchecked_t fscache_n_alloc_ops;
47914+extern atomic_unchecked_t fscache_n_alloc_op_waits;
47915
47916-extern atomic_t fscache_n_retrievals;
47917-extern atomic_t fscache_n_retrievals_ok;
47918-extern atomic_t fscache_n_retrievals_wait;
47919-extern atomic_t fscache_n_retrievals_nodata;
47920-extern atomic_t fscache_n_retrievals_nobufs;
47921-extern atomic_t fscache_n_retrievals_intr;
47922-extern atomic_t fscache_n_retrievals_nomem;
47923-extern atomic_t fscache_n_retrievals_object_dead;
47924-extern atomic_t fscache_n_retrieval_ops;
47925-extern atomic_t fscache_n_retrieval_op_waits;
47926+extern atomic_unchecked_t fscache_n_retrievals;
47927+extern atomic_unchecked_t fscache_n_retrievals_ok;
47928+extern atomic_unchecked_t fscache_n_retrievals_wait;
47929+extern atomic_unchecked_t fscache_n_retrievals_nodata;
47930+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
47931+extern atomic_unchecked_t fscache_n_retrievals_intr;
47932+extern atomic_unchecked_t fscache_n_retrievals_nomem;
47933+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
47934+extern atomic_unchecked_t fscache_n_retrieval_ops;
47935+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
47936
47937-extern atomic_t fscache_n_stores;
47938-extern atomic_t fscache_n_stores_ok;
47939-extern atomic_t fscache_n_stores_again;
47940-extern atomic_t fscache_n_stores_nobufs;
47941-extern atomic_t fscache_n_stores_oom;
47942-extern atomic_t fscache_n_store_ops;
47943-extern atomic_t fscache_n_store_calls;
47944-extern atomic_t fscache_n_store_pages;
47945-extern atomic_t fscache_n_store_radix_deletes;
47946-extern atomic_t fscache_n_store_pages_over_limit;
47947+extern atomic_unchecked_t fscache_n_stores;
47948+extern atomic_unchecked_t fscache_n_stores_ok;
47949+extern atomic_unchecked_t fscache_n_stores_again;
47950+extern atomic_unchecked_t fscache_n_stores_nobufs;
47951+extern atomic_unchecked_t fscache_n_stores_oom;
47952+extern atomic_unchecked_t fscache_n_store_ops;
47953+extern atomic_unchecked_t fscache_n_store_calls;
47954+extern atomic_unchecked_t fscache_n_store_pages;
47955+extern atomic_unchecked_t fscache_n_store_radix_deletes;
47956+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
47957
47958-extern atomic_t fscache_n_store_vmscan_not_storing;
47959-extern atomic_t fscache_n_store_vmscan_gone;
47960-extern atomic_t fscache_n_store_vmscan_busy;
47961-extern atomic_t fscache_n_store_vmscan_cancelled;
47962+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
47963+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
47964+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
47965+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
47966
47967-extern atomic_t fscache_n_marks;
47968-extern atomic_t fscache_n_uncaches;
47969+extern atomic_unchecked_t fscache_n_marks;
47970+extern atomic_unchecked_t fscache_n_uncaches;
47971
47972-extern atomic_t fscache_n_acquires;
47973-extern atomic_t fscache_n_acquires_null;
47974-extern atomic_t fscache_n_acquires_no_cache;
47975-extern atomic_t fscache_n_acquires_ok;
47976-extern atomic_t fscache_n_acquires_nobufs;
47977-extern atomic_t fscache_n_acquires_oom;
47978+extern atomic_unchecked_t fscache_n_acquires;
47979+extern atomic_unchecked_t fscache_n_acquires_null;
47980+extern atomic_unchecked_t fscache_n_acquires_no_cache;
47981+extern atomic_unchecked_t fscache_n_acquires_ok;
47982+extern atomic_unchecked_t fscache_n_acquires_nobufs;
47983+extern atomic_unchecked_t fscache_n_acquires_oom;
47984
47985-extern atomic_t fscache_n_updates;
47986-extern atomic_t fscache_n_updates_null;
47987-extern atomic_t fscache_n_updates_run;
47988+extern atomic_unchecked_t fscache_n_updates;
47989+extern atomic_unchecked_t fscache_n_updates_null;
47990+extern atomic_unchecked_t fscache_n_updates_run;
47991
47992-extern atomic_t fscache_n_relinquishes;
47993-extern atomic_t fscache_n_relinquishes_null;
47994-extern atomic_t fscache_n_relinquishes_waitcrt;
47995-extern atomic_t fscache_n_relinquishes_retire;
47996+extern atomic_unchecked_t fscache_n_relinquishes;
47997+extern atomic_unchecked_t fscache_n_relinquishes_null;
47998+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
47999+extern atomic_unchecked_t fscache_n_relinquishes_retire;
48000
48001-extern atomic_t fscache_n_cookie_index;
48002-extern atomic_t fscache_n_cookie_data;
48003-extern atomic_t fscache_n_cookie_special;
48004+extern atomic_unchecked_t fscache_n_cookie_index;
48005+extern atomic_unchecked_t fscache_n_cookie_data;
48006+extern atomic_unchecked_t fscache_n_cookie_special;
48007
48008-extern atomic_t fscache_n_object_alloc;
48009-extern atomic_t fscache_n_object_no_alloc;
48010-extern atomic_t fscache_n_object_lookups;
48011-extern atomic_t fscache_n_object_lookups_negative;
48012-extern atomic_t fscache_n_object_lookups_positive;
48013-extern atomic_t fscache_n_object_lookups_timed_out;
48014-extern atomic_t fscache_n_object_created;
48015-extern atomic_t fscache_n_object_avail;
48016-extern atomic_t fscache_n_object_dead;
48017+extern atomic_unchecked_t fscache_n_object_alloc;
48018+extern atomic_unchecked_t fscache_n_object_no_alloc;
48019+extern atomic_unchecked_t fscache_n_object_lookups;
48020+extern atomic_unchecked_t fscache_n_object_lookups_negative;
48021+extern atomic_unchecked_t fscache_n_object_lookups_positive;
48022+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
48023+extern atomic_unchecked_t fscache_n_object_created;
48024+extern atomic_unchecked_t fscache_n_object_avail;
48025+extern atomic_unchecked_t fscache_n_object_dead;
48026
48027-extern atomic_t fscache_n_checkaux_none;
48028-extern atomic_t fscache_n_checkaux_okay;
48029-extern atomic_t fscache_n_checkaux_update;
48030-extern atomic_t fscache_n_checkaux_obsolete;
48031+extern atomic_unchecked_t fscache_n_checkaux_none;
48032+extern atomic_unchecked_t fscache_n_checkaux_okay;
48033+extern atomic_unchecked_t fscache_n_checkaux_update;
48034+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
48035
48036 extern atomic_t fscache_n_cop_alloc_object;
48037 extern atomic_t fscache_n_cop_lookup_object;
48038@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
48039 atomic_inc(stat);
48040 }
48041
48042+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
48043+{
48044+ atomic_inc_unchecked(stat);
48045+}
48046+
48047 static inline void fscache_stat_d(atomic_t *stat)
48048 {
48049 atomic_dec(stat);
48050@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
48051
48052 #define __fscache_stat(stat) (NULL)
48053 #define fscache_stat(stat) do {} while (0)
48054+#define fscache_stat_unchecked(stat) do {} while (0)
48055 #define fscache_stat_d(stat) do {} while (0)
48056 #endif
48057
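
Every statistics counter in this header moves from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, ordinary atomic_t increments trap on overflow to catch reference-count bugs; pure event counters may legitimately wrap, so they are switched to the unchecked variants, which behave like unhardened atomics. When the hardening is not configured, the companion PaX headers (not shown in this hunk) reduce the unchecked API to plain aliases, roughly:

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_add_unchecked(i, v)	atomic_add((i), (v))
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif

Note that the fscache_n_cop_* counters at the end of the block stay as checked atomic_t: they are balanced by fscache_stat_d() decrements, so overflow checking remains meaningful there.
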
48058diff --git a/fs/fscache/object.c b/fs/fscache/object.c
48059index b6b897c..0ffff9c 100644
48060--- a/fs/fscache/object.c
48061+++ b/fs/fscache/object.c
48062@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
48063 /* update the object metadata on disk */
48064 case FSCACHE_OBJECT_UPDATING:
48065 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
48066- fscache_stat(&fscache_n_updates_run);
48067+ fscache_stat_unchecked(&fscache_n_updates_run);
48068 fscache_stat(&fscache_n_cop_update_object);
48069 object->cache->ops->update_object(object);
48070 fscache_stat_d(&fscache_n_cop_update_object);
48071@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
48072 spin_lock(&object->lock);
48073 object->state = FSCACHE_OBJECT_DEAD;
48074 spin_unlock(&object->lock);
48075- fscache_stat(&fscache_n_object_dead);
48076+ fscache_stat_unchecked(&fscache_n_object_dead);
48077 goto terminal_transit;
48078
48079 /* handle the parent cache of this object being withdrawn from
48080@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
48081 spin_lock(&object->lock);
48082 object->state = FSCACHE_OBJECT_DEAD;
48083 spin_unlock(&object->lock);
48084- fscache_stat(&fscache_n_object_dead);
48085+ fscache_stat_unchecked(&fscache_n_object_dead);
48086 goto terminal_transit;
48087
48088 /* complain about the object being woken up once it is
48089@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
48090 parent->cookie->def->name, cookie->def->name,
48091 object->cache->tag->name);
48092
48093- fscache_stat(&fscache_n_object_lookups);
48094+ fscache_stat_unchecked(&fscache_n_object_lookups);
48095 fscache_stat(&fscache_n_cop_lookup_object);
48096 ret = object->cache->ops->lookup_object(object);
48097 fscache_stat_d(&fscache_n_cop_lookup_object);
48098@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
48099 if (ret == -ETIMEDOUT) {
48100 /* probably stuck behind another object, so move this one to
48101 * the back of the queue */
48102- fscache_stat(&fscache_n_object_lookups_timed_out);
48103+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
48104 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
48105 }
48106
48107@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
48108
48109 spin_lock(&object->lock);
48110 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
48111- fscache_stat(&fscache_n_object_lookups_negative);
48112+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
48113
48114 /* transit here to allow write requests to begin stacking up
48115 * and read requests to begin returning ENODATA */
48116@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
48117 * result, in which case there may be data available */
48118 spin_lock(&object->lock);
48119 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
48120- fscache_stat(&fscache_n_object_lookups_positive);
48121+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
48122
48123 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
48124
48125@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
48126 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
48127 } else {
48128 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
48129- fscache_stat(&fscache_n_object_created);
48130+ fscache_stat_unchecked(&fscache_n_object_created);
48131
48132 object->state = FSCACHE_OBJECT_AVAILABLE;
48133 spin_unlock(&object->lock);
48134@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
48135 fscache_enqueue_dependents(object);
48136
48137 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
48138- fscache_stat(&fscache_n_object_avail);
48139+ fscache_stat_unchecked(&fscache_n_object_avail);
48140
48141 _leave("");
48142 }
48143@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
48144 enum fscache_checkaux result;
48145
48146 if (!object->cookie->def->check_aux) {
48147- fscache_stat(&fscache_n_checkaux_none);
48148+ fscache_stat_unchecked(&fscache_n_checkaux_none);
48149 return FSCACHE_CHECKAUX_OKAY;
48150 }
48151
48152@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
48153 switch (result) {
48154 /* entry okay as is */
48155 case FSCACHE_CHECKAUX_OKAY:
48156- fscache_stat(&fscache_n_checkaux_okay);
48157+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
48158 break;
48159
48160 /* entry requires update */
48161 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
48162- fscache_stat(&fscache_n_checkaux_update);
48163+ fscache_stat_unchecked(&fscache_n_checkaux_update);
48164 break;
48165
48166 /* entry requires deletion */
48167 case FSCACHE_CHECKAUX_OBSOLETE:
48168- fscache_stat(&fscache_n_checkaux_obsolete);
48169+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
48170 break;
48171
48172 default:
48173diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
48174index 30afdfa..2256596 100644
48175--- a/fs/fscache/operation.c
48176+++ b/fs/fscache/operation.c
48177@@ -17,7 +17,7 @@
48178 #include <linux/slab.h>
48179 #include "internal.h"
48180
48181-atomic_t fscache_op_debug_id;
48182+atomic_unchecked_t fscache_op_debug_id;
48183 EXPORT_SYMBOL(fscache_op_debug_id);
48184
48185 /**
48186@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
48187 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
48188 ASSERTCMP(atomic_read(&op->usage), >, 0);
48189
48190- fscache_stat(&fscache_n_op_enqueue);
48191+ fscache_stat_unchecked(&fscache_n_op_enqueue);
48192 switch (op->flags & FSCACHE_OP_TYPE) {
48193 case FSCACHE_OP_ASYNC:
48194 _debug("queue async");
48195@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
48196 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
48197 if (op->processor)
48198 fscache_enqueue_operation(op);
48199- fscache_stat(&fscache_n_op_run);
48200+ fscache_stat_unchecked(&fscache_n_op_run);
48201 }
48202
48203 /*
48204@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
48205 if (object->n_ops > 1) {
48206 atomic_inc(&op->usage);
48207 list_add_tail(&op->pend_link, &object->pending_ops);
48208- fscache_stat(&fscache_n_op_pend);
48209+ fscache_stat_unchecked(&fscache_n_op_pend);
48210 } else if (!list_empty(&object->pending_ops)) {
48211 atomic_inc(&op->usage);
48212 list_add_tail(&op->pend_link, &object->pending_ops);
48213- fscache_stat(&fscache_n_op_pend);
48214+ fscache_stat_unchecked(&fscache_n_op_pend);
48215 fscache_start_operations(object);
48216 } else {
48217 ASSERTCMP(object->n_in_progress, ==, 0);
48218@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
48219 object->n_exclusive++; /* reads and writes must wait */
48220 atomic_inc(&op->usage);
48221 list_add_tail(&op->pend_link, &object->pending_ops);
48222- fscache_stat(&fscache_n_op_pend);
48223+ fscache_stat_unchecked(&fscache_n_op_pend);
48224 ret = 0;
48225 } else {
48226 /* not allowed to submit ops in any other state */
48227@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
48228 if (object->n_exclusive > 0) {
48229 atomic_inc(&op->usage);
48230 list_add_tail(&op->pend_link, &object->pending_ops);
48231- fscache_stat(&fscache_n_op_pend);
48232+ fscache_stat_unchecked(&fscache_n_op_pend);
48233 } else if (!list_empty(&object->pending_ops)) {
48234 atomic_inc(&op->usage);
48235 list_add_tail(&op->pend_link, &object->pending_ops);
48236- fscache_stat(&fscache_n_op_pend);
48237+ fscache_stat_unchecked(&fscache_n_op_pend);
48238 fscache_start_operations(object);
48239 } else {
48240 ASSERTCMP(object->n_exclusive, ==, 0);
48241@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
48242 object->n_ops++;
48243 atomic_inc(&op->usage);
48244 list_add_tail(&op->pend_link, &object->pending_ops);
48245- fscache_stat(&fscache_n_op_pend);
48246+ fscache_stat_unchecked(&fscache_n_op_pend);
48247 ret = 0;
48248 } else if (object->state == FSCACHE_OBJECT_DYING ||
48249 object->state == FSCACHE_OBJECT_LC_DYING ||
48250 object->state == FSCACHE_OBJECT_WITHDRAWING) {
48251- fscache_stat(&fscache_n_op_rejected);
48252+ fscache_stat_unchecked(&fscache_n_op_rejected);
48253 ret = -ENOBUFS;
48254 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
48255 fscache_report_unexpected_submission(object, op, ostate);
48256@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
48257
48258 ret = -EBUSY;
48259 if (!list_empty(&op->pend_link)) {
48260- fscache_stat(&fscache_n_op_cancelled);
48261+ fscache_stat_unchecked(&fscache_n_op_cancelled);
48262 list_del_init(&op->pend_link);
48263 object->n_ops--;
48264 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
48265@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
48266 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
48267 BUG();
48268
48269- fscache_stat(&fscache_n_op_release);
48270+ fscache_stat_unchecked(&fscache_n_op_release);
48271
48272 if (op->release) {
48273 op->release(op);
48274@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
48275 * lock, and defer it otherwise */
48276 if (!spin_trylock(&object->lock)) {
48277 _debug("defer put");
48278- fscache_stat(&fscache_n_op_deferred_release);
48279+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
48280
48281 cache = object->cache;
48282 spin_lock(&cache->op_gc_list_lock);
48283@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
48284
48285 _debug("GC DEFERRED REL OBJ%x OP%x",
48286 object->debug_id, op->debug_id);
48287- fscache_stat(&fscache_n_op_gc);
48288+ fscache_stat_unchecked(&fscache_n_op_gc);
48289
48290 ASSERTCMP(atomic_read(&op->usage), ==, 0);
48291
48292diff --git a/fs/fscache/page.c b/fs/fscache/page.c
48293index 3f7a59b..cf196cc 100644
48294--- a/fs/fscache/page.c
48295+++ b/fs/fscache/page.c
48296@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48297 val = radix_tree_lookup(&cookie->stores, page->index);
48298 if (!val) {
48299 rcu_read_unlock();
48300- fscache_stat(&fscache_n_store_vmscan_not_storing);
48301+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
48302 __fscache_uncache_page(cookie, page);
48303 return true;
48304 }
48305@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
48306 spin_unlock(&cookie->stores_lock);
48307
48308 if (xpage) {
48309- fscache_stat(&fscache_n_store_vmscan_cancelled);
48310- fscache_stat(&fscache_n_store_radix_deletes);
48311+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
48312+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48313 ASSERTCMP(xpage, ==, page);
48314 } else {
48315- fscache_stat(&fscache_n_store_vmscan_gone);
48316+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
48317 }
48318
48319 wake_up_bit(&cookie->flags, 0);
48320@@ -107,7 +107,7 @@ page_busy:
48321 /* we might want to wait here, but that could deadlock the allocator as
48322 * the work threads writing to the cache may all end up sleeping
48323 * on memory allocation */
48324- fscache_stat(&fscache_n_store_vmscan_busy);
48325+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
48326 return false;
48327 }
48328 EXPORT_SYMBOL(__fscache_maybe_release_page);
48329@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
48330 FSCACHE_COOKIE_STORING_TAG);
48331 if (!radix_tree_tag_get(&cookie->stores, page->index,
48332 FSCACHE_COOKIE_PENDING_TAG)) {
48333- fscache_stat(&fscache_n_store_radix_deletes);
48334+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
48335 xpage = radix_tree_delete(&cookie->stores, page->index);
48336 }
48337 spin_unlock(&cookie->stores_lock);
48338@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
48339
48340 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
48341
48342- fscache_stat(&fscache_n_attr_changed_calls);
48343+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
48344
48345 if (fscache_object_is_active(object)) {
48346 fscache_stat(&fscache_n_cop_attr_changed);
48347@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48348
48349 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48350
48351- fscache_stat(&fscache_n_attr_changed);
48352+ fscache_stat_unchecked(&fscache_n_attr_changed);
48353
48354 op = kzalloc(sizeof(*op), GFP_KERNEL);
48355 if (!op) {
48356- fscache_stat(&fscache_n_attr_changed_nomem);
48357+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
48358 _leave(" = -ENOMEM");
48359 return -ENOMEM;
48360 }
48361@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48362 if (fscache_submit_exclusive_op(object, op) < 0)
48363 goto nobufs;
48364 spin_unlock(&cookie->lock);
48365- fscache_stat(&fscache_n_attr_changed_ok);
48366+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
48367 fscache_put_operation(op);
48368 _leave(" = 0");
48369 return 0;
48370@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
48371 nobufs:
48372 spin_unlock(&cookie->lock);
48373 kfree(op);
48374- fscache_stat(&fscache_n_attr_changed_nobufs);
48375+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
48376 _leave(" = %d", -ENOBUFS);
48377 return -ENOBUFS;
48378 }
48379@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
48380 /* allocate a retrieval operation and attempt to submit it */
48381 op = kzalloc(sizeof(*op), GFP_NOIO);
48382 if (!op) {
48383- fscache_stat(&fscache_n_retrievals_nomem);
48384+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48385 return NULL;
48386 }
48387
48388@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48389 return 0;
48390 }
48391
48392- fscache_stat(&fscache_n_retrievals_wait);
48393+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
48394
48395 jif = jiffies;
48396 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
48397 fscache_wait_bit_interruptible,
48398 TASK_INTERRUPTIBLE) != 0) {
48399- fscache_stat(&fscache_n_retrievals_intr);
48400+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48401 _leave(" = -ERESTARTSYS");
48402 return -ERESTARTSYS;
48403 }
48404@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
48405 */
48406 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48407 struct fscache_retrieval *op,
48408- atomic_t *stat_op_waits,
48409- atomic_t *stat_object_dead)
48410+ atomic_unchecked_t *stat_op_waits,
48411+ atomic_unchecked_t *stat_object_dead)
48412 {
48413 int ret;
48414
48415@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48416 goto check_if_dead;
48417
48418 _debug(">>> WT");
48419- fscache_stat(stat_op_waits);
48420+ fscache_stat_unchecked(stat_op_waits);
48421 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
48422 fscache_wait_bit_interruptible,
48423 TASK_INTERRUPTIBLE) < 0) {
48424@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
48425
48426 check_if_dead:
48427 if (unlikely(fscache_object_is_dead(object))) {
48428- fscache_stat(stat_object_dead);
48429+ fscache_stat_unchecked(stat_object_dead);
48430 return -ENOBUFS;
48431 }
48432 return 0;
48433@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48434
48435 _enter("%p,%p,,,", cookie, page);
48436
48437- fscache_stat(&fscache_n_retrievals);
48438+ fscache_stat_unchecked(&fscache_n_retrievals);
48439
48440 if (hlist_empty(&cookie->backing_objects))
48441 goto nobufs;
48442@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48443 goto nobufs_unlock;
48444 spin_unlock(&cookie->lock);
48445
48446- fscache_stat(&fscache_n_retrieval_ops);
48447+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
48448
48449 /* pin the netfs read context in case we need to do the actual netfs
48450 * read because we've encountered a cache read failure */
48451@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
48452
48453 error:
48454 if (ret == -ENOMEM)
48455- fscache_stat(&fscache_n_retrievals_nomem);
48456+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48457 else if (ret == -ERESTARTSYS)
48458- fscache_stat(&fscache_n_retrievals_intr);
48459+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48460 else if (ret == -ENODATA)
48461- fscache_stat(&fscache_n_retrievals_nodata);
48462+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48463 else if (ret < 0)
48464- fscache_stat(&fscache_n_retrievals_nobufs);
48465+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48466 else
48467- fscache_stat(&fscache_n_retrievals_ok);
48468+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
48469
48470 fscache_put_retrieval(op);
48471 _leave(" = %d", ret);
48472@@ -429,7 +429,7 @@ nobufs_unlock:
48473 spin_unlock(&cookie->lock);
48474 kfree(op);
48475 nobufs:
48476- fscache_stat(&fscache_n_retrievals_nobufs);
48477+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48478 _leave(" = -ENOBUFS");
48479 return -ENOBUFS;
48480 }
48481@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48482
48483 _enter("%p,,%d,,,", cookie, *nr_pages);
48484
48485- fscache_stat(&fscache_n_retrievals);
48486+ fscache_stat_unchecked(&fscache_n_retrievals);
48487
48488 if (hlist_empty(&cookie->backing_objects))
48489 goto nobufs;
48490@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48491 goto nobufs_unlock;
48492 spin_unlock(&cookie->lock);
48493
48494- fscache_stat(&fscache_n_retrieval_ops);
48495+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
48496
48497 /* pin the netfs read context in case we need to do the actual netfs
48498 * read because we've encountered a cache read failure */
48499@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
48500
48501 error:
48502 if (ret == -ENOMEM)
48503- fscache_stat(&fscache_n_retrievals_nomem);
48504+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
48505 else if (ret == -ERESTARTSYS)
48506- fscache_stat(&fscache_n_retrievals_intr);
48507+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
48508 else if (ret == -ENODATA)
48509- fscache_stat(&fscache_n_retrievals_nodata);
48510+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
48511 else if (ret < 0)
48512- fscache_stat(&fscache_n_retrievals_nobufs);
48513+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48514 else
48515- fscache_stat(&fscache_n_retrievals_ok);
48516+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
48517
48518 fscache_put_retrieval(op);
48519 _leave(" = %d", ret);
48520@@ -545,7 +545,7 @@ nobufs_unlock:
48521 spin_unlock(&cookie->lock);
48522 kfree(op);
48523 nobufs:
48524- fscache_stat(&fscache_n_retrievals_nobufs);
48525+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
48526 _leave(" = -ENOBUFS");
48527 return -ENOBUFS;
48528 }
48529@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48530
48531 _enter("%p,%p,,,", cookie, page);
48532
48533- fscache_stat(&fscache_n_allocs);
48534+ fscache_stat_unchecked(&fscache_n_allocs);
48535
48536 if (hlist_empty(&cookie->backing_objects))
48537 goto nobufs;
48538@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48539 goto nobufs_unlock;
48540 spin_unlock(&cookie->lock);
48541
48542- fscache_stat(&fscache_n_alloc_ops);
48543+ fscache_stat_unchecked(&fscache_n_alloc_ops);
48544
48545 ret = fscache_wait_for_retrieval_activation(
48546 object, op,
48547@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
48548
48549 error:
48550 if (ret == -ERESTARTSYS)
48551- fscache_stat(&fscache_n_allocs_intr);
48552+ fscache_stat_unchecked(&fscache_n_allocs_intr);
48553 else if (ret < 0)
48554- fscache_stat(&fscache_n_allocs_nobufs);
48555+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48556 else
48557- fscache_stat(&fscache_n_allocs_ok);
48558+ fscache_stat_unchecked(&fscache_n_allocs_ok);
48559
48560 fscache_put_retrieval(op);
48561 _leave(" = %d", ret);
48562@@ -625,7 +625,7 @@ nobufs_unlock:
48563 spin_unlock(&cookie->lock);
48564 kfree(op);
48565 nobufs:
48566- fscache_stat(&fscache_n_allocs_nobufs);
48567+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
48568 _leave(" = -ENOBUFS");
48569 return -ENOBUFS;
48570 }
48571@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48572
48573 spin_lock(&cookie->stores_lock);
48574
48575- fscache_stat(&fscache_n_store_calls);
48576+ fscache_stat_unchecked(&fscache_n_store_calls);
48577
48578 /* find a page to store */
48579 page = NULL;
48580@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48581 page = results[0];
48582 _debug("gang %d [%lx]", n, page->index);
48583 if (page->index > op->store_limit) {
48584- fscache_stat(&fscache_n_store_pages_over_limit);
48585+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
48586 goto superseded;
48587 }
48588
48589@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
48590 spin_unlock(&cookie->stores_lock);
48591 spin_unlock(&object->lock);
48592
48593- fscache_stat(&fscache_n_store_pages);
48594+ fscache_stat_unchecked(&fscache_n_store_pages);
48595 fscache_stat(&fscache_n_cop_write_page);
48596 ret = object->cache->ops->write_page(op, page);
48597 fscache_stat_d(&fscache_n_cop_write_page);
48598@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48599 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48600 ASSERT(PageFsCache(page));
48601
48602- fscache_stat(&fscache_n_stores);
48603+ fscache_stat_unchecked(&fscache_n_stores);
48604
48605 op = kzalloc(sizeof(*op), GFP_NOIO);
48606 if (!op)
48607@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48608 spin_unlock(&cookie->stores_lock);
48609 spin_unlock(&object->lock);
48610
48611- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
48612+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
48613 op->store_limit = object->store_limit;
48614
48615 if (fscache_submit_op(object, &op->op) < 0)
48616@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48617
48618 spin_unlock(&cookie->lock);
48619 radix_tree_preload_end();
48620- fscache_stat(&fscache_n_store_ops);
48621- fscache_stat(&fscache_n_stores_ok);
48622+ fscache_stat_unchecked(&fscache_n_store_ops);
48623+ fscache_stat_unchecked(&fscache_n_stores_ok);
48624
48625 /* the work queue now carries its own ref on the object */
48626 fscache_put_operation(&op->op);
48627@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
48628 return 0;
48629
48630 already_queued:
48631- fscache_stat(&fscache_n_stores_again);
48632+ fscache_stat_unchecked(&fscache_n_stores_again);
48633 already_pending:
48634 spin_unlock(&cookie->stores_lock);
48635 spin_unlock(&object->lock);
48636 spin_unlock(&cookie->lock);
48637 radix_tree_preload_end();
48638 kfree(op);
48639- fscache_stat(&fscache_n_stores_ok);
48640+ fscache_stat_unchecked(&fscache_n_stores_ok);
48641 _leave(" = 0");
48642 return 0;
48643
48644@@ -851,14 +851,14 @@ nobufs:
48645 spin_unlock(&cookie->lock);
48646 radix_tree_preload_end();
48647 kfree(op);
48648- fscache_stat(&fscache_n_stores_nobufs);
48649+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
48650 _leave(" = -ENOBUFS");
48651 return -ENOBUFS;
48652
48653 nomem_free:
48654 kfree(op);
48655 nomem:
48656- fscache_stat(&fscache_n_stores_oom);
48657+ fscache_stat_unchecked(&fscache_n_stores_oom);
48658 _leave(" = -ENOMEM");
48659 return -ENOMEM;
48660 }
48661@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
48662 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
48663 ASSERTCMP(page, !=, NULL);
48664
48665- fscache_stat(&fscache_n_uncaches);
48666+ fscache_stat_unchecked(&fscache_n_uncaches);
48667
48668 /* cache withdrawal may beat us to it */
48669 if (!PageFsCache(page))
48670@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
48671 unsigned long loop;
48672
48673 #ifdef CONFIG_FSCACHE_STATS
48674- atomic_add(pagevec->nr, &fscache_n_marks);
48675+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
48676 #endif
48677
48678 for (loop = 0; loop < pagevec->nr; loop++) {
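
Besides the per-counter conversions, this file switches fscache_op_debug_id to the unchecked API: debug IDs are handed out by a bare increment and are harmless if they wrap, which is exactly the case overflow trapping must not flag. A sketch of the allocator pattern (next_op_debug_id() and op_debug_id_src are illustrative names):

/* Sketch: a wrapping ID source.  atomic_inc_return_unchecked() keeps
 * REFCOUNT hardening from treating a wrap here as an exploit attempt. */
static atomic_unchecked_t op_debug_id_src;

static unsigned int next_op_debug_id(void)
{
	return atomic_inc_return_unchecked(&op_debug_id_src);
}
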
48679diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
48680index 4765190..2a067f2 100644
48681--- a/fs/fscache/stats.c
48682+++ b/fs/fscache/stats.c
48683@@ -18,95 +18,95 @@
48684 /*
48685 * operation counters
48686 */
48687-atomic_t fscache_n_op_pend;
48688-atomic_t fscache_n_op_run;
48689-atomic_t fscache_n_op_enqueue;
48690-atomic_t fscache_n_op_requeue;
48691-atomic_t fscache_n_op_deferred_release;
48692-atomic_t fscache_n_op_release;
48693-atomic_t fscache_n_op_gc;
48694-atomic_t fscache_n_op_cancelled;
48695-atomic_t fscache_n_op_rejected;
48696+atomic_unchecked_t fscache_n_op_pend;
48697+atomic_unchecked_t fscache_n_op_run;
48698+atomic_unchecked_t fscache_n_op_enqueue;
48699+atomic_unchecked_t fscache_n_op_requeue;
48700+atomic_unchecked_t fscache_n_op_deferred_release;
48701+atomic_unchecked_t fscache_n_op_release;
48702+atomic_unchecked_t fscache_n_op_gc;
48703+atomic_unchecked_t fscache_n_op_cancelled;
48704+atomic_unchecked_t fscache_n_op_rejected;
48705
48706-atomic_t fscache_n_attr_changed;
48707-atomic_t fscache_n_attr_changed_ok;
48708-atomic_t fscache_n_attr_changed_nobufs;
48709-atomic_t fscache_n_attr_changed_nomem;
48710-atomic_t fscache_n_attr_changed_calls;
48711+atomic_unchecked_t fscache_n_attr_changed;
48712+atomic_unchecked_t fscache_n_attr_changed_ok;
48713+atomic_unchecked_t fscache_n_attr_changed_nobufs;
48714+atomic_unchecked_t fscache_n_attr_changed_nomem;
48715+atomic_unchecked_t fscache_n_attr_changed_calls;
48716
48717-atomic_t fscache_n_allocs;
48718-atomic_t fscache_n_allocs_ok;
48719-atomic_t fscache_n_allocs_wait;
48720-atomic_t fscache_n_allocs_nobufs;
48721-atomic_t fscache_n_allocs_intr;
48722-atomic_t fscache_n_allocs_object_dead;
48723-atomic_t fscache_n_alloc_ops;
48724-atomic_t fscache_n_alloc_op_waits;
48725+atomic_unchecked_t fscache_n_allocs;
48726+atomic_unchecked_t fscache_n_allocs_ok;
48727+atomic_unchecked_t fscache_n_allocs_wait;
48728+atomic_unchecked_t fscache_n_allocs_nobufs;
48729+atomic_unchecked_t fscache_n_allocs_intr;
48730+atomic_unchecked_t fscache_n_allocs_object_dead;
48731+atomic_unchecked_t fscache_n_alloc_ops;
48732+atomic_unchecked_t fscache_n_alloc_op_waits;
48733
48734-atomic_t fscache_n_retrievals;
48735-atomic_t fscache_n_retrievals_ok;
48736-atomic_t fscache_n_retrievals_wait;
48737-atomic_t fscache_n_retrievals_nodata;
48738-atomic_t fscache_n_retrievals_nobufs;
48739-atomic_t fscache_n_retrievals_intr;
48740-atomic_t fscache_n_retrievals_nomem;
48741-atomic_t fscache_n_retrievals_object_dead;
48742-atomic_t fscache_n_retrieval_ops;
48743-atomic_t fscache_n_retrieval_op_waits;
48744+atomic_unchecked_t fscache_n_retrievals;
48745+atomic_unchecked_t fscache_n_retrievals_ok;
48746+atomic_unchecked_t fscache_n_retrievals_wait;
48747+atomic_unchecked_t fscache_n_retrievals_nodata;
48748+atomic_unchecked_t fscache_n_retrievals_nobufs;
48749+atomic_unchecked_t fscache_n_retrievals_intr;
48750+atomic_unchecked_t fscache_n_retrievals_nomem;
48751+atomic_unchecked_t fscache_n_retrievals_object_dead;
48752+atomic_unchecked_t fscache_n_retrieval_ops;
48753+atomic_unchecked_t fscache_n_retrieval_op_waits;
48754
48755-atomic_t fscache_n_stores;
48756-atomic_t fscache_n_stores_ok;
48757-atomic_t fscache_n_stores_again;
48758-atomic_t fscache_n_stores_nobufs;
48759-atomic_t fscache_n_stores_oom;
48760-atomic_t fscache_n_store_ops;
48761-atomic_t fscache_n_store_calls;
48762-atomic_t fscache_n_store_pages;
48763-atomic_t fscache_n_store_radix_deletes;
48764-atomic_t fscache_n_store_pages_over_limit;
48765+atomic_unchecked_t fscache_n_stores;
48766+atomic_unchecked_t fscache_n_stores_ok;
48767+atomic_unchecked_t fscache_n_stores_again;
48768+atomic_unchecked_t fscache_n_stores_nobufs;
48769+atomic_unchecked_t fscache_n_stores_oom;
48770+atomic_unchecked_t fscache_n_store_ops;
48771+atomic_unchecked_t fscache_n_store_calls;
48772+atomic_unchecked_t fscache_n_store_pages;
48773+atomic_unchecked_t fscache_n_store_radix_deletes;
48774+atomic_unchecked_t fscache_n_store_pages_over_limit;
48775
48776-atomic_t fscache_n_store_vmscan_not_storing;
48777-atomic_t fscache_n_store_vmscan_gone;
48778-atomic_t fscache_n_store_vmscan_busy;
48779-atomic_t fscache_n_store_vmscan_cancelled;
48780+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
48781+atomic_unchecked_t fscache_n_store_vmscan_gone;
48782+atomic_unchecked_t fscache_n_store_vmscan_busy;
48783+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
48784
48785-atomic_t fscache_n_marks;
48786-atomic_t fscache_n_uncaches;
48787+atomic_unchecked_t fscache_n_marks;
48788+atomic_unchecked_t fscache_n_uncaches;
48789
48790-atomic_t fscache_n_acquires;
48791-atomic_t fscache_n_acquires_null;
48792-atomic_t fscache_n_acquires_no_cache;
48793-atomic_t fscache_n_acquires_ok;
48794-atomic_t fscache_n_acquires_nobufs;
48795-atomic_t fscache_n_acquires_oom;
48796+atomic_unchecked_t fscache_n_acquires;
48797+atomic_unchecked_t fscache_n_acquires_null;
48798+atomic_unchecked_t fscache_n_acquires_no_cache;
48799+atomic_unchecked_t fscache_n_acquires_ok;
48800+atomic_unchecked_t fscache_n_acquires_nobufs;
48801+atomic_unchecked_t fscache_n_acquires_oom;
48802
48803-atomic_t fscache_n_updates;
48804-atomic_t fscache_n_updates_null;
48805-atomic_t fscache_n_updates_run;
48806+atomic_unchecked_t fscache_n_updates;
48807+atomic_unchecked_t fscache_n_updates_null;
48808+atomic_unchecked_t fscache_n_updates_run;
48809
48810-atomic_t fscache_n_relinquishes;
48811-atomic_t fscache_n_relinquishes_null;
48812-atomic_t fscache_n_relinquishes_waitcrt;
48813-atomic_t fscache_n_relinquishes_retire;
48814+atomic_unchecked_t fscache_n_relinquishes;
48815+atomic_unchecked_t fscache_n_relinquishes_null;
48816+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
48817+atomic_unchecked_t fscache_n_relinquishes_retire;
48818
48819-atomic_t fscache_n_cookie_index;
48820-atomic_t fscache_n_cookie_data;
48821-atomic_t fscache_n_cookie_special;
48822+atomic_unchecked_t fscache_n_cookie_index;
48823+atomic_unchecked_t fscache_n_cookie_data;
48824+atomic_unchecked_t fscache_n_cookie_special;
48825
48826-atomic_t fscache_n_object_alloc;
48827-atomic_t fscache_n_object_no_alloc;
48828-atomic_t fscache_n_object_lookups;
48829-atomic_t fscache_n_object_lookups_negative;
48830-atomic_t fscache_n_object_lookups_positive;
48831-atomic_t fscache_n_object_lookups_timed_out;
48832-atomic_t fscache_n_object_created;
48833-atomic_t fscache_n_object_avail;
48834-atomic_t fscache_n_object_dead;
48835+atomic_unchecked_t fscache_n_object_alloc;
48836+atomic_unchecked_t fscache_n_object_no_alloc;
48837+atomic_unchecked_t fscache_n_object_lookups;
48838+atomic_unchecked_t fscache_n_object_lookups_negative;
48839+atomic_unchecked_t fscache_n_object_lookups_positive;
48840+atomic_unchecked_t fscache_n_object_lookups_timed_out;
48841+atomic_unchecked_t fscache_n_object_created;
48842+atomic_unchecked_t fscache_n_object_avail;
48843+atomic_unchecked_t fscache_n_object_dead;
48844
48845-atomic_t fscache_n_checkaux_none;
48846-atomic_t fscache_n_checkaux_okay;
48847-atomic_t fscache_n_checkaux_update;
48848-atomic_t fscache_n_checkaux_obsolete;
48849+atomic_unchecked_t fscache_n_checkaux_none;
48850+atomic_unchecked_t fscache_n_checkaux_okay;
48851+atomic_unchecked_t fscache_n_checkaux_update;
48852+atomic_unchecked_t fscache_n_checkaux_obsolete;
48853
48854 atomic_t fscache_n_cop_alloc_object;
48855 atomic_t fscache_n_cop_lookup_object;
48856@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
48857 seq_puts(m, "FS-Cache statistics\n");
48858
48859 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
48860- atomic_read(&fscache_n_cookie_index),
48861- atomic_read(&fscache_n_cookie_data),
48862- atomic_read(&fscache_n_cookie_special));
48863+ atomic_read_unchecked(&fscache_n_cookie_index),
48864+ atomic_read_unchecked(&fscache_n_cookie_data),
48865+ atomic_read_unchecked(&fscache_n_cookie_special));
48866
48867 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
48868- atomic_read(&fscache_n_object_alloc),
48869- atomic_read(&fscache_n_object_no_alloc),
48870- atomic_read(&fscache_n_object_avail),
48871- atomic_read(&fscache_n_object_dead));
48872+ atomic_read_unchecked(&fscache_n_object_alloc),
48873+ atomic_read_unchecked(&fscache_n_object_no_alloc),
48874+ atomic_read_unchecked(&fscache_n_object_avail),
48875+ atomic_read_unchecked(&fscache_n_object_dead));
48876 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
48877- atomic_read(&fscache_n_checkaux_none),
48878- atomic_read(&fscache_n_checkaux_okay),
48879- atomic_read(&fscache_n_checkaux_update),
48880- atomic_read(&fscache_n_checkaux_obsolete));
48881+ atomic_read_unchecked(&fscache_n_checkaux_none),
48882+ atomic_read_unchecked(&fscache_n_checkaux_okay),
48883+ atomic_read_unchecked(&fscache_n_checkaux_update),
48884+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
48885
48886 seq_printf(m, "Pages : mrk=%u unc=%u\n",
48887- atomic_read(&fscache_n_marks),
48888- atomic_read(&fscache_n_uncaches));
48889+ atomic_read_unchecked(&fscache_n_marks),
48890+ atomic_read_unchecked(&fscache_n_uncaches));
48891
48892 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
48893 " oom=%u\n",
48894- atomic_read(&fscache_n_acquires),
48895- atomic_read(&fscache_n_acquires_null),
48896- atomic_read(&fscache_n_acquires_no_cache),
48897- atomic_read(&fscache_n_acquires_ok),
48898- atomic_read(&fscache_n_acquires_nobufs),
48899- atomic_read(&fscache_n_acquires_oom));
48900+ atomic_read_unchecked(&fscache_n_acquires),
48901+ atomic_read_unchecked(&fscache_n_acquires_null),
48902+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
48903+ atomic_read_unchecked(&fscache_n_acquires_ok),
48904+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
48905+ atomic_read_unchecked(&fscache_n_acquires_oom));
48906
48907 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
48908- atomic_read(&fscache_n_object_lookups),
48909- atomic_read(&fscache_n_object_lookups_negative),
48910- atomic_read(&fscache_n_object_lookups_positive),
48911- atomic_read(&fscache_n_object_created),
48912- atomic_read(&fscache_n_object_lookups_timed_out));
48913+ atomic_read_unchecked(&fscache_n_object_lookups),
48914+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
48915+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
48916+ atomic_read_unchecked(&fscache_n_object_created),
48917+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
48918
48919 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
48920- atomic_read(&fscache_n_updates),
48921- atomic_read(&fscache_n_updates_null),
48922- atomic_read(&fscache_n_updates_run));
48923+ atomic_read_unchecked(&fscache_n_updates),
48924+ atomic_read_unchecked(&fscache_n_updates_null),
48925+ atomic_read_unchecked(&fscache_n_updates_run));
48926
48927 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
48928- atomic_read(&fscache_n_relinquishes),
48929- atomic_read(&fscache_n_relinquishes_null),
48930- atomic_read(&fscache_n_relinquishes_waitcrt),
48931- atomic_read(&fscache_n_relinquishes_retire));
48932+ atomic_read_unchecked(&fscache_n_relinquishes),
48933+ atomic_read_unchecked(&fscache_n_relinquishes_null),
48934+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
48935+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
48936
48937 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
48938- atomic_read(&fscache_n_attr_changed),
48939- atomic_read(&fscache_n_attr_changed_ok),
48940- atomic_read(&fscache_n_attr_changed_nobufs),
48941- atomic_read(&fscache_n_attr_changed_nomem),
48942- atomic_read(&fscache_n_attr_changed_calls));
48943+ atomic_read_unchecked(&fscache_n_attr_changed),
48944+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
48945+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
48946+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
48947+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
48948
48949 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
48950- atomic_read(&fscache_n_allocs),
48951- atomic_read(&fscache_n_allocs_ok),
48952- atomic_read(&fscache_n_allocs_wait),
48953- atomic_read(&fscache_n_allocs_nobufs),
48954- atomic_read(&fscache_n_allocs_intr));
48955+ atomic_read_unchecked(&fscache_n_allocs),
48956+ atomic_read_unchecked(&fscache_n_allocs_ok),
48957+ atomic_read_unchecked(&fscache_n_allocs_wait),
48958+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
48959+ atomic_read_unchecked(&fscache_n_allocs_intr));
48960 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
48961- atomic_read(&fscache_n_alloc_ops),
48962- atomic_read(&fscache_n_alloc_op_waits),
48963- atomic_read(&fscache_n_allocs_object_dead));
48964+ atomic_read_unchecked(&fscache_n_alloc_ops),
48965+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
48966+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
48967
48968 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
48969 " int=%u oom=%u\n",
48970- atomic_read(&fscache_n_retrievals),
48971- atomic_read(&fscache_n_retrievals_ok),
48972- atomic_read(&fscache_n_retrievals_wait),
48973- atomic_read(&fscache_n_retrievals_nodata),
48974- atomic_read(&fscache_n_retrievals_nobufs),
48975- atomic_read(&fscache_n_retrievals_intr),
48976- atomic_read(&fscache_n_retrievals_nomem));
48977+ atomic_read_unchecked(&fscache_n_retrievals),
48978+ atomic_read_unchecked(&fscache_n_retrievals_ok),
48979+ atomic_read_unchecked(&fscache_n_retrievals_wait),
48980+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
48981+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
48982+ atomic_read_unchecked(&fscache_n_retrievals_intr),
48983+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
48984 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
48985- atomic_read(&fscache_n_retrieval_ops),
48986- atomic_read(&fscache_n_retrieval_op_waits),
48987- atomic_read(&fscache_n_retrievals_object_dead));
48988+ atomic_read_unchecked(&fscache_n_retrieval_ops),
48989+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
48990+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
48991
48992 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
48993- atomic_read(&fscache_n_stores),
48994- atomic_read(&fscache_n_stores_ok),
48995- atomic_read(&fscache_n_stores_again),
48996- atomic_read(&fscache_n_stores_nobufs),
48997- atomic_read(&fscache_n_stores_oom));
48998+ atomic_read_unchecked(&fscache_n_stores),
48999+ atomic_read_unchecked(&fscache_n_stores_ok),
49000+ atomic_read_unchecked(&fscache_n_stores_again),
49001+ atomic_read_unchecked(&fscache_n_stores_nobufs),
49002+ atomic_read_unchecked(&fscache_n_stores_oom));
49003 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
49004- atomic_read(&fscache_n_store_ops),
49005- atomic_read(&fscache_n_store_calls),
49006- atomic_read(&fscache_n_store_pages),
49007- atomic_read(&fscache_n_store_radix_deletes),
49008- atomic_read(&fscache_n_store_pages_over_limit));
49009+ atomic_read_unchecked(&fscache_n_store_ops),
49010+ atomic_read_unchecked(&fscache_n_store_calls),
49011+ atomic_read_unchecked(&fscache_n_store_pages),
49012+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
49013+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
49014
49015 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
49016- atomic_read(&fscache_n_store_vmscan_not_storing),
49017- atomic_read(&fscache_n_store_vmscan_gone),
49018- atomic_read(&fscache_n_store_vmscan_busy),
49019- atomic_read(&fscache_n_store_vmscan_cancelled));
49020+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
49021+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
49022+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
49023+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
49024
49025 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
49026- atomic_read(&fscache_n_op_pend),
49027- atomic_read(&fscache_n_op_run),
49028- atomic_read(&fscache_n_op_enqueue),
49029- atomic_read(&fscache_n_op_cancelled),
49030- atomic_read(&fscache_n_op_rejected));
49031+ atomic_read_unchecked(&fscache_n_op_pend),
49032+ atomic_read_unchecked(&fscache_n_op_run),
49033+ atomic_read_unchecked(&fscache_n_op_enqueue),
49034+ atomic_read_unchecked(&fscache_n_op_cancelled),
49035+ atomic_read_unchecked(&fscache_n_op_rejected));
49036 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
49037- atomic_read(&fscache_n_op_deferred_release),
49038- atomic_read(&fscache_n_op_release),
49039- atomic_read(&fscache_n_op_gc));
49040+ atomic_read_unchecked(&fscache_n_op_deferred_release),
49041+ atomic_read_unchecked(&fscache_n_op_release),
49042+ atomic_read_unchecked(&fscache_n_op_gc));
49043
49044 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
49045 atomic_read(&fscache_n_cop_alloc_object),
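
The /proc/fs/fscache/stats show routine reads each counter with the matching accessor; mixing them up would be a compile-time type error once atomic_unchecked_t is a distinct struct. A compressed sketch of the pattern (stats_show_like() is an illustrative name; the counters are the externs declared in internal.h above):

#include <linux/seq_file.h>
#include "internal.h"

/* Sketch: unchecked counters are read with atomic_read_unchecked(),
 * while the still-checked fscache_n_cop_* counters keep atomic_read(). */
static int stats_show_like(struct seq_file *m, void *v)
{
	seq_printf(m, "Ops    : enq=%u\n",
		   atomic_read_unchecked(&fscache_n_op_enqueue));
	seq_printf(m, "CacheOp: alo=%d\n",
		   atomic_read(&fscache_n_cop_alloc_object));
	return 0;
}
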
49046diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
49047index ee8d550..7189d8c 100644
49048--- a/fs/fuse/cuse.c
49049+++ b/fs/fuse/cuse.c
49050@@ -585,10 +585,12 @@ static int __init cuse_init(void)
49051 INIT_LIST_HEAD(&cuse_conntbl[i]);
49052
49053 /* inherit and extend fuse_dev_operations */
49054- cuse_channel_fops = fuse_dev_operations;
49055- cuse_channel_fops.owner = THIS_MODULE;
49056- cuse_channel_fops.open = cuse_channel_open;
49057- cuse_channel_fops.release = cuse_channel_release;
49058+ pax_open_kernel();
49059+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
49060+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
49061+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
49062+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
49063+ pax_close_kernel();
49064
49065 cuse_class = class_create(THIS_MODULE, "cuse");
49066 if (IS_ERR(cuse_class))
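With PaX constification, tables of function pointers such as cuse_channel_fops live in read-only memory, so cuse can no longer assign to the structure directly; the patch instead opens a temporary write window with pax_open_kernel(), memcpy()s the template fops in, stores the three members through casts that strip the const, and seals the region again with pax_close_kernel(). A rough userspace analogue of that write-window pattern — mprotect() stands in for the PaX primitives, and the file_ops type is invented for the sketch:

    /* A "read-only" ops table that is unlocked briefly to be
     * initialized, then re-protected. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct file_ops { int (*open)(void); int (*release)(void); };

    static int my_open(void)    { return 42; }
    static int my_release(void) { return 0; }

    int main(void)
    {
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        struct file_ops tmpl = { .open = my_open, .release = my_release };

        struct file_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;

        memcpy(ops, &tmpl, sizeof(tmpl));     /* "pax_open_kernel()" window */
        mprotect(ops, pg, PROT_READ);         /* "pax_close_kernel()": now RO */

        printf("open() -> %d\n", ops->open()); /* reads still work */
        return 0;
    }

Keeping ops tables read-only in steady state means an attacker with a kernel write primitive cannot simply redirect .open at a payload of their choosing.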
49067diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
49068index 8c23fa7..0e3aac7 100644
49069--- a/fs/fuse/dev.c
49070+++ b/fs/fuse/dev.c
49071@@ -1241,7 +1241,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
49072 ret = 0;
49073 pipe_lock(pipe);
49074
49075- if (!pipe->readers) {
49076+ if (!atomic_read(&pipe->readers)) {
49077 send_sig(SIGPIPE, current, 0);
49078 if (!ret)
49079 ret = -EPIPE;
49080diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
49081index 324bc08..4fdd56e 100644
49082--- a/fs/fuse/dir.c
49083+++ b/fs/fuse/dir.c
49084@@ -1226,7 +1226,7 @@ static char *read_link(struct dentry *dentry)
49085 return link;
49086 }
49087
49088-static void free_link(char *link)
49089+static void free_link(const char *link)
49090 {
49091 if (!IS_ERR(link))
49092 free_page((unsigned long) link);
49093diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
49094index 381893c..3793318 100644
49095--- a/fs/gfs2/inode.c
49096+++ b/fs/gfs2/inode.c
49097@@ -1490,7 +1490,7 @@ out:
49098
49099 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
49100 {
49101- char *s = nd_get_link(nd);
49102+ const char *s = nd_get_link(nd);
49103 if (!IS_ERR(s))
49104 kfree(s);
49105 }
49106diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
49107index c5bc355..163a13e 100644
49108--- a/fs/hugetlbfs/inode.c
49109+++ b/fs/hugetlbfs/inode.c
49110@@ -153,6 +153,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
49111 struct vm_area_struct *vma;
49112 unsigned long start_addr;
49113 struct hstate *h = hstate_file(file);
49114+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
49115
49116 if (len & ~huge_page_mask(h))
49117 return -EINVAL;
49118@@ -165,18 +166,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
49119 return addr;
49120 }
49121
49122+#ifdef CONFIG_PAX_RANDMMAP
49123+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
49124+#endif
49125+
49126 if (addr) {
49127 addr = ALIGN(addr, huge_page_size(h));
49128 vma = find_vma(mm, addr);
49129- if (TASK_SIZE - len >= addr &&
49130- (!vma || addr + len <= vma->vm_start))
49131+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
49132 return addr;
49133 }
49134
49135 if (len > mm->cached_hole_size)
49136 start_addr = mm->free_area_cache;
49137 else {
49138- start_addr = TASK_UNMAPPED_BASE;
49139+ start_addr = mm->mmap_base;
49140 mm->cached_hole_size = 0;
49141 }
49142
49143@@ -190,15 +194,15 @@ full_search:
49144 * Start a new search - just in case we missed
49145 * some holes.
49146 */
49147- if (start_addr != TASK_UNMAPPED_BASE) {
49148- start_addr = TASK_UNMAPPED_BASE;
49149+ if (start_addr != mm->mmap_base) {
49150+ start_addr = mm->mmap_base;
49151 mm->cached_hole_size = 0;
49152 goto full_search;
49153 }
49154 return -ENOMEM;
49155 }
49156
49157- if (!vma || addr + len <= vma->vm_start) {
49158+ if (check_heap_stack_gap(vma, addr, len, offset)) {
49159 mm->free_area_cache = addr + len;
49160 return addr;
49161 }
49162@@ -923,7 +927,7 @@ static struct file_system_type hugetlbfs_fs_type = {
49163 .kill_sb = kill_litter_super,
49164 };
49165
49166-static struct vfsmount *hugetlbfs_vfsmount;
49167+struct vfsmount *hugetlbfs_vfsmount;
49168
49169 static int can_do_hugetlb_shm(void)
49170 {
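hugetlb_get_unmapped_area is brought in line with the PaX/grsecurity mmap conventions: the caller's address hint is ignored when RANDMMAP is active, the linear search starts from the per-mm randomized mm->mmap_base rather than the fixed TASK_UNMAPPED_BASE, and the bare "addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), which additionally honours a per-thread offset (gr_rand_threadstack_offset) below stack-like mappings. A simplified model of such a gap test — the real helper's semantics are more involved, and gap_ok is an illustrative name:

    /* Simplified gap test: a candidate range [addr, addr+len) is
     * acceptable only if it ends at least `gap` bytes below the next
     * mapping when that mapping grows down (a stack). */
    #include <stdbool.h>
    #include <stdio.h>

    struct vma { unsigned long start; bool grows_down; };

    static bool gap_ok(const struct vma *next, unsigned long addr,
                       unsigned long len, unsigned long gap)
    {
        if (!next)                    /* nothing above: fine */
            return true;
        if (next->grows_down)         /* keep a guard gap below stacks */
            return addr + len + gap <= next->start;
        return addr + len <= next->start;
    }

    int main(void)
    {
        struct vma stack = { .start = 0x7f0000000000UL, .grows_down = true };
        printf("%d %d\n",
               gap_ok(&stack, 0x7effffe00000UL, 0x10000, 0x100000),  /* 1 */
               gap_ok(&stack, 0x7effffff0000UL, 0x10000, 0x100000)); /* 0 */
        return 0;
    }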
49171diff --git a/fs/inode.c b/fs/inode.c
49172index 64999f1..8fad608 100644
49173--- a/fs/inode.c
49174+++ b/fs/inode.c
49175@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
49176
49177 #ifdef CONFIG_SMP
49178 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
49179- static atomic_t shared_last_ino;
49180- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
49181+ static atomic_unchecked_t shared_last_ino;
49182+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
49183
49184 res = next - LAST_INO_BATCH;
49185 }
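get_next_ino() serves inode numbers from per-CPU batches, touching the shared counter only once per LAST_INO_BATCH allocations; since that counter is allowed to wrap over a long uptime, it too becomes atomic_unchecked_t above. A compact model of the batching scheme, with a thread-local standing in for the per-CPU variable:

    /* Batched ID allocation: each thread reserves LAST_INO_BATCH ids
     * from the shared counter at once, then hands them out locally. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define LAST_INO_BATCH 1024

    static atomic_uint shared_last_ino;

    static unsigned int get_next_ino(void)
    {
        static _Thread_local unsigned int res;   /* per-CPU in the kernel */

        if ((res & (LAST_INO_BATCH - 1)) == 0) {
            unsigned int next =
                atomic_fetch_add(&shared_last_ino, LAST_INO_BATCH)
                + LAST_INO_BATCH;
            res = next - LAST_INO_BATCH;
        }
        return ++res;
    }

    int main(void)
    {
        printf("%u %u %u\n", get_next_ino(), get_next_ino(), get_next_ino());
        return 0;
    }

Batching turns one global atomic read-modify-write per inode into one per 1024 inodes, which is the point of the per-CPU cache.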
49186diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
49187index 4a6cf28..d3a29d3 100644
49188--- a/fs/jffs2/erase.c
49189+++ b/fs/jffs2/erase.c
49190@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
49191 struct jffs2_unknown_node marker = {
49192 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
49193 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
49194- .totlen = cpu_to_je32(c->cleanmarker_size)
49195+ .totlen = cpu_to_je32(c->cleanmarker_size),
49196+ .hdr_crc = cpu_to_je32(0)
49197 };
49198
49199 jffs2_prealloc_raw_node_refs(c, jeb, 1);
49200diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
49201index a6597d6..41b30ec 100644
49202--- a/fs/jffs2/wbuf.c
49203+++ b/fs/jffs2/wbuf.c
49204@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
49205 {
49206 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
49207 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
49208- .totlen = constant_cpu_to_je32(8)
49209+ .totlen = constant_cpu_to_je32(8),
49210+ .hdr_crc = constant_cpu_to_je32(0)
49211 };
49212
49213 /*
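Both jffs2 cleanmarker templates gain an explicit ".hdr_crc = 0" initializer. C already zero-fills members omitted from a designated initializer, so this presumably documents the on-flash field and silences -Wmissing-field-initializers (or keeps a constification plugin happy) rather than changing behaviour. A two-assert demonstration that the explicit member is equivalent:

    /* Omitted designated-initializer members are zero anyway; naming
     * them is documentation (and silences missing-field warnings). */
    #include <assert.h>

    struct node { unsigned short magic, nodetype; unsigned totlen, hdr_crc; };

    int main(void)
    {
        struct node a = { .magic = 0x1985, .totlen = 8 };
        struct node b = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };
        assert(a.hdr_crc == 0);
        assert(a.hdr_crc == b.hdr_crc);
        return 0;
    }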
49214diff --git a/fs/jfs/super.c b/fs/jfs/super.c
49215index 1a543be..d803c40 100644
49216--- a/fs/jfs/super.c
49217+++ b/fs/jfs/super.c
49218@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
49219
49220 jfs_inode_cachep =
49221 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
49222- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
49223+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
49224 init_once);
49225 if (jfs_inode_cachep == NULL)
49226 return -ENOMEM;
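Adding SLAB_USERCOPY to the jfs inode cache is the PaX USERCOPY whitelist at work: copies between userspace and slab objects are refused unless the object's cache was created with this flag, and jfs inodes contain data (such as inline symlink bodies) that legitimately reaches copy_to_user(). A userspace model of a whitelist-gated copy — the flag name is taken from the patch, the checking function is invented:

    /* Model of a USERCOPY-style whitelist: copies are refused unless
     * the source cache carries the USERCOPY flag. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define SLAB_USERCOPY 0x1

    struct kmem_cache { const char *name; unsigned flags; };

    static int copy_to_user_checked(void *dst, const void *src, size_t n,
                                    const struct kmem_cache *src_cache)
    {
        if (!(src_cache->flags & SLAB_USERCOPY))
            return -EACCES;           /* object not whitelisted */
        memcpy(dst, src, n);          /* stand-in for copy_to_user() */
        return 0;
    }

    int main(void)
    {
        struct kmem_cache jfs_ip = { "jfs_ip", SLAB_USERCOPY };
        char obj[16] = "target", buf[16];
        printf("%d\n", copy_to_user_checked(buf, obj, sizeof(obj), &jfs_ip));
        return 0;
    }

The design pushes the decision to cache-creation time: one flag per cache instead of one audit per copy site.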
49227diff --git a/fs/libfs.c b/fs/libfs.c
49228index 7cc37ca..b3e3eec 100644
49229--- a/fs/libfs.c
49230+++ b/fs/libfs.c
49231@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
49232
49233 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
49234 struct dentry *next;
49235+ char d_name[sizeof(next->d_iname)];
49236+ const unsigned char *name;
49237+
49238 next = list_entry(p, struct dentry, d_u.d_child);
49239 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
49240 if (!simple_positive(next)) {
49241@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
49242
49243 spin_unlock(&next->d_lock);
49244 spin_unlock(&dentry->d_lock);
49245- if (filldir(dirent, next->d_name.name,
49246+ name = next->d_name.name;
49247+ if (name == next->d_iname) {
49248+ memcpy(d_name, name, next->d_name.len);
49249+ name = d_name;
49250+ }
49251+ if (filldir(dirent, name,
49252 next->d_name.len, filp->f_pos,
49253 next->d_inode->i_ino,
49254 dt_type(next->d_inode)) < 0)
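dcache_readdir() drops both dentry locks before calling filldir(), which may fault and sleep; a name stored inline in next->d_iname could then be rewritten by a concurrent rename while filldir() is still reading it, so the patch snapshots short names into a stack buffer first (externally allocated names appear to be treated as stable here). The general snapshot-then-unlock pattern:

    /* Snapshot-then-unlock: copy data that may change once the lock
     * is dropped into a local buffer, then work on the copy. */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct dentry_like {
        pthread_mutex_t lock;
        char d_iname[32];             /* inline name, mutable on rename */
    };

    static void emit(struct dentry_like *d)
    {
        char d_name[sizeof(d->d_iname)];

        pthread_mutex_lock(&d->lock);
        memcpy(d_name, d->d_iname, sizeof(d_name)); /* private snapshot */
        pthread_mutex_unlock(&d->lock);

        printf("%s\n", d_name);       /* safe: no one else sees d_name */
    }

    int main(void)
    {
        struct dentry_like d = { PTHREAD_MUTEX_INITIALIZER, "foo" };
        emit(&d);
        return 0;
    }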
49255diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
49256index 05d2912..760abfa 100644
49257--- a/fs/lockd/clntproc.c
49258+++ b/fs/lockd/clntproc.c
49259@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
49260 /*
49261 * Cookie counter for NLM requests
49262 */
49263-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
49264+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
49265
49266 void nlmclnt_next_cookie(struct nlm_cookie *c)
49267 {
49268- u32 cookie = atomic_inc_return(&nlm_cookie);
49269+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
49270
49271 memcpy(c->data, &cookie, 4);
49272 c->len=4;
49273diff --git a/fs/locks.c b/fs/locks.c
49274index a94e331..060bce3 100644
49275--- a/fs/locks.c
49276+++ b/fs/locks.c
49277@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
49278 return;
49279
49280 if (filp->f_op && filp->f_op->flock) {
49281- struct file_lock fl = {
49282+ struct file_lock flock = {
49283 .fl_pid = current->tgid,
49284 .fl_file = filp,
49285 .fl_flags = FL_FLOCK,
49286 .fl_type = F_UNLCK,
49287 .fl_end = OFFSET_MAX,
49288 };
49289- filp->f_op->flock(filp, F_SETLKW, &fl);
49290- if (fl.fl_ops && fl.fl_ops->fl_release_private)
49291- fl.fl_ops->fl_release_private(&fl);
49292+ filp->f_op->flock(filp, F_SETLKW, &flock);
49293+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
49294+ flock.fl_ops->fl_release_private(&flock);
49295 }
49296
49297 lock_flocks();
49298diff --git a/fs/namei.c b/fs/namei.c
49299index 5f4cdf3..959a013 100644
49300--- a/fs/namei.c
49301+++ b/fs/namei.c
49302@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
49303 if (ret != -EACCES)
49304 return ret;
49305
49306+#ifdef CONFIG_GRKERNSEC
49307+ /* we'll block if we have to log due to a denied capability use */
49308+ if (mask & MAY_NOT_BLOCK)
49309+ return -ECHILD;
49310+#endif
49311+
49312 if (S_ISDIR(inode->i_mode)) {
49313 /* DACs are overridable for directories */
49314- if (inode_capable(inode, CAP_DAC_OVERRIDE))
49315- return 0;
49316 if (!(mask & MAY_WRITE))
49317- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49318+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49319+ inode_capable(inode, CAP_DAC_READ_SEARCH))
49320 return 0;
49321+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
49322+ return 0;
49323 return -EACCES;
49324 }
49325 /*
49326+ * Searching includes executable on directories, else just read.
49327+ */
49328+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49329+ if (mask == MAY_READ)
49330+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
49331+ inode_capable(inode, CAP_DAC_READ_SEARCH))
49332+ return 0;
49333+
49334+ /*
49335 * Read/write DACs are always overridable.
49336 * Executable DACs are overridable when there is
49337 * at least one exec bit set.
49338@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
49339 if (inode_capable(inode, CAP_DAC_OVERRIDE))
49340 return 0;
49341
49342- /*
49343- * Searching includes executable on directories, else just read.
49344- */
49345- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
49346- if (mask == MAY_READ)
49347- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
49348- return 0;
49349-
49350 return -EACCES;
49351 }
49352
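The generic_permission() rework reorders the DAC-override logic: for non-write access to directories it first probes CAP_DAC_OVERRIDE without generating an audit record (inode_capable_nolog()), then tries the narrower CAP_DAC_READ_SEARCH with normal logging, and only reaches the logged CAP_DAC_OVERRIDE check when write access is requested; the hoisted read-only shortcut gets the same treatment for regular files. The GRKERNSEC hunk also bails out of RCU-walk with -ECHILD because logging a denial can block. A generic "narrow capability first" sketch, which deliberately simplifies the patch's nolog/log distinction away (capable() is a stub, not the kernel API):

    /* Prefer the narrower capability so audit records reflect least
     * privilege; the blanket override is only the fallback. */
    #include <stdbool.h>
    #include <stdio.h>

    enum cap { CAP_DAC_READ_SEARCH, CAP_DAC_OVERRIDE };

    static bool capable(enum cap c) { return c == CAP_DAC_READ_SEARCH; }

    static int dir_permission(bool want_write)
    {
        if (!want_write && capable(CAP_DAC_READ_SEARCH))
            return 0;                 /* narrow capability suffices */
        if (capable(CAP_DAC_OVERRIDE))
            return 0;                 /* only now the big hammer */
        return -13 /* -EACCES */;
    }

    int main(void)
    {
        printf("read-only dir: %d, write: %d\n",
               dir_permission(false), dir_permission(true));
        return 0;
    }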
49353@@ -826,7 +834,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49354 {
49355 struct dentry *dentry = link->dentry;
49356 int error;
49357- char *s;
49358+ const char *s;
49359
49360 BUG_ON(nd->flags & LOOKUP_RCU);
49361
49362@@ -847,6 +855,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
49363 if (error)
49364 goto out_put_nd_path;
49365
49366+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
49367+ dentry->d_inode, dentry, nd->path.mnt)) {
49368+ error = -EACCES;
49369+ goto out_put_nd_path;
49370+ }
49371+
49372 nd->last_type = LAST_BIND;
49373 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
49374 error = PTR_ERR(*p);
49375@@ -1605,6 +1619,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
49376 break;
49377 res = walk_component(nd, path, &nd->last,
49378 nd->last_type, LOOKUP_FOLLOW);
49379+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
49380+ res = -EACCES;
49381 put_link(nd, &link, cookie);
49382 } while (res > 0);
49383
49384@@ -1703,7 +1719,7 @@ EXPORT_SYMBOL(full_name_hash);
49385 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
49386 {
49387 unsigned long a, b, adata, bdata, mask, hash, len;
49388- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49389+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
49390
49391 hash = a = 0;
49392 len = -sizeof(unsigned long);
49393@@ -1993,6 +2009,8 @@ static int path_lookupat(int dfd, const char *name,
49394 if (err)
49395 break;
49396 err = lookup_last(nd, &path);
49397+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
49398+ err = -EACCES;
49399 put_link(nd, &link, cookie);
49400 }
49401 }
49402@@ -2000,6 +2018,21 @@ static int path_lookupat(int dfd, const char *name,
49403 if (!err)
49404 err = complete_walk(nd);
49405
49406+ if (!(nd->flags & LOOKUP_PARENT)) {
49407+#ifdef CONFIG_GRKERNSEC
49408+ if (flags & LOOKUP_RCU) {
49409+ if (!err)
49410+ path_put(&nd->path);
49411+ err = -ECHILD;
49412+ } else
49413+#endif
49414+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49415+ if (!err)
49416+ path_put(&nd->path);
49417+ err = -ENOENT;
49418+ }
49419+ }
49420+
49421 if (!err && nd->flags & LOOKUP_DIRECTORY) {
49422 if (!nd->inode->i_op->lookup) {
49423 path_put(&nd->path);
49424@@ -2027,8 +2060,17 @@ static int filename_lookup(int dfd, struct filename *name,
49425 retval = path_lookupat(dfd, name->name,
49426 flags | LOOKUP_REVAL, nd);
49427
49428- if (likely(!retval))
49429+ if (likely(!retval)) {
49430+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
49431+#ifdef CONFIG_GRKERNSEC
49432+ if (flags & LOOKUP_RCU)
49433+ return -ECHILD;
49434+#endif
49435+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
49436+ return -ENOENT;
49437+ }
49438 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
49439+ }
49440 return retval;
49441 }
49442
49443@@ -2402,6 +2444,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
49444 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
49445 return -EPERM;
49446
49447+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
49448+ return -EPERM;
49449+ if (gr_handle_rawio(inode))
49450+ return -EPERM;
49451+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
49452+ return -EACCES;
49453+
49454 return 0;
49455 }
49456
49457@@ -2623,7 +2672,7 @@ looked_up:
49458 * cleared otherwise prior to returning.
49459 */
49460 static int lookup_open(struct nameidata *nd, struct path *path,
49461- struct file *file,
49462+ struct path *link, struct file *file,
49463 const struct open_flags *op,
49464 bool got_write, int *opened)
49465 {
49466@@ -2658,6 +2707,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49467 /* Negative dentry, just create the file */
49468 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
49469 umode_t mode = op->mode;
49470+
49471+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
49472+ error = -EACCES;
49473+ goto out_dput;
49474+ }
49475+
49476+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
49477+ error = -EACCES;
49478+ goto out_dput;
49479+ }
49480+
49481 if (!IS_POSIXACL(dir->d_inode))
49482 mode &= ~current_umask();
49483 /*
49484@@ -2679,6 +2739,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
49485 nd->flags & LOOKUP_EXCL);
49486 if (error)
49487 goto out_dput;
49488+ else
49489+ gr_handle_create(dentry, nd->path.mnt);
49490 }
49491 out_no_open:
49492 path->dentry = dentry;
49493@@ -2693,7 +2755,7 @@ out_dput:
49494 /*
49495 * Handle the last step of open()
49496 */
49497-static int do_last(struct nameidata *nd, struct path *path,
49498+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
49499 struct file *file, const struct open_flags *op,
49500 int *opened, struct filename *name)
49501 {
49502@@ -2722,16 +2784,44 @@ static int do_last(struct nameidata *nd, struct path *path,
49503 error = complete_walk(nd);
49504 if (error)
49505 return error;
49506+#ifdef CONFIG_GRKERNSEC
49507+ if (nd->flags & LOOKUP_RCU) {
49508+ error = -ECHILD;
49509+ goto out;
49510+ }
49511+#endif
49512+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49513+ error = -ENOENT;
49514+ goto out;
49515+ }
49516 audit_inode(name, nd->path.dentry, 0);
49517 if (open_flag & O_CREAT) {
49518 error = -EISDIR;
49519 goto out;
49520 }
49521+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49522+ error = -EACCES;
49523+ goto out;
49524+ }
49525 goto finish_open;
49526 case LAST_BIND:
49527 error = complete_walk(nd);
49528 if (error)
49529 return error;
49530+#ifdef CONFIG_GRKERNSEC
49531+ if (nd->flags & LOOKUP_RCU) {
49532+ error = -ECHILD;
49533+ goto out;
49534+ }
49535+#endif
49536+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
49537+ error = -ENOENT;
49538+ goto out;
49539+ }
49540+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49541+ error = -EACCES;
49542+ goto out;
49543+ }
49544 audit_inode(name, dir, 0);
49545 goto finish_open;
49546 }
49547@@ -2780,7 +2870,7 @@ retry_lookup:
49548 */
49549 }
49550 mutex_lock(&dir->d_inode->i_mutex);
49551- error = lookup_open(nd, path, file, op, got_write, opened);
49552+ error = lookup_open(nd, path, link, file, op, got_write, opened);
49553 mutex_unlock(&dir->d_inode->i_mutex);
49554
49555 if (error <= 0) {
49556@@ -2804,11 +2894,28 @@ retry_lookup:
49557 goto finish_open_created;
49558 }
49559
49560+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
49561+ error = -ENOENT;
49562+ goto exit_dput;
49563+ }
49564+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
49565+ error = -EACCES;
49566+ goto exit_dput;
49567+ }
49568+
49569 /*
49570 * create/update audit record if it already exists.
49571 */
49572- if (path->dentry->d_inode)
49573+ if (path->dentry->d_inode) {
49574+ /* only check if O_CREAT is specified, all other checks need to go
49575+ into may_open */
49576+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
49577+ error = -EACCES;
49578+ goto exit_dput;
49579+ }
49580+
49581 audit_inode(name, path->dentry, 0);
49582+ }
49583
49584 /*
49585 * If atomic_open() acquired write access it is dropped now due to
49586@@ -2849,6 +2956,11 @@ finish_lookup:
49587 }
49588 }
49589 BUG_ON(inode != path->dentry->d_inode);
49590+ /* if we're resolving a symlink to another symlink */
49591+ if (link && gr_handle_symlink_owner(link, inode)) {
49592+ error = -EACCES;
49593+ goto out;
49594+ }
49595 return 1;
49596 }
49597
49598@@ -2858,7 +2970,6 @@ finish_lookup:
49599 save_parent.dentry = nd->path.dentry;
49600 save_parent.mnt = mntget(path->mnt);
49601 nd->path.dentry = path->dentry;
49602-
49603 }
49604 nd->inode = inode;
49605 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
49606@@ -2867,6 +2978,22 @@ finish_lookup:
49607 path_put(&save_parent);
49608 return error;
49609 }
49610+
49611+#ifdef CONFIG_GRKERNSEC
49612+ if (nd->flags & LOOKUP_RCU) {
49613+ error = -ECHILD;
49614+ goto out;
49615+ }
49616+#endif
49617+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
49618+ error = -ENOENT;
49619+ goto out;
49620+ }
49621+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
49622+ error = -EACCES;
49623+ goto out;
49624+ }
49625+
49626 error = -EISDIR;
49627 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
49628 goto out;
49629@@ -2965,7 +3092,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49630 if (unlikely(error))
49631 goto out;
49632
49633- error = do_last(nd, &path, file, op, &opened, pathname);
49634+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
49635 while (unlikely(error > 0)) { /* trailing symlink */
49636 struct path link = path;
49637 void *cookie;
49638@@ -2983,7 +3110,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
49639 error = follow_link(&link, nd, &cookie);
49640 if (unlikely(error))
49641 break;
49642- error = do_last(nd, &path, file, op, &opened, pathname);
49643+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
49644 put_link(nd, &link, cookie);
49645 }
49646 out:
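path_openat() now threads the trailing-symlink path ("link") down into do_last() and lookup_open(), and each of do_last()'s many exits repeats a gr_handle_symlink_owner() check. This appears to implement grsecurity's symlink-ownership policy: refuse to follow a symlink, typically one sitting in a world-writable directory, whose owner fails a trust test against the process following it, closing the classic /tmp symlink-race class. An approximation of such a policy check — the real predicate's inputs and exact rule differ:

    /* Approximate symlink-owner policy: in a world-writable sticky
     * directory, only follow symlinks owned by the follower or by
     * the directory owner. */
    #include <stdbool.h>
    #include <stdio.h>

    struct node { unsigned uid; bool dir_world_writable; };

    static bool symlink_owner_ok(const struct node *link,
                                 unsigned dir_uid, unsigned fsuid)
    {
        if (!link->dir_world_writable)
            return true;              /* policy only bites in /tmp-alikes */
        return link->uid == fsuid || link->uid == dir_uid;
    }

    int main(void)
    {
        struct node evil = { .uid = 1001, .dir_world_writable = true };
        printf("%s\n", symlink_owner_ok(&evil, 0, 1000) ? "follow" : "-EACCES");
        return 0;
    }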
49647@@ -3073,8 +3200,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
49648 goto unlock;
49649
49650 error = -EEXIST;
49651- if (dentry->d_inode)
49652+ if (dentry->d_inode) {
49653+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
49654+ error = -ENOENT;
49655+ }
49656 goto fail;
49657+ }
49658 /*
49659 * Special case - lookup gave negative, but... we had foo/bar/
49660 * From the vfs_mknod() POV we just have a negative dentry -
49661@@ -3125,6 +3256,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
49662 }
49663 EXPORT_SYMBOL(user_path_create);
49664
49665+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, int is_dir)
49666+{
49667+ struct filename *tmp = getname(pathname);
49668+ struct dentry *res;
49669+ if (IS_ERR(tmp))
49670+ return ERR_CAST(tmp);
49671+ res = kern_path_create(dfd, tmp->name, path, is_dir);
49672+ if (IS_ERR(res))
49673+ putname(tmp);
49674+ else
49675+ *to = tmp;
49676+ return res;
49677+}
49678+
49679 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
49680 {
49681 int error = may_create(dir, dentry);
49682@@ -3186,6 +3331,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49683
49684 if (!IS_POSIXACL(path.dentry->d_inode))
49685 mode &= ~current_umask();
49686+
49687+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
49688+ error = -EPERM;
49689+ goto out;
49690+ }
49691+
49692+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
49693+ error = -EACCES;
49694+ goto out;
49695+ }
49696+
49697 error = security_path_mknod(&path, dentry, mode, dev);
49698 if (error)
49699 goto out;
49700@@ -3202,6 +3358,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
49701 break;
49702 }
49703 out:
49704+ if (!error)
49705+ gr_handle_create(dentry, path.mnt);
49706 done_path_create(&path, dentry);
49707 return error;
49708 }
49709@@ -3248,9 +3406,18 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
49710
49711 if (!IS_POSIXACL(path.dentry->d_inode))
49712 mode &= ~current_umask();
49713+
49714+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
49715+ error = -EACCES;
49716+ goto out;
49717+ }
49718+
49719 error = security_path_mkdir(&path, dentry, mode);
49720 if (!error)
49721 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
49722+ if (!error)
49723+ gr_handle_create(dentry, path.mnt);
49724+out:
49725 done_path_create(&path, dentry);
49726 return error;
49727 }
49728@@ -3327,6 +3494,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
49729 struct filename *name;
49730 struct dentry *dentry;
49731 struct nameidata nd;
49732+ ino_t saved_ino = 0;
49733+ dev_t saved_dev = 0;
49734
49735 name = user_path_parent(dfd, pathname, &nd);
49736 if (IS_ERR(name))
49737@@ -3358,10 +3527,21 @@ static long do_rmdir(int dfd, const char __user *pathname)
49738 error = -ENOENT;
49739 goto exit3;
49740 }
49741+
49742+ saved_ino = dentry->d_inode->i_ino;
49743+ saved_dev = gr_get_dev_from_dentry(dentry);
49744+
49745+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
49746+ error = -EACCES;
49747+ goto exit3;
49748+ }
49749+
49750 error = security_path_rmdir(&nd.path, dentry);
49751 if (error)
49752 goto exit3;
49753 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
49754+ if (!error && (saved_dev || saved_ino))
49755+ gr_handle_delete(saved_ino, saved_dev);
49756 exit3:
49757 dput(dentry);
49758 exit2:
49759@@ -3423,6 +3603,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49760 struct dentry *dentry;
49761 struct nameidata nd;
49762 struct inode *inode = NULL;
49763+ ino_t saved_ino = 0;
49764+ dev_t saved_dev = 0;
49765
49766 name = user_path_parent(dfd, pathname, &nd);
49767 if (IS_ERR(name))
49768@@ -3448,10 +3630,22 @@ static long do_unlinkat(int dfd, const char __user *pathname)
49769 if (!inode)
49770 goto slashes;
49771 ihold(inode);
49772+
49773+ if (inode->i_nlink <= 1) {
49774+ saved_ino = inode->i_ino;
49775+ saved_dev = gr_get_dev_from_dentry(dentry);
49776+ }
49777+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
49778+ error = -EACCES;
49779+ goto exit2;
49780+ }
49781+
49782 error = security_path_unlink(&nd.path, dentry);
49783 if (error)
49784 goto exit2;
49785 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
49786+ if (!error && (saved_ino || saved_dev))
49787+ gr_handle_delete(saved_ino, saved_dev);
49788 exit2:
49789 dput(dentry);
49790 }
49791@@ -3523,9 +3717,17 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
49792 if (IS_ERR(dentry))
49793 goto out_putname;
49794
49795+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
49796+ error = -EACCES;
49797+ goto out;
49798+ }
49799+
49800 error = security_path_symlink(&path, dentry, from->name);
49801 if (!error)
49802 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
49803+ if (!error)
49804+ gr_handle_create(dentry, path.mnt);
49805+out:
49806 done_path_create(&path, dentry);
49807 out_putname:
49808 putname(from);
49809@@ -3595,6 +3797,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49810 {
49811 struct dentry *new_dentry;
49812 struct path old_path, new_path;
49813+ struct filename *to = NULL;
49814 int how = 0;
49815 int error;
49816
49817@@ -3618,7 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49818 if (error)
49819 return error;
49820
49821- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
49822+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
49823 error = PTR_ERR(new_dentry);
49824 if (IS_ERR(new_dentry))
49825 goto out;
49826@@ -3629,11 +3832,28 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
49827 error = may_linkat(&old_path);
49828 if (unlikely(error))
49829 goto out_dput;
49830+
49831+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
49832+ old_path.dentry->d_inode,
49833+ old_path.dentry->d_inode->i_mode, to)) {
49834+ error = -EACCES;
49835+ goto out_dput;
49836+ }
49837+
49838+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
49839+ old_path.dentry, old_path.mnt, to)) {
49840+ error = -EACCES;
49841+ goto out_dput;
49842+ }
49843+
49844 error = security_path_link(old_path.dentry, &new_path, new_dentry);
49845 if (error)
49846 goto out_dput;
49847 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
49848+ if (!error)
49849+ gr_handle_create(new_dentry, new_path.mnt);
49850 out_dput:
49851+ putname(to);
49852 done_path_create(&new_path, new_dentry);
49853 out:
49854 path_put(&old_path);
49855@@ -3873,12 +4093,21 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
49856 if (new_dentry == trap)
49857 goto exit5;
49858
49859+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
49860+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
49861+ to);
49862+ if (error)
49863+ goto exit5;
49864+
49865 error = security_path_rename(&oldnd.path, old_dentry,
49866 &newnd.path, new_dentry);
49867 if (error)
49868 goto exit5;
49869 error = vfs_rename(old_dir->d_inode, old_dentry,
49870 new_dir->d_inode, new_dentry);
49871+ if (!error)
49872+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
49873+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
49874 exit5:
49875 dput(new_dentry);
49876 exit4:
49877@@ -3903,6 +4132,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
49878
49879 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
49880 {
49881+ char tmpbuf[64];
49882+ const char *newlink;
49883 int len;
49884
49885 len = PTR_ERR(link);
49886@@ -3912,7 +4143,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
49887 len = strlen(link);
49888 if (len > (unsigned) buflen)
49889 len = buflen;
49890- if (copy_to_user(buffer, link, len))
49891+
49892+ if (len < sizeof(tmpbuf)) {
49893+ memcpy(tmpbuf, link, len);
49894+ newlink = tmpbuf;
49895+ } else
49896+ newlink = link;
49897+
49898+ if (copy_to_user(buffer, newlink, len))
49899 len = -EFAULT;
49900 out:
49901 return len;
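vfs_readlink() gains a 64-byte stack bounce buffer: short link strings are copied onto the stack before copy_to_user(). With PaX USERCOPY enforcing per-cache whitelists, link bodies living in arbitrary slab or inode objects would otherwise trip the usercopy check; the current stack frame is always an approved source. The generic bounce-buffer pattern (pseudo_copy_to_user() is a stand-in, and the 64-byte threshold mirrors the patch):

    /* Bounce buffer: stage data in a small stack buffer when the
     * original object may not be approved for direct user copies. */
    #include <stdio.h>
    #include <string.h>

    static long pseudo_copy_to_user(char *ubuf, const char *src, size_t len)
    {
        memcpy(ubuf, src, len);       /* stand-in for copy_to_user() */
        return 0;
    }

    static long readlink_copy(char *ubuf, size_t buflen, const char *link)
    {
        char tmpbuf[64];
        const char *newlink = link;
        size_t len = strlen(link);

        if (len > buflen)
            len = buflen;
        if (len < sizeof(tmpbuf)) {   /* short: bounce via the stack */
            memcpy(tmpbuf, link, len);
            newlink = tmpbuf;
        }
        return pseudo_copy_to_user(ubuf, newlink, len);
    }

    int main(void)
    {
        char out[128];
        readlink_copy(out, sizeof(out), "/usr/bin/true");
        printf("%.13s\n", out);
        return 0;
    }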
49902diff --git a/fs/namespace.c b/fs/namespace.c
49903index 2496062..e26f6d6 100644
49904--- a/fs/namespace.c
49905+++ b/fs/namespace.c
49906@@ -1212,6 +1212,9 @@ static int do_umount(struct mount *mnt, int flags)
49907 if (!(sb->s_flags & MS_RDONLY))
49908 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
49909 up_write(&sb->s_umount);
49910+
49911+ gr_log_remount(mnt->mnt_devname, retval);
49912+
49913 return retval;
49914 }
49915
49916@@ -1231,6 +1234,9 @@ static int do_umount(struct mount *mnt, int flags)
49917 br_write_unlock(&vfsmount_lock);
49918 up_write(&namespace_sem);
49919 release_mounts(&umount_list);
49920+
49921+ gr_log_unmount(mnt->mnt_devname, retval);
49922+
49923 return retval;
49924 }
49925
49926@@ -2244,6 +2250,16 @@ long do_mount(const char *dev_name, const char *dir_name,
49927 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
49928 MS_STRICTATIME);
49929
49930+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
49931+ retval = -EPERM;
49932+ goto dput_out;
49933+ }
49934+
49935+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
49936+ retval = -EPERM;
49937+ goto dput_out;
49938+ }
49939+
49940 if (flags & MS_REMOUNT)
49941 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
49942 data_page);
49943@@ -2258,6 +2274,9 @@ long do_mount(const char *dev_name, const char *dir_name,
49944 dev_name, data_page);
49945 dput_out:
49946 path_put(&path);
49947+
49948+ gr_log_mount(dev_name, dir_name, retval);
49949+
49950 return retval;
49951 }
49952
49953@@ -2516,6 +2535,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
49954 if (error)
49955 goto out2;
49956
49957+ if (gr_handle_chroot_pivot()) {
49958+ error = -EPERM;
49959+ goto out2;
49960+ }
49961+
49962 get_fs_root(current->fs, &root);
49963 error = lock_mount(&old);
49964 if (error)
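The namespace.c changes are paired veto/audit hooks around mount-type operations: gr_handle_rofs_mount() and gr_handle_chroot_mount() can refuse the request up front (the latter restricting mounts from inside a chroot), gr_handle_chroot_pivot() blocks pivot_root() in a chroot, and gr_log_mount()/gr_log_unmount()/gr_log_remount() record the outcome, success or failure, on the way out. The check-before/log-after shape in miniature (both helpers are illustrative stand-ins):

    /* Check-before / log-after wrapping of a privileged operation. */
    #include <stdio.h>

    static int policy_allows(const char *dev) { return dev[0] != '\0'; }

    static void audit_log(const char *dev, int ret)
    {
        fprintf(stderr, "mount %s -> %d\n", dev, ret);
    }

    static int do_mount_wrapped(const char *dev)
    {
        int ret;

        if (!policy_allows(dev))
            return -1;                /* vetoed before any side effect */

        ret = 0;                      /* the real mount would go here */

        audit_log(dev, ret);          /* outcome recorded either way */
        return ret;
    }

    int main(void) { return do_mount_wrapped("/dev/sda1"); }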
49965diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
49966index 6fa01ae..2790820 100644
49967--- a/fs/nfs/inode.c
49968+++ b/fs/nfs/inode.c
49969@@ -1029,16 +1029,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
49970 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
49971 }
49972
49973-static atomic_long_t nfs_attr_generation_counter;
49974+static atomic_long_unchecked_t nfs_attr_generation_counter;
49975
49976 static unsigned long nfs_read_attr_generation_counter(void)
49977 {
49978- return atomic_long_read(&nfs_attr_generation_counter);
49979+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
49980 }
49981
49982 unsigned long nfs_inc_attr_generation_counter(void)
49983 {
49984- return atomic_long_inc_return(&nfs_attr_generation_counter);
49985+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
49986 }
49987
49988 void nfs_fattr_init(struct nfs_fattr *fattr)
49989diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
49990index f59169e..fd7d359 100644
49991--- a/fs/nfsd/vfs.c
49992+++ b/fs/nfsd/vfs.c
49993@@ -941,7 +941,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
49994 } else {
49995 oldfs = get_fs();
49996 set_fs(KERNEL_DS);
49997- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
49998+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
49999 set_fs(oldfs);
50000 }
50001
50002@@ -1045,7 +1045,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
50003
50004 /* Write the data. */
50005 oldfs = get_fs(); set_fs(KERNEL_DS);
50006- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
50007+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
50008 set_fs(oldfs);
50009 if (host_err < 0)
50010 goto out_nfserr;
50011@@ -1587,7 +1587,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
50012 */
50013
50014 oldfs = get_fs(); set_fs(KERNEL_DS);
50015- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
50016+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
50017 set_fs(oldfs);
50018
50019 if (host_err < 0)
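nfsd enters set_fs(KERNEL_DS) and hands kernel iovecs to vfs_readv()/vfs_writev(), which are declared to take __user pointers; the patch replaces the bare (struct iovec __user *) casts with __force_user, which in PaX appears to be shorthand for "__force __user", so sparse treats the address-space override as deliberate instead of silently accepted. The standard sparse annotation idiom, reproduced here so the snippet stands alone (this part is stock kernel practice, not grsecurity-specific):

    /* sparse address-space annotations: __user marks pointers that
     * must not be dereferenced directly; __force documents a
     * deliberate cast between address spaces. */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    struct iovec_like { void *base; unsigned long len; };

    static long fake_readv(const struct iovec_like __user *vec)
    {
        (void)vec;
        return 0;
    }

    int main(void)
    {
        struct iovec_like kvec = { 0, 0 };
        /* under set_fs(KERNEL_DS) a kernel pointer is legitimately
         * passed where a user pointer is expected: */
        return (int)fake_readv((__force struct iovec_like __user *)&kvec);
    }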
50020diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
50021index 6fcaeb8..9d16d04 100644
50022--- a/fs/notify/fanotify/fanotify_user.c
50023+++ b/fs/notify/fanotify/fanotify_user.c
50024@@ -250,8 +250,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
50025
50026 fd = fanotify_event_metadata.fd;
50027 ret = -EFAULT;
50028- if (copy_to_user(buf, &fanotify_event_metadata,
50029- fanotify_event_metadata.event_len))
50030+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
50031+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
50032 goto out_close_fd;
50033
50034 ret = prepare_for_access_response(group, event, fd);
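copy_event_to_user() previously trusted the event's self-described event_len when copying fanotify_event_metadata out; a length larger than the structure would have leaked adjacent kernel stack to userspace. The fix is the canonical validate-the-length-against-sizeof check before the copy:

    /* Validate a self-described length before copying the object out. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct metadata { unsigned event_len; int fd; char payload[8]; };

    static int copy_out(char *ubuf, size_t ubuflen, const struct metadata *m)
    {
        if (m->event_len > sizeof(*m) || m->event_len > ubuflen)
            return -EFAULT;           /* would over-read or over-write */
        memcpy(ubuf, m, m->event_len); /* stand-in for copy_to_user() */
        return 0;
    }

    int main(void)
    {
        struct metadata m = { .event_len = sizeof(m), .fd = 3 };
        char buf[64];
        printf("%d\n", copy_out(buf, sizeof(buf), &m));
        m.event_len = 4096;           /* hostile length */
        printf("%d\n", copy_out(buf, sizeof(buf), &m));
        return 0;
    }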
50035diff --git a/fs/notify/notification.c b/fs/notify/notification.c
50036index c887b13..0fdf472 100644
50037--- a/fs/notify/notification.c
50038+++ b/fs/notify/notification.c
50039@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
50040 * get set to 0 so it will never get 'freed'
50041 */
50042 static struct fsnotify_event *q_overflow_event;
50043-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
50044+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
50045
50046 /**
50047 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
50048@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
50049 */
50050 u32 fsnotify_get_cookie(void)
50051 {
50052- return atomic_inc_return(&fsnotify_sync_cookie);
50053+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
50054 }
50055 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
50056
50057diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
50058index 99e3610..02c1068 100644
50059--- a/fs/ntfs/dir.c
50060+++ b/fs/ntfs/dir.c
50061@@ -1329,7 +1329,7 @@ find_next_index_buffer:
50062 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
50063 ~(s64)(ndir->itype.index.block_size - 1)));
50064 /* Bounds checks. */
50065- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
50066+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
50067 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
50068 "inode 0x%lx or driver bug.", vdir->i_ino);
50069 goto err_out;
50070diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
50071index 1ecf464..e1ff8bf 100644
50072--- a/fs/ntfs/file.c
50073+++ b/fs/ntfs/file.c
50074@@ -2232,6 +2232,6 @@ const struct inode_operations ntfs_file_inode_ops = {
50075 #endif /* NTFS_RW */
50076 };
50077
50078-const struct file_operations ntfs_empty_file_ops = {};
50079+const struct file_operations ntfs_empty_file_ops __read_only;
50080
50081-const struct inode_operations ntfs_empty_inode_ops = {};
50082+const struct inode_operations ntfs_empty_inode_ops __read_only;
50083diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
50084index a9f78c7..ed8a381 100644
50085--- a/fs/ocfs2/localalloc.c
50086+++ b/fs/ocfs2/localalloc.c
50087@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
50088 goto bail;
50089 }
50090
50091- atomic_inc(&osb->alloc_stats.moves);
50092+ atomic_inc_unchecked(&osb->alloc_stats.moves);
50093
50094 bail:
50095 if (handle)
50096diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
50097index d355e6e..578d905 100644
50098--- a/fs/ocfs2/ocfs2.h
50099+++ b/fs/ocfs2/ocfs2.h
50100@@ -235,11 +235,11 @@ enum ocfs2_vol_state
50101
50102 struct ocfs2_alloc_stats
50103 {
50104- atomic_t moves;
50105- atomic_t local_data;
50106- atomic_t bitmap_data;
50107- atomic_t bg_allocs;
50108- atomic_t bg_extends;
50109+ atomic_unchecked_t moves;
50110+ atomic_unchecked_t local_data;
50111+ atomic_unchecked_t bitmap_data;
50112+ atomic_unchecked_t bg_allocs;
50113+ atomic_unchecked_t bg_extends;
50114 };
50115
50116 enum ocfs2_local_alloc_state
50117diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
50118index f169da4..9112253 100644
50119--- a/fs/ocfs2/suballoc.c
50120+++ b/fs/ocfs2/suballoc.c
50121@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
50122 mlog_errno(status);
50123 goto bail;
50124 }
50125- atomic_inc(&osb->alloc_stats.bg_extends);
50126+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
50127
50128 /* You should never ask for this much metadata */
50129 BUG_ON(bits_wanted >
50130@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
50131 mlog_errno(status);
50132 goto bail;
50133 }
50134- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
50135+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
50136
50137 *suballoc_loc = res.sr_bg_blkno;
50138 *suballoc_bit_start = res.sr_bit_offset;
50139@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
50140 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
50141 res->sr_bits);
50142
50143- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
50144+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
50145
50146 BUG_ON(res->sr_bits != 1);
50147
50148@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
50149 mlog_errno(status);
50150 goto bail;
50151 }
50152- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
50153+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
50154
50155 BUG_ON(res.sr_bits != 1);
50156
50157@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
50158 cluster_start,
50159 num_clusters);
50160 if (!status)
50161- atomic_inc(&osb->alloc_stats.local_data);
50162+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
50163 } else {
50164 if (min_clusters > (osb->bitmap_cpg - 1)) {
50165 /* The only paths asking for contiguousness
50166@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
50167 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
50168 res.sr_bg_blkno,
50169 res.sr_bit_offset);
50170- atomic_inc(&osb->alloc_stats.bitmap_data);
50171+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
50172 *num_clusters = res.sr_bits;
50173 }
50174 }
50175diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
50176index 0e91ec2..f4b3fc6 100644
50177--- a/fs/ocfs2/super.c
50178+++ b/fs/ocfs2/super.c
50179@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
50180 "%10s => GlobalAllocs: %d LocalAllocs: %d "
50181 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
50182 "Stats",
50183- atomic_read(&osb->alloc_stats.bitmap_data),
50184- atomic_read(&osb->alloc_stats.local_data),
50185- atomic_read(&osb->alloc_stats.bg_allocs),
50186- atomic_read(&osb->alloc_stats.moves),
50187- atomic_read(&osb->alloc_stats.bg_extends));
50188+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
50189+ atomic_read_unchecked(&osb->alloc_stats.local_data),
50190+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
50191+ atomic_read_unchecked(&osb->alloc_stats.moves),
50192+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
50193
50194 out += snprintf(buf + out, len - out,
50195 "%10s => State: %u Descriptor: %llu Size: %u bits "
50196@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
50197 spin_lock_init(&osb->osb_xattr_lock);
50198 ocfs2_init_steal_slots(osb);
50199
50200- atomic_set(&osb->alloc_stats.moves, 0);
50201- atomic_set(&osb->alloc_stats.local_data, 0);
50202- atomic_set(&osb->alloc_stats.bitmap_data, 0);
50203- atomic_set(&osb->alloc_stats.bg_allocs, 0);
50204- atomic_set(&osb->alloc_stats.bg_extends, 0);
50205+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
50206+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
50207+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
50208+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
50209+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
50210
50211 /* Copy the blockcheck stats from the superblock probe */
50212 osb->osb_ecc_stats = *stats;
50213diff --git a/fs/open.c b/fs/open.c
50214index 59071f5..c6229a0 100644
50215--- a/fs/open.c
50216+++ b/fs/open.c
50217@@ -31,6 +31,8 @@
50218 #include <linux/ima.h>
50219 #include <linux/dnotify.h>
50220
50221+#define CREATE_TRACE_POINTS
50222+#include <trace/events/fs.h>
50223 #include "internal.h"
50224
50225 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
50226@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
50227 error = locks_verify_truncate(inode, NULL, length);
50228 if (!error)
50229 error = security_path_truncate(&path);
50230+
50231+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
50232+ error = -EACCES;
50233+
50234 if (!error)
50235 error = do_truncate(path.dentry, length, 0, NULL);
50236
50237@@ -362,6 +368,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
50238 if (__mnt_is_readonly(path.mnt))
50239 res = -EROFS;
50240
50241+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
50242+ res = -EACCES;
50243+
50244 out_path_release:
50245 path_put(&path);
50246 out:
50247@@ -388,6 +397,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
50248 if (error)
50249 goto dput_and_out;
50250
50251+ gr_log_chdir(path.dentry, path.mnt);
50252+
50253 set_fs_pwd(current->fs, &path);
50254
50255 dput_and_out:
50256@@ -413,6 +424,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
50257 goto out_putf;
50258
50259 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
50260+
50261+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
50262+ error = -EPERM;
50263+
50264+ if (!error)
50265+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
50266+
50267 if (!error)
50268 set_fs_pwd(current->fs, &f.file->f_path);
50269 out_putf:
50270@@ -441,7 +459,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
50271 if (error)
50272 goto dput_and_out;
50273
50274+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
50275+ goto dput_and_out;
50276+
50277 set_fs_root(current->fs, &path);
50278+
50279+ gr_handle_chroot_chdir(&path);
50280+
50281 error = 0;
50282 dput_and_out:
50283 path_put(&path);
50284@@ -459,6 +483,16 @@ static int chmod_common(struct path *path, umode_t mode)
50285 if (error)
50286 return error;
50287 mutex_lock(&inode->i_mutex);
50288+
50289+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
50290+ error = -EACCES;
50291+ goto out_unlock;
50292+ }
50293+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
50294+ error = -EACCES;
50295+ goto out_unlock;
50296+ }
50297+
50298 error = security_path_chmod(path, mode);
50299 if (error)
50300 goto out_unlock;
50301@@ -514,6 +548,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
50302 uid = make_kuid(current_user_ns(), user);
50303 gid = make_kgid(current_user_ns(), group);
50304
50305+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
50306+ return -EACCES;
50307+
50308 newattrs.ia_valid = ATTR_CTIME;
50309 if (user != (uid_t) -1) {
50310 if (!uid_valid(uid))
50311@@ -925,6 +962,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
50312 } else {
50313 fsnotify_open(f);
50314 fd_install(fd, f);
50315+ trace_do_sys_open(tmp->name, flags, mode);
50316 }
50317 }
50318 putname(tmp);
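Two threads run through the fs/open.c changes: grsecurity ACL hooks on truncate, access, chdir/fchdir, chroot, chmod and chown, each of which may only tighten the verdict (turn a prior success into -EACCES or -EPERM, never the reverse), and a new do_sys_open tracepoint (CREATE_TRACE_POINTS plus trace/events/fs.h) that fires with name, flags and mode once the descriptor is installed. The tighten-only hook convention in isolation — note that grsecurity's gr_acl_* helpers return nonzero for "allow", hence the ! in the patch:

    /* Hardening hooks may narrow a verdict (success -> error) but
     * never widen one; errors from the base checks are preserved. */
    #include <stdio.h>

    static int base_permission(void)      { return 0; }
    static int gr_acl_handle_access(void) { return 0; } /* 0 == deny */

    static int faccessat_like(void)
    {
        int res = base_permission();

        if (!res && !gr_acl_handle_access())
            res = -13 /* -EACCES */;  /* tighten only on prior success */
        return res;
    }

    int main(void) { printf("%d\n", faccessat_like()); return 0; }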
50319diff --git a/fs/pipe.c b/fs/pipe.c
50320index bd3479d..fb92c4d 100644
50321--- a/fs/pipe.c
50322+++ b/fs/pipe.c
50323@@ -438,9 +438,9 @@ redo:
50324 }
50325 if (bufs) /* More to do? */
50326 continue;
50327- if (!pipe->writers)
50328+ if (!atomic_read(&pipe->writers))
50329 break;
50330- if (!pipe->waiting_writers) {
50331+ if (!atomic_read(&pipe->waiting_writers)) {
50332 /* syscall merging: Usually we must not sleep
50333 * if O_NONBLOCK is set, or if we got some data.
50334 * But if a writer sleeps in kernel space, then
50335@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
50336 mutex_lock(&inode->i_mutex);
50337 pipe = inode->i_pipe;
50338
50339- if (!pipe->readers) {
50340+ if (!atomic_read(&pipe->readers)) {
50341 send_sig(SIGPIPE, current, 0);
50342 ret = -EPIPE;
50343 goto out;
50344@@ -553,7 +553,7 @@ redo1:
50345 for (;;) {
50346 int bufs;
50347
50348- if (!pipe->readers) {
50349+ if (!atomic_read(&pipe->readers)) {
50350 send_sig(SIGPIPE, current, 0);
50351 if (!ret)
50352 ret = -EPIPE;
50353@@ -644,9 +644,9 @@ redo2:
50354 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50355 do_wakeup = 0;
50356 }
50357- pipe->waiting_writers++;
50358+ atomic_inc(&pipe->waiting_writers);
50359 pipe_wait(pipe);
50360- pipe->waiting_writers--;
50361+ atomic_dec(&pipe->waiting_writers);
50362 }
50363 out:
50364 mutex_unlock(&inode->i_mutex);
50365@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50366 mask = 0;
50367 if (filp->f_mode & FMODE_READ) {
50368 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
50369- if (!pipe->writers && filp->f_version != pipe->w_counter)
50370+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
50371 mask |= POLLHUP;
50372 }
50373
50374@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
50375 * Most Unices do not set POLLERR for FIFOs but on Linux they
50376 * behave exactly like pipes for poll().
50377 */
50378- if (!pipe->readers)
50379+ if (!atomic_read(&pipe->readers))
50380 mask |= POLLERR;
50381 }
50382
50383@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
50384
50385 mutex_lock(&inode->i_mutex);
50386 pipe = inode->i_pipe;
50387- pipe->readers -= decr;
50388- pipe->writers -= decw;
50389+ atomic_sub(decr, &pipe->readers);
50390+ atomic_sub(decw, &pipe->writers);
50391
50392- if (!pipe->readers && !pipe->writers) {
50393+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
50394 free_pipe_info(inode);
50395 } else {
50396 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
50397@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
50398
50399 if (inode->i_pipe) {
50400 ret = 0;
50401- inode->i_pipe->readers++;
50402+ atomic_inc(&inode->i_pipe->readers);
50403 }
50404
50405 mutex_unlock(&inode->i_mutex);
50406@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
50407
50408 if (inode->i_pipe) {
50409 ret = 0;
50410- inode->i_pipe->writers++;
50411+ atomic_inc(&inode->i_pipe->writers);
50412 }
50413
50414 mutex_unlock(&inode->i_mutex);
50415@@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
50416 if (inode->i_pipe) {
50417 ret = 0;
50418 if (filp->f_mode & FMODE_READ)
50419- inode->i_pipe->readers++;
50420+ atomic_inc(&inode->i_pipe->readers);
50421 if (filp->f_mode & FMODE_WRITE)
50422- inode->i_pipe->writers++;
50423+ atomic_inc(&inode->i_pipe->writers);
50424 }
50425
50426 mutex_unlock(&inode->i_mutex);
50427@@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
50428 inode->i_pipe = NULL;
50429 }
50430
50431-static struct vfsmount *pipe_mnt __read_mostly;
50432+struct vfsmount *pipe_mnt __read_mostly;
50433
50434 /*
50435 * pipefs_dname() is called from d_path().
50436@@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
50437 goto fail_iput;
50438 inode->i_pipe = pipe;
50439
50440- pipe->readers = pipe->writers = 1;
50441+ atomic_set(&pipe->readers, 1);
50442+ atomic_set(&pipe->writers, 1);
50443 inode->i_fop = &rdwr_pipefifo_fops;
50444
50445 /*
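pipe_inode_info's readers, writers and waiting_writers counters become atomic_t throughout fs/pipe.c. The counters were previously protected by inode->i_mutex alone; this patch also inspects them from paths that do not hold that mutex (the fuse_dev_splice_read() hunk earlier reads pipe->readers the same way), so every access is converted to tear-free atomic operations. A minimal model with C11 atomics:

    /* Reader/writer accounting readable without the pipe mutex. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct pipe_like {
        atomic_int readers;
        atomic_int writers;
    };

    static void open_rdwr(struct pipe_like *p)
    {
        atomic_fetch_add(&p->readers, 1);
        atomic_fetch_add(&p->writers, 1);
    }

    static int would_sigpipe(struct pipe_like *p)
    {
        /* safe even from a context not holding the pipe lock */
        return atomic_load(&p->readers) == 0;
    }

    int main(void)
    {
        struct pipe_like p = { 0, 0 };
        open_rdwr(&p);
        printf("sigpipe=%d\n", would_sigpipe(&p));
        return 0;
    }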
50446diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
50447index 15af622..0e9f4467 100644
50448--- a/fs/proc/Kconfig
50449+++ b/fs/proc/Kconfig
50450@@ -30,12 +30,12 @@ config PROC_FS
50451
50452 config PROC_KCORE
50453 bool "/proc/kcore support" if !ARM
50454- depends on PROC_FS && MMU
50455+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
50456
50457 config PROC_VMCORE
50458 bool "/proc/vmcore support"
50459- depends on PROC_FS && CRASH_DUMP
50460- default y
50461+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
50462+ default n
50463 help
50464 Exports the dump image of crashed kernel in ELF format.
50465
50466@@ -59,8 +59,8 @@ config PROC_SYSCTL
50467 limited in memory.
50468
50469 config PROC_PAGE_MONITOR
50470- default y
50471- depends on PROC_FS && MMU
50472+ default n
50473+ depends on PROC_FS && MMU && !GRKERNSEC
50474 bool "Enable /proc page monitoring" if EXPERT
50475 help
50476 Various /proc files exist to monitor process memory utilization:
50477diff --git a/fs/proc/array.c b/fs/proc/array.c
50478index bd31e02..15cae71 100644
50479--- a/fs/proc/array.c
50480+++ b/fs/proc/array.c
50481@@ -60,6 +60,7 @@
50482 #include <linux/tty.h>
50483 #include <linux/string.h>
50484 #include <linux/mman.h>
50485+#include <linux/grsecurity.h>
50486 #include <linux/proc_fs.h>
50487 #include <linux/ioport.h>
50488 #include <linux/uaccess.h>
50489@@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
50490 seq_putc(m, '\n');
50491 }
50492
50493+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50494+static inline void task_pax(struct seq_file *m, struct task_struct *p)
50495+{
50496+ if (p->mm)
50497+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
50498+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
50499+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
50500+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
50501+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
50502+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
50503+ else
50504+ seq_printf(m, "PaX:\t-----\n");
50505+}
50506+#endif
50507+
50508 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50509 struct pid *pid, struct task_struct *task)
50510 {
50511@@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
50512 task_cpus_allowed(m, task);
50513 cpuset_task_status_allowed(m, task);
50514 task_context_switch_counts(m, task);
50515+
50516+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50517+ task_pax(m, task);
50518+#endif
50519+
50520+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
50521+ task_grsec_rbac(m, task);
50522+#endif
50523+
50524 return 0;
50525 }
50526
50527+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50528+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50529+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
50530+ _mm->pax_flags & MF_PAX_SEGMEXEC))
50531+#endif
50532+
50533 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50534 struct pid *pid, struct task_struct *task, int whole)
50535 {
50536@@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50537 char tcomm[sizeof(task->comm)];
50538 unsigned long flags;
50539
50540+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50541+ if (current->exec_id != m->exec_id) {
50542+ gr_log_badprocpid("stat");
50543+ return 0;
50544+ }
50545+#endif
50546+
50547 state = *get_task_state(task);
50548 vsize = eip = esp = 0;
50549 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50550@@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50551 gtime = task->gtime;
50552 }
50553
50554+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50555+ if (PAX_RAND_FLAGS(mm)) {
50556+ eip = 0;
50557+ esp = 0;
50558+ wchan = 0;
50559+ }
50560+#endif
50561+#ifdef CONFIG_GRKERNSEC_HIDESYM
50562+ wchan = 0;
50563+ eip =0;
50564+ esp =0;
50565+#endif
50566+
50567 /* scale priority and nice values from timeslices to -20..20 */
50568 /* to make it look like a "normal" Unix priority/nice value */
50569 priority = task_prio(task);
50570@@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50571 seq_put_decimal_ull(m, ' ', vsize);
50572 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
50573 seq_put_decimal_ull(m, ' ', rsslim);
50574+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50575+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
50576+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
50577+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
50578+#else
50579 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
50580 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
50581 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
50582+#endif
50583 seq_put_decimal_ull(m, ' ', esp);
50584 seq_put_decimal_ull(m, ' ', eip);
50585 /* The signal information here is obsolete.
50586@@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
50587 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
50588 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
50589
50590- if (mm && permitted) {
50591+ if (mm && permitted
50592+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50593+ && !PAX_RAND_FLAGS(mm)
50594+#endif
50595+ ) {
50596 seq_put_decimal_ull(m, ' ', mm->start_data);
50597 seq_put_decimal_ull(m, ' ', mm->end_data);
50598 seq_put_decimal_ull(m, ' ', mm->start_brk);
50599@@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50600 struct pid *pid, struct task_struct *task)
50601 {
50602 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
50603- struct mm_struct *mm = get_task_mm(task);
50604+ struct mm_struct *mm;
50605
50606+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50607+ if (current->exec_id != m->exec_id) {
50608+ gr_log_badprocpid("statm");
50609+ return 0;
50610+ }
50611+#endif
50612+ mm = get_task_mm(task);
50613 if (mm) {
50614 size = task_statm(mm, &shared, &text, &data, &resident);
50615 mmput(mm);
50616@@ -580,6 +648,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
50617 return 0;
50618 }
50619
50620+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50621+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
50622+{
50623+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
50624+}
50625+#endif
50626+
50627 #ifdef CONFIG_CHECKPOINT_RESTORE
50628 static struct pid *
50629 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
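The fs/proc/array.c block layers two information-leak defenses. With GRKERNSEC_PROC_MEMMAP, do_task_stat() zeroes eip/esp/wchan and substitutes placeholder code/stack bounds whenever the target mm has RANDMMAP or SEGMEXEC active, so /proc/pid/stat cannot be used to read the randomized layout back. Independently, stat/statm readers compare current->exec_id against the exec_id stamped on the seq_file at open time, so a descriptor opened before an exec cannot observe the post-exec image (gr_log_badprocpid() records the attempt). A model of the staleness check, with the surrounding types reduced to the two fields involved:

    /* Reject procfs reads through an fd opened before the last exec. */
    #include <stdio.h>

    struct task { unsigned long exec_id; };
    struct seqf { unsigned long exec_id; };   /* stamped at open() time */

    static int proc_read_ok(const struct task *cur, const struct seqf *m)
    {
        return cur->exec_id == m->exec_id;    /* stale fd -> refuse */
    }

    int main(void)
    {
        struct task cur = { .exec_id = 7 };
        struct seqf m   = { .exec_id = 6 };   /* opened pre-exec */
        printf("%s\n", proc_read_ok(&cur, &m) ? "ok" : "stale, drop");
        return 0;
    }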
50630diff --git a/fs/proc/base.c b/fs/proc/base.c
50631index 9e28356..c485b3c 100644
50632--- a/fs/proc/base.c
50633+++ b/fs/proc/base.c
50634@@ -111,6 +111,14 @@ struct pid_entry {
50635 union proc_op op;
50636 };
50637
50638+struct getdents_callback {
50639+ struct linux_dirent __user * current_dir;
50640+ struct linux_dirent __user * previous;
50641+ struct file * file;
50642+ int count;
50643+ int error;
50644+};
50645+
50646 #define NOD(NAME, MODE, IOP, FOP, OP) { \
50647 .name = (NAME), \
50648 .len = sizeof(NAME) - 1, \
50649@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
50650 if (!mm->arg_end)
50651 goto out_mm; /* Shh! No looking before we're done */
50652
50653+ if (gr_acl_handle_procpidmem(task))
50654+ goto out_mm;
50655+
50656 len = mm->arg_end - mm->arg_start;
50657
50658 if (len > PAGE_SIZE)
50659@@ -235,12 +246,28 @@ out:
50660 return res;
50661 }
50662
50663+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50664+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
50665+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
50666+ _mm->pax_flags & MF_PAX_SEGMEXEC))
50667+#endif
50668+
50669 static int proc_pid_auxv(struct task_struct *task, char *buffer)
50670 {
50671 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
50672 int res = PTR_ERR(mm);
50673 if (mm && !IS_ERR(mm)) {
50674 unsigned int nwords = 0;
50675+
50676+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50677+ /* allow if we're currently ptracing this task */
50678+ if (PAX_RAND_FLAGS(mm) &&
50679+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
50680+ mmput(mm);
50681+ return 0;
50682+ }
50683+#endif
50684+
50685 do {
50686 nwords += 2;
50687 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
50688@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
50689 }
50690
50691
50692-#ifdef CONFIG_KALLSYMS
50693+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50694 /*
50695 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
50696 * Returns the resolved symbol. If that fails, simply return the address.
50697@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
50698 mutex_unlock(&task->signal->cred_guard_mutex);
50699 }
50700
50701-#ifdef CONFIG_STACKTRACE
50702+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50703
50704 #define MAX_STACK_TRACE_DEPTH 64
50705
50706@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
50707 return count;
50708 }
50709
50710-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50711+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50712 static int proc_pid_syscall(struct task_struct *task, char *buffer)
50713 {
50714 long nr;
50715@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
50716 /************************************************************************/
50717
50718 /* permission checks */
50719-static int proc_fd_access_allowed(struct inode *inode)
50720+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
50721 {
50722 struct task_struct *task;
50723 int allowed = 0;
50724@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
50725 */
50726 task = get_proc_task(inode);
50727 if (task) {
50728- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50729+ if (log)
50730+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
50731+ else
50732+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
50733 put_task_struct(task);
50734 }
50735 return allowed;
50736@@ -562,10 +592,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
50737 struct task_struct *task,
50738 int hide_pid_min)
50739 {
50740+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50741+ return false;
50742+
50743+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50744+ rcu_read_lock();
50745+ {
50746+ const struct cred *tmpcred = current_cred();
50747+ const struct cred *cred = __task_cred(task);
50748+
50749+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
50750+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50751+ || in_group_p(grsec_proc_gid)
50752+#endif
50753+ ) {
50754+ rcu_read_unlock();
50755+ return true;
50756+ }
50757+ }
50758+ rcu_read_unlock();
50759+
50760+ if (!pid->hide_pid)
50761+ return false;
50762+#endif
50763+
50764 if (pid->hide_pid < hide_pid_min)
50765 return true;
50766 if (in_group_p(pid->pid_gid))
50767 return true;
50768+
50769 return ptrace_may_access(task, PTRACE_MODE_READ);
50770 }
50771
50772@@ -583,7 +638,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
50773 put_task_struct(task);
50774
50775 if (!has_perms) {
50776+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50777+ {
50778+#else
50779 if (pid->hide_pid == 2) {
50780+#endif
50781 /*
50782 * Let's make getdents(), stat(), and open()
50783 * consistent with each other. If a process
50784@@ -681,6 +740,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50785 if (!task)
50786 return -ESRCH;
50787
50788+ if (gr_acl_handle_procpidmem(task)) {
50789+ put_task_struct(task);
50790+ return -EPERM;
50791+ }
50792+
50793 mm = mm_access(task, mode);
50794 put_task_struct(task);
50795
50796@@ -696,6 +760,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
50797
50798 file->private_data = mm;
50799
50800+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50801+ file->f_version = current->exec_id;
50802+#endif
50803+
50804 return 0;
50805 }
50806
50807@@ -717,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
50808 ssize_t copied;
50809 char *page;
50810
50811+#ifdef CONFIG_GRKERNSEC
50812+ if (write)
50813+ return -EPERM;
50814+#endif
50815+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50816+ if (file->f_version != current->exec_id) {
50817+ gr_log_badprocpid("mem");
50818+ return 0;
50819+ }
50820+#endif
50821+
50822 if (!mm)
50823 return 0;
50824
50825@@ -821,6 +900,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
50826 if (!mm)
50827 return 0;
50828
50829+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50830+ if (file->f_version != current->exec_id) {
50831+ gr_log_badprocpid("environ");
50832+ return 0;
50833+ }
50834+#endif
50835+
50836 page = (char *)__get_free_page(GFP_TEMPORARY);
50837 if (!page)
50838 return -ENOMEM;
50839@@ -1436,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
50840 int error = -EACCES;
50841
50842 /* Are we allowed to snoop on the tasks file descriptors? */
50843- if (!proc_fd_access_allowed(inode))
50844+ if (!proc_fd_access_allowed(inode, 0))
50845 goto out;
50846
50847 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50848@@ -1480,8 +1566,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
50849 struct path path;
50850
50851 /* Are we allowed to snoop on the tasks file descriptors? */
50852- if (!proc_fd_access_allowed(inode))
50853- goto out;
50854+	/* logging here is needed for RBAC learning on chromium to work
50855+	   properly, but we don't want 'ps' (which does a readlink on
50856+	   /proc/<pid>/fd/2 of every task in the listing) to flood the logs,
50857+	   nor to learn CAP_SYS_PTRACE, which its basic functionality
50858+	   does not require */
50859+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
50860+		if (!proc_fd_access_allowed(inode, 0))
50861+ goto out;
50862+ } else {
50863+		if (!proc_fd_access_allowed(inode, 1))
50864+ goto out;
50865+ }
50866
50867 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
50868 if (error)
50869@@ -1531,7 +1627,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
50870 rcu_read_lock();
50871 cred = __task_cred(task);
50872 inode->i_uid = cred->euid;
50873+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50874+ inode->i_gid = grsec_proc_gid;
50875+#else
50876 inode->i_gid = cred->egid;
50877+#endif
50878 rcu_read_unlock();
50879 }
50880 security_task_to_inode(task, inode);
50881@@ -1567,10 +1667,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
50882 return -ENOENT;
50883 }
50884 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50885+#ifdef CONFIG_GRKERNSEC_PROC_USER
50886+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50887+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50888+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50889+#endif
50890 task_dumpable(task)) {
50891 cred = __task_cred(task);
50892 stat->uid = cred->euid;
50893+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50894+ stat->gid = grsec_proc_gid;
50895+#else
50896 stat->gid = cred->egid;
50897+#endif
50898 }
50899 }
50900 rcu_read_unlock();
50901@@ -1608,11 +1717,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
50902
50903 if (task) {
50904 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
50905+#ifdef CONFIG_GRKERNSEC_PROC_USER
50906+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
50907+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50908+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
50909+#endif
50910 task_dumpable(task)) {
50911 rcu_read_lock();
50912 cred = __task_cred(task);
50913 inode->i_uid = cred->euid;
50914+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
50915+ inode->i_gid = grsec_proc_gid;
50916+#else
50917 inode->i_gid = cred->egid;
50918+#endif
50919 rcu_read_unlock();
50920 } else {
50921 inode->i_uid = GLOBAL_ROOT_UID;
50922@@ -2065,6 +2183,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
50923 if (!task)
50924 goto out_no_task;
50925
50926+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50927+ goto out;
50928+
50929 /*
50930 * Yes, it does not scale. And it should not. Don't add
50931 * new entries into /proc/<tgid>/ without very good reasons.
50932@@ -2109,6 +2230,9 @@ static int proc_pident_readdir(struct file *filp,
50933 if (!task)
50934 goto out_no_task;
50935
50936+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
50937+ goto out;
50938+
50939 ret = 0;
50940 i = filp->f_pos;
50941 switch (i) {
50942@@ -2380,7 +2504,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
50943 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
50944 void *cookie)
50945 {
50946- char *s = nd_get_link(nd);
50947+ const char *s = nd_get_link(nd);
50948 if (!IS_ERR(s))
50949 kfree(s);
50950 }
50951@@ -2662,7 +2786,7 @@ static const struct pid_entry tgid_base_stuff[] = {
50952 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
50953 #endif
50954 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
50955-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
50956+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50957 INF("syscall", S_IRUGO, proc_pid_syscall),
50958 #endif
50959 INF("cmdline", S_IRUGO, proc_pid_cmdline),
50960@@ -2687,10 +2811,10 @@ static const struct pid_entry tgid_base_stuff[] = {
50961 #ifdef CONFIG_SECURITY
50962 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
50963 #endif
50964-#ifdef CONFIG_KALLSYMS
50965+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50966 INF("wchan", S_IRUGO, proc_pid_wchan),
50967 #endif
50968-#ifdef CONFIG_STACKTRACE
50969+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
50970 ONE("stack", S_IRUGO, proc_pid_stack),
50971 #endif
50972 #ifdef CONFIG_SCHEDSTATS
50973@@ -2724,6 +2848,9 @@ static const struct pid_entry tgid_base_stuff[] = {
50974 #ifdef CONFIG_HARDWALL
50975 INF("hardwall", S_IRUGO, proc_pid_hardwall),
50976 #endif
50977+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
50978+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
50979+#endif
50980 #ifdef CONFIG_USER_NS
50981 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
50982 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
50983@@ -2856,7 +2983,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
50984 if (!inode)
50985 goto out;
50986
50987+#ifdef CONFIG_GRKERNSEC_PROC_USER
50988+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
50989+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50990+ inode->i_gid = grsec_proc_gid;
50991+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
50992+#else
50993 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
50994+#endif
50995 inode->i_op = &proc_tgid_base_inode_operations;
50996 inode->i_fop = &proc_tgid_base_operations;
50997 inode->i_flags|=S_IMMUTABLE;
50998@@ -2898,7 +3032,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
50999 if (!task)
51000 goto out;
51001
51002+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
51003+ goto out_put_task;
51004+
51005 result = proc_pid_instantiate(dir, dentry, task, NULL);
51006+out_put_task:
51007 put_task_struct(task);
51008 out:
51009 return result;
51010@@ -2961,6 +3099,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
51011 static int fake_filldir(void *buf, const char *name, int namelen,
51012 loff_t offset, u64 ino, unsigned d_type)
51013 {
51014+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
51015+ __buf->error = -EINVAL;
51016 return 0;
51017 }
51018
51019@@ -3027,7 +3167,7 @@ static const struct pid_entry tid_base_stuff[] = {
51020 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
51021 #endif
51022 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
51023-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
51024+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
51025 INF("syscall", S_IRUGO, proc_pid_syscall),
51026 #endif
51027 INF("cmdline", S_IRUGO, proc_pid_cmdline),
51028@@ -3054,10 +3194,10 @@ static const struct pid_entry tid_base_stuff[] = {
51029 #ifdef CONFIG_SECURITY
51030 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
51031 #endif
51032-#ifdef CONFIG_KALLSYMS
51033+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
51034 INF("wchan", S_IRUGO, proc_pid_wchan),
51035 #endif
51036-#ifdef CONFIG_STACKTRACE
51037+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
51038 ONE("stack", S_IRUGO, proc_pid_stack),
51039 #endif
51040 #ifdef CONFIG_SCHEDSTATS
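
fake_filldir() above now records -EINVAL in the getdents_callback it is handed, which is why the patch clones that struct's layout from fs/readdir.c into fs/proc/base.c: the callback only receives a void *, so the two layouts must match. A sketch of the same filldir_t contract, under that layout assumption:

	/* filldir_t as of 3.7: return 0 to continue iterating, negative to
	 * stop. This callback copies no entry and flags the attempt, so a
	 * hidden pid enumerates as empty while getdents() reports failure. */
	static int reject_filldir(void *__buf, const char *name, int namelen,
				  loff_t offset, u64 ino, unsigned d_type)
	{
		struct getdents_callback *buf = __buf;

		buf->error = -EINVAL;	/* surfaced by sys_getdents() */
		return 0;
	}
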
51041diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
51042index 82676e3..5f8518a 100644
51043--- a/fs/proc/cmdline.c
51044+++ b/fs/proc/cmdline.c
51045@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
51046
51047 static int __init proc_cmdline_init(void)
51048 {
51049+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51050+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
51051+#else
51052 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
51053+#endif
51054 return 0;
51055 }
51056 module_init(proc_cmdline_init);
51057diff --git a/fs/proc/devices.c b/fs/proc/devices.c
51058index b143471..bb105e5 100644
51059--- a/fs/proc/devices.c
51060+++ b/fs/proc/devices.c
51061@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
51062
51063 static int __init proc_devices_init(void)
51064 {
51065+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51066+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
51067+#else
51068 proc_create("devices", 0, NULL, &proc_devinfo_operations);
51069+#endif
51070 return 0;
51071 }
51072 module_init(proc_devices_init);
51073diff --git a/fs/proc/fd.c b/fs/proc/fd.c
51074index f28a875..c467953 100644
51075--- a/fs/proc/fd.c
51076+++ b/fs/proc/fd.c
51077@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
51078 if (!task)
51079 return -ENOENT;
51080
51081- files = get_files_struct(task);
51082+ if (!gr_acl_handle_procpidmem(task))
51083+ files = get_files_struct(task);
51084 put_task_struct(task);
51085
51086 if (files) {
51087@@ -300,11 +301,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
51088 */
51089 int proc_fd_permission(struct inode *inode, int mask)
51090 {
51091+ struct task_struct *task;
51092 int rv = generic_permission(inode, mask);
51093- if (rv == 0)
51094- return 0;
51095+
51096 if (task_pid(current) == proc_pid(inode))
51097 rv = 0;
51098+
51099+ task = get_proc_task(inode);
51100+ if (task == NULL)
51101+ return rv;
51102+
51103+ if (gr_acl_handle_procpidmem(task))
51104+ rv = -EACCES;
51105+
51106+ put_task_struct(task);
51107+
51108 return rv;
51109 }
51110
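
The rewritten proc_fd_permission() above drops the early return on a successful generic_permission() so that every decision layer is consulted. Roughly, assuming gr_acl_handle_procpidmem() is the grsecurity ACL hook returning nonzero to deny:

	int rv = generic_permission(inode, mask);	/* 1: DAC baseline */
	if (task_pid(current) == proc_pid(inode))
		rv = 0;			/* 2: a task may always inspect its own fds */
	if (gr_acl_handle_procpidmem(task))
		rv = -EACCES;		/* 3: ACL veto, now always evaluated */
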
51111diff --git a/fs/proc/inode.c b/fs/proc/inode.c
51112index 3b22bbd..895b58c 100644
51113--- a/fs/proc/inode.c
51114+++ b/fs/proc/inode.c
51115@@ -21,11 +21,17 @@
51116 #include <linux/seq_file.h>
51117 #include <linux/slab.h>
51118 #include <linux/mount.h>
51119+#include <linux/grsecurity.h>
51120
51121 #include <asm/uaccess.h>
51122
51123 #include "internal.h"
51124
51125+#ifdef CONFIG_PROC_SYSCTL
51126+extern const struct inode_operations proc_sys_inode_operations;
51127+extern const struct inode_operations proc_sys_dir_operations;
51128+#endif
51129+
51130 static void proc_evict_inode(struct inode *inode)
51131 {
51132 struct proc_dir_entry *de;
51133@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
51134 ns_ops = PROC_I(inode)->ns_ops;
51135 if (ns_ops && ns_ops->put)
51136 ns_ops->put(PROC_I(inode)->ns);
51137+
51138+#ifdef CONFIG_PROC_SYSCTL
51139+ if (inode->i_op == &proc_sys_inode_operations ||
51140+ inode->i_op == &proc_sys_dir_operations)
51141+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
51142+#endif
51143+
51144 }
51145
51146 static struct kmem_cache * proc_inode_cachep;
51147@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
51148 if (de->mode) {
51149 inode->i_mode = de->mode;
51150 inode->i_uid = de->uid;
51151+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
51152+ inode->i_gid = grsec_proc_gid;
51153+#else
51154 inode->i_gid = de->gid;
51155+#endif
51156 }
51157 if (de->size)
51158 inode->i_size = de->size;
51159diff --git a/fs/proc/internal.h b/fs/proc/internal.h
51160index 43973b0..a20e704 100644
51161--- a/fs/proc/internal.h
51162+++ b/fs/proc/internal.h
51163@@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
51164 struct pid *pid, struct task_struct *task);
51165 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
51166 struct pid *pid, struct task_struct *task);
51167+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
51168+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
51169+#endif
51170 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
51171
51172 extern const struct file_operations proc_tid_children_operations;
51173diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
51174index 86c67ee..cdca321 100644
51175--- a/fs/proc/kcore.c
51176+++ b/fs/proc/kcore.c
51177@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
51178 * the addresses in the elf_phdr on our list.
51179 */
51180 start = kc_offset_to_vaddr(*fpos - elf_buflen);
51181- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
51182+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
51183+ if (tsz > buflen)
51184 tsz = buflen;
51185-
51186+
51187 while (buflen) {
51188 struct kcore_list *m;
51189
51190@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
51191 kfree(elf_buf);
51192 } else {
51193 if (kern_addr_valid(start)) {
51194- unsigned long n;
51195+ char *elf_buf;
51196+ mm_segment_t oldfs;
51197
51198- n = copy_to_user(buffer, (char *)start, tsz);
51199- /*
51200- * We cannot distinguish between fault on source
51201- * and fault on destination. When this happens
51202- * we clear too and hope it will trigger the
51203- * EFAULT again.
51204- */
51205- if (n) {
51206- if (clear_user(buffer + tsz - n,
51207- n))
51208+ elf_buf = kmalloc(tsz, GFP_KERNEL);
51209+ if (!elf_buf)
51210+ return -ENOMEM;
51211+ oldfs = get_fs();
51212+ set_fs(KERNEL_DS);
51213+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
51214+ set_fs(oldfs);
51215+ if (copy_to_user(buffer, elf_buf, tsz)) {
51216+ kfree(elf_buf);
51217 return -EFAULT;
51218+ }
51219 }
51220+ set_fs(oldfs);
51221+ kfree(elf_buf);
51222 } else {
51223 if (clear_user(buffer, tsz))
51224 return -EFAULT;
51225@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
51226
51227 static int open_kcore(struct inode *inode, struct file *filp)
51228 {
51229+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
51230+ return -EPERM;
51231+#endif
51232 if (!capable(CAP_SYS_RAWIO))
51233 return -EPERM;
51234 if (kcore_need_update)
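
The read_kcore() hunk replaces a single copy_to_user() from a kernel virtual address with a two-stage copy: the kernel page is first probed into a temporary buffer under set_fs(KERNEL_DS), so a fault on the kernel side can be told apart from a fault on the user side. A condensed sketch of the idiom (3.7-era API; set_fs() and KERNEL_DS have since been removed from mainline):

	static int bounce_read(char __user *ubuf, unsigned long src, size_t tsz)
	{
		mm_segment_t oldfs;
		unsigned long not_read;
		char *tmp = kmalloc(tsz, GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;

		oldfs = get_fs();
		set_fs(KERNEL_DS);	/* let __copy_from_user() accept a kernel VA */
		not_read = __copy_from_user(tmp, (const void __user *)src, tsz);
		set_fs(oldfs);

		/* a fault now is unambiguously on the user side */
		if (!not_read && copy_to_user(ubuf, tmp, tsz)) {
			kfree(tmp);
			return -EFAULT;
		}
		kfree(tmp);
		return 0;	/* unreadable kernel page: skipped quietly, as above */
	}
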
51235diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
51236index 80e4645..53e5fcf 100644
51237--- a/fs/proc/meminfo.c
51238+++ b/fs/proc/meminfo.c
51239@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
51240 vmi.used >> 10,
51241 vmi.largest_chunk >> 10
51242 #ifdef CONFIG_MEMORY_FAILURE
51243- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
51244+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
51245 #endif
51246 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
51247 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
51248diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
51249index b1822dd..df622cb 100644
51250--- a/fs/proc/nommu.c
51251+++ b/fs/proc/nommu.c
51252@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
51253 if (len < 1)
51254 len = 1;
51255 seq_printf(m, "%*c", len, ' ');
51256- seq_path(m, &file->f_path, "");
51257+ seq_path(m, &file->f_path, "\n\\");
51258 }
51259
51260 seq_putc(m, '\n');
51261diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
51262index fe72cd0..cb9b67d 100644
51263--- a/fs/proc/proc_net.c
51264+++ b/fs/proc/proc_net.c
51265@@ -23,6 +23,7 @@
51266 #include <linux/nsproxy.h>
51267 #include <net/net_namespace.h>
51268 #include <linux/seq_file.h>
51269+#include <linux/grsecurity.h>
51270
51271 #include "internal.h"
51272
51273@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
51274 struct task_struct *task;
51275 struct nsproxy *ns;
51276 struct net *net = NULL;
51277+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51278+ const struct cred *cred = current_cred();
51279+#endif
51280+
51281+#ifdef CONFIG_GRKERNSEC_PROC_USER
51282+ if (cred->fsuid)
51283+ return net;
51284+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51285+ if (cred->fsuid && !in_group_p(grsec_proc_gid))
51286+ return net;
51287+#endif
51288
51289 rcu_read_lock();
51290 task = pid_task(proc_pid(dir), PIDTYPE_PID);
51291diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
51292index a781bdf..6665284 100644
51293--- a/fs/proc/proc_sysctl.c
51294+++ b/fs/proc/proc_sysctl.c
51295@@ -12,11 +12,15 @@
51296 #include <linux/module.h>
51297 #include "internal.h"
51298
51299+extern int gr_handle_chroot_sysctl(const int op);
51300+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
51301+ const int op);
51302+
51303 static const struct dentry_operations proc_sys_dentry_operations;
51304 static const struct file_operations proc_sys_file_operations;
51305-static const struct inode_operations proc_sys_inode_operations;
51306+const struct inode_operations proc_sys_inode_operations;
51307 static const struct file_operations proc_sys_dir_file_operations;
51308-static const struct inode_operations proc_sys_dir_operations;
51309+const struct inode_operations proc_sys_dir_operations;
51310
51311 void proc_sys_poll_notify(struct ctl_table_poll *poll)
51312 {
51313@@ -465,6 +469,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
51314
51315 err = NULL;
51316 d_set_d_op(dentry, &proc_sys_dentry_operations);
51317+
51318+ gr_handle_proc_create(dentry, inode);
51319+
51320 d_add(dentry, inode);
51321
51322 out:
51323@@ -480,18 +487,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51324 struct inode *inode = filp->f_path.dentry->d_inode;
51325 struct ctl_table_header *head = grab_header(inode);
51326 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
51327+ int op = write ? MAY_WRITE : MAY_READ;
51328 ssize_t error;
51329 size_t res;
51330
51331 if (IS_ERR(head))
51332 return PTR_ERR(head);
51333
51334+
51335 /*
51336 * At this point we know that the sysctl was not unregistered
51337 * and won't be until we finish.
51338 */
51339 error = -EPERM;
51340- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
51341+ if (sysctl_perm(head->root, table, op))
51342 goto out;
51343
51344 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
51345@@ -499,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
51346 if (!table->proc_handler)
51347 goto out;
51348
51349+#ifdef CONFIG_GRKERNSEC
51350+ error = -EPERM;
51351+ if (gr_handle_chroot_sysctl(op))
51352+ goto out;
51353+ dget(filp->f_path.dentry);
51354+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
51355+ dput(filp->f_path.dentry);
51356+ goto out;
51357+ }
51358+ dput(filp->f_path.dentry);
51359+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
51360+ goto out;
51361+ if (write && !capable(CAP_SYS_ADMIN))
51362+ goto out;
51363+#endif
51364+
51365 /* careful: calling conventions are nasty here */
51366 res = count;
51367 error = table->proc_handler(table, write, buf, &res, ppos);
51368@@ -596,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
51369 return -ENOMEM;
51370 } else {
51371 d_set_d_op(child, &proc_sys_dentry_operations);
51372+
51373+ gr_handle_proc_create(child, inode);
51374+
51375 d_add(child, inode);
51376 }
51377 } else {
51378@@ -639,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
51379 if ((*pos)++ < file->f_pos)
51380 return 0;
51381
51382+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
51383+ return 0;
51384+
51385 if (unlikely(S_ISLNK(table->mode)))
51386 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
51387 else
51388@@ -756,6 +787,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
51389 if (IS_ERR(head))
51390 return PTR_ERR(head);
51391
51392+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
51393+ return -ENOENT;
51394+
51395 generic_fillattr(inode, stat);
51396 if (table)
51397 stat->mode = (stat->mode & S_IFMT) | table->mode;
51398@@ -778,13 +812,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
51399 .llseek = generic_file_llseek,
51400 };
51401
51402-static const struct inode_operations proc_sys_inode_operations = {
51403+const struct inode_operations proc_sys_inode_operations = {
51404 .permission = proc_sys_permission,
51405 .setattr = proc_sys_setattr,
51406 .getattr = proc_sys_getattr,
51407 };
51408
51409-static const struct inode_operations proc_sys_dir_operations = {
51410+const struct inode_operations proc_sys_dir_operations = {
51411 .lookup = proc_sys_lookup,
51412 .permission = proc_sys_permission,
51413 .setattr = proc_sys_setattr,
51414diff --git a/fs/proc/root.c b/fs/proc/root.c
51415index 9889a92..2613b48 100644
51416--- a/fs/proc/root.c
51417+++ b/fs/proc/root.c
51418@@ -187,7 +187,15 @@ void __init proc_root_init(void)
51419 #ifdef CONFIG_PROC_DEVICETREE
51420 proc_device_tree_init();
51421 #endif
51422+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51423+#ifdef CONFIG_GRKERNSEC_PROC_USER
51424+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
51425+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51426+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51427+#endif
51428+#else
51429 proc_mkdir("bus", NULL);
51430+#endif
51431 proc_sys_init();
51432 }
51433
51434diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
51435index 90c63f9..e662cfc 100644
51436--- a/fs/proc/task_mmu.c
51437+++ b/fs/proc/task_mmu.c
51438@@ -11,12 +11,19 @@
51439 #include <linux/rmap.h>
51440 #include <linux/swap.h>
51441 #include <linux/swapops.h>
51442+#include <linux/grsecurity.h>
51443
51444 #include <asm/elf.h>
51445 #include <asm/uaccess.h>
51446 #include <asm/tlbflush.h>
51447 #include "internal.h"
51448
51449+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51450+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
51451+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
51452+ _mm->pax_flags & MF_PAX_SEGMEXEC))
51453+#endif
51454+
51455 void task_mem(struct seq_file *m, struct mm_struct *mm)
51456 {
51457 unsigned long data, text, lib, swap;
51458@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51459 "VmExe:\t%8lu kB\n"
51460 "VmLib:\t%8lu kB\n"
51461 "VmPTE:\t%8lu kB\n"
51462- "VmSwap:\t%8lu kB\n",
51463- hiwater_vm << (PAGE_SHIFT-10),
51464+ "VmSwap:\t%8lu kB\n"
51465+
51466+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51467+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
51468+#endif
51469+
51470+ ,hiwater_vm << (PAGE_SHIFT-10),
51471 total_vm << (PAGE_SHIFT-10),
51472 mm->locked_vm << (PAGE_SHIFT-10),
51473 mm->pinned_vm << (PAGE_SHIFT-10),
51474@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51475 data << (PAGE_SHIFT-10),
51476 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
51477 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
51478- swap << (PAGE_SHIFT-10));
51479+ swap << (PAGE_SHIFT-10)
51480+
51481+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51482+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51483+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
51484+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
51485+#else
51486+ , mm->context.user_cs_base
51487+ , mm->context.user_cs_limit
51488+#endif
51489+#endif
51490+
51491+ );
51492 }
51493
51494 unsigned long task_vsize(struct mm_struct *mm)
51495@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51496 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
51497 }
51498
51499- /* We don't show the stack guard page in /proc/maps */
51500+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51501+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
51502+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
51503+#else
51504 start = vma->vm_start;
51505- if (stack_guard_page_start(vma, start))
51506- start += PAGE_SIZE;
51507 end = vma->vm_end;
51508- if (stack_guard_page_end(vma, end))
51509- end -= PAGE_SIZE;
51510+#endif
51511
51512 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
51513 start,
51514@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51515 flags & VM_WRITE ? 'w' : '-',
51516 flags & VM_EXEC ? 'x' : '-',
51517 flags & VM_MAYSHARE ? 's' : 'p',
51518+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51519+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
51520+#else
51521 pgoff,
51522+#endif
51523 MAJOR(dev), MINOR(dev), ino, &len);
51524
51525 /*
51526@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51527 */
51528 if (file) {
51529 pad_len_spaces(m, len);
51530- seq_path(m, &file->f_path, "\n");
51531+ seq_path(m, &file->f_path, "\n\\");
51532 goto done;
51533 }
51534
51535@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
51536 * Thread stack in /proc/PID/task/TID/maps or
51537 * the main process stack.
51538 */
51539- if (!is_pid || (vma->vm_start <= mm->start_stack &&
51540- vma->vm_end >= mm->start_stack)) {
51541+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
51542+ (vma->vm_start <= mm->start_stack &&
51543+ vma->vm_end >= mm->start_stack)) {
51544 name = "[stack]";
51545 } else {
51546 /* Thread stack in /proc/PID/maps */
51547@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
51548 struct proc_maps_private *priv = m->private;
51549 struct task_struct *task = priv->task;
51550
51551+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51552+ if (current->exec_id != m->exec_id) {
51553+ gr_log_badprocpid("maps");
51554+ return 0;
51555+ }
51556+#endif
51557+
51558 show_map_vma(m, vma, is_pid);
51559
51560 if (m->count < m->size) /* vma is copied successfully */
51561@@ -538,12 +574,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51562 .private = &mss,
51563 };
51564
51565+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51566+ if (current->exec_id != m->exec_id) {
51567+ gr_log_badprocpid("smaps");
51568+ return 0;
51569+ }
51570+#endif
51571 memset(&mss, 0, sizeof mss);
51572- mss.vma = vma;
51573- /* mmap_sem is held in m_start */
51574- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51575- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51576-
51577+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51578+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
51579+#endif
51580+ mss.vma = vma;
51581+ /* mmap_sem is held in m_start */
51582+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
51583+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
51584+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51585+ }
51586+#endif
51587 show_map_vma(m, vma, is_pid);
51588
51589 seq_printf(m,
51590@@ -561,7 +608,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
51591 "KernelPageSize: %8lu kB\n"
51592 "MMUPageSize: %8lu kB\n"
51593 "Locked: %8lu kB\n",
51594+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51595+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
51596+#else
51597 (vma->vm_end - vma->vm_start) >> 10,
51598+#endif
51599 mss.resident >> 10,
51600 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
51601 mss.shared_clean >> 10,
51602@@ -1211,6 +1262,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51603 int n;
51604 char buffer[50];
51605
51606+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51607+ if (current->exec_id != m->exec_id) {
51608+ gr_log_badprocpid("numa_maps");
51609+ return 0;
51610+ }
51611+#endif
51612+
51613 if (!mm)
51614 return 0;
51615
51616@@ -1228,11 +1286,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
51617 mpol_to_str(buffer, sizeof(buffer), pol, 0);
51618 mpol_cond_put(pol);
51619
51620+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51621+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
51622+#else
51623 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
51624+#endif
51625
51626 if (file) {
51627 seq_printf(m, " file=");
51628- seq_path(m, &file->f_path, "\n\t= ");
51629+ seq_path(m, &file->f_path, "\n\t\\= ");
51630 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
51631 seq_printf(m, " heap");
51632 } else {
51633diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
51634index 1ccfa53..0848f95 100644
51635--- a/fs/proc/task_nommu.c
51636+++ b/fs/proc/task_nommu.c
51637@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
51638 else
51639 bytes += kobjsize(mm);
51640
51641- if (current->fs && current->fs->users > 1)
51642+ if (current->fs && atomic_read(&current->fs->users) > 1)
51643 sbytes += kobjsize(current->fs);
51644 else
51645 bytes += kobjsize(current->fs);
51646@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
51647
51648 if (file) {
51649 pad_len_spaces(m, len);
51650- seq_path(m, &file->f_path, "");
51651+ seq_path(m, &file->f_path, "\n\\");
51652 } else if (mm) {
51653 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
51654
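
Each seq_path() change in the map-showing code above swaps the escape set from "" or "\n" to "\n\\". The third argument of seq_path() names the characters that mangle_path() rewrites as \ooo octal escapes; escaping the newline stops a crafted file name from injecting fabricated lines into /proc/<pid>/maps output, and escaping the backslash keeps the escaping itself unambiguous. For instance:

	/* a mapping of a file named "evil\n08048000-..." could otherwise
	 * forge an extra maps line; with "\n\\" it prints escaped */
	seq_path(m, &file->f_path, "\n\\");	/* '\n' -> "\012", '\\' -> "\134" */
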
51655diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
51656index 2d57e1a..43b1280 100644
51657--- a/fs/pstore/ftrace.c
51658+++ b/fs/pstore/ftrace.c
51659@@ -28,7 +28,9 @@
51660 #include "internal.h"
51661
51662 static void notrace pstore_ftrace_call(unsigned long ip,
51663- unsigned long parent_ip)
51664+ unsigned long parent_ip,
51665+ struct ftrace_ops *op,
51666+ struct pt_regs *regs)
51667 {
51668 unsigned long flags;
51669 struct pstore_ftrace_record rec = {};
51670diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
51671index 16e8abb..2dcf914 100644
51672--- a/fs/quota/netlink.c
51673+++ b/fs/quota/netlink.c
51674@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
51675 void quota_send_warning(struct kqid qid, dev_t dev,
51676 const char warntype)
51677 {
51678- static atomic_t seq;
51679+ static atomic_unchecked_t seq;
51680 struct sk_buff *skb;
51681 void *msg_head;
51682 int ret;
51683@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
51684 "VFS: Not enough memory to send quota warning.\n");
51685 return;
51686 }
51687- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
51688+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
51689 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
51690 if (!msg_head) {
51691 printk(KERN_ERR
51692diff --git a/fs/read_write.c b/fs/read_write.c
51693index d065348..8e2b43d 100644
51694--- a/fs/read_write.c
51695+++ b/fs/read_write.c
51696@@ -935,6 +935,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
51697 if (retval > 0) {
51698 add_rchar(current, retval);
51699 add_wchar(current, retval);
51700+ fsnotify_access(in.file);
51701+ fsnotify_modify(out.file);
51702 }
51703
51704 inc_syscr(current);
51705diff --git a/fs/readdir.c b/fs/readdir.c
51706index 5e69ef5..e5d9099 100644
51707--- a/fs/readdir.c
51708+++ b/fs/readdir.c
51709@@ -17,6 +17,7 @@
51710 #include <linux/security.h>
51711 #include <linux/syscalls.h>
51712 #include <linux/unistd.h>
51713+#include <linux/namei.h>
51714
51715 #include <asm/uaccess.h>
51716
51717@@ -67,6 +68,7 @@ struct old_linux_dirent {
51718
51719 struct readdir_callback {
51720 struct old_linux_dirent __user * dirent;
51721+ struct file * file;
51722 int result;
51723 };
51724
51725@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
51726 buf->result = -EOVERFLOW;
51727 return -EOVERFLOW;
51728 }
51729+
51730+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51731+ return 0;
51732+
51733 buf->result++;
51734 dirent = buf->dirent;
51735 if (!access_ok(VERIFY_WRITE, dirent,
51736@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
51737
51738 buf.result = 0;
51739 buf.dirent = dirent;
51740+ buf.file = f.file;
51741
51742 error = vfs_readdir(f.file, fillonedir, &buf);
51743 if (buf.result)
51744@@ -139,6 +146,7 @@ struct linux_dirent {
51745 struct getdents_callback {
51746 struct linux_dirent __user * current_dir;
51747 struct linux_dirent __user * previous;
51748+ struct file * file;
51749 int count;
51750 int error;
51751 };
51752@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
51753 buf->error = -EOVERFLOW;
51754 return -EOVERFLOW;
51755 }
51756+
51757+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51758+ return 0;
51759+
51760 dirent = buf->previous;
51761 if (dirent) {
51762 if (__put_user(offset, &dirent->d_off))
51763@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51764 buf.previous = NULL;
51765 buf.count = count;
51766 buf.error = 0;
51767+ buf.file = f.file;
51768
51769 error = vfs_readdir(f.file, filldir, &buf);
51770 if (error >= 0)
51771@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
51772 struct getdents_callback64 {
51773 struct linux_dirent64 __user * current_dir;
51774 struct linux_dirent64 __user * previous;
51775+ struct file *file;
51776 int count;
51777 int error;
51778 };
51779@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
51780 buf->error = -EINVAL; /* only used if we fail.. */
51781 if (reclen > buf->count)
51782 return -EINVAL;
51783+
51784+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
51785+ return 0;
51786+
51787 dirent = buf->previous;
51788 if (dirent) {
51789 if (__put_user(offset, &dirent->d_off))
51790@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51791
51792 buf.current_dir = dirent;
51793 buf.previous = NULL;
51794+ buf.file = f.file;
51795 buf.count = count;
51796 buf.error = 0;
51797
51798@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
51799 error = buf.error;
51800 lastdirent = buf.previous;
51801 if (lastdirent) {
51802- typeof(lastdirent->d_off) d_off = f.file->f_pos;
51803+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
51804 if (__put_user(d_off, &lastdirent->d_off))
51805 error = -EFAULT;
51806 else
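
The getdents64 hunk swaps typeof(lastdirent->d_off) for typeof(((struct linux_dirent64 *)0)->d_off). Both name the same member type; the second form is the offsetof()-style idiom that derives a member's type from the struct type alone, without mentioning the __user-annotated pointer variable, presumably to keep grsecurity's gcc plugins and sparse quiet about user pointers in type expressions. It is safe because operands of typeof()/sizeof() are never evaluated:

	/* names the type of d_off without any object; nothing is dereferenced */
	typeof(((struct linux_dirent64 *)0)->d_off) d_off = 0;

	/* the same trick underlies the classic offsetof() definition */
	#define my_offsetof(type, member) ((size_t)&((type *)0)->member)
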
51807diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
51808index 2b7882b..1c5ef48 100644
51809--- a/fs/reiserfs/do_balan.c
51810+++ b/fs/reiserfs/do_balan.c
51811@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
51812 return;
51813 }
51814
51815- atomic_inc(&(fs_generation(tb->tb_sb)));
51816+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
51817 do_balance_starts(tb);
51818
51819 /* balance leaf returns 0 except if combining L R and S into
51820diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
51821index e60e870..f40ac16 100644
51822--- a/fs/reiserfs/procfs.c
51823+++ b/fs/reiserfs/procfs.c
51824@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
51825 "SMALL_TAILS " : "NO_TAILS ",
51826 replay_only(sb) ? "REPLAY_ONLY " : "",
51827 convert_reiserfs(sb) ? "CONV " : "",
51828- atomic_read(&r->s_generation_counter),
51829+ atomic_read_unchecked(&r->s_generation_counter),
51830 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
51831 SF(s_do_balance), SF(s_unneeded_left_neighbor),
51832 SF(s_good_search_by_key_reada), SF(s_bmaps),
51833diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
51834index 33215f5..c5d427a 100644
51835--- a/fs/reiserfs/reiserfs.h
51836+++ b/fs/reiserfs/reiserfs.h
51837@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
51838 /* Comment? -Hans */
51839 wait_queue_head_t s_wait;
51840 /* To be obsoleted soon by per buffer seals.. -Hans */
51841- atomic_t s_generation_counter; // increased by one every time the
51842+ atomic_unchecked_t s_generation_counter; // increased by one every time the
51843 // tree gets re-balanced
51844 unsigned long s_properties; /* File system properties. Currently holds
51845 on-disk FS format */
51846@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
51847 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
51848
51849 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
51850-#define get_generation(s) atomic_read (&fs_generation(s))
51851+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
51852 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
51853 #define __fs_changed(gen,s) (gen != get_generation (s))
51854 #define fs_changed(gen,s) \
51855diff --git a/fs/select.c b/fs/select.c
51856index 2ef72d9..f213b17 100644
51857--- a/fs/select.c
51858+++ b/fs/select.c
51859@@ -20,6 +20,7 @@
51860 #include <linux/export.h>
51861 #include <linux/slab.h>
51862 #include <linux/poll.h>
51863+#include <linux/security.h>
51864 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
51865 #include <linux/file.h>
51866 #include <linux/fdtable.h>
51867@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
51868 struct poll_list *walk = head;
51869 unsigned long todo = nfds;
51870
51871+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
51872 if (nfds > rlimit(RLIMIT_NOFILE))
51873 return -EINVAL;
51874
51875diff --git a/fs/seq_file.c b/fs/seq_file.c
51876index 99dffab..e4fcb71 100644
51877--- a/fs/seq_file.c
51878+++ b/fs/seq_file.c
51879@@ -10,6 +10,7 @@
51880 #include <linux/seq_file.h>
51881 #include <linux/slab.h>
51882 #include <linux/cred.h>
51883+#include <linux/sched.h>
51884
51885 #include <asm/uaccess.h>
51886 #include <asm/page.h>
51887@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
51888 #ifdef CONFIG_USER_NS
51889 p->user_ns = file->f_cred->user_ns;
51890 #endif
51891+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51892+ p->exec_id = current->exec_id;
51893+#endif
51894
51895 /*
51896 * Wrappers around seq_open(e.g. swaps_open) need to be
51897@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51898 return 0;
51899 }
51900 if (!m->buf) {
51901- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51902+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51903 if (!m->buf)
51904 return -ENOMEM;
51905 }
51906@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
51907 Eoverflow:
51908 m->op->stop(m, p);
51909 kfree(m->buf);
51910- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51911+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51912 return !m->buf ? -ENOMEM : -EAGAIN;
51913 }
51914
51915@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51916
51917 /* grab buffer if we didn't have one */
51918 if (!m->buf) {
51919- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
51920+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
51921 if (!m->buf)
51922 goto Enomem;
51923 }
51924@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
51925 goto Fill;
51926 m->op->stop(m, p);
51927 kfree(m->buf);
51928- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
51929+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
51930 if (!m->buf)
51931 goto Enomem;
51932 m->count = 0;
51933@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
51934 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
51935 void *data)
51936 {
51937- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
51938+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
51939 int res = -ENOMEM;
51940
51941 if (op) {
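
The seq_open() hunk stamps each seq_file with the opener's exec_id, and the per-mm show routines patched earlier (statm, mem, environ, maps, smaps, numa_maps) compare that stamp against current->exec_id. Because grsecurity gives a task a fresh exec_id across execve(), a descriptor opened before a (possibly setuid) exec goes stale and reads back empty instead of leaking the new image's layout. Note that exec_id, gr_log_badprocpid() and the GFP_USERCOPY flag on the buffer allocations are all grsecurity/PaX additions, not mainline interfaces; GFP_USERCOPY appears to mark allocations as legitimate copy-to-user sources for the PAX_USERCOPY checker. The recurring guard:

	#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
		if (current->exec_id != m->exec_id) {	/* fd outlived an execve() */
			gr_log_badprocpid("maps");
			return 0;			/* show nothing */
		}
	#endif
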
51942diff --git a/fs/splice.c b/fs/splice.c
51943index 48c7bd1..d0740e4 100644
51944--- a/fs/splice.c
51945+++ b/fs/splice.c
51946@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51947 pipe_lock(pipe);
51948
51949 for (;;) {
51950- if (!pipe->readers) {
51951+ if (!atomic_read(&pipe->readers)) {
51952 send_sig(SIGPIPE, current, 0);
51953 if (!ret)
51954 ret = -EPIPE;
51955@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
51956 do_wakeup = 0;
51957 }
51958
51959- pipe->waiting_writers++;
51960+ atomic_inc(&pipe->waiting_writers);
51961 pipe_wait(pipe);
51962- pipe->waiting_writers--;
51963+ atomic_dec(&pipe->waiting_writers);
51964 }
51965
51966 pipe_unlock(pipe);
51967@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
51968 old_fs = get_fs();
51969 set_fs(get_ds());
51970 /* The cast to a user pointer is valid due to the set_fs() */
51971- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
51972+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
51973 set_fs(old_fs);
51974
51975 return res;
51976@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
51977 old_fs = get_fs();
51978 set_fs(get_ds());
51979 /* The cast to a user pointer is valid due to the set_fs() */
51980- res = vfs_write(file, (const char __user *)buf, count, &pos);
51981+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
51982 set_fs(old_fs);
51983
51984 return res;
51985@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
51986 goto err;
51987
51988 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
51989- vec[i].iov_base = (void __user *) page_address(page);
51990+ vec[i].iov_base = (void __force_user *) page_address(page);
51991 vec[i].iov_len = this_len;
51992 spd.pages[i] = page;
51993 spd.nr_pages++;
51994@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
51995 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
51996 {
51997 while (!pipe->nrbufs) {
51998- if (!pipe->writers)
51999+ if (!atomic_read(&pipe->writers))
52000 return 0;
52001
52002- if (!pipe->waiting_writers && sd->num_spliced)
52003+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
52004 return 0;
52005
52006 if (sd->flags & SPLICE_F_NONBLOCK)
52007@@ -1192,7 +1192,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
52008 * out of the pipe right after the splice_to_pipe(). So set
52009 * PIPE_READERS appropriately.
52010 */
52011- pipe->readers = 1;
52012+ atomic_set(&pipe->readers, 1);
52013
52014 current->splice_pipe = pipe;
52015 }
52016@@ -1741,9 +1741,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
52017 ret = -ERESTARTSYS;
52018 break;
52019 }
52020- if (!pipe->writers)
52021+ if (!atomic_read(&pipe->writers))
52022 break;
52023- if (!pipe->waiting_writers) {
52024+ if (!atomic_read(&pipe->waiting_writers)) {
52025 if (flags & SPLICE_F_NONBLOCK) {
52026 ret = -EAGAIN;
52027 break;
52028@@ -1775,7 +1775,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
52029 pipe_lock(pipe);
52030
52031 while (pipe->nrbufs >= pipe->buffers) {
52032- if (!pipe->readers) {
52033+ if (!atomic_read(&pipe->readers)) {
52034 send_sig(SIGPIPE, current, 0);
52035 ret = -EPIPE;
52036 break;
52037@@ -1788,9 +1788,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
52038 ret = -ERESTARTSYS;
52039 break;
52040 }
52041- pipe->waiting_writers++;
52042+ atomic_inc(&pipe->waiting_writers);
52043 pipe_wait(pipe);
52044- pipe->waiting_writers--;
52045+ atomic_dec(&pipe->waiting_writers);
52046 }
52047
52048 pipe_unlock(pipe);
52049@@ -1826,14 +1826,14 @@ retry:
52050 pipe_double_lock(ipipe, opipe);
52051
52052 do {
52053- if (!opipe->readers) {
52054+ if (!atomic_read(&opipe->readers)) {
52055 send_sig(SIGPIPE, current, 0);
52056 if (!ret)
52057 ret = -EPIPE;
52058 break;
52059 }
52060
52061- if (!ipipe->nrbufs && !ipipe->writers)
52062+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
52063 break;
52064
52065 /*
52066@@ -1930,7 +1930,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
52067 pipe_double_lock(ipipe, opipe);
52068
52069 do {
52070- if (!opipe->readers) {
52071+ if (!atomic_read(&opipe->readers)) {
52072 send_sig(SIGPIPE, current, 0);
52073 if (!ret)
52074 ret = -EPIPE;
52075@@ -1975,7 +1975,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
52076 * return EAGAIN if we have the potential of some data in the
52077 * future, otherwise just return 0
52078 */
52079- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
52080+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
52081 ret = -EAGAIN;
52082
52083 pipe_unlock(ipipe);
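
The splice.c hunks are one slice of a tree-wide conversion: pipe_inode_info's readers, writers and waiting_writers are plain ints in vanilla 3.7 but become atomic_t under this patch so PAX_REFCOUNT can trap over- and underflow, and every bare increment, decrement and test is rewritten through the atomic API. The shape of the conversion, assuming the patched struct:

	atomic_inc(&pipe->waiting_writers);	/* was: pipe->waiting_writers++ */
	pipe_wait(pipe);
	atomic_dec(&pipe->waiting_writers);	/* was: pipe->waiting_writers-- */

	if (!atomic_read(&pipe->readers))	/* was: if (!pipe->readers) */
		send_sig(SIGPIPE, current, 0);
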
52084diff --git a/fs/stat.c b/fs/stat.c
52085index eae4946..6198f55 100644
52086--- a/fs/stat.c
52087+++ b/fs/stat.c
52088@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
52089 stat->gid = inode->i_gid;
52090 stat->rdev = inode->i_rdev;
52091 stat->size = i_size_read(inode);
52092- stat->atime = inode->i_atime;
52093- stat->mtime = inode->i_mtime;
52094+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
52095+ stat->atime = inode->i_ctime;
52096+ stat->mtime = inode->i_ctime;
52097+ } else {
52098+ stat->atime = inode->i_atime;
52099+ stat->mtime = inode->i_mtime;
52100+ }
52101 stat->ctime = inode->i_ctime;
52102 stat->blksize = (1 << inode->i_blkbits);
52103 stat->blocks = inode->i_blocks;
52104@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
52105 if (retval)
52106 return retval;
52107
52108- if (inode->i_op->getattr)
52109- return inode->i_op->getattr(mnt, dentry, stat);
52110+ if (inode->i_op->getattr) {
52111+ retval = inode->i_op->getattr(mnt, dentry, stat);
52112+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
52113+ stat->atime = stat->ctime;
52114+ stat->mtime = stat->ctime;
52115+ }
52116+ return retval;
52117+ }
52118
52119 generic_fillattr(inode, stat);
52120 return 0;
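
Both generic_fillattr() and vfs_getattr() above apply the same masking rule: for inodes flagged by is_sidechannel_device() (a grsecurity helper, typically covering terminal devices), callers without CAP_MKNOD see ctime echoed back as atime and mtime, so an unprivileged observer polling stat() can no longer infer when another user's tty was last read or written. The rule in isolation (is_sidechannel_device() and capable_nolog() are grsecurity-specific):

	if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
		stat->atime = inode->i_ctime;	/* hide read activity */
		stat->mtime = inode->i_ctime;	/* hide write activity */
	}
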
52121diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
52122index 2fbdff6..5530a61 100644
52123--- a/fs/sysfs/dir.c
52124+++ b/fs/sysfs/dir.c
52125@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
52126 struct sysfs_dirent *sd;
52127 int rc;
52128
52129+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
52130+ const char *parent_name = parent_sd->s_name;
52131+
52132+ mode = S_IFDIR | S_IRWXU;
52133+
52134+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
52135+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
52136+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
52137+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
52138+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
52139+#endif
52140+
52141 /* allocate */
52142 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
52143 if (!sd)
52144diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
52145index 00012e3..8392349 100644
52146--- a/fs/sysfs/file.c
52147+++ b/fs/sysfs/file.c
52148@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
52149
52150 struct sysfs_open_dirent {
52151 atomic_t refcnt;
52152- atomic_t event;
52153+ atomic_unchecked_t event;
52154 wait_queue_head_t poll;
52155 struct list_head buffers; /* goes through sysfs_buffer.list */
52156 };
52157@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
52158 if (!sysfs_get_active(attr_sd))
52159 return -ENODEV;
52160
52161- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
52162+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
52163 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
52164
52165 sysfs_put_active(attr_sd);
52166@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
52167 return -ENOMEM;
52168
52169 atomic_set(&new_od->refcnt, 0);
52170- atomic_set(&new_od->event, 1);
52171+ atomic_set_unchecked(&new_od->event, 1);
52172 init_waitqueue_head(&new_od->poll);
52173 INIT_LIST_HEAD(&new_od->buffers);
52174 goto retry;
52175@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
52176
52177 sysfs_put_active(attr_sd);
52178
52179- if (buffer->event != atomic_read(&od->event))
52180+ if (buffer->event != atomic_read_unchecked(&od->event))
52181 goto trigger;
52182
52183 return DEFAULT_POLLMASK;
52184@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
52185
52186 od = sd->s_attr.open;
52187 if (od) {
52188- atomic_inc(&od->event);
52189+ atomic_inc_unchecked(&od->event);
52190 wake_up_interruptible(&od->poll);
52191 }
52192
52193diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
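
sysfs_open_dirent's event counter moves the opposite way from the pipe counters: it becomes atomic_unchecked_t. Under PAX_REFCOUNT, plain atomic_t overflow is trapped, so counters that are allowed to wrap (event and sequence counters rather than object refcounts) are switched to the _unchecked type and ops, just as the quota netlink sequence number was earlier in the patch. The pattern, assuming the PaX-only type and helpers:

	/* atomic_unchecked_t behaves like atomic_t but opts out of
	 * PAX_REFCOUNT overflow trapping; wrapping here is intentional */
	static atomic_unchecked_t event_seq;

	static int next_event_seq(void)
	{
		return atomic_add_return_unchecked(1, &event_seq);	/* may wrap */
	}
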
52194index 3c9eb56..9dea5be 100644
52195--- a/fs/sysfs/symlink.c
52196+++ b/fs/sysfs/symlink.c
52197@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
52198
52199 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
52200 {
52201- char *page = nd_get_link(nd);
52202+ const char *page = nd_get_link(nd);
52203 if (!IS_ERR(page))
52204 free_page((unsigned long)page);
52205 }
52206diff --git a/fs/udf/misc.c b/fs/udf/misc.c
52207index c175b4d..8f36a16 100644
52208--- a/fs/udf/misc.c
52209+++ b/fs/udf/misc.c
52210@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
52211
52212 u8 udf_tag_checksum(const struct tag *t)
52213 {
52214- u8 *data = (u8 *)t;
52215+ const u8 *data = (const u8 *)t;
52216 u8 checksum = 0;
52217 int i;
52218 for (i = 0; i < sizeof(struct tag); ++i)
52219diff --git a/fs/utimes.c b/fs/utimes.c
52220index bb0696a..552054b 100644
52221--- a/fs/utimes.c
52222+++ b/fs/utimes.c
52223@@ -1,6 +1,7 @@
52224 #include <linux/compiler.h>
52225 #include <linux/file.h>
52226 #include <linux/fs.h>
52227+#include <linux/security.h>
52228 #include <linux/linkage.h>
52229 #include <linux/mount.h>
52230 #include <linux/namei.h>
52231@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
52232 goto mnt_drop_write_and_out;
52233 }
52234 }
52235+
52236+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
52237+ error = -EACCES;
52238+ goto mnt_drop_write_and_out;
52239+ }
52240+
52241 mutex_lock(&inode->i_mutex);
52242 error = notify_change(path->dentry, &newattrs);
52243 mutex_unlock(&inode->i_mutex);
52244diff --git a/fs/xattr.c b/fs/xattr.c
52245index e21c119..21dfc7c 100644
52246--- a/fs/xattr.c
52247+++ b/fs/xattr.c
52248@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
52249 * Extended attribute SET operations
52250 */
52251 static long
52252-setxattr(struct dentry *d, const char __user *name, const void __user *value,
52253+setxattr(struct path *path, const char __user *name, const void __user *value,
52254 size_t size, int flags)
52255 {
52256 int error;
52257@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
52258 posix_acl_fix_xattr_from_user(kvalue, size);
52259 }
52260
52261- error = vfs_setxattr(d, kname, kvalue, size, flags);
52262+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
52263+ error = -EACCES;
52264+ goto out;
52265+ }
52266+
52267+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
52268 out:
52269 if (vvalue)
52270 vfree(vvalue);
52271@@ -376,7 +381,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
52272 return error;
52273 error = mnt_want_write(path.mnt);
52274 if (!error) {
52275- error = setxattr(path.dentry, name, value, size, flags);
52276+ error = setxattr(&path, name, value, size, flags);
52277 mnt_drop_write(path.mnt);
52278 }
52279 path_put(&path);
52280@@ -395,7 +400,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
52281 return error;
52282 error = mnt_want_write(path.mnt);
52283 if (!error) {
52284- error = setxattr(path.dentry, name, value, size, flags);
52285+ error = setxattr(&path, name, value, size, flags);
52286 mnt_drop_write(path.mnt);
52287 }
52288 path_put(&path);
52289@@ -406,16 +411,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
52290 const void __user *,value, size_t, size, int, flags)
52291 {
52292 struct fd f = fdget(fd);
52293- struct dentry *dentry;
52294 int error = -EBADF;
52295
52296 if (!f.file)
52297 return error;
52298- dentry = f.file->f_path.dentry;
52299- audit_inode(NULL, dentry, 0);
52300+ audit_inode(NULL, f.file->f_path.dentry, 0);
52301 error = mnt_want_write_file(f.file);
52302 if (!error) {
52303- error = setxattr(dentry, name, value, size, flags);
52304+ error = setxattr(&f.file->f_path, name, value, size, flags);
52305 mnt_drop_write_file(f.file);
52306 }
52307 fdput(f);
52308diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
52309index 9fbea87..6b19972 100644
52310--- a/fs/xattr_acl.c
52311+++ b/fs/xattr_acl.c
52312@@ -76,8 +76,8 @@ struct posix_acl *
52313 posix_acl_from_xattr(struct user_namespace *user_ns,
52314 const void *value, size_t size)
52315 {
52316- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
52317- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
52318+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
52319+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
52320 int count;
52321 struct posix_acl *acl;
52322 struct posix_acl_entry *acl_e;
52323diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
52324index 83d0cf3..2ef526b 100644
52325--- a/fs/xfs/xfs_bmap.c
52326+++ b/fs/xfs/xfs_bmap.c
52327@@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
52328 int nmap,
52329 int ret_nmap);
52330 #else
52331-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
52332+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
52333 #endif /* DEBUG */
52334
52335 STATIC int
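
The xfs_bmap_validate_ret() change is the standard kernel idiom for a no-op macro: an empty expansion turns the call plus its semicolon into a bare null statement, which trips -Wempty-body warnings and parses differently from the DEBUG build inside unbraced if/else. Wrapping the empty body in do { } while (0) keeps the macro a single complete statement that requires its trailing semicolon. A minimal, self-contained illustration with a hypothetical macro name:

#include <stdio.h>

/* Empty-bodied statement macro done right: the do/while (0) wrapper
 * makes the expansion one complete statement, so call sites parse the
 * same way whether the macro is a no-op or a real function call. */
#define validate_ret(x)	do { } while (0)

int main(void)
{
	int debug = 0;

	if (debug)
		validate_ret(42);	/* one statement, ';' required */
	else
		printf("fast path\n");
	return 0;
}
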
52336diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
52337index 1b9fc3e..e1bdde0 100644
52338--- a/fs/xfs/xfs_dir2_sf.c
52339+++ b/fs/xfs/xfs_dir2_sf.c
52340@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
52341 }
52342
52343 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
52344- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52345+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
52346+ char name[sfep->namelen];
52347+ memcpy(name, sfep->name, sfep->namelen);
52348+ if (filldir(dirent, name, sfep->namelen,
52349+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
52350+ *offset = off & 0x7fffffff;
52351+ return 0;
52352+ }
52353+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
52354 off & 0x7fffffff, ino, DT_UNKNOWN)) {
52355 *offset = off & 0x7fffffff;
52356 return 0;
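
In the xfs_dir2_sf_getdents() hunk, shortform directory entries live in if_inline_data, i.e. inside the xfs_inode object itself. When that is the case, the name is bounced through an on-stack buffer before filldir() copies it to userspace, so hardened usercopy checks (PAX_USERCOPY, added elsewhere in this patch) see a stack buffer rather than an interior pointer into a slab object. A sketch of the bounce pattern in isolation, with illustrative names:

#include <linux/fs.h>
#include <linux/string.h>

/* Sketch: copy a slab-resident name to the stack before letting
 * filldir() copy it out to userspace. namelen is trusted here, as it
 * is for XFS shortform entries (at most 255 bytes). */
static int emit_dirent(void *dirent, filldir_t filldir,
		       const unsigned char *slab_name, int namelen,
		       loff_t off, u64 ino)
{
	char name[256];

	if (namelen > (int)sizeof(name))
		return -EINVAL;
	memcpy(name, slab_name, namelen);
	return filldir(dirent, name, namelen, off, ino, DT_UNKNOWN);
}
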
52357diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
52358index c1df3c6..f987db6 100644
52359--- a/fs/xfs/xfs_ioctl.c
52360+++ b/fs/xfs/xfs_ioctl.c
52361@@ -126,7 +126,7 @@ xfs_find_handle(
52362 }
52363
52364 error = -EFAULT;
52365- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
52366+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
52367 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
52368 goto out_put;
52369
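
The xfs_find_handle() hunk adds a bounds check before copying out: hsize is influenced by the request, while handle is a fixed-size object on the kernel stack, so an oversized hsize would read adjacent stack memory out to userspace. The defensive pattern, reduced to its essentials (names are illustrative):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Sketch: clamp a caller-influenced length against the real size of
 * the kernel object before copy_to_user(), closing a stack infoleak. */
static int copy_out_bounded(void __user *dst, const void *src,
			    size_t req_len, size_t obj_len)
{
	if (req_len > obj_len)
		return -EINVAL;		/* would leak bytes past 'src' */
	if (copy_to_user(dst, src, req_len))
		return -EFAULT;
	return 0;
}
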
52370diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
52371index 4e00cf0..3374374 100644
52372--- a/fs/xfs/xfs_iops.c
52373+++ b/fs/xfs/xfs_iops.c
52374@@ -394,7 +394,7 @@ xfs_vn_put_link(
52375 struct nameidata *nd,
52376 void *p)
52377 {
52378- char *s = nd_get_link(nd);
52379+ const char *s = nd_get_link(nd);
52380
52381 if (!IS_ERR(s))
52382 kfree(s);
52383diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
52384new file mode 100644
52385index 0000000..92247e4
52386--- /dev/null
52387+++ b/grsecurity/Kconfig
52388@@ -0,0 +1,1021 @@
52389+#
52390+# grsecurity configuration
52391+#
52392+menu "Memory Protections"
52393+depends on GRKERNSEC
52394+
52395+config GRKERNSEC_KMEM
52396+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
52397+ default y if GRKERNSEC_CONFIG_AUTO
52398+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52399+ help
52400+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52401+ be written to or read from to modify or leak the contents of the running
52402+ kernel. /dev/port will also not be allowed to be opened and support
52403+ for /dev/cpu/*/msr will be removed. If you have module
52404+ support disabled, enabling this will close up five ways that are
52405+ currently used to insert malicious code into the running kernel.
52406+
52407+ Even with all these features enabled, we still highly recommend that
52408+ you use the RBAC system, as it is still possible for an attacker to
52409+ modify the running kernel through privileged I/O granted by ioperm/iopl.
52410+
52411+ If you are not using XFree86, you may be able to stop this additional
52412+ case by enabling the 'Disable privileged I/O' option. Though nothing
52413+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52414+ but only to video memory, which is the only writing we allow in this
52415+ case. If /dev/kmem or /dev/mem are mmapped without PROT_WRITE, the
52416+ resulting mappings cannot later be mprotect()ed to add PROT_WRITE.
52417+ Enabling this feature will prevent the "cpupower" and "powertop" tools
52418+ from working.
52419+
52420+ It is highly recommended that you say Y here if you meet all the
52421+ conditions above.
52422+
52423+config GRKERNSEC_VM86
52424+ bool "Restrict VM86 mode"
52425+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52426+ depends on X86_32
52427+
52428+ help
52429+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52430+ make use of a special execution mode on 32bit x86 processors called
52431+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52432+ video cards and will still work with this option enabled. The purpose
52433+ of the option is to prevent exploitation of emulation errors in
52434+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
52435+ Nearly all users should be able to enable this option.
52436+
52437+config GRKERNSEC_IO
52438+ bool "Disable privileged I/O"
52439+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52440+ depends on X86
52441+ select RTC_CLASS
52442+ select RTC_INTF_DEV
52443+ select RTC_DRV_CMOS
52444+
52445+ help
52446+ If you say Y here, all ioperm and iopl calls will return an error.
52447+ Ioperm and iopl can be used to modify the running kernel.
52448+ Unfortunately, some programs need this access to operate properly,
52449+ the most notable of which are XFree86 and hwclock. The hwclock case
52450+ can be remedied by having RTC support in the kernel, so real-time
52451+ clock support is enabled if this option is enabled, to ensure
52452+ that hwclock operates correctly. XFree86 still will not
52453+ operate correctly with this option enabled, so DO NOT CHOOSE Y
52454+ IF YOU USE XFree86. If you use XFree86 and you still want to
52455+ protect your kernel against modification, use the RBAC system.
52456+
52457+config GRKERNSEC_JIT_HARDEN
52458+ bool "Harden BPF JIT against spray attacks"
52459+ default y if GRKERNSEC_CONFIG_AUTO
52460+ depends on BPF_JIT
52461+ help
52462+ If you say Y here, the native code generated by the kernel's Berkeley
52463+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
52464+ attacks that attempt to fit attacker-beneficial instructions in
52465+ 32bit immediate fields of JIT-generated native instructions. The
52466+ attacker will generally aim to cause an unintended instruction sequence
52467+ of JIT-generated native code to execute by jumping into the middle of
52468+ a generated instruction. This feature effectively randomizes the 32bit
52469+ immediate constants present in the generated code to thwart such attacks.
52470+
52471+ If you're using KERNEXEC, it's recommended that you enable this option
52472+ to supplement the hardening of the kernel.
52473+
52474+config GRKERNSEC_RAND_THREADSTACK
52475+ bool "Insert random gaps between thread stacks"
52476+ default y if GRKERNSEC_CONFIG_AUTO
52477+ depends on PAX_RANDMMAP && !PPC
52478+ help
52479+ If you say Y here, a random-sized gap will be enforced between allocated
52480+ thread stacks. Glibc's NPTL and other threading libraries that
52481+ pass MAP_STACK to the kernel for thread stack allocation are supported.
52482+ The implementation currently provides 8 bits of entropy for the gap.
52483+
52484+ Many distributions do not compile threaded remote services with the
52485+ -fstack-check argument to GCC, causing the variable-sized stack-based
52486+ allocator, alloca(), to not probe the stack on allocation. This
52487+ permits an unbounded alloca() to skip over any guard page and potentially
52488+ modify another thread's stack reliably. An enforced random gap
52489+ reduces the reliability of such an attack and increases the chance
52490+ that such a read/write to another thread's stack instead lands in
52491+ an unmapped area, causing a crash and triggering grsecurity's
52492+ anti-bruteforcing logic.
52493+
52494+config GRKERNSEC_PROC_MEMMAP
52495+ bool "Harden ASLR against information leaks and entropy reduction"
52496+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
52497+ depends on PAX_NOEXEC || PAX_ASLR
52498+ help
52499+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52500+ give no information about the addresses of a task's mappings if
52501+ PaX features that rely on random addresses are enabled on the task.
52502+ In addition to sanitizing this information and disabling other
52503+ dangerous sources of information, this option causes reads of sensitive
52504+ /proc/<pid> entries to be denied where the file descriptor was opened in
52505+ a different task than the one performing the read. Such attempts are logged.
52506+ This option also limits argv/env strings for suid/sgid binaries
52507+ to 512KB to prevent a complete exhaustion of the stack entropy provided
52508+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
52509+ binaries to prevent alternative mmap layouts from being abused.
52510+
52511+ If you use PaX it is essential that you say Y here as it closes up
52512+ several holes that make full ASLR useless locally.
52513+
52514+config GRKERNSEC_BRUTE
52515+ bool "Deter exploit bruteforcing"
52516+ default y if GRKERNSEC_CONFIG_AUTO
52517+ help
52518+ If you say Y here, attempts to bruteforce exploits against forking
52519+ daemons such as apache or sshd, as well as against suid/sgid binaries,
52520+ will be deterred. When a child of a forking daemon is killed by PaX
52521+ or crashes due to an illegal instruction or other suspicious signal,
52522+ the parent process will be delayed 30 seconds upon every subsequent
52523+ fork until the administrator is able to assess the situation and
52524+ restart the daemon.
52525+ In the suid/sgid case, the attempt is logged, the user has all their
52526+ processes terminated, and they are prevented from executing any further
52527+ processes for 15 minutes.
52528+ It is recommended that you also enable signal logging in the auditing
52529+ section so that logs are generated when a process triggers a suspicious
52530+ signal.
52531+ If the sysctl option is enabled, a sysctl option with name
52532+ "deter_bruteforce" is created.
52533+
52534+
52535+config GRKERNSEC_MODHARDEN
52536+ bool "Harden module auto-loading"
52537+ default y if GRKERNSEC_CONFIG_AUTO
52538+ depends on MODULES
52539+ help
52540+ If you say Y here, module auto-loading in response to use of some
52541+ feature implemented by an unloaded module will be restricted to
52542+ root users. Enabling this option helps defend against attacks
52543+ by unprivileged users who abuse the auto-loading behavior to
52544+ cause a vulnerable module to load that is then exploited.
52545+
52546+ If this option prevents a legitimate use of auto-loading for a
52547+ non-root user, the administrator can execute modprobe manually
52548+ with the exact name of the module mentioned in the alert log.
52549+ Alternatively, the administrator can add the module to the list
52550+ of modules loaded at boot by modifying init scripts.
52551+
52552+ Modification of init scripts will most likely be needed on
52553+ Ubuntu servers with encrypted home directory support enabled,
52554+ as the first non-root user logging in will cause the ecb(aes),
52555+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52556+
52557+config GRKERNSEC_HIDESYM
52558+ bool "Hide kernel symbols"
52559+ default y if GRKERNSEC_CONFIG_AUTO
52560+ select PAX_USERCOPY_SLABS
52561+ help
52562+ If you say Y here, getting information on loaded modules and
52563+ displaying all kernel symbols through a syscall will be restricted
52564+ to users with CAP_SYS_MODULE. For software compatibility reasons,
52565+ /proc/kallsyms will be restricted to the root user. The RBAC
52566+ system can hide that entry even from root.
52567+
52568+ This option also prevents leaking of kernel addresses through
52569+ several /proc entries.
52570+
52571+ Note that this option is only effective provided the following
52572+ conditions are met:
52573+ 1) The kernel using grsecurity is not precompiled by some distribution
52574+ 2) You have also enabled GRKERNSEC_DMESG
52575+ 3) You are using the RBAC system and hiding other files such as your
52576+ kernel image and System.map. Alternatively, enabling this option
52577+ causes the permissions on /boot, /lib/modules, and the kernel
52578+ source directory to change at compile time to prevent
52579+ reading by non-root users.
52580+ If the above conditions are met, this option will aid in providing a
52581+ useful protection against local kernel exploitation of overflows
52582+ and arbitrary read/write vulnerabilities.
52583+
52584+config GRKERNSEC_KERN_LOCKOUT
52585+ bool "Active kernel exploit response"
52586+ default y if GRKERNSEC_CONFIG_AUTO
52587+ depends on X86 || ARM || PPC || SPARC
52588+ help
52589+ If you say Y here, when a PaX alert is triggered due to suspicious
52590+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52591+ or an OOPS occurs due to bad memory accesses, instead of just
52592+ terminating the offending process (and potentially allowing
52593+ a subsequent exploit from the same user), we will take one of two
52594+ actions:
52595+ If the user was root, we will panic the system.
52596+ If the user was non-root, we will log the attempt, terminate
52597+ all processes owned by the user, then prevent them from creating
52598+ any new processes until the system is restarted.
52599+ This deters repeated kernel exploitation/bruteforcing attempts
52600+ and is useful for later forensics.
52601+
52602+endmenu
52603+menu "Role Based Access Control Options"
52604+depends on GRKERNSEC
52605+
52606+config GRKERNSEC_RBAC_DEBUG
52607+ bool
52608+
52609+config GRKERNSEC_NO_RBAC
52610+ bool "Disable RBAC system"
52611+ help
52612+ If you say Y here, the /dev/grsec device will be removed from the kernel,
52613+ preventing the RBAC system from being enabled. You should only say Y
52614+ here if you have no intention of using the RBAC system, so as to prevent
52615+ an attacker with root access from misusing the RBAC system to hide files
52616+ and processes when loadable module support and /dev/[k]mem have been
52617+ locked down.
52618+
52619+config GRKERNSEC_ACL_HIDEKERN
52620+ bool "Hide kernel processes"
52621+ help
52622+ If you say Y here, all kernel threads will be hidden from all
52623+ processes but those whose subject has the "view hidden processes"
52624+ flag.
52625+
52626+config GRKERNSEC_ACL_MAXTRIES
52627+ int "Maximum tries before password lockout"
52628+ default 3
52629+ help
52630+ This option enforces the maximum number of times a user can attempt
52631+ to authorize themselves with the grsecurity RBAC system before being
52632+ denied the ability to attempt authorization again for a specified time.
52633+ The lower the number, the harder it will be to brute-force a password.
52634+
52635+config GRKERNSEC_ACL_TIMEOUT
52636+ int "Time to wait after max password tries, in seconds"
52637+ default 30
52638+ help
52639+ This option specifies the time the user must wait after attempting to
52640+ authorize to the RBAC system with the maximum number of invalid
52641+ passwords. The higher the number, the harder it will be to brute-force
52642+ a password.
52643+
52644+endmenu
52645+menu "Filesystem Protections"
52646+depends on GRKERNSEC
52647+
52648+config GRKERNSEC_PROC
52649+ bool "Proc restrictions"
52650+ default y if GRKERNSEC_CONFIG_AUTO
52651+ help
52652+ If you say Y here, the permissions of the /proc filesystem
52653+ will be altered to enhance system security and privacy. You MUST
52654+ choose either a user only restriction or a user and group restriction.
52655+ Depending upon the option you choose, you can either restrict users to
52656+ see only the processes they themselves run, or choose a group that can
52657+ view all processes and files normally restricted to root if you choose
52658+ the "restrict to user only" option. NOTE: If you're running identd or
52659+ ntpd as a non-root user, you will have to run it as the group you
52660+ specify here.
52661+
52662+config GRKERNSEC_PROC_USER
52663+ bool "Restrict /proc to user only"
52664+ depends on GRKERNSEC_PROC
52665+ help
52666+ If you say Y here, non-root users will only be able to view their own
52667+ processes, and will be restricted from viewing network-related
52668+ information and kernel symbol and module information.
52669+
52670+config GRKERNSEC_PROC_USERGROUP
52671+ bool "Allow special group"
52672+ default y if GRKERNSEC_CONFIG_AUTO
52673+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52674+ help
52675+ If you say Y here, you will be able to select a group that will be
52676+ able to view all processes and network-related information. If you've
52677+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52678+ remain hidden. This option is useful if you want to run identd as
52679+ a non-root user. The group you select may also be chosen at boot time
52680+ via "grsec_proc_gid=" on the kernel command line.
52681+
52682+config GRKERNSEC_PROC_GID
52683+ int "GID for special group"
52684+ depends on GRKERNSEC_PROC_USERGROUP
52685+ default 1001
52686+
52687+config GRKERNSEC_PROC_ADD
52688+ bool "Additional restrictions"
52689+ default y if GRKERNSEC_CONFIG_AUTO
52690+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52691+ help
52692+ If you say Y here, additional restrictions will be placed on
52693+ /proc that keep normal users from viewing device information and
52694+ slabinfo information that could be useful for exploits.
52695+
52696+config GRKERNSEC_LINK
52697+ bool "Linking restrictions"
52698+ default y if GRKERNSEC_CONFIG_AUTO
52699+ help
52700+ If you say Y here, /tmp race exploits will be prevented, since users
52701+ will no longer be able to follow symlinks owned by other users in
52702+ world-writable +t directories (e.g. /tmp), unless the owner of the
52703+ symlink is the owner of the directory. Users will also not be
52704+ able to hardlink to files they do not own. If the sysctl option is
52705+ enabled, a sysctl option with name "linking_restrictions" is created.
52706+
52707+config GRKERNSEC_SYMLINKOWN
52708+ bool "Kernel-enforced SymlinksIfOwnerMatch"
52709+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
52710+ help
52711+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
52712+ that prevents it from being used as a security feature. As Apache
52713+ verifies the symlink by performing a stat() against the target of
52714+ the symlink before it is followed, an attacker can set up a symlink
52715+ to point to a same-owned file, then replace the symlink with one
52716+ that targets another user's file just after Apache "validates" the
52717+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
52718+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
52719+ will be in place for the group you specify. If the sysctl option
52720+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
52721+ created.
52722+
52723+config GRKERNSEC_SYMLINKOWN_GID
52724+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
52725+ depends on GRKERNSEC_SYMLINKOWN
52726+ default 1006
52727+ help
52728+ Setting this GID determines what group kernel-enforced
52729+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
52730+ is enabled, a sysctl option with name "symlinkown_gid" is created.
52731+
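
The point of doing SymlinksIfOwnerMatch in the kernel is that the comparison can happen at traversal time, when both the symlink inode and the inode it resolved to are pinned, leaving no window in which to retarget the link. Conceptually the check reduces to the sketch below; the helper name is illustrative, not the patch's actual hook:

#include <linux/fs.h>
#include <linux/uidgid.h>

/* Sketch: race-free owner-match test, performed after the path walk
 * has resolved the symlink, so the attacker cannot swap it between
 * check and use. */
static int symlink_owner_matches(const struct inode *link,
				 const struct inode *target)
{
	return uid_eq(link->i_uid, target->i_uid);
}
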
52732+config GRKERNSEC_FIFO
52733+ bool "FIFO restrictions"
52734+ default y if GRKERNSEC_CONFIG_AUTO
52735+ help
52736+ If you say Y here, users will not be able to write to FIFOs they don't
52737+ own in world-writable +t directories (e.g. /tmp), unless the owner of
52738+ the FIFO is the same as the owner of the directory it's held in. If the
52739+ sysctl
52739+ option is enabled, a sysctl option with name "fifo_restrictions" is
52740+ created.
52741+
52742+config GRKERNSEC_SYSFS_RESTRICT
52743+ bool "Sysfs/debugfs restriction"
52744+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
52745+ depends on SYSFS
52746+ help
52747+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52748+ any filesystem normally mounted under it (e.g. debugfs) will be
52749+ mostly accessible only by root. These filesystems generally provide access
52750+ to hardware and debug information that isn't appropriate for unprivileged
52751+ users of the system. Sysfs and debugfs have also become a large source
52752+ of new vulnerabilities, ranging from infoleaks to local compromise.
52753+ There has been very little oversight with an eye toward security involved
52754+ in adding new exporters of information to these filesystems, so their
52755+ use is discouraged.
52756+ For reasons of compatibility, a few directories have been whitelisted
52757+ for access by non-root users:
52758+ /sys/fs/selinux
52759+ /sys/fs/fuse
52760+ /sys/devices/system/cpu
52761+
52762+config GRKERNSEC_ROFS
52763+ bool "Runtime read-only mount protection"
52764+ help
52765+ If you say Y here, a sysctl option with name "romount_protect" will
52766+ be created. By setting this option to 1 at runtime, filesystems
52767+ will be protected in the following ways:
52768+ * No new writable mounts will be allowed
52769+ * Existing read-only mounts won't be able to be remounted read/write
52770+ * Write operations will be denied on all block devices
52771+ This option acts independently of grsec_lock: once it is set to 1,
52772+ it cannot be turned off. Therefore, please be mindful of the resulting
52773+ behavior if this option is enabled in an init script on a read-only
52774+ filesystem. This feature is mainly intended for secure embedded systems.
52775+
52776+config GRKERNSEC_DEVICE_SIDECHANNEL
52777+ bool "Eliminate stat/notify-based device sidechannels"
52778+ default y if GRKERNSEC_CONFIG_AUTO
52779+ help
52780+ If you say Y here, timing analyses on block or character
52781+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
52782+ will be thwarted for unprivileged users. If a process without
52783+ CAP_MKNOD stats such a device, the last access and last modify times
52784+ will match the device's create time. No access or modify events
52785+ will be triggered through inotify/dnotify/fanotify for such devices.
52786+ This feature will prevent attacks that may at a minimum
52787+ allow an attacker to determine the administrator's password length.
52788+
52789+config GRKERNSEC_CHROOT
52790+ bool "Chroot jail restrictions"
52791+ default y if GRKERNSEC_CONFIG_AUTO
52792+ help
52793+ If you say Y here, you will be able to choose several options that will
52794+ make breaking out of a chrooted jail much more difficult. If you
52795+ encounter no software incompatibilities with the following options, it
52796+ is recommended that you enable each one.
52797+
52798+config GRKERNSEC_CHROOT_MOUNT
52799+ bool "Deny mounts"
52800+ default y if GRKERNSEC_CONFIG_AUTO
52801+ depends on GRKERNSEC_CHROOT
52802+ help
52803+ If you say Y here, processes inside a chroot will not be able to
52804+ mount or remount filesystems. If the sysctl option is enabled, a
52805+ sysctl option with name "chroot_deny_mount" is created.
52806+
52807+config GRKERNSEC_CHROOT_DOUBLE
52808+ bool "Deny double-chroots"
52809+ default y if GRKERNSEC_CONFIG_AUTO
52810+ depends on GRKERNSEC_CHROOT
52811+ help
52812+ If you say Y here, processes inside a chroot will not be able to chroot
52813+ again outside the chroot. This is a widely used method of breaking
52814+ out of a chroot jail and should not be allowed. If the sysctl
52815+ option is enabled, a sysctl option with name
52816+ "chroot_deny_chroot" is created.
52817+
52818+config GRKERNSEC_CHROOT_PIVOT
52819+ bool "Deny pivot_root in chroot"
52820+ default y if GRKERNSEC_CONFIG_AUTO
52821+ depends on GRKERNSEC_CHROOT
52822+ help
52823+ If you say Y here, processes inside a chroot will not be able to use
52824+ a function called pivot_root() that was introduced in Linux 2.3.41. It
52825+ works similarly to chroot in that it changes the root filesystem. This
52826+ function could be misused in a chrooted process to attempt to break out
52827+ of the chroot, and therefore should not be allowed. If the sysctl
52828+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
52829+ created.
52830+
52831+config GRKERNSEC_CHROOT_CHDIR
52832+ bool "Enforce chdir(\"/\") on all chroots"
52833+ default y if GRKERNSEC_CONFIG_AUTO
52834+ depends on GRKERNSEC_CHROOT
52835+ help
52836+ If you say Y here, the current working directory of all newly-chrooted
52837+ applications will be set to the root directory of the chroot.
52838+ The man page on chroot(2) states:
52839+ Note that this call does not change the current working
52840+ directory, so that `.' can be outside the tree rooted at
52841+ `/'. In particular, the super-user can escape from a
52842+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52843+
52844+ It is recommended that you say Y here, since it's not known to break
52845+ any software. If the sysctl option is enabled, a sysctl option with
52846+ name "chroot_enforce_chdir" is created.
52847+
52848+config GRKERNSEC_CHROOT_CHMOD
52849+ bool "Deny (f)chmod +s"
52850+ default y if GRKERNSEC_CONFIG_AUTO
52851+ depends on GRKERNSEC_CHROOT
52852+ help
52853+ If you say Y here, processes inside a chroot will not be able to chmod
52854+ or fchmod files to make them have suid or sgid bits. This protects
52855+ against another published method of breaking a chroot. If the sysctl
52856+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
52857+ created.
52858+
52859+config GRKERNSEC_CHROOT_FCHDIR
52860+ bool "Deny fchdir out of chroot"
52861+ default y if GRKERNSEC_CONFIG_AUTO
52862+ depends on GRKERNSEC_CHROOT
52863+ help
52864+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
52865+ to a file descriptor of the chrooting process that points to a directory
52866+ outside the filesystem will be stopped. If the sysctl option
52867+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52868+
52869+config GRKERNSEC_CHROOT_MKNOD
52870+ bool "Deny mknod"
52871+ default y if GRKERNSEC_CONFIG_AUTO
52872+ depends on GRKERNSEC_CHROOT
52873+ help
52874+ If you say Y here, processes inside a chroot will not be allowed to
52875+ mknod. The problem with using mknod inside a chroot is that it
52876+ would allow an attacker to create a device entry that is the same
52877+ as one on the physical root of your system, which could be anything
52878+ from the console device to a device for your hard drive (which
52879+ they could then use to wipe the drive or steal data). It is recommended
52880+ that you say Y here, unless you run into software incompatibilities.
52881+ If the sysctl option is enabled, a sysctl option with name
52882+ "chroot_deny_mknod" is created.
52883+
52884+config GRKERNSEC_CHROOT_SHMAT
52885+ bool "Deny shmat() out of chroot"
52886+ default y if GRKERNSEC_CONFIG_AUTO
52887+ depends on GRKERNSEC_CHROOT
52888+ help
52889+ If you say Y here, processes inside a chroot will not be able to attach
52890+ to shared memory segments that were created outside of the chroot jail.
52891+ It is recommended that you say Y here. If the sysctl option is enabled,
52892+ a sysctl option with name "chroot_deny_shmat" is created.
52893+
52894+config GRKERNSEC_CHROOT_UNIX
52895+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
52896+ default y if GRKERNSEC_CONFIG_AUTO
52897+ depends on GRKERNSEC_CHROOT
52898+ help
52899+ If you say Y here, processes inside a chroot will not be able to
52900+ connect to abstract (meaning not belonging to a filesystem) Unix
52901+ domain sockets that were bound outside of a chroot. It is recommended
52902+ that you say Y here. If the sysctl option is enabled, a sysctl option
52903+ with name "chroot_deny_unix" is created.
52904+
52905+config GRKERNSEC_CHROOT_FINDTASK
52906+ bool "Protect outside processes"
52907+ default y if GRKERNSEC_CONFIG_AUTO
52908+ depends on GRKERNSEC_CHROOT
52909+ help
52910+ If you say Y here, processes inside a chroot will not be able to
52911+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52912+ getsid, or view any process outside of the chroot. If the sysctl
52913+ option is enabled, a sysctl option with name "chroot_findtask" is
52914+ created.
52915+
52916+config GRKERNSEC_CHROOT_NICE
52917+ bool "Restrict priority changes"
52918+ default y if GRKERNSEC_CONFIG_AUTO
52919+ depends on GRKERNSEC_CHROOT
52920+ help
52921+ If you say Y here, processes inside a chroot will not be able to raise
52922+ the priority of processes in the chroot, or alter the priority of
52923+ processes outside the chroot. This provides more security than simply
52924+ removing CAP_SYS_NICE from the process' capability set. If the
52925+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52926+ is created.
52927+
52928+config GRKERNSEC_CHROOT_SYSCTL
52929+ bool "Deny sysctl writes"
52930+ default y if GRKERNSEC_CONFIG_AUTO
52931+ depends on GRKERNSEC_CHROOT
52932+ help
52933+ If you say Y here, an attacker in a chroot will not be able to
52934+ write to sysctl entries, either by sysctl(2) or through a /proc
52935+ interface. It is strongly recommended that you say Y here. If the
52936+ sysctl option is enabled, a sysctl option with name
52937+ "chroot_deny_sysctl" is created.
52938+
52939+config GRKERNSEC_CHROOT_CAPS
52940+ bool "Capability restrictions"
52941+ default y if GRKERNSEC_CONFIG_AUTO
52942+ depends on GRKERNSEC_CHROOT
52943+ help
52944+ If you say Y here, the capabilities on all processes within a
52945+ chroot jail will be lowered to stop module insertion, raw i/o,
52946+ system and net admin tasks, rebooting the system, modifying immutable
52947+ files, modifying IPC owned by another, and changing the system time.
52948+ This is left as an option because it can break some apps. Disable this
52949+ if your chrooted apps are having problems performing those kinds of
52950+ tasks. If the sysctl option is enabled, a sysctl option with
52951+ name "chroot_caps" is created.
52952+
52953+endmenu
52954+menu "Kernel Auditing"
52955+depends on GRKERNSEC
52956+
52957+config GRKERNSEC_AUDIT_GROUP
52958+ bool "Single group for auditing"
52959+ help
52960+ If you say Y here, the exec and chdir logging features will only operate
52961+ on a group you specify. This option is recommended if you only want to
52962+ watch certain users instead of having a large amount of logs from the
52963+ entire system. If the sysctl option is enabled, a sysctl option with
52964+ name "audit_group" is created.
52965+
52966+config GRKERNSEC_AUDIT_GID
52967+ int "GID for auditing"
52968+ depends on GRKERNSEC_AUDIT_GROUP
52969+ default 1007
52970+
52971+config GRKERNSEC_EXECLOG
52972+ bool "Exec logging"
52973+ help
52974+ If you say Y here, all execve() calls will be logged (since the
52975+ other exec*() calls are frontends to execve(), all execution
52976+ will be logged). Useful for shell-servers that like to keep track
52977+ of their users. If the sysctl option is enabled, a sysctl option with
52978+ name "exec_logging" is created.
52979+ WARNING: This option when enabled will produce a LOT of logs, especially
52980+ on an active system.
52981+
52982+config GRKERNSEC_RESLOG
52983+ bool "Resource logging"
52984+ default y if GRKERNSEC_CONFIG_AUTO
52985+ help
52986+ If you say Y here, all attempts to overstep resource limits will
52987+ be logged with the resource name, the requested size, and the current
52988+ limit. It is highly recommended that you say Y here. If the sysctl
52989+ option is enabled, a sysctl option with name "resource_logging" is
52990+ created. If the RBAC system is enabled, the sysctl value is ignored.
52991+
52992+config GRKERNSEC_CHROOT_EXECLOG
52993+ bool "Log execs within chroot"
52994+ help
52995+ If you say Y here, all executions inside a chroot jail will be logged
52996+ to syslog. This can cause a large amount of logs if certain
52997+ applications (e.g. djb's daemontools) are installed on the system, and
52998+ is therefore left as an option. If the sysctl option is enabled, a
52999+ sysctl option with name "chroot_execlog" is created.
53000+
53001+config GRKERNSEC_AUDIT_PTRACE
53002+ bool "Ptrace logging"
53003+ help
53004+ If you say Y here, all attempts to attach to a process via ptrace
53005+ will be logged. If the sysctl option is enabled, a sysctl option
53006+ with name "audit_ptrace" is created.
53007+
53008+config GRKERNSEC_AUDIT_CHDIR
53009+ bool "Chdir logging"
53010+ help
53011+ If you say Y here, all chdir() calls will be logged. If the sysctl
53012+ option is enabled, a sysctl option with name "audit_chdir" is created.
53013+
53014+config GRKERNSEC_AUDIT_MOUNT
53015+ bool "(Un)Mount logging"
53016+ help
53017+ If you say Y here, all mounts and unmounts will be logged. If the
53018+ sysctl option is enabled, a sysctl option with name "audit_mount" is
53019+ created.
53020+
53021+config GRKERNSEC_SIGNAL
53022+ bool "Signal logging"
53023+ default y if GRKERNSEC_CONFIG_AUTO
53024+ help
53025+ If you say Y here, certain important signals will be logged, such as
53026+ SIGSEGV, which will as a result inform you when an error in a program
53027+ occurred, which in some cases could mean a possible exploit attempt.
53028+ If the sysctl option is enabled, a sysctl option with name
53029+ "signal_logging" is created.
53030+
53031+config GRKERNSEC_FORKFAIL
53032+ bool "Fork failure logging"
53033+ help
53034+ If you say Y here, all failed fork() attempts will be logged.
53035+ This could suggest a fork bomb, or someone attempting to overstep
53036+ their process limit. If the sysctl option is enabled, a sysctl option
53037+ with name "forkfail_logging" is created.
53038+
53039+config GRKERNSEC_TIME
53040+ bool "Time change logging"
53041+ default y if GRKERNSEC_CONFIG_AUTO
53042+ help
53043+ If you say Y here, any changes of the system clock will be logged.
53044+ If the sysctl option is enabled, a sysctl option with name
53045+ "timechange_logging" is created.
53046+
53047+config GRKERNSEC_PROC_IPADDR
53048+ bool "/proc/<pid>/ipaddr support"
53049+ default y if GRKERNSEC_CONFIG_AUTO
53050+ help
53051+ If you say Y here, a new entry will be added to each /proc/<pid>
53052+ directory that contains the IP address of the person using the task.
53053+ The IP is carried across local TCP and AF_UNIX stream sockets.
53054+ This information can be useful for IDS/IPSes to perform remote response
53055+ to a local attack. The entry is readable by only the owner of the
53056+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53057+ the RBAC system), and thus does not create privacy concerns.
53058+
53059+config GRKERNSEC_RWXMAP_LOG
53060+ bool 'Denied RWX mmap/mprotect logging'
53061+ default y if GRKERNSEC_CONFIG_AUTO
53062+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53063+ help
53064+ If you say Y here, calls to mmap() and mprotect() with explicit
53065+ usage of PROT_WRITE and PROT_EXEC together will be logged when
53066+ denied by the PAX_MPROTECT feature. If the sysctl option is
53067+ enabled, a sysctl option with name "rwxmap_logging" is created.
53068+
53069+config GRKERNSEC_AUDIT_TEXTREL
53070+ bool 'ELF text relocations logging (READ HELP)'
53071+ depends on PAX_MPROTECT
53072+ help
53073+ If you say Y here, text relocations will be logged with the filename
53074+ of the offending library or binary. The purpose of the feature is
53075+ to help Linux distribution developers get rid of libraries and
53076+ binaries that need text relocations which hinder the future progress
53077+ of PaX. Only Linux distribution developers should say Y here, and
53078+ never on a production machine, as this option creates an information
53079+ leak that could aid an attacker in defeating the randomization of
53080+ a single memory region. If the sysctl option is enabled, a sysctl
53081+ option with name "audit_textrel" is created.
53082+
53083+endmenu
53084+
53085+menu "Executable Protections"
53086+depends on GRKERNSEC
53087+
53088+config GRKERNSEC_DMESG
53089+ bool "Dmesg(8) restriction"
53090+ default y if GRKERNSEC_CONFIG_AUTO
53091+ help
53092+ If you say Y here, non-root users will not be able to use dmesg(8)
53093+ to view the contents of the kernel's circular log buffer.
53094+ The kernel's log buffer often contains kernel addresses and other
53095+ identifying information useful to an attacker in fingerprinting a
53096+ system for a targeted exploit.
53097+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
53098+ created.
53099+
53100+config GRKERNSEC_HARDEN_PTRACE
53101+ bool "Deter ptrace-based process snooping"
53102+ default y if GRKERNSEC_CONFIG_AUTO
53103+ help
53104+ If you say Y here, TTY sniffers and other malicious monitoring
53105+ programs implemented through ptrace will be defeated. If you
53106+ have been using the RBAC system, this option has already been
53107+ enabled for several years for all users, with the ability to make
53108+ fine-grained exceptions.
53109+
53110+ This option only affects the ability of non-root users to ptrace
53111+ processes that are not a descendant of the ptracing process.
53112+ This means that strace ./binary and gdb ./binary will still work,
53113+ but attaching to arbitrary processes will not. If the sysctl
53114+ option is enabled, a sysctl option with name "harden_ptrace" is
53115+ created.
53116+
53117+config GRKERNSEC_PTRACE_READEXEC
53118+ bool "Require read access to ptrace sensitive binaries"
53119+ default y if GRKERNSEC_CONFIG_AUTO
53120+ help
53121+ If you say Y here, unprivileged users will not be able to ptrace unreadable
53122+ binaries. This option is useful in environments that
53123+ remove the read bits (e.g. file mode 4711) from suid binaries to
53124+ prevent infoleaking of their contents. This option adds
53125+ consistency to the use of that file mode, as the binary could otherwise
53126+ be read out by running it without privileges and ptracing it.
53127+
53128+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
53129+ is created.
53130+
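
Conceptually, GRKERNSEC_PTRACE_READEXEC makes ptrace attach honor the read bit on the tracee's binary: if the tracer could not open() the executable for reading, it should not be able to read the same bytes out of the process image. A sketch of the test, assuming the hook runs with the tracee's mm->exe_file available; the helper name is illustrative:

#include <linux/fs.h>

/* Sketch: deny attach when the tracer lacks read permission on the
 * tracee's executable (e.g. a mode-4711 suid binary). */
static int ptrace_readexec_ok(struct file *exe_file)
{
	if (!exe_file)
		return 1;	/* nothing to protect */

	return inode_permission(exe_file->f_path.dentry->d_inode,
				MAY_READ) == 0;
}
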
53131+config GRKERNSEC_SETXID
53132+ bool "Enforce consistent multithreaded privileges"
53133+ default y if GRKERNSEC_CONFIG_AUTO
53134+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
53135+ help
53136+ If you say Y here, a change from a root uid to a non-root uid
53137+ in a multithreaded application will cause the resulting uids,
53138+ gids, supplementary groups, and capabilities in that thread
53139+ to be propagated to the other threads of the process. In most
53140+ cases this is unnecessary, as glibc will emulate this behavior
53141+ on behalf of the application. Other libcs do not act in the
53142+ same way, allowing the other threads of the process to continue
53143+ running with root privileges. If the sysctl option is enabled,
53144+ a sysctl option with name "consistent_setxid" is created.
53145+
53146+config GRKERNSEC_TPE
53147+ bool "Trusted Path Execution (TPE)"
53148+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
53149+ help
53150+ If you say Y here, you will be able to choose a gid to add to the
53151+ supplementary groups of users you want to mark as "untrusted."
53152+ These users will not be able to execute any files that are not in
53153+ root-owned directories writable only by root. If the sysctl option
53154+ is enabled, a sysctl option with name "tpe" is created.
53155+
53156+config GRKERNSEC_TPE_ALL
53157+ bool "Partially restrict all non-root users"
53158+ depends on GRKERNSEC_TPE
53159+ help
53160+ If you say Y here, all non-root users will be covered under
53161+ a weaker TPE restriction. This is separate from, and in addition to,
53162+ the main TPE options that you have selected elsewhere. Thus, if a
53163+ "trusted" GID is chosen, this restriction applies even to that GID.
53164+ Under this restriction, all non-root users will only be allowed to
53165+ execute files in directories they own that are not group or
53166+ world-writable, or in directories owned by root and writable only by
53167+ root. If the sysctl option is enabled, a sysctl option with name
53168+ "tpe_restrict_all" is created.
53169+
53170+config GRKERNSEC_TPE_INVERT
53171+ bool "Invert GID option"
53172+ depends on GRKERNSEC_TPE
53173+ help
53174+ If you say Y here, the group you specify in the TPE configuration will
53175+ decide what group TPE restrictions will be *disabled* for. This
53176+ option is useful if you want TPE restrictions to be applied to most
53177+ users on the system. If the sysctl option is enabled, a sysctl option
53178+ with name "tpe_invert" is created. Unlike other sysctl options, this
53179+ entry will default to on for backward-compatibility.
53180+
53181+config GRKERNSEC_TPE_GID
53182+ int
53183+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
53184+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
53185+
53186+config GRKERNSEC_TPE_UNTRUSTED_GID
53187+ int "GID for TPE-untrusted users"
53188+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53189+ default 1005
53190+ help
53191+ Setting this GID determines what group TPE restrictions will be
53192+ *enabled* for. If the sysctl option is enabled, a sysctl option
53193+ with name "tpe_gid" is created.
53194+
53195+config GRKERNSEC_TPE_TRUSTED_GID
53196+ int "GID for TPE-trusted users"
53197+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53198+ default 1005
53199+ help
53200+ Setting this GID determines what group TPE restrictions will be
53201+ *disabled* for. If the sysctl option is enabled, a sysctl option
53202+ with name "tpe_gid" is created.
53203+
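
The various TPE options above all hinge on one property of the directory holding the binary: it is trusted only if root owns it and nobody but root can write to it. A reduced sketch of that directory test follows; the real implementation additionally folds in the GID membership and invert logic configured above:

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/uidgid.h>

/* Sketch: the core TPE trust test on the containing directory. */
static int tpe_dir_is_trusted(const struct inode *dir)
{
	return uid_eq(dir->i_uid, GLOBAL_ROOT_UID) &&
	       !(dir->i_mode & (S_IWGRP | S_IWOTH));
}
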
53204+endmenu
53205+menu "Network Protections"
53206+depends on GRKERNSEC
53207+
53208+config GRKERNSEC_RANDNET
53209+ bool "Larger entropy pools"
53210+ default y if GRKERNSEC_CONFIG_AUTO
53211+ help
53212+ If you say Y here, the entropy pools used for many features of Linux
53213+ and grsecurity will be doubled in size. Since several grsecurity
53214+ features use additional randomness, it is recommended that you say Y
53215+ here. Saying Y here has a similar effect as modifying
53216+ /proc/sys/kernel/random/poolsize.
53217+
53218+config GRKERNSEC_BLACKHOLE
53219+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53220+ default y if GRKERNSEC_CONFIG_AUTO
53221+ depends on NET
53222+ help
53223+ If you say Y here, neither TCP resets nor ICMP
53224+ destination-unreachable packets will be sent in response to packets
53225+ sent to ports for which no associated listening process exists.
53226+ This feature supports both IPv4 and IPv6 and exempts the
53227+ loopback interface from blackholing. Enabling this feature
53228+ makes a host more resilient to DoS attacks and reduces network
53229+ visibility against scanners.
53230+
53231+ The blackhole feature as-implemented is equivalent to the FreeBSD
53232+ blackhole feature, as it prevents RST responses to all packets, not
53233+ just SYNs. Under most application behavior this causes no
53234+ problems, but applications (like haproxy) may not close certain
53235+ connections in a way that cleanly terminates them on the remote
53236+ end, leaving the remote host in LAST_ACK state. Because of this
53237+ side-effect and to prevent intentional LAST_ACK DoSes, this
53238+ feature also adds automatic mitigation against such attacks.
53239+ The mitigation drastically reduces the amount of time a socket
53240+ can spend in LAST_ACK state. If you're using haproxy and not
53241+ all servers it connects to have this option enabled, consider
53242+ disabling this feature on the haproxy host.
53243+
53244+ If the sysctl option is enabled, two sysctl options with names
53245+ "ip_blackhole" and "lastack_retries" will be created.
53246+ While "ip_blackhole" takes the standard zero/non-zero on/off
53247+ toggle, "lastack_retries" uses the same kinds of values as
53248+ "tcp_retries1" and "tcp_retries2". The default value of 4
53249+ prevents a socket from lasting more than 45 seconds in LAST_ACK
53250+ state.
53251+
53252+config GRKERNSEC_NO_SIMULT_CONNECT
53253+ bool "Disable TCP Simultaneous Connect"
53254+ default y if GRKERNSEC_CONFIG_AUTO
53255+ depends on NET
53256+ help
53257+ If you say Y here, a feature by Willy Tarreau will be enabled that
53258+ removes a weakness in Linux's strict implementation of TCP that
53259+ allows two clients to connect to each other without either entering
53260+ a listening state. The weakness allows an attacker to easily prevent
53261+ a client from connecting to a known server provided the source port
53262+ for the connection is guessed correctly.
53263+
53264+ As the weakness could be used to prevent an antivirus or IPS from
53265+ fetching updates, or prevent an SSL gateway from fetching a CRL,
53266+ it should be eliminated by enabling this option. Though Linux is
53267+ one of few operating systems supporting simultaneous connect, it
53268+ has no legitimate use in practice and is rarely supported by firewalls.
53269+
53270+config GRKERNSEC_SOCKET
53271+ bool "Socket restrictions"
53272+ depends on NET
53273+ help
53274+ If you say Y here, you will be able to choose from several options.
53275+ If you assign a GID on your system and add it to the supplementary
53276+ groups of users you want to restrict socket access to, this patch
53277+ will perform up to three things, based on the option(s) you choose.
53278+
53279+config GRKERNSEC_SOCKET_ALL
53280+ bool "Deny any sockets to group"
53281+ depends on GRKERNSEC_SOCKET
53282+ help
53283+ If you say Y here, you will be able to choose a GID whose users will
53284+ be unable to connect to other hosts from your machine or run server
53285+ applications from your machine. If the sysctl option is enabled, a
53286+ sysctl option with name "socket_all" is created.
53287+
53288+config GRKERNSEC_SOCKET_ALL_GID
53289+ int "GID to deny all sockets for"
53290+ depends on GRKERNSEC_SOCKET_ALL
53291+ default 1004
53292+ help
53293+ Here you can choose the GID to disable socket access for. Remember to
53294+ add the users you want socket access disabled for to the GID
53295+ specified here. If the sysctl option is enabled, a sysctl option
53296+ with name "socket_all_gid" is created.
53297+
53298+config GRKERNSEC_SOCKET_CLIENT
53299+ bool "Deny client sockets to group"
53300+ depends on GRKERNSEC_SOCKET
53301+ help
53302+ If you say Y here, you will be able to choose a GID whose users will
53303+ be unable to connect to other hosts from your machine, but will be
53304+ able to run servers. If this option is enabled, all users in the group
53305+ you specify will have to use passive mode when initiating ftp transfers
53306+ from the shell on your machine. If the sysctl option is enabled, a
53307+ sysctl option with name "socket_client" is created.
53308+
53309+config GRKERNSEC_SOCKET_CLIENT_GID
53310+ int "GID to deny client sockets for"
53311+ depends on GRKERNSEC_SOCKET_CLIENT
53312+ default 1003
53313+ help
53314+ Here you can choose the GID to disable client socket access for.
53315+ Remember to add the users you want client socket access disabled for to
53316+ the GID specified here. If the sysctl option is enabled, a sysctl
53317+ option with name "socket_client_gid" is created.
53318+
53319+config GRKERNSEC_SOCKET_SERVER
53320+ bool "Deny server sockets to group"
53321+ depends on GRKERNSEC_SOCKET
53322+ help
53323+ If you say Y here, you will be able to choose a GID whose users will
53324+ be unable to run server applications from your machine. If the sysctl
53325+ option is enabled, a sysctl option with name "socket_server" is created.
53326+
53327+config GRKERNSEC_SOCKET_SERVER_GID
53328+ int "GID to deny server sockets for"
53329+ depends on GRKERNSEC_SOCKET_SERVER
53330+ default 1002
53331+ help
53332+ Here you can choose the GID to disable server socket access for.
53333+ Remember to add the users you want server socket access disabled for to
53334+ the GID specified here. If the sysctl option is enabled, a sysctl
53335+ option with name "socket_server_gid" is created.
53336+
53337+endmenu
53338+menu "Sysctl Support"
53339+depends on GRKERNSEC && SYSCTL
53340+
53341+config GRKERNSEC_SYSCTL
53342+ bool "Sysctl support"
53343+ default y if GRKERNSEC_CONFIG_AUTO
53344+ help
53345+ If you say Y here, you will be able to change the options that
53346+ grsecurity runs with at bootup, without having to recompile your
53347+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53348+ to enable (1) or disable (0) various features. All the sysctl entries
53349+ are mutable until the "grsec_lock" entry is set to a non-zero value.
53350+ All features enabled in the kernel configuration are disabled at boot
53351+ if you do not say Y to the "Turn on features by default" option.
53352+ All options should be set at startup, and the grsec_lock entry should
53353+ be set to a non-zero value after all the options are set.
53354+ *THIS IS EXTREMELY IMPORTANT*
53355+
53356+config GRKERNSEC_SYSCTL_DISTRO
53357+ bool "Extra sysctl support for distro makers (READ HELP)"
53358+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53359+ help
53360+ If you say Y here, additional sysctl options will be created
53361+ for features that affect processes running as root. Therefore,
53362+ it is critical when using this option that the grsec_lock entry be
53363+ enabled after boot. Only distros with prebuilt kernel packages
53364+ with this option enabled that can ensure grsec_lock is enabled
53365+ after boot should use this option.
53366+ *Failure to set grsec_lock after boot makes all grsec features
53367+ this option covers useless*
53368+
53369+ Currently this option creates the following sysctl entries:
53370+ "Disable Privileged I/O": "disable_priv_io"
53371+
53372+config GRKERNSEC_SYSCTL_ON
53373+ bool "Turn on features by default"
53374+ default y if GRKERNSEC_CONFIG_AUTO
53375+ depends on GRKERNSEC_SYSCTL
53376+ help
53377+ If you say Y here, instead of having all features enabled in the
53378+ kernel configuration disabled at boot time, the features will be
53379+ enabled at boot time. It is recommended you say Y here unless
53380+ there is some reason you would want all sysctl-tunable features to
53381+ be disabled by default. As mentioned elsewhere, it is important
53382+ to enable the grsec_lock entry once you have finished modifying
53383+ the sysctl entries.
53384+
53385+endmenu
53386+menu "Logging Options"
53387+depends on GRKERNSEC
53388+
53389+config GRKERNSEC_FLOODTIME
53390+ int "Seconds in between log messages (minimum)"
53391+ default 10
53392+ help
53393+ This option allows you to enforce the number of seconds between
53394+ grsecurity log messages. The default should be suitable for most
53395+ people, however, if you choose to change it, choose a value small enough
53396+ to allow informative logs to be produced, but large enough to
53397+ prevent flooding.
53398+
53399+config GRKERNSEC_FLOODBURST
53400+ int "Number of messages in a burst (maximum)"
53401+ default 6
53402+ help
53403+ This option allows you to choose the maximum number of messages allowed
53404+ within the flood time interval you chose in a separate option. The
53405+ default should be suitable for most people, however if you find that
53406+ many of your logs are being interpreted as flooding, you may want to
53407+ raise this value.
53408+
53409+endmenu
53410diff --git a/grsecurity/Makefile b/grsecurity/Makefile
53411new file mode 100644
53412index 0000000..1b9afa9
53413--- /dev/null
53414+++ b/grsecurity/Makefile
53415@@ -0,0 +1,38 @@
53416+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53417+# during 2001-2009 it was completely redesigned by Brad Spengler
53418+# into an RBAC system
53419+#
53420+# All code in this directory and various hooks inserted throughout the kernel
53421+# are copyright Brad Spengler - Open Source Security, Inc., and released
53422+# under the GPL v2 or higher
53423+
53424+KBUILD_CFLAGS += -Werror
53425+
53426+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53427+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
53428+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53429+
53430+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53431+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53432+ gracl_learn.o grsec_log.o
53433+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53434+
53435+ifdef CONFIG_NET
53436+obj-y += grsec_sock.o
53437+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53438+endif
53439+
53440+ifndef CONFIG_GRKERNSEC
53441+obj-y += grsec_disabled.o
53442+endif
53443+
53444+ifdef CONFIG_GRKERNSEC_HIDESYM
53445+extra-y := grsec_hidesym.o
53446+$(obj)/grsec_hidesym.o:
53447+ @-chmod -f 500 /boot
53448+ @-chmod -f 500 /lib/modules
53449+ @-chmod -f 500 /lib64/modules
53450+ @-chmod -f 500 /lib32/modules
53451+ @-chmod -f 700 .
53452+ @echo ' grsec: protected kernel image paths'
53453+endif
53454diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
53455new file mode 100644
53456index 0000000..960766a
53457--- /dev/null
53458+++ b/grsecurity/gracl.c
53459@@ -0,0 +1,4003 @@
53460+#include <linux/kernel.h>
53461+#include <linux/module.h>
53462+#include <linux/sched.h>
53463+#include <linux/mm.h>
53464+#include <linux/file.h>
53465+#include <linux/fs.h>
53466+#include <linux/namei.h>
53467+#include <linux/mount.h>
53468+#include <linux/tty.h>
53469+#include <linux/proc_fs.h>
53470+#include <linux/lglock.h>
53471+#include <linux/slab.h>
53472+#include <linux/vmalloc.h>
53473+#include <linux/types.h>
53474+#include <linux/sysctl.h>
53475+#include <linux/netdevice.h>
53476+#include <linux/ptrace.h>
53477+#include <linux/gracl.h>
53478+#include <linux/gralloc.h>
53479+#include <linux/security.h>
53480+#include <linux/grinternal.h>
53481+#include <linux/pid_namespace.h>
53482+#include <linux/stop_machine.h>
53483+#include <linux/fdtable.h>
53484+#include <linux/percpu.h>
53486+#include "../fs/mount.h"
53487+
53488+#include <asm/uaccess.h>
53489+#include <asm/errno.h>
53490+#include <asm/mman.h>
53491+
53492+extern struct lglock vfsmount_lock;
53493+
53494+static struct acl_role_db acl_role_set;
53495+static struct name_db name_set;
53496+static struct inodev_db inodev_set;
53497+
53498+/* for keeping track of userspace pointers used for subjects, so we
53499+ can share references in the kernel as well
53500+*/
53501+
53502+static struct path real_root;
53503+
53504+static struct acl_subj_map_db subj_map_set;
53505+
53506+static struct acl_role_label *default_role;
53507+
53508+static struct acl_role_label *role_list;
53509+
53510+static u16 acl_sp_role_value;
53511+
53512+extern char *gr_shared_page[4];
53513+static DEFINE_MUTEX(gr_dev_mutex);
53514+DEFINE_RWLOCK(gr_inode_lock);
53515+
53516+struct gr_arg *gr_usermode;
53517+
53518+static unsigned int gr_status __read_only = GR_STATUS_INIT;
53519+
53520+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
53521+extern void gr_clear_learn_entries(void);
53522+
53523+unsigned char *gr_system_salt;
53524+unsigned char *gr_system_sum;
53525+
53526+static struct sprole_pw **acl_special_roles = NULL;
53527+static __u16 num_sprole_pws = 0;
53528+
53529+static struct acl_role_label *kernel_role = NULL;
53530+
53531+static unsigned int gr_auth_attempts = 0;
53532+static unsigned long gr_auth_expires = 0UL;
53533+
53534+#ifdef CONFIG_NET
53535+extern struct vfsmount *sock_mnt;
53536+#endif
53537+
53538+extern struct vfsmount *pipe_mnt;
53539+extern struct vfsmount *shm_mnt;
53540+#ifdef CONFIG_HUGETLBFS
53541+extern struct vfsmount *hugetlbfs_vfsmount;
53542+#endif
53543+
53544+static struct acl_object_label *fakefs_obj_rw;
53545+static struct acl_object_label *fakefs_obj_rwx;
53546+
53547+extern int gr_init_uidset(void);
53548+extern void gr_free_uidset(void);
53549+extern void gr_remove_uid(uid_t uid);
53550+extern int gr_find_uid(uid_t uid);
53551+
53552+__inline__ int
53553+gr_acl_is_enabled(void)
53554+{
53555+ return (gr_status & GR_READY);
53556+}
53557+
53558+#ifdef CONFIG_BTRFS_FS
53559+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53560+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53561+#endif
53562+
53563+static inline dev_t __get_dev(const struct dentry *dentry)
53564+{
53565+#ifdef CONFIG_BTRFS_FS
53566+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53567+ return get_btrfs_dev_from_inode(dentry->d_inode);
53568+ else
53569+#endif
53570+ return dentry->d_inode->i_sb->s_dev;
53571+}
53572+
53573+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53574+{
53575+ return __get_dev(dentry);
53576+}
53577+
53578+static char gr_task_roletype_to_char(struct task_struct *task)
53579+{
53580+ switch (task->role->roletype &
53581+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
53582+ GR_ROLE_SPECIAL)) {
53583+ case GR_ROLE_DEFAULT:
53584+ return 'D';
53585+ case GR_ROLE_USER:
53586+ return 'U';
53587+ case GR_ROLE_GROUP:
53588+ return 'G';
53589+ case GR_ROLE_SPECIAL:
53590+ return 'S';
53591+ }
53592+
53593+ return 'X';
53594+}
53595+
53596+char gr_roletype_to_char(void)
53597+{
53598+ return gr_task_roletype_to_char(current);
53599+}
53600+
53601+__inline__ int
53602+gr_acl_tpe_check(void)
53603+{
53604+ if (unlikely(!(gr_status & GR_READY)))
53605+ return 0;
53606+ if (current->role->roletype & GR_ROLE_TPE)
53607+ return 1;
53608+ else
53609+ return 0;
53610+}
53611+
53612+int
53613+gr_handle_rawio(const struct inode *inode)
53614+{
53615+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53616+ if (inode && S_ISBLK(inode->i_mode) &&
53617+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53618+ !capable(CAP_SYS_RAWIO))
53619+ return 1;
53620+#endif
53621+ return 0;
53622+}
53623+
53624+static int
53625+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
53626+{
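+	/* branch hint: this is called while walking hash-collision chains,
+	   where nearly every candidate is a different name, so the lengths
+	   are expected to differ; the memcmp only runs on equal lengths */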
53627+ if (likely(lena != lenb))
53628+ return 0;
53629+
53630+ return !memcmp(a, b, lena);
53631+}
53632+
53633+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
53634+{
53635+ *buflen -= namelen;
53636+ if (*buflen < 0)
53637+ return -ENAMETOOLONG;
53638+ *buffer -= namelen;
53639+ memcpy(*buffer, str, namelen);
53640+ return 0;
53641+}
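+
+/* an illustrative sketch, not part of the code itself: prepend() assembles a
+   path leaf-to-root by filling the buffer from its end, so the final length
+   need not be known up front.  with a 16-byte buffer:
+
+	char buf[16];
+	char *p = buf + sizeof(buf);
+	int len = sizeof(buf);
+
+	prepend(&p, &len, "\0", 1);	p: "",         len: 15
+	prepend(&p, &len, "bar", 3);	p: "bar",      len: 12
+	prepend(&p, &len, "/", 1);	p: "/bar",     len: 11
+	prepend(&p, &len, "foo", 3);	p: "foo/bar",  len:  8
+	prepend(&p, &len, "/", 1);	p: "/foo/bar", len:  7
+
+   each call moves *buffer back by namelen and returns -ENAMETOOLONG once
+   *buflen would go negative */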
53642+
53643+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
53644+{
53645+ return prepend(buffer, buflen, name->name, name->len);
53646+}
53647+
53648+static int prepend_path(const struct path *path, struct path *root,
53649+ char **buffer, int *buflen)
53650+{
53651+ struct dentry *dentry = path->dentry;
53652+ struct vfsmount *vfsmnt = path->mnt;
53653+ struct mount *mnt = real_mount(vfsmnt);
53654+ bool slash = false;
53655+ int error = 0;
53656+
53657+ while (dentry != root->dentry || vfsmnt != root->mnt) {
53658+ struct dentry * parent;
53659+
53660+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
53661+ /* Global root? */
53662+ if (!mnt_has_parent(mnt)) {
53663+ goto out;
53664+ }
53665+ dentry = mnt->mnt_mountpoint;
53666+ mnt = mnt->mnt_parent;
53667+ vfsmnt = &mnt->mnt;
53668+ continue;
53669+ }
53670+ parent = dentry->d_parent;
53671+ prefetch(parent);
53672+ spin_lock(&dentry->d_lock);
53673+ error = prepend_name(buffer, buflen, &dentry->d_name);
53674+ spin_unlock(&dentry->d_lock);
53675+ if (!error)
53676+ error = prepend(buffer, buflen, "/", 1);
53677+ if (error)
53678+ break;
53679+
53680+ slash = true;
53681+ dentry = parent;
53682+ }
53683+
53684+out:
53685+ if (!error && !slash)
53686+ error = prepend(buffer, buflen, "/", 1);
53687+
53688+ return error;
53689+}
53690+
53691+/* this must be called with vfsmount_lock and rename_lock held */
53692+
53693+static char *__our_d_path(const struct path *path, struct path *root,
53694+ char *buf, int buflen)
53695+{
53696+ char *res = buf + buflen;
53697+ int error;
53698+
53699+ prepend(&res, &buflen, "\0", 1);
53700+ error = prepend_path(path, root, &res, &buflen);
53701+ if (error)
53702+ return ERR_PTR(error);
53703+
53704+ return res;
53705+}
53706+
53707+static char *
53708+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
53709+{
53710+ char *retval;
53711+
53712+ retval = __our_d_path(path, root, buf, buflen);
53713+ if (unlikely(IS_ERR(retval)))
53714+ retval = strcpy(buf, "<path too long>");
53715+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
53716+ retval[1] = '\0';
53717+
53718+ return retval;
53719+}
53720+
53721+static char *
53722+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53723+ char *buf, int buflen)
53724+{
53725+ struct path path;
53726+ char *res;
53727+
53728+ path.dentry = (struct dentry *)dentry;
53729+ path.mnt = (struct vfsmount *)vfsmnt;
53730+
53731+ /* we can use real_root.dentry, real_root.mnt, because this is only called
53732+ by the RBAC system */
53733+ res = gen_full_path(&path, &real_root, buf, buflen);
53734+
53735+ return res;
53736+}
53737+
53738+static char *
53739+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
53740+ char *buf, int buflen)
53741+{
53742+ char *res;
53743+ struct path path;
53744+ struct path root;
53745+ struct task_struct *reaper = init_pid_ns.child_reaper;
53746+
53747+ path.dentry = (struct dentry *)dentry;
53748+ path.mnt = (struct vfsmount *)vfsmnt;
53749+
53750+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
53751+ get_fs_root(reaper->fs, &root);
53752+
53753+ write_seqlock(&rename_lock);
53754+ br_read_lock(&vfsmount_lock);
53755+ res = gen_full_path(&path, &root, buf, buflen);
53756+ br_read_unlock(&vfsmount_lock);
53757+ write_sequnlock(&rename_lock);
53758+
53759+ path_put(&root);
53760+ return res;
53761+}
53762+
53763+static char *
53764+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53765+{
53766+ char *ret;
53767+ write_seqlock(&rename_lock);
53768+ br_read_lock(&vfsmount_lock);
53769+	ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53770+ PAGE_SIZE);
53771+ br_read_unlock(&vfsmount_lock);
53772+ write_sequnlock(&rename_lock);
53773+ return ret;
53774+}
53775+
53776+static char *
53777+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
53778+{
53779+ char *ret;
53780+ char *buf;
53781+ int buflen;
53782+
53783+ write_seqlock(&rename_lock);
53784+ br_read_lock(&vfsmount_lock);
53785+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53786+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
53787+ buflen = (int)(ret - buf);
53788+ if (buflen >= 5)
53789+ prepend(&ret, &buflen, "/proc", 5);
53790+ else
53791+ ret = strcpy(buf, "<path too long>");
53792+ br_read_unlock(&vfsmount_lock);
53793+ write_sequnlock(&rename_lock);
53794+ return ret;
53795+}
53796+
53797+char *
53798+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
53799+{
53800+	return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53801+ PAGE_SIZE);
53802+}
53803+
53804+char *
53805+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
53806+{
53807+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53808+ PAGE_SIZE);
53809+}
53810+
53811+char *
53812+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
53813+{
53814+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
53815+ PAGE_SIZE);
53816+}
53817+
53818+char *
53819+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
53820+{
53821+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
53822+ PAGE_SIZE);
53823+}
53824+
53825+char *
53826+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
53827+{
53828+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
53829+ PAGE_SIZE);
53830+}
53831+
53832+__inline__ __u32
53833+to_gr_audit(const __u32 reqmode)
53834+{
53835+	/* masks off the audit flags themselves, then shifts the remaining
53836+	   permission flags up into their audit-flag positions, adding the
53837+	   special case of append auditing if we're requesting write */
53838+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
53839+}
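+
+/* an illustrative sketch; the flag identities are assumed from the <<10
+   shift rather than spelled out in this hunk: each GR_AUDIT_* flag is its
+   GR_* permission shifted up by 10 bits, so:
+
+	to_gr_audit(GR_READ)  == GR_AUDIT_READ
+	to_gr_audit(GR_WRITE) == GR_AUDIT_WRITE | GR_AUDIT_APPEND
+
+   the append case is added explicitly because granting write implies the
+   ability to append */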
53840+
53841+struct acl_subject_label *
53842+lookup_subject_map(const struct acl_subject_label *userp)
53843+{
53844+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
53845+ struct subject_map *match;
53846+
53847+ match = subj_map_set.s_hash[index];
53848+
53849+ while (match && match->user != userp)
53850+ match = match->next;
53851+
53852+ if (match != NULL)
53853+ return match->kernel;
53854+ else
53855+ return NULL;
53856+}
53857+
53858+static void
53859+insert_subj_map_entry(struct subject_map *subjmap)
53860+{
53861+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
53862+ struct subject_map **curr;
53863+
53864+ subjmap->prev = NULL;
53865+
53866+ curr = &subj_map_set.s_hash[index];
53867+ if (*curr != NULL)
53868+ (*curr)->prev = subjmap;
53869+
53870+ subjmap->next = *curr;
53871+ *curr = subjmap;
53872+
53873+ return;
53874+}
53875+
53876+static struct acl_role_label *
53877+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
53878+ const gid_t gid)
53879+{
53880+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
53881+ struct acl_role_label *match;
53882+ struct role_allowed_ip *ipp;
53883+ unsigned int x;
53884+ u32 curr_ip = task->signal->curr_ip;
53885+
53886+ task->signal->saved_ip = curr_ip;
53887+
53888+ match = acl_role_set.r_hash[index];
53889+
53890+ while (match) {
53891+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
53892+ for (x = 0; x < match->domain_child_num; x++) {
53893+ if (match->domain_children[x] == uid)
53894+ goto found;
53895+ }
53896+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
53897+ break;
53898+ match = match->next;
53899+ }
53900+found:
53901+ if (match == NULL) {
53902+ try_group:
53903+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
53904+ match = acl_role_set.r_hash[index];
53905+
53906+ while (match) {
53907+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
53908+ for (x = 0; x < match->domain_child_num; x++) {
53909+ if (match->domain_children[x] == gid)
53910+ goto found2;
53911+ }
53912+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
53913+ break;
53914+ match = match->next;
53915+ }
53916+found2:
53917+ if (match == NULL)
53918+ match = default_role;
53919+ if (match->allowed_ips == NULL)
53920+ return match;
53921+ else {
53922+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53923+ if (likely
53924+ ((ntohl(curr_ip) & ipp->netmask) ==
53925+ (ntohl(ipp->addr) & ipp->netmask)))
53926+ return match;
53927+ }
53928+ match = default_role;
53929+ }
53930+ } else if (match->allowed_ips == NULL) {
53931+ return match;
53932+ } else {
53933+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
53934+ if (likely
53935+ ((ntohl(curr_ip) & ipp->netmask) ==
53936+ (ntohl(ipp->addr) & ipp->netmask)))
53937+ return match;
53938+ }
53939+ goto try_group;
53940+ }
53941+
53942+ return match;
53943+}
53944+
53945+struct acl_subject_label *
53946+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
53947+ const struct acl_role_label *role)
53948+{
53949+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53950+ struct acl_subject_label *match;
53951+
53952+ match = role->subj_hash[index];
53953+
53954+ while (match && (match->inode != ino || match->device != dev ||
53955+ (match->mode & GR_DELETED))) {
53956+ match = match->next;
53957+ }
53958+
53959+ if (match && !(match->mode & GR_DELETED))
53960+ return match;
53961+ else
53962+ return NULL;
53963+}
53964+
53965+struct acl_subject_label *
53966+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
53967+ const struct acl_role_label *role)
53968+{
53969+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
53970+ struct acl_subject_label *match;
53971+
53972+ match = role->subj_hash[index];
53973+
53974+ while (match && (match->inode != ino || match->device != dev ||
53975+ !(match->mode & GR_DELETED))) {
53976+ match = match->next;
53977+ }
53978+
53979+ if (match && (match->mode & GR_DELETED))
53980+ return match;
53981+ else
53982+ return NULL;
53983+}
53984+
53985+static struct acl_object_label *
53986+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
53987+ const struct acl_subject_label *subj)
53988+{
53989+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
53990+ struct acl_object_label *match;
53991+
53992+ match = subj->obj_hash[index];
53993+
53994+ while (match && (match->inode != ino || match->device != dev ||
53995+ (match->mode & GR_DELETED))) {
53996+ match = match->next;
53997+ }
53998+
53999+ if (match && !(match->mode & GR_DELETED))
54000+ return match;
54001+ else
54002+ return NULL;
54003+}
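+
+/* the _create variant below (and lookup_name_entry_create() further down)
+   prefers an entry flagged GR_DELETED and falls back to a live one only if
+   no deleted entry exists: when a policy-covered path is unlinked and later
+   recreated, the label of the deleted predecessor is reused so the rules
+   follow the new inode */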
54004+
54005+static struct acl_object_label *
54006+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
54007+ const struct acl_subject_label *subj)
54008+{
54009+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
54010+ struct acl_object_label *match;
54011+
54012+ match = subj->obj_hash[index];
54013+
54014+ while (match && (match->inode != ino || match->device != dev ||
54015+ !(match->mode & GR_DELETED))) {
54016+ match = match->next;
54017+ }
54018+
54019+ if (match && (match->mode & GR_DELETED))
54020+ return match;
54021+
54022+ match = subj->obj_hash[index];
54023+
54024+ while (match && (match->inode != ino || match->device != dev ||
54025+ (match->mode & GR_DELETED))) {
54026+ match = match->next;
54027+ }
54028+
54029+ if (match && !(match->mode & GR_DELETED))
54030+ return match;
54031+ else
54032+ return NULL;
54033+}
54034+
54035+static struct name_entry *
54036+lookup_name_entry(const char *name)
54037+{
54038+ unsigned int len = strlen(name);
54039+ unsigned int key = full_name_hash(name, len);
54040+ unsigned int index = key % name_set.n_size;
54041+ struct name_entry *match;
54042+
54043+ match = name_set.n_hash[index];
54044+
54045+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
54046+ match = match->next;
54047+
54048+ return match;
54049+}
54050+
54051+static struct name_entry *
54052+lookup_name_entry_create(const char *name)
54053+{
54054+ unsigned int len = strlen(name);
54055+ unsigned int key = full_name_hash(name, len);
54056+ unsigned int index = key % name_set.n_size;
54057+ struct name_entry *match;
54058+
54059+ match = name_set.n_hash[index];
54060+
54061+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
54062+ !match->deleted))
54063+ match = match->next;
54064+
54065+ if (match && match->deleted)
54066+ return match;
54067+
54068+ match = name_set.n_hash[index];
54069+
54070+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
54071+ match->deleted))
54072+ match = match->next;
54073+
54074+ if (match && !match->deleted)
54075+ return match;
54076+ else
54077+ return NULL;
54078+}
54079+
54080+static struct inodev_entry *
54081+lookup_inodev_entry(const ino_t ino, const dev_t dev)
54082+{
54083+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
54084+ struct inodev_entry *match;
54085+
54086+ match = inodev_set.i_hash[index];
54087+
54088+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
54089+ match = match->next;
54090+
54091+ return match;
54092+}
54093+
54094+static void
54095+insert_inodev_entry(struct inodev_entry *entry)
54096+{
54097+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
54098+ inodev_set.i_size);
54099+ struct inodev_entry **curr;
54100+
54101+ entry->prev = NULL;
54102+
54103+ curr = &inodev_set.i_hash[index];
54104+ if (*curr != NULL)
54105+ (*curr)->prev = entry;
54106+
54107+ entry->next = *curr;
54108+ *curr = entry;
54109+
54110+ return;
54111+}
54112+
54113+static void
54114+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
54115+{
54116+ unsigned int index =
54117+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
54118+ struct acl_role_label **curr;
54119+ struct acl_role_label *tmp, *tmp2;
54120+
54121+ curr = &acl_role_set.r_hash[index];
54122+
54123+ /* simple case, slot is empty, just set it to our role */
54124+ if (*curr == NULL) {
54125+ *curr = role;
54126+ } else {
54127+ /* example:
54128+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
54129+ 2 -> 3
54130+ */
54131+ /* first check to see if we can already be reached via this slot */
54132+ tmp = *curr;
54133+ while (tmp && tmp != role)
54134+ tmp = tmp->next;
54135+ if (tmp == role) {
54136+ /* we don't need to add ourselves to this slot's chain */
54137+ return;
54138+ }
54139+ /* we need to add ourselves to this chain, two cases */
54140+ if (role->next == NULL) {
54141+ /* simple case, append the current chain to our role */
54142+ role->next = *curr;
54143+ *curr = role;
54144+ } else {
54145+ /* 1 -> 2 -> 3 -> 4
54146+ 2 -> 3 -> 4
54147+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
54148+ */
54149+ /* trickier case: walk our role's chain until we find
54150+ the role for the start of the current slot's chain */
54151+ tmp = role;
54152+ tmp2 = *curr;
54153+ while (tmp->next && tmp->next != tmp2)
54154+ tmp = tmp->next;
54155+ if (tmp->next == tmp2) {
54156+ /* from example above, we found 3, so just
54157+ replace this slot's chain with ours */
54158+ *curr = role;
54159+ } else {
54160+ /* we didn't find a subset of our role's chain
54161+ in the current slot's chain, so append their
54162+ chain to ours, and set us as the first role in
54163+ the slot's chain
54164+
54165+ we could fold this case with the case above,
54166+ but making it explicit for clarity
54167+ */
54168+ tmp->next = tmp2;
54169+ *curr = role;
54170+ }
54171+ }
54172+ }
54173+
54174+ return;
54175+}
54176+
54177+static void
54178+insert_acl_role_label(struct acl_role_label *role)
54179+{
54180+ int i;
54181+
54182+ if (role_list == NULL) {
54183+ role_list = role;
54184+ role->prev = NULL;
54185+ } else {
54186+ role->prev = role_list;
54187+ role_list = role;
54188+ }
54189+
54190+ /* used for hash chains */
54191+ role->next = NULL;
54192+
54193+ if (role->roletype & GR_ROLE_DOMAIN) {
54194+ for (i = 0; i < role->domain_child_num; i++)
54195+ __insert_acl_role_label(role, role->domain_children[i]);
54196+ } else
54197+ __insert_acl_role_label(role, role->uidgid);
54198+}
54199+
54200+static int
54201+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
54202+{
54203+ struct name_entry **curr, *nentry;
54204+ struct inodev_entry *ientry;
54205+ unsigned int len = strlen(name);
54206+ unsigned int key = full_name_hash(name, len);
54207+ unsigned int index = key % name_set.n_size;
54208+
54209+ curr = &name_set.n_hash[index];
54210+
54211+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
54212+ curr = &((*curr)->next);
54213+
54214+ if (*curr != NULL)
54215+ return 1;
54216+
54217+ nentry = acl_alloc(sizeof (struct name_entry));
54218+ if (nentry == NULL)
54219+ return 0;
54220+ ientry = acl_alloc(sizeof (struct inodev_entry));
54221+ if (ientry == NULL)
54222+ return 0;
54223+ ientry->nentry = nentry;
54224+
54225+ nentry->key = key;
54226+ nentry->name = name;
54227+ nentry->inode = inode;
54228+ nentry->device = device;
54229+ nentry->len = len;
54230+ nentry->deleted = deleted;
54231+
54232+ nentry->prev = NULL;
54233+ curr = &name_set.n_hash[index];
54234+ if (*curr != NULL)
54235+ (*curr)->prev = nentry;
54236+ nentry->next = *curr;
54237+ *curr = nentry;
54238+
54239+ /* insert us into the table searchable by inode/dev */
54240+ insert_inodev_entry(ientry);
54241+
54242+ return 1;
54243+}
54244+
54245+static void
54246+insert_acl_obj_label(struct acl_object_label *obj,
54247+ struct acl_subject_label *subj)
54248+{
54249+ unsigned int index =
54250+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
54251+ struct acl_object_label **curr;
54252+
54254+ obj->prev = NULL;
54255+
54256+ curr = &subj->obj_hash[index];
54257+ if (*curr != NULL)
54258+ (*curr)->prev = obj;
54259+
54260+ obj->next = *curr;
54261+ *curr = obj;
54262+
54263+ return;
54264+}
54265+
54266+static void
54267+insert_acl_subj_label(struct acl_subject_label *obj,
54268+ struct acl_role_label *role)
54269+{
54270+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
54271+ struct acl_subject_label **curr;
54272+
54273+ obj->prev = NULL;
54274+
54275+ curr = &role->subj_hash[index];
54276+ if (*curr != NULL)
54277+ (*curr)->prev = obj;
54278+
54279+ obj->next = *curr;
54280+ *curr = obj;
54281+
54282+ return;
54283+}
54284+
54285+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
54286+
54287+static void *
54288+create_table(__u32 * len, int elementsize)
54289+{
54290+ unsigned int table_sizes[] = {
54291+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
54292+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
54293+ 4194301, 8388593, 16777213, 33554393, 67108859
54294+ };
54295+ void *newtable = NULL;
54296+ unsigned int pwr = 0;
54297+
54298+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
54299+ table_sizes[pwr] <= *len)
54300+ pwr++;
54301+
54302+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
54303+ return newtable;
54304+
54305+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
54306+ newtable =
54307+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
54308+ else
54309+ newtable = vmalloc(table_sizes[pwr] * elementsize);
54310+
54311+ *len = table_sizes[pwr];
54312+
54313+ return newtable;
54314+}
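+
+/* an illustrative sketch, not part of the code itself: create_table() rounds
+   the requested size up to the smallest prime above it, so chains stay near
+   one entry per bucket:
+
+	__u32 len = 1000;
+	struct name_entry **hash;
+
+	hash = (struct name_entry **) create_table(&len, sizeof(void *));
+
+   afterwards len == 1021; the table is kmalloc'd when it fits in a page and
+   vmalloc'd otherwise, and NULL is returned if the request exceeds the
+   largest prime in table_sizes[] */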
54315+
54316+static int
54317+init_variables(const struct gr_arg *arg)
54318+{
54319+ struct task_struct *reaper = init_pid_ns.child_reaper;
54320+ unsigned int stacksize;
54321+
54322+ subj_map_set.s_size = arg->role_db.num_subjects;
54323+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
54324+ name_set.n_size = arg->role_db.num_objects;
54325+ inodev_set.i_size = arg->role_db.num_objects;
54326+
54327+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
54328+ !name_set.n_size || !inodev_set.i_size)
54329+ return 1;
54330+
54331+ if (!gr_init_uidset())
54332+ return 1;
54333+
54334+ /* set up the stack that holds allocation info */
54335+
54336+ stacksize = arg->role_db.num_pointers + 5;
54337+
54338+ if (!acl_alloc_stack_init(stacksize))
54339+ return 1;
54340+
54341+ /* grab reference for the real root dentry and vfsmount */
54342+ get_fs_root(reaper->fs, &real_root);
54343+
54344+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54345+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
54346+#endif
54347+
54348+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
54349+ if (fakefs_obj_rw == NULL)
54350+ return 1;
54351+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
54352+
54353+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
54354+ if (fakefs_obj_rwx == NULL)
54355+ return 1;
54356+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
54357+
54358+ subj_map_set.s_hash =
54359+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
54360+ acl_role_set.r_hash =
54361+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
54362+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
54363+ inodev_set.i_hash =
54364+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
54365+
54366+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
54367+ !name_set.n_hash || !inodev_set.i_hash)
54368+ return 1;
54369+
54370+ memset(subj_map_set.s_hash, 0,
54371+ sizeof(struct subject_map *) * subj_map_set.s_size);
54372+ memset(acl_role_set.r_hash, 0,
54373+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
54374+ memset(name_set.n_hash, 0,
54375+ sizeof (struct name_entry *) * name_set.n_size);
54376+ memset(inodev_set.i_hash, 0,
54377+ sizeof (struct inodev_entry *) * inodev_set.i_size);
54378+
54379+ return 0;
54380+}
54381+
54382+/* free information not needed after startup;
54383+   currently this is only the user->kernel pointer mappings for subjects
54384+*/
54385+
54386+static void
54387+free_init_variables(void)
54388+{
54389+ __u32 i;
54390+
54391+ if (subj_map_set.s_hash) {
54392+ for (i = 0; i < subj_map_set.s_size; i++) {
54393+ if (subj_map_set.s_hash[i]) {
54394+ kfree(subj_map_set.s_hash[i]);
54395+ subj_map_set.s_hash[i] = NULL;
54396+ }
54397+ }
54398+
54399+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
54400+ PAGE_SIZE)
54401+ kfree(subj_map_set.s_hash);
54402+ else
54403+ vfree(subj_map_set.s_hash);
54404+ }
54405+
54406+ return;
54407+}
54408+
54409+static void
54410+free_variables(void)
54411+{
54412+ struct acl_subject_label *s;
54413+ struct acl_role_label *r;
54414+ struct task_struct *task, *task2;
54415+ unsigned int x;
54416+
54417+ gr_clear_learn_entries();
54418+
54419+ read_lock(&tasklist_lock);
54420+ do_each_thread(task2, task) {
54421+ task->acl_sp_role = 0;
54422+ task->acl_role_id = 0;
54423+ task->acl = NULL;
54424+ task->role = NULL;
54425+ } while_each_thread(task2, task);
54426+ read_unlock(&tasklist_lock);
54427+
54428+ /* release the reference to the real root dentry and vfsmount */
54429+ path_put(&real_root);
54430+ memset(&real_root, 0, sizeof(real_root));
54431+
54432+ /* free all object hash tables */
54433+
54434+ FOR_EACH_ROLE_START(r)
54435+ if (r->subj_hash == NULL)
54436+ goto next_role;
54437+ FOR_EACH_SUBJECT_START(r, s, x)
54438+ if (s->obj_hash == NULL)
54439+ break;
54440+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54441+ kfree(s->obj_hash);
54442+ else
54443+ vfree(s->obj_hash);
54444+ FOR_EACH_SUBJECT_END(s, x)
54445+ FOR_EACH_NESTED_SUBJECT_START(r, s)
54446+ if (s->obj_hash == NULL)
54447+ break;
54448+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
54449+ kfree(s->obj_hash);
54450+ else
54451+ vfree(s->obj_hash);
54452+ FOR_EACH_NESTED_SUBJECT_END(s)
54453+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
54454+ kfree(r->subj_hash);
54455+ else
54456+ vfree(r->subj_hash);
54457+ r->subj_hash = NULL;
54458+next_role:
54459+ FOR_EACH_ROLE_END(r)
54460+
54461+ acl_free_all();
54462+
54463+ if (acl_role_set.r_hash) {
54464+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
54465+ PAGE_SIZE)
54466+ kfree(acl_role_set.r_hash);
54467+ else
54468+ vfree(acl_role_set.r_hash);
54469+ }
54470+ if (name_set.n_hash) {
54471+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
54472+ PAGE_SIZE)
54473+ kfree(name_set.n_hash);
54474+ else
54475+ vfree(name_set.n_hash);
54476+ }
54477+
54478+ if (inodev_set.i_hash) {
54479+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
54480+ PAGE_SIZE)
54481+ kfree(inodev_set.i_hash);
54482+ else
54483+ vfree(inodev_set.i_hash);
54484+ }
54485+
54486+ gr_free_uidset();
54487+
54488+ memset(&name_set, 0, sizeof (struct name_db));
54489+ memset(&inodev_set, 0, sizeof (struct inodev_db));
54490+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
54491+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
54492+
54493+ default_role = NULL;
54494+ kernel_role = NULL;
54495+ role_list = NULL;
54496+
54497+ return;
54498+}
54499+
54500+static __u32
54501+count_user_objs(struct acl_object_label *userp)
54502+{
54503+ struct acl_object_label o_tmp;
54504+ __u32 num = 0;
54505+
54506+ while (userp) {
54507+ if (copy_from_user(&o_tmp, userp,
54508+ sizeof (struct acl_object_label)))
54509+ break;
54510+
54511+ userp = o_tmp.prev;
54512+ num++;
54513+ }
54514+
54515+ return num;
54516+}
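+
+/* note: userspace hands the kernel the tail of each policy list, linked
+   through ->prev, so every walk in this file copies one element at a time
+   with copy_from_user() and follows the copied prev pointer rather than
+   dereferencing a userspace pointer directly */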
54517+
54518+static struct acl_subject_label *
54519+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
54520+
54521+static int
54522+copy_user_glob(struct acl_object_label *obj)
54523+{
54524+ struct acl_object_label *g_tmp, **guser;
54525+ unsigned int len;
54526+ char *tmp;
54527+
54528+ if (obj->globbed == NULL)
54529+ return 0;
54530+
54531+ guser = &obj->globbed;
54532+ while (*guser) {
54533+ g_tmp = (struct acl_object_label *)
54534+ acl_alloc(sizeof (struct acl_object_label));
54535+ if (g_tmp == NULL)
54536+ return -ENOMEM;
54537+
54538+ if (copy_from_user(g_tmp, *guser,
54539+ sizeof (struct acl_object_label)))
54540+ return -EFAULT;
54541+
54542+ len = strnlen_user(g_tmp->filename, PATH_MAX);
54543+
54544+ if (!len || len >= PATH_MAX)
54545+ return -EINVAL;
54546+
54547+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54548+ return -ENOMEM;
54549+
54550+ if (copy_from_user(tmp, g_tmp->filename, len))
54551+ return -EFAULT;
54552+ tmp[len-1] = '\0';
54553+ g_tmp->filename = tmp;
54554+
54555+ *guser = g_tmp;
54556+ guser = &(g_tmp->next);
54557+ }
54558+
54559+ return 0;
54560+}
54561+
54562+static int
54563+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
54564+ struct acl_role_label *role)
54565+{
54566+ struct acl_object_label *o_tmp;
54567+ unsigned int len;
54568+ int ret;
54569+ char *tmp;
54570+
54571+ while (userp) {
54572+ if ((o_tmp = (struct acl_object_label *)
54573+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
54574+ return -ENOMEM;
54575+
54576+ if (copy_from_user(o_tmp, userp,
54577+ sizeof (struct acl_object_label)))
54578+ return -EFAULT;
54579+
54580+ userp = o_tmp->prev;
54581+
54582+ len = strnlen_user(o_tmp->filename, PATH_MAX);
54583+
54584+ if (!len || len >= PATH_MAX)
54585+ return -EINVAL;
54586+
54587+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54588+ return -ENOMEM;
54589+
54590+ if (copy_from_user(tmp, o_tmp->filename, len))
54591+ return -EFAULT;
54592+ tmp[len-1] = '\0';
54593+ o_tmp->filename = tmp;
54594+
54595+ insert_acl_obj_label(o_tmp, subj);
54596+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
54597+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
54598+ return -ENOMEM;
54599+
54600+ ret = copy_user_glob(o_tmp);
54601+ if (ret)
54602+ return ret;
54603+
54604+ if (o_tmp->nested) {
54605+ int already_copied;
54606+
54607+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
54608+ if (IS_ERR(o_tmp->nested))
54609+ return PTR_ERR(o_tmp->nested);
54610+
54611+ /* insert into nested subject list if we haven't copied this one yet
54612+ to prevent duplicate entries */
54613+ if (!already_copied) {
54614+ o_tmp->nested->next = role->hash->first;
54615+ role->hash->first = o_tmp->nested;
54616+ }
54617+ }
54618+ }
54619+
54620+ return 0;
54621+}
54622+
54623+static __u32
54624+count_user_subjs(struct acl_subject_label *userp)
54625+{
54626+ struct acl_subject_label s_tmp;
54627+ __u32 num = 0;
54628+
54629+ while (userp) {
54630+ if (copy_from_user(&s_tmp, userp,
54631+ sizeof (struct acl_subject_label)))
54632+ break;
54633+
54634+		userp = s_tmp.prev;
+		num++;
54635+ }
54636+
54637+ return num;
54638+}
54639+
54640+static int
54641+copy_user_allowedips(struct acl_role_label *rolep)
54642+{
54643+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
54644+
54645+ ruserip = rolep->allowed_ips;
54646+
54647+ while (ruserip) {
54648+ rlast = rtmp;
54649+
54650+ if ((rtmp = (struct role_allowed_ip *)
54651+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
54652+ return -ENOMEM;
54653+
54654+ if (copy_from_user(rtmp, ruserip,
54655+ sizeof (struct role_allowed_ip)))
54656+ return -EFAULT;
54657+
54658+ ruserip = rtmp->prev;
54659+
54660+ if (!rlast) {
54661+ rtmp->prev = NULL;
54662+ rolep->allowed_ips = rtmp;
54663+ } else {
54664+ rlast->next = rtmp;
54665+ rtmp->prev = rlast;
54666+ }
54667+
54668+ if (!ruserip)
54669+ rtmp->next = NULL;
54670+ }
54671+
54672+ return 0;
54673+}
54674+
54675+static int
54676+copy_user_transitions(struct acl_role_label *rolep)
54677+{
54678+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
54679+
54680+ unsigned int len;
54681+ char *tmp;
54682+
54683+ rusertp = rolep->transitions;
54684+
54685+ while (rusertp) {
54686+ rlast = rtmp;
54687+
54688+ if ((rtmp = (struct role_transition *)
54689+ acl_alloc(sizeof (struct role_transition))) == NULL)
54690+ return -ENOMEM;
54691+
54692+ if (copy_from_user(rtmp, rusertp,
54693+ sizeof (struct role_transition)))
54694+ return -EFAULT;
54695+
54696+ rusertp = rtmp->prev;
54697+
54698+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
54699+
54700+ if (!len || len >= GR_SPROLE_LEN)
54701+ return -EINVAL;
54702+
54703+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54704+ return -ENOMEM;
54705+
54706+ if (copy_from_user(tmp, rtmp->rolename, len))
54707+ return -EFAULT;
54708+ tmp[len-1] = '\0';
54709+ rtmp->rolename = tmp;
54710+
54711+ if (!rlast) {
54712+ rtmp->prev = NULL;
54713+ rolep->transitions = rtmp;
54714+ } else {
54715+ rlast->next = rtmp;
54716+ rtmp->prev = rlast;
54717+ }
54718+
54719+ if (!rusertp)
54720+ rtmp->next = NULL;
54721+ }
54722+
54723+ return 0;
54724+}
54725+
54726+static struct acl_subject_label *
54727+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
54728+{
54729+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
54730+ unsigned int len;
54731+ char *tmp;
54732+ __u32 num_objs;
54733+ struct acl_ip_label **i_tmp, *i_utmp2;
54734+ struct gr_hash_struct ghash;
54735+ struct subject_map *subjmap;
54736+ unsigned int i_num;
54737+ int err;
54738+
54739+ if (already_copied != NULL)
54740+ *already_copied = 0;
54741+
54742+ s_tmp = lookup_subject_map(userp);
54743+
54744+ /* we've already copied this subject into the kernel, just return
54745+ the reference to it, and don't copy it over again
54746+ */
54747+ if (s_tmp) {
54748+ if (already_copied != NULL)
54749+ *already_copied = 1;
54750+ return(s_tmp);
54751+ }
54752+
54753+ if ((s_tmp = (struct acl_subject_label *)
54754+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
54755+ return ERR_PTR(-ENOMEM);
54756+
54757+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
54758+ if (subjmap == NULL)
54759+ return ERR_PTR(-ENOMEM);
54760+
54761+ subjmap->user = userp;
54762+ subjmap->kernel = s_tmp;
54763+ insert_subj_map_entry(subjmap);
54764+
54765+ if (copy_from_user(s_tmp, userp,
54766+ sizeof (struct acl_subject_label)))
54767+ return ERR_PTR(-EFAULT);
54768+
54769+ len = strnlen_user(s_tmp->filename, PATH_MAX);
54770+
54771+ if (!len || len >= PATH_MAX)
54772+ return ERR_PTR(-EINVAL);
54773+
54774+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54775+ return ERR_PTR(-ENOMEM);
54776+
54777+ if (copy_from_user(tmp, s_tmp->filename, len))
54778+ return ERR_PTR(-EFAULT);
54779+ tmp[len-1] = '\0';
54780+ s_tmp->filename = tmp;
54781+
54782+ if (!strcmp(s_tmp->filename, "/"))
54783+ role->root_label = s_tmp;
54784+
54785+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
54786+ return ERR_PTR(-EFAULT);
54787+
54788+ /* copy user and group transition tables */
54789+
54790+ if (s_tmp->user_trans_num) {
54791+ uid_t *uidlist;
54792+
54793+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
54794+ if (uidlist == NULL)
54795+ return ERR_PTR(-ENOMEM);
54796+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
54797+ return ERR_PTR(-EFAULT);
54798+
54799+ s_tmp->user_transitions = uidlist;
54800+ }
54801+
54802+ if (s_tmp->group_trans_num) {
54803+ gid_t *gidlist;
54804+
54805+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
54806+ if (gidlist == NULL)
54807+ return ERR_PTR(-ENOMEM);
54808+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
54809+ return ERR_PTR(-EFAULT);
54810+
54811+ s_tmp->group_transitions = gidlist;
54812+ }
54813+
54814+ /* set up object hash table */
54815+ num_objs = count_user_objs(ghash.first);
54816+
54817+ s_tmp->obj_hash_size = num_objs;
54818+ s_tmp->obj_hash =
54819+ (struct acl_object_label **)
54820+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
54821+
54822+ if (!s_tmp->obj_hash)
54823+ return ERR_PTR(-ENOMEM);
54824+
54825+ memset(s_tmp->obj_hash, 0,
54826+ s_tmp->obj_hash_size *
54827+ sizeof (struct acl_object_label *));
54828+
54829+ /* add in objects */
54830+ err = copy_user_objs(ghash.first, s_tmp, role);
54831+
54832+ if (err)
54833+ return ERR_PTR(err);
54834+
54835+ /* set pointer for parent subject */
54836+ if (s_tmp->parent_subject) {
54837+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
54838+
54839+ if (IS_ERR(s_tmp2))
54840+ return s_tmp2;
54841+
54842+ s_tmp->parent_subject = s_tmp2;
54843+ }
54844+
54845+ /* add in ip acls */
54846+
54847+ if (!s_tmp->ip_num) {
54848+ s_tmp->ips = NULL;
54849+ goto insert;
54850+ }
54851+
54852+ i_tmp =
54853+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
54854+ sizeof (struct acl_ip_label *));
54855+
54856+ if (!i_tmp)
54857+ return ERR_PTR(-ENOMEM);
54858+
54859+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
54860+ *(i_tmp + i_num) =
54861+ (struct acl_ip_label *)
54862+ acl_alloc(sizeof (struct acl_ip_label));
54863+ if (!*(i_tmp + i_num))
54864+ return ERR_PTR(-ENOMEM);
54865+
54866+ if (copy_from_user
54867+ (&i_utmp2, s_tmp->ips + i_num,
54868+ sizeof (struct acl_ip_label *)))
54869+ return ERR_PTR(-EFAULT);
54870+
54871+ if (copy_from_user
54872+ (*(i_tmp + i_num), i_utmp2,
54873+ sizeof (struct acl_ip_label)))
54874+ return ERR_PTR(-EFAULT);
54875+
54876+ if ((*(i_tmp + i_num))->iface == NULL)
54877+ continue;
54878+
54879+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
54880+ if (!len || len >= IFNAMSIZ)
54881+ return ERR_PTR(-EINVAL);
54882+ tmp = acl_alloc(len);
54883+ if (tmp == NULL)
54884+ return ERR_PTR(-ENOMEM);
54885+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
54886+ return ERR_PTR(-EFAULT);
54887+ (*(i_tmp + i_num))->iface = tmp;
54888+ }
54889+
54890+ s_tmp->ips = i_tmp;
54891+
54892+insert:
54893+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
54894+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
54895+ return ERR_PTR(-ENOMEM);
54896+
54897+ return s_tmp;
54898+}
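+
+/* note: do_copy_user_subj() memoizes through the subject map, so a subject
+   reachable several times (e.g. as the parent_subject of many subjects, or
+   as a nested subject referenced by more than one object) is copied from
+   userspace exactly once; later calls just return the kernel copy */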
54899+
54900+static int
54901+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
54902+{
54903+ struct acl_subject_label s_pre;
54904+ struct acl_subject_label * ret;
54905+ int err;
54906+
54907+ while (userp) {
54908+ if (copy_from_user(&s_pre, userp,
54909+ sizeof (struct acl_subject_label)))
54910+ return -EFAULT;
54911+
54912+ ret = do_copy_user_subj(userp, role, NULL);
54913+
54914+ err = PTR_ERR(ret);
54915+ if (IS_ERR(ret))
54916+ return err;
54917+
54918+ insert_acl_subj_label(ret, role);
54919+
54920+ userp = s_pre.prev;
54921+ }
54922+
54923+ return 0;
54924+}
54925+
54926+static int
54927+copy_user_acl(struct gr_arg *arg)
54928+{
54929+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
54930+ struct acl_subject_label *subj_list;
54931+ struct sprole_pw *sptmp;
54932+ struct gr_hash_struct *ghash;
54933+ uid_t *domainlist;
54934+ unsigned int r_num;
54935+ unsigned int len;
54936+ char *tmp;
54937+ int err = 0;
54938+ __u16 i;
54939+ __u32 num_subjs;
54940+
54941+ /* we need a default and kernel role */
54942+ if (arg->role_db.num_roles < 2)
54943+ return -EINVAL;
54944+
54945+ /* copy special role authentication info from userspace */
54946+
54947+ num_sprole_pws = arg->num_sprole_pws;
54948+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
54949+
54950+ if (!acl_special_roles && num_sprole_pws)
54951+ return -ENOMEM;
54952+
54953+ for (i = 0; i < num_sprole_pws; i++) {
54954+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
54955+ if (!sptmp)
54956+ return -ENOMEM;
54957+ if (copy_from_user(sptmp, arg->sprole_pws + i,
54958+ sizeof (struct sprole_pw)))
54959+ return -EFAULT;
54960+
54961+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
54962+
54963+ if (!len || len >= GR_SPROLE_LEN)
54964+ return -EINVAL;
54965+
54966+ if ((tmp = (char *) acl_alloc(len)) == NULL)
54967+ return -ENOMEM;
54968+
54969+ if (copy_from_user(tmp, sptmp->rolename, len))
54970+ return -EFAULT;
54971+
54972+ tmp[len-1] = '\0';
54973+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54974+ printk(KERN_ALERT "Copying special role %s\n", tmp);
54975+#endif
54976+ sptmp->rolename = tmp;
54977+ acl_special_roles[i] = sptmp;
54978+ }
54979+
54980+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
54981+
54982+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
54983+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
54984+
54985+ if (!r_tmp)
54986+ return -ENOMEM;
54987+
54988+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
54989+ sizeof (struct acl_role_label *)))
54990+ return -EFAULT;
54991+
54992+ if (copy_from_user(r_tmp, r_utmp2,
54993+ sizeof (struct acl_role_label)))
54994+ return -EFAULT;
54995+
54996+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
54997+
54998+		if (!len || len >= GR_SPROLE_LEN)
54999+ return -EINVAL;
55000+
55001+ if ((tmp = (char *) acl_alloc(len)) == NULL)
55002+ return -ENOMEM;
55003+
55004+ if (copy_from_user(tmp, r_tmp->rolename, len))
55005+ return -EFAULT;
55006+
55007+ tmp[len-1] = '\0';
55008+ r_tmp->rolename = tmp;
55009+
55010+ if (!strcmp(r_tmp->rolename, "default")
55011+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
55012+ default_role = r_tmp;
55013+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
55014+ kernel_role = r_tmp;
55015+ }
55016+
55017+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
55018+ return -ENOMEM;
55019+
55020+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
55021+ return -EFAULT;
55022+
55023+ r_tmp->hash = ghash;
55024+
55025+ num_subjs = count_user_subjs(r_tmp->hash->first);
55026+
55027+ r_tmp->subj_hash_size = num_subjs;
55028+ r_tmp->subj_hash =
55029+ (struct acl_subject_label **)
55030+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
55031+
55032+ if (!r_tmp->subj_hash)
55033+ return -ENOMEM;
55034+
55035+ err = copy_user_allowedips(r_tmp);
55036+ if (err)
55037+ return err;
55038+
55039+ /* copy domain info */
55040+ if (r_tmp->domain_children != NULL) {
55041+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
55042+ if (domainlist == NULL)
55043+ return -ENOMEM;
55044+
55045+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
55046+ return -EFAULT;
55047+
55048+ r_tmp->domain_children = domainlist;
55049+ }
55050+
55051+ err = copy_user_transitions(r_tmp);
55052+ if (err)
55053+ return err;
55054+
55055+ memset(r_tmp->subj_hash, 0,
55056+ r_tmp->subj_hash_size *
55057+ sizeof (struct acl_subject_label *));
55058+
55059+ /* acquire the list of subjects, then NULL out
55060+ the list prior to parsing the subjects for this role,
55061+ as during this parsing the list is replaced with a list
55062+ of *nested* subjects for the role
55063+ */
55064+ subj_list = r_tmp->hash->first;
55065+
55066+ /* set nested subject list to null */
55067+ r_tmp->hash->first = NULL;
55068+
55069+ err = copy_user_subjs(subj_list, r_tmp);
55070+
55071+ if (err)
55072+ return err;
55073+
55074+ insert_acl_role_label(r_tmp);
55075+ }
55076+
55077+ if (default_role == NULL || kernel_role == NULL)
55078+ return -EINVAL;
55079+
55080+ return err;
55081+}
55082+
55083+static int
55084+gracl_init(struct gr_arg *args)
55085+{
55086+ int error = 0;
55087+
55088+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
55089+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
55090+
55091+ if (init_variables(args)) {
55092+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
55093+ error = -ENOMEM;
55094+ free_variables();
55095+ goto out;
55096+ }
55097+
55098+ error = copy_user_acl(args);
55099+ free_init_variables();
55100+ if (error) {
55101+ free_variables();
55102+ goto out;
55103+ }
55104+
55105+ if ((error = gr_set_acls(0))) {
55106+ free_variables();
55107+ goto out;
55108+ }
55109+
55110+ pax_open_kernel();
55111+ gr_status |= GR_READY;
55112+ pax_close_kernel();
55113+
55114+ out:
55115+ return error;
55116+}
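+
+/* for orientation; the userspace half is an assumption based on gradm's
+   documented usage and is not shown in this hunk: gradm writes a struct
+   gr_arg describing the whole policy to /dev/grsec; after that write is
+   authenticated, gracl_init() copies the policy in, applies roles to
+   existing tasks via gr_set_acls(), and only then sets GR_READY, flipping
+   the hooks in this file from pass-through to enforcing */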
55117+
55118+/* derived from glibc fnmatch(); 0: match, 1: no match */
55119+
55120+static int
55121+glob_match(const char *p, const char *n)
55122+{
55123+ char c;
55124+
55125+ while ((c = *p++) != '\0') {
55126+ switch (c) {
55127+ case '?':
55128+ if (*n == '\0')
55129+ return 1;
55130+ else if (*n == '/')
55131+ return 1;
55132+ break;
55133+ case '\\':
55134+ if (*n != c)
55135+ return 1;
55136+ break;
55137+ case '*':
55138+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
55139+ if (*n == '/')
55140+ return 1;
55141+ else if (c == '?') {
55142+ if (*n == '\0')
55143+ return 1;
55144+ else
55145+ ++n;
55146+ }
55147+ }
55148+ if (c == '\0') {
55149+ return 0;
55150+ } else {
55151+ const char *endp;
55152+
55153+ if ((endp = strchr(n, '/')) == NULL)
55154+ endp = n + strlen(n);
55155+
55156+ if (c == '[') {
55157+ for (--p; n < endp; ++n)
55158+ if (!glob_match(p, n))
55159+ return 0;
55160+ } else if (c == '/') {
55161+ while (*n != '\0' && *n != '/')
55162+ ++n;
55163+ if (*n == '/' && !glob_match(p, n + 1))
55164+ return 0;
55165+ } else {
55166+ for (--p; n < endp; ++n)
55167+ if (*n == c && !glob_match(p, n))
55168+ return 0;
55169+ }
55170+
55171+ return 1;
55172+ }
55173+ case '[':
55174+ {
55175+ int not;
55176+ char cold;
55177+
55178+ if (*n == '\0' || *n == '/')
55179+ return 1;
55180+
55181+ not = (*p == '!' || *p == '^');
55182+ if (not)
55183+ ++p;
55184+
55185+ c = *p++;
55186+ for (;;) {
55187+ unsigned char fn = (unsigned char)*n;
55188+
55189+ if (c == '\0')
55190+ return 1;
55191+ else {
55192+ if (c == fn)
55193+ goto matched;
55194+ cold = c;
55195+ c = *p++;
55196+
55197+ if (c == '-' && *p != ']') {
55198+ unsigned char cend = *p++;
55199+
55200+ if (cend == '\0')
55201+ return 1;
55202+
55203+ if (cold <= fn && fn <= cend)
55204+ goto matched;
55205+
55206+ c = *p++;
55207+ }
55208+ }
55209+
55210+ if (c == ']')
55211+ break;
55212+ }
55213+ if (!not)
55214+ return 1;
55215+ break;
55216+ matched:
55217+ while (c != ']') {
55218+ if (c == '\0')
55219+ return 1;
55220+
55221+ c = *p++;
55222+ }
55223+ if (not)
55224+ return 1;
55225+ }
55226+ break;
55227+ default:
55228+ if (c != *n)
55229+ return 1;
55230+ }
55231+
55232+ ++n;
55233+ }
55234+
55235+ if (*n == '\0')
55236+ return 0;
55237+
55238+ if (*n == '/')
55239+ return 0;
55240+
55241+ return 1;
55242+}
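+
+/* illustrative expectations for the matcher above (0 == match):
+
+	glob_match("/tmp/*.log", "/tmp/a.log")    -> 0
+	glob_match("/tmp/*.log", "/tmp/a/b.log")  -> 1   an inner '*' stops at '/'
+	glob_match("/tmp/??.log", "/tmp/ab.log")  -> 0   '?' matches any char but '/'
+	glob_match("/home/*", "/home/u/v")        -> 0   a trailing '*' matches the
+	                                                 whole remainder
+*/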
55243+
55244+static struct acl_object_label *
55245+chk_glob_label(struct acl_object_label *globbed,
55246+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
55247+{
55248+ struct acl_object_label *tmp;
55249+
55250+ if (*path == NULL)
55251+ *path = gr_to_filename_nolock(dentry, mnt);
55252+
55253+ tmp = globbed;
55254+
55255+ while (tmp) {
55256+ if (!glob_match(tmp->filename, *path))
55257+ return tmp;
55258+ tmp = tmp->next;
55259+ }
55260+
55261+ return NULL;
55262+}
55263+
55264+static struct acl_object_label *
55265+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
55266+ const ino_t curr_ino, const dev_t curr_dev,
55267+ const struct acl_subject_label *subj, char **path, const int checkglob)
55268+{
55269+ struct acl_subject_label *tmpsubj;
55270+ struct acl_object_label *retval;
55271+ struct acl_object_label *retval2;
55272+
55273+ tmpsubj = (struct acl_subject_label *) subj;
55274+ read_lock(&gr_inode_lock);
55275+ do {
55276+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
55277+ if (retval) {
55278+ if (checkglob && retval->globbed) {
55279+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
55280+ if (retval2)
55281+ retval = retval2;
55282+ }
55283+ break;
55284+ }
55285+ } while ((tmpsubj = tmpsubj->parent_subject));
55286+ read_unlock(&gr_inode_lock);
55287+
55288+ return retval;
55289+}
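+
+/* note: the do/while above implements subject inheritance: when the current
+   subject has no object for this inode, each parent_subject is consulted in
+   turn, so nested subjects only need to carry the objects they override */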
55290+
55291+static __inline__ struct acl_object_label *
55292+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
55293+ struct dentry *curr_dentry,
55294+ const struct acl_subject_label *subj, char **path, const int checkglob)
55295+{
55296+ int newglob = checkglob;
55297+ ino_t inode;
55298+ dev_t device;
55299+
55300+	/* if we aren't checking a subdirectory of the original path yet, don't do
55301+	   glob checking, as we don't want a "/ *" rule to match instead of the "/"
55302+	   object itself.  create lookups that call this function are the exception:
55303+	   they look up on the parent and thus need globbing checks on all paths
55304+	*/
55305+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
55306+ newglob = GR_NO_GLOB;
55307+
55308+ spin_lock(&curr_dentry->d_lock);
55309+ inode = curr_dentry->d_inode->i_ino;
55310+ device = __get_dev(curr_dentry);
55311+ spin_unlock(&curr_dentry->d_lock);
55312+
55313+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
55314+}
55315+
55316+static struct acl_object_label *
55317+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55318+ const struct acl_subject_label *subj, char *path, const int checkglob)
55319+{
55320+ struct dentry *dentry = (struct dentry *) l_dentry;
55321+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
55322+ struct mount *real_mnt = real_mount(mnt);
55323+ struct acl_object_label *retval;
55324+ struct dentry *parent;
55325+
55326+ write_seqlock(&rename_lock);
55327+ br_read_lock(&vfsmount_lock);
55328+
55329+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
55330+#ifdef CONFIG_NET
55331+ mnt == sock_mnt ||
55332+#endif
55333+#ifdef CONFIG_HUGETLBFS
55334+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
55335+#endif
55336+ /* ignore Eric Biederman */
55337+ IS_PRIVATE(l_dentry->d_inode))) {
55338+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
55339+ goto out;
55340+ }
55341+
55342+ for (;;) {
55343+ if (dentry == real_root.dentry && mnt == real_root.mnt)
55344+ break;
55345+
55346+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
55347+ if (!mnt_has_parent(real_mnt))
55348+ break;
55349+
55350+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55351+ if (retval != NULL)
55352+ goto out;
55353+
55354+ dentry = real_mnt->mnt_mountpoint;
55355+ real_mnt = real_mnt->mnt_parent;
55356+ mnt = &real_mnt->mnt;
55357+ continue;
55358+ }
55359+
55360+ parent = dentry->d_parent;
55361+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55362+ if (retval != NULL)
55363+ goto out;
55364+
55365+ dentry = parent;
55366+ }
55367+
55368+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
55369+
55370+ /* real_root is pinned so we don't have to hold a reference */
55371+ if (retval == NULL)
55372+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
55373+out:
55374+ br_read_unlock(&vfsmount_lock);
55375+ write_sequnlock(&rename_lock);
55376+
55377+ BUG_ON(retval == NULL);
55378+
55379+ return retval;
55380+}
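+
+/* an illustrative sketch of the walk above: the nearest ancestor with a
+   label wins, so with objects defined for / and /home, a lookup of
+   /home/u/file checks /home/u/file, then /home/u, then /home (match);
+   the / object is consulted only when nothing closer matches */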
55381+
55382+static __inline__ struct acl_object_label *
55383+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55384+ const struct acl_subject_label *subj)
55385+{
55386+ char *path = NULL;
55387+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
55388+}
55389+
55390+static __inline__ struct acl_object_label *
55391+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55392+ const struct acl_subject_label *subj)
55393+{
55394+ char *path = NULL;
55395+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
55396+}
55397+
55398+static __inline__ struct acl_object_label *
55399+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55400+ const struct acl_subject_label *subj, char *path)
55401+{
55402+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
55403+}
55404+
55405+static struct acl_subject_label *
55406+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
55407+ const struct acl_role_label *role)
55408+{
55409+ struct dentry *dentry = (struct dentry *) l_dentry;
55410+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
55411+ struct mount *real_mnt = real_mount(mnt);
55412+ struct acl_subject_label *retval;
55413+ struct dentry *parent;
55414+
55415+ write_seqlock(&rename_lock);
55416+ br_read_lock(&vfsmount_lock);
55417+
55418+ for (;;) {
55419+ if (dentry == real_root.dentry && mnt == real_root.mnt)
55420+ break;
55421+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
55422+ if (!mnt_has_parent(real_mnt))
55423+ break;
55424+
55425+ spin_lock(&dentry->d_lock);
55426+ read_lock(&gr_inode_lock);
55427+ retval =
55428+ lookup_acl_subj_label(dentry->d_inode->i_ino,
55429+ __get_dev(dentry), role);
55430+ read_unlock(&gr_inode_lock);
55431+ spin_unlock(&dentry->d_lock);
55432+ if (retval != NULL)
55433+ goto out;
55434+
55435+ dentry = real_mnt->mnt_mountpoint;
55436+ real_mnt = real_mnt->mnt_parent;
55437+ mnt = &real_mnt->mnt;
55438+ continue;
55439+ }
55440+
55441+ spin_lock(&dentry->d_lock);
55442+ read_lock(&gr_inode_lock);
55443+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55444+ __get_dev(dentry), role);
55445+ read_unlock(&gr_inode_lock);
55446+ parent = dentry->d_parent;
55447+ spin_unlock(&dentry->d_lock);
55448+
55449+ if (retval != NULL)
55450+ goto out;
55451+
55452+ dentry = parent;
55453+ }
55454+
55455+ spin_lock(&dentry->d_lock);
55456+ read_lock(&gr_inode_lock);
55457+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
55458+ __get_dev(dentry), role);
55459+ read_unlock(&gr_inode_lock);
55460+ spin_unlock(&dentry->d_lock);
55461+
55462+ if (unlikely(retval == NULL)) {
55463+ /* real_root is pinned, we don't need to hold a reference */
55464+ read_lock(&gr_inode_lock);
55465+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
55466+ __get_dev(real_root.dentry), role);
55467+ read_unlock(&gr_inode_lock);
55468+ }
55469+out:
55470+ br_read_unlock(&vfsmount_lock);
55471+ write_sequnlock(&rename_lock);
55472+
55473+ BUG_ON(retval == NULL);
55474+
55475+ return retval;
55476+}
55477+
55478+static void
55479+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
55480+{
55481+ struct task_struct *task = current;
55482+ const struct cred *cred = current_cred();
55483+
55484+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
55485+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55486+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55487+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
55488+
55489+ return;
55490+}
55491+
55492+static void
55493+gr_log_learn_id_change(const char type, const unsigned int real,
55494+ const unsigned int effective, const unsigned int fs)
55495+{
55496+ struct task_struct *task = current;
55497+ const struct cred *cred = current_cred();
55498+
55499+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
55500+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
55501+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
55502+ type, real, effective, fs, &task->signal->saved_ip);
55503+
55504+ return;
55505+}
55506+
55507+__u32
55508+gr_search_file(const struct dentry * dentry, const __u32 mode,
55509+ const struct vfsmount * mnt)
55510+{
55511+ __u32 retval = mode;
55512+ struct acl_subject_label *curracl;
55513+ struct acl_object_label *currobj;
55514+
55515+ if (unlikely(!(gr_status & GR_READY)))
55516+ return (mode & ~GR_AUDITS);
55517+
55518+ curracl = current->acl;
55519+
55520+ currobj = chk_obj_label(dentry, mnt, curracl);
55521+ retval = currobj->mode & mode;
55522+
55523+ /* if we're opening a specified transfer file for writing
55524+ (e.g. /dev/initctl), then transfer our role to init
55525+ */
55526+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
55527+ current->role->roletype & GR_ROLE_PERSIST)) {
55528+ struct task_struct *task = init_pid_ns.child_reaper;
55529+
55530+ if (task->role != current->role) {
55531+ task->acl_sp_role = 0;
55532+ task->acl_role_id = current->acl_role_id;
55533+ task->role = current->role;
55534+ rcu_read_lock();
55535+ read_lock(&grsec_exec_file_lock);
55536+ gr_apply_subject_to_task(task);
55537+ read_unlock(&grsec_exec_file_lock);
55538+ rcu_read_unlock();
55539+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
55540+ }
55541+ }
55542+
55543+ if (unlikely
55544+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
55545+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
55546+ __u32 new_mode = mode;
55547+
55548+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55549+
55550+ retval = new_mode;
55551+
55552+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
55553+ new_mode |= GR_INHERIT;
55554+
55555+ if (!(mode & GR_NOLEARN))
55556+ gr_log_learn(dentry, mnt, new_mode);
55557+ }
55558+
55559+ return retval;
55560+}
55561+
55562+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
55563+ const struct dentry *parent,
55564+ const struct vfsmount *mnt)
55565+{
55566+ struct name_entry *match;
55567+ struct acl_object_label *matchpo;
55568+ struct acl_subject_label *curracl;
55569+ char *path;
55570+
55571+ if (unlikely(!(gr_status & GR_READY)))
55572+ return NULL;
55573+
55574+ preempt_disable();
55575+ path = gr_to_filename_rbac(new_dentry, mnt);
55576+ match = lookup_name_entry_create(path);
55577+
55578+ curracl = current->acl;
55579+
55580+ if (match) {
55581+ read_lock(&gr_inode_lock);
55582+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
55583+ read_unlock(&gr_inode_lock);
55584+
55585+ if (matchpo) {
55586+ preempt_enable();
55587+ return matchpo;
55588+ }
55589+ }
55590+
55591+	// no exact match in the name table: fall back to the parent's create label
55592+
55593+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
55594+
55595+ preempt_enable();
55596+ return matchpo;
55597+}
55598+
55599+__u32
55600+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
55601+ const struct vfsmount * mnt, const __u32 mode)
55602+{
55603+ struct acl_object_label *matchpo;
55604+ __u32 retval;
55605+
55606+ if (unlikely(!(gr_status & GR_READY)))
55607+ return (mode & ~GR_AUDITS);
55608+
55609+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
55610+
55611+ retval = matchpo->mode & mode;
55612+
55613+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
55614+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
55615+ __u32 new_mode = mode;
55616+
55617+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
55618+
55619+ gr_log_learn(new_dentry, mnt, new_mode);
55620+ return new_mode;
55621+ }
55622+
55623+ return retval;
55624+}
55625+
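+/* hardlink policy: the old name must already grant every permission the
+   new name would confer, restrictions/auditing on the old name must carry
+   over to the new name, and linking to suid/sgid/fcapped binaries
+   additionally requires GR_SETID */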
55626+__u32
55627+gr_check_link(const struct dentry * new_dentry,
55628+ const struct dentry * parent_dentry,
55629+ const struct vfsmount * parent_mnt,
55630+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
55631+{
55632+ struct acl_object_label *obj;
55633+ __u32 oldmode, newmode;
55634+ __u32 needmode;
55635+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
55636+ GR_DELETE | GR_INHERIT;
55637+
55638+ if (unlikely(!(gr_status & GR_READY)))
55639+ return (GR_CREATE | GR_LINK);
55640+
55641+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
55642+ oldmode = obj->mode;
55643+
55644+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
55645+ newmode = obj->mode;
55646+
55647+ needmode = newmode & checkmodes;
55648+
55649+ // old name for hardlink must have at least the permissions of the new name
55650+ if ((oldmode & needmode) != needmode)
55651+ goto bad;
55652+
55653+ // if old name had restrictions/auditing, make sure the new name does as well
55654+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
55655+
55656+ // don't allow hardlinking of suid/sgid/fcapped files without permission
55657+ if (is_privileged_binary(old_dentry))
55658+ needmode |= GR_SETID;
55659+
55660+ if ((newmode & needmode) != needmode)
55661+ goto bad;
55662+
55663+ // enforce minimum permissions
55664+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
55665+ return newmode;
55666+bad:
55667+ needmode = oldmode;
55668+ if (is_privileged_binary(old_dentry))
55669+ needmode |= GR_SETID;
55670+
55671+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
55672+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
55673+ return (GR_CREATE | GR_LINK);
55674+ } else if (newmode & GR_SUPPRESS)
55675+ return GR_SUPPRESS;
55676+ else
55677+ return 0;
55678+}
55679+
55680+int
55681+gr_check_hidden_task(const struct task_struct *task)
55682+{
55683+ if (unlikely(!(gr_status & GR_READY)))
55684+ return 0;
55685+
55686+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
55687+ return 1;
55688+
55689+ return 0;
55690+}
55691+
55692+int
55693+gr_check_protected_task(const struct task_struct *task)
55694+{
55695+ if (unlikely(!(gr_status & GR_READY) || !task))
55696+ return 0;
55697+
55698+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55699+ task->acl != current->acl)
55700+ return 1;
55701+
55702+ return 0;
55703+}
55704+
55705+int
55706+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
55707+{
55708+ struct task_struct *p;
55709+ int ret = 0;
55710+
55711+ if (unlikely(!(gr_status & GR_READY) || !pid))
55712+ return ret;
55713+
55714+ read_lock(&tasklist_lock);
55715+ do_each_pid_task(pid, type, p) {
55716+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
55717+ p->acl != current->acl) {
55718+ ret = 1;
55719+ goto out;
55720+ }
55721+ } while_each_pid_task(pid, type, p);
55722+out:
55723+ read_unlock(&tasklist_lock);
55724+
55725+ return ret;
55726+}
55727+
55728+void
55729+gr_copy_label(struct task_struct *tsk)
55730+{
55731+ tsk->signal->used_accept = 0;
55732+ tsk->acl_sp_role = 0;
55733+ tsk->acl_role_id = current->acl_role_id;
55734+ tsk->acl = current->acl;
55735+ tsk->role = current->role;
55736+ tsk->signal->curr_ip = current->signal->curr_ip;
55737+ tsk->signal->saved_ip = current->signal->saved_ip;
55738+ if (current->exec_file)
55739+ get_file(current->exec_file);
55740+ tsk->exec_file = current->exec_file;
55741+ tsk->is_writable = current->is_writable;
55742+ if (unlikely(current->signal->used_accept)) {
55743+ current->signal->curr_ip = 0;
55744+ current->signal->saved_ip = 0;
55745+ }
55746+
55747+ return;
55748+}
55749+
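+/* apply the subject's configured resource limits to the task; subjects in
+   learn mode are skipped so that actual resource usage can be observed
+   and recorded */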
55750+static void
55751+gr_set_proc_res(struct task_struct *task)
55752+{
55753+ struct acl_subject_label *proc;
55754+ unsigned short i;
55755+
55756+ proc = task->acl;
55757+
55758+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
55759+ return;
55760+
55761+ for (i = 0; i < RLIM_NLIMITS; i++) {
55762+ if (!(proc->resmask & (1 << i)))
55763+ continue;
55764+
55765+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
55766+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
55767+ }
55768+
55769+ return;
55770+}
55771+
55772+extern int __gr_process_user_ban(struct user_struct *user);
55773+
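+/* validate a uid transition for the setuid family of calls; an id of -1
+   means that id is not being changed.  Requested ids are checked against
+   the subject's allow or deny transition list, and banned users are
+   rejected outright. */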
55774+int
55775+gr_check_user_change(int real, int effective, int fs)
55776+{
55777+ unsigned int i;
55778+ __u16 num;
55779+ uid_t *uidlist;
55780+ int curuid;
55781+ int realok = 0;
55782+ int effectiveok = 0;
55783+ int fsok = 0;
55784+
55785+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55786+ struct user_struct *user;
55787+
55788+ if (real == -1)
55789+ goto skipit;
55790+
55791+ user = find_user(real);
55792+ if (user == NULL)
55793+ goto skipit;
55794+
55795+ if (__gr_process_user_ban(user)) {
55796+ /* for find_user */
55797+ free_uid(user);
55798+ return 1;
55799+ }
55800+
55801+ /* for find_user */
55802+ free_uid(user);
55803+
55804+skipit:
55805+#endif
55806+
55807+ if (unlikely(!(gr_status & GR_READY)))
55808+ return 0;
55809+
55810+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55811+ gr_log_learn_id_change('u', real, effective, fs);
55812+
55813+ num = current->acl->user_trans_num;
55814+ uidlist = current->acl->user_transitions;
55815+
55816+ if (uidlist == NULL)
55817+ return 0;
55818+
55819+ if (real == -1)
55820+ realok = 1;
55821+ if (effective == -1)
55822+ effectiveok = 1;
55823+ if (fs == -1)
55824+ fsok = 1;
55825+
55826+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
55827+ for (i = 0; i < num; i++) {
55828+ curuid = (int)uidlist[i];
55829+ if (real == curuid)
55830+ realok = 1;
55831+ if (effective == curuid)
55832+ effectiveok = 1;
55833+ if (fs == curuid)
55834+ fsok = 1;
55835+ }
55836+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
55837+ for (i = 0; i < num; i++) {
55838+ curuid = (int)uidlist[i];
55839+ if (real == curuid)
55840+ break;
55841+ if (effective == curuid)
55842+ break;
55843+ if (fs == curuid)
55844+ break;
55845+ }
55846+ /* not in deny list */
55847+ if (i == num) {
55848+ realok = 1;
55849+ effectiveok = 1;
55850+ fsok = 1;
55851+ }
55852+ }
55853+
55854+ if (realok && effectiveok && fsok)
55855+ return 0;
55856+ else {
55857+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55858+ return 1;
55859+ }
55860+}
55861+
55862+int
55863+gr_check_group_change(int real, int effective, int fs)
55864+{
55865+ unsigned int i;
55866+ __u16 num;
55867+ gid_t *gidlist;
55868+ int curgid;
55869+ int realok = 0;
55870+ int effectiveok = 0;
55871+ int fsok = 0;
55872+
55873+ if (unlikely(!(gr_status & GR_READY)))
55874+ return 0;
55875+
55876+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55877+ gr_log_learn_id_change('g', real, effective, fs);
55878+
55879+ num = current->acl->group_trans_num;
55880+ gidlist = current->acl->group_transitions;
55881+
55882+ if (gidlist == NULL)
55883+ return 0;
55884+
55885+ if (real == -1)
55886+ realok = 1;
55887+ if (effective == -1)
55888+ effectiveok = 1;
55889+ if (fs == -1)
55890+ fsok = 1;
55891+
55892+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
55893+ for (i = 0; i < num; i++) {
55894+ curgid = (int)gidlist[i];
55895+ if (real == curgid)
55896+ realok = 1;
55897+ if (effective == curgid)
55898+ effectiveok = 1;
55899+ if (fs == curgid)
55900+ fsok = 1;
55901+ }
55902+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
55903+ for (i = 0; i < num; i++) {
55904+ curgid = (int)gidlist[i];
55905+ if (real == curgid)
55906+ break;
55907+ if (effective == curgid)
55908+ break;
55909+ if (fs == curgid)
55910+ break;
55911+ }
55912+ /* not in deny list */
55913+ if (i == num) {
55914+ realok = 1;
55915+ effectiveok = 1;
55916+ fsok = 1;
55917+ }
55918+ }
55919+
55920+ if (realok && effectiveok && fsok)
55921+ return 0;
55922+ else {
55923+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
55924+ return 1;
55925+ }
55926+}
55927+
55928+extern int gr_acl_is_capable(const int cap);
55929+
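+/* re-evaluate a task's role and subject after a uid/gid change: kernel
+   threads always receive the kernel role, and moving into a user or group
+   role requires CAP_SETUID or CAP_SETGID respectively */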
55930+void
55931+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
55932+{
55933+ struct acl_role_label *role = task->role;
55934+ struct acl_subject_label *subj = NULL;
55935+ struct acl_object_label *obj;
55936+ struct file *filp;
55937+
55938+ if (unlikely(!(gr_status & GR_READY)))
55939+ return;
55940+
55941+ filp = task->exec_file;
55942+
55943+ /* kernel process, we'll give them the kernel role */
55944+ if (unlikely(!filp)) {
55945+ task->role = kernel_role;
55946+ task->acl = kernel_role->root_label;
55947+ return;
55948+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
55949+ role = lookup_acl_role_label(task, uid, gid);
55950+
55951+ /* don't change the role if we're not a privileged process */
55952+ if (role && task->role != role &&
55953+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
55954+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
55955+ return;
55956+
55957+	/* perform subject lookup in the possibly new role;
55958+	   we can use this result below in the case where role == task->role
55959+	 */
55960+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
55961+
55962+	/* if we changed uid/gid but ended up in the same role
55963+	   and are using inheritance, don't lose the inherited subject:
55964+	   if the current subject is other than what a normal lookup
55965+	   would produce, we arrived at it via inheritance, so don't
55966+	   lose that subject
55967+	 */
55968+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
55969+ (subj == task->acl)))
55970+ task->acl = subj;
55971+
55972+ task->role = role;
55973+
55974+ task->is_writable = 0;
55975+
55976+ /* ignore additional mmap checks for processes that are writable
55977+ by the default ACL */
55978+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55979+ if (unlikely(obj->mode & GR_WRITE))
55980+ task->is_writable = 1;
55981+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
55982+ if (unlikely(obj->mode & GR_WRITE))
55983+ task->is_writable = 1;
55984+
55985+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
55986+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
55987+#endif
55988+
55989+ gr_set_proc_res(task);
55990+
55991+ return;
55992+}
55993+
55994+int
55995+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
55996+ const int unsafe_flags)
55997+{
55998+ struct task_struct *task = current;
55999+ struct acl_subject_label *newacl;
56000+ struct acl_object_label *obj;
56001+ __u32 retmode;
56002+
56003+ if (unlikely(!(gr_status & GR_READY)))
56004+ return 0;
56005+
56006+ newacl = chk_subj_label(dentry, mnt, task->role);
56007+
56008+	/* special handling for the case where we did an strace -f -p <pid>
56009+	   from an admin role, and the traced pid then did an exec
56010+	 */
56011+ rcu_read_lock();
56012+ read_lock(&tasklist_lock);
56013+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
56014+ (task->parent->acl->mode & GR_POVERRIDE))) {
56015+ read_unlock(&tasklist_lock);
56016+ rcu_read_unlock();
56017+ goto skip_check;
56018+ }
56019+ read_unlock(&tasklist_lock);
56020+ rcu_read_unlock();
56021+
56022+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
56023+ !(task->role->roletype & GR_ROLE_GOD) &&
56024+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
56025+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
56026+ if (unsafe_flags & LSM_UNSAFE_SHARE)
56027+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
56028+ else
56029+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
56030+ return -EACCES;
56031+ }
56032+
56033+skip_check:
56034+
56035+ obj = chk_obj_label(dentry, mnt, task->acl);
56036+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
56037+
56038+ if (!(task->acl->mode & GR_INHERITLEARN) &&
56039+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
56040+ if (obj->nested)
56041+ task->acl = obj->nested;
56042+ else
56043+ task->acl = newacl;
56044+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
56045+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
56046+
56047+ task->is_writable = 0;
56048+
56049+ /* ignore additional mmap checks for processes that are writable
56050+ by the default ACL */
56051+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
56052+ if (unlikely(obj->mode & GR_WRITE))
56053+ task->is_writable = 1;
56054+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
56055+ if (unlikely(obj->mode & GR_WRITE))
56056+ task->is_writable = 1;
56057+
56058+ gr_set_proc_res(task);
56059+
56060+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56061+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
56062+#endif
56063+ return 0;
56064+}
56065+
56066+/* always called with valid inodev ptr */
56067+static void
56068+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
56069+{
56070+ struct acl_object_label *matchpo;
56071+ struct acl_subject_label *matchps;
56072+ struct acl_subject_label *subj;
56073+ struct acl_role_label *role;
56074+ unsigned int x;
56075+
56076+ FOR_EACH_ROLE_START(role)
56077+ FOR_EACH_SUBJECT_START(role, subj, x)
56078+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
56079+ matchpo->mode |= GR_DELETED;
56080+ FOR_EACH_SUBJECT_END(subj,x)
56081+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
56082+ /* nested subjects aren't in the role's subj_hash table */
56083+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
56084+ matchpo->mode |= GR_DELETED;
56085+ FOR_EACH_NESTED_SUBJECT_END(subj)
56086+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
56087+ matchps->mode |= GR_DELETED;
56088+ FOR_EACH_ROLE_END(role)
56089+
56090+ inodev->nentry->deleted = 1;
56091+
56092+ return;
56093+}
56094+
56095+void
56096+gr_handle_delete(const ino_t ino, const dev_t dev)
56097+{
56098+ struct inodev_entry *inodev;
56099+
56100+ if (unlikely(!(gr_status & GR_READY)))
56101+ return;
56102+
56103+ write_lock(&gr_inode_lock);
56104+ inodev = lookup_inodev_entry(ino, dev);
56105+ if (inodev != NULL)
56106+ do_handle_delete(inodev, ino, dev);
56107+ write_unlock(&gr_inode_lock);
56108+
56109+ return;
56110+}
56111+
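+/* rekeying helpers for recreated paths: locate the GR_DELETED entry
+   hashed under the old inode/dev, unlink it from its chain, rekey it to
+   the new inode/dev, clear the deleted state, and reinsert it */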
56112+static void
56113+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
56114+ const ino_t newinode, const dev_t newdevice,
56115+ struct acl_subject_label *subj)
56116+{
56117+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
56118+ struct acl_object_label *match;
56119+
56120+ match = subj->obj_hash[index];
56121+
56122+ while (match && (match->inode != oldinode ||
56123+ match->device != olddevice ||
56124+ !(match->mode & GR_DELETED)))
56125+ match = match->next;
56126+
56127+ if (match && (match->inode == oldinode)
56128+ && (match->device == olddevice)
56129+ && (match->mode & GR_DELETED)) {
56130+ if (match->prev == NULL) {
56131+ subj->obj_hash[index] = match->next;
56132+ if (match->next != NULL)
56133+ match->next->prev = NULL;
56134+ } else {
56135+ match->prev->next = match->next;
56136+ if (match->next != NULL)
56137+ match->next->prev = match->prev;
56138+ }
56139+ match->prev = NULL;
56140+ match->next = NULL;
56141+ match->inode = newinode;
56142+ match->device = newdevice;
56143+ match->mode &= ~GR_DELETED;
56144+
56145+ insert_acl_obj_label(match, subj);
56146+ }
56147+
56148+ return;
56149+}
56150+
56151+static void
56152+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
56153+ const ino_t newinode, const dev_t newdevice,
56154+ struct acl_role_label *role)
56155+{
56156+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
56157+ struct acl_subject_label *match;
56158+
56159+ match = role->subj_hash[index];
56160+
56161+ while (match && (match->inode != oldinode ||
56162+ match->device != olddevice ||
56163+ !(match->mode & GR_DELETED)))
56164+ match = match->next;
56165+
56166+ if (match && (match->inode == oldinode)
56167+ && (match->device == olddevice)
56168+ && (match->mode & GR_DELETED)) {
56169+ if (match->prev == NULL) {
56170+ role->subj_hash[index] = match->next;
56171+ if (match->next != NULL)
56172+ match->next->prev = NULL;
56173+ } else {
56174+ match->prev->next = match->next;
56175+ if (match->next != NULL)
56176+ match->next->prev = match->prev;
56177+ }
56178+ match->prev = NULL;
56179+ match->next = NULL;
56180+ match->inode = newinode;
56181+ match->device = newdevice;
56182+ match->mode &= ~GR_DELETED;
56183+
56184+ insert_acl_subj_label(match, role);
56185+ }
56186+
56187+ return;
56188+}
56189+
56190+static void
56191+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
56192+ const ino_t newinode, const dev_t newdevice)
56193+{
56194+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
56195+ struct inodev_entry *match;
56196+
56197+ match = inodev_set.i_hash[index];
56198+
56199+ while (match && (match->nentry->inode != oldinode ||
56200+ match->nentry->device != olddevice || !match->nentry->deleted))
56201+ match = match->next;
56202+
56203+ if (match && (match->nentry->inode == oldinode)
56204+ && (match->nentry->device == olddevice) &&
56205+ match->nentry->deleted) {
56206+ if (match->prev == NULL) {
56207+ inodev_set.i_hash[index] = match->next;
56208+ if (match->next != NULL)
56209+ match->next->prev = NULL;
56210+ } else {
56211+ match->prev->next = match->next;
56212+ if (match->next != NULL)
56213+ match->next->prev = match->prev;
56214+ }
56215+ match->prev = NULL;
56216+ match->next = NULL;
56217+ match->nentry->inode = newinode;
56218+ match->nentry->device = newdevice;
56219+ match->nentry->deleted = 0;
56220+
56221+ insert_inodev_entry(match);
56222+ }
56223+
56224+ return;
56225+}
56226+
56227+static void
56228+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
56229+{
56230+ struct acl_subject_label *subj;
56231+ struct acl_role_label *role;
56232+ unsigned int x;
56233+
56234+ FOR_EACH_ROLE_START(role)
56235+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
56236+
56237+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
56238+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
56239+				subj->inode = ino;
56240+				subj->device = dev;
56241+			}
56242+ /* nested subjects aren't in the role's subj_hash table */
56243+ update_acl_obj_label(matchn->inode, matchn->device,
56244+ ino, dev, subj);
56245+ FOR_EACH_NESTED_SUBJECT_END(subj)
56246+ FOR_EACH_SUBJECT_START(role, subj, x)
56247+ update_acl_obj_label(matchn->inode, matchn->device,
56248+ ino, dev, subj);
56249+ FOR_EACH_SUBJECT_END(subj,x)
56250+ FOR_EACH_ROLE_END(role)
56251+
56252+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
56253+
56254+ return;
56255+}
56256+
56257+static void
56258+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
56259+ const struct vfsmount *mnt)
56260+{
56261+ ino_t ino = dentry->d_inode->i_ino;
56262+ dev_t dev = __get_dev(dentry);
56263+
56264+ __do_handle_create(matchn, ino, dev);
56265+
56266+ return;
56267+}
56268+
56269+void
56270+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56271+{
56272+ struct name_entry *matchn;
56273+
56274+ if (unlikely(!(gr_status & GR_READY)))
56275+ return;
56276+
56277+ preempt_disable();
56278+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
56279+
56280+ if (unlikely((unsigned long)matchn)) {
56281+ write_lock(&gr_inode_lock);
56282+ do_handle_create(matchn, dentry, mnt);
56283+ write_unlock(&gr_inode_lock);
56284+ }
56285+ preempt_enable();
56286+
56287+ return;
56288+}
56289+
56290+void
56291+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56292+{
56293+ struct name_entry *matchn;
56294+
56295+ if (unlikely(!(gr_status & GR_READY)))
56296+ return;
56297+
56298+ preempt_disable();
56299+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
56300+
56301+ if (unlikely((unsigned long)matchn)) {
56302+ write_lock(&gr_inode_lock);
56303+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
56304+ write_unlock(&gr_inode_lock);
56305+ }
56306+ preempt_enable();
56307+
56308+ return;
56309+}
56310+
56311+void
56312+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56313+ struct dentry *old_dentry,
56314+ struct dentry *new_dentry,
56315+ struct vfsmount *mnt, const __u8 replace)
56316+{
56317+ struct name_entry *matchn;
56318+ struct inodev_entry *inodev;
56319+ struct inode *inode = new_dentry->d_inode;
56320+ ino_t old_ino = old_dentry->d_inode->i_ino;
56321+ dev_t old_dev = __get_dev(old_dentry);
56322+
56323+	/* vfs_rename swaps the name and parent link for old_dentry and
56324+	   new_dentry:
56325+	   at this point, old_dentry has the new name, parent link, and
56326+	   inode for the renamed file;
56327+	   if a file is being replaced by the rename, new_dentry has the
56328+	   inode and name of the replaced file
56329+	 */
56330+
56331+ if (unlikely(!(gr_status & GR_READY)))
56332+ return;
56333+
56334+ preempt_disable();
56335+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
56336+
56337+ /* we wouldn't have to check d_inode if it weren't for
56338+ NFS silly-renaming
56339+ */
56340+
56341+ write_lock(&gr_inode_lock);
56342+ if (unlikely(replace && inode)) {
56343+ ino_t new_ino = inode->i_ino;
56344+ dev_t new_dev = __get_dev(new_dentry);
56345+
56346+ inodev = lookup_inodev_entry(new_ino, new_dev);
56347+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
56348+ do_handle_delete(inodev, new_ino, new_dev);
56349+ }
56350+
56351+ inodev = lookup_inodev_entry(old_ino, old_dev);
56352+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
56353+ do_handle_delete(inodev, old_ino, old_dev);
56354+
56355+ if (unlikely((unsigned long)matchn))
56356+ do_handle_create(matchn, old_dentry, mnt);
56357+
56358+ write_unlock(&gr_inode_lock);
56359+ preempt_enable();
56360+
56361+ return;
56362+}
56363+
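+/* authorize a transition to a special role: the role must appear in the
+   current role's transition table and, if it restricts source IPs, the
+   request must originate from an allowed address.  NOPW and PAM roles
+   return a NULL salt/sum, signalling that no stored password is to be
+   checked. */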
56364+static int
56365+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
56366+ unsigned char **sum)
56367+{
56368+ struct acl_role_label *r;
56369+ struct role_allowed_ip *ipp;
56370+ struct role_transition *trans;
56371+ unsigned int i;
56372+ int found = 0;
56373+ u32 curr_ip = current->signal->curr_ip;
56374+
56375+ current->signal->saved_ip = curr_ip;
56376+
56377+ /* check transition table */
56378+
56379+ for (trans = current->role->transitions; trans; trans = trans->next) {
56380+ if (!strcmp(rolename, trans->rolename)) {
56381+ found = 1;
56382+ break;
56383+ }
56384+ }
56385+
56386+ if (!found)
56387+ return 0;
56388+
56389+ /* handle special roles that do not require authentication
56390+ and check ip */
56391+
56392+ FOR_EACH_ROLE_START(r)
56393+ if (!strcmp(rolename, r->rolename) &&
56394+ (r->roletype & GR_ROLE_SPECIAL)) {
56395+ found = 0;
56396+ if (r->allowed_ips != NULL) {
56397+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
56398+ if ((ntohl(curr_ip) & ipp->netmask) ==
56399+ (ntohl(ipp->addr) & ipp->netmask))
56400+ found = 1;
56401+ }
56402+ } else
56403+ found = 2;
56404+ if (!found)
56405+ return 0;
56406+
56407+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
56408+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
56409+ *salt = NULL;
56410+ *sum = NULL;
56411+ return 1;
56412+ }
56413+ }
56414+ FOR_EACH_ROLE_END(r)
56415+
56416+ for (i = 0; i < num_sprole_pws; i++) {
56417+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
56418+ *salt = acl_special_roles[i]->salt;
56419+ *sum = acl_special_roles[i]->sum;
56420+ return 1;
56421+ }
56422+ }
56423+
56424+ return 0;
56425+}
56426+
56427+static void
56428+assign_special_role(char *rolename)
56429+{
56430+ struct acl_object_label *obj;
56431+ struct acl_role_label *r;
56432+ struct acl_role_label *assigned = NULL;
56433+ struct task_struct *tsk;
56434+ struct file *filp;
56435+
56436+ FOR_EACH_ROLE_START(r)
56437+ if (!strcmp(rolename, r->rolename) &&
56438+ (r->roletype & GR_ROLE_SPECIAL)) {
56439+ assigned = r;
56440+ break;
56441+ }
56442+ FOR_EACH_ROLE_END(r)
56443+
56444+ if (!assigned)
56445+ return;
56446+
56447+ read_lock(&tasklist_lock);
56448+ read_lock(&grsec_exec_file_lock);
56449+
56450+ tsk = current->real_parent;
56451+ if (tsk == NULL)
56452+ goto out_unlock;
56453+
56454+ filp = tsk->exec_file;
56455+ if (filp == NULL)
56456+ goto out_unlock;
56457+
56458+ tsk->is_writable = 0;
56459+
56460+ tsk->acl_sp_role = 1;
56461+ tsk->acl_role_id = ++acl_sp_role_value;
56462+ tsk->role = assigned;
56463+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
56464+
56465+ /* ignore additional mmap checks for processes that are writable
56466+ by the default ACL */
56467+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56468+ if (unlikely(obj->mode & GR_WRITE))
56469+ tsk->is_writable = 1;
56470+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
56471+ if (unlikely(obj->mode & GR_WRITE))
56472+ tsk->is_writable = 1;
56473+
56474+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56475+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
56476+#endif
56477+
56478+out_unlock:
56479+ read_unlock(&grsec_exec_file_lock);
56480+ read_unlock(&tasklist_lock);
56481+ return;
56482+}
56483+
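+/* scan all processes for an open fd referring to the same tty character
+   device as the requesting task; if a process outside the requester's
+   ancestry (and not sharing its tty) holds that device open, log the
+   possible tty sniffing and fail the check */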
56484+int gr_check_secure_terminal(struct task_struct *task)
56485+{
56486+ struct task_struct *p, *p2, *p3;
56487+ struct files_struct *files;
56488+ struct fdtable *fdt;
56489+ struct file *our_file = NULL, *file;
56490+ int i;
56491+
56492+ if (task->signal->tty == NULL)
56493+ return 1;
56494+
56495+ files = get_files_struct(task);
56496+ if (files != NULL) {
56497+ rcu_read_lock();
56498+ fdt = files_fdtable(files);
56499+ for (i=0; i < fdt->max_fds; i++) {
56500+ file = fcheck_files(files, i);
56501+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
56502+ get_file(file);
56503+ our_file = file;
56504+ }
56505+ }
56506+ rcu_read_unlock();
56507+ put_files_struct(files);
56508+ }
56509+
56510+ if (our_file == NULL)
56511+ return 1;
56512+
56513+ read_lock(&tasklist_lock);
56514+ do_each_thread(p2, p) {
56515+ files = get_files_struct(p);
56516+ if (files == NULL ||
56517+ (p->signal && p->signal->tty == task->signal->tty)) {
56518+ if (files != NULL)
56519+ put_files_struct(files);
56520+ continue;
56521+ }
56522+ rcu_read_lock();
56523+ fdt = files_fdtable(files);
56524+ for (i=0; i < fdt->max_fds; i++) {
56525+ file = fcheck_files(files, i);
56526+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
56527+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
56528+ p3 = task;
56529+ while (p3->pid > 0) {
56530+ if (p3 == p)
56531+ break;
56532+ p3 = p3->real_parent;
56533+ }
56534+ if (p3 == p)
56535+ break;
56536+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
56537+ gr_handle_alertkill(p);
56538+ rcu_read_unlock();
56539+ put_files_struct(files);
56540+ read_unlock(&tasklist_lock);
56541+ fput(our_file);
56542+ return 0;
56543+ }
56544+ }
56545+ rcu_read_unlock();
56546+ put_files_struct(files);
56547+ } while_each_thread(p2, p);
56548+ read_unlock(&tasklist_lock);
56549+
56550+ fput(our_file);
56551+ return 1;
56552+}
56553+
56554+static int gr_rbac_disable(void *unused)
56555+{
56556+ pax_open_kernel();
56557+ gr_status &= ~GR_READY;
56558+ pax_close_kernel();
56559+
56560+ return 0;
56561+}
56562+
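+/* control channel written by the RBAC admin tool (gradm): a fixed-size
+   struct gr_arg_wrapper selects the operation (status, enable, reload,
+   shutdown, segvmod, special-role handling); repeated authentication
+   failures are rate-limited via gr_auth_expires below */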
56563+ssize_t
56564+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
56565+{
56566+ struct gr_arg_wrapper uwrap;
56567+ unsigned char *sprole_salt = NULL;
56568+ unsigned char *sprole_sum = NULL;
56569+ int error = sizeof (struct gr_arg_wrapper);
56570+ int error2 = 0;
56571+
56572+ mutex_lock(&gr_dev_mutex);
56573+
56574+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
56575+ error = -EPERM;
56576+ goto out;
56577+ }
56578+
56579+ if (count != sizeof (struct gr_arg_wrapper)) {
56580+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
56581+ error = -EINVAL;
56582+ goto out;
56583+ }
56584+
56586+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
56587+ gr_auth_expires = 0;
56588+ gr_auth_attempts = 0;
56589+ }
56590+
56591+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
56592+ error = -EFAULT;
56593+ goto out;
56594+ }
56595+
56596+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
56597+ error = -EINVAL;
56598+ goto out;
56599+ }
56600+
56601+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
56602+ error = -EFAULT;
56603+ goto out;
56604+ }
56605+
56606+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56607+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56608+ time_after(gr_auth_expires, get_seconds())) {
56609+ error = -EBUSY;
56610+ goto out;
56611+ }
56612+
56613+	/* if a non-root user is trying to do anything other than use a
56614+	   special role, do not attempt authentication and do not count
56615+	   the attempt toward authentication lockout
56616+	 */
56617+
56618+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
56619+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
56620+ !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
56621+ error = -EPERM;
56622+ goto out;
56623+ }
56624+
56625+ /* ensure pw and special role name are null terminated */
56626+
56627+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
56628+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
56629+
56630+	/* Okay.
56631+	 * We now have enough of the argument structure (we have yet
56632+	 * to copy_from_user the tables themselves).  Copy the tables
56633+	 * only if we need them, i.e. for loading operations. */
56634+
56635+ switch (gr_usermode->mode) {
56636+ case GR_STATUS:
56637+ if (gr_status & GR_READY) {
56638+ error = 1;
56639+ if (!gr_check_secure_terminal(current))
56640+ error = 3;
56641+ } else
56642+ error = 2;
56643+ goto out;
56644+ case GR_SHUTDOWN:
56645+ if ((gr_status & GR_READY)
56646+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56647+ stop_machine(gr_rbac_disable, NULL, NULL);
56648+ free_variables();
56649+ memset(gr_usermode, 0, sizeof (struct gr_arg));
56650+ memset(gr_system_salt, 0, GR_SALT_LEN);
56651+ memset(gr_system_sum, 0, GR_SHA_LEN);
56652+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
56653+ } else if (gr_status & GR_READY) {
56654+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
56655+ error = -EPERM;
56656+ } else {
56657+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
56658+ error = -EAGAIN;
56659+ }
56660+ break;
56661+ case GR_ENABLE:
56662+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
56663+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
56664+ else {
56665+ if (gr_status & GR_READY)
56666+ error = -EAGAIN;
56667+ else
56668+ error = error2;
56669+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
56670+ }
56671+ break;
56672+ case GR_RELOAD:
56673+ if (!(gr_status & GR_READY)) {
56674+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
56675+ error = -EAGAIN;
56676+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56677+ stop_machine(gr_rbac_disable, NULL, NULL);
56678+ free_variables();
56679+ error2 = gracl_init(gr_usermode);
56680+ if (!error2)
56681+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
56682+ else {
56683+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56684+ error = error2;
56685+ }
56686+ } else {
56687+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
56688+ error = -EPERM;
56689+ }
56690+ break;
56691+ case GR_SEGVMOD:
56692+ if (unlikely(!(gr_status & GR_READY))) {
56693+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
56694+ error = -EAGAIN;
56695+ break;
56696+ }
56697+
56698+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
56699+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
56700+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
56701+ struct acl_subject_label *segvacl;
56702+ segvacl =
56703+ lookup_acl_subj_label(gr_usermode->segv_inode,
56704+ gr_usermode->segv_device,
56705+ current->role);
56706+ if (segvacl) {
56707+ segvacl->crashes = 0;
56708+ segvacl->expires = 0;
56709+ }
56710+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
56711+ gr_remove_uid(gr_usermode->segv_uid);
56712+ }
56713+ } else {
56714+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
56715+ error = -EPERM;
56716+ }
56717+ break;
56718+ case GR_SPROLE:
56719+ case GR_SPROLEPAM:
56720+ if (unlikely(!(gr_status & GR_READY))) {
56721+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
56722+ error = -EAGAIN;
56723+ break;
56724+ }
56725+
56726+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
56727+ current->role->expires = 0;
56728+ current->role->auth_attempts = 0;
56729+ }
56730+
56731+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
56732+ time_after(current->role->expires, get_seconds())) {
56733+ error = -EBUSY;
56734+ goto out;
56735+ }
56736+
56737+ if (lookup_special_role_auth
56738+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
56739+ && ((!sprole_salt && !sprole_sum)
56740+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
56741+ char *p = "";
56742+ assign_special_role(gr_usermode->sp_role);
56743+ read_lock(&tasklist_lock);
56744+ if (current->real_parent)
56745+ p = current->real_parent->role->rolename;
56746+ read_unlock(&tasklist_lock);
56747+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
56748+ p, acl_sp_role_value);
56749+ } else {
56750+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
56751+ error = -EPERM;
56752+			if (!(current->role->auth_attempts++))
56753+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56754+
56755+ goto out;
56756+ }
56757+ break;
56758+ case GR_UNSPROLE:
56759+ if (unlikely(!(gr_status & GR_READY))) {
56760+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
56761+ error = -EAGAIN;
56762+ break;
56763+ }
56764+
56765+ if (current->role->roletype & GR_ROLE_SPECIAL) {
56766+ char *p = "";
56767+ int i = 0;
56768+
56769+ read_lock(&tasklist_lock);
56770+ if (current->real_parent) {
56771+ p = current->real_parent->role->rolename;
56772+ i = current->real_parent->acl_role_id;
56773+ }
56774+ read_unlock(&tasklist_lock);
56775+
56776+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
56777+ gr_set_acls(1);
56778+ } else {
56779+ error = -EPERM;
56780+ goto out;
56781+ }
56782+ break;
56783+ default:
56784+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
56785+ error = -EINVAL;
56786+ break;
56787+ }
56788+
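+	/* only authentication failures (-EPERM) count toward the lockout timer */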
56789+ if (error != -EPERM)
56790+ goto out;
56791+
56792+	if (!(gr_auth_attempts++))
56793+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
56794+
56795+ out:
56796+ mutex_unlock(&gr_dev_mutex);
56797+ return error;
56798+}
56799+
56800+/* must be called with
56801+ rcu_read_lock();
56802+ read_lock(&tasklist_lock);
56803+ read_lock(&grsec_exec_file_lock);
56804+*/
56805+int gr_apply_subject_to_task(struct task_struct *task)
56806+{
56807+ struct acl_object_label *obj;
56808+ char *tmpname;
56809+ struct acl_subject_label *tmpsubj;
56810+ struct file *filp;
56811+ struct name_entry *nmatch;
56812+
56813+ filp = task->exec_file;
56814+ if (filp == NULL)
56815+ return 0;
56816+
56817+	/* the following applies the correct subject to binaries
56818+	   that were already running when the RBAC system was
56819+	   enabled and that have been replaced or deleted
56820+	   since their execution
56821+	   -----
56822+	   when the RBAC system starts, the inode/dev taken
56823+	   from exec_file may be one the RBAC system is
56824+	   unaware of: it only knows the inode/dev of the
56825+	   file currently present on disk, or the absence
56826+	   of it.
56827+	 */
56828+ preempt_disable();
56829+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
56830+
56831+ nmatch = lookup_name_entry(tmpname);
56832+ preempt_enable();
56833+ tmpsubj = NULL;
56834+ if (nmatch) {
56835+ if (nmatch->deleted)
56836+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
56837+ else
56838+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
56839+ if (tmpsubj != NULL)
56840+ task->acl = tmpsubj;
56841+ }
56842+ if (tmpsubj == NULL)
56843+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
56844+ task->role);
56845+ if (task->acl) {
56846+ task->is_writable = 0;
56847+ /* ignore additional mmap checks for processes that are writable
56848+ by the default ACL */
56849+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
56850+ if (unlikely(obj->mode & GR_WRITE))
56851+ task->is_writable = 1;
56852+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
56853+ if (unlikely(obj->mode & GR_WRITE))
56854+ task->is_writable = 1;
56855+
56856+ gr_set_proc_res(task);
56857+
56858+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56859+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
56860+#endif
56861+ } else {
56862+ return 1;
56863+ }
56864+
56865+ return 0;
56866+}
56867+
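+/* walk every thread and recompute its role and subject; when type is
+   nonzero (exit from a special role) only tasks still carrying the
+   current role and role id are updated */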
56868+int
56869+gr_set_acls(const int type)
56870+{
56871+ struct task_struct *task, *task2;
56872+ struct acl_role_label *role = current->role;
56873+ __u16 acl_role_id = current->acl_role_id;
56874+ const struct cred *cred;
56875+ int ret;
56876+
56877+ rcu_read_lock();
56878+ read_lock(&tasklist_lock);
56879+ read_lock(&grsec_exec_file_lock);
56880+ do_each_thread(task2, task) {
56881+ /* check to see if we're called from the exit handler,
56882+ if so, only replace ACLs that have inherited the admin
56883+ ACL */
56884+
56885+ if (type && (task->role != role ||
56886+ task->acl_role_id != acl_role_id))
56887+ continue;
56888+
56889+ task->acl_role_id = 0;
56890+ task->acl_sp_role = 0;
56891+
56892+ if (task->exec_file) {
56893+ cred = __task_cred(task);
56894+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
56895+ ret = gr_apply_subject_to_task(task);
56896+ if (ret) {
56897+ read_unlock(&grsec_exec_file_lock);
56898+ read_unlock(&tasklist_lock);
56899+ rcu_read_unlock();
56900+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
56901+ return ret;
56902+ }
56903+ } else {
56904+ // it's a kernel process
56905+ task->role = kernel_role;
56906+ task->acl = kernel_role->root_label;
56907+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
56908+ task->acl->mode &= ~GR_PROCFIND;
56909+#endif
56910+ }
56911+ } while_each_thread(task2, task);
56912+ read_unlock(&grsec_exec_file_lock);
56913+ read_unlock(&tasklist_lock);
56914+ rcu_read_unlock();
56915+
56916+ return 0;
56917+}
56918+
56919+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
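+/* slack added above an observed resource value when bumping a learned
+   limit, so the generated policy leaves headroom over peak usage */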
56920+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
56921+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
56922+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
56923+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
56924+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
56925+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
56926+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
56927+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
56928+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
56929+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
56930+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
56931+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
56932+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
56933+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
56934+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
56935+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
56936+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
56937+};
56938+
56939+void
56940+gr_learn_resource(const struct task_struct *task,
56941+ const int res, const unsigned long wanted, const int gt)
56942+{
56943+ struct acl_subject_label *acl;
56944+ const struct cred *cred;
56945+
56946+ if (unlikely((gr_status & GR_READY) &&
56947+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
56948+ goto skip_reslog;
56949+
56950+ gr_log_resource(task, res, wanted, gt);
56951+skip_reslog:
56952+
56953+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
56954+ return;
56955+
56956+ acl = task->acl;
56957+
56958+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
56959+ !(acl->resmask & (1 << (unsigned short) res))))
56960+ return;
56961+
56962+ if (wanted >= acl->res[res].rlim_cur) {
56963+ unsigned long res_add;
56964+
56965+ res_add = wanted + res_learn_bumps[res];
56966+
56967+ acl->res[res].rlim_cur = res_add;
56968+
56969+ if (wanted > acl->res[res].rlim_max)
56970+ acl->res[res].rlim_max = res_add;
56971+
56972+ /* only log the subject filename, since resource logging is supported for
56973+ single-subject learning only */
56974+ rcu_read_lock();
56975+ cred = __task_cred(task);
56976+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
56977+ task->role->roletype, cred->uid, cred->gid, acl->filename,
56978+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
56979+ "", (unsigned long) res, &task->signal->saved_ip);
56980+ rcu_read_unlock();
56981+ }
56982+
56983+ return;
56984+}
56985+EXPORT_SYMBOL(gr_learn_resource);
56986+#endif
56987+
56988+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
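+/* translate the subject's GR_PAX_* policy bits into the task's MF_PAX_*
+   flags at exec time; disable bits are cleared before enable bits are
+   applied, so an explicit enable in the policy takes precedence */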
56989+void
56990+pax_set_initial_flags(struct linux_binprm *bprm)
56991+{
56992+ struct task_struct *task = current;
56993+ struct acl_subject_label *proc;
56994+ unsigned long flags;
56995+
56996+ if (unlikely(!(gr_status & GR_READY)))
56997+ return;
56998+
56999+ flags = pax_get_flags(task);
57000+
57001+ proc = task->acl;
57002+
57003+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
57004+ flags &= ~MF_PAX_PAGEEXEC;
57005+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
57006+ flags &= ~MF_PAX_SEGMEXEC;
57007+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
57008+ flags &= ~MF_PAX_RANDMMAP;
57009+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
57010+ flags &= ~MF_PAX_EMUTRAMP;
57011+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
57012+ flags &= ~MF_PAX_MPROTECT;
57013+
57014+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
57015+ flags |= MF_PAX_PAGEEXEC;
57016+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
57017+ flags |= MF_PAX_SEGMEXEC;
57018+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
57019+ flags |= MF_PAX_RANDMMAP;
57020+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
57021+ flags |= MF_PAX_EMUTRAMP;
57022+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
57023+ flags |= MF_PAX_MPROTECT;
57024+
57025+ pax_set_flags(task, flags);
57026+
57027+ return;
57028+}
57029+#endif
57030+
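+/* /proc-driven ptrace check: walk the real_parent chain to determine
+   whether current is an ancestor of the target; when it is not, access
+   is denied for non-root users (under HARDEN_PTRACE without RBAC) or for
+   subjects lacking GR_RELAXPTRACE (with RBAC enabled) */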
57031+int
57032+gr_handle_proc_ptrace(struct task_struct *task)
57033+{
57034+ struct file *filp;
57035+ struct task_struct *tmp = task;
57036+ struct task_struct *curtemp = current;
57037+ __u32 retmode;
57038+
57039+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
57040+ if (unlikely(!(gr_status & GR_READY)))
57041+ return 0;
57042+#endif
57043+
57044+ read_lock(&tasklist_lock);
57045+ read_lock(&grsec_exec_file_lock);
57046+ filp = task->exec_file;
57047+
57048+ while (tmp->pid > 0) {
57049+ if (tmp == curtemp)
57050+ break;
57051+ tmp = tmp->real_parent;
57052+ }
57053+
57054+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
57055+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
57056+ read_unlock(&grsec_exec_file_lock);
57057+ read_unlock(&tasklist_lock);
57058+ return 1;
57059+ }
57060+
57061+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57062+ if (!(gr_status & GR_READY)) {
57063+ read_unlock(&grsec_exec_file_lock);
57064+ read_unlock(&tasklist_lock);
57065+ return 0;
57066+ }
57067+#endif
57068+
57069+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
57070+ read_unlock(&grsec_exec_file_lock);
57071+ read_unlock(&tasklist_lock);
57072+
57073+ if (retmode & GR_NOPTRACE)
57074+ return 1;
57075+
57076+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
57077+ && (current->acl != task->acl || (current->acl != current->role->root_label
57078+ && current->pid != task->pid)))
57079+ return 1;
57080+
57081+ return 0;
57082+}
57083+
57084+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
57085+{
57086+ if (unlikely(!(gr_status & GR_READY)))
57087+ return;
57088+
57089+ if (!(current->role->roletype & GR_ROLE_GOD))
57090+ return;
57091+
57092+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
57093+ p->role->rolename, gr_task_roletype_to_char(p),
57094+ p->acl->filename);
57095+}
57096+
57097+int
57098+gr_handle_ptrace(struct task_struct *task, const long request)
57099+{
57100+ struct task_struct *tmp = task;
57101+ struct task_struct *curtemp = current;
57102+ __u32 retmode;
57103+
57104+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
57105+ if (unlikely(!(gr_status & GR_READY)))
57106+ return 0;
57107+#endif
57108+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
57109+ read_lock(&tasklist_lock);
57110+ while (tmp->pid > 0) {
57111+ if (tmp == curtemp)
57112+ break;
57113+ tmp = tmp->real_parent;
57114+ }
57115+
57116+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
57117+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
57118+ read_unlock(&tasklist_lock);
57119+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
57120+ return 1;
57121+ }
57122+ read_unlock(&tasklist_lock);
57123+ }
57124+
57125+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57126+ if (!(gr_status & GR_READY))
57127+ return 0;
57128+#endif
57129+
57130+ read_lock(&grsec_exec_file_lock);
57131+ if (unlikely(!task->exec_file)) {
57132+ read_unlock(&grsec_exec_file_lock);
57133+ return 0;
57134+ }
57135+
57136+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
57137+ read_unlock(&grsec_exec_file_lock);
57138+
57139+ if (retmode & GR_NOPTRACE) {
57140+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
57141+ return 1;
57142+ }
57143+
57144+ if (retmode & GR_PTRACERD) {
57145+ switch (request) {
57146+ case PTRACE_SEIZE:
57147+ case PTRACE_POKETEXT:
57148+ case PTRACE_POKEDATA:
57149+ case PTRACE_POKEUSR:
57150+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
57151+ case PTRACE_SETREGS:
57152+ case PTRACE_SETFPREGS:
57153+#endif
57154+#ifdef CONFIG_X86
57155+ case PTRACE_SETFPXREGS:
57156+#endif
57157+#ifdef CONFIG_ALTIVEC
57158+ case PTRACE_SETVRREGS:
57159+#endif
57160+ return 1;
57161+ default:
57162+ return 0;
57163+ }
57164+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
57165+ !(current->role->roletype & GR_ROLE_GOD) &&
57166+ (current->acl != task->acl)) {
57167+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
57168+ return 1;
57169+ }
57170+
57171+ return 0;
57172+}
57173+
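+/* deny executable mappings of regular files that are writable under the
+   default or role ACL: a library writable to another subject could be
+   modified to subvert this one */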
57174+static int is_writable_mmap(const struct file *filp)
57175+{
57176+ struct task_struct *task = current;
57177+ struct acl_object_label *obj, *obj2;
57178+
57179+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
57180+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
57181+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
57182+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
57183+ task->role->root_label);
57184+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
57185+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
57186+ return 1;
57187+ }
57188+ }
57189+ return 0;
57190+}
57191+
57192+int
57193+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
57194+{
57195+ __u32 mode;
57196+
57197+ if (unlikely(!file || !(prot & PROT_EXEC)))
57198+ return 1;
57199+
57200+ if (is_writable_mmap(file))
57201+ return 0;
57202+
57203+ mode =
57204+ gr_search_file(file->f_path.dentry,
57205+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
57206+ file->f_path.mnt);
57207+
57208+ if (!gr_tpe_allow(file))
57209+ return 0;
57210+
57211+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
57212+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57213+ return 0;
57214+ } else if (unlikely(!(mode & GR_EXEC))) {
57215+ return 0;
57216+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
57217+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57218+ return 1;
57219+ }
57220+
57221+ return 1;
57222+}
57223+
57224+int
57225+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57226+{
57227+ __u32 mode;
57228+
57229+ if (unlikely(!file || !(prot & PROT_EXEC)))
57230+ return 1;
57231+
57232+ if (is_writable_mmap(file))
57233+ return 0;
57234+
57235+ mode =
57236+ gr_search_file(file->f_path.dentry,
57237+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
57238+ file->f_path.mnt);
57239+
57240+ if (!gr_tpe_allow(file))
57241+ return 0;
57242+
57243+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
57244+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57245+ return 0;
57246+ } else if (unlikely(!(mode & GR_EXEC))) {
57247+ return 0;
57248+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
57249+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
57250+ return 1;
57251+ }
57252+
57253+ return 1;
57254+}
57255+
57256+void
57257+gr_acl_handle_psacct(struct task_struct *task, const long code)
57258+{
57259+ unsigned long runtime;
57260+ unsigned long cputime;
57261+ unsigned int wday, cday;
57262+ __u8 whr, chr;
57263+ __u8 wmin, cmin;
57264+ __u8 wsec, csec;
57265+ struct timespec timeval;
57266+
57267+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
57268+ !(task->acl->mode & GR_PROCACCT)))
57269+ return;
57270+
57271+ do_posix_clock_monotonic_gettime(&timeval);
57272+ runtime = timeval.tv_sec - task->start_time.tv_sec;
57273+ wday = runtime / (3600 * 24);
57274+ runtime -= wday * (3600 * 24);
57275+ whr = runtime / 3600;
57276+ runtime -= whr * 3600;
57277+ wmin = runtime / 60;
57278+ runtime -= wmin * 60;
57279+ wsec = runtime;
57280+
57281+ cputime = (task->utime + task->stime) / HZ;
57282+ cday = cputime / (3600 * 24);
57283+ cputime -= cday * (3600 * 24);
57284+ chr = cputime / 3600;
57285+ cputime -= chr * 3600;
57286+ cmin = cputime / 60;
57287+ cputime -= cmin * 60;
57288+ csec = cputime;
57289+
57290+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
57291+
57292+ return;
57293+}
57294+
57295+void gr_set_kernel_label(struct task_struct *task)
57296+{
57297+ if (gr_status & GR_READY) {
57298+ task->role = kernel_role;
57299+ task->acl = kernel_role->root_label;
57300+ }
57301+ return;
57302+}
57303+
57304+#ifdef CONFIG_TASKSTATS
57305+int gr_is_taskstats_denied(int pid)
57306+{
57307+ struct task_struct *task;
57308+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57309+ const struct cred *cred;
57310+#endif
57311+ int ret = 0;
57312+
57313+ /* restrict taskstats viewing to un-chrooted root users
57314+ who have the 'view' subject flag if the RBAC system is enabled
57315+ */
57316+
57317+ rcu_read_lock();
57318+ read_lock(&tasklist_lock);
57319+ task = find_task_by_vpid(pid);
57320+ if (task) {
57321+#ifdef CONFIG_GRKERNSEC_CHROOT
57322+ if (proc_is_chrooted(task))
57323+ ret = -EACCES;
57324+#endif
57325+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57326+ cred = __task_cred(task);
57327+#ifdef CONFIG_GRKERNSEC_PROC_USER
57328+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
57329+ ret = -EACCES;
57330+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57331+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, grsec_proc_gid))
57332+ ret = -EACCES;
57333+#endif
57334+#endif
57335+ if (gr_status & GR_READY) {
57336+ if (!(task->acl->mode & GR_VIEW))
57337+ ret = -EACCES;
57338+ }
57339+ } else
57340+ ret = -ENOENT;
57341+
57342+ read_unlock(&tasklist_lock);
57343+ rcu_read_unlock();
57344+
57345+ return ret;
57346+}
57347+#endif
57348+
57349+/* AUXV entries are filled via a descendant of search_binary_handler
57350+ after we've already applied the subject for the target
57351+*/
57352+int gr_acl_enable_at_secure(void)
57353+{
57354+ if (unlikely(!(gr_status & GR_READY)))
57355+ return 0;
57356+
57357+ if (current->acl->mode & GR_ATSECURE)
57358+ return 1;
57359+
57360+ return 0;
57361+}
57362+
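+/* decide whether a directory entry may be returned to the reader:
+   consult the object label for the entry's inode (walking parent
+   subjects), then fall back to matching the reconstructed full path
+   against any globbed objects anchored at the directory's label;
+   returns 1 if the entry is visible */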
57363+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
57364+{
57365+ struct task_struct *task = current;
57366+ struct dentry *dentry = file->f_path.dentry;
57367+ struct vfsmount *mnt = file->f_path.mnt;
57368+ struct acl_object_label *obj, *tmp;
57369+ struct acl_subject_label *subj;
57370+ unsigned int bufsize;
57371+ int is_not_root;
57372+ char *path;
57373+ dev_t dev = __get_dev(dentry);
57374+
57375+ if (unlikely(!(gr_status & GR_READY)))
57376+ return 1;
57377+
57378+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57379+ return 1;
57380+
57381+	/* private inodes (IS_PRIVATE) are exempt -- "ignore Eric Biederman" */
57382+ if (IS_PRIVATE(dentry->d_inode))
57383+ return 1;
57384+
57385+ subj = task->acl;
57386+ read_lock(&gr_inode_lock);
57387+ do {
57388+ obj = lookup_acl_obj_label(ino, dev, subj);
57389+ if (obj != NULL) {
57390+ read_unlock(&gr_inode_lock);
57391+ return (obj->mode & GR_FIND) ? 1 : 0;
57392+ }
57393+ } while ((subj = subj->parent_subject));
57394+ read_unlock(&gr_inode_lock);
57395+
57396+ /* this is purely an optimization since we're looking for an object
57397+ for the directory we're doing a readdir on
57398+ if it's possible for any globbed object to match the entry we're
57399+ filling into the directory, then the object we find here will be
57400+ an anchor point with attached globbed objects
57401+ */
57402+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
57403+ if (obj->globbed == NULL)
57404+ return (obj->mode & GR_FIND) ? 1 : 0;
57405+
57406+ is_not_root = ((obj->filename[0] == '/') &&
57407+ (obj->filename[1] == '\0')) ? 0 : 1;
57408+ bufsize = PAGE_SIZE - namelen - is_not_root;
57409+
57410+	/* bail out if bufsize > PAGE_SIZE || bufsize == 0 (the single unsigned compare below covers both) */
57411+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
57412+ return 1;
57413+
57414+ preempt_disable();
57415+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57416+ bufsize);
57417+
57418+ bufsize = strlen(path);
57419+
57420+ /* if base is "/", don't append an additional slash */
57421+ if (is_not_root)
57422+ *(path + bufsize) = '/';
57423+ memcpy(path + bufsize + is_not_root, name, namelen);
57424+ *(path + bufsize + namelen + is_not_root) = '\0';
57425+
57426+ tmp = obj->globbed;
57427+ while (tmp) {
57428+ if (!glob_match(tmp->filename, path)) {
57429+ preempt_enable();
57430+ return (tmp->mode & GR_FIND) ? 1 : 0;
57431+ }
57432+ tmp = tmp->next;
57433+ }
57434+ preempt_enable();
57435+ return (obj->mode & GR_FIND) ? 1 : 0;
57436+}
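+
+/* A minimal userspace sketch of the path assembly above, assuming the
+ destination buffer is large enough: the is_not_root flag suppresses the
+ joining slash only when the anchor object is "/" itself, so the result
+ never contains "//".
+*/
+#if 0
+static void join_entry_example(char *buf, const char *base, const char *name)
+{
+ size_t blen = strlen(base);
+ int is_not_root = !(base[0] == '/' && base[1] == '\0');
+
+ memcpy(buf, base, blen);
+ if (is_not_root)
+ buf[blen] = '/';
+ memcpy(buf + blen + is_not_root, name, strlen(name) + 1);
+ /* join_entry_example(buf, "/", "etc") -> "/etc"
+ join_entry_example(buf, "/etc", "motd") -> "/etc/motd" */
+}
+#endif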
57437+
57438+void gr_put_exec_file(struct task_struct *task)
57439+{
57440+ struct file *filp;
57441+
57442+ write_lock(&grsec_exec_file_lock);
57443+ filp = task->exec_file;
57444+ task->exec_file = NULL;
57445+ write_unlock(&grsec_exec_file_lock);
57446+
57447+ if (filp)
57448+ fput(filp);
57449+
57450+ return;
57451+}
57452+
57453+
57454+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
57455+EXPORT_SYMBOL(gr_acl_is_enabled);
57456+#endif
57457+EXPORT_SYMBOL(gr_set_kernel_label);
57458+#ifdef CONFIG_SECURITY
57459+EXPORT_SYMBOL(gr_check_user_change);
57460+EXPORT_SYMBOL(gr_check_group_change);
57461+#endif
57462+
57463diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
57464new file mode 100644
57465index 0000000..34fefda
57466--- /dev/null
57467+++ b/grsecurity/gracl_alloc.c
57468@@ -0,0 +1,105 @@
57469+#include <linux/kernel.h>
57470+#include <linux/mm.h>
57471+#include <linux/slab.h>
57472+#include <linux/vmalloc.h>
57473+#include <linux/gracl.h>
57474+#include <linux/grsecurity.h>
57475+
57476+static unsigned long alloc_stack_next = 1;
57477+static unsigned long alloc_stack_size = 1;
57478+static void **alloc_stack;
57479+
57480+static __inline__ int
57481+alloc_pop(void)
57482+{
57483+ if (alloc_stack_next == 1)
57484+ return 0;
57485+
57486+ kfree(alloc_stack[alloc_stack_next - 2]);
57487+
57488+ alloc_stack_next--;
57489+
57490+ return 1;
57491+}
57492+
57493+static __inline__ int
57494+alloc_push(void *buf)
57495+{
57496+ if (alloc_stack_next >= alloc_stack_size)
57497+ return 1;
57498+
57499+ alloc_stack[alloc_stack_next - 1] = buf;
57500+
57501+ alloc_stack_next++;
57502+
57503+ return 0;
57504+}
57505+
57506+void *
57507+acl_alloc(unsigned long len)
57508+{
57509+ void *ret = NULL;
57510+
57511+ if (!len || len > PAGE_SIZE)
57512+ goto out;
57513+
57514+ ret = kmalloc(len, GFP_KERNEL);
57515+
57516+ if (ret) {
57517+ if (alloc_push(ret)) {
57518+ kfree(ret);
57519+ ret = NULL;
57520+ }
57521+ }
57522+
57523+out:
57524+ return ret;
57525+}
57526+
57527+void *
57528+acl_alloc_num(unsigned long num, unsigned long len)
57529+{
57530+ if (!len || (num > (PAGE_SIZE / len)))
57531+ return NULL;
57532+
57533+ return acl_alloc(num * len);
57534+}
57535+
57536+void
57537+acl_free_all(void)
57538+{
57539+ if (gr_acl_is_enabled() || !alloc_stack)
57540+ return;
57541+
57542+ while (alloc_pop()) ;
57543+
57544+ if (alloc_stack) {
57545+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
57546+ kfree(alloc_stack);
57547+ else
57548+ vfree(alloc_stack);
57549+ }
57550+
57551+ alloc_stack = NULL;
57552+ alloc_stack_size = 1;
57553+ alloc_stack_next = 1;
57554+
57555+ return;
57556+}
57557+
57558+int
57559+acl_alloc_stack_init(unsigned long size)
57560+{
57561+ if ((size * sizeof (void *)) <= PAGE_SIZE)
57562+ alloc_stack =
57563+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
57564+ else
57565+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
57566+
57567+ alloc_stack_size = size;
57568+
57569+ if (!alloc_stack)
57570+ return 0;
57571+ else
57572+ return 1;
57573+}
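+
+/* Usage sketch for the allocator above, assuming a caller staging a
+ policy of at most 128 tracked objects: every acl_alloc() is recorded on
+ the stack, so a single acl_free_all() call (made while the RBAC system
+ is disabled) releases the whole batch without per-object bookkeeping.
+*/
+#if 0
+static int stage_policy_example(void)
+{
+ void *obj;
+
+ if (!acl_alloc_stack_init(128))
+ return -ENOMEM;
+
+ obj = acl_alloc(sizeof(struct acl_object_label));
+ if (obj == NULL) {
+ acl_free_all();
+ return -ENOMEM;
+ }
+ /* ... populate obj and link it into the policy being staged ... */
+ acl_free_all(); /* frees obj and then the stack itself */
+ return 0;
+}
+#endif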
57574diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
57575new file mode 100644
57576index 0000000..6d21049
57577--- /dev/null
57578+++ b/grsecurity/gracl_cap.c
57579@@ -0,0 +1,110 @@
57580+#include <linux/kernel.h>
57581+#include <linux/module.h>
57582+#include <linux/sched.h>
57583+#include <linux/gracl.h>
57584+#include <linux/grsecurity.h>
57585+#include <linux/grinternal.h>
57586+
57587+extern const char *captab_log[];
57588+extern int captab_log_entries;
57589+
57590+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57591+{
57592+ struct acl_subject_label *curracl;
57593+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57594+ kernel_cap_t cap_audit = __cap_empty_set;
57595+
57596+ if (!gr_acl_is_enabled())
57597+ return 1;
57598+
57599+ curracl = task->acl;
57600+
57601+ cap_drop = curracl->cap_lower;
57602+ cap_mask = curracl->cap_mask;
57603+ cap_audit = curracl->cap_invert_audit;
57604+
57605+ while ((curracl = curracl->parent_subject)) {
57606+ /* if the cap isn't specified in the current computed mask but is specified in the
57607+ current level subject, and is lowered in the current level subject, then add
57608+ it to the set of dropped capabilities;
57609+ otherwise, add the current level subject's mask to the current computed mask
57610+ */
57611+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57612+ cap_raise(cap_mask, cap);
57613+ if (cap_raised(curracl->cap_lower, cap))
57614+ cap_raise(cap_drop, cap);
57615+ if (cap_raised(curracl->cap_invert_audit, cap))
57616+ cap_raise(cap_audit, cap);
57617+ }
57618+ }
57619+
57620+ if (!cap_raised(cap_drop, cap)) {
57621+ if (cap_raised(cap_audit, cap))
57622+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
57623+ return 1;
57624+ }
57625+
57626+ curracl = task->acl;
57627+
57628+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
57629+ && cap_raised(cred->cap_effective, cap)) {
57630+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
57631+ task->role->roletype, cred->uid,
57632+ cred->gid, task->exec_file ?
57633+ gr_to_filename(task->exec_file->f_path.dentry,
57634+ task->exec_file->f_path.mnt) : curracl->filename,
57635+ curracl->filename, 0UL,
57636+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
57637+ return 1;
57638+ }
57639+
57640+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
57641+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
57642+
57643+ return 0;
57644+}
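+
+/* A plain-bitmask sketch of the inheritance walk above, assuming
+ single-word capability sets with index 0 being the task's own subject
+ (depth >= 1): a parent may only decide a capability that no more
+ specific subject has already decided, which is what the cap_mask
+ accumulation enforces.
+*/
+#if 0
+static int cap_allowed_example(unsigned long cap_bit,
+ const unsigned long *lower, const unsigned long *mask, int depth)
+{
+ unsigned long drop = lower[0], decided = mask[0];
+ int i;
+
+ for (i = 1; i < depth; i++) {
+ if (!(decided & cap_bit) && (mask[i] & cap_bit)) {
+ decided |= cap_bit;
+ drop |= lower[i] & cap_bit;
+ }
+ }
+ return !(drop & cap_bit); /* 1 = capability not dropped */
+}
+#endif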
57645+
57646+int
57647+gr_acl_is_capable(const int cap)
57648+{
57649+ return gr_task_acl_is_capable(current, current_cred(), cap);
57650+}
57651+
57652+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
57653+{
57654+ struct acl_subject_label *curracl;
57655+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
57656+
57657+ if (!gr_acl_is_enabled())
57658+ return 1;
57659+
57660+ curracl = task->acl;
57661+
57662+ cap_drop = curracl->cap_lower;
57663+ cap_mask = curracl->cap_mask;
57664+
57665+ while ((curracl = curracl->parent_subject)) {
57666+ /* if the cap isn't specified in the current computed mask but is specified in the
57667+ current level subject, and is lowered in the current level subject, then add
57668+ it to the set of dropped capabilities;
57669+ otherwise, add the current level subject's mask to the current computed mask
57670+ */
57671+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
57672+ cap_raise(cap_mask, cap);
57673+ if (cap_raised(curracl->cap_lower, cap))
57674+ cap_raise(cap_drop, cap);
57675+ }
57676+ }
57677+
57678+ if (!cap_raised(cap_drop, cap))
57679+ return 1;
57680+
57681+ return 0;
57682+}
57683+
57684+int
57685+gr_acl_is_capable_nolog(const int cap)
57686+{
57687+ return gr_task_acl_is_capable_nolog(current, cap);
57688+}
57689+
57690diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
57691new file mode 100644
57692index 0000000..a340c17
57693--- /dev/null
57694+++ b/grsecurity/gracl_fs.c
57695@@ -0,0 +1,431 @@
57696+#include <linux/kernel.h>
57697+#include <linux/sched.h>
57698+#include <linux/types.h>
57699+#include <linux/fs.h>
57700+#include <linux/file.h>
57701+#include <linux/stat.h>
57702+#include <linux/grsecurity.h>
57703+#include <linux/grinternal.h>
57704+#include <linux/gracl.h>
57705+
57706+umode_t
57707+gr_acl_umask(void)
57708+{
57709+ if (unlikely(!gr_acl_is_enabled()))
57710+ return 0;
57711+
57712+ return current->role->umask;
57713+}
57714+
57715+__u32
57716+gr_acl_handle_hidden_file(const struct dentry * dentry,
57717+ const struct vfsmount * mnt)
57718+{
57719+ __u32 mode;
57720+
57721+ if (unlikely(!dentry->d_inode))
57722+ return GR_FIND;
57723+
57724+ mode =
57725+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
57726+
57727+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
57728+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57729+ return mode;
57730+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
57731+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
57732+ return 0;
57733+ } else if (unlikely(!(mode & GR_FIND)))
57734+ return 0;
57735+
57736+ return GR_FIND;
57737+}
57738+
57739+__u32
57740+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57741+ int acc_mode)
57742+{
57743+ __u32 reqmode = GR_FIND;
57744+ __u32 mode;
57745+
57746+ if (unlikely(!dentry->d_inode))
57747+ return reqmode;
57748+
57749+ if (acc_mode & MAY_APPEND)
57750+ reqmode |= GR_APPEND;
57751+ else if (acc_mode & MAY_WRITE)
57752+ reqmode |= GR_WRITE;
57753+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
57754+ reqmode |= GR_READ;
57755+
57756+ mode =
57757+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57758+ mnt);
57759+
57760+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57761+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57762+ reqmode & GR_READ ? " reading" : "",
57763+ reqmode & GR_WRITE ? " writing" : reqmode &
57764+ GR_APPEND ? " appending" : "");
57765+ return reqmode;
57766+ } else
57767+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57768+ {
57769+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
57770+ reqmode & GR_READ ? " reading" : "",
57771+ reqmode & GR_WRITE ? " writing" : reqmode &
57772+ GR_APPEND ? " appending" : "");
57773+ return 0;
57774+ } else if (unlikely((mode & reqmode) != reqmode))
57775+ return 0;
57776+
57777+ return reqmode;
57778+}
57779+
57780+__u32
57781+gr_acl_handle_creat(const struct dentry * dentry,
57782+ const struct dentry * p_dentry,
57783+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57784+ const int imode)
57785+{
57786+ __u32 reqmode = GR_WRITE | GR_CREATE;
57787+ __u32 mode;
57788+
57789+ if (acc_mode & MAY_APPEND)
57790+ reqmode |= GR_APPEND;
57791+ // if a directory was required or the directory already exists, then
57792+ // don't count this open as a read
57793+ if ((acc_mode & MAY_READ) &&
57794+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
57795+ reqmode |= GR_READ;
57796+ if ((open_flags & O_CREAT) &&
57797+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
57798+ reqmode |= GR_SETID;
57799+
57800+ mode =
57801+ gr_check_create(dentry, p_dentry, p_mnt,
57802+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
57803+
57804+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57805+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57806+ reqmode & GR_READ ? " reading" : "",
57807+ reqmode & GR_WRITE ? " writing" : reqmode &
57808+ GR_APPEND ? " appending" : "");
57809+ return reqmode;
57810+ } else
57811+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57812+ {
57813+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
57814+ reqmode & GR_READ ? " reading" : "",
57815+ reqmode & GR_WRITE ? " writing" : reqmode &
57816+ GR_APPEND ? " appending" : "");
57817+ return 0;
57818+ } else if (unlikely((mode & reqmode) != reqmode))
57819+ return 0;
57820+
57821+ return reqmode;
57822+}
57823+
57824+__u32
57825+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
57826+ const int fmode)
57827+{
57828+ __u32 mode, reqmode = GR_FIND;
57829+
57830+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
57831+ reqmode |= GR_EXEC;
57832+ if (fmode & S_IWOTH)
57833+ reqmode |= GR_WRITE;
57834+ if (fmode & S_IROTH)
57835+ reqmode |= GR_READ;
57836+
57837+ mode =
57838+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
57839+ mnt);
57840+
57841+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
57842+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57843+ reqmode & GR_READ ? " reading" : "",
57844+ reqmode & GR_WRITE ? " writing" : "",
57845+ reqmode & GR_EXEC ? " executing" : "");
57846+ return reqmode;
57847+ } else
57848+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
57849+ {
57850+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
57851+ reqmode & GR_READ ? " reading" : "",
57852+ reqmode & GR_WRITE ? " writing" : "",
57853+ reqmode & GR_EXEC ? " executing" : "");
57854+ return 0;
57855+ } else if (unlikely((mode & reqmode) != reqmode))
57856+ return 0;
57857+
57858+ return reqmode;
57859+}
57860+
57861+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
57862+{
57863+ __u32 mode;
57864+
57865+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
57866+
57867+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
57868+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
57869+ return mode;
57870+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
57871+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
57872+ return 0;
57873+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
57874+ return 0;
57875+
57876+ return (reqmode);
57877+}
57878+
57879+__u32
57880+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57881+{
57882+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
57883+}
57884+
57885+__u32
57886+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
57887+{
57888+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
57889+}
57890+
57891+__u32
57892+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
57893+{
57894+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
57895+}
57896+
57897+__u32
57898+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
57899+{
57900+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
57901+}
57902+
57903+__u32
57904+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
57905+ umode_t *modeptr)
57906+{
57907+ umode_t mode;
57908+
57909+ *modeptr &= ~gr_acl_umask();
57910+ mode = *modeptr;
57911+
57912+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
57913+ return 1;
57914+
57915+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
57916+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
57917+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
57918+ GR_CHMOD_ACL_MSG);
57919+ } else {
57920+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
57921+ }
57922+}
57923+
57924+__u32
57925+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
57926+{
57927+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
57928+}
57929+
57930+__u32
57931+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
57932+{
57933+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
57934+}
57935+
57936+__u32
57937+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
57938+{
57939+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
57940+}
57941+
57942+__u32
57943+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
57944+{
57945+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
57946+ GR_UNIXCONNECT_ACL_MSG);
57947+}
57948+
57949+/* hardlinks require at minimum create and link permission,
57950+ any additional privilege required is based on the
57951+ privilege of the file being linked to
57952+*/
57953+__u32
57954+gr_acl_handle_link(const struct dentry * new_dentry,
57955+ const struct dentry * parent_dentry,
57956+ const struct vfsmount * parent_mnt,
57957+ const struct dentry * old_dentry,
57958+ const struct vfsmount * old_mnt, const struct filename *to)
57959+{
57960+ __u32 mode;
57961+ __u32 needmode = GR_CREATE | GR_LINK;
57962+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
57963+
57964+ mode =
57965+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
57966+ old_mnt);
57967+
57968+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
57969+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57970+ return mode;
57971+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57972+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
57973+ return 0;
57974+ } else if (unlikely((mode & needmode) != needmode))
57975+ return 0;
57976+
57977+ return 1;
57978+}
57979+
57980+__u32
57981+gr_acl_handle_symlink(const struct dentry * new_dentry,
57982+ const struct dentry * parent_dentry,
57983+ const struct vfsmount * parent_mnt, const struct filename *from)
57984+{
57985+ __u32 needmode = GR_WRITE | GR_CREATE;
57986+ __u32 mode;
57987+
57988+ mode =
57989+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
57990+ GR_CREATE | GR_AUDIT_CREATE |
57991+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
57992+
57993+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
57994+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57995+ return mode;
57996+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
57997+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
57998+ return 0;
57999+ } else if (unlikely((mode & needmode) != needmode))
58000+ return 0;
58001+
58002+ return (GR_WRITE | GR_CREATE);
58003+}
58004+
58005+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
58006+{
58007+ __u32 mode;
58008+
58009+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
58010+
58011+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
58012+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
58013+ return mode;
58014+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
58015+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
58016+ return 0;
58017+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
58018+ return 0;
58019+
58020+ return (reqmode);
58021+}
58022+
58023+__u32
58024+gr_acl_handle_mknod(const struct dentry * new_dentry,
58025+ const struct dentry * parent_dentry,
58026+ const struct vfsmount * parent_mnt,
58027+ const int mode)
58028+{
58029+ __u32 reqmode = GR_WRITE | GR_CREATE;
58030+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
58031+ reqmode |= GR_SETID;
58032+
58033+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
58034+ reqmode, GR_MKNOD_ACL_MSG);
58035+}
58036+
58037+__u32
58038+gr_acl_handle_mkdir(const struct dentry *new_dentry,
58039+ const struct dentry *parent_dentry,
58040+ const struct vfsmount *parent_mnt)
58041+{
58042+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
58043+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
58044+}
58045+
58046+#define RENAME_CHECK_SUCCESS(old, new) \
58047+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
58048+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
58049+
58050+int
58051+gr_acl_handle_rename(struct dentry *new_dentry,
58052+ struct dentry *parent_dentry,
58053+ const struct vfsmount *parent_mnt,
58054+ struct dentry *old_dentry,
58055+ struct inode *old_parent_inode,
58056+ struct vfsmount *old_mnt, const struct filename *newname)
58057+{
58058+ __u32 comp1, comp2;
58059+ int error = 0;
58060+
58061+ if (unlikely(!gr_acl_is_enabled()))
58062+ return 0;
58063+
58064+ if (!new_dentry->d_inode) {
58065+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
58066+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
58067+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
58068+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
58069+ GR_DELETE | GR_AUDIT_DELETE |
58070+ GR_AUDIT_READ | GR_AUDIT_WRITE |
58071+ GR_SUPPRESS, old_mnt);
58072+ } else {
58073+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
58074+ GR_CREATE | GR_DELETE |
58075+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
58076+ GR_AUDIT_READ | GR_AUDIT_WRITE |
58077+ GR_SUPPRESS, parent_mnt);
58078+ comp2 =
58079+ gr_search_file(old_dentry,
58080+ GR_READ | GR_WRITE | GR_AUDIT_READ |
58081+ GR_DELETE | GR_AUDIT_DELETE |
58082+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
58083+ }
58084+
58085+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
58086+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
58087+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
58088+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
58089+ && !(comp2 & GR_SUPPRESS)) {
58090+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
58091+ error = -EACCES;
58092+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
58093+ error = -EACCES;
58094+
58095+ return error;
58096+}
58097+
58098+void
58099+gr_acl_handle_exit(void)
58100+{
58101+ u16 id;
58102+ char *rolename;
58103+
58104+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
58105+ !(current->role->roletype & GR_ROLE_PERSIST))) {
58106+ id = current->acl_role_id;
58107+ rolename = current->role->rolename;
58108+ gr_set_acls(1);
58109+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
58110+ }
58111+
58112+ gr_put_exec_file(current);
58113+ return;
58114+}
58115+
58116+int
58117+gr_acl_handle_procpidmem(const struct task_struct *task)
58118+{
58119+ if (unlikely(!gr_acl_is_enabled()))
58120+ return 0;
58121+
58122+ if (task != current && task->acl->mode & GR_PROTPROCFD)
58123+ return -EACCES;
58124+
58125+ return 0;
58126+}
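+
+/* Decision sketch for the recurring pattern above, assuming the same
+ flag semantics as generic_fs_handler(): a request is granted iff every
+ requested bit is present in the returned mode; GR_AUDITS only makes a
+ grant loud, GR_SUPPRESS only makes a denial quiet, and neither flag
+ changes the grant/deny outcome itself.
+*/
+#if 0
+static __u32 decide_example(__u32 mode, __u32 reqmode)
+{
+ int granted = (mode & reqmode) == reqmode;
+
+ if (granted && (mode & GR_AUDITS))
+ ; /* log the successful access */
+ else if (!granted && !(mode & GR_SUPPRESS))
+ ; /* log the denial */
+ return granted ? reqmode : 0;
+}
+#endif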
58127diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
58128new file mode 100644
58129index 0000000..58800a7
58130--- /dev/null
58131+++ b/grsecurity/gracl_ip.c
58132@@ -0,0 +1,384 @@
58133+#include <linux/kernel.h>
58134+#include <asm/uaccess.h>
58135+#include <asm/errno.h>
58136+#include <net/sock.h>
58137+#include <linux/file.h>
58138+#include <linux/fs.h>
58139+#include <linux/net.h>
58140+#include <linux/in.h>
58141+#include <linux/skbuff.h>
58142+#include <linux/ip.h>
58143+#include <linux/udp.h>
58144+#include <linux/types.h>
58145+#include <linux/sched.h>
58146+#include <linux/netdevice.h>
58147+#include <linux/inetdevice.h>
58148+#include <linux/gracl.h>
58149+#include <linux/grsecurity.h>
58150+#include <linux/grinternal.h>
58151+
58152+#define GR_BIND 0x01
58153+#define GR_CONNECT 0x02
58154+#define GR_INVERT 0x04
58155+#define GR_BINDOVERRIDE 0x08
58156+#define GR_CONNECTOVERRIDE 0x10
58157+#define GR_SOCK_FAMILY 0x20
58158+
58159+static const char * gr_protocols[IPPROTO_MAX] = {
58160+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
58161+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
58162+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
58163+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
58164+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
58165+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
58166+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
58167+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
58168+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
58169+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
58170+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
58171+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
58172+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
58173+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
58174+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
58175+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
58176+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
58177+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
58178+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
58179+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
58180+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
58181+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
58182+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
58183+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
58184+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
58185+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
58186+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
58187+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
58188+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
58189+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
58190+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
58191+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
58192+ };
58193+
58194+static const char * gr_socktypes[SOCK_MAX] = {
58195+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
58196+ "unknown:7", "unknown:8", "unknown:9", "packet"
58197+ };
58198+
58199+static const char * gr_sockfamilies[AF_MAX+1] = {
58200+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
58201+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
58202+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
58203+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
58204+ };
58205+
58206+const char *
58207+gr_proto_to_name(unsigned char proto)
58208+{
58209+ return gr_protocols[proto];
58210+}
58211+
58212+const char *
58213+gr_socktype_to_name(unsigned char type)
58214+{
58215+ return gr_socktypes[type];
58216+}
58217+
58218+const char *
58219+gr_sockfamily_to_name(unsigned char family)
58220+{
58221+ return gr_sockfamilies[family];
58222+}
58223+
58224+int
58225+gr_search_socket(const int domain, const int type, const int protocol)
58226+{
58227+ struct acl_subject_label *curr;
58228+ const struct cred *cred = current_cred();
58229+
58230+ if (unlikely(!gr_acl_is_enabled()))
58231+ goto exit;
58232+
58233+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
58234+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
58235+ goto exit; // let the kernel handle it
58236+
58237+ curr = current->acl;
58238+
58239+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
58240+ /* the family is allowed, if this is PF_INET allow it only if
58241+ the extra sock type/protocol checks pass */
58242+ if (domain == PF_INET)
58243+ goto inet_check;
58244+ goto exit;
58245+ } else {
58246+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58247+ __u32 fakeip = 0;
58248+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58249+ current->role->roletype, cred->uid,
58250+ cred->gid, current->exec_file ?
58251+ gr_to_filename(current->exec_file->f_path.dentry,
58252+ current->exec_file->f_path.mnt) :
58253+ curr->filename, curr->filename,
58254+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
58255+ &current->signal->saved_ip);
58256+ goto exit;
58257+ }
58258+ goto exit_fail;
58259+ }
58260+
58261+inet_check:
58262+ /* the rest of this checking is for IPv4 only */
58263+ if (!curr->ips)
58264+ goto exit;
58265+
58266+ if ((curr->ip_type & (1 << type)) &&
58267+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
58268+ goto exit;
58269+
58270+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58271+ /* we don't place acls on raw sockets, and sometimes
58272+ dgram/ip sockets are opened for ioctl and not
58273+ bind/connect, so we'll fake a bind learn log */
58274+ if (type == SOCK_RAW || type == SOCK_PACKET) {
58275+ __u32 fakeip = 0;
58276+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58277+ current->role->roletype, cred->uid,
58278+ cred->gid, current->exec_file ?
58279+ gr_to_filename(current->exec_file->f_path.dentry,
58280+ current->exec_file->f_path.mnt) :
58281+ curr->filename, curr->filename,
58282+ &fakeip, 0, type,
58283+ protocol, GR_CONNECT, &current->signal->saved_ip);
58284+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
58285+ __u32 fakeip = 0;
58286+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58287+ current->role->roletype, cred->uid,
58288+ cred->gid, current->exec_file ?
58289+ gr_to_filename(current->exec_file->f_path.dentry,
58290+ current->exec_file->f_path.mnt) :
58291+ curr->filename, curr->filename,
58292+ &fakeip, 0, type,
58293+ protocol, GR_BIND, &current->signal->saved_ip);
58294+ }
58295+ /* we'll log when they use connect or bind */
58296+ goto exit;
58297+ }
58298+
58299+exit_fail:
58300+ if (domain == PF_INET)
58301+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
58302+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
58303+ else
58304+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
58305+ gr_socktype_to_name(type), protocol);
58306+
58307+ return 0;
58308+exit:
58309+ return 1;
58310+}
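+
+/* Bitmap-membership sketch for the family/protocol tests above: each
+ allowed value v sets bit (v % 32) of word (v / 32) in the subject's
+ table, so a lookup costs one shift and one mask, e.g. domain 10
+ (PF_INET6) is bit 10 of word 0.
+*/
+#if 0
+static int bit_set_example(const __u32 *table, unsigned int v)
+{
+ return (table[v / 32] & (1U << (v % 32))) != 0;
+}
+#endif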
58311+
58312+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
58313+{
58314+ if ((ip->mode & mode) &&
58315+ (ip_port >= ip->low) &&
58316+ (ip_port <= ip->high) &&
58317+ ((ntohl(ip_addr) & our_netmask) ==
58318+ (ntohl(our_addr) & our_netmask))
58319+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
58320+ && (ip->type & (1 << type))) {
58321+ if (ip->mode & GR_INVERT)
58322+ return 2; // specifically denied
58323+ else
58324+ return 1; // allowed
58325+ }
58326+
58327+ return 0; // not specifically allowed, may continue parsing
58328+}
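+
+/* Netmask sketch for the address comparison above, assuming host-order
+ values as produced by the ntohl() calls in check_ip_policy(): an
+ address matches a rule when both sides mask to the same subnet, e.g.
+ 192.168.1.77 matches a 192.168.1.0/24 rule because both mask to
+ 0xc0a80100.
+*/
+#if 0
+static int subnet_match_example(__u32 addr, __u32 rule_addr, __u32 netmask)
+{
+ return (addr & netmask) == (rule_addr & netmask);
+}
+#endif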
58329+
58330+static int
58331+gr_search_connectbind(const int full_mode, struct sock *sk,
58332+ struct sockaddr_in *addr, const int type)
58333+{
58334+ char iface[IFNAMSIZ] = {0};
58335+ struct acl_subject_label *curr;
58336+ struct acl_ip_label *ip;
58337+ struct inet_sock *isk;
58338+ struct net_device *dev;
58339+ struct in_device *idev;
58340+ unsigned long i;
58341+ int ret;
58342+ int mode = full_mode & (GR_BIND | GR_CONNECT);
58343+ __u32 ip_addr = 0;
58344+ __u32 our_addr;
58345+ __u32 our_netmask;
58346+ char *p;
58347+ __u16 ip_port = 0;
58348+ const struct cred *cred = current_cred();
58349+
58350+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
58351+ return 0;
58352+
58353+ curr = current->acl;
58354+ isk = inet_sk(sk);
58355+
58356+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
58357+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
58358+ addr->sin_addr.s_addr = curr->inaddr_any_override;
58359+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
58360+ struct sockaddr_in saddr;
58361+ int err;
58362+
58363+ saddr.sin_family = AF_INET;
58364+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
58365+ saddr.sin_port = isk->inet_sport;
58366+
58367+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58368+ if (err)
58369+ return err;
58370+
58371+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
58372+ if (err)
58373+ return err;
58374+ }
58375+
58376+ if (!curr->ips)
58377+ return 0;
58378+
58379+ ip_addr = addr->sin_addr.s_addr;
58380+ ip_port = ntohs(addr->sin_port);
58381+
58382+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
58383+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
58384+ current->role->roletype, cred->uid,
58385+ cred->gid, current->exec_file ?
58386+ gr_to_filename(current->exec_file->f_path.dentry,
58387+ current->exec_file->f_path.mnt) :
58388+ curr->filename, curr->filename,
58389+ &ip_addr, ip_port, type,
58390+ sk->sk_protocol, mode, &current->signal->saved_ip);
58391+ return 0;
58392+ }
58393+
58394+ for (i = 0; i < curr->ip_num; i++) {
58395+ ip = *(curr->ips + i);
58396+ if (ip->iface != NULL) {
58397+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
58398+ p = strchr(iface, ':');
58399+ if (p != NULL)
58400+ *p = '\0';
58401+ dev = dev_get_by_name(sock_net(sk), iface);
58402+ if (dev == NULL)
58403+ continue;
58404+ idev = in_dev_get(dev);
58405+ if (idev == NULL) {
58406+ dev_put(dev);
58407+ continue;
58408+ }
58409+ rcu_read_lock();
58410+ for_ifa(idev) {
58411+ if (!strcmp(ip->iface, ifa->ifa_label)) {
58412+ our_addr = ifa->ifa_address;
58413+ our_netmask = 0xffffffff;
58414+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58415+ if (ret == 1) {
58416+ rcu_read_unlock();
58417+ in_dev_put(idev);
58418+ dev_put(dev);
58419+ return 0;
58420+ } else if (ret == 2) {
58421+ rcu_read_unlock();
58422+ in_dev_put(idev);
58423+ dev_put(dev);
58424+ goto denied;
58425+ }
58426+ }
58427+ } endfor_ifa(idev);
58428+ rcu_read_unlock();
58429+ in_dev_put(idev);
58430+ dev_put(dev);
58431+ } else {
58432+ our_addr = ip->addr;
58433+ our_netmask = ip->netmask;
58434+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
58435+ if (ret == 1)
58436+ return 0;
58437+ else if (ret == 2)
58438+ goto denied;
58439+ }
58440+ }
58441+
58442+denied:
58443+ if (mode == GR_BIND)
58444+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58445+ else if (mode == GR_CONNECT)
58446+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58447+
58448+ return -EACCES;
58449+}
58450+
58451+int
58452+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
58453+{
58454+ /* always allow disconnection of dgram sockets with connect */
58455+ if (addr->sin_family == AF_UNSPEC)
58456+ return 0;
58457+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
58458+}
58459+
58460+int
58461+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
58462+{
58463+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
58464+}
58465+
58466+int gr_search_listen(struct socket *sock)
58467+{
58468+ struct sock *sk = sock->sk;
58469+ struct sockaddr_in addr;
58470+
58471+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58472+ addr.sin_port = inet_sk(sk)->inet_sport;
58473+
58474+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58475+}
58476+
58477+int gr_search_accept(struct socket *sock)
58478+{
58479+ struct sock *sk = sock->sk;
58480+ struct sockaddr_in addr;
58481+
58482+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
58483+ addr.sin_port = inet_sk(sk)->inet_sport;
58484+
58485+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
58486+}
58487+
58488+int
58489+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
58490+{
58491+ if (addr)
58492+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
58493+ else {
58494+ struct sockaddr_in sin;
58495+ const struct inet_sock *inet = inet_sk(sk);
58496+
58497+ sin.sin_addr.s_addr = inet->inet_daddr;
58498+ sin.sin_port = inet->inet_dport;
58499+
58500+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58501+ }
58502+}
58503+
58504+int
58505+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
58506+{
58507+ struct sockaddr_in sin;
58508+
58509+ if (unlikely(skb->len < sizeof (struct udphdr)))
58510+ return 0; // skip this packet
58511+
58512+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
58513+ sin.sin_port = udp_hdr(skb)->source;
58514+
58515+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
58516+}
58517diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
58518new file mode 100644
58519index 0000000..25f54ef
58520--- /dev/null
58521+++ b/grsecurity/gracl_learn.c
58522@@ -0,0 +1,207 @@
58523+#include <linux/kernel.h>
58524+#include <linux/mm.h>
58525+#include <linux/sched.h>
58526+#include <linux/poll.h>
58527+#include <linux/string.h>
58528+#include <linux/file.h>
58529+#include <linux/types.h>
58530+#include <linux/vmalloc.h>
58531+#include <linux/grinternal.h>
58532+
58533+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
58534+ size_t count, loff_t *ppos);
58535+extern int gr_acl_is_enabled(void);
58536+
58537+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
58538+static int gr_learn_attached;
58539+
58540+/* use a 512k buffer */
58541+#define LEARN_BUFFER_SIZE (512 * 1024)
58542+
58543+static DEFINE_SPINLOCK(gr_learn_lock);
58544+static DEFINE_MUTEX(gr_learn_user_mutex);
58545+
58546+/* we need to maintain two buffers, so that the kernel context of grlearn
58547+ uses a mutex around the userspace copying, and the other kernel contexts
58548+ use a spinlock when copying into the buffer, since they cannot sleep
58549+*/
58550+static char *learn_buffer;
58551+static char *learn_buffer_user;
58552+static int learn_buffer_len;
58553+static int learn_buffer_user_len;
58554+
58555+static ssize_t
58556+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
58557+{
58558+ DECLARE_WAITQUEUE(wait, current);
58559+ ssize_t retval = 0;
58560+
58561+ add_wait_queue(&learn_wait, &wait);
58562+ set_current_state(TASK_INTERRUPTIBLE);
58563+ do {
58564+ mutex_lock(&gr_learn_user_mutex);
58565+ spin_lock(&gr_learn_lock);
58566+ if (learn_buffer_len)
58567+ break;
58568+ spin_unlock(&gr_learn_lock);
58569+ mutex_unlock(&gr_learn_user_mutex);
58570+ if (file->f_flags & O_NONBLOCK) {
58571+ retval = -EAGAIN;
58572+ goto out;
58573+ }
58574+ if (signal_pending(current)) {
58575+ retval = -ERESTARTSYS;
58576+ goto out;
58577+ }
58578+
58579+ schedule();
58580+ } while (1);
58581+
58582+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
58583+ learn_buffer_user_len = learn_buffer_len;
58584+ retval = learn_buffer_len;
58585+ learn_buffer_len = 0;
58586+
58587+ spin_unlock(&gr_learn_lock);
58588+
58589+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
58590+ retval = -EFAULT;
58591+
58592+ mutex_unlock(&gr_learn_user_mutex);
58593+out:
58594+ set_current_state(TASK_RUNNING);
58595+ remove_wait_queue(&learn_wait, &wait);
58596+ return retval;
58597+}
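+
+/* Double-buffer sketch of the handoff above: producers append to
+ learn_buffer under the spinlock (they cannot sleep), while the reader
+ snapshots into learn_buffer_user under the mutex, drops the spinlock,
+ and only then performs the sleeping copy_to_user().
+*/
+#if 0
+static ssize_t snapshot_example(char __user *buf)
+{
+ ssize_t len;
+
+ mutex_lock(&gr_learn_user_mutex);
+ spin_lock(&gr_learn_lock);
+ len = learn_buffer_len;
+ memcpy(learn_buffer_user, learn_buffer, len);
+ learn_buffer_len = 0;
+ spin_unlock(&gr_learn_lock); /* atomic section ends here */
+ if (copy_to_user(buf, learn_buffer_user, len))
+ len = -EFAULT;
+ mutex_unlock(&gr_learn_user_mutex);
+ return len;
+}
+#endif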
58598+
58599+static unsigned int
58600+poll_learn(struct file * file, poll_table * wait)
58601+{
58602+ poll_wait(file, &learn_wait, wait);
58603+
58604+ if (learn_buffer_len)
58605+ return (POLLIN | POLLRDNORM);
58606+
58607+ return 0;
58608+}
58609+
58610+void
58611+gr_clear_learn_entries(void)
58612+{
58613+ char *tmp;
58614+
58615+ mutex_lock(&gr_learn_user_mutex);
58616+ spin_lock(&gr_learn_lock);
58617+ tmp = learn_buffer;
58618+ learn_buffer = NULL;
58619+ spin_unlock(&gr_learn_lock);
58620+ if (tmp)
58621+ vfree(tmp);
58622+ if (learn_buffer_user != NULL) {
58623+ vfree(learn_buffer_user);
58624+ learn_buffer_user = NULL;
58625+ }
58626+ learn_buffer_len = 0;
58627+ mutex_unlock(&gr_learn_user_mutex);
58628+
58629+ return;
58630+}
58631+
58632+void
58633+gr_add_learn_entry(const char *fmt, ...)
58634+{
58635+ va_list args;
58636+ unsigned int len;
58637+
58638+ if (!gr_learn_attached)
58639+ return;
58640+
58641+ spin_lock(&gr_learn_lock);
58642+
58643+ /* leave a gap at the end so we know when it's "full" but don't have to
58644+ compute the exact length of the string we're trying to append
58645+ */
58646+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
58647+ spin_unlock(&gr_learn_lock);
58648+ wake_up_interruptible(&learn_wait);
58649+ return;
58650+ }
58651+ if (learn_buffer == NULL) {
58652+ spin_unlock(&gr_learn_lock);
58653+ return;
58654+ }
58655+
58656+ va_start(args, fmt);
58657+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
58658+ va_end(args);
58659+
58660+ learn_buffer_len += len + 1;
58661+
58662+ spin_unlock(&gr_learn_lock);
58663+ wake_up_interruptible(&learn_wait);
58664+
58665+ return;
58666+}
58667+
58668+static int
58669+open_learn(struct inode *inode, struct file *file)
58670+{
58671+ if (file->f_mode & FMODE_READ && gr_learn_attached)
58672+ return -EBUSY;
58673+ if (file->f_mode & FMODE_READ) {
58674+ int retval = 0;
58675+ mutex_lock(&gr_learn_user_mutex);
58676+ if (learn_buffer == NULL)
58677+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
58678+ if (learn_buffer_user == NULL)
58679+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
58680+ if (learn_buffer == NULL) {
58681+ retval = -ENOMEM;
58682+ goto out_error;
58683+ }
58684+ if (learn_buffer_user == NULL) {
58685+ retval = -ENOMEM;
58686+ goto out_error;
58687+ }
58688+ learn_buffer_len = 0;
58689+ learn_buffer_user_len = 0;
58690+ gr_learn_attached = 1;
58691+out_error:
58692+ mutex_unlock(&gr_learn_user_mutex);
58693+ return retval;
58694+ }
58695+ return 0;
58696+}
58697+
58698+static int
58699+close_learn(struct inode *inode, struct file *file)
58700+{
58701+ if (file->f_mode & FMODE_READ) {
58702+ char *tmp = NULL;
58703+ mutex_lock(&gr_learn_user_mutex);
58704+ spin_lock(&gr_learn_lock);
58705+ tmp = learn_buffer;
58706+ learn_buffer = NULL;
58707+ spin_unlock(&gr_learn_lock);
58708+ if (tmp)
58709+ vfree(tmp);
58710+ if (learn_buffer_user != NULL) {
58711+ vfree(learn_buffer_user);
58712+ learn_buffer_user = NULL;
58713+ }
58714+ learn_buffer_len = 0;
58715+ learn_buffer_user_len = 0;
58716+ gr_learn_attached = 0;
58717+ mutex_unlock(&gr_learn_user_mutex);
58718+ }
58719+
58720+ return 0;
58721+}
58722+
58723+const struct file_operations grsec_fops = {
58724+ .read = read_learn,
58725+ .write = write_grsec_handler,
58726+ .open = open_learn,
58727+ .release = close_learn,
58728+ .poll = poll_learn,
58729+};
58730diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
58731new file mode 100644
58732index 0000000..39645c9
58733--- /dev/null
58734+++ b/grsecurity/gracl_res.c
58735@@ -0,0 +1,68 @@
58736+#include <linux/kernel.h>
58737+#include <linux/sched.h>
58738+#include <linux/gracl.h>
58739+#include <linux/grinternal.h>
58740+
58741+static const char *restab_log[] = {
58742+ [RLIMIT_CPU] = "RLIMIT_CPU",
58743+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
58744+ [RLIMIT_DATA] = "RLIMIT_DATA",
58745+ [RLIMIT_STACK] = "RLIMIT_STACK",
58746+ [RLIMIT_CORE] = "RLIMIT_CORE",
58747+ [RLIMIT_RSS] = "RLIMIT_RSS",
58748+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
58749+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
58750+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
58751+ [RLIMIT_AS] = "RLIMIT_AS",
58752+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
58753+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
58754+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
58755+ [RLIMIT_NICE] = "RLIMIT_NICE",
58756+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
58757+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
58758+ [GR_CRASH_RES] = "RLIMIT_CRASH"
58759+};
58760+
58761+void
58762+gr_log_resource(const struct task_struct *task,
58763+ const int res, const unsigned long wanted, const int gt)
58764+{
58765+ const struct cred *cred;
58766+ unsigned long rlim;
58767+
58768+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
58769+ return;
58770+
58771+ // not yet supported resource
58772+ if (unlikely(!restab_log[res]))
58773+ return;
58774+
58775+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
58776+ rlim = task_rlimit_max(task, res);
58777+ else
58778+ rlim = task_rlimit(task, res);
58779+
58780+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
58781+ return;
58782+
58783+ rcu_read_lock();
58784+ cred = __task_cred(task);
58785+
58786+ if (res == RLIMIT_NPROC &&
58787+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
58788+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
58789+ goto out_rcu_unlock;
58790+ else if (res == RLIMIT_MEMLOCK &&
58791+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
58792+ goto out_rcu_unlock;
58793+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
58794+ goto out_rcu_unlock;
58795+ rcu_read_unlock();
58796+
58797+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
58798+
58799+ return;
58800+out_rcu_unlock:
58801+ rcu_read_unlock();
58802+ return;
58803+}
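+
+/* Comparison sketch for the limit test above: the gt flag selects the
+ caller's convention for what counts as an overage, so logging happens
+ only when the request truly exceeds the bound (and never for
+ RLIM_INFINITY).
+*/
+#if 0
+static int over_limit_example(unsigned long wanted, unsigned long rlim, int gt)
+{
+ if (rlim == RLIM_INFINITY)
+ return 0;
+ return gt ? wanted > rlim : wanted >= rlim;
+}
+#endif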
58804diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
58805new file mode 100644
58806index 0000000..25197e9
58807--- /dev/null
58808+++ b/grsecurity/gracl_segv.c
58809@@ -0,0 +1,299 @@
58810+#include <linux/kernel.h>
58811+#include <linux/mm.h>
58812+#include <asm/uaccess.h>
58813+#include <asm/errno.h>
58814+#include <asm/mman.h>
58815+#include <net/sock.h>
58816+#include <linux/file.h>
58817+#include <linux/fs.h>
58818+#include <linux/net.h>
58819+#include <linux/in.h>
58820+#include <linux/slab.h>
58821+#include <linux/types.h>
58822+#include <linux/sched.h>
58823+#include <linux/timer.h>
58824+#include <linux/gracl.h>
58825+#include <linux/grsecurity.h>
58826+#include <linux/grinternal.h>
58827+
58828+static struct crash_uid *uid_set;
58829+static unsigned short uid_used;
58830+static DEFINE_SPINLOCK(gr_uid_lock);
58831+extern rwlock_t gr_inode_lock;
58832+extern struct acl_subject_label *
58833+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
58834+ struct acl_role_label *role);
58835+
58836+#ifdef CONFIG_BTRFS_FS
58837+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
58838+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
58839+#endif
58840+
58841+static inline dev_t __get_dev(const struct dentry *dentry)
58842+{
58843+#ifdef CONFIG_BTRFS_FS
58844+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
58845+ return get_btrfs_dev_from_inode(dentry->d_inode);
58846+ else
58847+#endif
58848+ return dentry->d_inode->i_sb->s_dev;
58849+}
58850+
58851+int
58852+gr_init_uidset(void)
58853+{
58854+ uid_set =
58855+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
58856+ uid_used = 0;
58857+
58858+ return uid_set ? 1 : 0;
58859+}
58860+
58861+void
58862+gr_free_uidset(void)
58863+{
58864+ if (uid_set)
58865+ kfree(uid_set);
58866+
58867+ return;
58868+}
58869+
58870+int
58871+gr_find_uid(const uid_t uid)
58872+{
58873+ struct crash_uid *tmp = uid_set;
58874+ uid_t buid;
58875+ int low = 0, high = uid_used - 1, mid;
58876+
58877+ while (high >= low) {
58878+ mid = (low + high) >> 1;
58879+ buid = tmp[mid].uid;
58880+ if (buid == uid)
58881+ return mid;
58882+ if (buid > uid)
58883+ high = mid - 1;
58884+ if (buid < uid)
58885+ low = mid + 1;
58886+ }
58887+
58888+ return -1;
58889+}
58890+
58891+static __inline__ void
58892+gr_insertsort(void)
58893+{
58894+ unsigned short i, j;
58895+ struct crash_uid index;
58896+
58897+ for (i = 1; i < uid_used; i++) {
58898+ index = uid_set[i];
58899+ j = i;
58900+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
58901+ uid_set[j] = uid_set[j - 1];
58902+ j--;
58903+ }
58904+ uid_set[j] = index;
58905+ }
58906+
58907+ return;
58908+}
58909+
58910+static __inline__ void
58911+gr_insert_uid(const uid_t uid, const unsigned long expires)
58912+{
58913+ int loc;
58914+
58915+ if (uid_used == GR_UIDTABLE_MAX)
58916+ return;
58917+
58918+ loc = gr_find_uid(uid);
58919+
58920+ if (loc >= 0) {
58921+ uid_set[loc].expires = expires;
58922+ return;
58923+ }
58924+
58925+ uid_set[uid_used].uid = uid;
58926+ uid_set[uid_used].expires = expires;
58927+ uid_used++;
58928+
58929+ gr_insertsort();
58930+
58931+ return;
58932+}
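+
+/* Lookup sketch: gr_find_uid() is a binary search, so it depends on
+ gr_insertsort() keeping uid_set ordered by uid. A minimal trace,
+ assuming an initially empty table and taking gr_uid_lock as the real
+ call sites do:
+*/
+#if 0
+static void uid_table_example(void)
+{
+ spin_lock(&gr_uid_lock);
+ gr_insert_uid(1000, 0);
+ gr_insert_uid(100, 0);
+ gr_insert_uid(500, 0); /* table is now {100, 500, 1000} */
+ BUG_ON(gr_find_uid(500) != 1); /* found at index 1 */
+ BUG_ON(gr_find_uid(250) != -1); /* absent */
+ spin_unlock(&gr_uid_lock);
+}
+#endif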
58933+
58934+void
58935+gr_remove_uid(const unsigned short loc)
58936+{
58937+ unsigned short i;
58938+
58939+ for (i = loc + 1; i < uid_used; i++)
58940+ uid_set[i - 1] = uid_set[i];
58941+
58942+ uid_used--;
58943+
58944+ return;
58945+}
58946+
58947+int
58948+gr_check_crash_uid(const uid_t uid)
58949+{
58950+ int loc;
58951+ int ret = 0;
58952+
58953+ if (unlikely(!gr_acl_is_enabled()))
58954+ return 0;
58955+
58956+ spin_lock(&gr_uid_lock);
58957+ loc = gr_find_uid(uid);
58958+
58959+ if (loc < 0)
58960+ goto out_unlock;
58961+
58962+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
58963+ gr_remove_uid(loc);
58964+ else
58965+ ret = 1;
58966+
58967+out_unlock:
58968+ spin_unlock(&gr_uid_lock);
58969+ return ret;
58970+}
58971+
58972+static __inline__ int
58973+proc_is_setxid(const struct cred *cred)
58974+{
58975+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
58976+ !uid_eq(cred->uid, cred->fsuid))
58977+ return 1;
58978+ if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
58979+ !uid_eq(cred->gid, cred->fsgid))
58980+ return 1;
58981+
58982+ return 0;
58983+}
58984+
58985+extern int gr_fake_force_sig(int sig, struct task_struct *t);
58986+
58987+void
58988+gr_handle_crash(struct task_struct *task, const int sig)
58989+{
58990+ struct acl_subject_label *curr;
58991+ struct task_struct *tsk, *tsk2;
58992+ const struct cred *cred;
58993+ const struct cred *cred2;
58994+
58995+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
58996+ return;
58997+
58998+ if (unlikely(!gr_acl_is_enabled()))
58999+ return;
59000+
59001+ curr = task->acl;
59002+
59003+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
59004+ return;
59005+
59006+ if (time_before_eq(curr->expires, get_seconds())) {
59007+ curr->expires = 0;
59008+ curr->crashes = 0;
59009+ }
59010+
59011+ curr->crashes++;
59012+
59013+ if (!curr->expires)
59014+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
59015+
59016+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
59017+ time_after(curr->expires, get_seconds())) {
59018+ rcu_read_lock();
59019+ cred = __task_cred(task);
59020+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
59021+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
59022+ spin_lock(&gr_uid_lock);
59023+ gr_insert_uid(cred->uid, curr->expires);
59024+ spin_unlock(&gr_uid_lock);
59025+ curr->expires = 0;
59026+ curr->crashes = 0;
59027+ read_lock(&tasklist_lock);
59028+ do_each_thread(tsk2, tsk) {
59029+ cred2 = __task_cred(tsk);
59030+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
59031+ gr_fake_force_sig(SIGKILL, tsk);
59032+ } while_each_thread(tsk2, tsk);
59033+ read_unlock(&tasklist_lock);
59034+ } else {
59035+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
59036+ read_lock(&tasklist_lock);
59037+ read_lock(&grsec_exec_file_lock);
59038+ do_each_thread(tsk2, tsk) {
59039+ if (likely(tsk != task)) {
59040+ // if this thread has the same subject as the one that triggered
59041+ // RES_CRASH and it's the same binary, kill it
59042+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
59043+ gr_fake_force_sig(SIGKILL, tsk);
59044+ }
59045+ } while_each_thread(tsk2, tsk);
59046+ read_unlock(&grsec_exec_file_lock);
59047+ read_unlock(&tasklist_lock);
59048+ }
59049+ rcu_read_unlock();
59050+ }
59051+
59052+ return;
59053+}
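+
+/* Threshold sketch for the brute-force response above: crashes are
+ counted within a window of res[GR_CRASH_RES].rlim_max seconds, and the
+ kill/ban path fires once the count reaches rlim_cur inside that window;
+ e.g. with a limit of 4 crashes per 60-second window, the fourth
+ qualifying crash within 60 seconds of the first triggers the response.
+*/
+#if 0
+static int crash_threshold_hit_example(struct acl_subject_label *curr)
+{
+ if (time_before_eq(curr->expires, get_seconds())) {
+ curr->expires = 0; /* window elapsed: reset */
+ curr->crashes = 0;
+ }
+ curr->crashes++;
+ if (!curr->expires)
+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
+ return curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur &&
+ time_after(curr->expires, get_seconds());
+}
+#endif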
59054+
59055+int
59056+gr_check_crash_exec(const struct file *filp)
59057+{
59058+ struct acl_subject_label *curr;
59059+
59060+ if (unlikely(!gr_acl_is_enabled()))
59061+ return 0;
59062+
59063+ read_lock(&gr_inode_lock);
59064+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
59065+ __get_dev(filp->f_path.dentry),
59066+ current->role);
59067+ read_unlock(&gr_inode_lock);
59068+
59069+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
59070+ (!curr->crashes && !curr->expires))
59071+ return 0;
59072+
59073+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
59074+ time_after(curr->expires, get_seconds()))
59075+ return 1;
59076+ else if (time_before_eq(curr->expires, get_seconds())) {
59077+ curr->crashes = 0;
59078+ curr->expires = 0;
59079+ }
59080+
59081+ return 0;
59082+}
59083+
59084+void
59085+gr_handle_alertkill(struct task_struct *task)
59086+{
59087+ struct acl_subject_label *curracl;
59088+ __u32 curr_ip;
59089+ struct task_struct *p, *p2;
59090+
59091+ if (unlikely(!gr_acl_is_enabled()))
59092+ return;
59093+
59094+ curracl = task->acl;
59095+ curr_ip = task->signal->curr_ip;
59096+
59097+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
59098+ read_lock(&tasklist_lock);
59099+ do_each_thread(p2, p) {
59100+ if (p->signal->curr_ip == curr_ip)
59101+ gr_fake_force_sig(SIGKILL, p);
59102+ } while_each_thread(p2, p);
59103+ read_unlock(&tasklist_lock);
59104+ } else if (curracl->mode & GR_KILLPROC)
59105+ gr_fake_force_sig(SIGKILL, task);
59106+
59107+ return;
59108+}
59109diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
59110new file mode 100644
59111index 0000000..9d83a69
59112--- /dev/null
59113+++ b/grsecurity/gracl_shm.c
59114@@ -0,0 +1,40 @@
59115+#include <linux/kernel.h>
59116+#include <linux/mm.h>
59117+#include <linux/sched.h>
59118+#include <linux/file.h>
59119+#include <linux/ipc.h>
59120+#include <linux/gracl.h>
59121+#include <linux/grsecurity.h>
59122+#include <linux/grinternal.h>
59123+
59124+int
59125+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59126+ const time_t shm_createtime, const uid_t cuid, const int shmid)
59127+{
59128+ struct task_struct *task;
59129+
59130+ if (!gr_acl_is_enabled())
59131+ return 1;
59132+
59133+ rcu_read_lock();
59134+ read_lock(&tasklist_lock);
59135+
59136+ task = find_task_by_vpid(shm_cprid);
59137+
59138+ if (unlikely(!task))
59139+ task = find_task_by_vpid(shm_lapid);
59140+
59141+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
59142+ (task->pid == shm_lapid)) &&
59143+ (task->acl->mode & GR_PROTSHM) &&
59144+ (task->acl != current->acl))) {
59145+ read_unlock(&tasklist_lock);
59146+ rcu_read_unlock();
59147+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
59148+ return 0;
59149+ }
59150+ read_unlock(&tasklist_lock);
59151+ rcu_read_unlock();
59152+
59153+ return 1;
59154+}
59155diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
59156new file mode 100644
59157index 0000000..bc0be01
59158--- /dev/null
59159+++ b/grsecurity/grsec_chdir.c
59160@@ -0,0 +1,19 @@
59161+#include <linux/kernel.h>
59162+#include <linux/sched.h>
59163+#include <linux/fs.h>
59164+#include <linux/file.h>
59165+#include <linux/grsecurity.h>
59166+#include <linux/grinternal.h>
59167+
59168+void
59169+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
59170+{
59171+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59172+ if ((grsec_enable_chdir && grsec_enable_group &&
59173+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
59174+ !grsec_enable_group)) {
59175+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
59176+ }
59177+#endif
59178+ return;
59179+}
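+
+/* Equivalent predicate for the audit test above, factored for clarity:
+ chdir auditing applies whenever it is enabled, and when group-restricted
+ auditing is also on, only to members of grsec_audit_gid.
+*/
+#if 0
+static int should_audit_chdir_example(void)
+{
+ return grsec_enable_chdir &&
+ (!grsec_enable_group || in_group_p(grsec_audit_gid));
+}
+#endif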
59180diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
59181new file mode 100644
59182index 0000000..70fe0ae
59183--- /dev/null
59184+++ b/grsecurity/grsec_chroot.c
59185@@ -0,0 +1,357 @@
59186+#include <linux/kernel.h>
59187+#include <linux/module.h>
59188+#include <linux/sched.h>
59189+#include <linux/file.h>
59190+#include <linux/fs.h>
59191+#include <linux/mount.h>
59192+#include <linux/types.h>
59193+#include "../fs/mount.h"
59194+#include <linux/grsecurity.h>
59195+#include <linux/grinternal.h>
59196+
59197+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
59198+{
59199+#ifdef CONFIG_GRKERNSEC
59200+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
59201+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
59202+ task->gr_is_chrooted = 1;
59203+ else
59204+ task->gr_is_chrooted = 0;
59205+
59206+ task->gr_chroot_dentry = path->dentry;
59207+#endif
59208+ return;
59209+}
59210+
59211+void gr_clear_chroot_entries(struct task_struct *task)
59212+{
59213+#ifdef CONFIG_GRKERNSEC
59214+ task->gr_is_chrooted = 0;
59215+ task->gr_chroot_dentry = NULL;
59216+#endif
59217+ return;
59218+}
59219+
59220+int
59221+gr_handle_chroot_unix(const pid_t pid)
59222+{
59223+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59224+ struct task_struct *p;
59225+
59226+ if (unlikely(!grsec_enable_chroot_unix))
59227+ return 1;
59228+
59229+ if (likely(!proc_is_chrooted(current)))
59230+ return 1;
59231+
59232+ rcu_read_lock();
59233+ read_lock(&tasklist_lock);
59234+ p = find_task_by_vpid_unrestricted(pid);
59235+ if (unlikely(p && !have_same_root(current, p))) {
59236+ read_unlock(&tasklist_lock);
59237+ rcu_read_unlock();
59238+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
59239+ return 0;
59240+ }
59241+ read_unlock(&tasklist_lock);
59242+ rcu_read_unlock();
59243+#endif
59244+ return 1;
59245+}
59246+
59247+int
59248+gr_handle_chroot_nice(void)
59249+{
59250+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59251+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
59252+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
59253+ return -EPERM;
59254+ }
59255+#endif
59256+ return 0;
59257+}
59258+
59259+int
59260+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
59261+{
59262+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59263+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
59264+ && proc_is_chrooted(current)) {
59265+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
59266+ return -EACCES;
59267+ }
59268+#endif
59269+ return 0;
59270+}
59271+
59272+int
59273+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
59274+{
59275+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59276+ struct task_struct *p;
59277+ int ret = 0;
59278+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
59279+ return ret;
59280+
59281+ read_lock(&tasklist_lock);
59282+ do_each_pid_task(pid, type, p) {
59283+ if (!have_same_root(current, p)) {
59284+ ret = 1;
59285+ goto out;
59286+ }
59287+ } while_each_pid_task(pid, type, p);
59288+out:
59289+ read_unlock(&tasklist_lock);
59290+ return ret;
59291+#endif
59292+ return 0;
59293+}
59294+
59295+int
59296+gr_pid_is_chrooted(struct task_struct *p)
59297+{
59298+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59299+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
59300+ return 0;
59301+
59302+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
59303+ !have_same_root(current, p)) {
59304+ return 1;
59305+ }
59306+#endif
59307+ return 0;
59308+}
59309+
59310+EXPORT_SYMBOL(gr_pid_is_chrooted);
59311+
59312+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
59313+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
59314+{
59315+ struct path path, currentroot;
59316+ int ret = 0;
59317+
59318+ path.dentry = (struct dentry *)u_dentry;
59319+ path.mnt = (struct vfsmount *)u_mnt;
59320+ get_fs_root(current->fs, &currentroot);
59321+ if (path_is_under(&path, &currentroot))
59322+ ret = 1;
59323+ path_put(&currentroot);
59324+
59325+ return ret;
59326+}
59327+#endif
59328+
59329+int
59330+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
59331+{
59332+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59333+ if (!grsec_enable_chroot_fchdir)
59334+ return 1;
59335+
59336+ if (!proc_is_chrooted(current))
59337+ return 1;
59338+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
59339+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
59340+ return 0;
59341+ }
59342+#endif
59343+ return 1;
59344+}
59345+
59346+int
59347+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59348+ const time_t shm_createtime)
59349+{
59350+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59351+ struct task_struct *p;
59352+ time_t starttime;
59353+
59354+ if (unlikely(!grsec_enable_chroot_shmat))
59355+ return 1;
59356+
59357+ if (likely(!proc_is_chrooted(current)))
59358+ return 1;
59359+
59360+ rcu_read_lock();
59361+ read_lock(&tasklist_lock);
59362+
59363+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
59364+ starttime = p->start_time.tv_sec;
59365+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
59366+ if (have_same_root(current, p)) {
59367+ goto allow;
59368+ } else {
59369+ read_unlock(&tasklist_lock);
59370+ rcu_read_unlock();
59371+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59372+ return 0;
59373+ }
59374+ }
59375+	/* creator exited or its pid was reused; fall through to the next check */
59376+ }
59377+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
59378+ if (unlikely(!have_same_root(current, p))) {
59379+ read_unlock(&tasklist_lock);
59380+ rcu_read_unlock();
59381+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
59382+ return 0;
59383+ }
59384+ }
59385+
59386+allow:
59387+ read_unlock(&tasklist_lock);
59388+ rcu_read_unlock();
59389+#endif
59390+ return 1;
59391+}
59392+
59393+void
59394+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
59395+{
59396+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59397+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
59398+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
59399+#endif
59400+ return;
59401+}
59402+
59403+int
59404+gr_handle_chroot_mknod(const struct dentry *dentry,
59405+ const struct vfsmount *mnt, const int mode)
59406+{
59407+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59408+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
59409+ proc_is_chrooted(current)) {
59410+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
59411+ return -EPERM;
59412+ }
59413+#endif
59414+ return 0;
59415+}
59416+
59417+int
59418+gr_handle_chroot_mount(const struct dentry *dentry,
59419+ const struct vfsmount *mnt, const char *dev_name)
59420+{
59421+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59422+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
59423+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
59424+ return -EPERM;
59425+ }
59426+#endif
59427+ return 0;
59428+}
59429+
59430+int
59431+gr_handle_chroot_pivot(void)
59432+{
59433+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59434+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
59435+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
59436+ return -EPERM;
59437+ }
59438+#endif
59439+ return 0;
59440+}
59441+
59442+int
59443+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
59444+{
59445+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59446+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
59447+ !gr_is_outside_chroot(dentry, mnt)) {
59448+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
59449+ return -EPERM;
59450+ }
59451+#endif
59452+ return 0;
59453+}
59454+
59455+extern const char *captab_log[];
59456+extern int captab_log_entries;
59457+
59458+int
59459+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
59460+{
59461+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59462+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59463+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59464+ if (cap_raised(chroot_caps, cap)) {
59465+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
59466+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
59467+ }
59468+ return 0;
59469+ }
59470+ }
59471+#endif
59472+ return 1;
59473+}
59474+
59475+int
59476+gr_chroot_is_capable(const int cap)
59477+{
59478+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59479+ return gr_task_chroot_is_capable(current, current_cred(), cap);
59480+#endif
59481+ return 1;
59482+}
59483+
59484+int
59485+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
59486+{
59487+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59488+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
59489+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
59490+ if (cap_raised(chroot_caps, cap)) {
59491+ return 0;
59492+ }
59493+ }
59494+#endif
59495+ return 1;
59496+}
59497+
59498+int
59499+gr_chroot_is_capable_nolog(const int cap)
59500+{
59501+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59502+ return gr_task_chroot_is_capable_nolog(current, cap);
59503+#endif
59504+ return 1;
59505+}
59506+
59507+int
59508+gr_handle_chroot_sysctl(const int op)
59509+{
59510+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59511+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
59512+ proc_is_chrooted(current))
59513+ return -EACCES;
59514+#endif
59515+ return 0;
59516+}
59517+
59518+void
59519+gr_handle_chroot_chdir(struct path *path)
59520+{
59521+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59522+ if (grsec_enable_chroot_chdir)
59523+ set_fs_pwd(current->fs, path);
59524+#endif
59525+ return;
59526+}
59527+
59528+int
59529+gr_handle_chroot_chmod(const struct dentry *dentry,
59530+ const struct vfsmount *mnt, const int mode)
59531+{
59532+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59533+ /* allow chmod +s on directories, but not files */
59534+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
59535+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
59536+ proc_is_chrooted(current)) {
59537+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
59538+ return -EPERM;
59539+ }
59540+#endif
59541+ return 0;
59542+}
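
The CHROOT_CAPS logic above intersects the requested capability with GR_CHROOT_CAPS, a fixed deny-mask of capabilities considered dangerous inside a chroot; a raised bit means the grant is refused (and, in the logging variant, reported when the task would otherwise have held the capability). A standalone model of the bit test; the CAP_* numbers match <linux/capability.h>, but the mask contents here are an illustrative subset, not the real GR_CHROOT_CAPS set:

/* Model of the chroot capability filter: deny any capability whose bit
 * is raised in a deny-mask, otherwise fall through to the normal check. */
#include <stdint.h>
#include <stdio.h>

#define CAP_SYS_MODULE 16
#define CAP_SYS_RAWIO  17
#define CAP_SYS_CHROOT 18
#define CAP_MKNOD      27

static const uint64_t chroot_deny_mask =
	(1ULL << CAP_SYS_MODULE) | (1ULL << CAP_SYS_RAWIO) |
	(1ULL << CAP_SYS_CHROOT) | (1ULL << CAP_MKNOD);

static int chroot_is_capable(int cap, int task_is_chrooted)
{
	if (task_is_chrooted && (chroot_deny_mask & (1ULL << cap)))
		return 0;  /* raised in the deny set: refuse */
	return 1;          /* defer to the ordinary capability check */
}

int main(void)
{
	printf("CAP_SYS_MODULE in chroot: %d\n", chroot_is_capable(CAP_SYS_MODULE, 1));
	printf("CAP_SYS_MODULE outside:   %d\n", chroot_is_capable(CAP_SYS_MODULE, 0));
	return 0;
}
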
59543diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
59544new file mode 100644
59545index 0000000..e6796b3
59546--- /dev/null
59547+++ b/grsecurity/grsec_disabled.c
59548@@ -0,0 +1,434 @@
59549+#include <linux/kernel.h>
59550+#include <linux/module.h>
59551+#include <linux/sched.h>
59552+#include <linux/file.h>
59553+#include <linux/fs.h>
59554+#include <linux/kdev_t.h>
59555+#include <linux/net.h>
59556+#include <linux/in.h>
59557+#include <linux/ip.h>
59558+#include <linux/skbuff.h>
59559+#include <linux/sysctl.h>
59560+
59561+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59562+void
59563+pax_set_initial_flags(struct linux_binprm *bprm)
59564+{
59565+ return;
59566+}
59567+#endif
59568+
59569+#ifdef CONFIG_SYSCTL
59570+__u32
59571+gr_handle_sysctl(const struct ctl_table * table, const int op)
59572+{
59573+ return 0;
59574+}
59575+#endif
59576+
59577+#ifdef CONFIG_TASKSTATS
59578+int gr_is_taskstats_denied(int pid)
59579+{
59580+ return 0;
59581+}
59582+#endif
59583+
59584+int
59585+gr_acl_is_enabled(void)
59586+{
59587+ return 0;
59588+}
59589+
59590+void
59591+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59592+{
59593+ return;
59594+}
59595+
59596+int
59597+gr_handle_rawio(const struct inode *inode)
59598+{
59599+ return 0;
59600+}
59601+
59602+void
59603+gr_acl_handle_psacct(struct task_struct *task, const long code)
59604+{
59605+ return;
59606+}
59607+
59608+int
59609+gr_handle_ptrace(struct task_struct *task, const long request)
59610+{
59611+ return 0;
59612+}
59613+
59614+int
59615+gr_handle_proc_ptrace(struct task_struct *task)
59616+{
59617+ return 0;
59618+}
59619+
59620+int
59621+gr_set_acls(const int type)
59622+{
59623+ return 0;
59624+}
59625+
59626+int
59627+gr_check_hidden_task(const struct task_struct *tsk)
59628+{
59629+ return 0;
59630+}
59631+
59632+int
59633+gr_check_protected_task(const struct task_struct *task)
59634+{
59635+ return 0;
59636+}
59637+
59638+int
59639+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
59640+{
59641+ return 0;
59642+}
59643+
59644+void
59645+gr_copy_label(struct task_struct *tsk)
59646+{
59647+ return;
59648+}
59649+
59650+void
59651+gr_set_pax_flags(struct task_struct *task)
59652+{
59653+ return;
59654+}
59655+
59656+int
59657+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59658+ const int unsafe_share)
59659+{
59660+ return 0;
59661+}
59662+
59663+void
59664+gr_handle_delete(const ino_t ino, const dev_t dev)
59665+{
59666+ return;
59667+}
59668+
59669+void
59670+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59671+{
59672+ return;
59673+}
59674+
59675+void
59676+gr_handle_crash(struct task_struct *task, const int sig)
59677+{
59678+ return;
59679+}
59680+
59681+int
59682+gr_check_crash_exec(const struct file *filp)
59683+{
59684+ return 0;
59685+}
59686+
59687+int
59688+gr_check_crash_uid(const uid_t uid)
59689+{
59690+ return 0;
59691+}
59692+
59693+void
59694+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59695+ struct dentry *old_dentry,
59696+ struct dentry *new_dentry,
59697+ struct vfsmount *mnt, const __u8 replace)
59698+{
59699+ return;
59700+}
59701+
59702+int
59703+gr_search_socket(const int family, const int type, const int protocol)
59704+{
59705+ return 1;
59706+}
59707+
59708+int
59709+gr_search_connectbind(const int mode, const struct socket *sock,
59710+ const struct sockaddr_in *addr)
59711+{
59712+ return 0;
59713+}
59714+
59715+void
59716+gr_handle_alertkill(struct task_struct *task)
59717+{
59718+ return;
59719+}
59720+
59721+__u32
59722+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
59723+{
59724+ return 1;
59725+}
59726+
59727+__u32
59728+gr_acl_handle_hidden_file(const struct dentry * dentry,
59729+ const struct vfsmount * mnt)
59730+{
59731+ return 1;
59732+}
59733+
59734+__u32
59735+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59736+ int acc_mode)
59737+{
59738+ return 1;
59739+}
59740+
59741+__u32
59742+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
59743+{
59744+ return 1;
59745+}
59746+
59747+__u32
59748+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
59749+{
59750+ return 1;
59751+}
59752+
59753+int
59754+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
59755+ unsigned int *vm_flags)
59756+{
59757+ return 1;
59758+}
59759+
59760+__u32
59761+gr_acl_handle_truncate(const struct dentry * dentry,
59762+ const struct vfsmount * mnt)
59763+{
59764+ return 1;
59765+}
59766+
59767+__u32
59768+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
59769+{
59770+ return 1;
59771+}
59772+
59773+__u32
59774+gr_acl_handle_access(const struct dentry * dentry,
59775+ const struct vfsmount * mnt, const int fmode)
59776+{
59777+ return 1;
59778+}
59779+
59780+__u32
59781+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
59782+ umode_t *mode)
59783+{
59784+ return 1;
59785+}
59786+
59787+__u32
59788+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
59789+{
59790+ return 1;
59791+}
59792+
59793+__u32
59794+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
59795+{
59796+ return 1;
59797+}
59798+
59799+void
59800+grsecurity_init(void)
59801+{
59802+ return;
59803+}
59804+
59805+umode_t gr_acl_umask(void)
59806+{
59807+ return 0;
59808+}
59809+
59810+__u32
59811+gr_acl_handle_mknod(const struct dentry * new_dentry,
59812+ const struct dentry * parent_dentry,
59813+ const struct vfsmount * parent_mnt,
59814+ const int mode)
59815+{
59816+ return 1;
59817+}
59818+
59819+__u32
59820+gr_acl_handle_mkdir(const struct dentry * new_dentry,
59821+ const struct dentry * parent_dentry,
59822+ const struct vfsmount * parent_mnt)
59823+{
59824+ return 1;
59825+}
59826+
59827+__u32
59828+gr_acl_handle_symlink(const struct dentry * new_dentry,
59829+ const struct dentry * parent_dentry,
59830+ const struct vfsmount * parent_mnt, const struct filename *from)
59831+{
59832+ return 1;
59833+}
59834+
59835+__u32
59836+gr_acl_handle_link(const struct dentry * new_dentry,
59837+ const struct dentry * parent_dentry,
59838+ const struct vfsmount * parent_mnt,
59839+ const struct dentry * old_dentry,
59840+ const struct vfsmount * old_mnt, const struct filename *to)
59841+{
59842+ return 1;
59843+}
59844+
59845+int
59846+gr_acl_handle_rename(const struct dentry *new_dentry,
59847+ const struct dentry *parent_dentry,
59848+ const struct vfsmount *parent_mnt,
59849+ const struct dentry *old_dentry,
59850+ const struct inode *old_parent_inode,
59851+ const struct vfsmount *old_mnt, const struct filename *newname)
59852+{
59853+ return 0;
59854+}
59855+
59856+int
59857+gr_acl_handle_filldir(const struct file *file, const char *name,
59858+ const int namelen, const ino_t ino)
59859+{
59860+ return 1;
59861+}
59862+
59863+int
59864+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59865+ const time_t shm_createtime, const uid_t cuid, const int shmid)
59866+{
59867+ return 1;
59868+}
59869+
59870+int
59871+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
59872+{
59873+ return 0;
59874+}
59875+
59876+int
59877+gr_search_accept(const struct socket *sock)
59878+{
59879+ return 0;
59880+}
59881+
59882+int
59883+gr_search_listen(const struct socket *sock)
59884+{
59885+ return 0;
59886+}
59887+
59888+int
59889+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
59890+{
59891+ return 0;
59892+}
59893+
59894+__u32
59895+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
59896+{
59897+ return 1;
59898+}
59899+
59900+__u32
59901+gr_acl_handle_creat(const struct dentry * dentry,
59902+ const struct dentry * p_dentry,
59903+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
59904+ const int imode)
59905+{
59906+ return 1;
59907+}
59908+
59909+void
59910+gr_acl_handle_exit(void)
59911+{
59912+ return;
59913+}
59914+
59915+int
59916+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59917+{
59918+ return 1;
59919+}
59920+
59921+void
59922+gr_set_role_label(const uid_t uid, const gid_t gid)
59923+{
59924+ return;
59925+}
59926+
59927+int
59928+gr_acl_handle_procpidmem(const struct task_struct *task)
59929+{
59930+ return 0;
59931+}
59932+
59933+int
59934+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
59935+{
59936+ return 0;
59937+}
59938+
59939+int
59940+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
59941+{
59942+ return 0;
59943+}
59944+
59945+void
59946+gr_set_kernel_label(struct task_struct *task)
59947+{
59948+ return;
59949+}
59950+
59951+int
59952+gr_check_user_change(int real, int effective, int fs)
59953+{
59954+ return 0;
59955+}
59956+
59957+int
59958+gr_check_group_change(int real, int effective, int fs)
59959+{
59960+ return 0;
59961+}
59962+
59963+int gr_acl_enable_at_secure(void)
59964+{
59965+ return 0;
59966+}
59967+
59968+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
59969+{
59970+ return dentry->d_inode->i_sb->s_dev;
59971+}
59972+
59973+void gr_put_exec_file(struct task_struct *task)
59974+{
59975+ return;
59976+}
59977+
59978+EXPORT_SYMBOL(gr_set_kernel_label);
59979+#ifdef CONFIG_SECURITY
59980+EXPORT_SYMBOL(gr_check_user_change);
59981+EXPORT_SYMBOL(gr_check_group_change);
59982+#endif
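
grsec_disabled.c is the everything-off build of the hook layer: every function keeps its signature but returns the permissive answer, so call sites elsewhere in the kernel stay free of #ifdef CONFIG_GRKERNSEC. Note the convention: allow/deny decision hooks return 1 for allow, while should-we-act hooks return 0 for no. A compact sketch of that stub pattern under an assumed POLICY_ENGINE macro (all names here are invented for the sketch):

/* The stub pattern used above: one declaration, two guarded definitions,
 * callers stay unconditional. Illustrative, not the kernel build system. */
#include <stdio.h>

/* the hook every caller sees */
int policy_allows_open(const char *path);

#ifdef POLICY_ENGINE
int policy_allows_open(const char *path)
{
	/* real decision logic would live here */
	return path[0] != '\0';
}
#else
/* disabled build: same signature, permissive answer */
int policy_allows_open(const char *path)
{
	(void)path;
	return 1;
}
#endif

int main(void)
{
	printf("open allowed: %d\n", policy_allows_open("/etc/passwd"));
	return 0;
}
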
59983diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
59984new file mode 100644
59985index 0000000..abfa971
59986--- /dev/null
59987+++ b/grsecurity/grsec_exec.c
59988@@ -0,0 +1,174 @@
59989+#include <linux/kernel.h>
59990+#include <linux/sched.h>
59991+#include <linux/file.h>
59992+#include <linux/binfmts.h>
59993+#include <linux/fs.h>
59994+#include <linux/types.h>
59995+#include <linux/grdefs.h>
59996+#include <linux/grsecurity.h>
59997+#include <linux/grinternal.h>
59998+#include <linux/capability.h>
59999+#include <linux/module.h>
60000+
60001+#include <asm/uaccess.h>
60002+
60003+#ifdef CONFIG_GRKERNSEC_EXECLOG
60004+static char gr_exec_arg_buf[132];
60005+static DEFINE_MUTEX(gr_exec_arg_mutex);
60006+#endif
60007+
60008+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
60009+
60010+void
60011+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
60012+{
60013+#ifdef CONFIG_GRKERNSEC_EXECLOG
60014+ char *grarg = gr_exec_arg_buf;
60015+ unsigned int i, x, execlen = 0;
60016+ char c;
60017+
60018+ if (!((grsec_enable_execlog && grsec_enable_group &&
60019+ in_group_p(grsec_audit_gid))
60020+ || (grsec_enable_execlog && !grsec_enable_group)))
60021+ return;
60022+
60023+ mutex_lock(&gr_exec_arg_mutex);
60024+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
60025+
60026+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
60027+ const char __user *p;
60028+ unsigned int len;
60029+
60030+ p = get_user_arg_ptr(argv, i);
60031+ if (IS_ERR(p))
60032+ goto log;
60033+
60034+ len = strnlen_user(p, 128 - execlen);
60035+ if (len > 128 - execlen)
60036+ len = 128 - execlen;
60037+ else if (len > 0)
60038+ len--;
60039+ if (copy_from_user(grarg + execlen, p, len))
60040+ goto log;
60041+
60042+ /* rewrite unprintable characters */
60043+ for (x = 0; x < len; x++) {
60044+ c = *(grarg + execlen + x);
60045+ if (c < 32 || c > 126)
60046+ *(grarg + execlen + x) = ' ';
60047+ }
60048+
60049+ execlen += len;
60050+ *(grarg + execlen) = ' ';
60051+ *(grarg + execlen + 1) = '\0';
60052+ execlen++;
60053+ }
60054+
60055+ log:
60056+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
60057+ bprm->file->f_path.mnt, grarg);
60058+ mutex_unlock(&gr_exec_arg_mutex);
60059+#endif
60060+ return;
60061+}
60062+
60063+#ifdef CONFIG_GRKERNSEC
60064+extern int gr_acl_is_capable(const int cap);
60065+extern int gr_acl_is_capable_nolog(const int cap);
60066+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
60067+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
60068+extern int gr_chroot_is_capable(const int cap);
60069+extern int gr_chroot_is_capable_nolog(const int cap);
60070+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
60071+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
60072+#endif
60073+
60074+const char *captab_log[] = {
60075+ "CAP_CHOWN",
60076+ "CAP_DAC_OVERRIDE",
60077+ "CAP_DAC_READ_SEARCH",
60078+ "CAP_FOWNER",
60079+ "CAP_FSETID",
60080+ "CAP_KILL",
60081+ "CAP_SETGID",
60082+ "CAP_SETUID",
60083+ "CAP_SETPCAP",
60084+ "CAP_LINUX_IMMUTABLE",
60085+ "CAP_NET_BIND_SERVICE",
60086+ "CAP_NET_BROADCAST",
60087+ "CAP_NET_ADMIN",
60088+ "CAP_NET_RAW",
60089+ "CAP_IPC_LOCK",
60090+ "CAP_IPC_OWNER",
60091+ "CAP_SYS_MODULE",
60092+ "CAP_SYS_RAWIO",
60093+ "CAP_SYS_CHROOT",
60094+ "CAP_SYS_PTRACE",
60095+ "CAP_SYS_PACCT",
60096+ "CAP_SYS_ADMIN",
60097+ "CAP_SYS_BOOT",
60098+ "CAP_SYS_NICE",
60099+ "CAP_SYS_RESOURCE",
60100+ "CAP_SYS_TIME",
60101+ "CAP_SYS_TTY_CONFIG",
60102+ "CAP_MKNOD",
60103+ "CAP_LEASE",
60104+ "CAP_AUDIT_WRITE",
60105+ "CAP_AUDIT_CONTROL",
60106+ "CAP_SETFCAP",
60107+ "CAP_MAC_OVERRIDE",
60108+ "CAP_MAC_ADMIN",
60109+ "CAP_SYSLOG",
60110+ "CAP_WAKE_ALARM"
60111+};
60112+
60113+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
60114+
60115+int gr_is_capable(const int cap)
60116+{
60117+#ifdef CONFIG_GRKERNSEC
60118+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
60119+ return 1;
60120+ return 0;
60121+#else
60122+ return 1;
60123+#endif
60124+}
60125+
60126+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
60127+{
60128+#ifdef CONFIG_GRKERNSEC
60129+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
60130+ return 1;
60131+ return 0;
60132+#else
60133+ return 1;
60134+#endif
60135+}
60136+
60137+int gr_is_capable_nolog(const int cap)
60138+{
60139+#ifdef CONFIG_GRKERNSEC
60140+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
60141+ return 1;
60142+ return 0;
60143+#else
60144+ return 1;
60145+#endif
60146+}
60147+
60148+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
60149+{
60150+#ifdef CONFIG_GRKERNSEC
60151+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
60152+ return 1;
60153+ return 0;
60154+#else
60155+ return 1;
60156+#endif
60157+}
60158+
60159+EXPORT_SYMBOL(gr_is_capable);
60160+EXPORT_SYMBOL(gr_is_capable_nolog);
60161+EXPORT_SYMBOL(gr_task_is_capable);
60162+EXPORT_SYMBOL(gr_task_is_capable_nolog);
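
The exec logger above serializes argv into a static 132-byte buffer under a mutex, caps the copy at 128 bytes, and then rewrites anything outside printable ASCII to a space so terminal escape sequences cannot be smuggled into the kernel log. The sanitizing step, isolated as a standalone function:

/* The unprintable-character rewrite from gr_handle_exec_args(), applied
 * to an ordinary NUL-terminated string. */
#include <stdio.h>

static void sanitize_for_log(char *s)
{
	for (; *s; s++)
		if (*s < 32 || *s > 126)   /* outside printable ASCII */
			*s = ' ';
}

int main(void)
{
	char arg[] = "ls\x1b[2J\x07-la";   /* embedded escape + bell */
	sanitize_for_log(arg);
	printf("logged as: '%s'\n", arg);  /* prints: ls [2J -la */
	return 0;
}
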
60163diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
60164new file mode 100644
60165index 0000000..d3ee748
60166--- /dev/null
60167+++ b/grsecurity/grsec_fifo.c
60168@@ -0,0 +1,24 @@
60169+#include <linux/kernel.h>
60170+#include <linux/sched.h>
60171+#include <linux/fs.h>
60172+#include <linux/file.h>
60173+#include <linux/grinternal.h>
60174+
60175+int
60176+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
60177+ const struct dentry *dir, const int flag, const int acc_mode)
60178+{
60179+#ifdef CONFIG_GRKERNSEC_FIFO
60180+ const struct cred *cred = current_cred();
60181+
60182+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
60183+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
60184+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
60185+ (cred->fsuid != dentry->d_inode->i_uid)) {
60186+ if (!inode_permission(dentry->d_inode, acc_mode))
60187+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
60188+ return -EACCES;
60189+ }
60190+#endif
60191+ return 0;
60192+}
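
gr_handle_fifo() targets the classic /tmp FIFO trap: an attacker pre-creates a FIFO in a world-writable sticky directory and waits for a privileged program to open it. The open is refused when the FIFO's owner differs from both the directory owner and the opener; O_EXCL opens are exempt because the caller would then be creating the FIFO itself. A userspace model of the decision using stat(2)-style fields (not the VFS hook):

/* Userspace model of gr_handle_fifo()'s decision. */
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>

static int fifo_open_denied(mode_t file_mode, uid_t file_uid,
			    mode_t dir_mode, uid_t dir_uid,
			    uid_t opener_uid, int open_flags)
{
	return S_ISFIFO(file_mode) &&
	       !(open_flags & O_EXCL) &&
	       (dir_mode & S_ISVTX) &&    /* sticky dir, e.g. /tmp */
	       file_uid != dir_uid &&
	       opener_uid != file_uid;
}

int main(void)
{
	/* attacker (uid 1001) planted a FIFO in root-owned sticky /tmp;
	 * a uid-0 daemon tries to open it for writing */
	printf("denied: %d\n",
	       fifo_open_denied(S_IFIFO | 0666, 1001,
				S_IFDIR | 01777, 0,
				0, O_WRONLY));
	return 0;
}
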
60193diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
60194new file mode 100644
60195index 0000000..8ca18bf
60196--- /dev/null
60197+++ b/grsecurity/grsec_fork.c
60198@@ -0,0 +1,23 @@
60199+#include <linux/kernel.h>
60200+#include <linux/sched.h>
60201+#include <linux/grsecurity.h>
60202+#include <linux/grinternal.h>
60203+#include <linux/errno.h>
60204+
60205+void
60206+gr_log_forkfail(const int retval)
60207+{
60208+#ifdef CONFIG_GRKERNSEC_FORKFAIL
60209+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
60210+ switch (retval) {
60211+ case -EAGAIN:
60212+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
60213+ break;
60214+ case -ENOMEM:
60215+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
60216+ break;
60217+ }
60218+ }
60219+#endif
60220+ return;
60221+}
60222diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
60223new file mode 100644
60224index 0000000..05a6015
60225--- /dev/null
60226+++ b/grsecurity/grsec_init.c
60227@@ -0,0 +1,283 @@
60228+#include <linux/kernel.h>
60229+#include <linux/sched.h>
60230+#include <linux/mm.h>
60231+#include <linux/gracl.h>
60232+#include <linux/slab.h>
60233+#include <linux/vmalloc.h>
60234+#include <linux/percpu.h>
60235+#include <linux/module.h>
60236+
60237+int grsec_enable_ptrace_readexec;
60238+int grsec_enable_setxid;
60239+int grsec_enable_symlinkown;
60240+int grsec_symlinkown_gid;
60241+int grsec_enable_brute;
60242+int grsec_enable_link;
60243+int grsec_enable_dmesg;
60244+int grsec_enable_harden_ptrace;
60245+int grsec_enable_fifo;
60246+int grsec_enable_execlog;
60247+int grsec_enable_signal;
60248+int grsec_enable_forkfail;
60249+int grsec_enable_audit_ptrace;
60250+int grsec_enable_time;
60251+int grsec_enable_audit_textrel;
60252+int grsec_enable_group;
60253+int grsec_audit_gid;
60254+int grsec_enable_chdir;
60255+int grsec_enable_mount;
60256+int grsec_enable_rofs;
60257+int grsec_enable_chroot_findtask;
60258+int grsec_enable_chroot_mount;
60259+int grsec_enable_chroot_shmat;
60260+int grsec_enable_chroot_fchdir;
60261+int grsec_enable_chroot_double;
60262+int grsec_enable_chroot_pivot;
60263+int grsec_enable_chroot_chdir;
60264+int grsec_enable_chroot_chmod;
60265+int grsec_enable_chroot_mknod;
60266+int grsec_enable_chroot_nice;
60267+int grsec_enable_chroot_execlog;
60268+int grsec_enable_chroot_caps;
60269+int grsec_enable_chroot_sysctl;
60270+int grsec_enable_chroot_unix;
60271+int grsec_enable_tpe;
60272+int grsec_tpe_gid;
60273+int grsec_enable_blackhole;
60274+#ifdef CONFIG_IPV6_MODULE
60275+EXPORT_SYMBOL(grsec_enable_blackhole);
60276+#endif
60277+int grsec_lastack_retries;
60278+int grsec_enable_tpe_all;
60279+int grsec_enable_tpe_invert;
60280+int grsec_enable_socket_all;
60281+int grsec_socket_all_gid;
60282+int grsec_enable_socket_client;
60283+int grsec_socket_client_gid;
60284+int grsec_enable_socket_server;
60285+int grsec_socket_server_gid;
60286+int grsec_resource_logging;
60287+int grsec_disable_privio;
60288+int grsec_enable_log_rwxmaps;
60289+int grsec_lock;
60290+
60291+DEFINE_SPINLOCK(grsec_alert_lock);
60292+unsigned long grsec_alert_wtime = 0;
60293+unsigned long grsec_alert_fyet = 0;
60294+
60295+DEFINE_SPINLOCK(grsec_audit_lock);
60296+
60297+DEFINE_RWLOCK(grsec_exec_file_lock);
60298+
60299+char *gr_shared_page[4];
60300+
60301+char *gr_alert_log_fmt;
60302+char *gr_audit_log_fmt;
60303+char *gr_alert_log_buf;
60304+char *gr_audit_log_buf;
60305+
60306+extern struct gr_arg *gr_usermode;
60307+extern unsigned char *gr_system_salt;
60308+extern unsigned char *gr_system_sum;
60309+
60310+void __init
60311+grsecurity_init(void)
60312+{
60313+ int j;
60314+ /* create the per-cpu shared pages */
60315+
60316+#ifdef CONFIG_X86
60317+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
60318+#endif
60319+
60320+ for (j = 0; j < 4; j++) {
60321+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
60322+ if (gr_shared_page[j] == NULL) {
60323+ panic("Unable to allocate grsecurity shared page");
60324+ return;
60325+ }
60326+ }
60327+
60328+ /* allocate log buffers */
60329+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
60330+ if (!gr_alert_log_fmt) {
60331+ panic("Unable to allocate grsecurity alert log format buffer");
60332+ return;
60333+ }
60334+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
60335+ if (!gr_audit_log_fmt) {
60336+ panic("Unable to allocate grsecurity audit log format buffer");
60337+ return;
60338+ }
60339+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60340+ if (!gr_alert_log_buf) {
60341+ panic("Unable to allocate grsecurity alert log buffer");
60342+ return;
60343+ }
60344+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
60345+ if (!gr_audit_log_buf) {
60346+ panic("Unable to allocate grsecurity audit log buffer");
60347+ return;
60348+ }
60349+
60350+ /* allocate memory for authentication structure */
60351+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
60352+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
60353+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
60354+
60355+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
60356+ panic("Unable to allocate grsecurity authentication structure");
60357+ return;
60358+ }
60359+
60360+
60361+#ifdef CONFIG_GRKERNSEC_IO
60362+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
60363+ grsec_disable_privio = 1;
60364+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60365+ grsec_disable_privio = 1;
60366+#else
60367+ grsec_disable_privio = 0;
60368+#endif
60369+#endif
60370+
60371+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60372+ /* for backward compatibility, tpe_invert always defaults to on if
60373+ enabled in the kernel
60374+ */
60375+ grsec_enable_tpe_invert = 1;
60376+#endif
60377+
60378+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
60379+#ifndef CONFIG_GRKERNSEC_SYSCTL
60380+ grsec_lock = 1;
60381+#endif
60382+
60383+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
60384+ grsec_enable_audit_textrel = 1;
60385+#endif
60386+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60387+ grsec_enable_log_rwxmaps = 1;
60388+#endif
60389+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
60390+ grsec_enable_group = 1;
60391+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
60392+#endif
60393+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
60394+ grsec_enable_ptrace_readexec = 1;
60395+#endif
60396+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
60397+ grsec_enable_chdir = 1;
60398+#endif
60399+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60400+ grsec_enable_harden_ptrace = 1;
60401+#endif
60402+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60403+ grsec_enable_mount = 1;
60404+#endif
60405+#ifdef CONFIG_GRKERNSEC_LINK
60406+ grsec_enable_link = 1;
60407+#endif
60408+#ifdef CONFIG_GRKERNSEC_BRUTE
60409+ grsec_enable_brute = 1;
60410+#endif
60411+#ifdef CONFIG_GRKERNSEC_DMESG
60412+ grsec_enable_dmesg = 1;
60413+#endif
60414+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
60415+ grsec_enable_blackhole = 1;
60416+ grsec_lastack_retries = 4;
60417+#endif
60418+#ifdef CONFIG_GRKERNSEC_FIFO
60419+ grsec_enable_fifo = 1;
60420+#endif
60421+#ifdef CONFIG_GRKERNSEC_EXECLOG
60422+ grsec_enable_execlog = 1;
60423+#endif
60424+#ifdef CONFIG_GRKERNSEC_SETXID
60425+ grsec_enable_setxid = 1;
60426+#endif
60427+#ifdef CONFIG_GRKERNSEC_SIGNAL
60428+ grsec_enable_signal = 1;
60429+#endif
60430+#ifdef CONFIG_GRKERNSEC_FORKFAIL
60431+ grsec_enable_forkfail = 1;
60432+#endif
60433+#ifdef CONFIG_GRKERNSEC_TIME
60434+ grsec_enable_time = 1;
60435+#endif
60436+#ifdef CONFIG_GRKERNSEC_RESLOG
60437+ grsec_resource_logging = 1;
60438+#endif
60439+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60440+ grsec_enable_chroot_findtask = 1;
60441+#endif
60442+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
60443+ grsec_enable_chroot_unix = 1;
60444+#endif
60445+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
60446+ grsec_enable_chroot_mount = 1;
60447+#endif
60448+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
60449+ grsec_enable_chroot_fchdir = 1;
60450+#endif
60451+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
60452+ grsec_enable_chroot_shmat = 1;
60453+#endif
60454+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
60455+ grsec_enable_audit_ptrace = 1;
60456+#endif
60457+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
60458+ grsec_enable_chroot_double = 1;
60459+#endif
60460+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
60461+ grsec_enable_chroot_pivot = 1;
60462+#endif
60463+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
60464+ grsec_enable_chroot_chdir = 1;
60465+#endif
60466+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
60467+ grsec_enable_chroot_chmod = 1;
60468+#endif
60469+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
60470+ grsec_enable_chroot_mknod = 1;
60471+#endif
60472+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
60473+ grsec_enable_chroot_nice = 1;
60474+#endif
60475+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
60476+ grsec_enable_chroot_execlog = 1;
60477+#endif
60478+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60479+ grsec_enable_chroot_caps = 1;
60480+#endif
60481+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
60482+ grsec_enable_chroot_sysctl = 1;
60483+#endif
60484+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60485+ grsec_enable_symlinkown = 1;
60486+ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
60487+#endif
60488+#ifdef CONFIG_GRKERNSEC_TPE
60489+ grsec_enable_tpe = 1;
60490+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
60491+#ifdef CONFIG_GRKERNSEC_TPE_ALL
60492+ grsec_enable_tpe_all = 1;
60493+#endif
60494+#endif
60495+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
60496+ grsec_enable_socket_all = 1;
60497+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
60498+#endif
60499+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
60500+ grsec_enable_socket_client = 1;
60501+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
60502+#endif
60503+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
60504+ grsec_enable_socket_server = 1;
60505+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
60506+#endif
60507+#endif
60508+
60509+ return;
60510+}
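
grsecurity_init() shows the feature-flag bootstrapping convention: each feature is a plain int defaulting to 0, and when sysctl support is compiled out (or forced on with SYSCTL_ON) the Kconfig selections are baked into those ints at boot; with runtime sysctl control the ints instead stay 0 until the admin flips them. A stripped-down model of the pattern (the FEATURE_/HAVE_ macro names are invented for the sketch):

/* Model of the grsec_init convention: runtime int flags, compile-time
 * defaults applied only when runtime control is absent or forced on. */
#include <stdio.h>

static int feature_audit_mount;   /* like grsec_enable_mount */
static int feature_dmesg;         /* like grsec_enable_dmesg */

static void policy_init(void)
{
#if !defined(HAVE_RUNTIME_SYSCTL) || defined(FORCE_ON_AT_BOOT)
#ifdef FEATURE_AUDIT_MOUNT
	feature_audit_mount = 1;
#endif
#ifdef FEATURE_DMESG
	feature_dmesg = 1;
#endif
#endif
	/* with HAVE_RUNTIME_SYSCTL and no forcing, every flag stays 0
	 * until the admin enables it at runtime */
}

int main(void)
{
	policy_init();
	printf("audit_mount=%d dmesg=%d\n", feature_audit_mount, feature_dmesg);
	return 0;
}
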
60511diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
60512new file mode 100644
60513index 0000000..6095407
60514--- /dev/null
60515+++ b/grsecurity/grsec_link.c
60516@@ -0,0 +1,58 @@
60517+#include <linux/kernel.h>
60518+#include <linux/sched.h>
60519+#include <linux/fs.h>
60520+#include <linux/file.h>
60521+#include <linux/grinternal.h>
60522+
60523+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
60524+{
60525+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
60526+ const struct inode *link_inode = link->dentry->d_inode;
60527+
60528+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
60529+ /* ignore root-owned links, e.g. /proc/self */
60530+ !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
60531+ !uid_eq(link_inode->i_uid, target->i_uid)) {
60532+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
60533+ return 1;
60534+ }
60535+#endif
60536+ return 0;
60537+}
60538+
60539+int
60540+gr_handle_follow_link(const struct inode *parent,
60541+ const struct inode *inode,
60542+ const struct dentry *dentry, const struct vfsmount *mnt)
60543+{
60544+#ifdef CONFIG_GRKERNSEC_LINK
60545+ const struct cred *cred = current_cred();
60546+
60547+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
60548+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
60549+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
60550+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
60551+ return -EACCES;
60552+ }
60553+#endif
60554+ return 0;
60555+}
60556+
60557+int
60558+gr_handle_hardlink(const struct dentry *dentry,
60559+ const struct vfsmount *mnt,
60560+ struct inode *inode, const int mode, const struct filename *to)
60561+{
60562+#ifdef CONFIG_GRKERNSEC_LINK
60563+ const struct cred *cred = current_cred();
60564+
60565+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
60566+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
60567+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
60568+ !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
60569+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
60570+ return -EPERM;
60571+ }
60572+#endif
60573+ return 0;
60574+}
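
gr_handle_follow_link() is essentially the policy that was mainlined as fs.protected_symlinks: in a sticky, world-writable directory, following a symlink is denied unless the link is owned by the follower or by the directory owner. A userspace model of the predicate:

/* Model of the sticky-directory symlink rule above. */
#include <stdio.h>
#include <sys/stat.h>

static int follow_denied(mode_t dir_mode, uid_t dir_uid,
			 uid_t link_uid, uid_t fsuid)
{
	return (dir_mode & S_ISVTX) &&   /* sticky */
	       (dir_mode & S_IWOTH) &&   /* world-writable */
	       link_uid != dir_uid &&
	       link_uid != fsuid;
}

int main(void)
{
	/* attacker's (uid 1001) symlink in root-owned /tmp, followed by root */
	printf("denied: %d\n", follow_denied(S_IFDIR | 01777, 0, 1001, 0));
	/* root following its own symlink in /tmp */
	printf("denied: %d\n", follow_denied(S_IFDIR | 01777, 0, 0, 0));
	return 0;
}
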
60575diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
60576new file mode 100644
60577index 0000000..7bd6c2b
60578--- /dev/null
60579+++ b/grsecurity/grsec_log.c
60580@@ -0,0 +1,329 @@
60581+#include <linux/kernel.h>
60582+#include <linux/sched.h>
60583+#include <linux/file.h>
60584+#include <linux/tty.h>
60585+#include <linux/fs.h>
60586+#include <linux/grinternal.h>
60587+
60588+#ifdef CONFIG_TREE_PREEMPT_RCU
60589+#define DISABLE_PREEMPT() preempt_disable()
60590+#define ENABLE_PREEMPT() preempt_enable()
60591+#else
60592+#define DISABLE_PREEMPT()
60593+#define ENABLE_PREEMPT()
60594+#endif
60595+
60596+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
60597+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
60598+
60599+#define BEGIN_LOCKS(x) \
60600+ DISABLE_PREEMPT(); \
60601+ rcu_read_lock(); \
60602+ read_lock(&tasklist_lock); \
60603+ read_lock(&grsec_exec_file_lock); \
60604+ if (x != GR_DO_AUDIT) \
60605+ spin_lock(&grsec_alert_lock); \
60606+ else \
60607+ spin_lock(&grsec_audit_lock)
60608+
60609+#define END_LOCKS(x) \
60610+ if (x != GR_DO_AUDIT) \
60611+ spin_unlock(&grsec_alert_lock); \
60612+ else \
60613+ spin_unlock(&grsec_audit_lock); \
60614+ read_unlock(&grsec_exec_file_lock); \
60615+ read_unlock(&tasklist_lock); \
60616+ rcu_read_unlock(); \
60617+ ENABLE_PREEMPT(); \
60618+ if (x == GR_DONT_AUDIT) \
60619+ gr_handle_alertkill(current)
60620+
60621+enum {
60622+ FLOODING,
60623+ NO_FLOODING
60624+};
60625+
60626+extern char *gr_alert_log_fmt;
60627+extern char *gr_audit_log_fmt;
60628+extern char *gr_alert_log_buf;
60629+extern char *gr_audit_log_buf;
60630+
60631+static int gr_log_start(int audit)
60632+{
60633+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
60634+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
60635+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60636+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
60637+ unsigned long curr_secs = get_seconds();
60638+
60639+ if (audit == GR_DO_AUDIT)
60640+ goto set_fmt;
60641+
60642+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
60643+ grsec_alert_wtime = curr_secs;
60644+ grsec_alert_fyet = 0;
60645+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
60646+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
60647+ grsec_alert_fyet++;
60648+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
60649+ grsec_alert_wtime = curr_secs;
60650+ grsec_alert_fyet++;
60651+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
60652+ return FLOODING;
60653+ }
60654+ else return FLOODING;
60655+
60656+set_fmt:
60657+#endif
60658+ memset(buf, 0, PAGE_SIZE);
60659+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
60660+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
60661+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60662+ } else if (current->signal->curr_ip) {
60663+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
60664+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
60665+ } else if (gr_acl_is_enabled()) {
60666+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
60667+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
60668+ } else {
60669+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
60670+ strcpy(buf, fmt);
60671+ }
60672+
60673+ return NO_FLOODING;
60674+}
60675+
60676+static void gr_log_middle(int audit, const char *msg, va_list ap)
60677+ __attribute__ ((format (printf, 2, 0)));
60678+
60679+static void gr_log_middle(int audit, const char *msg, va_list ap)
60680+{
60681+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60682+ unsigned int len = strlen(buf);
60683+
60684+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60685+
60686+ return;
60687+}
60688+
60689+static void gr_log_middle_varargs(int audit, const char *msg, ...)
60690+ __attribute__ ((format (printf, 2, 3)));
60691+
60692+static void gr_log_middle_varargs(int audit, const char *msg, ...)
60693+{
60694+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60695+ unsigned int len = strlen(buf);
60696+ va_list ap;
60697+
60698+ va_start(ap, msg);
60699+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
60700+ va_end(ap);
60701+
60702+ return;
60703+}
60704+
60705+static void gr_log_end(int audit, int append_default)
60706+{
60707+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
60708+ if (append_default) {
60709+ struct task_struct *task = current;
60710+ struct task_struct *parent = task->real_parent;
60711+ const struct cred *cred = __task_cred(task);
60712+ const struct cred *pcred = __task_cred(parent);
60713+ unsigned int len = strlen(buf);
60714+
60715+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
60716+ }
60717+
60718+ printk("%s\n", buf);
60719+
60720+ return;
60721+}
60722+
60723+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
60724+{
60725+ int logtype;
60726+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
60727+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
60728+ void *voidptr = NULL;
60729+ int num1 = 0, num2 = 0;
60730+ unsigned long ulong1 = 0, ulong2 = 0;
60731+ struct dentry *dentry = NULL;
60732+ struct vfsmount *mnt = NULL;
60733+ struct file *file = NULL;
60734+ struct task_struct *task = NULL;
60735+ const struct cred *cred, *pcred;
60736+ va_list ap;
60737+
60738+ BEGIN_LOCKS(audit);
60739+ logtype = gr_log_start(audit);
60740+ if (logtype == FLOODING) {
60741+ END_LOCKS(audit);
60742+ return;
60743+ }
60744+ va_start(ap, argtypes);
60745+ switch (argtypes) {
60746+ case GR_TTYSNIFF:
60747+ task = va_arg(ap, struct task_struct *);
60748+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
60749+ break;
60750+ case GR_SYSCTL_HIDDEN:
60751+ str1 = va_arg(ap, char *);
60752+ gr_log_middle_varargs(audit, msg, result, str1);
60753+ break;
60754+ case GR_RBAC:
60755+ dentry = va_arg(ap, struct dentry *);
60756+ mnt = va_arg(ap, struct vfsmount *);
60757+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
60758+ break;
60759+ case GR_RBAC_STR:
60760+ dentry = va_arg(ap, struct dentry *);
60761+ mnt = va_arg(ap, struct vfsmount *);
60762+ str1 = va_arg(ap, char *);
60763+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
60764+ break;
60765+ case GR_STR_RBAC:
60766+ str1 = va_arg(ap, char *);
60767+ dentry = va_arg(ap, struct dentry *);
60768+ mnt = va_arg(ap, struct vfsmount *);
60769+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
60770+ break;
60771+ case GR_RBAC_MODE2:
60772+ dentry = va_arg(ap, struct dentry *);
60773+ mnt = va_arg(ap, struct vfsmount *);
60774+ str1 = va_arg(ap, char *);
60775+ str2 = va_arg(ap, char *);
60776+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
60777+ break;
60778+ case GR_RBAC_MODE3:
60779+ dentry = va_arg(ap, struct dentry *);
60780+ mnt = va_arg(ap, struct vfsmount *);
60781+ str1 = va_arg(ap, char *);
60782+ str2 = va_arg(ap, char *);
60783+ str3 = va_arg(ap, char *);
60784+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
60785+ break;
60786+ case GR_FILENAME:
60787+ dentry = va_arg(ap, struct dentry *);
60788+ mnt = va_arg(ap, struct vfsmount *);
60789+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
60790+ break;
60791+ case GR_STR_FILENAME:
60792+ str1 = va_arg(ap, char *);
60793+ dentry = va_arg(ap, struct dentry *);
60794+ mnt = va_arg(ap, struct vfsmount *);
60795+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
60796+ break;
60797+ case GR_FILENAME_STR:
60798+ dentry = va_arg(ap, struct dentry *);
60799+ mnt = va_arg(ap, struct vfsmount *);
60800+ str1 = va_arg(ap, char *);
60801+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
60802+ break;
60803+ case GR_FILENAME_TWO_INT:
60804+ dentry = va_arg(ap, struct dentry *);
60805+ mnt = va_arg(ap, struct vfsmount *);
60806+ num1 = va_arg(ap, int);
60807+ num2 = va_arg(ap, int);
60808+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
60809+ break;
60810+ case GR_FILENAME_TWO_INT_STR:
60811+ dentry = va_arg(ap, struct dentry *);
60812+ mnt = va_arg(ap, struct vfsmount *);
60813+ num1 = va_arg(ap, int);
60814+ num2 = va_arg(ap, int);
60815+ str1 = va_arg(ap, char *);
60816+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
60817+ break;
60818+ case GR_TEXTREL:
60819+ file = va_arg(ap, struct file *);
60820+ ulong1 = va_arg(ap, unsigned long);
60821+ ulong2 = va_arg(ap, unsigned long);
60822+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
60823+ break;
60824+ case GR_PTRACE:
60825+ task = va_arg(ap, struct task_struct *);
60826+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
60827+ break;
60828+ case GR_RESOURCE:
60829+ task = va_arg(ap, struct task_struct *);
60830+ cred = __task_cred(task);
60831+ pcred = __task_cred(task->real_parent);
60832+ ulong1 = va_arg(ap, unsigned long);
60833+ str1 = va_arg(ap, char *);
60834+ ulong2 = va_arg(ap, unsigned long);
60835+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
60836+ break;
60837+ case GR_CAP:
60838+ task = va_arg(ap, struct task_struct *);
60839+ cred = __task_cred(task);
60840+ pcred = __task_cred(task->real_parent);
60841+ str1 = va_arg(ap, char *);
60842+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
60843+ break;
60844+ case GR_SIG:
60845+ str1 = va_arg(ap, char *);
60846+ voidptr = va_arg(ap, void *);
60847+ gr_log_middle_varargs(audit, msg, str1, voidptr);
60848+ break;
60849+ case GR_SIG2:
60850+ task = va_arg(ap, struct task_struct *);
60851+ cred = __task_cred(task);
60852+ pcred = __task_cred(task->real_parent);
60853+ num1 = va_arg(ap, int);
60854+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
60855+ break;
60856+ case GR_CRASH1:
60857+ task = va_arg(ap, struct task_struct *);
60858+ cred = __task_cred(task);
60859+ pcred = __task_cred(task->real_parent);
60860+ ulong1 = va_arg(ap, unsigned long);
60861+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
60862+ break;
60863+ case GR_CRASH2:
60864+ task = va_arg(ap, struct task_struct *);
60865+ cred = __task_cred(task);
60866+ pcred = __task_cred(task->real_parent);
60867+ ulong1 = va_arg(ap, unsigned long);
60868+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
60869+ break;
60870+ case GR_RWXMAP:
60871+ file = va_arg(ap, struct file *);
60872+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
60873+ break;
60874+ case GR_PSACCT:
60875+ {
60876+ unsigned int wday, cday;
60877+ __u8 whr, chr;
60878+ __u8 wmin, cmin;
60879+ __u8 wsec, csec;
60880+ char cur_tty[64] = { 0 };
60881+ char parent_tty[64] = { 0 };
60882+
60883+ task = va_arg(ap, struct task_struct *);
60884+ wday = va_arg(ap, unsigned int);
60885+ cday = va_arg(ap, unsigned int);
60886+ whr = va_arg(ap, int);
60887+ chr = va_arg(ap, int);
60888+ wmin = va_arg(ap, int);
60889+ cmin = va_arg(ap, int);
60890+ wsec = va_arg(ap, int);
60891+ csec = va_arg(ap, int);
60892+ ulong1 = va_arg(ap, unsigned long);
60893+ cred = __task_cred(task);
60894+ pcred = __task_cred(task->real_parent);
60895+
60896+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
60897+ }
60898+ break;
60899+ default:
60900+ gr_log_middle(audit, msg, ap);
60901+ }
60902+ va_end(ap);
60903+ // these don't need DEFAULTSECARGS printed on the end
60904+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
60905+ gr_log_end(audit, 0);
60906+ else
60907+ gr_log_end(audit, 1);
60908+ END_LOCKS(audit);
60909+}
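
The flood control in gr_log_start() is a fixed-window limiter: roughly FLOODBURST alerts per FLOODTIME seconds, then a single "logging disabled" notice, then silence until a full quiet window has passed (audit-class messages bypass it entirely). A self-contained model using plain wall-clock seconds; 10 and 6 are assumed here as the usual Kconfig defaults, and the kernel's wraparound-safe time_after() is simplified to a plain comparison:

/* Model of gr_log_start()'s alert rate limit. */
#include <stdio.h>

#define WINDOW 10UL   /* like CONFIG_GRKERNSEC_FLOODTIME */
#define BURST  6UL    /* like CONFIG_GRKERNSEC_FLOODBURST */

static unsigned long wtime;  /* window start, seconds (grsec_alert_wtime) */
static unsigned long fyet;   /* alerts seen in this window (grsec_alert_fyet) */

static int alert_allowed(unsigned long now)
{
	if (!wtime || now > wtime + WINDOW) {
		wtime = now;            /* fresh window */
		fyet = 0;
		return 1;
	}
	if (fyet < BURST) {
		fyet++;                 /* still under the burst budget */
		return 1;
	}
	if (fyet == BURST) {
		wtime = now;            /* restart the quiet period */
		fyet++;
		printf("more alerts, logging disabled for %lu seconds\n", WINDOW);
	}
	return 0;                       /* flooding: drop the alert */
}

int main(void)
{
	unsigned long t;
	/* allowed for t=1..7, suppression notice at t=8, silent at t=9 */
	for (t = 1; t <= 9; t++)
		printf("t=%lu allowed=%d\n", t, alert_allowed(t));
	return 0;
}
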
60910diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
60911new file mode 100644
60912index 0000000..f536303
60913--- /dev/null
60914+++ b/grsecurity/grsec_mem.c
60915@@ -0,0 +1,40 @@
60916+#include <linux/kernel.h>
60917+#include <linux/sched.h>
60918+#include <linux/mm.h>
60919+#include <linux/mman.h>
60920+#include <linux/grinternal.h>
60921+
60922+void
60923+gr_handle_ioperm(void)
60924+{
60925+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
60926+ return;
60927+}
60928+
60929+void
60930+gr_handle_iopl(void)
60931+{
60932+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
60933+ return;
60934+}
60935+
60936+void
60937+gr_handle_mem_readwrite(u64 from, u64 to)
60938+{
60939+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
60940+ return;
60941+}
60942+
60943+void
60944+gr_handle_vm86(void)
60945+{
60946+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
60947+ return;
60948+}
60949+
60950+void
60951+gr_log_badprocpid(const char *entry)
60952+{
60953+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
60954+ return;
60955+}
60956diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
60957new file mode 100644
60958index 0000000..2131422
60959--- /dev/null
60960+++ b/grsecurity/grsec_mount.c
60961@@ -0,0 +1,62 @@
60962+#include <linux/kernel.h>
60963+#include <linux/sched.h>
60964+#include <linux/mount.h>
60965+#include <linux/grsecurity.h>
60966+#include <linux/grinternal.h>
60967+
60968+void
60969+gr_log_remount(const char *devname, const int retval)
60970+{
60971+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60972+ if (grsec_enable_mount && (retval >= 0))
60973+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
60974+#endif
60975+ return;
60976+}
60977+
60978+void
60979+gr_log_unmount(const char *devname, const int retval)
60980+{
60981+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60982+ if (grsec_enable_mount && (retval >= 0))
60983+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
60984+#endif
60985+ return;
60986+}
60987+
60988+void
60989+gr_log_mount(const char *from, const char *to, const int retval)
60990+{
60991+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
60992+ if (grsec_enable_mount && (retval >= 0))
60993+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
60994+#endif
60995+ return;
60996+}
60997+
60998+int
60999+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
61000+{
61001+#ifdef CONFIG_GRKERNSEC_ROFS
61002+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
61003+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
61004+ return -EPERM;
61005+ } else
61006+ return 0;
61007+#endif
61008+ return 0;
61009+}
61010+
61011+int
61012+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
61013+{
61014+#ifdef CONFIG_GRKERNSEC_ROFS
61015+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
61016+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
61017+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
61018+ return -EPERM;
61019+ } else
61020+ return 0;
61021+#endif
61022+ return 0;
61023+}
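
The ROFS switch makes the running system read-only at the policy layer: new mounts must carry MNT_READONLY, and opening a block device for writing is refused, so even root cannot quietly remount read-write or overwrite a disk while it is engaged. A sketch of both predicates (the flag values below are illustrative, not the kernel's):

/* Model of the two ROFS refusals above. */
#include <stdio.h>

#define MNT_READONLY 0x1   /* illustrative value */
#define MAY_WRITE    0x2   /* illustrative value */

static int rofs = 1;       /* like grsec_enable_rofs */

static int mount_denied(int mnt_flags)
{
	return rofs && !(mnt_flags & MNT_READONLY);
}

static int blockdev_open_denied(int acc_mode, int is_block_device)
{
	return rofs && (acc_mode & MAY_WRITE) && is_block_device;
}

int main(void)
{
	printf("rw mount denied: %d\n", mount_denied(0));
	printf("ro mount denied: %d\n", mount_denied(MNT_READONLY));
	printf("write to /dev/sda denied: %d\n", blockdev_open_denied(MAY_WRITE, 1));
	return 0;
}
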
61024diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
61025new file mode 100644
61026index 0000000..a3b12a0
61027--- /dev/null
61028+++ b/grsecurity/grsec_pax.c
61029@@ -0,0 +1,36 @@
61030+#include <linux/kernel.h>
61031+#include <linux/sched.h>
61032+#include <linux/mm.h>
61033+#include <linux/file.h>
61034+#include <linux/grinternal.h>
61035+#include <linux/grsecurity.h>
61036+
61037+void
61038+gr_log_textrel(struct vm_area_struct * vma)
61039+{
61040+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
61041+ if (grsec_enable_audit_textrel)
61042+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
61043+#endif
61044+ return;
61045+}
61046+
61047+void
61048+gr_log_rwxmmap(struct file *file)
61049+{
61050+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
61051+ if (grsec_enable_log_rwxmaps)
61052+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
61053+#endif
61054+ return;
61055+}
61056+
61057+void
61058+gr_log_rwxmprotect(struct file *file)
61059+{
61060+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
61061+ if (grsec_enable_log_rwxmaps)
61062+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
61063+#endif
61064+ return;
61065+}
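Illustrative sketch (not part of the patch): these wrappers log text relocations and RWX mappings when the matching sysctls are enabled. A small program that produces the condition gr_log_rwxmmap() reports, namely a writable-and-executable anonymous mapping (on a grsecurity kernel with MPROTECT active the mmap may simply fail rather than succeed and be logged):

/* Demo of the triggering condition for the RWX-map logging above:
 * request a PROT_WRITE|PROT_EXEC anonymous mapping. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap rwx");   /* denied or logged on the kernel side */
        return 1;
    }
    puts("got an RWX mapping (logged when rwxmap_logging is on)");
    munmap(p, 4096);
    return 0;
}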
61066diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
61067new file mode 100644
61068index 0000000..f7f29aa
61069--- /dev/null
61070+++ b/grsecurity/grsec_ptrace.c
61071@@ -0,0 +1,30 @@
61072+#include <linux/kernel.h>
61073+#include <linux/sched.h>
61074+#include <linux/grinternal.h>
61075+#include <linux/security.h>
61076+
61077+void
61078+gr_audit_ptrace(struct task_struct *task)
61079+{
61080+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
61081+ if (grsec_enable_audit_ptrace)
61082+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
61083+#endif
61084+ return;
61085+}
61086+
61087+int
61088+gr_ptrace_readexec(struct file *file, int unsafe_flags)
61089+{
61090+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
61091+ const struct dentry *dentry = file->f_path.dentry;
61092+ const struct vfsmount *mnt = file->f_path.mnt;
61093+
61094+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
61095+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
61096+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
61097+ return -EACCES;
61098+ }
61099+#endif
61100+ return 0;
61101+}
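Illustrative sketch (not part of the patch): gr_ptrace_readexec() closes an information leak. A tracer may not exec a binary it could not read directly, since ptrace would otherwise expose the unreadable image. A user-space analogue of the check, with access(2) standing in for the kernel's inode_permission():

/* User-space analogue of the ptrace_readexec test above: deny an
 * exec that happens under a tracer when the caller cannot read the
 * file. Illustration only. */
#include <stdio.h>
#include <unistd.h>

static int ptrace_readexec_allow(const char *path, int traced)
{
    if (traced && access(path, R_OK) != 0) {
        fprintf(stderr, "denied: traced exec of unreadable %s\n", path);
        return -1;   /* the kernel hook returns -EACCES */
    }
    return 0;
}

int main(int argc, char **argv)
{
    const char *p = argc > 1 ? argv[1] : "/etc/shadow";
    printf("untraced exec -> %d\n", ptrace_readexec_allow(p, 0));
    printf("traced exec   -> %d\n", ptrace_readexec_allow(p, 1));
    return 0;
}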
61102diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
61103new file mode 100644
61104index 0000000..5c00416
61105--- /dev/null
61106+++ b/grsecurity/grsec_sig.c
61107@@ -0,0 +1,222 @@
61108+#include <linux/kernel.h>
61109+#include <linux/sched.h>
61110+#include <linux/delay.h>
61111+#include <linux/grsecurity.h>
61112+#include <linux/grinternal.h>
61113+#include <linux/hardirq.h>
61114+
61115+char *signames[] = {
61116+ [SIGSEGV] = "Segmentation fault",
61117+ [SIGILL] = "Illegal instruction",
61118+ [SIGABRT] = "Abort",
61119+ [SIGBUS] = "Invalid alignment/Bus error"
61120+};
61121+
61122+void
61123+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
61124+{
61125+#ifdef CONFIG_GRKERNSEC_SIGNAL
61126+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
61127+ (sig == SIGABRT) || (sig == SIGBUS))) {
61128+ if (t->pid == current->pid) {
61129+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
61130+ } else {
61131+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
61132+ }
61133+ }
61134+#endif
61135+ return;
61136+}
61137+
61138+int
61139+gr_handle_signal(const struct task_struct *p, const int sig)
61140+{
61141+#ifdef CONFIG_GRKERNSEC
61142+ /* ignore the 0 signal for protected task checks */
61143+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
61144+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
61145+ return -EPERM;
61146+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
61147+ return -EPERM;
61148+ }
61149+#endif
61150+ return 0;
61151+}
61152+
61153+#ifdef CONFIG_GRKERNSEC
61154+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
61155+
61156+int gr_fake_force_sig(int sig, struct task_struct *t)
61157+{
61158+ unsigned long int flags;
61159+ int ret, blocked, ignored;
61160+ struct k_sigaction *action;
61161+
61162+ spin_lock_irqsave(&t->sighand->siglock, flags);
61163+ action = &t->sighand->action[sig-1];
61164+ ignored = action->sa.sa_handler == SIG_IGN;
61165+ blocked = sigismember(&t->blocked, sig);
61166+ if (blocked || ignored) {
61167+ action->sa.sa_handler = SIG_DFL;
61168+ if (blocked) {
61169+ sigdelset(&t->blocked, sig);
61170+ recalc_sigpending_and_wake(t);
61171+ }
61172+ }
61173+ if (action->sa.sa_handler == SIG_DFL)
61174+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
61175+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
61176+
61177+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
61178+
61179+ return ret;
61180+}
61181+#endif
61182+
61183+#ifdef CONFIG_GRKERNSEC_BRUTE
61184+#define GR_USER_BAN_TIME (15 * 60)
61185+#define GR_DAEMON_BRUTE_TIME (30 * 60)
61186+
61187+static int __get_dumpable(unsigned long mm_flags)
61188+{
61189+ int ret;
61190+
61191+ ret = mm_flags & MMF_DUMPABLE_MASK;
61192+ return (ret >= 2) ? 2 : ret;
61193+}
61194+#endif
61195+
61196+void gr_handle_brute_attach(unsigned long mm_flags)
61197+{
61198+#ifdef CONFIG_GRKERNSEC_BRUTE
61199+ struct task_struct *p = current;
61200+ kuid_t uid = GLOBAL_ROOT_UID;
61201+ int daemon = 0;
61202+
61203+ if (!grsec_enable_brute)
61204+ return;
61205+
61206+ rcu_read_lock();
61207+ read_lock(&tasklist_lock);
61208+ read_lock(&grsec_exec_file_lock);
61209+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
61210+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
61211+ p->real_parent->brute = 1;
61212+ daemon = 1;
61213+ } else {
61214+ const struct cred *cred = __task_cred(p), *cred2;
61215+ struct task_struct *tsk, *tsk2;
61216+
61217+ if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
61218+ struct user_struct *user;
61219+
61220+ uid = cred->uid;
61221+
61222+ /* the find_user reference is dropped (put) on exec once the ban expires */
61223+ user = find_user(uid);
61224+ if (user == NULL)
61225+ goto unlock;
61226+ user->banned = 1;
61227+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
61228+ if (user->ban_expires == ~0UL)
61229+ user->ban_expires--;
61230+
61231+ do_each_thread(tsk2, tsk) {
61232+ cred2 = __task_cred(tsk);
61233+ if (tsk != p && uid_eq(cred2->uid, uid))
61234+ gr_fake_force_sig(SIGKILL, tsk);
61235+ } while_each_thread(tsk2, tsk);
61236+ }
61237+ }
61238+unlock:
61239+ read_unlock(&grsec_exec_file_lock);
61240+ read_unlock(&tasklist_lock);
61241+ rcu_read_unlock();
61242+
61243+ if (!uid_eq(uid, GLOBAL_ROOT_UID))
61244+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
61245+ from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
61246+ else if (daemon)
61247+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
61248+
61249+#endif
61250+ return;
61251+}
61252+
61253+void gr_handle_brute_check(void)
61254+{
61255+#ifdef CONFIG_GRKERNSEC_BRUTE
61256+ struct task_struct *p = current;
61257+
61258+ if (unlikely(p->brute)) {
61259+ if (!grsec_enable_brute)
61260+ p->brute = 0;
61261+ else if (time_before(get_seconds(), p->brute_expires))
61262+ msleep(30 * 1000);
61263+ }
61264+#endif
61265+ return;
61266+}
61267+
61268+void gr_handle_kernel_exploit(void)
61269+{
61270+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
61271+ const struct cred *cred;
61272+ struct task_struct *tsk, *tsk2;
61273+ struct user_struct *user;
61274+ kuid_t uid;
61275+
61276+ if (in_irq() || in_serving_softirq() || in_nmi())
61277+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
61278+
61279+ uid = current_uid();
61280+
61281+ if (uid_eq(uid, GLOBAL_ROOT_UID))
61282+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
61283+ else {
61284+ /* kill all the processes of this user, hold a reference
61285+ to their creds struct, and prevent them from creating
61286+ another process until system reset
61287+ */
61288+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
61289+ from_kuid_munged(&init_user_ns, uid));
61290+ /* we intentionally leak this ref */
61291+ user = get_uid(current->cred->user);
61292+ if (user) {
61293+ user->banned = 1;
61294+ user->ban_expires = ~0UL;
61295+ }
61296+
61297+ read_lock(&tasklist_lock);
61298+ do_each_thread(tsk2, tsk) {
61299+ cred = __task_cred(tsk);
61300+ if (uid_eq(cred->uid, uid))
61301+ gr_fake_force_sig(SIGKILL, tsk);
61302+ } while_each_thread(tsk2, tsk);
61303+ read_unlock(&tasklist_lock);
61304+ }
61305+#endif
61306+}
61307+
61308+int __gr_process_user_ban(struct user_struct *user)
61309+{
61310+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61311+ if (unlikely(user->banned)) {
61312+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
61313+ user->banned = 0;
61314+ user->ban_expires = 0;
61315+ free_uid(user);
61316+ } else
61317+ return -EPERM;
61318+ }
61319+#endif
61320+ return 0;
61321+}
61322+
61323+int gr_process_user_ban(void)
61324+{
61325+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
61326+ return __gr_process_user_ban(current->cred->user);
61327+#endif
61328+ return 0;
61329+}
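Illustrative sketch (not part of the patch): the brute-force handling above stalls re-forked daemons for 30 minutes via the parent's brute_expires, and bans the uid behind a non-dumpable, non-root crash for 15 minutes while killing its other tasks. A standalone sketch of the ban bookkeeping, with time(NULL) standing in for get_seconds():

/* Sketch of the per-user ban bookkeeping used above: a 15-minute
 * ban whose expiry is checked, and cleared, on next use. */
#include <stdio.h>
#include <time.h>

#define GR_USER_BAN_TIME (15 * 60)

struct user {
    int banned;
    time_t ban_expires;    /* the kernel uses ~0UL for "until reboot" */
};

static void ban_user(struct user *u)
{
    u->banned = 1;
    u->ban_expires = time(NULL) + GR_USER_BAN_TIME;
}

static int user_is_banned(struct user *u)
{
    /* mirrors __gr_process_user_ban(): a lapsed ban is cleared on use */
    if (u->banned && time(NULL) >= u->ban_expires) {
        u->banned = 0;
        u->ban_expires = 0;
    }
    return u->banned;
}

int main(void)
{
    struct user u = { 0, 0 };

    ban_user(&u);
    printf("banned now:   %d\n", user_is_banned(&u));   /* 1 */
    u.ban_expires = time(NULL) - 1;                     /* force expiry */
    printf("after expiry: %d\n", user_is_banned(&u));   /* 0 */
    return 0;
}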
61330diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
61331new file mode 100644
61332index 0000000..4030d57
61333--- /dev/null
61334+++ b/grsecurity/grsec_sock.c
61335@@ -0,0 +1,244 @@
61336+#include <linux/kernel.h>
61337+#include <linux/module.h>
61338+#include <linux/sched.h>
61339+#include <linux/file.h>
61340+#include <linux/net.h>
61341+#include <linux/in.h>
61342+#include <linux/ip.h>
61343+#include <net/sock.h>
61344+#include <net/inet_sock.h>
61345+#include <linux/grsecurity.h>
61346+#include <linux/grinternal.h>
61347+#include <linux/gracl.h>
61348+
61349+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
61350+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
61351+
61352+EXPORT_SYMBOL(gr_search_udp_recvmsg);
61353+EXPORT_SYMBOL(gr_search_udp_sendmsg);
61354+
61355+#ifdef CONFIG_UNIX_MODULE
61356+EXPORT_SYMBOL(gr_acl_handle_unix);
61357+EXPORT_SYMBOL(gr_acl_handle_mknod);
61358+EXPORT_SYMBOL(gr_handle_chroot_unix);
61359+EXPORT_SYMBOL(gr_handle_create);
61360+#endif
61361+
61362+#ifdef CONFIG_GRKERNSEC
61363+#define gr_conn_table_size 32749
61364+struct conn_table_entry {
61365+ struct conn_table_entry *next;
61366+ struct signal_struct *sig;
61367+};
61368+
61369+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
61370+DEFINE_SPINLOCK(gr_conn_table_lock);
61371+
61372+extern const char * gr_socktype_to_name(unsigned char type);
61373+extern const char * gr_proto_to_name(unsigned char proto);
61374+extern const char * gr_sockfamily_to_name(unsigned char family);
61375+
61376+static __inline__ int
61377+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
61378+{
61379+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
61380+}
61381+
61382+static __inline__ int
61383+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
61384+ __u16 sport, __u16 dport)
61385+{
61386+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
61387+ sig->gr_sport == sport && sig->gr_dport == dport))
61388+ return 1;
61389+ else
61390+ return 0;
61391+}
61392+
61393+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
61394+{
61395+ struct conn_table_entry **match;
61396+ unsigned int index;
61397+
61398+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61399+ sig->gr_sport, sig->gr_dport,
61400+ gr_conn_table_size);
61401+
61402+ newent->sig = sig;
61403+
61404+ match = &gr_conn_table[index];
61405+ newent->next = *match;
61406+ *match = newent;
61407+
61408+ return;
61409+}
61410+
61411+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
61412+{
61413+ struct conn_table_entry *match, *last = NULL;
61414+ unsigned int index;
61415+
61416+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
61417+ sig->gr_sport, sig->gr_dport,
61418+ gr_conn_table_size);
61419+
61420+ match = gr_conn_table[index];
61421+ while (match && !conn_match(match->sig,
61422+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
61423+ sig->gr_dport)) {
61424+ last = match;
61425+ match = match->next;
61426+ }
61427+
61428+ if (match) {
61429+ if (last)
61430+ last->next = match->next;
61431+ else
61432+ gr_conn_table[index] = NULL;
61433+ kfree(match);
61434+ }
61435+
61436+ return;
61437+}
61438+
61439+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
61440+ __u16 sport, __u16 dport)
61441+{
61442+ struct conn_table_entry *match;
61443+ unsigned int index;
61444+
61445+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
61446+
61447+ match = gr_conn_table[index];
61448+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
61449+ match = match->next;
61450+
61451+ if (match)
61452+ return match->sig;
61453+ else
61454+ return NULL;
61455+}
61456+
61457+#endif
61458+
61459+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
61460+{
61461+#ifdef CONFIG_GRKERNSEC
61462+ struct signal_struct *sig = task->signal;
61463+ struct conn_table_entry *newent;
61464+
61465+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
61466+ if (newent == NULL)
61467+ return;
61468+ /* no bh lock needed since we are called with bh disabled */
61469+ spin_lock(&gr_conn_table_lock);
61470+ gr_del_task_from_ip_table_nolock(sig);
61471+ sig->gr_saddr = inet->inet_rcv_saddr;
61472+ sig->gr_daddr = inet->inet_daddr;
61473+ sig->gr_sport = inet->inet_sport;
61474+ sig->gr_dport = inet->inet_dport;
61475+ gr_add_to_task_ip_table_nolock(sig, newent);
61476+ spin_unlock(&gr_conn_table_lock);
61477+#endif
61478+ return;
61479+}
61480+
61481+void gr_del_task_from_ip_table(struct task_struct *task)
61482+{
61483+#ifdef CONFIG_GRKERNSEC
61484+ spin_lock_bh(&gr_conn_table_lock);
61485+ gr_del_task_from_ip_table_nolock(task->signal);
61486+ spin_unlock_bh(&gr_conn_table_lock);
61487+#endif
61488+ return;
61489+}
61490+
61491+void
61492+gr_attach_curr_ip(const struct sock *sk)
61493+{
61494+#ifdef CONFIG_GRKERNSEC
61495+ struct signal_struct *p, *set;
61496+ const struct inet_sock *inet = inet_sk(sk);
61497+
61498+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
61499+ return;
61500+
61501+ set = current->signal;
61502+
61503+ spin_lock_bh(&gr_conn_table_lock);
61504+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
61505+ inet->inet_dport, inet->inet_sport);
61506+ if (unlikely(p != NULL)) {
61507+ set->curr_ip = p->curr_ip;
61508+ set->used_accept = 1;
61509+ gr_del_task_from_ip_table_nolock(p);
61510+ spin_unlock_bh(&gr_conn_table_lock);
61511+ return;
61512+ }
61513+ spin_unlock_bh(&gr_conn_table_lock);
61514+
61515+ set->curr_ip = inet->inet_daddr;
61516+ set->used_accept = 1;
61517+#endif
61518+ return;
61519+}
61520+
61521+int
61522+gr_handle_sock_all(const int family, const int type, const int protocol)
61523+{
61524+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61525+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
61526+ (family != AF_UNIX)) {
61527+ if (family == AF_INET)
61528+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
61529+ else
61530+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
61531+ return -EACCES;
61532+ }
61533+#endif
61534+ return 0;
61535+}
61536+
61537+int
61538+gr_handle_sock_server(const struct sockaddr *sck)
61539+{
61540+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61541+ if (grsec_enable_socket_server &&
61542+ in_group_p(grsec_socket_server_gid) &&
61543+ sck && (sck->sa_family != AF_UNIX) &&
61544+ (sck->sa_family != AF_LOCAL)) {
61545+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61546+ return -EACCES;
61547+ }
61548+#endif
61549+ return 0;
61550+}
61551+
61552+int
61553+gr_handle_sock_server_other(const struct sock *sck)
61554+{
61555+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61556+ if (grsec_enable_socket_server &&
61557+ in_group_p(grsec_socket_server_gid) &&
61558+ sck && (sck->sk_family != AF_UNIX) &&
61559+ (sck->sk_family != AF_LOCAL)) {
61560+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
61561+ return -EACCES;
61562+ }
61563+#endif
61564+ return 0;
61565+}
61566+
61567+int
61568+gr_handle_sock_client(const struct sockaddr *sck)
61569+{
61570+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61571+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
61572+ sck && (sck->sa_family != AF_UNIX) &&
61573+ (sck->sa_family != AF_LOCAL)) {
61574+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
61575+ return -EACCES;
61576+ }
61577+#endif
61578+ return 0;
61579+}
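Illustrative sketch (not part of the patch): the connection table above attributes an accepted TCP connection back to the task that owned the peer socket, keying signal_structs by the (saddr, daddr, sport, dport) tuple in a chained hash of 32749 buckets. A user-space replica of the structure and hash, with malloc/free in place of kmalloc/kfree and the delete path omitted for brevity:

/* User-space replica of the chained connection table above: same
 * hash function and 4-tuple match. Illustration only. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749   /* prime, as in the patch */

struct ent {
    struct ent *next;
    uint32_t saddr, daddr;
    uint16_t sport, dport;
};

static struct ent *table[TABLE_SIZE];

static unsigned conn_hash(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
    return (d + s + ((uint32_t)sp << 8) + ((uint32_t)dp << 16)) % TABLE_SIZE;
}

static void conn_add(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
    struct ent *e = malloc(sizeof(*e));
    unsigned i = conn_hash(s, d, sp, dp);

    if (!e)
        return;
    e->saddr = s; e->daddr = d; e->sport = sp; e->dport = dp;
    e->next = table[i];     /* push onto the chain head */
    table[i] = e;
}

static struct ent *conn_lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
    struct ent *e = table[conn_hash(s, d, sp, dp)];

    while (e && !(e->saddr == s && e->daddr == d &&
                  e->sport == sp && e->dport == dp))
        e = e->next;
    return e;
}

int main(void)
{
    conn_add(0x0a000001, 0x0a000002, 1234, 80);
    printf("hit:  %p\n", (void *)conn_lookup(0x0a000001, 0x0a000002, 1234, 80));
    printf("miss: %p\n", (void *)conn_lookup(0x0a000001, 0x0a000002, 1234, 81));
    return 0;
}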
61580diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
61581new file mode 100644
61582index 0000000..f55ef0f
61583--- /dev/null
61584+++ b/grsecurity/grsec_sysctl.c
61585@@ -0,0 +1,469 @@
61586+#include <linux/kernel.h>
61587+#include <linux/sched.h>
61588+#include <linux/sysctl.h>
61589+#include <linux/grsecurity.h>
61590+#include <linux/grinternal.h>
61591+
61592+int
61593+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
61594+{
61595+#ifdef CONFIG_GRKERNSEC_SYSCTL
61596+ if (dirname == NULL || name == NULL)
61597+ return 0;
61598+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
61599+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
61600+ return -EACCES;
61601+ }
61602+#endif
61603+ return 0;
61604+}
61605+
61606+#ifdef CONFIG_GRKERNSEC_ROFS
61607+static int __maybe_unused one = 1;
61608+#endif
61609+
61610+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
61611+struct ctl_table grsecurity_table[] = {
61612+#ifdef CONFIG_GRKERNSEC_SYSCTL
61613+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
61614+#ifdef CONFIG_GRKERNSEC_IO
61615+ {
61616+ .procname = "disable_priv_io",
61617+ .data = &grsec_disable_privio,
61618+ .maxlen = sizeof(int),
61619+ .mode = 0600,
61620+ .proc_handler = &proc_dointvec,
61621+ },
61622+#endif
61623+#endif
61624+#ifdef CONFIG_GRKERNSEC_LINK
61625+ {
61626+ .procname = "linking_restrictions",
61627+ .data = &grsec_enable_link,
61628+ .maxlen = sizeof(int),
61629+ .mode = 0600,
61630+ .proc_handler = &proc_dointvec,
61631+ },
61632+#endif
61633+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
61634+ {
61635+ .procname = "enforce_symlinksifowner",
61636+ .data = &grsec_enable_symlinkown,
61637+ .maxlen = sizeof(int),
61638+ .mode = 0600,
61639+ .proc_handler = &proc_dointvec,
61640+ },
61641+ {
61642+ .procname = "symlinkown_gid",
61643+ .data = &grsec_symlinkown_gid,
61644+ .maxlen = sizeof(int),
61645+ .mode = 0600,
61646+ .proc_handler = &proc_dointvec,
61647+ },
61648+#endif
61649+#ifdef CONFIG_GRKERNSEC_BRUTE
61650+ {
61651+ .procname = "deter_bruteforce",
61652+ .data = &grsec_enable_brute,
61653+ .maxlen = sizeof(int),
61654+ .mode = 0600,
61655+ .proc_handler = &proc_dointvec,
61656+ },
61657+#endif
61658+#ifdef CONFIG_GRKERNSEC_FIFO
61659+ {
61660+ .procname = "fifo_restrictions",
61661+ .data = &grsec_enable_fifo,
61662+ .maxlen = sizeof(int),
61663+ .mode = 0600,
61664+ .proc_handler = &proc_dointvec,
61665+ },
61666+#endif
61667+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
61668+ {
61669+ .procname = "ptrace_readexec",
61670+ .data = &grsec_enable_ptrace_readexec,
61671+ .maxlen = sizeof(int),
61672+ .mode = 0600,
61673+ .proc_handler = &proc_dointvec,
61674+ },
61675+#endif
61676+#ifdef CONFIG_GRKERNSEC_SETXID
61677+ {
61678+ .procname = "consistent_setxid",
61679+ .data = &grsec_enable_setxid,
61680+ .maxlen = sizeof(int),
61681+ .mode = 0600,
61682+ .proc_handler = &proc_dointvec,
61683+ },
61684+#endif
61685+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
61686+ {
61687+ .procname = "ip_blackhole",
61688+ .data = &grsec_enable_blackhole,
61689+ .maxlen = sizeof(int),
61690+ .mode = 0600,
61691+ .proc_handler = &proc_dointvec,
61692+ },
61693+ {
61694+ .procname = "lastack_retries",
61695+ .data = &grsec_lastack_retries,
61696+ .maxlen = sizeof(int),
61697+ .mode = 0600,
61698+ .proc_handler = &proc_dointvec,
61699+ },
61700+#endif
61701+#ifdef CONFIG_GRKERNSEC_EXECLOG
61702+ {
61703+ .procname = "exec_logging",
61704+ .data = &grsec_enable_execlog,
61705+ .maxlen = sizeof(int),
61706+ .mode = 0600,
61707+ .proc_handler = &proc_dointvec,
61708+ },
61709+#endif
61710+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
61711+ {
61712+ .procname = "rwxmap_logging",
61713+ .data = &grsec_enable_log_rwxmaps,
61714+ .maxlen = sizeof(int),
61715+ .mode = 0600,
61716+ .proc_handler = &proc_dointvec,
61717+ },
61718+#endif
61719+#ifdef CONFIG_GRKERNSEC_SIGNAL
61720+ {
61721+ .procname = "signal_logging",
61722+ .data = &grsec_enable_signal,
61723+ .maxlen = sizeof(int),
61724+ .mode = 0600,
61725+ .proc_handler = &proc_dointvec,
61726+ },
61727+#endif
61728+#ifdef CONFIG_GRKERNSEC_FORKFAIL
61729+ {
61730+ .procname = "forkfail_logging",
61731+ .data = &grsec_enable_forkfail,
61732+ .maxlen = sizeof(int),
61733+ .mode = 0600,
61734+ .proc_handler = &proc_dointvec,
61735+ },
61736+#endif
61737+#ifdef CONFIG_GRKERNSEC_TIME
61738+ {
61739+ .procname = "timechange_logging",
61740+ .data = &grsec_enable_time,
61741+ .maxlen = sizeof(int),
61742+ .mode = 0600,
61743+ .proc_handler = &proc_dointvec,
61744+ },
61745+#endif
61746+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61747+ {
61748+ .procname = "chroot_deny_shmat",
61749+ .data = &grsec_enable_chroot_shmat,
61750+ .maxlen = sizeof(int),
61751+ .mode = 0600,
61752+ .proc_handler = &proc_dointvec,
61753+ },
61754+#endif
61755+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61756+ {
61757+ .procname = "chroot_deny_unix",
61758+ .data = &grsec_enable_chroot_unix,
61759+ .maxlen = sizeof(int),
61760+ .mode = 0600,
61761+ .proc_handler = &proc_dointvec,
61762+ },
61763+#endif
61764+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61765+ {
61766+ .procname = "chroot_deny_mount",
61767+ .data = &grsec_enable_chroot_mount,
61768+ .maxlen = sizeof(int),
61769+ .mode = 0600,
61770+ .proc_handler = &proc_dointvec,
61771+ },
61772+#endif
61773+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61774+ {
61775+ .procname = "chroot_deny_fchdir",
61776+ .data = &grsec_enable_chroot_fchdir,
61777+ .maxlen = sizeof(int),
61778+ .mode = 0600,
61779+ .proc_handler = &proc_dointvec,
61780+ },
61781+#endif
61782+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61783+ {
61784+ .procname = "chroot_deny_chroot",
61785+ .data = &grsec_enable_chroot_double,
61786+ .maxlen = sizeof(int),
61787+ .mode = 0600,
61788+ .proc_handler = &proc_dointvec,
61789+ },
61790+#endif
61791+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61792+ {
61793+ .procname = "chroot_deny_pivot",
61794+ .data = &grsec_enable_chroot_pivot,
61795+ .maxlen = sizeof(int),
61796+ .mode = 0600,
61797+ .proc_handler = &proc_dointvec,
61798+ },
61799+#endif
61800+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61801+ {
61802+ .procname = "chroot_enforce_chdir",
61803+ .data = &grsec_enable_chroot_chdir,
61804+ .maxlen = sizeof(int),
61805+ .mode = 0600,
61806+ .proc_handler = &proc_dointvec,
61807+ },
61808+#endif
61809+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61810+ {
61811+ .procname = "chroot_deny_chmod",
61812+ .data = &grsec_enable_chroot_chmod,
61813+ .maxlen = sizeof(int),
61814+ .mode = 0600,
61815+ .proc_handler = &proc_dointvec,
61816+ },
61817+#endif
61818+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61819+ {
61820+ .procname = "chroot_deny_mknod",
61821+ .data = &grsec_enable_chroot_mknod,
61822+ .maxlen = sizeof(int),
61823+ .mode = 0600,
61824+ .proc_handler = &proc_dointvec,
61825+ },
61826+#endif
61827+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61828+ {
61829+ .procname = "chroot_restrict_nice",
61830+ .data = &grsec_enable_chroot_nice,
61831+ .maxlen = sizeof(int),
61832+ .mode = 0600,
61833+ .proc_handler = &proc_dointvec,
61834+ },
61835+#endif
61836+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61837+ {
61838+ .procname = "chroot_execlog",
61839+ .data = &grsec_enable_chroot_execlog,
61840+ .maxlen = sizeof(int),
61841+ .mode = 0600,
61842+ .proc_handler = &proc_dointvec,
61843+ },
61844+#endif
61845+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61846+ {
61847+ .procname = "chroot_caps",
61848+ .data = &grsec_enable_chroot_caps,
61849+ .maxlen = sizeof(int),
61850+ .mode = 0600,
61851+ .proc_handler = &proc_dointvec,
61852+ },
61853+#endif
61854+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61855+ {
61856+ .procname = "chroot_deny_sysctl",
61857+ .data = &grsec_enable_chroot_sysctl,
61858+ .maxlen = sizeof(int),
61859+ .mode = 0600,
61860+ .proc_handler = &proc_dointvec,
61861+ },
61862+#endif
61863+#ifdef CONFIG_GRKERNSEC_TPE
61864+ {
61865+ .procname = "tpe",
61866+ .data = &grsec_enable_tpe,
61867+ .maxlen = sizeof(int),
61868+ .mode = 0600,
61869+ .proc_handler = &proc_dointvec,
61870+ },
61871+ {
61872+ .procname = "tpe_gid",
61873+ .data = &grsec_tpe_gid,
61874+ .maxlen = sizeof(int),
61875+ .mode = 0600,
61876+ .proc_handler = &proc_dointvec,
61877+ },
61878+#endif
61879+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
61880+ {
61881+ .procname = "tpe_invert",
61882+ .data = &grsec_enable_tpe_invert,
61883+ .maxlen = sizeof(int),
61884+ .mode = 0600,
61885+ .proc_handler = &proc_dointvec,
61886+ },
61887+#endif
61888+#ifdef CONFIG_GRKERNSEC_TPE_ALL
61889+ {
61890+ .procname = "tpe_restrict_all",
61891+ .data = &grsec_enable_tpe_all,
61892+ .maxlen = sizeof(int),
61893+ .mode = 0600,
61894+ .proc_handler = &proc_dointvec,
61895+ },
61896+#endif
61897+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
61898+ {
61899+ .procname = "socket_all",
61900+ .data = &grsec_enable_socket_all,
61901+ .maxlen = sizeof(int),
61902+ .mode = 0600,
61903+ .proc_handler = &proc_dointvec,
61904+ },
61905+ {
61906+ .procname = "socket_all_gid",
61907+ .data = &grsec_socket_all_gid,
61908+ .maxlen = sizeof(int),
61909+ .mode = 0600,
61910+ .proc_handler = &proc_dointvec,
61911+ },
61912+#endif
61913+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
61914+ {
61915+ .procname = "socket_client",
61916+ .data = &grsec_enable_socket_client,
61917+ .maxlen = sizeof(int),
61918+ .mode = 0600,
61919+ .proc_handler = &proc_dointvec,
61920+ },
61921+ {
61922+ .procname = "socket_client_gid",
61923+ .data = &grsec_socket_client_gid,
61924+ .maxlen = sizeof(int),
61925+ .mode = 0600,
61926+ .proc_handler = &proc_dointvec,
61927+ },
61928+#endif
61929+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
61930+ {
61931+ .procname = "socket_server",
61932+ .data = &grsec_enable_socket_server,
61933+ .maxlen = sizeof(int),
61934+ .mode = 0600,
61935+ .proc_handler = &proc_dointvec,
61936+ },
61937+ {
61938+ .procname = "socket_server_gid",
61939+ .data = &grsec_socket_server_gid,
61940+ .maxlen = sizeof(int),
61941+ .mode = 0600,
61942+ .proc_handler = &proc_dointvec,
61943+ },
61944+#endif
61945+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
61946+ {
61947+ .procname = "audit_group",
61948+ .data = &grsec_enable_group,
61949+ .maxlen = sizeof(int),
61950+ .mode = 0600,
61951+ .proc_handler = &proc_dointvec,
61952+ },
61953+ {
61954+ .procname = "audit_gid",
61955+ .data = &grsec_audit_gid,
61956+ .maxlen = sizeof(int),
61957+ .mode = 0600,
61958+ .proc_handler = &proc_dointvec,
61959+ },
61960+#endif
61961+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61962+ {
61963+ .procname = "audit_chdir",
61964+ .data = &grsec_enable_chdir,
61965+ .maxlen = sizeof(int),
61966+ .mode = 0600,
61967+ .proc_handler = &proc_dointvec,
61968+ },
61969+#endif
61970+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
61971+ {
61972+ .procname = "audit_mount",
61973+ .data = &grsec_enable_mount,
61974+ .maxlen = sizeof(int),
61975+ .mode = 0600,
61976+ .proc_handler = &proc_dointvec,
61977+ },
61978+#endif
61979+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
61980+ {
61981+ .procname = "audit_textrel",
61982+ .data = &grsec_enable_audit_textrel,
61983+ .maxlen = sizeof(int),
61984+ .mode = 0600,
61985+ .proc_handler = &proc_dointvec,
61986+ },
61987+#endif
61988+#ifdef CONFIG_GRKERNSEC_DMESG
61989+ {
61990+ .procname = "dmesg",
61991+ .data = &grsec_enable_dmesg,
61992+ .maxlen = sizeof(int),
61993+ .mode = 0600,
61994+ .proc_handler = &proc_dointvec,
61995+ },
61996+#endif
61997+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61998+ {
61999+ .procname = "chroot_findtask",
62000+ .data = &grsec_enable_chroot_findtask,
62001+ .maxlen = sizeof(int),
62002+ .mode = 0600,
62003+ .proc_handler = &proc_dointvec,
62004+ },
62005+#endif
62006+#ifdef CONFIG_GRKERNSEC_RESLOG
62007+ {
62008+ .procname = "resource_logging",
62009+ .data = &grsec_resource_logging,
62010+ .maxlen = sizeof(int),
62011+ .mode = 0600,
62012+ .proc_handler = &proc_dointvec,
62013+ },
62014+#endif
62015+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
62016+ {
62017+ .procname = "audit_ptrace",
62018+ .data = &grsec_enable_audit_ptrace,
62019+ .maxlen = sizeof(int),
62020+ .mode = 0600,
62021+ .proc_handler = &proc_dointvec,
62022+ },
62023+#endif
62024+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62025+ {
62026+ .procname = "harden_ptrace",
62027+ .data = &grsec_enable_harden_ptrace,
62028+ .maxlen = sizeof(int),
62029+ .mode = 0600,
62030+ .proc_handler = &proc_dointvec,
62031+ },
62032+#endif
62033+ {
62034+ .procname = "grsec_lock",
62035+ .data = &grsec_lock,
62036+ .maxlen = sizeof(int),
62037+ .mode = 0600,
62038+ .proc_handler = &proc_dointvec,
62039+ },
62040+#endif
62041+#ifdef CONFIG_GRKERNSEC_ROFS
62042+ {
62043+ .procname = "romount_protect",
62044+ .data = &grsec_enable_rofs,
62045+ .maxlen = sizeof(int),
62046+ .mode = 0600,
62047+ .proc_handler = &proc_dointvec_minmax,
62048+ .extra1 = &one,
62049+ .extra2 = &one,
62050+ },
62051+#endif
62052+ { }
62053+};
62054+#endif
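Illustrative note (not part of the patch): every entry follows one shape, an int behind a mode-0600 node handled by proc_dointvec, and grsec_lock acts as a one-way latch that gr_handle_sysctl_mod() consults to reject further writes under the grsecurity directory. These knobs surface under /proc/sys/kernel/grsecurity/; a sketch that sets the lock (path assumed from the table's procnames, requires root on a grsecurity kernel):

/* Flip one of the sysctl toggles defined above from user space. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/grsecurity/grsec_lock", "w");

    if (!f) {
        perror("open grsec_lock");
        return 1;
    }
    fputs("1\n", f);   /* after this, gr_handle_sysctl_mod denies writes */
    fclose(f);
    return 0;
}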
62055diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
62056new file mode 100644
62057index 0000000..0dc13c3
62058--- /dev/null
62059+++ b/grsecurity/grsec_time.c
62060@@ -0,0 +1,16 @@
62061+#include <linux/kernel.h>
62062+#include <linux/sched.h>
62063+#include <linux/grinternal.h>
62064+#include <linux/module.h>
62065+
62066+void
62067+gr_log_timechange(void)
62068+{
62069+#ifdef CONFIG_GRKERNSEC_TIME
62070+ if (grsec_enable_time)
62071+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
62072+#endif
62073+ return;
62074+}
62075+
62076+EXPORT_SYMBOL(gr_log_timechange);
62077diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
62078new file mode 100644
62079index 0000000..07e0dc0
62080--- /dev/null
62081+++ b/grsecurity/grsec_tpe.c
62082@@ -0,0 +1,73 @@
62083+#include <linux/kernel.h>
62084+#include <linux/sched.h>
62085+#include <linux/file.h>
62086+#include <linux/fs.h>
62087+#include <linux/grinternal.h>
62088+
62089+extern int gr_acl_tpe_check(void);
62090+
62091+int
62092+gr_tpe_allow(const struct file *file)
62093+{
62094+#ifdef CONFIG_GRKERNSEC
62095+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
62096+ const struct cred *cred = current_cred();
62097+ char *msg = NULL;
62098+ char *msg2 = NULL;
62099+
62100+ // never restrict root
62101+ if (!cred->uid)
62102+ return 1;
62103+
62104+ if (grsec_enable_tpe) {
62105+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62106+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
62107+ msg = "not being in trusted group";
62108+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
62109+ msg = "being in untrusted group";
62110+#else
62111+ if (in_group_p(grsec_tpe_gid))
62112+ msg = "being in untrusted group";
62113+#endif
62114+ }
62115+ if (!msg && gr_acl_tpe_check())
62116+ msg = "being in untrusted role";
62117+
62118+ // not in any affected group/role
62119+ if (!msg)
62120+ goto next_check;
62121+
62122+ if (inode->i_uid)
62123+ msg2 = "file in non-root-owned directory";
62124+ else if (inode->i_mode & S_IWOTH)
62125+ msg2 = "file in world-writable directory";
62126+ else if (inode->i_mode & S_IWGRP)
62127+ msg2 = "file in group-writable directory";
62128+
62129+ if (msg && msg2) {
62130+ char fullmsg[70] = {0};
62131+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
62132+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
62133+ return 0;
62134+ }
62135+ msg = NULL;
62136+next_check:
62137+#ifdef CONFIG_GRKERNSEC_TPE_ALL
62138+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
62139+ return 1;
62140+
62141+ if (inode->i_uid && (inode->i_uid != cred->uid))
62142+ msg = "directory not owned by user";
62143+ else if (inode->i_mode & S_IWOTH)
62144+ msg = "file in world-writable directory";
62145+ else if (inode->i_mode & S_IWGRP)
62146+ msg = "file in group-writable directory";
62147+
62148+ if (msg) {
62149+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
62150+ return 0;
62151+ }
62152+#endif
62153+#endif
62154+ return 1;
62155+}
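Illustrative sketch (not part of the patch): the TPE trust test examines the executable's parent directory rather than the file itself; "untrusted" means not root-owned (or, under TPE_ALL, not owned by the user) or group/world-writable. A user-space replica using stat(2) in place of the d_parent inode checks:

/* User-space replica of the directory-trust test in gr_tpe_allow(),
 * returning the same denial reasons. Illustration only. */
#include <stdio.h>
#include <sys/stat.h>

static const char *tpe_dir_untrusted(const char *dir)
{
    struct stat st;

    if (stat(dir, &st) != 0)
        return "stat failed";
    if (st.st_uid != 0)
        return "file in non-root-owned directory";
    if (st.st_mode & S_IWOTH)
        return "file in world-writable directory";
    if (st.st_mode & S_IWGRP)
        return "file in group-writable directory";
    return NULL;   /* trusted */
}

int main(void)
{
    const char *why = tpe_dir_untrusted("/tmp");
    printf("/tmp:     %s\n", why ? why : "trusted");
    why = tpe_dir_untrusted("/usr/bin");
    printf("/usr/bin: %s\n", why ? why : "trusted");
    return 0;
}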
62156diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
62157new file mode 100644
62158index 0000000..9f7b1ac
62159--- /dev/null
62160+++ b/grsecurity/grsum.c
62161@@ -0,0 +1,61 @@
62162+#include <linux/err.h>
62163+#include <linux/kernel.h>
62164+#include <linux/sched.h>
62165+#include <linux/mm.h>
62166+#include <linux/scatterlist.h>
62167+#include <linux/crypto.h>
62168+#include <linux/gracl.h>
62169+
62170+
62171+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
62172+#error "crypto and sha256 must be built into the kernel"
62173+#endif
62174+
62175+int
62176+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
62177+{
62178+ char *p;
62179+ struct crypto_hash *tfm;
62180+ struct hash_desc desc;
62181+ struct scatterlist sg;
62182+ unsigned char temp_sum[GR_SHA_LEN];
62183+ volatile int retval = 0;
62184+ volatile int dummy = 0;
62185+ unsigned int i;
62186+
62187+ sg_init_table(&sg, 1);
62188+
62189+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
62190+ if (IS_ERR(tfm)) {
62191+ /* should never happen, since sha256 should be built in */
62192+ return 1;
62193+ }
62194+
62195+ desc.tfm = tfm;
62196+ desc.flags = 0;
62197+
62198+ crypto_hash_init(&desc);
62199+
62200+ p = salt;
62201+ sg_set_buf(&sg, p, GR_SALT_LEN);
62202+ crypto_hash_update(&desc, &sg, sg.length);
62203+
62204+ p = entry->pw;
62205+ sg_set_buf(&sg, p, strlen(p));
62206+
62207+ crypto_hash_update(&desc, &sg, sg.length);
62208+
62209+ crypto_hash_final(&desc, temp_sum);
62210+
62211+ memset(entry->pw, 0, GR_PW_LEN);
62212+
62213+ for (i = 0; i < GR_SHA_LEN; i++)
62214+ if (sum[i] != temp_sum[i])
62215+ retval = 1;
62216+ else
62217+ dummy = 1; // waste a cycle
62218+
62219+ crypto_free_hash(tfm);
62220+
62221+ return retval;
62222+}
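Illustrative sketch (not part of the patch): the closing loop of chkpw() is a constant-time comparison. Every byte is inspected and the else branch does a dummy write, so a mismatch costs the same as a match and timing cannot reveal how many leading bytes were correct. The same idiom in standalone form, accumulating differences instead of using volatile locals:

/* Constant-time comparison: visit every byte, never exit early. */
#include <stdio.h>
#include <stddef.h>

static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < n; i++)
        diff |= a[i] ^ b[i];   /* no data-dependent branch */
    return diff != 0;          /* 0 on match, 1 on any difference */
}

int main(void)
{
    unsigned char x[4] = { 1, 2, 3, 4 };
    unsigned char y[4] = { 1, 2, 3, 5 };

    printf("same buffers:      %d\n", ct_memcmp(x, x, sizeof(x)));
    printf("differing buffers: %d\n", ct_memcmp(x, y, sizeof(x)));
    return 0;
}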
62223diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
62224index 77ff547..181834f 100644
62225--- a/include/asm-generic/4level-fixup.h
62226+++ b/include/asm-generic/4level-fixup.h
62227@@ -13,8 +13,10 @@
62228 #define pmd_alloc(mm, pud, address) \
62229 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
62230 NULL: pmd_offset(pud, address))
62231+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
62232
62233 #define pud_alloc(mm, pgd, address) (pgd)
62234+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
62235 #define pud_offset(pgd, start) (pgd)
62236 #define pud_none(pud) 0
62237 #define pud_bad(pud) 0
62238diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
62239index b7babf0..04ad282 100644
62240--- a/include/asm-generic/atomic-long.h
62241+++ b/include/asm-generic/atomic-long.h
62242@@ -22,6 +22,12 @@
62243
62244 typedef atomic64_t atomic_long_t;
62245
62246+#ifdef CONFIG_PAX_REFCOUNT
62247+typedef atomic64_unchecked_t atomic_long_unchecked_t;
62248+#else
62249+typedef atomic64_t atomic_long_unchecked_t;
62250+#endif
62251+
62252 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
62253
62254 static inline long atomic_long_read(atomic_long_t *l)
62255@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
62256 return (long)atomic64_read(v);
62257 }
62258
62259+#ifdef CONFIG_PAX_REFCOUNT
62260+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
62261+{
62262+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62263+
62264+ return (long)atomic64_read_unchecked(v);
62265+}
62266+#endif
62267+
62268 static inline void atomic_long_set(atomic_long_t *l, long i)
62269 {
62270 atomic64_t *v = (atomic64_t *)l;
62271@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
62272 atomic64_set(v, i);
62273 }
62274
62275+#ifdef CONFIG_PAX_REFCOUNT
62276+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
62277+{
62278+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62279+
62280+ atomic64_set_unchecked(v, i);
62281+}
62282+#endif
62283+
62284 static inline void atomic_long_inc(atomic_long_t *l)
62285 {
62286 atomic64_t *v = (atomic64_t *)l;
62287@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
62288 atomic64_inc(v);
62289 }
62290
62291+#ifdef CONFIG_PAX_REFCOUNT
62292+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
62293+{
62294+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62295+
62296+ atomic64_inc_unchecked(v);
62297+}
62298+#endif
62299+
62300 static inline void atomic_long_dec(atomic_long_t *l)
62301 {
62302 atomic64_t *v = (atomic64_t *)l;
62303@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62304 atomic64_dec(v);
62305 }
62306
62307+#ifdef CONFIG_PAX_REFCOUNT
62308+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62309+{
62310+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62311+
62312+ atomic64_dec_unchecked(v);
62313+}
62314+#endif
62315+
62316 static inline void atomic_long_add(long i, atomic_long_t *l)
62317 {
62318 atomic64_t *v = (atomic64_t *)l;
62319@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62320 atomic64_add(i, v);
62321 }
62322
62323+#ifdef CONFIG_PAX_REFCOUNT
62324+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62325+{
62326+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62327+
62328+ atomic64_add_unchecked(i, v);
62329+}
62330+#endif
62331+
62332 static inline void atomic_long_sub(long i, atomic_long_t *l)
62333 {
62334 atomic64_t *v = (atomic64_t *)l;
62335@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62336 atomic64_sub(i, v);
62337 }
62338
62339+#ifdef CONFIG_PAX_REFCOUNT
62340+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62341+{
62342+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62343+
62344+ atomic64_sub_unchecked(i, v);
62345+}
62346+#endif
62347+
62348 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62349 {
62350 atomic64_t *v = (atomic64_t *)l;
62351@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62352 return (long)atomic64_add_return(i, v);
62353 }
62354
62355+#ifdef CONFIG_PAX_REFCOUNT
62356+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62357+{
62358+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62359+
62360+ return (long)atomic64_add_return_unchecked(i, v);
62361+}
62362+#endif
62363+
62364 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62365 {
62366 atomic64_t *v = (atomic64_t *)l;
62367@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62368 return (long)atomic64_inc_return(v);
62369 }
62370
62371+#ifdef CONFIG_PAX_REFCOUNT
62372+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62373+{
62374+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
62375+
62376+ return (long)atomic64_inc_return_unchecked(v);
62377+}
62378+#endif
62379+
62380 static inline long atomic_long_dec_return(atomic_long_t *l)
62381 {
62382 atomic64_t *v = (atomic64_t *)l;
62383@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62384
62385 typedef atomic_t atomic_long_t;
62386
62387+#ifdef CONFIG_PAX_REFCOUNT
62388+typedef atomic_unchecked_t atomic_long_unchecked_t;
62389+#else
62390+typedef atomic_t atomic_long_unchecked_t;
62391+#endif
62392+
62393 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
62394 static inline long atomic_long_read(atomic_long_t *l)
62395 {
62396@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
62397 return (long)atomic_read(v);
62398 }
62399
62400+#ifdef CONFIG_PAX_REFCOUNT
62401+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
62402+{
62403+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62404+
62405+ return (long)atomic_read_unchecked(v);
62406+}
62407+#endif
62408+
62409 static inline void atomic_long_set(atomic_long_t *l, long i)
62410 {
62411 atomic_t *v = (atomic_t *)l;
62412@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
62413 atomic_set(v, i);
62414 }
62415
62416+#ifdef CONFIG_PAX_REFCOUNT
62417+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
62418+{
62419+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62420+
62421+ atomic_set_unchecked(v, i);
62422+}
62423+#endif
62424+
62425 static inline void atomic_long_inc(atomic_long_t *l)
62426 {
62427 atomic_t *v = (atomic_t *)l;
62428@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
62429 atomic_inc(v);
62430 }
62431
62432+#ifdef CONFIG_PAX_REFCOUNT
62433+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
62434+{
62435+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62436+
62437+ atomic_inc_unchecked(v);
62438+}
62439+#endif
62440+
62441 static inline void atomic_long_dec(atomic_long_t *l)
62442 {
62443 atomic_t *v = (atomic_t *)l;
62444@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
62445 atomic_dec(v);
62446 }
62447
62448+#ifdef CONFIG_PAX_REFCOUNT
62449+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
62450+{
62451+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62452+
62453+ atomic_dec_unchecked(v);
62454+}
62455+#endif
62456+
62457 static inline void atomic_long_add(long i, atomic_long_t *l)
62458 {
62459 atomic_t *v = (atomic_t *)l;
62460@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
62461 atomic_add(i, v);
62462 }
62463
62464+#ifdef CONFIG_PAX_REFCOUNT
62465+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
62466+{
62467+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62468+
62469+ atomic_add_unchecked(i, v);
62470+}
62471+#endif
62472+
62473 static inline void atomic_long_sub(long i, atomic_long_t *l)
62474 {
62475 atomic_t *v = (atomic_t *)l;
62476@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
62477 atomic_sub(i, v);
62478 }
62479
62480+#ifdef CONFIG_PAX_REFCOUNT
62481+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
62482+{
62483+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62484+
62485+ atomic_sub_unchecked(i, v);
62486+}
62487+#endif
62488+
62489 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
62490 {
62491 atomic_t *v = (atomic_t *)l;
62492@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
62493 return (long)atomic_add_return(i, v);
62494 }
62495
62496+#ifdef CONFIG_PAX_REFCOUNT
62497+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
62498+{
62499+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62500+
62501+ return (long)atomic_add_return_unchecked(i, v);
62502+}
62503+
62504+#endif
62505+
62506 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
62507 {
62508 atomic_t *v = (atomic_t *)l;
62509@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
62510 return (long)atomic_inc_return(v);
62511 }
62512
62513+#ifdef CONFIG_PAX_REFCOUNT
62514+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
62515+{
62516+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
62517+
62518+ return (long)atomic_inc_return_unchecked(v);
62519+}
62520+#endif
62521+
62522 static inline long atomic_long_dec_return(atomic_long_t *l)
62523 {
62524 atomic_t *v = (atomic_t *)l;
62525@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
62526
62527 #endif /* BITS_PER_LONG == 64 */
62528
62529+#ifdef CONFIG_PAX_REFCOUNT
62530+static inline void pax_refcount_needs_these_functions(void)
62531+{
62532+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
62533+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
62534+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
62535+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
62536+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
62537+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
62538+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
62539+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
62540+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
62541+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
62542+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
62543+#ifdef CONFIG_X86
62544+ atomic_clear_mask_unchecked(0, NULL);
62545+ atomic_set_mask_unchecked(0, NULL);
62546+#endif
62547+
62548+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
62549+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
62550+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
62551+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
62552+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
62553+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
62554+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
62555+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
62556+}
62557+#else
62558+#define atomic_read_unchecked(v) atomic_read(v)
62559+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
62560+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
62561+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
62562+#define atomic_inc_unchecked(v) atomic_inc(v)
62563+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
62564+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
62565+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
62566+#define atomic_dec_unchecked(v) atomic_dec(v)
62567+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
62568+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
62569+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
62570+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
62571+
62572+#define atomic_long_read_unchecked(v) atomic_long_read(v)
62573+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
62574+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
62575+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
62576+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
62577+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
62578+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
62579+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
62580+#endif
62581+
62582 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
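Illustrative sketch (not part of the patch): the _unchecked families exist because PAX_REFCOUNT makes plain atomic overflows trap; counters whose wraparound is harmless opt out through these types, and with PAX_REFCOUNT disabled the #define block above collapses them to the ordinary operations. A user-space sketch of the checked/unchecked split, with hand-rolled counters rather than the kernel types:

/* Checked vs. unchecked counters: the checked increment refuses to
 * wrap (standing in for a trapping atomic_long_inc), the unchecked
 * one is a plain counter for statistics. Illustration only. */
#include <stdio.h>
#include <limits.h>

static long checked_inc(long *v)
{
    if (*v == LONG_MAX) {
        fprintf(stderr, "refcount overflow caught\n");
        return *v;                 /* saturate instead of wrapping */
    }
    return ++*v;
}

static unsigned long unchecked_inc(unsigned long *v)
{
    return ++*v;                   /* wraparound tolerated by design */
}

int main(void)
{
    long ref = LONG_MAX;
    unsigned long stats = 0;

    checked_inc(&ref);             /* reports, value stays LONG_MAX */
    unchecked_inc(&stats);
    printf("ref=%ld stats=%lu\n", ref, stats);
    return 0;
}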
62583diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
62584index 1ced641..c896ee8 100644
62585--- a/include/asm-generic/atomic.h
62586+++ b/include/asm-generic/atomic.h
62587@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
62588 * Atomically clears the bits set in @mask from @v
62589 */
62590 #ifndef atomic_clear_mask
62591-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
62592+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
62593 {
62594 unsigned long flags;
62595
62596diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
62597index b18ce4f..2ee2843 100644
62598--- a/include/asm-generic/atomic64.h
62599+++ b/include/asm-generic/atomic64.h
62600@@ -16,6 +16,8 @@ typedef struct {
62601 long long counter;
62602 } atomic64_t;
62603
62604+typedef atomic64_t atomic64_unchecked_t;
62605+
62606 #define ATOMIC64_INIT(i) { (i) }
62607
62608 extern long long atomic64_read(const atomic64_t *v);
62609@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
62610 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
62611 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
62612
62613+#define atomic64_read_unchecked(v) atomic64_read(v)
62614+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
62615+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
62616+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
62617+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
62618+#define atomic64_inc_unchecked(v) atomic64_inc(v)
62619+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
62620+#define atomic64_dec_unchecked(v) atomic64_dec(v)
62621+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
62622+
62623 #endif /* _ASM_GENERIC_ATOMIC64_H */
62624diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
62625index 1bfcfe5..e04c5c9 100644
62626--- a/include/asm-generic/cache.h
62627+++ b/include/asm-generic/cache.h
62628@@ -6,7 +6,7 @@
62629 * cache lines need to provide their own cache.h.
62630 */
62631
62632-#define L1_CACHE_SHIFT 5
62633-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
62634+#define L1_CACHE_SHIFT 5UL
62635+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
62636
62637 #endif /* __ASM_GENERIC_CACHE_H */
62638diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
62639index 0d68a1e..b74a761 100644
62640--- a/include/asm-generic/emergency-restart.h
62641+++ b/include/asm-generic/emergency-restart.h
62642@@ -1,7 +1,7 @@
62643 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
62644 #define _ASM_GENERIC_EMERGENCY_RESTART_H
62645
62646-static inline void machine_emergency_restart(void)
62647+static inline __noreturn void machine_emergency_restart(void)
62648 {
62649 machine_restart(NULL);
62650 }
62651diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
62652index 90f99c7..00ce236 100644
62653--- a/include/asm-generic/kmap_types.h
62654+++ b/include/asm-generic/kmap_types.h
62655@@ -2,9 +2,9 @@
62656 #define _ASM_GENERIC_KMAP_TYPES_H
62657
62658 #ifdef __WITH_KM_FENCE
62659-# define KM_TYPE_NR 41
62660+# define KM_TYPE_NR 42
62661 #else
62662-# define KM_TYPE_NR 20
62663+# define KM_TYPE_NR 21
62664 #endif
62665
62666 #endif
62667diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
62668index 9ceb03b..62b0b8f 100644
62669--- a/include/asm-generic/local.h
62670+++ b/include/asm-generic/local.h
62671@@ -23,24 +23,37 @@ typedef struct
62672 atomic_long_t a;
62673 } local_t;
62674
62675+typedef struct {
62676+ atomic_long_unchecked_t a;
62677+} local_unchecked_t;
62678+
62679 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
62680
62681 #define local_read(l) atomic_long_read(&(l)->a)
62682+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
62683 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
62684+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
62685 #define local_inc(l) atomic_long_inc(&(l)->a)
62686+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
62687 #define local_dec(l) atomic_long_dec(&(l)->a)
62688+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
62689 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
62690+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
62691 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
62692+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
62693
62694 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
62695 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
62696 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
62697 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
62698 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
62699+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
62700 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
62701 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
62702+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
62703
62704 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62705+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
62706 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
62707 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
62708 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
62709diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
62710index 725612b..9cc513a 100644
62711--- a/include/asm-generic/pgtable-nopmd.h
62712+++ b/include/asm-generic/pgtable-nopmd.h
62713@@ -1,14 +1,19 @@
62714 #ifndef _PGTABLE_NOPMD_H
62715 #define _PGTABLE_NOPMD_H
62716
62717-#ifndef __ASSEMBLY__
62718-
62719 #include <asm-generic/pgtable-nopud.h>
62720
62721-struct mm_struct;
62722-
62723 #define __PAGETABLE_PMD_FOLDED
62724
62725+#define PMD_SHIFT PUD_SHIFT
62726+#define PTRS_PER_PMD 1
62727+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
62728+#define PMD_MASK (~(PMD_SIZE-1))
62729+
62730+#ifndef __ASSEMBLY__
62731+
62732+struct mm_struct;
62733+
62734 /*
62735 * Having the pmd type consist of a pud gets the size right, and allows
62736 * us to conceptually access the pud entry that this pmd is folded into
62737@@ -16,11 +21,6 @@ struct mm_struct;
62738 */
62739 typedef struct { pud_t pud; } pmd_t;
62740
62741-#define PMD_SHIFT PUD_SHIFT
62742-#define PTRS_PER_PMD 1
62743-#define PMD_SIZE (1UL << PMD_SHIFT)
62744-#define PMD_MASK (~(PMD_SIZE-1))
62745-
62746 /*
62747 * The "pud_xxx()" functions here are trivial for a folded two-level
62748 * setup: the pmd is never bad, and a pmd always exists (as it's folded
62749diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
62750index 810431d..0ec4804f 100644
62751--- a/include/asm-generic/pgtable-nopud.h
62752+++ b/include/asm-generic/pgtable-nopud.h
62753@@ -1,10 +1,15 @@
62754 #ifndef _PGTABLE_NOPUD_H
62755 #define _PGTABLE_NOPUD_H
62756
62757-#ifndef __ASSEMBLY__
62758-
62759 #define __PAGETABLE_PUD_FOLDED
62760
62761+#define PUD_SHIFT PGDIR_SHIFT
62762+#define PTRS_PER_PUD 1
62763+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
62764+#define PUD_MASK (~(PUD_SIZE-1))
62765+
62766+#ifndef __ASSEMBLY__
62767+
62768 /*
62769 * Having the pud type consist of a pgd gets the size right, and allows
62770 * us to conceptually access the pgd entry that this pud is folded into
62771@@ -12,11 +17,6 @@
62772 */
62773 typedef struct { pgd_t pgd; } pud_t;
62774
62775-#define PUD_SHIFT PGDIR_SHIFT
62776-#define PTRS_PER_PUD 1
62777-#define PUD_SIZE (1UL << PUD_SHIFT)
62778-#define PUD_MASK (~(PUD_SIZE-1))
62779-
62780 /*
62781 * The "pgd_xxx()" functions here are trivial for a folded two-level
62782 * setup: the pud is never bad, and a pud always exists (as it's folded
62783@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
62784 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
62785
62786 #define pgd_populate(mm, pgd, pud) do { } while (0)
62787+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
62788 /*
62789 * (puds are folded into pgds so this doesn't get actually called,
62790 * but the define is needed for a generic inline function.)
62791diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
62792index b36ce40..019426d 100644
62793--- a/include/asm-generic/pgtable.h
62794+++ b/include/asm-generic/pgtable.h
62795@@ -554,6 +554,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
62796 #endif
62797 }
62798
62799+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
62800+static inline unsigned long pax_open_kernel(void) { return 0; }
62801+#endif
62802+
62803+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
62804+static inline unsigned long pax_close_kernel(void) { return 0; }
62805+#endif
62806+
62807 #endif /* CONFIG_MMU */
62808
62809 #endif /* !__ASSEMBLY__ */
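Illustrative sketch (not part of the patch): pax_open_kernel()/pax_close_kernel() bracket writes to data that PaX otherwise keeps read-only. An architecture providing real implementations defines __HAVE_ARCH_PAX_OPEN_KERNEL and __HAVE_ARCH_PAX_CLOSE_KERNEL, and all others inherit these no-op stubs so common code can call the pair unconditionally. The guard-macro pattern in standalone form, with invented names:

/* A header supplies a no-op default unless the platform claims the
 * hook by defining the guard macro. Names are illustrative. */
#include <stdio.h>

/* An "arch" with a real implementation would define
 * HAVE_ARCH_OPEN_PROTECTED and provide its own pair. */
#ifndef HAVE_ARCH_OPEN_PROTECTED
static inline unsigned long open_protected(void)  { return 0; }
static inline unsigned long close_protected(void) { return 0; }
#endif

int main(void)
{
    unsigned long flags = open_protected();   /* no-op default here */

    puts("a write to otherwise read-only data would go here");
    close_protected();
    (void)flags;
    return 0;
}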
62810diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
62811index d1ea7ce..b1ebf2a 100644
62812--- a/include/asm-generic/vmlinux.lds.h
62813+++ b/include/asm-generic/vmlinux.lds.h
62814@@ -218,6 +218,7 @@
62815 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
62816 VMLINUX_SYMBOL(__start_rodata) = .; \
62817 *(.rodata) *(.rodata.*) \
62818+ *(.data..read_only) \
62819 *(__vermagic) /* Kernel version magic */ \
62820 . = ALIGN(8); \
62821 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
62822@@ -725,17 +726,18 @@
62823 * section in the linker script will go there too. @phdr should have
62824 * a leading colon.
62825 *
62826- * Note that this macros defines __per_cpu_load as an absolute symbol.
62827+ * Note that this macro defines per_cpu_load as an absolute symbol.
62828 * If there is no need to put the percpu section at a predetermined
62829 * address, use PERCPU_SECTION.
62830 */
62831 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
62832- VMLINUX_SYMBOL(__per_cpu_load) = .; \
62833- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
62834+ per_cpu_load = .; \
62835+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
62836 - LOAD_OFFSET) { \
62837+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
62838 PERCPU_INPUT(cacheline) \
62839 } phdr \
62840- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
62841+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
62842
62843 /**
62844 * PERCPU_SECTION - define output section for percpu area, simple version
62845diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
62846index 418d270..bfd2794 100644
62847--- a/include/crypto/algapi.h
62848+++ b/include/crypto/algapi.h
62849@@ -34,7 +34,7 @@ struct crypto_type {
62850 unsigned int maskclear;
62851 unsigned int maskset;
62852 unsigned int tfmsize;
62853-};
62854+} __do_const;
62855
62856 struct crypto_instance {
62857 struct crypto_alg alg;
62858diff --git a/include/drm/drmP.h b/include/drm/drmP.h
62859index 3fd8280..2b3c415 100644
62860--- a/include/drm/drmP.h
62861+++ b/include/drm/drmP.h
62862@@ -72,6 +72,7 @@
62863 #include <linux/workqueue.h>
62864 #include <linux/poll.h>
62865 #include <asm/pgalloc.h>
62866+#include <asm/local.h>
62867 #include <drm/drm.h>
62868 #include <drm/drm_sarea.h>
62869
62870@@ -1068,7 +1069,7 @@ struct drm_device {
62871
62872 /** \name Usage Counters */
62873 /*@{ */
62874- int open_count; /**< Outstanding files open */
62875+ local_t open_count; /**< Outstanding files open */
62876 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
62877 atomic_t vma_count; /**< Outstanding vma areas open */
62878 int buf_use; /**< Buffers in use -- cannot alloc */
62879@@ -1079,7 +1080,7 @@ struct drm_device {
62880 /*@{ */
62881 unsigned long counters;
62882 enum drm_stat_type types[15];
62883- atomic_t counts[15];
62884+ atomic_unchecked_t counts[15];
62885 /*@} */
62886
62887 struct list_head filelist;
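
Two substitutions that begin here recur through the rest of the include/ hunks: open_count becomes a local_t, and pure statistics counters become atomic_unchecked_t. Under PAX_REFCOUNT, plain atomic_t operations trap on overflow to stop refcount-overflow exploits; atomic_unchecked_t is the opt-out for counters, such as these DRM stats, whose wraparound is harmless. A standalone userspace stand-in (not kernel code) for the distinction:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* the real patched atomic_inc() additionally traps on signed overflow;
 * atomic_inc_unchecked() keeps the historical wrapping behaviour */
static void atomic_inc(atomic_t *v)
{
	__atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_t refs = { 0 };			/* reference count: must never wrap */
	atomic_unchecked_t ioctls = { 0 };	/* statistic: wrapping is acceptable */

	atomic_inc(&refs);
	atomic_inc_unchecked(&ioctls);
	printf("refs=%d ioctls=%d\n", refs.counter, ioctls.counter);
	return 0;
}
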
62888diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
62889index e01cc80..6fb6f25 100644
62890--- a/include/drm/drm_crtc_helper.h
62891+++ b/include/drm/drm_crtc_helper.h
62892@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
62893 struct drm_connector *connector);
62894 /* disable encoder when not in use - more explicit than dpms off */
62895 void (*disable)(struct drm_encoder *encoder);
62896-};
62897+} __no_const;
62898
62899 /**
62900 * drm_connector_helper_funcs - helper operations for connectors
62901diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
62902index d6d1da4..fdd1ac5 100644
62903--- a/include/drm/ttm/ttm_memory.h
62904+++ b/include/drm/ttm/ttm_memory.h
62905@@ -48,7 +48,7 @@
62906
62907 struct ttm_mem_shrink {
62908 int (*do_shrink) (struct ttm_mem_shrink *);
62909-};
62910+} __no_const;
62911
62912 /**
62913 * struct ttm_mem_global - Global memory accounting structure.
62914diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
62915index 22ef21c..75904ba 100644
62916--- a/include/linux/atmdev.h
62917+++ b/include/linux/atmdev.h
62918@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
62919 #endif
62920
62921 struct k_atm_aal_stats {
62922-#define __HANDLE_ITEM(i) atomic_t i
62923+#define __HANDLE_ITEM(i) atomic_unchecked_t i
62924 __AAL_STAT_ITEMS
62925 #undef __HANDLE_ITEM
62926 };
62927diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
62928index de0628e..38f42eb 100644
62929--- a/include/linux/binfmts.h
62930+++ b/include/linux/binfmts.h
62931@@ -75,6 +75,7 @@ struct linux_binfmt {
62932 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
62933 int (*load_shlib)(struct file *);
62934 int (*core_dump)(struct coredump_params *cprm);
62935+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
62936 unsigned long min_coredump; /* minimal dump size */
62937 };
62938
62939diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
62940index 1756001..ab117ec 100644
62941--- a/include/linux/blkdev.h
62942+++ b/include/linux/blkdev.h
62943@@ -1478,7 +1478,7 @@ struct block_device_operations {
62944 /* this callback is with swap_lock and sometimes page table lock held */
62945 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
62946 struct module *owner;
62947-};
62948+} __do_const;
62949
62950 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
62951 unsigned long);
62952diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
62953index 7c2e030..b72475d 100644
62954--- a/include/linux/blktrace_api.h
62955+++ b/include/linux/blktrace_api.h
62956@@ -23,7 +23,7 @@ struct blk_trace {
62957 struct dentry *dir;
62958 struct dentry *dropped_file;
62959 struct dentry *msg_file;
62960- atomic_t dropped;
62961+ atomic_unchecked_t dropped;
62962 };
62963
62964 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
62965diff --git a/include/linux/cache.h b/include/linux/cache.h
62966index 4c57065..4307975 100644
62967--- a/include/linux/cache.h
62968+++ b/include/linux/cache.h
62969@@ -16,6 +16,10 @@
62970 #define __read_mostly
62971 #endif
62972
62973+#ifndef __read_only
62974+#define __read_only __read_mostly
62975+#endif
62976+
62977 #ifndef ____cacheline_aligned
62978 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
62979 #endif
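
__read_only marks write-once kernel data; with PaX KERNEXEC the annotation places the object in the .data..read_only input section that the vmlinux.lds.h hunk earlier folds into .rodata, while the fallback above degrades it to __read_mostly elsewhere. A small standalone illustration, using the KERNEXEC-style section definition and a hypothetical tunable:

#include <stdio.h>

#ifndef __read_only
#define __read_only __attribute__((__section__(".data..read_only")))
#endif

/* written during init only; later updates would go through pax_open_kernel() */
static int max_widgets __read_only = 128;	/* hypothetical tunable */

int main(void)
{
	printf("max_widgets=%d\n", max_widgets);
	return 0;
}
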
62980diff --git a/include/linux/capability.h b/include/linux/capability.h
62981index 98503b7..cc36d18 100644
62982--- a/include/linux/capability.h
62983+++ b/include/linux/capability.h
62984@@ -211,8 +211,13 @@ extern bool capable(int cap);
62985 extern bool ns_capable(struct user_namespace *ns, int cap);
62986 extern bool nsown_capable(int cap);
62987 extern bool inode_capable(const struct inode *inode, int cap);
62988+extern bool capable_nolog(int cap);
62989+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
62990+extern bool inode_capable_nolog(const struct inode *inode, int cap);
62991
62992 /* audit system wants to get cap info from files as well */
62993 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
62994
62995+extern int is_privileged_binary(const struct dentry *dentry);
62996+
62997 #endif /* !_LINUX_CAPABILITY_H */
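
The *_nolog capability variants let grsecurity perform internal probing checks without flooding the capability audit log, and is_privileged_binary() is used to treat setuid/setgid (and fscap-bearing) executables specially, e.g. for hardlink restrictions. A rough userspace analogue of the mode-bit half of that test (the real kernel version also inspects file capabilities; the helper name here is hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

static bool is_privileged_binary_mode(mode_t mode)	/* hypothetical helper */
{
	return (mode & (S_ISUID | S_ISGID)) != 0;
}

int main(void)
{
	struct stat st;

	if (stat("/usr/bin/passwd", &st) == 0)		/* classic setuid binary */
		printf("privileged: %d\n", is_privileged_binary_mode(st.st_mode));
	return 0;
}
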
62998diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
62999index 8609d57..86e4d79 100644
63000--- a/include/linux/cdrom.h
63001+++ b/include/linux/cdrom.h
63002@@ -87,7 +87,6 @@ struct cdrom_device_ops {
63003
63004 /* driver specifications */
63005 const int capability; /* capability flags */
63006- int n_minors; /* number of active minor devices */
63007 /* handle uniform packets for scsi type devices (scsi,atapi) */
63008 int (*generic_packet) (struct cdrom_device_info *,
63009 struct packet_command *);
63010diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
63011index 42e55de..1cd0e66 100644
63012--- a/include/linux/cleancache.h
63013+++ b/include/linux/cleancache.h
63014@@ -31,7 +31,7 @@ struct cleancache_ops {
63015 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
63016 void (*invalidate_inode)(int, struct cleancache_filekey);
63017 void (*invalidate_fs)(int);
63018-};
63019+} __no_const;
63020
63021 extern struct cleancache_ops
63022 cleancache_register_ops(struct cleancache_ops *ops);
63023diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
63024index 412bc6c..c31666e 100644
63025--- a/include/linux/compiler-gcc4.h
63026+++ b/include/linux/compiler-gcc4.h
63027@@ -32,6 +32,21 @@
63028 #define __linktime_error(message) __attribute__((__error__(message)))
63029
63030 #if __GNUC_MINOR__ >= 5
63031+
63032+#ifdef CONSTIFY_PLUGIN
63033+#define __no_const __attribute__((no_const))
63034+#define __do_const __attribute__((do_const))
63035+#endif
63036+
63037+#ifdef SIZE_OVERFLOW_PLUGIN
63038+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
63039+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
63040+#endif
63041+
63042+#ifdef LATENT_ENTROPY_PLUGIN
63043+#define __latent_entropy __attribute__((latent_entropy))
63044+#endif
63045+
63046 /*
63047 * Mark a position in code as unreachable. This can be used to
63048 * suppress control flow warnings after asm blocks that transfer
63049@@ -47,6 +62,11 @@
63050 #define __noclone __attribute__((__noclone__))
63051
63052 #endif
63053+
63054+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
63055+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
63056+#define __bos0(ptr) __bos((ptr), 0)
63057+#define __bos1(ptr) __bos((ptr), 1)
63058 #endif
63059
63060 #if __GNUC_MINOR__ >= 6
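
The block above wires in the gcc-plugin attributes: with the constify plugin, __do_const forces a structure of function pointers into the read-only section and __no_const opts one back out (both are applied to many ops structs in the hunks around this one), while the gcc 4.5+ section also exposes alloc_size and __builtin_object_size for bounds tracking. A standalone demo of the latter pair, with a made-up allocator name:

#include <stdio.h>
#include <stdlib.h>

#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
#define __bos(ptr, arg)   __builtin_object_size((ptr), (arg))

static void *my_alloc(size_t n) __alloc_size(1);	/* hypothetical allocator */
static void *my_alloc(size_t n) { return malloc(n); }

int main(void)
{
	char *p = my_alloc(16);

	/* with -O2 the compiler can resolve this to 16; (size_t)-1 means unknown */
	printf("tracked object size: %zu\n", __bos(p, 0));
	free(p);
	return 0;
}
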
63061diff --git a/include/linux/compiler.h b/include/linux/compiler.h
63062index f430e41..38be90f 100644
63063--- a/include/linux/compiler.h
63064+++ b/include/linux/compiler.h
63065@@ -5,31 +5,62 @@
63066
63067 #ifdef __CHECKER__
63068 # define __user __attribute__((noderef, address_space(1)))
63069+# define __force_user __force __user
63070 # define __kernel __attribute__((address_space(0)))
63071+# define __force_kernel __force __kernel
63072 # define __safe __attribute__((safe))
63073 # define __force __attribute__((force))
63074 # define __nocast __attribute__((nocast))
63075 # define __iomem __attribute__((noderef, address_space(2)))
63076+# define __force_iomem __force __iomem
63077 # define __acquires(x) __attribute__((context(x,0,1)))
63078 # define __releases(x) __attribute__((context(x,1,0)))
63079 # define __acquire(x) __context__(x,1)
63080 # define __release(x) __context__(x,-1)
63081 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
63082 # define __percpu __attribute__((noderef, address_space(3)))
63083+# define __force_percpu __force __percpu
63084 #ifdef CONFIG_SPARSE_RCU_POINTER
63085 # define __rcu __attribute__((noderef, address_space(4)))
63086+# define __force_rcu __force __rcu
63087 #else
63088 # define __rcu
63089+# define __force_rcu
63090 #endif
63091 extern void __chk_user_ptr(const volatile void __user *);
63092 extern void __chk_io_ptr(const volatile void __iomem *);
63093+#elif defined(CHECKER_PLUGIN)
63094+//# define __user
63095+//# define __force_user
63096+//# define __kernel
63097+//# define __force_kernel
63098+# define __safe
63099+# define __force
63100+# define __nocast
63101+# define __iomem
63102+# define __force_iomem
63103+# define __chk_user_ptr(x) (void)0
63104+# define __chk_io_ptr(x) (void)0
63105+# define __builtin_warning(x, y...) (1)
63106+# define __acquires(x)
63107+# define __releases(x)
63108+# define __acquire(x) (void)0
63109+# define __release(x) (void)0
63110+# define __cond_lock(x,c) (c)
63111+# define __percpu
63112+# define __force_percpu
63113+# define __rcu
63114+# define __force_rcu
63115 #else
63116 # define __user
63117+# define __force_user
63118 # define __kernel
63119+# define __force_kernel
63120 # define __safe
63121 # define __force
63122 # define __nocast
63123 # define __iomem
63124+# define __force_iomem
63125 # define __chk_user_ptr(x) (void)0
63126 # define __chk_io_ptr(x) (void)0
63127 # define __builtin_warning(x, y...) (1)
63128@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
63129 # define __release(x) (void)0
63130 # define __cond_lock(x,c) (c)
63131 # define __percpu
63132+# define __force_percpu
63133 # define __rcu
63134+# define __force_rcu
63135 #endif
63136
63137 #ifdef __KERNEL__
63138@@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
63139 # define __attribute_const__ /* unimplemented */
63140 #endif
63141
63142+#ifndef __no_const
63143+# define __no_const
63144+#endif
63145+
63146+#ifndef __do_const
63147+# define __do_const
63148+#endif
63149+
63150+#ifndef __size_overflow
63151+# define __size_overflow(...)
63152+#endif
63153+
63154+#ifndef __intentional_overflow
63155+# define __intentional_overflow(...)
63156+#endif
63157+
63158+#ifndef __latent_entropy
63159+# define __latent_entropy
63160+#endif
63161+
63162 /*
63163 * Tell gcc if a function is cold. The compiler will assume any path
63164 * directly leading to the call is unlikely.
63165@@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
63166 #define __cold
63167 #endif
63168
63169+#ifndef __alloc_size
63170+#define __alloc_size(...)
63171+#endif
63172+
63173+#ifndef __bos
63174+#define __bos(ptr, arg)
63175+#endif
63176+
63177+#ifndef __bos0
63178+#define __bos0(ptr)
63179+#endif
63180+
63181+#ifndef __bos1
63182+#define __bos1(ptr)
63183+#endif
63184+
63185 /* Simple shorthand for a section definition */
63186 #ifndef __section
63187 # define __section(S) __attribute__ ((__section__(#S)))
63188@@ -312,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
63189 * use is to mediate communication between process-level code and irq/NMI
63190 * handlers, all running on the same CPU.
63191 */
63192-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
63193+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
63194+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
63195
63196 #endif /* __LINUX_COMPILER_H */
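
Adding const to ACCESS_ONCE() turns any accidental store through it into a compile error, which is what lets the constify machinery keep most users read-only; writes must now be spelled ACCESS_ONCE_RW(). A standalone sketch (typeof is GNU C):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int flag;

int main(void)
{
	ACCESS_ONCE_RW(flag) = 1;	/* ACCESS_ONCE(flag) = 1 would not compile */
	printf("flag=%d\n", ACCESS_ONCE(flag));
	return 0;
}
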
63197diff --git a/include/linux/cred.h b/include/linux/cred.h
63198index ebbed2c..908cc2c 100644
63199--- a/include/linux/cred.h
63200+++ b/include/linux/cred.h
63201@@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
63202 static inline void validate_process_creds(void)
63203 {
63204 }
63205+static inline void validate_task_creds(struct task_struct *task)
63206+{
63207+}
63208 #endif
63209
63210 /**
63211diff --git a/include/linux/crypto.h b/include/linux/crypto.h
63212index b92eadf..b4ecdc1 100644
63213--- a/include/linux/crypto.h
63214+++ b/include/linux/crypto.h
63215@@ -373,7 +373,7 @@ struct cipher_tfm {
63216 const u8 *key, unsigned int keylen);
63217 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
63218 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
63219-};
63220+} __no_const;
63221
63222 struct hash_tfm {
63223 int (*init)(struct hash_desc *desc);
63224@@ -394,13 +394,13 @@ struct compress_tfm {
63225 int (*cot_decompress)(struct crypto_tfm *tfm,
63226 const u8 *src, unsigned int slen,
63227 u8 *dst, unsigned int *dlen);
63228-};
63229+} __no_const;
63230
63231 struct rng_tfm {
63232 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
63233 unsigned int dlen);
63234 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
63235-};
63236+} __no_const;
63237
63238 #define crt_ablkcipher crt_u.ablkcipher
63239 #define crt_aead crt_u.aead
63240diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
63241index 7925bf0..d5143d2 100644
63242--- a/include/linux/decompress/mm.h
63243+++ b/include/linux/decompress/mm.h
63244@@ -77,7 +77,7 @@ static void free(void *where)
63245 * warnings when not needed (indeed large_malloc / large_free are not
63246 * needed by inflate */
63247
63248-#define malloc(a) kmalloc(a, GFP_KERNEL)
63249+#define malloc(a) kmalloc((a), GFP_KERNEL)
63250 #define free(a) kfree(a)
63251
63252 #define large_malloc(a) vmalloc(a)
63253diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
63254index 94af418..b1ca7a2 100644
63255--- a/include/linux/dma-mapping.h
63256+++ b/include/linux/dma-mapping.h
63257@@ -54,7 +54,7 @@ struct dma_map_ops {
63258 u64 (*get_required_mask)(struct device *dev);
63259 #endif
63260 int is_phys;
63261-};
63262+} __do_const;
63263
63264 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
63265
63266diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
63267index d3201e4..8281e63 100644
63268--- a/include/linux/dmaengine.h
63269+++ b/include/linux/dmaengine.h
63270@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
63271 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
63272 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
63273
63274-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63275+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
63276 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
63277-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63278+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
63279 struct dma_pinned_list *pinned_list, struct page *page,
63280 unsigned int offset, size_t len);
63281
63282diff --git a/include/linux/efi.h b/include/linux/efi.h
63283index b424f64..fd36c1b 100644
63284--- a/include/linux/efi.h
63285+++ b/include/linux/efi.h
63286@@ -656,6 +656,7 @@ struct efivar_operations {
63287 efi_get_next_variable_t *get_next_variable;
63288 efi_set_variable_t *set_variable;
63289 };
63290+typedef struct efivar_operations __no_const efivar_operations_no_const;
63291
63292 struct efivars {
63293 /*
63294diff --git a/include/linux/elf.h b/include/linux/elf.h
63295index 8c9048e..16a4665 100644
63296--- a/include/linux/elf.h
63297+++ b/include/linux/elf.h
63298@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
63299 #define elf_note elf32_note
63300 #define elf_addr_t Elf32_Off
63301 #define Elf_Half Elf32_Half
63302+#define elf_dyn Elf32_Dyn
63303
63304 #else
63305
63306@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
63307 #define elf_note elf64_note
63308 #define elf_addr_t Elf64_Off
63309 #define Elf_Half Elf64_Half
63310+#define elf_dyn Elf64_Dyn
63311
63312 #endif
63313
63314diff --git a/include/linux/filter.h b/include/linux/filter.h
63315index 24d251f..7afb83d 100644
63316--- a/include/linux/filter.h
63317+++ b/include/linux/filter.h
63318@@ -20,6 +20,7 @@ struct compat_sock_fprog {
63319
63320 struct sk_buff;
63321 struct sock;
63322+struct bpf_jit_work;
63323
63324 struct sk_filter
63325 {
63326@@ -27,6 +28,9 @@ struct sk_filter
63327 unsigned int len; /* Number of filter blocks */
63328 unsigned int (*bpf_func)(const struct sk_buff *skb,
63329 const struct sock_filter *filter);
63330+#ifdef CONFIG_BPF_JIT
63331+ struct bpf_jit_work *work;
63332+#endif
63333 struct rcu_head rcu;
63334 struct sock_filter insns[0];
63335 };
63336diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
63337index 3044254..9767f41 100644
63338--- a/include/linux/frontswap.h
63339+++ b/include/linux/frontswap.h
63340@@ -11,7 +11,7 @@ struct frontswap_ops {
63341 int (*load)(unsigned, pgoff_t, struct page *);
63342 void (*invalidate_page)(unsigned, pgoff_t);
63343 void (*invalidate_area)(unsigned);
63344-};
63345+} __no_const;
63346
63347 extern bool frontswap_enabled;
63348 extern struct frontswap_ops
63349diff --git a/include/linux/fs.h b/include/linux/fs.h
63350index 75fe9a1..8417cac 100644
63351--- a/include/linux/fs.h
63352+++ b/include/linux/fs.h
63353@@ -1543,7 +1543,8 @@ struct file_operations {
63354 int (*setlease)(struct file *, long, struct file_lock **);
63355 long (*fallocate)(struct file *file, int mode, loff_t offset,
63356 loff_t len);
63357-};
63358+} __do_const;
63359+typedef struct file_operations __no_const file_operations_no_const;
63360
63361 struct inode_operations {
63362 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
63363@@ -2667,4 +2668,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
63364 inode->i_flags |= S_NOSEC;
63365 }
63366
63367+static inline bool is_sidechannel_device(const struct inode *inode)
63368+{
63369+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
63370+ umode_t mode = inode->i_mode;
63371+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
63372+#else
63373+ return false;
63374+#endif
63375+}
63376+
63377 #endif /* _LINUX_FS_H */
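
is_sidechannel_device() flags character or block devices that are world-readable or world-writable (ttys, /dev/ptmx, and the like); with GRKERNSEC_DEVICE_SIDECHANNEL the fsnotify hooks further below skip access/modify events on them, so unprivileged watchers cannot mine the event stream for timing information. A userspace restatement of the predicate:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

static bool is_sidechannel_device(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
	struct stat st;

	if (stat("/dev/ptmx", &st) == 0)
		printf("/dev/ptmx is sidechannel: %d\n",
		       is_sidechannel_device(st.st_mode));
	return 0;
}
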
63378diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
63379index 003dc0f..3c4ea97 100644
63380--- a/include/linux/fs_struct.h
63381+++ b/include/linux/fs_struct.h
63382@@ -6,7 +6,7 @@
63383 #include <linux/seqlock.h>
63384
63385 struct fs_struct {
63386- int users;
63387+ atomic_t users;
63388 spinlock_t lock;
63389 seqcount_t seq;
63390 int umask;
63391diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
63392index ce31408..b1ad003 100644
63393--- a/include/linux/fscache-cache.h
63394+++ b/include/linux/fscache-cache.h
63395@@ -102,7 +102,7 @@ struct fscache_operation {
63396 fscache_operation_release_t release;
63397 };
63398
63399-extern atomic_t fscache_op_debug_id;
63400+extern atomic_unchecked_t fscache_op_debug_id;
63401 extern void fscache_op_work_func(struct work_struct *work);
63402
63403 extern void fscache_enqueue_operation(struct fscache_operation *);
63404@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
63405 {
63406 INIT_WORK(&op->work, fscache_op_work_func);
63407 atomic_set(&op->usage, 1);
63408- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63409+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63410 op->processor = processor;
63411 op->release = release;
63412 INIT_LIST_HEAD(&op->pend_link);
63413diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
63414index 0fbfb46..508eb0d 100644
63415--- a/include/linux/fsnotify.h
63416+++ b/include/linux/fsnotify.h
63417@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
63418 struct inode *inode = path->dentry->d_inode;
63419 __u32 mask = FS_ACCESS;
63420
63421+ if (is_sidechannel_device(inode))
63422+ return;
63423+
63424 if (S_ISDIR(inode->i_mode))
63425 mask |= FS_ISDIR;
63426
63427@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
63428 struct inode *inode = path->dentry->d_inode;
63429 __u32 mask = FS_MODIFY;
63430
63431+ if (is_sidechannel_device(inode))
63432+ return;
63433+
63434 if (S_ISDIR(inode->i_mode))
63435 mask |= FS_ISDIR;
63436
63437@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
63438 */
63439 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
63440 {
63441- return kstrdup(name, GFP_KERNEL);
63442+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
63443 }
63444
63445 /*
63446diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
63447index 642928c..93afe6a 100644
63448--- a/include/linux/ftrace_event.h
63449+++ b/include/linux/ftrace_event.h
63450@@ -266,7 +266,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
63451 extern int trace_add_event_call(struct ftrace_event_call *call);
63452 extern void trace_remove_event_call(struct ftrace_event_call *call);
63453
63454-#define is_signed_type(type) (((type)(-1)) < 0)
63455+#define is_signed_type(type) (((type)(-1)) < (type)1)
63456
63457 int trace_set_clr_event(const char *system, const char *event, int set);
63458
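
The is_signed_type() tweak compares against (type)1 instead of 0: the results are unchanged for signed, unsigned, and bool types, but unsigned types no longer trigger "comparison is always false" warnings. A standalone check:

#include <stdbool.h>
#include <stdio.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
	printf("int=%d unsigned=%d bool=%d\n",
	       is_signed_type(int),		/* 1: (int)-1 < 1 */
	       is_signed_type(unsigned),	/* 0: UINT_MAX < 1u */
	       is_signed_type(bool));		/* 0: true < true is false */
	return 0;
}
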
63459diff --git a/include/linux/genhd.h b/include/linux/genhd.h
63460index 4f440b3..342233a 100644
63461--- a/include/linux/genhd.h
63462+++ b/include/linux/genhd.h
63463@@ -190,7 +190,7 @@ struct gendisk {
63464 struct kobject *slave_dir;
63465
63466 struct timer_rand_state *random;
63467- atomic_t sync_io; /* RAID */
63468+ atomic_unchecked_t sync_io; /* RAID */
63469 struct disk_events *ev;
63470 #ifdef CONFIG_BLK_DEV_INTEGRITY
63471 struct blk_integrity *integrity;
63472diff --git a/include/linux/gfp.h b/include/linux/gfp.h
63473index d0a7967..63c4c47 100644
63474--- a/include/linux/gfp.h
63475+++ b/include/linux/gfp.h
63476@@ -35,6 +35,12 @@ struct vm_area_struct;
63477 #define ___GFP_OTHER_NODE 0x800000u
63478 #define ___GFP_WRITE 0x1000000u
63479
63480+#ifdef CONFIG_PAX_USERCOPY_SLABS
63481+#define ___GFP_USERCOPY 0x2000000u
63482+#else
63483+#define ___GFP_USERCOPY 0
63484+#endif
63485+
63486 /*
63487 * GFP bitmasks..
63488 *
63489@@ -89,6 +95,7 @@ struct vm_area_struct;
63490 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
63491 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
63492 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
63493+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY) /* Allocator intends to copy page to/from userland */
63494
63495 /*
63496 * This may seem redundant, but it's a way of annotating false positives vs.
63497@@ -96,7 +103,7 @@ struct vm_area_struct;
63498 */
63499 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
63500
63501-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
63502+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
63503 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
63504
63505 /* This equals 0, but use constants in case they ever change */
63506@@ -150,6 +157,8 @@ struct vm_area_struct;
63507 /* 4GB DMA on some platforms */
63508 #define GFP_DMA32 __GFP_DMA32
63509
63510+#define GFP_USERCOPY __GFP_USERCOPY
63511+
63512 /* Convert GFP flags to their corresponding migrate type */
63513 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
63514 {
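
___GFP_USERCOPY claims bit 25, so __GFP_BITS_SHIFT has to grow from 25 to 26 for __GFP_BITS_MASK to keep covering every flag; slab caches created with GFP_USERCOPY are the ones PAX_USERCOPY will permit to be copied to or from userland. A quick arithmetic check of the bit bookkeeping:

#include <stdio.h>

#define ___GFP_USERCOPY   0x2000000u
#define __GFP_BITS_SHIFT  26
#define __GFP_BITS_MASK   ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
	printf("USERCOPY occupies bit %d\n", __builtin_ctz(___GFP_USERCOPY));	/* 25 */
	printf("mask covers it: %s\n",
	       (___GFP_USERCOPY & __GFP_BITS_MASK) ? "yes" : "no");
	return 0;
}
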
63515diff --git a/include/linux/gracl.h b/include/linux/gracl.h
63516new file mode 100644
63517index 0000000..ebe6d72
63518--- /dev/null
63519+++ b/include/linux/gracl.h
63520@@ -0,0 +1,319 @@
63521+#ifndef GR_ACL_H
63522+#define GR_ACL_H
63523+
63524+#include <linux/grdefs.h>
63525+#include <linux/resource.h>
63526+#include <linux/capability.h>
63527+#include <linux/dcache.h>
63528+#include <asm/resource.h>
63529+
63530+/* Major status information */
63531+
63532+#define GR_VERSION "grsecurity 2.9.1"
63533+#define GRSECURITY_VERSION 0x2901
63534+
63535+enum {
63536+ GR_SHUTDOWN = 0,
63537+ GR_ENABLE = 1,
63538+ GR_SPROLE = 2,
63539+ GR_RELOAD = 3,
63540+ GR_SEGVMOD = 4,
63541+ GR_STATUS = 5,
63542+ GR_UNSPROLE = 6,
63543+ GR_PASSSET = 7,
63544+ GR_SPROLEPAM = 8,
63545+};
63546+
63547+/* Password setup definitions
63548+ * kernel/grhash.c */
63549+enum {
63550+ GR_PW_LEN = 128,
63551+ GR_SALT_LEN = 16,
63552+ GR_SHA_LEN = 32,
63553+};
63554+
63555+enum {
63556+ GR_SPROLE_LEN = 64,
63557+};
63558+
63559+enum {
63560+ GR_NO_GLOB = 0,
63561+ GR_REG_GLOB,
63562+ GR_CREATE_GLOB
63563+};
63564+
63565+#define GR_NLIMITS 32
63566+
63567+/* Begin Data Structures */
63568+
63569+struct sprole_pw {
63570+ unsigned char *rolename;
63571+ unsigned char salt[GR_SALT_LEN];
63572+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
63573+};
63574+
63575+struct name_entry {
63576+ __u32 key;
63577+ ino_t inode;
63578+ dev_t device;
63579+ char *name;
63580+ __u16 len;
63581+ __u8 deleted;
63582+ struct name_entry *prev;
63583+ struct name_entry *next;
63584+};
63585+
63586+struct inodev_entry {
63587+ struct name_entry *nentry;
63588+ struct inodev_entry *prev;
63589+ struct inodev_entry *next;
63590+};
63591+
63592+struct acl_role_db {
63593+ struct acl_role_label **r_hash;
63594+ __u32 r_size;
63595+};
63596+
63597+struct inodev_db {
63598+ struct inodev_entry **i_hash;
63599+ __u32 i_size;
63600+};
63601+
63602+struct name_db {
63603+ struct name_entry **n_hash;
63604+ __u32 n_size;
63605+};
63606+
63607+struct crash_uid {
63608+ uid_t uid;
63609+ unsigned long expires;
63610+};
63611+
63612+struct gr_hash_struct {
63613+ void **table;
63614+ void **nametable;
63615+ void *first;
63616+ __u32 table_size;
63617+ __u32 used_size;
63618+ int type;
63619+};
63620+
63621+/* Userspace Grsecurity ACL data structures */
63622+
63623+struct acl_subject_label {
63624+ char *filename;
63625+ ino_t inode;
63626+ dev_t device;
63627+ __u32 mode;
63628+ kernel_cap_t cap_mask;
63629+ kernel_cap_t cap_lower;
63630+ kernel_cap_t cap_invert_audit;
63631+
63632+ struct rlimit res[GR_NLIMITS];
63633+ __u32 resmask;
63634+
63635+ __u8 user_trans_type;
63636+ __u8 group_trans_type;
63637+ uid_t *user_transitions;
63638+ gid_t *group_transitions;
63639+ __u16 user_trans_num;
63640+ __u16 group_trans_num;
63641+
63642+ __u32 sock_families[2];
63643+ __u32 ip_proto[8];
63644+ __u32 ip_type;
63645+ struct acl_ip_label **ips;
63646+ __u32 ip_num;
63647+ __u32 inaddr_any_override;
63648+
63649+ __u32 crashes;
63650+ unsigned long expires;
63651+
63652+ struct acl_subject_label *parent_subject;
63653+ struct gr_hash_struct *hash;
63654+ struct acl_subject_label *prev;
63655+ struct acl_subject_label *next;
63656+
63657+ struct acl_object_label **obj_hash;
63658+ __u32 obj_hash_size;
63659+ __u16 pax_flags;
63660+};
63661+
63662+struct role_allowed_ip {
63663+ __u32 addr;
63664+ __u32 netmask;
63665+
63666+ struct role_allowed_ip *prev;
63667+ struct role_allowed_ip *next;
63668+};
63669+
63670+struct role_transition {
63671+ char *rolename;
63672+
63673+ struct role_transition *prev;
63674+ struct role_transition *next;
63675+};
63676+
63677+struct acl_role_label {
63678+ char *rolename;
63679+ uid_t uidgid;
63680+ __u16 roletype;
63681+
63682+ __u16 auth_attempts;
63683+ unsigned long expires;
63684+
63685+ struct acl_subject_label *root_label;
63686+ struct gr_hash_struct *hash;
63687+
63688+ struct acl_role_label *prev;
63689+ struct acl_role_label *next;
63690+
63691+ struct role_transition *transitions;
63692+ struct role_allowed_ip *allowed_ips;
63693+ uid_t *domain_children;
63694+ __u16 domain_child_num;
63695+
63696+ umode_t umask;
63697+
63698+ struct acl_subject_label **subj_hash;
63699+ __u32 subj_hash_size;
63700+};
63701+
63702+struct user_acl_role_db {
63703+ struct acl_role_label **r_table;
63704+ __u32 num_pointers; /* Number of allocations to track */
63705+ __u32 num_roles; /* Number of roles */
63706+ __u32 num_domain_children; /* Number of domain children */
63707+ __u32 num_subjects; /* Number of subjects */
63708+ __u32 num_objects; /* Number of objects */
63709+};
63710+
63711+struct acl_object_label {
63712+ char *filename;
63713+ ino_t inode;
63714+ dev_t device;
63715+ __u32 mode;
63716+
63717+ struct acl_subject_label *nested;
63718+ struct acl_object_label *globbed;
63719+
63720+ /* next two structures not used */
63721+
63722+ struct acl_object_label *prev;
63723+ struct acl_object_label *next;
63724+};
63725+
63726+struct acl_ip_label {
63727+ char *iface;
63728+ __u32 addr;
63729+ __u32 netmask;
63730+ __u16 low, high;
63731+ __u8 mode;
63732+ __u32 type;
63733+ __u32 proto[8];
63734+
63735+ /* next two structures not used */
63736+
63737+ struct acl_ip_label *prev;
63738+ struct acl_ip_label *next;
63739+};
63740+
63741+struct gr_arg {
63742+ struct user_acl_role_db role_db;
63743+ unsigned char pw[GR_PW_LEN];
63744+ unsigned char salt[GR_SALT_LEN];
63745+ unsigned char sum[GR_SHA_LEN];
63746+ unsigned char sp_role[GR_SPROLE_LEN];
63747+ struct sprole_pw *sprole_pws;
63748+ dev_t segv_device;
63749+ ino_t segv_inode;
63750+ uid_t segv_uid;
63751+ __u16 num_sprole_pws;
63752+ __u16 mode;
63753+};
63754+
63755+struct gr_arg_wrapper {
63756+ struct gr_arg *arg;
63757+ __u32 version;
63758+ __u32 size;
63759+};
63760+
63761+struct subject_map {
63762+ struct acl_subject_label *user;
63763+ struct acl_subject_label *kernel;
63764+ struct subject_map *prev;
63765+ struct subject_map *next;
63766+};
63767+
63768+struct acl_subj_map_db {
63769+ struct subject_map **s_hash;
63770+ __u32 s_size;
63771+};
63772+
63773+/* End Data Structures Section */
63774+
63775+/* Hash functions generated by empirical testing by Brad Spengler
63776+ Makes good use of the low bits of the inode. Generally 0-1 times
63777+ in loop for successful match. 0-3 for unsuccessful match.
63778+   Shift/add algorithm with modulus of table size and an XOR. */
63779+
63780+static __inline__ unsigned int
63781+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
63782+{
63783+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
63784+}
63785+
63786+static __inline__ unsigned int
63787+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
63788+{
63789+ return ((const unsigned long)userp % sz);
63790+}
63791+
63792+static __inline__ unsigned int
63793+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
63794+{
63795+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
63796+}
63797+
63798+static __inline__ unsigned int
63799+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
63800+{
63801+ return full_name_hash((const unsigned char *)name, len) % sz;
63802+}
63803+
63804+#define FOR_EACH_ROLE_START(role) \
63805+ role = role_list; \
63806+ while (role) {
63807+
63808+#define FOR_EACH_ROLE_END(role) \
63809+ role = role->prev; \
63810+ }
63811+
63812+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
63813+ subj = NULL; \
63814+ iter = 0; \
63815+ while (iter < role->subj_hash_size) { \
63816+ if (subj == NULL) \
63817+ subj = role->subj_hash[iter]; \
63818+ if (subj == NULL) { \
63819+ iter++; \
63820+ continue; \
63821+ }
63822+
63823+#define FOR_EACH_SUBJECT_END(subj,iter) \
63824+ subj = subj->next; \
63825+ if (subj == NULL) \
63826+ iter++; \
63827+ }
63828+
63829+
63830+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
63831+ subj = role->hash->first; \
63832+ while (subj != NULL) {
63833+
63834+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
63835+ subj = subj->next; \
63836+ }
63837+
63838+#endif
63839+
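
The inline hash helpers above drive the role/subject/object lookup tables; gr_fhash() mixes inode and device numbers with shifts and an XOR so that the low inode bits spread across buckets. A standalone copy for experimentation (plain unsigned long in place of ino_t/dev_t; device 2049 and table size 256 are arbitrary demo values):

#include <stdio.h>

static unsigned int gr_fhash(unsigned long ino, unsigned long dev, unsigned int sz)
{
	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	/* neighbouring inodes on the same device land in distinct buckets */
	for (unsigned long ino = 100; ino < 105; ino++)
		printf("ino %lu -> bucket %u\n", ino, gr_fhash(ino, 2049, 256));
	return 0;
}
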
63840diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
63841new file mode 100644
63842index 0000000..323ecf2
63843--- /dev/null
63844+++ b/include/linux/gralloc.h
63845@@ -0,0 +1,9 @@
63846+#ifndef __GRALLOC_H
63847+#define __GRALLOC_H
63848+
63849+void acl_free_all(void);
63850+int acl_alloc_stack_init(unsigned long size);
63851+void *acl_alloc(unsigned long len);
63852+void *acl_alloc_num(unsigned long num, unsigned long len);
63853+
63854+#endif
63855diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
63856new file mode 100644
63857index 0000000..be66033
63858--- /dev/null
63859+++ b/include/linux/grdefs.h
63860@@ -0,0 +1,140 @@
63861+#ifndef GRDEFS_H
63862+#define GRDEFS_H
63863+
63864+/* Begin grsecurity status declarations */
63865+
63866+enum {
63867+ GR_READY = 0x01,
63868+ GR_STATUS_INIT = 0x00 // disabled state
63869+};
63870+
63871+/* Begin ACL declarations */
63872+
63873+/* Role flags */
63874+
63875+enum {
63876+ GR_ROLE_USER = 0x0001,
63877+ GR_ROLE_GROUP = 0x0002,
63878+ GR_ROLE_DEFAULT = 0x0004,
63879+ GR_ROLE_SPECIAL = 0x0008,
63880+ GR_ROLE_AUTH = 0x0010,
63881+ GR_ROLE_NOPW = 0x0020,
63882+ GR_ROLE_GOD = 0x0040,
63883+ GR_ROLE_LEARN = 0x0080,
63884+ GR_ROLE_TPE = 0x0100,
63885+ GR_ROLE_DOMAIN = 0x0200,
63886+ GR_ROLE_PAM = 0x0400,
63887+ GR_ROLE_PERSIST = 0x0800
63888+};
63889+
63890+/* ACL Subject and Object mode flags */
63891+enum {
63892+ GR_DELETED = 0x80000000
63893+};
63894+
63895+/* ACL Object-only mode flags */
63896+enum {
63897+ GR_READ = 0x00000001,
63898+ GR_APPEND = 0x00000002,
63899+ GR_WRITE = 0x00000004,
63900+ GR_EXEC = 0x00000008,
63901+ GR_FIND = 0x00000010,
63902+ GR_INHERIT = 0x00000020,
63903+ GR_SETID = 0x00000040,
63904+ GR_CREATE = 0x00000080,
63905+ GR_DELETE = 0x00000100,
63906+ GR_LINK = 0x00000200,
63907+ GR_AUDIT_READ = 0x00000400,
63908+ GR_AUDIT_APPEND = 0x00000800,
63909+ GR_AUDIT_WRITE = 0x00001000,
63910+ GR_AUDIT_EXEC = 0x00002000,
63911+ GR_AUDIT_FIND = 0x00004000,
63912+ GR_AUDIT_INHERIT= 0x00008000,
63913+ GR_AUDIT_SETID = 0x00010000,
63914+ GR_AUDIT_CREATE = 0x00020000,
63915+ GR_AUDIT_DELETE = 0x00040000,
63916+ GR_AUDIT_LINK = 0x00080000,
63917+ GR_PTRACERD = 0x00100000,
63918+ GR_NOPTRACE = 0x00200000,
63919+ GR_SUPPRESS = 0x00400000,
63920+ GR_NOLEARN = 0x00800000,
63921+ GR_INIT_TRANSFER= 0x01000000
63922+};
63923+
63924+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
63925+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
63926+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
63927+
63928+/* ACL subject-only mode flags */
63929+enum {
63930+ GR_KILL = 0x00000001,
63931+ GR_VIEW = 0x00000002,
63932+ GR_PROTECTED = 0x00000004,
63933+ GR_LEARN = 0x00000008,
63934+ GR_OVERRIDE = 0x00000010,
63935+ /* just a placeholder, this mode is only used in userspace */
63936+ GR_DUMMY = 0x00000020,
63937+ GR_PROTSHM = 0x00000040,
63938+ GR_KILLPROC = 0x00000080,
63939+ GR_KILLIPPROC = 0x00000100,
63940+ /* just a placeholder, this mode is only used in userspace */
63941+ GR_NOTROJAN = 0x00000200,
63942+ GR_PROTPROCFD = 0x00000400,
63943+ GR_PROCACCT = 0x00000800,
63944+ GR_RELAXPTRACE = 0x00001000,
63945+ //GR_NESTED = 0x00002000,
63946+ GR_INHERITLEARN = 0x00004000,
63947+ GR_PROCFIND = 0x00008000,
63948+ GR_POVERRIDE = 0x00010000,
63949+ GR_KERNELAUTH = 0x00020000,
63950+ GR_ATSECURE = 0x00040000,
63951+ GR_SHMEXEC = 0x00080000
63952+};
63953+
63954+enum {
63955+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
63956+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
63957+ GR_PAX_ENABLE_MPROTECT = 0x0004,
63958+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
63959+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
63960+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
63961+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
63962+ GR_PAX_DISABLE_MPROTECT = 0x0400,
63963+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
63964+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
63965+};
63966+
63967+enum {
63968+ GR_ID_USER = 0x01,
63969+ GR_ID_GROUP = 0x02,
63970+};
63971+
63972+enum {
63973+ GR_ID_ALLOW = 0x01,
63974+ GR_ID_DENY = 0x02,
63975+};
63976+
63977+#define GR_CRASH_RES 31
63978+#define GR_UIDTABLE_MAX 500
63979+
63980+/* begin resource learning section */
63981+enum {
63982+ GR_RLIM_CPU_BUMP = 60,
63983+ GR_RLIM_FSIZE_BUMP = 50000,
63984+ GR_RLIM_DATA_BUMP = 10000,
63985+ GR_RLIM_STACK_BUMP = 1000,
63986+ GR_RLIM_CORE_BUMP = 10000,
63987+ GR_RLIM_RSS_BUMP = 500000,
63988+ GR_RLIM_NPROC_BUMP = 1,
63989+ GR_RLIM_NOFILE_BUMP = 5,
63990+ GR_RLIM_MEMLOCK_BUMP = 50000,
63991+ GR_RLIM_AS_BUMP = 500000,
63992+ GR_RLIM_LOCKS_BUMP = 2,
63993+ GR_RLIM_SIGPENDING_BUMP = 5,
63994+ GR_RLIM_MSGQUEUE_BUMP = 10000,
63995+ GR_RLIM_NICE_BUMP = 1,
63996+ GR_RLIM_RTPRIO_BUMP = 1,
63997+ GR_RLIM_RTTIME_BUMP = 1000000
63998+};
63999+
64000+#endif
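
Note the regularity in the object-mode flags: each GR_AUDIT_* bit sits exactly 10 positions above its matching access bit (GR_READ 0x1 vs GR_AUDIT_READ 0x400, and so on), so a to_gr_audit()-style conversion can plausibly be a single shift. Checking the invariant:

#include <stdio.h>

enum {
	GR_READ         = 0x00000001,
	GR_APPEND       = 0x00000002,
	GR_WRITE        = 0x00000004,
	GR_AUDIT_READ   = 0x00000400,
	GR_AUDIT_APPEND = 0x00000800,
	GR_AUDIT_WRITE  = 0x00001000,
};

int main(void)
{
	printf("%d %d %d\n",
	       GR_AUDIT_READ   == GR_READ   << 10,
	       GR_AUDIT_APPEND == GR_APPEND << 10,
	       GR_AUDIT_WRITE  == GR_WRITE  << 10);	/* 1 1 1 */
	return 0;
}
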
64001diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
64002new file mode 100644
64003index 0000000..baa6e96
64004--- /dev/null
64005+++ b/include/linux/grinternal.h
64006@@ -0,0 +1,215 @@
64007+#ifndef __GRINTERNAL_H
64008+#define __GRINTERNAL_H
64009+
64010+#ifdef CONFIG_GRKERNSEC
64011+
64012+#include <linux/fs.h>
64013+#include <linux/mnt_namespace.h>
64014+#include <linux/nsproxy.h>
64015+#include <linux/gracl.h>
64016+#include <linux/grdefs.h>
64017+#include <linux/grmsg.h>
64018+
64019+void gr_add_learn_entry(const char *fmt, ...)
64020+ __attribute__ ((format (printf, 1, 2)));
64021+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
64022+ const struct vfsmount *mnt);
64023+__u32 gr_check_create(const struct dentry *new_dentry,
64024+ const struct dentry *parent,
64025+ const struct vfsmount *mnt, const __u32 mode);
64026+int gr_check_protected_task(const struct task_struct *task);
64027+__u32 to_gr_audit(const __u32 reqmode);
64028+int gr_set_acls(const int type);
64029+int gr_apply_subject_to_task(struct task_struct *task);
64030+int gr_acl_is_enabled(void);
64031+char gr_roletype_to_char(void);
64032+
64033+void gr_handle_alertkill(struct task_struct *task);
64034+char *gr_to_filename(const struct dentry *dentry,
64035+ const struct vfsmount *mnt);
64036+char *gr_to_filename1(const struct dentry *dentry,
64037+ const struct vfsmount *mnt);
64038+char *gr_to_filename2(const struct dentry *dentry,
64039+ const struct vfsmount *mnt);
64040+char *gr_to_filename3(const struct dentry *dentry,
64041+ const struct vfsmount *mnt);
64042+
64043+extern int grsec_enable_ptrace_readexec;
64044+extern int grsec_enable_harden_ptrace;
64045+extern int grsec_enable_link;
64046+extern int grsec_enable_fifo;
64047+extern int grsec_enable_execve;
64048+extern int grsec_enable_shm;
64049+extern int grsec_enable_execlog;
64050+extern int grsec_enable_signal;
64051+extern int grsec_enable_audit_ptrace;
64052+extern int grsec_enable_forkfail;
64053+extern int grsec_enable_time;
64054+extern int grsec_enable_rofs;
64055+extern int grsec_enable_chroot_shmat;
64056+extern int grsec_enable_chroot_mount;
64057+extern int grsec_enable_chroot_double;
64058+extern int grsec_enable_chroot_pivot;
64059+extern int grsec_enable_chroot_chdir;
64060+extern int grsec_enable_chroot_chmod;
64061+extern int grsec_enable_chroot_mknod;
64062+extern int grsec_enable_chroot_fchdir;
64063+extern int grsec_enable_chroot_nice;
64064+extern int grsec_enable_chroot_execlog;
64065+extern int grsec_enable_chroot_caps;
64066+extern int grsec_enable_chroot_sysctl;
64067+extern int grsec_enable_chroot_unix;
64068+extern int grsec_enable_symlinkown;
64069+extern int grsec_symlinkown_gid;
64070+extern int grsec_enable_tpe;
64071+extern int grsec_tpe_gid;
64072+extern int grsec_enable_tpe_all;
64073+extern int grsec_enable_tpe_invert;
64074+extern int grsec_enable_socket_all;
64075+extern int grsec_socket_all_gid;
64076+extern int grsec_enable_socket_client;
64077+extern int grsec_socket_client_gid;
64078+extern int grsec_enable_socket_server;
64079+extern int grsec_socket_server_gid;
64080+extern int grsec_audit_gid;
64081+extern int grsec_enable_group;
64082+extern int grsec_enable_audit_textrel;
64083+extern int grsec_enable_log_rwxmaps;
64084+extern int grsec_enable_mount;
64085+extern int grsec_enable_chdir;
64086+extern int grsec_resource_logging;
64087+extern int grsec_enable_blackhole;
64088+extern int grsec_lastack_retries;
64089+extern int grsec_enable_brute;
64090+extern int grsec_lock;
64091+
64092+extern spinlock_t grsec_alert_lock;
64093+extern unsigned long grsec_alert_wtime;
64094+extern unsigned long grsec_alert_fyet;
64095+
64096+extern spinlock_t grsec_audit_lock;
64097+
64098+extern rwlock_t grsec_exec_file_lock;
64099+
64100+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
64101+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
64102+ (tsk)->exec_file->f_vfsmnt) : "/")
64103+
64104+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
64105+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
64106+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
64107+
64108+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
64109+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
64110+ (tsk)->exec_file->f_vfsmnt) : "/")
64111+
64112+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
64113+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
64114+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
64115+
64116+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
64117+
64118+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
64119+
64120+#define GR_CHROOT_CAPS {{ \
64121+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
64122+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
64123+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
64124+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
64125+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
64126+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
64127+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
64128+
64129+#define security_learn(normal_msg,args...) \
64130+({ \
64131+ read_lock(&grsec_exec_file_lock); \
64132+ gr_add_learn_entry(normal_msg "\n", ## args); \
64133+ read_unlock(&grsec_exec_file_lock); \
64134+})
64135+
64136+enum {
64137+ GR_DO_AUDIT,
64138+ GR_DONT_AUDIT,
64139+ /* used for non-audit messages for which we shouldn't kill the task */
64140+ GR_DONT_AUDIT_GOOD
64141+};
64142+
64143+enum {
64144+ GR_TTYSNIFF,
64145+ GR_RBAC,
64146+ GR_RBAC_STR,
64147+ GR_STR_RBAC,
64148+ GR_RBAC_MODE2,
64149+ GR_RBAC_MODE3,
64150+ GR_FILENAME,
64151+ GR_SYSCTL_HIDDEN,
64152+ GR_NOARGS,
64153+ GR_ONE_INT,
64154+ GR_ONE_INT_TWO_STR,
64155+ GR_ONE_STR,
64156+ GR_STR_INT,
64157+ GR_TWO_STR_INT,
64158+ GR_TWO_INT,
64159+ GR_TWO_U64,
64160+ GR_THREE_INT,
64161+ GR_FIVE_INT_TWO_STR,
64162+ GR_TWO_STR,
64163+ GR_THREE_STR,
64164+ GR_FOUR_STR,
64165+ GR_STR_FILENAME,
64166+ GR_FILENAME_STR,
64167+ GR_FILENAME_TWO_INT,
64168+ GR_FILENAME_TWO_INT_STR,
64169+ GR_TEXTREL,
64170+ GR_PTRACE,
64171+ GR_RESOURCE,
64172+ GR_CAP,
64173+ GR_SIG,
64174+ GR_SIG2,
64175+ GR_CRASH1,
64176+ GR_CRASH2,
64177+ GR_PSACCT,
64178+ GR_RWXMAP
64179+};
64180+
64181+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
64182+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
64183+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
64184+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
64185+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
64186+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
64187+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
64188+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
64189+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
64190+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
64191+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
64192+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
64193+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
64194+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
64195+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
64196+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
64197+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
64198+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
64199+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
64200+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
64201+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
64202+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
64203+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
64204+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
64205+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
64206+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
64207+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
64208+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
64209+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
64210+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
64211+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
64212+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
64213+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
64214+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
64215+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
64216+
64217+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
64218+
64219+#endif
64220+
64221+#endif
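
All the gr_log_* wrappers funnel into gr_log_varargs(), with the GR_* tag telling the logger how to pull its variadic arguments and which grmsg.h format to apply; the trailing "by " in those formats leaves room for the acting task's identity to be appended. A toy userspace analogue of the tag-dispatched scheme (enum names are stand-ins for GR_NOARGS and friends):

#include <stdarg.h>
#include <stdio.h>

enum { LOG_NOARGS, LOG_ONE_INT, LOG_TWO_STR };	/* stand-ins for GR_NOARGS etc. */

static void log_varargs(const char *fmt, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case LOG_NOARGS:
		fputs(fmt, stdout);
		break;
	case LOG_ONE_INT:
		printf(fmt, va_arg(ap, int));
		break;
	case LOG_TWO_STR: {
		const char *s1 = va_arg(ap, const char *);
		const char *s2 = va_arg(ap, const char *);

		printf(fmt, s1, s2);
		break;
	}
	}
	va_end(ap);
	putchar('\n');
}

int main(void)
{
	log_varargs("mount of %.256s to %.256s by ", LOG_TWO_STR, "/dev/sda1", "/mnt");
	return 0;
}
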
64222diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
64223new file mode 100644
64224index 0000000..2bd4c8d
64225--- /dev/null
64226+++ b/include/linux/grmsg.h
64227@@ -0,0 +1,111 @@
64228+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
64229+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
64230+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
64231+#define GR_STOPMOD_MSG "denied modification of module state by "
64232+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
64233+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
64234+#define GR_IOPERM_MSG "denied use of ioperm() by "
64235+#define GR_IOPL_MSG "denied use of iopl() by "
64236+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
64237+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
64238+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
64239+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
64240+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
64241+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
64242+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
64243+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
64244+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
64245+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
64246+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
64247+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
64248+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
64249+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
64250+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
64251+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
64252+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
64253+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
64254+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
64255+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
64256+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
64257+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
64258+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
64259+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
64260+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
64261+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
64262+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
64263+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
64264+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
64265+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
64266+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
64267+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
64268+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
64269+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
64270+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
64271+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
64272+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
64273+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
64274+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
64275+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
64276+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
64277+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
64278+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
64279+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
64280+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
64281+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
64282+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
64283+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
64284+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
64285+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
64286+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
64287+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
64288+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
64289+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
64290+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
64291+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
64292+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
64293+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
64294+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
64295+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
64296+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
64297+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
64298+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
64299+#define GR_FAILFORK_MSG "failed fork with errno %s by "
64300+#define GR_NICE_CHROOT_MSG "denied priority change by "
64301+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
64302+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
64303+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
64304+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
64305+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
64306+#define GR_TIME_MSG "time set by "
64307+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
64308+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
64309+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
64310+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
64311+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
64312+#define GR_BIND_MSG "denied bind() by "
64313+#define GR_CONNECT_MSG "denied connect() by "
64314+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
64315+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
64316+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
64317+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
64318+#define GR_CAP_ACL_MSG "use of %s denied for "
64319+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
64320+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
64321+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
64322+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
64323+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
64324+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
64325+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
64326+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
64327+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
64328+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
64329+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
64330+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
64331+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
64332+#define GR_VM86_MSG "denied use of vm86 by "
64333+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
64334+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
64335+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
64336+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
64337+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
64338+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
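The GR_*_MSG strings above are deliberately partial format strings: most stop mid-sentence (usually at "by ") so that one logging routine can append a uniform task descriptor to every event. A minimal userspace sketch of that composition; demo_log() and the "(%.16s:%d)" expansion of DEFAULTSECMSG are assumptions of this sketch, not taken from the patch:

#include <stdio.h>

#define DEMO_DEFAULTSECMSG "(%.16s:%d)"
#define DEMO_SIG_MSG "signal %d sent to " DEMO_DEFAULTSECMSG " by "

/* Hypothetical logger: event-specific prefix, then the acting task. */
static void demo_log(int sig, const char *victim, int vpid,
		     const char *actor, int apid)
{
	printf(DEMO_SIG_MSG, sig, victim, vpid);
	printf(DEMO_DEFAULTSECMSG "\n", actor, apid);
}

int main(void)
{
	demo_log(9, "sshd", 811, "bash", 1337);
	return 0;
}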
64339diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
64340new file mode 100644
64341index 0000000..c5e5913
64342--- /dev/null
64343+++ b/include/linux/grsecurity.h
64344@@ -0,0 +1,257 @@
64345+#ifndef GR_SECURITY_H
64346+#define GR_SECURITY_H
64347+#include <linux/fs.h>
64348+#include <linux/fs_struct.h>
64349+#include <linux/binfmts.h>
64350+#include <linux/gracl.h>
64351+
64352+/* notify of brain-dead configs */
64353+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64354+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
64355+#endif
64356+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
64357+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
64358+#endif
64359+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
64360+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
64361+#endif
64362+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
64363+#error "CONFIG_PAX enabled, but no PaX options are enabled."
64364+#endif
64365+
64366+#include <linux/compat.h>
64367+
64368+struct user_arg_ptr {
64369+#ifdef CONFIG_COMPAT
64370+ bool is_compat;
64371+#endif
64372+ union {
64373+ const char __user *const __user *native;
64374+#ifdef CONFIG_COMPAT
64375+ const compat_uptr_t __user *compat;
64376+#endif
64377+ } ptr;
64378+};
64379+
64380+void gr_handle_brute_attach(unsigned long mm_flags);
64381+void gr_handle_brute_check(void);
64382+void gr_handle_kernel_exploit(void);
64383+int gr_process_user_ban(void);
64384+
64385+char gr_roletype_to_char(void);
64386+
64387+int gr_acl_enable_at_secure(void);
64388+
64389+int gr_check_user_change(int real, int effective, int fs);
64390+int gr_check_group_change(int real, int effective, int fs);
64391+
64392+void gr_del_task_from_ip_table(struct task_struct *p);
64393+
64394+int gr_pid_is_chrooted(struct task_struct *p);
64395+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
64396+int gr_handle_chroot_nice(void);
64397+int gr_handle_chroot_sysctl(const int op);
64398+int gr_handle_chroot_setpriority(struct task_struct *p,
64399+ const int niceval);
64400+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
64401+int gr_handle_chroot_chroot(const struct dentry *dentry,
64402+ const struct vfsmount *mnt);
64403+void gr_handle_chroot_chdir(struct path *path);
64404+int gr_handle_chroot_chmod(const struct dentry *dentry,
64405+ const struct vfsmount *mnt, const int mode);
64406+int gr_handle_chroot_mknod(const struct dentry *dentry,
64407+ const struct vfsmount *mnt, const int mode);
64408+int gr_handle_chroot_mount(const struct dentry *dentry,
64409+ const struct vfsmount *mnt,
64410+ const char *dev_name);
64411+int gr_handle_chroot_pivot(void);
64412+int gr_handle_chroot_unix(const pid_t pid);
64413+
64414+int gr_handle_rawio(const struct inode *inode);
64415+
64416+void gr_handle_ioperm(void);
64417+void gr_handle_iopl(void);
64418+
64419+umode_t gr_acl_umask(void);
64420+
64421+int gr_tpe_allow(const struct file *file);
64422+
64423+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
64424+void gr_clear_chroot_entries(struct task_struct *task);
64425+
64426+void gr_log_forkfail(const int retval);
64427+void gr_log_timechange(void);
64428+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
64429+void gr_log_chdir(const struct dentry *dentry,
64430+ const struct vfsmount *mnt);
64431+void gr_log_chroot_exec(const struct dentry *dentry,
64432+ const struct vfsmount *mnt);
64433+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
64434+void gr_log_remount(const char *devname, const int retval);
64435+void gr_log_unmount(const char *devname, const int retval);
64436+void gr_log_mount(const char *from, const char *to, const int retval);
64437+void gr_log_textrel(struct vm_area_struct *vma);
64438+void gr_log_rwxmmap(struct file *file);
64439+void gr_log_rwxmprotect(struct file *file);
64440+
64441+int gr_handle_follow_link(const struct inode *parent,
64442+ const struct inode *inode,
64443+ const struct dentry *dentry,
64444+ const struct vfsmount *mnt);
64445+int gr_handle_fifo(const struct dentry *dentry,
64446+ const struct vfsmount *mnt,
64447+ const struct dentry *dir, const int flag,
64448+ const int acc_mode);
64449+int gr_handle_hardlink(const struct dentry *dentry,
64450+ const struct vfsmount *mnt,
64451+ struct inode *inode,
64452+ const int mode, const struct filename *to);
64453+
64454+int gr_is_capable(const int cap);
64455+int gr_is_capable_nolog(const int cap);
64456+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64457+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
64458+
64459+void gr_copy_label(struct task_struct *tsk);
64460+void gr_handle_crash(struct task_struct *task, const int sig);
64461+int gr_handle_signal(const struct task_struct *p, const int sig);
64462+int gr_check_crash_uid(const uid_t uid);
64463+int gr_check_protected_task(const struct task_struct *task);
64464+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
64465+int gr_acl_handle_mmap(const struct file *file,
64466+ const unsigned long prot);
64467+int gr_acl_handle_mprotect(const struct file *file,
64468+ const unsigned long prot);
64469+int gr_check_hidden_task(const struct task_struct *tsk);
64470+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
64471+ const struct vfsmount *mnt);
64472+__u32 gr_acl_handle_utime(const struct dentry *dentry,
64473+ const struct vfsmount *mnt);
64474+__u32 gr_acl_handle_access(const struct dentry *dentry,
64475+ const struct vfsmount *mnt, const int fmode);
64476+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
64477+ const struct vfsmount *mnt, umode_t *mode);
64478+__u32 gr_acl_handle_chown(const struct dentry *dentry,
64479+ const struct vfsmount *mnt);
64480+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
64481+ const struct vfsmount *mnt);
64482+int gr_handle_ptrace(struct task_struct *task, const long request);
64483+int gr_handle_proc_ptrace(struct task_struct *task);
64484+__u32 gr_acl_handle_execve(const struct dentry *dentry,
64485+ const struct vfsmount *mnt);
64486+int gr_check_crash_exec(const struct file *filp);
64487+int gr_acl_is_enabled(void);
64488+void gr_set_kernel_label(struct task_struct *task);
64489+void gr_set_role_label(struct task_struct *task, const uid_t uid,
64490+ const gid_t gid);
64491+int gr_set_proc_label(const struct dentry *dentry,
64492+ const struct vfsmount *mnt,
64493+ const int unsafe_flags);
64494+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
64495+ const struct vfsmount *mnt);
64496+__u32 gr_acl_handle_open(const struct dentry *dentry,
64497+ const struct vfsmount *mnt, int acc_mode);
64498+__u32 gr_acl_handle_creat(const struct dentry *dentry,
64499+ const struct dentry *p_dentry,
64500+ const struct vfsmount *p_mnt,
64501+ int open_flags, int acc_mode, const int imode);
64502+void gr_handle_create(const struct dentry *dentry,
64503+ const struct vfsmount *mnt);
64504+void gr_handle_proc_create(const struct dentry *dentry,
64505+ const struct inode *inode);
64506+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
64507+ const struct dentry *parent_dentry,
64508+ const struct vfsmount *parent_mnt,
64509+ const int mode);
64510+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
64511+ const struct dentry *parent_dentry,
64512+ const struct vfsmount *parent_mnt);
64513+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
64514+ const struct vfsmount *mnt);
64515+void gr_handle_delete(const ino_t ino, const dev_t dev);
64516+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
64517+ const struct vfsmount *mnt);
64518+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
64519+ const struct dentry *parent_dentry,
64520+ const struct vfsmount *parent_mnt,
64521+ const struct filename *from);
64522+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
64523+ const struct dentry *parent_dentry,
64524+ const struct vfsmount *parent_mnt,
64525+ const struct dentry *old_dentry,
64526+ const struct vfsmount *old_mnt, const struct filename *to);
64527+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
64528+int gr_acl_handle_rename(struct dentry *new_dentry,
64529+ struct dentry *parent_dentry,
64530+ const struct vfsmount *parent_mnt,
64531+ struct dentry *old_dentry,
64532+ struct inode *old_parent_inode,
64533+ struct vfsmount *old_mnt, const struct filename *newname);
64534+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64535+ struct dentry *old_dentry,
64536+ struct dentry *new_dentry,
64537+ struct vfsmount *mnt, const __u8 replace);
64538+__u32 gr_check_link(const struct dentry *new_dentry,
64539+ const struct dentry *parent_dentry,
64540+ const struct vfsmount *parent_mnt,
64541+ const struct dentry *old_dentry,
64542+ const struct vfsmount *old_mnt);
64543+int gr_acl_handle_filldir(const struct file *file, const char *name,
64544+ const unsigned int namelen, const ino_t ino);
64545+
64546+__u32 gr_acl_handle_unix(const struct dentry *dentry,
64547+ const struct vfsmount *mnt);
64548+void gr_acl_handle_exit(void);
64549+void gr_acl_handle_psacct(struct task_struct *task, const long code);
64550+int gr_acl_handle_procpidmem(const struct task_struct *task);
64551+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
64552+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
64553+void gr_audit_ptrace(struct task_struct *task);
64554+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
64555+void gr_put_exec_file(struct task_struct *task);
64556+
64557+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
64558+
64559+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
64560+extern void gr_learn_resource(const struct task_struct *task, const int res,
64561+ const unsigned long wanted, const int gt);
64562+#else
64563+static inline void gr_learn_resource(const struct task_struct *task, const int res,
64564+ const unsigned long wanted, const int gt)
64565+{
64566+}
64567+#endif
64568+
64569+#ifdef CONFIG_GRKERNSEC_RESLOG
64570+extern void gr_log_resource(const struct task_struct *task, const int res,
64571+ const unsigned long wanted, const int gt);
64572+#else
64573+static inline void gr_log_resource(const struct task_struct *task, const int res,
64574+ const unsigned long wanted, const int gt)
64575+{
64576+}
64577+#endif
64578+
64579+#ifdef CONFIG_GRKERNSEC
64580+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
64581+void gr_handle_vm86(void);
64582+void gr_handle_mem_readwrite(u64 from, u64 to);
64583+
64584+void gr_log_badprocpid(const char *entry);
64585+
64586+extern int grsec_enable_dmesg;
64587+extern int grsec_disable_privio;
64588+
64589+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64590+extern int grsec_proc_gid;
64591+#endif
64592+
64593+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64594+extern int grsec_enable_chroot_findtask;
64595+#endif
64596+#ifdef CONFIG_GRKERNSEC_SETXID
64597+extern int grsec_enable_setxid;
64598+#endif
64599+#endif
64600+
64601+#endif
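The gr_learn_resource()/gr_log_resource() declarations above follow a standard kernel idiom: a real extern when the feature is configured in, an empty static inline otherwise, so every call site stays unconditional and compiles to nothing in the disabled case. A self-contained sketch of the idiom; DEMO_RESLOG stands in for CONFIG_GRKERNSEC_RESLOG, and the enabled branch is made static here only so the example builds as one file:

#include <stdio.h>

#define DEMO_RESLOG 1

#if DEMO_RESLOG
static void demo_log_resource(int res, unsigned long wanted)
{
	printf("resource %d overstep, wanted %lu\n", res, wanted);
}
#else
/* Disabled: the empty inline is optimized out at every call site. */
static inline void demo_log_resource(int res, unsigned long wanted) { }
#endif

int main(void)
{
	demo_log_resource(7, 4096);	/* no #ifdef needed here */
	return 0;
}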
64602diff --git a/include/linux/grsock.h b/include/linux/grsock.h
64603new file mode 100644
64604index 0000000..e7ffaaf
64605--- /dev/null
64606+++ b/include/linux/grsock.h
64607@@ -0,0 +1,19 @@
64608+#ifndef __GRSOCK_H
64609+#define __GRSOCK_H
64610+
64611+extern void gr_attach_curr_ip(const struct sock *sk);
64612+extern int gr_handle_sock_all(const int family, const int type,
64613+ const int protocol);
64614+extern int gr_handle_sock_server(const struct sockaddr *sck);
64615+extern int gr_handle_sock_server_other(const struct sock *sck);
64616+extern int gr_handle_sock_client(const struct sockaddr *sck);
64617+extern int gr_search_connect(struct socket * sock,
64618+ struct sockaddr_in * addr);
64619+extern int gr_search_bind(struct socket * sock,
64620+ struct sockaddr_in * addr);
64621+extern int gr_search_listen(struct socket * sock);
64622+extern int gr_search_accept(struct socket * sock);
64623+extern int gr_search_socket(const int domain, const int type,
64624+ const int protocol);
64625+
64626+#endif
64627diff --git a/include/linux/highmem.h b/include/linux/highmem.h
64628index ef788b5..ac41b7b 100644
64629--- a/include/linux/highmem.h
64630+++ b/include/linux/highmem.h
64631@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
64632 kunmap_atomic(kaddr);
64633 }
64634
64635+static inline void sanitize_highpage(struct page *page)
64636+{
64637+ void *kaddr;
64638+ unsigned long flags;
64639+
64640+ local_irq_save(flags);
64641+ kaddr = kmap_atomic(page);
64642+ clear_page(kaddr);
64643+ kunmap_atomic(kaddr);
64644+ local_irq_restore(flags);
64645+}
64646+
64647 static inline void zero_user_segments(struct page *page,
64648 unsigned start1, unsigned end1,
64649 unsigned start2, unsigned end2)
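sanitize_highpage() above zeroes a page through a temporary atomic mapping with interrupts disabled, so the per-CPU mapping slot cannot be disturbed mid-clear. A rough userspace analog of the sanitizing idea, using a compiler barrier to keep the zeroing from being optimized away (an assumption of this sketch; the kernel version gets that for free from clear_page()):

#include <stdio.h>
#include <string.h>

static void demo_sanitize(void *buf, size_t len)
{
	memset(buf, 0, len);
	/* Barrier: keep the compiler from dropping the memset as a
	 * dead store when buf is about to go out of scope. */
	__asm__ __volatile__("" : : "r"(buf) : "memory");
}

int main(void)
{
	char secret[32] = "hunter2";

	demo_sanitize(secret, sizeof(secret));
	printf("%d\n", secret[0]);	/* 0 */
	return 0;
}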
64650diff --git a/include/linux/i2c.h b/include/linux/i2c.h
64651index 800de22..7a2fa46 100644
64652--- a/include/linux/i2c.h
64653+++ b/include/linux/i2c.h
64654@@ -367,6 +367,7 @@ struct i2c_algorithm {
64655 /* To determine what the adapter supports */
64656 u32 (*functionality) (struct i2c_adapter *);
64657 };
64658+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
64659
64660 /*
64661 * i2c_adapter is the structure used to identify a physical i2c bus along
64662diff --git a/include/linux/i2o.h b/include/linux/i2o.h
64663index d23c3c2..eb63c81 100644
64664--- a/include/linux/i2o.h
64665+++ b/include/linux/i2o.h
64666@@ -565,7 +565,7 @@ struct i2o_controller {
64667 struct i2o_device *exec; /* Executive */
64668 #if BITS_PER_LONG == 64
64669 spinlock_t context_list_lock; /* lock for context_list */
64670- atomic_t context_list_counter; /* needed for unique contexts */
64671+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
64672 struct list_head context_list; /* list of context id's
64673 and pointers */
64674 #endif
64675diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
64676index aff7ad8..3942bbd 100644
64677--- a/include/linux/if_pppox.h
64678+++ b/include/linux/if_pppox.h
64679@@ -76,7 +76,7 @@ struct pppox_proto {
64680 int (*ioctl)(struct socket *sock, unsigned int cmd,
64681 unsigned long arg);
64682 struct module *owner;
64683-};
64684+} __do_const;
64685
64686 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
64687 extern void unregister_pppox_proto(int proto_num);
64688diff --git a/include/linux/init.h b/include/linux/init.h
64689index e59041e..df0a975 100644
64690--- a/include/linux/init.h
64691+++ b/include/linux/init.h
64692@@ -39,9 +39,36 @@
64693 * Also note, that this data cannot be "const".
64694 */
64695
64696+#ifdef MODULE
64697+#define add_init_latent_entropy
64698+#define add_devinit_latent_entropy
64699+#define add_cpuinit_latent_entropy
64700+#define add_meminit_latent_entropy
64701+#else
64702+#define add_init_latent_entropy __latent_entropy
64703+
64704+#ifdef CONFIG_HOTPLUG
64705+#define add_devinit_latent_entropy
64706+#else
64707+#define add_devinit_latent_entropy __latent_entropy
64708+#endif
64709+
64710+#ifdef CONFIG_HOTPLUG_CPU
64711+#define add_cpuinit_latent_entropy
64712+#else
64713+#define add_cpuinit_latent_entropy __latent_entropy
64714+#endif
64715+
64716+#ifdef CONFIG_MEMORY_HOTPLUG
64717+#define add_meminit_latent_entropy
64718+#else
64719+#define add_meminit_latent_entropy __latent_entropy
64720+#endif
64721+#endif
64722+
64723 /* These are for everybody (although not all archs will actually
64724 discard it in modules) */
64725-#define __init __section(.init.text) __cold notrace
64726+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
64727 #define __initdata __section(.init.data)
64728 #define __initconst __constsection(.init.rodata)
64729 #define __exitdata __section(.exit.data)
64730@@ -94,7 +121,7 @@
64731 #define __exit __section(.exit.text) __exitused __cold notrace
64732
64733 /* Used for HOTPLUG */
64734-#define __devinit __section(.devinit.text) __cold notrace
64735+#define __devinit __section(.devinit.text) __cold notrace add_devinit_latent_entropy
64736 #define __devinitdata __section(.devinit.data)
64737 #define __devinitconst __constsection(.devinit.rodata)
64738 #define __devexit __section(.devexit.text) __exitused __cold notrace
64739@@ -102,7 +129,7 @@
64740 #define __devexitconst __constsection(.devexit.rodata)
64741
64742 /* Used for HOTPLUG_CPU */
64743-#define __cpuinit __section(.cpuinit.text) __cold notrace
64744+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
64745 #define __cpuinitdata __section(.cpuinit.data)
64746 #define __cpuinitconst __constsection(.cpuinit.rodata)
64747 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
64748@@ -110,7 +137,7 @@
64749 #define __cpuexitconst __constsection(.cpuexit.rodata)
64750
64751 /* Used for MEMORY_HOTPLUG */
64752-#define __meminit __section(.meminit.text) __cold notrace
64753+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
64754 #define __meminitdata __section(.meminit.data)
64755 #define __meminitconst __constsection(.meminit.rodata)
64756 #define __memexit __section(.memexit.text) __exitused __cold notrace
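The add_*_latent_entropy macros expand to the plugin-provided __latent_entropy attribute only where it is wanted (built-in, non-hotplug code), so the __init/__devinit/__cpuinit/__meminit definitions themselves never fork. A standalone sketch of the conditional-attribute shape; since __latent_entropy comes from a PaX GCC plugin, an ordinary attribute is substituted here purely for illustration:

#define DEMO_MODULE 0

#if DEMO_MODULE
#define demo_add_latent_entropy
#else
#define demo_add_latent_entropy __attribute__((cold))
#endif

/* The annotation appears or vanishes without touching the definition. */
static int demo_add_latent_entropy demo_init(void)
{
	return 0;
}

int main(void)
{
	return demo_init();
}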
64757diff --git a/include/linux/init_task.h b/include/linux/init_task.h
64758index 6d087c5..401cab8 100644
64759--- a/include/linux/init_task.h
64760+++ b/include/linux/init_task.h
64761@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
64762
64763 #define INIT_TASK_COMM "swapper"
64764
64765+#ifdef CONFIG_X86
64766+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
64767+#else
64768+#define INIT_TASK_THREAD_INFO
64769+#endif
64770+
64771 /*
64772 * INIT_TASK is used to set up the first task table, touch at
64773 * your own risk!. Base=0, limit=0x1fffff (=2MB)
64774@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
64775 RCU_POINTER_INITIALIZER(cred, &init_cred), \
64776 .comm = INIT_TASK_COMM, \
64777 .thread = INIT_THREAD, \
64778+ INIT_TASK_THREAD_INFO \
64779 .fs = &init_fs, \
64780 .files = &init_files, \
64781 .signal = &init_signals, \
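INIT_TASK_THREAD_INFO above is the optional-field-initializer idiom: the macro carries its own trailing comma and vanishes entirely on architectures without the field, so the big INIT_TASK initializer keeps a single shape. A compile-and-run sketch, with DEMO_X86 standing in for CONFIG_X86:

#include <stdio.h>

#define DEMO_X86 1

struct demo_task {
	int prio;
#if DEMO_X86
	int tinfo;
#endif
	const char *comm;
};

/* Expands to a designated initializer (trailing comma included) or to
 * nothing, so the initializer below never changes shape. */
#if DEMO_X86
#define DEMO_TASK_TINFO .tinfo = 42,
#else
#define DEMO_TASK_TINFO
#endif

static struct demo_task init_demo = {
	.prio = 120,
	DEMO_TASK_TINFO
	.comm = "swapper",
};

int main(void)
{
	printf("%s prio=%d\n", init_demo.comm, init_demo.prio);
	return 0;
}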
64782diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
64783index 5e4e617..073b866 100644
64784--- a/include/linux/interrupt.h
64785+++ b/include/linux/interrupt.h
64786@@ -435,7 +435,7 @@ enum
64787 /* map softirq index to softirq name. update 'softirq_to_name' in
64788 * kernel/softirq.c when adding a new softirq.
64789 */
64790-extern char *softirq_to_name[NR_SOFTIRQS];
64791+extern const char * const softirq_to_name[NR_SOFTIRQS];
64792
64793 /* softirq mask and active fields moved to irq_cpustat_t in
64794 * asm/hardirq.h to get better cache usage. KAO
64795@@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
64796
64797 struct softirq_action
64798 {
64799- void (*action)(struct softirq_action *);
64800-};
64801+ void (*action)(void);
64802+} __no_const;
64803
64804 asmlinkage void do_softirq(void);
64805 asmlinkage void __do_softirq(void);
64806-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
64807+extern void open_softirq(int nr, void (*action)(void));
64808 extern void softirq_init(void);
64809 extern void __raise_softirq_irqoff(unsigned int nr);
64810
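Dropping the unused struct softirq_action * argument (and constifying softirq_to_name) turns softirq handlers into plain void (*)(void) pointers, which lets tables of them be declared const and kept in read-only memory. A sketch of the resulting shape:

#include <stdio.h>

static void demo_timer_action(void)  { puts("timer softirq"); }
static void demo_net_rx_action(void) { puts("net rx softirq"); }

/* const table of plain void(void) handlers: nothing to overwrite. */
static void (* const demo_softirq_vec[])(void) = {
	demo_timer_action,
	demo_net_rx_action,
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(demo_softirq_vec) / sizeof(demo_softirq_vec[0]); i++)
		demo_softirq_vec[i]();
	return 0;
}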
64811diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
64812index 6883e19..06992b1 100644
64813--- a/include/linux/kallsyms.h
64814+++ b/include/linux/kallsyms.h
64815@@ -15,7 +15,8 @@
64816
64817 struct module;
64818
64819-#ifdef CONFIG_KALLSYMS
64820+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
64821+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64822 /* Lookup the address for a symbol. Returns 0 if not found. */
64823 unsigned long kallsyms_lookup_name(const char *name);
64824
64825@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
64826 /* Stupid that this does nothing, but I didn't create this mess. */
64827 #define __print_symbol(fmt, addr)
64828 #endif /*CONFIG_KALLSYMS*/
64829+#else /* when included by kallsyms.c, vsnprintf.c, or
64830+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
64831+extern void __print_symbol(const char *fmt, unsigned long address);
64832+extern int sprint_backtrace(char *buffer, unsigned long address);
64833+extern int sprint_symbol(char *buffer, unsigned long address);
64834+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
64835+const char *kallsyms_lookup(unsigned long addr,
64836+ unsigned long *symbolsize,
64837+ unsigned long *offset,
64838+ char **modname, char *namebuf);
64839+#endif
64840
64841 /* This macro allows us to keep printk typechecking */
64842 static __printf(1, 2)
64843diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
64844index 4dff0c6..1ca9b72 100644
64845--- a/include/linux/kgdb.h
64846+++ b/include/linux/kgdb.h
64847@@ -53,7 +53,7 @@ extern int kgdb_connected;
64848 extern int kgdb_io_module_registered;
64849
64850 extern atomic_t kgdb_setting_breakpoint;
64851-extern atomic_t kgdb_cpu_doing_single_step;
64852+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
64853
64854 extern struct task_struct *kgdb_usethread;
64855 extern struct task_struct *kgdb_contthread;
64856@@ -255,7 +255,7 @@ struct kgdb_arch {
64857 void (*correct_hw_break)(void);
64858
64859 void (*enable_nmi)(bool on);
64860-};
64861+} __do_const;
64862
64863 /**
64864 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
64865@@ -280,7 +280,7 @@ struct kgdb_io {
64866 void (*pre_exception) (void);
64867 void (*post_exception) (void);
64868 int is_console;
64869-};
64870+} __do_const;
64871
64872 extern struct kgdb_arch arch_kgdb_ops;
64873
64874diff --git a/include/linux/kmod.h b/include/linux/kmod.h
64875index 5398d58..5883a34 100644
64876--- a/include/linux/kmod.h
64877+++ b/include/linux/kmod.h
64878@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
64879 * usually useless though. */
64880 extern __printf(2, 3)
64881 int __request_module(bool wait, const char *name, ...);
64882+extern __printf(3, 4)
64883+int ___request_module(bool wait, char *param_name, const char *name, ...);
64884 #define request_module(mod...) __request_module(true, mod)
64885 #define request_module_nowait(mod...) __request_module(false, mod)
64886 #define try_then_request_module(x, mod...) \
64887diff --git a/include/linux/kobject.h b/include/linux/kobject.h
64888index 1e57449..4fede7b 100644
64889--- a/include/linux/kobject.h
64890+++ b/include/linux/kobject.h
64891@@ -111,7 +111,7 @@ struct kobj_type {
64892 struct attribute **default_attrs;
64893 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
64894 const void *(*namespace)(struct kobject *kobj);
64895-};
64896+} __do_const;
64897
64898 struct kobj_uevent_env {
64899 char *envp[UEVENT_NUM_ENVP];
64900diff --git a/include/linux/kref.h b/include/linux/kref.h
64901index 65af688..0592677 100644
64902--- a/include/linux/kref.h
64903+++ b/include/linux/kref.h
64904@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
64905 static inline int kref_sub(struct kref *kref, unsigned int count,
64906 void (*release)(struct kref *kref))
64907 {
64908- WARN_ON(release == NULL);
64909+ BUG_ON(release == NULL);
64910
64911 if (atomic_sub_and_test((int) count, &kref->refcount)) {
64912 release(kref);
64913diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
64914index ecc5543..0e96bcc 100644
64915--- a/include/linux/kvm_host.h
64916+++ b/include/linux/kvm_host.h
64917@@ -403,7 +403,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
64918 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
64919 void vcpu_put(struct kvm_vcpu *vcpu);
64920
64921-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64922+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
64923 struct module *module);
64924 void kvm_exit(void);
64925
64926@@ -558,7 +558,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
64927 struct kvm_guest_debug *dbg);
64928 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
64929
64930-int kvm_arch_init(void *opaque);
64931+int kvm_arch_init(const void *opaque);
64932 void kvm_arch_exit(void);
64933
64934 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
64935diff --git a/include/linux/libata.h b/include/linux/libata.h
64936index 1e36c63..0c5046e 100644
64937--- a/include/linux/libata.h
64938+++ b/include/linux/libata.h
64939@@ -915,7 +915,7 @@ struct ata_port_operations {
64940 * fields must be pointers.
64941 */
64942 const struct ata_port_operations *inherits;
64943-};
64944+} __do_const;
64945
64946 struct ata_port_info {
64947 unsigned long flags;
64948diff --git a/include/linux/list.h b/include/linux/list.h
64949index cc6d2aa..71febca 100644
64950--- a/include/linux/list.h
64951+++ b/include/linux/list.h
64952@@ -112,6 +112,9 @@ extern void __list_del_entry(struct list_head *entry);
64953 extern void list_del(struct list_head *entry);
64954 #endif
64955
64956+extern void pax_list_add_tail(struct list_head *new, struct list_head *head);
64957+extern void pax_list_del(struct list_head *entry);
64958+
64959 /**
64960 * list_replace - replace old entry by new one
64961 * @old : the element to be replaced
64962diff --git a/include/linux/mm.h b/include/linux/mm.h
64963index 280dae5..baea6c8 100644
64964--- a/include/linux/mm.h
64965+++ b/include/linux/mm.h
64966@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
64967 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
64968 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
64969 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
64970+
64971+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64972+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
64973+#endif
64974+
64975 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
64976
64977 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
64978@@ -231,6 +236,7 @@ struct vm_operations_struct {
64979 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
64980 unsigned long size, pgoff_t pgoff);
64981 };
64982+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
64983
64984 struct mmu_gather;
64985 struct inode;
64986@@ -1039,34 +1045,6 @@ int set_page_dirty(struct page *page);
64987 int set_page_dirty_lock(struct page *page);
64988 int clear_page_dirty_for_io(struct page *page);
64989
64990-/* Is the vma a continuation of the stack vma above it? */
64991-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
64992-{
64993- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
64994-}
64995-
64996-static inline int stack_guard_page_start(struct vm_area_struct *vma,
64997- unsigned long addr)
64998-{
64999- return (vma->vm_flags & VM_GROWSDOWN) &&
65000- (vma->vm_start == addr) &&
65001- !vma_growsdown(vma->vm_prev, addr);
65002-}
65003-
65004-/* Is the vma a continuation of the stack vma below it? */
65005-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
65006-{
65007- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
65008-}
65009-
65010-static inline int stack_guard_page_end(struct vm_area_struct *vma,
65011- unsigned long addr)
65012-{
65013- return (vma->vm_flags & VM_GROWSUP) &&
65014- (vma->vm_end == addr) &&
65015- !vma_growsup(vma->vm_next, addr);
65016-}
65017-
65018 extern pid_t
65019 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
65020
65021@@ -1166,6 +1144,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
65022 }
65023 #endif
65024
65025+#ifdef CONFIG_MMU
65026+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
65027+#else
65028+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
65029+{
65030+ return __pgprot(0);
65031+}
65032+#endif
65033+
65034 int vma_wants_writenotify(struct vm_area_struct *vma);
65035
65036 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
65037@@ -1184,8 +1171,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
65038 {
65039 return 0;
65040 }
65041+
65042+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
65043+ unsigned long address)
65044+{
65045+ return 0;
65046+}
65047 #else
65048 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
65049+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
65050 #endif
65051
65052 #ifdef __PAGETABLE_PMD_FOLDED
65053@@ -1194,8 +1188,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
65054 {
65055 return 0;
65056 }
65057+
65058+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
65059+ unsigned long address)
65060+{
65061+ return 0;
65062+}
65063 #else
65064 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
65065+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
65066 #endif
65067
65068 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
65069@@ -1213,11 +1214,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
65070 NULL: pud_offset(pgd, address);
65071 }
65072
65073+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
65074+{
65075+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
65076+ NULL: pud_offset(pgd, address);
65077+}
65078+
65079 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
65080 {
65081 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
65082 NULL: pmd_offset(pud, address);
65083 }
65084+
65085+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
65086+{
65087+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
65088+ NULL: pmd_offset(pud, address);
65089+}
65090 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
65091
65092 #if USE_SPLIT_PTLOCKS
65093@@ -1447,6 +1460,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
65094 unsigned long, unsigned long,
65095 unsigned long, unsigned long);
65096 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
65097+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
65098
65099 /* These take the mm semaphore themselves */
65100 extern unsigned long vm_brk(unsigned long, unsigned long);
65101@@ -1510,6 +1524,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
65102 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
65103 struct vm_area_struct **pprev);
65104
65105+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
65106+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
65107+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
65108+
65109 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
65110 NULL if none. Assume start_addr < end_addr. */
65111 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
65112@@ -1538,15 +1556,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
65113 return vma;
65114 }
65115
65116-#ifdef CONFIG_MMU
65117-pgprot_t vm_get_page_prot(unsigned long vm_flags);
65118-#else
65119-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
65120-{
65121- return __pgprot(0);
65122-}
65123-#endif
65124-
65125 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
65126 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
65127 unsigned long pfn, unsigned long size, pgprot_t);
65128@@ -1652,7 +1661,7 @@ extern int unpoison_memory(unsigned long pfn);
65129 extern int sysctl_memory_failure_early_kill;
65130 extern int sysctl_memory_failure_recovery;
65131 extern void shake_page(struct page *p, int access);
65132-extern atomic_long_t mce_bad_pages;
65133+extern atomic_long_unchecked_t mce_bad_pages;
65134 extern int soft_offline_page(struct page *page, int flags);
65135
65136 extern void dump_page(struct page *page);
65137@@ -1683,5 +1692,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
65138 static inline bool page_is_guard(struct page *page) { return false; }
65139 #endif /* CONFIG_DEBUG_PAGEALLOC */
65140
65141+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65142+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
65143+#else
65144+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
65145+#endif
65146+
65147 #endif /* __KERNEL__ */
65148 #endif /* _LINUX_MM_H */
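The new pud_alloc_kernel()/pmd_alloc_kernel() helpers mirror the existing allocators' allocate-or-descend shape: populate the upper-level entry if it is empty, then hand back the lower-level table. A userspace sketch of that shape with a two-level table (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_ENTRIES 8

static int *demo_dir[DEMO_ENTRIES];	/* top level, entries start NULL */

/* Populate-on-demand, then hand back the lower-level table. */
static int *demo_table_alloc(unsigned int idx)
{
	if (demo_dir[idx] == NULL)
		demo_dir[idx] = calloc(DEMO_ENTRIES, sizeof(int));
	return demo_dir[idx];	/* NULL only if the allocation failed */
}

int main(void)
{
	int *tbl = demo_table_alloc(3);

	if (!tbl)
		return 1;
	tbl[0] = 42;
	printf("%d\n", demo_dir[3][0]);
	return 0;
}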
65149diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
65150index 31f8a3a..499f1db 100644
65151--- a/include/linux/mm_types.h
65152+++ b/include/linux/mm_types.h
65153@@ -275,6 +275,8 @@ struct vm_area_struct {
65154 #ifdef CONFIG_NUMA
65155 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
65156 #endif
65157+
65158+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
65159 };
65160
65161 struct core_thread {
65162@@ -348,7 +350,7 @@ struct mm_struct {
65163 unsigned long def_flags;
65164 unsigned long nr_ptes; /* Page table pages */
65165 unsigned long start_code, end_code, start_data, end_data;
65166- unsigned long start_brk, brk, start_stack;
65167+ unsigned long brk_gap, start_brk, brk, start_stack;
65168 unsigned long arg_start, arg_end, env_start, env_end;
65169
65170 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
65171@@ -399,6 +401,24 @@ struct mm_struct {
65172 struct cpumask cpumask_allocation;
65173 #endif
65174 struct uprobes_state uprobes_state;
65175+
65176+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65177+ unsigned long pax_flags;
65178+#endif
65179+
65180+#ifdef CONFIG_PAX_DLRESOLVE
65181+ unsigned long call_dl_resolve;
65182+#endif
65183+
65184+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
65185+ unsigned long call_syscall;
65186+#endif
65187+
65188+#ifdef CONFIG_PAX_ASLR
65189+ unsigned long delta_mmap; /* randomized offset */
65190+ unsigned long delta_stack; /* randomized offset */
65191+#endif
65192+
65193 };
65194
65195 static inline void mm_init_cpumask(struct mm_struct *mm)
65196diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
65197index c5d5278..f0b68c8 100644
65198--- a/include/linux/mmiotrace.h
65199+++ b/include/linux/mmiotrace.h
65200@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
65201 /* Called from ioremap.c */
65202 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
65203 void __iomem *addr);
65204-extern void mmiotrace_iounmap(volatile void __iomem *addr);
65205+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
65206
65207 /* For anyone to insert markers. Remember trailing newline. */
65208 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
65209@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
65210 {
65211 }
65212
65213-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
65214+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
65215 {
65216 }
65217
65218diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
65219index a23923b..073fee4 100644
65220--- a/include/linux/mmzone.h
65221+++ b/include/linux/mmzone.h
65222@@ -421,7 +421,7 @@ struct zone {
65223 unsigned long flags; /* zone flags, see below */
65224
65225 /* Zone statistics */
65226- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65227+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
65228
65229 /*
65230 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
65231diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
65232index fed3def..7cc3f93 100644
65233--- a/include/linux/mod_devicetable.h
65234+++ b/include/linux/mod_devicetable.h
65235@@ -12,7 +12,7 @@
65236 typedef unsigned long kernel_ulong_t;
65237 #endif
65238
65239-#define PCI_ANY_ID (~0)
65240+#define PCI_ANY_ID ((__u16)~0)
65241
65242 struct pci_device_id {
65243 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
65244@@ -139,7 +139,7 @@ struct usb_device_id {
65245 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
65246 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
65247
65248-#define HID_ANY_ID (~0)
65249+#define HID_ANY_ID (~0U)
65250 #define HID_BUS_ANY 0xffff
65251 #define HID_GROUP_ANY 0x0000
65252
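The PCI_ANY_ID and HID_ANY_ID changes fix an integer-width trap: plain ~0 is the int value -1, so comparing it with a narrower unsigned field goes through promotion to int and fails to match, while the cast pins the constant to the field's width. A two-line demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t device = 0xffff;

	/* device promotes to int 65535; ~0 is int -1: no match. */
	printf("device == ~0           -> %d\n", device == ~0);
	/* (uint16_t)~0 is 0xffff: matches as intended. */
	printf("device == (uint16_t)~0 -> %d\n", device == (uint16_t)~0);
	return 0;
}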
65253diff --git a/include/linux/module.h b/include/linux/module.h
65254index 1375ee3..d631af0 100644
65255--- a/include/linux/module.h
65256+++ b/include/linux/module.h
65257@@ -17,9 +17,11 @@
65258 #include <linux/moduleparam.h>
65259 #include <linux/tracepoint.h>
65260 #include <linux/export.h>
65261+#include <linux/fs.h>
65262
65263 #include <linux/percpu.h>
65264 #include <asm/module.h>
65265+#include <asm/pgtable.h>
65266
65267 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
65268 #define MODULE_SIG_STRING "~Module signature appended~\n"
65269@@ -281,19 +283,16 @@ struct module
65270 int (*init)(void);
65271
65272 /* If this is non-NULL, vfree after init() returns */
65273- void *module_init;
65274+ void *module_init_rx, *module_init_rw;
65275
65276 /* Here is the actual code + data, vfree'd on unload. */
65277- void *module_core;
65278+ void *module_core_rx, *module_core_rw;
65279
65280 /* Here are the sizes of the init and core sections */
65281- unsigned int init_size, core_size;
65282+ unsigned int init_size_rw, core_size_rw;
65283
65284 /* The size of the executable code in each section. */
65285- unsigned int init_text_size, core_text_size;
65286-
65287- /* Size of RO sections of the module (text+rodata) */
65288- unsigned int init_ro_size, core_ro_size;
65289+ unsigned int init_size_rx, core_size_rx;
65290
65291 /* Arch-specific module values */
65292 struct mod_arch_specific arch;
65293@@ -349,6 +348,10 @@ struct module
65294 #ifdef CONFIG_EVENT_TRACING
65295 struct ftrace_event_call **trace_events;
65296 unsigned int num_trace_events;
65297+ struct file_operations trace_id;
65298+ struct file_operations trace_enable;
65299+ struct file_operations trace_format;
65300+ struct file_operations trace_filter;
65301 #endif
65302 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
65303 unsigned int num_ftrace_callsites;
65304@@ -396,16 +399,46 @@ bool is_module_address(unsigned long addr);
65305 bool is_module_percpu_address(unsigned long addr);
65306 bool is_module_text_address(unsigned long addr);
65307
65308+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
65309+{
65310+
65311+#ifdef CONFIG_PAX_KERNEXEC
65312+ if (ktla_ktva(addr) >= (unsigned long)start &&
65313+ ktla_ktva(addr) < (unsigned long)start + size)
65314+ return 1;
65315+#endif
65316+
65317+ return ((void *)addr >= start && (void *)addr < start + size);
65318+}
65319+
65320+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
65321+{
65322+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
65323+}
65324+
65325+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
65326+{
65327+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
65328+}
65329+
65330+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
65331+{
65332+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
65333+}
65334+
65335+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
65336+{
65337+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
65338+}
65339+
65340 static inline int within_module_core(unsigned long addr, struct module *mod)
65341 {
65342- return (unsigned long)mod->module_core <= addr &&
65343- addr < (unsigned long)mod->module_core + mod->core_size;
65344+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
65345 }
65346
65347 static inline int within_module_init(unsigned long addr, struct module *mod)
65348 {
65349- return (unsigned long)mod->module_init <= addr &&
65350- addr < (unsigned long)mod->module_init + mod->init_size;
65351+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
65352 }
65353
65354 /* Search for module by name: must hold module_mutex. */
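With the module image split into separate rx (code) and rw (data) mappings, the single [module_core, module_core + core_size) membership test becomes a pair of range checks, as the rewritten within_module_core() shows. A standalone sketch of that shape (the extra CONFIG_PAX_KERNEXEC ktla_ktva() address translation is left out):

#include <stdbool.h>
#include <stdio.h>

struct demo_mod {
	char *core_rx; unsigned long size_rx;	/* code, read-execute */
	char *core_rw; unsigned long size_rw;	/* data, read-write   */
};

static bool within_range(const char *addr, const char *start, unsigned long size)
{
	return addr >= start && addr < start + size;
}

static bool within_demo_core(const char *addr, const struct demo_mod *mod)
{
	return within_range(addr, mod->core_rx, mod->size_rx) ||
	       within_range(addr, mod->core_rw, mod->size_rw);
}

int main(void)
{
	static char rx[64], rw[64], other[64];
	struct demo_mod m = { rx, sizeof(rx), rw, sizeof(rw) };

	printf("%d %d %d\n",
	       within_demo_core(rx + 10, &m),	/* 1: inside rx */
	       within_demo_core(rw + 10, &m),	/* 1: inside rw */
	       within_demo_core(other, &m));	/* 0: neither   */
	return 0;
}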
65355diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
65356index 560ca53..5ee8d73 100644
65357--- a/include/linux/moduleloader.h
65358+++ b/include/linux/moduleloader.h
65359@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
65360
65361 /* Allocator used for allocating struct module, core sections and init
65362 sections. Returns NULL on failure. */
65363-void *module_alloc(unsigned long size);
65364+void *module_alloc(unsigned long size) __size_overflow(1);
65365+
65366+#ifdef CONFIG_PAX_KERNEXEC
65367+void *module_alloc_exec(unsigned long size) __size_overflow(1);
65368+#else
65369+#define module_alloc_exec(x) module_alloc(x)
65370+#endif
65371
65372 /* Free memory returned from module_alloc. */
65373 void module_free(struct module *mod, void *module_region);
65374
65375+#ifdef CONFIG_PAX_KERNEXEC
65376+void module_free_exec(struct module *mod, void *module_region);
65377+#else
65378+#define module_free_exec(x, y) module_free((x), (y))
65379+#endif
65380+
65381 /*
65382 * Apply the given relocation to the (simplified) ELF. Return -error
65383 * or 0.
65384@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
65385 unsigned int relsec,
65386 struct module *me)
65387 {
65388+#ifdef CONFIG_MODULES
65389 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
65390+#endif
65391 return -ENOEXEC;
65392 }
65393 #endif
65394@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
65395 unsigned int relsec,
65396 struct module *me)
65397 {
65398+#ifdef CONFIG_MODULES
65399 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
65400+#endif
65401 return -ENOEXEC;
65402 }
65403 #endif
65404diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
65405index d6a5806..7c13347 100644
65406--- a/include/linux/moduleparam.h
65407+++ b/include/linux/moduleparam.h
65408@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
65409 * @len is usually just sizeof(string).
65410 */
65411 #define module_param_string(name, string, len, perm) \
65412- static const struct kparam_string __param_string_##name \
65413+ static const struct kparam_string __param_string_##name __used \
65414 = { len, string }; \
65415 __module_param_call(MODULE_PARAM_PREFIX, name, \
65416 &param_ops_string, \
65417@@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
65418 */
65419 #define module_param_array_named(name, array, type, nump, perm) \
65420 param_check_##type(name, &(array)[0]); \
65421- static const struct kparam_array __param_arr_##name \
65422+ static const struct kparam_array __param_arr_##name __used \
65423 = { .max = ARRAY_SIZE(array), .num = nump, \
65424 .ops = &param_ops_##type, \
65425 .elemsize = sizeof(array[0]), .elem = array }; \
65426diff --git a/include/linux/namei.h b/include/linux/namei.h
65427index 4bf19d8..5268cea 100644
65428--- a/include/linux/namei.h
65429+++ b/include/linux/namei.h
65430@@ -18,7 +18,7 @@ struct nameidata {
65431 unsigned seq;
65432 int last_type;
65433 unsigned depth;
65434- char *saved_names[MAX_NESTED_LINKS + 1];
65435+ const char *saved_names[MAX_NESTED_LINKS + 1];
65436 };
65437
65438 /*
65439@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
65440
65441 extern void nd_jump_link(struct nameidata *nd, struct path *path);
65442
65443-static inline void nd_set_link(struct nameidata *nd, char *path)
65444+static inline void nd_set_link(struct nameidata *nd, const char *path)
65445 {
65446 nd->saved_names[nd->depth] = path;
65447 }
65448
65449-static inline char *nd_get_link(struct nameidata *nd)
65450+static inline const char *nd_get_link(const struct nameidata *nd)
65451 {
65452 return nd->saved_names[nd->depth];
65453 }
65454diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
65455index 825fb7e..24cdd41 100644
65456--- a/include/linux/netdevice.h
65457+++ b/include/linux/netdevice.h
65458@@ -1002,6 +1002,7 @@ struct net_device_ops {
65459 struct net_device *dev,
65460 int idx);
65461 };
65462+typedef struct net_device_ops __no_const net_device_ops_no_const;
65463
65464 /*
65465 * The DEVICE structure.
65466@@ -1062,7 +1063,7 @@ struct net_device {
65467 int iflink;
65468
65469 struct net_device_stats stats;
65470- atomic_long_t rx_dropped; /* dropped packets by core network
65471+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
65472 * Do not use this in drivers.
65473 */
65474
65475diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
65476index 7958e84..ed74d7a 100644
65477--- a/include/linux/netfilter/ipset/ip_set.h
65478+++ b/include/linux/netfilter/ipset/ip_set.h
65479@@ -98,7 +98,7 @@ struct ip_set_type_variant {
65480 /* Return true if "b" set is the same as "a"
65481 * according to the create set parameters */
65482 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
65483-};
65484+} __do_const;
65485
65486 /* The core set type structure */
65487 struct ip_set_type {
65488diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
65489index 4966dde..7d8ce06 100644
65490--- a/include/linux/netfilter/nfnetlink.h
65491+++ b/include/linux/netfilter/nfnetlink.h
65492@@ -16,7 +16,7 @@ struct nfnl_callback {
65493 const struct nlattr * const cda[]);
65494 const struct nla_policy *policy; /* netlink attribute policy */
65495 const u_int16_t attr_count; /* number of nlattr's */
65496-};
65497+} __do_const;
65498
65499 struct nfnetlink_subsystem {
65500 const char *name;
65501diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
65502new file mode 100644
65503index 0000000..33f4af8
65504--- /dev/null
65505+++ b/include/linux/netfilter/xt_gradm.h
65506@@ -0,0 +1,9 @@
65507+#ifndef _LINUX_NETFILTER_XT_GRADM_H
65508+#define _LINUX_NETFILTER_XT_GRADM_H 1
65509+
65510+struct xt_gradm_mtinfo {
65511+ __u16 flags;
65512+ __u16 invflags;
65513+};
65514+
65515+#endif
65516diff --git a/include/linux/notifier.h b/include/linux/notifier.h
65517index d65746e..62e72c2 100644
65518--- a/include/linux/notifier.h
65519+++ b/include/linux/notifier.h
65520@@ -51,7 +51,8 @@ struct notifier_block {
65521 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
65522 struct notifier_block __rcu *next;
65523 int priority;
65524-};
65525+} __do_const;
65526+typedef struct notifier_block __no_const notifier_block_no_const;
65527
65528 struct atomic_notifier_head {
65529 spinlock_t lock;
65530diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
65531index a4c5624..79d6d88 100644
65532--- a/include/linux/oprofile.h
65533+++ b/include/linux/oprofile.h
65534@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
65535 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
65536 char const * name, ulong * val);
65537
65538-/** Create a file for read-only access to an atomic_t. */
65539+/** Create a file for read-only access to an atomic_unchecked_t. */
65540 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
65541- char const * name, atomic_t * val);
65542+ char const * name, atomic_unchecked_t * val);
65543
65544 /** create a directory */
65545 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
65546diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
65547index 6bfb2faa..1204767 100644
65548--- a/include/linux/perf_event.h
65549+++ b/include/linux/perf_event.h
65550@@ -328,8 +328,8 @@ struct perf_event {
65551
65552 enum perf_event_active_state state;
65553 unsigned int attach_state;
65554- local64_t count;
65555- atomic64_t child_count;
65556+ local64_t count; /* PaX: fix it one day */
65557+ atomic64_unchecked_t child_count;
65558
65559 /*
65560 * These are the total time in nanoseconds that the event
65561@@ -380,8 +380,8 @@ struct perf_event {
65562 * These accumulate total time (in nanoseconds) that children
65563 * events have been enabled and running, respectively.
65564 */
65565- atomic64_t child_total_time_enabled;
65566- atomic64_t child_total_time_running;
65567+ atomic64_unchecked_t child_total_time_enabled;
65568+ atomic64_unchecked_t child_total_time_running;
65569
65570 /*
65571 * Protect attach/detach and child_list:
65572diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
65573index ad1a427..6419649 100644
65574--- a/include/linux/pipe_fs_i.h
65575+++ b/include/linux/pipe_fs_i.h
65576@@ -45,9 +45,9 @@ struct pipe_buffer {
65577 struct pipe_inode_info {
65578 wait_queue_head_t wait;
65579 unsigned int nrbufs, curbuf, buffers;
65580- unsigned int readers;
65581- unsigned int writers;
65582- unsigned int waiting_writers;
65583+ atomic_t readers;
65584+ atomic_t writers;
65585+ atomic_t waiting_writers;
65586 unsigned int r_counter;
65587 unsigned int w_counter;
65588 struct page *tmp_page;
65589diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
65590index 5f28cae..3d23723 100644
65591--- a/include/linux/platform_data/usb-ehci-s5p.h
65592+++ b/include/linux/platform_data/usb-ehci-s5p.h
65593@@ -14,7 +14,7 @@
65594 struct s5p_ehci_platdata {
65595 int (*phy_init)(struct platform_device *pdev, int type);
65596 int (*phy_exit)(struct platform_device *pdev, int type);
65597-};
65598+} __no_const;
65599
65600 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
65601
65602diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
65603index f271860..6b3bec5 100644
65604--- a/include/linux/pm_runtime.h
65605+++ b/include/linux/pm_runtime.h
65606@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
65607
65608 static inline void pm_runtime_mark_last_busy(struct device *dev)
65609 {
65610- ACCESS_ONCE(dev->power.last_busy) = jiffies;
65611+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
65612 }
65613
65614 #else /* !CONFIG_PM_RUNTIME */
65615diff --git a/include/linux/poison.h b/include/linux/poison.h
65616index 2110a81..13a11bb 100644
65617--- a/include/linux/poison.h
65618+++ b/include/linux/poison.h
65619@@ -19,8 +19,8 @@
65620 * under normal circumstances, used to verify that nobody uses
65621 * non-initialized list entries.
65622 */
65623-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
65624-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
65625+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
65626+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
65627
65628 /********** include/linux/timer.h **********/
65629 /*
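Relocating LIST_POISON1/2 is an exploit-hardening tweak: the stock low values fall in a range a process could conceivably map, making a dereference of a poisoned list pointer potentially attacker-controlled, whereas on 32-bit the (long) cast lands the new values at the top of the address space, which userspace cannot map. A small probe of where the constants sit on the build host:

#include <stdio.h>

#define DEMO_POISON_OLD ((void *)0x00100100)		/* low, mappable      */
#define DEMO_POISON_NEW ((void *)(long)0xFFFFFF01)	/* high on 32-bit     */

int main(void)
{
	printf("old poison: %p\n", DEMO_POISON_OLD);
	printf("new poison: %p\n", DEMO_POISON_NEW);
	printf("sizeof(long)=%zu, (long)0xFFFFFF01 = %ld\n",
	       sizeof(long), (long)0xFFFFFF01);
	return 0;
}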
65630diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
65631index 4a496eb..d9c5659 100644
65632--- a/include/linux/power/smartreflex.h
65633+++ b/include/linux/power/smartreflex.h
65634@@ -238,7 +238,7 @@ struct omap_sr_class_data {
65635 int (*notify)(struct omap_sr *sr, u32 status);
65636 u8 notify_flags;
65637 u8 class_type;
65638-};
65639+} __do_const;
65640
65641 /**
65642 * struct omap_sr_nvalue_table - Smartreflex n-target value info
65643diff --git a/include/linux/printk.h b/include/linux/printk.h
65644index 9afc01e..92c32e8 100644
65645--- a/include/linux/printk.h
65646+++ b/include/linux/printk.h
65647@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
65648 extern int printk_needs_cpu(int cpu);
65649 extern void printk_tick(void);
65650
65651+extern int kptr_restrict;
65652+
65653 #ifdef CONFIG_PRINTK
65654 asmlinkage __printf(5, 0)
65655 int vprintk_emit(int facility, int level,
65656@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
65657
65658 extern int printk_delay_msec;
65659 extern int dmesg_restrict;
65660-extern int kptr_restrict;
65661
65662 void log_buf_kexec_setup(void);
65663 void __init setup_log_buf(int early);
65664diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
65665index 3fd2e87..75db910 100644
65666--- a/include/linux/proc_fs.h
65667+++ b/include/linux/proc_fs.h
65668@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
65669 return proc_create_data(name, mode, parent, proc_fops, NULL);
65670 }
65671
65672+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
65673+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
65674+{
65675+#ifdef CONFIG_GRKERNSEC_PROC_USER
65676+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
65677+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65678+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
65679+#else
65680+ return proc_create_data(name, mode, parent, proc_fops, NULL);
65681+#endif
65682+}
65683+
65684 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
65685 umode_t mode, struct proc_dir_entry *base,
65686 read_proc_t *read_proc, void * data)
65687diff --git a/include/linux/random.h b/include/linux/random.h
65688index 6330ed4..419c6c3 100644
65689--- a/include/linux/random.h
65690+++ b/include/linux/random.h
65691@@ -30,12 +30,17 @@ void srandom32(u32 seed);
65692
65693 u32 prandom32(struct rnd_state *);
65694
65695+static inline unsigned long pax_get_random_long(void)
65696+{
65697+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
65698+}
65699+
65700 /*
65701 * Handle minimum values for seeds
65702 */
65703 static inline u32 __seed(u32 x, u32 m)
65704 {
65705- return (x < m) ? x + m : x;
65706+ return (x <= m) ? x + m + 1 : x;
65707 }
65708
65709 /**
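Two things happen in the random.h hunk: pax_get_random_long() stacks two 32-bit draws so the full width of a long is random, and __seed() gets an off-by-one fix; with the old "x < m" test, inputs such as x == 0 or x == m came back as exactly m even though the generator needs a seed strictly above that floor. A userspace sketch, with rand() standing in for the kernel's random32():

#include <stdio.h>
#include <stdlib.h>

static unsigned long demo_get_random_long(void)
{
	/* On 32-bit longs the shift term is never evaluated. */
	return (unsigned long)rand() +
	       (sizeof(long) > 4 ? (unsigned long)rand() << 32 : 0);
}

static unsigned int demo_seed(unsigned int x, unsigned int m)
{
	return (x <= m) ? x + m + 1 : x;	/* always strictly > m */
}

int main(void)
{
	srand(1);
	printf("%lx\n", demo_get_random_long());
	printf("%u %u\n", demo_seed(0, 5), demo_seed(5, 5));	/* 6 11 */
	return 0;
}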
65710diff --git a/include/linux/reboot.h b/include/linux/reboot.h
65711index 23b3630..e1bc12b 100644
65712--- a/include/linux/reboot.h
65713+++ b/include/linux/reboot.h
65714@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
65715 * Architecture-specific implementations of sys_reboot commands.
65716 */
65717
65718-extern void machine_restart(char *cmd);
65719-extern void machine_halt(void);
65720-extern void machine_power_off(void);
65721+extern void machine_restart(char *cmd) __noreturn;
65722+extern void machine_halt(void) __noreturn;
65723+extern void machine_power_off(void) __noreturn;
65724
65725 extern void machine_shutdown(void);
65726 struct pt_regs;
65727@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
65728 */
65729
65730 extern void kernel_restart_prepare(char *cmd);
65731-extern void kernel_restart(char *cmd);
65732-extern void kernel_halt(void);
65733-extern void kernel_power_off(void);
65734+extern void kernel_restart(char *cmd) __noreturn;
65735+extern void kernel_halt(void) __noreturn;
65736+extern void kernel_power_off(void) __noreturn;
65737
65738 extern int C_A_D; /* for sysctl */
65739 void ctrl_alt_del(void);
65740@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
65741 * Emergency restart, callable from an interrupt handler.
65742 */
65743
65744-extern void emergency_restart(void);
65745+extern void emergency_restart(void) __noreturn;
65746 #include <asm/emergency-restart.h>
65747
65748 #endif /* _LINUX_REBOOT_H */
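Marking machine_halt(), kernel_power_off() and friends __noreturn tells the compiler control never comes back, enabling dead-code elimination after the call and silencing missing-return warnings on paths that end in a shutdown. The same effect in C11 spelling:

#include <stdlib.h>

static _Noreturn void demo_halt(void)
{
	exit(0);
}

static int demo(int fatal)
{
	if (fatal)
		demo_halt();	/* compiler knows this path ends here */
	return 1;		/* no warning about the fatal path    */
}

int main(void)
{
	return demo(0) ? 0 : 1;
}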
65749diff --git a/include/linux/regset.h b/include/linux/regset.h
65750index 8e0c9fe..ac4d221 100644
65751--- a/include/linux/regset.h
65752+++ b/include/linux/regset.h
65753@@ -161,7 +161,8 @@ struct user_regset {
65754 unsigned int align;
65755 unsigned int bias;
65756 unsigned int core_note_type;
65757-};
65758+} __do_const;
65759+typedef struct user_regset __no_const user_regset_no_const;
65760
65761 /**
65762 * struct user_regset_view - available regsets
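__do_const and __no_const are attributes consumed by the constify gcc plugin: ops tables tagged __do_const become read-only at runtime, and the user_regset_no_const typedef re-opens mutability for the few instances that really are built on the fly. A plain-C approximation of the idea, with const standing in for the plugin attributes:

struct ops {
	int  (*get)(void);
	void (*set)(int);
};

static int  stub_get(void)  { return 42; }
static void stub_set(int v) { (void)v; }

/* "constified": placed in .rodata, so the function pointers cannot be
 * overwritten by a runtime exploit */
static const struct ops fixed_ops = { .get = stub_get, .set = stub_set };

/* "no_const" equivalent: the rare instance that is filled in at runtime */
static struct ops runtime_ops;

int main(void)
{
	runtime_ops = fixed_ops;	/* copying into a writable object */
	return runtime_ops.get() == 42 ? 0 : 1;
}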
65763diff --git a/include/linux/relay.h b/include/linux/relay.h
65764index 91cacc3..b55ff74 100644
65765--- a/include/linux/relay.h
65766+++ b/include/linux/relay.h
65767@@ -160,7 +160,7 @@ struct rchan_callbacks
65768 * The callback should return 0 if successful, negative if not.
65769 */
65770 int (*remove_buf_file)(struct dentry *dentry);
65771-};
65772+} __no_const;
65773
65774 /*
65775 * CONFIG_RELAY kernel API, kernel/relay.c
65776diff --git a/include/linux/rio.h b/include/linux/rio.h
65777index a3e7842..d973ca6 100644
65778--- a/include/linux/rio.h
65779+++ b/include/linux/rio.h
65780@@ -339,7 +339,7 @@ struct rio_ops {
65781 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
65782 u64 rstart, u32 size, u32 flags);
65783 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
65784-};
65785+} __no_const;
65786
65787 #define RIO_RESOURCE_MEM 0x00000100
65788 #define RIO_RESOURCE_DOORBELL 0x00000200
65789diff --git a/include/linux/rmap.h b/include/linux/rmap.h
65790index bfe1f47..6a33ee3 100644
65791--- a/include/linux/rmap.h
65792+++ b/include/linux/rmap.h
65793@@ -134,8 +134,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
65794 void anon_vma_init(void); /* create anon_vma_cachep */
65795 int anon_vma_prepare(struct vm_area_struct *);
65796 void unlink_anon_vmas(struct vm_area_struct *);
65797-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
65798-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
65799+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
65800+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
65801
65802 static inline void anon_vma_merge(struct vm_area_struct *vma,
65803 struct vm_area_struct *next)
65804diff --git a/include/linux/sched.h b/include/linux/sched.h
65805index 3e63925..6c93b17 100644
65806--- a/include/linux/sched.h
65807+++ b/include/linux/sched.h
65808@@ -61,6 +61,7 @@ struct bio_list;
65809 struct fs_struct;
65810 struct perf_event_context;
65811 struct blk_plug;
65812+struct linux_binprm;
65813
65814 /*
65815 * List of flags we want to share for kernel threads,
65816@@ -344,10 +345,23 @@ struct user_namespace;
65817 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
65818
65819 extern int sysctl_max_map_count;
65820+extern unsigned long sysctl_heap_stack_gap;
65821
65822 #include <linux/aio.h>
65823
65824 #ifdef CONFIG_MMU
65825+
65826+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
65827+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
65828+#else
65829+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
65830+{
65831+ return 0;
65832+}
65833+#endif
65834+
65835+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
65836+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
65837 extern void arch_pick_mmap_layout(struct mm_struct *mm);
65838 extern unsigned long
65839 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
65840@@ -614,6 +628,17 @@ struct signal_struct {
65841 #ifdef CONFIG_TASKSTATS
65842 struct taskstats *stats;
65843 #endif
65844+
65845+#ifdef CONFIG_GRKERNSEC
65846+ u32 curr_ip;
65847+ u32 saved_ip;
65848+ u32 gr_saddr;
65849+ u32 gr_daddr;
65850+ u16 gr_sport;
65851+ u16 gr_dport;
65852+ u8 used_accept:1;
65853+#endif
65854+
65855 #ifdef CONFIG_AUDIT
65856 unsigned audit_tty;
65857 struct tty_audit_buf *tty_audit_buf;
65858@@ -691,6 +716,11 @@ struct user_struct {
65859 struct key *session_keyring; /* UID's default session keyring */
65860 #endif
65861
65862+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65863+ unsigned int banned;
65864+ unsigned long ban_expires;
65865+#endif
65866+
65867 /* Hash table maintenance information */
65868 struct hlist_node uidhash_node;
65869 kuid_t uid;
65870@@ -1312,8 +1342,8 @@ struct task_struct {
65871 struct list_head thread_group;
65872
65873 struct completion *vfork_done; /* for vfork() */
65874- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
65875- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65876+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
65877+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
65878
65879 cputime_t utime, stime, utimescaled, stimescaled;
65880 cputime_t gtime;
65881@@ -1329,11 +1359,6 @@ struct task_struct {
65882 struct task_cputime cputime_expires;
65883 struct list_head cpu_timers[3];
65884
65885-/* process credentials */
65886- const struct cred __rcu *real_cred; /* objective and real subjective task
65887- * credentials (COW) */
65888- const struct cred __rcu *cred; /* effective (overridable) subjective task
65889- * credentials (COW) */
65890 char comm[TASK_COMM_LEN]; /* executable name excluding path
65891 - access with [gs]et_task_comm (which lock
65892 it with task_lock())
65893@@ -1350,6 +1375,10 @@ struct task_struct {
65894 #endif
65895 /* CPU-specific state of this task */
65896 struct thread_struct thread;
65897+/* thread_info moved to task_struct */
65898+#ifdef CONFIG_X86
65899+ struct thread_info tinfo;
65900+#endif
65901 /* filesystem information */
65902 struct fs_struct *fs;
65903 /* open file information */
65904@@ -1423,6 +1452,10 @@ struct task_struct {
65905 gfp_t lockdep_reclaim_gfp;
65906 #endif
65907
65908+/* process credentials */
65909+ const struct cred __rcu *real_cred; /* objective and real subjective task
65910+ * credentials (COW) */
65911+
65912 /* journalling filesystem info */
65913 void *journal_info;
65914
65915@@ -1461,6 +1494,10 @@ struct task_struct {
65916 /* cg_list protected by css_set_lock and tsk->alloc_lock */
65917 struct list_head cg_list;
65918 #endif
65919+
65920+ const struct cred __rcu *cred; /* effective (overridable) subjective task
65921+ * credentials (COW) */
65922+
65923 #ifdef CONFIG_FUTEX
65924 struct robust_list_head __user *robust_list;
65925 #ifdef CONFIG_COMPAT
65926@@ -1548,8 +1585,74 @@ struct task_struct {
65927 #ifdef CONFIG_UPROBES
65928 struct uprobe_task *utask;
65929 #endif
65930+
65931+#ifdef CONFIG_GRKERNSEC
65932+ /* grsecurity */
65933+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65934+ u64 exec_id;
65935+#endif
65936+#ifdef CONFIG_GRKERNSEC_SETXID
65937+ const struct cred *delayed_cred;
65938+#endif
65939+ struct dentry *gr_chroot_dentry;
65940+ struct acl_subject_label *acl;
65941+ struct acl_role_label *role;
65942+ struct file *exec_file;
65943+ unsigned long brute_expires;
65944+ u16 acl_role_id;
65945+ /* is this the task that authenticated to the special role */
65946+ u8 acl_sp_role;
65947+ u8 is_writable;
65948+ u8 brute;
65949+ u8 gr_is_chrooted;
65950+#endif
65951+
65952 };
65953
65954+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
65955+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
65956+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
65957+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
65958+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
65959+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
65960+
65961+#ifdef CONFIG_PAX_SOFTMODE
65962+extern int pax_softmode;
65963+#endif
65964+
65965+extern int pax_check_flags(unsigned long *);
65966+
65967+/* if tsk != current then task_lock must be held on it */
65968+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65969+static inline unsigned long pax_get_flags(struct task_struct *tsk)
65970+{
65971+ if (likely(tsk->mm))
65972+ return tsk->mm->pax_flags;
65973+ else
65974+ return 0UL;
65975+}
65976+
65977+/* if tsk != current then task_lock must be held on it */
65978+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
65979+{
65980+ if (likely(tsk->mm)) {
65981+ tsk->mm->pax_flags = flags;
65982+ return 0;
65983+ }
65984+ return -EINVAL;
65985+}
65986+#endif
65987+
65988+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65989+extern void pax_set_initial_flags(struct linux_binprm *bprm);
65990+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65991+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
65992+#endif
65993+
65994+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
65995+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
65996+extern void pax_report_refcount_overflow(struct pt_regs *regs);
65997+
65998 /* Future-safe accessor for struct task_struct's cpus_allowed. */
65999 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
66000
66001@@ -2092,7 +2195,9 @@ void yield(void);
66002 extern struct exec_domain default_exec_domain;
66003
66004 union thread_union {
66005+#ifndef CONFIG_X86
66006 struct thread_info thread_info;
66007+#endif
66008 unsigned long stack[THREAD_SIZE/sizeof(long)];
66009 };
66010
66011@@ -2125,6 +2230,7 @@ extern struct pid_namespace init_pid_ns;
66012 */
66013
66014 extern struct task_struct *find_task_by_vpid(pid_t nr);
66015+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
66016 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
66017 struct pid_namespace *ns);
66018
66019@@ -2281,7 +2387,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
66020 extern void exit_itimers(struct signal_struct *);
66021 extern void flush_itimer_signals(void);
66022
66023-extern void do_group_exit(int);
66024+extern __noreturn void do_group_exit(int);
66025
66026 extern void daemonize(const char *, ...);
66027 extern int allow_signal(int);
66028@@ -2485,9 +2591,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
66029
66030 #endif
66031
66032-static inline int object_is_on_stack(void *obj)
66033+static inline int object_starts_on_stack(void *obj)
66034 {
66035- void *stack = task_stack_page(current);
66036+ const void *stack = task_stack_page(current);
66037
66038 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
66039 }
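The sched.h hunks do three things: per-task grsecurity state is appended to task_struct and signal_struct, the two credential pointers are deliberately moved apart (so a single linear overflow can no longer overwrite real_cred and cred together), and the per-mm PaX flags plus their accessors are introduced. A short sketch of testing those MF_PAX_* bits; the flag values are the patch's own, the fake_mm struct is a stand-in for mm_struct:

#include <stdio.h>

#define MF_PAX_PAGEEXEC	0x01000000UL	/* paging-based non-exec pages */
#define MF_PAX_EMUTRAMP	0x02000000UL	/* emulate trampolines */
#define MF_PAX_MPROTECT	0x04000000UL	/* restrict mprotect() */
#define MF_PAX_RANDMMAP	0x08000000UL	/* randomize mmap() base */
#define MF_PAX_SEGMEXEC	0x20000000UL	/* segmentation-based non-exec */

struct fake_mm { unsigned long pax_flags; };

static void report(const struct fake_mm *mm)
{
	printf("PAGEEXEC:%c MPROTECT:%c RANDMMAP:%c\n",
	       mm->pax_flags & MF_PAX_PAGEEXEC ? 'y' : 'n',
	       mm->pax_flags & MF_PAX_MPROTECT ? 'y' : 'n',
	       mm->pax_flags & MF_PAX_RANDMMAP ? 'y' : 'n');
}

int main(void)
{
	struct fake_mm mm = { MF_PAX_PAGEEXEC | MF_PAX_RANDMMAP };
	report(&mm);
	return 0;
}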
66040diff --git a/include/linux/security.h b/include/linux/security.h
66041index 05e88bd..5cda002 100644
66042--- a/include/linux/security.h
66043+++ b/include/linux/security.h
66044@@ -26,6 +26,7 @@
66045 #include <linux/capability.h>
66046 #include <linux/slab.h>
66047 #include <linux/err.h>
66048+#include <linux/grsecurity.h>
66049
66050 struct linux_binprm;
66051 struct cred;
66052diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
66053index 68a04a3..866e6a1 100644
66054--- a/include/linux/seq_file.h
66055+++ b/include/linux/seq_file.h
66056@@ -26,6 +26,9 @@ struct seq_file {
66057 struct mutex lock;
66058 const struct seq_operations *op;
66059 int poll_event;
66060+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66061+ u64 exec_id;
66062+#endif
66063 #ifdef CONFIG_USER_NS
66064 struct user_namespace *user_ns;
66065 #endif
66066@@ -38,6 +41,7 @@ struct seq_operations {
66067 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
66068 int (*show) (struct seq_file *m, void *v);
66069 };
66070+typedef struct seq_operations __no_const seq_operations_no_const;
66071
66072 #define SEQ_SKIP 1
66073
66074diff --git a/include/linux/shm.h b/include/linux/shm.h
66075index bcf8a6a..4d0af77 100644
66076--- a/include/linux/shm.h
66077+++ b/include/linux/shm.h
66078@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
66079
66080 /* The task created the shm object. NULL if the task is dead. */
66081 struct task_struct *shm_creator;
66082+#ifdef CONFIG_GRKERNSEC
66083+ time_t shm_createtime;
66084+ pid_t shm_lapid;
66085+#endif
66086 };
66087
66088 /* shm_mode upper byte flags */
66089diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
66090index 6a2c34e..a1f320f 100644
66091--- a/include/linux/skbuff.h
66092+++ b/include/linux/skbuff.h
66093@@ -577,7 +577,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
66094 extern struct sk_buff *__alloc_skb(unsigned int size,
66095 gfp_t priority, int flags, int node);
66096 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
66097-static inline struct sk_buff *alloc_skb(unsigned int size,
66098+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
66099 gfp_t priority)
66100 {
66101 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
66102@@ -687,7 +687,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
66103 */
66104 static inline int skb_queue_empty(const struct sk_buff_head *list)
66105 {
66106- return list->next == (struct sk_buff *)list;
66107+ return list->next == (const struct sk_buff *)list;
66108 }
66109
66110 /**
66111@@ -700,7 +700,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
66112 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
66113 const struct sk_buff *skb)
66114 {
66115- return skb->next == (struct sk_buff *)list;
66116+ return skb->next == (const struct sk_buff *)list;
66117 }
66118
66119 /**
66120@@ -713,7 +713,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
66121 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
66122 const struct sk_buff *skb)
66123 {
66124- return skb->prev == (struct sk_buff *)list;
66125+ return skb->prev == (const struct sk_buff *)list;
66126 }
66127
66128 /**
66129@@ -1626,7 +1626,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
66130 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
66131 */
66132 #ifndef NET_SKB_PAD
66133-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
66134+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
66135 #endif
66136
66137 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
66138@@ -2204,7 +2204,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
66139 int noblock, int *err);
66140 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
66141 struct poll_table_struct *wait);
66142-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
66143+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
66144 int offset, struct iovec *to,
66145 int size);
66146 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
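Besides the const casts and __intentional_overflow() markers, the skbuff.h hunk changes NET_SKB_PAD from a bare 32 to _AC(32,UL): the kernel's max() is type-checked and refuses to compare a plain int against an unsigned long L1_CACHE_BYTES. A sketch of why the suffix-pasting _AC() macro fixes that (the real macro lives in include/uapi/linux/const.h, where it also drops the suffix for assembly):

#include <stdio.h>

#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)

#define L1_CACHE_BYTES	64UL	/* unsigned long on this config */

/* plain ternary max; the kernel's max() would additionally reject
 * operands of different types, which is exactly what _AC() avoids */
#define NET_SKB_PAD \
	(_AC(32, UL) > L1_CACHE_BYTES ? _AC(32, UL) : L1_CACHE_BYTES)

int main(void)
{
	printf("NET_SKB_PAD = %lu\n", (unsigned long)NET_SKB_PAD);
	return 0;
}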
66147diff --git a/include/linux/slab.h b/include/linux/slab.h
66148index 83d1a14..e23d723 100644
66149--- a/include/linux/slab.h
66150+++ b/include/linux/slab.h
66151@@ -11,12 +11,20 @@
66152
66153 #include <linux/gfp.h>
66154 #include <linux/types.h>
66155+#include <linux/err.h>
66156
66157 /*
66158 * Flags to pass to kmem_cache_create().
66159 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
66160 */
66161 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
66162+
66163+#ifdef CONFIG_PAX_USERCOPY_SLABS
66164+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
66165+#else
66166+#define SLAB_USERCOPY 0x00000000UL
66167+#endif
66168+
66169 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
66170 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
66171 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
66172@@ -87,10 +95,13 @@
66173 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
66174 * Both make kfree a no-op.
66175 */
66176-#define ZERO_SIZE_PTR ((void *)16)
66177+#define ZERO_SIZE_PTR \
66178+({ \
66179+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
66180+ (void *)(-MAX_ERRNO-1L); \
66181+})
66182
66183-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
66184- (unsigned long)ZERO_SIZE_PTR)
66185+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
66186
66187 /*
66188 * Common fields provided in kmem_cache by all slab allocators
66189@@ -110,7 +121,7 @@ struct kmem_cache {
66190 unsigned int align; /* Alignment as calculated */
66191 unsigned long flags; /* Active flags on the slab */
66192 const char *name; /* Slab name for sysfs */
66193- int refcount; /* Use counter */
66194+ atomic_t refcount; /* Use counter */
66195 void (*ctor)(void *); /* Called on object slot creation */
66196 struct list_head list; /* List of all slab caches on the system */
66197 };
66198@@ -185,6 +196,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
66199 void kfree(const void *);
66200 void kzfree(const void *);
66201 size_t ksize(const void *);
66202+const char *check_heap_object(const void *ptr, unsigned long n);
66203+bool is_usercopy_object(const void *ptr);
66204
66205 /*
66206 * Allocator specific definitions. These are mainly used to establish optimized
66207@@ -264,8 +277,18 @@ size_t ksize(const void *);
66208 * for general use, and so are not documented here. For a full list of
66209 * potential flags, always refer to linux/gfp.h.
66210 */
66211+
66212+extern void kmalloc_array_error(void)
66213+#if defined(CONFIG_GCOV_KERNEL) && defined(CONFIG_PAX_SIZE_OVERFLOW)
66214+__compiletime_warning("kmalloc_array called with swapped arguments?");
66215+#else
66216+__compiletime_error("kmalloc_array called with swapped arguments?");
66217+#endif
66218+
66219 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
66220 {
66221+ if (__builtin_constant_p(n) && !__builtin_constant_p(size))
66222+ kmalloc_array_error();
66223 if (size != 0 && n > SIZE_MAX / size)
66224 return NULL;
66225 return __kmalloc(n * size, flags);
66226@@ -323,7 +346,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
66227 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
66228 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
66229 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
66230-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
66231+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
66232 #define kmalloc_track_caller(size, flags) \
66233 __kmalloc_track_caller(size, flags, _RET_IP_)
66234 #else
66235@@ -343,7 +366,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
66236 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
66237 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
66238 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
66239-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
66240+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
66241 #define kmalloc_node_track_caller(size, flags, node) \
66242 __kmalloc_node_track_caller(size, flags, node, \
66243 _RET_IP_)
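Three related slab.h changes: ZERO_SIZE_PTR moves from (void *)16 to just below the ERR_PTR range, ZERO_OR_NULL_PTR() becomes a single unsigned comparison, and kmalloc_array() gains a compile-time diagnostic when its count and size arguments look swapped (a constant n with a variable size). The range trick is worth seeing in isolation; MAX_ERRNO below is the usual 4095 from linux/err.h:

#include <stdio.h>

#define MAX_ERRNO	4095UL
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))

/* one compare: x - 1 wraps NULL to ULONG_MAX, so this flags NULL,
 * ZERO_SIZE_PTR, and anything above it (the ERR_PTR range, which is
 * never a valid slab object either) */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int obj;

	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
	printf("&obj          -> %d\n", ZERO_OR_NULL_PTR(&obj));
	return 0;
}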
66244diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
66245index cc290f0..0ba60931 100644
66246--- a/include/linux/slab_def.h
66247+++ b/include/linux/slab_def.h
66248@@ -52,7 +52,7 @@ struct kmem_cache {
66249 /* 4) cache creation/removal */
66250 const char *name;
66251 struct list_head list;
66252- int refcount;
66253+ atomic_t refcount;
66254 int object_size;
66255 int align;
66256
66257@@ -68,10 +68,10 @@ struct kmem_cache {
66258 unsigned long node_allocs;
66259 unsigned long node_frees;
66260 unsigned long node_overflow;
66261- atomic_t allochit;
66262- atomic_t allocmiss;
66263- atomic_t freehit;
66264- atomic_t freemiss;
66265+ atomic_unchecked_t allochit;
66266+ atomic_unchecked_t allocmiss;
66267+ atomic_unchecked_t freehit;
66268+ atomic_unchecked_t freemiss;
66269
66270 /*
66271 * If debugging is enabled, then the allocator can add additional
66272@@ -104,11 +104,16 @@ struct cache_sizes {
66273 #ifdef CONFIG_ZONE_DMA
66274 struct kmem_cache *cs_dmacachep;
66275 #endif
66276+
66277+#ifdef CONFIG_PAX_USERCOPY_SLABS
66278+ struct kmem_cache *cs_usercopycachep;
66279+#endif
66280+
66281 };
66282 extern struct cache_sizes malloc_sizes[];
66283
66284 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66285-void *__kmalloc(size_t size, gfp_t flags);
66286+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
66287
66288 #ifdef CONFIG_TRACING
66289 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
66290@@ -145,6 +150,13 @@ found:
66291 cachep = malloc_sizes[i].cs_dmacachep;
66292 else
66293 #endif
66294+
66295+#ifdef CONFIG_PAX_USERCOPY_SLABS
66296+ if (flags & GFP_USERCOPY)
66297+ cachep = malloc_sizes[i].cs_usercopycachep;
66298+ else
66299+#endif
66300+
66301 cachep = malloc_sizes[i].cs_cachep;
66302
66303 ret = kmem_cache_alloc_trace(cachep, flags, size);
66304@@ -155,7 +167,7 @@ found:
66305 }
66306
66307 #ifdef CONFIG_NUMA
66308-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
66309+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66310 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66311
66312 #ifdef CONFIG_TRACING
66313@@ -198,6 +210,13 @@ found:
66314 cachep = malloc_sizes[i].cs_dmacachep;
66315 else
66316 #endif
66317+
66318+#ifdef CONFIG_PAX_USERCOPY_SLABS
66319+ if (flags & GFP_USERCOPY)
66320+ cachep = malloc_sizes[i].cs_usercopycachep;
66321+ else
66322+#endif
66323+
66324 cachep = malloc_sizes[i].cs_cachep;
66325
66326 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
66327diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
66328index f28e14a..7831211 100644
66329--- a/include/linux/slob_def.h
66330+++ b/include/linux/slob_def.h
66331@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
66332 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
66333 }
66334
66335-void *__kmalloc_node(size_t size, gfp_t flags, int node);
66336+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66337
66338 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
66339 {
66340@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66341 return __kmalloc_node(size, flags, NUMA_NO_NODE);
66342 }
66343
66344-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
66345+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
66346 {
66347 return kmalloc(size, flags);
66348 }
66349diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
66350index df448ad..b99e7f6 100644
66351--- a/include/linux/slub_def.h
66352+++ b/include/linux/slub_def.h
66353@@ -91,7 +91,7 @@ struct kmem_cache {
66354 struct kmem_cache_order_objects max;
66355 struct kmem_cache_order_objects min;
66356 gfp_t allocflags; /* gfp flags to use on each alloc */
66357- int refcount; /* Refcount for slab cache destroy */
66358+ atomic_t refcount; /* Refcount for slab cache destroy */
66359 void (*ctor)(void *);
66360 int inuse; /* Offset to metadata */
66361 int align; /* Alignment */
66362@@ -152,7 +152,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
66363 * Sorry that the following has to be that ugly but some versions of GCC
66364 * have trouble with constant propagation and loops.
66365 */
66366-static __always_inline int kmalloc_index(size_t size)
66367+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
66368 {
66369 if (!size)
66370 return 0;
66371@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
66372 }
66373
66374 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
66375-void *__kmalloc(size_t size, gfp_t flags);
66376+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
66377
66378 static __always_inline void *
66379 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
66380@@ -258,7 +258,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
66381 }
66382 #endif
66383
66384-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
66385+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
66386 {
66387 unsigned int order = get_order(size);
66388 return kmalloc_order_trace(size, flags, order);
66389@@ -283,7 +283,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
66390 }
66391
66392 #ifdef CONFIG_NUMA
66393-void *__kmalloc_node(size_t size, gfp_t flags, int node);
66394+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
66395 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
66396
66397 #ifdef CONFIG_TRACING
66398diff --git a/include/linux/sonet.h b/include/linux/sonet.h
66399index 680f9a3..f13aeb0 100644
66400--- a/include/linux/sonet.h
66401+++ b/include/linux/sonet.h
66402@@ -7,7 +7,7 @@
66403 #include <uapi/linux/sonet.h>
66404
66405 struct k_sonet_stats {
66406-#define __HANDLE_ITEM(i) atomic_t i
66407+#define __HANDLE_ITEM(i) atomic_unchecked_t i
66408 __SONET_ITEMS
66409 #undef __HANDLE_ITEM
66410 };
66411diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
66412index 34206b8..f019e06 100644
66413--- a/include/linux/sunrpc/clnt.h
66414+++ b/include/linux/sunrpc/clnt.h
66415@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
66416 {
66417 switch (sap->sa_family) {
66418 case AF_INET:
66419- return ntohs(((struct sockaddr_in *)sap)->sin_port);
66420+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
66421 case AF_INET6:
66422- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
66423+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
66424 }
66425 return 0;
66426 }
66427@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
66428 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
66429 const struct sockaddr *src)
66430 {
66431- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
66432+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
66433 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
66434
66435 dsin->sin_family = ssin->sin_family;
66436@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
66437 if (sa->sa_family != AF_INET6)
66438 return 0;
66439
66440- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
66441+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
66442 }
66443
66444 #endif /* __KERNEL__ */
66445diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
66446index 0b8e3e6..33e0a01 100644
66447--- a/include/linux/sunrpc/svc_rdma.h
66448+++ b/include/linux/sunrpc/svc_rdma.h
66449@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
66450 extern unsigned int svcrdma_max_requests;
66451 extern unsigned int svcrdma_max_req_size;
66452
66453-extern atomic_t rdma_stat_recv;
66454-extern atomic_t rdma_stat_read;
66455-extern atomic_t rdma_stat_write;
66456-extern atomic_t rdma_stat_sq_starve;
66457-extern atomic_t rdma_stat_rq_starve;
66458-extern atomic_t rdma_stat_rq_poll;
66459-extern atomic_t rdma_stat_rq_prod;
66460-extern atomic_t rdma_stat_sq_poll;
66461-extern atomic_t rdma_stat_sq_prod;
66462+extern atomic_unchecked_t rdma_stat_recv;
66463+extern atomic_unchecked_t rdma_stat_read;
66464+extern atomic_unchecked_t rdma_stat_write;
66465+extern atomic_unchecked_t rdma_stat_sq_starve;
66466+extern atomic_unchecked_t rdma_stat_rq_starve;
66467+extern atomic_unchecked_t rdma_stat_rq_poll;
66468+extern atomic_unchecked_t rdma_stat_rq_prod;
66469+extern atomic_unchecked_t rdma_stat_sq_poll;
66470+extern atomic_unchecked_t rdma_stat_sq_prod;
66471
66472 #define RPCRDMA_VERSION 1
66473
66474diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
66475index cd844a6..3ca3592 100644
66476--- a/include/linux/sysctl.h
66477+++ b/include/linux/sysctl.h
66478@@ -41,6 +41,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
66479
66480 extern int proc_dostring(struct ctl_table *, int,
66481 void __user *, size_t *, loff_t *);
66482+extern int proc_dostring_modpriv(struct ctl_table *, int,
66483+ void __user *, size_t *, loff_t *);
66484 extern int proc_dointvec(struct ctl_table *, int,
66485 void __user *, size_t *, loff_t *);
66486 extern int proc_dointvec_minmax(struct ctl_table *, int,
66487diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
66488index 7faf933..eb6f5e3 100644
66489--- a/include/linux/sysrq.h
66490+++ b/include/linux/sysrq.h
66491@@ -36,7 +36,7 @@ struct sysrq_key_op {
66492 char *help_msg;
66493 char *action_msg;
66494 int enable_mask;
66495-};
66496+} __do_const;
66497
66498 #ifdef CONFIG_MAGIC_SYSRQ
66499
66500diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
66501index ccc1899..b1aaceb 100644
66502--- a/include/linux/thread_info.h
66503+++ b/include/linux/thread_info.h
66504@@ -146,6 +146,15 @@ static inline bool test_and_clear_restore_sigmask(void)
66505 #error "no set_restore_sigmask() provided and default one won't work"
66506 #endif
66507
66508+extern void __check_object_size(const void *ptr, unsigned long n, bool to);
66509+static inline void check_object_size(const void *ptr, unsigned long n, bool to)
66510+{
66511+#ifndef CONFIG_PAX_USERCOPY_DEBUG
66512+ if (!__builtin_constant_p(n))
66513+#endif
66514+ __check_object_size(ptr, n, to);
66515+}
66516+
66517 #endif /* __KERNEL__ */
66518
66519 #endif /* _LINUX_THREAD_INFO_H */
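check_object_size() is the PaX USERCOPY entry point: unless CONFIG_PAX_USERCOPY_DEBUG forces full coverage, copies whose length is a compile-time constant skip the runtime object walk, since constant sizes are validated statically. A userspace sketch of that fast path (the stub __check_object_size() here is not the kernel routine, and the elision needs optimization enabled so __builtin_constant_p() can see through the inline):

#include <stdbool.h>
#include <stdio.h>

static void __check_object_size(const void *ptr, unsigned long n, bool to)
{
	(void)ptr; (void)to;
	printf("runtime check of %lu bytes\n", n);
}

static inline void check_object_size(const void *ptr, unsigned long n, bool to)
{
	if (!__builtin_constant_p(n))	/* same GCC builtin as the patch */
		__check_object_size(ptr, n, to);
}

int main(void)
{
	char buf[64];
	volatile unsigned long len = 16;

	check_object_size(buf, sizeof(buf), false);	/* constant: elided */
	check_object_size(buf, len, true);		/* variable: checked */
	return 0;
}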
66520diff --git a/include/linux/tty.h b/include/linux/tty.h
66521index f0b4eb4..1c4854e 100644
66522--- a/include/linux/tty.h
66523+++ b/include/linux/tty.h
66524@@ -192,7 +192,7 @@ struct tty_port {
66525 const struct tty_port_operations *ops; /* Port operations */
66526 spinlock_t lock; /* Lock protecting tty field */
66527 int blocked_open; /* Waiting to open */
66528- int count; /* Usage count */
66529+ atomic_t count; /* Usage count */
66530 wait_queue_head_t open_wait; /* Open waiters */
66531 wait_queue_head_t close_wait; /* Close waiters */
66532 wait_queue_head_t delta_msr_wait; /* Modem status change */
66533@@ -513,7 +513,7 @@ extern int tty_port_open(struct tty_port *port,
66534 struct tty_struct *tty, struct file *filp);
66535 static inline int tty_port_users(struct tty_port *port)
66536 {
66537- return port->count + port->blocked_open;
66538+ return atomic_read(&port->count) + port->blocked_open;
66539 }
66540
66541 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
66542diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
66543index dd976cf..e272742 100644
66544--- a/include/linux/tty_driver.h
66545+++ b/include/linux/tty_driver.h
66546@@ -284,7 +284,7 @@ struct tty_operations {
66547 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
66548 #endif
66549 const struct file_operations *proc_fops;
66550-};
66551+} __do_const;
66552
66553 struct tty_driver {
66554 int magic; /* magic number for this structure */
66555diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
66556index fb79dd8d..07d4773 100644
66557--- a/include/linux/tty_ldisc.h
66558+++ b/include/linux/tty_ldisc.h
66559@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
66560
66561 struct module *owner;
66562
66563- int refcount;
66564+ atomic_t refcount;
66565 };
66566
66567 struct tty_ldisc {
66568diff --git a/include/linux/types.h b/include/linux/types.h
66569index 1cc0e4b..0d50edf 100644
66570--- a/include/linux/types.h
66571+++ b/include/linux/types.h
66572@@ -175,10 +175,26 @@ typedef struct {
66573 int counter;
66574 } atomic_t;
66575
66576+#ifdef CONFIG_PAX_REFCOUNT
66577+typedef struct {
66578+ int counter;
66579+} atomic_unchecked_t;
66580+#else
66581+typedef atomic_t atomic_unchecked_t;
66582+#endif
66583+
66584 #ifdef CONFIG_64BIT
66585 typedef struct {
66586 long counter;
66587 } atomic64_t;
66588+
66589+#ifdef CONFIG_PAX_REFCOUNT
66590+typedef struct {
66591+ long counter;
66592+} atomic64_unchecked_t;
66593+#else
66594+typedef atomic64_t atomic64_unchecked_t;
66595+#endif
66596 #endif
66597
66598 struct list_head {
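atomic_unchecked_t is the opt-out type for CONFIG_PAX_REFCOUNT: ordinary atomic_t operations gain an overflow trap, while counters that are allowed to wrap (statistics, generation numbers -- see the many atomic_unchecked_t conversions throughout this patch) use the _unchecked variants. Making it a distinct struct rather than an alias is what keeps the two from being mixed accidentally; a stand-in illustration:

typedef struct { int counter; } my_atomic_t;		/* checked under PaX */
typedef struct { int counter; } my_atomic_unchecked_t;	/* may wrap freely */

static void my_atomic_inc(my_atomic_t *v)		      { v->counter++; }
static void my_atomic_inc_unchecked(my_atomic_unchecked_t *v) { v->counter++; }

int main(void)
{
	my_atomic_t refs = { 1 };
	my_atomic_unchecked_t stats = { 0 };

	my_atomic_inc(&refs);			/* would trap on overflow */
	my_atomic_inc_unchecked(&stats);	/* plain wrapping counter */
	/* my_atomic_inc(&stats) draws an incompatible-pointer diagnostic:
	 * opting out of overflow checking has to be explicit */
	return refs.counter + stats.counter;
}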
66599diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
66600index 5ca0951..ab496a5 100644
66601--- a/include/linux/uaccess.h
66602+++ b/include/linux/uaccess.h
66603@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
66604 long ret; \
66605 mm_segment_t old_fs = get_fs(); \
66606 \
66607- set_fs(KERNEL_DS); \
66608 pagefault_disable(); \
66609- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
66610- pagefault_enable(); \
66611+ set_fs(KERNEL_DS); \
66612+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
66613 set_fs(old_fs); \
66614+ pagefault_enable(); \
66615 ret; \
66616 })
66617
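The reordering in this macro nests the raised address limit strictly inside the pagefault-disabled window: set_fs(KERNEL_DS) now happens after pagefault_disable() and is undone before pagefault_enable(), so no fault path can run while the limit is widened. The __force_user cast annotation is for sparse. A trivial stand-in showing the nesting order only:

#include <stdio.h>

static void pagefault_disable(void) { puts("pagefault_disable()"); }
static void pagefault_enable(void)  { puts("pagefault_enable()"); }
static void set_fs(const char *lim) { printf("set_fs(%s)\n", lim); }

int main(void)
{
	pagefault_disable();
	set_fs("KERNEL_DS");
	/* ... __copy_from_user_inatomic() would run here ... */
	set_fs("old_fs");
	pagefault_enable();
	return 0;
}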
66618diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
66619index 99c1b4d..bb94261 100644
66620--- a/include/linux/unaligned/access_ok.h
66621+++ b/include/linux/unaligned/access_ok.h
66622@@ -6,32 +6,32 @@
66623
66624 static inline u16 get_unaligned_le16(const void *p)
66625 {
66626- return le16_to_cpup((__le16 *)p);
66627+ return le16_to_cpup((const __le16 *)p);
66628 }
66629
66630 static inline u32 get_unaligned_le32(const void *p)
66631 {
66632- return le32_to_cpup((__le32 *)p);
66633+ return le32_to_cpup((const __le32 *)p);
66634 }
66635
66636 static inline u64 get_unaligned_le64(const void *p)
66637 {
66638- return le64_to_cpup((__le64 *)p);
66639+ return le64_to_cpup((const __le64 *)p);
66640 }
66641
66642 static inline u16 get_unaligned_be16(const void *p)
66643 {
66644- return be16_to_cpup((__be16 *)p);
66645+ return be16_to_cpup((const __be16 *)p);
66646 }
66647
66648 static inline u32 get_unaligned_be32(const void *p)
66649 {
66650- return be32_to_cpup((__be32 *)p);
66651+ return be32_to_cpup((const __be32 *)p);
66652 }
66653
66654 static inline u64 get_unaligned_be64(const void *p)
66655 {
66656- return be64_to_cpup((__be64 *)p);
66657+ return be64_to_cpup((const __be64 *)p);
66658 }
66659
66660 static inline void put_unaligned_le16(u16 val, void *p)
66661diff --git a/include/linux/usb.h b/include/linux/usb.h
66662index 10278d1..e21ec3c 100644
66663--- a/include/linux/usb.h
66664+++ b/include/linux/usb.h
66665@@ -551,7 +551,7 @@ struct usb_device {
66666 int maxchild;
66667
66668 u32 quirks;
66669- atomic_t urbnum;
66670+ atomic_unchecked_t urbnum;
66671
66672 unsigned long active_duration;
66673
66674diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
66675index c5d36c6..108f4f9 100644
66676--- a/include/linux/usb/renesas_usbhs.h
66677+++ b/include/linux/usb/renesas_usbhs.h
66678@@ -39,7 +39,7 @@ enum {
66679 */
66680 struct renesas_usbhs_driver_callback {
66681 int (*notify_hotplug)(struct platform_device *pdev);
66682-};
66683+} __no_const;
66684
66685 /*
66686 * callback functions for platform
66687diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
66688index ddbbb7d..9134611 100644
66689--- a/include/linux/usb/usbnet.h
66690+++ b/include/linux/usb/usbnet.h
66691@@ -33,6 +33,7 @@ struct usbnet {
66692 wait_queue_head_t *wait;
66693 struct mutex phy_mutex;
66694 unsigned char suspend_count;
66695+ unsigned char pkt_cnt, pkt_err;
66696
66697 /* i/o info: pipes etc */
66698 unsigned in, out;
66699@@ -69,6 +70,8 @@ struct usbnet {
66700 # define EVENT_DEV_ASLEEP 6
66701 # define EVENT_DEV_OPEN 7
66702 # define EVENT_DEVICE_REPORT_IDLE 8
66703+# define EVENT_NO_RUNTIME_PM 9
66704+# define EVENT_RX_KILL 10
66705 };
66706
66707 static inline struct usb_driver *driver_of(struct usb_interface *intf)
66708diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
66709index 6f8fbcf..8259001 100644
66710--- a/include/linux/vermagic.h
66711+++ b/include/linux/vermagic.h
66712@@ -25,9 +25,35 @@
66713 #define MODULE_ARCH_VERMAGIC ""
66714 #endif
66715
66716+#ifdef CONFIG_PAX_REFCOUNT
66717+#define MODULE_PAX_REFCOUNT "REFCOUNT "
66718+#else
66719+#define MODULE_PAX_REFCOUNT ""
66720+#endif
66721+
66722+#ifdef CONSTIFY_PLUGIN
66723+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
66724+#else
66725+#define MODULE_CONSTIFY_PLUGIN ""
66726+#endif
66727+
66728+#ifdef STACKLEAK_PLUGIN
66729+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
66730+#else
66731+#define MODULE_STACKLEAK_PLUGIN ""
66732+#endif
66733+
66734+#ifdef CONFIG_GRKERNSEC
66735+#define MODULE_GRSEC "GRSEC "
66736+#else
66737+#define MODULE_GRSEC ""
66738+#endif
66739+
66740 #define VERMAGIC_STRING \
66741 UTS_RELEASE " " \
66742 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
66743 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
66744- MODULE_ARCH_VERMAGIC
66745+ MODULE_ARCH_VERMAGIC \
66746+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
66747+ MODULE_GRSEC
66748
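The extra vermagic fragments make feature mismatches load-time failures: a module built without, say, CONFIG_PAX_REFCOUNT carries a different vermagic string than the running kernel expects and is refused by the compare in kernel/module.c. A sketch of how the fragments concatenate; UTS_RELEASE below is a made-up release string:

#include <stdio.h>

#define UTS_RELEASE		"3.7.9-grsec"	/* hypothetical */
#define MODULE_PAX_REFCOUNT	"REFCOUNT "
#define MODULE_CONSTIFY_PLUGIN	"CONSTIFY_PLUGIN "
#define MODULE_STACKLEAK_PLUGIN	"STACKLEAK_PLUGIN "
#define MODULE_GRSEC		"GRSEC "

#define VERMAGIC_STRING \
	UTS_RELEASE " " \
	MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN \
	MODULE_STACKLEAK_PLUGIN MODULE_GRSEC

int main(void)
{
	puts(VERMAGIC_STRING);	/* "3.7.9-grsec REFCOUNT CONSTIFY_PLUGIN ..." */
	return 0;
}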
66749diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
66750index 6071e91..ca6a489 100644
66751--- a/include/linux/vmalloc.h
66752+++ b/include/linux/vmalloc.h
66753@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
66754 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
66755 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
66756 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
66757+
66758+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66759+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
66760+#endif
66761+
66762 /* bits [20..32] reserved for arch specific ioremap internals */
66763
66764 /*
66765@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
66766 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
66767 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
66768 unsigned long start, unsigned long end, gfp_t gfp_mask,
66769- pgprot_t prot, int node, const void *caller);
66770+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
66771 extern void vfree(const void *addr);
66772
66773 extern void *vmap(struct page **pages, unsigned int count,
66774@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
66775 extern void free_vm_area(struct vm_struct *area);
66776
66777 /* for /dev/kmem */
66778-extern long vread(char *buf, char *addr, unsigned long count);
66779-extern long vwrite(char *buf, char *addr, unsigned long count);
66780+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
66781+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
66782
66783 /*
66784 * Internals. Dont't use..
66785diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
66786index 92a86b2..1d9eb3c 100644
66787--- a/include/linux/vmstat.h
66788+++ b/include/linux/vmstat.h
66789@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
66790 /*
66791 * Zone based page accounting with per cpu differentials.
66792 */
66793-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66794+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66795
66796 static inline void zone_page_state_add(long x, struct zone *zone,
66797 enum zone_stat_item item)
66798 {
66799- atomic_long_add(x, &zone->vm_stat[item]);
66800- atomic_long_add(x, &vm_stat[item]);
66801+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
66802+ atomic_long_add_unchecked(x, &vm_stat[item]);
66803 }
66804
66805 static inline unsigned long global_page_state(enum zone_stat_item item)
66806 {
66807- long x = atomic_long_read(&vm_stat[item]);
66808+ long x = atomic_long_read_unchecked(&vm_stat[item]);
66809 #ifdef CONFIG_SMP
66810 if (x < 0)
66811 x = 0;
66812@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
66813 static inline unsigned long zone_page_state(struct zone *zone,
66814 enum zone_stat_item item)
66815 {
66816- long x = atomic_long_read(&zone->vm_stat[item]);
66817+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66818 #ifdef CONFIG_SMP
66819 if (x < 0)
66820 x = 0;
66821@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
66822 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
66823 enum zone_stat_item item)
66824 {
66825- long x = atomic_long_read(&zone->vm_stat[item]);
66826+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
66827
66828 #ifdef CONFIG_SMP
66829 int cpu;
66830@@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
66831
66832 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
66833 {
66834- atomic_long_inc(&zone->vm_stat[item]);
66835- atomic_long_inc(&vm_stat[item]);
66836+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
66837+ atomic_long_inc_unchecked(&vm_stat[item]);
66838 }
66839
66840 static inline void __inc_zone_page_state(struct page *page,
66841@@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
66842
66843 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
66844 {
66845- atomic_long_dec(&zone->vm_stat[item]);
66846- atomic_long_dec(&vm_stat[item]);
66847+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
66848+ atomic_long_dec_unchecked(&vm_stat[item]);
66849 }
66850
66851 static inline void __dec_zone_page_state(struct page *page,
66852diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
66853index 95d1c91..6798cca 100644
66854--- a/include/media/v4l2-dev.h
66855+++ b/include/media/v4l2-dev.h
66856@@ -76,7 +76,7 @@ struct v4l2_file_operations {
66857 int (*mmap) (struct file *, struct vm_area_struct *);
66858 int (*open) (struct file *);
66859 int (*release) (struct file *);
66860-};
66861+} __do_const;
66862
66863 /*
66864 * Newer version of video_device, handled by videodev2.c
66865diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
66866index e48b571..7e40de4 100644
66867--- a/include/media/v4l2-ioctl.h
66868+++ b/include/media/v4l2-ioctl.h
66869@@ -282,7 +282,6 @@ struct v4l2_ioctl_ops {
66870 bool valid_prio, int cmd, void *arg);
66871 };
66872
66873-
66874 /* v4l debugging and diagnostics */
66875
66876 /* Debug bitmask flags to be used on V4L2 */
66877diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
66878index 9e5425b..8136ffc 100644
66879--- a/include/net/caif/cfctrl.h
66880+++ b/include/net/caif/cfctrl.h
66881@@ -52,7 +52,7 @@ struct cfctrl_rsp {
66882 void (*radioset_rsp)(void);
66883 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
66884 struct cflayer *client_layer);
66885-};
66886+} __no_const;
66887
66888 /* Link Setup Parameters for CAIF-Links. */
66889 struct cfctrl_link_param {
66890@@ -101,8 +101,8 @@ struct cfctrl_request_info {
66891 struct cfctrl {
66892 struct cfsrvl serv;
66893 struct cfctrl_rsp res;
66894- atomic_t req_seq_no;
66895- atomic_t rsp_seq_no;
66896+ atomic_unchecked_t req_seq_no;
66897+ atomic_unchecked_t rsp_seq_no;
66898 struct list_head list;
66899 /* Protects from simultaneous access to first_req list */
66900 spinlock_t info_list_lock;
66901diff --git a/include/net/flow.h b/include/net/flow.h
66902index 628e11b..4c475df 100644
66903--- a/include/net/flow.h
66904+++ b/include/net/flow.h
66905@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
66906
66907 extern void flow_cache_flush(void);
66908 extern void flow_cache_flush_deferred(void);
66909-extern atomic_t flow_cache_genid;
66910+extern atomic_unchecked_t flow_cache_genid;
66911
66912 #endif
66913diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
66914index e5062c9..48a9a4b 100644
66915--- a/include/net/gro_cells.h
66916+++ b/include/net/gro_cells.h
66917@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
66918 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
66919
66920 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
66921- atomic_long_inc(&dev->rx_dropped);
66922+ atomic_long_inc_unchecked(&dev->rx_dropped);
66923 kfree_skb(skb);
66924 return;
66925 }
66926@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
66927 int i;
66928
66929 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
66930- gcells->cells = kcalloc(sizeof(struct gro_cell),
66931- gcells->gro_cells_mask + 1,
66932+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
66933+ sizeof(struct gro_cell),
66934 GFP_KERNEL);
66935 if (!gcells->cells)
66936 return -ENOMEM;
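The gro_cells_init() hunk fixes swapped kcalloc() arguments: the signature is kcalloc(n, size, flags), element count first. The byte total is the same either way, but keeping the count in the first parameter is what the overflow guard (and the size_overflow instrumentation elsewhere in this patch) is written against. A userspace model of that guard:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* same n * size overflow check kcalloc()/kmalloc_array() perform */
static void *kcalloc_model(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return calloc(n, size);
}

int main(void)
{
	struct cell { char pad[64]; };
	/* count first, element size second -- the corrected order */
	void *cells = kcalloc_model(8, sizeof(struct cell));

	printf("allocation %s\n", cells ? "ok" : "failed");
	free(cells);
	return 0;
}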
66937diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
66938index 1832927..ce39aea 100644
66939--- a/include/net/inet_connection_sock.h
66940+++ b/include/net/inet_connection_sock.h
66941@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
66942 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
66943 int (*bind_conflict)(const struct sock *sk,
66944 const struct inet_bind_bucket *tb, bool relax);
66945-};
66946+} __do_const;
66947
66948 /** inet_connection_sock - INET connection oriented sock
66949 *
66950diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
66951index 53f464d..ba76aaa 100644
66952--- a/include/net/inetpeer.h
66953+++ b/include/net/inetpeer.h
66954@@ -47,8 +47,8 @@ struct inet_peer {
66955 */
66956 union {
66957 struct {
66958- atomic_t rid; /* Frag reception counter */
66959- atomic_t ip_id_count; /* IP ID for the next packet */
66960+ atomic_unchecked_t rid; /* Frag reception counter */
66961+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
66962 };
66963 struct rcu_head rcu;
66964 struct inet_peer *gc_next;
66965@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
66966 more++;
66967 inet_peer_refcheck(p);
66968 do {
66969- old = atomic_read(&p->ip_id_count);
66970+ old = atomic_read_unchecked(&p->ip_id_count);
66971 new = old + more;
66972 if (!new)
66973 new = 1;
66974- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
66975+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
66976 return new;
66977 }
66978
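inet_getid() is a lock-free read-modify-write: load, compute, and retry the cmpxchg until no other CPU raced in, skipping the reserved ID 0. IP IDs are meant to wrap, which is why the patch moves ip_id_count to the _unchecked atomics that bypass the PaX overflow trap. A C11 model of the same loop, with atomic_compare_exchange_weak() standing in for atomic_cmpxchg_unchecked():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int ip_id_count;

static int inet_getid_model(int more)
{
	int old, new;

	more++;
	do {
		old = atomic_load(&ip_id_count);
		new = old + more;
		if (!new)
			new = 1;	/* 0 is reserved, skip it */
	} while (!atomic_compare_exchange_weak(&ip_id_count, &old, new));
	return new;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("id = %d\n", inet_getid_model(0));
	return 0;
}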
66979diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
66980index 9497be1..5a4fafe 100644
66981--- a/include/net/ip_fib.h
66982+++ b/include/net/ip_fib.h
66983@@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
66984
66985 #define FIB_RES_SADDR(net, res) \
66986 ((FIB_RES_NH(res).nh_saddr_genid == \
66987- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
66988+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
66989 FIB_RES_NH(res).nh_saddr : \
66990 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
66991 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
66992diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
66993index ee75ccd..2cc2b95 100644
66994--- a/include/net/ip_vs.h
66995+++ b/include/net/ip_vs.h
66996@@ -510,7 +510,7 @@ struct ip_vs_conn {
66997 struct ip_vs_conn *control; /* Master control connection */
66998 atomic_t n_control; /* Number of controlled ones */
66999 struct ip_vs_dest *dest; /* real server */
67000- atomic_t in_pkts; /* incoming packet counter */
67001+ atomic_unchecked_t in_pkts; /* incoming packet counter */
67002
67003 /* packet transmitter for different forwarding methods. If it
67004 mangles the packet, it must return NF_DROP or better NF_STOLEN,
67005@@ -648,7 +648,7 @@ struct ip_vs_dest {
67006 __be16 port; /* port number of the server */
67007 union nf_inet_addr addr; /* IP address of the server */
67008 volatile unsigned int flags; /* dest status flags */
67009- atomic_t conn_flags; /* flags to copy to conn */
67010+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
67011 atomic_t weight; /* server weight */
67012
67013 atomic_t refcnt; /* reference counter */
67014diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
67015index 80ffde3..968b0f4 100644
67016--- a/include/net/irda/ircomm_tty.h
67017+++ b/include/net/irda/ircomm_tty.h
67018@@ -35,6 +35,7 @@
67019 #include <linux/termios.h>
67020 #include <linux/timer.h>
67021 #include <linux/tty.h> /* struct tty_struct */
67022+#include <asm/local.h>
67023
67024 #include <net/irda/irias_object.h>
67025 #include <net/irda/ircomm_core.h>
67026diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
67027index cc7c197..9f2da2a 100644
67028--- a/include/net/iucv/af_iucv.h
67029+++ b/include/net/iucv/af_iucv.h
67030@@ -141,7 +141,7 @@ struct iucv_sock {
67031 struct iucv_sock_list {
67032 struct hlist_head head;
67033 rwlock_t lock;
67034- atomic_t autobind_name;
67035+ atomic_unchecked_t autobind_name;
67036 };
67037
67038 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
67039diff --git a/include/net/neighbour.h b/include/net/neighbour.h
67040index 0dab173..1b76af0 100644
67041--- a/include/net/neighbour.h
67042+++ b/include/net/neighbour.h
67043@@ -123,7 +123,7 @@ struct neigh_ops {
67044 void (*error_report)(struct neighbour *, struct sk_buff *);
67045 int (*output)(struct neighbour *, struct sk_buff *);
67046 int (*connected_output)(struct neighbour *, struct sk_buff *);
67047-};
67048+} __do_const;
67049
67050 struct pneigh_entry {
67051 struct pneigh_entry *next;
67052diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
67053index 95e6466..251016d 100644
67054--- a/include/net/net_namespace.h
67055+++ b/include/net/net_namespace.h
67056@@ -110,7 +110,7 @@ struct net {
67057 #endif
67058 struct netns_ipvs *ipvs;
67059 struct sock *diag_nlsk;
67060- atomic_t rt_genid;
67061+ atomic_unchecked_t rt_genid;
67062 };
67063
67064 /*
67065@@ -320,12 +320,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
67066
67067 static inline int rt_genid(struct net *net)
67068 {
67069- return atomic_read(&net->rt_genid);
67070+ return atomic_read_unchecked(&net->rt_genid);
67071 }
67072
67073 static inline void rt_genid_bump(struct net *net)
67074 {
67075- atomic_inc(&net->rt_genid);
67076+ atomic_inc_unchecked(&net->rt_genid);
67077 }
67078
67079 #endif /* __NET_NET_NAMESPACE_H */
67080diff --git a/include/net/netdma.h b/include/net/netdma.h
67081index 8ba8ce2..99b7fff 100644
67082--- a/include/net/netdma.h
67083+++ b/include/net/netdma.h
67084@@ -24,7 +24,7 @@
67085 #include <linux/dmaengine.h>
67086 #include <linux/skbuff.h>
67087
67088-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
67089+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
67090 struct sk_buff *skb, int offset, struct iovec *to,
67091 size_t len, struct dma_pinned_list *pinned_list);
67092
67093diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
67094index 252fd10..aa1421f 100644
67095--- a/include/net/netfilter/nf_queue.h
67096+++ b/include/net/netfilter/nf_queue.h
67097@@ -22,7 +22,7 @@ struct nf_queue_handler {
67098 int (*outfn)(struct nf_queue_entry *entry,
67099 unsigned int queuenum);
67100 char *name;
67101-};
67102+} __do_const;
67103
67104 extern int nf_register_queue_handler(u_int8_t pf,
67105 const struct nf_queue_handler *qh);
67106diff --git a/include/net/netlink.h b/include/net/netlink.h
67107index 9690b0f..87aded7 100644
67108--- a/include/net/netlink.h
67109+++ b/include/net/netlink.h
67110@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
67111 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
67112 {
67113 if (mark)
67114- skb_trim(skb, (unsigned char *) mark - skb->data);
67115+ skb_trim(skb, (const unsigned char *) mark - skb->data);
67116 }
67117
67118 /**
67119diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
67120index 2ae2b83..dbdc85e 100644
67121--- a/include/net/netns/ipv4.h
67122+++ b/include/net/netns/ipv4.h
67123@@ -64,7 +64,7 @@ struct netns_ipv4 {
67124 kgid_t sysctl_ping_group_range[2];
67125 long sysctl_tcp_mem[3];
67126
67127- atomic_t dev_addr_genid;
67128+ atomic_unchecked_t dev_addr_genid;
67129
67130 #ifdef CONFIG_IP_MROUTE
67131 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
67132diff --git a/include/net/protocol.h b/include/net/protocol.h
67133index 929528c..c84d4f6 100644
67134--- a/include/net/protocol.h
67135+++ b/include/net/protocol.h
67136@@ -48,7 +48,7 @@ struct net_protocol {
67137 int (*gro_complete)(struct sk_buff *skb);
67138 unsigned int no_policy:1,
67139 netns_ok:1;
67140-};
67141+} __do_const;
67142
67143 #if IS_ENABLED(CONFIG_IPV6)
67144 struct inet6_protocol {
67145@@ -69,7 +69,7 @@ struct inet6_protocol {
67146 int (*gro_complete)(struct sk_buff *skb);
67147
67148 unsigned int flags; /* INET6_PROTO_xxx */
67149-};
67150+} __do_const;
67151
67152 #define INET6_PROTO_NOPOLICY 0x1
67153 #define INET6_PROTO_FINAL 0x2
67154diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
67155index 9c6414f..fbd0524 100644
67156--- a/include/net/sctp/sctp.h
67157+++ b/include/net/sctp/sctp.h
67158@@ -318,9 +318,9 @@ do { \
67159
67160 #else /* SCTP_DEBUG */
67161
67162-#define SCTP_DEBUG_PRINTK(whatever...)
67163-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
67164-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
67165+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
67166+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
67167+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
67168 #define SCTP_ENABLE_DEBUG
67169 #define SCTP_DISABLE_DEBUG
67170 #define SCTP_ASSERT(expr, str, func)
67171diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
67172index 64158aa..b65533c 100644
67173--- a/include/net/sctp/structs.h
67174+++ b/include/net/sctp/structs.h
67175@@ -496,7 +496,7 @@ struct sctp_af {
67176 int sockaddr_len;
67177 sa_family_t sa_family;
67178 struct list_head list;
67179-};
67180+} __do_const;
67181
67182 struct sctp_af *sctp_get_af_specific(sa_family_t);
67183 int sctp_register_af(struct sctp_af *);
67184@@ -516,7 +516,7 @@ struct sctp_pf {
67185 struct sctp_association *asoc);
67186 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
67187 struct sctp_af *af;
67188-};
67189+} __do_const;
67190
67191
67192 /* Structure to track chunk fragments that have been acked, but peer
67193diff --git a/include/net/sock.h b/include/net/sock.h
67194index c945fba..e162e56 100644
67195--- a/include/net/sock.h
67196+++ b/include/net/sock.h
67197@@ -304,7 +304,7 @@ struct sock {
67198 #ifdef CONFIG_RPS
67199 __u32 sk_rxhash;
67200 #endif
67201- atomic_t sk_drops;
67202+ atomic_unchecked_t sk_drops;
67203 int sk_rcvbuf;
67204
67205 struct sk_filter __rcu *sk_filter;
67206@@ -1763,7 +1763,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
67207 }
67208
67209 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
67210- char __user *from, char *to,
67211+ char __user *from, unsigned char *to,
67212 int copy, int offset)
67213 {
67214 if (skb->ip_summed == CHECKSUM_NONE) {
67215@@ -2022,7 +2022,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
67216 }
67217 }
67218
67219-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
67220+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
67221
67222 /**
67223 * sk_page_frag - return an appropriate page_frag
67224diff --git a/include/net/tcp.h b/include/net/tcp.h
67225index 4af45e3..af97861 100644
67226--- a/include/net/tcp.h
67227+++ b/include/net/tcp.h
67228@@ -531,7 +531,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
67229 extern void tcp_xmit_retransmit_queue(struct sock *);
67230 extern void tcp_simple_retransmit(struct sock *);
67231 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
67232-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
67233+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
67234
67235 extern void tcp_send_probe0(struct sock *);
67236 extern void tcp_send_partial(struct sock *);
67237@@ -702,8 +702,8 @@ struct tcp_skb_cb {
67238 struct inet6_skb_parm h6;
67239 #endif
67240 } header; /* For incoming frames */
67241- __u32 seq; /* Starting sequence number */
67242- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
67243+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
67244+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
67245 __u32 when; /* used to compute rtt's */
67246 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
67247
67248@@ -717,7 +717,7 @@ struct tcp_skb_cb {
67249
67250 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
67251 /* 1 byte hole */
67252- __u32 ack_seq; /* Sequence number ACK'd */
67253+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
67254 };
67255
67256 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
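
The __intentional_overflow(n) markers added above are annotations for the PaX size_overflow gcc plugin: argument n (or the return value, when n is 0) is exempted from the plugin's integer-overflow instrumentation, since TCP sequence-number arithmetic is modular by design. As a minimal sketch of how such a marker stays harmless on a build without the plugin — the real definition is supplied by the patch's compiler headers, so treat this spelling as an assumption:

#ifndef __intentional_overflow
/* no size_overflow plugin available: the marker expands to nothing */
#define __intentional_overflow(...)
#endif
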
67257diff --git a/include/net/xfrm.h b/include/net/xfrm.h
67258index 63445ed..74ef61d 100644
67259--- a/include/net/xfrm.h
67260+++ b/include/net/xfrm.h
67261@@ -423,7 +423,7 @@ struct xfrm_mode {
67262 struct module *owner;
67263 unsigned int encap;
67264 int flags;
67265-};
67266+} __do_const;
67267
67268 /* Flags for xfrm_mode. */
67269 enum {
67270@@ -514,7 +514,7 @@ struct xfrm_policy {
67271 struct timer_list timer;
67272
67273 struct flow_cache_object flo;
67274- atomic_t genid;
67275+ atomic_unchecked_t genid;
67276 u32 priority;
67277 u32 index;
67278 struct xfrm_mark mark;
67279diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
67280index 1a046b1..ee0bef0 100644
67281--- a/include/rdma/iw_cm.h
67282+++ b/include/rdma/iw_cm.h
67283@@ -122,7 +122,7 @@ struct iw_cm_verbs {
67284 int backlog);
67285
67286 int (*destroy_listen)(struct iw_cm_id *cm_id);
67287-};
67288+} __no_const;
67289
67290 /**
67291 * iw_create_cm_id - Create an IW CM identifier.
67292diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
67293index 399162b..b337f1a 100644
67294--- a/include/scsi/libfc.h
67295+++ b/include/scsi/libfc.h
67296@@ -762,6 +762,7 @@ struct libfc_function_template {
67297 */
67298 void (*disc_stop_final) (struct fc_lport *);
67299 };
67300+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
67301
67302 /**
67303 * struct fc_disc - Discovery context
67304@@ -866,7 +867,7 @@ struct fc_lport {
67305 struct fc_vport *vport;
67306
67307 /* Operational Information */
67308- struct libfc_function_template tt;
67309+ libfc_function_template_no_const tt;
67310 u8 link_up;
67311 u8 qfull;
67312 enum fc_lport_state state;
67313diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
67314index 55367b0..d97bd2a 100644
67315--- a/include/scsi/scsi_device.h
67316+++ b/include/scsi/scsi_device.h
67317@@ -169,9 +169,9 @@ struct scsi_device {
67318 unsigned int max_device_blocked; /* what device_blocked counts down from */
67319 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
67320
67321- atomic_t iorequest_cnt;
67322- atomic_t iodone_cnt;
67323- atomic_t ioerr_cnt;
67324+ atomic_unchecked_t iorequest_cnt;
67325+ atomic_unchecked_t iodone_cnt;
67326+ atomic_unchecked_t ioerr_cnt;
67327
67328 struct device sdev_gendev,
67329 sdev_dev;
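
The atomic_t -> atomic_unchecked_t conversions here (and throughout the patch) are the PAX_REFCOUNT opt-out: with that feature enabled, the ordinary atomic operations trap on overflow to catch reference-count bugs, so statistics counters that may legitimately wrap are moved to an unchecked variant that keeps plain wraparound semantics. Roughly, the type looks like the following sketch; the genuine definitions are per-architecture and added elsewhere in the patch:

/* same layout as atomic_t, but its helpers carry no overflow trap */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}
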
67330diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
67331index b797e8f..8e2c3aa 100644
67332--- a/include/scsi/scsi_transport_fc.h
67333+++ b/include/scsi/scsi_transport_fc.h
67334@@ -751,7 +751,8 @@ struct fc_function_template {
67335 unsigned long show_host_system_hostname:1;
67336
67337 unsigned long disable_target_scan:1;
67338-};
67339+} __do_const;
67340+typedef struct fc_function_template __no_const fc_function_template_no_const;
67341
67342
67343 /**
67344diff --git a/include/sound/soc.h b/include/sound/soc.h
67345index 91244a0..89ca1a7 100644
67346--- a/include/sound/soc.h
67347+++ b/include/sound/soc.h
67348@@ -769,7 +769,7 @@ struct snd_soc_codec_driver {
67349 /* probe ordering - for components with runtime dependencies */
67350 int probe_order;
67351 int remove_order;
67352-};
67353+} __do_const;
67354
67355 /* SoC platform interface */
67356 struct snd_soc_platform_driver {
67357@@ -815,7 +815,7 @@ struct snd_soc_platform_driver {
67358 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
67359 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
67360 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
67361-};
67362+} __do_const;
67363
67364 struct snd_soc_platform {
67365 const char *name;
67366diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
67367index fca8bbe..c0242ea 100644
67368--- a/include/target/target_core_base.h
67369+++ b/include/target/target_core_base.h
67370@@ -760,7 +760,7 @@ struct se_device {
67371 spinlock_t stats_lock;
67372 /* Active commands on this virtual SE device */
67373 atomic_t simple_cmds;
67374- atomic_t dev_ordered_id;
67375+ atomic_unchecked_t dev_ordered_id;
67376 atomic_t dev_ordered_sync;
67377 atomic_t dev_qf_count;
67378 struct se_obj dev_obj;
67379diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
67380new file mode 100644
67381index 0000000..fb634b7
67382--- /dev/null
67383+++ b/include/trace/events/fs.h
67384@@ -0,0 +1,53 @@
67385+#undef TRACE_SYSTEM
67386+#define TRACE_SYSTEM fs
67387+
67388+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
67389+#define _TRACE_FS_H
67390+
67391+#include <linux/fs.h>
67392+#include <linux/tracepoint.h>
67393+
67394+TRACE_EVENT(do_sys_open,
67395+
67396+ TP_PROTO(const char *filename, int flags, int mode),
67397+
67398+ TP_ARGS(filename, flags, mode),
67399+
67400+ TP_STRUCT__entry(
67401+ __string( filename, filename )
67402+ __field( int, flags )
67403+ __field( int, mode )
67404+ ),
67405+
67406+ TP_fast_assign(
67407+ __assign_str(filename, filename);
67408+ __entry->flags = flags;
67409+ __entry->mode = mode;
67410+ ),
67411+
67412+ TP_printk("\"%s\" %x %o",
67413+ __get_str(filename), __entry->flags, __entry->mode)
67414+);
67415+
67416+TRACE_EVENT(open_exec,
67417+
67418+ TP_PROTO(const char *filename),
67419+
67420+ TP_ARGS(filename),
67421+
67422+ TP_STRUCT__entry(
67423+ __string( filename, filename )
67424+ ),
67425+
67426+ TP_fast_assign(
67427+ __assign_str(filename, filename);
67428+ ),
67429+
67430+ TP_printk("\"%s\"",
67431+ __get_str(filename))
67432+);
67433+
67434+#endif /* _TRACE_FS_H */
67435+
67436+/* This part must be outside protection */
67437+#include <trace/define_trace.h>
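
A TRACE_EVENT() definition only generates the tracepoint plumbing; nothing is emitted until some call site invokes the generated trace_<name>() helper. For the two events defined in this new header, the call sites would look roughly like this (illustrative placement only; the patch hooks the actual open and exec paths elsewhere):

#include <trace/events/fs.h>

	/* in the open path */
	trace_do_sys_open(filename, flags, mode);

	/* in the exec path */
	trace_open_exec(filename);
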
67438diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
67439index 1c09820..7f5ec79 100644
67440--- a/include/trace/events/irq.h
67441+++ b/include/trace/events/irq.h
67442@@ -36,7 +36,7 @@ struct softirq_action;
67443 */
67444 TRACE_EVENT(irq_handler_entry,
67445
67446- TP_PROTO(int irq, struct irqaction *action),
67447+ TP_PROTO(int irq, const struct irqaction *action),
67448
67449 TP_ARGS(irq, action),
67450
67451@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
67452 */
67453 TRACE_EVENT(irq_handler_exit,
67454
67455- TP_PROTO(int irq, struct irqaction *action, int ret),
67456+ TP_PROTO(int irq, const struct irqaction *action, int ret),
67457
67458 TP_ARGS(irq, action, ret),
67459
67460diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
67461index 7caf44c..23c6f27 100644
67462--- a/include/uapi/linux/a.out.h
67463+++ b/include/uapi/linux/a.out.h
67464@@ -39,6 +39,14 @@ enum machine_type {
67465 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
67466 };
67467
67468+/* Constants for the N_FLAGS field */
67469+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67470+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
67471+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
67472+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
67473+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67474+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67475+
67476 #if !defined (N_MAGIC)
67477 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
67478 #endif
67479diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
67480index d876736..b36014e 100644
67481--- a/include/uapi/linux/byteorder/little_endian.h
67482+++ b/include/uapi/linux/byteorder/little_endian.h
67483@@ -42,51 +42,51 @@
67484
67485 static inline __le64 __cpu_to_le64p(const __u64 *p)
67486 {
67487- return (__force __le64)*p;
67488+ return (__force const __le64)*p;
67489 }
67490 static inline __u64 __le64_to_cpup(const __le64 *p)
67491 {
67492- return (__force __u64)*p;
67493+ return (__force const __u64)*p;
67494 }
67495 static inline __le32 __cpu_to_le32p(const __u32 *p)
67496 {
67497- return (__force __le32)*p;
67498+ return (__force const __le32)*p;
67499 }
67500 static inline __u32 __le32_to_cpup(const __le32 *p)
67501 {
67502- return (__force __u32)*p;
67503+ return (__force const __u32)*p;
67504 }
67505 static inline __le16 __cpu_to_le16p(const __u16 *p)
67506 {
67507- return (__force __le16)*p;
67508+ return (__force const __le16)*p;
67509 }
67510 static inline __u16 __le16_to_cpup(const __le16 *p)
67511 {
67512- return (__force __u16)*p;
67513+ return (__force const __u16)*p;
67514 }
67515 static inline __be64 __cpu_to_be64p(const __u64 *p)
67516 {
67517- return (__force __be64)__swab64p(p);
67518+ return (__force const __be64)__swab64p(p);
67519 }
67520 static inline __u64 __be64_to_cpup(const __be64 *p)
67521 {
67522- return __swab64p((__u64 *)p);
67523+ return __swab64p((const __u64 *)p);
67524 }
67525 static inline __be32 __cpu_to_be32p(const __u32 *p)
67526 {
67527- return (__force __be32)__swab32p(p);
67528+ return (__force const __be32)__swab32p(p);
67529 }
67530 static inline __u32 __be32_to_cpup(const __be32 *p)
67531 {
67532- return __swab32p((__u32 *)p);
67533+ return __swab32p((const __u32 *)p);
67534 }
67535 static inline __be16 __cpu_to_be16p(const __u16 *p)
67536 {
67537- return (__force __be16)__swab16p(p);
67538+ return (__force const __be16)__swab16p(p);
67539 }
67540 static inline __u16 __be16_to_cpup(const __be16 *p)
67541 {
67542- return __swab16p((__u16 *)p);
67543+ return __swab16p((const __u16 *)p);
67544 }
67545 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
67546 #define __le64_to_cpus(x) do { (void)(x); } while (0)
67547diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
67548index 126a817..d522bd1 100644
67549--- a/include/uapi/linux/elf.h
67550+++ b/include/uapi/linux/elf.h
67551@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
67552 #define PT_GNU_EH_FRAME 0x6474e550
67553
67554 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
67555+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
67556+
67557+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
67558+
67559+/* Constants for the e_flags field */
67560+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
67561+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
67562+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
67563+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
67564+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
67565+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
67566
67567 /*
67568 * Extended Numbering
67569@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
67570 #define DT_DEBUG 21
67571 #define DT_TEXTREL 22
67572 #define DT_JMPREL 23
67573+#define DT_FLAGS 30
67574+ #define DF_TEXTREL 0x00000004
67575 #define DT_ENCODING 32
67576 #define OLD_DT_LOOS 0x60000000
67577 #define DT_LOOS 0x6000000d
67578@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
67579 #define PF_W 0x2
67580 #define PF_X 0x1
67581
67582+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
67583+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
67584+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
67585+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
67586+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
67587+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
67588+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
67589+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
67590+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
67591+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
67592+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
67593+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
67594+
67595 typedef struct elf32_phdr{
67596 Elf32_Word p_type;
67597 Elf32_Off p_offset;
67598@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
67599 #define EI_OSABI 7
67600 #define EI_PAD 8
67601
67602+#define EI_PAX 14
67603+
67604 #define ELFMAG0 0x7f /* EI_MAG */
67605 #define ELFMAG1 'E'
67606 #define ELFMAG2 'L'
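
PT_PAX_FLAGS declares an OS-specific program header (PT_LOOS + 0x5041580 — the hex digits 50 41 58 spell "PAX") whose p_flags word carries the PF_PAGEEXEC/PF_MPROTECT/... bits above, letting the per-binary PaX policy travel inside the ELF image itself. A hypothetical userspace reader, as a sketch (the helper name and the already-mapped-phdr precondition are assumptions, not part of the patch):

#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS (0x60000000 + 0x5041580)	/* PT_LOOS + "PAX" */
#endif

/* print the PaX flag word from an already-loaded program header table */
static void dump_pax_flags(const Elf64_Phdr *phdr, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		if (phdr[i].p_type == PT_PAX_FLAGS)
			printf("PaX flags: %#x\n", (unsigned int)phdr[i].p_flags);
}
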
67607diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
67608index aa169c4..6a2771d 100644
67609--- a/include/uapi/linux/personality.h
67610+++ b/include/uapi/linux/personality.h
67611@@ -30,6 +30,7 @@ enum {
67612 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
67613 ADDR_NO_RANDOMIZE | \
67614 ADDR_COMPAT_LAYOUT | \
67615+ ADDR_LIMIT_3GB | \
67616 MMAP_PAGE_ZERO)
67617
67618 /*
67619diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
67620index 7530e74..e714828 100644
67621--- a/include/uapi/linux/screen_info.h
67622+++ b/include/uapi/linux/screen_info.h
67623@@ -43,7 +43,8 @@ struct screen_info {
67624 __u16 pages; /* 0x32 */
67625 __u16 vesa_attributes; /* 0x34 */
67626 __u32 capabilities; /* 0x36 */
67627- __u8 _reserved[6]; /* 0x3a */
67628+ __u16 vesapm_size; /* 0x3a */
67629+ __u8 _reserved[4]; /* 0x3c */
67630 } __attribute__((packed));
67631
67632 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
67633diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
67634index 6d67213..8dab561 100644
67635--- a/include/uapi/linux/sysctl.h
67636+++ b/include/uapi/linux/sysctl.h
67637@@ -155,7 +155,11 @@ enum
67638 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
67639 };
67640
67641-
67642+#ifdef CONFIG_PAX_SOFTMODE
67643+enum {
67644+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
67645+};
67646+#endif
67647
67648 /* CTL_VM names: */
67649 enum
67650diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
67651index 26607bd..588b65f 100644
67652--- a/include/uapi/linux/xattr.h
67653+++ b/include/uapi/linux/xattr.h
67654@@ -60,5 +60,9 @@
67655 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
67656 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
67657
67658+/* User namespace */
67659+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
67660+#define XATTR_PAX_FLAGS_SUFFIX "flags"
67661+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
67662
67663 #endif /* _UAPI_LINUX_XATTR_H */
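
With the xattr marking scheme, the same PaX policy can ride on a file as the user.pax.flags extended attribute instead of (or alongside) the ELF header. A sketch of setting it from userspace with setxattr(2); the single-letter flag values are the convention of the PaX userland tools and are an assumption here, not something this header defines:

#include <sys/xattr.h>

/* e.g. flags = "m", len = 1 to turn off MPROTECT (assumed letter) */
static int pax_mark(const char *path, const char *flags, size_t len)
{
	return setxattr(path, "user.pax.flags", flags, len, 0);
}
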
67664diff --git a/include/video/udlfb.h b/include/video/udlfb.h
67665index f9466fa..f4e2b81 100644
67666--- a/include/video/udlfb.h
67667+++ b/include/video/udlfb.h
67668@@ -53,10 +53,10 @@ struct dlfb_data {
67669 u32 pseudo_palette[256];
67670 int blank_mode; /*one of FB_BLANK_ */
67671 /* blit-only rendering path metrics, exposed through sysfs */
67672- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67673- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
67674- atomic_t bytes_sent; /* to usb, after compression including overhead */
67675- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
67676+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
67677+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
67678+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
67679+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
67680 };
67681
67682 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
67683diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
67684index 0993a22..32ba2fe 100644
67685--- a/include/video/uvesafb.h
67686+++ b/include/video/uvesafb.h
67687@@ -177,6 +177,7 @@ struct uvesafb_par {
67688 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
67689 u8 pmi_setpal; /* PMI for palette changes */
67690 u16 *pmi_base; /* protected mode interface location */
67691+ u8 *pmi_code; /* protected mode code location */
67692 void *pmi_start;
67693 void *pmi_pal;
67694 u8 *vbe_state_orig; /*
67695diff --git a/init/Kconfig b/init/Kconfig
67696index 6fdd6e3..5b01610 100644
67697--- a/init/Kconfig
67698+++ b/init/Kconfig
67699@@ -925,6 +925,7 @@ endif # CGROUPS
67700
67701 config CHECKPOINT_RESTORE
67702 bool "Checkpoint/restore support" if EXPERT
67703+ depends on !GRKERNSEC
67704 default n
67705 help
67706 Enables additional kernel features in a sake of checkpoint/restore.
67707@@ -1016,6 +1017,8 @@ config UIDGID_CONVERTED
67708 depends on OCFS2_FS = n
67709 depends on XFS_FS = n
67710
67711+ depends on GRKERNSEC = n
67712+
67713 config UIDGID_STRICT_TYPE_CHECKS
67714 bool "Require conversions between uid/gids and their internal representation"
67715 depends on UIDGID_CONVERTED
67716@@ -1405,7 +1408,7 @@ config SLUB_DEBUG
67717
67718 config COMPAT_BRK
67719 bool "Disable heap randomization"
67720- default y
67721+ default n
67722 help
67723 Randomizing heap placement makes heap exploits harder, but it
67724 also breaks ancient binaries (including anything libc5 based).
67725@@ -1648,7 +1651,7 @@ config INIT_ALL_POSSIBLE
67726 config STOP_MACHINE
67727 bool
67728 default y
67729- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
67730+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
67731 help
67732 Need stop_machine() primitive.
67733
67734diff --git a/init/Makefile b/init/Makefile
67735index 7bc47ee..6da2dc7 100644
67736--- a/init/Makefile
67737+++ b/init/Makefile
67738@@ -2,6 +2,9 @@
67739 # Makefile for the linux kernel.
67740 #
67741
67742+ccflags-y := $(GCC_PLUGINS_CFLAGS)
67743+asflags-y := $(GCC_PLUGINS_AFLAGS)
67744+
67745 obj-y := main.o version.o mounts.o
67746 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
67747 obj-y += noinitramfs.o
67748diff --git a/init/do_mounts.c b/init/do_mounts.c
67749index f8a6642..4e5ee1b 100644
67750--- a/init/do_mounts.c
67751+++ b/init/do_mounts.c
67752@@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
67753 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
67754 {
67755 struct super_block *s;
67756- int err = sys_mount(name, "/root", fs, flags, data);
67757+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
67758 if (err)
67759 return err;
67760
67761- sys_chdir("/root");
67762+ sys_chdir((const char __force_user *)"/root");
67763 s = current->fs->pwd.dentry->d_sb;
67764 ROOT_DEV = s->s_dev;
67765 printk(KERN_INFO
67766@@ -461,18 +461,18 @@ void __init change_floppy(char *fmt, ...)
67767 va_start(args, fmt);
67768 vsprintf(buf, fmt, args);
67769 va_end(args);
67770- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
67771+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
67772 if (fd >= 0) {
67773 sys_ioctl(fd, FDEJECT, 0);
67774 sys_close(fd);
67775 }
67776 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
67777- fd = sys_open("/dev/console", O_RDWR, 0);
67778+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
67779 if (fd >= 0) {
67780 sys_ioctl(fd, TCGETS, (long)&termios);
67781 termios.c_lflag &= ~ICANON;
67782 sys_ioctl(fd, TCSETSF, (long)&termios);
67783- sys_read(fd, &c, 1);
67784+ sys_read(fd, (char __user *)&c, 1);
67785 termios.c_lflag |= ICANON;
67786 sys_ioctl(fd, TCSETSF, (long)&termios);
67787 sys_close(fd);
67788@@ -566,6 +566,6 @@ void __init prepare_namespace(void)
67789 mount_root();
67790 out:
67791 devtmpfs_mount("dev");
67792- sys_mount(".", "/", NULL, MS_MOVE, NULL);
67793- sys_chroot(".");
67794+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
67795+ sys_chroot((const char __force_user *)".");
67796 }
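
The __force_user casts peppered through these early-boot callers are for sparse and the UDEREF/usercopy checking: sys_mount(), sys_open() and friends declare their pointer parameters __user, and once kernel and user address spaces are annotated as distinct, in-kernel callers handing over kernel strings must override the address-space marking explicitly. A compressed sketch of the annotations involved (the authoritative definitions are in include/linux/compiler.h plus this patch, so take the exact spelling as an assumption):

#ifdef __CHECKER__
#define __user		__attribute__((noderef, address_space(1)))
#define __force		__attribute__((force))
#define __force_user	__force __user
#else
#define __user
#define __force
#define __force_user
#endif
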
67797diff --git a/init/do_mounts.h b/init/do_mounts.h
67798index f5b978a..69dbfe8 100644
67799--- a/init/do_mounts.h
67800+++ b/init/do_mounts.h
67801@@ -15,15 +15,15 @@ extern int root_mountflags;
67802
67803 static inline int create_dev(char *name, dev_t dev)
67804 {
67805- sys_unlink(name);
67806- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
67807+ sys_unlink((char __force_user *)name);
67808+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
67809 }
67810
67811 #if BITS_PER_LONG == 32
67812 static inline u32 bstat(char *name)
67813 {
67814 struct stat64 stat;
67815- if (sys_stat64(name, &stat) != 0)
67816+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
67817 return 0;
67818 if (!S_ISBLK(stat.st_mode))
67819 return 0;
67820@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
67821 static inline u32 bstat(char *name)
67822 {
67823 struct stat stat;
67824- if (sys_newstat(name, &stat) != 0)
67825+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
67826 return 0;
67827 if (!S_ISBLK(stat.st_mode))
67828 return 0;
67829diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
67830index f9acf71..1e19144 100644
67831--- a/init/do_mounts_initrd.c
67832+++ b/init/do_mounts_initrd.c
67833@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
67834 create_dev("/dev/root.old", Root_RAM0);
67835 /* mount initrd on rootfs' /root */
67836 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
67837- sys_mkdir("/old", 0700);
67838- sys_chdir("/old");
67839+ sys_mkdir((const char __force_user *)"/old", 0700);
67840+ sys_chdir((const char __force_user *)"/old");
67841
67842 /*
67843 * In case that a resume from disk is carried out by linuxrc or one of
67844@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
67845 current->flags &= ~PF_FREEZER_SKIP;
67846
67847 /* move initrd to rootfs' /old */
67848- sys_mount("..", ".", NULL, MS_MOVE, NULL);
67849+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
67850 /* switch root and cwd back to / of rootfs */
67851- sys_chroot("..");
67852+ sys_chroot((const char __force_user *)"..");
67853
67854 if (new_decode_dev(real_root_dev) == Root_RAM0) {
67855- sys_chdir("/old");
67856+ sys_chdir((const char __force_user *)"/old");
67857 return;
67858 }
67859
67860- sys_chdir("/");
67861+ sys_chdir((const char __force_user *)"/");
67862 ROOT_DEV = new_decode_dev(real_root_dev);
67863 mount_root();
67864
67865 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
67866- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
67867+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
67868 if (!error)
67869 printk("okay\n");
67870 else {
67871- int fd = sys_open("/dev/root.old", O_RDWR, 0);
67872+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
67873 if (error == -ENOENT)
67874 printk("/initrd does not exist. Ignored.\n");
67875 else
67876 printk("failed\n");
67877 printk(KERN_NOTICE "Unmounting old root\n");
67878- sys_umount("/old", MNT_DETACH);
67879+ sys_umount((char __force_user *)"/old", MNT_DETACH);
67880 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
67881 if (fd < 0) {
67882 error = fd;
67883@@ -120,11 +120,11 @@ int __init initrd_load(void)
67884 * mounted in the normal path.
67885 */
67886 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
67887- sys_unlink("/initrd.image");
67888+ sys_unlink((const char __force_user *)"/initrd.image");
67889 handle_initrd();
67890 return 1;
67891 }
67892 }
67893- sys_unlink("/initrd.image");
67894+ sys_unlink((const char __force_user *)"/initrd.image");
67895 return 0;
67896 }
67897diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
67898index 8cb6db5..d729f50 100644
67899--- a/init/do_mounts_md.c
67900+++ b/init/do_mounts_md.c
67901@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
67902 partitioned ? "_d" : "", minor,
67903 md_setup_args[ent].device_names);
67904
67905- fd = sys_open(name, 0, 0);
67906+ fd = sys_open((char __force_user *)name, 0, 0);
67907 if (fd < 0) {
67908 printk(KERN_ERR "md: open failed - cannot start "
67909 "array %s\n", name);
67910@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
67911 * array without it
67912 */
67913 sys_close(fd);
67914- fd = sys_open(name, 0, 0);
67915+ fd = sys_open((char __force_user *)name, 0, 0);
67916 sys_ioctl(fd, BLKRRPART, 0);
67917 }
67918 sys_close(fd);
67919@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
67920
67921 wait_for_device_probe();
67922
67923- fd = sys_open("/dev/md0", 0, 0);
67924+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
67925 if (fd >= 0) {
67926 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
67927 sys_close(fd);
67928diff --git a/init/init_task.c b/init/init_task.c
67929index 8b2f399..f0797c9 100644
67930--- a/init/init_task.c
67931+++ b/init/init_task.c
67932@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
67933 * Initial thread structure. Alignment of this is handled by a special
67934 * linker map entry.
67935 */
67936+#ifdef CONFIG_X86
67937+union thread_union init_thread_union __init_task_data;
67938+#else
67939 union thread_union init_thread_union __init_task_data =
67940 { INIT_THREAD_INFO(init_task) };
67941+#endif
67942diff --git a/init/initramfs.c b/init/initramfs.c
67943index 84c6bf1..8899338 100644
67944--- a/init/initramfs.c
67945+++ b/init/initramfs.c
67946@@ -84,7 +84,7 @@ static void __init free_hash(void)
67947 }
67948 }
67949
67950-static long __init do_utime(char *filename, time_t mtime)
67951+static long __init do_utime(char __force_user *filename, time_t mtime)
67952 {
67953 struct timespec t[2];
67954
67955@@ -119,7 +119,7 @@ static void __init dir_utime(void)
67956 struct dir_entry *de, *tmp;
67957 list_for_each_entry_safe(de, tmp, &dir_list, list) {
67958 list_del(&de->list);
67959- do_utime(de->name, de->mtime);
67960+ do_utime((char __force_user *)de->name, de->mtime);
67961 kfree(de->name);
67962 kfree(de);
67963 }
67964@@ -281,7 +281,7 @@ static int __init maybe_link(void)
67965 if (nlink >= 2) {
67966 char *old = find_link(major, minor, ino, mode, collected);
67967 if (old)
67968- return (sys_link(old, collected) < 0) ? -1 : 1;
67969+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
67970 }
67971 return 0;
67972 }
67973@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
67974 {
67975 struct stat st;
67976
67977- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
67978+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
67979 if (S_ISDIR(st.st_mode))
67980- sys_rmdir(path);
67981+ sys_rmdir((char __force_user *)path);
67982 else
67983- sys_unlink(path);
67984+ sys_unlink((char __force_user *)path);
67985 }
67986 }
67987
67988@@ -315,7 +315,7 @@ static int __init do_name(void)
67989 int openflags = O_WRONLY|O_CREAT;
67990 if (ml != 1)
67991 openflags |= O_TRUNC;
67992- wfd = sys_open(collected, openflags, mode);
67993+ wfd = sys_open((char __force_user *)collected, openflags, mode);
67994
67995 if (wfd >= 0) {
67996 sys_fchown(wfd, uid, gid);
67997@@ -327,17 +327,17 @@ static int __init do_name(void)
67998 }
67999 }
68000 } else if (S_ISDIR(mode)) {
68001- sys_mkdir(collected, mode);
68002- sys_chown(collected, uid, gid);
68003- sys_chmod(collected, mode);
68004+ sys_mkdir((char __force_user *)collected, mode);
68005+ sys_chown((char __force_user *)collected, uid, gid);
68006+ sys_chmod((char __force_user *)collected, mode);
68007 dir_add(collected, mtime);
68008 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
68009 S_ISFIFO(mode) || S_ISSOCK(mode)) {
68010 if (maybe_link() == 0) {
68011- sys_mknod(collected, mode, rdev);
68012- sys_chown(collected, uid, gid);
68013- sys_chmod(collected, mode);
68014- do_utime(collected, mtime);
68015+ sys_mknod((char __force_user *)collected, mode, rdev);
68016+ sys_chown((char __force_user *)collected, uid, gid);
68017+ sys_chmod((char __force_user *)collected, mode);
68018+ do_utime((char __force_user *)collected, mtime);
68019 }
68020 }
68021 return 0;
68022@@ -346,15 +346,15 @@ static int __init do_name(void)
68023 static int __init do_copy(void)
68024 {
68025 if (count >= body_len) {
68026- sys_write(wfd, victim, body_len);
68027+ sys_write(wfd, (char __force_user *)victim, body_len);
68028 sys_close(wfd);
68029- do_utime(vcollected, mtime);
68030+ do_utime((char __force_user *)vcollected, mtime);
68031 kfree(vcollected);
68032 eat(body_len);
68033 state = SkipIt;
68034 return 0;
68035 } else {
68036- sys_write(wfd, victim, count);
68037+ sys_write(wfd, (char __force_user *)victim, count);
68038 body_len -= count;
68039 eat(count);
68040 return 1;
68041@@ -365,9 +365,9 @@ static int __init do_symlink(void)
68042 {
68043 collected[N_ALIGN(name_len) + body_len] = '\0';
68044 clean_path(collected, 0);
68045- sys_symlink(collected + N_ALIGN(name_len), collected);
68046- sys_lchown(collected, uid, gid);
68047- do_utime(collected, mtime);
68048+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
68049+ sys_lchown((char __force_user *)collected, uid, gid);
68050+ do_utime((char __force_user *)collected, mtime);
68051 state = SkipIt;
68052 next_state = Reset;
68053 return 0;
68054diff --git a/init/main.c b/init/main.c
68055index 857166f..9df1d8e 100644
68056--- a/init/main.c
68057+++ b/init/main.c
68058@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
68059 extern void tc_init(void);
68060 #endif
68061
68062+extern void grsecurity_init(void);
68063+
68064 /*
68065 * Debug helper: via this flag we know that we are in 'early bootup code'
68066 * where only the boot processor is running with IRQ disabled. This means
68067@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
68068
68069 __setup("reset_devices", set_reset_devices);
68070
68071+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68072+int grsec_proc_gid = CONFIG_GRKERNSEC_PROC_GID;
68073+static int __init setup_grsec_proc_gid(char *str)
68074+{
68075+ grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
68076+ return 1;
68077+}
68078+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
68079+#endif
68080+
68081+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
68082+extern char pax_enter_kernel_user[];
68083+extern char pax_exit_kernel_user[];
68084+extern pgdval_t clone_pgd_mask;
68085+#endif
68086+
68087+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
68088+static int __init setup_pax_nouderef(char *str)
68089+{
68090+#ifdef CONFIG_X86_32
68091+ unsigned int cpu;
68092+ struct desc_struct *gdt;
68093+
68094+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
68095+ gdt = get_cpu_gdt_table(cpu);
68096+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
68097+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
68098+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
68099+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
68100+ }
68101+ loadsegment(ds, __KERNEL_DS);
68102+ loadsegment(es, __KERNEL_DS);
68103+ loadsegment(ss, __KERNEL_DS);
68104+#else
68105+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
68106+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
68107+ clone_pgd_mask = ~(pgdval_t)0UL;
68108+#endif
68109+
68110+ return 0;
68111+}
68112+early_param("pax_nouderef", setup_pax_nouderef);
68113+#endif
68114+
68115+#ifdef CONFIG_PAX_SOFTMODE
68116+int pax_softmode;
68117+
68118+static int __init setup_pax_softmode(char *str)
68119+{
68120+ get_option(&str, &pax_softmode);
68121+ return 1;
68122+}
68123+__setup("pax_softmode=", setup_pax_softmode);
68124+#endif
68125+
68126 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
68127 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
68128 static const char *panic_later, *panic_param;
68129@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
68130 {
68131 int count = preempt_count();
68132 int ret;
68133+ const char *msg1 = "", *msg2 = "";
68134
68135 if (initcall_debug)
68136 ret = do_one_initcall_debug(fn);
68137@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
68138 sprintf(msgbuf, "error code %d ", ret);
68139
68140 if (preempt_count() != count) {
68141- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
68142+ msg1 = " preemption imbalance";
68143 preempt_count() = count;
68144 }
68145 if (irqs_disabled()) {
68146- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
68147+ msg2 = " disabled interrupts";
68148 local_irq_enable();
68149 }
68150- if (msgbuf[0]) {
68151- printk("initcall %pF returned with %s\n", fn, msgbuf);
68152+ if (msgbuf[0] || *msg1 || *msg2) {
68153+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
68154 }
68155
68156 return ret;
68157@@ -743,6 +801,10 @@ static char *initcall_level_names[] __initdata = {
68158 "late",
68159 };
68160
68161+#ifdef CONFIG_PAX_LATENT_ENTROPY
68162+u64 latent_entropy;
68163+#endif
68164+
68165 static void __init do_initcall_level(int level)
68166 {
68167 extern const struct kernel_param __start___param[], __stop___param[];
68168@@ -755,8 +817,14 @@ static void __init do_initcall_level(int level)
68169 level, level,
68170 &repair_env_string);
68171
68172- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
68173+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
68174 do_one_initcall(*fn);
68175+
68176+#ifdef CONFIG_PAX_LATENT_ENTROPY
68177+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
68178+#endif
68179+
68180+ }
68181 }
68182
68183 static void __init do_initcalls(void)
68184@@ -790,8 +858,14 @@ static void __init do_pre_smp_initcalls(void)
68185 {
68186 initcall_t *fn;
68187
68188- for (fn = __initcall_start; fn < __initcall0_start; fn++)
68189+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
68190 do_one_initcall(*fn);
68191+
68192+#ifdef CONFIG_PAX_LATENT_ENTROPY
68193+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
68194+#endif
68195+
68196+ }
68197 }
68198
68199 static int run_init_process(const char *init_filename)
68200@@ -876,7 +950,7 @@ static noinline void __init kernel_init_freeable(void)
68201 do_basic_setup();
68202
68203 /* Open the /dev/console on the rootfs, this should never fail */
68204- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
68205+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
68206 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
68207
68208 (void) sys_dup(0);
68209@@ -889,11 +963,13 @@ static noinline void __init kernel_init_freeable(void)
68210 if (!ramdisk_execute_command)
68211 ramdisk_execute_command = "/init";
68212
68213- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
68214+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
68215 ramdisk_execute_command = NULL;
68216 prepare_namespace();
68217 }
68218
68219+ grsecurity_init();
68220+
68221 /*
68222 * Ok, we have completed the initial bootup, and
68223 * we're essentially up and running. Get rid of the
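
Note the two different boot-parameter hooks used in the init/main.c hunk above: pax_nouderef goes through early_param(), which runs from parse_early_param() before SMP bring-up (early enough to rewrite the per-CPU GDT entries), while grsec_proc_gid= and pax_softmode= use plain __setup(). The two mechanisms also differ in their return convention, which the code above follows; a minimal sketch of each:

/* early_param handlers return 0 when the option was consumed */
static int __init my_early(char *str)
{
	return 0;
}
early_param("my_early", my_early);

/* __setup handlers return 1 when the option was handled */
static int __init my_late(char *str)
{
	return 1;
}
__setup("my_late=", my_late);
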
68224diff --git a/ipc/mqueue.c b/ipc/mqueue.c
68225index 71a3ca1..cc330ee 100644
68226--- a/ipc/mqueue.c
68227+++ b/ipc/mqueue.c
68228@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
68229 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
68230 info->attr.mq_msgsize);
68231
68232+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
68233 spin_lock(&mq_lock);
68234 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
68235 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
68236diff --git a/ipc/msg.c b/ipc/msg.c
68237index a71af5a..a90a110 100644
68238--- a/ipc/msg.c
68239+++ b/ipc/msg.c
68240@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
68241 return security_msg_queue_associate(msq, msgflg);
68242 }
68243
68244+static struct ipc_ops msg_ops = {
68245+ .getnew = newque,
68246+ .associate = msg_security,
68247+ .more_checks = NULL
68248+};
68249+
68250 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
68251 {
68252 struct ipc_namespace *ns;
68253- struct ipc_ops msg_ops;
68254 struct ipc_params msg_params;
68255
68256 ns = current->nsproxy->ipc_ns;
68257
68258- msg_ops.getnew = newque;
68259- msg_ops.associate = msg_security;
68260- msg_ops.more_checks = NULL;
68261-
68262 msg_params.key = key;
68263 msg_params.flg = msgflg;
68264
68265diff --git a/ipc/sem.c b/ipc/sem.c
68266index 58d31f1..cce7a55 100644
68267--- a/ipc/sem.c
68268+++ b/ipc/sem.c
68269@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
68270 return 0;
68271 }
68272
68273+static struct ipc_ops sem_ops = {
68274+ .getnew = newary,
68275+ .associate = sem_security,
68276+ .more_checks = sem_more_checks
68277+};
68278+
68279 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
68280 {
68281 struct ipc_namespace *ns;
68282- struct ipc_ops sem_ops;
68283 struct ipc_params sem_params;
68284
68285 ns = current->nsproxy->ipc_ns;
68286@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
68287 if (nsems < 0 || nsems > ns->sc_semmsl)
68288 return -EINVAL;
68289
68290- sem_ops.getnew = newary;
68291- sem_ops.associate = sem_security;
68292- sem_ops.more_checks = sem_more_checks;
68293-
68294 sem_params.key = key;
68295 sem_params.flg = semflg;
68296 sem_params.u.nsems = nsems;
68297diff --git a/ipc/shm.c b/ipc/shm.c
68298index dff40c9..9450e27 100644
68299--- a/ipc/shm.c
68300+++ b/ipc/shm.c
68301@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
68302 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
68303 #endif
68304
68305+#ifdef CONFIG_GRKERNSEC
68306+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68307+ const time_t shm_createtime, const uid_t cuid,
68308+ const int shmid);
68309+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68310+ const time_t shm_createtime);
68311+#endif
68312+
68313 void shm_init_ns(struct ipc_namespace *ns)
68314 {
68315 ns->shm_ctlmax = SHMMAX;
68316@@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
68317 shp->shm_lprid = 0;
68318 shp->shm_atim = shp->shm_dtim = 0;
68319 shp->shm_ctim = get_seconds();
68320+#ifdef CONFIG_GRKERNSEC
68321+ {
68322+ struct timespec timeval;
68323+ do_posix_clock_monotonic_gettime(&timeval);
68324+
68325+ shp->shm_createtime = timeval.tv_sec;
68326+ }
68327+#endif
68328 shp->shm_segsz = size;
68329 shp->shm_nattch = 0;
68330 shp->shm_file = file;
68331@@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
68332 return 0;
68333 }
68334
68335+static struct ipc_ops shm_ops = {
68336+ .getnew = newseg,
68337+ .associate = shm_security,
68338+ .more_checks = shm_more_checks
68339+};
68340+
68341 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
68342 {
68343 struct ipc_namespace *ns;
68344- struct ipc_ops shm_ops;
68345 struct ipc_params shm_params;
68346
68347 ns = current->nsproxy->ipc_ns;
68348
68349- shm_ops.getnew = newseg;
68350- shm_ops.associate = shm_security;
68351- shm_ops.more_checks = shm_more_checks;
68352-
68353 shm_params.key = key;
68354 shm_params.flg = shmflg;
68355 shm_params.u.size = size;
68356@@ -1003,6 +1020,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68357 f_mode = FMODE_READ | FMODE_WRITE;
68358 }
68359 if (shmflg & SHM_EXEC) {
68360+
68361+#ifdef CONFIG_PAX_MPROTECT
68362+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
68363+ goto out;
68364+#endif
68365+
68366 prot |= PROT_EXEC;
68367 acc_mode |= S_IXUGO;
68368 }
68369@@ -1026,9 +1049,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
68370 if (err)
68371 goto out_unlock;
68372
68373+#ifdef CONFIG_GRKERNSEC
68374+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
68375+ shp->shm_perm.cuid, shmid) ||
68376+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
68377+ err = -EACCES;
68378+ goto out_unlock;
68379+ }
68380+#endif
68381+
68382 path = shp->shm_file->f_path;
68383 path_get(&path);
68384 shp->shm_nattch++;
68385+#ifdef CONFIG_GRKERNSEC
68386+ shp->shm_lapid = current->pid;
68387+#endif
68388 size = i_size_read(path.dentry->d_inode);
68389 shm_unlock(shp);
68390
68391diff --git a/kernel/acct.c b/kernel/acct.c
68392index 051e071..15e0920 100644
68393--- a/kernel/acct.c
68394+++ b/kernel/acct.c
68395@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
68396 */
68397 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
68398 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
68399- file->f_op->write(file, (char *)&ac,
68400+ file->f_op->write(file, (char __force_user *)&ac,
68401 sizeof(acct_t), &file->f_pos);
68402 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
68403 set_fs(fs);
68404diff --git a/kernel/audit.c b/kernel/audit.c
68405index 40414e9..c920b72 100644
68406--- a/kernel/audit.c
68407+++ b/kernel/audit.c
68408@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
68409 3) suppressed due to audit_rate_limit
68410 4) suppressed due to audit_backlog_limit
68411 */
68412-static atomic_t audit_lost = ATOMIC_INIT(0);
68413+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
68414
68415 /* The netlink socket. */
68416 static struct sock *audit_sock;
68417@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
68418 unsigned long now;
68419 int print;
68420
68421- atomic_inc(&audit_lost);
68422+ atomic_inc_unchecked(&audit_lost);
68423
68424 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
68425
68426@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
68427 printk(KERN_WARNING
68428 "audit: audit_lost=%d audit_rate_limit=%d "
68429 "audit_backlog_limit=%d\n",
68430- atomic_read(&audit_lost),
68431+ atomic_read_unchecked(&audit_lost),
68432 audit_rate_limit,
68433 audit_backlog_limit);
68434 audit_panic(message);
68435@@ -677,7 +677,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
68436 status_set.pid = audit_pid;
68437 status_set.rate_limit = audit_rate_limit;
68438 status_set.backlog_limit = audit_backlog_limit;
68439- status_set.lost = atomic_read(&audit_lost);
68440+ status_set.lost = atomic_read_unchecked(&audit_lost);
68441 status_set.backlog = skb_queue_len(&audit_skb_queue);
68442 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
68443 &status_set, sizeof(status_set));
68444diff --git a/kernel/auditsc.c b/kernel/auditsc.c
68445index 157e989..b28b365 100644
68446--- a/kernel/auditsc.c
68447+++ b/kernel/auditsc.c
68448@@ -2352,7 +2352,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
68449 }
68450
68451 /* global counter which is incremented every time something logs in */
68452-static atomic_t session_id = ATOMIC_INIT(0);
68453+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
68454
68455 /**
68456 * audit_set_loginuid - set current task's audit_context loginuid
68457@@ -2376,7 +2376,7 @@ int audit_set_loginuid(kuid_t loginuid)
68458 return -EPERM;
68459 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
68460
68461- sessionid = atomic_inc_return(&session_id);
68462+ sessionid = atomic_inc_return_unchecked(&session_id);
68463 if (context && context->in_syscall) {
68464 struct audit_buffer *ab;
68465
68466diff --git a/kernel/capability.c b/kernel/capability.c
68467index 493d972..f87dfbd 100644
68468--- a/kernel/capability.c
68469+++ b/kernel/capability.c
68470@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
68471 * before modification is attempted and the application
68472 * fails.
68473 */
68474+ if (tocopy > ARRAY_SIZE(kdata))
68475+ return -EFAULT;
68476+
68477 if (copy_to_user(dataptr, kdata, tocopy
68478 * sizeof(struct __user_cap_data_struct))) {
68479 return -EFAULT;
68480@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
68481 int ret;
68482
68483 rcu_read_lock();
68484- ret = security_capable(__task_cred(t), ns, cap);
68485+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
68486+ gr_task_is_capable(t, __task_cred(t), cap);
68487 rcu_read_unlock();
68488
68489- return (ret == 0);
68490+ return ret;
68491 }
68492
68493 /**
68494@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
68495 int ret;
68496
68497 rcu_read_lock();
68498- ret = security_capable_noaudit(__task_cred(t), ns, cap);
68499+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
68500 rcu_read_unlock();
68501
68502- return (ret == 0);
68503+ return ret;
68504 }
68505
68506 /**
68507@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
68508 BUG();
68509 }
68510
68511- if (security_capable(current_cred(), ns, cap) == 0) {
68512+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
68513 current->flags |= PF_SUPERPRIV;
68514 return true;
68515 }
68516@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
68517 }
68518 EXPORT_SYMBOL(ns_capable);
68519
68520+bool ns_capable_nolog(struct user_namespace *ns, int cap)
68521+{
68522+ if (unlikely(!cap_valid(cap))) {
68523+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
68524+ BUG();
68525+ }
68526+
68527+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
68528+ current->flags |= PF_SUPERPRIV;
68529+ return true;
68530+ }
68531+ return false;
68532+}
68533+EXPORT_SYMBOL(ns_capable_nolog);
68534+
68535 /**
68536 * capable - Determine if the current task has a superior capability in effect
68537 * @cap: The capability to be tested for
68538@@ -408,6 +427,12 @@ bool capable(int cap)
68539 }
68540 EXPORT_SYMBOL(capable);
68541
68542+bool capable_nolog(int cap)
68543+{
68544+ return ns_capable_nolog(&init_user_ns, cap);
68545+}
68546+EXPORT_SYMBOL(capable_nolog);
68547+
68548 /**
68549 * nsown_capable - Check superior capability to one's own user_ns
68550 * @cap: The capability in question
68551@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
68552
68553 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68554 }
68555+
68556+bool inode_capable_nolog(const struct inode *inode, int cap)
68557+{
68558+ struct user_namespace *ns = current_user_ns();
68559+
68560+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
68561+}
68562diff --git a/kernel/cgroup.c b/kernel/cgroup.c
68563index ad99830..992d8a7 100644
68564--- a/kernel/cgroup.c
68565+++ b/kernel/cgroup.c
68566@@ -5514,7 +5514,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
68567 struct css_set *cg = link->cg;
68568 struct task_struct *task;
68569 int count = 0;
68570- seq_printf(seq, "css_set %p\n", cg);
68571+ seq_printf(seq, "css_set %pK\n", cg);
68572 list_for_each_entry(task, &cg->tasks, cg_list) {
68573 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
68574 seq_puts(seq, " ...\n");
68575diff --git a/kernel/compat.c b/kernel/compat.c
68576index c28a306..b4d0cf3 100644
68577--- a/kernel/compat.c
68578+++ b/kernel/compat.c
68579@@ -13,6 +13,7 @@
68580
68581 #include <linux/linkage.h>
68582 #include <linux/compat.h>
68583+#include <linux/module.h>
68584 #include <linux/errno.h>
68585 #include <linux/time.h>
68586 #include <linux/signal.h>
68587@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
68588 mm_segment_t oldfs;
68589 long ret;
68590
68591- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
68592+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
68593 oldfs = get_fs();
68594 set_fs(KERNEL_DS);
68595 ret = hrtimer_nanosleep_restart(restart);
68596@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
68597 oldfs = get_fs();
68598 set_fs(KERNEL_DS);
68599 ret = hrtimer_nanosleep(&tu,
68600- rmtp ? (struct timespec __user *)&rmt : NULL,
68601+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
68602 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
68603 set_fs(oldfs);
68604
68605@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
68606 mm_segment_t old_fs = get_fs();
68607
68608 set_fs(KERNEL_DS);
68609- ret = sys_sigpending((old_sigset_t __user *) &s);
68610+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
68611 set_fs(old_fs);
68612 if (ret == 0)
68613 ret = put_user(s, set);
68614@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
68615 mm_segment_t old_fs = get_fs();
68616
68617 set_fs(KERNEL_DS);
68618- ret = sys_old_getrlimit(resource, &r);
68619+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
68620 set_fs(old_fs);
68621
68622 if (!ret) {
68623@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
68624 mm_segment_t old_fs = get_fs();
68625
68626 set_fs(KERNEL_DS);
68627- ret = sys_getrusage(who, (struct rusage __user *) &r);
68628+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
68629 set_fs(old_fs);
68630
68631 if (ret)
68632@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
68633 set_fs (KERNEL_DS);
68634 ret = sys_wait4(pid,
68635 (stat_addr ?
68636- (unsigned int __user *) &status : NULL),
68637- options, (struct rusage __user *) &r);
68638+ (unsigned int __force_user *) &status : NULL),
68639+ options, (struct rusage __force_user *) &r);
68640 set_fs (old_fs);
68641
68642 if (ret > 0) {
68643@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
68644 memset(&info, 0, sizeof(info));
68645
68646 set_fs(KERNEL_DS);
68647- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
68648- uru ? (struct rusage __user *)&ru : NULL);
68649+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
68650+ uru ? (struct rusage __force_user *)&ru : NULL);
68651 set_fs(old_fs);
68652
68653 if ((ret < 0) || (info.si_signo == 0))
68654@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
68655 oldfs = get_fs();
68656 set_fs(KERNEL_DS);
68657 err = sys_timer_settime(timer_id, flags,
68658- (struct itimerspec __user *) &newts,
68659- (struct itimerspec __user *) &oldts);
68660+ (struct itimerspec __force_user *) &newts,
68661+ (struct itimerspec __force_user *) &oldts);
68662 set_fs(oldfs);
68663 if (!err && old && put_compat_itimerspec(old, &oldts))
68664 return -EFAULT;
68665@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
68666 oldfs = get_fs();
68667 set_fs(KERNEL_DS);
68668 err = sys_timer_gettime(timer_id,
68669- (struct itimerspec __user *) &ts);
68670+ (struct itimerspec __force_user *) &ts);
68671 set_fs(oldfs);
68672 if (!err && put_compat_itimerspec(setting, &ts))
68673 return -EFAULT;
68674@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
68675 oldfs = get_fs();
68676 set_fs(KERNEL_DS);
68677 err = sys_clock_settime(which_clock,
68678- (struct timespec __user *) &ts);
68679+ (struct timespec __force_user *) &ts);
68680 set_fs(oldfs);
68681 return err;
68682 }
68683@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
68684 oldfs = get_fs();
68685 set_fs(KERNEL_DS);
68686 err = sys_clock_gettime(which_clock,
68687- (struct timespec __user *) &ts);
68688+ (struct timespec __force_user *) &ts);
68689 set_fs(oldfs);
68690 if (!err && put_compat_timespec(&ts, tp))
68691 return -EFAULT;
68692@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
68693
68694 oldfs = get_fs();
68695 set_fs(KERNEL_DS);
68696- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
68697+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
68698 set_fs(oldfs);
68699
68700 err = compat_put_timex(utp, &txc);
68701@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
68702 oldfs = get_fs();
68703 set_fs(KERNEL_DS);
68704 err = sys_clock_getres(which_clock,
68705- (struct timespec __user *) &ts);
68706+ (struct timespec __force_user *) &ts);
68707 set_fs(oldfs);
68708 if (!err && tp && put_compat_timespec(&ts, tp))
68709 return -EFAULT;
68710@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
68711 long err;
68712 mm_segment_t oldfs;
68713 struct timespec tu;
68714- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
68715+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
68716
68717- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
68718+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
68719 oldfs = get_fs();
68720 set_fs(KERNEL_DS);
68721 err = clock_nanosleep_restart(restart);
68722@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
68723 oldfs = get_fs();
68724 set_fs(KERNEL_DS);
68725 err = sys_clock_nanosleep(which_clock, flags,
68726- (struct timespec __user *) &in,
68727- (struct timespec __user *) &out);
68728+ (struct timespec __force_user *) &in,
68729+ (struct timespec __force_user *) &out);
68730 set_fs(oldfs);
68731
68732 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
68733diff --git a/kernel/configs.c b/kernel/configs.c
68734index 42e8fa0..9e7406b 100644
68735--- a/kernel/configs.c
68736+++ b/kernel/configs.c
68737@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
68738 struct proc_dir_entry *entry;
68739
68740 /* create the current config file */
68741+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
68742+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
68743+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
68744+ &ikconfig_file_ops);
68745+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68746+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
68747+ &ikconfig_file_ops);
68748+#endif
68749+#else
68750 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
68751 &ikconfig_file_ops);
68752+#endif
68753+
68754 if (!entry)
68755 return -ENOMEM;
68756
68757diff --git a/kernel/cred.c b/kernel/cred.c
68758index 48cea3d..3476734 100644
68759--- a/kernel/cred.c
68760+++ b/kernel/cred.c
68761@@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
68762 validate_creds(cred);
68763 alter_cred_subscribers(cred, -1);
68764 put_cred(cred);
68765+
68766+#ifdef CONFIG_GRKERNSEC_SETXID
68767+ cred = (struct cred *) tsk->delayed_cred;
68768+ if (cred != NULL) {
68769+ tsk->delayed_cred = NULL;
68770+ validate_creds(cred);
68771+ alter_cred_subscribers(cred, -1);
68772+ put_cred(cred);
68773+ }
68774+#endif
68775 }
68776
68777 /**
68778@@ -469,7 +479,7 @@ error_put:
68779 * Always returns 0 thus allowing this function to be tail-called at the end
68780 * of, say, sys_setgid().
68781 */
68782-int commit_creds(struct cred *new)
68783+static int __commit_creds(struct cred *new)
68784 {
68785 struct task_struct *task = current;
68786 const struct cred *old = task->real_cred;
68787@@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
68788
68789 get_cred(new); /* we will require a ref for the subj creds too */
68790
68791+ gr_set_role_label(task, new->uid, new->gid);
68792+
68793 /* dumpability changes */
68794 if (!uid_eq(old->euid, new->euid) ||
68795 !gid_eq(old->egid, new->egid) ||
68796@@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
68797 put_cred(old);
68798 return 0;
68799 }
68800+#ifdef CONFIG_GRKERNSEC_SETXID
68801+extern int set_user(struct cred *new);
68802+
68803+void gr_delayed_cred_worker(void)
68804+{
68805+ const struct cred *new = current->delayed_cred;
68806+ struct cred *ncred;
68807+
68808+ current->delayed_cred = NULL;
68809+
68810+ if (current_uid() && new != NULL) {
68811+ // from doing get_cred on it when queueing this
68812+ put_cred(new);
68813+ return;
68814+ } else if (new == NULL)
68815+ return;
68816+
68817+ ncred = prepare_creds();
68818+ if (!ncred)
68819+ goto die;
68820+ // uids
68821+ ncred->uid = new->uid;
68822+ ncred->euid = new->euid;
68823+ ncred->suid = new->suid;
68824+ ncred->fsuid = new->fsuid;
68825+ // gids
68826+ ncred->gid = new->gid;
68827+ ncred->egid = new->egid;
68828+ ncred->sgid = new->sgid;
68829+ ncred->fsgid = new->fsgid;
68830+ // groups
68831+ if (set_groups(ncred, new->group_info) < 0) {
68832+ abort_creds(ncred);
68833+ goto die;
68834+ }
68835+ // caps
68836+ ncred->securebits = new->securebits;
68837+ ncred->cap_inheritable = new->cap_inheritable;
68838+ ncred->cap_permitted = new->cap_permitted;
68839+ ncred->cap_effective = new->cap_effective;
68840+ ncred->cap_bset = new->cap_bset;
68841+
68842+ if (set_user(ncred)) {
68843+ abort_creds(ncred);
68844+ goto die;
68845+ }
68846+
68847+ // from doing get_cred on it when queueing this
68848+ put_cred(new);
68849+
68850+ __commit_creds(ncred);
68851+ return;
68852+die:
68853+ // from doing get_cred on it when queueing this
68854+ put_cred(new);
68855+ do_group_exit(SIGKILL);
68856+}
68857+#endif
68858+
68859+int commit_creds(struct cred *new)
68860+{
68861+#ifdef CONFIG_GRKERNSEC_SETXID
68862+ int ret;
68863+ int schedule_it = 0;
68864+ struct task_struct *t;
68865+
68866+ /* we won't get called with tasklist_lock held for writing
68867+ and interrupts disabled as the cred struct in that case is
68868+ init_cred
68869+ */
68870+ if (grsec_enable_setxid && !current_is_single_threaded() &&
68871+ !current_uid() && new->uid) {
68872+ schedule_it = 1;
68873+ }
68874+ ret = __commit_creds(new);
68875+ if (schedule_it) {
68876+ rcu_read_lock();
68877+ read_lock(&tasklist_lock);
68878+ for (t = next_thread(current); t != current;
68879+ t = next_thread(t)) {
68880+ if (t->delayed_cred == NULL) {
68881+ t->delayed_cred = get_cred(new);
68882+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
68883+ set_tsk_need_resched(t);
68884+ }
68885+ }
68886+ read_unlock(&tasklist_lock);
68887+ rcu_read_unlock();
68888+ }
68889+ return ret;
68890+#else
68891+ return __commit_creds(new);
68892+#endif
68893+}
68894+
68895 EXPORT_SYMBOL(commit_creds);
68896
68897 /**
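
The GRKERNSEC_SETXID change above splits commit_creds() into __commit_creds() plus a wrapper: when a multithreaded root process drops to a non-root uid, each sibling thread gets a reference-counted copy of the new cred in ->delayed_cred, the TIF_GRSEC_SETXID flag, and a forced reschedule, and gr_delayed_cred_worker() later replays the uid/gid/capability change on that thread. This closes the window in which one thread has dropped privileges while its siblings still run as root. A minimal userspace model of the handoff follows; it uses a single shared slot instead of the kernel's per-thread one, and none of these names are kernel API.

/* Userspace model of the delayed-cred handoff: one thread publishes a
 * pending cred, a sibling applies it at its next safe point.  The kernel
 * keeps one ->delayed_cred slot per thread; this model uses one shared
 * slot, and every name here is illustrative, not kernel API. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct cred { int uid; };

static _Atomic(struct cred *) delayed_cred;

static void *sibling(void *arg)
{
	/* models the TIF_GRSEC_SETXID check on the way back to userland */
	struct cred *c = atomic_exchange(&delayed_cred, NULL);
	if (c)	/* gr_delayed_cred_worker() would __commit_creds() here */
		printf("thread %ld: now uid %d\n", (long)arg, c->uid);
	return NULL;
}

int main(void)
{
	static struct cred newc = { .uid = 1000 };
	pthread_t t;

	atomic_store(&delayed_cred, &newc);	/* models commit_creds() queueing it */
	pthread_create(&t, NULL, sibling, (void *)1L);
	pthread_join(t, NULL);
	return 0;
}
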
68898diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
68899index 9a61738..c5c8f3a 100644
68900--- a/kernel/debug/debug_core.c
68901+++ b/kernel/debug/debug_core.c
68902@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
68903 */
68904 static atomic_t masters_in_kgdb;
68905 static atomic_t slaves_in_kgdb;
68906-static atomic_t kgdb_break_tasklet_var;
68907+static atomic_unchecked_t kgdb_break_tasklet_var;
68908 atomic_t kgdb_setting_breakpoint;
68909
68910 struct task_struct *kgdb_usethread;
68911@@ -132,7 +132,7 @@ int kgdb_single_step;
68912 static pid_t kgdb_sstep_pid;
68913
68914 /* to keep track of the CPU which is doing the single stepping*/
68915-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
68916+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
68917
68918 /*
68919 * If you are debugging a problem where roundup (the collection of
68920@@ -540,7 +540,7 @@ return_normal:
68921 * kernel will only try for the value of sstep_tries before
68922 * giving up and continuing on.
68923 */
68924- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
68925+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
68926 (kgdb_info[cpu].task &&
68927 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
68928 atomic_set(&kgdb_active, -1);
68929@@ -634,8 +634,8 @@ cpu_master_loop:
68930 }
68931
68932 kgdb_restore:
68933- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
68934- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
68935+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
68936+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
68937 if (kgdb_info[sstep_cpu].task)
68938 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
68939 else
68940@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
68941 static void kgdb_tasklet_bpt(unsigned long ing)
68942 {
68943 kgdb_breakpoint();
68944- atomic_set(&kgdb_break_tasklet_var, 0);
68945+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
68946 }
68947
68948 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
68949
68950 void kgdb_schedule_breakpoint(void)
68951 {
68952- if (atomic_read(&kgdb_break_tasklet_var) ||
68953+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
68954 atomic_read(&kgdb_active) != -1 ||
68955 atomic_read(&kgdb_setting_breakpoint))
68956 return;
68957- atomic_inc(&kgdb_break_tasklet_var);
68958+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
68959 tasklet_schedule(&kgdb_tasklet_breakpoint);
68960 }
68961 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
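
The kgdb counters above move from atomic_t to atomic_unchecked_t because PaX's REFCOUNT hardening makes ordinary atomic_t increments trap on overflow; counters that merely tick state and never guard an object's lifetime are switched to the unchecked variants to opt out. A userspace sketch of the distinction, with stand-in types since atomic_unchecked_t and the REFCOUNT trap are PaX-specific:

/* Sketch of checked vs. unchecked counters, modelling PaX REFCOUNT.
 * The kernel types and the overflow trap are PaX-specific; these are
 * stand-ins built on the GCC __atomic builtins. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	if (old == INT_MAX) {		/* REFCOUNT would trap here */
		fprintf(stderr, "refcount overflow\n");
		abort();
	}
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);	/* free to wrap */
}

int main(void)
{
	atomic_unchecked_t ticks = { INT_MAX };
	atomic_t ref = { INT_MAX };

	atomic_inc_unchecked(&ticks);	/* wraps silently, by design */
	printf("ticks = %d\n", ticks.counter);
	atomic_inc(&ref);		/* aborts: lifetime counters must not wrap */
	return 0;
}
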
68962diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
68963index 8875254..7cf4928 100644
68964--- a/kernel/debug/kdb/kdb_main.c
68965+++ b/kernel/debug/kdb/kdb_main.c
68966@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
68967 continue;
68968
68969 kdb_printf("%-20s%8u 0x%p ", mod->name,
68970- mod->core_size, (void *)mod);
68971+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
68972 #ifdef CONFIG_MODULE_UNLOAD
68973 kdb_printf("%4ld ", module_refcount(mod));
68974 #endif
68975@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
68976 kdb_printf(" (Loading)");
68977 else
68978 kdb_printf(" (Live)");
68979- kdb_printf(" 0x%p", mod->module_core);
68980+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
68981
68982 #ifdef CONFIG_MODULE_UNLOAD
68983 {
68984diff --git a/kernel/events/core.c b/kernel/events/core.c
68985index dbccf83..8c66482 100644
68986--- a/kernel/events/core.c
68987+++ b/kernel/events/core.c
68988@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
68989 return 0;
68990 }
68991
68992-static atomic64_t perf_event_id;
68993+static atomic64_unchecked_t perf_event_id;
68994
68995 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
68996 enum event_type_t event_type);
68997@@ -2668,7 +2668,7 @@ static void __perf_event_read(void *info)
68998
68999 static inline u64 perf_event_count(struct perf_event *event)
69000 {
69001- return local64_read(&event->count) + atomic64_read(&event->child_count);
69002+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
69003 }
69004
69005 static u64 perf_event_read(struct perf_event *event)
69006@@ -2998,9 +2998,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
69007 mutex_lock(&event->child_mutex);
69008 total += perf_event_read(event);
69009 *enabled += event->total_time_enabled +
69010- atomic64_read(&event->child_total_time_enabled);
69011+ atomic64_read_unchecked(&event->child_total_time_enabled);
69012 *running += event->total_time_running +
69013- atomic64_read(&event->child_total_time_running);
69014+ atomic64_read_unchecked(&event->child_total_time_running);
69015
69016 list_for_each_entry(child, &event->child_list, child_list) {
69017 total += perf_event_read(child);
69018@@ -3403,10 +3403,10 @@ void perf_event_update_userpage(struct perf_event *event)
69019 userpg->offset -= local64_read(&event->hw.prev_count);
69020
69021 userpg->time_enabled = enabled +
69022- atomic64_read(&event->child_total_time_enabled);
69023+ atomic64_read_unchecked(&event->child_total_time_enabled);
69024
69025 userpg->time_running = running +
69026- atomic64_read(&event->child_total_time_running);
69027+ atomic64_read_unchecked(&event->child_total_time_running);
69028
69029 arch_perf_update_userpage(userpg, now);
69030
69031@@ -3965,11 +3965,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
69032 values[n++] = perf_event_count(event);
69033 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
69034 values[n++] = enabled +
69035- atomic64_read(&event->child_total_time_enabled);
69036+ atomic64_read_unchecked(&event->child_total_time_enabled);
69037 }
69038 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
69039 values[n++] = running +
69040- atomic64_read(&event->child_total_time_running);
69041+ atomic64_read_unchecked(&event->child_total_time_running);
69042 }
69043 if (read_format & PERF_FORMAT_ID)
69044 values[n++] = primary_event_id(event);
69045@@ -4712,12 +4712,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
69046 * need to add enough zero bytes after the string to handle
69047 * the 64bit alignment we do later.
69048 */
69049- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
69050+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
69051 if (!buf) {
69052 name = strncpy(tmp, "//enomem", sizeof(tmp));
69053 goto got_name;
69054 }
69055- name = d_path(&file->f_path, buf, PATH_MAX);
69056+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
69057 if (IS_ERR(name)) {
69058 name = strncpy(tmp, "//toolong", sizeof(tmp));
69059 goto got_name;
69060@@ -6156,7 +6156,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
69061 event->parent = parent_event;
69062
69063 event->ns = get_pid_ns(current->nsproxy->pid_ns);
69064- event->id = atomic64_inc_return(&perf_event_id);
69065+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
69066
69067 event->state = PERF_EVENT_STATE_INACTIVE;
69068
69069@@ -6774,10 +6774,10 @@ static void sync_child_event(struct perf_event *child_event,
69070 /*
69071 * Add back the child's count to the parent's count:
69072 */
69073- atomic64_add(child_val, &parent_event->child_count);
69074- atomic64_add(child_event->total_time_enabled,
69075+ atomic64_add_unchecked(child_val, &parent_event->child_count);
69076+ atomic64_add_unchecked(child_event->total_time_enabled,
69077 &parent_event->child_total_time_enabled);
69078- atomic64_add(child_event->total_time_running,
69079+ atomic64_add_unchecked(child_event->total_time_running,
69080 &parent_event->child_total_time_running);
69081
69082 /*
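
The perf_event_mmap_event() hunk is a bounds fix: the path string is later padded up to 64-bit alignment, so d_path() must be limited to PATH_MAX - sizeof(u64) within a PATH_MAX buffer, rather than being handed all of PATH_MAX in an oversized allocation. A small userspace illustration of reserving the alignment slack up front, with readlink() standing in for d_path():

/* Reserve alignment padding up front when filling a fixed buffer, as the
 * perf hunk does for d_path().  readlink() stands in for d_path(). */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[PATH_MAX];
	/* leave sizeof(uint64_t) bytes for the later 64-bit alignment pad */
	ssize_t n = readlink("/proc/self/exe", buf,
			     sizeof(buf) - sizeof(uint64_t) - 1);
	if (n < 0)
		return 1;
	buf[n] = '\0';
	size_t padded = (n + 1 + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1);
	printf("%s (payload %zd, padded to %zu, fits in %zu)\n",
	       buf, n, padded, sizeof(buf));
	return 0;
}
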
69083diff --git a/kernel/exit.c b/kernel/exit.c
69084index 346616c..f103b28 100644
69085--- a/kernel/exit.c
69086+++ b/kernel/exit.c
69087@@ -182,6 +182,10 @@ void release_task(struct task_struct * p)
69088 struct task_struct *leader;
69089 int zap_leader;
69090 repeat:
69091+#ifdef CONFIG_NET
69092+ gr_del_task_from_ip_table(p);
69093+#endif
69094+
69095 /* don't need to get the RCU readlock here - the process is dead and
69096 * can't be modifying its own credentials. But shut RCU-lockdep up */
69097 rcu_read_lock();
69098@@ -394,7 +398,7 @@ int allow_signal(int sig)
69099 * know it'll be handled, so that they don't get converted to
69100 * SIGKILL or just silently dropped.
69101 */
69102- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
69103+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
69104 recalc_sigpending();
69105 spin_unlock_irq(&current->sighand->siglock);
69106 return 0;
69107@@ -430,6 +434,9 @@ void daemonize(const char *name, ...)
69108 vsnprintf(current->comm, sizeof(current->comm), name, args);
69109 va_end(args);
69110
69111+ gr_put_exec_file(current);
69112+ gr_set_kernel_label(current);
69113+
69114 /*
69115 * If we were started as result of loading a module, close all of the
69116 * user space pages. We don't need them, and if we didn't close them
69117@@ -812,6 +819,8 @@ void do_exit(long code)
69118 struct task_struct *tsk = current;
69119 int group_dead;
69120
69121+ set_fs(USER_DS);
69122+
69123 profile_task_exit(tsk);
69124
69125 WARN_ON(blk_needs_flush_plug(tsk));
69126@@ -828,7 +837,6 @@ void do_exit(long code)
69127 * mm_release()->clear_child_tid() from writing to a user-controlled
69128 * kernel address.
69129 */
69130- set_fs(USER_DS);
69131
69132 ptrace_event(PTRACE_EVENT_EXIT, code);
69133
69134@@ -887,6 +895,9 @@ void do_exit(long code)
69135 tsk->exit_code = code;
69136 taskstats_exit(tsk, group_dead);
69137
69138+ gr_acl_handle_psacct(tsk, code);
69139+ gr_acl_handle_exit();
69140+
69141 exit_mm(tsk);
69142
69143 if (group_dead)
69144@@ -1007,7 +1018,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
69145 * Take down every thread in the group. This is called by fatal signals
69146 * as well as by sys_exit_group (below).
69147 */
69148-void
69149+__noreturn void
69150 do_group_exit(int exit_code)
69151 {
69152 struct signal_struct *sig = current->signal;
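
In do_exit() the set_fs(USER_DS) reset is hoisted to the very top, before ptrace_event() and the rest of the teardown, so a task that dies while running under KERNEL_DS cannot be steered into writing to a kernel address via clear_child_tid() and similar exit-path stores; do_group_exit() is also annotated __noreturn. A short sketch of what the __noreturn attribute buys the compiler (the macro below mirrors, but is not copied from, the kernel's definition):

/* __noreturn lets the compiler both verify and exploit "never returns".
 * The macro mirrors the kernel's definition. */
#include <stdio.h>
#include <stdlib.h>

#define __noreturn __attribute__((__noreturn__))

static __noreturn void die(int code)
{
	fprintf(stderr, "exiting with %d\n", code);
	exit(code);	/* a noreturn tail call keeps the attribute honest */
}

static int parse(const char *s)
{
	if (!s)
		die(1);	/* compiler knows this path never falls through */
	return (int)(*s - '0');
	/* so no "control reaches end of non-void function" warning */
}

int main(int argc, char **argv)
{
	printf("%d\n", parse(argc > 1 ? argv[1] : "7"));
	return 0;
}
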
69153diff --git a/kernel/fork.c b/kernel/fork.c
69154index acc4cb6..b524cb5 100644
69155--- a/kernel/fork.c
69156+++ b/kernel/fork.c
69157@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
69158 *stackend = STACK_END_MAGIC; /* for overflow detection */
69159
69160 #ifdef CONFIG_CC_STACKPROTECTOR
69161- tsk->stack_canary = get_random_int();
69162+ tsk->stack_canary = pax_get_random_long();
69163 #endif
69164
69165 /*
69166@@ -344,13 +344,81 @@ free_tsk:
69167 }
69168
69169 #ifdef CONFIG_MMU
69170+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
69171+{
69172+ struct vm_area_struct *tmp;
69173+ unsigned long charge;
69174+ struct mempolicy *pol;
69175+ struct file *file;
69176+
69177+ charge = 0;
69178+ if (mpnt->vm_flags & VM_ACCOUNT) {
69179+ unsigned long len = vma_pages(mpnt);
69180+
69181+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
69182+ goto fail_nomem;
69183+ charge = len;
69184+ }
69185+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69186+ if (!tmp)
69187+ goto fail_nomem;
69188+ *tmp = *mpnt;
69189+ tmp->vm_mm = mm;
69190+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
69191+ pol = mpol_dup(vma_policy(mpnt));
69192+ if (IS_ERR(pol))
69193+ goto fail_nomem_policy;
69194+ vma_set_policy(tmp, pol);
69195+ if (anon_vma_fork(tmp, mpnt))
69196+ goto fail_nomem_anon_vma_fork;
69197+ tmp->vm_flags &= ~VM_LOCKED;
69198+ tmp->vm_next = tmp->vm_prev = NULL;
69199+ tmp->vm_mirror = NULL;
69200+ file = tmp->vm_file;
69201+ if (file) {
69202+ struct inode *inode = file->f_path.dentry->d_inode;
69203+ struct address_space *mapping = file->f_mapping;
69204+
69205+ get_file(file);
69206+ if (tmp->vm_flags & VM_DENYWRITE)
69207+ atomic_dec(&inode->i_writecount);
69208+ mutex_lock(&mapping->i_mmap_mutex);
69209+ if (tmp->vm_flags & VM_SHARED)
69210+ mapping->i_mmap_writable++;
69211+ flush_dcache_mmap_lock(mapping);
69212+ /* insert tmp into the share list, just after mpnt */
69213+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
69214+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
69215+ else
69216+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
69217+ flush_dcache_mmap_unlock(mapping);
69218+ mutex_unlock(&mapping->i_mmap_mutex);
69219+ }
69220+
69221+ /*
69222+ * Clear hugetlb-related page reserves for children. This only
69223+ * affects MAP_PRIVATE mappings. Faults generated by the child
69224+ * are not guaranteed to succeed, even if read-only
69225+ */
69226+ if (is_vm_hugetlb_page(tmp))
69227+ reset_vma_resv_huge_pages(tmp);
69228+
69229+ return tmp;
69230+
69231+fail_nomem_anon_vma_fork:
69232+ mpol_put(pol);
69233+fail_nomem_policy:
69234+ kmem_cache_free(vm_area_cachep, tmp);
69235+fail_nomem:
69236+ vm_unacct_memory(charge);
69237+ return NULL;
69238+}
69239+
69240 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69241 {
69242 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
69243 struct rb_node **rb_link, *rb_parent;
69244 int retval;
69245- unsigned long charge;
69246- struct mempolicy *pol;
69247
69248 down_write(&oldmm->mmap_sem);
69249 flush_cache_dup_mm(oldmm);
69250@@ -363,8 +431,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69251 mm->locked_vm = 0;
69252 mm->mmap = NULL;
69253 mm->mmap_cache = NULL;
69254- mm->free_area_cache = oldmm->mmap_base;
69255- mm->cached_hole_size = ~0UL;
69256+ mm->free_area_cache = oldmm->free_area_cache;
69257+ mm->cached_hole_size = oldmm->cached_hole_size;
69258 mm->map_count = 0;
69259 cpumask_clear(mm_cpumask(mm));
69260 mm->mm_rb = RB_ROOT;
69261@@ -380,57 +448,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69262
69263 prev = NULL;
69264 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
69265- struct file *file;
69266-
69267 if (mpnt->vm_flags & VM_DONTCOPY) {
69268 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
69269 -vma_pages(mpnt));
69270 continue;
69271 }
69272- charge = 0;
69273- if (mpnt->vm_flags & VM_ACCOUNT) {
69274- unsigned long len = vma_pages(mpnt);
69275-
69276- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
69277- goto fail_nomem;
69278- charge = len;
69279- }
69280- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69281- if (!tmp)
69282- goto fail_nomem;
69283- *tmp = *mpnt;
69284- INIT_LIST_HEAD(&tmp->anon_vma_chain);
69285- pol = mpol_dup(vma_policy(mpnt));
69286- retval = PTR_ERR(pol);
69287- if (IS_ERR(pol))
69288- goto fail_nomem_policy;
69289- vma_set_policy(tmp, pol);
69290- tmp->vm_mm = mm;
69291- if (anon_vma_fork(tmp, mpnt))
69292- goto fail_nomem_anon_vma_fork;
69293- tmp->vm_flags &= ~VM_LOCKED;
69294- tmp->vm_next = tmp->vm_prev = NULL;
69295- file = tmp->vm_file;
69296- if (file) {
69297- struct inode *inode = file->f_path.dentry->d_inode;
69298- struct address_space *mapping = file->f_mapping;
69299-
69300- get_file(file);
69301- if (tmp->vm_flags & VM_DENYWRITE)
69302- atomic_dec(&inode->i_writecount);
69303- mutex_lock(&mapping->i_mmap_mutex);
69304- if (tmp->vm_flags & VM_SHARED)
69305- mapping->i_mmap_writable++;
69306- flush_dcache_mmap_lock(mapping);
69307- /* insert tmp into the share list, just after mpnt */
69308- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
69309- vma_nonlinear_insert(tmp,
69310- &mapping->i_mmap_nonlinear);
69311- else
69312- vma_interval_tree_insert_after(tmp, mpnt,
69313- &mapping->i_mmap);
69314- flush_dcache_mmap_unlock(mapping);
69315- mutex_unlock(&mapping->i_mmap_mutex);
69316+ tmp = dup_vma(mm, oldmm, mpnt);
69317+ if (!tmp) {
69318+ retval = -ENOMEM;
69319+ goto out;
69320 }
69321
69322 /*
69323@@ -462,6 +488,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
69324 if (retval)
69325 goto out;
69326 }
69327+
69328+#ifdef CONFIG_PAX_SEGMEXEC
69329+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
69330+ struct vm_area_struct *mpnt_m;
69331+
69332+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
69333+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
69334+
69335+ if (!mpnt->vm_mirror)
69336+ continue;
69337+
69338+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
69339+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
69340+ mpnt->vm_mirror = mpnt_m;
69341+ } else {
69342+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
69343+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
69344+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
69345+ mpnt->vm_mirror->vm_mirror = mpnt;
69346+ }
69347+ }
69348+ BUG_ON(mpnt_m);
69349+ }
69350+#endif
69351+
69352 /* a new mm has just been created */
69353 arch_dup_mmap(oldmm, mm);
69354 retval = 0;
69355@@ -470,14 +521,6 @@ out:
69356 flush_tlb_mm(oldmm);
69357 up_write(&oldmm->mmap_sem);
69358 return retval;
69359-fail_nomem_anon_vma_fork:
69360- mpol_put(pol);
69361-fail_nomem_policy:
69362- kmem_cache_free(vm_area_cachep, tmp);
69363-fail_nomem:
69364- retval = -ENOMEM;
69365- vm_unacct_memory(charge);
69366- goto out;
69367 }
69368
69369 static inline int mm_alloc_pgd(struct mm_struct *mm)
69370@@ -692,8 +735,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
69371 return ERR_PTR(err);
69372
69373 mm = get_task_mm(task);
69374- if (mm && mm != current->mm &&
69375- !ptrace_may_access(task, mode)) {
69376+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
69377+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
69378 mmput(mm);
69379 mm = ERR_PTR(-EACCES);
69380 }
69381@@ -912,13 +955,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
69382 spin_unlock(&fs->lock);
69383 return -EAGAIN;
69384 }
69385- fs->users++;
69386+ atomic_inc(&fs->users);
69387 spin_unlock(&fs->lock);
69388 return 0;
69389 }
69390 tsk->fs = copy_fs_struct(fs);
69391 if (!tsk->fs)
69392 return -ENOMEM;
69393+ /* Carry through gr_chroot_dentry and is_chrooted instead
69394+ of recomputing it here. Already copied when the task struct
69395+ is duplicated. This allows pivot_root to not be treated as
69396+ a chroot
69397+ */
69398+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
69399+
69400 return 0;
69401 }
69402
69403@@ -1183,6 +1233,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69404 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
69405 #endif
69406 retval = -EAGAIN;
69407+
69408+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
69409+
69410 if (atomic_read(&p->real_cred->user->processes) >=
69411 task_rlimit(p, RLIMIT_NPROC)) {
69412 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
69413@@ -1422,6 +1475,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
69414 goto bad_fork_free_pid;
69415 }
69416
69417+ /* synchronizes with gr_set_acls()
69418+ we need to call this past the point of no return for fork()
69419+ */
69420+ gr_copy_label(p);
69421+
69422 if (clone_flags & CLONE_THREAD) {
69423 current->signal->nr_threads++;
69424 atomic_inc(&current->signal->live);
69425@@ -1505,6 +1563,8 @@ bad_fork_cleanup_count:
69426 bad_fork_free:
69427 free_task(p);
69428 fork_out:
69429+ gr_log_forkfail(retval);
69430+
69431 return ERR_PTR(retval);
69432 }
69433
69434@@ -1605,6 +1665,8 @@ long do_fork(unsigned long clone_flags,
69435 if (clone_flags & CLONE_PARENT_SETTID)
69436 put_user(nr, parent_tidptr);
69437
69438+ gr_handle_brute_check();
69439+
69440 if (clone_flags & CLONE_VFORK) {
69441 p->vfork_done = &vfork;
69442 init_completion(&vfork);
69443@@ -1714,7 +1776,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
69444 return 0;
69445
69446 /* don't need lock here; in the worst case we'll do useless copy */
69447- if (fs->users == 1)
69448+ if (atomic_read(&fs->users) == 1)
69449 return 0;
69450
69451 *new_fsp = copy_fs_struct(fs);
69452@@ -1803,7 +1865,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
69453 fs = current->fs;
69454 spin_lock(&fs->lock);
69455 current->fs = new_fs;
69456- if (--fs->users)
69457+ gr_set_chroot_entries(current, &current->fs->root);
69458+ if (atomic_dec_return(&fs->users))
69459 new_fs = NULL;
69460 else
69461 new_fs = fs;
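
Two things happen in the fork.c hunks above: dup_mmap()'s per-VMA body is factored out into dup_vma() so the SEGMEXEC mirror fixup can walk the copied VMA list afterwards, and fs_struct's users count changes from a spinlock-guarded int to an atomic_t (atomic_inc(), atomic_read(), atomic_dec_return()). A userspace sketch of the refcount pattern, where the decrement and the last-reference test must be a single atomic operation:

/* Refcount moved from "int under a lock" to an atomic, as done for
 * fs_struct.users: the decrement and the zero test must be one atomic
 * step, hence the atomic_dec_return()-style usage.  This is a model,
 * not the kernel structure. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct {
	atomic_int users;
	/* ... root, pwd, flags ... */
};

static struct fs_struct *fs_get(struct fs_struct *fs)
{
	atomic_fetch_add(&fs->users, 1);
	return fs;
}

static void fs_put(struct fs_struct *fs)
{
	/* fetch_sub returns the old value; old == 1 means last reference */
	if (atomic_fetch_sub(&fs->users, 1) == 1) {
		printf("freeing fs_struct\n");
		free(fs);
	}
}

int main(void)
{
	struct fs_struct *fs = calloc(1, sizeof(*fs));

	atomic_store(&fs->users, 1);
	fs_get(fs);
	fs_put(fs);	/* one user still holds it */
	fs_put(fs);	/* last reference: frees */
	return 0;
}
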
69462diff --git a/kernel/futex.c b/kernel/futex.c
69463index 19eb089..b8c65ea 100644
69464--- a/kernel/futex.c
69465+++ b/kernel/futex.c
69466@@ -54,6 +54,7 @@
69467 #include <linux/mount.h>
69468 #include <linux/pagemap.h>
69469 #include <linux/syscalls.h>
69470+#include <linux/ptrace.h>
69471 #include <linux/signal.h>
69472 #include <linux/export.h>
69473 #include <linux/magic.h>
69474@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
69475 struct page *page, *page_head;
69476 int err, ro = 0;
69477
69478+#ifdef CONFIG_PAX_SEGMEXEC
69479+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
69480+ return -EFAULT;
69481+#endif
69482+
69483 /*
69484 * The futex address must be "naturally" aligned.
69485 */
69486@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
69487 {
69488 u32 curval;
69489 int i;
69490+ mm_segment_t oldfs;
69491
69492 /*
69493 * This will fail and we want it. Some arch implementations do
69494@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
69495 * implementation, the non-functional ones will return
69496 * -ENOSYS.
69497 */
69498+ oldfs = get_fs();
69499+ set_fs(USER_DS);
69500 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
69501 futex_cmpxchg_enabled = 1;
69502+ set_fs(oldfs);
69503
69504 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
69505 plist_head_init(&futex_queues[i].chain);
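
futex_init() deliberately probes cmpxchg_futex_value_locked() on a NULL pointer to detect working futex atomics; with PaX's user/kernel address-space separation that probe only faults cleanly if it is classified as a user access, hence the added get_fs()/set_fs(USER_DS) save-and-restore bracket. A toy model of the bracket, with stand-in definitions since mm_segment_t and the segment limits are arch code:

/* The get_fs()/set_fs() save-restore bracket used in futex_init().
 * All definitions here are stand-ins for arch-specific kernel code. */
#include <stdio.h>

typedef struct { unsigned long seg; } mm_segment_t;
#define USER_DS   ((mm_segment_t){ 0x7fffffffffffUL })
#define KERNEL_DS ((mm_segment_t){ ~0UL })

static mm_segment_t current_fs = { ~0UL };	/* boot default: KERNEL_DS */
static mm_segment_t get_fs(void) { return current_fs; }
static void set_fs(mm_segment_t s) { current_fs = s; }

static int probe_user_cmpxchg(void)
{
	/* the real probe faults; under USER_DS the fault resolves to -EFAULT */
	return current_fs.seg == USER_DS.seg ? -14 /* -EFAULT */ : -1;
}

int main(void)
{
	mm_segment_t oldfs = get_fs();	/* save */
	set_fs(USER_DS);		/* make the probe count as a user access */
	int ret = probe_user_cmpxchg();
	set_fs(oldfs);			/* always restore */
	printf("probe returned %d\n", ret);
	return 0;
}
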
69506diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
69507index 9b22d03..6295b62 100644
69508--- a/kernel/gcov/base.c
69509+++ b/kernel/gcov/base.c
69510@@ -102,11 +102,6 @@ void gcov_enable_events(void)
69511 }
69512
69513 #ifdef CONFIG_MODULES
69514-static inline int within(void *addr, void *start, unsigned long size)
69515-{
69516- return ((addr >= start) && (addr < start + size));
69517-}
69518-
69519 /* Update list and generate events when modules are unloaded. */
69520 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69521 void *data)
69522@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
69523 prev = NULL;
69524 /* Remove entries located in module from linked list. */
69525 for (info = gcov_info_head; info; info = info->next) {
69526- if (within(info, mod->module_core, mod->core_size)) {
69527+ if (within_module_core_rw((unsigned long)info, mod)) {
69528 if (prev)
69529 prev->next = info->next;
69530 else
69531diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
69532index 6db7a5e..25b6648 100644
69533--- a/kernel/hrtimer.c
69534+++ b/kernel/hrtimer.c
69535@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
69536 local_irq_restore(flags);
69537 }
69538
69539-static void run_hrtimer_softirq(struct softirq_action *h)
69540+static void run_hrtimer_softirq(void)
69541 {
69542 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
69543
69544diff --git a/kernel/jump_label.c b/kernel/jump_label.c
69545index 60f48fa..7f3a770 100644
69546--- a/kernel/jump_label.c
69547+++ b/kernel/jump_label.c
69548@@ -13,6 +13,7 @@
69549 #include <linux/sort.h>
69550 #include <linux/err.h>
69551 #include <linux/static_key.h>
69552+#include <linux/mm.h>
69553
69554 #ifdef HAVE_JUMP_LABEL
69555
69556@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
69557
69558 size = (((unsigned long)stop - (unsigned long)start)
69559 / sizeof(struct jump_entry));
69560+ pax_open_kernel();
69561 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
69562+ pax_close_kernel();
69563 }
69564
69565 static void jump_label_update(struct static_key *key, int enable);
69566@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
69567 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
69568 struct jump_entry *iter;
69569
69570+ pax_open_kernel();
69571 for (iter = iter_start; iter < iter_stop; iter++) {
69572 if (within_module_init(iter->code, mod))
69573 iter->code = 0;
69574 }
69575+ pax_close_kernel();
69576 }
69577
69578 static int
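
With KERNEXEC the jump-label tables live in read-only memory, so the in-place sort() and the zeroing of dead init entries above are wrapped in pax_open_kernel()/pax_close_kernel(), which briefly lift write protection around the edit. A userspace model of that bracket using mprotect() on a page-sized table; the pax_* names are the kernel's, the mechanics here are only illustrative:

/* Model of pax_open_kernel()/pax_close_kernel(): make the page writable,
 * edit in place, make it read-only again.  mprotect() stands in for the
 * kernel's write-protection toggle. */
#include <stdio.h>
#include <sys/mman.h>

static long page[512] __attribute__((aligned(4096)));	/* one full page */

int main(void)
{
	long *table = page;

	table[0] = 3; table[1] = 1; table[2] = 2; table[3] = 0;
	mprotect(page, sizeof(page), PROT_READ);		/* normally RO */

	mprotect(page, sizeof(page), PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	for (int i = 0; i < 3; i++)				/* in-place sort */
		for (int j = 0; j < 3 - i; j++)
			if (table[j] > table[j + 1]) {
				long t = table[j];
				table[j] = table[j + 1];
				table[j + 1] = t;
			}
	mprotect(page, sizeof(page), PROT_READ);		/* pax_close_kernel() */

	for (int i = 0; i < 4; i++)
		printf("%ld ", table[i]);
	printf("\n");
	return 0;
}
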
69579diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
69580index 2169fee..45c017a 100644
69581--- a/kernel/kallsyms.c
69582+++ b/kernel/kallsyms.c
69583@@ -11,6 +11,9 @@
69584 * Changed the compression method from stem compression to "table lookup"
69585 * compression (see scripts/kallsyms.c for a more complete description)
69586 */
69587+#ifdef CONFIG_GRKERNSEC_HIDESYM
69588+#define __INCLUDED_BY_HIDESYM 1
69589+#endif
69590 #include <linux/kallsyms.h>
69591 #include <linux/module.h>
69592 #include <linux/init.h>
69593@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
69594
69595 static inline int is_kernel_inittext(unsigned long addr)
69596 {
69597+ if (system_state != SYSTEM_BOOTING)
69598+ return 0;
69599+
69600 if (addr >= (unsigned long)_sinittext
69601 && addr <= (unsigned long)_einittext)
69602 return 1;
69603 return 0;
69604 }
69605
69606+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69607+#ifdef CONFIG_MODULES
69608+static inline int is_module_text(unsigned long addr)
69609+{
69610+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
69611+ return 1;
69612+
69613+ addr = ktla_ktva(addr);
69614+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
69615+}
69616+#else
69617+static inline int is_module_text(unsigned long addr)
69618+{
69619+ return 0;
69620+}
69621+#endif
69622+#endif
69623+
69624 static inline int is_kernel_text(unsigned long addr)
69625 {
69626 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
69627@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
69628
69629 static inline int is_kernel(unsigned long addr)
69630 {
69631+
69632+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69633+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
69634+ return 1;
69635+
69636+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
69637+#else
69638 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
69639+#endif
69640+
69641 return 1;
69642 return in_gate_area_no_mm(addr);
69643 }
69644
69645 static int is_ksym_addr(unsigned long addr)
69646 {
69647+
69648+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69649+ if (is_module_text(addr))
69650+ return 0;
69651+#endif
69652+
69653 if (all_var)
69654 return is_kernel(addr);
69655
69656@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
69657
69658 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
69659 {
69660- iter->name[0] = '\0';
69661 iter->nameoff = get_symbol_offset(new_pos);
69662 iter->pos = new_pos;
69663 }
69664@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
69665 {
69666 struct kallsym_iter *iter = m->private;
69667
69668+#ifdef CONFIG_GRKERNSEC_HIDESYM
69669+ if (current_uid())
69670+ return 0;
69671+#endif
69672+
69673 /* Some debugging symbols have no name. Ignore them. */
69674 if (!iter->name[0])
69675 return 0;
69676@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
69677 */
69678 type = iter->exported ? toupper(iter->type) :
69679 tolower(iter->type);
69680+
69681 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
69682 type, iter->name, iter->module_name);
69683 } else
69684@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
69685 struct kallsym_iter *iter;
69686 int ret;
69687
69688- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
69689+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
69690 if (!iter)
69691 return -ENOMEM;
69692 reset_iter(iter, 0);
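
Under GRKERNSEC_HIDESYM, s_show() returns 0 for any non-root reader, so /proc/kallsyms reads back empty instead of leaking kernel addresses, and the iterator is kzalloc'd (with the redundant name reset removed) so uninitialized bytes never reach userspace. A quick way to observe the visible effect, which of course depends on the running kernel's configuration:

/* Observe the HIDESYM effect: on a hardened kernel, /proc/kallsyms is
 * empty for unprivileged readers; elsewhere it lists symbols (possibly
 * with zeroed %pK addresses). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[256];
	int n = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (n < 3 && fgets(line, sizeof(line), f))
		printf("%d: %s", ++n, line);
	if (n == 0)
		printf("no symbols visible to this uid\n");
	fclose(f);
	return 0;
}
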
69693diff --git a/kernel/kcmp.c b/kernel/kcmp.c
69694index 30b7b22..c726387 100644
69695--- a/kernel/kcmp.c
69696+++ b/kernel/kcmp.c
69697@@ -98,6 +98,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
69698 struct task_struct *task1, *task2;
69699 int ret;
69700
69701+#ifdef CONFIG_GRKERNSEC
69702+ return -ENOSYS;
69703+#endif
69704+
69705 rcu_read_lock();
69706
69707 /*
69708diff --git a/kernel/kexec.c b/kernel/kexec.c
69709index 5e4bd78..00c5b91 100644
69710--- a/kernel/kexec.c
69711+++ b/kernel/kexec.c
69712@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
69713 unsigned long flags)
69714 {
69715 struct compat_kexec_segment in;
69716- struct kexec_segment out, __user *ksegments;
69717+ struct kexec_segment out;
69718+ struct kexec_segment __user *ksegments;
69719 unsigned long i, result;
69720
69721 /* Don't allow clients that don't understand the native
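
The kexec change splits `struct kexec_segment out, __user *ksegments;` into two declarations: sparse's __user address-space annotation binds only to the pointer declarator, which is easy to misread in a combined declaration. The same per-declarator pitfall exists in plain C, as this compilable example shows:

/* The per-declarator pitfall behind the kexec split: decorations such
 * as '*' (and sparse's __user) bind to a single name in a combined
 * declaration, which invites misreading. */
#include <stdio.h>

int main(void)
{
	int *p, q;	/* q is a plain int, not an int * */
	int x = 5;

	p = &x;
	q = 6;
	printf("%d %d\n", *p, q);

	/* one declaration per name, as in the fixed kexec code: */
	int *ptr = &x;
	int plain = 7;
	printf("%d %d\n", *ptr, plain);
	return 0;
}
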
69722diff --git a/kernel/kmod.c b/kernel/kmod.c
69723index 1c317e3..4a92a55 100644
69724--- a/kernel/kmod.c
69725+++ b/kernel/kmod.c
69726@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
69727 kfree(info->argv);
69728 }
69729
69730-static int call_modprobe(char *module_name, int wait)
69731+static int call_modprobe(char *module_name, char *module_param, int wait)
69732 {
69733 static char *envp[] = {
69734 "HOME=/",
69735@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
69736 NULL
69737 };
69738
69739- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
69740+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
69741 if (!argv)
69742 goto out;
69743
69744@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
69745 argv[1] = "-q";
69746 argv[2] = "--";
69747 argv[3] = module_name; /* check free_modprobe_argv() */
69748- argv[4] = NULL;
69749+ argv[4] = module_param;
69750+ argv[5] = NULL;
69751
69752 return call_usermodehelper_fns(modprobe_path, argv, envp,
69753 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
69754@@ -120,9 +121,8 @@ out:
69755 * If module auto-loading support is disabled then this function
69756 * becomes a no-operation.
69757 */
69758-int __request_module(bool wait, const char *fmt, ...)
69759+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
69760 {
69761- va_list args;
69762 char module_name[MODULE_NAME_LEN];
69763 unsigned int max_modprobes;
69764 int ret;
69765@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
69766 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
69767 static int kmod_loop_msg;
69768
69769- va_start(args, fmt);
69770- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
69771- va_end(args);
69772+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
69773 if (ret >= MODULE_NAME_LEN)
69774 return -ENAMETOOLONG;
69775
69776@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
69777 if (ret)
69778 return ret;
69779
69780+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69781+ if (!current_uid()) {
69782+ /* hack to workaround consolekit/udisks stupidity */
69783+ read_lock(&tasklist_lock);
69784+ if (!strcmp(current->comm, "mount") &&
69785+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
69786+ read_unlock(&tasklist_lock);
69787+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
69788+ return -EPERM;
69789+ }
69790+ read_unlock(&tasklist_lock);
69791+ }
69792+#endif
69793+
69794 /* If modprobe needs a service that is in a module, we get a recursive
69795 * loop. Limit the number of running kmod threads to max_threads/2 or
69796 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
69797@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
69798
69799 trace_module_request(module_name, wait, _RET_IP_);
69800
69801- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69802+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
69803
69804 atomic_dec(&kmod_concurrent);
69805 return ret;
69806 }
69807+
69808+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
69809+{
69810+ va_list args;
69811+ int ret;
69812+
69813+ va_start(args, fmt);
69814+ ret = ____request_module(wait, module_param, fmt, args);
69815+ va_end(args);
69816+
69817+ return ret;
69818+}
69819+
69820+int __request_module(bool wait, const char *fmt, ...)
69821+{
69822+ va_list args;
69823+ int ret;
69824+
69825+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69826+ if (current_uid()) {
69827+ char module_param[MODULE_NAME_LEN];
69828+
69829+ memset(module_param, 0, sizeof(module_param));
69830+
69831+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
69832+
69833+ va_start(args, fmt);
69834+ ret = ____request_module(wait, module_param, fmt, args);
69835+ va_end(args);
69836+
69837+ return ret;
69838+ }
69839+#endif
69840+
69841+ va_start(args, fmt);
69842+ ret = ____request_module(wait, NULL, fmt, args);
69843+ va_end(args);
69844+
69845+ return ret;
69846+}
69847+
69848 EXPORT_SYMBOL(__request_module);
69849 #endif /* CONFIG_MODULES */
69850
69851@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
69852 *
69853 * Thus the __user pointer cast is valid here.
69854 */
69855- sys_wait4(pid, (int __user *)&ret, 0, NULL);
69856+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
69857
69858 /*
69859 * If ret is 0, either ____call_usermodehelper failed and the
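
__request_module() is reworked above so a sixth argv slot can carry a marker parameter to modprobe: non-root requests get "grsec_modharden_normal<uid>_" appended, and the new ___request_module() lets in-kernel callers supply their own marker (the mount path uses "grsec_modharden_fs", which simplify_symbols() later strips and checks). A userspace model of the extended argv that call_modprobe() builds:

/* Model of the extended modprobe argv built by call_modprobe(): argv
 * grows from 5 to 6 slots so an optional marker parameter fits.
 * MODULE_NAME_LEN below is a stand-in for the kernel constant. */
#include <stdio.h>

#define MODULE_NAME_LEN 56

static void show_argv(const char *module_name, const char *module_param)
{
	const char *argv[6];

	argv[0] = "/sbin/modprobe";
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;
	argv[4] = module_param;	/* NULL when no marker is attached */
	argv[5] = NULL;
	for (int i = 0; argv[i]; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
}

int main(void)
{
	char param[MODULE_NAME_LEN];
	unsigned uid = 1000;	/* a non-root requester */

	snprintf(param, sizeof(param), "grsec_modharden_normal%u_", uid);
	show_argv("fuse", param);
	return 0;
}
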
69860diff --git a/kernel/kprobes.c b/kernel/kprobes.c
69861index 098f396..fe85ff1 100644
69862--- a/kernel/kprobes.c
69863+++ b/kernel/kprobes.c
69864@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
69865 * kernel image and loaded module images reside. This is required
69866 * so x86_64 can correctly handle the %rip-relative fixups.
69867 */
69868- kip->insns = module_alloc(PAGE_SIZE);
69869+ kip->insns = module_alloc_exec(PAGE_SIZE);
69870 if (!kip->insns) {
69871 kfree(kip);
69872 return NULL;
69873@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
69874 */
69875 if (!list_is_singular(&kip->list)) {
69876 list_del(&kip->list);
69877- module_free(NULL, kip->insns);
69878+ module_free_exec(NULL, kip->insns);
69879 kfree(kip);
69880 }
69881 return 1;
69882@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
69883 {
69884 int i, err = 0;
69885 unsigned long offset = 0, size = 0;
69886- char *modname, namebuf[128];
69887+ char *modname, namebuf[KSYM_NAME_LEN];
69888 const char *symbol_name;
69889 void *addr;
69890 struct kprobe_blackpoint *kb;
69891@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
69892 kprobe_type = "k";
69893
69894 if (sym)
69895- seq_printf(pi, "%p %s %s+0x%x %s ",
69896+ seq_printf(pi, "%pK %s %s+0x%x %s ",
69897 p->addr, kprobe_type, sym, offset,
69898 (modname ? modname : " "));
69899 else
69900- seq_printf(pi, "%p %s %p ",
69901+ seq_printf(pi, "%pK %s %pK ",
69902 p->addr, kprobe_type, p->addr);
69903
69904 if (!pp)
69905@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
69906 const char *sym = NULL;
69907 unsigned int i = *(loff_t *) v;
69908 unsigned long offset = 0;
69909- char *modname, namebuf[128];
69910+ char *modname, namebuf[KSYM_NAME_LEN];
69911
69912 head = &kprobe_table[i];
69913 preempt_disable();
69914diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
69915index 4e316e1..5501eef 100644
69916--- a/kernel/ksysfs.c
69917+++ b/kernel/ksysfs.c
69918@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
69919 {
69920 if (count+1 > UEVENT_HELPER_PATH_LEN)
69921 return -ENOENT;
69922+ if (!capable(CAP_SYS_ADMIN))
69923+ return -EPERM;
69924 memcpy(uevent_helper, buf, count);
69925 uevent_helper[count] = '\0';
69926 if (count && uevent_helper[count-1] == '\n')
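
Writes to /sys/kernel/uevent_helper now require CAP_SYS_ADMIN, since pointing the uevent helper at an attacker-controlled binary amounts to root code execution; note the check sits before the memcpy(), so a rejected writer cannot modify even part of the path. A userspace model of that authorize-before-mutate pattern, with geteuid() as a crude stand-in for capable():

/* The guard pattern added above: authorize before mutating any state,
 * so a rejected writer leaves the buffer untouched.  geteuid() is only
 * a rough stand-in for capable(CAP_SYS_ADMIN). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define UEVENT_HELPER_PATH_LEN 256
static char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";

static long helper_store(const char *buf, size_t count)
{
	if (count + 1 > UEVENT_HELPER_PATH_LEN)
		return -2;	/* the kernel (oddly) returns -ENOENT here */
	if (geteuid() != 0)	/* stand-in for !capable(CAP_SYS_ADMIN) */
		return -1;	/* -EPERM: reject before copying anything */
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	return (long)count;
}

int main(void)
{
	long ret = helper_store("/tmp/evil", 9);

	printf("store returned %ld, helper = %s\n", ret, uevent_helper);
	return 0;
}
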
69927diff --git a/kernel/lockdep.c b/kernel/lockdep.c
69928index 7981e5b..7f2105c 100644
69929--- a/kernel/lockdep.c
69930+++ b/kernel/lockdep.c
69931@@ -590,6 +590,10 @@ static int static_obj(void *obj)
69932 end = (unsigned long) &_end,
69933 addr = (unsigned long) obj;
69934
69935+#ifdef CONFIG_PAX_KERNEXEC
69936+ start = ktla_ktva(start);
69937+#endif
69938+
69939 /*
69940 * static variable?
69941 */
69942@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
69943 if (!static_obj(lock->key)) {
69944 debug_locks_off();
69945 printk("INFO: trying to register non-static key.\n");
69946+ printk("lock:%pS key:%pS.\n", lock, lock->key);
69947 printk("the code is fine but needs lockdep annotation.\n");
69948 printk("turning off the locking correctness validator.\n");
69949 dump_stack();
69950@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
69951 if (!class)
69952 return 0;
69953 }
69954- atomic_inc((atomic_t *)&class->ops);
69955+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
69956 if (very_verbose(class)) {
69957 printk("\nacquire class [%p] %s", class->key, class->name);
69958 if (class->name_version > 1)
69959diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
69960index 91c32a0..7b88d63 100644
69961--- a/kernel/lockdep_proc.c
69962+++ b/kernel/lockdep_proc.c
69963@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
69964
69965 static void print_name(struct seq_file *m, struct lock_class *class)
69966 {
69967- char str[128];
69968+ char str[KSYM_NAME_LEN];
69969 const char *name = class->name;
69970
69971 if (!name) {
69972@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
69973 return 0;
69974 }
69975
69976- seq_printf(m, "%p", class->key);
69977+ seq_printf(m, "%pK", class->key);
69978 #ifdef CONFIG_DEBUG_LOCKDEP
69979 seq_printf(m, " OPS:%8ld", class->ops);
69980 #endif
69981@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
69982
69983 list_for_each_entry(entry, &class->locks_after, entry) {
69984 if (entry->distance == 1) {
69985- seq_printf(m, " -> [%p] ", entry->class->key);
69986+ seq_printf(m, " -> [%pK] ", entry->class->key);
69987 print_name(m, entry->class);
69988 seq_puts(m, "\n");
69989 }
69990@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
69991 if (!class->key)
69992 continue;
69993
69994- seq_printf(m, "[%p] ", class->key);
69995+ seq_printf(m, "[%pK] ", class->key);
69996 print_name(m, class);
69997 seq_puts(m, "\n");
69998 }
69999@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
70000 if (!i)
70001 seq_line(m, '-', 40-namelen, namelen);
70002
70003- snprintf(ip, sizeof(ip), "[<%p>]",
70004+ snprintf(ip, sizeof(ip), "[<%pK>]",
70005 (void *)class->contention_point[i]);
70006 seq_printf(m, "%40s %14lu %29s %pS\n",
70007 name, stats->contention_point[i],
70008@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
70009 if (!i)
70010 seq_line(m, '-', 40-namelen, namelen);
70011
70012- snprintf(ip, sizeof(ip), "[<%p>]",
70013+ snprintf(ip, sizeof(ip), "[<%pK>]",
70014 (void *)class->contending_point[i]);
70015 seq_printf(m, "%40s %14lu %29s %pS\n",
70016 name, stats->contending_point[i],
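
The lockdep /proc files switch lock-class keys and contention addresses from %p to %pK, which prints zeros unless kptr_restrict (and, under HIDESYM, the grsecurity policy) permits, so these debug files stop acting as a kernel-pointer oracle for unprivileged readers. The policy knob and its effect can be inspected from userspace, assuming a kernel built with lockdep:

/* %pK honours /proc/sys/kernel/kptr_restrict; read the knob and a
 * %pK-formatted file to see the effect on the running kernel (the
 * lockdep file exists only with CONFIG_LOCKDEP). */
#include <stdio.h>

static void show(const char *path, int lines)
{
	FILE *f = fopen(path, "r");
	char buf[256];

	if (!f)
		return;
	printf("--- %s\n", path);
	while (lines-- > 0 && fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/kptr_restrict", 1);	/* 0, 1 or 2 */
	show("/proc/lockdep", 3);	/* keys print as zeros when restricted */
	return 0;
}
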
70017diff --git a/kernel/module.c b/kernel/module.c
70018index 3e544f4..34c3008 100644
70019--- a/kernel/module.c
70020+++ b/kernel/module.c
70021@@ -59,6 +59,7 @@
70022 #include <linux/pfn.h>
70023 #include <linux/bsearch.h>
70024 #include <linux/fips.h>
70025+#include <linux/grsecurity.h>
70026 #include "module-internal.h"
70027
70028 #define CREATE_TRACE_POINTS
70029@@ -153,7 +154,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
70030
70031 /* Bounds of module allocation, for speeding __module_address.
70032 * Protected by module_mutex. */
70033-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
70034+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
70035+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
70036
70037 int register_module_notifier(struct notifier_block * nb)
70038 {
70039@@ -319,7 +321,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
70040 return true;
70041
70042 list_for_each_entry_rcu(mod, &modules, list) {
70043- struct symsearch arr[] = {
70044+ struct symsearch modarr[] = {
70045 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
70046 NOT_GPL_ONLY, false },
70047 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
70048@@ -344,7 +346,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
70049 if (mod->state == MODULE_STATE_UNFORMED)
70050 continue;
70051
70052- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
70053+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
70054 return true;
70055 }
70056 return false;
70057@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
70058 static int percpu_modalloc(struct module *mod,
70059 unsigned long size, unsigned long align)
70060 {
70061- if (align > PAGE_SIZE) {
70062+ if (align-1 >= PAGE_SIZE) {
70063 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
70064 mod->name, align, PAGE_SIZE);
70065 align = PAGE_SIZE;
70066@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
70067 static ssize_t show_coresize(struct module_attribute *mattr,
70068 struct module_kobject *mk, char *buffer)
70069 {
70070- return sprintf(buffer, "%u\n", mk->mod->core_size);
70071+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
70072 }
70073
70074 static struct module_attribute modinfo_coresize =
70075@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
70076 static ssize_t show_initsize(struct module_attribute *mattr,
70077 struct module_kobject *mk, char *buffer)
70078 {
70079- return sprintf(buffer, "%u\n", mk->mod->init_size);
70080+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
70081 }
70082
70083 static struct module_attribute modinfo_initsize =
70084@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
70085 */
70086 #ifdef CONFIG_SYSFS
70087
70088-#ifdef CONFIG_KALLSYMS
70089+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
70090 static inline bool sect_empty(const Elf_Shdr *sect)
70091 {
70092 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
70093@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
70094
70095 static void unset_module_core_ro_nx(struct module *mod)
70096 {
70097- set_page_attributes(mod->module_core + mod->core_text_size,
70098- mod->module_core + mod->core_size,
70099+ set_page_attributes(mod->module_core_rw,
70100+ mod->module_core_rw + mod->core_size_rw,
70101 set_memory_x);
70102- set_page_attributes(mod->module_core,
70103- mod->module_core + mod->core_ro_size,
70104+ set_page_attributes(mod->module_core_rx,
70105+ mod->module_core_rx + mod->core_size_rx,
70106 set_memory_rw);
70107 }
70108
70109 static void unset_module_init_ro_nx(struct module *mod)
70110 {
70111- set_page_attributes(mod->module_init + mod->init_text_size,
70112- mod->module_init + mod->init_size,
70113+ set_page_attributes(mod->module_init_rw,
70114+ mod->module_init_rw + mod->init_size_rw,
70115 set_memory_x);
70116- set_page_attributes(mod->module_init,
70117- mod->module_init + mod->init_ro_size,
70118+ set_page_attributes(mod->module_init_rx,
70119+ mod->module_init_rx + mod->init_size_rx,
70120 set_memory_rw);
70121 }
70122
70123@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
70124 list_for_each_entry_rcu(mod, &modules, list) {
70125 if (mod->state == MODULE_STATE_UNFORMED)
70126 continue;
70127- if ((mod->module_core) && (mod->core_text_size)) {
70128- set_page_attributes(mod->module_core,
70129- mod->module_core + mod->core_text_size,
70130+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
70131+ set_page_attributes(mod->module_core_rx,
70132+ mod->module_core_rx + mod->core_size_rx,
70133 set_memory_rw);
70134 }
70135- if ((mod->module_init) && (mod->init_text_size)) {
70136- set_page_attributes(mod->module_init,
70137- mod->module_init + mod->init_text_size,
70138+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
70139+ set_page_attributes(mod->module_init_rx,
70140+ mod->module_init_rx + mod->init_size_rx,
70141 set_memory_rw);
70142 }
70143 }
70144@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
70145 list_for_each_entry_rcu(mod, &modules, list) {
70146 if (mod->state == MODULE_STATE_UNFORMED)
70147 continue;
70148- if ((mod->module_core) && (mod->core_text_size)) {
70149- set_page_attributes(mod->module_core,
70150- mod->module_core + mod->core_text_size,
70151+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
70152+ set_page_attributes(mod->module_core_rx,
70153+ mod->module_core_rx + mod->core_size_rx,
70154 set_memory_ro);
70155 }
70156- if ((mod->module_init) && (mod->init_text_size)) {
70157- set_page_attributes(mod->module_init,
70158- mod->module_init + mod->init_text_size,
70159+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
70160+ set_page_attributes(mod->module_init_rx,
70161+ mod->module_init_rx + mod->init_size_rx,
70162 set_memory_ro);
70163 }
70164 }
70165@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
70166
70167 /* This may be NULL, but that's OK */
70168 unset_module_init_ro_nx(mod);
70169- module_free(mod, mod->module_init);
70170+ module_free(mod, mod->module_init_rw);
70171+ module_free_exec(mod, mod->module_init_rx);
70172 kfree(mod->args);
70173 percpu_modfree(mod);
70174
70175 /* Free lock-classes: */
70176- lockdep_free_key_range(mod->module_core, mod->core_size);
70177+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
70178+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
70179
70180 /* Finally, free the core (containing the module structure) */
70181 unset_module_core_ro_nx(mod);
70182- module_free(mod, mod->module_core);
70183+ module_free_exec(mod, mod->module_core_rx);
70184+ module_free(mod, mod->module_core_rw);
70185
70186 #ifdef CONFIG_MPU
70187 update_protections(current->mm);
70188@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
70189 int ret = 0;
70190 const struct kernel_symbol *ksym;
70191
70192+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70193+ int is_fs_load = 0;
70194+ int register_filesystem_found = 0;
70195+ char *p;
70196+
70197+ p = strstr(mod->args, "grsec_modharden_fs");
70198+ if (p) {
70199+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
70200+ /* copy \0 as well */
70201+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
70202+ is_fs_load = 1;
70203+ }
70204+#endif
70205+
70206 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
70207 const char *name = info->strtab + sym[i].st_name;
70208
70209+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70210+ /* it's a real shame this will never get ripped and copied
70211+ upstream! ;(
70212+ */
70213+ if (is_fs_load && !strcmp(name, "register_filesystem"))
70214+ register_filesystem_found = 1;
70215+#endif
70216+
70217 switch (sym[i].st_shndx) {
70218 case SHN_COMMON:
70219 /* We compiled with -fno-common. These are not
70220@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
70221 ksym = resolve_symbol_wait(mod, info, name);
70222 /* Ok if resolved. */
70223 if (ksym && !IS_ERR(ksym)) {
70224+ pax_open_kernel();
70225 sym[i].st_value = ksym->value;
70226+ pax_close_kernel();
70227 break;
70228 }
70229
70230@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
70231 secbase = (unsigned long)mod_percpu(mod);
70232 else
70233 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
70234+ pax_open_kernel();
70235 sym[i].st_value += secbase;
70236+ pax_close_kernel();
70237 break;
70238 }
70239 }
70240
70241+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70242+ if (is_fs_load && !register_filesystem_found) {
70243+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
70244+ ret = -EPERM;
70245+ }
70246+#endif
70247+
70248 return ret;
70249 }
70250
70251@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
70252 || s->sh_entsize != ~0UL
70253 || strstarts(sname, ".init"))
70254 continue;
70255- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
70256+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
70257+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
70258+ else
70259+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
70260 pr_debug("\t%s\n", sname);
70261 }
70262- switch (m) {
70263- case 0: /* executable */
70264- mod->core_size = debug_align(mod->core_size);
70265- mod->core_text_size = mod->core_size;
70266- break;
70267- case 1: /* RO: text and ro-data */
70268- mod->core_size = debug_align(mod->core_size);
70269- mod->core_ro_size = mod->core_size;
70270- break;
70271- case 3: /* whole core */
70272- mod->core_size = debug_align(mod->core_size);
70273- break;
70274- }
70275 }
70276
70277 pr_debug("Init section allocation order:\n");
70278@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
70279 || s->sh_entsize != ~0UL
70280 || !strstarts(sname, ".init"))
70281 continue;
70282- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
70283- | INIT_OFFSET_MASK);
70284+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
70285+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
70286+ else
70287+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
70288+ s->sh_entsize |= INIT_OFFSET_MASK;
70289 pr_debug("\t%s\n", sname);
70290 }
70291- switch (m) {
70292- case 0: /* executable */
70293- mod->init_size = debug_align(mod->init_size);
70294- mod->init_text_size = mod->init_size;
70295- break;
70296- case 1: /* RO: text and ro-data */
70297- mod->init_size = debug_align(mod->init_size);
70298- mod->init_ro_size = mod->init_size;
70299- break;
70300- case 3: /* whole init */
70301- mod->init_size = debug_align(mod->init_size);
70302- break;
70303- }
70304 }
70305 }
70306
70307@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
70308
70309 /* Put symbol section at end of init part of module. */
70310 symsect->sh_flags |= SHF_ALLOC;
70311- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
70312+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
70313 info->index.sym) | INIT_OFFSET_MASK;
70314 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
70315
70316@@ -2326,13 +2344,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
70317 }
70318
70319 /* Append room for core symbols at end of core part. */
70320- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
70321- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
70322- mod->core_size += strtab_size;
70323+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
70324+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
70325+ mod->core_size_rx += strtab_size;
70326
70327 /* Put string table section at end of init part of module. */
70328 strsect->sh_flags |= SHF_ALLOC;
70329- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
70330+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
70331 info->index.str) | INIT_OFFSET_MASK;
70332 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
70333 }
70334@@ -2350,12 +2368,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70335 /* Make sure we get permanent strtab: don't use info->strtab. */
70336 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
70337
70338+ pax_open_kernel();
70339+
70340 /* Set types up while we still have access to sections. */
70341 for (i = 0; i < mod->num_symtab; i++)
70342 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
70343
70344- mod->core_symtab = dst = mod->module_core + info->symoffs;
70345- mod->core_strtab = s = mod->module_core + info->stroffs;
70346+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
70347+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
70348 src = mod->symtab;
70349 *s++ = 0;
70350 for (ndst = i = 0; i < mod->num_symtab; i++) {
70351@@ -2368,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
70352 }
70353 }
70354 mod->core_num_syms = ndst;
70355+
70356+ pax_close_kernel();
70357 }
70358 #else
70359 static inline void layout_symtab(struct module *mod, struct load_info *info)
70360@@ -2401,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
70361 return size == 0 ? NULL : vmalloc_exec(size);
70362 }
70363
70364-static void *module_alloc_update_bounds(unsigned long size)
70365+static void *module_alloc_update_bounds_rw(unsigned long size)
70366 {
70367 void *ret = module_alloc(size);
70368
70369 if (ret) {
70370 mutex_lock(&module_mutex);
70371 /* Update module bounds. */
70372- if ((unsigned long)ret < module_addr_min)
70373- module_addr_min = (unsigned long)ret;
70374- if ((unsigned long)ret + size > module_addr_max)
70375- module_addr_max = (unsigned long)ret + size;
70376+ if ((unsigned long)ret < module_addr_min_rw)
70377+ module_addr_min_rw = (unsigned long)ret;
70378+ if ((unsigned long)ret + size > module_addr_max_rw)
70379+ module_addr_max_rw = (unsigned long)ret + size;
70380+ mutex_unlock(&module_mutex);
70381+ }
70382+ return ret;
70383+}
70384+
70385+static void *module_alloc_update_bounds_rx(unsigned long size)
70386+{
70387+ void *ret = module_alloc_exec(size);
70388+
70389+ if (ret) {
70390+ mutex_lock(&module_mutex);
70391+ /* Update module bounds. */
70392+ if ((unsigned long)ret < module_addr_min_rx)
70393+ module_addr_min_rx = (unsigned long)ret;
70394+ if ((unsigned long)ret + size > module_addr_max_rx)
70395+ module_addr_max_rx = (unsigned long)ret + size;
70396 mutex_unlock(&module_mutex);
70397 }
70398 return ret;
70399@@ -2630,8 +2668,14 @@ static struct module *setup_load_info(struct load_info *info)
70400 static int check_modinfo(struct module *mod, struct load_info *info)
70401 {
70402 const char *modmagic = get_modinfo(info, "vermagic");
70403+ const char *license = get_modinfo(info, "license");
70404 int err;
70405
70406+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
70407+ if (!license || !license_is_gpl_compatible(license))
70408+ return -ENOEXEC;
70409+#endif
70410+
70411 /* This is allowed: modprobe --force will invalidate it. */
70412 if (!modmagic) {
70413 err = try_to_force_load(mod, "bad vermagic");
70414@@ -2654,7 +2698,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
70415 }
70416
70417 /* Set up license info based on the info section */
70418- set_license(mod, get_modinfo(info, "license"));
70419+ set_license(mod, license);
70420
70421 return 0;
70422 }
70423@@ -2748,7 +2792,7 @@ static int move_module(struct module *mod, struct load_info *info)
70424 void *ptr;
70425
70426 /* Do the allocs. */
70427- ptr = module_alloc_update_bounds(mod->core_size);
70428+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
70429 /*
70430 * The pointer to this block is stored in the module structure
70431 * which is inside the block. Just mark it as not being a
70432@@ -2758,10 +2802,10 @@ static int move_module(struct module *mod, struct load_info *info)
70433 if (!ptr)
70434 return -ENOMEM;
70435
70436- memset(ptr, 0, mod->core_size);
70437- mod->module_core = ptr;
70438+ memset(ptr, 0, mod->core_size_rw);
70439+ mod->module_core_rw = ptr;
70440
70441- ptr = module_alloc_update_bounds(mod->init_size);
70442+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
70443 /*
70444 * The pointer to this block is stored in the module structure
70445 * which is inside the block. This block doesn't need to be
70446@@ -2769,12 +2813,39 @@ static int move_module(struct module *mod, struct load_info *info)
70447 * after the module is initialized.
70448 */
70449 kmemleak_ignore(ptr);
70450- if (!ptr && mod->init_size) {
70451- module_free(mod, mod->module_core);
70452+ if (!ptr && mod->init_size_rw) {
70453+ module_free(mod, mod->module_core_rw);
70454 return -ENOMEM;
70455 }
70456- memset(ptr, 0, mod->init_size);
70457- mod->module_init = ptr;
70458+ memset(ptr, 0, mod->init_size_rw);
70459+ mod->module_init_rw = ptr;
70460+
70461+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
70462+ kmemleak_not_leak(ptr);
70463+ if (!ptr) {
70464+ module_free(mod, mod->module_init_rw);
70465+ module_free(mod, mod->module_core_rw);
70466+ return -ENOMEM;
70467+ }
70468+
70469+ pax_open_kernel();
70470+ memset(ptr, 0, mod->core_size_rx);
70471+ pax_close_kernel();
70472+ mod->module_core_rx = ptr;
70473+
70474+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
70475+ kmemleak_ignore(ptr);
70476+ if (!ptr && mod->init_size_rx) {
70477+ module_free_exec(mod, mod->module_core_rx);
70478+ module_free(mod, mod->module_init_rw);
70479+ module_free(mod, mod->module_core_rw);
70480+ return -ENOMEM;
70481+ }
70482+
70483+ pax_open_kernel();
70484+ memset(ptr, 0, mod->init_size_rx);
70485+ pax_close_kernel();
70486+ mod->module_init_rx = ptr;
70487
70488 /* Transfer each section which specifies SHF_ALLOC */
70489 pr_debug("final section addresses:\n");
70490@@ -2785,16 +2856,45 @@ static int move_module(struct module *mod, struct load_info *info)
70491 if (!(shdr->sh_flags & SHF_ALLOC))
70492 continue;
70493
70494- if (shdr->sh_entsize & INIT_OFFSET_MASK)
70495- dest = mod->module_init
70496- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70497- else
70498- dest = mod->module_core + shdr->sh_entsize;
70499+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
70500+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70501+ dest = mod->module_init_rw
70502+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70503+ else
70504+ dest = mod->module_init_rx
70505+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
70506+ } else {
70507+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
70508+ dest = mod->module_core_rw + shdr->sh_entsize;
70509+ else
70510+ dest = mod->module_core_rx + shdr->sh_entsize;
70511+ }
70512+
70513+ if (shdr->sh_type != SHT_NOBITS) {
70514+
70515+#ifdef CONFIG_PAX_KERNEXEC
70516+#ifdef CONFIG_X86_64
70517+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
70518+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
70519+#endif
70520+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
70521+ pax_open_kernel();
70522+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70523+ pax_close_kernel();
70524+ } else
70525+#endif
70526
70527- if (shdr->sh_type != SHT_NOBITS)
70528 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
70529+ }
70530 /* Update sh_addr to point to copy in image. */
70531- shdr->sh_addr = (unsigned long)dest;
70532+
70533+#ifdef CONFIG_PAX_KERNEXEC
70534+ if (shdr->sh_flags & SHF_EXECINSTR)
70535+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
70536+ else
70537+#endif
70538+
70539+ shdr->sh_addr = (unsigned long)dest;
70540 pr_debug("\t0x%lx %s\n",
70541 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
70542 }
70543@@ -2849,12 +2949,12 @@ static void flush_module_icache(const struct module *mod)
70544 * Do it before processing of module parameters, so the module
70545 * can provide parameter accessor functions of its own.
70546 */
70547- if (mod->module_init)
70548- flush_icache_range((unsigned long)mod->module_init,
70549- (unsigned long)mod->module_init
70550- + mod->init_size);
70551- flush_icache_range((unsigned long)mod->module_core,
70552- (unsigned long)mod->module_core + mod->core_size);
70553+ if (mod->module_init_rx)
70554+ flush_icache_range((unsigned long)mod->module_init_rx,
70555+ (unsigned long)mod->module_init_rx
70556+ + mod->init_size_rx);
70557+ flush_icache_range((unsigned long)mod->module_core_rx,
70558+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
70559
70560 set_fs(old_fs);
70561 }
70562@@ -2924,8 +3024,10 @@ out:
70563 static void module_deallocate(struct module *mod, struct load_info *info)
70564 {
70565 percpu_modfree(mod);
70566- module_free(mod, mod->module_init);
70567- module_free(mod, mod->module_core);
70568+ module_free_exec(mod, mod->module_init_rx);
70569+ module_free_exec(mod, mod->module_core_rx);
70570+ module_free(mod, mod->module_init_rw);
70571+ module_free(mod, mod->module_core_rw);
70572 }
70573
70574 int __weak module_finalize(const Elf_Ehdr *hdr,
70575@@ -2938,7 +3040,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
70576 static int post_relocation(struct module *mod, const struct load_info *info)
70577 {
70578 /* Sort exception table now relocations are done. */
70579+ pax_open_kernel();
70580 sort_extable(mod->extable, mod->extable + mod->num_exentries);
70581+ pax_close_kernel();
70582
70583 /* Copy relocated percpu area over. */
70584 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
70585@@ -3036,9 +3140,38 @@ again:
70586 if (err)
70587 goto free_unload;
70588
70589+ /* Now copy in args */
70590+ mod->args = strndup_user(uargs, ~0UL >> 1);
70591+ if (IS_ERR(mod->args)) {
70592+ err = PTR_ERR(mod->args);
70593+ goto free_unload;
70594+ }
70595+
70596 /* Set up MODINFO_ATTR fields */
70597 setup_modinfo(mod, &info);
70598
70599+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70600+ {
70601+ char *p, *p2;
70602+
70603+ if (strstr(mod->args, "grsec_modharden_netdev")) {
70604+		printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
70605+ err = -EPERM;
70606+ goto free_modinfo;
70607+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
70608+ p += sizeof("grsec_modharden_normal") - 1;
70609+ p2 = strstr(p, "_");
70610+ if (p2) {
70611+ *p2 = '\0';
70612+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
70613+ *p2 = '_';
70614+ }
70615+ err = -EPERM;
70616+ goto free_modinfo;
70617+ }
70618+ }
70619+#endif
70620+
70621 /* Fix up syms, so that st_value is a pointer to location. */
70622 err = simplify_symbols(mod, &info);
70623 if (err < 0)
70624@@ -3104,11 +3237,11 @@ again:
70625 mutex_unlock(&module_mutex);
70626 dynamic_debug_remove(info.debug);
70627 synchronize_sched();
70628- kfree(mod->args);
70629 free_arch_cleanup:
70630 module_arch_cleanup(mod);
70631 free_modinfo:
70632 free_modinfo(mod);
70633+ kfree(mod->args);
70634 free_unload:
70635 module_unload_free(mod);
70636 unlink_mod:
70637@@ -3155,16 +3288,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70638 MODULE_STATE_COMING, mod);
70639
70640 /* Set RO and NX regions for core */
70641- set_section_ro_nx(mod->module_core,
70642- mod->core_text_size,
70643- mod->core_ro_size,
70644- mod->core_size);
70645+ set_section_ro_nx(mod->module_core_rx,
70646+ mod->core_size_rx,
70647+ mod->core_size_rx,
70648+ mod->core_size_rx);
70649
70650 /* Set RO and NX regions for init */
70651- set_section_ro_nx(mod->module_init,
70652- mod->init_text_size,
70653- mod->init_ro_size,
70654- mod->init_size);
70655+ set_section_ro_nx(mod->module_init_rx,
70656+ mod->init_size_rx,
70657+ mod->init_size_rx,
70658+ mod->init_size_rx);
70659
70660 do_mod_ctors(mod);
70661 /* Start the module */
70662@@ -3209,11 +3342,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
70663 mod->strtab = mod->core_strtab;
70664 #endif
70665 unset_module_init_ro_nx(mod);
70666- module_free(mod, mod->module_init);
70667- mod->module_init = NULL;
70668- mod->init_size = 0;
70669- mod->init_ro_size = 0;
70670- mod->init_text_size = 0;
70671+ module_free(mod, mod->module_init_rw);
70672+ module_free_exec(mod, mod->module_init_rx);
70673+ mod->module_init_rw = NULL;
70674+ mod->module_init_rx = NULL;
70675+ mod->init_size_rw = 0;
70676+ mod->init_size_rx = 0;
70677 mutex_unlock(&module_mutex);
70678 wake_up_all(&module_wq);
70679
70680@@ -3245,10 +3379,16 @@ static const char *get_ksymbol(struct module *mod,
70681 unsigned long nextval;
70682
70683 /* At worse, next value is at end of module */
70684- if (within_module_init(addr, mod))
70685- nextval = (unsigned long)mod->module_init+mod->init_text_size;
70686+ if (within_module_init_rx(addr, mod))
70687+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
70688+ else if (within_module_init_rw(addr, mod))
70689+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
70690+ else if (within_module_core_rx(addr, mod))
70691+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
70692+ else if (within_module_core_rw(addr, mod))
70693+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
70694 else
70695- nextval = (unsigned long)mod->module_core+mod->core_text_size;
70696+ return NULL;
70697
70698 /* Scan for closest preceding symbol, and next symbol. (ELF
70699 starts real symbols at 1). */
70700@@ -3501,7 +3641,7 @@ static int m_show(struct seq_file *m, void *p)
70701 return 0;
70702
70703 seq_printf(m, "%s %u",
70704- mod->name, mod->init_size + mod->core_size);
70705+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
70706 print_unload_info(m, mod);
70707
70708 /* Informative for users. */
70709@@ -3510,7 +3650,7 @@ static int m_show(struct seq_file *m, void *p)
70710 mod->state == MODULE_STATE_COMING ? "Loading":
70711 "Live");
70712 /* Used by oprofile and other similar tools. */
70713- seq_printf(m, " 0x%pK", mod->module_core);
70714+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
70715
70716 /* Taints info */
70717 if (mod->taints)
70718@@ -3546,7 +3686,17 @@ static const struct file_operations proc_modules_operations = {
70719
70720 static int __init proc_modules_init(void)
70721 {
70722+#ifndef CONFIG_GRKERNSEC_HIDESYM
70723+#ifdef CONFIG_GRKERNSEC_PROC_USER
70724+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70725+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70726+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
70727+#else
70728 proc_create("modules", 0, NULL, &proc_modules_operations);
70729+#endif
70730+#else
70731+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
70732+#endif
70733 return 0;
70734 }
70735 module_init(proc_modules_init);
70736@@ -3607,14 +3757,14 @@ struct module *__module_address(unsigned long addr)
70737 {
70738 struct module *mod;
70739
70740- if (addr < module_addr_min || addr > module_addr_max)
70741+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
70742+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
70743 return NULL;
70744
70745 list_for_each_entry_rcu(mod, &modules, list) {
70746 if (mod->state == MODULE_STATE_UNFORMED)
70747 continue;
70748- if (within_module_core(addr, mod)
70749- || within_module_init(addr, mod))
70750+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
70751 return mod;
70752 }
70753 return NULL;
70754@@ -3649,11 +3799,20 @@ bool is_module_text_address(unsigned long addr)
70755 */
70756 struct module *__module_text_address(unsigned long addr)
70757 {
70758- struct module *mod = __module_address(addr);
70759+ struct module *mod;
70760+
70761+#ifdef CONFIG_X86_32
70762+ addr = ktla_ktva(addr);
70763+#endif
70764+
70765+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
70766+ return NULL;
70767+
70768+ mod = __module_address(addr);
70769+
70770 if (mod) {
70771 /* Make sure it's within the text section. */
70772- if (!within(addr, mod->module_init, mod->init_text_size)
70773- && !within(addr, mod->module_core, mod->core_text_size))
70774+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
70775 mod = NULL;
70776 }
70777 return mod;
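
The module.c hunks above split each module image into a writable (RW) region for data and an executable (RX) region for text, with separate min/max bounds that the reworked __module_address() uses as a fast range prefilter. A rough user-space model of that bookkeeping, assuming nothing beyond POSIX; malloc() stands in for module_alloc()/module_alloc_exec(), and all names are illustrative rather than the kernel's:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

static pthread_mutex_t module_mutex = PTHREAD_MUTEX_INITIALIZER;
static uintptr_t addr_min_rw = UINTPTR_MAX, addr_max_rw;
static uintptr_t addr_min_rx = UINTPTR_MAX, addr_max_rx;

/* widen the [min, max) window of one pool to cover a new allocation */
static void *alloc_update_bounds(size_t size, uintptr_t *min, uintptr_t *max)
{
	void *ret = malloc(size);

	if (ret) {
		pthread_mutex_lock(&module_mutex);
		if ((uintptr_t)ret < *min)
			*min = (uintptr_t)ret;
		if ((uintptr_t)ret + size > *max)
			*max = (uintptr_t)ret + size;
		pthread_mutex_unlock(&module_mutex);
	}
	return ret;
}

/* mirrors the reworked __module_address() prefilter: an address that
 * misses both windows cannot belong to any module */
static int maybe_module_address(uintptr_t addr)
{
	return (addr >= addr_min_rw && addr < addr_max_rw) ||
	       (addr >= addr_min_rx && addr < addr_max_rx);
}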
70778diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
70779index 7e3443f..b2a1e6b 100644
70780--- a/kernel/mutex-debug.c
70781+++ b/kernel/mutex-debug.c
70782@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
70783 }
70784
70785 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70786- struct thread_info *ti)
70787+ struct task_struct *task)
70788 {
70789 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
70790
70791 /* Mark the current thread as blocked on the lock: */
70792- ti->task->blocked_on = waiter;
70793+ task->blocked_on = waiter;
70794 }
70795
70796 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70797- struct thread_info *ti)
70798+ struct task_struct *task)
70799 {
70800 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
70801- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
70802- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
70803- ti->task->blocked_on = NULL;
70804+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
70805+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
70806+ task->blocked_on = NULL;
70807
70808 list_del_init(&waiter->list);
70809 waiter->task = NULL;
70810diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
70811index 0799fd3..d06ae3b 100644
70812--- a/kernel/mutex-debug.h
70813+++ b/kernel/mutex-debug.h
70814@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
70815 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
70816 extern void debug_mutex_add_waiter(struct mutex *lock,
70817 struct mutex_waiter *waiter,
70818- struct thread_info *ti);
70819+ struct task_struct *task);
70820 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
70821- struct thread_info *ti);
70822+ struct task_struct *task);
70823 extern void debug_mutex_unlock(struct mutex *lock);
70824 extern void debug_mutex_init(struct mutex *lock, const char *name,
70825 struct lock_class_key *key);
70826diff --git a/kernel/mutex.c b/kernel/mutex.c
70827index a307cc9..27fd2e9 100644
70828--- a/kernel/mutex.c
70829+++ b/kernel/mutex.c
70830@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70831 spin_lock_mutex(&lock->wait_lock, flags);
70832
70833 debug_mutex_lock_common(lock, &waiter);
70834- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
70835+ debug_mutex_add_waiter(lock, &waiter, task);
70836
70837 /* add waiting tasks to the end of the waitqueue (FIFO): */
70838 list_add_tail(&waiter.list, &lock->wait_list);
70839@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70840 * TASK_UNINTERRUPTIBLE case.)
70841 */
70842 if (unlikely(signal_pending_state(state, task))) {
70843- mutex_remove_waiter(lock, &waiter,
70844- task_thread_info(task));
70845+ mutex_remove_waiter(lock, &waiter, task);
70846 mutex_release(&lock->dep_map, 1, ip);
70847 spin_unlock_mutex(&lock->wait_lock, flags);
70848
70849@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
70850 done:
70851 lock_acquired(&lock->dep_map, ip);
70852 /* got the lock - rejoice! */
70853- mutex_remove_waiter(lock, &waiter, current_thread_info());
70854+ mutex_remove_waiter(lock, &waiter, task);
70855 mutex_set_owner(lock);
70856
70857 /* set it to 0 if there are no waiters left: */
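
The three mutex files above make one interface change: the debug waiter hooks take a struct task_struct directly instead of a struct thread_info, since every call site already holds the task and only reached it through ti->task. A minimal stand-alone model of the blocked_on bookkeeping, with stand-in types for the kernel's:

#include <stddef.h>

struct mutex_waiter;

struct task_struct {
	struct mutex_waiter *blocked_on;	/* which lock this task waits on */
};

struct mutex_waiter {
	struct task_struct *task;
};

static void debug_add_waiter(struct mutex_waiter *w, struct task_struct *task)
{
	task->blocked_on = w;	/* the caller already has `task`; no
				 * task_thread_info() detour is needed */
	w->task = task;
}

static void debug_remove_waiter(struct mutex_waiter *w, struct task_struct *task)
{
	task->blocked_on = NULL;
	w->task = NULL;
}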
70858diff --git a/kernel/notifier.c b/kernel/notifier.c
70859index 2d5cc4c..d9ea600 100644
70860--- a/kernel/notifier.c
70861+++ b/kernel/notifier.c
70862@@ -5,6 +5,7 @@
70863 #include <linux/rcupdate.h>
70864 #include <linux/vmalloc.h>
70865 #include <linux/reboot.h>
70866+#include <linux/mm.h>
70867
70868 /*
70869 * Notifier list for kernel code which wants to be called
70870@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
70871 while ((*nl) != NULL) {
70872 if (n->priority > (*nl)->priority)
70873 break;
70874- nl = &((*nl)->next);
70875+ nl = (struct notifier_block **)&((*nl)->next);
70876 }
70877- n->next = *nl;
70878+ pax_open_kernel();
70879+ *(const void **)&n->next = *nl;
70880 rcu_assign_pointer(*nl, n);
70881+ pax_close_kernel();
70882 return 0;
70883 }
70884
70885@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
70886 return 0;
70887 if (n->priority > (*nl)->priority)
70888 break;
70889- nl = &((*nl)->next);
70890+ nl = (struct notifier_block **)&((*nl)->next);
70891 }
70892- n->next = *nl;
70893+ pax_open_kernel();
70894+ *(const void **)&n->next = *nl;
70895 rcu_assign_pointer(*nl, n);
70896+ pax_close_kernel();
70897 return 0;
70898 }
70899
70900@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
70901 {
70902 while ((*nl) != NULL) {
70903 if ((*nl) == n) {
70904+ pax_open_kernel();
70905 rcu_assign_pointer(*nl, n->next);
70906+ pax_close_kernel();
70907 return 0;
70908 }
70909- nl = &((*nl)->next);
70910+ nl = (struct notifier_block **)&((*nl)->next);
70911 }
70912 return -ENOENT;
70913 }
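
The notifier.c change wraps every store into the chain with pax_open_kernel()/pax_close_kernel(), because under PaX constification the next pointers live in read-only memory and may only be written inside such a window. A loose user-space analogue of that write-rare discipline, using mprotect(); error handling is omitted, and the kernel primitives typically toggle CR0.WP or a writable alias rather than page protections:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void write_rare(void *dst, const void *src, size_t len)
{
	long page = sysconf(_SC_PAGESIZE);
	void *start = (void *)((uintptr_t)dst & ~(uintptr_t)(page - 1));

	mprotect(start, len + page, PROT_READ | PROT_WRITE);	/* "open" */
	memcpy(dst, src, len);
	mprotect(start, len + page, PROT_READ);			/* "close" */
}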
70914diff --git a/kernel/panic.c b/kernel/panic.c
70915index e1b2822..5edc1d9 100644
70916--- a/kernel/panic.c
70917+++ b/kernel/panic.c
70918@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
70919 const char *board;
70920
70921 printk(KERN_WARNING "------------[ cut here ]------------\n");
70922- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
70923+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
70924 board = dmi_get_system_info(DMI_PRODUCT_NAME);
70925 if (board)
70926 printk(KERN_WARNING "Hardware name: %s\n", board);
70927@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
70928 */
70929 void __stack_chk_fail(void)
70930 {
70931- panic("stack-protector: Kernel stack is corrupted in: %p\n",
70932+ dump_stack();
70933+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
70934 __builtin_return_address(0));
70935 }
70936 EXPORT_SYMBOL(__stack_chk_fail);
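
__stack_chk_fail() now dumps the stack before panicking, so the corrupted call chain is visible even when the panic string is all that survives. A user-space sketch of the same idea, printing a backtrace and then dying; glibc lets a program link its own __stack_chk_fail in place of the library one:

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

void __stack_chk_fail(void)
{
	void *frames[32];
	int n = backtrace(frames, 32);

	backtrace_symbols_fd(frames, n, 2);	/* analogous to dump_stack() */
	fprintf(stderr, "stack-protector: stack corrupted in: %p\n",
		__builtin_return_address(0));
	abort();
}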
70937diff --git a/kernel/pid.c b/kernel/pid.c
70938index aebd4f5..1693c13 100644
70939--- a/kernel/pid.c
70940+++ b/kernel/pid.c
70941@@ -33,6 +33,7 @@
70942 #include <linux/rculist.h>
70943 #include <linux/bootmem.h>
70944 #include <linux/hash.h>
70945+#include <linux/security.h>
70946 #include <linux/pid_namespace.h>
70947 #include <linux/init_task.h>
70948 #include <linux/syscalls.h>
70949@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
70950
70951 int pid_max = PID_MAX_DEFAULT;
70952
70953-#define RESERVED_PIDS 300
70954+#define RESERVED_PIDS 500
70955
70956 int pid_max_min = RESERVED_PIDS + 1;
70957 int pid_max_max = PID_MAX_LIMIT;
70958@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
70959 */
70960 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
70961 {
70962+ struct task_struct *task;
70963+
70964 rcu_lockdep_assert(rcu_read_lock_held(),
70965 "find_task_by_pid_ns() needs rcu_read_lock()"
70966 " protection");
70967- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
70968+
70969+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
70970+
70971+ if (gr_pid_is_chrooted(task))
70972+ return NULL;
70973+
70974+ return task;
70975 }
70976
70977 struct task_struct *find_task_by_vpid(pid_t vnr)
70978@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
70979 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
70980 }
70981
70982+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
70983+{
70984+ rcu_lockdep_assert(rcu_read_lock_held(),
70985+ "find_task_by_pid_ns() needs rcu_read_lock()"
70986+ " protection");
70987+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
70988+}
70989+
70990 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
70991 {
70992 struct pid *pid;
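
find_task_by_pid_ns() now filters out tasks that gr_pid_is_chrooted() says the caller must not see, while the new find_task_by_vpid_unrestricted() keeps the raw lookup for the few internal callers that need it. The general shape of such a filtered/unfiltered pair, with hypothetical stand-ins for the grsecurity policy hook:

#include <stddef.h>

struct task_struct;

extern struct task_struct *raw_lookup(int pid);		/* stand-in */
extern int policy_denies(struct task_struct *task);	/* stand-in for
							 * gr_pid_is_chrooted() */

struct task_struct *lookup(int pid)
{
	struct task_struct *task = raw_lookup(pid);

	if (task && policy_denies(task))
		return NULL;	/* filtered: indistinguishable from "no such task" */
	return task;
}

struct task_struct *lookup_unrestricted(int pid)
{
	return raw_lookup(pid);	/* for callers that must see every task */
}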
70993diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
70994index 125cb67..2e5c8ad 100644
70995--- a/kernel/posix-cpu-timers.c
70996+++ b/kernel/posix-cpu-timers.c
70997@@ -6,9 +6,11 @@
70998 #include <linux/posix-timers.h>
70999 #include <linux/errno.h>
71000 #include <linux/math64.h>
71001+#include <linux/security.h>
71002 #include <asm/uaccess.h>
71003 #include <linux/kernel_stat.h>
71004 #include <trace/events/timer.h>
71005+#include <linux/random.h>
71006
71007 /*
71008 * Called after updating RLIMIT_CPU to run cpu timer and update
71009@@ -494,6 +496,8 @@ static void cleanup_timers(struct list_head *head,
71010 */
71011 void posix_cpu_timers_exit(struct task_struct *tsk)
71012 {
71013+ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
71014+ sizeof(unsigned long long));
71015 cleanup_timers(tsk->cpu_timers,
71016 tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
71017
71018@@ -1578,14 +1582,14 @@ struct k_clock clock_posix_cpu = {
71019
71020 static __init int init_posix_cpu_timers(void)
71021 {
71022- struct k_clock process = {
71023+ static struct k_clock process = {
71024 .clock_getres = process_cpu_clock_getres,
71025 .clock_get = process_cpu_clock_get,
71026 .timer_create = process_cpu_timer_create,
71027 .nsleep = process_cpu_nsleep,
71028 .nsleep_restart = process_cpu_nsleep_restart,
71029 };
71030- struct k_clock thread = {
71031+ static struct k_clock thread = {
71032 .clock_getres = thread_cpu_clock_getres,
71033 .clock_get = thread_cpu_clock_get,
71034 .timer_create = thread_cpu_timer_create,
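
posix_cpu_timers_exit() above mixes each exiting task's cumulative runtime into the pool via add_device_randomness(), which stirs the bytes in without crediting any entropy. A toy model of that "stir a cheap, hard-to-predict per-event value into a pool" pattern:

#include <stdint.h>

static uint64_t pool;

/* e.g. mix(&runtime, sizeof(runtime)) on every task exit */
static void mix(const void *buf, unsigned int len)
{
	const unsigned char *p = buf;

	while (len--)
		pool = pool * 6364136223846793005ULL + *p++;	/* LCG-style stir */
}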
71035diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
71036index 69185ae..cc2847a 100644
71037--- a/kernel/posix-timers.c
71038+++ b/kernel/posix-timers.c
71039@@ -43,6 +43,7 @@
71040 #include <linux/idr.h>
71041 #include <linux/posix-clock.h>
71042 #include <linux/posix-timers.h>
71043+#include <linux/grsecurity.h>
71044 #include <linux/syscalls.h>
71045 #include <linux/wait.h>
71046 #include <linux/workqueue.h>
71047@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
71048 * which we beg off on and pass to do_sys_settimeofday().
71049 */
71050
71051-static struct k_clock posix_clocks[MAX_CLOCKS];
71052+static struct k_clock *posix_clocks[MAX_CLOCKS];
71053
71054 /*
71055 * These ones are defined below.
71056@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
71057 */
71058 static __init int init_posix_timers(void)
71059 {
71060- struct k_clock clock_realtime = {
71061+ static struct k_clock clock_realtime = {
71062 .clock_getres = hrtimer_get_res,
71063 .clock_get = posix_clock_realtime_get,
71064 .clock_set = posix_clock_realtime_set,
71065@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
71066 .timer_get = common_timer_get,
71067 .timer_del = common_timer_del,
71068 };
71069- struct k_clock clock_monotonic = {
71070+ static struct k_clock clock_monotonic = {
71071 .clock_getres = hrtimer_get_res,
71072 .clock_get = posix_ktime_get_ts,
71073 .nsleep = common_nsleep,
71074@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
71075 .timer_get = common_timer_get,
71076 .timer_del = common_timer_del,
71077 };
71078- struct k_clock clock_monotonic_raw = {
71079+ static struct k_clock clock_monotonic_raw = {
71080 .clock_getres = hrtimer_get_res,
71081 .clock_get = posix_get_monotonic_raw,
71082 };
71083- struct k_clock clock_realtime_coarse = {
71084+ static struct k_clock clock_realtime_coarse = {
71085 .clock_getres = posix_get_coarse_res,
71086 .clock_get = posix_get_realtime_coarse,
71087 };
71088- struct k_clock clock_monotonic_coarse = {
71089+ static struct k_clock clock_monotonic_coarse = {
71090 .clock_getres = posix_get_coarse_res,
71091 .clock_get = posix_get_monotonic_coarse,
71092 };
71093- struct k_clock clock_boottime = {
71094+ static struct k_clock clock_boottime = {
71095 .clock_getres = hrtimer_get_res,
71096 .clock_get = posix_get_boottime,
71097 .nsleep = common_nsleep,
71098@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
71099 return;
71100 }
71101
71102- posix_clocks[clock_id] = *new_clock;
71103+ posix_clocks[clock_id] = new_clock;
71104 }
71105 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
71106
71107@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
71108 return (id & CLOCKFD_MASK) == CLOCKFD ?
71109 &clock_posix_dynamic : &clock_posix_cpu;
71110
71111- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
71112+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
71113 return NULL;
71114- return &posix_clocks[id];
71115+ return posix_clocks[id];
71116 }
71117
71118 static int common_timer_create(struct k_itimer *new_timer)
71119@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
71120 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
71121 return -EFAULT;
71122
71123+	/* Only the CLOCK_REALTIME clock can be set; all other clocks
71124+	   have their clock_set fptr set to a nosettime dummy function.
71125+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
71126+	   call common_clock_set, which calls do_sys_settimeofday, which
71127+	   we hook.
71128+	*/
71129+
71130 return kc->clock_set(which_clock, &new_tp);
71131 }
71132
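
The k_clock changes here and in posix-cpu-timers.c above are two halves of one fix: posix_timers_register_clock() now stores a pointer instead of copying the struct, so each function-pointer table stays in one fixed location instead of being duplicated into writable storage, and in turn every registered k_clock must be static: an automatic variable would dangle the moment its init function returned. A minimal illustration of the hazard, under the assumption of a pointer-keeping registry:

struct k_clock {
	int (*clock_getres)(void);
};

static struct k_clock *registry[16];

static void register_clock(int id, struct k_clock *kc)
{
	registry[id] = kc;	/* keeps the pointer; no copy is made */
}

static void init_clocks(void)
{
	static struct k_clock mono = { .clock_getres = 0 };
	/* `static` gives `mono` program lifetime; a plain local here
	 * would leave registry[0] pointing into a dead stack frame */
	register_clock(0, &mono);
}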
71133diff --git a/kernel/power/process.c b/kernel/power/process.c
71134index 87da817..30ddd13 100644
71135--- a/kernel/power/process.c
71136+++ b/kernel/power/process.c
71137@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
71138 u64 elapsed_csecs64;
71139 unsigned int elapsed_csecs;
71140 bool wakeup = false;
71141+ bool timedout = false;
71142
71143 do_gettimeofday(&start);
71144
71145@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
71146
71147 while (true) {
71148 todo = 0;
71149+ if (time_after(jiffies, end_time))
71150+ timedout = true;
71151 read_lock(&tasklist_lock);
71152 do_each_thread(g, p) {
71153 if (p == current || !freeze_task(p))
71154@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
71155 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
71156 * transition can't race with task state testing here.
71157 */
71158- if (!task_is_stopped_or_traced(p) &&
71159- !freezer_should_skip(p))
71160+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
71161 todo++;
71162+ if (timedout) {
71163+ printk(KERN_ERR "Task refusing to freeze:\n");
71164+ sched_show_task(p);
71165+ }
71166+ }
71167 } while_each_thread(g, p);
71168 read_unlock(&tasklist_lock);
71169
71170@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
71171 todo += wq_busy;
71172 }
71173
71174- if (!todo || time_after(jiffies, end_time))
71175+ if (!todo || timedout)
71176 break;
71177
71178 if (pm_wakeup_pending()) {
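
try_to_freeze_tasks() now latches the timeout in a single `timedout` flag at the top of each pass and uses it both to name every straggler via sched_show_task() and to decide when to give up, so the diagnostics and the exit condition can never disagree. The control flow, reduced to a stand-alone sketch:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool freeze_all(time_t end_time, int (*still_busy)(int), int ntasks)
{
	for (;;) {
		bool timedout = time(NULL) > end_time;	/* latch once per pass */
		int todo = 0;

		for (int i = 0; i < ntasks; i++) {
			if (!still_busy(i))
				continue;
			todo++;
			if (timedout)
				fprintf(stderr, "task %d refusing to freeze\n", i);
		}
		if (!todo || timedout)
			return todo == 0;
	}
}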
71179diff --git a/kernel/printk.c b/kernel/printk.c
71180index f8e0b5a..dda2a5c 100644
71181--- a/kernel/printk.c
71182+++ b/kernel/printk.c
71183@@ -817,6 +817,11 @@ static int check_syslog_permissions(int type, bool from_file)
71184 if (from_file && type != SYSLOG_ACTION_OPEN)
71185 return 0;
71186
71187+#ifdef CONFIG_GRKERNSEC_DMESG
71188+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
71189+ return -EPERM;
71190+#endif
71191+
71192 if (syslog_action_restricted(type)) {
71193 if (capable(CAP_SYSLOG))
71194 return 0;
71195diff --git a/kernel/profile.c b/kernel/profile.c
71196index 76b8e77..a2930e8 100644
71197--- a/kernel/profile.c
71198+++ b/kernel/profile.c
71199@@ -39,7 +39,7 @@ struct profile_hit {
71200 /* Oprofile timer tick hook */
71201 static int (*timer_hook)(struct pt_regs *) __read_mostly;
71202
71203-static atomic_t *prof_buffer;
71204+static atomic_unchecked_t *prof_buffer;
71205 static unsigned long prof_len, prof_shift;
71206
71207 int prof_on __read_mostly;
71208@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
71209 hits[i].pc = 0;
71210 continue;
71211 }
71212- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
71213+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
71214 hits[i].hits = hits[i].pc = 0;
71215 }
71216 }
71217@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
71218 * Add the current hit(s) and flush the write-queue out
71219 * to the global buffer:
71220 */
71221- atomic_add(nr_hits, &prof_buffer[pc]);
71222+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
71223 for (i = 0; i < NR_PROFILE_HIT; ++i) {
71224- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
71225+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
71226 hits[i].pc = hits[i].hits = 0;
71227 }
71228 out:
71229@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
71230 {
71231 unsigned long pc;
71232 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
71233- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
71234+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
71235 }
71236 #endif /* !CONFIG_SMP */
71237
71238@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
71239 return -EFAULT;
71240 buf++; p++; count--; read++;
71241 }
71242- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
71243+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
71244 if (copy_to_user(buf, (void *)pnt, count))
71245 return -EFAULT;
71246 read += count;
71247@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
71248 }
71249 #endif
71250 profile_discard_flip_buffers();
71251- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
71252+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
71253 return count;
71254 }
71255
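
The atomic_t to atomic_unchecked_t conversions that start here and recur through the rcutorture, rcutree, rtmutex-tester, and autogroup hunks below all share one rationale: PaX's REFCOUNT feature instruments atomic_t against overflow, and counters that are purely statistical (profiler hits, torture-test tallies, sequence numbers) may wrap harmlessly, so they move to an uninstrumented variant. A C11 stand-in for such an opt-out type:

#include <stdatomic.h>

typedef struct {
	_Atomic long counter;
} atomic_unchecked_model;

static inline void inc_unchecked(atomic_unchecked_model *v)
{
	/* plain wrapping increment; no overflow trap is ever inserted */
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static inline long read_unchecked(atomic_unchecked_model *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}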
71256diff --git a/kernel/ptrace.c b/kernel/ptrace.c
71257index fbea91d..9bf15e8 100644
71258--- a/kernel/ptrace.c
71259+++ b/kernel/ptrace.c
71260@@ -319,7 +319,7 @@ static int ptrace_attach(struct task_struct *task, long request,
71261
71262 if (seize)
71263 flags |= PT_SEIZED;
71264- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
71265+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
71266 flags |= PT_PTRACE_CAP;
71267 task->ptrace = flags;
71268
71269@@ -526,7 +526,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
71270 break;
71271 return -EIO;
71272 }
71273- if (copy_to_user(dst, buf, retval))
71274+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
71275 return -EFAULT;
71276 copied += retval;
71277 src += retval;
71278@@ -711,7 +711,7 @@ int ptrace_request(struct task_struct *child, long request,
71279 bool seized = child->ptrace & PT_SEIZED;
71280 int ret = -EIO;
71281 siginfo_t siginfo, *si;
71282- void __user *datavp = (void __user *) data;
71283+ void __user *datavp = (__force void __user *) data;
71284 unsigned long __user *datalp = datavp;
71285 unsigned long flags;
71286
71287@@ -913,14 +913,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
71288 goto out;
71289 }
71290
71291+ if (gr_handle_ptrace(child, request)) {
71292+ ret = -EPERM;
71293+ goto out_put_task_struct;
71294+ }
71295+
71296 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71297 ret = ptrace_attach(child, request, addr, data);
71298 /*
71299 * Some architectures need to do book-keeping after
71300 * a ptrace attach.
71301 */
71302- if (!ret)
71303+ if (!ret) {
71304 arch_ptrace_attach(child);
71305+ gr_audit_ptrace(child);
71306+ }
71307 goto out_put_task_struct;
71308 }
71309
71310@@ -948,7 +955,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
71311 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
71312 if (copied != sizeof(tmp))
71313 return -EIO;
71314- return put_user(tmp, (unsigned long __user *)data);
71315+ return put_user(tmp, (__force unsigned long __user *)data);
71316 }
71317
71318 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
71319@@ -1058,14 +1065,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
71320 goto out;
71321 }
71322
71323+ if (gr_handle_ptrace(child, request)) {
71324+ ret = -EPERM;
71325+ goto out_put_task_struct;
71326+ }
71327+
71328 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71329 ret = ptrace_attach(child, request, addr, data);
71330 /*
71331 * Some architectures need to do book-keeping after
71332 * a ptrace attach.
71333 */
71334- if (!ret)
71335+ if (!ret) {
71336 arch_ptrace_attach(child);
71337+ gr_audit_ptrace(child);
71338+ }
71339 goto out_put_task_struct;
71340 }
71341
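
The added `retval > sizeof(buf)` test in ptrace_readdata() is defensive: if access_process_vm() ever reported more bytes than the chunk buffer holds, the following copy_to_user() would read past `buf`. The generic shape of clamping a producer-reported length before copying:

#include <string.h>

static int copy_chunk(char *dst, const char *buf, size_t buflen, long produced)
{
	if (produced < 0 || (size_t)produced > buflen)
		return -1;	/* never trust a producer-reported length */
	memcpy(dst, buf, (size_t)produced);
	return 0;
}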
71342diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
71343index e4c6a59..c86621a 100644
71344--- a/kernel/rcutiny.c
71345+++ b/kernel/rcutiny.c
71346@@ -46,7 +46,7 @@
71347 struct rcu_ctrlblk;
71348 static void invoke_rcu_callbacks(void);
71349 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
71350-static void rcu_process_callbacks(struct softirq_action *unused);
71351+static void rcu_process_callbacks(void);
71352 static void __call_rcu(struct rcu_head *head,
71353 void (*func)(struct rcu_head *rcu),
71354 struct rcu_ctrlblk *rcp);
71355@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
71356 rcu_is_callbacks_kthread()));
71357 }
71358
71359-static void rcu_process_callbacks(struct softirq_action *unused)
71360+static void rcu_process_callbacks(void)
71361 {
71362 __rcu_process_callbacks(&rcu_sched_ctrlblk);
71363 __rcu_process_callbacks(&rcu_bh_ctrlblk);
71364diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
71365index 3d01902..afbf46e 100644
71366--- a/kernel/rcutiny_plugin.h
71367+++ b/kernel/rcutiny_plugin.h
71368@@ -893,7 +893,7 @@ static int rcu_kthread(void *arg)
71369 have_rcu_kthread_work = morework;
71370 local_irq_restore(flags);
71371 if (work)
71372- rcu_process_callbacks(NULL);
71373+ rcu_process_callbacks();
71374 schedule_timeout_interruptible(1); /* Leave CPU for others. */
71375 }
71376
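
rcu_process_callbacks() loses its never-used softirq_action argument here, and run_rebalance_domains() in kernel/sched/fair.c below gets the same treatment; with every handler reduced to the same zero-argument prototype, the softirq dispatch table can become a uniform array of `void (*)(void)` that is easier to constify and to call without fabricating a dummy argument. Sketch of the resulting dispatch shape:

typedef void (*softirq_handler)(void);

static void rcu_handler(void)       { /* __rcu_process_callbacks(...) */ }
static void rebalance_handler(void) { /* load-balancing pass */ }

static softirq_handler const softirq_vec[] = {
	rcu_handler,
	rebalance_handler,
};

static void do_softirq_pass(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(softirq_vec) / sizeof(softirq_vec[0]); i++)
		softirq_vec[i]();	/* no unused argument to invent */
}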
71377diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
71378index aaa7b9f..055ff1e 100644
71379--- a/kernel/rcutorture.c
71380+++ b/kernel/rcutorture.c
71381@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
71382 { 0 };
71383 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
71384 { 0 };
71385-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71386-static atomic_t n_rcu_torture_alloc;
71387-static atomic_t n_rcu_torture_alloc_fail;
71388-static atomic_t n_rcu_torture_free;
71389-static atomic_t n_rcu_torture_mberror;
71390-static atomic_t n_rcu_torture_error;
71391+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
71392+static atomic_unchecked_t n_rcu_torture_alloc;
71393+static atomic_unchecked_t n_rcu_torture_alloc_fail;
71394+static atomic_unchecked_t n_rcu_torture_free;
71395+static atomic_unchecked_t n_rcu_torture_mberror;
71396+static atomic_unchecked_t n_rcu_torture_error;
71397 static long n_rcu_torture_barrier_error;
71398 static long n_rcu_torture_boost_ktrerror;
71399 static long n_rcu_torture_boost_rterror;
71400@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
71401
71402 spin_lock_bh(&rcu_torture_lock);
71403 if (list_empty(&rcu_torture_freelist)) {
71404- atomic_inc(&n_rcu_torture_alloc_fail);
71405+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
71406 spin_unlock_bh(&rcu_torture_lock);
71407 return NULL;
71408 }
71409- atomic_inc(&n_rcu_torture_alloc);
71410+ atomic_inc_unchecked(&n_rcu_torture_alloc);
71411 p = rcu_torture_freelist.next;
71412 list_del_init(p);
71413 spin_unlock_bh(&rcu_torture_lock);
71414@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
71415 static void
71416 rcu_torture_free(struct rcu_torture *p)
71417 {
71418- atomic_inc(&n_rcu_torture_free);
71419+ atomic_inc_unchecked(&n_rcu_torture_free);
71420 spin_lock_bh(&rcu_torture_lock);
71421 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
71422 spin_unlock_bh(&rcu_torture_lock);
71423@@ -410,7 +410,7 @@ rcu_torture_cb(struct rcu_head *p)
71424 i = rp->rtort_pipe_count;
71425 if (i > RCU_TORTURE_PIPE_LEN)
71426 i = RCU_TORTURE_PIPE_LEN;
71427- atomic_inc(&rcu_torture_wcount[i]);
71428+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71429 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71430 rp->rtort_mbtest = 0;
71431 rcu_torture_free(rp);
71432@@ -459,7 +459,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
71433 i = rp->rtort_pipe_count;
71434 if (i > RCU_TORTURE_PIPE_LEN)
71435 i = RCU_TORTURE_PIPE_LEN;
71436- atomic_inc(&rcu_torture_wcount[i]);
71437+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71438 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
71439 rp->rtort_mbtest = 0;
71440 list_del(&rp->rtort_free);
71441@@ -1002,7 +1002,7 @@ rcu_torture_writer(void *arg)
71442 i = old_rp->rtort_pipe_count;
71443 if (i > RCU_TORTURE_PIPE_LEN)
71444 i = RCU_TORTURE_PIPE_LEN;
71445- atomic_inc(&rcu_torture_wcount[i]);
71446+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
71447 old_rp->rtort_pipe_count++;
71448 cur_ops->deferred_free(old_rp);
71449 }
71450@@ -1087,7 +1087,7 @@ static void rcu_torture_timer(unsigned long unused)
71451 }
71452 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71453 if (p->rtort_mbtest == 0)
71454- atomic_inc(&n_rcu_torture_mberror);
71455+ atomic_inc_unchecked(&n_rcu_torture_mberror);
71456 spin_lock(&rand_lock);
71457 cur_ops->read_delay(&rand);
71458 n_rcu_torture_timers++;
71459@@ -1151,7 +1151,7 @@ rcu_torture_reader(void *arg)
71460 }
71461 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
71462 if (p->rtort_mbtest == 0)
71463- atomic_inc(&n_rcu_torture_mberror);
71464+ atomic_inc_unchecked(&n_rcu_torture_mberror);
71465 cur_ops->read_delay(&rand);
71466 preempt_disable();
71467 pipe_count = p->rtort_pipe_count;
71468@@ -1210,11 +1210,11 @@ rcu_torture_printk(char *page)
71469 rcu_torture_current,
71470 rcu_torture_current_version,
71471 list_empty(&rcu_torture_freelist),
71472- atomic_read(&n_rcu_torture_alloc),
71473- atomic_read(&n_rcu_torture_alloc_fail),
71474- atomic_read(&n_rcu_torture_free));
71475+ atomic_read_unchecked(&n_rcu_torture_alloc),
71476+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
71477+ atomic_read_unchecked(&n_rcu_torture_free));
71478 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
71479- atomic_read(&n_rcu_torture_mberror),
71480+ atomic_read_unchecked(&n_rcu_torture_mberror),
71481 n_rcu_torture_boost_ktrerror,
71482 n_rcu_torture_boost_rterror);
71483 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
71484@@ -1233,14 +1233,14 @@ rcu_torture_printk(char *page)
71485 n_barrier_attempts,
71486 n_rcu_torture_barrier_error);
71487 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
71488- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
71489+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
71490 n_rcu_torture_barrier_error != 0 ||
71491 n_rcu_torture_boost_ktrerror != 0 ||
71492 n_rcu_torture_boost_rterror != 0 ||
71493 n_rcu_torture_boost_failure != 0 ||
71494 i > 1) {
71495 cnt += sprintf(&page[cnt], "!!! ");
71496- atomic_inc(&n_rcu_torture_error);
71497+ atomic_inc_unchecked(&n_rcu_torture_error);
71498 WARN_ON_ONCE(1);
71499 }
71500 cnt += sprintf(&page[cnt], "Reader Pipe: ");
71501@@ -1254,7 +1254,7 @@ rcu_torture_printk(char *page)
71502 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
71503 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71504 cnt += sprintf(&page[cnt], " %d",
71505- atomic_read(&rcu_torture_wcount[i]));
71506+ atomic_read_unchecked(&rcu_torture_wcount[i]));
71507 }
71508 cnt += sprintf(&page[cnt], "\n");
71509 if (cur_ops->stats)
71510@@ -1938,7 +1938,7 @@ rcu_torture_cleanup(void)
71511
71512 if (cur_ops->cleanup)
71513 cur_ops->cleanup();
71514- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71515+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
71516 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
71517 else if (n_online_successes != n_online_attempts ||
71518 n_offline_successes != n_offline_attempts)
71519@@ -2007,18 +2007,18 @@ rcu_torture_init(void)
71520
71521 rcu_torture_current = NULL;
71522 rcu_torture_current_version = 0;
71523- atomic_set(&n_rcu_torture_alloc, 0);
71524- atomic_set(&n_rcu_torture_alloc_fail, 0);
71525- atomic_set(&n_rcu_torture_free, 0);
71526- atomic_set(&n_rcu_torture_mberror, 0);
71527- atomic_set(&n_rcu_torture_error, 0);
71528+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
71529+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
71530+ atomic_set_unchecked(&n_rcu_torture_free, 0);
71531+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
71532+ atomic_set_unchecked(&n_rcu_torture_error, 0);
71533 n_rcu_torture_barrier_error = 0;
71534 n_rcu_torture_boost_ktrerror = 0;
71535 n_rcu_torture_boost_rterror = 0;
71536 n_rcu_torture_boost_failure = 0;
71537 n_rcu_torture_boosts = 0;
71538 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
71539- atomic_set(&rcu_torture_wcount[i], 0);
71540+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
71541 for_each_possible_cpu(cpu) {
71542 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
71543 per_cpu(rcu_torture_count, cpu)[i] = 0;
71544diff --git a/kernel/rcutree.c b/kernel/rcutree.c
71545index 2682295..0f2297e 100644
71546--- a/kernel/rcutree.c
71547+++ b/kernel/rcutree.c
71548@@ -348,9 +348,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
71549 rcu_prepare_for_idle(smp_processor_id());
71550 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71551 smp_mb__before_atomic_inc(); /* See above. */
71552- atomic_inc(&rdtp->dynticks);
71553+ atomic_inc_unchecked(&rdtp->dynticks);
71554 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
71555- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71556+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71557
71558 /*
71559 * It is illegal to enter an extended quiescent state while
71560@@ -508,10 +508,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
71561 int user)
71562 {
71563 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
71564- atomic_inc(&rdtp->dynticks);
71565+ atomic_inc_unchecked(&rdtp->dynticks);
71566 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71567 smp_mb__after_atomic_inc(); /* See above. */
71568- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71569+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71570 rcu_cleanup_after_idle(smp_processor_id());
71571 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
71572 if (!user && !is_idle_task(current)) {
71573@@ -670,14 +670,14 @@ void rcu_nmi_enter(void)
71574 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
71575
71576 if (rdtp->dynticks_nmi_nesting == 0 &&
71577- (atomic_read(&rdtp->dynticks) & 0x1))
71578+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
71579 return;
71580 rdtp->dynticks_nmi_nesting++;
71581 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
71582- atomic_inc(&rdtp->dynticks);
71583+ atomic_inc_unchecked(&rdtp->dynticks);
71584 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
71585 smp_mb__after_atomic_inc(); /* See above. */
71586- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
71587+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
71588 }
71589
71590 /**
71591@@ -696,9 +696,9 @@ void rcu_nmi_exit(void)
71592 return;
71593 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
71594 smp_mb__before_atomic_inc(); /* See above. */
71595- atomic_inc(&rdtp->dynticks);
71596+ atomic_inc_unchecked(&rdtp->dynticks);
71597 smp_mb__after_atomic_inc(); /* Force delay to next write. */
71598- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
71599+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
71600 }
71601
71602 /**
71603@@ -712,7 +712,7 @@ int rcu_is_cpu_idle(void)
71604 int ret;
71605
71606 preempt_disable();
71607- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71608+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
71609 preempt_enable();
71610 return ret;
71611 }
71612@@ -795,7 +795,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
71613 */
71614 static int dyntick_save_progress_counter(struct rcu_data *rdp)
71615 {
71616- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
71617+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71618 return (rdp->dynticks_snap & 0x1) == 0;
71619 }
71620
71621@@ -810,7 +810,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
71622 unsigned int curr;
71623 unsigned int snap;
71624
71625- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
71626+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
71627 snap = (unsigned int)rdp->dynticks_snap;
71628
71629 /*
71630@@ -858,10 +858,10 @@ static int jiffies_till_stall_check(void)
71631 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
71632 */
71633 if (till_stall_check < 3) {
71634- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
71635+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
71636 till_stall_check = 3;
71637 } else if (till_stall_check > 300) {
71638- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
71639+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
71640 till_stall_check = 300;
71641 }
71642 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
71643@@ -1589,7 +1589,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
71644 rsp->qlen += rdp->qlen;
71645 rdp->n_cbs_orphaned += rdp->qlen;
71646 rdp->qlen_lazy = 0;
71647- ACCESS_ONCE(rdp->qlen) = 0;
71648+ ACCESS_ONCE_RW(rdp->qlen) = 0;
71649 }
71650
71651 /*
71652@@ -1831,7 +1831,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
71653 }
71654 smp_mb(); /* List handling before counting for rcu_barrier(). */
71655 rdp->qlen_lazy -= count_lazy;
71656- ACCESS_ONCE(rdp->qlen) -= count;
71657+ ACCESS_ONCE_RW(rdp->qlen) -= count;
71658 rdp->n_cbs_invoked += count;
71659
71660 /* Reinstate batch limit if we have worked down the excess. */
71661@@ -2024,7 +2024,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
71662 /*
71663 * Do RCU core processing for the current CPU.
71664 */
71665-static void rcu_process_callbacks(struct softirq_action *unused)
71666+static void rcu_process_callbacks(void)
71667 {
71668 struct rcu_state *rsp;
71669
71670@@ -2136,7 +2136,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
71671 local_irq_restore(flags);
71672 return;
71673 }
71674- ACCESS_ONCE(rdp->qlen)++;
71675+ ACCESS_ONCE_RW(rdp->qlen)++;
71676 if (lazy)
71677 rdp->qlen_lazy++;
71678 else
71679@@ -2250,8 +2250,8 @@ void synchronize_rcu_bh(void)
71680 }
71681 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
71682
71683-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
71684-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
71685+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
71686+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
71687
71688 static int synchronize_sched_expedited_cpu_stop(void *data)
71689 {
71690@@ -2312,7 +2312,7 @@ void synchronize_sched_expedited(void)
71691 int firstsnap, s, snap, trycount = 0;
71692
71693 /* Note that atomic_inc_return() implies full memory barrier. */
71694- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
71695+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
71696 get_online_cpus();
71697 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
71698
71699@@ -2334,7 +2334,7 @@ void synchronize_sched_expedited(void)
71700 }
71701
71702 /* Check to see if someone else did our work for us. */
71703- s = atomic_read(&sync_sched_expedited_done);
71704+ s = atomic_read_unchecked(&sync_sched_expedited_done);
71705 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
71706 smp_mb(); /* ensure test happens before caller kfree */
71707 return;
71708@@ -2349,7 +2349,7 @@ void synchronize_sched_expedited(void)
71709 * grace period works for us.
71710 */
71711 get_online_cpus();
71712- snap = atomic_read(&sync_sched_expedited_started);
71713+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
71714 smp_mb(); /* ensure read is before try_stop_cpus(). */
71715 }
71716
71717@@ -2360,12 +2360,12 @@ void synchronize_sched_expedited(void)
71718 * than we did beat us to the punch.
71719 */
71720 do {
71721- s = atomic_read(&sync_sched_expedited_done);
71722+ s = atomic_read_unchecked(&sync_sched_expedited_done);
71723 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
71724 smp_mb(); /* ensure test happens before caller kfree */
71725 break;
71726 }
71727- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
71728+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
71729
71730 put_online_cpus();
71731 }
71732@@ -2539,7 +2539,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71733 * ACCESS_ONCE() to prevent the compiler from speculating
71734 * the increment to precede the early-exit check.
71735 */
71736- ACCESS_ONCE(rsp->n_barrier_done)++;
71737+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71738 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
71739 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
71740 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
71741@@ -2581,7 +2581,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
71742
71743 /* Increment ->n_barrier_done to prevent duplicate work. */
71744 smp_mb(); /* Keep increment after above mechanism. */
71745- ACCESS_ONCE(rsp->n_barrier_done)++;
71746+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
71747 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
71748 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
71749 smp_mb(); /* Keep increment before caller's subsequent code. */
71750@@ -2626,10 +2626,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
71751 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
71752 init_callback_list(rdp);
71753 rdp->qlen_lazy = 0;
71754- ACCESS_ONCE(rdp->qlen) = 0;
71755+ ACCESS_ONCE_RW(rdp->qlen) = 0;
71756 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
71757 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
71758- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
71759+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
71760 #ifdef CONFIG_RCU_USER_QS
71761 WARN_ON_ONCE(rdp->dynticks->in_user);
71762 #endif
71763@@ -2664,8 +2664,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
71764 rdp->blimit = blimit;
71765 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
71766 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
71767- atomic_set(&rdp->dynticks->dynticks,
71768- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
71769+ atomic_set_unchecked(&rdp->dynticks->dynticks,
71770+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
71771 rcu_prepare_for_idle_init(cpu);
71772 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
71773
71774diff --git a/kernel/rcutree.h b/kernel/rcutree.h
71775index a240f03..d469618 100644
71776--- a/kernel/rcutree.h
71777+++ b/kernel/rcutree.h
71778@@ -86,7 +86,7 @@ struct rcu_dynticks {
71779 long long dynticks_nesting; /* Track irq/process nesting level. */
71780 /* Process level is worth LLONG_MAX/2. */
71781 int dynticks_nmi_nesting; /* Track NMI nesting level. */
71782- atomic_t dynticks; /* Even value for idle, else odd. */
71783+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
71784 #ifdef CONFIG_RCU_FAST_NO_HZ
71785 int dyntick_drain; /* Prepare-for-idle state variable. */
71786 unsigned long dyntick_holdoff;
71787diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
71788index f921154..34c4873 100644
71789--- a/kernel/rcutree_plugin.h
71790+++ b/kernel/rcutree_plugin.h
71791@@ -865,7 +865,7 @@ void synchronize_rcu_expedited(void)
71792
71793 /* Clean up and exit. */
71794 smp_mb(); /* ensure expedited GP seen before counter increment. */
71795- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
71796+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
71797 unlock_mb_ret:
71798 mutex_unlock(&sync_rcu_preempt_exp_mutex);
71799 mb_ret:
71800@@ -2040,7 +2040,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
71801 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
71802 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
71803 cpu, ticks_value, ticks_title,
71804- atomic_read(&rdtp->dynticks) & 0xfff,
71805+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
71806 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
71807 fast_no_hz);
71808 }
71809diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
71810index 693513b..b9f1d63 100644
71811--- a/kernel/rcutree_trace.c
71812+++ b/kernel/rcutree_trace.c
71813@@ -92,7 +92,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
71814 rdp->completed, rdp->gpnum,
71815 rdp->passed_quiesce, rdp->qs_pending);
71816 seq_printf(m, " dt=%d/%llx/%d df=%lu",
71817- atomic_read(&rdp->dynticks->dynticks),
71818+ atomic_read_unchecked(&rdp->dynticks->dynticks),
71819 rdp->dynticks->dynticks_nesting,
71820 rdp->dynticks->dynticks_nmi_nesting,
71821 rdp->dynticks_fqs);
71822@@ -154,7 +154,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
71823 rdp->completed, rdp->gpnum,
71824 rdp->passed_quiesce, rdp->qs_pending);
71825 seq_printf(m, ",%d,%llx,%d,%lu",
71826- atomic_read(&rdp->dynticks->dynticks),
71827+ atomic_read_unchecked(&rdp->dynticks->dynticks),
71828 rdp->dynticks->dynticks_nesting,
71829 rdp->dynticks->dynticks_nmi_nesting,
71830 rdp->dynticks_fqs);
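
The dynticks conversions above preserve a counter that is read for its parity, not its magnitude: an even value means the CPU is outside RCU (idle), odd means inside, and atomic_add_return(0, ...) serves as a fully ordered read; wrapping is expected, which is exactly why the unchecked type fits. A C11 model of the parity trick:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic long dynticks;

/* entering or leaving the extended quiescent state flips parity */
static void eqs_flip(void)
{
	atomic_fetch_add_explicit(&dynticks, 1, memory_order_seq_cst);
}

/* fully ordered read, mirroring atomic_add_return(0, &dynticks) */
static bool cpu_idle_snapshot(long *snap)
{
	*snap = atomic_fetch_add_explicit(&dynticks, 0, memory_order_seq_cst);
	return (*snap & 1) == 0;	/* even value means idle */
}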
71831diff --git a/kernel/resource.c b/kernel/resource.c
71832index 73f35d4..4684fc4 100644
71833--- a/kernel/resource.c
71834+++ b/kernel/resource.c
71835@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
71836
71837 static int __init ioresources_init(void)
71838 {
71839+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71840+#ifdef CONFIG_GRKERNSEC_PROC_USER
71841+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
71842+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
71843+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71844+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
71845+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
71846+#endif
71847+#else
71848 proc_create("ioports", 0, NULL, &proc_ioports_operations);
71849 proc_create("iomem", 0, NULL, &proc_iomem_operations);
71850+#endif
71851 return 0;
71852 }
71853 __initcall(ioresources_init);
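
ioresources_init() now picks its /proc entry mode the same way proc_modules_init() did above: build-time grsecurity options narrow who may read the files. Reduced to one decision, with hypothetical GR_PROC_* macros standing in for the CONFIG_GRKERNSEC_PROC_* options:

#include <sys/stat.h>

static mode_t proc_entry_mode(void)
{
#if defined(GR_PROC_USER)
	return S_IRUSR;			/* readable by root only */
#elif defined(GR_PROC_USERGROUP)
	return S_IRUSR | S_IRGRP;	/* root plus one configured group */
#else
	return 0444;			/* stock, world-readable behaviour */
#endif
}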
71854diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
71855index 98ec494..4241d6d 100644
71856--- a/kernel/rtmutex-tester.c
71857+++ b/kernel/rtmutex-tester.c
71858@@ -20,7 +20,7 @@
71859 #define MAX_RT_TEST_MUTEXES 8
71860
71861 static spinlock_t rttest_lock;
71862-static atomic_t rttest_event;
71863+static atomic_unchecked_t rttest_event;
71864
71865 struct test_thread_data {
71866 int opcode;
71867@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71868
71869 case RTTEST_LOCKCONT:
71870 td->mutexes[td->opdata] = 1;
71871- td->event = atomic_add_return(1, &rttest_event);
71872+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71873 return 0;
71874
71875 case RTTEST_RESET:
71876@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71877 return 0;
71878
71879 case RTTEST_RESETEVENT:
71880- atomic_set(&rttest_event, 0);
71881+ atomic_set_unchecked(&rttest_event, 0);
71882 return 0;
71883
71884 default:
71885@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71886 return ret;
71887
71888 td->mutexes[id] = 1;
71889- td->event = atomic_add_return(1, &rttest_event);
71890+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71891 rt_mutex_lock(&mutexes[id]);
71892- td->event = atomic_add_return(1, &rttest_event);
71893+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71894 td->mutexes[id] = 4;
71895 return 0;
71896
71897@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71898 return ret;
71899
71900 td->mutexes[id] = 1;
71901- td->event = atomic_add_return(1, &rttest_event);
71902+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71903 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
71904- td->event = atomic_add_return(1, &rttest_event);
71905+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71906 td->mutexes[id] = ret ? 0 : 4;
71907 return ret ? -EINTR : 0;
71908
71909@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
71910 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
71911 return ret;
71912
71913- td->event = atomic_add_return(1, &rttest_event);
71914+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71915 rt_mutex_unlock(&mutexes[id]);
71916- td->event = atomic_add_return(1, &rttest_event);
71917+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71918 td->mutexes[id] = 0;
71919 return 0;
71920
71921@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71922 break;
71923
71924 td->mutexes[dat] = 2;
71925- td->event = atomic_add_return(1, &rttest_event);
71926+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71927 break;
71928
71929 default:
71930@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71931 return;
71932
71933 td->mutexes[dat] = 3;
71934- td->event = atomic_add_return(1, &rttest_event);
71935+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71936 break;
71937
71938 case RTTEST_LOCKNOWAIT:
71939@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
71940 return;
71941
71942 td->mutexes[dat] = 1;
71943- td->event = atomic_add_return(1, &rttest_event);
71944+ td->event = atomic_add_return_unchecked(1, &rttest_event);
71945 return;
71946
71947 default:
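The atomic_t to atomic_unchecked_t conversions here (and in kernel/sched/auto_group.c, kernel/time/timer_stats.c, the blktrace and mmiotrace counters, and the ring-buffer fields below) are the flip side of PaX's REFCOUNT hardening: once atomic_t operations detect and saturate on overflow, counters that are allowed to wrap, such as this test-event sequence number, must move to an unchecked variant with matching *_unchecked accessors. A rough userspace model of the split, with hypothetical names and C11 atomics; the real implementation does the detection in arch-specific code:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_int v; } atomic_checked_t;     /* saturates */
    typedef struct { atomic_uint v; } atomic_unchecked_t;  /* may wrap */

    static int checked_add_return(atomic_checked_t *a, int i)
    {
        int old = atomic_load(&a->v);
        int next;
        do {
            if ((i > 0 && old > INT_MAX - i) || (i < 0 && old < INT_MIN - i)) {
                fprintf(stderr, "refcount saturation\n");
                return old;                     /* refuse to wrap */
            }
            next = old + i;
        } while (!atomic_compare_exchange_weak(&a->v, &old, next));
        return next;
    }

    static unsigned unchecked_add_return(atomic_unchecked_t *a, unsigned i)
    {
        return atomic_fetch_add(&a->v, i) + i;  /* unsigned wrap is defined */
    }

    int main(void)
    {
        atomic_checked_t refs = { INT_MAX };
        atomic_unchecked_t seq = { 0u };

        checked_add_return(&refs, 1);           /* detected, saturates */
        printf("event %u\n", unchecked_add_return(&seq, 1));
        return 0;
    }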
71948diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
71949index 15f60d0..7e50319 100644
71950--- a/kernel/sched/auto_group.c
71951+++ b/kernel/sched/auto_group.c
71952@@ -11,7 +11,7 @@
71953
71954 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
71955 static struct autogroup autogroup_default;
71956-static atomic_t autogroup_seq_nr;
71957+static atomic_unchecked_t autogroup_seq_nr;
71958
71959 void __init autogroup_init(struct task_struct *init_task)
71960 {
71961@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
71962
71963 kref_init(&ag->kref);
71964 init_rwsem(&ag->lock);
71965- ag->id = atomic_inc_return(&autogroup_seq_nr);
71966+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
71967 ag->tg = tg;
71968 #ifdef CONFIG_RT_GROUP_SCHED
71969 /*
71970diff --git a/kernel/sched/core.c b/kernel/sched/core.c
71971index c529d00..d00b4f3 100644
71972--- a/kernel/sched/core.c
71973+++ b/kernel/sched/core.c
71974@@ -3563,6 +3563,8 @@ int can_nice(const struct task_struct *p, const int nice)
71975 /* convert nice value [19,-20] to rlimit style value [1,40] */
71976 int nice_rlim = 20 - nice;
71977
71978+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
71979+
71980 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
71981 capable(CAP_SYS_NICE));
71982 }
71983@@ -3596,7 +3598,8 @@ SYSCALL_DEFINE1(nice, int, increment)
71984 if (nice > 19)
71985 nice = 19;
71986
71987- if (increment < 0 && !can_nice(current, nice))
71988+ if (increment < 0 && (!can_nice(current, nice) ||
71989+ gr_handle_chroot_nice()))
71990 return -EPERM;
71991
71992 retval = security_task_setnice(current, nice);
71993@@ -3750,6 +3753,7 @@ recheck:
71994 unsigned long rlim_rtprio =
71995 task_rlimit(p, RLIMIT_RTPRIO);
71996
71997+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
71998 /* can't set/change the rt policy */
71999 if (policy != p->policy && !rlim_rtprio)
72000 return -EPERM;
72001diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
72002index 6b800a1..0c36227 100644
72003--- a/kernel/sched/fair.c
72004+++ b/kernel/sched/fair.c
72005@@ -4890,7 +4890,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
72006 * run_rebalance_domains is triggered when needed from the scheduler tick.
72007 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
72008 */
72009-static void run_rebalance_domains(struct softirq_action *h)
72010+static void run_rebalance_domains(void)
72011 {
72012 int this_cpu = smp_processor_id();
72013 struct rq *this_rq = cpu_rq(this_cpu);
72014diff --git a/kernel/signal.c b/kernel/signal.c
72015index 57dde52..2c561f0 100644
72016--- a/kernel/signal.c
72017+++ b/kernel/signal.c
72018@@ -49,12 +49,12 @@ static struct kmem_cache *sigqueue_cachep;
72019
72020 int print_fatal_signals __read_mostly;
72021
72022-static void __user *sig_handler(struct task_struct *t, int sig)
72023+static __sighandler_t sig_handler(struct task_struct *t, int sig)
72024 {
72025 return t->sighand->action[sig - 1].sa.sa_handler;
72026 }
72027
72028-static int sig_handler_ignored(void __user *handler, int sig)
72029+static int sig_handler_ignored(__sighandler_t handler, int sig)
72030 {
72031 /* Is it explicitly or implicitly ignored? */
72032 return handler == SIG_IGN ||
72033@@ -63,7 +63,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
72034
72035 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
72036 {
72037- void __user *handler;
72038+ __sighandler_t handler;
72039
72040 handler = sig_handler(t, sig);
72041
72042@@ -367,6 +367,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
72043 atomic_inc(&user->sigpending);
72044 rcu_read_unlock();
72045
72046+ if (!override_rlimit)
72047+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
72048+
72049 if (override_rlimit ||
72050 atomic_read(&user->sigpending) <=
72051 task_rlimit(t, RLIMIT_SIGPENDING)) {
72052@@ -491,7 +494,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
72053
72054 int unhandled_signal(struct task_struct *tsk, int sig)
72055 {
72056- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
72057+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
72058 if (is_global_init(tsk))
72059 return 1;
72060 if (handler != SIG_IGN && handler != SIG_DFL)
72061@@ -811,6 +814,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
72062 }
72063 }
72064
72065+ /* allow glibc communication via tgkill to other threads in our
72066+ thread group */
72067+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
72068+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
72069+ && gr_handle_signal(t, sig))
72070+ return -EPERM;
72071+
72072 return security_task_kill(t, info, sig, 0);
72073 }
72074
72075@@ -1192,7 +1202,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
72076 return send_signal(sig, info, p, 1);
72077 }
72078
72079-static int
72080+int
72081 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
72082 {
72083 return send_signal(sig, info, t, 0);
72084@@ -1229,6 +1239,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
72085 unsigned long int flags;
72086 int ret, blocked, ignored;
72087 struct k_sigaction *action;
72088+ int is_unhandled = 0;
72089
72090 spin_lock_irqsave(&t->sighand->siglock, flags);
72091 action = &t->sighand->action[sig-1];
72092@@ -1243,9 +1254,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
72093 }
72094 if (action->sa.sa_handler == SIG_DFL)
72095 t->signal->flags &= ~SIGNAL_UNKILLABLE;
72096+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
72097+ is_unhandled = 1;
72098 ret = specific_send_sig_info(sig, info, t);
72099 spin_unlock_irqrestore(&t->sighand->siglock, flags);
72100
72101+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
72102+ normal operation */
72103+ if (is_unhandled) {
72104+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
72105+ gr_handle_crash(t, sig);
72106+ }
72107+
72108 return ret;
72109 }
72110
72111@@ -1312,8 +1332,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
72112 ret = check_kill_permission(sig, info, p);
72113 rcu_read_unlock();
72114
72115- if (!ret && sig)
72116+ if (!ret && sig) {
72117 ret = do_send_sig_info(sig, info, p, true);
72118+ if (!ret)
72119+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
72120+ }
72121
72122 return ret;
72123 }
72124@@ -2863,7 +2886,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
72125 int error = -ESRCH;
72126
72127 rcu_read_lock();
72128- p = find_task_by_vpid(pid);
72129+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72130+ /* allow glibc communication via tgkill to other threads in our
72131+ thread group */
72132+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
72133+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
72134+ p = find_task_by_vpid_unrestricted(pid);
72135+ else
72136+#endif
72137+ p = find_task_by_vpid(pid);
72138 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
72139 error = check_kill_permission(sig, info, p);
72140 /*
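The check_kill_permission hunk above consults gr_handle_signal only when the signal is not glibc's internal setxid broadcast: a tgkill (si_code == SI_TKILL) of kernel-side SIGRTMIN+1 (signal 33, which NPTL appears to reserve for propagating setuid and friends across threads) aimed at the sender's own thread group is always allowed, and the do_send_specific hunk widens find_task_by_vpid for the same case under CHROOT_FINDTASK. A sketch of the whitelist predicate in userspace types; note that userspace SIGRTMIN is already offset past the reserved signals, so the constant differs from the kernel's:

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool is_glibc_setxid_tgkill(const siginfo_t *info, int sig,
                                       pid_t target_tgid)
    {
        return info != NULL
            && info->si_code == SI_TKILL      /* delivered via tgkill(2) */
            && sig == SIGRTMIN + 1            /* kernel-side this is 33 */
            && target_tgid == info->si_pid;   /* sender's own thread group */
    }

    int main(void)
    {
        siginfo_t si = { 0 };
        si.si_code = SI_TKILL;
        si.si_pid = getpid();
        printf("%d\n", is_glibc_setxid_tgkill(&si, SIGRTMIN + 1, getpid()));
        return 0;
    }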
72141diff --git a/kernel/softirq.c b/kernel/softirq.c
72142index cc96bdc..6a96894 100644
72143--- a/kernel/softirq.c
72144+++ b/kernel/softirq.c
72145@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
72146 EXPORT_SYMBOL(irq_stat);
72147 #endif
72148
72149-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
72150+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
72151
72152 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
72153
72154-char *softirq_to_name[NR_SOFTIRQS] = {
72155+const char * const softirq_to_name[NR_SOFTIRQS] = {
72156 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
72157 "TASKLET", "SCHED", "HRTIMER", "RCU"
72158 };
72159@@ -244,7 +244,7 @@ restart:
72160 kstat_incr_softirqs_this_cpu(vec_nr);
72161
72162 trace_softirq_entry(vec_nr);
72163- h->action(h);
72164+ h->action();
72165 trace_softirq_exit(vec_nr);
72166 if (unlikely(prev_count != preempt_count())) {
72167 printk(KERN_ERR "huh, entered softirq %u %s %p"
72168@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
72169 or_softirq_pending(1UL << nr);
72170 }
72171
72172-void open_softirq(int nr, void (*action)(struct softirq_action *))
72173+void __init open_softirq(int nr, void (*action)(void))
72174 {
72175 softirq_vec[nr].action = action;
72176 }
72177@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
72178
72179 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
72180
72181-static void tasklet_action(struct softirq_action *a)
72182+static void tasklet_action(void)
72183 {
72184 struct tasklet_struct *list;
72185
72186@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
72187 }
72188 }
72189
72190-static void tasklet_hi_action(struct softirq_action *a)
72191+static void tasklet_hi_action(void)
72192 {
72193 struct tasklet_struct *list;
72194
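Dropping the struct softirq_action argument, which every handler touched by this patch ignores, lets open_softirq become __init and lets the vector itself be declared __read_only __aligned(PAGE_SIZE), so the page holding the softirq function pointers can be write-protected once boot is over. A minimal model of the resulting registration flow:

    #include <stdio.h>

    typedef void (*softirq_fn)(void);

    /* the patched kernel marks the real vector __read_only
     * __aligned(PAGE_SIZE) so its page can be locked down after boot */
    static softirq_fn softirq_vec[4];

    static void open_softirq(int nr, softirq_fn fn)  /* __init in the patch */
    {
        softirq_vec[nr] = fn;
    }

    static void timer_softirq(void) { puts("TIMER"); }

    int main(void)
    {
        open_softirq(1, timer_softirq);
        softirq_vec[1]();  /* was h->action(h); no handler here used h */
        return 0;
    }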
72195diff --git a/kernel/srcu.c b/kernel/srcu.c
72196index 97c465e..d83f3bb 100644
72197--- a/kernel/srcu.c
72198+++ b/kernel/srcu.c
72199@@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
72200 preempt_disable();
72201 idx = rcu_dereference_index_check(sp->completed,
72202 rcu_read_lock_sched_held()) & 0x1;
72203- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
72204+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
72205 smp_mb(); /* B */ /* Avoid leaking the critical section. */
72206- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
72207+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
72208 preempt_enable();
72209 return idx;
72210 }
72211@@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
72212 {
72213 preempt_disable();
72214 smp_mb(); /* C */ /* Avoid leaking the critical section. */
72215- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
72216+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
72217 preempt_enable();
72218 }
72219 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
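ACCESS_ONCE_RW pairs with PaX's rework of ACCESS_ONCE: the plain macro goes through a const-qualified volatile pointer, so accidental writes through it stop compiling, and genuine writers such as these SRCU per-cpu counters must opt in explicitly. The definitions as I recall the PaX convention (GNU C typeof, so GCC/Clang only):

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))  /* read  */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* write */

    static long counter;

    int main(void)
    {
        long snap = ACCESS_ONCE(counter);    /* reads are unchanged */
        ACCESS_ONCE_RW(counter) = snap + 1;  /* writes must opt in */
        /* ACCESS_ONCE(counter) = 0;            rejected: const lvalue */
        return (int)counter;
    }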
72220diff --git a/kernel/sys.c b/kernel/sys.c
72221index e6e0ece..1f2e413 100644
72222--- a/kernel/sys.c
72223+++ b/kernel/sys.c
72224@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
72225 error = -EACCES;
72226 goto out;
72227 }
72228+
72229+ if (gr_handle_chroot_setpriority(p, niceval)) {
72230+ error = -EACCES;
72231+ goto out;
72232+ }
72233+
72234 no_nice = security_task_setnice(p, niceval);
72235 if (no_nice) {
72236 error = no_nice;
72237@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
72238 goto error;
72239 }
72240
72241+ if (gr_check_group_change(new->gid, new->egid, -1))
72242+ goto error;
72243+
72244 if (rgid != (gid_t) -1 ||
72245 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
72246 new->sgid = new->egid;
72247@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
72248 old = current_cred();
72249
72250 retval = -EPERM;
72251+
72252+ if (gr_check_group_change(kgid, kgid, kgid))
72253+ goto error;
72254+
72255 if (nsown_capable(CAP_SETGID))
72256 new->gid = new->egid = new->sgid = new->fsgid = kgid;
72257 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
72258@@ -647,7 +660,7 @@ error:
72259 /*
72260 * change the user struct in a credentials set to match the new UID
72261 */
72262-static int set_user(struct cred *new)
72263+int set_user(struct cred *new)
72264 {
72265 struct user_struct *new_user;
72266
72267@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
72268 goto error;
72269 }
72270
72271+ if (gr_check_user_change(new->uid, new->euid, -1))
72272+ goto error;
72273+
72274 if (!uid_eq(new->uid, old->uid)) {
72275 retval = set_user(new);
72276 if (retval < 0)
72277@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
72278 old = current_cred();
72279
72280 retval = -EPERM;
72281+
72282+ if (gr_check_crash_uid(kuid))
72283+ goto error;
72284+ if (gr_check_user_change(kuid, kuid, kuid))
72285+ goto error;
72286+
72287 if (nsown_capable(CAP_SETUID)) {
72288 new->suid = new->uid = kuid;
72289 if (!uid_eq(kuid, old->uid)) {
72290@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
72291 goto error;
72292 }
72293
72294+ if (gr_check_user_change(kruid, keuid, -1))
72295+ goto error;
72296+
72297 if (ruid != (uid_t) -1) {
72298 new->uid = kruid;
72299 if (!uid_eq(kruid, old->uid)) {
72300@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
72301 goto error;
72302 }
72303
72304+ if (gr_check_group_change(krgid, kegid, -1))
72305+ goto error;
72306+
72307 if (rgid != (gid_t) -1)
72308 new->gid = krgid;
72309 if (egid != (gid_t) -1)
72310@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72311 if (!uid_valid(kuid))
72312 return old_fsuid;
72313
72314+ if (gr_check_user_change(-1, -1, kuid))
72315+ goto error;
72316+
72317 new = prepare_creds();
72318 if (!new)
72319 return old_fsuid;
72320@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
72321 }
72322 }
72323
72324+error:
72325 abort_creds(new);
72326 return old_fsuid;
72327
72328@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
72329 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
72330 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
72331 nsown_capable(CAP_SETGID)) {
72332+ if (gr_check_group_change(-1, -1, kgid))
72333+ goto error;
72334+
72335 if (!gid_eq(kgid, old->fsgid)) {
72336 new->fsgid = kgid;
72337 goto change_okay;
72338 }
72339 }
72340
72341+error:
72342 abort_creds(new);
72343 return old_fsgid;
72344
72345@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
72346 return -EFAULT;
72347
72348 down_read(&uts_sem);
72349- error = __copy_to_user(&name->sysname, &utsname()->sysname,
72350+ error = __copy_to_user(name->sysname, &utsname()->sysname,
72351 __OLD_UTS_LEN);
72352 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
72353- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
72354+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
72355 __OLD_UTS_LEN);
72356 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
72357- error |= __copy_to_user(&name->release, &utsname()->release,
72358+ error |= __copy_to_user(name->release, &utsname()->release,
72359 __OLD_UTS_LEN);
72360 error |= __put_user(0, name->release + __OLD_UTS_LEN);
72361- error |= __copy_to_user(&name->version, &utsname()->version,
72362+ error |= __copy_to_user(name->version, &utsname()->version,
72363 __OLD_UTS_LEN);
72364 error |= __put_user(0, name->version + __OLD_UTS_LEN);
72365- error |= __copy_to_user(&name->machine, &utsname()->machine,
72366+ error |= __copy_to_user(name->machine, &utsname()->machine,
72367 __OLD_UTS_LEN);
72368 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
72369 up_read(&uts_sem);
72370@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
72371 error = get_dumpable(me->mm);
72372 break;
72373 case PR_SET_DUMPABLE:
72374- if (arg2 < 0 || arg2 > 1) {
72375+ if (arg2 > 1) {
72376 error = -EINVAL;
72377 break;
72378 }
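Two small correctness cleanups close out this file. In sys_olduname, name->sysname and &name->sysname denote the same address, but only the former decays to the char pointer __copy_to_user expects; the latter is a pointer-to-array, which sparse flags. In PR_SET_DUMPABLE, arg2 is an unsigned long, so the arg2 < 0 test was dead code and arg2 > 1 alone is equivalent. A demo of the unsigned-compare trap, which -Wtype-limits diagnoses:

    #include <stdio.h>

    int main(void)
    {
        unsigned long arg2 = (unsigned long)-1;  /* what a caller's -1 becomes */

        if (arg2 < 0 || arg2 > 1)     /* "< 0" can never be true here */
            puts("rejected");

        printf("arg2 = %lu\n", arg2); /* huge positive, caught by "> 1" */
        return 0;
    }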
72379diff --git a/kernel/sysctl.c b/kernel/sysctl.c
72380index 26f65ea..df8e5ad 100644
72381--- a/kernel/sysctl.c
72382+++ b/kernel/sysctl.c
72383@@ -92,7 +92,6 @@
72384
72385
72386 #if defined(CONFIG_SYSCTL)
72387-
72388 /* External variables not in a header file. */
72389 extern int sysctl_overcommit_memory;
72390 extern int sysctl_overcommit_ratio;
72391@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
72392 void __user *buffer, size_t *lenp, loff_t *ppos);
72393 #endif
72394
72395-#ifdef CONFIG_PRINTK
72396 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72397 void __user *buffer, size_t *lenp, loff_t *ppos);
72398-#endif
72399
72400 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
72401 void __user *buffer, size_t *lenp, loff_t *ppos);
72402@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
72403
72404 #endif
72405
72406+extern struct ctl_table grsecurity_table[];
72407+
72408 static struct ctl_table kern_table[];
72409 static struct ctl_table vm_table[];
72410 static struct ctl_table fs_table[];
72411@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
72412 int sysctl_legacy_va_layout;
72413 #endif
72414
72415+#ifdef CONFIG_PAX_SOFTMODE
72416+static ctl_table pax_table[] = {
72417+ {
72418+ .procname = "softmode",
72419+ .data = &pax_softmode,
72420+ .maxlen = sizeof(unsigned int),
72421+ .mode = 0600,
72422+ .proc_handler = &proc_dointvec,
72423+ },
72424+
72425+ { }
72426+};
72427+#endif
72428+
72429 /* The default sysctl tables: */
72430
72431 static struct ctl_table sysctl_base_table[] = {
72432@@ -266,6 +279,22 @@ static int max_extfrag_threshold = 1000;
72433 #endif
72434
72435 static struct ctl_table kern_table[] = {
72436+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
72437+ {
72438+ .procname = "grsecurity",
72439+ .mode = 0500,
72440+ .child = grsecurity_table,
72441+ },
72442+#endif
72443+
72444+#ifdef CONFIG_PAX_SOFTMODE
72445+ {
72446+ .procname = "pax",
72447+ .mode = 0500,
72448+ .child = pax_table,
72449+ },
72450+#endif
72451+
72452 {
72453 .procname = "sched_child_runs_first",
72454 .data = &sysctl_sched_child_runs_first,
72455@@ -552,7 +581,7 @@ static struct ctl_table kern_table[] = {
72456 .data = &modprobe_path,
72457 .maxlen = KMOD_PATH_LEN,
72458 .mode = 0644,
72459- .proc_handler = proc_dostring,
72460+ .proc_handler = proc_dostring_modpriv,
72461 },
72462 {
72463 .procname = "modules_disabled",
72464@@ -719,16 +748,20 @@ static struct ctl_table kern_table[] = {
72465 .extra1 = &zero,
72466 .extra2 = &one,
72467 },
72468+#endif
72469 {
72470 .procname = "kptr_restrict",
72471 .data = &kptr_restrict,
72472 .maxlen = sizeof(int),
72473 .mode = 0644,
72474 .proc_handler = proc_dointvec_minmax_sysadmin,
72475+#ifdef CONFIG_GRKERNSEC_HIDESYM
72476+ .extra1 = &two,
72477+#else
72478 .extra1 = &zero,
72479+#endif
72480 .extra2 = &two,
72481 },
72482-#endif
72483 {
72484 .procname = "ngroups_max",
72485 .data = &ngroups_max,
72486@@ -1225,6 +1258,13 @@ static struct ctl_table vm_table[] = {
72487 .proc_handler = proc_dointvec_minmax,
72488 .extra1 = &zero,
72489 },
72490+ {
72491+ .procname = "heap_stack_gap",
72492+ .data = &sysctl_heap_stack_gap,
72493+ .maxlen = sizeof(sysctl_heap_stack_gap),
72494+ .mode = 0644,
72495+ .proc_handler = proc_doulongvec_minmax,
72496+ },
72497 #else
72498 {
72499 .procname = "nr_trim_pages",
72500@@ -1675,6 +1715,16 @@ int proc_dostring(struct ctl_table *table, int write,
72501 buffer, lenp, ppos);
72502 }
72503
72504+int proc_dostring_modpriv(struct ctl_table *table, int write,
72505+ void __user *buffer, size_t *lenp, loff_t *ppos)
72506+{
72507+ if (write && !capable(CAP_SYS_MODULE))
72508+ return -EPERM;
72509+
72510+ return _proc_do_string(table->data, table->maxlen, write,
72511+ buffer, lenp, ppos);
72512+}
72513+
72514 static size_t proc_skip_spaces(char **buf)
72515 {
72516 size_t ret;
72517@@ -1780,6 +1830,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
72518 len = strlen(tmp);
72519 if (len > *size)
72520 len = *size;
72521+ if (len > sizeof(tmp))
72522+ len = sizeof(tmp);
72523 if (copy_to_user(*buf, tmp, len))
72524 return -EFAULT;
72525 *size -= len;
72526@@ -1972,7 +2024,6 @@ static int proc_taint(struct ctl_table *table, int write,
72527 return err;
72528 }
72529
72530-#ifdef CONFIG_PRINTK
72531 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72532 void __user *buffer, size_t *lenp, loff_t *ppos)
72533 {
72534@@ -1981,7 +2032,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
72535
72536 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
72537 }
72538-#endif
72539
72540 struct do_proc_dointvec_minmax_conv_param {
72541 int *min;
72542@@ -2128,8 +2178,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
72543 *i = val;
72544 } else {
72545 val = convdiv * (*i) / convmul;
72546- if (!first)
72547+ if (!first) {
72548 err = proc_put_char(&buffer, &left, '\t');
72549+ if (err)
72550+ break;
72551+ }
72552 err = proc_put_long(&buffer, &left, val, false);
72553 if (err)
72554 break;
72555@@ -2521,6 +2574,12 @@ int proc_dostring(struct ctl_table *table, int write,
72556 return -ENOSYS;
72557 }
72558
72559+int proc_dostring_modpriv(struct ctl_table *table, int write,
72560+ void __user *buffer, size_t *lenp, loff_t *ppos)
72561+{
72562+ return -ENOSYS;
72563+}
72564+
72565 int proc_dointvec(struct ctl_table *table, int write,
72566 void __user *buffer, size_t *lenp, loff_t *ppos)
72567 {
72568@@ -2577,5 +2636,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
72569 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
72570 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
72571 EXPORT_SYMBOL(proc_dostring);
72572+EXPORT_SYMBOL(proc_dostring_modpriv);
72573 EXPORT_SYMBOL(proc_doulongvec_minmax);
72574 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
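Besides wiring in the grsecurity/ and pax/ sysctl directories, this file raises the kptr_restrict floor to 2 under HIDESYM, clamps proc_put_long's copy length to its scratch buffer, stops ignoring the error from the tab-separator proc_put_char, and adds proc_dostring_modpriv: a proc_dostring that lets anyone read but requires CAP_SYS_MODULE to write, used above to guard modprobe_path. A sketch of that handler pattern, with hypothetical cap_check() and do_string() standing in for capable(CAP_SYS_MODULE) and _proc_do_string():

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool cap_check(void) { return false; }  /* unprivileged caller */

    static int do_string(char *data, size_t maxlen, int write, const char *src)
    {
        if (write)
            snprintf(data, maxlen, "%s", src);
        return 0;
    }

    static int dostring_modpriv(char *data, size_t maxlen, int write,
                                const char *src)
    {
        if (write && !cap_check())
            return -EPERM;                    /* reads stay unprivileged */
        return do_string(data, maxlen, write, src);
    }

    int main(void)
    {
        char modprobe_path[64] = "/sbin/modprobe";
        int ret = dostring_modpriv(modprobe_path, sizeof(modprobe_path),
                                   1, "/tmp/evil");
        printf("write -> %d, path still %s\n", ret, modprobe_path);
        return 0;
    }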
72575diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
72576index 65bdcf1..21eb831 100644
72577--- a/kernel/sysctl_binary.c
72578+++ b/kernel/sysctl_binary.c
72579@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
72580 int i;
72581
72582 set_fs(KERNEL_DS);
72583- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72584+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72585 set_fs(old_fs);
72586 if (result < 0)
72587 goto out_kfree;
72588@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
72589 }
72590
72591 set_fs(KERNEL_DS);
72592- result = vfs_write(file, buffer, str - buffer, &pos);
72593+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72594 set_fs(old_fs);
72595 if (result < 0)
72596 goto out_kfree;
72597@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
72598 int i;
72599
72600 set_fs(KERNEL_DS);
72601- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
72602+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
72603 set_fs(old_fs);
72604 if (result < 0)
72605 goto out_kfree;
72606@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
72607 }
72608
72609 set_fs(KERNEL_DS);
72610- result = vfs_write(file, buffer, str - buffer, &pos);
72611+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
72612 set_fs(old_fs);
72613 if (result < 0)
72614 goto out_kfree;
72615@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
72616 int i;
72617
72618 set_fs(KERNEL_DS);
72619- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72620+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72621 set_fs(old_fs);
72622 if (result < 0)
72623 goto out;
72624@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72625 __le16 dnaddr;
72626
72627 set_fs(KERNEL_DS);
72628- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
72629+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
72630 set_fs(old_fs);
72631 if (result < 0)
72632 goto out;
72633@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
72634 le16_to_cpu(dnaddr) & 0x3ff);
72635
72636 set_fs(KERNEL_DS);
72637- result = vfs_write(file, buf, len, &pos);
72638+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
72639 set_fs(old_fs);
72640 if (result < 0)
72641 goto out;
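The casts in this file are sparse bookkeeping rather than behavior changes: these vfs_read/vfs_write calls run under set_fs(KERNEL_DS), deliberately passing kernel buffers where __user pointers are expected, and __force_user (a grsecurity shorthand for __force __user, as I recall, with __force_kernel as its mirror) marks each cast as intentional so the address-space checker can stay strict everywhere else. Roughly how the annotations expand when sparse (__CHECKER__) is running:

    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __kernel __attribute__((address_space(0)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __kernel
    # define __force
    #endif
    #define __force_user   __force __user
    #define __force_kernel __force __kernel

    /* a kernel buffer handed to a __user-typed parameter, legal only
     * because the caller switched to KERNEL_DS first */
    long vfs_read_model(void *file, char __force_user *buf,
                        unsigned long count);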
72642diff --git a/kernel/taskstats.c b/kernel/taskstats.c
72643index 145bb4d..b2aa969 100644
72644--- a/kernel/taskstats.c
72645+++ b/kernel/taskstats.c
72646@@ -28,9 +28,12 @@
72647 #include <linux/fs.h>
72648 #include <linux/file.h>
72649 #include <linux/pid_namespace.h>
72650+#include <linux/grsecurity.h>
72651 #include <net/genetlink.h>
72652 #include <linux/atomic.h>
72653
72654+extern int gr_is_taskstats_denied(int pid);
72655+
72656 /*
72657 * Maximum length of a cpumask that can be specified in
72658 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
72659@@ -570,6 +573,9 @@ err:
72660
72661 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
72662 {
72663+ if (gr_is_taskstats_denied(current->pid))
72664+ return -EACCES;
72665+
72666 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
72667 return cmd_attr_register_cpumask(info);
72668 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
72669diff --git a/kernel/time.c b/kernel/time.c
72670index d226c6a..c7c0960 100644
72671--- a/kernel/time.c
72672+++ b/kernel/time.c
72673@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
72674 return error;
72675
72676 if (tz) {
72677+ /* we log in do_settimeofday called below, so don't log twice
72678+ */
72679+ if (!tv)
72680+ gr_log_timechange();
72681+
72682 sys_tz = *tz;
72683 update_vsyscall_tz();
72684 if (firsttime) {
72685diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
72686index f11d83b..d016d91 100644
72687--- a/kernel/time/alarmtimer.c
72688+++ b/kernel/time/alarmtimer.c
72689@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
72690 struct platform_device *pdev;
72691 int error = 0;
72692 int i;
72693- struct k_clock alarm_clock = {
72694+ static struct k_clock alarm_clock = {
72695 .clock_getres = alarm_clock_getres,
72696 .clock_get = alarm_clock_get,
72697 .timer_create = alarm_timer_create,
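Making alarm_clock static appears to be constify-plugin fallout: struct k_clock is essentially a table of function pointers, which PaX treats as read-only data, so the table gets static storage and a compile-time initializer instead of being assembled in alarmtimer_init's stack frame. The general shape of the change, with a hypothetical ops_model type:

    #include <stdio.h>

    struct ops_model { int (*get)(void); };

    static int get_impl(void) { return 42; }

    static const struct ops_model *registered;
    static void reg(const struct ops_model *o) { registered = o; }

    int main(void)
    {
        struct ops_model stack_ops = { .get = get_impl };  /* old: per-call */
        reg(&stack_ops);

        static const struct ops_model static_ops = { .get = get_impl };
        reg(&static_ops);                                  /* new: .rodata */

        printf("%d\n", registered->get());
        return 0;
    }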
72698diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
72699index f113755..ec24223 100644
72700--- a/kernel/time/tick-broadcast.c
72701+++ b/kernel/time/tick-broadcast.c
72702@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
72703 * then clear the broadcast bit.
72704 */
72705 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
72706- int cpu = smp_processor_id();
72707+ cpu = smp_processor_id();
72708
72709 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
72710 tick_broadcast_clear_oneshot(cpu);
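The tick_device_uses_broadcast fix removes a shadowed declaration: the inner block declared a second int cpu over the function parameter of the same name, which -Wshadow would have flagged; the patch turns the declaration into a plain assignment so both uses refer to one variable. A small demo of the hazard, with current_cpu() as a stand-in for smp_processor_id():

    #include <stdio.h>

    static int current_cpu(void) { return 3; }

    static void demo(int cpu)
    {
        if (cpu != current_cpu()) {
            int cpu = current_cpu();   /* shadows the parameter (-Wshadow) */
            printf("inner cpu=%d\n", cpu);
        }
        printf("outer cpu=%d\n", cpu); /* the parameter never changed */
    }

    int main(void)
    {
        demo(1);
        return 0;
    }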
72711diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
72712index e424970..4c7962b 100644
72713--- a/kernel/time/timekeeping.c
72714+++ b/kernel/time/timekeeping.c
72715@@ -15,6 +15,7 @@
72716 #include <linux/init.h>
72717 #include <linux/mm.h>
72718 #include <linux/sched.h>
72719+#include <linux/grsecurity.h>
72720 #include <linux/syscore_ops.h>
72721 #include <linux/clocksource.h>
72722 #include <linux/jiffies.h>
72723@@ -368,6 +369,8 @@ int do_settimeofday(const struct timespec *tv)
72724 if (!timespec_valid_strict(tv))
72725 return -EINVAL;
72726
72727+ gr_log_timechange();
72728+
72729 write_seqlock_irqsave(&tk->lock, flags);
72730
72731 timekeeping_forward_now(tk);
72732diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
72733index af5a7e9..715611a 100644
72734--- a/kernel/time/timer_list.c
72735+++ b/kernel/time/timer_list.c
72736@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
72737
72738 static void print_name_offset(struct seq_file *m, void *sym)
72739 {
72740+#ifdef CONFIG_GRKERNSEC_HIDESYM
72741+ SEQ_printf(m, "<%p>", NULL);
72742+#else
72743 char symname[KSYM_NAME_LEN];
72744
72745 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
72746 SEQ_printf(m, "<%pK>", sym);
72747 else
72748 SEQ_printf(m, "%s", symname);
72749+#endif
72750 }
72751
72752 static void
72753@@ -112,7 +116,11 @@ next_one:
72754 static void
72755 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
72756 {
72757+#ifdef CONFIG_GRKERNSEC_HIDESYM
72758+ SEQ_printf(m, " .base: %p\n", NULL);
72759+#else
72760 SEQ_printf(m, " .base: %pK\n", base);
72761+#endif
72762 SEQ_printf(m, " .index: %d\n",
72763 base->index);
72764 SEQ_printf(m, " .resolution: %Lu nsecs\n",
72765@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
72766 {
72767 struct proc_dir_entry *pe;
72768
72769+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72770+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
72771+#else
72772 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
72773+#endif
72774 if (!pe)
72775 return -ENOMEM;
72776 return 0;
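Under GRKERNSEC_HIDESYM the timer_list dump stops resolving or printing kernel addresses altogether, emitting a literal null pointer instead of relying on %pK, whose censoring still depends on the kptr_restrict setting (floored at 2 by the sysctl hunk earlier) and the reader's credentials; the procfs entry also drops from 0444 to 0400 under PROC_ADD. A toy model of the redaction:

    #include <stdio.h>

    static int hide_syms = 1;  /* models CONFIG_GRKERNSEC_HIDESYM */

    static void print_name_offset(const void *sym)
    {
        if (hide_syms)
            printf("<%p>\n", (void *)0);  /* unconditional redaction */
        else
            printf("<%p>\n", sym);        /* the kernel uses %pK here */
    }

    int main(void)
    {
        int obj;
        print_name_offset(&obj);
        return 0;
    }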
72777diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
72778index 0b537f2..40d6c20 100644
72779--- a/kernel/time/timer_stats.c
72780+++ b/kernel/time/timer_stats.c
72781@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
72782 static unsigned long nr_entries;
72783 static struct entry entries[MAX_ENTRIES];
72784
72785-static atomic_t overflow_count;
72786+static atomic_unchecked_t overflow_count;
72787
72788 /*
72789 * The entries are in a hash-table, for fast lookup:
72790@@ -140,7 +140,7 @@ static void reset_entries(void)
72791 nr_entries = 0;
72792 memset(entries, 0, sizeof(entries));
72793 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
72794- atomic_set(&overflow_count, 0);
72795+ atomic_set_unchecked(&overflow_count, 0);
72796 }
72797
72798 static struct entry *alloc_entry(void)
72799@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72800 if (likely(entry))
72801 entry->count++;
72802 else
72803- atomic_inc(&overflow_count);
72804+ atomic_inc_unchecked(&overflow_count);
72805
72806 out_unlock:
72807 raw_spin_unlock_irqrestore(lock, flags);
72808@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
72809
72810 static void print_name_offset(struct seq_file *m, unsigned long addr)
72811 {
72812+#ifdef CONFIG_GRKERNSEC_HIDESYM
72813+ seq_printf(m, "<%p>", NULL);
72814+#else
72815 char symname[KSYM_NAME_LEN];
72816
72817 if (lookup_symbol_name(addr, symname) < 0)
72818- seq_printf(m, "<%p>", (void *)addr);
72819+ seq_printf(m, "<%pK>", (void *)addr);
72820 else
72821 seq_printf(m, "%s", symname);
72822+#endif
72823 }
72824
72825 static int tstats_show(struct seq_file *m, void *v)
72826@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
72827
72828 seq_puts(m, "Timer Stats Version: v0.2\n");
72829 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
72830- if (atomic_read(&overflow_count))
72831+ if (atomic_read_unchecked(&overflow_count))
72832 seq_printf(m, "Overflow: %d entries\n",
72833- atomic_read(&overflow_count));
72834+ atomic_read_unchecked(&overflow_count));
72835
72836 for (i = 0; i < nr_entries; i++) {
72837 entry = entries + i;
72838@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
72839 {
72840 struct proc_dir_entry *pe;
72841
72842+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72843+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
72844+#else
72845 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
72846+#endif
72847 if (!pe)
72848 return -ENOMEM;
72849 return 0;
72850diff --git a/kernel/timer.c b/kernel/timer.c
72851index 367d008..46857a0 100644
72852--- a/kernel/timer.c
72853+++ b/kernel/timer.c
72854@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
72855 /*
72856 * This function runs timers and the timer-tq in bottom half context.
72857 */
72858-static void run_timer_softirq(struct softirq_action *h)
72859+static void run_timer_softirq(void)
72860 {
72861 struct tvec_base *base = __this_cpu_read(tvec_bases);
72862
72863@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
72864 return NOTIFY_OK;
72865 }
72866
72867-static struct notifier_block __cpuinitdata timers_nb = {
72868+static struct notifier_block __cpuinitconst timers_nb = {
72869 .notifier_call = timer_cpu_notify,
72870 };
72871
72872diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
72873index c0bd030..62a1927 100644
72874--- a/kernel/trace/blktrace.c
72875+++ b/kernel/trace/blktrace.c
72876@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
72877 struct blk_trace *bt = filp->private_data;
72878 char buf[16];
72879
72880- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
72881+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
72882
72883 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
72884 }
72885@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
72886 return 1;
72887
72888 bt = buf->chan->private_data;
72889- atomic_inc(&bt->dropped);
72890+ atomic_inc_unchecked(&bt->dropped);
72891 return 0;
72892 }
72893
72894@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
72895
72896 bt->dir = dir;
72897 bt->dev = dev;
72898- atomic_set(&bt->dropped, 0);
72899+ atomic_set_unchecked(&bt->dropped, 0);
72900
72901 ret = -EIO;
72902 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
72903diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
72904index 356bc2f..7c94fc0 100644
72905--- a/kernel/trace/ftrace.c
72906+++ b/kernel/trace/ftrace.c
72907@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
72908 if (unlikely(ftrace_disabled))
72909 return 0;
72910
72911+ ret = ftrace_arch_code_modify_prepare();
72912+ FTRACE_WARN_ON(ret);
72913+ if (ret)
72914+ return 0;
72915+
72916 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
72917+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
72918 if (ret) {
72919 ftrace_bug(ret, ip);
72920- return 0;
72921 }
72922- return 1;
72923+ return ret ? 0 : 1;
72924 }
72925
72926 /*
72927@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
72928
72929 int
72930 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
72931- void *data)
72932+ void *data)
72933 {
72934 struct ftrace_func_probe *entry;
72935 struct ftrace_page *pg;
72936@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
72937 if (!count)
72938 return 0;
72939
72940+ pax_open_kernel();
72941 sort(start, count, sizeof(*start),
72942 ftrace_cmp_ips, ftrace_swap_ips);
72943+ pax_close_kernel();
72944
72945 start_pg = ftrace_allocate_pages(count);
72946 if (!start_pg)
72947@@ -4541,8 +4548,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
72948 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
72949
72950 static int ftrace_graph_active;
72951-static struct notifier_block ftrace_suspend_notifier;
72952-
72953 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
72954 {
72955 return 0;
72956@@ -4686,6 +4691,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
72957 return NOTIFY_DONE;
72958 }
72959
72960+static struct notifier_block ftrace_suspend_notifier = {
72961+ .notifier_call = ftrace_suspend_notifier_call
72962+};
72963+
72964 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
72965 trace_func_graph_ent_t entryfunc)
72966 {
72967@@ -4699,7 +4708,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
72968 goto out;
72969 }
72970
72971- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
72972 register_pm_notifier(&ftrace_suspend_notifier);
72973
72974 ftrace_graph_active++;
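Two independent ftrace fixes above. ftrace_code_disable now brackets ftrace_make_nop with ftrace_arch_code_modify_prepare/post_process, which matters when kernel text is kept read-only, and it propagates a failed prepare instead of assuming success. Separately, ftrace_suspend_notifier gains a designated initializer so its notifier_call is set at compile time, removing the one runtime write that kept the structure from being treated as const (the same motivation as the notifier_block_no_const change in lib/dma-debug.c below). The initializer pattern in isolation, with a hypothetical notifier_model type:

    #include <stdio.h>

    struct notifier_model { int (*call)(unsigned long, void *); };

    static int suspend_call(unsigned long state, void *unused)
    {
        (void)state; (void)unused;
        return 0;
    }

    /* before: nb.notifier_call assigned at registration time;
     * after: designated initializer, no runtime write needed */
    static const struct notifier_model suspend_nb = {
        .call = suspend_call,
    };

    int main(void)
    {
        printf("%d\n", suspend_nb.call(0, NULL));
        return 0;
    }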
72975diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
72976index 4cb5e51..e7e05d9 100644
72977--- a/kernel/trace/ring_buffer.c
72978+++ b/kernel/trace/ring_buffer.c
72979@@ -346,9 +346,9 @@ struct buffer_data_page {
72980 */
72981 struct buffer_page {
72982 struct list_head list; /* list of buffer pages */
72983- local_t write; /* index for next write */
72984+ local_unchecked_t write; /* index for next write */
72985 unsigned read; /* index for next read */
72986- local_t entries; /* entries on this page */
72987+ local_unchecked_t entries; /* entries on this page */
72988 unsigned long real_end; /* real end of data */
72989 struct buffer_data_page *page; /* Actual data page */
72990 };
72991@@ -460,8 +460,8 @@ struct ring_buffer_per_cpu {
72992 unsigned long lost_events;
72993 unsigned long last_overrun;
72994 local_t entries_bytes;
72995- local_t commit_overrun;
72996- local_t overrun;
72997+ local_unchecked_t commit_overrun;
72998+ local_unchecked_t overrun;
72999 local_t entries;
73000 local_t committing;
73001 local_t commits;
73002@@ -860,8 +860,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
73003 *
73004 * We add a counter to the write field to denote this.
73005 */
73006- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
73007- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
73008+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
73009+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
73010
73011 /*
73012 * Just make sure we have seen our old_write and synchronize
73013@@ -889,8 +889,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
73014 * cmpxchg to only update if an interrupt did not already
73015 * do it for us. If the cmpxchg fails, we don't care.
73016 */
73017- (void)local_cmpxchg(&next_page->write, old_write, val);
73018- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
73019+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
73020+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
73021
73022 /*
73023 * No need to worry about races with clearing out the commit.
73024@@ -1249,12 +1249,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
73025
73026 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
73027 {
73028- return local_read(&bpage->entries) & RB_WRITE_MASK;
73029+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
73030 }
73031
73032 static inline unsigned long rb_page_write(struct buffer_page *bpage)
73033 {
73034- return local_read(&bpage->write) & RB_WRITE_MASK;
73035+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
73036 }
73037
73038 static int
73039@@ -1349,7 +1349,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
73040 * bytes consumed in ring buffer from here.
73041 * Increment overrun to account for the lost events.
73042 */
73043- local_add(page_entries, &cpu_buffer->overrun);
73044+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
73045 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
73046 }
73047
73048@@ -1905,7 +1905,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
73049 * it is our responsibility to update
73050 * the counters.
73051 */
73052- local_add(entries, &cpu_buffer->overrun);
73053+ local_add_unchecked(entries, &cpu_buffer->overrun);
73054 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
73055
73056 /*
73057@@ -2055,7 +2055,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
73058 if (tail == BUF_PAGE_SIZE)
73059 tail_page->real_end = 0;
73060
73061- local_sub(length, &tail_page->write);
73062+ local_sub_unchecked(length, &tail_page->write);
73063 return;
73064 }
73065
73066@@ -2090,7 +2090,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
73067 rb_event_set_padding(event);
73068
73069 /* Set the write back to the previous setting */
73070- local_sub(length, &tail_page->write);
73071+ local_sub_unchecked(length, &tail_page->write);
73072 return;
73073 }
73074
73075@@ -2102,7 +2102,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
73076
73077 /* Set write to end of buffer */
73078 length = (tail + length) - BUF_PAGE_SIZE;
73079- local_sub(length, &tail_page->write);
73080+ local_sub_unchecked(length, &tail_page->write);
73081 }
73082
73083 /*
73084@@ -2128,7 +2128,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
73085 * about it.
73086 */
73087 if (unlikely(next_page == commit_page)) {
73088- local_inc(&cpu_buffer->commit_overrun);
73089+ local_inc_unchecked(&cpu_buffer->commit_overrun);
73090 goto out_reset;
73091 }
73092
73093@@ -2182,7 +2182,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
73094 cpu_buffer->tail_page) &&
73095 (cpu_buffer->commit_page ==
73096 cpu_buffer->reader_page))) {
73097- local_inc(&cpu_buffer->commit_overrun);
73098+ local_inc_unchecked(&cpu_buffer->commit_overrun);
73099 goto out_reset;
73100 }
73101 }
73102@@ -2230,7 +2230,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
73103 length += RB_LEN_TIME_EXTEND;
73104
73105 tail_page = cpu_buffer->tail_page;
73106- write = local_add_return(length, &tail_page->write);
73107+ write = local_add_return_unchecked(length, &tail_page->write);
73108
73109 /* set write to only the index of the write */
73110 write &= RB_WRITE_MASK;
73111@@ -2247,7 +2247,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
73112 kmemcheck_annotate_bitfield(event, bitfield);
73113 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
73114
73115- local_inc(&tail_page->entries);
73116+ local_inc_unchecked(&tail_page->entries);
73117
73118 /*
73119 * If this is the first commit on the page, then update
73120@@ -2280,7 +2280,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
73121
73122 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
73123 unsigned long write_mask =
73124- local_read(&bpage->write) & ~RB_WRITE_MASK;
73125+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
73126 unsigned long event_length = rb_event_length(event);
73127 /*
73128 * This is on the tail page. It is possible that
73129@@ -2290,7 +2290,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
73130 */
73131 old_index += write_mask;
73132 new_index += write_mask;
73133- index = local_cmpxchg(&bpage->write, old_index, new_index);
73134+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
73135 if (index == old_index) {
73136 /* update counters */
73137 local_sub(event_length, &cpu_buffer->entries_bytes);
73138@@ -2629,7 +2629,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
73139
73140 /* Do the likely case first */
73141 if (likely(bpage->page == (void *)addr)) {
73142- local_dec(&bpage->entries);
73143+ local_dec_unchecked(&bpage->entries);
73144 return;
73145 }
73146
73147@@ -2641,7 +2641,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
73148 start = bpage;
73149 do {
73150 if (bpage->page == (void *)addr) {
73151- local_dec(&bpage->entries);
73152+ local_dec_unchecked(&bpage->entries);
73153 return;
73154 }
73155 rb_inc_page(cpu_buffer, &bpage);
73156@@ -2923,7 +2923,7 @@ static inline unsigned long
73157 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
73158 {
73159 return local_read(&cpu_buffer->entries) -
73160- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
73161+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
73162 }
73163
73164 /**
73165@@ -3011,7 +3011,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
73166 return 0;
73167
73168 cpu_buffer = buffer->buffers[cpu];
73169- ret = local_read(&cpu_buffer->overrun);
73170+ ret = local_read_unchecked(&cpu_buffer->overrun);
73171
73172 return ret;
73173 }
73174@@ -3032,7 +3032,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
73175 return 0;
73176
73177 cpu_buffer = buffer->buffers[cpu];
73178- ret = local_read(&cpu_buffer->commit_overrun);
73179+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
73180
73181 return ret;
73182 }
73183@@ -3077,7 +3077,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
73184 /* if you care about this being correct, lock the buffer */
73185 for_each_buffer_cpu(buffer, cpu) {
73186 cpu_buffer = buffer->buffers[cpu];
73187- overruns += local_read(&cpu_buffer->overrun);
73188+ overruns += local_read_unchecked(&cpu_buffer->overrun);
73189 }
73190
73191 return overruns;
73192@@ -3253,8 +3253,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
73193 /*
73194 * Reset the reader page to size zero.
73195 */
73196- local_set(&cpu_buffer->reader_page->write, 0);
73197- local_set(&cpu_buffer->reader_page->entries, 0);
73198+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
73199+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
73200 local_set(&cpu_buffer->reader_page->page->commit, 0);
73201 cpu_buffer->reader_page->real_end = 0;
73202
73203@@ -3288,7 +3288,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
73204 * want to compare with the last_overrun.
73205 */
73206 smp_mb();
73207- overwrite = local_read(&(cpu_buffer->overrun));
73208+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
73209
73210 /*
73211 * Here's the tricky part.
73212@@ -3858,8 +3858,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
73213
73214 cpu_buffer->head_page
73215 = list_entry(cpu_buffer->pages, struct buffer_page, list);
73216- local_set(&cpu_buffer->head_page->write, 0);
73217- local_set(&cpu_buffer->head_page->entries, 0);
73218+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
73219+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
73220 local_set(&cpu_buffer->head_page->page->commit, 0);
73221
73222 cpu_buffer->head_page->read = 0;
73223@@ -3869,14 +3869,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
73224
73225 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
73226 INIT_LIST_HEAD(&cpu_buffer->new_pages);
73227- local_set(&cpu_buffer->reader_page->write, 0);
73228- local_set(&cpu_buffer->reader_page->entries, 0);
73229+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
73230+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
73231 local_set(&cpu_buffer->reader_page->page->commit, 0);
73232 cpu_buffer->reader_page->read = 0;
73233
73234- local_set(&cpu_buffer->commit_overrun, 0);
73235+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
73236 local_set(&cpu_buffer->entries_bytes, 0);
73237- local_set(&cpu_buffer->overrun, 0);
73238+ local_set_unchecked(&cpu_buffer->overrun, 0);
73239 local_set(&cpu_buffer->entries, 0);
73240 local_set(&cpu_buffer->committing, 0);
73241 local_set(&cpu_buffer->commits, 0);
73242@@ -4279,8 +4279,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
73243 rb_init_page(bpage);
73244 bpage = reader->page;
73245 reader->page = *data_page;
73246- local_set(&reader->write, 0);
73247- local_set(&reader->entries, 0);
73248+ local_set_unchecked(&reader->write, 0);
73249+ local_set_unchecked(&reader->entries, 0);
73250 reader->read = 0;
73251 *data_page = bpage;
73252
73253diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
73254index 31e4f55..62da00f 100644
73255--- a/kernel/trace/trace.c
73256+++ b/kernel/trace/trace.c
73257@@ -4436,10 +4436,9 @@ static const struct file_operations tracing_dyn_info_fops = {
73258 };
73259 #endif
73260
73261-static struct dentry *d_tracer;
73262-
73263 struct dentry *tracing_init_dentry(void)
73264 {
73265+ static struct dentry *d_tracer;
73266 static int once;
73267
73268 if (d_tracer)
73269@@ -4459,10 +4458,9 @@ struct dentry *tracing_init_dentry(void)
73270 return d_tracer;
73271 }
73272
73273-static struct dentry *d_percpu;
73274-
73275 struct dentry *tracing_dentry_percpu(void)
73276 {
73277+ static struct dentry *d_percpu;
73278 static int once;
73279 struct dentry *d_tracer;
73280
73281diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
73282index d608d09..bd3801f 100644
73283--- a/kernel/trace/trace_events.c
73284+++ b/kernel/trace/trace_events.c
73285@@ -1320,10 +1320,6 @@ static LIST_HEAD(ftrace_module_file_list);
73286 struct ftrace_module_file_ops {
73287 struct list_head list;
73288 struct module *mod;
73289- struct file_operations id;
73290- struct file_operations enable;
73291- struct file_operations format;
73292- struct file_operations filter;
73293 };
73294
73295 static struct ftrace_module_file_ops *
73296@@ -1344,17 +1340,12 @@ trace_create_file_ops(struct module *mod)
73297
73298 file_ops->mod = mod;
73299
73300- file_ops->id = ftrace_event_id_fops;
73301- file_ops->id.owner = mod;
73302-
73303- file_ops->enable = ftrace_enable_fops;
73304- file_ops->enable.owner = mod;
73305-
73306- file_ops->filter = ftrace_event_filter_fops;
73307- file_ops->filter.owner = mod;
73308-
73309- file_ops->format = ftrace_event_format_fops;
73310- file_ops->format.owner = mod;
73311+ pax_open_kernel();
73312+ mod->trace_id.owner = mod;
73313+ mod->trace_enable.owner = mod;
73314+ mod->trace_filter.owner = mod;
73315+ mod->trace_format.owner = mod;
73316+ pax_close_kernel();
73317
73318 list_add(&file_ops->list, &ftrace_module_file_list);
73319
73320@@ -1378,8 +1369,8 @@ static void trace_module_add_events(struct module *mod)
73321
73322 for_each_event(call, start, end) {
73323 __trace_add_event_call(*call, mod,
73324- &file_ops->id, &file_ops->enable,
73325- &file_ops->filter, &file_ops->format);
73326+ &mod->trace_id, &mod->trace_enable,
73327+ &mod->trace_filter, &mod->trace_format);
73328 }
73329 }
73330
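Instead of keeping four writable file_operations copies per module, the trace_events change points event files at fields of struct module itself (per-module trace_id/trace_enable/trace_filter/trace_format fields this patch adds elsewhere) and sets their .owner inside a pax_open_kernel()/pax_close_kernel() window, the sanctioned way to perform a one-off write to otherwise read-only data; the trace_output.c hunk below uses the same window, casting through *(void **)& to get past the const qualifier. On x86 PaX implements the window, as I understand it, by briefly clearing CR0.WP; a userspace analogy using mprotect:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        strcpy(p, "init-time value");
        mprotect(p, pagesz, PROT_READ);               /* normally read-only */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        strcpy(p, "patched value");                   /* the sanctioned write */
        mprotect(p, pagesz, PROT_READ);               /* pax_close_kernel() */

        puts(p);
        return 0;
    }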
73331diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
73332index fd3c8aa..5f324a6 100644
73333--- a/kernel/trace/trace_mmiotrace.c
73334+++ b/kernel/trace/trace_mmiotrace.c
73335@@ -24,7 +24,7 @@ struct header_iter {
73336 static struct trace_array *mmio_trace_array;
73337 static bool overrun_detected;
73338 static unsigned long prev_overruns;
73339-static atomic_t dropped_count;
73340+static atomic_unchecked_t dropped_count;
73341
73342 static void mmio_reset_data(struct trace_array *tr)
73343 {
73344@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
73345
73346 static unsigned long count_overruns(struct trace_iterator *iter)
73347 {
73348- unsigned long cnt = atomic_xchg(&dropped_count, 0);
73349+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
73350 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
73351
73352 if (over > prev_overruns)
73353@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
73354 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
73355 sizeof(*entry), 0, pc);
73356 if (!event) {
73357- atomic_inc(&dropped_count);
73358+ atomic_inc_unchecked(&dropped_count);
73359 return;
73360 }
73361 entry = ring_buffer_event_data(event);
73362@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
73363 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
73364 sizeof(*entry), 0, pc);
73365 if (!event) {
73366- atomic_inc(&dropped_count);
73367+ atomic_inc_unchecked(&dropped_count);
73368 return;
73369 }
73370 entry = ring_buffer_event_data(event);
73371diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
73372index 123b189..1e9e2a6 100644
73373--- a/kernel/trace/trace_output.c
73374+++ b/kernel/trace/trace_output.c
73375@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
73376
73377 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
73378 if (!IS_ERR(p)) {
73379- p = mangle_path(s->buffer + s->len, p, "\n");
73380+ p = mangle_path(s->buffer + s->len, p, "\n\\");
73381 if (p) {
73382 s->len = p - s->buffer;
73383 return 1;
73384@@ -824,14 +824,16 @@ int register_ftrace_event(struct trace_event *event)
73385 goto out;
73386 }
73387
73388+ pax_open_kernel();
73389 if (event->funcs->trace == NULL)
73390- event->funcs->trace = trace_nop_print;
73391+ *(void **)&event->funcs->trace = trace_nop_print;
73392 if (event->funcs->raw == NULL)
73393- event->funcs->raw = trace_nop_print;
73394+ *(void **)&event->funcs->raw = trace_nop_print;
73395 if (event->funcs->hex == NULL)
73396- event->funcs->hex = trace_nop_print;
73397+ *(void **)&event->funcs->hex = trace_nop_print;
73398 if (event->funcs->binary == NULL)
73399- event->funcs->binary = trace_nop_print;
73400+ *(void **)&event->funcs->binary = trace_nop_print;
73401+ pax_close_kernel();
73402
73403 key = event->type & (EVENT_HASHSIZE - 1);
73404
73405diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
73406index 0c1b1657..95337e9 100644
73407--- a/kernel/trace/trace_stack.c
73408+++ b/kernel/trace/trace_stack.c
73409@@ -53,7 +53,7 @@ static inline void check_stack(void)
73410 return;
73411
73412 /* we do not handle interrupt stacks yet */
73413- if (!object_is_on_stack(&this_size))
73414+ if (!object_starts_on_stack(&this_size))
73415 return;
73416
73417 local_irq_save(flags);
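The object_is_on_stack to object_starts_on_stack rename (repeated in lib/debugobjects.c and lib/dma-debug.c below) changes no logic; it documents that the helper range-checks only the object's first byte against the task stack, so an object can pass the test and still extend past the stack's end. A sketch of the check, with an illustrative THREAD_SIZE:

    #include <stdbool.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192  /* illustrative; the real value is per-arch */

    static bool object_starts_on_stack(const void *obj, const void *stack)
    {
        const char *o = obj, *s = stack;
        /* only the first byte is range-checked, not the object's end */
        return o >= s && o < s + THREAD_SIZE;
    }

    int main(void)
    {
        static char stack[THREAD_SIZE];
        /* starts 4 bytes below the top, so it passes even if the
         * object itself is far larger than 4 bytes: */
        printf("%d\n", object_starts_on_stack(stack + THREAD_SIZE - 4, stack));
        return 0;
    }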
73418diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
73419index 28e9d6c9..50381bd 100644
73420--- a/lib/Kconfig.debug
73421+++ b/lib/Kconfig.debug
73422@@ -1278,6 +1278,7 @@ config LATENCYTOP
73423 depends on DEBUG_KERNEL
73424 depends on STACKTRACE_SUPPORT
73425 depends on PROC_FS
73426+ depends on !GRKERNSEC_HIDESYM
73427 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
73428 select KALLSYMS
73429 select KALLSYMS_ALL
73430@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
73431
73432 config PROVIDE_OHCI1394_DMA_INIT
73433 bool "Remote debugging over FireWire early on boot"
73434- depends on PCI && X86
73435+ depends on PCI && X86 && !GRKERNSEC
73436 help
73437 If you want to debug problems which hang or crash the kernel early
73438 on boot and the crashing machine has a FireWire port, you can use
73439@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
73440
73441 config FIREWIRE_OHCI_REMOTE_DMA
73442 bool "Remote debugging over FireWire with firewire-ohci"
73443- depends on FIREWIRE_OHCI
73444+ depends on FIREWIRE_OHCI && !GRKERNSEC
73445 help
73446 This option lets you use the FireWire bus for remote debugging
73447 with help of the firewire-ohci driver. It enables unfiltered
73448diff --git a/lib/Makefile b/lib/Makefile
73449index a08b791..a3ff1eb 100644
73450--- a/lib/Makefile
73451+++ b/lib/Makefile
73452@@ -46,7 +46,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
73453
73454 obj-$(CONFIG_BTREE) += btree.o
73455 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
73456-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
73457+obj-y += list_debug.o
73458 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
73459
73460 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
73461diff --git a/lib/bitmap.c b/lib/bitmap.c
73462index 06fdfa1..97c5c7d 100644
73463--- a/lib/bitmap.c
73464+++ b/lib/bitmap.c
73465@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
73466 {
73467 int c, old_c, totaldigits, ndigits, nchunks, nbits;
73468 u32 chunk;
73469- const char __user __force *ubuf = (const char __user __force *)buf;
73470+ const char __user *ubuf = (const char __force_user *)buf;
73471
73472 bitmap_zero(maskp, nmaskbits);
73473
73474@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
73475 {
73476 if (!access_ok(VERIFY_READ, ubuf, ulen))
73477 return -EFAULT;
73478- return __bitmap_parse((const char __force *)ubuf,
73479+ return __bitmap_parse((const char __force_kernel *)ubuf,
73480 ulen, 1, maskp, nmaskbits);
73481
73482 }
73483@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
73484 {
73485 unsigned a, b;
73486 int c, old_c, totaldigits;
73487- const char __user __force *ubuf = (const char __user __force *)buf;
73488+ const char __user *ubuf = (const char __force_user *)buf;
73489 int exp_digit, in_range;
73490
73491 totaldigits = c = 0;
73492@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
73493 {
73494 if (!access_ok(VERIFY_READ, ubuf, ulen))
73495 return -EFAULT;
73496- return __bitmap_parselist((const char __force *)ubuf,
73497+ return __bitmap_parselist((const char __force_kernel *)ubuf,
73498 ulen, 1, maskp, nmaskbits);
73499 }
73500 EXPORT_SYMBOL(bitmap_parselist_user);
73501diff --git a/lib/bug.c b/lib/bug.c
73502index d0cdf14..4d07bd2 100644
73503--- a/lib/bug.c
73504+++ b/lib/bug.c
73505@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
73506 return BUG_TRAP_TYPE_NONE;
73507
73508 bug = find_bug(bugaddr);
73509+ if (!bug)
73510+ return BUG_TRAP_TYPE_NONE;
73511
73512 file = NULL;
73513 line = 0;
73514diff --git a/lib/debugobjects.c b/lib/debugobjects.c
73515index d11808c..dc2d6f8 100644
73516--- a/lib/debugobjects.c
73517+++ b/lib/debugobjects.c
73518@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
73519 if (limit > 4)
73520 return;
73521
73522- is_on_stack = object_is_on_stack(addr);
73523+ is_on_stack = object_starts_on_stack(addr);
73524 if (is_on_stack == onstack)
73525 return;
73526
73527diff --git a/lib/devres.c b/lib/devres.c
73528index 80b9c76..9e32279 100644
73529--- a/lib/devres.c
73530+++ b/lib/devres.c
73531@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
73532 void devm_iounmap(struct device *dev, void __iomem *addr)
73533 {
73534 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
73535- (void *)addr));
73536+ (void __force *)addr));
73537 iounmap(addr);
73538 }
73539 EXPORT_SYMBOL(devm_iounmap);
73540@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
73541 {
73542 ioport_unmap(addr);
73543 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
73544- devm_ioport_map_match, (void *)addr));
73545+ devm_ioport_map_match, (void __force *)addr));
73546 }
73547 EXPORT_SYMBOL(devm_ioport_unmap);
73548
73549diff --git a/lib/dma-debug.c b/lib/dma-debug.c
73550index d84beb9..da44791 100644
73551--- a/lib/dma-debug.c
73552+++ b/lib/dma-debug.c
73553@@ -754,7 +754,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
73554
73555 void dma_debug_add_bus(struct bus_type *bus)
73556 {
73557- struct notifier_block *nb;
73558+ notifier_block_no_const *nb;
73559
73560 if (global_disable)
73561 return;
73562@@ -919,7 +919,7 @@ out:
73563
73564 static void check_for_stack(struct device *dev, void *addr)
73565 {
73566- if (object_is_on_stack(addr))
73567+ if (object_starts_on_stack(addr))
73568 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
73569 "stack [addr=%p]\n", addr);
73570 }
73571diff --git a/lib/inflate.c b/lib/inflate.c
73572index 013a761..c28f3fc 100644
73573--- a/lib/inflate.c
73574+++ b/lib/inflate.c
73575@@ -269,7 +269,7 @@ static void free(void *where)
73576 malloc_ptr = free_mem_ptr;
73577 }
73578 #else
73579-#define malloc(a) kmalloc(a, GFP_KERNEL)
73580+#define malloc(a) kmalloc((a), GFP_KERNEL)
73581 #define free(a) kfree(a)
73582 #endif
73583
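
Wrapping the argument as kmalloc((a), GFP_KERNEL) is defensive macro hygiene only: in a function-argument position the extra parentheses cannot change meaning, but the change follows the general rule that macro parameters should always be parenthesized, because elsewhere operator precedence does bite. The canonical illustration (not from this patch):

    #define DOUBLE_BAD(x)   x * 2
    #define DOUBLE_GOOD(x)  ((x) * 2)

    int a = DOUBLE_BAD(1 + 2);   /* expands to 1 + 2 * 2,   == 5 (wrong) */
    int b = DOUBLE_GOOD(1 + 2);  /* expands to ((1 + 2) * 2), == 6       */
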
73584diff --git a/lib/ioremap.c b/lib/ioremap.c
73585index 0c9216c..863bd89 100644
73586--- a/lib/ioremap.c
73587+++ b/lib/ioremap.c
73588@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
73589 unsigned long next;
73590
73591 phys_addr -= addr;
73592- pmd = pmd_alloc(&init_mm, pud, addr);
73593+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
73594 if (!pmd)
73595 return -ENOMEM;
73596 do {
73597@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
73598 unsigned long next;
73599
73600 phys_addr -= addr;
73601- pud = pud_alloc(&init_mm, pgd, addr);
73602+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
73603 if (!pud)
73604 return -ENOMEM;
73605 do {
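
pmd_alloc_kernel()/pud_alloc_kernel() are the init_mm-only allocation paths this patch introduces (their __pmd_alloc_kernel()/__pud_alloc_kernel() backends appear in the mm/memory.c hunks below); they populate the upper level with the *_populate_kernel() variants so KERNEXEC/PER_CPU_PGD kernel page tables are wired up without the per-process handling. A sketch of the wrapper shape, mirroring the stock pmd_alloc() inline; the actual inline lives in the patch's header changes and may differ:

    static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
                                          unsigned long address)
    {
            /* allocate and splice a pmd only if the pud slot is still empty */
            return (unlikely(pud_none(*pud)) &&
                    __pmd_alloc_kernel(mm, pud, address)) ?
                            NULL : pmd_offset(pud, address);
    }
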
73606diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
73607index bd2bea9..6b3c95e 100644
73608--- a/lib/is_single_threaded.c
73609+++ b/lib/is_single_threaded.c
73610@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
73611 struct task_struct *p, *t;
73612 bool ret;
73613
73614+ if (!mm)
73615+ return true;
73616+
73617 if (atomic_read(&task->signal->live) != 1)
73618 return false;
73619
73620diff --git a/lib/list_debug.c b/lib/list_debug.c
73621index c24c2f7..3fc5da0 100644
73622--- a/lib/list_debug.c
73623+++ b/lib/list_debug.c
73624@@ -11,7 +11,9 @@
73625 #include <linux/bug.h>
73626 #include <linux/kernel.h>
73627 #include <linux/rculist.h>
73628+#include <linux/mm.h>
73629
73630+#ifdef CONFIG_DEBUG_LIST
73631 /*
73632 * Insert a new entry between two known consecutive entries.
73633 *
73634@@ -19,21 +21,32 @@
73635 * the prev/next entries already!
73636 */
73637
73638-void __list_add(struct list_head *new,
73639- struct list_head *prev,
73640- struct list_head *next)
73641+static bool __list_add_debug(struct list_head *new,
73642+ struct list_head *prev,
73643+ struct list_head *next)
73644 {
73645- WARN(next->prev != prev,
73646+ if (WARN(next->prev != prev,
73647 "list_add corruption. next->prev should be "
73648 "prev (%p), but was %p. (next=%p).\n",
73649- prev, next->prev, next);
73650- WARN(prev->next != next,
73651+ prev, next->prev, next) ||
73652+ WARN(prev->next != next,
73653 "list_add corruption. prev->next should be "
73654 "next (%p), but was %p. (prev=%p).\n",
73655- next, prev->next, prev);
73656- WARN(new == prev || new == next,
73657+ next, prev->next, prev) ||
73658+ WARN(new == prev || new == next,
73659 "list_add double add: new=%p, prev=%p, next=%p.\n",
73660- new, prev, next);
73661+ new, prev, next))
73662+ return false;
73663+ return true;
73664+}
73665+
73666+void __list_add(struct list_head *new,
73667+ struct list_head *prev,
73668+ struct list_head *next)
73669+{
73670+ if (!__list_add_debug(new, prev, next))
73671+ return;
73672+
73673 next->prev = new;
73674 new->next = next;
73675 new->prev = prev;
73676@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
73677 }
73678 EXPORT_SYMBOL(__list_add);
73679
73680-void __list_del_entry(struct list_head *entry)
73681+static bool __list_del_entry_debug(struct list_head *entry)
73682 {
73683 struct list_head *prev, *next;
73684
73685@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
73686 WARN(next->prev != entry,
73687 "list_del corruption. next->prev should be %p, "
73688 "but was %p\n", entry, next->prev))
73689+ return false;
73690+ return true;
73691+}
73692+
73693+void __list_del_entry(struct list_head *entry)
73694+{
73695+ if (!__list_del_entry_debug(entry))
73696 return;
73697
73698- __list_del(prev, next);
73699+ __list_del(entry->prev, entry->next);
73700 }
73701 EXPORT_SYMBOL(__list_del_entry);
73702
73703@@ -86,15 +106,54 @@ EXPORT_SYMBOL(list_del);
73704 void __list_add_rcu(struct list_head *new,
73705 struct list_head *prev, struct list_head *next)
73706 {
73707- WARN(next->prev != prev,
73708+ if (WARN(next->prev != prev,
73709 "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
73710- prev, next->prev, next);
73711- WARN(prev->next != next,
73712+ prev, next->prev, next) ||
73713+ WARN(prev->next != next,
73714 "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
73715- next, prev->next, prev);
73716+ next, prev->next, prev))
73717+ return;
73718+
73719 new->next = next;
73720 new->prev = prev;
73721 rcu_assign_pointer(list_next_rcu(prev), new);
73722 next->prev = new;
73723 }
73724 EXPORT_SYMBOL(__list_add_rcu);
73725+#endif
73726+
73727+void pax_list_add_tail(struct list_head *new, struct list_head *head)
73728+{
73729+ struct list_head *prev, *next;
73730+
73731+ prev = head->prev;
73732+ next = head;
73733+
73734+#ifdef CONFIG_DEBUG_LIST
73735+ if (!__list_add_debug(new, prev, next))
73736+ return;
73737+#endif
73738+
73739+ pax_open_kernel();
73740+ next->prev = new;
73741+ new->next = next;
73742+ new->prev = prev;
73743+ prev->next = new;
73744+ pax_close_kernel();
73745+}
73746+EXPORT_SYMBOL(pax_list_add_tail);
73747+
73748+void pax_list_del(struct list_head *entry)
73749+{
73750+#ifdef CONFIG_DEBUG_LIST
73751+ if (!__list_del_entry_debug(entry))
73752+ return;
73753+#endif
73754+
73755+ pax_open_kernel();
73756+ __list_del(entry->prev, entry->next);
73757+ entry->next = LIST_POISON1;
73758+ entry->prev = LIST_POISON2;
73759+ pax_close_kernel();
73760+}
73761+EXPORT_SYMBOL(pax_list_del);
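
Note the structure of this file after the hunk: the corruption checks are split into reusable __list_add_debug()/__list_del_entry_debug() predicates, the original API is wrapped around them under CONFIG_DEBUG_LIST, and the new pax_list_add_tail()/pax_list_del() are compiled unconditionally, which is why the lib/Makefile hunk above switches list_debug.o from obj-$(CONFIG_DEBUG_LIST) to obj-y. The pax_ variants bracket the link updates with pax_open_kernel()/pax_close_kernel() so lists whose nodes live in read-only memory can still be maintained. A usage sketch with a hypothetical struct handler:

    struct handler {
            struct list_head node;
            void (*fn)(void);
    };

    static LIST_HEAD(handlers);

    /* h points into a region that KERNEXEC keeps read-only */
    static void register_handler(struct handler *h)
    {
            pax_list_add_tail(&h->node, &handlers); /* opens the write window internally */
    }

    static void unregister_handler(struct handler *h)
    {
            pax_list_del(&h->node);                 /* unlinks and poisons next/prev */
    }
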
73762diff --git a/lib/radix-tree.c b/lib/radix-tree.c
73763index e796429..6e38f9f 100644
73764--- a/lib/radix-tree.c
73765+++ b/lib/radix-tree.c
73766@@ -92,7 +92,7 @@ struct radix_tree_preload {
73767 int nr;
73768 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
73769 };
73770-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
73771+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
73772
73773 static inline void *ptr_to_indirect(void *ptr)
73774 {
73775diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
73776index bb2b201..46abaf9 100644
73777--- a/lib/strncpy_from_user.c
73778+++ b/lib/strncpy_from_user.c
73779@@ -21,7 +21,7 @@
73780 */
73781 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
73782 {
73783- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73784+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73785 long res = 0;
73786
73787 /*
73788diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
73789index a28df52..3d55877 100644
73790--- a/lib/strnlen_user.c
73791+++ b/lib/strnlen_user.c
73792@@ -26,7 +26,7 @@
73793 */
73794 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
73795 {
73796- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73797+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
73798 long align, res = 0;
73799 unsigned long c;
73800
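
The change is identical in both string helpers: without `static`, WORD_AT_A_TIME_CONSTANTS is materialized as an automatic variable in every call frame; with `static const` it becomes a single image in .rodata, shared by all calls and kept out of writable stack memory. A sketch of the difference, using a simplified stand-in for the real, arch-specific structure:

    struct word_at_a_time { unsigned long one_bits, high_bits; };
    #define WORD_AT_A_TIME_CONSTANTS { ~0UL / 0xff, ~0UL / 0xff * 0x80 }

    unsigned long stack_copy(void)
    {
            const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;        /* rebuilt per call */
            return c.high_bits;
    }

    unsigned long rodata_copy(void)
    {
            static const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS; /* one copy in .rodata */
            return c.high_bits;
    }
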
73801diff --git a/lib/vsprintf.c b/lib/vsprintf.c
73802index 39c99fe..18f060b 100644
73803--- a/lib/vsprintf.c
73804+++ b/lib/vsprintf.c
73805@@ -16,6 +16,9 @@
73806 * - scnprintf and vscnprintf
73807 */
73808
73809+#ifdef CONFIG_GRKERNSEC_HIDESYM
73810+#define __INCLUDED_BY_HIDESYM 1
73811+#endif
73812 #include <stdarg.h>
73813 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
73814 #include <linux/types.h>
73815@@ -533,7 +536,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
73816 char sym[KSYM_SYMBOL_LEN];
73817 if (ext == 'B')
73818 sprint_backtrace(sym, value);
73819- else if (ext != 'f' && ext != 's')
73820+ else if (ext != 'f' && ext != 's' && ext != 'a')
73821 sprint_symbol(sym, value);
73822 else
73823 sprint_symbol_no_offset(sym, value);
73824@@ -966,7 +969,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
73825 return number(buf, end, *(const netdev_features_t *)addr, spec);
73826 }
73827
73828+#ifdef CONFIG_GRKERNSEC_HIDESYM
73829+int kptr_restrict __read_mostly = 2;
73830+#else
73831 int kptr_restrict __read_mostly;
73832+#endif
73833
73834 /*
73835 * Show a '%p' thing. A kernel extension is that the '%p' is followed
73836@@ -980,6 +987,8 @@ int kptr_restrict __read_mostly;
73837 * - 'S' For symbolic direct pointers with offset
73838 * - 's' For symbolic direct pointers without offset
73839 * - 'B' For backtraced symbolic direct pointers with offset
73840+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
73841+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
73842 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
73843 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
73844 * - 'M' For a 6-byte MAC address, it prints the address in the
73845@@ -1035,12 +1044,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73846
73847 if (!ptr && *fmt != 'K') {
73848 /*
73849- * Print (null) with the same width as a pointer so it makes
73850+ * Print (nil) with the same width as a pointer so it makes
73851 * tabular output look nice.
73852 */
73853 if (spec.field_width == -1)
73854 spec.field_width = default_width;
73855- return string(buf, end, "(null)", spec);
73856+ return string(buf, end, "(nil)", spec);
73857 }
73858
73859 switch (*fmt) {
73860@@ -1050,6 +1059,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73861 /* Fallthrough */
73862 case 'S':
73863 case 's':
73864+#ifdef CONFIG_GRKERNSEC_HIDESYM
73865+ break;
73866+#else
73867+ return symbol_string(buf, end, ptr, spec, *fmt);
73868+#endif
73869+ case 'A':
73870+ case 'a':
73871 case 'B':
73872 return symbol_string(buf, end, ptr, spec, *fmt);
73873 case 'R':
73874@@ -1090,6 +1106,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73875 va_end(va);
73876 return buf;
73877 }
73878+ case 'P':
73879+ break;
73880 case 'K':
73881 /*
73882 * %pK cannot be used in IRQ context because its test
73883@@ -1113,6 +1131,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
73884 }
73885 break;
73886 }
73887+
73888+#ifdef CONFIG_GRKERNSEC_HIDESYM
73889+	/* 'P' = approved pointers that may be copied to userland,
73890+	   as in the /proc/kallsyms case: we make it display nothing
73891+	   for non-root users and the real contents for root users.
73892+	   Also ignore 'K' pointers, since their NULLing for non-root
73893+	   users is already forced above.
73894+	*/
73895+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
73896+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
73897+ dump_stack();
73898+ ptr = NULL;
73899+ }
73900+#endif
73901+
73902 spec.flags |= SMALL;
73903 if (spec.field_width == -1) {
73904 spec.field_width = default_width;
73905@@ -1831,11 +1864,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73906 typeof(type) value; \
73907 if (sizeof(type) == 8) { \
73908 args = PTR_ALIGN(args, sizeof(u32)); \
73909- *(u32 *)&value = *(u32 *)args; \
73910- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
73911+ *(u32 *)&value = *(const u32 *)args; \
73912+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
73913 } else { \
73914 args = PTR_ALIGN(args, sizeof(type)); \
73915- value = *(typeof(type) *)args; \
73916+ value = *(const typeof(type) *)args; \
73917 } \
73918 args += sizeof(type); \
73919 value; \
73920@@ -1898,7 +1931,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
73921 case FORMAT_TYPE_STR: {
73922 const char *str_arg = args;
73923 args += strlen(str_arg) + 1;
73924- str = string(str, end, (char *)str_arg, spec);
73925+ str = string(str, end, str_arg, spec);
73926 break;
73927 }
73928
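
Taken together, these hunks make HIDESYM fail closed: kptr_restrict defaults to 2, plain %pS/%ps stop resolving symbol names and fall back to the raw address, '(null)' becomes '(nil)' to match glibc, and any other %p value that looks like a kernel pointer and is being formatted into a buffer marked for copying to userland is logged as an infoleak and NULLed. Call sites that are meant to keep printing (root-only files such as the kmemleak trace below) opt in with the new approved specifiers:

    /* '%pP' prints the raw pointer, '%pA' the symbol plus offset; both are
     * exempt from the HIDESYM infoleak filter: */
    seq_printf(seq, "  [<%pP>] %pA\n", ptr, ptr);
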
73929diff --git a/localversion-grsec b/localversion-grsec
73930new file mode 100644
73931index 0000000..7cd6065
73932--- /dev/null
73933+++ b/localversion-grsec
73934@@ -0,0 +1 @@
73935+-grsec
73936diff --git a/mm/Kconfig b/mm/Kconfig
73937index a3f8ddd..f31e92e 100644
73938--- a/mm/Kconfig
73939+++ b/mm/Kconfig
73940@@ -252,10 +252,10 @@ config KSM
73941 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
73942
73943 config DEFAULT_MMAP_MIN_ADDR
73944- int "Low address space to protect from user allocation"
73945+ int "Low address space to protect from user allocation"
73946 depends on MMU
73947- default 4096
73948- help
73949+ default 65536
73950+ help
73951 This is the portion of low virtual memory which should be protected
73952 from userspace allocation. Keeping a user from writing to low pages
73953 can help reduce the impact of kernel NULL pointer bugs.
73954@@ -286,7 +286,7 @@ config MEMORY_FAILURE
73955
73956 config HWPOISON_INJECT
73957 tristate "HWPoison pages injector"
73958- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
73959+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
73960 select PROC_PAGE_MONITOR
73961
73962 config NOMMU_INITIAL_TRIM_EXCESS
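
Raising DEFAULT_MMAP_MIN_ADDR from one page to 64KiB widens the unmappable window at the bottom of the address space, so a kernel NULL-pointer (or small-offset) dereference cannot land in attacker-controlled memory. A userspace demonstration, assuming the resulting default of vm.mmap_min_addr = 65536 and a caller without CAP_SYS_RAWIO:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* try to place a fixed mapping at 4KiB, below mmap_min_addr */
            void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (p == MAP_FAILED)
                    perror("mmap below vm.mmap_min_addr"); /* expected: Operation not permitted */
            return 0;
    }
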
73963diff --git a/mm/filemap.c b/mm/filemap.c
73964index 83efee7..3f99381 100644
73965--- a/mm/filemap.c
73966+++ b/mm/filemap.c
73967@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
73968 struct address_space *mapping = file->f_mapping;
73969
73970 if (!mapping->a_ops->readpage)
73971- return -ENOEXEC;
73972+ return -ENODEV;
73973 file_accessed(file);
73974 vma->vm_ops = &generic_file_vm_ops;
73975 return 0;
73976@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
73977 *pos = i_size_read(inode);
73978
73979 if (limit != RLIM_INFINITY) {
73980+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
73981 if (*pos >= limit) {
73982 send_sig(SIGXFSZ, current, 0);
73983 return -EFBIG;
73984diff --git a/mm/fremap.c b/mm/fremap.c
73985index a0aaf0e..20325c3 100644
73986--- a/mm/fremap.c
73987+++ b/mm/fremap.c
73988@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
73989 retry:
73990 vma = find_vma(mm, start);
73991
73992+#ifdef CONFIG_PAX_SEGMEXEC
73993+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
73994+ goto out;
73995+#endif
73996+
73997 /*
73998 * Make sure the vma is shared, that it supports prefaulting,
73999 * and that the remapped range is valid and fully within
74000diff --git a/mm/highmem.c b/mm/highmem.c
74001index 09fc744..3936897 100644
74002--- a/mm/highmem.c
74003+++ b/mm/highmem.c
74004@@ -138,9 +138,10 @@ static void flush_all_zero_pkmaps(void)
74005 * So no dangers, even with speculative execution.
74006 */
74007 page = pte_page(pkmap_page_table[i]);
74008+ pax_open_kernel();
74009 pte_clear(&init_mm, (unsigned long)page_address(page),
74010 &pkmap_page_table[i]);
74011-
74012+ pax_close_kernel();
74013 set_page_address(page, NULL);
74014 need_flush = 1;
74015 }
74016@@ -199,9 +200,11 @@ start:
74017 }
74018 }
74019 vaddr = PKMAP_ADDR(last_pkmap_nr);
74020+
74021+ pax_open_kernel();
74022 set_pte_at(&init_mm, vaddr,
74023 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
74024-
74025+ pax_close_kernel();
74026 pkmap_count[last_pkmap_nr] = 1;
74027 set_page_address(page, (void *)vaddr);
74028
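
pkmap_page_table is one of the structures KERNEXEC keeps read-only, so both PTE stores above are bracketed by pax_open_kernel()/pax_close_kernel(), the patch's idiom for a short, minimal write window (typically implemented by clearing CR0.WP on x86; other architectures differ). The pattern in isolation, as a sketch rather than the patch's definition:

    static void set_protected_pte(pte_t *slot, pte_t val)
    {
            pax_open_kernel();  /* temporarily permit kernel writes to read-only data */
            *slot = val;        /* keep the window as small as possible */
            pax_close_kernel(); /* restore write protection */
    }
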
74029diff --git a/mm/huge_memory.c b/mm/huge_memory.c
74030index 40f17c3..c1cc011 100644
74031--- a/mm/huge_memory.c
74032+++ b/mm/huge_memory.c
74033@@ -710,7 +710,7 @@ out:
74034 * run pte_offset_map on the pmd, if an huge pmd could
74035 * materialize from under us from a different thread.
74036 */
74037- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
74038+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
74039 return VM_FAULT_OOM;
74040 /* if an huge pmd materialized from under us just retry later */
74041 if (unlikely(pmd_trans_huge(*pmd)))
74042diff --git a/mm/hugetlb.c b/mm/hugetlb.c
74043index b969ed4..10e3e37 100644
74044--- a/mm/hugetlb.c
74045+++ b/mm/hugetlb.c
74046@@ -2509,6 +2509,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
74047 return 1;
74048 }
74049
74050+#ifdef CONFIG_PAX_SEGMEXEC
74051+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
74052+{
74053+ struct mm_struct *mm = vma->vm_mm;
74054+ struct vm_area_struct *vma_m;
74055+ unsigned long address_m;
74056+ pte_t *ptep_m;
74057+
74058+ vma_m = pax_find_mirror_vma(vma);
74059+ if (!vma_m)
74060+ return;
74061+
74062+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74063+ address_m = address + SEGMEXEC_TASK_SIZE;
74064+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
74065+ get_page(page_m);
74066+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
74067+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
74068+}
74069+#endif
74070+
74071 /*
74072 * Hugetlb_cow() should be called with page lock of the original hugepage held.
74073 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
74074@@ -2627,6 +2648,11 @@ retry_avoidcopy:
74075 make_huge_pte(vma, new_page, 1));
74076 page_remove_rmap(old_page);
74077 hugepage_add_new_anon_rmap(new_page, vma, address);
74078+
74079+#ifdef CONFIG_PAX_SEGMEXEC
74080+ pax_mirror_huge_pte(vma, address, new_page);
74081+#endif
74082+
74083 /* Make the old page be freed below */
74084 new_page = old_page;
74085 }
74086@@ -2786,6 +2812,10 @@ retry:
74087 && (vma->vm_flags & VM_SHARED)));
74088 set_huge_pte_at(mm, address, ptep, new_pte);
74089
74090+#ifdef CONFIG_PAX_SEGMEXEC
74091+ pax_mirror_huge_pte(vma, address, page);
74092+#endif
74093+
74094 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
74095 /* Optimization, do the COW without a second fault */
74096 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
74097@@ -2815,6 +2845,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74098 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
74099 struct hstate *h = hstate_vma(vma);
74100
74101+#ifdef CONFIG_PAX_SEGMEXEC
74102+ struct vm_area_struct *vma_m;
74103+#endif
74104+
74105 address &= huge_page_mask(h);
74106
74107 ptep = huge_pte_offset(mm, address);
74108@@ -2828,6 +2862,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74109 VM_FAULT_SET_HINDEX(hstate_index(h));
74110 }
74111
74112+#ifdef CONFIG_PAX_SEGMEXEC
74113+ vma_m = pax_find_mirror_vma(vma);
74114+ if (vma_m) {
74115+ unsigned long address_m;
74116+
74117+ if (vma->vm_start > vma_m->vm_start) {
74118+ address_m = address;
74119+ address -= SEGMEXEC_TASK_SIZE;
74120+ vma = vma_m;
74121+ h = hstate_vma(vma);
74122+ } else
74123+ address_m = address + SEGMEXEC_TASK_SIZE;
74124+
74125+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
74126+ return VM_FAULT_OOM;
74127+ address_m &= HPAGE_MASK;
74128+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
74129+ }
74130+#endif
74131+
74132 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
74133 if (!ptep)
74134 return VM_FAULT_OOM;
74135diff --git a/mm/internal.h b/mm/internal.h
74136index 3c5197d..08d0065 100644
74137--- a/mm/internal.h
74138+++ b/mm/internal.h
74139@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
74140 * in mm/page_alloc.c
74141 */
74142 extern void __free_pages_bootmem(struct page *page, unsigned int order);
74143+extern void free_compound_page(struct page *page);
74144 extern void prep_compound_page(struct page *page, unsigned long order);
74145 #ifdef CONFIG_MEMORY_FAILURE
74146 extern bool is_free_buddy_page(struct page *page);
74147diff --git a/mm/kmemleak.c b/mm/kmemleak.c
74148index a217cc5..44b2b35 100644
74149--- a/mm/kmemleak.c
74150+++ b/mm/kmemleak.c
74151@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
74152
74153 for (i = 0; i < object->trace_len; i++) {
74154 void *ptr = (void *)object->trace[i];
74155- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
74156+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
74157 }
74158 }
74159
74160@@ -1852,7 +1852,7 @@ static int __init kmemleak_late_init(void)
74161 return -ENOMEM;
74162 }
74163
74164- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
74165+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
74166 &kmemleak_fops);
74167 if (!dentry)
74168 pr_warning("Failed to create the debugfs kmemleak file\n");
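
Both kmemleak hunks serve the HIDESYM policy introduced in lib/vsprintf.c above: the debugfs file becomes readable by root only (S_IRUGO to S_IRUSR), and its stack traces switch to the approved %pP/%pA specifiers so the addresses survive the infoleak filter that would otherwise NULL them on the way to a userland-visible buffer.
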
74169diff --git a/mm/maccess.c b/mm/maccess.c
74170index d53adf9..03a24bf 100644
74171--- a/mm/maccess.c
74172+++ b/mm/maccess.c
74173@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
74174 set_fs(KERNEL_DS);
74175 pagefault_disable();
74176 ret = __copy_from_user_inatomic(dst,
74177- (__force const void __user *)src, size);
74178+ (const void __force_user *)src, size);
74179 pagefault_enable();
74180 set_fs(old_fs);
74181
74182@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
74183
74184 set_fs(KERNEL_DS);
74185 pagefault_disable();
74186- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
74187+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
74188 pagefault_enable();
74189 set_fs(old_fs);
74190
74191diff --git a/mm/madvise.c b/mm/madvise.c
74192index 03dfa5c..b032917 100644
74193--- a/mm/madvise.c
74194+++ b/mm/madvise.c
74195@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
74196 pgoff_t pgoff;
74197 unsigned long new_flags = vma->vm_flags;
74198
74199+#ifdef CONFIG_PAX_SEGMEXEC
74200+ struct vm_area_struct *vma_m;
74201+#endif
74202+
74203 switch (behavior) {
74204 case MADV_NORMAL:
74205 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
74206@@ -123,6 +127,13 @@ success:
74207 /*
74208 * vm_flags is protected by the mmap_sem held in write mode.
74209 */
74210+
74211+#ifdef CONFIG_PAX_SEGMEXEC
74212+ vma_m = pax_find_mirror_vma(vma);
74213+ if (vma_m)
74214+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
74215+#endif
74216+
74217 vma->vm_flags = new_flags;
74218
74219 out:
74220@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
74221 struct vm_area_struct ** prev,
74222 unsigned long start, unsigned long end)
74223 {
74224+
74225+#ifdef CONFIG_PAX_SEGMEXEC
74226+ struct vm_area_struct *vma_m;
74227+#endif
74228+
74229 *prev = vma;
74230 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
74231 return -EINVAL;
74232@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
74233 zap_page_range(vma, start, end - start, &details);
74234 } else
74235 zap_page_range(vma, start, end - start, NULL);
74236+
74237+#ifdef CONFIG_PAX_SEGMEXEC
74238+ vma_m = pax_find_mirror_vma(vma);
74239+ if (vma_m) {
74240+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
74241+ struct zap_details details = {
74242+ .nonlinear_vma = vma_m,
74243+ .last_index = ULONG_MAX,
74244+ };
74245+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
74246+ } else
74247+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
74248+ }
74249+#endif
74250+
74251 return 0;
74252 }
74253
74254@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
74255 if (end < start)
74256 goto out;
74257
74258+#ifdef CONFIG_PAX_SEGMEXEC
74259+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
74260+ if (end > SEGMEXEC_TASK_SIZE)
74261+ goto out;
74262+ } else
74263+#endif
74264+
74265+ if (end > TASK_SIZE)
74266+ goto out;
74267+
74268 error = 0;
74269 if (end == start)
74270 goto out;
74271diff --git a/mm/memory-failure.c b/mm/memory-failure.c
74272index 8b20278..05dac18 100644
74273--- a/mm/memory-failure.c
74274+++ b/mm/memory-failure.c
74275@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
74276
74277 int sysctl_memory_failure_recovery __read_mostly = 1;
74278
74279-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
74280+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
74281
74282 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
74283
74284@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
74285 pfn, t->comm, t->pid);
74286 si.si_signo = SIGBUS;
74287 si.si_errno = 0;
74288- si.si_addr = (void *)addr;
74289+ si.si_addr = (void __user *)addr;
74290 #ifdef __ARCH_SI_TRAPNO
74291 si.si_trapno = trapno;
74292 #endif
74293@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74294 }
74295
74296 nr_pages = 1 << compound_trans_order(hpage);
74297- atomic_long_add(nr_pages, &mce_bad_pages);
74298+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
74299
74300 /*
74301 * We need/can do nothing about count=0 pages.
74302@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74303 if (!PageHWPoison(hpage)
74304 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
74305 || (p != hpage && TestSetPageHWPoison(hpage))) {
74306- atomic_long_sub(nr_pages, &mce_bad_pages);
74307+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74308 return 0;
74309 }
74310 set_page_hwpoison_huge_page(hpage);
74311@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
74312 }
74313 if (hwpoison_filter(p)) {
74314 if (TestClearPageHWPoison(p))
74315- atomic_long_sub(nr_pages, &mce_bad_pages);
74316+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74317 unlock_page(hpage);
74318 put_page(hpage);
74319 return 0;
74320@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
74321 return 0;
74322 }
74323 if (TestClearPageHWPoison(p))
74324- atomic_long_sub(nr_pages, &mce_bad_pages);
74325+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74326 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
74327 return 0;
74328 }
74329@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
74330 */
74331 if (TestClearPageHWPoison(page)) {
74332 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
74333- atomic_long_sub(nr_pages, &mce_bad_pages);
74334+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
74335 freeit = 1;
74336 if (PageHuge(page))
74337 clear_page_hwpoison_huge_page(page);
74338@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
74339 }
74340 done:
74341 if (!PageHWPoison(hpage))
74342- atomic_long_add(1 << compound_trans_order(hpage),
74343+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
74344 &mce_bad_pages);
74345 set_page_hwpoison_huge_page(hpage);
74346 dequeue_hwpoisoned_huge_page(hpage);
74347@@ -1582,7 +1582,7 @@ int soft_offline_page(struct page *page, int flags)
74348 return ret;
74349
74350 done:
74351- atomic_long_add(1, &mce_bad_pages);
74352+ atomic_long_add_unchecked(1, &mce_bad_pages);
74353 SetPageHWPoison(page);
74354 /* keep elevated page count for bad page */
74355 return ret;
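
mce_bad_pages is a statistics counter, not a reference count, so under PAX_REFCOUNT (which makes the regular atomic ops trap on signed overflow) it is converted to the *_unchecked type whose operations deliberately omit the overflow check. An illustrative sketch only; the real type and operations are arch-specific and defined elsewhere in the patch:

    typedef struct { long counter; } atomic_long_unchecked_t;

    static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
    {
            __sync_fetch_and_add(&v->counter, i); /* atomic add, no overflow trap */
    }

    static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *v)
    {
            __sync_fetch_and_sub(&v->counter, i);
    }
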
74356diff --git a/mm/memory.c b/mm/memory.c
74357index f2973b2..fd020a7 100644
74358--- a/mm/memory.c
74359+++ b/mm/memory.c
74360@@ -431,6 +431,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74361 free_pte_range(tlb, pmd, addr);
74362 } while (pmd++, addr = next, addr != end);
74363
74364+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
74365 start &= PUD_MASK;
74366 if (start < floor)
74367 return;
74368@@ -445,6 +446,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
74369 pmd = pmd_offset(pud, start);
74370 pud_clear(pud);
74371 pmd_free_tlb(tlb, pmd, start);
74372+#endif
74373+
74374 }
74375
74376 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74377@@ -464,6 +467,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74378 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
74379 } while (pud++, addr = next, addr != end);
74380
74381+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
74382 start &= PGDIR_MASK;
74383 if (start < floor)
74384 return;
74385@@ -478,6 +482,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
74386 pud = pud_offset(pgd, start);
74387 pgd_clear(pgd);
74388 pud_free_tlb(tlb, pud, start);
74389+#endif
74390+
74391 }
74392
74393 /*
74394@@ -1626,12 +1632,6 @@ no_page_table:
74395 return page;
74396 }
74397
74398-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
74399-{
74400- return stack_guard_page_start(vma, addr) ||
74401- stack_guard_page_end(vma, addr+PAGE_SIZE);
74402-}
74403-
74404 /**
74405 * __get_user_pages() - pin user pages in memory
74406 * @tsk: task_struct of target task
74407@@ -1704,10 +1704,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74408 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
74409 i = 0;
74410
74411- do {
74412+ while (nr_pages) {
74413 struct vm_area_struct *vma;
74414
74415- vma = find_extend_vma(mm, start);
74416+ vma = find_vma(mm, start);
74417 if (!vma && in_gate_area(mm, start)) {
74418 unsigned long pg = start & PAGE_MASK;
74419 pgd_t *pgd;
74420@@ -1755,7 +1755,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74421 goto next_page;
74422 }
74423
74424- if (!vma ||
74425+ if (!vma || start < vma->vm_start ||
74426 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
74427 !(vm_flags & vma->vm_flags))
74428 return i ? : -EFAULT;
74429@@ -1782,11 +1782,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74430 int ret;
74431 unsigned int fault_flags = 0;
74432
74433- /* For mlock, just skip the stack guard page. */
74434- if (foll_flags & FOLL_MLOCK) {
74435- if (stack_guard_page(vma, start))
74436- goto next_page;
74437- }
74438 if (foll_flags & FOLL_WRITE)
74439 fault_flags |= FAULT_FLAG_WRITE;
74440 if (nonblocking)
74441@@ -1860,7 +1855,7 @@ next_page:
74442 start += PAGE_SIZE;
74443 nr_pages--;
74444 } while (nr_pages && start < vma->vm_end);
74445- } while (nr_pages);
74446+ }
74447 return i;
74448 }
74449 EXPORT_SYMBOL(__get_user_pages);
74450@@ -2067,6 +2062,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
74451 page_add_file_rmap(page);
74452 set_pte_at(mm, addr, pte, mk_pte(page, prot));
74453
74454+#ifdef CONFIG_PAX_SEGMEXEC
74455+ pax_mirror_file_pte(vma, addr, page, ptl);
74456+#endif
74457+
74458 retval = 0;
74459 pte_unmap_unlock(pte, ptl);
74460 return retval;
74461@@ -2111,9 +2110,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
74462 if (!page_count(page))
74463 return -EINVAL;
74464 if (!(vma->vm_flags & VM_MIXEDMAP)) {
74465+
74466+#ifdef CONFIG_PAX_SEGMEXEC
74467+ struct vm_area_struct *vma_m;
74468+#endif
74469+
74470 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
74471 BUG_ON(vma->vm_flags & VM_PFNMAP);
74472 vma->vm_flags |= VM_MIXEDMAP;
74473+
74474+#ifdef CONFIG_PAX_SEGMEXEC
74475+ vma_m = pax_find_mirror_vma(vma);
74476+ if (vma_m)
74477+ vma_m->vm_flags |= VM_MIXEDMAP;
74478+#endif
74479+
74480 }
74481 return insert_page(vma, addr, page, vma->vm_page_prot);
74482 }
74483@@ -2196,6 +2207,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
74484 unsigned long pfn)
74485 {
74486 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
74487+ BUG_ON(vma->vm_mirror);
74488
74489 if (addr < vma->vm_start || addr >= vma->vm_end)
74490 return -EFAULT;
74491@@ -2396,7 +2408,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
74492
74493 BUG_ON(pud_huge(*pud));
74494
74495- pmd = pmd_alloc(mm, pud, addr);
74496+ pmd = (mm == &init_mm) ?
74497+ pmd_alloc_kernel(mm, pud, addr) :
74498+ pmd_alloc(mm, pud, addr);
74499 if (!pmd)
74500 return -ENOMEM;
74501 do {
74502@@ -2416,7 +2430,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
74503 unsigned long next;
74504 int err;
74505
74506- pud = pud_alloc(mm, pgd, addr);
74507+ pud = (mm == &init_mm) ?
74508+ pud_alloc_kernel(mm, pgd, addr) :
74509+ pud_alloc(mm, pgd, addr);
74510 if (!pud)
74511 return -ENOMEM;
74512 do {
74513@@ -2504,6 +2520,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
74514 copy_user_highpage(dst, src, va, vma);
74515 }
74516
74517+#ifdef CONFIG_PAX_SEGMEXEC
74518+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
74519+{
74520+ struct mm_struct *mm = vma->vm_mm;
74521+ spinlock_t *ptl;
74522+ pte_t *pte, entry;
74523+
74524+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
74525+ entry = *pte;
74526+ if (!pte_present(entry)) {
74527+ if (!pte_none(entry)) {
74528+ BUG_ON(pte_file(entry));
74529+ free_swap_and_cache(pte_to_swp_entry(entry));
74530+ pte_clear_not_present_full(mm, address, pte, 0);
74531+ }
74532+ } else {
74533+ struct page *page;
74534+
74535+ flush_cache_page(vma, address, pte_pfn(entry));
74536+ entry = ptep_clear_flush(vma, address, pte);
74537+ BUG_ON(pte_dirty(entry));
74538+ page = vm_normal_page(vma, address, entry);
74539+ if (page) {
74540+ update_hiwater_rss(mm);
74541+ if (PageAnon(page))
74542+ dec_mm_counter_fast(mm, MM_ANONPAGES);
74543+ else
74544+ dec_mm_counter_fast(mm, MM_FILEPAGES);
74545+ page_remove_rmap(page);
74546+ page_cache_release(page);
74547+ }
74548+ }
74549+ pte_unmap_unlock(pte, ptl);
74550+}
74551+
74552+/* PaX: if the vma is mirrored, synchronize the mirror's PTE.
74553+ *
74554+ * The ptl of the lower mapped page is held on entry and is not released
74555+ * on exit or within, so PTE state changes (swapout, mremap, munmap, etc.) stay atomic.
74556+ */
74557+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74558+{
74559+ struct mm_struct *mm = vma->vm_mm;
74560+ unsigned long address_m;
74561+ spinlock_t *ptl_m;
74562+ struct vm_area_struct *vma_m;
74563+ pmd_t *pmd_m;
74564+ pte_t *pte_m, entry_m;
74565+
74566+ BUG_ON(!page_m || !PageAnon(page_m));
74567+
74568+ vma_m = pax_find_mirror_vma(vma);
74569+ if (!vma_m)
74570+ return;
74571+
74572+ BUG_ON(!PageLocked(page_m));
74573+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74574+ address_m = address + SEGMEXEC_TASK_SIZE;
74575+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74576+ pte_m = pte_offset_map(pmd_m, address_m);
74577+ ptl_m = pte_lockptr(mm, pmd_m);
74578+ if (ptl != ptl_m) {
74579+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74580+ if (!pte_none(*pte_m))
74581+ goto out;
74582+ }
74583+
74584+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74585+ page_cache_get(page_m);
74586+ page_add_anon_rmap(page_m, vma_m, address_m);
74587+ inc_mm_counter_fast(mm, MM_ANONPAGES);
74588+ set_pte_at(mm, address_m, pte_m, entry_m);
74589+ update_mmu_cache(vma_m, address_m, entry_m);
74590+out:
74591+ if (ptl != ptl_m)
74592+ spin_unlock(ptl_m);
74593+ pte_unmap(pte_m);
74594+ unlock_page(page_m);
74595+}
74596+
74597+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
74598+{
74599+ struct mm_struct *mm = vma->vm_mm;
74600+ unsigned long address_m;
74601+ spinlock_t *ptl_m;
74602+ struct vm_area_struct *vma_m;
74603+ pmd_t *pmd_m;
74604+ pte_t *pte_m, entry_m;
74605+
74606+ BUG_ON(!page_m || PageAnon(page_m));
74607+
74608+ vma_m = pax_find_mirror_vma(vma);
74609+ if (!vma_m)
74610+ return;
74611+
74612+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74613+ address_m = address + SEGMEXEC_TASK_SIZE;
74614+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74615+ pte_m = pte_offset_map(pmd_m, address_m);
74616+ ptl_m = pte_lockptr(mm, pmd_m);
74617+ if (ptl != ptl_m) {
74618+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74619+ if (!pte_none(*pte_m))
74620+ goto out;
74621+ }
74622+
74623+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
74624+ page_cache_get(page_m);
74625+ page_add_file_rmap(page_m);
74626+ inc_mm_counter_fast(mm, MM_FILEPAGES);
74627+ set_pte_at(mm, address_m, pte_m, entry_m);
74628+ update_mmu_cache(vma_m, address_m, entry_m);
74629+out:
74630+ if (ptl != ptl_m)
74631+ spin_unlock(ptl_m);
74632+ pte_unmap(pte_m);
74633+}
74634+
74635+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
74636+{
74637+ struct mm_struct *mm = vma->vm_mm;
74638+ unsigned long address_m;
74639+ spinlock_t *ptl_m;
74640+ struct vm_area_struct *vma_m;
74641+ pmd_t *pmd_m;
74642+ pte_t *pte_m, entry_m;
74643+
74644+ vma_m = pax_find_mirror_vma(vma);
74645+ if (!vma_m)
74646+ return;
74647+
74648+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
74649+ address_m = address + SEGMEXEC_TASK_SIZE;
74650+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
74651+ pte_m = pte_offset_map(pmd_m, address_m);
74652+ ptl_m = pte_lockptr(mm, pmd_m);
74653+ if (ptl != ptl_m) {
74654+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
74655+ if (!pte_none(*pte_m))
74656+ goto out;
74657+ }
74658+
74659+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
74660+ set_pte_at(mm, address_m, pte_m, entry_m);
74661+out:
74662+ if (ptl != ptl_m)
74663+ spin_unlock(ptl_m);
74664+ pte_unmap(pte_m);
74665+}
74666+
74667+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
74668+{
74669+ struct page *page_m;
74670+ pte_t entry;
74671+
74672+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
74673+ goto out;
74674+
74675+ entry = *pte;
74676+ page_m = vm_normal_page(vma, address, entry);
74677+ if (!page_m)
74678+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
74679+ else if (PageAnon(page_m)) {
74680+ if (pax_find_mirror_vma(vma)) {
74681+ pte_unmap_unlock(pte, ptl);
74682+ lock_page(page_m);
74683+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
74684+ if (pte_same(entry, *pte))
74685+ pax_mirror_anon_pte(vma, address, page_m, ptl);
74686+ else
74687+ unlock_page(page_m);
74688+ }
74689+ } else
74690+ pax_mirror_file_pte(vma, address, page_m, ptl);
74691+
74692+out:
74693+ pte_unmap_unlock(pte, ptl);
74694+}
74695+#endif
74696+
74697 /*
74698 * This routine handles present pages, when users try to write
74699 * to a shared page. It is done by copying the page to a new address
74700@@ -2720,6 +2916,12 @@ gotten:
74701 */
74702 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74703 if (likely(pte_same(*page_table, orig_pte))) {
74704+
74705+#ifdef CONFIG_PAX_SEGMEXEC
74706+ if (pax_find_mirror_vma(vma))
74707+ BUG_ON(!trylock_page(new_page));
74708+#endif
74709+
74710 if (old_page) {
74711 if (!PageAnon(old_page)) {
74712 dec_mm_counter_fast(mm, MM_FILEPAGES);
74713@@ -2771,6 +2973,10 @@ gotten:
74714 page_remove_rmap(old_page);
74715 }
74716
74717+#ifdef CONFIG_PAX_SEGMEXEC
74718+ pax_mirror_anon_pte(vma, address, new_page, ptl);
74719+#endif
74720+
74721 /* Free the old page.. */
74722 new_page = old_page;
74723 ret |= VM_FAULT_WRITE;
74724@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74725 swap_free(entry);
74726 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
74727 try_to_free_swap(page);
74728+
74729+#ifdef CONFIG_PAX_SEGMEXEC
74730+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
74731+#endif
74732+
74733 unlock_page(page);
74734 if (swapcache) {
74735 /*
74736@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
74737
74738 /* No need to invalidate - it was non-present before */
74739 update_mmu_cache(vma, address, page_table);
74740+
74741+#ifdef CONFIG_PAX_SEGMEXEC
74742+ pax_mirror_anon_pte(vma, address, page, ptl);
74743+#endif
74744+
74745 unlock:
74746 pte_unmap_unlock(page_table, ptl);
74747 out:
74748@@ -3093,40 +3309,6 @@ out_release:
74749 }
74750
74751 /*
74752- * This is like a special single-page "expand_{down|up}wards()",
74753- * except we must first make sure that 'address{-|+}PAGE_SIZE'
74754- * doesn't hit another vma.
74755- */
74756-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
74757-{
74758- address &= PAGE_MASK;
74759- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
74760- struct vm_area_struct *prev = vma->vm_prev;
74761-
74762- /*
74763- * Is there a mapping abutting this one below?
74764- *
74765- * That's only ok if it's the same stack mapping
74766- * that has gotten split..
74767- */
74768- if (prev && prev->vm_end == address)
74769- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
74770-
74771- expand_downwards(vma, address - PAGE_SIZE);
74772- }
74773- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
74774- struct vm_area_struct *next = vma->vm_next;
74775-
74776- /* As VM_GROWSDOWN but s/below/above/ */
74777- if (next && next->vm_start == address + PAGE_SIZE)
74778- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
74779-
74780- expand_upwards(vma, address + PAGE_SIZE);
74781- }
74782- return 0;
74783-}
74784-
74785-/*
74786 * We enter with non-exclusive mmap_sem (to exclude vma changes,
74787 * but allow concurrent faults), and pte mapped but not yet locked.
74788 * We return with mmap_sem still held, but pte unmapped and unlocked.
74789@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74790 unsigned long address, pte_t *page_table, pmd_t *pmd,
74791 unsigned int flags)
74792 {
74793- struct page *page;
74794+ struct page *page = NULL;
74795 spinlock_t *ptl;
74796 pte_t entry;
74797
74798- pte_unmap(page_table);
74799-
74800- /* Check if we need to add a guard page to the stack */
74801- if (check_stack_guard_page(vma, address) < 0)
74802- return VM_FAULT_SIGBUS;
74803-
74804- /* Use the zero-page for reads */
74805 if (!(flags & FAULT_FLAG_WRITE)) {
74806 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
74807 vma->vm_page_prot));
74808- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
74809+ ptl = pte_lockptr(mm, pmd);
74810+ spin_lock(ptl);
74811 if (!pte_none(*page_table))
74812 goto unlock;
74813 goto setpte;
74814 }
74815
74816 /* Allocate our own private page. */
74817+ pte_unmap(page_table);
74818+
74819 if (unlikely(anon_vma_prepare(vma)))
74820 goto oom;
74821 page = alloc_zeroed_user_highpage_movable(vma, address);
74822@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
74823 if (!pte_none(*page_table))
74824 goto release;
74825
74826+#ifdef CONFIG_PAX_SEGMEXEC
74827+ if (pax_find_mirror_vma(vma))
74828+ BUG_ON(!trylock_page(page));
74829+#endif
74830+
74831 inc_mm_counter_fast(mm, MM_ANONPAGES);
74832 page_add_new_anon_rmap(page, vma, address);
74833 setpte:
74834@@ -3181,6 +3364,12 @@ setpte:
74835
74836 /* No need to invalidate - it was non-present before */
74837 update_mmu_cache(vma, address, page_table);
74838+
74839+#ifdef CONFIG_PAX_SEGMEXEC
74840+ if (page)
74841+ pax_mirror_anon_pte(vma, address, page, ptl);
74842+#endif
74843+
74844 unlock:
74845 pte_unmap_unlock(page_table, ptl);
74846 return 0;
74847@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74848 */
74849 /* Only go through if we didn't race with anybody else... */
74850 if (likely(pte_same(*page_table, orig_pte))) {
74851+
74852+#ifdef CONFIG_PAX_SEGMEXEC
74853+ if (anon && pax_find_mirror_vma(vma))
74854+ BUG_ON(!trylock_page(page));
74855+#endif
74856+
74857 flush_icache_page(vma, page);
74858 entry = mk_pte(page, vma->vm_page_prot);
74859 if (flags & FAULT_FLAG_WRITE)
74860@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74861
74862 /* no need to invalidate: a not-present page won't be cached */
74863 update_mmu_cache(vma, address, page_table);
74864+
74865+#ifdef CONFIG_PAX_SEGMEXEC
74866+ if (anon)
74867+ pax_mirror_anon_pte(vma, address, page, ptl);
74868+ else
74869+ pax_mirror_file_pte(vma, address, page, ptl);
74870+#endif
74871+
74872 } else {
74873 if (cow_page)
74874 mem_cgroup_uncharge_page(cow_page);
74875@@ -3497,6 +3700,12 @@ int handle_pte_fault(struct mm_struct *mm,
74876 if (flags & FAULT_FLAG_WRITE)
74877 flush_tlb_fix_spurious_fault(vma, address);
74878 }
74879+
74880+#ifdef CONFIG_PAX_SEGMEXEC
74881+ pax_mirror_pte(vma, address, pte, pmd, ptl);
74882+ return 0;
74883+#endif
74884+
74885 unlock:
74886 pte_unmap_unlock(pte, ptl);
74887 return 0;
74888@@ -3513,6 +3722,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74889 pmd_t *pmd;
74890 pte_t *pte;
74891
74892+#ifdef CONFIG_PAX_SEGMEXEC
74893+ struct vm_area_struct *vma_m;
74894+#endif
74895+
74896 __set_current_state(TASK_RUNNING);
74897
74898 count_vm_event(PGFAULT);
74899@@ -3524,6 +3737,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
74900 if (unlikely(is_vm_hugetlb_page(vma)))
74901 return hugetlb_fault(mm, vma, address, flags);
74902
74903+#ifdef CONFIG_PAX_SEGMEXEC
74904+ vma_m = pax_find_mirror_vma(vma);
74905+ if (vma_m) {
74906+ unsigned long address_m;
74907+ pgd_t *pgd_m;
74908+ pud_t *pud_m;
74909+ pmd_t *pmd_m;
74910+
74911+ if (vma->vm_start > vma_m->vm_start) {
74912+ address_m = address;
74913+ address -= SEGMEXEC_TASK_SIZE;
74914+ vma = vma_m;
74915+ } else
74916+ address_m = address + SEGMEXEC_TASK_SIZE;
74917+
74918+ pgd_m = pgd_offset(mm, address_m);
74919+ pud_m = pud_alloc(mm, pgd_m, address_m);
74920+ if (!pud_m)
74921+ return VM_FAULT_OOM;
74922+ pmd_m = pmd_alloc(mm, pud_m, address_m);
74923+ if (!pmd_m)
74924+ return VM_FAULT_OOM;
74925+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
74926+ return VM_FAULT_OOM;
74927+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
74928+ }
74929+#endif
74930+
74931 retry:
74932 pgd = pgd_offset(mm, address);
74933 pud = pud_alloc(mm, pgd, address);
74934@@ -3565,7 +3806,7 @@ retry:
74935 * run pte_offset_map on the pmd, if an huge pmd could
74936 * materialize from under us from a different thread.
74937 */
74938- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
74939+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
74940 return VM_FAULT_OOM;
74941 /* if an huge pmd materialized from under us just retry later */
74942 if (unlikely(pmd_trans_huge(*pmd)))
74943@@ -3602,6 +3843,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74944 spin_unlock(&mm->page_table_lock);
74945 return 0;
74946 }
74947+
74948+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
74949+{
74950+ pud_t *new = pud_alloc_one(mm, address);
74951+ if (!new)
74952+ return -ENOMEM;
74953+
74954+ smp_wmb(); /* See comment in __pte_alloc */
74955+
74956+ spin_lock(&mm->page_table_lock);
74957+ if (pgd_present(*pgd)) /* Another has populated it */
74958+ pud_free(mm, new);
74959+ else
74960+ pgd_populate_kernel(mm, pgd, new);
74961+ spin_unlock(&mm->page_table_lock);
74962+ return 0;
74963+}
74964 #endif /* __PAGETABLE_PUD_FOLDED */
74965
74966 #ifndef __PAGETABLE_PMD_FOLDED
74967@@ -3632,6 +3890,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
74968 spin_unlock(&mm->page_table_lock);
74969 return 0;
74970 }
74971+
74972+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
74973+{
74974+ pmd_t *new = pmd_alloc_one(mm, address);
74975+ if (!new)
74976+ return -ENOMEM;
74977+
74978+ smp_wmb(); /* See comment in __pte_alloc */
74979+
74980+ spin_lock(&mm->page_table_lock);
74981+#ifndef __ARCH_HAS_4LEVEL_HACK
74982+ if (pud_present(*pud)) /* Another has populated it */
74983+ pmd_free(mm, new);
74984+ else
74985+ pud_populate_kernel(mm, pud, new);
74986+#else
74987+ if (pgd_present(*pud)) /* Another has populated it */
74988+ pmd_free(mm, new);
74989+ else
74990+ pgd_populate_kernel(mm, pud, new);
74991+#endif /* __ARCH_HAS_4LEVEL_HACK */
74992+ spin_unlock(&mm->page_table_lock);
74993+ return 0;
74994+}
74995 #endif /* __PAGETABLE_PMD_FOLDED */
74996
74997 int make_pages_present(unsigned long addr, unsigned long end)
74998@@ -3669,7 +3951,7 @@ static int __init gate_vma_init(void)
74999 gate_vma.vm_start = FIXADDR_USER_START;
75000 gate_vma.vm_end = FIXADDR_USER_END;
75001 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
75002- gate_vma.vm_page_prot = __P101;
75003+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
75004
75005 return 0;
75006 }
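
The pax_mirror_*_pte() helpers and the handle_mm_fault() prologue above all revolve around one invariant: under SEGMEXEC every mapping below SEGMEXEC_TASK_SIZE has a twin at a fixed offset, and whichever copy faults, both PTEs must end up in sync (with the upper, execute-only copy never dirty or writable). The address arithmetic in isolation; SEGMEXEC_TASK_SIZE is shown with an illustrative value, the real constant is arch-defined:

    #define SEGMEXEC_TASK_SIZE (3UL * 1024 * 1024 * 1024 / 2) /* illustrative: half of a 3GB i386 space */

    static unsigned long mirror_address(unsigned long address)
    {
            BUG_ON(address >= SEGMEXEC_TASK_SIZE); /* only the lower half has mirrors */
            return address + SEGMEXEC_TASK_SIZE;   /* data view to execute view */
    }
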
75007diff --git a/mm/mempolicy.c b/mm/mempolicy.c
75008index 002c281..9429765 100644
75009--- a/mm/mempolicy.c
75010+++ b/mm/mempolicy.c
75011@@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
75012 unsigned long vmstart;
75013 unsigned long vmend;
75014
75015+#ifdef CONFIG_PAX_SEGMEXEC
75016+ struct vm_area_struct *vma_m;
75017+#endif
75018+
75019 vma = find_vma(mm, start);
75020 if (!vma || vma->vm_start > start)
75021 return -EFAULT;
75022@@ -691,9 +695,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
75023 if (err)
75024 goto out;
75025 }
75026+
75027 err = vma_replace_policy(vma, new_pol);
75028 if (err)
75029 goto out;
75030+
75031+#ifdef CONFIG_PAX_SEGMEXEC
75032+ vma_m = pax_find_mirror_vma(vma);
75033+ if (vma_m) {
75034+ err = vma_replace_policy(vma_m, new_pol);
75035+ if (err)
75036+ goto out;
75037+ }
75038+#endif
75039+
75040 }
75041
75042 out:
75043@@ -1150,6 +1165,17 @@ static long do_mbind(unsigned long start, unsigned long len,
75044
75045 if (end < start)
75046 return -EINVAL;
75047+
75048+#ifdef CONFIG_PAX_SEGMEXEC
75049+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
75050+ if (end > SEGMEXEC_TASK_SIZE)
75051+ return -EINVAL;
75052+ } else
75053+#endif
75054+
75055+ if (end > TASK_SIZE)
75056+ return -EINVAL;
75057+
75058 if (end == start)
75059 return 0;
75060
75061@@ -1373,8 +1399,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
75062 */
75063 tcred = __task_cred(task);
75064 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
75065- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
75066- !capable(CAP_SYS_NICE)) {
75067+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
75068 rcu_read_unlock();
75069 err = -EPERM;
75070 goto out_put;
75071@@ -1405,6 +1430,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
75072 goto out;
75073 }
75074
75075+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75076+ if (mm != current->mm &&
75077+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
75078+ mmput(mm);
75079+ err = -EPERM;
75080+ goto out;
75081+ }
75082+#endif
75083+
75084 err = do_migrate_pages(mm, old, new,
75085 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
75086
75087diff --git a/mm/migrate.c b/mm/migrate.c
75088index 346d32d..d7adff2 100644
75089--- a/mm/migrate.c
75090+++ b/mm/migrate.c
75091@@ -1352,8 +1352,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
75092 */
75093 tcred = __task_cred(task);
75094 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
75095- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
75096- !capable(CAP_SYS_NICE)) {
75097+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
75098 rcu_read_unlock();
75099 err = -EPERM;
75100 goto out;
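
This is the same one-line tightening applied to migrate_pages() in mm/mempolicy.c above. In the stock check, any of euid==suid, euid==uid, uid==suid, or uid==uid against the target's credentials (or CAP_SYS_NICE) authorizes moving another process's pages; the patch drops the uid==uid clause, so a merely matching real UID no longer suffices and the caller must match the target's effective or saved identity instead. In boolean form:

    /* allow = euid==suid || euid==uid || uid==suid || uid==uid || CAP_SYS_NICE   (before)
     * allow = euid==suid || euid==uid || uid==suid ||             CAP_SYS_NICE   (after)
     */
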
75101diff --git a/mm/mlock.c b/mm/mlock.c
75102index c9bd528..da8d069 100644
75103--- a/mm/mlock.c
75104+++ b/mm/mlock.c
75105@@ -13,6 +13,7 @@
75106 #include <linux/pagemap.h>
75107 #include <linux/mempolicy.h>
75108 #include <linux/syscalls.h>
75109+#include <linux/security.h>
75110 #include <linux/sched.h>
75111 #include <linux/export.h>
75112 #include <linux/rmap.h>
75113@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
75114 {
75115 unsigned long nstart, end, tmp;
75116 struct vm_area_struct * vma, * prev;
75117- int error;
75118+ int error = 0;
75119
75120 VM_BUG_ON(start & ~PAGE_MASK);
75121 VM_BUG_ON(len != PAGE_ALIGN(len));
75122@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
75123 return -EINVAL;
75124 if (end == start)
75125 return 0;
75126+ if (end > TASK_SIZE)
75127+ return -EINVAL;
75128+
75129 vma = find_vma(current->mm, start);
75130 if (!vma || vma->vm_start > start)
75131 return -ENOMEM;
75132@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
75133 for (nstart = start ; ; ) {
75134 vm_flags_t newflags;
75135
75136+#ifdef CONFIG_PAX_SEGMEXEC
75137+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
75138+ break;
75139+#endif
75140+
75141 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
75142
75143 newflags = vma->vm_flags | VM_LOCKED;
75144@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
75145 lock_limit >>= PAGE_SHIFT;
75146
75147 /* check against resource limits */
75148+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
75149 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
75150 error = do_mlock(start, len, 1);
75151 up_write(&current->mm->mmap_sem);
75152@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
75153 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
75154 vm_flags_t newflags;
75155
75156+#ifdef CONFIG_PAX_SEGMEXEC
75157+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
75158+ break;
75159+#endif
75160+
75161+ BUG_ON(vma->vm_end > TASK_SIZE);
75162 newflags = vma->vm_flags | VM_LOCKED;
75163 if (!(flags & MCL_CURRENT))
75164 newflags &= ~VM_LOCKED;
75165@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
75166 lock_limit >>= PAGE_SHIFT;
75167
75168 ret = -ENOMEM;
75169+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
75170 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
75171 capable(CAP_IPC_LOCK))
75172 ret = do_mlockall(flags);
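
Both mlock() and mlockall() gain a gr_learn_resource() call just before the rlimit test; the same pattern appears at the RLIMIT_FSIZE and RLIMIT_DATA checks earlier in this section. The value about to be charged is reported first and enforced second, so grsecurity's resource-learning mode records the high-water mark a confined process actually needs even when the request is denied:

    /* report first, then enforce */
    gr_learn_resource(current, RLIMIT_MEMLOCK,
                      (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
    if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
            error = do_mlock(start, len, 1);
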
75173diff --git a/mm/mmap.c b/mm/mmap.c
75174index 9a796c4..e2c9724 100644
75175--- a/mm/mmap.c
75176+++ b/mm/mmap.c
75177@@ -31,6 +31,7 @@
75178 #include <linux/audit.h>
75179 #include <linux/khugepaged.h>
75180 #include <linux/uprobes.h>
75181+#include <linux/random.h>
75182
75183 #include <asm/uaccess.h>
75184 #include <asm/cacheflush.h>
75185@@ -47,6 +48,16 @@
75186 #define arch_rebalance_pgtables(addr, len) (addr)
75187 #endif
75188
75189+static inline void verify_mm_writelocked(struct mm_struct *mm)
75190+{
75191+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
75192+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
75193+ up_read(&mm->mmap_sem);
75194+ BUG();
75195+ }
75196+#endif
75197+}
75198+
75199 static void unmap_region(struct mm_struct *mm,
75200 struct vm_area_struct *vma, struct vm_area_struct *prev,
75201 unsigned long start, unsigned long end);
75202@@ -66,22 +77,32 @@ static void unmap_region(struct mm_struct *mm,
75203 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
75204 *
75205 */
75206-pgprot_t protection_map[16] = {
75207+pgprot_t protection_map[16] __read_only = {
75208 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
75209 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
75210 };
75211
75212-pgprot_t vm_get_page_prot(unsigned long vm_flags)
75213+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
75214 {
75215- return __pgprot(pgprot_val(protection_map[vm_flags &
75216+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
75217 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
75218 pgprot_val(arch_vm_get_page_prot(vm_flags)));
75219+
75220+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75221+ if (!(__supported_pte_mask & _PAGE_NX) &&
75222+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
75223+ (vm_flags & (VM_READ | VM_WRITE)))
75224+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
75225+#endif
75226+
75227+ return prot;
75228 }
75229 EXPORT_SYMBOL(vm_get_page_prot);
75230
75231 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
75232 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
75233 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
75234+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
75235 /*
75236 * Make sure vm_committed_as in one cacheline and not cacheline shared with
75237 * other variables. It can be updated by several CPUs frequently.
75238@@ -223,6 +244,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
75239 struct vm_area_struct *next = vma->vm_next;
75240
75241 might_sleep();
75242+ BUG_ON(vma->vm_mirror);
75243 if (vma->vm_ops && vma->vm_ops->close)
75244 vma->vm_ops->close(vma);
75245 if (vma->vm_file)
75246@@ -266,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
75247 * not page aligned -Ram Gupta
75248 */
75249 rlim = rlimit(RLIMIT_DATA);
75250+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
75251 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
75252 (mm->end_data - mm->start_data) > rlim)
75253 goto out;
75254@@ -736,6 +759,12 @@ static int
75255 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
75256 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
75257 {
75258+
75259+#ifdef CONFIG_PAX_SEGMEXEC
75260+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
75261+ return 0;
75262+#endif
75263+
75264 if (is_mergeable_vma(vma, file, vm_flags) &&
75265 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
75266 if (vma->vm_pgoff == vm_pgoff)
75267@@ -755,6 +784,12 @@ static int
75268 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
75269 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
75270 {
75271+
75272+#ifdef CONFIG_PAX_SEGMEXEC
75273+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
75274+ return 0;
75275+#endif
75276+
75277 if (is_mergeable_vma(vma, file, vm_flags) &&
75278 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
75279 pgoff_t vm_pglen;
75280@@ -797,13 +832,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
75281 struct vm_area_struct *vma_merge(struct mm_struct *mm,
75282 struct vm_area_struct *prev, unsigned long addr,
75283 unsigned long end, unsigned long vm_flags,
75284- struct anon_vma *anon_vma, struct file *file,
75285+ struct anon_vma *anon_vma, struct file *file,
75286 pgoff_t pgoff, struct mempolicy *policy)
75287 {
75288 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
75289 struct vm_area_struct *area, *next;
75290 int err;
75291
75292+#ifdef CONFIG_PAX_SEGMEXEC
75293+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
75294+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
75295+
75296+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
75297+#endif
75298+
75299 /*
75300 * We later require that vma->vm_flags == vm_flags,
75301 * so this tests vma->vm_flags & VM_SPECIAL, too.
75302@@ -819,6 +861,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75303 if (next && next->vm_end == end) /* cases 6, 7, 8 */
75304 next = next->vm_next;
75305
75306+#ifdef CONFIG_PAX_SEGMEXEC
75307+ if (prev)
75308+ prev_m = pax_find_mirror_vma(prev);
75309+ if (area)
75310+ area_m = pax_find_mirror_vma(area);
75311+ if (next)
75312+ next_m = pax_find_mirror_vma(next);
75313+#endif
75314+
75315 /*
75316 * Can it merge with the predecessor?
75317 */
75318@@ -838,9 +889,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75319 /* cases 1, 6 */
75320 err = vma_adjust(prev, prev->vm_start,
75321 next->vm_end, prev->vm_pgoff, NULL);
75322- } else /* cases 2, 5, 7 */
75323+
75324+#ifdef CONFIG_PAX_SEGMEXEC
75325+ if (!err && prev_m)
75326+ err = vma_adjust(prev_m, prev_m->vm_start,
75327+ next_m->vm_end, prev_m->vm_pgoff, NULL);
75328+#endif
75329+
75330+ } else { /* cases 2, 5, 7 */
75331 err = vma_adjust(prev, prev->vm_start,
75332 end, prev->vm_pgoff, NULL);
75333+
75334+#ifdef CONFIG_PAX_SEGMEXEC
75335+ if (!err && prev_m)
75336+ err = vma_adjust(prev_m, prev_m->vm_start,
75337+ end_m, prev_m->vm_pgoff, NULL);
75338+#endif
75339+
75340+ }
75341 if (err)
75342 return NULL;
75343 khugepaged_enter_vma_merge(prev);
75344@@ -854,12 +920,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
75345 mpol_equal(policy, vma_policy(next)) &&
75346 can_vma_merge_before(next, vm_flags,
75347 anon_vma, file, pgoff+pglen)) {
75348- if (prev && addr < prev->vm_end) /* case 4 */
75349+ if (prev && addr < prev->vm_end) { /* case 4 */
75350 err = vma_adjust(prev, prev->vm_start,
75351 addr, prev->vm_pgoff, NULL);
75352- else /* cases 3, 8 */
75353+
75354+#ifdef CONFIG_PAX_SEGMEXEC
75355+ if (!err && prev_m)
75356+ err = vma_adjust(prev_m, prev_m->vm_start,
75357+ addr_m, prev_m->vm_pgoff, NULL);
75358+#endif
75359+
75360+ } else { /* cases 3, 8 */
75361 err = vma_adjust(area, addr, next->vm_end,
75362 next->vm_pgoff - pglen, NULL);
75363+
75364+#ifdef CONFIG_PAX_SEGMEXEC
75365+ if (!err && area_m)
75366+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
75367+ next_m->vm_pgoff - pglen, NULL);
75368+#endif
75369+
75370+ }
75371 if (err)
75372 return NULL;
75373 khugepaged_enter_vma_merge(area);
75374@@ -968,16 +1049,13 @@ none:
75375 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
75376 struct file *file, long pages)
75377 {
75378- const unsigned long stack_flags
75379- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
75380-
75381 mm->total_vm += pages;
75382
75383 if (file) {
75384 mm->shared_vm += pages;
75385 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
75386 mm->exec_vm += pages;
75387- } else if (flags & stack_flags)
75388+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
75389 mm->stack_vm += pages;
75390 }
75391 #endif /* CONFIG_PROC_FS */
75392@@ -1013,7 +1091,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75393 * (the exception is when the underlying filesystem is noexec
75394 * mounted, in which case we dont add PROT_EXEC.)
75395 */
75396- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
75397+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
75398 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
75399 prot |= PROT_EXEC;
75400
75401@@ -1039,7 +1117,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75402 /* Obtain the address to map to. we verify (or select) it and ensure
75403 * that it represents a valid section of the address space.
75404 */
75405- addr = get_unmapped_area(file, addr, len, pgoff, flags);
75406+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
75407 if (addr & ~PAGE_MASK)
75408 return addr;
75409
75410@@ -1050,6 +1128,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75411 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
75412 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
75413
75414+#ifdef CONFIG_PAX_MPROTECT
75415+ if (mm->pax_flags & MF_PAX_MPROTECT) {
75416+#ifndef CONFIG_PAX_MPROTECT_COMPAT
75417+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
75418+ gr_log_rwxmmap(file);
75419+
75420+#ifdef CONFIG_PAX_EMUPLT
75421+ vm_flags &= ~VM_EXEC;
75422+#else
75423+ return -EPERM;
75424+#endif
75425+
75426+ }
75427+
75428+ if (!(vm_flags & VM_EXEC))
75429+ vm_flags &= ~VM_MAYEXEC;
75430+#else
75431+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
75432+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
75433+#endif
75434+ else
75435+ vm_flags &= ~VM_MAYWRITE;
75436+ }
75437+#endif
75438+
75439+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75440+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
75441+ vm_flags &= ~VM_PAGEEXEC;
75442+#endif
75443+
75444 if (flags & MAP_LOCKED)
75445 if (!can_do_mlock())
75446 return -EPERM;
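Under MF_PAX_MPROTECT the block above commits every new mapping to one of two futures at mmap() time: a request without PROT_EXEC loses VM_MAYEXEC, an executable one loses VM_MAYWRITE, and a W|X request is demoted (with EMUPLT) or refused outright, so no later mprotect() can produce a writable-and-executable region. A standalone sketch of the non-COMPAT branch; the VM_* bit values are stand-ins, not the kernel's:

#include <stdio.h>

#define VM_WRITE    0x2UL
#define VM_EXEC     0x4UL
#define VM_MAYWRITE 0x20UL
#define VM_MAYEXEC  0x40UL

static unsigned long pax_mask(unsigned long f)
{
	if ((f & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
		f &= ~VM_EXEC;		/* EMUPLT demotion; else -EPERM */
	if (!(f & VM_EXEC))
		f &= ~VM_MAYEXEC;	/* can never become executable */
	else
		f &= ~VM_MAYWRITE;	/* can never become writable */
	return f;
}

int main(void)
{
	unsigned long f = pax_mask(VM_WRITE | VM_EXEC |
				   VM_MAYWRITE | VM_MAYEXEC);
	printf("W|X demoted: mayexec=%lu maywrite=%lu\n",
	       f & VM_MAYEXEC, f & VM_MAYWRITE);
	return 0;
}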
75447@@ -1061,6 +1169,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75448 locked += mm->locked_vm;
75449 lock_limit = rlimit(RLIMIT_MEMLOCK);
75450 lock_limit >>= PAGE_SHIFT;
75451+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75452 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
75453 return -EAGAIN;
75454 }
75455@@ -1127,6 +1236,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75456 }
75457 }
75458
75459+ if (!gr_acl_handle_mmap(file, prot))
75460+ return -EACCES;
75461+
75462 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
75463 }
75464
75465@@ -1203,7 +1315,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
75466 vm_flags_t vm_flags = vma->vm_flags;
75467
75468 /* If it was private or non-writable, the write bit is already clear */
75469- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
75470+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
75471 return 0;
75472
75473 /* The backer wishes to know when pages are first written to? */
75474@@ -1252,13 +1364,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
75475 unsigned long charged = 0;
75476 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
75477
75478+#ifdef CONFIG_PAX_SEGMEXEC
75479+ struct vm_area_struct *vma_m = NULL;
75480+#endif
75481+
75482+ /*
75483+ * mm->mmap_sem is required to protect against another thread
75484+ * changing the mappings in case we sleep.
75485+ */
75486+ verify_mm_writelocked(mm);
75487+
75488 /* Clear old maps */
75489 error = -ENOMEM;
75490-munmap_back:
75491 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
75492 if (do_munmap(mm, addr, len))
75493 return -ENOMEM;
75494- goto munmap_back;
75495+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
75496 }
75497
75498 /* Check against address space limit. */
75499@@ -1307,6 +1428,16 @@ munmap_back:
75500 goto unacct_error;
75501 }
75502
75503+#ifdef CONFIG_PAX_SEGMEXEC
75504+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
75505+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
75506+ if (!vma_m) {
75507+ error = -ENOMEM;
75508+ goto free_vma;
75509+ }
75510+ }
75511+#endif
75512+
75513 vma->vm_mm = mm;
75514 vma->vm_start = addr;
75515 vma->vm_end = addr + len;
75516@@ -1331,6 +1462,13 @@ munmap_back:
75517 if (error)
75518 goto unmap_and_free_vma;
75519
75520+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
75521+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
75522+ vma->vm_flags |= VM_PAGEEXEC;
75523+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
75524+ }
75525+#endif
75526+
75527 /* Can addr have changed??
75528 *
75529 * Answer: Yes, several device drivers can do it in their
75530@@ -1365,6 +1503,11 @@ munmap_back:
75531 vma_link(mm, vma, prev, rb_link, rb_parent);
75532 file = vma->vm_file;
75533
75534+#ifdef CONFIG_PAX_SEGMEXEC
75535+ if (vma_m)
75536+ BUG_ON(pax_mirror_vma(vma_m, vma));
75537+#endif
75538+
75539 /* Once vma denies write, undo our temporary denial count */
75540 if (correct_wcount)
75541 atomic_inc(&inode->i_writecount);
75542@@ -1372,6 +1515,7 @@ out:
75543 perf_event_mmap(vma);
75544
75545 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
75546+ track_exec_limit(mm, addr, addr + len, vm_flags);
75547 if (vm_flags & VM_LOCKED) {
75548 if (!mlock_vma_pages_range(vma, addr, addr + len))
75549 mm->locked_vm += (len >> PAGE_SHIFT);
75550@@ -1393,6 +1537,12 @@ unmap_and_free_vma:
75551 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
75552 charged = 0;
75553 free_vma:
75554+
75555+#ifdef CONFIG_PAX_SEGMEXEC
75556+ if (vma_m)
75557+ kmem_cache_free(vm_area_cachep, vma_m);
75558+#endif
75559+
75560 kmem_cache_free(vm_area_cachep, vma);
75561 unacct_error:
75562 if (charged)
75563@@ -1400,6 +1550,62 @@ unacct_error:
75564 return error;
75565 }
75566
75567+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
75568+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
75569+{
75570+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
75571+ return (random32() & 0xFF) << PAGE_SHIFT;
75572+
75573+ return 0;
75574+}
75575+#endif
75576+
75577+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
75578+{
75579+ if (!vma) {
75580+#ifdef CONFIG_STACK_GROWSUP
75581+ if (addr > sysctl_heap_stack_gap)
75582+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
75583+ else
75584+ vma = find_vma(current->mm, 0);
75585+ if (vma && (vma->vm_flags & VM_GROWSUP))
75586+ return false;
75587+#endif
75588+ return true;
75589+ }
75590+
75591+ if (addr + len > vma->vm_start)
75592+ return false;
75593+
75594+ if (vma->vm_flags & VM_GROWSDOWN)
75595+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
75596+#ifdef CONFIG_STACK_GROWSUP
75597+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
75598+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
75599+#endif
75600+ else if (offset)
75601+ return offset <= vma->vm_start - addr - len;
75602+
75603+ return true;
75604+}
75605+
75606+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
75607+{
75608+ if (vma->vm_start < len)
75609+ return -ENOMEM;
75610+
75611+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
75612+ if (offset <= vma->vm_start - len)
75613+ return vma->vm_start - len - offset;
75614+ else
75615+ return -ENOMEM;
75616+ }
75617+
75618+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
75619+ return vma->vm_start - len - sysctl_heap_stack_gap;
75620+ return -ENOMEM;
75621+}
75622+
75623 /* Get an address range which is currently unmapped.
75624 * For shmat() with addr=0.
75625 *
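check_heap_stack_gap() above accepts a candidate window [addr, addr+len) only if it neither overlaps the next VMA nor lands within sysctl_heap_stack_gap bytes of a stack growing toward it, and skip_heap_stack_gap() yields the next candidate below a VMA. A standalone model of the growing-down case, assuming the 64 KiB default gap and 64-bit addresses:

#include <stdbool.h>
#include <stdio.h>

#define GAP (64UL * 1024)	/* sysctl_heap_stack_gap default */

/* candidate [addr, addr+len) against a stack VMA starting at vm_start
 * that grows down toward lower addresses */
static bool fits_below_stack(unsigned long addr, unsigned long len,
			     unsigned long vm_start)
{
	if (addr + len > vm_start)		/* overlaps outright */
		return false;
	return vm_start - addr - len >= GAP;	/* full gap must remain */
}

int main(void)
{
	unsigned long stack = 0x7fff00000000UL;

	printf("%d\n", fits_below_stack(stack - GAP - 4096, 4096, stack)); /* 1 */
	printf("%d\n", fits_below_stack(stack - 4096, 4096, stack));       /* 0 */
	return 0;
}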
75626@@ -1419,6 +1625,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
75627 struct mm_struct *mm = current->mm;
75628 struct vm_area_struct *vma;
75629 unsigned long start_addr;
75630+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
75631
75632 if (len > TASK_SIZE)
75633 return -ENOMEM;
75634@@ -1426,18 +1633,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
75635 if (flags & MAP_FIXED)
75636 return addr;
75637
75638+#ifdef CONFIG_PAX_RANDMMAP
75639+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75640+#endif
75641+
75642 if (addr) {
75643 addr = PAGE_ALIGN(addr);
75644- vma = find_vma(mm, addr);
75645- if (TASK_SIZE - len >= addr &&
75646- (!vma || addr + len <= vma->vm_start))
75647- return addr;
75648+ if (TASK_SIZE - len >= addr) {
75649+ vma = find_vma(mm, addr);
75650+ if (check_heap_stack_gap(vma, addr, len, offset))
75651+ return addr;
75652+ }
75653 }
75654 if (len > mm->cached_hole_size) {
75655- start_addr = addr = mm->free_area_cache;
75656+ start_addr = addr = mm->free_area_cache;
75657 } else {
75658- start_addr = addr = TASK_UNMAPPED_BASE;
75659- mm->cached_hole_size = 0;
75660+ start_addr = addr = mm->mmap_base;
75661+ mm->cached_hole_size = 0;
75662 }
75663
75664 full_search:
75665@@ -1448,34 +1660,40 @@ full_search:
75666 * Start a new search - just in case we missed
75667 * some holes.
75668 */
75669- if (start_addr != TASK_UNMAPPED_BASE) {
75670- addr = TASK_UNMAPPED_BASE;
75671- start_addr = addr;
75672+ if (start_addr != mm->mmap_base) {
75673+ start_addr = addr = mm->mmap_base;
75674 mm->cached_hole_size = 0;
75675 goto full_search;
75676 }
75677 return -ENOMEM;
75678 }
75679- if (!vma || addr + len <= vma->vm_start) {
75680- /*
75681- * Remember the place where we stopped the search:
75682- */
75683- mm->free_area_cache = addr + len;
75684- return addr;
75685- }
75686+ if (check_heap_stack_gap(vma, addr, len, offset))
75687+ break;
75688 if (addr + mm->cached_hole_size < vma->vm_start)
75689 mm->cached_hole_size = vma->vm_start - addr;
75690 addr = vma->vm_end;
75691 }
75692+
75693+ /*
75694+ * Remember the place where we stopped the search:
75695+ */
75696+ mm->free_area_cache = addr + len;
75697+ return addr;
75698 }
75699 #endif
75700
75701 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
75702 {
75703+
75704+#ifdef CONFIG_PAX_SEGMEXEC
75705+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75706+ return;
75707+#endif
75708+
75709 /*
75710 * Is this a new hole at the lowest possible address?
75711 */
75712- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
75713+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
75714 mm->free_area_cache = addr;
75715 }
75716
75717@@ -1491,7 +1709,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75718 {
75719 struct vm_area_struct *vma;
75720 struct mm_struct *mm = current->mm;
75721- unsigned long addr = addr0, start_addr;
75722+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
75723+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
75724
75725 /* requested length too big for entire address space */
75726 if (len > TASK_SIZE)
75727@@ -1500,13 +1719,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
75728 if (flags & MAP_FIXED)
75729 return addr;
75730
75731+#ifdef CONFIG_PAX_RANDMMAP
75732+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
75733+#endif
75734+
75735 /* requesting a specific address */
75736 if (addr) {
75737 addr = PAGE_ALIGN(addr);
75738- vma = find_vma(mm, addr);
75739- if (TASK_SIZE - len >= addr &&
75740- (!vma || addr + len <= vma->vm_start))
75741- return addr;
75742+ if (TASK_SIZE - len >= addr) {
75743+ vma = find_vma(mm, addr);
75744+ if (check_heap_stack_gap(vma, addr, len, offset))
75745+ return addr;
75746+ }
75747 }
75748
75749 /* check if free_area_cache is useful for us */
75750@@ -1530,7 +1754,7 @@ try_again:
75751 * return with success:
75752 */
75753 vma = find_vma(mm, addr);
75754- if (!vma || addr+len <= vma->vm_start)
75755+ if (check_heap_stack_gap(vma, addr, len, offset))
75756 /* remember the address as a hint for next time */
75757 return (mm->free_area_cache = addr);
75758
75759@@ -1539,8 +1763,8 @@ try_again:
75760 mm->cached_hole_size = vma->vm_start - addr;
75761
75762 /* try just below the current vma->vm_start */
75763- addr = vma->vm_start-len;
75764- } while (len < vma->vm_start);
75765+ addr = skip_heap_stack_gap(vma, len, offset);
75766+ } while (!IS_ERR_VALUE(addr));
75767
75768 fail:
75769 /*
75770@@ -1563,13 +1787,21 @@ fail:
75771 * can happen with large stack limits and large mmap()
75772 * allocations.
75773 */
75774+ mm->mmap_base = TASK_UNMAPPED_BASE;
75775+
75776+#ifdef CONFIG_PAX_RANDMMAP
75777+ if (mm->pax_flags & MF_PAX_RANDMMAP)
75778+ mm->mmap_base += mm->delta_mmap;
75779+#endif
75780+
75781+ mm->free_area_cache = mm->mmap_base;
75782 mm->cached_hole_size = ~0UL;
75783- mm->free_area_cache = TASK_UNMAPPED_BASE;
75784 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
75785 /*
75786 * Restore the topdown base:
75787 */
75788- mm->free_area_cache = mm->mmap_base;
75789+ mm->mmap_base = base;
75790+ mm->free_area_cache = base;
75791 mm->cached_hole_size = ~0UL;
75792
75793 return addr;
75794@@ -1578,6 +1810,12 @@ fail:
75795
75796 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75797 {
75798+
75799+#ifdef CONFIG_PAX_SEGMEXEC
75800+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
75801+ return;
75802+#endif
75803+
75804 /*
75805 * Is this a new hole at the highest possible address?
75806 */
75807@@ -1585,8 +1823,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
75808 mm->free_area_cache = addr;
75809
75810 /* dont allow allocations above current base */
75811- if (mm->free_area_cache > mm->mmap_base)
75812+ if (mm->free_area_cache > mm->mmap_base) {
75813 mm->free_area_cache = mm->mmap_base;
75814+ mm->cached_hole_size = ~0UL;
75815+ }
75816 }
75817
75818 unsigned long
75819@@ -1685,6 +1925,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
75820 return vma;
75821 }
75822
75823+#ifdef CONFIG_PAX_SEGMEXEC
75824+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
75825+{
75826+ struct vm_area_struct *vma_m;
75827+
75828+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
75829+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
75830+ BUG_ON(vma->vm_mirror);
75831+ return NULL;
75832+ }
75833+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
75834+ vma_m = vma->vm_mirror;
75835+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
75836+ BUG_ON(vma->vm_file != vma_m->vm_file);
75837+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
75838+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
75839+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
75840+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
75841+ return vma_m;
75842+}
75843+#endif
75844+
75845 /*
75846 * Verify that the stack growth is acceptable and
75847 * update accounting. This is shared with both the
75848@@ -1701,6 +1963,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75849 return -ENOMEM;
75850
75851 /* Stack limit test */
75852+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
75853 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
75854 return -ENOMEM;
75855
75856@@ -1711,6 +1974,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75857 locked = mm->locked_vm + grow;
75858 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
75859 limit >>= PAGE_SHIFT;
75860+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
75861 if (locked > limit && !capable(CAP_IPC_LOCK))
75862 return -ENOMEM;
75863 }
75864@@ -1740,37 +2004,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
75865 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
75866 * vma is the last one with address > vma->vm_end. Have to extend vma.
75867 */
75868+#ifndef CONFIG_IA64
75869+static
75870+#endif
75871 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75872 {
75873 int error;
75874+ bool locknext;
75875
75876 if (!(vma->vm_flags & VM_GROWSUP))
75877 return -EFAULT;
75878
75879+ /* Also guard against wrapping around to address 0. */
75880+ if (address < PAGE_ALIGN(address+1))
75881+ address = PAGE_ALIGN(address+1);
75882+ else
75883+ return -ENOMEM;
75884+
75885 /*
75886 * We must make sure the anon_vma is allocated
75887 * so that the anon_vma locking is not a noop.
75888 */
75889 if (unlikely(anon_vma_prepare(vma)))
75890 return -ENOMEM;
75891+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
75892+ if (locknext && anon_vma_prepare(vma->vm_next))
75893+ return -ENOMEM;
75894 vma_lock_anon_vma(vma);
75895+ if (locknext)
75896+ vma_lock_anon_vma(vma->vm_next);
75897
75898 /*
75899 * vma->vm_start/vm_end cannot change under us because the caller
75900 * is required to hold the mmap_sem in read mode. We need the
75901- * anon_vma lock to serialize against concurrent expand_stacks.
75902- * Also guard against wrapping around to address 0.
75903+ * anon_vma locks to serialize against concurrent expand_stacks
75904+ * and expand_upwards.
75905 */
75906- if (address < PAGE_ALIGN(address+4))
75907- address = PAGE_ALIGN(address+4);
75908- else {
75909- vma_unlock_anon_vma(vma);
75910- return -ENOMEM;
75911- }
75912 error = 0;
75913
75914 /* Somebody else might have raced and expanded it already */
75915- if (address > vma->vm_end) {
75916+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
75917+ error = -ENOMEM;
75918+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
75919 unsigned long size, grow;
75920
75921 size = address - vma->vm_start;
75922@@ -1787,6 +2062,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
75923 }
75924 }
75925 }
75926+ if (locknext)
75927+ vma_unlock_anon_vma(vma->vm_next);
75928 vma_unlock_anon_vma(vma);
75929 khugepaged_enter_vma_merge(vma);
75930 validate_mm(vma->vm_mm);
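The reordered guard in expand_upwards() now rounds the faulting address up to the next page boundary before any anon_vma lock is taken, turning a wrap past the top of the address space into -ENOMEM up front (the vanilla code performed the same check after locking, using address+4). A quick model assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ok = 0x1234UL;
	unsigned long top = ~0UL;	/* last byte of the space */

	/* normal case: address < PAGE_ALIGN(address + 1), so it rounds up */
	printf("0x%lx -> 0x%lx\n", ok, PAGE_ALIGN(ok + 1));	/* 0x2000 */

	/* wrap case: PAGE_ALIGN(top + 1) == 0, the test fails -> -ENOMEM */
	printf("wrap detected: %d\n", !(top < PAGE_ALIGN(top + 1)));	/* 1 */
	return 0;
}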
75931@@ -1801,6 +2078,8 @@ int expand_downwards(struct vm_area_struct *vma,
75932 unsigned long address)
75933 {
75934 int error;
75935+ bool lockprev = false;
75936+ struct vm_area_struct *prev;
75937
75938 /*
75939 * We must make sure the anon_vma is allocated
75940@@ -1814,6 +2093,15 @@ int expand_downwards(struct vm_area_struct *vma,
75941 if (error)
75942 return error;
75943
75944+ prev = vma->vm_prev;
75945+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
75946+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
75947+#endif
75948+ if (lockprev && anon_vma_prepare(prev))
75949+ return -ENOMEM;
75950+ if (lockprev)
75951+ vma_lock_anon_vma(prev);
75952+
75953 vma_lock_anon_vma(vma);
75954
75955 /*
75956@@ -1823,9 +2111,17 @@ int expand_downwards(struct vm_area_struct *vma,
75957 */
75958
75959 /* Somebody else might have raced and expanded it already */
75960- if (address < vma->vm_start) {
75961+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
75962+ error = -ENOMEM;
75963+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
75964 unsigned long size, grow;
75965
75966+#ifdef CONFIG_PAX_SEGMEXEC
75967+ struct vm_area_struct *vma_m;
75968+
75969+ vma_m = pax_find_mirror_vma(vma);
75970+#endif
75971+
75972 size = vma->vm_end - address;
75973 grow = (vma->vm_start - address) >> PAGE_SHIFT;
75974
75975@@ -1837,6 +2133,17 @@ int expand_downwards(struct vm_area_struct *vma,
75976 vma->vm_start = address;
75977 vma->vm_pgoff -= grow;
75978 anon_vma_interval_tree_post_update_vma(vma);
75979+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
75980+
75981+#ifdef CONFIG_PAX_SEGMEXEC
75982+ if (vma_m) {
75983+ anon_vma_interval_tree_pre_update_vma(vma_m);
75984+ vma_m->vm_start -= grow << PAGE_SHIFT;
75985+ vma_m->vm_pgoff -= grow;
75986+ anon_vma_interval_tree_post_update_vma(vma_m);
75987+ }
75988+#endif
75989+
75990 perf_event_mmap(vma);
75991 }
75992 }
75993@@ -1914,6 +2221,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
75994 do {
75995 long nrpages = vma_pages(vma);
75996
75997+#ifdef CONFIG_PAX_SEGMEXEC
75998+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
75999+ vma = remove_vma(vma);
76000+ continue;
76001+ }
76002+#endif
76003+
76004 if (vma->vm_flags & VM_ACCOUNT)
76005 nr_accounted += nrpages;
76006 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
76007@@ -1959,6 +2273,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
76008 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
76009 vma->vm_prev = NULL;
76010 do {
76011+
76012+#ifdef CONFIG_PAX_SEGMEXEC
76013+ if (vma->vm_mirror) {
76014+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
76015+ vma->vm_mirror->vm_mirror = NULL;
76016+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
76017+ vma->vm_mirror = NULL;
76018+ }
76019+#endif
76020+
76021 rb_erase(&vma->vm_rb, &mm->mm_rb);
76022 mm->map_count--;
76023 tail_vma = vma;
76024@@ -1987,14 +2311,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76025 struct vm_area_struct *new;
76026 int err = -ENOMEM;
76027
76028+#ifdef CONFIG_PAX_SEGMEXEC
76029+ struct vm_area_struct *vma_m, *new_m = NULL;
76030+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
76031+#endif
76032+
76033 if (is_vm_hugetlb_page(vma) && (addr &
76034 ~(huge_page_mask(hstate_vma(vma)))))
76035 return -EINVAL;
76036
76037+#ifdef CONFIG_PAX_SEGMEXEC
76038+ vma_m = pax_find_mirror_vma(vma);
76039+#endif
76040+
76041 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
76042 if (!new)
76043 goto out_err;
76044
76045+#ifdef CONFIG_PAX_SEGMEXEC
76046+ if (vma_m) {
76047+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
76048+ if (!new_m) {
76049+ kmem_cache_free(vm_area_cachep, new);
76050+ goto out_err;
76051+ }
76052+ }
76053+#endif
76054+
76055 /* most fields are the same, copy all, and then fixup */
76056 *new = *vma;
76057
76058@@ -2007,6 +2350,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76059 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
76060 }
76061
76062+#ifdef CONFIG_PAX_SEGMEXEC
76063+ if (vma_m) {
76064+ *new_m = *vma_m;
76065+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
76066+ new_m->vm_mirror = new;
76067+ new->vm_mirror = new_m;
76068+
76069+ if (new_below)
76070+ new_m->vm_end = addr_m;
76071+ else {
76072+ new_m->vm_start = addr_m;
76073+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
76074+ }
76075+ }
76076+#endif
76077+
76078 pol = mpol_dup(vma_policy(vma));
76079 if (IS_ERR(pol)) {
76080 err = PTR_ERR(pol);
76081@@ -2029,6 +2388,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76082 else
76083 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
76084
76085+#ifdef CONFIG_PAX_SEGMEXEC
76086+ if (!err && vma_m) {
76087+ if (anon_vma_clone(new_m, vma_m))
76088+ goto out_free_mpol;
76089+
76090+ mpol_get(pol);
76091+ vma_set_policy(new_m, pol);
76092+
76093+ if (new_m->vm_file)
76094+ get_file(new_m->vm_file);
76095+
76096+ if (new_m->vm_ops && new_m->vm_ops->open)
76097+ new_m->vm_ops->open(new_m);
76098+
76099+ if (new_below)
76100+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
76101+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
76102+ else
76103+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
76104+
76105+ if (err) {
76106+ if (new_m->vm_ops && new_m->vm_ops->close)
76107+ new_m->vm_ops->close(new_m);
76108+ if (new_m->vm_file)
76109+ fput(new_m->vm_file);
76110+ mpol_put(pol);
76111+ }
76112+ }
76113+#endif
76114+
76115 /* Success. */
76116 if (!err)
76117 return 0;
76118@@ -2038,10 +2427,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76119 new->vm_ops->close(new);
76120 if (new->vm_file)
76121 fput(new->vm_file);
76122- unlink_anon_vmas(new);
76123 out_free_mpol:
76124 mpol_put(pol);
76125 out_free_vma:
76126+
76127+#ifdef CONFIG_PAX_SEGMEXEC
76128+ if (new_m) {
76129+ unlink_anon_vmas(new_m);
76130+ kmem_cache_free(vm_area_cachep, new_m);
76131+ }
76132+#endif
76133+
76134+ unlink_anon_vmas(new);
76135 kmem_cache_free(vm_area_cachep, new);
76136 out_err:
76137 return err;
76138@@ -2054,6 +2451,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
76139 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76140 unsigned long addr, int new_below)
76141 {
76142+
76143+#ifdef CONFIG_PAX_SEGMEXEC
76144+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76145+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
76146+ if (mm->map_count >= sysctl_max_map_count-1)
76147+ return -ENOMEM;
76148+ } else
76149+#endif
76150+
76151 if (mm->map_count >= sysctl_max_map_count)
76152 return -ENOMEM;
76153
76154@@ -2065,11 +2471,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76155 * work. This now handles partial unmappings.
76156 * Jeremy Fitzhardinge <jeremy@goop.org>
76157 */
76158+#ifdef CONFIG_PAX_SEGMEXEC
76159 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76160 {
76161+ int ret = __do_munmap(mm, start, len);
76162+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
76163+ return ret;
76164+
76165+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
76166+}
76167+
76168+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76169+#else
76170+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76171+#endif
76172+{
76173 unsigned long end;
76174 struct vm_area_struct *vma, *prev, *last;
76175
76176+ /*
76177+ * mm->mmap_sem is required to protect against another thread
76178+ * changing the mappings in case we sleep.
76179+ */
76180+ verify_mm_writelocked(mm);
76181+
76182 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
76183 return -EINVAL;
76184
76185@@ -2144,6 +2569,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
76186 /* Fix up all other VM information */
76187 remove_vma_list(mm, vma);
76188
76189+ track_exec_limit(mm, start, end, 0UL);
76190+
76191 return 0;
76192 }
76193
76194@@ -2152,6 +2579,13 @@ int vm_munmap(unsigned long start, size_t len)
76195 int ret;
76196 struct mm_struct *mm = current->mm;
76197
76198+
76199+#ifdef CONFIG_PAX_SEGMEXEC
76200+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
76201+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
76202+ return -EINVAL;
76203+#endif
76204+
76205 down_write(&mm->mmap_sem);
76206 ret = do_munmap(mm, start, len);
76207 up_write(&mm->mmap_sem);
76208@@ -2165,16 +2599,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
76209 return vm_munmap(addr, len);
76210 }
76211
76212-static inline void verify_mm_writelocked(struct mm_struct *mm)
76213-{
76214-#ifdef CONFIG_DEBUG_VM
76215- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
76216- WARN_ON(1);
76217- up_read(&mm->mmap_sem);
76218- }
76219-#endif
76220-}
76221-
76222 /*
76223 * this is really a simplified "do_mmap". it only handles
76224 * anonymous maps. eventually we may be able to do some
76225@@ -2188,6 +2612,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76226 struct rb_node ** rb_link, * rb_parent;
76227 pgoff_t pgoff = addr >> PAGE_SHIFT;
76228 int error;
76229+ unsigned long charged;
76230
76231 len = PAGE_ALIGN(len);
76232 if (!len)
76233@@ -2195,16 +2620,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76234
76235 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
76236
76237+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
76238+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
76239+ flags &= ~VM_EXEC;
76240+
76241+#ifdef CONFIG_PAX_MPROTECT
76242+ if (mm->pax_flags & MF_PAX_MPROTECT)
76243+ flags &= ~VM_MAYEXEC;
76244+#endif
76245+
76246+ }
76247+#endif
76248+
76249 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
76250 if (error & ~PAGE_MASK)
76251 return error;
76252
76253+ charged = len >> PAGE_SHIFT;
76254+
76255 /*
76256 * mlock MCL_FUTURE?
76257 */
76258 if (mm->def_flags & VM_LOCKED) {
76259 unsigned long locked, lock_limit;
76260- locked = len >> PAGE_SHIFT;
76261+ locked = charged;
76262 locked += mm->locked_vm;
76263 lock_limit = rlimit(RLIMIT_MEMLOCK);
76264 lock_limit >>= PAGE_SHIFT;
76265@@ -2221,21 +2660,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76266 /*
76267 * Clear old maps. this also does some error checking for us
76268 */
76269- munmap_back:
76270 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
76271 if (do_munmap(mm, addr, len))
76272 return -ENOMEM;
76273- goto munmap_back;
76274+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
76275 }
76276
76277 /* Check against address space limits *after* clearing old maps... */
76278- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
76279+ if (!may_expand_vm(mm, charged))
76280 return -ENOMEM;
76281
76282 if (mm->map_count > sysctl_max_map_count)
76283 return -ENOMEM;
76284
76285- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
76286+ if (security_vm_enough_memory_mm(mm, charged))
76287 return -ENOMEM;
76288
76289 /* Can we just expand an old private anonymous mapping? */
76290@@ -2249,7 +2687,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76291 */
76292 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76293 if (!vma) {
76294- vm_unacct_memory(len >> PAGE_SHIFT);
76295+ vm_unacct_memory(charged);
76296 return -ENOMEM;
76297 }
76298
76299@@ -2263,11 +2701,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
76300 vma_link(mm, vma, prev, rb_link, rb_parent);
76301 out:
76302 perf_event_mmap(vma);
76303- mm->total_vm += len >> PAGE_SHIFT;
76304+ mm->total_vm += charged;
76305 if (flags & VM_LOCKED) {
76306 if (!mlock_vma_pages_range(vma, addr, addr + len))
76307- mm->locked_vm += (len >> PAGE_SHIFT);
76308+ mm->locked_vm += charged;
76309 }
76310+ track_exec_limit(mm, addr, addr + len, flags);
76311 return addr;
76312 }
76313
76314@@ -2325,6 +2764,7 @@ void exit_mmap(struct mm_struct *mm)
76315 while (vma) {
76316 if (vma->vm_flags & VM_ACCOUNT)
76317 nr_accounted += vma_pages(vma);
76318+ vma->vm_mirror = NULL;
76319 vma = remove_vma(vma);
76320 }
76321 vm_unacct_memory(nr_accounted);
76322@@ -2341,6 +2781,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76323 struct vm_area_struct *prev;
76324 struct rb_node **rb_link, *rb_parent;
76325
76326+#ifdef CONFIG_PAX_SEGMEXEC
76327+ struct vm_area_struct *vma_m = NULL;
76328+#endif
76329+
76330+ if (security_mmap_addr(vma->vm_start))
76331+ return -EPERM;
76332+
76333 /*
76334 * The vm_pgoff of a purely anonymous vma should be irrelevant
76335 * until its first write fault, when page's anon_vma and index
76336@@ -2364,7 +2811,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
76337 security_vm_enough_memory_mm(mm, vma_pages(vma)))
76338 return -ENOMEM;
76339
76340+#ifdef CONFIG_PAX_SEGMEXEC
76341+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
76342+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76343+ if (!vma_m)
76344+ return -ENOMEM;
76345+ }
76346+#endif
76347+
76348 vma_link(mm, vma, prev, rb_link, rb_parent);
76349+
76350+#ifdef CONFIG_PAX_SEGMEXEC
76351+ if (vma_m)
76352+ BUG_ON(pax_mirror_vma(vma_m, vma));
76353+#endif
76354+
76355 return 0;
76356 }
76357
76358@@ -2384,6 +2845,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76359 struct mempolicy *pol;
76360 bool faulted_in_anon_vma = true;
76361
76362+ BUG_ON(vma->vm_mirror);
76363+
76364 /*
76365 * If anonymous vma has not yet been faulted, update new pgoff
76366 * to match new location, to increase its chance of merging.
76367@@ -2450,6 +2913,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
76368 return NULL;
76369 }
76370
76371+#ifdef CONFIG_PAX_SEGMEXEC
76372+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
76373+{
76374+ struct vm_area_struct *prev_m;
76375+ struct rb_node **rb_link_m, *rb_parent_m;
76376+ struct mempolicy *pol_m;
76377+
76378+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
76379+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
76380+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
76381+ *vma_m = *vma;
76382+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
76383+ if (anon_vma_clone(vma_m, vma))
76384+ return -ENOMEM;
76385+ pol_m = vma_policy(vma_m);
76386+ mpol_get(pol_m);
76387+ vma_set_policy(vma_m, pol_m);
76388+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
76389+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
76390+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
76391+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
76392+ if (vma_m->vm_file)
76393+ get_file(vma_m->vm_file);
76394+ if (vma_m->vm_ops && vma_m->vm_ops->open)
76395+ vma_m->vm_ops->open(vma_m);
76396+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
76397+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
76398+ vma_m->vm_mirror = vma;
76399+ vma->vm_mirror = vma_m;
76400+ return 0;
76401+}
76402+#endif
76403+
76404 /*
76405 * Return true if the calling process may expand its vm space by the passed
76406 * number of pages
76407@@ -2461,6 +2957,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
76408
76409 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
76410
76411+#ifdef CONFIG_PAX_RANDMMAP
76412+ if (mm->pax_flags & MF_PAX_RANDMMAP)
76413+ cur -= mm->brk_gap;
76414+#endif
76415+
76416+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
76417 if (cur + npages > lim)
76418 return 0;
76419 return 1;
76420@@ -2531,6 +3033,22 @@ int install_special_mapping(struct mm_struct *mm,
76421 vma->vm_start = addr;
76422 vma->vm_end = addr + len;
76423
76424+#ifdef CONFIG_PAX_MPROTECT
76425+ if (mm->pax_flags & MF_PAX_MPROTECT) {
76426+#ifndef CONFIG_PAX_MPROTECT_COMPAT
76427+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
76428+ return -EPERM;
76429+ if (!(vm_flags & VM_EXEC))
76430+ vm_flags &= ~VM_MAYEXEC;
76431+#else
76432+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
76433+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
76434+#endif
76435+ else
76436+ vm_flags &= ~VM_MAYWRITE;
76437+ }
76438+#endif
76439+
76440 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
76441 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
76442
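Taken together, the SEGMEXEC hunks in mm/mmap.c keep every executable VMA paired with a mirror shifted by exactly SEGMEXEC_TASK_SIZE and stripped of its write bits, so instruction fetches and data accesses resolve through disjoint segments. A toy model of the invariants pax_find_mirror_vma() asserts; SEGMEXEC_TASK_SIZE is taken as the 1.5 GiB i386 split, illustrative only:

#include <assert.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE (1536UL * 1024 * 1024)

struct toy_vma {
	unsigned long vm_start, vm_end, vm_pgoff;
	struct toy_vma *vm_mirror;
};

static void mirror(struct toy_vma *m, const struct toy_vma *v)
{
	*m = *v;
	m->vm_start += SEGMEXEC_TASK_SIZE;	/* same layout, shifted */
	m->vm_end   += SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	struct toy_vma text = { 0x08048000UL, 0x08050000UL, 0, NULL }, text_m;

	mirror(&text_m, &text);
	text.vm_mirror = &text_m;
	text_m.vm_mirror = &text;

	/* the BUG_ON()s of pax_find_mirror_vma(), as plain asserts */
	assert(text_m.vm_mirror == &text);
	assert(text.vm_end - text.vm_start == text_m.vm_end - text_m.vm_start);
	assert(text.vm_pgoff == text_m.vm_pgoff);
	printf("mirror at +0x%lx\n", text_m.vm_start - text.vm_start);
	return 0;
}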
76443diff --git a/mm/mprotect.c b/mm/mprotect.c
76444index a409926..8b32e6d 100644
76445--- a/mm/mprotect.c
76446+++ b/mm/mprotect.c
76447@@ -23,10 +23,17 @@
76448 #include <linux/mmu_notifier.h>
76449 #include <linux/migrate.h>
76450 #include <linux/perf_event.h>
76451+
76452+#ifdef CONFIG_PAX_MPROTECT
76453+#include <linux/elf.h>
76454+#include <linux/binfmts.h>
76455+#endif
76456+
76457 #include <asm/uaccess.h>
76458 #include <asm/pgtable.h>
76459 #include <asm/cacheflush.h>
76460 #include <asm/tlbflush.h>
76461+#include <asm/mmu_context.h>
76462
76463 #ifndef pgprot_modify
76464 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
76465@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
76466 flush_tlb_range(vma, start, end);
76467 }
76468
76469+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76470+/* called while holding the mmap semaphore for writing, except for stack expansion */
76471+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
76472+{
76473+ unsigned long oldlimit, newlimit = 0UL;
76474+
76475+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
76476+ return;
76477+
76478+ spin_lock(&mm->page_table_lock);
76479+ oldlimit = mm->context.user_cs_limit;
76480+ if ((prot & VM_EXEC) && oldlimit < end)
76481+ /* USER_CS limit moved up */
76482+ newlimit = end;
76483+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
76484+ /* USER_CS limit moved down */
76485+ newlimit = start;
76486+
76487+ if (newlimit) {
76488+ mm->context.user_cs_limit = newlimit;
76489+
76490+#ifdef CONFIG_SMP
76491+ wmb();
76492+ cpus_clear(mm->context.cpu_user_cs_mask);
76493+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
76494+#endif
76495+
76496+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
76497+ }
76498+ spin_unlock(&mm->page_table_lock);
76499+ if (newlimit == end) {
76500+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
76501+
76502+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
76503+ if (is_vm_hugetlb_page(vma))
76504+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
76505+ else
76506+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
76507+ }
76508+}
76509+#endif
76510+
76511 int
76512 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76513 unsigned long start, unsigned long end, unsigned long newflags)
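On x86 processors without NX, track_exec_limit() above emulates non-executable pages by sliding the USER_CS segment limit: the limit rises to cover a newly executable range and drops back when the executable range at the limit loses VM_EXEC. A model of just the limit computation, with illustrative addresses:

#include <stdio.h>

static unsigned long new_limit(unsigned long oldlimit, unsigned long start,
			       unsigned long end, int exec)
{
	if (exec && oldlimit < end)
		return end;	/* USER_CS limit moves up */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;	/* limit moves back down */
	return 0;		/* unchanged */
}

int main(void)
{
	/* mapping [0x08050000, 0x08060000) becomes executable... */
	printf("0x%lx\n", new_limit(0x08050000UL, 0x08050000UL,
				    0x08060000UL, 1));	/* 0x8060000 */
	/* ...and later loses VM_EXEC again */
	printf("0x%lx\n", new_limit(0x08060000UL, 0x08050000UL,
				    0x08060000UL, 0));	/* 0x8050000 */
	return 0;
}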
76514@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76515 int error;
76516 int dirty_accountable = 0;
76517
76518+#ifdef CONFIG_PAX_SEGMEXEC
76519+ struct vm_area_struct *vma_m = NULL;
76520+ unsigned long start_m, end_m;
76521+
76522+ start_m = start + SEGMEXEC_TASK_SIZE;
76523+ end_m = end + SEGMEXEC_TASK_SIZE;
76524+#endif
76525+
76526 if (newflags == oldflags) {
76527 *pprev = vma;
76528 return 0;
76529 }
76530
76531+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
76532+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
76533+
76534+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
76535+ return -ENOMEM;
76536+
76537+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
76538+ return -ENOMEM;
76539+ }
76540+
76541 /*
76542 * If we make a private mapping writable we increase our commit;
76543 * but (without finer accounting) cannot reduce our commit if we
76544@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
76545 }
76546 }
76547
76548+#ifdef CONFIG_PAX_SEGMEXEC
76549+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
76550+ if (start != vma->vm_start) {
76551+ error = split_vma(mm, vma, start, 1);
76552+ if (error)
76553+ goto fail;
76554+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
76555+ *pprev = (*pprev)->vm_next;
76556+ }
76557+
76558+ if (end != vma->vm_end) {
76559+ error = split_vma(mm, vma, end, 0);
76560+ if (error)
76561+ goto fail;
76562+ }
76563+
76564+ if (pax_find_mirror_vma(vma)) {
76565+ error = __do_munmap(mm, start_m, end_m - start_m);
76566+ if (error)
76567+ goto fail;
76568+ } else {
76569+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
76570+ if (!vma_m) {
76571+ error = -ENOMEM;
76572+ goto fail;
76573+ }
76574+ vma->vm_flags = newflags;
76575+ error = pax_mirror_vma(vma_m, vma);
76576+ if (error) {
76577+ vma->vm_flags = oldflags;
76578+ goto fail;
76579+ }
76580+ }
76581+ }
76582+#endif
76583+
76584 /*
76585 * First try to merge with previous and/or next vma.
76586 */
76587@@ -204,9 +307,21 @@ success:
76588 * vm_flags and vm_page_prot are protected by the mmap_sem
76589 * held in write mode.
76590 */
76591+
76592+#ifdef CONFIG_PAX_SEGMEXEC
76593+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
76594+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
76595+#endif
76596+
76597 vma->vm_flags = newflags;
76598+
76599+#ifdef CONFIG_PAX_MPROTECT
76600+ if (mm->binfmt && mm->binfmt->handle_mprotect)
76601+ mm->binfmt->handle_mprotect(vma, newflags);
76602+#endif
76603+
76604 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
76605- vm_get_page_prot(newflags));
76606+ vm_get_page_prot(vma->vm_flags));
76607
76608 if (vma_wants_writenotify(vma)) {
76609 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
76610@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76611 end = start + len;
76612 if (end <= start)
76613 return -ENOMEM;
76614+
76615+#ifdef CONFIG_PAX_SEGMEXEC
76616+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76617+ if (end > SEGMEXEC_TASK_SIZE)
76618+ return -EINVAL;
76619+ } else
76620+#endif
76621+
76622+ if (end > TASK_SIZE)
76623+ return -EINVAL;
76624+
76625 if (!arch_validate_prot(prot))
76626 return -EINVAL;
76627
76628@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76629 /*
76630 * Does the application expect PROT_READ to imply PROT_EXEC:
76631 */
76632- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
76633+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
76634 prot |= PROT_EXEC;
76635
76636 vm_flags = calc_vm_prot_bits(prot);
76637@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76638 if (start > vma->vm_start)
76639 prev = vma;
76640
76641+#ifdef CONFIG_PAX_MPROTECT
76642+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
76643+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
76644+#endif
76645+
76646 for (nstart = start ; ; ) {
76647 unsigned long newflags;
76648
76649@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76650
76651 /* newflags >> 4 shift VM_MAY% in place of VM_% */
76652 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
76653+ if (prot & (PROT_WRITE | PROT_EXEC))
76654+ gr_log_rwxmprotect(vma->vm_file);
76655+
76656+ error = -EACCES;
76657+ goto out;
76658+ }
76659+
76660+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
76661 error = -EACCES;
76662 goto out;
76663 }
76664@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
76665 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
76666 if (error)
76667 goto out;
76668+
76669+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
76670+
76671 nstart = tmp;
76672
76673 if (nstart < prev->vm_end)
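With MPROTECT active, the syscall path above refuses any transition that would leave a region both writable and executable, logging it and returning EACCES. A userspace probe, assuming a PaX/grsecurity kernel with MPROTECT enabled for the binary; on a vanilla kernel the call simply succeeds:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (mprotect(p, 4096, PROT_READ | PROT_WRITE | PROT_EXEC) == -1)
		printf("denied: %s\n", strerror(errno));	/* EACCES */
	else
		printf("allowed (vanilla kernel or MPROTECT off)\n");
	return 0;
}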
76674diff --git a/mm/mremap.c b/mm/mremap.c
76675index 1b61c2d..1cc0e3c 100644
76676--- a/mm/mremap.c
76677+++ b/mm/mremap.c
76678@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
76679 continue;
76680 pte = ptep_get_and_clear(mm, old_addr, old_pte);
76681 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
76682+
76683+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
76684+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
76685+ pte = pte_exprotect(pte);
76686+#endif
76687+
76688 set_pte_at(mm, new_addr, new_pte, pte);
76689 }
76690
76691@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
76692 if (is_vm_hugetlb_page(vma))
76693 goto Einval;
76694
76695+#ifdef CONFIG_PAX_SEGMEXEC
76696+ if (pax_find_mirror_vma(vma))
76697+ goto Einval;
76698+#endif
76699+
76700 /* We can't remap across vm area boundaries */
76701 if (old_len > vma->vm_end - addr)
76702 goto Efault;
76703@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
76704 unsigned long ret = -EINVAL;
76705 unsigned long charged = 0;
76706 unsigned long map_flags;
76707+ unsigned long pax_task_size = TASK_SIZE;
76708
76709 if (new_addr & ~PAGE_MASK)
76710 goto out;
76711
76712- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
76713+#ifdef CONFIG_PAX_SEGMEXEC
76714+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
76715+ pax_task_size = SEGMEXEC_TASK_SIZE;
76716+#endif
76717+
76718+ pax_task_size -= PAGE_SIZE;
76719+
76720+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
76721 goto out;
76722
76723 /* Check if the location we're moving into overlaps the
76724 * old location at all, and fail if it does.
76725 */
76726- if ((new_addr <= addr) && (new_addr+new_len) > addr)
76727- goto out;
76728-
76729- if ((addr <= new_addr) && (addr+old_len) > new_addr)
76730+ if (addr + old_len > new_addr && new_addr + new_len > addr)
76731 goto out;
76732
76733 ret = do_munmap(mm, new_addr, new_len);
76734@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76735 struct vm_area_struct *vma;
76736 unsigned long ret = -EINVAL;
76737 unsigned long charged = 0;
76738+ unsigned long pax_task_size = TASK_SIZE;
76739
76740 down_write(&current->mm->mmap_sem);
76741
76742@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76743 if (!new_len)
76744 goto out;
76745
76746+#ifdef CONFIG_PAX_SEGMEXEC
76747+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
76748+ pax_task_size = SEGMEXEC_TASK_SIZE;
76749+#endif
76750+
76751+ pax_task_size -= PAGE_SIZE;
76752+
76753+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
76754+ old_len > pax_task_size || addr > pax_task_size-old_len)
76755+ goto out;
76756+
76757 if (flags & MREMAP_FIXED) {
76758 if (flags & MREMAP_MAYMOVE)
76759 ret = mremap_to(addr, old_len, new_addr, new_len);
76760@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76761 addr + new_len);
76762 }
76763 ret = addr;
76764+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
76765 goto out;
76766 }
76767 }
76768@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
76769 goto out;
76770 }
76771
76772+ map_flags = vma->vm_flags;
76773 ret = move_vma(vma, addr, old_len, new_len, new_addr);
76774+ if (!(ret & ~PAGE_MASK)) {
76775+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
76776+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
76777+ }
76778 }
76779 out:
76780 if (ret & ~PAGE_MASK)
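mremap_to() above folds the two one-sided overlap tests into a single symmetric one: two half-open ranges overlap exactly when each begins before the other ends. A brute-force check of the equivalence over small nonempty ranges:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned a = 0; a < 8; a++)	/* addr */
	for (unsigned ol = 1; ol <= 4; ol++)	/* old_len */
	for (unsigned n = 0; n < 8; n++)	/* new_addr */
	for (unsigned nl = 1; nl <= 4; nl++) {	/* new_len */
		int old_test = (n <= a && n + nl > a) ||
			       (a <= n && a + ol > n);
		int new_test = a + ol > n && n + nl > a;

		assert(old_test == new_test);
	}
	printf("both overlap tests agree on all cases\n");
	return 0;
}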
76781diff --git a/mm/nommu.c b/mm/nommu.c
76782index 45131b4..c521665 100644
76783--- a/mm/nommu.c
76784+++ b/mm/nommu.c
76785@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76786 int sysctl_overcommit_ratio = 50; /* default is 50% */
76787 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
76788 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
76789-int heap_stack_gap = 0;
76790
76791 atomic_long_t mmap_pages_allocated;
76792
76793@@ -824,15 +823,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
76794 EXPORT_SYMBOL(find_vma);
76795
76796 /*
76797- * find a VMA
76798- * - we don't extend stack VMAs under NOMMU conditions
76799- */
76800-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
76801-{
76802- return find_vma(mm, addr);
76803-}
76804-
76805-/*
76806 * expand a stack to a given address
76807 * - not supported under NOMMU conditions
76808 */
76809@@ -1540,6 +1530,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
76810
76811 /* most fields are the same, copy all, and then fixup */
76812 *new = *vma;
76813+ INIT_LIST_HEAD(&new->anon_vma_chain);
76814 *region = *vma->vm_region;
76815 new->vm_region = region;
76816
76817diff --git a/mm/page_alloc.c b/mm/page_alloc.c
76818index ceb4168..d7774f2 100644
76819--- a/mm/page_alloc.c
76820+++ b/mm/page_alloc.c
76821@@ -340,7 +340,7 @@ out:
76822 * This usage means that zero-order pages may not be compound.
76823 */
76824
76825-static void free_compound_page(struct page *page)
76826+void free_compound_page(struct page *page)
76827 {
76828 __free_pages_ok(page, compound_order(page));
76829 }
76830@@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76831 int i;
76832 int bad = 0;
76833
76834+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76835+ unsigned long index = 1UL << order;
76836+#endif
76837+
76838 trace_mm_page_free(page, order);
76839 kmemcheck_free_shadow(page, order);
76840
76841@@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
76842 debug_check_no_obj_freed(page_address(page),
76843 PAGE_SIZE << order);
76844 }
76845+
76846+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76847+ for (; index; --index)
76848+ sanitize_highpage(page + index - 1);
76849+#endif
76850+
76851 arch_free_page(page, order);
76852 kernel_map_pages(page, 1 << order, 0);
76853
76854@@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
76855 arch_alloc_page(page, order);
76856 kernel_map_pages(page, 1 << order, 1);
76857
76858+#ifndef CONFIG_PAX_MEMORY_SANITIZE
76859 if (gfp_flags & __GFP_ZERO)
76860 prep_zero_page(page, order, gfp_flags);
76861+#endif
76862
76863 if (order && (gfp_flags & __GFP_COMP))
76864 prep_compound_page(page, order);
76865@@ -3684,7 +3696,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
76866 unsigned long pfn;
76867
76868 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
76869+#ifdef CONFIG_X86_32
76870+		/* the vanilla pfn_valid_within() below caused boot failures
76871+		   in VMware 8 on 32-bit, so use the stricter pfn_valid() */
76872+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
76873+#else
76874 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
76875+#endif
76876 return 1;
76877 }
76878 return 0;
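PAX_MEMORY_SANITIZE moves page clearing from allocation to free: free_pages_prepare() scrubs every page of the freed order, which is why prep_new_page() can skip the __GFP_ZERO memset. A minimal model of the free-side loop; sanitize_highpage() is modelled here as a plain clear of one 4 KiB page:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PG_SIZE 4096

static void sanitize_page(unsigned char *page)
{
	memset(page, 0, PG_SIZE);	/* stand-in for sanitize_highpage() */
}

static void free_pages_sanitized(unsigned char *base, unsigned order)
{
	/* mirrors: for (index = 1UL << order; index; --index)
	 *		sanitize_highpage(page + index - 1);		*/
	for (unsigned long index = 1UL << order; index; --index)
		sanitize_page(base + (index - 1) * PG_SIZE);
	free(base);
}

int main(void)
{
	unsigned order = 2;	/* a 4-page block */
	unsigned char *b = malloc(PG_SIZE << order);

	if (!b)
		return 1;
	memset(b, 0xaa, PG_SIZE << order);	/* "sensitive" contents */
	free_pages_sanitized(b, order);		/* scrubbed before reuse */
	printf("%u pages sanitized on free\n", 1U << order);
	return 0;
}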
76879diff --git a/mm/percpu.c b/mm/percpu.c
76880index ddc5efb..f632d2c 100644
76881--- a/mm/percpu.c
76882+++ b/mm/percpu.c
76883@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
76884 static unsigned int pcpu_high_unit_cpu __read_mostly;
76885
76886 /* the address of the first chunk which starts with the kernel static area */
76887-void *pcpu_base_addr __read_mostly;
76888+void *pcpu_base_addr __read_only;
76889 EXPORT_SYMBOL_GPL(pcpu_base_addr);
76890
76891 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
76892diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
76893index 926b466..b23df53 100644
76894--- a/mm/process_vm_access.c
76895+++ b/mm/process_vm_access.c
76896@@ -13,6 +13,7 @@
76897 #include <linux/uio.h>
76898 #include <linux/sched.h>
76899 #include <linux/highmem.h>
76900+#include <linux/security.h>
76901 #include <linux/ptrace.h>
76902 #include <linux/slab.h>
76903 #include <linux/syscalls.h>
76904@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76905 size_t iov_l_curr_offset = 0;
76906 ssize_t iov_len;
76907
76908+ return -ENOSYS; // PaX: until properly audited
76909+
76910 /*
76911 * Work out how many pages of struct pages we're going to need
76912 * when eventually calling get_user_pages
76913 */
76914 for (i = 0; i < riovcnt; i++) {
76915 iov_len = rvec[i].iov_len;
76916- if (iov_len > 0) {
76917- nr_pages_iov = ((unsigned long)rvec[i].iov_base
76918- + iov_len)
76919- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
76920- / PAGE_SIZE + 1;
76921- nr_pages = max(nr_pages, nr_pages_iov);
76922- }
76923+ if (iov_len <= 0)
76924+ continue;
76925+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
76926+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
76927+ nr_pages = max(nr_pages, nr_pages_iov);
76928 }
76929
76930 if (nr_pages == 0)
76931@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
76932 goto free_proc_pages;
76933 }
76934
76935+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
76936+ rc = -EPERM;
76937+ goto put_task_struct;
76938+ }
76939+
76940 mm = mm_access(task, PTRACE_MODE_ATTACH);
76941 if (!mm || IS_ERR(mm)) {
76942 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
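
The reflowed loop above computes how many page frames a single iovec can touch; the arithmetic is unchanged from the original, only the nesting is flattened with an early continue. Note also the unconditional return -ENOSYS placed before any work: the PaX side disables the whole syscall pair pending audit, and the gr_handle_ptrace() gate below applies the same policy check as a real ptrace attach. As a sanity check of the page-count formula (a sketch assuming a 4096-byte page):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Pages spanned by a user buffer [base, base + len): the same formula as
 * the nr_pages_iov computation in process_vm_rw_core(). The "+ 1" makes
 * it a conservative upper bound when base + len lands on a boundary. */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
    return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
    printf("%lu\n", pages_spanned(4090, 100)); /* straddles one boundary: 2 */
    printf("%lu\n", pages_spanned(0, 4096));   /* exact fit still reports 2 */
    return 0;
}
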
76943diff --git a/mm/rmap.c b/mm/rmap.c
76944index 2ee1ef0..2e175ba 100644
76945--- a/mm/rmap.c
76946+++ b/mm/rmap.c
76947@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76948 struct anon_vma *anon_vma = vma->anon_vma;
76949 struct anon_vma_chain *avc;
76950
76951+#ifdef CONFIG_PAX_SEGMEXEC
76952+ struct anon_vma_chain *avc_m = NULL;
76953+#endif
76954+
76955 might_sleep();
76956 if (unlikely(!anon_vma)) {
76957 struct mm_struct *mm = vma->vm_mm;
76958@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76959 if (!avc)
76960 goto out_enomem;
76961
76962+#ifdef CONFIG_PAX_SEGMEXEC
76963+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
76964+ if (!avc_m)
76965+ goto out_enomem_free_avc;
76966+#endif
76967+
76968 anon_vma = find_mergeable_anon_vma(vma);
76969 allocated = NULL;
76970 if (!anon_vma) {
76971@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76972 /* page_table_lock to protect against threads */
76973 spin_lock(&mm->page_table_lock);
76974 if (likely(!vma->anon_vma)) {
76975+
76976+#ifdef CONFIG_PAX_SEGMEXEC
76977+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
76978+
76979+ if (vma_m) {
76980+ BUG_ON(vma_m->anon_vma);
76981+ vma_m->anon_vma = anon_vma;
76982+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
76983+ avc_m = NULL;
76984+ }
76985+#endif
76986+
76987 vma->anon_vma = anon_vma;
76988 anon_vma_chain_link(vma, avc, anon_vma);
76989 allocated = NULL;
76990@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
76991
76992 if (unlikely(allocated))
76993 put_anon_vma(allocated);
76994+
76995+#ifdef CONFIG_PAX_SEGMEXEC
76996+ if (unlikely(avc_m))
76997+ anon_vma_chain_free(avc_m);
76998+#endif
76999+
77000 if (unlikely(avc))
77001 anon_vma_chain_free(avc);
77002 }
77003 return 0;
77004
77005 out_enomem_free_avc:
77006+
77007+#ifdef CONFIG_PAX_SEGMEXEC
77008+ if (avc_m)
77009+ anon_vma_chain_free(avc_m);
77010+#endif
77011+
77012 anon_vma_chain_free(avc);
77013 out_enomem:
77014 return -ENOMEM;
77015@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
77016 * Attach the anon_vmas from src to dst.
77017 * Returns 0 on success, -ENOMEM on failure.
77018 */
77019-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
77020+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
77021 {
77022 struct anon_vma_chain *avc, *pavc;
77023 struct anon_vma *root = NULL;
77024@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
77025 * the corresponding VMA in the parent process is attached to.
77026 * Returns 0 on success, non-zero on failure.
77027 */
77028-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
77029+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
77030 {
77031 struct anon_vma_chain *avc;
77032 struct anon_vma *anon_vma;
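
Under SEGMEXEC every VMA has a mirror, and the mirror must share the same anon_vma; the rmap.c hunks above pre-allocate a second chain before taking page_table_lock (anon_vma_chain_alloc() can sleep), link it under the lock only if a mirror actually exists, and free it on every path where it goes unused. A sketch of that allocate-early/commit-late shape, with malloc standing in for the chain allocator and all names illustrative:

#include <stdlib.h>

struct chain { struct chain *next; };

/* Allocate the main chain and the maybe-needed mirror chain up front,
 * while sleeping is still allowed; the caller links the mirror under the
 * lock only if a mirror VMA exists, and frees it otherwise. */
static struct chain *prepare_chain_pair(struct chain **mirror_out)
{
    struct chain *avc = malloc(sizeof(*avc));
    struct chain *avc_m = avc ? malloc(sizeof(*avc)) : NULL;

    if (avc && !avc_m) {
        free(avc);
        return NULL;
    }
    *mirror_out = avc_m;
    return avc;
}
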
77033diff --git a/mm/shmem.c b/mm/shmem.c
77034index 50c5b8f..0bc87f7 100644
77035--- a/mm/shmem.c
77036+++ b/mm/shmem.c
77037@@ -31,7 +31,7 @@
77038 #include <linux/export.h>
77039 #include <linux/swap.h>
77040
77041-static struct vfsmount *shm_mnt;
77042+struct vfsmount *shm_mnt;
77043
77044 #ifdef CONFIG_SHMEM
77045 /*
77046@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
77047 #define BOGO_DIRENT_SIZE 20
77048
77049 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
77050-#define SHORT_SYMLINK_LEN 128
77051+#define SHORT_SYMLINK_LEN 64
77052
77053 /*
77054 * shmem_fallocate and shmem_writepage communicate via inode->i_private
77055@@ -2112,6 +2112,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
77056 static int shmem_xattr_validate(const char *name)
77057 {
77058 struct { const char *prefix; size_t len; } arr[] = {
77059+
77060+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
77061+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
77062+#endif
77063+
77064 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
77065 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
77066 };
77067@@ -2167,6 +2172,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
77068 if (err)
77069 return err;
77070
77071+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
77072+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
77073+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
77074+ return -EOPNOTSUPP;
77075+ if (size > 8)
77076+ return -EINVAL;
77077+ }
77078+#endif
77079+
77080 return simple_xattr_set(&info->xattrs, name, value, size, flags);
77081 }
77082
77083@@ -2466,8 +2480,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
77084 int err = -ENOMEM;
77085
77086 /* Round up to L1_CACHE_BYTES to resist false sharing */
77087- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
77088- L1_CACHE_BYTES), GFP_KERNEL);
77089+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
77090 if (!sbinfo)
77091 return -ENOMEM;
77092
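
With PAX_XATTR_PAX_FLAGS, tmpfs accepts exactly one attribute from the user namespace: the PaX flags attribute, capped at 8 bytes (one character per flag). A standalone model of the two shmem hunks, prefix whitelisting plus the setxattr gate; the literal value of XATTR_NAME_PAX_FLAGS is assumed here:

#include <errno.h>
#include <string.h>

#define XATTR_USER_PREFIX      "user."
#define XATTR_USER_PREFIX_LEN  (sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS   "user.pax.flags"

/* Mirror of the shmem_setxattr() gate: anything else under "user." is
 * rejected outright, and the flags value may not exceed 8 bytes. */
static int pax_xattr_check(const char *name, size_t size)
{
    if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
        if (strcmp(name, XATTR_NAME_PAX_FLAGS))
            return -EOPNOTSUPP;
        if (size > 8)
            return -EINVAL;
    }
    return 0;
}
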
77093diff --git a/mm/slab.c b/mm/slab.c
77094index 33d3363..3851c61 100644
77095--- a/mm/slab.c
77096+++ b/mm/slab.c
77097@@ -164,7 +164,7 @@ static bool pfmemalloc_active __read_mostly;
77098
77099 /* Legal flag mask for kmem_cache_create(). */
77100 #if DEBUG
77101-# define CREATE_MASK (SLAB_RED_ZONE | \
77102+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
77103 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
77104 SLAB_CACHE_DMA | \
77105 SLAB_STORE_USER | \
77106@@ -172,7 +172,7 @@ static bool pfmemalloc_active __read_mostly;
77107 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
77108 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
77109 #else
77110-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
77111+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
77112 SLAB_CACHE_DMA | \
77113 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
77114 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
77115@@ -322,7 +322,7 @@ struct kmem_list3 {
77116 * Need this for bootstrapping a per node allocator.
77117 */
77118 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
77119-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
77120+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
77121 #define CACHE_CACHE 0
77122 #define SIZE_AC MAX_NUMNODES
77123 #define SIZE_L3 (2 * MAX_NUMNODES)
77124@@ -423,10 +423,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
77125 if ((x)->max_freeable < i) \
77126 (x)->max_freeable = i; \
77127 } while (0)
77128-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
77129-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
77130-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
77131-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
77132+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
77133+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
77134+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
77135+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
77136 #else
77137 #define STATS_INC_ACTIVE(x) do { } while (0)
77138 #define STATS_DEC_ACTIVE(x) do { } while (0)
77139@@ -534,7 +534,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
77140 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
77141 */
77142 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
77143- const struct slab *slab, void *obj)
77144+ const struct slab *slab, const void *obj)
77145 {
77146 u32 offset = (obj - slab->s_mem);
77147 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
77148@@ -555,12 +555,13 @@ EXPORT_SYMBOL(malloc_sizes);
77149 struct cache_names {
77150 char *name;
77151 char *name_dma;
77152+ char *name_usercopy;
77153 };
77154
77155 static struct cache_names __initdata cache_names[] = {
77156-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
77157+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
77158 #include <linux/kmalloc_sizes.h>
77159- {NULL,}
77160+ {NULL}
77161 #undef CACHE
77162 };
77163
77164@@ -721,6 +722,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
77165 if (unlikely(gfpflags & GFP_DMA))
77166 return csizep->cs_dmacachep;
77167 #endif
77168+
77169+#ifdef CONFIG_PAX_USERCOPY_SLABS
77170+ if (unlikely(gfpflags & GFP_USERCOPY))
77171+ return csizep->cs_usercopycachep;
77172+#endif
77173+
77174 return csizep->cs_cachep;
77175 }
77176
77177@@ -1676,7 +1683,7 @@ void __init kmem_cache_init(void)
77178 sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
77179 sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
77180 sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
77181- __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
77182+ __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
77183 list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
77184
77185 if (INDEX_AC != INDEX_L3) {
77186@@ -1685,7 +1692,7 @@ void __init kmem_cache_init(void)
77187 sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
77188 sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
77189 sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
77190- __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
77191+ __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
77192 list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
77193 }
77194
77195@@ -1705,7 +1712,7 @@ void __init kmem_cache_init(void)
77196 sizes->cs_cachep->size = sizes->cs_size;
77197 sizes->cs_cachep->object_size = sizes->cs_size;
77198 sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
77199- __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
77200+ __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
77201 list_add(&sizes->cs_cachep->list, &slab_caches);
77202 }
77203 #ifdef CONFIG_ZONE_DMA
77204@@ -1718,6 +1725,17 @@ void __init kmem_cache_init(void)
77205 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
77206 list_add(&sizes->cs_dmacachep->list, &slab_caches);
77207 #endif
77208+
77209+#ifdef CONFIG_PAX_USERCOPY_SLABS
77210+ sizes->cs_usercopycachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
77211+ sizes->cs_usercopycachep->name = names->name_usercopy;
77212+ sizes->cs_usercopycachep->size = sizes->cs_size;
77213+ sizes->cs_usercopycachep->object_size = sizes->cs_size;
77214+ sizes->cs_usercopycachep->align = ARCH_KMALLOC_MINALIGN;
77215+ __kmem_cache_create(sizes->cs_usercopycachep, ARCH_KMALLOC_FLAGS| SLAB_PANIC|SLAB_USERCOPY);
77216+ list_add(&sizes->cs_usercopycachep->list, &slab_caches);
77217+#endif
77218+
77219 sizes++;
77220 names++;
77221 }
77222@@ -4405,10 +4423,10 @@ static int s_show(struct seq_file *m, void *p)
77223 }
77224 /* cpu stats */
77225 {
77226- unsigned long allochit = atomic_read(&cachep->allochit);
77227- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
77228- unsigned long freehit = atomic_read(&cachep->freehit);
77229- unsigned long freemiss = atomic_read(&cachep->freemiss);
77230+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
77231+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
77232+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
77233+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
77234
77235 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
77236 allochit, allocmiss, freehit, freemiss);
77237@@ -4667,13 +4685,71 @@ static int __init slab_proc_init(void)
77238 {
77239 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
77240 #ifdef CONFIG_DEBUG_SLAB_LEAK
77241- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
77242+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
77243 #endif
77244 return 0;
77245 }
77246 module_init(slab_proc_init);
77247 #endif
77248
77249+bool is_usercopy_object(const void *ptr)
77250+{
77251+ struct page *page;
77252+ struct kmem_cache *cachep;
77253+
77254+ if (ZERO_OR_NULL_PTR(ptr))
77255+ return false;
77256+
77257+ if (!slab_is_available())
77258+ return false;
77259+
77260+ if (!virt_addr_valid(ptr))
77261+ return false;
77262+
77263+ page = virt_to_head_page(ptr);
77264+
77265+ if (!PageSlab(page))
77266+ return false;
77267+
77268+ cachep = page->slab_cache;
77269+ return cachep->flags & SLAB_USERCOPY;
77270+}
77271+
77272+#ifdef CONFIG_PAX_USERCOPY
77273+const char *check_heap_object(const void *ptr, unsigned long n)
77274+{
77275+ struct page *page;
77276+ struct kmem_cache *cachep;
77277+ struct slab *slabp;
77278+ unsigned int objnr;
77279+ unsigned long offset;
77280+
77281+ if (ZERO_OR_NULL_PTR(ptr))
77282+ return "<null>";
77283+
77284+ if (!virt_addr_valid(ptr))
77285+ return NULL;
77286+
77287+ page = virt_to_head_page(ptr);
77288+
77289+ if (!PageSlab(page))
77290+ return NULL;
77291+
77292+ cachep = page->slab_cache;
77293+ if (!(cachep->flags & SLAB_USERCOPY))
77294+ return cachep->name;
77295+
77296+ slabp = page->slab_page;
77297+ objnr = obj_to_index(cachep, slabp, ptr);
77298+ BUG_ON(objnr >= cachep->num);
77299+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
77300+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
77301+ return NULL;
77302+
77303+ return cachep->name;
77304+}
77305+#endif
77306+
77307 /**
77308 * ksize - get the actual amount of memory allocated for a given object
77309 * @objp: Pointer to the object
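
check_heap_object() above is the SLAB back end of PAX_USERCOPY: a copy is refused unless the cache is whitelisted with SLAB_USERCOPY and the requested range stays inside a single object. The bounds test is deliberately written in subtraction form so that it cannot overflow the way a naive "offset + n <= size" could; reduced to its essence:

#include <stddef.h>

/* One object of usable size obj_size; ptr sits offset bytes into it.
 * Allow an n-byte copy only if it cannot run past the object's end.
 * After checking offset <= obj_size, "n <= obj_size - offset" cannot
 * wrap, unlike the naive sum "offset + n <= obj_size". */
static int usercopy_in_bounds(size_t offset, size_t n, size_t obj_size)
{
    return offset <= obj_size && n <= obj_size - offset;
}
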
77310diff --git a/mm/slab_common.c b/mm/slab_common.c
77311index 069a24e6..226a310 100644
77312--- a/mm/slab_common.c
77313+++ b/mm/slab_common.c
77314@@ -127,7 +127,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
77315 err = __kmem_cache_create(s, flags);
77316 if (!err) {
77317
77318- s->refcount = 1;
77319+ atomic_set(&s->refcount, 1);
77320 list_add(&s->list, &slab_caches);
77321
77322 } else {
77323@@ -163,8 +163,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
77324 {
77325 get_online_cpus();
77326 mutex_lock(&slab_mutex);
77327- s->refcount--;
77328- if (!s->refcount) {
77329+ if (atomic_dec_and_test(&s->refcount)) {
77330 list_del(&s->list);
77331
77332 if (!__kmem_cache_shutdown(s)) {
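
kmem_cache refcounting moves from a plain int to atomic_t throughout this patch (the slub.c hunks below do the same), which brings the counter under PAX_REFCOUNT's overflow checking and fuses the decrement and zero test into one primitive. In C11 terms, the destroy path becomes roughly:

#include <stdatomic.h>
#include <stdbool.h>

struct cache { atomic_int refcount; };

/* atomic_dec_and_test() equivalent: exactly one caller -- the one that
 * drops the count from 1 to 0 -- sees true and performs the teardown. */
static bool cache_put(struct cache *c)
{
    return atomic_fetch_sub(&c->refcount, 1) == 1;
}
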
77333diff --git a/mm/slob.c b/mm/slob.c
77334index 1e921c5..1ce12c2 100644
77335--- a/mm/slob.c
77336+++ b/mm/slob.c
77337@@ -159,7 +159,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
77338 /*
77339 * Return the size of a slob block.
77340 */
77341-static slobidx_t slob_units(slob_t *s)
77342+static slobidx_t slob_units(const slob_t *s)
77343 {
77344 if (s->units > 0)
77345 return s->units;
77346@@ -169,7 +169,7 @@ static slobidx_t slob_units(slob_t *s)
77347 /*
77348 * Return the next free slob block pointer after this one.
77349 */
77350-static slob_t *slob_next(slob_t *s)
77351+static slob_t *slob_next(const slob_t *s)
77352 {
77353 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
77354 slobidx_t next;
77355@@ -184,14 +184,14 @@ static slob_t *slob_next(slob_t *s)
77356 /*
77357 * Returns true if s is the last free block in its page.
77358 */
77359-static int slob_last(slob_t *s)
77360+static int slob_last(const slob_t *s)
77361 {
77362 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
77363 }
77364
77365-static void *slob_new_pages(gfp_t gfp, int order, int node)
77366+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
77367 {
77368- void *page;
77369+ struct page *page;
77370
77371 #ifdef CONFIG_NUMA
77372 if (node != NUMA_NO_NODE)
77373@@ -203,14 +203,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
77374 if (!page)
77375 return NULL;
77376
77377- return page_address(page);
77378+ __SetPageSlab(page);
77379+ return page;
77380 }
77381
77382-static void slob_free_pages(void *b, int order)
77383+static void slob_free_pages(struct page *sp, int order)
77384 {
77385 if (current->reclaim_state)
77386 current->reclaim_state->reclaimed_slab += 1 << order;
77387- free_pages((unsigned long)b, order);
77388+ __ClearPageSlab(sp);
77389+ reset_page_mapcount(sp);
77390+ sp->private = 0;
77391+ __free_pages(sp, order);
77392 }
77393
77394 /*
77395@@ -315,15 +319,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
77396
77397 /* Not enough space: must allocate a new page */
77398 if (!b) {
77399- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77400- if (!b)
77401+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
77402+ if (!sp)
77403 return NULL;
77404- sp = virt_to_page(b);
77405- __SetPageSlab(sp);
77406+ b = page_address(sp);
77407
77408 spin_lock_irqsave(&slob_lock, flags);
77409 sp->units = SLOB_UNITS(PAGE_SIZE);
77410 sp->freelist = b;
77411+ sp->private = 0;
77412 INIT_LIST_HEAD(&sp->list);
77413 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
77414 set_slob_page_free(sp, slob_list);
77415@@ -361,9 +365,7 @@ static void slob_free(void *block, int size)
77416 if (slob_page_free(sp))
77417 clear_slob_page_free(sp);
77418 spin_unlock_irqrestore(&slob_lock, flags);
77419- __ClearPageSlab(sp);
77420- reset_page_mapcount(sp);
77421- slob_free_pages(b, 0);
77422+ slob_free_pages(sp, 0);
77423 return;
77424 }
77425
77426@@ -426,11 +428,10 @@ out:
77427 */
77428
77429 static __always_inline void *
77430-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77431+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
77432 {
77433- unsigned int *m;
77434- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77435- void *ret;
77436+ slob_t *m;
77437+ void *ret = NULL;
77438
77439 gfp &= gfp_allowed_mask;
77440
77441@@ -444,20 +445,23 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77442
77443 if (!m)
77444 return NULL;
77445- *m = size;
77446+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
77447+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
77448+ m[0].units = size;
77449+ m[1].units = align;
77450 ret = (void *)m + align;
77451
77452 trace_kmalloc_node(caller, ret,
77453 size, size + align, gfp, node);
77454 } else {
77455 unsigned int order = get_order(size);
77456+ struct page *page;
77457
77458 if (likely(order))
77459 gfp |= __GFP_COMP;
77460- ret = slob_new_pages(gfp, order, node);
77461- if (ret) {
77462- struct page *page;
77463- page = virt_to_page(ret);
77464+ page = slob_new_pages(gfp, order, node);
77465+ if (page) {
77466+ ret = page_address(page);
77467 page->private = size;
77468 }
77469
77470@@ -465,7 +469,17 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77471 size, PAGE_SIZE << order, gfp, node);
77472 }
77473
77474- kmemleak_alloc(ret, size, 1, gfp);
77475+ return ret;
77476+}
77477+
77478+static __always_inline void *
77479+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
77480+{
77481+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77482+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
77483+
77484+ if (!ZERO_OR_NULL_PTR(ret))
77485+ kmemleak_alloc(ret, size, 1, gfp);
77486 return ret;
77487 }
77488
77489@@ -501,15 +515,91 @@ void kfree(const void *block)
77490 kmemleak_free(block);
77491
77492 sp = virt_to_page(block);
77493- if (PageSlab(sp)) {
77494+ VM_BUG_ON(!PageSlab(sp));
77495+ if (!sp->private) {
77496 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77497- unsigned int *m = (unsigned int *)(block - align);
77498- slob_free(m, *m + align);
77499- } else
77500+ slob_t *m = (slob_t *)(block - align);
77501+ slob_free(m, m[0].units + align);
77502+ } else {
77503+ __ClearPageSlab(sp);
77504+ reset_page_mapcount(sp);
77505+ sp->private = 0;
77506 put_page(sp);
77507+ }
77508 }
77509 EXPORT_SYMBOL(kfree);
77510
77511+bool is_usercopy_object(const void *ptr)
77512+{
77513+ if (!slab_is_available())
77514+ return false;
77515+
77516+ // PAX: TODO
77517+
77518+ return false;
77519+}
77520+
77521+#ifdef CONFIG_PAX_USERCOPY
77522+const char *check_heap_object(const void *ptr, unsigned long n)
77523+{
77524+ struct page *page;
77525+ const slob_t *free;
77526+ const void *base;
77527+ unsigned long flags;
77528+
77529+ if (ZERO_OR_NULL_PTR(ptr))
77530+ return "<null>";
77531+
77532+ if (!virt_addr_valid(ptr))
77533+ return NULL;
77534+
77535+ page = virt_to_head_page(ptr);
77536+ if (!PageSlab(page))
77537+ return NULL;
77538+
77539+ if (page->private) {
77540+ base = page;
77541+ if (base <= ptr && n <= page->private - (ptr - base))
77542+ return NULL;
77543+ return "<slob>";
77544+ }
77545+
77546+ /* some tricky double walking to find the chunk */
77547+ spin_lock_irqsave(&slob_lock, flags);
77548+ base = (void *)((unsigned long)ptr & PAGE_MASK);
77549+ free = page->freelist;
77550+
77551+ while (!slob_last(free) && (void *)free <= ptr) {
77552+ base = free + slob_units(free);
77553+ free = slob_next(free);
77554+ }
77555+
77556+ while (base < (void *)free) {
77557+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
77558+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
77559+ int offset;
77560+
77561+ if (ptr < base + align)
77562+ break;
77563+
77564+ offset = ptr - base - align;
77565+ if (offset >= m) {
77566+ base += size;
77567+ continue;
77568+ }
77569+
77570+ if (n > m - offset)
77571+ break;
77572+
77573+ spin_unlock_irqrestore(&slob_lock, flags);
77574+ return NULL;
77575+ }
77576+
77577+ spin_unlock_irqrestore(&slob_lock, flags);
77578+ return "<slob>";
77579+}
77580+#endif
77581+
77582 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
77583 size_t ksize(const void *block)
77584 {
77585@@ -520,10 +610,11 @@ size_t ksize(const void *block)
77586 return 0;
77587
77588 sp = virt_to_page(block);
77589- if (PageSlab(sp)) {
77590+ VM_BUG_ON(!PageSlab(sp));
77591+ if (!sp->private) {
77592 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
77593- unsigned int *m = (unsigned int *)(block - align);
77594- return SLOB_UNITS(*m) * SLOB_UNIT;
77595+ slob_t *m = (slob_t *)(block - align);
77596+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
77597 } else
77598 return sp->private;
77599 }
77600@@ -550,23 +641,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
77601
77602 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
77603 {
77604- void *b;
77605+ void *b = NULL;
77606
77607 flags &= gfp_allowed_mask;
77608
77609 lockdep_trace_alloc(flags);
77610
77611+#ifdef CONFIG_PAX_USERCOPY_SLABS
77612+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
77613+#else
77614 if (c->size < PAGE_SIZE) {
77615 b = slob_alloc(c->size, flags, c->align, node);
77616 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77617 SLOB_UNITS(c->size) * SLOB_UNIT,
77618 flags, node);
77619 } else {
77620- b = slob_new_pages(flags, get_order(c->size), node);
77621+ struct page *sp;
77622+
77623+ sp = slob_new_pages(flags, get_order(c->size), node);
77624+ if (sp) {
77625+ b = page_address(sp);
77626+ sp->private = c->size;
77627+ }
77628 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
77629 PAGE_SIZE << get_order(c->size),
77630 flags, node);
77631 }
77632+#endif
77633
77634 if (c->ctor)
77635 c->ctor(b);
77636@@ -578,10 +679,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
77637
77638 static void __kmem_cache_free(void *b, int size)
77639 {
77640- if (size < PAGE_SIZE)
77641+ struct page *sp;
77642+
77643+ sp = virt_to_page(b);
77644+ BUG_ON(!PageSlab(sp));
77645+ if (!sp->private)
77646 slob_free(b, size);
77647 else
77648- slob_free_pages(b, get_order(size));
77649+ slob_free_pages(sp, get_order(size));
77650 }
77651
77652 static void kmem_rcu_free(struct rcu_head *head)
77653@@ -594,17 +699,31 @@ static void kmem_rcu_free(struct rcu_head *head)
77654
77655 void kmem_cache_free(struct kmem_cache *c, void *b)
77656 {
77657+ int size = c->size;
77658+
77659+#ifdef CONFIG_PAX_USERCOPY_SLABS
77660+ if (size + c->align < PAGE_SIZE) {
77661+ size += c->align;
77662+ b -= c->align;
77663+ }
77664+#endif
77665+
77666 kmemleak_free_recursive(b, c->flags);
77667 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
77668 struct slob_rcu *slob_rcu;
77669- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
77670- slob_rcu->size = c->size;
77671+ slob_rcu = b + (size - sizeof(struct slob_rcu));
77672+ slob_rcu->size = size;
77673 call_rcu(&slob_rcu->head, kmem_rcu_free);
77674 } else {
77675- __kmem_cache_free(b, c->size);
77676+ __kmem_cache_free(b, size);
77677 }
77678
77679+#ifdef CONFIG_PAX_USERCOPY_SLABS
77680+ trace_kfree(_RET_IP_, b);
77681+#else
77682 trace_kmem_cache_free(_RET_IP_, b);
77683+#endif
77684+
77685 }
77686 EXPORT_SYMBOL(kmem_cache_free);
77687
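
The slob.c rework replaces the single "unsigned int size" header with two slob_t units carried in the alignment slack in front of the returned pointer: m[0].units records the request size and m[1].units the alignment, which is what lets the PAX_USERCOPY walker later recover exact object bounds. The BUILD_BUG_ONs guarantee the slack is wide enough for both units. Schematically, with slob_t reduced to an int-sized unit (an assumption of this sketch):

/* Two header units live in the alignment gap before each kmalloc object. */
struct slob_hdr {
    int size;   /* m[0].units: requested size */
    int align;  /* m[1].units: alignment, i.e. header-to-object gap */
};

static void *hdr_to_obj(struct slob_hdr *h)
{
    return (char *)h + h->align;
}

static struct slob_hdr *obj_to_hdr(void *obj, int align)
{
    return (struct slob_hdr *)((char *)obj - align);
}
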
77688diff --git a/mm/slub.c b/mm/slub.c
77689index 321afab..9595170 100644
77690--- a/mm/slub.c
77691+++ b/mm/slub.c
77692@@ -201,7 +201,7 @@ struct track {
77693
77694 enum track_item { TRACK_ALLOC, TRACK_FREE };
77695
77696-#ifdef CONFIG_SYSFS
77697+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77698 static int sysfs_slab_add(struct kmem_cache *);
77699 static int sysfs_slab_alias(struct kmem_cache *, const char *);
77700 static void sysfs_slab_remove(struct kmem_cache *);
77701@@ -521,7 +521,7 @@ static void print_track(const char *s, struct track *t)
77702 if (!t->addr)
77703 return;
77704
77705- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
77706+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
77707 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
77708 #ifdef CONFIG_STACKTRACE
77709 {
77710@@ -2623,6 +2623,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
77711
77712 page = virt_to_head_page(x);
77713
77714+ BUG_ON(!PageSlab(page));
77715+
77716 if (kmem_cache_debug(s) && page->slab != s) {
77717 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
77718 " is from %s\n", page->slab->name, s->name);
77719@@ -2663,7 +2665,7 @@ static int slub_min_objects;
77720 * Merge control. If this is set then no merging of slab caches will occur.
77721 * (Could be removed. This was introduced to pacify the merge skeptics.)
77722 */
77723-static int slub_nomerge;
77724+static int slub_nomerge = 1;
77725
77726 /*
77727 * Calculate the order of allocation given an slab object size.
77728@@ -3225,6 +3227,10 @@ EXPORT_SYMBOL(kmalloc_caches);
77729 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
77730 #endif
77731
77732+#ifdef CONFIG_PAX_USERCOPY_SLABS
77733+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
77734+#endif
77735+
77736 static int __init setup_slub_min_order(char *str)
77737 {
77738 get_option(&str, &slub_min_order);
77739@@ -3279,7 +3285,7 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
77740 if (kmem_cache_open(s, flags))
77741 goto panic;
77742
77743- s->refcount = 1;
77744+ atomic_set(&s->refcount, 1);
77745 list_add(&s->list, &slab_caches);
77746 return s;
77747
77748@@ -3343,6 +3349,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
77749 return kmalloc_dma_caches[index];
77750
77751 #endif
77752+
77753+#ifdef CONFIG_PAX_USERCOPY_SLABS
77754+ if (flags & SLAB_USERCOPY)
77755+ return kmalloc_usercopy_caches[index];
77756+
77757+#endif
77758+
77759 return kmalloc_caches[index];
77760 }
77761
77762@@ -3411,6 +3424,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
77763 EXPORT_SYMBOL(__kmalloc_node);
77764 #endif
77765
77766+bool is_usercopy_object(const void *ptr)
77767+{
77768+ struct page *page;
77769+ struct kmem_cache *s;
77770+
77771+ if (ZERO_OR_NULL_PTR(ptr))
77772+ return false;
77773+
77774+ if (!slab_is_available())
77775+ return false;
77776+
77777+ if (!virt_addr_valid(ptr))
77778+ return false;
77779+
77780+ page = virt_to_head_page(ptr);
77781+
77782+ if (!PageSlab(page))
77783+ return false;
77784+
77785+ s = page->slab;
77786+ return s->flags & SLAB_USERCOPY;
77787+}
77788+
77789+#ifdef CONFIG_PAX_USERCOPY
77790+const char *check_heap_object(const void *ptr, unsigned long n)
77791+{
77792+ struct page *page;
77793+ struct kmem_cache *s;
77794+ unsigned long offset;
77795+
77796+ if (ZERO_OR_NULL_PTR(ptr))
77797+ return "<null>";
77798+
77799+ if (!virt_addr_valid(ptr))
77800+ return NULL;
77801+
77802+ page = virt_to_head_page(ptr);
77803+
77804+ if (!PageSlab(page))
77805+ return NULL;
77806+
77807+ s = page->slab;
77808+ if (!(s->flags & SLAB_USERCOPY))
77809+ return s->name;
77810+
77811+ offset = (ptr - page_address(page)) % s->size;
77812+ if (offset <= s->object_size && n <= s->object_size - offset)
77813+ return NULL;
77814+
77815+ return s->name;
77816+}
77817+#endif
77818+
77819 size_t ksize(const void *object)
77820 {
77821 struct page *page;
77822@@ -3685,7 +3751,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
77823 int node;
77824
77825 list_add(&s->list, &slab_caches);
77826- s->refcount = -1;
77827+ atomic_set(&s->refcount, -1);
77828
77829 for_each_node_state(node, N_NORMAL_MEMORY) {
77830 struct kmem_cache_node *n = get_node(s, node);
77831@@ -3808,17 +3874,17 @@ void __init kmem_cache_init(void)
77832
77833 /* Caches that are not of the two-to-the-power-of size */
77834 if (KMALLOC_MIN_SIZE <= 32) {
77835- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
77836+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
77837 caches++;
77838 }
77839
77840 if (KMALLOC_MIN_SIZE <= 64) {
77841- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
77842+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
77843 caches++;
77844 }
77845
77846 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
77847- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
77848+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
77849 caches++;
77850 }
77851
77852@@ -3860,6 +3926,22 @@ void __init kmem_cache_init(void)
77853 }
77854 }
77855 #endif
77856+
77857+#ifdef CONFIG_PAX_USERCOPY_SLABS
77858+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
77859+ struct kmem_cache *s = kmalloc_caches[i];
77860+
77861+ if (s && s->size) {
77862+ char *name = kasprintf(GFP_NOWAIT,
77863+ "usercopy-kmalloc-%d", s->object_size);
77864+
77865+ BUG_ON(!name);
77866+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
77867+ s->object_size, SLAB_USERCOPY);
77868+ }
77869+ }
77870+#endif
77871+
77872 printk(KERN_INFO
77873 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
77874 " CPUs=%d, Nodes=%d\n",
77875@@ -3886,7 +3968,7 @@ static int slab_unmergeable(struct kmem_cache *s)
77876 /*
77877 * We may have set a slab to be unmergeable during bootstrap.
77878 */
77879- if (s->refcount < 0)
77880+ if (atomic_read(&s->refcount) < 0)
77881 return 1;
77882
77883 return 0;
77884@@ -3940,7 +4022,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77885
77886 s = find_mergeable(size, align, flags, name, ctor);
77887 if (s) {
77888- s->refcount++;
77889+ atomic_inc(&s->refcount);
77890 /*
77891 * Adjust the object sizes so that we clear
77892 * the complete object on kzalloc.
77893@@ -3949,7 +4031,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
77894 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
77895
77896 if (sysfs_slab_alias(s, name)) {
77897- s->refcount--;
77898+ atomic_dec(&s->refcount);
77899 s = NULL;
77900 }
77901 }
77902@@ -4064,7 +4146,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
77903 }
77904 #endif
77905
77906-#ifdef CONFIG_SYSFS
77907+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77908 static int count_inuse(struct page *page)
77909 {
77910 return page->inuse;
77911@@ -4451,12 +4533,12 @@ static void resiliency_test(void)
77912 validate_slab_cache(kmalloc_caches[9]);
77913 }
77914 #else
77915-#ifdef CONFIG_SYSFS
77916+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77917 static void resiliency_test(void) {};
77918 #endif
77919 #endif
77920
77921-#ifdef CONFIG_SYSFS
77922+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77923 enum slab_stat_type {
77924 SL_ALL, /* All slabs */
77925 SL_PARTIAL, /* Only partially allocated slabs */
77926@@ -4700,7 +4782,7 @@ SLAB_ATTR_RO(ctor);
77927
77928 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
77929 {
77930- return sprintf(buf, "%d\n", s->refcount - 1);
77931+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
77932 }
77933 SLAB_ATTR_RO(aliases);
77934
77935@@ -5262,6 +5344,7 @@ static char *create_unique_id(struct kmem_cache *s)
77936 return name;
77937 }
77938
77939+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77940 static int sysfs_slab_add(struct kmem_cache *s)
77941 {
77942 int err;
77943@@ -5324,6 +5407,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
77944 kobject_del(&s->kobj);
77945 kobject_put(&s->kobj);
77946 }
77947+#endif
77948
77949 /*
77950 * Need to buffer aliases during bootup until sysfs becomes
77951@@ -5337,6 +5421,7 @@ struct saved_alias {
77952
77953 static struct saved_alias *alias_list;
77954
77955+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
77956 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77957 {
77958 struct saved_alias *al;
77959@@ -5359,6 +5444,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
77960 alias_list = al;
77961 return 0;
77962 }
77963+#endif
77964
77965 static int __init slab_sysfs_init(void)
77966 {
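
The SLUB variant of check_heap_object() exploits the allocator's regular layout: objects are placed every s->size bytes from the slab base, so the offset of an arbitrary pointer inside its containing object is a plain remainder, after which the same subtraction-form bounds test as in slab.c applies. A sketch of the offset step:

#include <stddef.h>

/* Offset of ptr inside its containing object, given that objects are
 * laid out back-to-back every obj_stride bytes from the slab base. */
static size_t slub_obj_offset(const char *ptr, const char *slab_base,
                              size_t obj_stride)
{
    return (size_t)(ptr - slab_base) % obj_stride;
}
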
77967diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
77968index 1b7e22a..3fcd4f3 100644
77969--- a/mm/sparse-vmemmap.c
77970+++ b/mm/sparse-vmemmap.c
77971@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
77972 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77973 if (!p)
77974 return NULL;
77975- pud_populate(&init_mm, pud, p);
77976+ pud_populate_kernel(&init_mm, pud, p);
77977 }
77978 return pud;
77979 }
77980@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
77981 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
77982 if (!p)
77983 return NULL;
77984- pgd_populate(&init_mm, pgd, p);
77985+ pgd_populate_kernel(&init_mm, pgd, p);
77986 }
77987 return pgd;
77988 }
77989diff --git a/mm/swap.c b/mm/swap.c
77990index 6310dc2..3662b3f 100644
77991--- a/mm/swap.c
77992+++ b/mm/swap.c
77993@@ -30,6 +30,7 @@
77994 #include <linux/backing-dev.h>
77995 #include <linux/memcontrol.h>
77996 #include <linux/gfp.h>
77997+#include <linux/hugetlb.h>
77998
77999 #include "internal.h"
78000
78001@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
78002
78003 __page_cache_release(page);
78004 dtor = get_compound_page_dtor(page);
78005+ if (!PageHuge(page))
78006+ BUG_ON(dtor != free_compound_page);
78007 (*dtor)(page);
78008 }
78009
78010diff --git a/mm/swapfile.c b/mm/swapfile.c
78011index f91a255..9dcac21 100644
78012--- a/mm/swapfile.c
78013+++ b/mm/swapfile.c
78014@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
78015
78016 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
78017 /* Activity counter to indicate that a swapon or swapoff has occurred */
78018-static atomic_t proc_poll_event = ATOMIC_INIT(0);
78019+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
78020
78021 static inline unsigned char swap_count(unsigned char ent)
78022 {
78023@@ -1601,7 +1601,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
78024 }
78025 filp_close(swap_file, NULL);
78026 err = 0;
78027- atomic_inc(&proc_poll_event);
78028+ atomic_inc_unchecked(&proc_poll_event);
78029 wake_up_interruptible(&proc_poll_wait);
78030
78031 out_dput:
78032@@ -1618,8 +1618,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
78033
78034 poll_wait(file, &proc_poll_wait, wait);
78035
78036- if (seq->poll_event != atomic_read(&proc_poll_event)) {
78037- seq->poll_event = atomic_read(&proc_poll_event);
78038+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
78039+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
78040 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
78041 }
78042
78043@@ -1717,7 +1717,7 @@ static int swaps_open(struct inode *inode, struct file *file)
78044 return ret;
78045
78046 seq = file->private_data;
78047- seq->poll_event = atomic_read(&proc_poll_event);
78048+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
78049 return 0;
78050 }
78051
78052@@ -2059,7 +2059,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
78053 (frontswap_map) ? "FS" : "");
78054
78055 mutex_unlock(&swapon_mutex);
78056- atomic_inc(&proc_poll_event);
78057+ atomic_inc_unchecked(&proc_poll_event);
78058 wake_up_interruptible(&proc_poll_wait);
78059
78060 if (S_ISREG(inode->i_mode))
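
proc_poll_event above is typical of the many atomic_t to atomic_unchecked_t conversions in this patch (the vmstat, atm, and batman-adv hunks below follow the same pattern): PAX_REFCOUNT traps on overflow of ordinary atomic_t, so counters that are mere statistics or sequence numbers, where wrapping is harmless, are moved to the unchecked type. Modeled with C11 atomics, with the type name invented for illustration:

#include <stdatomic.h>

/* Stand-in for atomic_unchecked_t: an ordinary wrapping counter that no
 * overflow detector should ever complain about. */
typedef struct { atomic_uint v; } atomic_unchecked_model;

static unsigned int inc_unchecked(atomic_unchecked_model *a)
{
    return atomic_fetch_add(&a->v, 1) + 1; /* unsigned wrap is defined */
}
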
78061diff --git a/mm/util.c b/mm/util.c
78062index dc3036c..b6c7c9d 100644
78063--- a/mm/util.c
78064+++ b/mm/util.c
78065@@ -292,6 +292,12 @@ done:
78066 void arch_pick_mmap_layout(struct mm_struct *mm)
78067 {
78068 mm->mmap_base = TASK_UNMAPPED_BASE;
78069+
78070+#ifdef CONFIG_PAX_RANDMMAP
78071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78072+ mm->mmap_base += mm->delta_mmap;
78073+#endif
78074+
78075 mm->get_unmapped_area = arch_get_unmapped_area;
78076 mm->unmap_area = arch_unmap_area;
78077 }
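
The util.c hunk is the legacy-layout half of PAX_RANDMMAP: a per-mm random delta chosen at exec time is added to TASK_UNMAPPED_BASE, so each process starts its mmap area somewhere different. Reduced to the arithmetic (names illustrative):

/* mmap base selection with the PAX_RANDMMAP shift; delta_mmap is the
 * per-process random offset picked when the binary was loaded. */
static unsigned long pick_mmap_base(unsigned long unmapped_base,
                                    int randmmap_enabled,
                                    unsigned long delta_mmap)
{
    return randmmap_enabled ? unmapped_base + delta_mmap : unmapped_base;
}
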
78078diff --git a/mm/vmalloc.c b/mm/vmalloc.c
78079index 78e0830..bc6bbd8 100644
78080--- a/mm/vmalloc.c
78081+++ b/mm/vmalloc.c
78082@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
78083
78084 pte = pte_offset_kernel(pmd, addr);
78085 do {
78086- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78087- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78088+
78089+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78090+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
78091+ BUG_ON(!pte_exec(*pte));
78092+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
78093+ continue;
78094+ }
78095+#endif
78096+
78097+ {
78098+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
78099+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
78100+ }
78101 } while (pte++, addr += PAGE_SIZE, addr != end);
78102 }
78103
78104@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
78105 pte = pte_alloc_kernel(pmd, addr);
78106 if (!pte)
78107 return -ENOMEM;
78108+
78109+ pax_open_kernel();
78110 do {
78111 struct page *page = pages[*nr];
78112
78113- if (WARN_ON(!pte_none(*pte)))
78114+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78115+ if (pgprot_val(prot) & _PAGE_NX)
78116+#endif
78117+
78118+ if (!pte_none(*pte)) {
78119+ pax_close_kernel();
78120+ WARN_ON(1);
78121 return -EBUSY;
78122- if (WARN_ON(!page))
78123+ }
78124+ if (!page) {
78125+ pax_close_kernel();
78126+ WARN_ON(1);
78127 return -ENOMEM;
78128+ }
78129 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
78130 (*nr)++;
78131 } while (pte++, addr += PAGE_SIZE, addr != end);
78132+ pax_close_kernel();
78133 return 0;
78134 }
78135
78136@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
78137 pmd_t *pmd;
78138 unsigned long next;
78139
78140- pmd = pmd_alloc(&init_mm, pud, addr);
78141+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
78142 if (!pmd)
78143 return -ENOMEM;
78144 do {
78145@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
78146 pud_t *pud;
78147 unsigned long next;
78148
78149- pud = pud_alloc(&init_mm, pgd, addr);
78150+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
78151 if (!pud)
78152 return -ENOMEM;
78153 do {
78154@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
78155 * and fall back on vmalloc() if that fails. Others
78156 * just put it in the vmalloc space.
78157 */
78158-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
78159+#ifdef CONFIG_MODULES
78160+#ifdef MODULES_VADDR
78161 unsigned long addr = (unsigned long)x;
78162 if (addr >= MODULES_VADDR && addr < MODULES_END)
78163 return 1;
78164 #endif
78165+
78166+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
78167+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
78168+ return 1;
78169+#endif
78170+
78171+#endif
78172+
78173 return is_vmalloc_addr(x);
78174 }
78175
78176@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
78177
78178 if (!pgd_none(*pgd)) {
78179 pud_t *pud = pud_offset(pgd, addr);
78180+#ifdef CONFIG_X86
78181+ if (!pud_large(*pud))
78182+#endif
78183 if (!pud_none(*pud)) {
78184 pmd_t *pmd = pmd_offset(pud, addr);
78185+#ifdef CONFIG_X86
78186+ if (!pmd_large(*pmd))
78187+#endif
78188 if (!pmd_none(*pmd)) {
78189 pte_t *ptep, pte;
78190
78191@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
78192 * Allocate a region of KVA of the specified size and alignment, within the
78193 * vstart and vend.
78194 */
78195-static struct vmap_area *alloc_vmap_area(unsigned long size,
78196+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
78197 unsigned long align,
78198 unsigned long vstart, unsigned long vend,
78199 int node, gfp_t gfp_mask)
78200@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
78201 struct vm_struct *area;
78202
78203 BUG_ON(in_interrupt());
78204+
78205+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78206+ if (flags & VM_KERNEXEC) {
78207+ if (start != VMALLOC_START || end != VMALLOC_END)
78208+ return NULL;
78209+ start = (unsigned long)MODULES_EXEC_VADDR;
78210+ end = (unsigned long)MODULES_EXEC_END;
78211+ }
78212+#endif
78213+
78214 if (flags & VM_IOREMAP) {
78215 int bit = fls(size);
78216
78217@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
78218 if (count > totalram_pages)
78219 return NULL;
78220
78221+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78222+ if (!(pgprot_val(prot) & _PAGE_NX))
78223+ flags |= VM_KERNEXEC;
78224+#endif
78225+
78226 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
78227 __builtin_return_address(0));
78228 if (!area)
78229@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
78230 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
78231 goto fail;
78232
78233+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
78234+ if (!(pgprot_val(prot) & _PAGE_NX))
78235+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
78236+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
78237+ else
78238+#endif
78239+
78240 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
78241 start, end, node, gfp_mask, caller);
78242 if (!area)
78243@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
78244 * For tight control over page level allocator and protection flags
78245 * use __vmalloc() instead.
78246 */
78247-
78248 void *vmalloc_exec(unsigned long size)
78249 {
78250- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
78251+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
78252 -1, __builtin_return_address(0));
78253 }
78254
78255@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
78256 unsigned long uaddr = vma->vm_start;
78257 unsigned long usize = vma->vm_end - vma->vm_start;
78258
78259+ BUG_ON(vma->vm_mirror);
78260+
78261 if ((PAGE_SIZE-1) & (unsigned long)addr)
78262 return -EINVAL;
78263
78264@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
78265 v->addr, v->addr + v->size, v->size);
78266
78267 if (v->caller)
78268+#ifdef CONFIG_GRKERNSEC_HIDESYM
78269+ seq_printf(m, " %pK", v->caller);
78270+#else
78271 seq_printf(m, " %pS", v->caller);
78272+#endif
78273
78274 if (v->nr_pages)
78275 seq_printf(m, " pages=%d", v->nr_pages);
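
The vmalloc.c hunks route every executable mapping request through a dedicated window: if the requested protections lack _PAGE_NX, the allocation is tagged VM_KERNEXEC and forced into MODULES_EXEC_VADDR..MODULES_EXEC_END (grsecurity-specific symbols), keeping the rest of vmalloc space non-executable. The routing decision itself is just a bit test; the bit position below is illustrative, not x86's actual NX bit layout:

#include <stdbool.h>

/* Executable-request detection, as in vmap()/__vmalloc_node_range():
 * no NX bit in the protection value means the mapping must come from
 * the dedicated executable region. */
#define PAGE_NX (1ULL << 63)

static bool needs_exec_window(unsigned long long prot_val)
{
    return !(prot_val & PAGE_NX);
}
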
78276diff --git a/mm/vmstat.c b/mm/vmstat.c
78277index c737057..a49753a 100644
78278--- a/mm/vmstat.c
78279+++ b/mm/vmstat.c
78280@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
78281 *
78282 * vm_stat contains the global counters
78283 */
78284-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
78285+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
78286 EXPORT_SYMBOL(vm_stat);
78287
78288 #ifdef CONFIG_SMP
78289@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
78290 v = p->vm_stat_diff[i];
78291 p->vm_stat_diff[i] = 0;
78292 local_irq_restore(flags);
78293- atomic_long_add(v, &zone->vm_stat[i]);
78294+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
78295 global_diff[i] += v;
78296 #ifdef CONFIG_NUMA
78297 /* 3 seconds idle till flush */
78298@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
78299
78300 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
78301 if (global_diff[i])
78302- atomic_long_add(global_diff[i], &vm_stat[i]);
78303+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
78304 }
78305
78306 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
78307@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
78308 if (pset->vm_stat_diff[i]) {
78309 int v = pset->vm_stat_diff[i];
78310 pset->vm_stat_diff[i] = 0;
78311- atomic_long_add(v, &zone->vm_stat[i]);
78312- atomic_long_add(v, &vm_stat[i]);
78313+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
78314+ atomic_long_add_unchecked(v, &vm_stat[i]);
78315 }
78316 }
78317 #endif
78318@@ -1224,10 +1224,20 @@ static int __init setup_vmstat(void)
78319 start_cpu_timer(cpu);
78320 #endif
78321 #ifdef CONFIG_PROC_FS
78322- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
78323- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
78324- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
78325- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
78326+ {
78327+ mode_t gr_mode = S_IRUGO;
78328+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78329+ gr_mode = S_IRUSR;
78330+#endif
78331+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
78332+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
78333+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78334+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
78335+#else
78336+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
78337+#endif
78338+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
78339+ }
78340 #endif
78341 return 0;
78342 }
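
The vmstat.c hunk shows the recurring GRKERNSEC_PROC_ADD idiom: /proc entries that default to world-readable drop to root-only when the option is set, with GRKERNSEC_PROC_USERGROUP optionally re-granting group read on vmstat alone. As a mode computation (a sketch; the function name is invented):

#include <sys/stat.h>

/* Mode selection mirroring setup_vmstat(): base mode is world-readable
 * unless PROC_ADD restricts it to the owner; the USERGROUP exception
 * re-adds group read for the one file (vmstat) the patch singles out. */
static mode_t proc_entry_mode(int proc_add, int usergroup_exception)
{
    mode_t mode = proc_add ? S_IRUSR : (S_IRUSR | S_IRGRP | S_IROTH);

    if (usergroup_exception)
        mode |= S_IRGRP;
    return mode;
}
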
78343diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
78344index ee07072..593e3fd 100644
78345--- a/net/8021q/vlan.c
78346+++ b/net/8021q/vlan.c
78347@@ -484,7 +484,7 @@ out:
78348 return NOTIFY_DONE;
78349 }
78350
78351-static struct notifier_block vlan_notifier_block __read_mostly = {
78352+static struct notifier_block vlan_notifier_block = {
78353 .notifier_call = vlan_device_event,
78354 };
78355
78356@@ -559,8 +559,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
78357 err = -EPERM;
78358 if (!capable(CAP_NET_ADMIN))
78359 break;
78360- if ((args.u.name_type >= 0) &&
78361- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
78362+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
78363 struct vlan_net *vn;
78364
78365 vn = net_generic(net, vlan_net_id);
78366diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
78367index 02efb25..41541a9 100644
78368--- a/net/9p/trans_fd.c
78369+++ b/net/9p/trans_fd.c
78370@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
78371 oldfs = get_fs();
78372 set_fs(get_ds());
78373 /* The cast to a user pointer is valid due to the set_fs() */
78374- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
78375+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
78376 set_fs(oldfs);
78377
78378 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
78379diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
78380index 876fbe8..8bbea9f 100644
78381--- a/net/atm/atm_misc.c
78382+++ b/net/atm/atm_misc.c
78383@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
78384 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
78385 return 1;
78386 atm_return(vcc, truesize);
78387- atomic_inc(&vcc->stats->rx_drop);
78388+ atomic_inc_unchecked(&vcc->stats->rx_drop);
78389 return 0;
78390 }
78391 EXPORT_SYMBOL(atm_charge);
78392@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
78393 }
78394 }
78395 atm_return(vcc, guess);
78396- atomic_inc(&vcc->stats->rx_drop);
78397+ atomic_inc_unchecked(&vcc->stats->rx_drop);
78398 return NULL;
78399 }
78400 EXPORT_SYMBOL(atm_alloc_charge);
78401@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
78402
78403 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78404 {
78405-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78406+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78407 __SONET_ITEMS
78408 #undef __HANDLE_ITEM
78409 }
78410@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
78411
78412 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
78413 {
78414-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78415+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
78416 __SONET_ITEMS
78417 #undef __HANDLE_ITEM
78418 }
78419diff --git a/net/atm/lec.h b/net/atm/lec.h
78420index a86aff9..3a0d6f6 100644
78421--- a/net/atm/lec.h
78422+++ b/net/atm/lec.h
78423@@ -48,7 +48,7 @@ struct lane2_ops {
78424 const u8 *tlvs, u32 sizeoftlvs);
78425 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
78426 const u8 *tlvs, u32 sizeoftlvs);
78427-};
78428+} __no_const;
78429
78430 /*
78431 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
78432diff --git a/net/atm/proc.c b/net/atm/proc.c
78433index 0d020de..011c7bb 100644
78434--- a/net/atm/proc.c
78435+++ b/net/atm/proc.c
78436@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
78437 const struct k_atm_aal_stats *stats)
78438 {
78439 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
78440- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
78441- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
78442- atomic_read(&stats->rx_drop));
78443+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
78444+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
78445+ atomic_read_unchecked(&stats->rx_drop));
78446 }
78447
78448 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
78449diff --git a/net/atm/resources.c b/net/atm/resources.c
78450index 0447d5d..3cf4728 100644
78451--- a/net/atm/resources.c
78452+++ b/net/atm/resources.c
78453@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
78454 static void copy_aal_stats(struct k_atm_aal_stats *from,
78455 struct atm_aal_stats *to)
78456 {
78457-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
78458+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
78459 __AAL_STAT_ITEMS
78460 #undef __HANDLE_ITEM
78461 }
78462@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
78463 static void subtract_aal_stats(struct k_atm_aal_stats *from,
78464 struct atm_aal_stats *to)
78465 {
78466-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
78467+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
78468 __AAL_STAT_ITEMS
78469 #undef __HANDLE_ITEM
78470 }
78471diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
78472index c6fcc76..1270d14 100644
78473--- a/net/batman-adv/bat_iv_ogm.c
78474+++ b/net/batman-adv/bat_iv_ogm.c
78475@@ -62,7 +62,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
78476
78477 /* randomize initial seqno to avoid collision */
78478 get_random_bytes(&random_seqno, sizeof(random_seqno));
78479- atomic_set(&hard_iface->seqno, random_seqno);
78480+ atomic_set_unchecked(&hard_iface->seqno, random_seqno);
78481
78482 hard_iface->packet_len = BATADV_OGM_HLEN;
78483 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
78484@@ -608,9 +608,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
78485 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
78486
78487 /* change sequence number to network order */
78488- seqno = (uint32_t)atomic_read(&hard_iface->seqno);
78489+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->seqno);
78490 batadv_ogm_packet->seqno = htonl(seqno);
78491- atomic_inc(&hard_iface->seqno);
78492+ atomic_inc_unchecked(&hard_iface->seqno);
78493
78494 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
78495 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
78496@@ -1015,7 +1015,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
78497 return;
78498
78499 /* could be changed by schedule_own_packet() */
78500- if_incoming_seqno = atomic_read(&if_incoming->seqno);
78501+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
78502
78503 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
78504 has_directlink_flag = 1;
78505diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
78506index d112fd6..686a447 100644
78507--- a/net/batman-adv/hard-interface.c
78508+++ b/net/batman-adv/hard-interface.c
78509@@ -327,7 +327,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
78510 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
78511 dev_add_pack(&hard_iface->batman_adv_ptype);
78512
78513- atomic_set(&hard_iface->frag_seqno, 1);
78514+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
78515 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
78516 hard_iface->net_dev->name);
78517
78518@@ -450,7 +450,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
78519 /* This can't be called via a bat_priv callback because
78520 * we have no bat_priv yet.
78521 */
78522- atomic_set(&hard_iface->seqno, 1);
78523+ atomic_set_unchecked(&hard_iface->seqno, 1);
78524 hard_iface->packet_buff = NULL;
78525
78526 return hard_iface;
78527diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
78528index ce0684a..4a0cbf1 100644
78529--- a/net/batman-adv/soft-interface.c
78530+++ b/net/batman-adv/soft-interface.c
78531@@ -234,7 +234,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
78532 primary_if->net_dev->dev_addr, ETH_ALEN);
78533
78534 /* set broadcast sequence number */
78535- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
78536+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
78537 bcast_packet->seqno = htonl(seqno);
78538
78539 batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
78540@@ -427,7 +427,7 @@ struct net_device *batadv_softif_create(const char *name)
78541 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
78542
78543 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
78544- atomic_set(&bat_priv->bcast_seqno, 1);
78545+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
78546 atomic_set(&bat_priv->tt.vn, 0);
78547 atomic_set(&bat_priv->tt.local_changes, 0);
78548 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
78549diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
78550index ac1e07a..4c846e2 100644
78551--- a/net/batman-adv/types.h
78552+++ b/net/batman-adv/types.h
78553@@ -33,8 +33,8 @@ struct batadv_hard_iface {
78554 int16_t if_num;
78555 char if_status;
78556 struct net_device *net_dev;
78557- atomic_t seqno;
78558- atomic_t frag_seqno;
78559+ atomic_unchecked_t seqno;
78560+ atomic_unchecked_t frag_seqno;
78561 unsigned char *packet_buff;
78562 int packet_len;
78563 struct kobject *hardif_obj;
78564@@ -244,7 +244,7 @@ struct batadv_priv {
78565 atomic_t orig_interval; /* uint */
78566 atomic_t hop_penalty; /* uint */
78567 atomic_t log_level; /* uint */
78568- atomic_t bcast_seqno;
78569+ atomic_unchecked_t bcast_seqno;
78570 atomic_t bcast_queue_left;
78571 atomic_t batman_queue_left;
78572 char num_ifaces;
78573diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
78574index f397232..3206a33 100644
78575--- a/net/batman-adv/unicast.c
78576+++ b/net/batman-adv/unicast.c
78577@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
78578 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
78579 frag2->flags = large_tail;
78580
78581- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
78582+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
78583 frag1->seqno = htons(seqno - 1);
78584 frag2->seqno = htons(seqno);
78585
78586diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
78587index 07f0739..3c42e34 100644
78588--- a/net/bluetooth/hci_sock.c
78589+++ b/net/bluetooth/hci_sock.c
78590@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
78591 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
78592 }
78593
78594- len = min_t(unsigned int, len, sizeof(uf));
78595+ len = min((size_t)len, sizeof(uf));
78596 if (copy_from_user(&uf, optval, len)) {
78597 err = -EFAULT;
78598 break;
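
The min_t(unsigned int, ...) → min((size_t)len, ...) rewrite above, which recurs in the l2cap and rfcomm setsockopt paths below (where `len` itself also becomes a size_t seeded from optlen), keeps the user-supplied length and the clamp in one unsigned type end to end; my reading, not stated in the patch, is that this serves the size_overflow plugin's value tracking as much as the clamp itself. A self-contained sketch of the clamping pattern, with invented names:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct hci_ufilter_demo { unsigned int type_mask; unsigned int event_mask[2]; };

/* Hypothetical helper mirroring the pattern: widen the signed optlen
 * to size_t once, clamp it to the destination size, and use the
 * clamped value for every later copy. */
static size_t clamp_optlen(int optlen, size_t dstsize)
{
        size_t len = (size_t)optlen;    /* a negative optlen becomes huge... */
        if (len > dstsize)              /* ...and is caught by the clamp     */
                len = dstsize;
        return len;
}

int main(void)
{
        struct hci_ufilter_demo uf;
        char userbuf[64] = { 0 };
        size_t len = clamp_optlen(-1, sizeof(uf));

        memcpy(&uf, userbuf, len);      /* stands in for copy_from_user() */
        printf("copied %zu of at most %zu bytes\n", len, sizeof(uf));
        return 0;
}
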
78599diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
78600index a91239d..d7ed533 100644
78601--- a/net/bluetooth/l2cap_core.c
78602+++ b/net/bluetooth/l2cap_core.c
78603@@ -3183,8 +3183,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
78604 break;
78605
78606 case L2CAP_CONF_RFC:
78607- if (olen == sizeof(rfc))
78608- memcpy(&rfc, (void *)val, olen);
78609+ if (olen != sizeof(rfc))
78610+ break;
78611+
78612+ memcpy(&rfc, (void *)val, olen);
78613
78614 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
78615 rfc.mode != chan->mode)
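
The L2CAP_CONF_RFC hunk above tightens option parsing: the original guarded only the memcpy, so a wrong-sized option fell through to code that then acted on an uninitialized `rfc`; the patch bails out of the case entirely. A sketch of the hardened TLV pattern (names invented):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct rfc_opt { uint8_t mode; uint8_t txwin; uint16_t mtu; };

/* Reject a TLV whose length does not match the expected structure,
 * instead of skipping only the copy and then using a stale local. */
static int parse_rfc(const uint8_t *val, size_t olen)
{
        struct rfc_opt rfc;

        if (olen != sizeof(rfc))
                return -1;              /* bail out: never touch rfc */

        memcpy(&rfc, val, olen);
        printf("mode=%u\n", rfc.mode);
        return 0;
}

int main(void)
{
        uint8_t good[4] = { 3, 8, 0xdc, 0x05 };

        printf("%d\n", parse_rfc(good, sizeof(good)));  /* 0: accepted  */
        printf("%d\n", parse_rfc(good, 2));             /* -1: too short */
        return 0;
}
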
78616diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
78617index 083f2bf..799f9448 100644
78618--- a/net/bluetooth/l2cap_sock.c
78619+++ b/net/bluetooth/l2cap_sock.c
78620@@ -471,7 +471,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78621 struct sock *sk = sock->sk;
78622 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
78623 struct l2cap_options opts;
78624- int len, err = 0;
78625+ int err = 0;
78626+ size_t len = optlen;
78627 u32 opt;
78628
78629 BT_DBG("sk %p", sk);
78630@@ -493,7 +494,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
78631 opts.max_tx = chan->max_tx;
78632 opts.txwin_size = chan->tx_win;
78633
78634- len = min_t(unsigned int, sizeof(opts), optlen);
78635+ len = min(sizeof(opts), len);
78636 if (copy_from_user((char *) &opts, optval, len)) {
78637 err = -EFAULT;
78638 break;
78639@@ -571,7 +572,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78640 struct bt_security sec;
78641 struct bt_power pwr;
78642 struct l2cap_conn *conn;
78643- int len, err = 0;
78644+ int err = 0;
78645+ size_t len = optlen;
78646 u32 opt;
78647
78648 BT_DBG("sk %p", sk);
78649@@ -594,7 +596,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78650
78651 sec.level = BT_SECURITY_LOW;
78652
78653- len = min_t(unsigned int, sizeof(sec), optlen);
78654+ len = min(sizeof(sec), len);
78655 if (copy_from_user((char *) &sec, optval, len)) {
78656 err = -EFAULT;
78657 break;
78658@@ -691,7 +693,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
78659
78660 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
78661
78662- len = min_t(unsigned int, sizeof(pwr), optlen);
78663+ len = min(sizeof(pwr), len);
78664 if (copy_from_user((char *) &pwr, optval, len)) {
78665 err = -EFAULT;
78666 break;
78667diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
78668index 868a909..d044bc3 100644
78669--- a/net/bluetooth/rfcomm/sock.c
78670+++ b/net/bluetooth/rfcomm/sock.c
78671@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78672 struct sock *sk = sock->sk;
78673 struct bt_security sec;
78674 int err = 0;
78675- size_t len;
78676+ size_t len = optlen;
78677 u32 opt;
78678
78679 BT_DBG("sk %p", sk);
78680@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
78681
78682 sec.level = BT_SECURITY_LOW;
78683
78684- len = min_t(unsigned int, sizeof(sec), optlen);
78685+ len = min(sizeof(sec), len);
78686 if (copy_from_user((char *) &sec, optval, len)) {
78687 err = -EFAULT;
78688 break;
78689diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
78690index ccc2487..921073d 100644
78691--- a/net/bluetooth/rfcomm/tty.c
78692+++ b/net/bluetooth/rfcomm/tty.c
78693@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
78694 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
78695
78696 spin_lock_irqsave(&dev->port.lock, flags);
78697- if (dev->port.count > 0) {
78698+ if (atomic_read(&dev->port.count) > 0) {
78699 spin_unlock_irqrestore(&dev->port.lock, flags);
78700 return;
78701 }
78702@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
78703 return -ENODEV;
78704
78705 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
78706- dev->channel, dev->port.count);
78707+ dev->channel, atomic_read(&dev->port.count));
78708
78709 spin_lock_irqsave(&dev->port.lock, flags);
78710- if (++dev->port.count > 1) {
78711+ if (atomic_inc_return(&dev->port.count) > 1) {
78712 spin_unlock_irqrestore(&dev->port.lock, flags);
78713 return 0;
78714 }
78715@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
78716 return;
78717
78718 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
78719- dev->port.count);
78720+ atomic_read(&dev->port.count));
78721
78722 spin_lock_irqsave(&dev->port.lock, flags);
78723- if (!--dev->port.count) {
78724+ if (!atomic_dec_return(&dev->port.count)) {
78725 spin_unlock_irqrestore(&dev->port.lock, flags);
78726 if (dev->tty_dev->parent)
78727 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
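
The rfcomm tty hunks switch `dev->port.count` to atomic operations so the inc-and-test in open and the dec-and-test in close cannot be torn; this presumably pairs with a matching atomic_t conversion of the tty_port count in the include/ part of this patch (not shown here). A C11 model of the first-open/last-close idiom:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;   /* stands in for dev->port.count */

static void port_open(void)
{
        /* mirrors: if (atomic_inc_return(&count) > 1) return; */
        if (atomic_fetch_add(&port_count, 1) + 1 > 1)
                return;                 /* already open: nothing to do */
        printf("first open: set up the device\n");
}

static void port_close(void)
{
        /* mirrors: if (!atomic_dec_return(&count)) tear down; */
        if (atomic_fetch_sub(&port_count, 1) - 1 != 0)
                return;                 /* still in use elsewhere */
        printf("last close: tear the device down\n");
}

int main(void)
{
        port_open();
        port_open();
        port_close();
        port_close();
        return 0;
}
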
78728diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
78729index 5fe2ff3..121d696 100644
78730--- a/net/bridge/netfilter/ebtables.c
78731+++ b/net/bridge/netfilter/ebtables.c
78732@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
78733 tmp.valid_hooks = t->table->valid_hooks;
78734 }
78735 mutex_unlock(&ebt_mutex);
78736- if (copy_to_user(user, &tmp, *len) != 0){
78737+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
78738 BUGPRINT("c2u Didn't work\n");
78739 ret = -EFAULT;
78740 break;
78741@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78742 goto out;
78743 tmp.valid_hooks = t->valid_hooks;
78744
78745- if (copy_to_user(user, &tmp, *len) != 0) {
78746+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78747 ret = -EFAULT;
78748 break;
78749 }
78750@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
78751 tmp.entries_size = t->table->entries_size;
78752 tmp.valid_hooks = t->table->valid_hooks;
78753
78754- if (copy_to_user(user, &tmp, *len) != 0) {
78755+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
78756 ret = -EFAULT;
78757 break;
78758 }
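
The ebtables hunks all add the same guard: `*len` comes from the user, and without the `*len > sizeof(tmp)` bound copy_to_user() could read past the on-stack `tmp` and leak kernel stack contents. A sketch of the guard (demo names; memcpy stands in for copy_to_user):

#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

struct info_demo { char name[32]; unsigned int entries; };

/* A user-controlled length must never exceed the size of the kernel
 * object it is copied from. */
static int copy_info(void *user, size_t ulen, const struct info_demo *tmp)
{
        if (ulen > sizeof(*tmp))        /* would leak stack past tmp */
                return -EFAULT;
        memcpy(user, tmp, ulen);
        return 0;
}

int main(void)
{
        struct info_demo tmp = { "filter", 4 };
        char ubuf[128];

        printf("ok:   %d\n", copy_info(ubuf, sizeof(tmp), &tmp));
        printf("leak: %d\n", copy_info(ubuf, sizeof(ubuf), &tmp));
        return 0;
}
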
78759diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
78760index 44f270f..1f5602d 100644
78761--- a/net/caif/cfctrl.c
78762+++ b/net/caif/cfctrl.c
78763@@ -10,6 +10,7 @@
78764 #include <linux/spinlock.h>
78765 #include <linux/slab.h>
78766 #include <linux/pkt_sched.h>
78767+#include <linux/sched.h>
78768 #include <net/caif/caif_layer.h>
78769 #include <net/caif/cfpkt.h>
78770 #include <net/caif/cfctrl.h>
78771@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
78772 memset(&dev_info, 0, sizeof(dev_info));
78773 dev_info.id = 0xff;
78774 cfsrvl_init(&this->serv, 0, &dev_info, false);
78775- atomic_set(&this->req_seq_no, 1);
78776- atomic_set(&this->rsp_seq_no, 1);
78777+ atomic_set_unchecked(&this->req_seq_no, 1);
78778+ atomic_set_unchecked(&this->rsp_seq_no, 1);
78779 this->serv.layer.receive = cfctrl_recv;
78780 sprintf(this->serv.layer.name, "ctrl");
78781 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
78782@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
78783 struct cfctrl_request_info *req)
78784 {
78785 spin_lock_bh(&ctrl->info_list_lock);
78786- atomic_inc(&ctrl->req_seq_no);
78787- req->sequence_no = atomic_read(&ctrl->req_seq_no);
78788+ atomic_inc_unchecked(&ctrl->req_seq_no);
78789+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
78790 list_add_tail(&req->list, &ctrl->list);
78791 spin_unlock_bh(&ctrl->info_list_lock);
78792 }
78793@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
78794 if (p != first)
78795 pr_warn("Requests are not received in order\n");
78796
78797- atomic_set(&ctrl->rsp_seq_no,
78798+ atomic_set_unchecked(&ctrl->rsp_seq_no,
78799 p->sequence_no);
78800 list_del(&p->list);
78801 goto out;
78802diff --git a/net/can/af_can.c b/net/can/af_can.c
78803index ddac1ee..3ee0a78 100644
78804--- a/net/can/af_can.c
78805+++ b/net/can/af_can.c
78806@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
78807 };
78808
78809 /* notifier block for netdevice event */
78810-static struct notifier_block can_netdev_notifier __read_mostly = {
78811+static struct notifier_block can_netdev_notifier = {
78812 .notifier_call = can_notifier,
78813 };
78814
78815diff --git a/net/can/gw.c b/net/can/gw.c
78816index 1f5c978..ef714c7 100644
78817--- a/net/can/gw.c
78818+++ b/net/can/gw.c
78819@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
78820 MODULE_ALIAS("can-gw");
78821
78822 static HLIST_HEAD(cgw_list);
78823-static struct notifier_block notifier;
78824
78825 static struct kmem_cache *cgw_cache __read_mostly;
78826
78827@@ -887,6 +886,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
78828 return err;
78829 }
78830
78831+static struct notifier_block notifier = {
78832+ .notifier_call = cgw_notifier
78833+};
78834+
78835 static __init int cgw_module_init(void)
78836 {
78837 printk(banner);
78838@@ -898,7 +901,6 @@ static __init int cgw_module_init(void)
78839 return -ENOMEM;
78840
78841 /* set notifier */
78842- notifier.notifier_call = cgw_notifier;
78843 register_netdevice_notifier(&notifier);
78844
78845 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
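
The can/gw.c hunk replaces a runtime `.notifier_call` assignment with a designated initializer at file scope, and the af_can.c hunk drops `__read_mostly` from a similar notifier_block; my assumption (the patch does not say) is that both serve grsecurity's constification, since a fully-initialized ops-like structure can then be placed in read-only memory, while `__read_mostly` would force it into a writable section. A sketch of the before/after shape:

#include <stdio.h>

struct notifier_block_demo {
        int (*notifier_call)(unsigned long event, void *ptr);
};

static int cgw_notifier_demo(unsigned long event, void *ptr)
{
        (void)ptr;
        printf("netdev event %lu\n", event);
        return 0;
}

/* Before: a mutable object patched at init time:
 *     static struct notifier_block_demo notifier;
 *     ... notifier.notifier_call = cgw_notifier_demo; ...
 * After: fully initialized at its definition, so it can be made
 * read-only (here via an explicit const; the plugin does it
 * implicitly). */
static const struct notifier_block_demo notifier = {
        .notifier_call = cgw_notifier_demo,
};

int main(void)
{
        return notifier.notifier_call(1 /* NETDEV_UP-ish */, NULL);
}
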
78846diff --git a/net/compat.c b/net/compat.c
78847index 79ae884..17c5c09 100644
78848--- a/net/compat.c
78849+++ b/net/compat.c
78850@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
78851 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
78852 __get_user(kmsg->msg_flags, &umsg->msg_flags))
78853 return -EFAULT;
78854- kmsg->msg_name = compat_ptr(tmp1);
78855- kmsg->msg_iov = compat_ptr(tmp2);
78856- kmsg->msg_control = compat_ptr(tmp3);
78857+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
78858+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
78859+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
78860 return 0;
78861 }
78862
78863@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78864
78865 if (kern_msg->msg_namelen) {
78866 if (mode == VERIFY_READ) {
78867- int err = move_addr_to_kernel(kern_msg->msg_name,
78868+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
78869 kern_msg->msg_namelen,
78870 kern_address);
78871 if (err < 0)
78872@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78873 kern_msg->msg_name = NULL;
78874
78875 tot_len = iov_from_user_compat_to_kern(kern_iov,
78876- (struct compat_iovec __user *)kern_msg->msg_iov,
78877+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
78878 kern_msg->msg_iovlen);
78879 if (tot_len >= 0)
78880 kern_msg->msg_iov = kern_iov;
78881@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
78882
78883 #define CMSG_COMPAT_FIRSTHDR(msg) \
78884 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
78885- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
78886+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
78887 (struct compat_cmsghdr __user *)NULL)
78888
78889 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
78890 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
78891 (ucmlen) <= (unsigned long) \
78892 ((mhdr)->msg_controllen - \
78893- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
78894+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
78895
78896 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
78897 struct compat_cmsghdr __user *cmsg, int cmsg_len)
78898 {
78899 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
78900- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
78901+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
78902 msg->msg_controllen)
78903 return NULL;
78904 return (struct compat_cmsghdr __user *)ptr;
78905@@ -219,7 +219,7 @@ Efault:
78906
78907 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
78908 {
78909- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78910+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78911 struct compat_cmsghdr cmhdr;
78912 struct compat_timeval ctv;
78913 struct compat_timespec cts[3];
78914@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
78915
78916 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
78917 {
78918- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
78919+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
78920 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
78921 int fdnum = scm->fp->count;
78922 struct file **fp = scm->fp->fp;
78923@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
78924 return -EFAULT;
78925 old_fs = get_fs();
78926 set_fs(KERNEL_DS);
78927- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
78928+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
78929 set_fs(old_fs);
78930
78931 return err;
78932@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
78933 len = sizeof(ktime);
78934 old_fs = get_fs();
78935 set_fs(KERNEL_DS);
78936- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
78937+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
78938 set_fs(old_fs);
78939
78940 if (!err) {
78941@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78942 case MCAST_JOIN_GROUP:
78943 case MCAST_LEAVE_GROUP:
78944 {
78945- struct compat_group_req __user *gr32 = (void *)optval;
78946+ struct compat_group_req __user *gr32 = (void __user *)optval;
78947 struct group_req __user *kgr =
78948 compat_alloc_user_space(sizeof(struct group_req));
78949 u32 interface;
78950@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78951 case MCAST_BLOCK_SOURCE:
78952 case MCAST_UNBLOCK_SOURCE:
78953 {
78954- struct compat_group_source_req __user *gsr32 = (void *)optval;
78955+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
78956 struct group_source_req __user *kgsr = compat_alloc_user_space(
78957 sizeof(struct group_source_req));
78958 u32 interface;
78959@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
78960 }
78961 case MCAST_MSFILTER:
78962 {
78963- struct compat_group_filter __user *gf32 = (void *)optval;
78964+ struct compat_group_filter __user *gf32 = (void __user *)optval;
78965 struct group_filter __user *kgf;
78966 u32 interface, fmode, numsrc;
78967
78968@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
78969 char __user *optval, int __user *optlen,
78970 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
78971 {
78972- struct compat_group_filter __user *gf32 = (void *)optval;
78973+ struct compat_group_filter __user *gf32 = (void __user *)optval;
78974 struct group_filter __user *kgf;
78975 int __user *koptlen;
78976 u32 interface, fmode, numsrc;
78977@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
78978
78979 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
78980 return -EINVAL;
78981- if (copy_from_user(a, args, nas[call]))
78982+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
78983 return -EFAULT;
78984 a0 = a[0];
78985 a1 = a[1];
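
Besides the `__force_user`/`__force_kernel` annotations (discussed after the ipconfig.c diff below), the compat_sys_socketcall hunk bounds the argument copy by `sizeof a`, so the `nas[]` size table itself can never overflow the on-stack argument array even if a future entry grows. A sketch with illustrative table values:

#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

/* Per-call argument byte counts, as in net/compat.c's nas[] table
 * (values here are illustrative, not the real table). */
static const unsigned char nas_demo[] = { 12, 12, 16, 8, 24 };

static int fetch_args(unsigned int call, const void *uargs)
{
        unsigned int a[6];

        if (call >= sizeof(nas_demo))
                return -EINVAL;
        /* the added guard: even if the table says more, never copy
         * more than the destination holds */
        if (nas_demo[call] > sizeof(a))
                return -EFAULT;
        memcpy(a, uargs, nas_demo[call]);  /* copy_from_user stand-in */
        return 0;
}

int main(void)
{
        unsigned int uargs[6] = { 0 };

        printf("%d\n", fetch_args(2, uargs));
        return 0;
}
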
78986diff --git a/net/core/datagram.c b/net/core/datagram.c
78987index 0337e2b..f82d4a3 100644
78988--- a/net/core/datagram.c
78989+++ b/net/core/datagram.c
78990@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
78991 skb_queue_walk(queue, skb) {
78992 *peeked = skb->peeked;
78993 if (flags & MSG_PEEK) {
78994- if (*off >= skb->len) {
78995+ if (*off >= skb->len && skb->len) {
78996 *off -= skb->len;
78997 continue;
78998 }
78999@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
79000 }
79001
79002 kfree_skb(skb);
79003- atomic_inc(&sk->sk_drops);
79004+ atomic_inc_unchecked(&sk->sk_drops);
79005 sk_mem_reclaim_partial(sk);
79006
79007 return err;
79008diff --git a/net/core/dev.c b/net/core/dev.c
79009index 3470794..eb5008c 100644
79010--- a/net/core/dev.c
79011+++ b/net/core/dev.c
79012@@ -1162,9 +1162,13 @@ void dev_load(struct net *net, const char *name)
79013 if (no_module && capable(CAP_NET_ADMIN))
79014 no_module = request_module("netdev-%s", name);
79015 if (no_module && capable(CAP_SYS_MODULE)) {
79016+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79017+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
79018+#else
79019 if (!request_module("%s", name))
79020 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
79021 name);
79022+#endif
79023 }
79024 }
79025 EXPORT_SYMBOL(dev_load);
79026@@ -1627,7 +1631,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
79027 {
79028 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
79029 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
79030- atomic_long_inc(&dev->rx_dropped);
79031+ atomic_long_inc_unchecked(&dev->rx_dropped);
79032 kfree_skb(skb);
79033 return NET_RX_DROP;
79034 }
79035@@ -1637,7 +1641,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
79036 nf_reset(skb);
79037
79038 if (unlikely(!is_skb_forwardable(dev, skb))) {
79039- atomic_long_inc(&dev->rx_dropped);
79040+ atomic_long_inc_unchecked(&dev->rx_dropped);
79041 kfree_skb(skb);
79042 return NET_RX_DROP;
79043 }
79044@@ -2093,7 +2097,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
79045
79046 struct dev_gso_cb {
79047 void (*destructor)(struct sk_buff *skb);
79048-};
79049+} __no_const;
79050
79051 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
79052
79053@@ -2955,7 +2959,7 @@ enqueue:
79054
79055 local_irq_restore(flags);
79056
79057- atomic_long_inc(&skb->dev->rx_dropped);
79058+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
79059 kfree_skb(skb);
79060 return NET_RX_DROP;
79061 }
79062@@ -3027,7 +3031,7 @@ int netif_rx_ni(struct sk_buff *skb)
79063 }
79064 EXPORT_SYMBOL(netif_rx_ni);
79065
79066-static void net_tx_action(struct softirq_action *h)
79067+static void net_tx_action(void)
79068 {
79069 struct softnet_data *sd = &__get_cpu_var(softnet_data);
79070
79071@@ -3358,7 +3362,7 @@ ncls:
79072 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
79073 } else {
79074 drop:
79075- atomic_long_inc(&skb->dev->rx_dropped);
79076+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
79077 kfree_skb(skb);
79078 /* Jamal, now you will not able to escape explaining
79079 * me how you were going to use this. :-)
79080@@ -3944,7 +3948,7 @@ void netif_napi_del(struct napi_struct *napi)
79081 }
79082 EXPORT_SYMBOL(netif_napi_del);
79083
79084-static void net_rx_action(struct softirq_action *h)
79085+static void net_rx_action(void)
79086 {
79087 struct softnet_data *sd = &__get_cpu_var(softnet_data);
79088 unsigned long time_limit = jiffies + 2;
79089@@ -4423,8 +4427,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
79090 else
79091 seq_printf(seq, "%04x", ntohs(pt->type));
79092
79093+#ifdef CONFIG_GRKERNSEC_HIDESYM
79094+ seq_printf(seq, " %-8s %p\n",
79095+ pt->dev ? pt->dev->name : "", NULL);
79096+#else
79097 seq_printf(seq, " %-8s %pF\n",
79098 pt->dev ? pt->dev->name : "", pt->func);
79099+#endif
79100 }
79101
79102 return 0;
79103@@ -5987,7 +5996,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
79104 } else {
79105 netdev_stats_to_stats64(storage, &dev->stats);
79106 }
79107- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
79108+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
79109 return storage;
79110 }
79111 EXPORT_SYMBOL(dev_get_stats);
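
The net/core/dev.c diff mixes several themes (MODHARDEN gating module auto-loading, `_unchecked` drop counters, softirq handlers losing their unused argument); the ptype_seq_show hunk is the CONFIG_GRKERNSEC_HIDESYM pattern: /proc output keeps its column layout but the handler address is censored, since %pF would hand a kernel text address to any reader. A userspace sketch (%p replaces the kernel-only %pF):

#include <stdio.h>

#define DEMO_HIDESYM 1                  /* models the Kconfig option */

static int packet_handler(void) { return 0; }

static void show_ptype(const char *devname, int (*func)(void))
{
#if DEMO_HIDESYM
        printf(" %-8s %p\n", devname, (void *)0);      /* censored        */
#else
        printf(" %-8s %p\n", devname, (void *)func);   /* leaks text addr */
#endif
}

int main(void)
{
        show_ptype("eth0", packet_handler);
        return 0;
}
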
79112diff --git a/net/core/flow.c b/net/core/flow.c
79113index e318c7e..168b1d0 100644
79114--- a/net/core/flow.c
79115+++ b/net/core/flow.c
79116@@ -61,7 +61,7 @@ struct flow_cache {
79117 struct timer_list rnd_timer;
79118 };
79119
79120-atomic_t flow_cache_genid = ATOMIC_INIT(0);
79121+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
79122 EXPORT_SYMBOL(flow_cache_genid);
79123 static struct flow_cache flow_cache_global;
79124 static struct kmem_cache *flow_cachep __read_mostly;
79125@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
79126
79127 static int flow_entry_valid(struct flow_cache_entry *fle)
79128 {
79129- if (atomic_read(&flow_cache_genid) != fle->genid)
79130+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
79131 return 0;
79132 if (fle->object && !fle->object->ops->check(fle->object))
79133 return 0;
79134@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
79135 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
79136 fcp->hash_count++;
79137 }
79138- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
79139+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
79140 flo = fle->object;
79141 if (!flo)
79142 goto ret_object;
79143@@ -280,7 +280,7 @@ nocache:
79144 }
79145 flo = resolver(net, key, family, dir, flo, ctx);
79146 if (fle) {
79147- fle->genid = atomic_read(&flow_cache_genid);
79148+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
79149 if (!IS_ERR(flo))
79150 fle->object = flo;
79151 else
79152diff --git a/net/core/iovec.c b/net/core/iovec.c
79153index 7e7aeb0..2a998cb 100644
79154--- a/net/core/iovec.c
79155+++ b/net/core/iovec.c
79156@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
79157 if (m->msg_namelen) {
79158 if (mode == VERIFY_READ) {
79159 void __user *namep;
79160- namep = (void __user __force *) m->msg_name;
79161+ namep = (void __force_user *) m->msg_name;
79162 err = move_addr_to_kernel(namep, m->msg_namelen,
79163 address);
79164 if (err < 0)
79165@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
79166 }
79167
79168 size = m->msg_iovlen * sizeof(struct iovec);
79169- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
79170+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
79171 return -EFAULT;
79172
79173 m->msg_iov = iov;
79174diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
79175index fad649a..df5891e 100644
79176--- a/net/core/rtnetlink.c
79177+++ b/net/core/rtnetlink.c
79178@@ -58,7 +58,7 @@ struct rtnl_link {
79179 rtnl_doit_func doit;
79180 rtnl_dumpit_func dumpit;
79181 rtnl_calcit_func calcit;
79182-};
79183+} __no_const;
79184
79185 static DEFINE_MUTEX(rtnl_mutex);
79186
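
`__no_const`, also applied to dev_gso_cb in the dev.c diff above, is the opt-out from constification: rtnl_link entries really are rewritten at runtime by rtnl_register() filling in doit/dumpit handlers, so they must stay writable even though they look like a function-pointer-only ops table (my reading; the marker itself is defined in the compiler-plugin part of this patch, not shown). A sketch where the attribute is a no-op:

#define __no_const /* plugin attribute; expands to nothing here */

struct rtnl_link_demo {
        int (*doit)(void);
        int (*dumpit)(void);
} __no_const;

static int my_doit(void) { return 0; }

static struct rtnl_link_demo table[4];

static void rtnl_register_demo(unsigned int proto, int (*doit)(void))
{
        table[proto].doit = doit;       /* runtime write: needs non-const */
}

int main(void)
{
        rtnl_register_demo(2, my_doit);
        return table[2].doit();
}
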
79187diff --git a/net/core/scm.c b/net/core/scm.c
79188index ab57084..0190c8f 100644
79189--- a/net/core/scm.c
79190+++ b/net/core/scm.c
79191@@ -223,7 +223,7 @@ EXPORT_SYMBOL(__scm_send);
79192 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79193 {
79194 struct cmsghdr __user *cm
79195- = (__force struct cmsghdr __user *)msg->msg_control;
79196+ = (struct cmsghdr __force_user *)msg->msg_control;
79197 struct cmsghdr cmhdr;
79198 int cmlen = CMSG_LEN(len);
79199 int err;
79200@@ -246,7 +246,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
79201 err = -EFAULT;
79202 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
79203 goto out;
79204- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
79205+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
79206 goto out;
79207 cmlen = CMSG_SPACE(len);
79208 if (msg->msg_controllen < cmlen)
79209@@ -262,7 +262,7 @@ EXPORT_SYMBOL(put_cmsg);
79210 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79211 {
79212 struct cmsghdr __user *cm
79213- = (__force struct cmsghdr __user*)msg->msg_control;
79214+ = (struct cmsghdr __force_user *)msg->msg_control;
79215
79216 int fdmax = 0;
79217 int fdnum = scm->fp->count;
79218@@ -282,7 +282,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
79219 if (fdnum < fdmax)
79220 fdmax = fdnum;
79221
79222- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
79223+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
79224 i++, cmfptr++)
79225 {
79226 struct socket *sock;
79227diff --git a/net/core/sock.c b/net/core/sock.c
79228index 8a146cf..ee08914d 100644
79229--- a/net/core/sock.c
79230+++ b/net/core/sock.c
79231@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79232 struct sk_buff_head *list = &sk->sk_receive_queue;
79233
79234 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
79235- atomic_inc(&sk->sk_drops);
79236+ atomic_inc_unchecked(&sk->sk_drops);
79237 trace_sock_rcvqueue_full(sk, skb);
79238 return -ENOMEM;
79239 }
79240@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79241 return err;
79242
79243 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
79244- atomic_inc(&sk->sk_drops);
79245+ atomic_inc_unchecked(&sk->sk_drops);
79246 return -ENOBUFS;
79247 }
79248
79249@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
79250 skb_dst_force(skb);
79251
79252 spin_lock_irqsave(&list->lock, flags);
79253- skb->dropcount = atomic_read(&sk->sk_drops);
79254+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
79255 __skb_queue_tail(list, skb);
79256 spin_unlock_irqrestore(&list->lock, flags);
79257
79258@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
79259 skb->dev = NULL;
79260
79261 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
79262- atomic_inc(&sk->sk_drops);
79263+ atomic_inc_unchecked(&sk->sk_drops);
79264 goto discard_and_relse;
79265 }
79266 if (nested)
79267@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
79268 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
79269 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
79270 bh_unlock_sock(sk);
79271- atomic_inc(&sk->sk_drops);
79272+ atomic_inc_unchecked(&sk->sk_drops);
79273 goto discard_and_relse;
79274 }
79275
79276@@ -875,12 +875,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79277 struct timeval tm;
79278 } v;
79279
79280- int lv = sizeof(int);
79281- int len;
79282+ unsigned int lv = sizeof(int);
79283+ unsigned int len;
79284
79285 if (get_user(len, optlen))
79286 return -EFAULT;
79287- if (len < 0)
79288+ if (len > INT_MAX)
79289 return -EINVAL;
79290
79291 memset(&v, 0, sizeof(v));
79292@@ -1028,11 +1028,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79293
79294 case SO_PEERNAME:
79295 {
79296- char address[128];
79297+ char address[_K_SS_MAXSIZE];
79298
79299 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
79300 return -ENOTCONN;
79301- if (lv < len)
79302+ if (lv < len || sizeof address < len)
79303 return -EINVAL;
79304 if (copy_to_user(optval, address, len))
79305 return -EFAULT;
79306@@ -1080,7 +1080,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
79307
79308 if (len > lv)
79309 len = lv;
79310- if (copy_to_user(optval, &v, len))
79311+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
79312 return -EFAULT;
79313 lenout:
79314 if (put_user(len, optlen))
79315@@ -2212,7 +2212,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
79316 */
79317 smp_wmb();
79318 atomic_set(&sk->sk_refcnt, 1);
79319- atomic_set(&sk->sk_drops, 0);
79320+ atomic_set_unchecked(&sk->sk_drops, 0);
79321 }
79322 EXPORT_SYMBOL(sock_init_data);
79323
79324diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
79325index 602cd63..05c6c60 100644
79326--- a/net/core/sock_diag.c
79327+++ b/net/core/sock_diag.c
79328@@ -15,20 +15,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
79329
79330 int sock_diag_check_cookie(void *sk, __u32 *cookie)
79331 {
79332+#ifndef CONFIG_GRKERNSEC_HIDESYM
79333 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
79334 cookie[1] != INET_DIAG_NOCOOKIE) &&
79335 ((u32)(unsigned long)sk != cookie[0] ||
79336 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
79337 return -ESTALE;
79338 else
79339+#endif
79340 return 0;
79341 }
79342 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
79343
79344 void sock_diag_save_cookie(void *sk, __u32 *cookie)
79345 {
79346+#ifdef CONFIG_GRKERNSEC_HIDESYM
79347+ cookie[0] = 0;
79348+ cookie[1] = 0;
79349+#else
79350 cookie[0] = (u32)(unsigned long)sk;
79351 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
79352+#endif
79353 }
79354 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
79355
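
The sock_diag hunks show why HIDESYM matters beyond /proc: the diag cookie is literally the sock's kernel address split across two u32s, handed over netlink to any user. With symbol hiding the cookie is zeroed and the staleness check skipped, trading stale-socket detection for not leaking a heap address. A sketch of the two modes:

#include <stdint.h>
#include <stdio.h>

#define DEMO_HIDESYM 1

static void save_cookie(const void *sk, uint32_t cookie[2])
{
#if DEMO_HIDESYM
        cookie[0] = 0;                  /* leak nothing */
        cookie[1] = 0;
#else
        cookie[0] = (uint32_t)(uintptr_t)sk;
        /* two shifts avoid an undefined 32-bit shift on 32-bit hosts */
        cookie[1] = (uint32_t)((((uintptr_t)sk) >> 31) >> 1);
#endif
}

int main(void)
{
        int sk;
        uint32_t cookie[2];

        save_cookie(&sk, cookie);
        printf("cookie = %08x:%08x\n", cookie[0], cookie[1]);
        return 0;
}
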
79356diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
79357index a55eecc..dd8428c 100644
79358--- a/net/decnet/sysctl_net_decnet.c
79359+++ b/net/decnet/sysctl_net_decnet.c
79360@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
79361
79362 if (len > *lenp) len = *lenp;
79363
79364- if (copy_to_user(buffer, addr, len))
79365+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
79366 return -EFAULT;
79367
79368 *lenp = len;
79369@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
79370
79371 if (len > *lenp) len = *lenp;
79372
79373- if (copy_to_user(buffer, devname, len))
79374+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
79375 return -EFAULT;
79376
79377 *lenp = len;
79378diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
79379index 4780045..078d472 100644
79380--- a/net/ipv4/arp.c
79381+++ b/net/ipv4/arp.c
79382@@ -930,24 +930,25 @@ static void parp_redo(struct sk_buff *skb)
79383 static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
79384 struct packet_type *pt, struct net_device *orig_dev)
79385 {
79386- struct arphdr *arp;
79387+ const struct arphdr *arp;
79388
79389- /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
79390- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
79391- goto freeskb;
79392-
79393- arp = arp_hdr(skb);
79394- if (arp->ar_hln != dev->addr_len ||
79395- dev->flags & IFF_NOARP ||
79396+ if (dev->flags & IFF_NOARP ||
79397 skb->pkt_type == PACKET_OTHERHOST ||
79398- skb->pkt_type == PACKET_LOOPBACK ||
79399- arp->ar_pln != 4)
79400+ skb->pkt_type == PACKET_LOOPBACK)
79401 goto freeskb;
79402
79403 skb = skb_share_check(skb, GFP_ATOMIC);
79404- if (skb == NULL)
79405+ if (!skb)
79406 goto out_of_mem;
79407
79408+ /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
79409+ if (!pskb_may_pull(skb, arp_hdr_len(dev)))
79410+ goto freeskb;
79411+
79412+ arp = arp_hdr(skb);
79413+ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
79414+ goto freeskb;
79415+
79416 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
79417
79418 return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
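
The arp_rcv() hunk is an ordering fix: pskb_may_pull() may modify the skb, which is only safe on a private copy, so the patch runs skb_share_check() first and validates the ARP header afterwards (it also drops the checks that need no header data to the very top). A simplified model of the ordering; the real share_check clones the skb, which this sketch only marks:

#include <stdio.h>

struct skb_demo { int shared; unsigned int len; };

static struct skb_demo *share_check(struct skb_demo *skb)
{
        skb->shared = 0;                /* kernel: clone if shared */
        return skb;
}

/* pskb_may_pull() may reallocate skb data, legal only on a private
 * skb -- hence the reordering. */
static int may_pull(struct skb_demo *skb, unsigned int n)
{
        return !skb->shared && skb->len >= n;
}

static int arp_rcv_demo(struct skb_demo *skb, unsigned int hdrlen)
{
        skb = share_check(skb);         /* 1. obtain a private copy   */
        if (!skb)
                return -1;              /* out of memory              */
        if (!may_pull(skb, hdrlen))     /* 2. only now validate hdrs  */
                return -1;
        return 0;
}

int main(void)
{
        struct skb_demo skb = { .shared = 1, .len = 28 };

        printf("%d\n", arp_rcv_demo(&skb, 28));
        return 0;
}
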
79419diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
79420index 2a6abc1..c379ba7 100644
79421--- a/net/ipv4/devinet.c
79422+++ b/net/ipv4/devinet.c
79423@@ -822,9 +822,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
79424 if (!ifa) {
79425 ret = -ENOBUFS;
79426 ifa = inet_alloc_ifa();
79427+ if (!ifa)
79428+ break;
79429 INIT_HLIST_NODE(&ifa->hash);
79430- if (!ifa)
79431- break;
79432 if (colon)
79433 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
79434 else
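
The devinet.c hunk is a straight use-before-NULL-check fix: INIT_HLIST_NODE dereferenced `ifa` before the allocation was tested. The corrected shape, in miniature:

#include <stdlib.h>

struct hlist_node_demo { void *next; };
struct ifa_demo { struct hlist_node_demo hash; };

static struct ifa_demo *alloc_ifa_demo(void)
{
        struct ifa_demo *ifa = calloc(1, sizeof(*ifa));

        if (!ifa)
                return NULL;            /* check first...        */
        ifa->hash.next = NULL;          /* ...then touch members */
        return ifa;
}

int main(void)
{
        struct ifa_demo *ifa = alloc_ifa_demo();

        free(ifa);
        return 0;
}
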
79435diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
79436index 825c608..750ff29 100644
79437--- a/net/ipv4/fib_frontend.c
79438+++ b/net/ipv4/fib_frontend.c
79439@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
79440 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79441 fib_sync_up(dev);
79442 #endif
79443- atomic_inc(&net->ipv4.dev_addr_genid);
79444+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79445 rt_cache_flush(dev_net(dev));
79446 break;
79447 case NETDEV_DOWN:
79448 fib_del_ifaddr(ifa, NULL);
79449- atomic_inc(&net->ipv4.dev_addr_genid);
79450+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79451 if (ifa->ifa_dev->ifa_list == NULL) {
79452 /* Last address was deleted from this interface.
79453 * Disable IP.
79454@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
79455 #ifdef CONFIG_IP_ROUTE_MULTIPATH
79456 fib_sync_up(dev);
79457 #endif
79458- atomic_inc(&net->ipv4.dev_addr_genid);
79459+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
79460 rt_cache_flush(net);
79461 break;
79462 case NETDEV_DOWN:
79463diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
79464index 71b125c..f4c70b0 100644
79465--- a/net/ipv4/fib_semantics.c
79466+++ b/net/ipv4/fib_semantics.c
79467@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
79468 nh->nh_saddr = inet_select_addr(nh->nh_dev,
79469 nh->nh_gw,
79470 nh->nh_parent->fib_scope);
79471- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
79472+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
79473
79474 return nh->nh_saddr;
79475 }
79476diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
79477index 7880af9..70f92a3 100644
79478--- a/net/ipv4/inet_hashtables.c
79479+++ b/net/ipv4/inet_hashtables.c
79480@@ -18,12 +18,15 @@
79481 #include <linux/sched.h>
79482 #include <linux/slab.h>
79483 #include <linux/wait.h>
79484+#include <linux/security.h>
79485
79486 #include <net/inet_connection_sock.h>
79487 #include <net/inet_hashtables.h>
79488 #include <net/secure_seq.h>
79489 #include <net/ip.h>
79490
79491+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
79492+
79493 /*
79494 * Allocate and initialize a new local port bind bucket.
79495 * The bindhash mutex for snum's hash chain must be held here.
79496@@ -530,6 +533,8 @@ ok:
79497 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
79498 spin_unlock(&head->lock);
79499
79500+ gr_update_task_in_ip_table(current, inet_sk(sk));
79501+
79502 if (tw) {
79503 inet_twsk_deschedule(tw, death_row);
79504 while (twrefcnt) {
79505diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
79506index 000e3d2..5472da3 100644
79507--- a/net/ipv4/inetpeer.c
79508+++ b/net/ipv4/inetpeer.c
79509@@ -503,8 +503,8 @@ relookup:
79510 if (p) {
79511 p->daddr = *daddr;
79512 atomic_set(&p->refcnt, 1);
79513- atomic_set(&p->rid, 0);
79514- atomic_set(&p->ip_id_count,
79515+ atomic_set_unchecked(&p->rid, 0);
79516+ atomic_set_unchecked(&p->ip_id_count,
79517 (daddr->family == AF_INET) ?
79518 secure_ip_id(daddr->addr.a4) :
79519 secure_ipv6_id(daddr->addr.a6));
79520diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
79521index 8d5cc75..821fd11 100644
79522--- a/net/ipv4/ip_fragment.c
79523+++ b/net/ipv4/ip_fragment.c
79524@@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
79525 return 0;
79526
79527 start = qp->rid;
79528- end = atomic_inc_return(&peer->rid);
79529+ end = atomic_inc_return_unchecked(&peer->rid);
79530 qp->rid = end;
79531
79532 rc = qp->q.fragments && (end - start) > max;
79533diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
79534index e95d72b..5268ac0 100644
79535--- a/net/ipv4/ip_sockglue.c
79536+++ b/net/ipv4/ip_sockglue.c
79537@@ -1151,7 +1151,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79538 len = min_t(unsigned int, len, opt->optlen);
79539 if (put_user(len, optlen))
79540 return -EFAULT;
79541- if (copy_to_user(optval, opt->__data, len))
79542+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
79543+ copy_to_user(optval, opt->__data, len))
79544 return -EFAULT;
79545 return 0;
79546 }
79547@@ -1282,7 +1283,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
79548 if (sk->sk_type != SOCK_STREAM)
79549 return -ENOPROTOOPT;
79550
79551- msg.msg_control = optval;
79552+ msg.msg_control = (void __force_kernel *)optval;
79553 msg.msg_controllen = len;
79554 msg.msg_flags = flags;
79555
79556diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
79557index 798358b..73570b7 100644
79558--- a/net/ipv4/ipconfig.c
79559+++ b/net/ipv4/ipconfig.c
79560@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
79561
79562 mm_segment_t oldfs = get_fs();
79563 set_fs(get_ds());
79564- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79565+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79566 set_fs(oldfs);
79567 return res;
79568 }
79569@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
79570
79571 mm_segment_t oldfs = get_fs();
79572 set_fs(get_ds());
79573- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
79574+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
79575 set_fs(oldfs);
79576 return res;
79577 }
79578@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
79579
79580 mm_segment_t oldfs = get_fs();
79581 set_fs(get_ds());
79582- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
79583+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
79584 set_fs(oldfs);
79585 return res;
79586 }
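
These ipconfig.c shims (like the compat.c and addrconf.c ones) temporarily lift the address limit with set_fs(KERNEL_DS) and pass kernel pointers into __user-typed parameters; `__force_user` documents to the checker that the cast deliberately crosses address spaces. The macro itself is defined in the compiler-checker part of this patch (not shown); the sketch below approximates it with sparse-style attributes, which expand to nothing in a normal build:

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force_user   __attribute__((force)) __user
#else
# define __user
# define __force_user
#endif

struct ifreq_demo { char name[16]; };

static int devinet_ioctl_demo(unsigned int cmd,
                              struct ifreq_demo __user *arg)
{
        (void)cmd; (void)arg;
        return 0;
}

int main(void)
{
        struct ifreq_demo ir = { "eth0" };

        /* set_fs(KERNEL_DS) would bracket this call in the kernel */
        return devinet_ioctl_demo(0x8916 /* SIOCSIFADDR-ish */,
                                  (struct ifreq_demo __force_user *)&ir);
}
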
79587diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
79588index 97e61ea..cac1bbb 100644
79589--- a/net/ipv4/netfilter/arp_tables.c
79590+++ b/net/ipv4/netfilter/arp_tables.c
79591@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
79592 #endif
79593
79594 static int get_info(struct net *net, void __user *user,
79595- const int *len, int compat)
79596+ int len, int compat)
79597 {
79598 char name[XT_TABLE_MAXNAMELEN];
79599 struct xt_table *t;
79600 int ret;
79601
79602- if (*len != sizeof(struct arpt_getinfo)) {
79603- duprintf("length %u != %Zu\n", *len,
79604+ if (len != sizeof(struct arpt_getinfo)) {
79605+ duprintf("length %u != %Zu\n", len,
79606 sizeof(struct arpt_getinfo));
79607 return -EINVAL;
79608 }
79609@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
79610 info.size = private->size;
79611 strcpy(info.name, name);
79612
79613- if (copy_to_user(user, &info, *len) != 0)
79614+ if (copy_to_user(user, &info, len) != 0)
79615 ret = -EFAULT;
79616 else
79617 ret = 0;
79618@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
79619
79620 switch (cmd) {
79621 case ARPT_SO_GET_INFO:
79622- ret = get_info(sock_net(sk), user, len, 1);
79623+ ret = get_info(sock_net(sk), user, *len, 1);
79624 break;
79625 case ARPT_SO_GET_ENTRIES:
79626 ret = compat_get_entries(sock_net(sk), user, len);
79627@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
79628
79629 switch (cmd) {
79630 case ARPT_SO_GET_INFO:
79631- ret = get_info(sock_net(sk), user, len, 0);
79632+ ret = get_info(sock_net(sk), user, *len, 0);
79633 break;
79634
79635 case ARPT_SO_GET_ENTRIES:
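
The get_info() rework above (repeated verbatim for ip_tables and ip6_tables below) changes the length parameter from `const int *` to a plain `int`: the caller dereferences `*len` exactly once, so the value validated against sizeof(struct arpt_getinfo) is by construction the value later handed to copy_to_user, a single-fetch pattern that also makes the copy's bound obvious. In miniature:

#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

struct getinfo_demo { char name[32]; unsigned int size; };

/* take the length by value: what passed validation is what we copy */
static int get_info_demo(void *user, int len)
{
        struct getinfo_demo info = { "filter", 0 };

        if (len != sizeof(info))
                return -EINVAL;
        memcpy(user, &info, len);       /* same len, guaranteed */
        return 0;
}

int main(void)
{
        struct getinfo_demo out;

        printf("%d\n", get_info_demo(&out, sizeof(out)));
        printf("%d\n", get_info_demo(&out, 8));
        return 0;
}
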
79636diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
79637index 170b1fd..6105b91 100644
79638--- a/net/ipv4/netfilter/ip_tables.c
79639+++ b/net/ipv4/netfilter/ip_tables.c
79640@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
79641 #endif
79642
79643 static int get_info(struct net *net, void __user *user,
79644- const int *len, int compat)
79645+ int len, int compat)
79646 {
79647 char name[XT_TABLE_MAXNAMELEN];
79648 struct xt_table *t;
79649 int ret;
79650
79651- if (*len != sizeof(struct ipt_getinfo)) {
79652- duprintf("length %u != %zu\n", *len,
79653+ if (len != sizeof(struct ipt_getinfo)) {
79654+ duprintf("length %u != %zu\n", len,
79655 sizeof(struct ipt_getinfo));
79656 return -EINVAL;
79657 }
79658@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
79659 info.size = private->size;
79660 strcpy(info.name, name);
79661
79662- if (copy_to_user(user, &info, *len) != 0)
79663+ if (copy_to_user(user, &info, len) != 0)
79664 ret = -EFAULT;
79665 else
79666 ret = 0;
79667@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79668
79669 switch (cmd) {
79670 case IPT_SO_GET_INFO:
79671- ret = get_info(sock_net(sk), user, len, 1);
79672+ ret = get_info(sock_net(sk), user, *len, 1);
79673 break;
79674 case IPT_SO_GET_ENTRIES:
79675 ret = compat_get_entries(sock_net(sk), user, len);
79676@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
79677
79678 switch (cmd) {
79679 case IPT_SO_GET_INFO:
79680- ret = get_info(sock_net(sk), user, len, 0);
79681+ ret = get_info(sock_net(sk), user, *len, 0);
79682 break;
79683
79684 case IPT_SO_GET_ENTRIES:
79685diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
79686index 6f9c072..38ea6c6 100644
79687--- a/net/ipv4/ping.c
79688+++ b/net/ipv4/ping.c
79689@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
79690 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
79691 0, sock_i_ino(sp),
79692 atomic_read(&sp->sk_refcnt), sp,
79693- atomic_read(&sp->sk_drops), len);
79694+ atomic_read_unchecked(&sp->sk_drops), len);
79695 }
79696
79697 static int ping_seq_show(struct seq_file *seq, void *v)
79698diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
79699index 6f08991..55867ad 100644
79700--- a/net/ipv4/raw.c
79701+++ b/net/ipv4/raw.c
79702@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
79703 int raw_rcv(struct sock *sk, struct sk_buff *skb)
79704 {
79705 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
79706- atomic_inc(&sk->sk_drops);
79707+ atomic_inc_unchecked(&sk->sk_drops);
79708 kfree_skb(skb);
79709 return NET_RX_DROP;
79710 }
79711@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
79712
79713 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
79714 {
79715+ struct icmp_filter filter;
79716+
79717 if (optlen > sizeof(struct icmp_filter))
79718 optlen = sizeof(struct icmp_filter);
79719- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
79720+ if (copy_from_user(&filter, optval, optlen))
79721 return -EFAULT;
79722+ raw_sk(sk)->filter = filter;
79723 return 0;
79724 }
79725
79726 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
79727 {
79728 int len, ret = -EFAULT;
79729+ struct icmp_filter filter;
79730
79731 if (get_user(len, optlen))
79732 goto out;
79733@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
79734 if (len > sizeof(struct icmp_filter))
79735 len = sizeof(struct icmp_filter);
79736 ret = -EFAULT;
79737- if (put_user(len, optlen) ||
79738- copy_to_user(optval, &raw_sk(sk)->filter, len))
79739+ filter = raw_sk(sk)->filter;
79740+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
79741 goto out;
79742 ret = 0;
79743 out: return ret;
79744@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
79745 0, 0L, 0,
79746 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
79747 0, sock_i_ino(sp),
79748- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
79749+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
79750 }
79751
79752 static int raw_seq_show(struct seq_file *seq, void *v)
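
The raw.c filter hunks stage both copy directions through a stack `struct icmp_filter` and commit only on success, so a faulting copy_from_user() can no longer leave a half-written filter on the socket (the get side also gains a `len > sizeof filter` bound). A sketch of the commit-on-success pattern; the fault flag models a failing user copy:

#include <string.h>
#include <errno.h>
#include <stdio.h>

struct icmp_filter_demo { unsigned int data; };

static struct icmp_filter_demo sk_filter; /* stands in for raw_sk(sk)->filter */

static int seticmpfilter(const struct icmp_filter_demo *optval,
                         size_t optlen, int fault)
{
        /* start from the current filter so a short write keeps the tail */
        struct icmp_filter_demo filter = sk_filter;

        if (optlen > sizeof(filter))
                optlen = sizeof(filter);
        if (fault)                      /* models copy_from_user() failing */
                return -EFAULT;
        memcpy(&filter, optval, optlen);
        sk_filter = filter;             /* commit only on success */
        return 0;
}

int main(void)
{
        struct icmp_filter_demo in = { 0xff };

        printf("%d data=%#x\n", seticmpfilter(&in, sizeof(in), 0), sk_filter.data);
        printf("%d data=%#x\n", seticmpfilter(&in, sizeof(in), 1), sk_filter.data);
        return 0;
}
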
79753diff --git a/net/ipv4/route.c b/net/ipv4/route.c
79754index 0fdfe4c..e7ea542 100644
79755--- a/net/ipv4/route.c
79756+++ b/net/ipv4/route.c
79757@@ -2579,7 +2579,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
79758
79759 static __net_init int rt_genid_init(struct net *net)
79760 {
79761- atomic_set(&net->rt_genid, 0);
79762+ atomic_set_unchecked(&net->rt_genid, 0);
79763 get_random_bytes(&net->ipv4.dev_addr_genid,
79764 sizeof(net->ipv4.dev_addr_genid));
79765 return 0;
79766diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
79767index beabc80..48a6a10 100644
79768--- a/net/ipv4/tcp_input.c
79769+++ b/net/ipv4/tcp_input.c
79770@@ -4709,7 +4709,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
79771 * simplifies code)
79772 */
79773 static void
79774-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79775+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
79776 struct sk_buff *head, struct sk_buff *tail,
79777 u32 start, u32 end)
79778 {
79779@@ -5541,6 +5541,9 @@ slow_path:
79780 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
79781 goto csum_error;
79782
79783+ if (!th->ack)
79784+ goto discard;
79785+
79786 /*
79787 * Standard slow path.
79788 */
79789@@ -5549,7 +5552,7 @@ slow_path:
79790 return 0;
79791
79792 step5:
79793- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79794+ if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
79795 goto discard;
79796
79797 /* ts_recent update must be made after we are sure that the packet
79798@@ -5840,6 +5843,7 @@ discard:
79799 tcp_paws_reject(&tp->rx_opt, 0))
79800 goto discard_and_undo;
79801
79802+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
79803 if (th->syn) {
79804 /* We see SYN without ACK. It is attempt of
79805 * simultaneous connect with crossed SYNs.
79806@@ -5890,6 +5894,7 @@ discard:
79807 goto discard;
79808 #endif
79809 }
79810+#endif
79811 /* "fifth, if neither of the SYN or RST bits is set then
79812 * drop the segment and return."
79813 */
79814@@ -5934,7 +5939,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79815 goto discard;
79816
79817 if (th->syn) {
79818- if (th->fin)
79819+ if (th->fin || th->urg || th->psh)
79820 goto discard;
79821 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
79822 return 1;
79823@@ -5981,11 +5986,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79824 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
79825 goto discard;
79826 }
79827+
79828+ if (!th->ack)
79829+ goto discard;
79830+
79831 if (!tcp_validate_incoming(sk, skb, th, 0))
79832 return 0;
79833
79834 /* step 5: check the ACK field */
79835- if (th->ack) {
79836+ if (true) {
79837 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
79838
79839 switch (sk->sk_state) {
79840@@ -6135,8 +6144,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
79841 }
79842 break;
79843 }
79844- } else
79845- goto discard;
79846+ }
79847
79848 /* ts_recent update must be made after we are sure that the packet
79849 * is in window.
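
The tcp_input.c rework drops segments without ACK up front, both in the established slow path and in tcp_rcv_state_process(), which lets the trailing `if (th->ack) ... else goto discard` collapse (hence the `if (true)` keeping the indentation of the untouched body); it also rejects SYNs carrying FIN, URG or PSH, and GRKERNSEC_NO_SIMULT_CONNECT compiles out the simultaneous-open SYN path entirely. The control-flow shape, in miniature:

#include <stdio.h>

struct tcphdr_demo { unsigned ack:1, syn:1, rst:1; };

/* reject non-ACK segments early so everything after the check can
 * assume th->ack, removing one nesting level */
static int rcv_state_process(const struct tcphdr_demo *th)
{
        if (!th->ack)
                return 0;               /* discard early */

        /* ... validate_incoming(), then step 5 unconditionally ... */
        printf("processing ACK (syn=%u rst=%u)\n", th->syn, th->rst);
        return 1;
}

int main(void)
{
        struct tcphdr_demo ack = { .ack = 1 };
        struct tcphdr_demo bare_syn = { .syn = 1 };

        rcv_state_process(&ack);        /* processed */
        rcv_state_process(&bare_syn);   /* discarded */
        return 0;
}
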
79850diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
79851index e637770..364ff02 100644
79852--- a/net/ipv4/tcp_ipv4.c
79853+++ b/net/ipv4/tcp_ipv4.c
79854@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
79855 EXPORT_SYMBOL(sysctl_tcp_low_latency);
79856
79857
79858+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79859+extern int grsec_enable_blackhole;
79860+#endif
79861+
79862 #ifdef CONFIG_TCP_MD5SIG
79863 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
79864 __be32 daddr, __be32 saddr, const struct tcphdr *th);
79865@@ -1898,6 +1902,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
79866 return 0;
79867
79868 reset:
79869+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79870+ if (!grsec_enable_blackhole)
79871+#endif
79872 tcp_v4_send_reset(rsk, skb);
79873 discard:
79874 kfree_skb(skb);
79875@@ -1998,12 +2005,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
79876 TCP_SKB_CB(skb)->sacked = 0;
79877
79878 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
79879- if (!sk)
79880+ if (!sk) {
79881+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79882+ ret = 1;
79883+#endif
79884 goto no_tcp_socket;
79885-
79886+ }
79887 process:
79888- if (sk->sk_state == TCP_TIME_WAIT)
79889+ if (sk->sk_state == TCP_TIME_WAIT) {
79890+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79891+ ret = 2;
79892+#endif
79893 goto do_time_wait;
79894+ }
79895
79896 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
79897 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
79898@@ -2054,6 +2068,10 @@ no_tcp_socket:
79899 bad_packet:
79900 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
79901 } else {
79902+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79903+ if (!grsec_enable_blackhole || (ret == 1 &&
79904+ (skb->dev->flags & IFF_LOOPBACK)))
79905+#endif
79906 tcp_v4_send_reset(NULL, skb);
79907 }
79908
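
GRKERNSEC_BLACKHOLE, introduced here and reused in tcp_minisocks.c and udp.c below, suppresses the RST (or ICMP port-unreachable) normally sent for traffic that matches no socket, making closed ports look filtered to a scanner; the `ret` bookkeeping preserves a loopback exception for the no-socket case so local tools keep getting RSTs. The decision in the no-socket path, reduced to a sketch:

#include <stdio.h>

static int grsec_enable_blackhole = 1;  /* sysctl-backed in the patch */

/* mirrors: if (!grsec_enable_blackhole ||
 *              (ret == 1 && (skb->dev->flags & IFF_LOOPBACK)))
 *                  tcp_v4_send_reset(...); */
static int should_send_reset(int no_socket, int on_loopback)
{
        if (!grsec_enable_blackhole)
                return 1;
        if (no_socket && on_loopback)
                return 1;
        return 0;
}

int main(void)
{
        printf("remote probe: %s\n",
               should_send_reset(1, 0) ? "RST" : "silence");
        printf("local probe:  %s\n",
               should_send_reset(1, 1) ? "RST" : "silence");
        return 0;
}
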
79909diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
79910index a7302d9..e3ec754 100644
79911--- a/net/ipv4/tcp_minisocks.c
79912+++ b/net/ipv4/tcp_minisocks.c
79913@@ -27,6 +27,10 @@
79914 #include <net/inet_common.h>
79915 #include <net/xfrm.h>
79916
79917+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79918+extern int grsec_enable_blackhole;
79919+#endif
79920+
79921 int sysctl_tcp_syncookies __read_mostly = 1;
79922 EXPORT_SYMBOL(sysctl_tcp_syncookies);
79923
79924@@ -742,7 +746,10 @@ embryonic_reset:
79925 * avoid becoming vulnerable to outside attack aiming at
79926 * resetting legit local connections.
79927 */
79928- req->rsk_ops->send_reset(sk, skb);
79929+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79930+ if (!grsec_enable_blackhole)
79931+#endif
79932+ req->rsk_ops->send_reset(sk, skb);
79933 } else if (fastopen) { /* received a valid RST pkt */
79934 reqsk_fastopen_remove(sk, req, true);
79935 tcp_reset(sk);
79936diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
79937index 4526fe6..1a34e43 100644
79938--- a/net/ipv4/tcp_probe.c
79939+++ b/net/ipv4/tcp_probe.c
79940@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
79941 if (cnt + width >= len)
79942 break;
79943
79944- if (copy_to_user(buf + cnt, tbuf, width))
79945+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
79946 return -EFAULT;
79947 cnt += width;
79948 }
79949diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
79950index d47c1b4..b0584de 100644
79951--- a/net/ipv4/tcp_timer.c
79952+++ b/net/ipv4/tcp_timer.c
79953@@ -22,6 +22,10 @@
79954 #include <linux/gfp.h>
79955 #include <net/tcp.h>
79956
79957+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79958+extern int grsec_lastack_retries;
79959+#endif
79960+
79961 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
79962 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
79963 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
79964@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
79965 }
79966 }
79967
79968+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79969+ if ((sk->sk_state == TCP_LAST_ACK) &&
79970+ (grsec_lastack_retries > 0) &&
79971+ (grsec_lastack_retries < retry_until))
79972+ retry_until = grsec_lastack_retries;
79973+#endif
79974+
79975 if (retransmits_timed_out(sk, retry_until,
79976 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
79977 /* Has it gone just too far? */
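
The tcp_timer.c companion to the blackhole: a peer that never answers our FIN/ACK would otherwise pin a LAST-ACK socket for the full retransmission budget, so the patch caps retry_until in that state with a configurable grsec_lastack_retries. In miniature:

#include <stdio.h>

static int grsec_lastack_retries = 4;   /* 0 disables the cap */

/* tcp_write_timeout() tweak: bound LAST-ACK retries so blackholed
 * peers cannot hold sockets for the whole TCP_RETR2 budget */
static int effective_retries(int state_is_lastack, int retry_until)
{
        if (state_is_lastack && grsec_lastack_retries > 0 &&
            grsec_lastack_retries < retry_until)
                retry_until = grsec_lastack_retries;
        return retry_until;
}

int main(void)
{
        printf("LAST-ACK: %d retries\n", effective_retries(1, 15));
        printf("other:    %d retries\n", effective_retries(0, 15));
        return 0;
}
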
79978diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
79979index 1f4d405..3524677 100644
79980--- a/net/ipv4/udp.c
79981+++ b/net/ipv4/udp.c
79982@@ -87,6 +87,7 @@
79983 #include <linux/types.h>
79984 #include <linux/fcntl.h>
79985 #include <linux/module.h>
79986+#include <linux/security.h>
79987 #include <linux/socket.h>
79988 #include <linux/sockios.h>
79989 #include <linux/igmp.h>
79990@@ -111,6 +112,10 @@
79991 #include <trace/events/skb.h>
79992 #include "udp_impl.h"
79993
79994+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79995+extern int grsec_enable_blackhole;
79996+#endif
79997+
79998 struct udp_table udp_table __read_mostly;
79999 EXPORT_SYMBOL(udp_table);
80000
80001@@ -569,6 +574,9 @@ found:
80002 return s;
80003 }
80004
80005+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
80006+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
80007+
80008 /*
80009 * This routine is called by the ICMP module when it gets some
80010 * sort of error condition. If err < 0 then the socket should
80011@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
80012 dport = usin->sin_port;
80013 if (dport == 0)
80014 return -EINVAL;
80015+
80016+ err = gr_search_udp_sendmsg(sk, usin);
80017+ if (err)
80018+ return err;
80019 } else {
80020 if (sk->sk_state != TCP_ESTABLISHED)
80021 return -EDESTADDRREQ;
80022+
80023+ err = gr_search_udp_sendmsg(sk, NULL);
80024+ if (err)
80025+ return err;
80026+
80027 daddr = inet->inet_daddr;
80028 dport = inet->inet_dport;
80029 /* Open fast path for connected socket.
80030@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
80031 udp_lib_checksum_complete(skb)) {
80032 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
80033 IS_UDPLITE(sk));
80034- atomic_inc(&sk->sk_drops);
80035+ atomic_inc_unchecked(&sk->sk_drops);
80036 __skb_unlink(skb, rcvq);
80037 __skb_queue_tail(&list_kill, skb);
80038 }
80039@@ -1194,6 +1211,10 @@ try_again:
80040 if (!skb)
80041 goto out;
80042
80043+ err = gr_search_udp_recvmsg(sk, skb);
80044+ if (err)
80045+ goto out_free;
80046+
80047 ulen = skb->len - sizeof(struct udphdr);
80048 copied = len;
80049 if (copied > ulen)
80050@@ -1227,7 +1248,7 @@ try_again:
80051 if (unlikely(err)) {
80052 trace_kfree_skb(skb, udp_recvmsg);
80053 if (!peeked) {
80054- atomic_inc(&sk->sk_drops);
80055+ atomic_inc_unchecked(&sk->sk_drops);
80056 UDP_INC_STATS_USER(sock_net(sk),
80057 UDP_MIB_INERRORS, is_udplite);
80058 }
80059@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80060
80061 drop:
80062 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
80063- atomic_inc(&sk->sk_drops);
80064+ atomic_inc_unchecked(&sk->sk_drops);
80065 kfree_skb(skb);
80066 return -1;
80067 }
80068@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
80069 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
80070
80071 if (!skb1) {
80072- atomic_inc(&sk->sk_drops);
80073+ atomic_inc_unchecked(&sk->sk_drops);
80074 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80075 IS_UDPLITE(sk));
80076 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
80077@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80078 goto csum_error;
80079
80080 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80081+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80082+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80083+#endif
80084 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
80085
80086 /*
80087@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
80088 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
80089 0, sock_i_ino(sp),
80090 atomic_read(&sp->sk_refcnt), sp,
80091- atomic_read(&sp->sk_drops), len);
80092+ atomic_read_unchecked(&sp->sk_drops), len);
80093 }
80094
80095 int udp4_seq_show(struct seq_file *seq, void *v)
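
The hunk above is the UDP half of GRKERNSEC_BLACKHOLE: when a datagram reaches __udp4_lib_rcv and no socket is bound to the port, the stack normally answers with an ICMP port-unreachable, which lets a scanner tell closed ports from filtered ones. With the sysctl enabled the reply is suppressed unless the packet arrived on a loopback device, so purely local probing keeps working. A distilled sketch of the decision (the helper name is illustrative, not from the patch):

	#include <linux/types.h>
	#include <linux/if.h>		/* IFF_LOOPBACK */

	/* Sketch: may an unmatched UDP datagram be answered with
	 * ICMP_PORT_UNREACH?  Mirrors the #ifdef'd condition above. */
	static inline bool udp_reply_allowed(int blackhole, unsigned int dev_flags)
	{
		/* Loopback traffic is always answered so local resolvers
		 * and 127.0.0.1 probes behave normally; otherwise stay
		 * silent whenever the blackhole sysctl is on. */
		return !blackhole || (dev_flags & IFF_LOOPBACK);
	}
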
80096diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
80097index a468a36..b50ffde 100644
80098--- a/net/ipv6/addrconf.c
80099+++ b/net/ipv6/addrconf.c
80100@@ -2121,7 +2121,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
80101 p.iph.ihl = 5;
80102 p.iph.protocol = IPPROTO_IPV6;
80103 p.iph.ttl = 64;
80104- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
80105+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
80106
80107 if (ops->ndo_do_ioctl) {
80108 mm_segment_t oldfs = get_fs();
80109diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
80110index a23350c..899c62c 100644
80111--- a/net/ipv6/ip6_gre.c
80112+++ b/net/ipv6/ip6_gre.c
80113@@ -1353,7 +1353,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
80114 }
80115
80116
80117-static struct inet6_protocol ip6gre_protocol __read_mostly = {
80118+static struct inet6_protocol ip6gre_protocol = {
80119 .handler = ip6gre_rcv,
80120 .err_handler = ip6gre_err,
80121 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
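
Dropping __read_mostly from ip6gre_protocol looks cosmetic, but it accompanies grsecurity's constify plugin, which treats ops-style structures that are never written after their definition as const and moves them into a write-protected section; an explicit __read_mostly section attribute would fight that placement. The effect is roughly as if the definition read (a sketch of the plugin's view, not literal patch output):

	static const struct inet6_protocol ip6gre_protocol = {
		.handler     = ip6gre_rcv,
		.err_handler = ip6gre_err,
		.flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
	};
	/* const data lands in a read-only section after boot; asking for
	 * .data..read_mostly at the same time would be contradictory. */
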
80122diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
80123index e02faed..9780f28 100644
80124--- a/net/ipv6/ipv6_sockglue.c
80125+++ b/net/ipv6/ipv6_sockglue.c
80126@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
80127 if (sk->sk_type != SOCK_STREAM)
80128 return -ENOPROTOOPT;
80129
80130- msg.msg_control = optval;
80131+ msg.msg_control = (void __force_kernel *)optval;
80132 msg.msg_controllen = len;
80133 msg.msg_flags = flags;
80134
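
The casts in the two hunks above use __force_user and __force_kernel, annotations this patch introduces in its compiler headers (outside this excerpt) for places where a pointer deliberately crosses the user/kernel boundary: addrconf_set_dstaddr issues an ioctl on a kernel buffer under set_fs(KERNEL_DS), and do_ipv6_getsockopt reuses a msghdr whose msg_control normally points into userspace. A plausible expansion, assuming the usual sparse-style definitions:

	#ifdef __CHECKER__			/* sparse sees the full annotation */
	# define __force_user	__force __user
	# define __force_kernel	__force __kernel
	#else					/* ordinary compilers see nothing */
	# define __force_user
	# define __force_kernel
	#endif

	/* Unlike a bare (__force void __user *) cast, the named macros keep
	 * the direction of the crossing greppable and checkable. */
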
80135diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
80136index d7cb045..8c0ded6 100644
80137--- a/net/ipv6/netfilter/ip6_tables.c
80138+++ b/net/ipv6/netfilter/ip6_tables.c
80139@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
80140 #endif
80141
80142 static int get_info(struct net *net, void __user *user,
80143- const int *len, int compat)
80144+ int len, int compat)
80145 {
80146 char name[XT_TABLE_MAXNAMELEN];
80147 struct xt_table *t;
80148 int ret;
80149
80150- if (*len != sizeof(struct ip6t_getinfo)) {
80151- duprintf("length %u != %zu\n", *len,
80152+ if (len != sizeof(struct ip6t_getinfo)) {
80153+ duprintf("length %u != %zu\n", len,
80154 sizeof(struct ip6t_getinfo));
80155 return -EINVAL;
80156 }
80157@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
80158 info.size = private->size;
80159 strcpy(info.name, name);
80160
80161- if (copy_to_user(user, &info, *len) != 0)
80162+ if (copy_to_user(user, &info, len) != 0)
80163 ret = -EFAULT;
80164 else
80165 ret = 0;
80166@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
80167
80168 switch (cmd) {
80169 case IP6T_SO_GET_INFO:
80170- ret = get_info(sock_net(sk), user, len, 1);
80171+ ret = get_info(sock_net(sk), user, *len, 1);
80172 break;
80173 case IP6T_SO_GET_ENTRIES:
80174 ret = compat_get_entries(sock_net(sk), user, len);
80175@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
80176
80177 switch (cmd) {
80178 case IP6T_SO_GET_INFO:
80179- ret = get_info(sock_net(sk), user, len, 0);
80180+ ret = get_info(sock_net(sk), user, *len, 0);
80181 break;
80182
80183 case IP6T_SO_GET_ENTRIES:
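
get_info previously took const int *len even though it only ever read the value once, and both callers already had it in hand; passing the length by value makes that single read explicit and guarantees the helper cannot observe two different lengths or write one back. The call sites now dereference exactly once at the boundary:

	/* before: a pointer the helper could in principle re-read */
	static int get_info(struct net *net, void __user *user,
			    const int *len, int compat);

	/* after: a value, immutable inside the helper */
	static int get_info(struct net *net, void __user *user,
			    int len, int compat);

	ret = get_info(sock_net(sk), user, *len, 0);	/* one dereference */
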
80184diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
80185index d8e95c7..81422bc 100644
80186--- a/net/ipv6/raw.c
80187+++ b/net/ipv6/raw.c
80188@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
80189 {
80190 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
80191 skb_checksum_complete(skb)) {
80192- atomic_inc(&sk->sk_drops);
80193+ atomic_inc_unchecked(&sk->sk_drops);
80194 kfree_skb(skb);
80195 return NET_RX_DROP;
80196 }
80197@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80198 struct raw6_sock *rp = raw6_sk(sk);
80199
80200 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
80201- atomic_inc(&sk->sk_drops);
80202+ atomic_inc_unchecked(&sk->sk_drops);
80203 kfree_skb(skb);
80204 return NET_RX_DROP;
80205 }
80206@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
80207
80208 if (inet->hdrincl) {
80209 if (skb_checksum_complete(skb)) {
80210- atomic_inc(&sk->sk_drops);
80211+ atomic_inc_unchecked(&sk->sk_drops);
80212 kfree_skb(skb);
80213 return NET_RX_DROP;
80214 }
80215@@ -604,7 +604,7 @@ out:
80216 return err;
80217 }
80218
80219-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
80220+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
80221 struct flowi6 *fl6, struct dst_entry **dstp,
80222 unsigned int flags)
80223 {
80224@@ -916,12 +916,15 @@ do_confirm:
80225 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
80226 char __user *optval, int optlen)
80227 {
80228+ struct icmp6_filter filter;
80229+
80230 switch (optname) {
80231 case ICMPV6_FILTER:
80232 if (optlen > sizeof(struct icmp6_filter))
80233 optlen = sizeof(struct icmp6_filter);
80234- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
80235+ if (copy_from_user(&filter, optval, optlen))
80236 return -EFAULT;
80237+ raw6_sk(sk)->filter = filter;
80238 return 0;
80239 default:
80240 return -ENOPROTOOPT;
80241@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80242 char __user *optval, int __user *optlen)
80243 {
80244 int len;
80245+ struct icmp6_filter filter;
80246
80247 switch (optname) {
80248 case ICMPV6_FILTER:
80249@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
80250 len = sizeof(struct icmp6_filter);
80251 if (put_user(len, optlen))
80252 return -EFAULT;
80253- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
80254+ filter = raw6_sk(sk)->filter;
80255+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
80256 return -EFAULT;
80257 return 0;
80258 default:
80259@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80260 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
80261 0,
80262 sock_i_ino(sp),
80263- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80264+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
80265 }
80266
80267 static int raw6_seq_show(struct seq_file *seq, void *v)
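
rawv6_seticmpfilter now stages the user data in a stack-local struct icmp6_filter and commits it with a structure assignment, so a faulting copy_from_user can no longer leave raw6_sk(sk)->filter half-overwritten; the get side additionally bounds len against the local copy before copy_to_user, so an oversized length cannot read past the filter into neighbouring socket state. The staged pattern, distilled:

	struct icmp6_filter tmp;

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);		/* clamp before copying */
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;			/* sk state untouched on fault */
	raw6_sk(sk)->filter = tmp;		/* commit only a complete copy */
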
80268diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
80269index 73f2a6b..f8049a1 100644
80270--- a/net/ipv6/tcp_ipv6.c
80271+++ b/net/ipv6/tcp_ipv6.c
80272@@ -106,6 +106,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
80273 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
80274 }
80275
80276+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80277+extern int grsec_enable_blackhole;
80278+#endif
80279+
80280 static void tcp_v6_hash(struct sock *sk)
80281 {
80282 if (sk->sk_state != TCP_CLOSE) {
80283@@ -1525,6 +1529,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
80284 return 0;
80285
80286 reset:
80287+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80288+ if (!grsec_enable_blackhole)
80289+#endif
80290 tcp_v6_send_reset(sk, skb);
80291 discard:
80292 if (opt_skb)
80293@@ -1606,12 +1613,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
80294 TCP_SKB_CB(skb)->sacked = 0;
80295
80296 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80297- if (!sk)
80298+ if (!sk) {
80299+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80300+ ret = 1;
80301+#endif
80302 goto no_tcp_socket;
80303+ }
80304
80305 process:
80306- if (sk->sk_state == TCP_TIME_WAIT)
80307+ if (sk->sk_state == TCP_TIME_WAIT) {
80308+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80309+ ret = 2;
80310+#endif
80311 goto do_time_wait;
80312+ }
80313
80314 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
80315 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
80316@@ -1660,6 +1675,10 @@ no_tcp_socket:
80317 bad_packet:
80318 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80319 } else {
80320+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80321+ if (!grsec_enable_blackhole || (ret == 1 &&
80322+ (skb->dev->flags & IFF_LOOPBACK)))
80323+#endif
80324 tcp_v6_send_reset(NULL, skb);
80325 }
80326
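
tcp_ipv6.c gets the TCP half of the blackhole: RSTs for segments that match no socket are suppressed. The otherwise unused values written to ret act as tags (1: the lookup found no socket, 2: it found a TIME_WAIT entry) so that the loopback exemption is applied only in the no-socket case, mirroring the IPv4 implementation earlier in this patch:

	/* Sketch of the reset decision; ret tagged as above. */
	static inline bool tcp_reset_allowed(int blackhole, int ret,
					     unsigned int dev_flags)
	{
		return !blackhole ||
		       (ret == 1 && (dev_flags & IFF_LOOPBACK));
	}
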
80327diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
80328index fc99972..69397e8 100644
80329--- a/net/ipv6/udp.c
80330+++ b/net/ipv6/udp.c
80331@@ -51,6 +51,10 @@
80332 #include <trace/events/skb.h>
80333 #include "udp_impl.h"
80334
80335+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80336+extern int grsec_enable_blackhole;
80337+#endif
80338+
80339 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
80340 {
80341 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
80342@@ -395,7 +399,7 @@ try_again:
80343 if (unlikely(err)) {
80344 trace_kfree_skb(skb, udpv6_recvmsg);
80345 if (!peeked) {
80346- atomic_inc(&sk->sk_drops);
80347+ atomic_inc_unchecked(&sk->sk_drops);
80348 if (is_udp4)
80349 UDP_INC_STATS_USER(sock_net(sk),
80350 UDP_MIB_INERRORS,
80351@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80352 return rc;
80353 drop:
80354 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
80355- atomic_inc(&sk->sk_drops);
80356+ atomic_inc_unchecked(&sk->sk_drops);
80357 kfree_skb(skb);
80358 return -1;
80359 }
80360@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
80361 if (likely(skb1 == NULL))
80362 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
80363 if (!skb1) {
80364- atomic_inc(&sk->sk_drops);
80365+ atomic_inc_unchecked(&sk->sk_drops);
80366 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80367 IS_UDPLITE(sk));
80368 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
80369@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80370 goto discard;
80371
80372 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80373+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80374+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80375+#endif
80376 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
80377
80378 kfree_skb(skb);
80379@@ -1473,7 +1480,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
80380 0,
80381 sock_i_ino(sp),
80382 atomic_read(&sp->sk_refcnt), sp,
80383- atomic_read(&sp->sk_drops));
80384+ atomic_read_unchecked(&sp->sk_drops));
80385 }
80386
80387 int udp6_seq_show(struct seq_file *seq, void *v)
80388diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
80389index 496ce2c..f79fac8 100644
80390--- a/net/irda/ircomm/ircomm_tty.c
80391+++ b/net/irda/ircomm/ircomm_tty.c
80392@@ -311,12 +311,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80393 add_wait_queue(&port->open_wait, &wait);
80394
80395 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
80396- __FILE__, __LINE__, tty->driver->name, port->count);
80397+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80398
80399 spin_lock_irqsave(&port->lock, flags);
80400 if (!tty_hung_up_p(filp)) {
80401 extra_count = 1;
80402- port->count--;
80403+ atomic_dec(&port->count);
80404 }
80405 spin_unlock_irqrestore(&port->lock, flags);
80406 port->blocked_open++;
80407@@ -352,7 +352,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80408 }
80409
80410 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
80411- __FILE__, __LINE__, tty->driver->name, port->count);
80412+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80413
80414 schedule();
80415 }
80416@@ -363,13 +363,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
80417 if (extra_count) {
80418 /* ++ is not atomic, so this should be protected - Jean II */
80419 spin_lock_irqsave(&port->lock, flags);
80420- port->count++;
80421+ atomic_inc(&port->count);
80422 spin_unlock_irqrestore(&port->lock, flags);
80423 }
80424 port->blocked_open--;
80425
80426 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
80427- __FILE__, __LINE__, tty->driver->name, port->count);
80428+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
80429
80430 if (!retval)
80431 port->flags |= ASYNC_NORMAL_ACTIVE;
80432@@ -443,12 +443,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
80433
80434 /* ++ is not atomic, so this should be protected - Jean II */
80435 spin_lock_irqsave(&self->port.lock, flags);
80436- self->port.count++;
80437+ atomic_inc(&self->port.count);
80438 spin_unlock_irqrestore(&self->port.lock, flags);
80439 tty_port_tty_set(&self->port, tty);
80440
80441 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
80442- self->line, self->port.count);
80443+ self->line, atomic_read(&self->port.count));
80444
80445 /* Not really used by us, but lets do it anyway */
80446 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
80447@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
80448 tty_kref_put(port->tty);
80449 }
80450 port->tty = NULL;
80451- port->count = 0;
80452+ atomic_set(&port->count, 0);
80453 spin_unlock_irqrestore(&port->lock, flags);
80454
80455 wake_up_interruptible(&port->open_wait);
80456@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
80457 seq_putc(m, '\n');
80458
80459 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
80460- seq_printf(m, "Open count: %d\n", self->port.count);
80461+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
80462 seq_printf(m, "Max data size: %d\n", self->max_data_size);
80463 seq_printf(m, "Max header size: %d\n", self->max_header_size);
80464
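
The ircomm_tty hunks rely on struct tty_port's count having been converted from int to atomic_t elsewhere in this patch; every open, close, and hangup path then goes through atomic_inc/atomic_dec/atomic_set, and the diagnostics through atomic_read. Under PAX_REFCOUNT the checked atomic operations trap on wraparound, turning a reference-count overflow, a classic use-after-free primitive, into a detected event. The access pattern in miniature (struct name hypothetical):

	#include <linux/atomic.h>

	struct port_like {
		atomic_t count;			/* was: int count; */
	};

	static void port_open(struct port_like *p)
	{
		atomic_inc(&p->count);		/* checked under PAX_REFCOUNT */
	}

	static void port_hangup(struct port_like *p)
	{
		atomic_set(&p->count, 0);	/* as in ircomm_tty_hangup() */
	}
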
80465diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
80466index cd6f7a9..e63fe89 100644
80467--- a/net/iucv/af_iucv.c
80468+++ b/net/iucv/af_iucv.c
80469@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
80470
80471 write_lock_bh(&iucv_sk_list.lock);
80472
80473- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
80474+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80475 while (__iucv_get_sock_by_name(name)) {
80476 sprintf(name, "%08x",
80477- atomic_inc_return(&iucv_sk_list.autobind_name));
80478+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
80479 }
80480
80481 write_unlock_bh(&iucv_sk_list.lock);
80482diff --git a/net/key/af_key.c b/net/key/af_key.c
80483index 08897a3..0b812ab 100644
80484--- a/net/key/af_key.c
80485+++ b/net/key/af_key.c
80486@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
80487 static u32 get_acqseq(void)
80488 {
80489 u32 res;
80490- static atomic_t acqseq;
80491+ static atomic_unchecked_t acqseq;
80492
80493 do {
80494- res = atomic_inc_return(&acqseq);
80495+ res = atomic_inc_return_unchecked(&acqseq);
80496 } while (!res);
80497 return res;
80498 }
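
get_acqseq is the cleanest illustration of the atomic_unchecked_t conversions running through this whole section (sk_drops, the IUCV autobind name, the IPVS packet counters, nfnetlink's global_seq): under PAX_REFCOUNT every atomic_t increment is overflow-checked, so counters that are allowed, or here expected, to wrap must move to the unchecked variant to avoid false positives. A userspace analogue of the two behaviours (an illustration, not the kernel implementation):

	#include <limits.h>
	#include <stdlib.h>

	static int inc_checked(int *v)		/* ~ atomic_inc under PAX_REFCOUNT */
	{
		if (*v == INT_MAX)
			abort();		/* the kernel traps and logs instead */
		return ++*v;
	}

	static unsigned int inc_unchecked(unsigned int *v)	/* ~ _unchecked */
	{
		return ++*v;			/* free to wrap, like a sequence number */
	}

	int main(void)
	{
		int refs = 0;
		unsigned int seq = UINT_MAX;

		inc_checked(&refs);		/* fine: 1 */
		inc_unchecked(&seq);		/* wraps to 0, by design */
		return 0;
	}
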
80499diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
80500index 494da7f..6ce2ffd 100644
80501--- a/net/mac80211/cfg.c
80502+++ b/net/mac80211/cfg.c
80503@@ -2604,7 +2604,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
80504 else
80505 local->probe_req_reg--;
80506
80507- if (!local->open_count)
80508+ if (!local_read(&local->open_count))
80509 break;
80510
80511 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
80512diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
80513index 493e2e8..be76574 100644
80514--- a/net/mac80211/ieee80211_i.h
80515+++ b/net/mac80211/ieee80211_i.h
80516@@ -28,6 +28,7 @@
80517 #include <net/ieee80211_radiotap.h>
80518 #include <net/cfg80211.h>
80519 #include <net/mac80211.h>
80520+#include <asm/local.h>
80521 #include "key.h"
80522 #include "sta_info.h"
80523 #include "debug.h"
80524@@ -852,7 +853,7 @@ struct ieee80211_local {
80525 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
80526 spinlock_t queue_stop_reason_lock;
80527
80528- int open_count;
80529+ local_t open_count;
80530 int monitors, cooked_mntrs;
80531 /* number of interfaces with corresponding FIF_ flags */
80532 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
80533diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
80534index 0f5af91..4dba9e7 100644
80535--- a/net/mac80211/iface.c
80536+++ b/net/mac80211/iface.c
80537@@ -465,7 +465,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80538 break;
80539 }
80540
80541- if (local->open_count == 0) {
80542+ if (local_read(&local->open_count) == 0) {
80543 res = drv_start(local);
80544 if (res)
80545 goto err_del_bss;
80546@@ -508,7 +508,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80547 break;
80548 }
80549
80550- if (local->monitors == 0 && local->open_count == 0) {
80551+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
80552 res = ieee80211_add_virtual_monitor(local);
80553 if (res)
80554 goto err_stop;
80555@@ -616,7 +616,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80556 mutex_unlock(&local->mtx);
80557
80558 if (coming_up)
80559- local->open_count++;
80560+ local_inc(&local->open_count);
80561
80562 if (hw_reconf_flags)
80563 ieee80211_hw_config(local, hw_reconf_flags);
80564@@ -630,7 +630,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
80565 err_del_interface:
80566 drv_remove_interface(local, sdata);
80567 err_stop:
80568- if (!local->open_count)
80569+ if (!local_read(&local->open_count))
80570 drv_stop(local);
80571 err_del_bss:
80572 sdata->bss = NULL;
80573@@ -762,7 +762,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80574 }
80575
80576 if (going_down)
80577- local->open_count--;
80578+ local_dec(&local->open_count);
80579
80580 switch (sdata->vif.type) {
80581 case NL80211_IFTYPE_AP_VLAN:
80582@@ -818,7 +818,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80583
80584 ieee80211_recalc_ps(local, -1);
80585
80586- if (local->open_count == 0) {
80587+ if (local_read(&local->open_count) == 0) {
80588 if (local->ops->napi_poll)
80589 napi_disable(&local->napi);
80590 ieee80211_clear_tx_pending(local);
80591@@ -850,7 +850,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
80592 }
80593 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
80594
80595- if (local->monitors == local->open_count && local->monitors > 0)
80596+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
80597 ieee80211_add_virtual_monitor(local);
80598 }
80599
80600diff --git a/net/mac80211/main.c b/net/mac80211/main.c
80601index f57f597..e0a7c03 100644
80602--- a/net/mac80211/main.c
80603+++ b/net/mac80211/main.c
80604@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
80605 local->hw.conf.power_level = power;
80606 }
80607
80608- if (changed && local->open_count) {
80609+ if (changed && local_read(&local->open_count)) {
80610 ret = drv_config(local, changed);
80611 /*
80612 * Goal:
80613diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
80614index 5c572e7..ecf75ce 100644
80615--- a/net/mac80211/pm.c
80616+++ b/net/mac80211/pm.c
80617@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80618 struct ieee80211_sub_if_data *sdata;
80619 struct sta_info *sta;
80620
80621- if (!local->open_count)
80622+ if (!local_read(&local->open_count))
80623 goto suspend;
80624
80625 ieee80211_scan_cancel(local);
80626@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80627 cancel_work_sync(&local->dynamic_ps_enable_work);
80628 del_timer_sync(&local->dynamic_ps_timer);
80629
80630- local->wowlan = wowlan && local->open_count;
80631+ local->wowlan = wowlan && local_read(&local->open_count);
80632 if (local->wowlan) {
80633 int err = drv_suspend(local, wowlan);
80634 if (err < 0) {
80635@@ -143,7 +143,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
80636 drv_remove_interface(local, sdata);
80637
80638 /* stop hardware - this must stop RX */
80639- if (local->open_count)
80640+ if (local_read(&local->open_count))
80641 ieee80211_stop_device(local);
80642
80643 suspend:
80644diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
80645index 3313c11..bec9f17 100644
80646--- a/net/mac80211/rate.c
80647+++ b/net/mac80211/rate.c
80648@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
80649
80650 ASSERT_RTNL();
80651
80652- if (local->open_count)
80653+ if (local_read(&local->open_count))
80654 return -EBUSY;
80655
80656 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
80657diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
80658index c97a065..ff61928 100644
80659--- a/net/mac80211/rc80211_pid_debugfs.c
80660+++ b/net/mac80211/rc80211_pid_debugfs.c
80661@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
80662
80663 spin_unlock_irqrestore(&events->lock, status);
80664
80665- if (copy_to_user(buf, pb, p))
80666+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
80667 return -EFAULT;
80668
80669 return p;
80670diff --git a/net/mac80211/util.c b/net/mac80211/util.c
80671index 0151ae3..26709d3 100644
80672--- a/net/mac80211/util.c
80673+++ b/net/mac80211/util.c
80674@@ -1332,7 +1332,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
80675 }
80676 #endif
80677 /* everything else happens only if HW was up & running */
80678- if (!local->open_count)
80679+ if (!local_read(&local->open_count))
80680 goto wake_up;
80681
80682 /*
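
mac80211's open_count is a count of running interfaces consulted from the suspend/resume, rate-control and reconfiguration paths; converting it from a plain int to local_t routes every access through local_read/local_inc/local_dec, which, like the atomic_t family, are covered by PAX_REFCOUNT's overflow checking while staying cheap. The accessor discipline, in outline:

	#include <asm/local.h>

	static local_t open_count;		/* was: int open_count; */

	static int do_open_first(void)
	{
		if (local_read(&open_count) == 0)	/* was: if (!open_count) */
			/* drv_start(local); */;
		local_inc(&open_count);			/* was: open_count++; */
		return 0;
	}
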
80683diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
80684index 49e96df..63a51c3 100644
80685--- a/net/netfilter/Kconfig
80686+++ b/net/netfilter/Kconfig
80687@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
80688
80689 To compile it as a module, choose M here. If unsure, say N.
80690
80691+config NETFILTER_XT_MATCH_GRADM
80692+ tristate '"gradm" match support'
80693+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
80694+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
80695+ ---help---
80696+	  The gradm match allows matching on whether the grsecurity RBAC
80697+	  system is enabled. It is useful when iptables rules are applied
80698+	  early during boot, to block connections to the machine (except
80699+	  from a trusted host) while the RBAC system is still disabled.
80700+
80701 config NETFILTER_XT_MATCH_HASHLIMIT
80702 tristate '"hashlimit" match support'
80703 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
80704diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
80705index 3259697..54d5393 100644
80706--- a/net/netfilter/Makefile
80707+++ b/net/netfilter/Makefile
80708@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
80709 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
80710 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
80711 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
80712+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
80713 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
80714 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
80715 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
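
The Kconfig entry and Makefile line wire up xt_gradm.o, whose source the patch adds as a new file at the end of this excerpt. The module follows the standard xtables match shape; below is a generic skeleton of that shape (gradm_mt, gradm_mt_reg and the stub body are illustrative boilerplate, not the contents of the real xt_gradm.c):

	#include <linux/module.h>
	#include <linux/netfilter/x_tables.h>

	static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
	{
		/* the real match consults grsecurity RBAC state; stubbed here */
		return true;
	}

	static struct xt_match gradm_mt_reg __read_mostly = {
		.name     = "gradm",
		.revision = 0,
		.family   = NFPROTO_UNSPEC,
		.match    = gradm_mt,
		.me       = THIS_MODULE,
	};

	static int __init gradm_mt_init(void)
	{
		return xt_register_match(&gradm_mt_reg);
	}

	static void __exit gradm_mt_exit(void)
	{
		xt_unregister_match(&gradm_mt_reg);
	}

	module_init(gradm_mt_init);
	module_exit(gradm_mt_exit);
	MODULE_LICENSE("GPL");
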
80716diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
80717index 1548df9..98ad9b4 100644
80718--- a/net/netfilter/ipvs/ip_vs_conn.c
80719+++ b/net/netfilter/ipvs/ip_vs_conn.c
80720@@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
80721 /* Increase the refcnt counter of the dest */
80722 atomic_inc(&dest->refcnt);
80723
80724- conn_flags = atomic_read(&dest->conn_flags);
80725+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
80726 if (cp->protocol != IPPROTO_UDP)
80727 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
80728 flags = cp->flags;
80729@@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
80730 atomic_set(&cp->refcnt, 1);
80731
80732 atomic_set(&cp->n_control, 0);
80733- atomic_set(&cp->in_pkts, 0);
80734+ atomic_set_unchecked(&cp->in_pkts, 0);
80735
80736 atomic_inc(&ipvs->conn_count);
80737 if (flags & IP_VS_CONN_F_NO_CPORT)
80738@@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
80739
80740 /* Don't drop the entry if its number of incoming packets is not
80741 located in [0, 8] */
80742- i = atomic_read(&cp->in_pkts);
80743+ i = atomic_read_unchecked(&cp->in_pkts);
80744 if (i > 8 || i < 0) return 0;
80745
80746 if (!todrop_rate[i]) return 0;
80747diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
80748index 58918e2..4d177a9 100644
80749--- a/net/netfilter/ipvs/ip_vs_core.c
80750+++ b/net/netfilter/ipvs/ip_vs_core.c
80751@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
80752 ret = cp->packet_xmit(skb, cp, pd->pp);
80753 /* do not touch skb anymore */
80754
80755- atomic_inc(&cp->in_pkts);
80756+ atomic_inc_unchecked(&cp->in_pkts);
80757 ip_vs_conn_put(cp);
80758 return ret;
80759 }
80760@@ -1681,7 +1681,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
80761 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
80762 pkts = sysctl_sync_threshold(ipvs);
80763 else
80764- pkts = atomic_add_return(1, &cp->in_pkts);
80765+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80766
80767 if (ipvs->sync_state & IP_VS_STATE_MASTER)
80768 ip_vs_sync_conn(net, cp, pkts);
80769diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
80770index c4ee437..a774a74 100644
80771--- a/net/netfilter/ipvs/ip_vs_ctl.c
80772+++ b/net/netfilter/ipvs/ip_vs_ctl.c
80773@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
80774 ip_vs_rs_hash(ipvs, dest);
80775 write_unlock_bh(&ipvs->rs_lock);
80776 }
80777- atomic_set(&dest->conn_flags, conn_flags);
80778+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
80779
80780 /* bind the service */
80781 if (!dest->svc) {
80782@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80783 " %-7s %-6d %-10d %-10d\n",
80784 &dest->addr.in6,
80785 ntohs(dest->port),
80786- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80787+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80788 atomic_read(&dest->weight),
80789 atomic_read(&dest->activeconns),
80790 atomic_read(&dest->inactconns));
80791@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
80792 "%-7s %-6d %-10d %-10d\n",
80793 ntohl(dest->addr.ip),
80794 ntohs(dest->port),
80795- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
80796+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
80797 atomic_read(&dest->weight),
80798 atomic_read(&dest->activeconns),
80799 atomic_read(&dest->inactconns));
80800@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
80801
80802 entry.addr = dest->addr.ip;
80803 entry.port = dest->port;
80804- entry.conn_flags = atomic_read(&dest->conn_flags);
80805+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
80806 entry.weight = atomic_read(&dest->weight);
80807 entry.u_threshold = dest->u_threshold;
80808 entry.l_threshold = dest->l_threshold;
80809@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
80810 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
80811 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
80812 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
80813- (atomic_read(&dest->conn_flags) &
80814+ (atomic_read_unchecked(&dest->conn_flags) &
80815 IP_VS_CONN_F_FWD_MASK)) ||
80816 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
80817 atomic_read(&dest->weight)) ||
80818diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
80819index effa10c..9058928 100644
80820--- a/net/netfilter/ipvs/ip_vs_sync.c
80821+++ b/net/netfilter/ipvs/ip_vs_sync.c
80822@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
80823 cp = cp->control;
80824 if (cp) {
80825 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80826- pkts = atomic_add_return(1, &cp->in_pkts);
80827+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80828 else
80829 pkts = sysctl_sync_threshold(ipvs);
80830 ip_vs_sync_conn(net, cp->control, pkts);
80831@@ -758,7 +758,7 @@ control:
80832 if (!cp)
80833 return;
80834 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
80835- pkts = atomic_add_return(1, &cp->in_pkts);
80836+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
80837 else
80838 pkts = sysctl_sync_threshold(ipvs);
80839 goto sloop;
80840@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
80841
80842 if (opt)
80843 memcpy(&cp->in_seq, opt, sizeof(*opt));
80844- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80845+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
80846 cp->state = state;
80847 cp->old_state = cp->state;
80848 /*
80849diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
80850index cc4c809..50f8fe5 100644
80851--- a/net/netfilter/ipvs/ip_vs_xmit.c
80852+++ b/net/netfilter/ipvs/ip_vs_xmit.c
80853@@ -1202,7 +1202,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
80854 else
80855 rc = NF_ACCEPT;
80856 /* do not touch skb anymore */
80857- atomic_inc(&cp->in_pkts);
80858+ atomic_inc_unchecked(&cp->in_pkts);
80859 goto out;
80860 }
80861
80862@@ -1323,7 +1323,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
80863 else
80864 rc = NF_ACCEPT;
80865 /* do not touch skb anymore */
80866- atomic_inc(&cp->in_pkts);
80867+ atomic_inc_unchecked(&cp->in_pkts);
80868 goto out;
80869 }
80870
80871diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
80872index ec02168..f0caab6 100644
80873--- a/net/netfilter/nf_conntrack_core.c
80874+++ b/net/netfilter/nf_conntrack_core.c
80875@@ -1533,6 +1533,10 @@ err_extend:
80876 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
80877 #define DYING_NULLS_VAL ((1<<30)+1)
80878
80879+#ifdef CONFIG_GRKERNSEC_HIDESYM
80880+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
80881+#endif
80882+
80883 static int nf_conntrack_init_net(struct net *net)
80884 {
80885 int ret;
80886@@ -1546,7 +1550,11 @@ static int nf_conntrack_init_net(struct net *net)
80887 goto err_stat;
80888 }
80889
80890+#ifdef CONFIG_GRKERNSEC_HIDESYM
80891+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
80892+#else
80893 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
80894+#endif
80895 if (!net->ct.slabname) {
80896 ret = -ENOMEM;
80897 goto err_slabname;
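
Under CONFIG_GRKERNSEC_HIDESYM the per-namespace conntrack cache is no longer named "nf_conntrack_%p": slab cache names show up in /proc/slabinfo and under /sys/kernel/slab, and formatting the struct net pointer into the name leaks a kernel address, exactly what HIDESYM exists to prevent. A monotonically increasing id keeps the names unique without revealing anything, and it uses the unchecked counter because uniqueness, not overflow safety, is the requirement here:

	/* Sketch of the naming scheme introduced above. */
	static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx",
				     atomic_inc_return_unchecked(&conntrack_cache_id));
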
80898diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
80899index 92fd8ec..3f6ea4b 100644
80900--- a/net/netfilter/nfnetlink_log.c
80901+++ b/net/netfilter/nfnetlink_log.c
80902@@ -72,7 +72,7 @@ struct nfulnl_instance {
80903 };
80904
80905 static DEFINE_SPINLOCK(instances_lock);
80906-static atomic_t global_seq;
80907+static atomic_unchecked_t global_seq;
80908
80909 #define INSTANCE_BUCKETS 16
80910 static struct hlist_head instance_table[INSTANCE_BUCKETS];
80911@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
80912 /* global sequence number */
80913 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
80914 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
80915- htonl(atomic_inc_return(&global_seq))))
80916+ htonl(atomic_inc_return_unchecked(&global_seq))))
80917 goto nla_put_failure;
80918
80919 if (data_len) {
80920diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
80921new file mode 100644
80922index 0000000..c566332
80923--- /dev/null
80924+++ b/net/netfilter/xt_gradm.c
80925@@ -0,0 +1,51 @@
80926+/*
80927+ * gradm match for netfilter
80928